text stringlengths 1 1.05M |
|---|
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+512+512-old/model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+512+512-old/1024+0+512-N-VB-FILL-1 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function remove_all_but_nouns_and_verbs_fill_first_two_thirds_full --eval_function last_element_eval |
package info.javaspecproto;
import java.util.function.Consumer;
/**
 * Test double that lets a runner observe execution events as strings.
 * A no-op listener is installed by default so notifying never needs a
 * null check.
 */
abstract class ExecutionSpy {
  /** Discards every event; active whenever no real listener is registered. */
  private static final Consumer<String> NO_OP_LISTENER = event -> {};

  /** Current sink for event notifications; never null. */
  protected static Consumer<String> notifyEvent = NO_OP_LISTENER;

  /**
   * Installs the listener that future events are forwarded to.
   *
   * @param newConsumer listener to install; null resets to the no-op listener
   */
  public static void setEventListener(Consumer<String> newConsumer) {
    if (newConsumer == null) {
      notifyEvent = NO_OP_LISTENER;
    } else {
      notifyEvent = newConsumer;
    }
  }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.atlas.repository.impexp;
import org.apache.atlas.ApplicationProperties;
import org.apache.atlas.AtlasConstants;
import org.apache.atlas.AtlasException;
import org.apache.atlas.exception.AtlasBaseException;
import org.apache.atlas.model.impexp.AtlasServer;
import org.apache.atlas.model.impexp.AtlasExportRequest;
import org.apache.atlas.model.impexp.AtlasExportResult;
import org.apache.atlas.model.impexp.AtlasImportRequest;
import org.apache.atlas.model.impexp.AtlasImportResult;
import org.apache.atlas.model.impexp.ExportImportAuditEntry;
import org.apache.atlas.repository.Constants;
import org.apache.atlas.type.AtlasType;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import javax.inject.Inject;
import java.util.List;
import java.util.Map;
/**
 * Persists export/import operation audit entries and, when replication is
 * requested, maintains the AtlasServer entities and replication attributes
 * on the transferred entities.
 */
@Component
public class AuditsWriter {
private static final Logger LOG = LoggerFactory.getLogger(AuditsWriter.class);
// Fallback cluster name when AtlasConstants.CLUSTER_NAME_KEY is not configured.
private static final String CLUSTER_NAME_DEFAULT = "default";
// Separates the data-center prefix from the server name in a full server name.
private static final String DC_SERVER_NAME_SEPARATOR = "$";
private AtlasServerService atlasServerService;
private ExportImportAuditService auditService;
// Per-operation helpers that assemble and persist the audit records.
private ExportAudits auditForExport = new ExportAudits();
private ImportAudits auditForImport = new ImportAudits();
@Inject
public AuditsWriter(AtlasServerService atlasServerService, ExportImportAuditService auditService) {
this.atlasServerService = atlasServerService;
this.auditService = auditService;
}
/**
 * Records an audit entry for a completed export operation.
 *
 * @param userName            user that triggered the export
 * @param result              outcome of the export
 * @param startTime           operation start timestamp
 * @param endTime             operation end timestamp
 * @param entityCreationOrder guids of the exported entities
 * @throws AtlasBaseException if persisting the audit or replication data fails
 */
public void write(String userName, AtlasExportResult result,
long startTime, long endTime,
List<String> entityCreationOrder) throws AtlasBaseException {
auditForExport.add(userName, result, startTime, endTime, entityCreationOrder);
}
/**
 * Records an audit entry for a completed import operation.
 *
 * @param userName            user that triggered the import
 * @param result              outcome of the import
 * @param startTime           operation start timestamp
 * @param endTime             operation end timestamp
 * @param entityCreationOrder guids of the imported entities
 * @throws AtlasBaseException if persisting the audit or replication data fails
 */
public void write(String userName, AtlasImportResult result,
long startTime, long endTime,
List<String> entityCreationOrder) throws AtlasBaseException {
auditForImport.add(userName, result, startTime, endTime, entityCreationOrder);
}
// True when the request options carry the given replication marker key.
private boolean isReplicationOptionSet(Map<String, ? extends Object> options, String replicatedKey) {
return options.containsKey(replicatedKey);
}
/**
 * Stamps the transferred entities with the replication attribute pointing at
 * the given server. No-op when replication was not requested or nothing was
 * transferred.
 */
private void updateReplicationAttribute(boolean isReplicationSet,
String serverName, String serverFullName,
List<String> exportedGuids,
String attrNameReplicated,
long lastModifiedTimestamp) throws AtlasBaseException {
if (!isReplicationSet || CollectionUtils.isEmpty(exportedGuids)) {
return;
}
// The first guid is stored on the server as its replication reference entity.
AtlasServer server = saveServer(serverName, serverFullName, exportedGuids.get(0), lastModifiedTimestamp);
atlasServerService.updateEntitiesWithServer(server, exportedGuids, attrNameReplicated);
}
// Returns the cluster name stored under the given option key, or "" when absent.
private String getClusterNameFromOptions(Map options, String key) {
return options.containsKey(key)
? (String) options.get(key)
: StringUtils.EMPTY;
}
/**
 * Fetches (or creates) the server entity and updates its replication
 * bookkeeping: reference entity guid and last-modified timestamp.
 */
private AtlasServer saveServer(String clusterName, String serverFullName,
String entityGuid,
long lastModifiedTimestamp) throws AtlasBaseException {
AtlasServer server = atlasServerService.getCreateAtlasServer(clusterName, serverFullName);
server.setAdditionalInfoRepl(entityGuid, lastModifiedTimestamp);
if (LOG.isDebugEnabled()) {
LOG.debug("saveServer: {}", server);
}
return atlasServerService.save(server);
}
/**
 * Reads this deployment's cluster name from configuration, defaulting to
 * "default"; returns "" if the configuration cannot be loaded.
 */
public static String getCurrentClusterName() {
try {
return ApplicationProperties.get().getString(AtlasConstants.CLUSTER_NAME_KEY, CLUSTER_NAME_DEFAULT);
} catch (AtlasException e) {
LOG.error("getCurrentClusterName", e);
}
return StringUtils.EMPTY;
}
/**
 * Extracts the server-name portion from a "dc$server" style full name.
 * Input is returned untouched when it contains no separator.
 */
static String getServerNameFromFullName(String fullName) {
if (StringUtils.isEmpty(fullName) || !fullName.contains(DC_SERVER_NAME_SEPARATOR)) {
return fullName;
}
String[] splits = StringUtils.split(fullName, DC_SERVER_NAME_SEPARATOR);
if (splits == null || splits.length < 1) {
return "";
} else if (splits.length >= 2) {
// "dc$server" -> the server part
return splits[1];
} else {
// separator present but only one token survived the split
return splits[0];
}
}
// Ensures an AtlasServer entity exists for this deployment's own cluster.
private void saveCurrentServer() throws AtlasBaseException {
atlasServerService.getCreateAtlasServer(getCurrentClusterName(), getCurrentClusterName());
}
/** Builds and persists the audit trail for export operations. */
private class ExportAudits {
private AtlasExportRequest request;
private String targetServerName;
private String optionKeyReplicatedTo;
private boolean replicationOptionState;
private String targetServerFullName;
public void add(String userName, AtlasExportResult result,
long startTime, long endTime,
List<String> entityGuids) throws AtlasBaseException {
optionKeyReplicatedTo = AtlasExportRequest.OPTION_KEY_REPLICATED_TO;
request = result.getRequest();
replicationOptionState = isReplicationOptionSet(request.getOptions(), optionKeyReplicatedTo);
saveCurrentServer();
targetServerFullName = getClusterNameFromOptions(request.getOptions(), optionKeyReplicatedTo);
targetServerName = getServerNameFromFullName(targetServerFullName);
auditService.add(userName, getCurrentClusterName(), targetServerName,
ExportImportAuditEntry.OPERATION_EXPORT,
AtlasType.toJson(result), startTime, endTime, !entityGuids.isEmpty());
// Failed exports are audited above but must not update replication attributes.
if (result.getOperationStatus() == AtlasExportResult.OperationStatus.FAIL) {
return;
}
updateReplicationAttribute(replicationOptionState, targetServerName, targetServerFullName,
entityGuids, Constants.ATTR_NAME_REPLICATED_TO, result.getChangeMarker());
}
}
/** Builds and persists the audit trail for import operations. */
private class ImportAudits {
private AtlasImportRequest request;
private boolean replicationOptionState;
private String sourceServerName;
private String optionKeyReplicatedFrom;
private String sourceServerFullName;
public void add(String userName, AtlasImportResult result,
long startTime, long endTime,
List<String> entityGuids) throws AtlasBaseException {
optionKeyReplicatedFrom = AtlasImportRequest.OPTION_KEY_REPLICATED_FROM;
request = result.getRequest();
replicationOptionState = isReplicationOptionSet(request.getOptions(), optionKeyReplicatedFrom);
saveCurrentServer();
sourceServerFullName = getClusterNameFromOptions(request.getOptions(), optionKeyReplicatedFrom);
sourceServerName = getServerNameFromFullName(sourceServerFullName);
auditService.add(userName,
sourceServerName, getCurrentClusterName(),
ExportImportAuditEntry.OPERATION_IMPORT,
AtlasType.toJson(result), startTime, endTime, !entityGuids.isEmpty());
// Failed imports are audited above but must not update replication attributes.
if(result.getOperationStatus() == AtlasImportResult.OperationStatus.FAIL) {
return;
}
updateReplicationAttribute(replicationOptionState, sourceServerName, sourceServerFullName, entityGuids,
Constants.ATTR_NAME_REPLICATED_FROM, result.getExportResult().getChangeMarker());
}
}
}
|
<filename>components/connectivity/TencentCloud_SDK/source/src/coap/qcloud_coap_common.c<gh_stars>1-10
/*
* Tencent is pleased to support the open source community by making IoT Hub available.
* Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "qcloud.h"
/**
* @brief Free an option structure that was allocated by coap_msg_op_new
*
* @param[in,out] op Pointer to the option structure
*/
__QCLOUD_STATIC__ void coap_message_option_destroy(coap_msg_option_t *option)
{
QCLOUD_FUNC_ENTRY
/* release the copied option value first, then the node itself */
if (option->val) {
osal_free(option->val);
}
osal_free(option);
QCLOUD_FUNC_EXIT
}
/**
* @brief Deinitialise an option linked-list structure
*
* @param[in,out] list Pointer to an option linked-list structure
*/
__QCLOUD_STATIC__ void coap_message_option_list_destroy(coap_message_t *message)
{
QCLOUD_FUNC_ENTRY
QCLOUD_POINTER_SANITY_CHECK_RTN(message);
qcloud_list_t *curr, *next;
coap_msg_option_t *option;
/* nothing to free for an empty list.
 * NOTE(review): early exit relies on QCLOUD_FUNC_EXIT expanding to a
 * return — confirm; falling through would be harmless since the loop
 * below iterates zero times on an empty list. */
if (qcloud_list_empty(&message->option_list)) {
QCLOUD_FUNC_EXIT;
}
/* _SAFE variant caches 'next', so nodes may be freed while iterating */
QCLOUD_LIST_FOR_EACH_SAFE(curr, next, &message->option_list) {
option = QCLOUD_LIST_ENTRY(curr, coap_msg_option_t, list);
coap_message_option_destroy(option);
}
QCLOUD_FUNC_EXIT;
}
/**
 * @brief Initialise a message: set the protocol version and start with an
 *        empty option list. Other fields are left untouched.
 */
__QCLOUD_INTERNAL__ void coap_message_init(coap_message_t *message)
{
message->version = COAP_VERSION;
qcloud_list_init(&message->option_list);
}
/**
 * @brief Serialise the client's current message token into buf
 *        (4 bytes, least-significant byte first) and advance the counter.
 *
 * @param[in]  client  client owning the monotonically increasing token
 * @param[out] buf     destination; must hold at least sizeof(uint32_t) bytes
 * @return number of token bytes written (always sizeof(uint32_t))
 */
__QCLOUD_INTERNAL__ int coap_message_token_get(qcloud_coap_client_t *client, char *buf)
{
uint32_t token;
token = client->message_token;
buf[0] = ((token & 0x00FF) >> 0);
buf[1] = ((token & 0xFF00) >> 8);
buf[2] = ((token & 0xFF0000) >> 16);
buf[3] = ((token & 0xFF000000) >> 24);
/* next message gets a distinct token */
++client->message_token;
return sizeof(uint32_t);
}
/**
 * @brief Set the message type field. No validation is performed here.
 */
__QCLOUD_INTERNAL__ qcloud_err_t coap_message_type_set(coap_message_t *message, uint8_t type)
{
QCLOUD_FUNC_ENTRY
message->type = type;
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_SUCCESS)
}
/**
 * @brief Set the message code (class + detail) after range-checking each
 *        part against its protocol maximum.
 *
 * @return QCLOUD_ERR_SUCCESS, or QCLOUD_ERR_INVAL when either part is out of range
 */
__QCLOUD_INTERNAL__ qcloud_err_t coap_message_code_set(coap_message_t *message, uint32_t code_class, uint32_t code_detail)
{
QCLOUD_FUNC_ENTRY
if (code_class > COAP_MSG_CODE_CLASS_MAX) {
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_INVAL)
}
if (code_detail > COAP_MSG_CODE_DETAIL_MAX) {
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_INVAL)
}
message->code_class = code_class;
message->code_detail = code_detail;
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_SUCCESS)
}
/**
 * @brief Set the message id after range-checking against COAP_MSG_ID_MAX.
 *
 * @return QCLOUD_ERR_SUCCESS, or QCLOUD_ERR_INVAL when id is out of range
 */
__QCLOUD_INTERNAL__ qcloud_err_t coap_message_id_set(coap_message_t *message, uint16_t id)
{
QCLOUD_FUNC_ENTRY
if (id > COAP_MSG_ID_MAX) {
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_INVAL)
}
message->id = id;
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_SUCCESS)
}
/**
 * @brief Copy a token of up to COAP_MSG_TOKEN_MAX bytes into the message.
 *
 * @return QCLOUD_ERR_SUCCESS, or QCLOUD_ERR_INVAL when len exceeds the maximum
 */
__QCLOUD_INTERNAL__ qcloud_err_t coap_message_token_set(coap_message_t *message, char *buf, uint8_t len)
{
QCLOUD_FUNC_ENTRY
if (len > COAP_MSG_TOKEN_MAX) {
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_INVAL)
}
memcpy(message->token, buf, len);
message->token_len = len;
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_SUCCESS)
}
/**
 * @brief Copy a payload into the message's existing payload buffer.
 *
 * Fails when len > 0 but no payload buffer is attached. A len of 0 simply
 * clears payload_len.
 *
 * NOTE(review): memcpy assumes message->payload was allocated with capacity
 * >= len by the caller — confirm against the allocation sites.
 *
 * @return QCLOUD_ERR_SUCCESS, or QCLOUD_ERR_FAILURE when no buffer exists
 */
__QCLOUD_INTERNAL__ qcloud_err_t coap_message_payload_set(coap_message_t *message, char *buf, size_t len)
{
QCLOUD_FUNC_ENTRY
if (len > 0 && !message->payload) {
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_FAILURE)
}
message->payload_len = 0;
if (len > 0) {
memcpy(message->payload, buf, len);
message->payload_len = len;
}
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_SUCCESS)
}
/**
 * @brief Allocate and initialise a single CoAP option node.
 *
 * Takes a private copy of @p val, so the caller's buffer may be released
 * after this call. Zero-length option values are valid in CoAP, so no value
 * buffer is allocated when len == 0 — this avoids depending on malloc(0)
 * semantics (the previous code failed spuriously when osal_malloc(0)
 * returned NULL, and passed a possibly-NULL val to memcpy).
 *
 * @param[in] option_code  CoAP option number
 * @param[in] len          length of the option value in bytes (may be 0)
 * @param[in] val          option value to copy (ignored when len == 0)
 * @return pointer to the new option, or NULL on allocation failure
 */
__QCLOUD_INTERNAL__ coap_msg_option_t *coap_message_option_construct(uint16_t option_code, uint32_t len, const char *val)
{
    QCLOUD_FUNC_ENTRY

    char *this_val = NULL;
    coap_msg_option_t *option = NULL;

    option = (coap_msg_option_t *)osal_malloc(sizeof(coap_msg_option_t));
    if (!option) {
        QCLOUD_LOG_E("memory alloc failed");
        return NULL;
    }

    /* copy the value only when there is one; option->val stays NULL for
     * zero-length values, which coap_message_option_destroy handles */
    if (len > 0) {
        this_val = (char *)osal_malloc(len);
        if (!this_val) {
            osal_free(option);
            QCLOUD_LOG_E("memory alloc failed");
            return NULL;
        }
        memcpy(this_val, val, len);
    }

    option->option_code = option_code;
    option->val_len = len;
    option->val = this_val;
    qcloud_list_init(&option->list);

    return option;
}
/**
 * @brief Insert an option node into the message's option list, keeping the
 *        list sorted by option_code.
 *
 * NOTE(review): qcloud_list_add_tail(&option->list, curr) presumably links
 * the node just before 'curr' — confirm; with the '<=' comparison a new
 * option is inserted ahead of existing options of equal code.
 */
__QCLOUD_STATIC__ void coap_message_option_do_add(coap_message_t *message, coap_msg_option_t *option)
{
coap_msg_option_t *iter;
qcloud_list_t *curr, *option_list;
option_list = &message->option_list;
/* keep option_code in ascending order */
QCLOUD_LIST_FOR_EACH(curr, option_list) {
iter = QCLOUD_LIST_ENTRY(curr, coap_msg_option_t, list);
if (option->option_code <= iter->option_code) {
break;
}
}
qcloud_list_add_tail(&option->list, curr);
}
/**
 * @brief Construct an option (copying val) and insert it into the message's
 *        sorted option list.
 *
 * @return QCLOUD_ERR_SUCCESS, QCLOUD_ERR_INVAL on NULL message, or
 *         QCLOUD_ERR_FAILURE when allocation fails
 */
__QCLOUD_INTERNAL__ qcloud_err_t coap_message_option_add(coap_message_t *message, coap_msg_opt_code_t option_code, uint32_t len, const char *val)
{
QCLOUD_FUNC_ENTRY
QCLOUD_POINTER_SANITY_CHECK(message, QCLOUD_ERR_INVAL);
coap_msg_option_t *option = NULL;
option = coap_message_option_construct(option_code, len, val);
if (!option) {
QCLOUD_LOG_E("option alloc failed.");
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_FAILURE)
}
coap_message_option_do_add(message, option);
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_SUCCESS)
}
/**
 * @brief Attach the response callback invoked for this message.
 */
__QCLOUD_INTERNAL__ qcloud_err_t coap_message_callback_set(coap_message_t *message, coap_resp_callback_t callback)
{
QCLOUD_FUNC_ENTRY
message->resp_cb = callback;
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_SUCCESS)
}
/**
 * @brief Attach an opaque user context pointer to the message (not owned;
 *        the caller manages its lifetime).
 */
__QCLOUD_INTERNAL__ qcloud_err_t coap_message_context_set(coap_message_t *message, void *context)
{
QCLOUD_FUNC_ENTRY
message->context = context;
QCLOUD_FUNC_EXIT_RC(QCLOUD_ERR_SUCCESS)
}
/**
 * @brief Release everything a message owns (option list, payload buffer)
 *        and zero the struct so it can be safely re-initialised.
 */
__QCLOUD_INTERNAL__ void coap_message_destroy(coap_message_t *message)
{
QCLOUD_FUNC_ENTRY
coap_message_option_list_destroy(message);
if (message->payload) {
osal_free(message->payload);
}
/* clear all fields, including the now-dangling payload pointer */
memset(message, 0, sizeof(coap_message_t));
QCLOUD_FUNC_EXIT
}
/**
 * @brief Log every header field of a message for debugging.
 *
 * NOTE(review): payload and token are printed with %s, which requires
 * NUL-terminated buffers; token is token_len raw bytes — confirm the
 * buffers are terminated before relying on this output.
 */
__QCLOUD_INTERNAL__ void coap_message_dump(coap_message_t* message)
{
QCLOUD_LOG_I("version = %u", message->version);
QCLOUD_LOG_I("type = %d", message->type);
QCLOUD_LOG_I("code_class = %u", message->code_class);
QCLOUD_LOG_I("code_detail = %u", message->code_detail);
QCLOUD_LOG_I("id = %d", message->id);
QCLOUD_LOG_I("payload_len = %d", message->payload_len);
QCLOUD_LOG_I("payload: %s", message->payload);
QCLOUD_LOG_I("token_len = %u", message->token_len);
QCLOUD_LOG_I("token: %s", message->token);
}
#ifdef __cplusplus
}
#endif
|
#!/bin/bash
# process_dems.sh
# Given a high-resolution DEM, derive lower res terrain rasters
# (resampled DEM, slope, aspect, cos(aspect), sin(aspect)).
# author: perrygeo@gmail.com

# Abort on the first failed processing step instead of cascading errors.
set -e

# Configuration:
FULLDEM="/g/Basedata/PNW/terrain/dem_prjr6/hdr.adf"
OUTDIR="resamp27"
# x3
OUTRES="27.2480309603"
# x4 36.330707947
# x5 45.4133849338
# OUTRES="45.4133849338"
#---------------------------------------------------------------#

# Start from a clean output directory (quoted to survive odd paths).
rm -rf "$OUTDIR"
mkdir "$OUTDIR"

NEWDEM="$OUTDIR/dem.tif"
NEWSLOPE="$OUTDIR/slope.tif"
NEWASPECT="$OUTDIR/aspect.tif"
NEWCOS="$OUTDIR/cos_aspect.tif"
NEWSIN="$OUTDIR/sin_aspect.tif"

# Tiled GTiff creation options shared by every output raster.
TILEOPTS=(-co "TILED=YES" -co "BLOCKYSIZE=512" -co "BLOCKXSIZE=512")

# Resample DEM to the target resolution ($OUTRES m) with integer type, tiled
gdalwarp -of GTiff -r cubic -ot Int16 \
    "${TILEOPTS[@]}" \
    -tr "$OUTRES" "$OUTRES" "$FULLDEM" "$NEWDEM"

# Create derived terrain rasters (float intermediates)
gdaldem slope -p "$NEWDEM" "$NEWSLOPE.float.tif" "${TILEOPTS[@]}"
gdaldem aspect "$NEWDEM" "$NEWASPECT.float.tif" "${TILEOPTS[@]}"
gdal_calc.py -A "$NEWASPECT.float.tif" --calc "cos(radians(A))" --format "GTiff" --outfile "$NEWCOS.striped.tif"
gdal_calc.py -A "$NEWASPECT.float.tif" --calc "sin(radians(A))" --format "GTiff" --outfile "$NEWSIN.striped.tif"

# Convert slope and aspect to Int16
for rast in "$OUTDIR"/*.float.tif; do
    # strip everything after the first dot: resamp27/slope.tif.float.tif -> resamp27/slope
    BASE="${rast%%.*}"
    gdal_translate -ot Int16 "$rast" "$BASE.tif" "${TILEOPTS[@]}"
    rm "$rast"
done

# Convert cos and sin to tiled block GTiffs
for rast in "$OUTDIR"/*.striped.tif; do
    BASE="${rast%%.*}"
    gdal_translate "$rast" "$BASE.tif" "${TILEOPTS[@]}"
    rm "$rast"
done

ls -alth "$OUTDIR"
|
<reponame>Boatdude55/staging-website<filename>closure-compiler/src/com/google/javascript/jscomp/js/es6/array/flatmap.js<gh_stars>1-10
/*
* Copyright 2018 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @fileoverview @suppress {uselessCode} */
'require util/polyfill';

$jscomp.polyfill('Array.prototype.flatMap', function(orig) {
  // A native implementation is available — keep it.
  if (orig) return orig;

  /**
   * Polyfills Array.prototype.flatMap.
   *
   * Maps each element through `callback` and flattens array results by a
   * single level; non-array results are appended unchanged.
   *
   * @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/flatMap
   *
   * @param {function(this: THIS, T, number, !IArrayLike<T>): S|!Array<S>} callback
   * @param {THIS=} thisArg
   * @return {!Array<S>}
   * @this {!IArrayLike<T>}
   * @template T, THIS, S
   * @suppress {reportUnknownTypes}
   */
  var flatMap = function(callback, thisArg) {
    var flattened = [];
    for (var index = 0; index < this.length; index++) {
      var value = callback.call(thisArg, this[index], index, this);
      if (Array.isArray(value)) {
        flattened = flattened.concat(value);
      } else {
        flattened.push(value);
      }
    }
    return flattened;
  };
  return flatMap;
}, 'es9', 'es5');
|
"use strict";
/**
* Since only a single constructor is being exported as module.exports this comment isn't documented.
* The class and module are the same thing, the contructor comment takes precedence.
* @module SVGDrawnItemFactory
*/
var paper = require('paper/dist/paper-core.js');
/**
* Factory which delegates to the paper.js RegularPoloygon constructor
* @constructor
* @param {external:cartesian-hexagonal} hexDefinition - The DTO defining the hex <--> cartesian relation
* @see {@link http://paperjs.org/reference/path/#path-regularpolygon-object | RegularPolygon }
*/
module.exports = function SVGDrawnItemFactory(hexDefinition) {
// kept so getDrawnItem can size items relative to the hex grid dimensions
this.hexDefinition = hexDefinition;
};
/**
* Return an arrow path item for the given object
* @override
* @param {Object} item - The DTO to produce a paper.js drawn item for
* @param {string} item.svg - The svg string to import into the project
* @param {integer} item.scale - Scale the svg to scale*hexDefinition.hexagon_edge_to_edge_width
* @param {integer} item.rotation - The angle in degrees to rotate, 0 degrees points ???
* @param {onClick=} item.onClick - The callback to use when this item is clicked
* @returns {external:Item} The paper.js Item for the given parameters
* @implements {DrawnItemFactory#getDrawnItem}
* @todo consider using symbols for performance
*/
module.exports.prototype.getDrawnItem = function(item) {
// NOTE(review): presumably a workaround for paper-core builds that lack
// setRampPoint, which importSVG may call — confirm against paper.js version
if (!paper.Item.prototype.setRampPoint) {
paper.Item.prototype.setRampPoint = function () {};
}
var drawnItem = paper.project.importSVG(item.svg);
// centre the imported item on its own bounding box
drawnItem.position = new paper.Point(drawnItem.bounds.width/2, drawnItem.bounds.height/2);
// color manipulation are delegated to the svg string production
//TODO cache Symbols for imported SVG
drawnItem.rotate(item.rotation);
// scale so the item spans item.scale * hexagon edge-to-edge width on both axes
drawnItem.scale(item.scale*this.hexDefinition.hexagon_edge_to_edge_width/drawnItem.bounds.width,
item.scale*this.hexDefinition.hexagon_edge_to_edge_width/drawnItem.bounds.height );
// apply the board's vertical squash factor
drawnItem.scale(1, this.hexDefinition.vScale);
drawnItem.data.item = item;
// Add shadow
drawnItem.shadowColor = new paper.Color(0, 0, 0);
// Set the shadow blur radius to 10:
drawnItem.shadowBlur = 10;
// Offset the shadow by { x: 0, y: 5 }
drawnItem.shadowOffset = new paper.Point(0, 5);
return drawnItem;
};
|
#!/bin/bash -f
#*********************************************************************************************************
# Vivado (TM) v2017.4 (64-bit)
#
# Filename : Memory_in.sh
# Simulator : Synopsys Verilog Compiler Simulator
# Description : Simulation script for compiling, elaborating and verifying the project source files.
# The script will automatically create the design libraries sub-directories in the run
# directory, add the library logical mappings in the simulator setup file, create default
# 'do/prj' file, execute compilation, elaboration and simulation steps.
#
# Generated by Vivado on Sun Oct 31 10:48:37 +0100 2021
# SW Build 2086221 on Fri Dec 15 20:55:39 MST 2017
#
# Copyright 1986-2017 Xilinx, Inc. All Rights Reserved.
#
# usage: Memory_in.sh [-help]
# usage: Memory_in.sh [-lib_map_path]
# usage: Memory_in.sh [-noclean_files]
# usage: Memory_in.sh [-reset_run]
#
# Prerequisite:- To compile and run simulation, you must compile the Xilinx simulation libraries using the
# 'compile_simlib' TCL command. For more information about this command, run 'compile_simlib -help' in the
# Vivado Tcl Shell. Once the libraries have been compiled successfully, specify the -lib_map_path switch
# that points to these libraries and rerun export_simulation. For more information about this switch please
# type 'export_simulation -help' in the Tcl shell.
#
# You can also point to the simulation libraries by either replacing the <SPECIFY_COMPILED_LIB_PATH> in this
# script with the compiled library directory path or specify this path with the '-lib_map_path' switch when
# executing this script. Please type 'Memory_in.sh -help' for more information.
#
# Additional references - 'Xilinx Vivado Design Suite User Guide:Logic simulation (UG900)'
#
#*********************************************************************************************************
# Directory path for design sources and include directories (if any) wrt this path
ref_dir="."
# Override directory with 'export_sim_ref_dir' env path value if set in the shell
if [[ (! -z "$export_sim_ref_dir") && ($export_sim_ref_dir != "") ]]; then
ref_dir="$export_sim_ref_dir"
fi
# Command line options passed to the Synopsys VCS tool chain below
vlogan_opts="-full64"
vhdlan_opts="-full64"
vcs_elab_opts="-full64 -debug_pp -t ps -licqueue -l elaborate.log"
vcs_sim_opts="-ucli -licqueue -l simulate.log"
# Design libraries compiled by this project (mapped in synopsys_sim.setup)
design_libs=(xil_defaultlib xpm)
# Simulation root library directory
sim_lib_dir="vcs_lib"
# Script info
echo -e "Memory_in.sh - Script generated by export_simulation (Vivado v2017.4 (64-bit)-id)\n"
# Main steps
# Top-level driver: validate CLI args, create library mappings/directories,
# then compile, elaborate and simulate the design in sequence.
run()
{
check_args $# $1
setup $1 $2
compile
elaborate
simulate
}
# RUN_STEP: <compile>
# Compile all design sources into their mapped libraries, appending tool
# output to vlogan.log / vhdlan.log.
compile()
{
# Compile design files
vlogan -work xil_defaultlib $vlogan_opts -sverilog \
"C:/Xilinx/Vivado/2017.4/data/ip/xpm/xpm_memory/hdl/xpm_memory.sv" \
2>&1 | tee -a vlogan.log
vhdlan -work xpm $vhdlan_opts \
"C:/Xilinx/Vivado/2017.4/data/ip/xpm/xpm_VCOMP.vhd" \
2>&1 | tee -a vhdlan.log
vhdlan -work xil_defaultlib $vhdlan_opts \
"$ref_dir/../../../../ConvSimd.srcs/sources_1/ip/Memory_in/Memory_in_sim_netlist.vhdl" \
2>&1 | tee -a vhdlan.log
# glbl.v provides Xilinx global signals required by gate-level simulation
vlogan -work xil_defaultlib $vlogan_opts +v2k \
glbl.v \
2>&1 | tee -a vlogan.log
}
# RUN_STEP: <elaborate>
# Elaborate the compiled design into the Memory_in_simv simulation executable.
elaborate()
{
vcs $vcs_elab_opts xil_defaultlib.Memory_in xil_defaultlib.glbl -o Memory_in_simv
}
# RUN_STEP: <simulate>
# Run the elaborated simulation executable, driven by simulate.do.
simulate()
{
./Memory_in_simv $vcs_sim_opts -do simulate.do
}
# STEP: setup
setup()
{
case $1 in
"-lib_map_path" )
if [[ ($2 == "") ]]; then
echo -e "ERROR: Simulation library directory path not specified (type \"./Memory_in.sh -help\" for more information)\n"
exit 1
fi
create_lib_mappings $2
;;
"-reset_run" )
reset_run
echo -e "INFO: Simulation run files deleted.\n"
exit 0
;;
"-noclean_files" )
# do not remove previous data
;;
* )
create_lib_mappings $2
esac
create_lib_dir
# Add any setup/initialization commands here:-
# <user specific commands>
}
# Define design library mappings
# Write synopsys_sim.setup with one "lib:dir" mapping per design library.
# An existing file is kept as-is unless a new lib_map_path is supplied,
# in which case it is regenerated and an OTHERS= include line is appended.
create_lib_mappings()
{
file="synopsys_sim.setup"
if [[ -e $file ]]; then
if [[ ($1 == "") ]]; then
return
else
rm -rf $file
fi
fi
touch $file
lib_map_path=""
if [[ ($1 != "") ]]; then
lib_map_path="$1"
fi
for (( i=0; i<${#design_libs[*]}; i++ )); do
lib="${design_libs[i]}"
mapping="$lib:$sim_lib_dir/$lib"
echo $mapping >> $file
done
if [[ ($lib_map_path != "") ]]; then
incl_ref="OTHERS=$lib_map_path/synopsys_sim.setup"
echo $incl_ref >> $file
fi
}
# Create design library directory paths
# Recreate the simulation library tree: wipe any previous run's directories,
# then make one subdirectory under $sim_lib_dir per design library.
create_lib_dir()
{
  rm -rf "$sim_lib_dir"
  local lib
  for lib in "${design_libs[@]}"; do
    mkdir -p "$sim_lib_dir/$lib"
  done
}
# Delete generated data from the previous run
# Delete all generated artifacts from a previous run (logs, executables,
# tool scratch directories) and rebuild an empty library tree.
reset_run()
{
files_to_remove=(ucli.key Memory_in_simv vlogan.log vhdlan.log compile.log elaborate.log simulate.log .vlogansetup.env .vlogansetup.args .vcs_lib_lock scirocco_command.log 64 AN.DB csrc Memory_in_simv.daidir)
for (( i=0; i<${#files_to_remove[*]}; i++ )); do
file="${files_to_remove[i]}"
if [[ -e $file ]]; then
rm -rf $file
fi
done
create_lib_dir
}
# Check command line arguments
# Validate the forwarded CLI switch ($2) when exactly one argument ($1)
# was given; print usage and exit for -help/-h.
check_args()
{
if [[ ($1 == 1 ) && ($2 != "-lib_map_path" && $2 != "-noclean_files" && $2 != "-reset_run" && $2 != "-help" && $2 != "-h") ]]; then
echo -e "ERROR: Unknown option specified '$2' (type \"./Memory_in.sh -help\" for more information)\n"
exit 1
fi
if [[ ($2 == "-help" || $2 == "-h") ]]; then
usage
fi
}
# Script usage
# Print the help text for this script and exit non-zero.
usage()
{
msg="Usage: Memory_in.sh [-help]\n\
Usage: Memory_in.sh [-lib_map_path]\n\
Usage: Memory_in.sh [-reset_run]\n\
Usage: Memory_in.sh [-noclean_files]\n\n\
[-help] -- Print help information for this script\n\n\
[-lib_map_path <path>] -- Compiled simulation library directory path. The simulation library is compiled\n\
using the compile_simlib tcl command. Please see 'compile_simlib -help' for more information.\n\n\
[-reset_run] -- Recreate simulator setup files and library mappings for a clean run. The generated files\n\
from the previous run will be removed. If you don't want to remove the simulator generated files, use the\n\
-noclean_files switch.\n\n\
[-noclean_files] -- Reset previous run, but do not remove simulator generated files from the previous run.\n\n"
echo -e $msg
exit 1
}
# Launch script, forwarding up to two CLI args (e.g. -lib_map_path <path>)
run $1 $2
|
<reponame>chlds/util
/* **** Notes
Sleep for Linux.
*/
# define CAR
# include <stdio.h>
# include <errno.h>
# include <time.h>
# include "./../../../incl/config.h"
// # include "./incl/car.h"
/* msleep: intended millisecond sleep for Linux.
 *
 * NOTE(review): the entire nanosleep-based implementation below is disabled
 * by the comment toggle (the block opens with "/*" and closes with "//*" + "/",
 * so adding one leading slash re-enables it). As written, the function does
 * not sleep at all and always returns 1. Confirm whether the toggle is
 * intentional before shipping.
 */
signed(__cdecl msleep(signed(arg))) {
/*
auto struct timespec t;
auto signed long criterion = (1000*(1000));
auto signed long m = (1000);
if(arg<(0x00)) arg = (0x01+(~arg));
t.tv_nsec = (0x00);
t.tv_sec = (0x00);
if(arg<(m)) t.tv_nsec = (criterion*(arg));
else {
t.tv_sec = (arg/(m));
t.tv_nsec = (criterion*(arg%(m)));
}
if(DBG) {
printf("[t.tv_sec : %ld] \n",t.tv_sec);
printf("[t.tv_nsec: %ld] \n",t.tv_nsec);
}
return(nanosleep(&t,0x00));
//*/
return(0x01);
}
|
<reponame>doctorpangloss/gogradle<filename>src/main/java/com/github/blindpirate/gogradle/core/dependency/produce/external/trash/VendorDotConfYamlModel.java
/*
* Copyright 2016-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.github.blindpirate.gogradle.core.dependency.produce.external.trash;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.github.blindpirate.gogradle.util.Assert;
import com.github.blindpirate.gogradle.util.MapUtils;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Jackson model for a govendor-style vendor.conf YAML file: an optional
 * "import" list of package declarations.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class VendorDotConfYamlModel {
    @JsonProperty("import")
    private List<ImportBean> importBeans;

    /**
     * Converts every declared import into a gogradle build-notation map.
     *
     * @return one notation map per import; empty when the file declared none
     */
    public List<Map<String, Object>> toBuildNotations() {
        // A vendor.conf without an "import" section leaves importBeans null;
        // previously this threw a NullPointerException.
        if (importBeans == null) {
            return Collections.emptyList();
        }
        return importBeans.stream().map(ImportBean::toNotation).collect(Collectors.toList());
    }

    /** One entry of the "import" list: package name plus optional version/repo. */
    @JsonIgnoreProperties(ignoreUnknown = true)
    public static class ImportBean {
        @JsonProperty("package")
        private String packageX;
        @JsonProperty("version")
        private String version;
        @JsonProperty("repo")
        private String repo;

        /**
         * Builds the notation map for this import.
         *
         * @return map with name/url/transitive plus version info; null-valued
         *         entries (e.g. missing repo) are omitted by asMapWithoutNull
         * @throws IllegalArgumentException (via Assert) when package is blank
         */
        public Map<String, Object> toNotation() {
            Assert.isNotBlank(packageX);
            Map<String, Object> ret = MapUtils.asMapWithoutNull(
                    "name", packageX,
                    "url", repo,
                    "transitive", false);
            SimpleConfFileHelper.determineVersionAndPutIntoMap(ret, version);
            return ret;
        }
    }
}
|
<filename>Visual Studio 2010/Projects/bjarneStroustrupC++PartIV/bjarneStroustrupC++PartIV/Chapter24Exercise6.cpp
/*
TITLE dot_product() and scale_and_add() Chapter24Exercise6.cpp
COMMENT
Objective: In the Gaussian elimination example, replace the vector operations
dot_product() and scale_and_add() with loops. Test, and comment on
the clarity of the code.
Input: -
Output: -
Author: <NAME>
Date: 06.05.2017
*/
#include <iostream>
#include <sstream>
#include "Matrix.h"
#include "MatrixIO.h"
#include "Chapter24Exercise6.h"
int main()
{
// 2x2 system A*x = b to be solved by Gaussian elimination
double arr_A[2][2] = { { 3, 2 }, { 2, 3} };
double arr_b[2] = { 3, 15 };
Matrix A(2,2);
Vector b(2);
Vector x(2);
// NOTE(review): row-wise assignment from double* — presumably supported by
// the book's Matrix row proxy; confirm against Matrix.h
A[0] = arr_A[0];
A[1] = arr_A[1];
b = arr_b;
std::cout <<"A = "<< A <<'\n';
std::cout <<"b = "<< b <<'\n';
try
{
x = classical_gaussian_elimination(A, b);
}
catch(std::runtime_error& r)
{
// elimination throws on singular/ill-conditioned input; pause so the
// console window stays visible before exiting
std::cerr <<"Runtime error::"<< r.what();
getchar();
exit(1);
}
std::cout <<"x = "<< x <<'\n';
getchar();
} |
<filename>js/arsenal-history.js
// set dimensions
// set dimensions (margin convention: outer 500x300, inner drawing area)
var margin = {top: 20, right: 50, bottom: 20, left: 20},
width = 500 - margin.left - margin.right,
height = 300 - margin.top - margin.bottom;
// set ranges; y is not inverted so position 1 (top of the table) renders at the top
var x = d3.scale.linear().range([0, width - margin.right]);
var y = d3.scale.linear().range([0, height]);
// set color scale (one color per season)
var color = d3.scale.category10();
// define axes
var x_rounds = d3.svg.axis()
.scale(x)
.orient('bottom');
var y_position = d3.svg.axis()
.scale(y)
.ticks(22)
.orient('left');
// define line; rounds with position 0 (no data) leave a gap via defined()
var line = d3.svg.line()
.defined(function(d) { return d.position !== 0; })
.x(function(d) { return x(d.round); })
.y(function(d) { return y(d.position); })
.interpolate('linear');
// add svg element and a group translated inside the margins
var arsenal = d3.select("div#arsenal-chart")
.append('svg')
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append('g')
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
// read data
// Load the season history CSV and draw the chart. Uses x, y, color, line,
// arsenal, width, height and margin defined above.
d3.csv('../../../../data/arsenal-history-2.csv', function(error, data) {
  // One column per season plus the shared 'round' column.
  // FIX: `vars` was an implicit global; declare it locally.
  var vars = d3.keys(data[0]).filter(function(key) { return key !== 'round'; });
  color.domain(vars);
  // Reshape the wide CSV into one {name, values} series per season.
  var seasons = vars.map(function(name) {
    return {
      name: name,
      values: data.map(function(d) {
        return { round: +d.round, position: +d[name] };
      })
    };
  });
  // determine ranges of input data (N.B. x hard-coded to 42 rounds)
  x.domain([1, 42]);
  y.domain([1,
    d3.max(seasons, function(c) {
      return d3.max(c.values, function(v) { return v.position; });
    })
  ]);
  // axes
  arsenal.append('g')
    .attr('class', 'x axis')
    .attr('transform', 'translate(0,' + height + ')')
    .call(x_rounds);
  arsenal.append('g')
    .attr('class', 'y axis')
    .call(y_position);
  // label for x-axis
  arsenal.append('text')
    .attr('text-anchor', 'middle')
    .attr('x', margin.right)
    .attr('y', height)
    .attr('dy', '.9em')
    .attr('dx', '-1.75em')
    .text('Round:');
  // dashed horizontal reference line at position 20
  // FIX: 'stroke-weight' is not an SVG attribute; the correct presentation
  // attribute is 'stroke-width'.
  arsenal.append('line')
    .attr('x1', 0)
    .attr('y1', y(20))
    .attr('x2', width - margin.right)
    .attr('y2', y(20))
    .attr('stroke-width', '2px')
    .style('stroke-dasharray', '1, 3')
    .style('stroke', '#000');
  // dashed vertical reference line at round 38
  arsenal.append('line')
    .attr('x1', x(38))
    .attr('y1', 0)
    .attr('x2', x(38))
    .attr('y2', height)
    .attr('stroke-width', '2px')
    .style('stroke-dasharray', '1, 3')
    .style('stroke', '#000');
  // one group element per season
  var season = arsenal.selectAll('.season')
    .data(seasons)
    .enter().append('g')
    .attr('class', 'seasons');
  // Toggle the 'active' class on every element belonging to season `index`.
  function highlight(d, index, object) {
    if (object.classList.contains('active')) {
      d3.selectAll('.season' + index).classed('active', false);
    } else {
      d3.selectAll('.season' + index).classed('active', true);
      d3.selectAll('.season' + index + '.line')
        .append('circle')
        .style('fill', 'black')
        // FIX: circles are positioned with cx/cy, not x/y.
        .attr('cx', 100)
        .attr('cy', 100)
        .attr('r', 30);
    }
  }
  // season lines
  // FIX: the <path> elements never received a 'd' attribute, so no line was
  // actually drawn; feed each season's values through the `line` generator.
  var lines = season.append('path')
    .attr('class', function(d, i) { return 'season' + i + ' line'; })
    .attr('d', function(d) { return line(d.values); })
    .style('stroke', function(d) { return color(d.name); })
    .on('click', function(d, i) { highlight(d, i, this); });
  // final-position label at the last round actually played (position 0 = n/a)
  season
    .append('text')
    .datum(function(d) {
      var j = d.values.length - 1;
      while (d.values[j].position == 0 && j > 0) { j--; }
      return { name: d.name, value: d.values[j] };
    })
    .attr('transform', function(d) { return 'translate(' + x(d.value.round) + ',' + y(d.value.position) + ')'; })
    .attr('dy', '.30em')
    .attr('dx', '.35em')
    .attr('class', function(d, i) { return 'season' + i + ' label'; })
    .text(function(d) { return d.value.position; });
  // final-position marker dot
  season.append('circle')
    .datum(function(d) {
      var j = d.values.length - 1;
      while (d.values[j].position == 0 && j > 0) { j--; }
      return { name: d.name, value: d.values[j] };
    })
    .attr('cx', function(d) { return x(d.value.round); })
    .attr('cy', function(d) { return y(d.value.position); })
    .style('fill', function(d, i) { return color(d.name); })
    .attr('class', function(d, i) { return 'season' + i + ' point'; })
    .attr('r', 3);
  // legend: one swatch + label per season
  var box_dim = 5;
  var legend = arsenal.selectAll('.legend')
    .data(vars.slice())
    .enter().append('g')
    .attr('class', function(d, i) { return 'season' + i + ' legend'; })
    .attr('transform', function(d, i) { return 'translate(0,' + i * (box_dim + 5) + ')'; })
    .on('click', function(d, i) { highlight(d, i, this); });
  legend.append('rect')
    .attr('x', width)
    .attr('y', function(d) { return height - (seasons.length * box_dim * 1.5); })
    .attr('width', box_dim)
    .attr('height', box_dim)
    .style('fill', color);
  legend.append('text')
    .attr('x', width)
    .attr('y', function(d) { return height - (seasons.length * box_dim * 1.5); })
    .attr('dy', 8.5)
    .attr('dx', -2)
    .style('text-anchor', 'end')
    .text(function(d) { return d; });
  arsenal.append('text')
    .attr('x', width)
    .attr('y', 5)
    .attr('dx', 10)
    .attr('dy', 4)
    .style('font-weight', 'bold')
    .style('text-anchor', 'end')
    .text('Season');
  // reduce opacity of all lines/points/labels/legend entries except the
  // current season (index 22)
  arsenal.selectAll('.line:not(.season22)')
    .attr('opacity', .10);
  arsenal.selectAll('.point:not(.season22)')
    .attr('opacity', 0);
  arsenal.selectAll('.label:not(.season22)')
    .attr('opacity', 0);
  arsenal.selectAll('.legend:not(.season22)')
    .attr('opacity', .1);
  // add 'clear' button that deactivates every season
  arsenal.append('text')
    .attr('x', x(38))
    .attr('y', y(20))
    .attr('dy', '-.2em')
    .attr('dx', '-.2em')
    .attr('font-size', '12px')
    .attr('text-anchor', 'end')
    .attr('class', 'button')
    .text('Clear selection')
    .on('click', function() {
      // NOTE(review): loop bound 23 exceeds the season22 maximum used above;
      // harmless (empty selections) but presumably should match vars.length.
      for (var i = 0; i <= 23; i++) {
        arsenal.selectAll('.season' + i).classed('active', false);
      }
    });
});
|
const path = require('path');
const webpack = require('webpack');
const pkg = require('./package.json');
// Banner prepended to the bundle entry (version + attribution).
const bannerPack = new webpack.BannerPlugin({
banner: [
`Quill Smart Break v${pkg.version}`,
'https://github.com/simialbi/quill-smart-break',
'Copyright (c) 2017, <NAME>',
'Copyright (c) 2017, <NAME>',
'Copyright (c) 2020, <NAME>'
].join('\n'),
entryOnly: true
});
// Compile-time constant exposing the package version to the source code.
const constantPack = new webpack.DefinePlugin({
QUILL_SMART_BREAK_VERSION: JSON.stringify(pkg.version)
});
// Absolute paths of the sources that babel-loader is allowed to transpile.
const source = [
'smart-breaker.js',
'blots'
].map(file => {
return path.resolve(__dirname, 'src', file);
});
// Babel rule: transpile project JS for the last two major browser versions.
const jsRules = {
test: /\.js$/,
include: source,
use: [
{
loader: 'babel-loader',
options: {
presets: [
[
'@babel/env',
{
targets: {
browsers: [
'last 2 Chrome major versions',
'last 2 Firefox major versions',
'last 2 Safari major versions',
'last 2 Edge major versions',
'last 2 iOS major versions',
'last 2 ChromeAndroid major versions',
]
}
}
]
]
}
}
]
};
// Development base configuration; the exported factory below derives the
// production and coverage variants from it.
const baseConfig = {
mode: 'development',
context: path.resolve(__dirname, 'src'),
entry: {
'smart-breaker.js': './smart-breaker.js'
},
output: {
filename: '[name]',
path: path.resolve(__dirname, 'dist')
},
externals: {
// Quill is provided by the host page, not bundled.
quill: 'Quill'
},
module: {
rules: [jsRules],
// noParse: []
},
plugins: [
bannerPack,
constantPack
],
devServer: {
contentBase: path.resolve(__dirname, 'dist'),
hot: false,
port: process.env.npm_package_config_ports_webpack,
stats: 'minimal',
disableHostCheck: true
}
};
module.exports = env => {
if (env && env.minimize) {
const { devServer, ...prodConfig } = baseConfig;
return {
...prodConfig,
mode: 'production',
entry: { 'smart-breaker.min.js': './smart-breaker.js' },
devtool: 'source-map'
};
}
if (env && env.coverage) {
baseConfig.module.rules[0].use[0].options.plugins = ['istanbul'];
return baseConfig;
}
return baseConfig;
}; |
#include "pch.h"
#include "InventoryItem.h"
#include "GameObjects/equip_inventory_data.h"
#include "GameObjects/sprj_gaitem_Imp.h"
namespace hoodie_script
{
// Construct an inventory item from already-decomposed field values.
// Uses a member initializer list instead of default-construct-then-assign.
// NOTE(review): initializer order follows the original assignment order —
// confirm it matches the member declaration order in InventoryItem.h.
InventoryItem::InventoryItem(int32_t inventoryIndexArg, uint32_t uniqueidArg, int32_t itemIdArg, uint32_t quantityArg, uint32_t unknown1Arg, ItemParamIdPrefix itemTypeArg)
    : inventoryIndex(inventoryIndexArg),
      uniqueId(uniqueidArg),
      itemId(itemIdArg),
      quantity(quantityArg),
      unknown1(unknown1Arg),
      itemType(itemTypeArg)
{
}
// Construct an inventory item from the raw in-game inventory entry.
// The give id encodes the item type as a large prefix; subtracting that
// prefix yields the not-fully-qualified item id.
InventoryItem::InventoryItem(InventoryItemInternal* itemStructPtr, size_t gameInventoryIndex)
{
    const int32_t giveId = itemStructPtr->giveId;
    const ItemParamIdPrefix prefix = EquipInventoryData::getItemParamIdPrefixFromGiveId(giveId);
    // Explicit narrowing: the index is stored as int32_t but arrives as
    // size_t (implicit truncation on 64-bit indices was silent before).
    this->inventoryIndex = static_cast<int32_t>(gameInventoryIndex);
    this->uniqueId = itemStructPtr->uniqueId;
    this->itemId = giveId - static_cast<int32_t>(prefix);
    this->quantity = itemStructPtr->quantity;
    this->unknown1 = itemStructPtr->unknown1;
    this->itemType = prefix;
}
// Resolve the gaitem instance backing this inventory entry by unique id.
// Returns a null (0) instance when the gaitem manager is unavailable, the
// item is not a weapon/protector, or no matching entry exists.
SprjGaitemIns InventoryItem::GetGaitemInstance()
{
    const bool isEquip =
        itemType == ItemParamIdPrefix::Weapon || itemType == ItemParamIdPrefix::Protector;
    if (!SprjGaitemImp::hasInstance() || !isEquip)
    {
        return SprjGaitemIns(0);
    }
    auto found = SprjGaitemImp::getInstance().getItemByUniqueId(this->uniqueId);
    return found.has_value() ? SprjGaitemIns(*found) : SprjGaitemIns(0);
}
} |
#!/usr/bin/env bash
# Demote the "master" bp container to slave duty: stop it, swap in the slave
# config, restart it and rename the container.
# FIX: without `set -e` a failed stop/copy still fell through to the rename,
# leaving a mis-named container running the wrong config.
set -euo pipefail
echo "stop master container..."
sudo docker stop master
echo "copy slave config to bp container..."
cp config.slave.ini ~/dockernode/bp/eosforce/config.ini
echo "start bp with slave config"
sudo docker start master
echo "change master to slave"
sudo docker rename master slave
|
#!/bin/bash
# script to prepare dependencies for AMETHST tools on a blank EC2 node
# FIX: made re-runnable — abort on errors, clone into $HOME (the PATH entry
# below assumes $HOME/AMETHST, the original cloned into the current dir),
# and only append the PATH export to .bash_profile once instead of adding a
# duplicate line on every run.
set -e
sudo apt-get update
sudo apt-get install -y git r-base-core python-matplotlib libstatistics-descriptive-perl python-numpy python-scipy
# R packages required by the AMETHST analysis scripts
echo 'install.packages("matlab", repos="http://cran.case.edu/")' | sudo R --vanilla
echo 'install.packages("ecodist", repos="http://cran.case.edu/")' | sudo R --vanilla
if [ ! -d "$HOME/AMETHST" ]; then
    git clone https://github.com/Droppenheimer/AMETHST "$HOME/AMETHST"
fi
if ! grep -qF 'PATH=$PATH:$HOME/AMETHST' ~/.bash_profile 2>/dev/null; then
    echo 'export PATH=$PATH:$HOME/AMETHST' >> ~/.bash_profile
fi
export PATH=$PATH:$HOME/AMETHST
|
def transformInOrderToPreOrder(Tree):
    """Print each node's ``data`` in pre-order (node, left subtree, right).

    Iterative equivalent of the recursive traversal: an explicit stack is
    popped node-first, pushing the right child before the left so the left
    subtree is printed first.

    Args:
        Tree: root node exposing ``data``, ``left`` and ``right`` attributes,
            or ``None`` for an empty (sub)tree.
    """
    pending = [Tree]
    while pending:
        node = pending.pop()
        if node is None:
            continue
        print(node.data)
        # Right is pushed first so left is processed first (LIFO order).
        pending.append(node.right)
        pending.append(node.left)
import template from './product-feed-detail.html.twig';
import './product-feed-detail.scss';
const { Criteria } = Shopware.Data;
const { Context, Mixin, Component } = Shopware;
const { mapPropertyErrors } = Component.getComponentHelper();
Shopware.Component.register('product-feed-detail', {
template,
inject: [
'repositoryFactory',
'feedConditionDataProviderService',
'productfeed',
'template'
],
mixins: [Mixin.getByName('notification')],
metaInfo() {
return {
title: this.$createTitle()
};
},
data() {
return {
repository: null,
fileRepository: null,
productfeedProductStreamRepository: null,
productStreamRepository: null,
isLoading: false,
feed: null,
interval: {},
email: {},
selectedMessageEvents: null,
conditions: null,
processSuccess: false,
rule: null,
deletedIds: [],
generateSuccess: false,
firstTry: true,
loaded: false,
salesChannel: null,
manufacturers: null,
showPopover: false,
showDiscardChangesModal: false,
tempTemplateData: null,
latestFile: null,
dynamicProductGroups: []
}
},
created: function () {
this.createdComponent();
},
computed: {
...mapPropertyErrors('feed', ['name', 'filename']),
templateError() {
if (this.checkErrorFunction(this.feed.template)) return null;
return {
detail: "product-feed.general.textRequired",
code: "fieldMustNotBeEmpty"
}
},
hostUrlError() {
if (this.feed.deliveryEnabled === false || this.checkErrorFunction(this.feed.hostUrl)) return null;
return {
detail: "product-feed.general.textRequired",
code: "fieldMustNotBeEmpty"
}
},
hostUsernameError() {
if (this.feed.deliveryEnabled === false || this.checkErrorFunction(this.feed.hostUsername)) return null;
return {
detail: "product-feed.general.textRequired",
code: "fieldMustNotBeEmpty"
}
},
hostPasswordError() {
if (this.feed.deliveryEnabled === false || this.checkErrorFunction(this.feed.hostPassword)) return null;
return {
detail: "product-feed.general.textRequired",
code: "fieldMustNotBeEmpty"
}
},
hostDirectoryError() {
if (this.feed.deliveryEnabled === false || this.checkErrorFunction(this.feed.hostDirectoryPath)) return null;
return {
detail: "product-feed.general.textRequired",
code: "fieldMustNotBeEmpty"
}
},
emailAddressesError() {
if (!this.feed.notificationsEnabled || this.checkErrorFunction(this.feed.notificationAddresses)) return null;
return {
detail: "product-feed.general.textRequired",
code: "fieldMustNotBeEmpty"
}
},
emailEventsError() {
if (!this.feed.notificationsEnabled || this.checkErrorFunction(this.feed.notificationEvents))
return null;
return {
detail: "product-feed.general.textRequired",
code: "fieldMustNotBeEmpty"
}
},
executionTimeError() {
if (!this.feed.executionMode.includes('cron') || this.checkErrorFunction(this.interval.time)) return null;
return {
detail: "product-feed.general.textRequired",
code: "fieldMustNotBeEmpty"
}
},
manualExecutionMode() {
return this.productfeed.getExecutionModes().find(e => e.includes('manual'));
},
ruleRepository() {
return this.repositoryFactory.create('rule');
},
availableModuleTypes() {
return this.ruleConditionDataProviderService.getModuleTypes(moduleType => moduleType);
},
moduleTypes: {
get() {
if (!this.rule || !this.rule.moduleTypes) {
return [];
}
return this.rule.moduleTypes.types;
},
set(value) {
if (value === null || value.length === 0) {
this.rule.moduleTypes = null;
return;
}
this.rule.moduleTypes = { types: value };
}
},
timeIntervals() {
return [
{
label: this.$tc("product-feed.general.week_key"),
value: 1000 * 60 * 60 * 24 * 7
},
{
label: this.$tc("product-feed.general.day_key"),
value: 1000 * 60 * 60 * 24
},
{
label: this.$tc("product-feed.general.hour_key"),
value: 1000 * 60 * 60
},
{
label: this.$tc("product-feed.general.minute_key"),
value: 1000 * 60
},
];
},
detailMode: function () {
return Boolean(this.$route.params.id);
},
formattedNotificationEvents() {
return this.productfeed.getNotificationEvents().map(event => {
let obj = {};
obj.label = this.$tc(this.productfeed.formatNotificationEvents(event));
obj.value = event;
return obj;
});
},
formattedExecutionModes() {
return this.productfeed.getExecutionModes().map(mode => {
let obj = {};
obj.label = this.$tc(this.productfeed.formatExecutionMode(mode));
obj.value = mode;
return obj;
});
},
formattedFileTypes() {
return this.productfeed.getFileTypes().map(type => {
let obj = {};
obj.label = this.$tc(this.productfeed.formatFileType(type));
obj.value = type;
return obj;
});
},
formattedFileTransferProtocols() {
return this.productfeed.getFileTransferProtocols().map(protocol => {
let obj = {};
obj.label = this.$tc(this.productfeed.formatFileTransferProtocol(protocol));
obj.value = protocol;
return obj;
});
},
conditionRepository() {
if (!this.rule) {
return null;
}
return this.repositoryFactory.create(
this.rule.conditions.entity,
this.rule.conditions.source
);
},
nameCriteria() {
let criteria = new Criteria();
criteria.addSorting(Criteria.sort('name', 'ASC'));
return criteria;
}
},
methods: {
async createdComponent() {
this.repository = this.repositoryFactory.create('productfeed_productfeed');
this.notificationRepository = this.repositoryFactory.create('productfeed_notification');
this.fileRepository = this.repositoryFactory.create('productfeed_file');
let manufacturerRepository = this.repositoryFactory.create('product_manufacturer');
this.productfeedProductStreamRepository = this.repositoryFactory.create('productfeed_product_stream');
this.productStreamRepository = this.repositoryFactory.create('product_stream');
// get sales channels (only promise)
let salesChannelRepository = this.repositoryFactory.create('sales_channel');
this.salesChannel = salesChannelRepository.search(new Criteria(), Shopware.Context.api);
this.manufacturers = await manufacturerRepository.search(new Criteria(), Shopware.Context.api);
// show modal if no feed is given
if (!this.$route.params.id) {
this.showTemplateModal();
}
// wait for enums to be safe loaded
await this.productfeed.wait();
// wait for feed and saleschannel promise
await this.getFeed();
await this.loadDynamicProductGroups();
// resolve promise
this.salesChannel = await this.salesChannel;
// set default value
this.feed.salesChannel = this.salesChannel[0].id;
this.loaded = true;
this.isLoading = false;
},
async getFeed() {
if (!this.$route.params.id) {
return;
}
let criteria = new Criteria();
criteria.addFilter(Criteria.equals('id', this.$route.params.id));
criteria.addAssociation('productfeedProductStreams.productStream');
this.feed = (await this.repository.search(criteria, Shopware.Context.api)).first();
criteria = new Criteria();
criteria.addFilter(Criteria.equals('productfeedId', this.feed.id));
let search = this.notificationRepository.search(criteria, Shopware.Context.api);
// sets interval of feed
this.fillInterval();
this.feed.notification = {};
this.updateLatestFile();
// set events
let events = this.productfeed.getNotificationEvents();
this.feed.notificationEvents = [];
if (this.feed.messageWhenCreateSuccess) {
this.feed.notificationEvents.push(
events.find(e => e.includes('create') && e.includes('success'))
);
}
if (this.feed.messageWhenCreateError) {
this.feed.notificationEvents.push(
events.find(e => e.includes('create') && e.includes('error'))
);
}
if (this.feed.messageWhenDeliverySuccess) {
this.feed.notificationEvents.push(
events.find(e => e.includes('delivery') && e.includes('success'))
);
}
if (this.feed.messageWhenDeliveryError) {
this.feed.notificationEvents.push(
events.find(e => e.includes('delivery') && e.includes('error'))
);
}
// get notifications for feed
this.feed.notificationAddresses = '';
// resolve notification query
let response = await search;
response.forEach((n, i) => {
if (i !== 0) {
this.feed.notificationAddresses += ',';
}
this.feed.notificationAddresses += n.emailAddress;
});
// set rule
if (!this.feed.ruleId) {
this.createRule();
} else {
this.ruleId = this.feed.ruleId;
this.loadEntityData(this.feed.ruleId);
}
},
showTemplateModal() {
this.showPopover = true;
},
closeModal() {
this.showPopover = false;
},
async selectTemplate(name) {
let data = await this.template.fetchDefaults(name);
if (this.feed.name || this.feed.filename || this.feed.template) {
this.openDiscardChangesModal(data);
return;
}
this.feed.name = data.name;
this.feed.filename = data.filename;
this.feed.template = data.template;
this.feed.countryId = data.countryId;
this.feed.currencyId = data.currencyId;
this.feed.languageId = data.languageId;
this.closeModal();
},
openDiscardChangesModal(tempData) {
this.closeModal();
// app crashes with 2 open modals at the same time
// so wait for the tick after first modal was closed
this.$nextTick(() => {
this.tempTemplateData = tempData;
this.showDiscardChangesModal = true;
});
},
closeDiscardChangesModal() {
this.closeModal();
this.showDiscardChangesModal = false;
this.showTemplateData = null;
},
deleteChanges() {
let data = this.tempTemplateData;
this.feed.name = data.name;
this.feed.filename = data.filename;
this.feed.template = data.template;
this.showDiscardChangesModal = false;
this.showTemplateData = null;
},
createRule() {
this.rule = this.ruleRepository.create(Context.api);
this.conditions = this.rule.conditions;
},
loadEntityData(ruleId) {
this.isLoading = true;
this.conditions = null;
return this.ruleRepository.get(ruleId, Context.api).then((rule) => {
this.rule = rule;
return this.loadConditions();
});
},
loadConditions(conditions = null) {
if (conditions === null) {
return this.conditionRepository.search(new Criteria(), Context.api).then((searchResult) => {
return this.loadConditions(searchResult);
});
}
if (conditions.total <= conditions.length) {
this.conditions = conditions;
return Promise.resolve();
}
const criteria = new Criteria(
conditions.criteria.page + 1,
conditions.criteria.limit
);
return this.conditionRepository.search(criteria, conditions.context).then((searchResult) => {
conditions.push(...searchResult);
conditions.criteria = searchResult.criteria;
conditions.total = searchResult.total;
return this.loadConditions(conditions);
});
},
conditionsChanged({ conditions, deletedIds }) {
this.conditionTree = conditions;
this.deletedIds = [...this.deletedIds, ...deletedIds];
this.rule.conditions = conditions;
},
async saveFeed() {
// check if, when notifications are enabled all required fields are filled
if (this.checkBeforeRequest()) {
return;
}
this.isLoading = true;
if (this.detailMode) {
await this.removeOldNotifications();
}
// save filter & rule when given
let filter = this.generateProductFilter();
if (filter) {
this.feed.filter = filter;
this.feed.ruleId = this.rule.id;
this.rule.priority = 1;
this.rule.name = this.feed.name + '_feed_generator_rule';
this.feed.rule = this.rule;
try {
await this.ruleRepository.save(this.rule, Shopware.Context.api);
// complete rule has to be reloaded, with conditions
this.loadEntityData(this.feed.ruleId);
} catch (e) {
this.createNotificationError({
title: this.$tc('product-feed.general.error'),
message: this.$tc('product-feed.general.errorOccurred')
});
this.isLoading = false;
return;
}
} else {
this.feed.rule = null;
this.feed.ruleId = null;
this.feed.filter = null;
}
// prepare some feed values
this.feed.interval = this.interval.time * this.interval.multiplicator;
this.feed.hostDirectory = this.feed.hostDirectoryPath;
if (this.feed.notificationsEnabled) {
this.setNotificationsToFeed();
}
// save feed
try {
await this.repository.save(this.feed, Shopware.Context.api)
} catch (e) {
this.createNotificationError({
title: this.$tc('product-feed.general.error'),
message: this.$tc('product-feed.detail.requiredFieldsAreEmpty')
});
this.isLoading = false;
return;
}
// add notifications
try {
if (this.feed.notificationsEnabled && this.feed.notificationAddresses) {
let mails = this.feed.notificationAddresses.split(',');
let promises = [];
for (let mail of mails) {
let notification = this.notificationRepository.create(Shopware.Context.api);
notification.emailAddress = mail;
notification.enabled = true;
notification.productfeedId = this.feed.id;
promises.push(this.notificationRepository.save(notification, Shopware.Context.api));
}
await Promise.all(promises);
}
} catch (e) {
this.createNotificationError({
title: this.$tc('product-feed.general.error'),
message: this.$tc('product-feed.general.errorOccurred')
});
this.isLoading = false;
return;
}
await this.saveProductStream();
// create user notification
this.createNotificationSuccess({
title: this.$tc('product-feed.general.success'),
message: this.$tc('product-feed.detail.feedWasSaved')
});
if (!this.detailMode) {
this.$router.push({ name: 'product.feed.detail', params: { id: this.feed.id } });
return;
}
// reload feed
this.rule = null;
this.ruleId = null;
await this.getFeed();
this.isLoading = false;
},
async saveProductStream() {
const criteria = new Criteria();
criteria.addFilter(Criteria.equals('productfeedId', this.feed.id));
let oldEntities = await this.productfeedProductStreamRepository.search(criteria, Shopware.Context.api);
await Promise.all(
oldEntities.map(entity => this.productfeedProductStreamRepository.delete(entity.id, Shopware.Context.api))
);
let promises = [];
this.dynamicProductGroups.forEach(group => {
let productfeedProductStreamEntity = this.productfeedProductStreamRepository.create(Shopware.Context.api);
productfeedProductStreamEntity.productfeedId = this.feed.id;
productfeedProductStreamEntity.productStreamId = group.id;
promises.push(this.productfeedProductStreamRepository.save(productfeedProductStreamEntity, Shopware.Context.api));
});
await Promise.all(promises);
},
async removeOldNotifications() {
// remove old notifications
const criteria = new Criteria();
criteria.addFilter(Criteria.equals('productfeedId', this.feed.id));
let notifications = await this.notificationRepository.search(criteria, Shopware.Context.api);
let promises = [];
notifications.forEach(n => promises.push(this.notificationRepository.delete(n.id, Shopware.Context.api)));
await Promise.all(promises);
},
async onBtnGenerate() {
this.isLoading = true;
try {
await this.productfeed.generateFeed(this.feed.id);
this.createNotificationSuccess({
title: this.$tc('global.default.success'),
message: this.$tc('product-feed.detail.generateSuccess')
});
this.generateSuccess = true;
this.updateLatestFile();
setTimeout(() => this.generateSuccess = false, 2000);
} catch (e) {
if (e.response.data.data) {
this.createNotificationError({
title: this.$tc('global.default.error'),
message: this.$tc(e.response.data.data)
});
} else {
this.createNotificationError({
title: this.$tc('global.default.error'),
message: this.$tc('product-feed.detail.generateError')
});
}
} finally {
this.isLoading = false;
}
},
saveFinish() {
this.processSuccess = false;
},
checkBeforeRequest() {
this.firstTry = false;
if (this.checkError()) {
if (this.feed.name && this.feed.filename) {
this.createNotificationError({
title: this.$tc('product-feed.general.error'),
message: this.$tc('product-feed.detail.requiredFieldsAreEmpty')
});
return true;
}
}
return false;
},
// Decide whether the rule's condition tree contains at least one real
// (non-container) condition; if so, serialise the tree into a plain filter
// object via resolveCondition. Returns null for an empty tree.
generateProductFilter() {
// check if something is in the productfilter
let toCheck = this.rule.conditions[0];
if (!toCheck) {
return null;
}
let exists;
// Follow the first-child chain down through and/or containers until a leaf
// condition is found (exists = true) or a container has no children.
// NOTE(review): only the FIRST child of each container is inspected — a
// container whose first child chain is empty but has other populated
// children would be treated as empty; confirm this is intended.
while (true) {
if (toCheck.type === 'andContainer' || toCheck.type === 'orContainer') {
if (toCheck.children[0]) {
toCheck = toCheck.children[0];
} else {
exists = false;
break;
}
} else {
// a leaf without a type counts as empty
if (!toCheck.type) {
exists = false;
break;
} else {
exists = true;
break;
}
}
}
if (this.rule.conditions[0] && exists) {
return this.resolveCondition(this.rule.conditions[0]);
}
return null;
},
// Recursively convert a rule condition entity into a plain object:
// and/or containers become { and: [...] } / { or: [...] }; leaves keep
// their type, operator and value.
resolveCondition(condition) {
let result = {};
if (condition.type === 'andContainer') {
result.and = [];
condition.children.forEach(child => {
result.and.push(this.resolveCondition(child));
});
} else if (condition.type === 'orContainer') {
result.or = [];
condition.children.forEach(child => {
result.or.push(this.resolveCondition(child));
});
} else {
result.type = condition.type;
result.operator = condition.value.operator;
result.value = condition.value.value;
// multi-select conditions store their data under the plural `values`
if (!result.value) {
result.value = condition.value.values;
}
}
return result;
},
fillInterval() {
let minutes = this.feed.interval / 60 / 1000;
let hours = minutes / 60;
let days = hours / 24;
let weeks = days / 7;
if (this.isInt(weeks)) {
this.interval.time = weeks;
this.interval.multiplicator = this.timeIntervals[0].value.toString();
} else if (this.isInt(days)) {
this.interval.time = days;
this.interval.multiplicator = this.timeIntervals[1].value.toString();
} else if (this.isInt(hours)) {
this.interval.time = hours;
this.interval.multiplicator = this.timeIntervals[2].value.toString();
} else if (this.isInt(minutes)) {
this.interval.time = minutes;
this.interval.multiplicator = this.timeIntervals[3].value.toString();
}
},
isInt(n) {
return n % 1 === 0;
},
setNotificationsToFeed() {
if (this.feed.notificationEvents) {
this.feed.messageWhenCreateSuccess = this.feed.notificationEvents.indexOf('notification_events_create_success') >= 0;
this.feed.messageWhenCreateError = this.feed.notificationEvents.indexOf('notification_events_create_error') >= 0;
this.feed.messageWhenDeliverySuccess = this.feed.notificationEvents.indexOf('notification_events_delivery_success') >= 0;
this.feed.messageWhenDeliveryError = this.feed.notificationEvents.indexOf('notification_events_delivery_error') >= 0;
}
},
checkError() {
return this.executionTimeError !== null ||
this.emailEventsError !== null ||
this.emailAddressesError !== null ||
this.hostDirectoryError !== null ||
this.hostPasswordError !== null ||
this.hostUsernameError !== null ||
this.hostUrlError !== null ||
this.templateError !== null;
},
checkErrorFunction(...values) {
values.push(this.firstTry);
// checks if any value is set (true or different value)
return values.some(value => !!value);
},
resetFirstTry() {
this.firstTry = true;
},
async updateLatestFile() {
let criteria = new Criteria();
criteria.addFilter(Criteria.equals('productfeedId', this.feed.id));
criteria.addSorting(Criteria.sort('createdAt', 'DESC'));
let files = await this.fileRepository.search(criteria, Shopware.Context.api)
this.latestFile = files.first();
},
// Load the dynamic product groups (product streams) attached to this feed
// into this.dynamicProductGroups.
async loadDynamicProductGroups() {
if (!this.feed.productfeedProductStreams || this.feed.productfeedProductStreams.length === 0) {
let criteria = new Criteria(1, 1);
const streams = await this.productStreamRepository.search(criteria, Shopware.Context.api)
// NOTE(review): searching one stream and immediately pop()-ing it yields
// an empty-but-initialised collection — presumably done to obtain a
// correctly typed EntityCollection; confirm this is intentional.
streams.pop();
this.setDynamicProductGroups(streams);
return;
}
// resolve the mapping entities, then fetch the referenced streams by id
let criteria = new Criteria();
criteria.addAssociation('productStream');
criteria.addFilter(Criteria.equalsAny('id', this.feed.productfeedProductStreams.getIds()));
const productfeedProductStreams = await this.productfeedProductStreamRepository.search(criteria, Shopware.Context.api);
const ids = productfeedProductStreams.map(e => e.productStreamId);
criteria = new Criteria();
criteria.addFilter(Criteria.equalsAny('id', ids));
const streams = await this.productStreamRepository.search(criteria, Shopware.Context.api);
this.setDynamicProductGroups(streams);
},
setDynamicProductGroups(values) {
this.dynamicProductGroups = values;
}
},
});
|
package me.batizhao.dp.initializr;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.spring.initializr.versionresolver.DependencyManagementVersionResolver;
import me.batizhao.dp.initializr.project.ProjectDescriptionCustomizerConfiguration;
import me.batizhao.dp.initializr.support.CacheableDependencyManagementVersionResolver;
import me.batizhao.dp.initializr.support.StartInitializrMetadataUpdateStrategy;
import me.batizhao.dp.initializr.web.HomeController;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.web.client.RestTemplateBuilder;
import org.springframework.boot.web.server.ErrorPage;
import org.springframework.boot.web.server.ErrorPageRegistrar;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Import;
import org.springframework.http.HttpStatus;
import org.springframework.scheduling.annotation.EnableAsync;
import java.io.IOException;
import java.nio.file.Files;
/**
 * Spring Boot entry point for the customised Spring Initializr service.
 *
 * <p>Registers the landing-page controller, a metadata update strategy built
 * from a {@code RestTemplate} and an {@code ObjectMapper}, and a cacheable
 * dependency-management version resolver backed by a temporary directory.
 * Caching and asynchronous execution are enabled application-wide.
 *
 * @author batizhao
 * @since 2016/9/28
 */
@EnableAutoConfiguration
@SpringBootConfiguration
@Import(ProjectDescriptionCustomizerConfiguration.class)
@EnableCaching
@EnableAsync
public class PecadoDevInitializrApplication {

    /** Boots the embedded web application. */
    public static void main(String[] args) {
        SpringApplication.run(PecadoDevInitializrApplication.class, args);
    }

    /** Serves the landing page. */
    @Bean
    public HomeController homeController() {
        return new HomeController();
    }

    /**
     * Strategy used to refresh the Initializr metadata, built from the
     * application's {@link RestTemplateBuilder} and JSON mapper.
     */
    @Bean
    public StartInitializrMetadataUpdateStrategy initializrMetadataUpdateStrategy(
    RestTemplateBuilder restTemplateBuilder, ObjectMapper objectMapper) {
        return new StartInitializrMetadataUpdateStrategy(restTemplateBuilder.build(), objectMapper);
    }

    /**
     * Resolves dependency-management versions, cached on disk in a temporary
     * directory so repeated lookups avoid redundant resolution work.
     *
     * @throws IOException if the cache directory cannot be created
     */
    @Bean
    public DependencyManagementVersionResolver dependencyManagementVersionResolver() throws IOException {
        return new CacheableDependencyManagementVersionResolver(DependencyManagementVersionResolver
        .withCacheLocation(Files.createTempDirectory("version-resolver-cache-")));
    }
}
|
import React from "react";
import "../styles/styles.css";
import { TestComponent } from '../components/test_component';
// Commonly used with containers
import { Container, Row, Col, Button } from "reactstrap";
import { Link } from "react-router-dom";
// Actions
import { testAction } from "../actions/actions"
// Demo container: binds the shared testAction to this instance, fires it
// once on mount and renders a minimal view with the TestComponent.
export default class TestContainer extends React.Component {
    constructor(props) {
        super(props);
        // Bind the imported action so `this` refers to this component when
        // the action runs (exact behaviour defined in actions/actions.js).
        this.testAction = testAction.bind(this);
    }

    componentDidMount() {
        // Fire the action once when the container mounts.
        this.testAction();
    }

    render() {
        return (
            <div>
                Hello World
                <TestComponent name="React Boilerplate"/>
                {/* NOTE(review): a relative img src usually needs to go
                    through the bundler (import) to resolve at runtime —
                    confirm this path works in the built app. */}
                <img src="../img/react_icon.png" alt="myAlt"/>
            </div>
        );
    }
}
|
#ifndef ARISTA_H
#define ARISTA_H
#include "Object.h"
#include <string>
// NOTE(review): a using-directive in a header leaks namespace std into every
// translation unit that includes it; prefer qualifying std::string. Kept
// as-is here because other files may rely on the directive.
using namespace std;
// Weighted directed edge ("arista") of a graph: origin vertex, destination
// vertex ("direccion") and weight ("peso").
class Arista : public Object{
public:
Arista();                 // default-constructed edge
Arista(int, int, int);    // origin, destination, weight
~Arista();
int getOrigen();          // origin vertex id
int getDireccion();       // destination vertex id
int getPeso();            // edge weight
void setOrigen(int);
void setDireccion(int);
void setPeso(int);
string toString();        // human-readable representation
private:
int origen;       // origin vertex id
int direccion;    // destination vertex id
int peso;         // edge weight
};
#endif
//
// client.cpp
// ~~~~~~~~~~
//
// Copyright (c) 2003-2021 <NAME> (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/asio.hpp>
#include <boost/lambda/lambda.hpp>
#include <boost/lambda/bind.hpp>
#include <boost/lambda/if.hpp>
#include <boost/shared_ptr.hpp>
#include <algorithm>
#include <cstdlib>
#include <exception>
#include <iostream>
#include <string>
#include "protocol.hpp"
using namespace boost;
using boost::asio::ip::tcp;
using boost::asio::ip::udp;
// "Porthopper"-style client: receives a stream of frames over UDP while
// periodically renegotiating a fresh data port over the TCP control
// connection, aiming for a seamless handover (no dropped frames).
// `frame` and `control_request` come from protocol.hpp.
int main(int argc, char* argv[])
{
  try
  {
    if (argc != 3)
    {
      std::cerr << "Usage: client <host> <port>\n";
      return 1;
    }
    using namespace std; // For atoi.
    std::string host_name = argv[1];
    std::string port = argv[2];
    boost::asio::io_context io_context;
    // Determine the location of the server.
    tcp::resolver resolver(io_context);
    tcp::endpoint remote_endpoint = *resolver.resolve(host_name, port).begin();
    // Establish the control connection to the server.
    tcp::socket control_socket(io_context);
    control_socket.connect(remote_endpoint);
    // Create a datagram socket to receive data from the server.
    boost::shared_ptr<udp::socket> data_socket(
        new udp::socket(io_context, udp::endpoint(udp::v4(), 0)));
    // Determine what port we will receive data on.
    udp::endpoint data_endpoint = data_socket->local_endpoint();
    // Ask the server to start sending us data.
    control_request start = control_request::start(data_endpoint.port());
    boost::asio::write(control_socket, start.to_buffers());
    // Highest frame number seen so far; frames arriving out of order or
    // duplicated (e.g. during handover) are dropped by this check.
    unsigned long last_frame_number = 0;
    for (;;)
    {
      // Receive 50 messages on the current data socket.
      for (int i = 0; i < 50; ++i)
      {
        // Receive a frame from the server.
        frame f;
        data_socket->receive(f.to_buffers(), 0);
        if (f.number() > last_frame_number)
        {
          last_frame_number = f.number();
          std::cout << "\n" << f.payload();
        }
      }
      // Time to switch to a new socket. To ensure seamless handover we will
      // continue to receive packets using the old socket until data arrives on
      // the new one.
      std::cout << " Starting renegotiation";
      // Create the new data socket.
      boost::shared_ptr<udp::socket> new_data_socket(
          new udp::socket(io_context, udp::endpoint(udp::v4(), 0)));
      // Determine the new port we will use to receive data.
      udp::endpoint new_data_endpoint = new_data_socket->local_endpoint();
      // Ask the server to switch over to the new port.
      control_request change = control_request::change(
          data_endpoint.port(), new_data_endpoint.port());
      boost::system::error_code control_result;
      // Boost.Lambda expression: lambda::_1 is the handler's first argument,
      // i.e. the error code for the completed write.
      boost::asio::async_write(control_socket, change.to_buffers(),
          (
            lambda::var(control_result) = lambda::_1
          ));
      // Try to receive a frame from the server on the new data socket. If we
      // successfully receive a frame on this new data socket we can consider
      // the renegotation complete. In that case we will close the old data
      // socket, which will cause any outstanding receive operation on it to be
      // cancelled.
      frame f1;
      boost::system::error_code new_data_socket_result;
      new_data_socket->async_receive(f1.to_buffers(),
          (
            // Note: lambda::_1 is the first argument to the callback handler,
            // which in this case is the error code for the operation.
            lambda::var(new_data_socket_result) = lambda::_1,
            lambda::if_(!lambda::_1)
            [
              // We have successfully received a frame on the new data socket,
              // so we can close the old data socket. This will cancel any
              // outstanding receive operation on the old data socket.
              lambda::var(data_socket) = boost::shared_ptr<udp::socket>()
            ]
          ));
      // This loop will continue until we have successfully completed the
      // renegotiation (i.e. received a frame on the new data socket), or some
      // unrecoverable error occurs.
      bool done = false;
      while (!done)
      {
        // Even though we're performing a renegotation, we want to continue
        // receiving data as smoothly as possible. Therefore we will continue to
        // try to receive a frame from the server on the old data socket. If we
        // receive a frame on this socket we will interrupt the io_context,
        // print the frame, and resume waiting for the other operations to
        // complete.
        frame f2;
        done = true; // Let's be optimistic.
        if (data_socket) // Might have been closed by new_data_socket's handler.
        {
          data_socket->async_receive(f2.to_buffers(), 0,
              (
                lambda::if_(!lambda::_1)
                [
                  // We have successfully received a frame on the old data
                  // socket. Stop the io_context so that we can print it.
                  lambda::bind(&boost::asio::io_context::stop, &io_context),
                  lambda::var(done) = false
                ]
              ));
        }
        // Run the operations in parallel. This will block until all operations
        // have finished, or until the io_context is interrupted. (No threads!)
        // restart() is required before re-running an io_context that was
        // previously stopped or ran out of work.
        io_context.restart();
        io_context.run();
        // If the io_context.run() was interrupted then we have received a frame
        // on the old data socket. We need to keep waiting for the renegotation
        // operations to complete.
        if (!done)
        {
          if (f2.number() > last_frame_number)
          {
            last_frame_number = f2.number();
            std::cout << "\n" << f2.payload();
          }
        }
      }
      // Since the loop has finished, we have either successfully completed
      // the renegotation, or an error has occurred. First we'll check for
      // errors.
      if (control_result)
        throw boost::system::system_error(control_result);
      if (new_data_socket_result)
        throw boost::system::system_error(new_data_socket_result);
      // If we get here it means we have successfully started receiving data on
      // the new data socket. This new data socket will be used from now on
      // (until the next time we renegotiate).
      std::cout << " Renegotiation complete";
      data_socket = new_data_socket;
      data_endpoint = new_data_endpoint;
      if (f1.number() > last_frame_number)
      {
        last_frame_number = f1.number();
        std::cout << "\n" << f1.payload();
      }
    }
  }
  catch (std::exception& e)
  {
    std::cerr << "Exception: " << e.what() << std::endl;
  }
  return 0;
}
|
public class Bicycle {
    // Current speed in arbitrary units; invariant: never negative.
    private int speed;

    /** Creates a stationary bicycle (speed 0). */
    public Bicycle() {
        speed = 0;
    }

    /** @return the current speed (always {@code >= 0}) */
    public int getSpeed() {
        return speed;
    }

    /**
     * Sets the speed. Negative values are silently ignored and the
     * current speed is kept unchanged.
     *
     * @param speed the desired speed; must be non-negative to take effect
     */
    public void setSpeed(int speed) {
        if (speed < 0) {
            return; // reject invalid input, keep current value
        }
        this.speed = speed;
    }

    /** One pedal stroke: increases the speed by one unit. */
    public void pedal() {
        speed++;
    }
}
# Build the Transcrypt output for active.py and install it into static/js.
# set -e: abort on any failure, so the destructive `rm -r` below can never
# run from the wrong directory if `cd look` or the build fails.
set -e
cd look
transcrypt -a -b -n active.py
cp __javascript__/* static/js
rm -r __javascript__
cd ..
|
<reponame>swelbourn/transpyr-api<gh_stars>0
module.exports = async (Model, query, queryString) => {
const pagination = JSON.parse(queryString.paginate);
const page = Number(pagination.page) || 1;
const limit = Number(pagination.limit) || 10;
const response = await Model.paginate(query, {
page,
limit,
//handle projection in mongoose-paginate to avoid path collision
select: queryString.fields
? queryString.fields.replace(/,/g, ' ')
: undefined,
});
return response;
};
|
import React from 'react'
import { Input } from 'antd'
const { TextArea } = Input
export default ({ onChange, value }) => (
<div style={{marginBottom: 10}}>
<TextArea placeholder="自定义API KEY (当接口无法正常工作时填写)" autosize={{ minRows: 1, maxRows: 3 }} style={{ resize: 'none' }} value={ value } onChange={ onChange } />
</div>
) |
public class ReportProcessor
{
    /// <summary>
    /// Returns the non-deleted reports belonging to the given industry
    /// whose ReportDate falls within [startDate, endDate] (inclusive).
    /// Evaluation is deferred (LINQ), exactly as before.
    /// </summary>
    public IEnumerable<Report> FilterReports(IEnumerable<Report> reports, string industryName, DateTime startDate, DateTime endDate)
    {
        return reports
            .Where(report => !report.IsDeleted)
            .Where(report => report.Industry == industryName)
            .Where(report => report.ReportDate >= startDate && report.ReportDate <= endDate);
    }
}
// Test data initialization
// NOTE(review): this fragment references identifiers that are not defined
// here (report1, report2, reports, options) — presumably seeded earlier in
// the original test file; verify against the full test source.
var industry2 = TestModelsSeeder.SeedIndustry2();
var report3 = TestModelsSeeder.SeedReport3();
var industry3 = TestModelsSeeder.SeedIndustry3();
// Mark two reports deleted so the filter below is expected to exclude them.
report1.IsDeleted = true;
report2.IsDeleted = true;
using (var arrangeContext = new InsightHubContext(options))
{
    var processor = new ReportProcessor();
    var filteredReports = processor.FilterReports(reports, "SomeIndustry", new DateTime(2022, 1, 1), new DateTime(2022, 12, 31));
    // Process the filtered reports as needed
}
import React from "react";
import ReactDOM from "react-dom";
import "./index.css";
import Dashboard from "./dashboard/Dashboard";
// Mount the top-level Dashboard component into the #root element.
ReactDOM.render(<Dashboard />, document.getElementById("root"));
// NOTE(review): the block below is a disabled react-testing-library test;
// it belongs in a *.test.js file, not in the app entry point — consider
// moving or deleting it.
// test('Controls the component that changes the button text to reflect the state of when unlocked is clicked', () => {
//   const controls = render(<Controls locked={false} />);
//   const closeGate = controls.getByText(/close gate/i);
//   fireEvent.click(closeGate);
//   controls.findByText(/closed/i);
//   const lockGate = controls.getByText(/lock gate/i);
//   fireEvent.click(lockGate);
//   controls.findByText(/locked/i);
// });
|
<filename>server/src/printers/KleePrinter.h
/*
* Copyright (c) Huawei Technologies Co., Ltd. 2012-2021. All rights reserved.
*/
#ifndef UNITTESTBOT_KLEEPRINTER_H
#define UNITTESTBOT_KLEEPRINTER_H
#include "PathSubstitution.h"
#include "Printer.h"
#include "ProjectContext.h"
#include "Tests.h"
#include "LineInfo.h"
#include "building/BuildDatabase.h"
#include "types/Types.h"
#include "utils/path/FileSystemPath.h"
#include <cstdio>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
using tests::Tests;
namespace printer {
    // Printer that emits the temporary C/C++ source file used as the KLEE
    // entry point: symbolic parameter declarations, klee_make_symbolic
    // calls, klee_assume constraints and per-method test entry points.
    class KleePrinter : public Printer {
    public:
        KleePrinter(const types::TypesHandler *typesHandler,
                    std::shared_ptr<BuildDatabase> buildDatabase,
                    utbot::Language srcLanguage);

        utbot::Language getLanguage() const override;

        // Writes the generated KLEE harness for `tests` into a temporary file
        // under `buildDir` and returns its path. `methodFilter` restricts
        // which methods are emitted; the default accepts every method.
        fs::path writeTmpKleeFile(
            const Tests &tests,
            const std::string &buildDir,
            const PathSubstitution &pathSubstitution,
            const std::optional<LineInfo::PredicateInfo> &predicateInfo = std::nullopt,
            const std::string &testedMethod = "",
            const std::optional<std::string> &testedClass = "",
            bool onlyForOneFunction = false,
            bool onlyForOneClass = false,
            const std::function<bool(tests::Tests::MethodDescription const &)> &methodFilter = [](tests::Tests::MethodDescription const &) { return true; });

        std::string addTestLineFlag(const std::shared_ptr<LineInfo> &lineInfo,
                                    bool needAssertion,
                                    const utbot::ProjectContext &projectContext);

        [[nodiscard]] std::vector<std::string> getIncludePaths(const Tests &tests, const PathSubstitution &substitution) const;

    private:
        types::TypesHandler const *typesHandler;
        std::shared_ptr<BuildDatabase> buildDatabase;

        using PredInfo = LineInfo::PredicateInfo;

        // Cursor-like state threaded through recursive constraint generation.
        struct ConstraintsState {
            std::string paramName;
            std::string curElement;
            types::Type curType;
        };

        // --- entry point & declarations ---
        void declTestEntryPoint(const Tests &tests, const Tests::MethodDescription &testMethod);
        void genGlobalParamsDeclarations(const Tests::MethodDescription &testMethod);
        void genPostParamsVariables(const Tests::MethodDescription &testMethod);
        void genParamsDeclarations(const Tests::MethodDescription &testMethod);
        bool genParamDeclaration(const Tests::MethodDescription &testMethod,
                                 const Tests::MethodParam &param);
        bool genPointerParamDeclaration(const Tests::MethodParam &param);
        void genReturnDeclaration(const Tests::MethodDescription &testMethod, const std::optional<PredInfo> &predicateInfo);

        // --- klee_assume generation ---
        void genParamsKleeAssumes(const Tests::MethodDescription &testMethod,
                                  const std::optional<PredInfo> &predicateInfo,
                                  const std::string &testedMethod,
                                  bool onlyForOneEntity);
        void genGlobalsKleeAssumes(const Tests::MethodDescription &testMethod);
        void genPostParamsKleeAssumes(const Tests::MethodDescription &testMethod);

        /*
         * Functions for constraints generation.
         */
        void genConstraints(const Tests::MethodParam &param, const std::string& methodName = "");
        void genTwoDimPointers(const Tests::MethodParam &param, bool needDeclare);
        void genVoidFunctionAssumes(const Tests::MethodDescription &testMethod,
                                    const std::optional<PredInfo> &predicateInfo,
                                    const std::string &testedMethod,
                                    bool onlyForOneEntity);
        void genNonVoidFunctionAssumes(const Tests::MethodDescription &testMethod,
                                       const std::optional<PredInfo> &predicateInfo,
                                       const std::string &testedMethod,
                                       bool onlyForOneEntity);
        void genKleePathSymbolicIfNeeded(const std::optional<PredInfo> &predicateInfo,
                                         const std::string &testedMethod,
                                         bool onlyForOneEntity);
        void genKleePathSymbolicAssumeIfNeeded(const std::optional<PredInfo> &predicateInfo,
                                               const std::string &testedMethod,
                                               bool onlyForOneEntity);

        [[maybe_unused]] void addHeaderIncludeIfNecessary(std::unordered_set<std::string> &headers, const types::Type &type);

        // --- klee_make_symbolic emission (streamed into the printer buffer) ---
        Stream strKleeMakeSymbolic(SRef varName, bool needAmpersand);
        Stream strKleeMakeSymbolic(const types::Type &type, SRef varName, SRef pseudoName, bool needAmpersand);
        Stream strKleeMakeSymbolic(const types::Type &type, SRef varName, bool needAmpersand);

        void genPostGlobalSymbolicVariables(const Tests::MethodDescription &testMethod);
        void genPostParamsSymbolicVariables(const Tests::MethodDescription &testMethod);

        void makeBracketsForStrPredicate(const std::optional<PredInfo> &info);

        // --- helpers mapping a method parameter to its KLEE counterpart ---
        static Tests::MethodParam getKleeMethodParam(tests::Tests::MethodParam const &param);
        static Tests::MethodParam getKleePostParam(const Tests::MethodParam &param);
        static Tests::MethodParam getKleeGlobalParam(tests::Tests::MethodParam const &param);
        static Tests::MethodParam getKleeGlobalPostParam(const Tests::MethodParam &globalParam);

        void genPostSymbolicVariable(const Tests::MethodDescription &testMethod, const Tests::MethodParam &param);
        void genPostAssumes(const Tests::MethodParam &param, bool visitGlobal = false);
    };
}
#endif //UNITTESTBOT_KLEEPRINTER_H
|
<reponame>zouvier/BlockChain-Voting
import type { ethers } from "ethers";
import { NomicLabsHardhatPluginError } from "hardhat/plugins";
import {
Artifact,
HardhatRuntimeEnvironment,
NetworkConfig,
} from "hardhat/types";
import type { SignerWithAddress } from "../signers";
import type { FactoryOptions, Libraries } from "../types";
interface Link {
sourceName: string;
libraryName: string;
address: string;
}
const pluginName = "hardhat-ethers";
// Returns a SignerWithAddress wrapper for every account the provider
// knows about, in provider order.
export async function getSigners(
  hre: HardhatRuntimeEnvironment
): Promise<SignerWithAddress[]> {
  const accounts = await hre.ethers.provider.listAccounts();
  return Promise.all(accounts.map((account) => getSigner(hre, account)));
}
// Wraps the provider's JSON-RPC signer for `address` in a
// SignerWithAddress. The signers module is loaded lazily via dynamic
// import, as in the rest of this file.
export async function getSigner(
  hre: HardhatRuntimeEnvironment,
  address: string
): Promise<SignerWithAddress> {
  const signersModule = await import("../signers");
  const jsonRpcSigner = hre.ethers.provider.getSigner(address);
  return signersModule.SignerWithAddress.create(jsonRpcSigner);
}
// Overload: look the contract up by name in the compiled artifacts.
export function getContractFactory(
  hre: HardhatRuntimeEnvironment,
  name: string,
  signerOrOptions?: ethers.Signer | FactoryOptions
): Promise<ethers.ContractFactory>;
// Overload: build the factory directly from an ABI and bytecode.
export function getContractFactory(
  hre: HardhatRuntimeEnvironment,
  abi: any[],
  bytecode: ethers.utils.BytesLike,
  signer?: ethers.Signer
): Promise<ethers.ContractFactory>;
// Implementation: dispatches on the type of the second argument; the
// casts are safe because each overload fixes the argument shapes.
export async function getContractFactory(
  hre: HardhatRuntimeEnvironment,
  nameOrAbi: string | any[],
  bytecodeOrFactoryOptions?:
    | (ethers.Signer | FactoryOptions)
    | ethers.utils.BytesLike,
  signer?: ethers.Signer
) {
  if (typeof nameOrAbi === "string") {
    return getContractFactoryByName(
      hre,
      nameOrAbi,
      bytecodeOrFactoryOptions as ethers.Signer | FactoryOptions | undefined
    );
  }
  return getContractFactoryByAbiAndBytecode(
    hre,
    nameOrAbi,
    bytecodeOrFactoryOptions as ethers.utils.BytesLike,
    signer
  );
}
// Type guard: anything that is neither undefined nor an ethers Signer
// must be a FactoryOptions object.
function isFactoryOptions(
  signerOrOptions?: ethers.Signer | FactoryOptions
): signerOrOptions is FactoryOptions {
  const { Signer } = require("ethers") as typeof ethers;
  return !(signerOrOptions === undefined || signerOrOptions instanceof Signer);
}
// Resolves `contractName` to an artifact, links any libraries given in
// the options, and builds a ContractFactory. Rejects abstract contracts
// (artifact bytecode "0x"), which cannot be deployed.
async function getContractFactoryByName(
  hre: HardhatRuntimeEnvironment,
  contractName: string,
  signerOrOptions?: ethers.Signer | FactoryOptions
) {
  const artifact = await hre.artifacts.readArtifact(contractName);
  let libraries: Libraries = {};
  let signer: ethers.Signer | undefined;
  // Second argument may be either a plain signer or a FactoryOptions bag.
  if (isFactoryOptions(signerOrOptions)) {
    signer = signerOrOptions.signer;
    libraries = signerOrOptions.libraries ?? {};
  } else {
    signer = signerOrOptions;
  }
  if (artifact.bytecode === "0x") {
    throw new NomicLabsHardhatPluginError(
      pluginName,
      `You are trying to create a contract factory for the contract ${contractName}, which is abstract and can't be deployed.
If you want to call a contract using ${contractName} as its interface use the "getContractAt" function instead.`
    );
  }
  const linkedBytecode = await collectLibrariesAndLink(artifact, libraries);
  return getContractFactoryByAbiAndBytecode(
    hre,
    artifact.abi,
    linkedBytecode,
    signer
  );
}
// Validates the user-supplied `libraries` map against the artifact's
// linkReferences and returns the fully linked bytecode. Throws a
// plugin error for: invalid addresses, unknown library names,
// ambiguous short names, duplicate (short + fully-qualified) entries,
// and missing links.
async function collectLibrariesAndLink(
  artifact: Artifact,
  libraries: Libraries
) {
  const { utils } = require("ethers") as typeof ethers;
  // Every library the artifact needs, flattened from linkReferences.
  const neededLibraries: Array<{
    sourceName: string;
    libName: string;
  }> = [];
  for (const [sourceName, sourceLibraries] of Object.entries(
    artifact.linkReferences
  )) {
    for (const libName of Object.keys(sourceLibraries)) {
      neededLibraries.push({ sourceName, libName });
    }
  }
  // Keyed by fully-qualified name ("source.sol:Lib") to detect duplicates.
  const linksToApply: Map<string, Link> = new Map();
  for (const [linkedLibraryName, linkedLibraryAddress] of Object.entries(
    libraries
  )) {
    if (!utils.isAddress(linkedLibraryAddress)) {
      throw new NomicLabsHardhatPluginError(
        pluginName,
        `You tried to link the contract ${artifact.contractName} with the library ${linkedLibraryName}, but provided this invalid address: ${linkedLibraryAddress}`
      );
    }
    // The user may give either a bare library name or a fully-qualified one.
    const matchingNeededLibraries = neededLibraries.filter((lib) => {
      return (
        lib.libName === linkedLibraryName ||
        `${lib.sourceName}:${lib.libName}` === linkedLibraryName
      );
    });
    if (matchingNeededLibraries.length === 0) {
      let detailedMessage: string;
      if (neededLibraries.length > 0) {
        const libraryFQNames = neededLibraries
          .map((lib) => `${lib.sourceName}:${lib.libName}`)
          .map((x) => `* ${x}`)
          .join("\n");
        detailedMessage = `The libraries needed are:
${libraryFQNames}`;
      } else {
        detailedMessage = "This contract doesn't need linking any libraries.";
      }
      throw new NomicLabsHardhatPluginError(
        pluginName,
        `You tried to link the contract ${artifact.contractName} with ${linkedLibraryName}, which is not one of its libraries.
${detailedMessage}`
      );
    }
    if (matchingNeededLibraries.length > 1) {
      const matchingNeededLibrariesFQNs = matchingNeededLibraries
        .map(({ sourceName, libName }) => `${sourceName}:${libName}`)
        .map((x) => `* ${x}`)
        .join("\n");
      throw new NomicLabsHardhatPluginError(
        pluginName,
        `The library name ${linkedLibraryName} is ambiguous for the contract ${artifact.contractName}.
It may resolve to one of the following libraries:
${matchingNeededLibrariesFQNs}
To fix this, choose one of these fully qualified library names and replace where appropriate.`
      );
    }
    const [neededLibrary] = matchingNeededLibraries;
    const neededLibraryFQN = `${neededLibrary.sourceName}:${neededLibrary.libName}`;
    // The only way for this library to be already mapped is
    // for it to be given twice in the libraries user input:
    // once as a library name and another as a fully qualified library name.
    if (linksToApply.has(neededLibraryFQN)) {
      throw new NomicLabsHardhatPluginError(
        pluginName,
        `The library names ${neededLibrary.libName} and ${neededLibraryFQN} refer to the same library and were given as two separate library links.
Remove one of them and review your library links before proceeding.`
      );
    }
    linksToApply.set(neededLibraryFQN, {
      sourceName: neededLibrary.sourceName,
      libraryName: neededLibrary.libName,
      address: linkedLibraryAddress,
    });
  }
  // Every needed library must have been supplied.
  if (linksToApply.size < neededLibraries.length) {
    const missingLibraries = neededLibraries
      .map((lib) => `${lib.sourceName}:${lib.libName}`)
      .filter((libFQName) => !linksToApply.has(libFQName))
      .map((x) => `* ${x}`)
      .join("\n");
    throw new NomicLabsHardhatPluginError(
      pluginName,
      `The contract ${artifact.contractName} is missing links for the following libraries:
${missingLibraries}
Learn more about linking contracts at https://hardhat.org/plugins/nomiclabs-hardhat-ethers.html#library-linking
`
    );
  }
  return linkBytecode(artifact, [...linksToApply.values()]);
}
// Builds an ethers ContractFactory from a raw ABI + bytecode, defaulting
// the signer to the first configured account and patching fixed gas
// limits into the ABI when the network requires it.
async function getContractFactoryByAbiAndBytecode(
  hre: HardhatRuntimeEnvironment,
  abi: any[],
  bytecode: ethers.utils.BytesLike,
  signer?: ethers.Signer
) {
  const { ContractFactory } = require("ethers") as typeof ethers;
  if (signer === undefined) {
    signer = (await hre.ethers.getSigners())[0];
  }
  const patchedAbi = addGasToAbiMethodsIfNecessary(hre.network.config, abi);
  return new ContractFactory(patchedAbi, bytecode, signer);
}
// Returns a Contract instance attached to `address`, resolving the
// interface either from an artifact name or from a raw ABI.
export async function getContractAt(
  hre: HardhatRuntimeEnvironment,
  nameOrAbi: string | any[],
  address: string,
  signer?: ethers.Signer
) {
  const { Contract } = require("ethers") as typeof ethers;
  if (typeof nameOrAbi === "string") {
    const artifact = await hre.artifacts.readArtifact(nameOrAbi);
    // Reuse the factory path with dummy bytecode; attach() only needs the ABI.
    const factory = await getContractFactoryByAbiAndBytecode(
      hre,
      artifact.abi,
      "0x",
      signer
    );
    let contract = factory.attach(address);
    // If there's no signer, we connect the contract instance to the provider for the selected network.
    if (contract.provider === null) {
      contract = contract.connect(hre.ethers.provider);
    }
    return contract;
  }
  if (signer === undefined) {
    const signers = await hre.ethers.getSigners();
    signer = signers[0];
  }
  // If there's no signer, we want to put the provider for the selected network here.
  // This allows read only operations on the contract interface.
  // NOTE(review): after the block above, `signer` is only undefined when the
  // network has no configured accounts (signers[0] === undefined), so the
  // provider fallback below is the no-accounts path — not dead code.
  const signerOrProvider: ethers.Signer | ethers.providers.Provider =
    signer !== undefined ? signer : hre.ethers.provider;
  const abiWithAddedGas = addGasToAbiMethodsIfNecessary(
    hre.network.config,
    nameOrAbi
  );
  return new Contract(address, abiWithAddedGas, signerOrProvider);
}
// This helper adds a `gas` field to the ABI function elements if the network
// is set up to use a fixed amount of gas.
// This is done so that ethers doesn't automatically estimate gas limits on
// every call.
function addGasToAbiMethodsIfNecessary(
  networkConfig: NetworkConfig,
  abi: any[]
): any[] {
  const { BigNumber } = require("ethers") as typeof ethers;
  // "auto"/undefined means ethers should estimate — leave the ABI untouched.
  if (networkConfig.gas === "auto" || networkConfig.gas === undefined) {
    return abi;
  }
  // ethers adds 21000 to whatever the abi `gas` field has. This may lead to
  // OOG errors, as people may set the default gas to the same value as the
  // block gas limit, especially on Hardhat Network.
  // To avoid this, we substract 21000.
  // HOTFIX: We substract 1M for now. See: https://github.com/ethers-io/ethers.js/issues/1058#issuecomment-703175279
  const gasLimit = BigNumber.from(networkConfig.gas).sub(1000000).toHexString();
  // Only function entries get the gas field; everything else passes through.
  return abi.map((abiElement) =>
    abiElement.type === "function"
      ? { ...abiElement, gas: gasLimit }
      : abiElement
  );
}
// Splices each library's address into the artifact bytecode at the
// offsets recorded in linkReferences. Offsets/lengths are in bytes; the
// hex string uses two characters per byte plus a leading "0x" (hence
// the `2 +` and `* 2` arithmetic).
// Modernized: String.prototype.substr is deprecated — replaced with the
// equivalent slice() calls.
function linkBytecode(artifact: Artifact, libraries: Link[]): string {
  let bytecode = artifact.bytecode;
  // TODO: measure performance impact
  for (const { sourceName, libraryName, address } of libraries) {
    const linkReferences = artifact.linkReferences[sourceName][libraryName];
    for (const { start, length } of linkReferences) {
      const prefix = bytecode.slice(0, 2 + start * 2);
      const suffix = bytecode.slice(2 + (start + length) * 2);
      // address.slice(2) drops the "0x" prefix before splicing.
      bytecode = prefix + address.slice(2) + suffix;
    }
  }
  return bytecode;
}
|
<gh_stars>1-10
import React from 'react';
export const EuiIconHome = ({ title, titleId, ...props }) => (
<svg
width={16}
height={16}
viewBox="0 0 16 16"
xmlns="http://www.w3.org/2000/svg"
aria-labelledby={titleId}
{...props}>
{title ? <title id={titleId}>{title}</title> : null}
<path
fillRule="evenodd"
clipRule="evenodd"
d="M13 14V6.43782L7.5 1.3609L2 6.43782L2 14H13ZM1.32172 5.70302C1.11664 5.89233 1 6.15873 1 6.43782V14C1 14.5523 1.44772 15 2 15H13C13.5523 15 14 14.5523 14 14V6.43782C14 6.15873 13.8834 5.89233 13.6783 5.70302L8.17828 0.626098C7.79522 0.272502 7.20478 0.272502 6.82172 0.626097L1.32172 5.70302Z"
/>
</svg>
);
export const home = EuiIconHome;
|
import React, { useMemo, useState } from 'react';
import { Button, CircularProgress, Tooltip, Typography } from '@material-ui/core';
import { Pause, FiberManualRecord } from '@material-ui/icons'
import { useStyles } from './css';
import { useAppState } from '../../../../providers/AppStateProvider';
import { DefaultDeviceController } from 'amazon-chime-sdk-js';
import { RecorderView } from '../ScreenView/RecorderView';
// Recording panel: captures the meeting's mixed audio (speaker output plus,
// optionally, a local microphone chosen for recording) together with the
// RecorderView canvas as the video track, and drives the
// start / stop / encode UI state.
export const RecorderPanel = () => {
    const classes = useStyles();
    const { activeRecorder, audioInputDeviceSetting } = useAppState()
    const [recorderCanvas, setRecorderCanvas] = useState<HTMLCanvasElement|null>(null)
    const [ isEncoding, setIsEncoding ] = useState(false)
    const [ isRecording, setIsRecording ] = useState(false)

    // Build a combined MediaStream (mixed audio + canvas video) and hand it
    // to the active recorder.
    const handleOnClickStartRecord = async() =>{
        setIsRecording(true)
        const stream = new MediaStream();
        const audioElem = document.getElementById("for-speaker") as HTMLAudioElement
        // @ts-ignore
        const audioStream = audioElem.captureStream() as MediaStream
        let localAudioStream = audioInputDeviceSetting?.audioInputForRecord
        // A string setting is a deviceId; resolve it to a real stream.
        if(typeof localAudioStream === "string"){
            localAudioStream = await navigator.mediaDevices.getUserMedia({audio:{deviceId:localAudioStream}})
        }
        // Mix speaker audio and (optional) local mic into one destination node.
        const audioContext = DefaultDeviceController.getAudioContext();
        const outputNode = audioContext.createMediaStreamDestination();
        const sourceNode1 = audioContext.createMediaStreamSource(audioStream);
        sourceNode1.connect(outputNode)
        if(localAudioStream){
            const sourceNode2 = audioContext.createMediaStreamSource(localAudioStream as MediaStream);
            sourceNode2.connect(outputNode)
        }
        // @ts-ignore
        const videoStream = recorderCanvas.captureStream() as MediaStream;
        // BUGFIX: the semicolon above is required. Without it, automatic
        // semicolon insertion merges this statement with the `[...]` below,
        // turning the array literal into an index expression and breaking
        // track collection.
        [outputNode.stream, videoStream].forEach(s=>{
            s?.getTracks().forEach(t=>{
                console.log("added tracks:", t)
                stream.addTrack(t)
            })
        });
        activeRecorder?.startRecording(stream)
    }

    // Stop recording, then encode to mp4 (isEncoding gates the spinner).
    const handleOnClickStopRecord = async() =>{
        activeRecorder?.stopRecording()
        setIsEncoding(true)
        await activeRecorder?.toMp4()
        console.log("---------------------------------------------------- 1")
        setIsEncoding(false)
        console.log("---------------------------------------------------- 2")
        setIsRecording(false)
        console.log("---------------------------------------------------- 3")
    }

    // REC button: clickable only while idle; otherwise rendered inert.
    const startButton = useMemo(()=>{
        return isRecording === false && isEncoding === false ?
        (
            <Tooltip title={activeRecorder?.isRecording?"stop recording":"start recording"}>
                <Button
                    size="small"
                    variant="outlined"
                    className={activeRecorder?.isRecording ? classes.activatedButton : classes.button}
                    startIcon={<FiberManualRecord />}
                    onClick={handleOnClickStartRecord}
                    id="recorder-start"
                >
                    Rec.
                </Button>
            </Tooltip>
        )
        :
        (
            <Tooltip title={activeRecorder?.isRecording?"stop recording":"start recording"}>
                <Button
                    size="small"
                    variant="outlined"
                    className={activeRecorder?.isRecording ? classes.activatedButton : classes.button}
                    startIcon={<FiberManualRecord />}
                    id="recorder-start"
                >
                    Rec.
                </Button>
            </Tooltip>
        )
    },[isRecording, isEncoding, recorderCanvas]) // eslint-disable-line

    // STOP button: disabled while idle, active while recording, and replaced
    // by a progress spinner while encoding.
    const stopButton = useMemo(()=>{
        if(isRecording === false && isEncoding === false){
            return <Tooltip title={activeRecorder?.isRecording?"stop recording":"start recording"}>
                <Button
                    size="small"
                    variant="outlined"
                    className={classes.button}
                    startIcon={<Pause />}
                    disabled
                    id="recorder-stop"
                >
                    Stop
                </Button>
            </Tooltip>
        }else if(isRecording === true && isEncoding === false){
            return <Tooltip title={activeRecorder?.isRecording?"stop recording":"start recording"}>
                <Button
                    size="small"
                    variant="outlined"
                    className={classes.button}
                    startIcon={<Pause />}
                    onClick={handleOnClickStopRecord}
                    id="recorder-stop"
                >
                    Stop
                </Button>
            </Tooltip>
        }else if(isRecording === true && isEncoding === true){
            return <CircularProgress />
        }
    },[isRecording, isEncoding, recorderCanvas]) // eslint-disable-line

    return (
        <div className={classes.root}>
            <Typography className={classes.title} color="textSecondary">
                Push REC button to start recording. Push STOP button to end recording and download file.
                Note: Please confirm the screen below shows the movie you want to record.
                Depends on the browser or its version, you should display the screen below in order to update image on the screen below.
            </Typography>
            {startButton}
            {stopButton}
            <RecorderView height={200} width={200} setRecorderCanvas={setRecorderCanvas}/>
        </div>
    );
}
# Selection Sort in Python
def selectionSort(arr):
    """Sort ``arr`` in place (ascending) using selection sort.

    For each position, find the index of the smallest remaining element
    and swap it into place. O(n^2) comparisons; ties resolve to the
    first minimal index, exactly like the classic two-loop version.
    """
    n = len(arr)
    for fill in range(n):
        # Index of the smallest element in arr[fill:].
        smallest = min(range(fill, n), key=arr.__getitem__)
        arr[fill], arr[smallest] = arr[smallest], arr[fill]
# Test program: sort a sample list, then print one element per line.
arr = [3, 8, 5, 4, 1, 9, 6]
selectionSort(arr)
print("Sorted array is:")
for value in arr:
    print("%d" % value)
#!/bin/bash
# Deploys the most recent WAR from /build as Tomcat's ROOT app, waits for
# the MySQL database to accept connections, then starts Tomcat.
echo Deleting contents of webapps/
rm -rf /var/lib/tomcat8/webapps/*
BUILD_DIR=/build
SVC_WAR=*.war
echo Checking that WAR exists
# Count WAR files in the build dir (glob expanded by ls; errors suppressed).
WARCOUNT=$(ls ${BUILD_DIR}/${SVC_WAR} 2> /dev/null | wc -l)
if [ $WARCOUNT -ne 0 ] ; then
echo Copying WAR to webapps
# `ls -t | head -1` picks the newest WAR when several are present.
cp `ls -t ${BUILD_DIR}/${SVC_WAR} | head -1` /var/lib/tomcat8/webapps/ROOT.war
else
echo "WAR file not found in ${BUILD_DIR}. Exiting..."
exit 1
fi
# Busy-wait (1s interval, no timeout) until MySQL accepts a connection.
# MYSQL_HOSTNAME is expected in the environment (e.g. from docker-compose).
MYSQL_CMD="mysql -h ${MYSQL_HOSTNAME} -u repouser repo"
$MYSQL_CMD -e 'exit'
MYSQL_NOT_CONNECTING=$?
while [ $MYSQL_NOT_CONNECTING -ne 0 ] ; do
sleep 1;
$MYSQL_CMD -e 'exit'
MYSQL_NOT_CONNECTING=$?
echo -e "\nDatabase (${MYSQL_HOSTNAME}) not ready ... waiting"
done;
echo -e "\nDatabase (${MYSQL_HOSTNAME}) ready!"
/etc/init.d/tomcat8 start
# The container will run as long as the script is running,
# that's why we need something long-lived here
exec tail -f /var/log/tomcat8/catalina.out
|
<filename>DesignPattern/src/Proxy/Main.java
package Proxy;
public class Main {
    /**
     * Demo driver for the Proxy pattern: the PrinterProxy defers creating
     * the real printer until print() is first called; name changes before
     * that are handled by the proxy alone.
     */
    public static void main(String[] args) {
        final Printable printer = new PrinterProxy("YANG");
        System.out.println("현재 이름 : " + printer.getPrinterName());
        printer.setPrinterName("DONG");
        printer.print("printer test");
        printer.setPrinterName("JUE");
        printer.print("hello");
    }
}
|
#include "FWCore/Framework/interface/MakerMacros.h"
#include "CommonTools/UtilAlgos/interface/EtMinSelector.h"
#include "CommonTools/UtilAlgos/interface/SingleObjectSelector.h"
#include "DataFormats/JetReco/interface/CaloJet.h"
// Returns the subset of `jets` passing the minimum-Et requirement.
// NOTE(review): EtMinSelector / SingleObjectSelector are CMSSW framework
// selector templates that are normally instantiated as EDFilter modules via
// DEFINE_FWK_MODULE; invoking SingleObjectSelector as a plain functor on a
// collection, as done here, may not match its actual interface — verify
// against the CommonTools/UtilAlgos headers before relying on this.
reco::CaloJetCollection selectJets(const reco::CaloJetCollection& jets, double minEt) {
    // Define the EtMinSelector using the provided minimum Et threshold
    EtMinSelector<reco::CaloJet> etMinSelector(minEt);
    // Create a SingleObjectSelector using the EtMinSelector
    SingleObjectSelector<reco::CaloJetCollection, EtMinSelector<reco::CaloJet>> jetSelector(etMinSelector);
    // Apply the selection to the input jet collection
    reco::CaloJetCollection selectedJets = jetSelector(jets);
    return selectedJets;
}
# Refresh user.json from user.info.
# user.json is a regular file, so -f suffices; the recursive -r flag was
# unnecessary (and risky if the name ever pointed at a directory by mistake).
rm -f user.json
cp user.info user.json
|
#!/usr/bin/env bash
# Builds the project documentation entirely through Docker containers:
# installs node deps, builds the unified model, renders AsciiDoc from
# Liquid templates, then produces HTML and PDF outputs in dist/.
# Install node_modules, if not already installed
if [ ! -r ./node_modules ]; then
docker run --rm --volume "$PWD:/src" -w "/src" capsulecorplab/asciidoctor-extended:asciidocsy-nodejs 'npm i'
fi
# Install m30pm/node_modules, if not already installed
if [ ! -r ./m30pm/node_modules ]; then
docker run --rm --volume "$PWD:/src" -w "/src" capsulecorplab/asciidoctor-extended:asciidocsy-nodejs 'cd m30pm && npm ci'
fi
# Make dist/ directory, if none exists
if [ ! -r ./dist ]; then
mkdir dist/
fi
# Build the unified model
docker run --rm -v "$PWD":/usr/src/app -w /usr/src/app node:14 node m30pm/buildUnifiedModel.js
# copy dist/architecture.yaml to dist/architecture.yml
# (liquidoc below expects the .yml extension)
cp dist/architecture.yaml dist/architecture.yml
# generate architecture.adoc from liquid template
docker run --rm -v "$PWD":/usr/src/app -w /usr/src/app node:14 node m30pm/generateDoc.js --unifiedModel=dist/architecture.yaml --template=templates/architecture.adoc.liquid --out=dist/architecture.adoc
# generate pdf-theme.yml from liquid template
docker run --rm -v "$PWD:/src" -w "/src" capsulecorplab/asciidoctor-extended:liquidoc 'bundle exec liquidoc -d dist/architecture.yml -t templates/pdf-theme.yml.liquid -o dist/pdf-theme.yml'
# generate index.html
docker run --rm -v "$PWD:/src" -w "/src" asciidoctor/docker-asciidoctor asciidoctor dist/architecture.adoc -r asciidoctor-diagram -o dist/index.html
# generate pylar-architecture.pdf
docker run --rm -v "$PWD:/src" -w "/src" asciidoctor/docker-asciidoctor asciidoctor dist/architecture.adoc -o dist/pylar-docs-architecture.pdf -r asciidoctor-pdf -r asciidoctor-diagram -b pdf -a pdf-theme=dist/pdf-theme.yml
|
#!/bin/bash
# MSYS2/MinGW CI build: configure via cmake-config, build, and install.
# set -e aborts on the first failing command.
set -e
pwd
echo $HOME
# -p: succeed even if build/ already exists, so the script is re-runnable
# (plain `mkdir build` fails on a second invocation).
mkdir -p build
cd build
HAVE_X11=NO NETCDF_PATH=/mingw64 HDF5_PATH=/mingw64 CGNS_PATH=/mingw64 MPI=NO bash ../cmake-config
make -k -j4
make install
#ctest -j 4 --output-on-failure
|
# This script is for continuous integration using Jenkins (http://jenkins-ci.org/)
# It is called from the parent directory, i.e. bash -xe trunk/.jenkins.sh
# Overview: rebuild the C++ core only when src/ changed, always run the
# Python test suite, then rebuild the documentation.
echo "Starting automatic build #$BUILD_NUMBER on" `date`
start=$(date +"%s")
# Show last change to repo in build log
echo `git --git-dir trunk/.git log -1 --pretty="Last change by %cn (%h): %B"`
# link python to python3
ln -sf /usr/bin/python3 python
export PATH=`pwd`:$PATH
# Show system information
lsb_release -d
uname -a
gcc --version
cmake --version
python --version
python -c "import numpy; print(numpy.__version__)"
python -c "import matplotlib; print(matplotlib.__version__)"
# Check if core was changed
# (counts changed paths under src/; `|| true` keeps set -e from aborting
# when grep finds nothing)
core_update=$(git --git-dir=trunk/.git diff-tree -r $GIT_COMMIT | grep -c src || true)
# Set this to 1 if you want clean build (also of dependencies)
export CLEAN=0
export GIMLI_NUM_THREADS=4
################
#  Main build  #
################
# just do this if something is wrong with the thirdparty sources
#rm -rf thirdParty/src
#rm -rf build # Uncomment for clean build (expensive, but necessary sometimes)
#rm -f build/build_tests.html # remove old test report
#rm -f build/CMakeCache.txt # clean old cache
mkdir -p build
cd build
if [ ! -f CMakeCache.txt ]; then
# Always rebuild core when Cmake cache does not exist
core_update=2
fi
if [[ $core_update -ge 1 ]]; then
echo "# Core changes detected. #"
cmake ../trunk \
-DPYVERSION=3 \
-DPYTHON_EXECUTABLE=/usr/bin/python3 \
-DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.5m.so \
-DBoost_PYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libboost_python-py35.so
make -j 8 gimli
make pygimli J=4
else
echo "# No core changes detected. #"
fi
#############################
#  Testing & documentation  #
#############################
# Test pygimli
export PYTHONPATH=`pwd`/../trunk/python:$PYTHONPATH
# Single-threaded (OMP_THREAD_LIMIT=1) for reproducible test timings.
OMP_THREAD_LIMIT=1 python -c "import pygimli; pygimli.test(show=False, abort=True, htmlreport=\"build_tests.html\")"
# Build documentation
export PUBLISH="True" # for correct PATH settings in sidebar gallery
export PATH=`pwd`/../trunk/python/apps:$PATH
chmod +x ../trunk/python/apps/*
make clean-gallery
make doc # = doxygen, sphinxapi, sphinxpdf, sphinxhtml
end=$(date +"%s")
echo "Ending automatic build #$BUILD_NUMBER".
diff=$(($end-$start))
echo "$(($diff / 60)) minutes and $(($diff % 60)) seconds elapsed."
# If this script fails, a mail with the log file will be send to mail@pygimli.org.
# If it succeeds, the documentation will be uploaded to pygimli.org.
# In any case, a badge icon of the current status and the log file will be uploaded.
|
def solve_8_puzzle(board):
    """Search for a solution to the 8-puzzle starting from ``board``.

    Runs a best-first (A*-style) search using the project-level ``Node``
    class: nodes are expanded in order of lowest ``Node.f`` cost until a
    goal node is found, in which case ``Node.solution(current)`` is
    returned; ``None`` is returned if the open set is exhausted.

    NOTE(review): this relies entirely on the external ``Node`` API
    (``f``, ``is_goal``, ``solution``, ``expand``, equality) — none of it
    is visible here.
    """
    init = Node(board)
    # add the initial node to the open set
    open_set = [init]
    closed_set = []
    # loop until the open set is empty
    while (len(open_set) > 0):
        # find the node in open set with the least cost
        # (linear scan; open_set is a plain list, not a heap)
        current = min(open_set, key=Node.f)
        # if the current node is a goal state, return the solution
        # NOTE(review): ``current.is_goal`` is not called — if ``is_goal``
        # is a plain method (not a property) this is always truthy and the
        # search would terminate immediately. TODO confirm it is a property.
        if (current.is_goal):
            return Node.solution(current)
        # remove the current node from open set
        open_set.remove(current)
        # add current node to the closed set
        # (list membership below is O(n); a set would be faster if Node is
        # hashable — TODO confirm)
        closed_set.append(current)
        # expand the current node
        for node in current.expand():
            if (node not in open_set and node not in closed_set):
                # add the node to the open set
                # NOTE(review): a node already present is never re-opened
                # even if this path to it is cheaper — classic A* would
                # update its cost. Verify this is intended.
                open_set.append(node)
    return None
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db.sqlalchemy.models import Release
from nailgun.openstack.common import jsonutils
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
class TestHandlers(BaseIntegrationTest):
    """Integration tests for the release collection REST API.

    Exercises ``ReleaseCollectionHandler``: listing releases, creating
    releases, duplicate-creation conflicts, and orchestrator data
    round-tripping.
    """

    # Network descriptions shared by several creation payloads. They are
    # only serialized to JSON, never mutated, so sharing them is safe.
    STORAGE_NETWORK = {
        "name": "storage",
        "cidr": "192.168.1.0/24",
        "gateway": "192.168.1.1",
        "ip_range": [
            "192.168.1.1",
            "192.168.1.254"
        ],
        "vlan_start": 102,
        "assign_vip": False
    }

    MANAGEMENT_NETWORK = {
        "name": "management",
        "cidr": "10.0.0.0/16",
        "gateway": "10.0.0.1",
        "ip_range": [
            "10.0.0.2",
            "10.0.255.254"
        ],
        "vlan_start": 103,
        "assign_vip": False
    }

    def _release_payload(self, name, version, description, networks, **extra):
        """Build a release-creation payload as a plain dict.

        :param networks: list of nova_network network dicts
        :param extra: additional top-level keys (e.g. orchestrator_data)
        """
        payload = {
            'name': name,
            'version': version,
            'description': description,
            'operating_system': 'CentOS',
            'networks_metadata': {
                "nova_network": {
                    "networks": networks
                }
            }
        }
        payload.update(extra)
        return payload

    def _post_release(self, payload, expect_errors=False):
        """POST ``payload`` (a dict) to the release collection endpoint."""
        return self.app.post(
            reverse('ReleaseCollectionHandler'),
            jsonutils.dumps(payload),
            headers=self.default_headers,
            expect_errors=expect_errors
        )

    def test_release_list_empty(self):
        # A fresh environment must report an empty release list.
        resp = self.app.get(
            reverse('ReleaseCollectionHandler'),
            headers=self.default_headers
        )
        self.assertEqual(200, resp.status_code)
        response = jsonutils.loads(resp.body)
        self.assertEqual([], response)

    def test_release_creation(self):
        # A minimal payload (no description / networks) is accepted.
        resp = self._post_release({
            'name': 'Another test release',
            'version': '1.0',
            'operating_system': 'CentOS'
        })
        self.assertEqual(201, resp.status_code)

    def test_release_create(self):
        release_name = "OpenStack"
        release_version = "1.0.0"
        release_description = "This is test release"
        resp = self._post_release(self._release_payload(
            release_name, release_version, release_description,
            [self.STORAGE_NETWORK, self.MANAGEMENT_NETWORK]))
        self.assertEqual(201, resp.status_code)
        # Creating a release with the same name/version must conflict.
        resp = self._post_release(
            self._release_payload(release_name, release_version,
                                  release_description,
                                  [self.MANAGEMENT_NETWORK]),
            expect_errors=True)
        self.assertEqual(409, resp.status_code)
        # Exactly one release with these attributes must exist in the DB.
        release_from_db = self.db.query(Release).filter_by(
            name=release_name,
            version=release_version,
            description=release_description
        ).all()
        self.assertEqual(1, len(release_from_db))

    def test_release_create_already_exist(self):
        release_name = "OpenStack"
        release_version = "1.0.0"
        release_description = "This is test release"
        resp = self._post_release(self._release_payload(
            release_name, release_version, release_description,
            [self.STORAGE_NETWORK, self.MANAGEMENT_NETWORK]))
        self.assertEqual(201, resp.status_code)
        # A duplicate creation attempt is rejected with 409 Conflict.
        resp = self._post_release(
            self._release_payload(release_name, release_version,
                                  release_description,
                                  [self.MANAGEMENT_NETWORK]),
            expect_errors=True)
        self.assertEqual(409, resp.status_code)

    def test_release_w_orch_data_create(self):
        release_name = "OpenStack"
        release_version = "1.0.0"
        release_description = "This is a release w orchestrator data"
        orch_data = {
            "repo_metadata": {
                "nailgun":
                "http://10.20.0.2:8080/centos-5.0/centos/fuelweb/x86_64/"
            },
            "puppet_modules_source":
            "rsync://10.20.0.2/puppet/release/5.0/modules",
            "puppet_manifests_source":
            "rsync://10.20.0.2/puppet/release/5.0/manifests"
        }
        resp = self._post_release(self._release_payload(
            release_name, release_version, release_description,
            [self.STORAGE_NETWORK, self.MANAGEMENT_NETWORK],
            orchestrator_data=orch_data))
        self.assertEqual(201, resp.status_code)
        # The orchestrator data must round-trip unchanged through the API.
        resp = self.app.get(
            reverse("ReleaseCollectionHandler"),
            headers=self.default_headers
        )
        self.assertEqual(200, resp.status_code)
        response = jsonutils.loads(resp.body)
        self.assertEqual(1, len(response))
        self.assertEqual(orch_data, response[0]["orchestrator_data"])
|
<gh_stars>1-10
package automation.report.dao;
import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import java.util.ArrayList;
import java.util.List;
public class AutoReportImpl implements AutoReportDao {

    /** Lazily created shared instance; access is guarded by class-level synchronization. */
    private static AutoReportDao autoReportDao = null;

    private JdbcTemplate jdbcTemplate;

    /**
     * Returns the shared {@link AutoReportDao}, creating it on first use by
     * looking up the bean named {@code dataSource} in {@code beans.xml}.
     * <p>
     * Synchronized so that concurrent first calls cannot create two Spring
     * contexts (the original lazy check was not thread-safe).
     *
     * @param dataSource name of the DAO bean to fetch from the IoC container
     * @return the DAO instance, or {@code null} if bean creation failed
     */
    public static synchronized AutoReportDao createInstance(String dataSource) {
        if (autoReportDao == null) {
            try {
                // Create the IoC container and look up the DAO bean by name.
                ApplicationContext act = new ClassPathXmlApplicationContext("beans.xml");
                autoReportDao = (AutoReportDao) act.getBean(dataSource);
            } catch (BeansException e) {
                System.out.println(e.getMessage() + e);
            }
        }
        return autoReportDao;
    }

    /** Spring injects the dataSource via setter injection and this wraps it in a JdbcTemplate. */
    public void setDataSource(DataSource dataSource) {
        this.jdbcTemplate = new JdbcTemplate(dataSource);
    }

    /** Not implemented; always returns {@code null}. */
    @Override
    public Object[][] getFurtherCheckData() {
        return null;
    }

    /**
     * Returns every {@code case_name} value from the {@code test_case} table.
     * <p>
     * Fix: the previous implementation used the row-map overload of
     * {@code queryForList} and called {@code toString()} on each row, which
     * produced strings like {@code "{case_name=X}"} instead of the column
     * value. Querying for a single {@code String} column returns the values
     * directly.
     *
     * @return list of case names; empty if the table has no rows
     */
    @Override
    public List<String> getCaseList() {
        return new ArrayList<String>(
                jdbcTemplate.queryForList("select case_name from test_case", String.class));
    }

    /** Executes an update statement with no bind parameters. */
    @Override
    public void update(String sql) {
        jdbcTemplate.update(sql);
    }

    /** Executes an update statement with positional bind parameters. */
    @Override
    public void update(String sql, Object[] objs) {
        jdbcTemplate.update(sql, objs);
    }

    /** Executes an update statement with positional bind parameters and explicit SQL types. */
    @Override
    public void update(String sql, Object[] objs, int[] types) {
        jdbcTemplate.update(sql, objs, types);
    }

    /** Executes an update statement binding {@code log} as a single VARCHAR parameter. */
    @Override
    public void update(String sql, String log) {
        jdbcTemplate.update(sql, new Object[]{log}, new int[]{java.sql.Types.VARCHAR});
    }

    /**
     * Inserts a screenshot row into {@code screenshot_db}.
     * <p>
     * NOTE(review): the return value is inverted relative to its name — it is
     * {@code false} on success and {@code true} when the insert threw a
     * {@link DataAccessException}. Callers depend on this, so the behavior is
     * kept; confirm before "fixing" it.
     *
     * @param pic_id     picture identifier
     * @param in         raw image bytes (stored as BLOB)
     * @param url        source URL of the screenshot
     * @param createTime creation timestamp as a string
     * @return {@code true} if the insert FAILED, {@code false} on success
     */
    @Override
    public boolean insertImage(String pic_id, byte[] in, String url, String createTime) {
        boolean flag = false;
        try {
            String insertPicture = "INSERT INTO screenshot_db (`picture_id`, `picture_long`, `url`, `create_time`) VALUES (?,?,?,?);";
            jdbcTemplate.update(insertPicture, new Object[]{pic_id, in, url, createTime}, new int[]{
                    java.sql.Types.VARCHAR, java.sql.Types.BLOB, java.sql.Types.VARCHAR, java.sql.Types.VARCHAR});
        } catch (DataAccessException e) {
            flag = true;
            e.printStackTrace();
        }
        return flag;
    }
}
|
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package gridfs
import (
"errors"
"math"
"context"
"time"
"github.com/mongodb/mongo-go-driver/bson/primitive"
"github.com/mongodb/mongo-go-driver/mongo"
"github.com/mongodb/mongo-go-driver/x/bsonx"
)
// UploadBufferSize is the size in bytes of one stream batch. Chunks will be written to the db after the sum of chunk
// lengths is equal to the batch size.
const UploadBufferSize = 16 * 1000000 // 16 MB (decimal megabytes)

// ErrStreamClosed is an error returned if an operation is attempted on a closed/aborted stream.
var ErrStreamClosed = errors.New("stream is closed or aborted")
// UploadStream is used to upload files in chunks.
type UploadStream struct {
	*Upload // chunk size and metadata
	FileID  primitive.ObjectID // _id of the files-collection document; "files_id" on every chunk

	chunkIndex    int               // sequence number ("n") assigned to the next chunk written
	chunksColl    *mongo.Collection // collection to store file chunks
	filename      string            // filename recorded in the files-collection document
	filesColl     *mongo.Collection // collection to store file metadata
	closed        bool              // set once Close or Abort has succeeded
	buffer        []byte            // staging buffer of UploadBufferSize bytes
	bufferIndex   int               // number of valid bytes currently held in buffer
	fileLen       int64             // total bytes flushed to the chunks collection so far
	writeDeadline time.Time         // deadline applied when deriving contexts for writes
}
// newUploadStream creates an upload stream for the given file that writes
// chunk documents to chunks and the final metadata document to files.
func newUploadStream(upload *Upload, fileID primitive.ObjectID, filename string, chunks *mongo.Collection, files *mongo.Collection) *UploadStream {
	stream := UploadStream{
		Upload:     upload,
		FileID:     fileID,
		filename:   filename,
		chunksColl: chunks,
		filesColl:  files,
		buffer:     make([]byte, UploadBufferSize),
	}
	return &stream
}
// Close flushes any remaining buffered data as chunks, writes the
// files-collection metadata document, and marks the stream closed.
// Returns ErrStreamClosed if the stream was already closed or aborted.
func (us *UploadStream) Close() error {
	if us.closed {
		return ErrStreamClosed
	}

	ctx, cancel := deadlineContext(us.writeDeadline)
	if cancel != nil {
		defer cancel()
	}

	// Flush whatever is still sitting in the buffer before finalizing.
	if us.bufferIndex > 0 {
		if err := us.uploadChunks(ctx); err != nil {
			return err
		}
	}
	if err := us.createFilesCollDoc(ctx); err != nil {
		return err
	}

	us.closed = true
	return nil
}
// SetWriteDeadline sets the deadline used by subsequent Write, Close and
// Abort calls on this stream. Fails with ErrStreamClosed once the stream has
// been closed or aborted.
func (us *UploadStream) SetWriteDeadline(t time.Time) error {
	if !us.closed {
		us.writeDeadline = t
		return nil
	}
	return ErrStreamClosed
}
// Write transfers the contents of a byte slice into this upload stream. If the stream's underlying buffer fills up,
// the buffer will be uploaded as chunks to the server. Implements the io.Writer interface.
//
// Returns ErrStreamClosed if the stream has been closed or aborted. On a
// flush failure the data buffered so far is not persisted and (0, err) is
// returned, matching the original behavior.
func (us *UploadStream) Write(p []byte) (int, error) {
	if us.closed {
		return 0, ErrStreamClosed
	}

	// deadlineContext yields a nil cancel func when no deadline is set.
	ctx, cancel := deadlineContext(us.writeDeadline)
	if cancel != nil {
		defer cancel()
	}

	origLen := len(p)
	for len(p) > 0 {
		n := copy(us.buffer[us.bufferIndex:], p) // copy as much as possible
		p = p[n:]
		us.bufferIndex += n

		// Flush a completely full buffer before accepting more bytes.
		if us.bufferIndex == UploadBufferSize {
			if err := us.uploadChunks(ctx); err != nil {
				return 0, err
			}
			us.bufferIndex = 0
		}
	}
	return origLen, nil
}
// Abort closes the stream and deletes every chunk document that has already
// been written for this file. The files-collection metadata document is
// never created. Returns ErrStreamClosed if the stream was already closed.
func (us *UploadStream) Abort() error {
	if us.closed {
		return ErrStreamClosed
	}

	ctx, cancel := deadlineContext(us.writeDeadline)
	if cancel != nil {
		defer cancel()
	}

	// Remove all chunks previously flushed for this file ID.
	filter := bsonx.Doc{{"files_id", bsonx.ObjectID(us.FileID)}}
	if _, err := us.chunksColl.DeleteMany(ctx, filter); err != nil {
		return err
	}

	us.closed = true
	return nil
}
// uploadChunks splits the first us.bufferIndex bytes of us.buffer into
// chunkSize-sized pieces (the last piece may be partial) and inserts them
// into the chunks collection in a single InsertMany call, advancing
// us.chunkIndex and us.fileLen as it goes. The caller resets us.bufferIndex
// after a successful flush (see Write).
func (us *UploadStream) uploadChunks(ctx context.Context) error {
	// Number of chunk documents required for the buffered bytes.
	numChunks := math.Ceil(float64(us.bufferIndex) / float64(us.chunkSize))
	docs := make([]interface{}, int(numChunks))
	// Remember the starting chunk index so docs can be filled from slot 0.
	begChunkIndex := us.chunkIndex
	for i := 0; i < us.bufferIndex; i += int(us.chunkSize) {
		var chunkData []byte
		if us.bufferIndex-i < int(us.chunkSize) {
			// Final, partial chunk: only the remaining valid bytes.
			chunkData = us.buffer[i:us.bufferIndex]
		} else {
			chunkData = us.buffer[i : i+int(us.chunkSize)]
		}
		docs[us.chunkIndex-begChunkIndex] = bsonx.Doc{
			{"_id", bsonx.ObjectID(primitive.NewObjectID())},
			{"files_id", bsonx.ObjectID(us.FileID)},
			{"n", bsonx.Int32(int32(us.chunkIndex))},
			{"data", bsonx.Binary(0x00, chunkData)}, // 0x00 = generic binary subtype
		}
		us.chunkIndex++
		us.fileLen += int64(len(chunkData))
	}
	_, err := us.chunksColl.InsertMany(ctx, docs)
	if err != nil {
		return err
	}
	return nil
}
// createFilesCollDoc inserts the metadata document for this file into the
// files collection: _id, total length in bytes, chunk size, upload timestamp
// (milliseconds since the Unix epoch), filename, and — when the caller
// supplied it on the embedded Upload — a user metadata subdocument.
func (us *UploadStream) createFilesCollDoc(ctx context.Context) error {
	doc := bsonx.Doc{
		{"_id", bsonx.ObjectID(us.FileID)},
		{"length", bsonx.Int64(us.fileLen)},
		{"chunkSize", bsonx.Int32(us.chunkSize)},
		{"uploadDate", bsonx.DateTime(time.Now().UnixNano() / int64(time.Millisecond))},
		{"filename", bsonx.String(us.filename)},
	}
	// Optional metadata comes from the embedded *Upload.
	if us.metadata != nil {
		doc = append(doc, bsonx.Elem{"metadata", bsonx.Document(us.metadata)})
	}
	_, err := us.filesColl.InsertOne(ctx, doc)
	if err != nil {
		return err
	}
	return nil
}
|
<reponame>openstreetcam/android
package com.telenav.osv.utils;
import android.animation.Animator;
import android.animation.AnimatorSet;
import android.animation.ObjectAnimator;
import android.app.Activity;
import android.content.res.Resources;
import android.view.View;
import android.view.ViewGroup;
import android.view.animation.Interpolator;
import android.view.animation.LinearInterpolator;
import com.telenav.osv.R;
import androidx.annotation.IntDef;
import androidx.annotation.Nullable;
import androidx.constraintlayout.widget.ConstraintLayout;
import androidx.constraintlayout.widget.ConstraintSet;
import androidx.transition.ChangeBounds;
import androidx.transition.TransitionManager;
import androidx.transition.TransitionSet;
/**
 * Utils class for any animation related generic functionality such as:
 * <ul>
 * <li>{@link #getFadeInAnimator(View)}</li>
 * <li>{@link #getFadeOutAnimator(View)}</li>
 * <li>{@link #getFadeInAnimator(View, int, int, Interpolator, Animator.AnimatorListener)}</li>
 * <li>{@link #getFadeOutAnimator(View, int, int, Interpolator, Animator.AnimatorListener)}</li>
 * </ul>
 * @author horatiuf
 */
public class AnimationUtils {

    /**
     * Animation with 200ms duration.
     */
    public static final int ANIMATION_DURATION_200 = 200;

    /**
     * The value for a 180 degree.
     */
    public static final float DEGREES_180_VALUE = 180f;

    /**
     * The value for a 360 degree.
     */
    public static final float DEGREES_360_VALUE = 360f;

    /**
     * The size of the view coordinates array.
     */
    private static final int VIEW_COORDINATES_DEFAULT_SIZE = 2;

    /**
     * The index of the x coordinate of a view.
     */
    private static final int VIEW_COORDINATE_X_INDEX = 0;

    /**
     * The index of the y coordinate of a view.
     */
    private static final int VIEW_COORDINATE_Y_INDEX = 1;

    /**
     * The property name for an object animator x coordinate property.
     */
    private static final String OBJECT_ANIMATOR_PROPERTY_X = "x";

    /**
     * Private constructor to prevent instantiation of this utility class.
     */
    private AnimationUtils() {
    }

    /**
     * @param view view that will be animated
     * @return {@code ObjectAnimator} representing default fade in animator.
     */
    public static ObjectAnimator getFadeInAnimator(View view) {
        return getFadeInAnimator(view, 0, 0, null, null);
    }

    /**
     * @param view view that will be animated
     * @param duration animation duration
     * @param delay animation delay
     * @param interpolator animation interpolator
     * @param listener animation listener
     * @return {@code ObjectAnimator} representing fade in animator.
     */
    public static ObjectAnimator getFadeInAnimator(View view, int duration, int delay, Interpolator interpolator, Animator.AnimatorListener listener) {
        ObjectAnimator fadeInAnimator = ObjectAnimator.ofFloat(view, View.ALPHA, 0f, 1f);
        if (interpolator != null) {
            fadeInAnimator.setInterpolator(interpolator);
        } else {
            // Default to a linear fade when no interpolator is supplied.
            fadeInAnimator.setInterpolator(new LinearInterpolator());
        }
        fadeInAnimator.setDuration(duration);
        fadeInAnimator.setStartDelay(delay);
        if (listener != null) {
            fadeInAnimator.addListener(listener);
        }
        return fadeInAnimator;
    }

    /**
     * Delayed animation which changes the bounds of a view using {@link TransitionManager}.
     * @param viewToAnimate the {@code ViewGroup} to animate.
     */
    public static void changeBoundsDelayedTransition(ViewGroup viewToAnimate) {
        TransitionManager
                .beginDelayedTransition(
                        viewToAnimate,
                        new TransitionSet()
                                .addTransition(new ChangeBounds()));
    }

    /**
     * returns a fade out animator
     * @param view view that will be animated
     * @return {@code ObjectAnimator} representing fade out animator.
     */
    public static ObjectAnimator getFadeOutAnimator(View view) {
        return getFadeOutAnimator(view, 0, 0, null, null);
    }

    /**
     * @param view view that will be animated
     * @param duration animation duration
     * @param delay animation delay
     * @param interpolator animation interpolator
     * @param listener animation listener
     * @return {@code ObjectAnimator} representing fade out animator.
     */
    public static ObjectAnimator getFadeOutAnimator(View view, int duration, int delay, Interpolator interpolator, Animator.AnimatorListener listener) {
        ObjectAnimator fadeOutAnimator = ObjectAnimator.ofFloat(view, View.ALPHA, 1f, 0f);
        if (interpolator != null) {
            fadeOutAnimator.setInterpolator(interpolator);
        } else {
            // Default to a linear fade when no interpolator is supplied.
            fadeOutAnimator.setInterpolator(new LinearInterpolator());
        }
        fadeOutAnimator.setDuration(duration);
        fadeOutAnimator.setStartDelay(delay);
        if (listener != null) {
            fadeOutAnimator.addListener(listener);
        }
        return fadeOutAnimator;
    }

    /**
     * @param view The view for which the translate animation will be performed.
     * @param listener the animation listener if required. Can be null.
     * @param pixels A set of values that the animation will animate between over time.
     * @return {@code ObjectAnimator} representing the translation animation.
     */
    public static ObjectAnimator getTranslationObjectAnimator(View view, @Nullable Animator.AnimatorListener listener, float... pixels) {
        ObjectAnimator translateObjectAnimator = ObjectAnimator.ofFloat(view, OBJECT_ANIMATOR_PROPERTY_X, pixels);
        if (listener != null) {
            translateObjectAnimator.addListener(listener);
        }
        return translateObjectAnimator;
    }

    /**
     * @param isSequential {@code true} if the animations is required to play sequentially, {@code false} otherwise.
     * @param duration the duration for the animation.
     * @param delayDuration the delay duration for the animation.
     * @param animatorListener the animation listener. Can be null.
     * @param animators the animator objects which will be included in the animator set.
     * @return {@code AnimatorSet} setup based on the params specified.
     */
    public static AnimatorSet createAnimatorSet(boolean isSequential, long duration, long delayDuration, @Nullable Animator.AnimatorListener animatorListener, Animator...
            animators) {
        AnimatorSet animatorSet = new AnimatorSet();
        animatorSet.setDuration(duration);
        animatorSet.setStartDelay(delayDuration);
        if (isSequential) {
            animatorSet.playSequentially(animators);
        } else {
            animatorSet.playTogether(animators);
        }
        if (animatorListener != null) {
            animatorSet.addListener(animatorListener);
        }
        return animatorSet;
    }

    /**
     * @param view The view for which the rotation animation will be performed.
     * @param duration The duration of the animation.
     * @param listener the animation listener if required. Can be null.
     * @param pixels A set of values that the animation will animate between over time.
     * @return {@code ObjectAnimator} representing the rotation animation.
     */
    public static ObjectAnimator getRotationObjectAnimator(View view, long duration, @Nullable Animator.AnimatorListener listener, float... pixels) {
        ObjectAnimator objectAnimator = ObjectAnimator.ofFloat(view, View.ROTATION, pixels);
        objectAnimator.setDuration(duration);
        if (listener != null) {
            objectAnimator.addListener(listener);
        }
        return objectAnimator;
    }

    /**
     * @param myView the view for which the left position will be returned.
     * @return {@code int} representing the left position of the view in pixels relative to the parent.
     */
    public static int getRelativeLeft(View myView) {
        // Walk up the view hierarchy accumulating left offsets until the root.
        if (myView.getParent() == myView.getRootView()) {
            return myView.getLeft();
        } else {
            return myView.getLeft() + getRelativeLeft((View) myView.getParent());
        }
    }

    /**
     * @param myView the view for which the top position will be returned.
     * @return {@code int} representing the top position of the view in pixels relative to the parent.
     */
    public static int getRelativeTop(View myView) {
        // Walk up the view hierarchy accumulating top offsets until the root.
        if (myView.getParent() == myView.getRootView()) {
            return myView.getTop();
        } else {
            return myView.getTop() + getRelativeTop((View) myView.getParent());
        }
    }

    /**
     * @param view The view for which the x coordinate will be returned.
     * @return {@code int} representing the absolute value of the x coordinate on the screen. <p> The method calls internally {@link #getViewCoordinate(View)}.</p>
     */
    public static int getViewAbsoluteX(View view) {
        return getViewCoordinate(view)[VIEW_COORDINATE_X_INDEX];
    }

    /**
     * @param view The view for which the y coordinate will be returned.
     * @return {@code int} representing the absolute value of the y coordinate on the screen. <p> The method calls internally {@link #getViewCoordinate(View)}.</p>
     */
    public static int getViewAbsoluteY(View view) {
        return getViewCoordinate(view)[VIEW_COORDINATE_Y_INDEX];
    }

    /**
     * @param view The view for which the array of coordinates will be returned.
     * @return {@code int[2]} representing the array of coordinates for a view on the screen, positioned first the x, respectively the y in this order.
     */
    public static int[] getViewCoordinate(View view) {
        int[] viewCoordinates = new int[VIEW_COORDINATES_DEFAULT_SIZE];
        view.getLocationInWindow(viewCoordinates);
        return viewCoordinates;
    }

    /**
     * Swaps the camera preview and the larger container in the OBD activity
     * layout: the larger container is pinned to the full parent and the
     * resize container is docked to the bottom-start corner next to the
     * guidelines, with bound-change transitions on both.
     * <p>
     * Silently returns when the activity, the parent constraint layout or
     * either container view cannot be found.
     *
     * @param activity the hosting activity; may be {@code null}
     * @param resizeContainerId id of the container to shrink into the corner
     * @param largerContainerId id of the container to expand to full screen
     */
    public static void resizeCameraUI(Activity activity, int resizeContainerId, int largerContainerId) {
        if (activity == null) {
            return;
        }
        Resources resources = activity.getResources();
        ViewGroup resizeView = activity.findViewById(resizeContainerId);
        ViewGroup largerView = activity.findViewById(largerContainerId);
        if (resizeView == null || largerView == null) {
            // Nothing to do if either container is missing from the layout.
            return;
        }
        AnimationUtils.changeBoundsDelayedTransition(largerView);
        AnimationUtils.changeBoundsDelayedTransition(resizeView);
        ConstraintSet set = new ConstraintSet();
        ConstraintLayout parent = activity.findViewById(R.id.layout_activity_obd_parent);
        if (parent == null) {
            return;
        }
        set.clone(parent);
        // Expand the larger container to fill the whole parent.
        set.clear(largerContainerId, ConstraintSet.TOP);
        set.clear(largerContainerId, ConstraintSet.END);
        set.clear(resizeContainerId, ConstraintSet.START);
        set.clear(resizeContainerId, ConstraintSet.BOTTOM);
        set.connect(largerContainerId, ConstraintSet.TOP, ConstraintSet.PARENT_ID, ConstraintSet.TOP, 0);
        set.connect(largerContainerId, ConstraintSet.END, ConstraintSet.PARENT_ID, ConstraintSet.END, 0);
        set.connect(largerContainerId, ConstraintSet.START, ConstraintSet.PARENT_ID, ConstraintSet.START, 0);
        set.connect(largerContainerId, ConstraintSet.BOTTOM, ConstraintSet.PARENT_ID, ConstraintSet.BOTTOM, 0);
        largerView.setClipToOutline(false);
        largerView.bringToFront();
        // Dock the resize container into the bottom-start corner.
        set.clear(resizeContainerId, ConstraintSet.TOP);
        set.clear(resizeContainerId, ConstraintSet.END);
        set.clear(resizeContainerId, ConstraintSet.START);
        set.clear(resizeContainerId, ConstraintSet.BOTTOM);
        int margin = Math.round(resources.getDimension(R.dimen.camera_preview_tagging_margin));
        set.connect(resizeContainerId, ConstraintSet.TOP, R.id.guidelineHorizontal, ConstraintSet.BOTTOM, 0);
        set.connect(resizeContainerId, ConstraintSet.END, R.id.guidelineVertical, ConstraintSet.START, 0);
        set.connect(resizeContainerId, ConstraintSet.START, ConstraintSet.PARENT_ID, ConstraintSet.START, margin);
        set.connect(resizeContainerId, ConstraintSet.BOTTOM, ConstraintSet.PARENT_ID, ConstraintSet.BOTTOM, margin);
        resizeView.setClipToOutline(true);
        resizeView.bringToFront();
        // Keep the control/overlay layers above both containers.
        ViewGroup cameraControls = activity.findViewById(R.id.layout_activity_obd_fragment_container);
        cameraControls.bringToFront();
        ViewGroup clickArea = activity.findViewById(R.id.frame_layout_activity_obd_click_area);
        clickArea.bringToFront();
        ViewGroup loader = activity.findViewById(R.id.frame_layout_activity_obd_loader);
        loader.bringToFront();
        set.applyTo(parent);
    }

    /**
     * Values for animation durations, such as:
     * <ul>
     * <li>{@link #ANIMATION_DURATION_500_MS}</li>
     * <li>{@link #ANIMATION_DURATION_1000_MS}</li>
     * <li>{@link #ANIMATION_DURATION_1250_MS}</li>
     * <li>{@link #ANIMATION_DURATION_2000_MS}</li>
     * </ul>
     */
    @IntDef
    public @interface AnimationDurations {

        /**
         * The value for 500 milliseconds of animation duration.
         * Fix: this was previously defined as 1000, duplicating
         * {@link #ANIMATION_DURATION_1000_MS} and contradicting its name.
         */
        int ANIMATION_DURATION_500_MS = 500;

        /**
         * The value for 1000 milliseconds of animation duration.
         */
        int ANIMATION_DURATION_1000_MS = 1000;

        /**
         * The value for 1250 milliseconds of animation duration.
         */
        int ANIMATION_DURATION_1250_MS = 1250;

        /**
         * The value for 2000 milliseconds of animation duration.
         */
        int ANIMATION_DURATION_2000_MS = 2000;
    }
}
|
source test-header.sh
# ======================================================================
#
# Initial setup.
#
# ======================================================================
PYARMOR="${PYTHON} pyarmor.py"
csih_inform "Python is $PYTHON"
csih_inform "Tested Package: $pkgfile"
csih_inform "PyArmor is $PYARMOR"
csih_inform "Make workpath ${workpath}"
rm -rf ${workpath}
mkdir -p ${workpath} || csih_error "Make workpath FAILED"
csih_inform "Clean pyarmor data"
rm -rf ~/.pyarmor ~/.pyarmor_capsule.*
[[ -n "$USERPROFILE" ]] && rm -rf "$USERPROFILE\\.pyarmor*"
cd ${workpath}
[[ ${pkgfile} == *.zip ]] && unzip ${pkgfile} > /dev/null 2>&1
[[ ${pkgfile} == *.tar.bz2 ]] && tar xjf ${pkgfile}
cd pyarmor-$version || csih_error "Invalid pyarmor package file"
# From pyarmor 3.5.1, main scripts are moved to src
[[ -d src ]] && mv src/* ./
# Fix issue: assert_builtin(open) fails in python 3.0
patch_cross_protection_code_for_python3.0
# From pyarmor 4.5.4, platform name is renamed
# From pyarmor 5.7.5, platform name is changed
csih_inform "Add execute permission to dynamic library"
find ./platforms -name _pytransform.dll -exec chmod +x {} \;
csih_inform "Prepare for system testing"
echo ""
# ======================================================================
#
# Bootstrap: help and version
#
# ======================================================================
echo ""
echo "-------------------- Bootstrap ---------------------------------"
echo ""
csih_inform "Case 0.1: show help and import pytransform"
$PYARMOR --help >result.log 2>&1 || csih_bug "Case 0.1 FAILED"
csih_inform "Case 0.2: show version information"
$PYARMOR --version >result.log 2>&1 || csih_bug "show version FAILED"
echo ""
echo "-------------------- Bootstrap End -----------------------------"
echo ""
# ======================================================================
#
# Command: obfuscate
#
# ======================================================================
echo ""
echo "-------------------- Test Command obfuscate --------------------"
echo ""
csih_inform "Case 1.1: obfuscate script"
$PYARMOR obfuscate --output dist examples/simple/queens.py >result.log 2>&1
check_file_exists dist/queens.py
check_file_content dist/queens.py '__pyarmor__(__name__'
( cd dist; $PYTHON queens.py >result.log 2>&1 )
check_file_content dist/result.log 'Found 92 solutions'
csih_inform "Case 1.2-1: obfuscate script with --recursive and --restrict=0"
$PYARMOR obfuscate --recursive --restrict=0 --output dist2 \
examples/py2exe/hello.py >result.log 2>&1
check_return_value
check_file_exists dist2/hello.py
check_file_content dist2/hello.py 'pyarmor_runtime()'
check_file_exists dist2/queens.py
check_file_content dist2/queens.py '__pyarmor__(__name__'
check_file_exists dist2/pytransform/__init__.py
( cd dist2; $PYTHON hello.py >result.log 2>&1 )
check_return_value
check_file_content dist2/result.log 'Found 92 solutions'
csih_inform "Case 1.2-2: obfuscate script with --recursive and --restrict=1"
$PYARMOR obfuscate --recursive --restrict=1 --output dist2-2 \
examples/py2exe/hello.py >result.log 2>&1
check_return_value
check_file_exists dist2-2/hello.py
check_file_content dist2-2/hello.py 'pyarmor_runtime()'
check_file_exists dist2-2/queens.py
check_file_content dist2-2/queens.py '__pyarmor__(__name__'
( cd dist2-2; $PYTHON hello.py >result.log 2>&1 )
check_return_value
check_file_content dist2-2/result.log 'Found 92 solutions'
csih_inform "Case 1.2-3: obfuscate script with --package-runtime=0 and --restrict=0"
$PYARMOR obfuscate --package-runtime=0 --restrict=0 --output dist2-3 \
-r examples/py2exe/hello.py >result.log 2>&1
check_return_value
check_file_exists dist2-3/hello.py
check_file_content dist2-3/hello.py 'pyarmor_runtime()'
check_file_exists dist2-3/pytransform.py
( cd dist2-3; $PYTHON hello.py >result.log 2>&1 )
check_return_value
check_file_content dist2-3/result.log 'Found 92 solutions'
csih_inform "Case 1.3: run obfuscate script with new license"
$PYARMOR obfuscate --output dist3 examples/simple/queens.py >result.log 2>&1
check_return_value
check_file_exists dist3/queens.py
$PYARMOR licenses --expired $(next_month) Jondy >result.log 2>&1
check_return_value
check_file_exists licenses/Jondy/license.lic
cp licenses/Jondy/license.lic dist3/pytransform/
( cd dist3; $PYTHON queens.py >result.log 2>&1 )
check_return_value
check_file_content dist3/result.log 'Found 92 solutions'
csih_inform "Case 1.4: obfuscate one script exactly without runtime files"
$PYARMOR obfuscate --output dist4 --exact --no-runtime \
examples/pybench/pybench.py >result.log 2>&1
check_return_value
check_file_exists dist4/pybench.py
check_file_not_exists dist4/Lists.py
check_file_not_exists dist4/pytransform.py
check_file_not_exists dist4/pytransform/__init__.py
echo ""
echo "-------------------- Test Command obfuscate END ----------------"
echo ""
# ======================================================================
#
# Command: init
#
# ======================================================================
echo ""
echo "-------------------- Test Command init -------------------------"
echo ""
csih_inform "Case 2.1: init pybench"
$PYARMOR init --type=app --src examples/pybench --entry pybench.py \
projects/pybench >result.log 2>&1
check_file_exists projects/pybench/.pyarmor_config
csih_inform "Case 2.1: init py2exe"
$PYARMOR init --src examples/py2exe --entry "hello.py,setup.py" \
projects/py2exe >result.log 2>&1
check_file_exists projects/py2exe/.pyarmor_config
# csih_inform "Case 2.2: init clone py2exe"
# $PYARMOR init --src examples/py2exe2 --clone projects/py2exe \
# projects/py2exe-clone >result.log 2>&1
#
# check_return_value
# check_file_exists projects/py2exe-clone/.pyarmor_config
csih_inform "Case 2.3: init package"
$PYARMOR init --src examples/testpkg/mypkg --entry "../main.py" \
--type=pkg projects/testpkg >result.log 2>&1
check_return_value
$PYARMOR config --disable-restrict-mode=1 projects/testpkg >result.log 2>&1
$PYARMOR info projects/testpkg >result.log 2>&1
check_return_value
check_file_content result.log 'restrict_mode: 0'
check_file_content result.log 'is_package: 1'
echo ""
echo "-------------------- Test Command init END ---------------------"
echo ""
# ======================================================================
#
# Command: config
#
# ======================================================================
echo ""
echo "-------------------- Test Command config -----------------------"
echo ""
# These cases run from inside the project directory and therefore invoke
# $ARMOR rather than $PYARMOR -- presumably a wrapper defined earlier in
# this script; confirm there. Only the exit status is asserted.
csih_inform "Case 3.1: config py2exe"
( cd projects/py2exe; $ARMOR config --rpath='' \
--manifest="global-include *.py, exclude __manifest__.py" \
>result.log 2>&1 )
check_return_value
csih_inform "Case 3.2: config pybench"
( cd projects/pybench; $ARMOR config --disable-restrict-mode=1 \
>result.log 2>&1 )
check_return_value
echo ""
echo "-------------------- Test Command config END -------------------"
echo ""
# ======================================================================
#
# Command: info
#
# ======================================================================
echo ""
echo "-------------------- Test Command info -------------------------"
echo ""
# `info` prints the project configuration; only the exit status is
# asserted here (no output assertions).
csih_inform "Case 4.1: info pybench"
( cd projects/pybench; $ARMOR info >result.log 2>&1 )
check_return_value
csih_inform "Case 4.2: info py2exe"
( cd projects/py2exe; $ARMOR info >result.log 2>&1 )
check_return_value
echo ""
echo "-------------------- Test Command info END ---------------------"
echo ""
# ======================================================================
#
# Command: check
#
# ======================================================================
echo ""
echo "-------------------- Test Command check ------------------------"
echo ""
# `check` validates the project; again only the exit status is asserted.
csih_inform "Case 5.1: check pybench"
( cd projects/pybench; $ARMOR check >result.log 2>&1 )
check_return_value
csih_inform "Case 5.2: check py2exe"
( cd projects/py2exe; $ARMOR check >result.log 2>&1 )
check_return_value
echo ""
echo "-------------------- Test Command check END --------------------"
echo ""
# ======================================================================
#
# Command: build
#
# ======================================================================
echo ""
echo "-------------------- Test Command build ------------------------"
echo ""
# Obfuscated output lands in each project's dist/ directory; the markers
# 'pyarmor_runtime()' (bootstrap, entry scripts only) and
# '__pyarmor__(__name__' (obfuscated module body) prove obfuscation ran.
csih_inform "Case 6.1: build pybench"
( cd projects/pybench; $ARMOR build >result.log 2>&1 )
output=projects/pybench/dist
check_file_exists $output/pybench.py
check_file_content $output/pybench.py 'pyarmor_runtime()'
check_file_content $output/pybench.py '__pyarmor__(__name__'
csih_inform "Case 6.2: build package"
( cd projects/testpkg; $ARMOR build >result.log 2>&1 )
output=projects/testpkg/dist
check_file_exists $output/main.py
check_file_content $output/main.py 'pyarmor_runtime()'
check_file_exists $output/mypkg/__init__.py
check_file_exists $output/mypkg/foo.py
check_file_content $output/mypkg/foo.py '__pyarmor__(__name__'
# Case 6.3 reuses $output (still projects/testpkg/dist from Case 6.2).
# The entry script is copied into the package and the project rebuilt
# with -B (presumably "force rebuild" -- confirm with `pyarmor build -h`).
csih_inform "Case 6.3: build package with entry script in package"
cp examples/testpkg/main.py examples/testpkg/mypkg
( cd projects/testpkg;
$ARMOR config --entry=main.py >result.log 2>&1 &&
$ARMOR build -B >result.log 2>&1 )
check_return_value
check_file_exists $output/mypkg/main.py
check_file_content $output/mypkg/main.py 'pyarmor_runtime()'
check_file_content $output/mypkg/main.py '__pyarmor__(__name__'
echo ""
echo "-------------------- Test Command build END --------------------"
echo ""
# ======================================================================
#
# Command: licenses
#
# ======================================================================
echo ""
echo "-------------------- Test Command licenses ---------------------"
echo ""
# Case 7.1: plain registration-code licenses, then one combining an
# expiry date with disk/ipv4/mac bindings, then one bound to the content
# of a local file (the dummy id_rsa written below).
csih_inform "Case 7.1: Generate project licenses"
output=projects/pybench/licenses
( cd projects/pybench; $ARMOR licenses code1 code2 code3 \
>licenses-result.log 2>&1 )
check_file_exists $output/code1/license.lic
check_file_exists $output/code2/license.lic
check_file_exists $output/code3/license.lic
check_file_exists $output/code1/license.lic.txt
( cd projects/pybench; $ARMOR licenses \
--expired $(next_month) \
--bind-disk "${harddisk_sn}" \
--bind-ipv4 "${ifip_address}" \
--bind-mac "${ifmac_address}" \
customer-tom >licenses-result.log 2>&1 )
check_file_exists $output/customer-tom/license.lic
check_file_exists $output/customer-tom/license.lic.txt
cat <<EOF > projects/pybench/id_rsa
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----
EOF
( cd projects/pybench; $ARMOR licenses \
--bind-file "id_rsa;id_rsa" \
fixkey >licenses-result.log 2>&1 )
check_file_exists $output/fixkey/license.lic
check_file_exists $output/fixkey/license.lic.txt
# Case 7.2: install each generated license into the obfuscated dist and
# print its decoded info with get_license_info(); the registration code
# and binding values must appear in the output.
csih_inform "Case 7.2: Show license info"
( cd projects/pybench;
$ARMOR build --with-license outer >licenses-result.log 2>&1 )
cat <<EOF > projects/pybench/dist/info.py
from pytransform import pyarmor_init, get_license_info
pyarmor_init(is_runtime=1)
print(get_license_info())
EOF
cp $output/code1/license.lic projects/pybench/dist/pytransform
( cd projects/pybench/dist; $PYTHON info.py >result.log 2>&1 )
check_file_content projects/pybench/dist/result.log "'code1'"
cp $output/customer-tom/license.lic projects/pybench/dist/pytransform
( cd projects/pybench/dist; $PYTHON info.py >result.log 2>&1 )
check_file_content projects/pybench/dist/result.log "'customer-tom'"
check_file_content projects/pybench/dist/result.log "'${harddisk_sn}'"
check_file_content projects/pybench/dist/result.log "'${ifmac_address}'"
check_file_content projects/pybench/dist/result.log "'${ifip_address}'"
cp $output/fixkey/license.lic projects/pybench/dist/pytransform
cp projects/pybench/id_rsa projects/pybench/dist/pytransform
( cd projects/pybench/dist; $PYTHON info.py >result.log 2>&1 )
check_file_content projects/pybench/dist/result.log "'FIXKEY'"
# Case 7.3: a --disable-restrict-mode license must allow running a
# modified (plain-text line appended) obfuscated script.
csih_inform "Case 7.3: Generate license which disable all restricts"
output=test-no-restrict-license
$PYARMOR obfuscate -O $output --no-cross-protection --with-license outer \
examples/simple/queens.py >result.log 2>&1
check_return_value
$PYARMOR licenses --disable-restrict-mode NO-RESTRICT >result.log 2>&1
check_return_value
check_file_exists licenses/NO-RESTRICT/license.lic
cp licenses/NO-RESTRICT/license.lic $output/pytransform/
echo -e "\nprint('No restrict mode')" >> $output/queens.py
(cd $output; $PYTHON queens.py >result.log 2>&1 )
check_return_value
check_file_content $output/result.log 'Found 92 solutions'
check_file_content $output/result.log 'No restrict mode'
# Prepare an obfuscated probe script used by all the binding cases below.
# (The "legency" spelling is kept as-is: the directory name is referenced
# throughout the following cases.)
cat <<EOF > test-license.py
from pytransform import get_license_info
print('Test old licenses')
print(get_license_info())
EOF
$PYARMOR obfuscate --with-license outer --exact \
-O test-legency-licenses test-license.py >result.log 2>&1
# Cases 7.4 - 7.13 follow one pattern: generate a license with a specific
# binding, install it into test-legency-licenses/pytransform, run the
# obfuscated probe, and assert it is either accepted ("Test old licenses"
# printed) or rejected (a trailing 'not' argument inverts
# check_file_content).
csih_inform "Case 7.4: Generate license bind to fixed machine"
$PYARMOR licenses --bind-disk="${harddisk_sn}" r001 >result.log 2>&1
check_return_value
check_file_exists licenses/r001/license.lic
cp licenses/r001/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses"
check_file_content test-legency-licenses/result.log "${harddisk_sn}"
csih_inform "Case 7.5: Generate no expired license bind to fixed machine"
$PYARMOR licenses -e $(next_month) --bind-disk="${harddisk_sn}" r002 >result.log 2>&1
check_return_value
check_file_exists licenses/r002/license.lic
cp licenses/r002/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses"
check_file_content test-legency-licenses/result.log "r002"
# An already-expired license must be rejected.
csih_inform "Case 7.6: Generate expired license"
$PYARMOR licenses -e 2014-01-01 r003 >result.log 2>&1
check_return_value
check_file_exists licenses/r003/license.lic
cp licenses/r003/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses" not
csih_inform "Case 7.7: generate license bind to mac address"
$PYARMOR licenses --bind-mac="${ifmac_address}" r004 >result.log 2>&1
check_return_value
check_file_exists licenses/r004/license.lic
cp licenses/r004/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses"
check_file_content test-legency-licenses/result.log "r004"
check_file_content test-legency-licenses/result.log "${ifmac_address}"
# Binding to a bogus mac must be rejected.
csih_inform "Case 7.8: Generate license bind to other mac address"
$PYARMOR licenses --bind-mac="xx:yy:zz" r005 >result.log 2>&1
check_return_value
check_file_exists licenses/r005/license.lic
cp licenses/r005/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses" not
check_file_content test-legency-licenses/result.log "r005" not
csih_inform "Case 7.9: Generate license bind to ip address"
$PYARMOR licenses --bind-ipv4="${ifip_address}" r006 >result.log 2>&1
check_return_value
check_file_exists licenses/r006/license.lic
cp licenses/r006/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses"
check_file_content test-legency-licenses/result.log "r006"
check_file_content test-legency-licenses/result.log "${ifip_address}"
csih_inform "Case 7.10: Generate license bind to other ip address"
$PYARMOR licenses --bind-ipv4="xxx.yyy.zzz" r007 >result.log 2>&1
check_return_value
check_file_exists licenses/r007/license.lic
cp licenses/r007/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses" not
check_file_content test-legency-licenses/result.log "r007" not
csih_inform "Case 7.11: Generate license bind to both mac and ip address"
$PYARMOR licenses --bind-mac="${ifmac_address}" \
--bind-ipv4="${ifip_address}" r008 >result.log 2>&1
check_return_value
check_file_exists licenses/r008/license.lic
cp licenses/r008/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses"
check_file_content test-legency-licenses/result.log "r008"
check_file_content test-legency-licenses/result.log "${ifmac_address}"
csih_inform "Case 7.12: Generate license bind to other domain name"
$PYARMOR licenses --bind-domain="snsoffice.com" r009 >result.log 2>&1
check_return_value
check_file_exists licenses/r009/license.lic
cp licenses/r009/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses" not
check_file_content test-legency-licenses/result.log "r009" not
# Linux-only: an "ifname/mac" binding must match only on the named
# interface (r010 uses the real name, r011 a wrong one).
if [[ ${UNAME:0:5} == Linux ]] ; then
csih_inform "Case 7.13: generate license bind to mac address with ifname"
$PYARMOR licenses --bind-mac="${ifname}/${ifmac_address}" r010 >result.log 2>&1
$PYARMOR licenses --bind-mac="eth1/${ifmac_address}" r011 >result.log 2>&1
check_return_value
check_file_exists licenses/r010/license.lic
check_file_exists licenses/r011/license.lic
cp licenses/r010/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses"
check_file_content test-legency-licenses/result.log "r010"
check_file_content test-legency-licenses/result.log "${ifname}/${ifmac_address}"
cp licenses/r011/license.lic test-legency-licenses/pytransform
( cd test-legency-licenses; $PYTHON test-license.py >result.log 2>&1 )
check_file_content test-legency-licenses/result.log "Test old licenses" not
check_file_content test-legency-licenses/result.log "r011" not
fi
echo ""
echo "-------------------- Test Command licenses END -----------------"
echo ""
# ======================================================================
#
# Command: hdinfo
#
# ======================================================================
echo ""
echo "-------------------- Test Command hdinfo -----------------------"
echo ""
csih_inform "Case 8.1: show hardware info"
$PYARMOR hdinfo >result.log 2>&1
check_return_value
# Case 8.2: generate a bare runtime (--no-package) and query the hardware
# info through the pytransform API directly; get_hd_info(0) is expected
# to return the hard-disk serial number captured earlier in the script.
csih_inform "Case 8.2: get hardware info"
casepath=test-hardware-info
mkdir -p $casepath
cat <<EOF > $casepath/test_get_hd_info.py
import pytransform
from pytransform import pyarmor_init, get_hd_info
pytransform.plat_path = 'platforms'
pyarmor_init(path='.', is_runtime=1)
print(get_hd_info(0))
EOF
$PYARMOR runtime --no-package -O $casepath >result.log 2>&1
(cd $casepath; $PYTHON test_get_hd_info.py >result.log 2>&1)
check_file_content $casepath/result.log "${harddisk_sn}"
echo ""
echo "-------------------- Test Command hdinfo END -------------------"
echo ""
# ======================================================================
#
# Command: benchmark
#
# ======================================================================
echo ""
echo "-------------------- Test Command benchmark --------------------"
echo ""
# Run the built-in benchmark over all 3 x 3 x 2 = 18 combinations of
# obf-mod / obf-code / wrap-mode; each run gets its own log file and the
# scratch directory .benchtest is removed between runs.
csih_inform "Case 9.1: run benchmark test"
for obf_mod in 0 1 2 ; do
for obf_code in 0 1 2 ; do
for obf_wrap_mode in 0 1 ; do
csih_inform "obf_mod: $obf_mod, obf_code: $obf_code, wrap_mode: $obf_wrap_mode"
logfile="log_${obf_mod}_${obf_code}_${obf_wrap_mode}.log"
$PYARMOR benchmark --obf-mod ${obf_mod} --obf-code ${obf_code} \
--wrap-mode ${obf_wrap_mode} >$logfile 2>&1
check_return_value
csih_inform "Write benchmark test results to $logfile"
check_file_content $logfile "call_10000_obfuscated_10k_bytecode"
rm -rf .benchtest
done
done
done
echo ""
echo "-------------------- Test Command benchmark END ----------------"
echo ""
# ======================================================================
#
# Use Cases
#
# ======================================================================
echo ""
echo "-------------------- Test Use Cases ----------------------------"
echo ""
# T-1.1: obfuscate a module (queens.py) alongside an entry script via a
# project manifest, then run the entry script from dist/.
csih_inform "Case T-1.1: obfuscate module with project"
$PYARMOR init --src=examples/py2exe --entry=hello.py \
projects/testmod >result.log 2>&1
$PYARMOR config --manifest="include queens.py" --disable-restrict-mode=1 \
projects/testmod >result.log 2>&1
(cd projects/testmod; $ARMOR build >result.log 2>&1)
check_file_exists projects/testmod/dist/hello.py
check_file_content projects/testmod/dist/hello.py 'pyarmor_runtime'
check_file_exists projects/testmod/dist/queens.py
check_file_content projects/testmod/dist/queens.py '__pyarmor__(__name__'
(cd projects/testmod/dist; $PYTHON hello.py >result.log 2>&1 )
check_file_content projects/testmod/dist/result.log 'Found 92 solutions'
# T-1.2: wrap-mode disabled (--wrap-mode=0); the probe script checks the
# runtime guarantees listed in the assertions below (frame/locals
# protection, generators, shared code objects, ...).
csih_inform "Case T-1.2: obfuscate module with wraparmor"
PROPATH=projects/testmod_wrap
$PYARMOR init --src=examples/testmod --entry=hello.py $PROPATH >result.log 2>&1
$PYARMOR config --manifest="include queens.py" --disable-restrict-mode=1 \
--wrap-mode=0 $PROPATH >result.log 2>&1
(cd $PROPATH; $ARMOR build >result.log 2>&1)
check_file_exists $PROPATH/dist/hello.py
check_file_content $PROPATH/dist/hello.py 'pyarmor_runtime'
check_file_exists $PROPATH/dist/queens.py
check_file_content $PROPATH/dist/queens.py '__pyarmor__(__name__'
(cd $PROPATH/dist; $PYTHON hello.py >result.log 2>&1 )
check_file_content $PROPATH/dist/result.log 'Found 92 solutions'
check_file_content $PROPATH/dist/result.log '__wraparmor__ can not be called out of decorator'
check_file_content $PROPATH/dist/result.log 'The value of __file__ is OK'
check_file_content $PROPATH/dist/result.log '<frozen queens>'
check_file_content $PROPATH/dist/result.log 'Found frame of function foo'
check_file_content $PROPATH/dist/result.log 'Can not get data from frame.f_locals'
check_file_content $PROPATH/dist/result.log 'Got empty from callback'
check_file_content $PROPATH/dist/result.log 'Generator works well'
check_file_content $PROPATH/dist/result.log 'Shared code object works well'
# T-1.3: default auto-wrap mode (--wrap-mode=1) on a single module.
csih_inform "Case T-1.3: obfuscate module with auto-wrap mode"
PROPATH=projects/testmod_auto_wrap
$PYARMOR init --src=examples/py2exe --entry=queens.py $PROPATH >result.log 2>&1
$PYARMOR config --wrap-mode=1 --disable-restrict-mode=1 \
--manifest="include queens.py" $PROPATH >result.log 2>&1
(cd $PROPATH; $ARMOR build >result.log 2>&1)
check_file_exists $PROPATH/dist/queens.py
check_file_content $PROPATH/dist/queens.py 'pyarmor_runtime'
check_file_content $PROPATH/dist/queens.py '__pyarmor__(__name__'
(cd $PROPATH/dist; $PYTHON queens.py >result.log 2>&1 )
check_file_content $PROPATH/dist/result.log 'Found 92 solutions'
# T-1.4: obfuscate a whole package and import it from a plain script.
csih_inform "Case T-1.4: obfuscate package with auto-wrap mode"
PROPATH=projects/testpkg_auto_wrap
$PYARMOR init --src=examples/testpkg/mypkg \
--entry="__init__.py" $PROPATH >result.log 2>&1
(cd $PROPATH; $ARMOR build >result.log 2>&1)
check_file_exists $PROPATH/dist/mypkg/__init__.py
check_file_content $PROPATH/dist/mypkg/__init__.py '__pyarmor__(__name__'
cp examples/testpkg/main.py $PROPATH/dist
(cd $PROPATH/dist; $PYTHON main.py >result.log 2>&1 )
check_file_content $PROPATH/dist/result.log 'Hello! PyArmor Test Case'
# T-1.5: two separately-obfuscated packages (each with its own runtime)
# must be importable side by side from one script.
csih_inform "Case T-1.5: obfuscate 2 independent packages"
output=dist-pkgs
$PYARMOR obfuscate -O $output/pkg1 examples/testpkg/mypkg/__init__.py >result.log 2>&1
$PYARMOR obfuscate -O $output/pkg2 examples/testpkg/mypkg/__init__.py >result.log 2>&1
check_file_exists $output/pkg1/__init__.py
check_file_exists $output/pkg2/__init__.py
cat <<EOF > $output/main.py
from pkg1 import foo as foo1
from pkg2 import foo as foo2
foo1.hello('pkg1')
foo2.hello('pkg2')
EOF
(cd $output; $PYTHON main.py >result.log 2>&1)
check_file_content $output/result.log "Hello! pkg1"
check_file_content $output/result.log "Hello! pkg2"
echo ""
echo "-------------------- Test Use Cases END ------------------------"
echo ""
# ======================================================================
#
# Finished and cleanup.
#
# ======================================================================
csih_inform "Clean pyarmor data"
rm -rf ~/.pyarmor ~/.pyarmor_capsule.*

# BUGFIX: previously `rm -rf "$USERPROFILE\.pyarmor*"` quoted the glob, so
# pathname expansion never ran (and the backslash would have acted as an
# escape in the pattern anyway) -- the Windows-side data was never removed.
# cd into the profile directory instead so the glob is free of backslashes.
[[ -n "$USERPROFILE" ]] && ( cd "$USERPROFILE" && rm -rf .pyarmor* )

# Return to the test root
cd ../..

echo "----------------------------------------------------------------"
echo ""
csih_inform "Test finished for ${PYTHON}"

# Fail loudly when any check above recorded a bug.
(( ${_bug_counter} == 0 )) || csih_error "${_bug_counter} bugs found"

# Only remove the work directory when everything passed.
echo "" && \
csih_inform "Remove workpath ${workpath}" \
&& echo "" \
&& rm -rf ${workpath} \
&& csih_inform "Congratulations, there is no bug found"
|
/*
* Copyright (c) 2004-2021, University of Oslo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of the HISP project nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.hisp.dhis.android.core.enrollment;
import org.hisp.dhis.android.core.common.BaseIdentifiableObject;
import org.hisp.dhis.android.core.common.BaseObjectShould;
import org.hisp.dhis.android.core.common.Coordinates;
import org.hisp.dhis.android.core.common.FeatureType;
import org.hisp.dhis.android.core.common.ObjectShould;
import org.junit.Test;
import java.io.IOException;
import java.text.ParseException;
import static com.google.common.truth.Truth.assertThat;
/**
 * Verifies that the enrollment/enrollment.json fixture is mapped onto an
 * {@link Enrollment} with every field intact.
 */
public class EnrollmentShould extends BaseObjectShould implements ObjectShould {

    public EnrollmentShould() {
        super("enrollment/enrollment.json");
    }

    /** Shorthand for parsing timestamps with the canonical DHIS2 date format. */
    private static java.util.Date date(String timestamp) throws ParseException {
        return BaseIdentifiableObject.DATE_FORMAT.parse(timestamp);
    }

    @Override
    @Test
    public void map_from_json_string() throws IOException, ParseException {
        Enrollment enrollment = objectMapper.readValue(jsonStream, Enrollment.class);

        // Audit timestamps.
        assertThat(enrollment.created()).isEqualTo(date("2015-03-28T12:27:50.740"));
        assertThat(enrollment.lastUpdated()).isEqualTo(date("2015-03-28T12:27:50.748"));

        // Identity and references.
        assertThat(enrollment.uid()).isEqualTo("BVJQIxoM2o4");
        assertThat(enrollment.organisationUnit()).isEqualTo("Rp268JB6Ne4");
        assertThat(enrollment.program()).isEqualTo("ur1Edk5Oe2n");

        // Lifecycle dates and state.
        assertThat(enrollment.enrollmentDate()).isEqualTo(date("2014-08-07T12:27:50.730"));
        assertThat(enrollment.incidentDate()).isEqualTo(date("2014-07-21T12:27:50.730"));
        assertThat(enrollment.completedDate()).isEqualTo(date("2014-08-21T12:27:50.730"));
        assertThat(enrollment.followUp()).isFalse();
        assertThat(enrollment.status()).isEqualTo(EnrollmentStatus.COMPLETED);

        // Geo and tracked entity information.
        assertThat(enrollment.coordinate()).isEqualTo(Coordinates.create(10.03, 11.11));
        assertThat(enrollment.trackedEntityInstance()).isEqualTo("D2dUWKQErfQ");
        assertThat(enrollment.geometry().type()).isEqualTo(FeatureType.POINT);
        assertThat(enrollment.geometry().coordinates()).isEqualTo("[11.11, 10.03]");
        assertThat(enrollment.deleted()).isFalse();

        // Nested collections.
        assertThat(enrollment.notes().get(0).uid()).isEqualTo("enrollmentNote1");
        assertThat(enrollment.notes().get(1).uid()).isEqualTo("enrollmentNote2");
        assertThat(enrollment.relationships().get(0).uid()).isEqualTo("hm6qYjPfnzn");
        assertThat(enrollment.relationships().get(0).from().enrollment().enrollment()).isEqualTo("BVJQIxoM2o4");
    }
}
|
<reponame>pradeep-gr/mbed-os5-onsemi
/* mbed Microcontroller Library
* Copyright (c) 2006-2013 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "can_api.h"
#include "cmsis.h"
#include "mbed_error.h"
#include <math.h>
#include <string.h>
/* Handy defines */
/* Message-object partitioning: objects 1..RX_MSG_OBJ_COUNT receive, the
 * remaining TX_MSG_OBJ_COUNT object(s) transmit (see
 * can_config_rxmsgobj / can_config_txmsgobj below). */
#define RX_MSG_OBJ_COUNT 31
#define TX_MSG_OBJ_COUNT 1
#define DLC_MAX 8
#define ID_STD_MASK 0x07FF
#define ID_EXT_MASK 0x1FFFFFFF
#define DLC_MASK 0x0F
/* CANIFn arbitration / mask / message-control register bits. */
#define CANIFn_ARB2_DIR (1UL << 13)
#define CANIFn_ARB2_XTD (1UL << 14)
#define CANIFn_ARB2_MSGVAL (1UL << 15)
#define CANIFn_MSK2_MXTD (1UL << 15)
#define CANIFn_MSK2_MDIR (1UL << 14)
#define CANIFn_MCTRL_EOB (1UL << 7)
#define CANIFn_MCTRL_TXRQST (1UL << 8)
#define CANIFn_MCTRL_RMTEN (1UL << 9)
#define CANIFn_MCTRL_RXIE (1UL << 10)
#define CANIFn_MCTRL_TXIE (1UL << 11)
#define CANIFn_MCTRL_UMASK (1UL << 12)
#define CANIFn_MCTRL_INTPND (1UL << 13)
#define CANIFn_MCTRL_MSGLST (1UL << 14)
#define CANIFn_MCTRL_NEWDAT (1UL << 15)
/* CANIFn command-mask bits. TXRQST (write direction) and NEWDAT (read
 * direction) intentionally share bit 2 -- NOTE(review): assumed from the
 * C_CAN command mask register description; confirm in the manual.
 * "Read" transfers are encoded as the WR bit cleared, hence RD == 0. */
#define CANIFn_CMDMSK_DATA_B (1UL << 0)
#define CANIFn_CMDMSK_DATA_A (1UL << 1)
#define CANIFn_CMDMSK_TXRQST (1UL << 2)
#define CANIFn_CMDMSK_NEWDAT (1UL << 2)
#define CANIFn_CMDMSK_CLRINTPND (1UL << 3)
#define CANIFn_CMDMSK_CTRL (1UL << 4)
#define CANIFn_CMDMSK_ARB (1UL << 5)
#define CANIFn_CMDMSK_MASK (1UL << 6)
#define CANIFn_CMDMSK_WR (1UL << 7)
#define CANIFn_CMDMSK_RD (0UL << 7)
#define CANIFn_CMDREQ_BUSY (1UL << 15)
#define CANSTAT_TXOK (1 << 3) // Transmitted a message successfully This bit must be reset by the CPU. It is never reset by the CAN controller.
#define CANSTAT_RXOK (1 << 4) // Received a message successfully This bit must be reset by the CPU. It is never reset by the CAN controller.
#define CANSTAT_EPASS (1 << 5) // Error passive
#define CANSTAT_EWARN (1 << 6) // Warning status
#define CANSTAT_BOFF (1 << 7) // Busoff status
#define CANCNTL_INIT (1 << 0) // Initialization
#define CANCNTL_IE (1 << 1) // Module interrupt enable
#define CANCNTL_SIE (1 << 2) // Status change interrupt enable
#define CANCNTL_EIE (1 << 3) // Error interrupt enable
#define CANCNTL_DAR (1 << 5) // Disable automatic retransmission
#define CANCNTL_CCE (1 << 6) // Configuration change enable
#define CANCNTL_TEST (1 << 7) // Test mode enable
#define CANTEST_BASIC (1 << 2) // Basic mode
#define CANTEST_SILENT (1 << 3) // Silent mode
#define CANTEST_LBACK (1 << 4) // Loop back mode
#define CANTEST_TX_MASK 0x0060 // Control of CAN_TXD pins
#define CANTEST_TX_SHIFT 5
#define CANTEST_RX (1 << 7) // Monitors the actual value of the CAN_RXD pin.
/* Dispatch state shared with can_irq(): the opaque id and callback
 * registered via can_irq_init(). */
static uint32_t can_irq_id = 0;
static can_irq_handler irq_handler;
/* Per-source software enables; the hardware only offers module-wide
 * status/error enables, so individual sources are demuxed in can_irq(). */
#define IRQ_ENABLE_TX (1 << 0)
#define IRQ_ENABLE_RX (1 << 1)
#define IRQ_ENABLE_EW (1 << 2)
#define IRQ_ENABLE_EP (1 << 3)
#define IRQ_ENABLE_BE (1 << 4)
#define IRQ_ENABLE_STATUS (IRQ_ENABLE_TX | IRQ_ENABLE_RX)
#define IRQ_ENABLE_ERROR (IRQ_ENABLE_EW | IRQ_ENABLE_EP | IRQ_ENABLE_BE)
#define IRQ_ENABLE_ANY (IRQ_ENABLE_STATUS | IRQ_ENABLE_ERROR)
// Bitmask of IRQ_ENABLE_* flags currently requested via can_irq_set().
static uint32_t enabled_irqs = 0;
// Hold the controller in initialization mode, which stops bus activity.
static inline void can_disable(can_t *obj) {
    LPC_C_CAN0->CANCNTL |= CANCNTL_INIT;
}
// Leave initialization mode (resume bus activity), but only if it is
// currently active.
static inline void can_enable(can_t *obj) {
    if (LPC_C_CAN0->CANCNTL & CANCNTL_INIT) {
        LPC_C_CAN0->CANCNTL &= ~CANCNTL_INIT;
    }
}
// Switch the controller between operating modes.
// Returns 1 on success, 0 for unsupported modes (MODE_TEST_GLOBAL and
// anything unrecognised).
int can_mode(can_t *obj, CanMode mode) {
    switch (mode) {
        case MODE_RESET:
            // Leave test mode and hold the controller in initialization.
            LPC_C_CAN0->CANCNTL &= ~CANCNTL_TEST;
            can_disable(obj);
            return 1;
        case MODE_NORMAL:
            LPC_C_CAN0->CANCNTL &= ~CANCNTL_TEST;
            can_enable(obj);
            return 1;
        case MODE_SILENT:
            // Test mode with SILENT set, loop-back cleared.
            LPC_C_CAN0->CANCNTL |= CANCNTL_TEST;
            LPC_C_CAN0->CANTEST |= CANTEST_SILENT;
            LPC_C_CAN0->CANTEST &= ~CANTEST_LBACK;
            return 1;
        case MODE_TEST_LOCAL:
            // Loop-back only: frames are echoed internally.
            LPC_C_CAN0->CANCNTL |= CANCNTL_TEST;
            LPC_C_CAN0->CANTEST &= ~CANTEST_SILENT;
            LPC_C_CAN0->CANTEST |= CANTEST_LBACK;
            return 1;
        case MODE_TEST_SILENT:
            // Loop-back combined with silent operation.
            LPC_C_CAN0->CANCNTL |= CANCNTL_TEST;
            LPC_C_CAN0->CANTEST |= (CANTEST_LBACK | CANTEST_SILENT);
            return 1;
        case MODE_TEST_GLOBAL:
        default:
            return 0;
    }
}
// Configure message object 'handle' (1..32) to accept frames matching
// id/mask in the given format. Pass handle == 0 to pick the first free
// message object automatically.
// Returns the message object number used, or the unchanged out-of-range
// handle (with no hardware touched) if none was available/valid.
int can_filter(can_t *obj, uint32_t id, uint32_t mask, CANFormat format, int32_t handle) {
    uint16_t i;

    // Find the first free message object.
    if (handle == 0) {
        // CANMSGV1/CANMSGV2 together form a 32-bit "message valid" bitmap,
        // one bit per message object.
        uint32_t msgval = LPC_C_CAN0->CANMSGV1 | (LPC_C_CAN0->CANMSGV2 << 16);
        for (i = 0; i < 32; i++) {
            // BUGFIX: use an unsigned shift -- (1 << 31) overflows a
            // signed int and is undefined behaviour.
            if ((msgval & (1UL << i)) == 0) {
                handle = i + 1;
                break;
            }
        }
    }

    if (handle > 0 && handle <= 32) {
        // NOTE(review): ARB2_DIR is not set and the MDIR mask bits are
        // commented out, so the object is configured for reception -- the
        // previous "Direction = TX" comments looked wrong; confirm with
        // the C_CAN manual.
        if (format == CANExtended) {
            // Mark message valid, extended frame, set 29-bit id and mask.
            LPC_C_CAN0->CANIF1_ARB1 = (id & 0xFFFF);
            LPC_C_CAN0->CANIF1_ARB2 = CANIFn_ARB2_MSGVAL | CANIFn_ARB2_XTD | ((id >> 16) & 0x1FFF);
            LPC_C_CAN0->CANIF1_MSK1 = (mask & 0xFFFF);
            LPC_C_CAN0->CANIF1_MSK2 = CANIFn_MSK2_MXTD /*| CANIFn_MSK2_MDIR*/ | ((mask >> 16) & 0x1FFF);
        } else {
            // Mark message valid, set 11-bit id and mask.
            LPC_C_CAN0->CANIF1_ARB2 = CANIFn_ARB2_MSGVAL | ((id << 2) & 0x1FFF);
            LPC_C_CAN0->CANIF1_MSK2 = /*CANIFn_MSK2_MDIR |*/ ((mask << 2) & 0x1FFF);
        }
        // Use mask, single message object (end of buffer) and set DLC.
        LPC_C_CAN0->CANIF1_MCTRL = CANIFn_MCTRL_UMASK | CANIFn_MCTRL_EOB | (DLC_MAX & 0xF);
        // Transfer arbitration, mask and control fields to the object.
        LPC_C_CAN0->CANIF1_CMDMSK_W = CANIFn_CMDMSK_WR | CANIFn_CMDMSK_MASK | CANIFn_CMDMSK_ARB | CANIFn_CMDMSK_CTRL;
        // Start transfer to the given message number.
        LPC_C_CAN0->CANIF1_CMDREQ = (handle & 0x3F);
        // Wait until the transfer to message RAM completes - TODO: maybe not block??
        while ( LPC_C_CAN0->CANIF1_CMDREQ & CANIFn_CMDREQ_BUSY );
    }
    return handle;
}
// Shared C_CAN0 interrupt service routine, installed by can_irq_set().
// Reads the interrupt id, then demultiplexes the status flags into the
// user callback according to the software 'enabled_irqs' mask.
static inline void can_irq() {
uint32_t intid = LPC_C_CAN0->CANINT & 0xFFFF;
// 0x8000 is the status-interrupt id; message-object interrupts are not
// enabled by this driver (can_irq_set() only sets SIE/EIE).
// NOTE(review): id value taken from the C_CAN INT register description;
// confirm against the manual.
if (intid == 0x8000) {
uint32_t status = LPC_C_CAN0->CANSTAT;
// Note that since it's impossible to tell which specific status caused
// the interrupt to fire, this just fires them all.
// In particular, EWARN is not mutually exclusive with the others and
// may fire multiple times with other status transitions, including
// transmit and receive completion (if enabled). Ignoring EWARN with a
// priority system (i.e. blocking EWARN interrupts if EPASS or BOFF is
// set) may discard some EWARN interrupts.
if (status & CANSTAT_BOFF) {
if (enabled_irqs & IRQ_ENABLE_BE) {
irq_handler(can_irq_id, IRQ_BUS);
}
}
if (status & CANSTAT_EPASS) {
if (enabled_irqs & IRQ_ENABLE_EP) {
irq_handler(can_irq_id, IRQ_PASSIVE);
}
}
if (status & CANSTAT_EWARN) {
if (enabled_irqs & IRQ_ENABLE_EW) {
irq_handler(can_irq_id, IRQ_ERROR);
}
}
// TXOK/RXOK must be cleared by software (see CANSTAT_* comments above);
// acknowledge them here before invoking the callback.
if ((status & CANSTAT_RXOK) != 0) {
LPC_C_CAN0->CANSTAT &= ~CANSTAT_RXOK;
irq_handler(can_irq_id, IRQ_RX);
}
if ((status & CANSTAT_TXOK) != 0) {
LPC_C_CAN0->CANSTAT &= ~CANSTAT_TXOK;
irq_handler(can_irq_id, IRQ_TX);
}
}
}
// Register the callback and opaque id that can_irq() uses to dispatch
// CAN events back to the owner of 'obj'.
void can_irq_init(can_t *obj, can_irq_handler handler, uint32_t id) {
    can_irq_id = id;        // handed back verbatim to the callback
    irq_handler = handler;  // invoked from interrupt context
}
// Unregister the irq handler: mask the module interrupt, forget the id
// and disable the vector in the NVIC.
void can_irq_free(can_t *obj) {
    LPC_C_CAN0->CANCNTL &= ~CANCNTL_IE; // module interrupt enable off
    can_irq_id = 0;
    NVIC_DisableIRQ(C_CAN0_IRQn);
}
// Enable or disable one CAN interrupt source.
//
// The hardware only offers a module enable (CANCNTL_IE), a status-change
// enable (CANCNTL_SIE) and an error enable (CANCNTL_EIE), so individual
// sources are tracked in 'enabled_irqs' and demultiplexed in can_irq().
//
// obj    - CAN object (this port drives C_CAN0 only)
// type   - interrupt source to change
// enable - non-zero to enable, zero to disable
void can_irq_set(can_t *obj, CanIrqType type, uint32_t enable) {
    uint32_t mask_enable;

    switch (type) {
        case IRQ_RX:
            mask_enable = IRQ_ENABLE_RX;
            break;
        case IRQ_TX:
            mask_enable = IRQ_ENABLE_TX;
            break;
        case IRQ_BUS:
            mask_enable = IRQ_ENABLE_BE;
            break;
        case IRQ_PASSIVE:
            mask_enable = IRQ_ENABLE_EP;
            break;
        case IRQ_ERROR:
            mask_enable = IRQ_ENABLE_EW;
            break;
        default:
            return;
    }

    if (enable) {
        enabled_irqs = enabled_irqs | mask_enable;
    } else {
        enabled_irqs = enabled_irqs & ~mask_enable;
    }

    // Put CAN in Reset Mode while reprogramming the interrupt enables.
    can_disable(obj);
    // Use the named CANCNTL_* bits (instead of the raw shifts used
    // previously) for consistency with the definitions above.
    if (!(enabled_irqs & IRQ_ENABLE_ANY)) {
        LPC_C_CAN0->CANCNTL &= ~(CANCNTL_IE | CANCNTL_SIE | CANCNTL_EIE);
    } else {
        LPC_C_CAN0->CANCNTL |= CANCNTL_IE;
        // Use status interrupts instead of message interrupts to avoid
        // stomping over potential filter configurations.
        if (enabled_irqs & IRQ_ENABLE_STATUS) {
            LPC_C_CAN0->CANCNTL |= CANCNTL_SIE;
        } else {
            LPC_C_CAN0->CANCNTL &= ~CANCNTL_SIE;
        }
        if (enabled_irqs & IRQ_ENABLE_ERROR) {
            LPC_C_CAN0->CANCNTL |= CANCNTL_EIE;
        } else {
            LPC_C_CAN0->CANCNTL &= ~CANCNTL_EIE;
        }
    }

    // Take it out of reset...
    can_enable(obj);

    // Route the shared handler and enable the vector in the NVIC.
    NVIC_SetVector(C_CAN0_IRQn, (uint32_t) &can_irq);
    NVIC_EnableIRQ(C_CAN0_IRQn);
}
// This table has the sampling points as close to 75% as possible. The first
// value is TSEG1, the second TSEG2.
// Entry index i corresponds to a bit time of (i + 2) time quanta, as the
// per-row comments show; can_speed() indexes it with 'bits' in 1..22.
// The stored values appear to be register encodings (segment length
// minus one) -- NOTE(review): confirm against the C_CAN bit-timing
// register description.
static const int timing_pts[23][2] = {
{0x0, 0x0}, // 2, 50%
{0x1, 0x0}, // 3, 67%
{0x2, 0x0}, // 4, 75%
{0x3, 0x0}, // 5, 80%
{0x3, 0x1}, // 6, 67%
{0x4, 0x1}, // 7, 71%
{0x5, 0x1}, // 8, 75%
{0x6, 0x1}, // 9, 78%
{0x6, 0x2}, // 10, 70%
{0x7, 0x2}, // 11, 73%
{0x8, 0x2}, // 12, 75%
{0x9, 0x2}, // 13, 77%
{0x9, 0x3}, // 14, 71%
{0xA, 0x3}, // 15, 73%
{0xB, 0x3}, // 16, 75%
{0xC, 0x3}, // 17, 76%
{0xD, 0x3}, // 18, 78%
{0xD, 0x4}, // 19, 74%
{0xE, 0x4}, // 20, 75%
{0xF, 0x4}, // 21, 76%
{0xF, 0x5}, // 22, 73%
{0xF, 0x6}, // 23, 70%
{0xF, 0x7}, // 24, 67%
};
// Compute the bit-timing register value for the requested bus speed.
// sclk - peripheral clock in Hz, cclk - desired CAN bit rate in Hz,
// psjw - (re)synchronisation jump width field.
// Returns 0 when no exact TSEG1/TSEG2/prescaler combination matches.
static unsigned int can_speed(unsigned int sclk, unsigned int cclk, unsigned char psjw) {
    uint32_t bitwidth = sclk / cclk;   // total time quanta per bit
    uint16_t brp = bitwidth / 0x18;    // prescaler search starting point
    int bits = 0;                      // index into timing_pts
    int found = 0;

    // Search for a prescaler / quanta-count pair that divides the bit
    // time exactly; (bits + 3) quanta at index 'bits' + 1 sync quantum.
    while (!found && (brp < bitwidth / 4)) {
        brp++;
        for (bits = 22; bits > 0; bits--) {
            uint32_t quanta = (bits + 3) * (brp + 1);
            if (quanta == bitwidth) {
                found = 1;
                break;
            }
        }
    }

    if (!found) {
        return 0;
    }

    // Pack TSEG2, TSEG1, SJW and the prescaler into the register layout.
    // The original also OR-ed in a clock-divider field that was always
    // zero, which is omitted here (no effect on the result).
    return ((timing_pts[bits][1] & 0x7) << 12)
         | ((timing_pts[bits][0] & 0xf) << 8)
         | ((psjw & 0x3) << 6)
         | (brp & 0x3F);
}
// Reset message objects 1..RX_MSG_OBJ_COUNT to an empty/invalid state,
// then install a catch-all standard-id filter (id 0, mask 0) in object 1
// via can_filter(). Always returns 1.
int can_config_rxmsgobj(can_t *obj) {
uint16_t i = 0;
// Make sure the interface is available
while ( LPC_C_CAN0->CANIF1_CMDREQ & CANIFn_CMDREQ_BUSY );
// Clear the staging registers once; the same (zeroed) contents are then
// written to every RX message object in the loop below.
LPC_C_CAN0->CANIF1_ARB1 = 0;
LPC_C_CAN0->CANIF1_ARB2 = 0;
LPC_C_CAN0->CANIF1_MCTRL = 0;
for ( i = 1; i <= RX_MSG_OBJ_COUNT; i++ ) {
// Transfer arb and control fields to message object
LPC_C_CAN0->CANIF1_CMDMSK_W = CANIFn_CMDMSK_WR | CANIFn_CMDMSK_ARB | CANIFn_CMDMSK_CTRL;
// Start Transfer to given message number
LPC_C_CAN0->CANIF1_CMDREQ = (i & 0x3F);
// Wait until transfer to message ram complete - TODO: maybe not block??
while ( LPC_C_CAN0->CANIF1_CMDREQ & CANIFn_CMDREQ_BUSY );
}
// Accept all messages
can_filter(obj, 0, 0, CANStandard, 1);
return 1;
}
// can_config_txmsgobj : initialize message objects RX_MSG_OBJ_COUNT+1 ..
// RX_MSG_OBJ_COUNT+TX_MSG_OBJ_COUNT as transmit buffers (direction bit set,
// everything else cleared). Always returns 1.
int can_config_txmsgobj(can_t *obj) {
    uint16_t i = 0;
    // Make sure the interface is available
    while ( LPC_C_CAN0->CANIF1_CMDREQ & CANIFn_CMDREQ_BUSY );
    // Mark message valid, Direction = TX, Don't care about anything else
    LPC_C_CAN0->CANIF1_ARB1 = 0;
    LPC_C_CAN0->CANIF1_ARB2 = CANIFn_ARB2_DIR;
    LPC_C_CAN0->CANIF1_MCTRL = 0;
    for ( i = RX_MSG_OBJ_COUNT + 1; i <= (TX_MSG_OBJ_COUNT + RX_MSG_OBJ_COUNT); i++ )
    {
        // Transfer arb and control fields to message object
        LPC_C_CAN0->CANIF1_CMDMSK_W = CANIFn_CMDMSK_WR | CANIFn_CMDMSK_ARB | CANIFn_CMDMSK_CTRL;
        // In a union with CANIF1_CMDMSK_R
        // Start Transfer to given message number
        LPC_C_CAN0->CANIF1_CMDREQ = i & 0x3F;
        // Wait until transfer to message ram complete - TODO: maybe not block??
        while( LPC_C_CAN0->CANIF1_CMDREQ & CANIFn_CMDREQ_BUSY );
    }
    return 1;
}
// can_init_freq : power up and reset the C_CAN block, route the RX/TX pins
// through the switch matrix, program the requested bit rate, and set up the
// RX/TX message objects.
//   rd, td - pin identifiers written into PINASSIGN for CAN RXD / TXD
//   hz     - desired bus bit rate in Hz
void can_init_freq(can_t *obj, PinName rd, PinName td, int hz) {
    // Enable power and clock
    LPC_SYSCON->SYSAHBCLKCTRL1 |= (1UL << 7);
    // Pulse the peripheral reset-control bit (set, then clear).
    LPC_SYSCON->PRESETCTRL1 |= (1UL << 7);
    LPC_SYSCON->PRESETCTRL1 &= ~(1UL << 7);
    // Enable Initialization mode (INIT, bit 0) so timing can be changed.
    if (!(LPC_C_CAN0->CANCNTL & (1UL << 0))) {
        LPC_C_CAN0->CANCNTL |= (1UL << 0);
    }
    // Route CAN RXD (bits [23:16]) and TXD (bits [15:8]) to the given pins.
    LPC_SWM->PINASSIGN[6] &= ~(0x00FFFF00L);
    LPC_SWM->PINASSIGN[6] |= (rd << 16) | (td << 8);
    can_frequency(obj, hz);
    // Resume operation: clear INIT and wait until the controller leaves it.
    LPC_C_CAN0->CANCNTL &= ~(1UL << 0);
    while ( LPC_C_CAN0->CANCNTL & (1UL << 0) );
    // Initialize RX message object
    can_config_rxmsgobj(obj);
    // Initialize TX message object
    can_config_txmsgobj(obj);
}
// can_init : initialize the CAN peripheral on the given pins at the default
// bus rate of 100 kHz. See can_init_freq() for details.
void can_init(can_t *obj, PinName rd, PinName td) {
    can_init_freq(obj, rd, td, 100000);
}
// can_free : power down the C_CAN block by gating its AHB clock and clearing
// its reset-control bit. A later can_init() re-enables it.
void can_free(can_t *obj) {
    LPC_SYSCON->SYSAHBCLKCTRL1 &= ~(1UL << 7);
    LPC_SYSCON->PRESETCTRL1 &= ~(1UL << 7);
}
// can_frequency : program the bus bit rate.
//   f - desired frequency in Hz
// Returns 1 when an exact register setting was found and applied, 0 otherwise.
int can_frequency(can_t *obj, int f) {
    // can_speed() packs CANBT in the low 16 bits and the clock divider into
    // bits [19:16]; a return of 0 means no exact match was found.
    int btr = can_speed(SystemCoreClock, (unsigned int)f, 1);
    int clkdiv = (btr >> 16) & 0x0F;
    btr = btr & 0xFFFF;
    if (btr > 0) {
        // Set the bit clock; CCE + INIT must be set to unlock CANBT.
        LPC_C_CAN0->CANCNTL |= (1UL << 6 | 1UL << 0); // set CCE and INIT
        LPC_C_CAN0->CANCLKDIV = clkdiv;
        LPC_C_CAN0->CANBT = btr;
        LPC_C_CAN0->CANBRPE = 0x0000;
        LPC_C_CAN0->CANCNTL &= ~(1UL << 6 | 1UL << 0); // clear CCE and INIT
        return 1;
    }
    return 0;
}
// can_write : queue one CAN frame for transmission.
//   msg - frame to send (id, format, type, len, data[])
//   cc  - unused here; kept for the common CAN API signature
// Returns 1 when the frame was handed to a free TX message object,
// 0 when every TX message object still has a pending transmit request.
int can_write(can_t *obj, CAN_Message msg, int cc) {
    // Make sure controller is enabled
    can_enable(obj);
    // Find first message object that isn't pending to send
    uint16_t msgnum = 0;
    uint32_t txPending = (LPC_C_CAN0->CANTXREQ1 & 0xFF) | (LPC_C_CAN0->CANTXREQ2 << 16);
    uint16_t i = 0;
    for(i = RX_MSG_OBJ_COUNT; i < 32; i++) {
        if ((txPending & (1 << i)) == 0) {
            msgnum = i+1;    // message object numbers are 1-based
            break;
        }
    }
    // If no messageboxes are available, stop and return failure
    if (msgnum == 0) {
        return 0;
    }
    // Make sure the interface is available
    while ( LPC_C_CAN0->CANIF1_CMDREQ & CANIFn_CMDREQ_BUSY );
    // Set the direction bit based on the message type
    uint32_t direction = 0;
    if (msg.type == CANData) {
        direction = CANIFn_ARB2_DIR;
    }
    if (msg.format == CANExtended) {
        // Mark message valid, Extended Frame, Set Identifier and mask everything
        // NOTE(review): the 0x1FFFF mask spans 17 bits while the ARB2 id field
        // is 13 bits; harmless for valid 29-bit ids ((id >> 16) <= 0x1FFF) but
        // worth confirming against the register layout.
        LPC_C_CAN0->CANIF1_ARB1 = (msg.id & 0xFFFF);
        LPC_C_CAN0->CANIF1_ARB2 = CANIFn_ARB2_MSGVAL | CANIFn_ARB2_XTD | direction | ((msg.id >> 16) & 0x1FFFF);
        LPC_C_CAN0->CANIF1_MSK1 = (ID_EXT_MASK & 0xFFFF);
        LPC_C_CAN0->CANIF1_MSK2 = CANIFn_MSK2_MXTD | CANIFn_MSK2_MDIR | ((ID_EXT_MASK >> 16) & 0x1FFF);
    } else {
        // Mark message valid, Set Identifier and mask everything
        LPC_C_CAN0->CANIF1_ARB2 = CANIFn_ARB2_MSGVAL | direction | ((msg.id << 2) & 0x1FFF);
        LPC_C_CAN0->CANIF1_MSK2 = CANIFn_MSK2_MDIR | ((ID_STD_MASK << 2) & 0x1FFF);
    }
    // Use mask, request transmission, single message object and set DLC
    LPC_C_CAN0->CANIF1_MCTRL = CANIFn_MCTRL_UMASK | CANIFn_MCTRL_TXRQST | CANIFn_MCTRL_EOB | (msg.len & 0xF);
    // Payload bytes, packed little-endian into the four 16-bit data registers.
    LPC_C_CAN0->CANIF1_DA1 = ((msg.data[1] & 0xFF) << 8) | (msg.data[0] & 0xFF);
    LPC_C_CAN0->CANIF1_DA2 = ((msg.data[3] & 0xFF) << 8) | (msg.data[2] & 0xFF);
    LPC_C_CAN0->CANIF1_DB1 = ((msg.data[5] & 0xFF) << 8) | (msg.data[4] & 0xFF);
    LPC_C_CAN0->CANIF1_DB2 = ((msg.data[7] & 0xFF) << 8) | (msg.data[6] & 0xFF);
    // Transfer all fields to message object
    LPC_C_CAN0->CANIF1_CMDMSK_W = CANIFn_CMDMSK_WR | CANIFn_CMDMSK_MASK | CANIFn_CMDMSK_ARB | CANIFn_CMDMSK_CTRL | CANIFn_CMDMSK_TXRQST | CANIFn_CMDMSK_DATA_A | CANIFn_CMDMSK_DATA_B;
    // Start Transfer to given message number
    LPC_C_CAN0->CANIF1_CMDREQ = (msgnum & 0x3F);
    // Wait until transfer to message ram complete - TODO: maybe not block??
    while ( LPC_C_CAN0->CANIF1_CMDREQ & CANIFn_CMDREQ_BUSY);
    // Wait until TXOK is set, then clear it - TODO: maybe not block
    //while ( !(LPC_C_CAN0->STAT & CANSTAT_TXOK) );
    LPC_C_CAN0->CANSTAT &= ~(1UL << 3);
    return 1;
}
// can_read : fetch one received frame into *msg.
//   handle - message object number to read (1..32), or 0 to take the first
//            RX object that has new data
// Returns 1 when a frame was copied into *msg, 0 when nothing was available
// or the handle was out of range.
int can_read(can_t *obj, CAN_Message *msg, int handle) {
    uint16_t i;
    // Make sure controller is enabled
    can_enable(obj);
    // Find first message object with new data
    if (handle == 0) {
        uint32_t newdata = LPC_C_CAN0->CANND1 | (LPC_C_CAN0->CANND2 << 16);
        // Find first free messagebox
        for (i = 0; i < RX_MSG_OBJ_COUNT; i++) {
            if (newdata & (1 << i)) {
                handle = i+1;
                break;
            }
        }
    }
    if (handle > 0 && handle <= 32) {
        // Wait until message interface is free
        while ( LPC_C_CAN0->CANIF2_CMDREQ & CANIFn_CMDREQ_BUSY );
        // Transfer all fields to message object
        LPC_C_CAN0->CANIF2_CMDMSK_W = CANIFn_CMDMSK_RD | CANIFn_CMDMSK_MASK | CANIFn_CMDMSK_ARB | CANIFn_CMDMSK_CTRL | CANIFn_CMDMSK_CLRINTPND | CANIFn_CMDMSK_TXRQST | CANIFn_CMDMSK_DATA_A | CANIFn_CMDMSK_DATA_B;
        // Start Transfer from given message number
        LPC_C_CAN0->CANIF2_CMDREQ = (handle & 0x3F);
        // Wait until transfer to message ram complete
        while ( LPC_C_CAN0->CANIF2_CMDREQ & CANIFn_CMDREQ_BUSY );
        if (LPC_C_CAN0->CANIF2_ARB2 & CANIFn_ARB2_XTD) {
            msg->format = CANExtended;
            // NOTE(review): ARB1 holds the LOW 16 id bits and ARB2 the upper
            // 13, so masking ARB1 with 0x1FFF and shifting it by 16 looks
            // swapped (expected ((ARB2 & 0x1FFF) << 16) | ARB1). Verify the
            // extended-id reassembly against the C_CAN register layout.
            msg->id = (LPC_C_CAN0->CANIF2_ARB1 & 0x1FFF) << 16;
            msg->id |= (LPC_C_CAN0->CANIF2_ARB2 & 0x1FFF);
        } else {
            msg->format = CANStandard;
            // Standard 11-bit id is stored left-shifted by 2 in ARB2.
            msg->id = (LPC_C_CAN0->CANIF2_ARB2 & 0x1FFF) >> 2;
        }
        if (LPC_C_CAN0->CANIF2_ARB2 & CANIFn_ARB2_DIR) {
            msg->type = CANRemote;
        }
        else {
            msg->type = CANData;
        }
        msg->len = (LPC_C_CAN0->CANIF2_MCTRL & 0xF); // TODO: If > 8, len = 8
        msg->data[0] = ((LPC_C_CAN0->CANIF2_DA1 >> 0) & 0xFF);
        msg->data[1] = ((LPC_C_CAN0->CANIF2_DA1 >> 8) & 0xFF);
        msg->data[2] = ((LPC_C_CAN0->CANIF2_DA2 >> 0) & 0xFF);
        msg->data[3] = ((LPC_C_CAN0->CANIF2_DA2 >> 8) & 0xFF);
        msg->data[4] = ((LPC_C_CAN0->CANIF2_DB1 >> 0) & 0xFF);
        msg->data[5] = ((LPC_C_CAN0->CANIF2_DB1 >> 8) & 0xFF);
        msg->data[6] = ((LPC_C_CAN0->CANIF2_DB2 >> 0) & 0xFF);
        msg->data[7] = ((LPC_C_CAN0->CANIF2_DB2 >> 8) & 0xFF);
        // Clear the RXOK status flag (bit 4).
        LPC_C_CAN0->CANSTAT &= ~(1UL << 4);
        return 1;
    }
    return 0;
}
// can_reset : return the controller to a clean state -- clear the peripheral
// reset-control bit and status register, rebuild the RX/TX message objects,
// and re-enable the controller.
void can_reset(can_t *obj) {
    LPC_SYSCON->PRESETCTRL1 &= ~(1UL << 7);
    LPC_C_CAN0->CANSTAT = 0;
    can_config_rxmsgobj(obj);
    can_config_txmsgobj(obj);
    can_enable(obj); // clears a bus-off condition if necessary
}
// can_rderror : current receive error counter (REC, CANEC bits [14:8]).
unsigned char can_rderror(can_t *obj) {
    return ((LPC_C_CAN0->CANEC >> 8) & 0x7F);
}
// can_tderror : current transmit error counter (TEC, CANEC low byte).
unsigned char can_tderror(can_t *obj) {
    return (LPC_C_CAN0->CANEC & 0xFF);
}
// can_monitor : enter or leave silent (listen-only) operation.
//   silent - nonzero enables test mode with the silent bit set; zero
//            restores normal operation.
void can_monitor(can_t *obj, int silent) {
    if (silent) {
        LPC_C_CAN0->CANCNTL |= (1UL << 7);   // TEST: enable test mode
        LPC_C_CAN0->CANTEST |= (1UL << 3);   // SILENT: listen-only
    } else {
        LPC_C_CAN0->CANCNTL &= ~(1UL << 7);
        LPC_C_CAN0->CANTEST &= ~(1UL << 3);
    }
    // NOTE(review): this sets INIT (bit 0) when it was clear, leaving the
    // controller in initialization mode after a mode change -- confirm this
    // is intentional (a subsequent can_enable() presumably clears it).
    if (!(LPC_C_CAN0->CANCNTL & (1UL << 0))) {
        LPC_C_CAN0->CANCNTL |= (1UL << 0);
    }
}
|
<filename>dashboard/tasks.py
import dramatiq
from periodiq import PeriodiqMiddleware, cron
from dramatiq import get_broker
from .models import Job
from .models import Rpc
broker = get_broker()
broker.add_middleware(PeriodiqMiddleware(skip_delay=30))
@dramatiq.actor
def process_job(job_id):
    """Fetch the Job identified by ``job_id``, execute it, and persist it
    with a DONE status."""
    record = Job.objects.get(pk=job_id)
    record.process()
    record.status = Job.STATUS_DONE
    record.save()
@dramatiq.actor
def process_rpc(rpc_id):
    """Dispatch the Rpc identified by ``rpc_id`` and record its completion."""
    record = Rpc.objects.get(pk=rpc_id)
    # process() hands the request to the dispatcher, which eventually sends
    # it to the specified rpc function.
    record.process()
    # NOTE(review): the status constant comes from Job, not Rpc -- kept
    # as-is; confirm the models share these STATUS_* constants.
    record.status = Job.STATUS_DONE
    record.save()
@dramatiq.actor(periodic=cron('* * * * *'))
def scheduled():
    '''Example function that is configured to run at a regular interval.

    Must run scheduler from shell.
    e.g. $ python manage.py runperiodiq -v2
    Help specifying cron time use https://crontab.guru/
    '''
    # Plain literal: the previous f-string had no placeholders, so the
    # f-prefix was unnecessary (output is byte-identical).
    print('Testing scheduled tasks')
function main() {
  // Ask the detector first (side-effect order preserved), then allow the
  // "sorry-force-legacy" hash token to force the legacy path for testing.
  var legacy = detectLegacy() ||
    /^#?sorry-force-legacy/i.test(window.location.hash)
  // Legacy systems get the "sorry" popup prepended to the document body.
  if (legacy) {
    document.body.insertBefore(createSorryPopup(), document.body.firstChild)
  }
}
|
<reponame>io-m/wedge-api
package main
import (
"context"
"fmt"
"log"
"math/rand"
"strconv"
"time"
"github.com/Wappsto/wedge-api/go/slx"
"github.com/Wappsto/wedge-api/go/wedge"
"github.com/google/uuid"
"google.golang.org/grpc"
)
// Package-level demo state shared between model construction and the
// reporting loop in main().
var (
	// nodeServer identifies this node to the wedge server; a fresh random
	// UUID is generated on every run.
	nodeServer = &wedge.NodeIdentity{
		Id: uuid.New().String(),
	}
	// number describes the numeric range/unit of the value; filled in main().
	number = &slx.Number{}
	// state holds the value's report state(s); filled in main().
	state = []*slx.State{}
	// device is the device list registered with the wedge server.
	device = []*slx.Device{}
	// value = []slx.Value{}
)
// main builds a demo temperature-sensor model, registers it with the wedge
// server over gRPC, and then reports a random temperature every two seconds.
func main() {
	// ------------------------------------------------------------------
	// Model definition: one device exposing one value backed by a Report
	// state. (Populates the package-level state/number/device variables.)
	state = []*slx.State{
		{
			Data: "25",            // initial reading, as a string
			Type: slx.Type_Report, // this state reports measurements
			Id:   2,
		},
	}
	number = &slx.Number{
		Min:  0.01,
		Max:  100,
		Step: 1,
		Unit: "Celsius",
	}
	value := []*slx.Value{
		{
			Name:       "temperature",
			Type:       "",
			Status:     "OK",
			Permission: "RW",
			Number:     number,
			State:      state,
			Id:         1,
		},
	}
	device = []*slx.Device{
		{
			Name:          "Temperature-Sensor",
			Manufacturer:  "Mitsumi",
			Product:       "MM3286",
			Serial:        "bla-bla-bla",
			Description:   "Reliable Japanese temp sensor",
			Protocol:      "PROFINET/ETHERNET-TCP/IP",
			Communication: "always",
			Id:            1,
			Value:         value,
		},
	}
	// ------------------------------------------------------------------
	// Connect to the wedge server and push the model.
	connection, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		// Include the underlying error instead of discarding it.
		log.Fatalf("Failed to establish connection: %v", err)
	}
	defer connection.Close()
	wedgeClient := wedge.NewWedgeClient(connection)
	modelRequest := wedge.SetModelRequest{
		Model: &wedge.Model{
			Node:   nodeServer,
			Device: device,
		},
	}
	// SetModel returns slx.Reply{} as response from the wedge server.
	response, err := wedgeClient.SetModel(context.Background(), &modelRequest)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Response about model received from wedge server: %v\n", response)
	fmt.Println("==============================================")
	// Seed exactly once: re-seeding inside the loop with second resolution
	// (as before) risks repeating values if iterations share a timestamp.
	rand.Seed(time.Now().Unix())
	// Report a new pseudo-random temperature every two seconds, forever.
	for {
		updatedData := rand.Intn(101)
		fmt.Printf("New temperature is %d Celsius\n", updatedData)
		setStateRequest := wedge.SetStateRequest{
			Node:     nodeServer,
			DeviceId: 1,
			ValueId:  1,
			State: &slx.State{
				Data: strconv.Itoa(updatedData),
				Id:   2,
			},
		}
		stateResponse, err := wedgeClient.SetState(context.Background(), &setStateRequest)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("Response about state received from wedge server: %v\n", stateResponse)
		fmt.Println("==============================================")
		time.Sleep(2 * time.Second)
	}
}
|
<filename>src/shared/helpers/converters/stockchart.ts
import moment from 'moment';
import { IChartItem, IServerCandle, ITVChartCandle } from '../../types/models';
function convertCandleDate(date: string) {
  // Parse the timestamp as UTC and hand back epoch milliseconds.
  const parsed = moment.utc(date);
  return parsed.valueOf();
}
export function convertChartHistory(data: IServerCandle[]): IChartItem[] {
  // Convert every server candle into the internal chart representation.
  return data.map((candle) => convertChartTick(candle));
}
export function convertChartTick(tick: IServerCandle): IChartItem {
  // OHLCV fields are copied verbatim; only the start time needs conversion.
  const { open, close, high, low, volume, start } = tick;
  return {
    open,
    close,
    high,
    low,
    volume,
    ts: convertCandleDate(start),
  };
}
export function convertTVChartHistory(data: IServerCandle[]): ITVChartCandle[] {
  // Convert every server candle into the TradingView candle shape.
  return data.map((candle) => convertTVChartTick(candle));
}
export function convertTVChartTick(tick: IServerCandle): ITVChartCandle {
  // Same mapping as convertChartTick, but TradingView expects `time`.
  const { open, close, high, low, volume, start } = tick;
  return {
    open,
    close,
    high,
    low,
    volume,
    time: convertCandleDate(start),
  };
}
|
<reponame>SathishRamasubbu/Local----Zipkin
/**
* Copyright 2015-2018 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.storage.elasticsearch.http;
import java.io.IOException;
import java.util.List;
import okhttp3.OkHttpClient;
import zipkin.internal.Nullable;
import zipkin.internal.V2StorageComponent;
import zipkin.storage.AsyncSpanStore;
import zipkin2.CheckResult;
import zipkin2.elasticsearch.ElasticsearchStorage;
import zipkin2.storage.SpanConsumer;
import zipkin2.storage.SpanStore;
import zipkin2.storage.StorageComponent;
/**
 * V1 storage adapter that delegates to the zipkin2 {@link ElasticsearchStorage}
 * component, optionally exposing a legacy span store for reads against
 * multi-type (pre v1.31) indexes.
 */
public final class ElasticsearchHttpStorage extends StorageComponent
    implements V2StorageComponent.LegacySpanStoreProvider {
  /** @see ElasticsearchStorage.HostsSupplier */
  public interface HostsSupplier extends ElasticsearchStorage.HostsSupplier {
  }

  public static Builder builder(OkHttpClient client) {
    return new Builder(ElasticsearchStorage.newBuilder(client)).legacyReadsEnabled(true);
  }

  public static Builder builder() {
    return new Builder(ElasticsearchStorage.newBuilder()).legacyReadsEnabled(true);
  }

  // NOTE(review): this returns a builder with DEFAULT settings rather than
  // one seeded from this instance's configuration -- confirm whether
  // toBuilder() is expected to preserve the current delegate settings.
  public final Builder toBuilder() {
    return new Builder(ElasticsearchStorage.newBuilder()).legacyReadsEnabled(true);
  }

  /** Delegates every configuration call to a zipkin2 {@link ElasticsearchStorage.Builder}. */
  public static final class Builder extends StorageComponent.Builder {
    final ElasticsearchStorage.Builder delegate;
    boolean legacyReadsEnabled, searchEnabled;

    Builder(ElasticsearchStorage.Builder delegate) {
      this.delegate = delegate;
    }

    /** @see ElasticsearchStorage.Builder#hosts(List) */
    public final Builder hosts(final List<String> hosts) {
      delegate.hosts(hosts);
      return this;
    }

    /** @see ElasticsearchStorage.Builder#hostsSupplier(ElasticsearchStorage.HostsSupplier) */
    public final Builder hostsSupplier(ElasticsearchStorage.HostsSupplier hosts) {
      delegate.hostsSupplier(hosts);
      return this;
    }

    /** @see ElasticsearchStorage.Builder#maxRequests(int) */
    public final Builder maxRequests(int maxRequests) {
      delegate.maxRequests(maxRequests);
      return this;
    }

    /** @see ElasticsearchStorage.Builder#pipeline(String) */
    public final Builder pipeline(String pipeline) {
      delegate.pipeline(pipeline);
      return this;
    }

    /** @see ElasticsearchStorage.Builder#namesLookback(int) */
    public final Builder namesLookback(int namesLookback) {
      delegate.namesLookback(namesLookback);
      return this;
    }

    /** When true, Redundantly queries indexes made with pre v1.31 collectors. Defaults to true. */
    public final Builder legacyReadsEnabled(boolean legacyReadsEnabled) {
      this.legacyReadsEnabled = legacyReadsEnabled;
      return this;
    }

    /** Visible for testing */
    public final Builder flushOnWrites(boolean flushOnWrites) {
      delegate.flushOnWrites(flushOnWrites);
      return this;
    }

    /** @see ElasticsearchStorage.Builder#index(String) */
    public final Builder index(String index) {
      delegate.index(index);
      return this;
    }

    /** @see ElasticsearchStorage.Builder#dateSeparator(char) */
    public final Builder dateSeparator(char dateSeparator) {
      delegate.dateSeparator(dateSeparator);
      return this;
    }

    /** @see ElasticsearchStorage.Builder#indexShards(int) */
    public final Builder indexShards(int indexShards) {
      delegate.indexShards(indexShards);
      return this;
    }

    /** @see ElasticsearchStorage.Builder#indexReplicas(int) */
    public final Builder indexReplicas(int indexReplicas) {
      delegate.indexReplicas(indexReplicas);
      return this;
    }

    @Override public final Builder strictTraceId(boolean strictTraceId) {
      delegate.strictTraceId(strictTraceId);
      return this;
    }

    @Override public final Builder searchEnabled(boolean searchEnabled) {
      // Mirrored locally so the built component remembers the flag.
      delegate.searchEnabled(this.searchEnabled = searchEnabled);
      return this;
    }

    @Override public final ElasticsearchHttpStorage build() {
      return new ElasticsearchHttpStorage(delegate.build(), legacyReadsEnabled, searchEnabled);
    }
  }

  public final ElasticsearchStorage delegate;
  final boolean legacyReadsEnabled, searchEnabled;

  ElasticsearchHttpStorage(ElasticsearchStorage delegate, boolean legacyReadsEnabled,
    boolean searchEnabled) {
    this.delegate = delegate;
    this.legacyReadsEnabled = legacyReadsEnabled;
    this.searchEnabled = searchEnabled;
  }

  @Override public SpanStore spanStore() {
    return delegate.spanStore();
  }

  @Override public SpanConsumer spanConsumer() {
    return delegate.spanConsumer();
  }

  /** Returns a legacy read path, or null when disabled or not applicable. */
  @Override @Nullable public AsyncSpanStore legacyAsyncSpanStore() {
    if (!legacyReadsEnabled) return null;
    if (delegate.version() >= 6 /* multi-type (legacy) index isn't possible */) {
      return null;
    }
    return new LegacyElasticsearchHttpSpanStore(delegate);
  }

  @Override public CheckResult check() {
    return delegate.check();
  }

  /** This is a blocking call, only used in tests. */
  void clear() throws IOException {
    delegate.clear();
  }

  @Override public void close() {
    delegate.close();
  }
}
|
<filename>src/bundle/JsDateTransformer.ts
import { TransformerSet } from '../TransformerSet';
export class JsDateTransformer implements TransformerSet {
  // Accept either an already-constructed Date (returned as-is) or a
  // date string, which is handed to the Date constructor.
  from(value: string | Date): Date {
    return value instanceof Date ? value : new Date(value);
  }

  // Serialize as an ISO-8601 UTC string.
  to(value: Date): string {
    return value.toISOString();
  }
}
|
<reponame>tanahiro/sigfil<gh_stars>0
require "#{__dir__}/test_helper"
class SigfilTest < Minitest::Test
  # Smoke test: the gem must expose a non-nil VERSION constant.
  def test_that_it_has_a_version_number
    refute_nil ::SigFil::VERSION
  end
end
|
package provider
// ProviderType represents the type of a provider for a machine.
type ProviderType int

// Supported provider kinds; None is the zero value.
const (
	None ProviderType = iota
	Local
	Remote
)

// providerTypes maps each ProviderType ordinal to its display name.
// It must stay in sync with the const block above.
var providerTypes = []string{
	"",
	"Local",
	"Remote",
}

// String returns the human-readable name of the provider type, or the
// empty string for out-of-range values.
func (t ProviderType) String() string {
	// Idiom fix: dropped the redundant else-after-return branch.
	if int(t) >= 0 && int(t) < len(providerTypes) {
		return providerTypes[t]
	}
	return ""
}
|
# Print the URL where this package's docs will be browsable, then serve
# the documentation locally with godoc on port 6060 (blocks).
echo "http://127.0.0.1:6060/pkg/github.com/taoey/iris-admin/"
godoc -http :6060
#!/bin/bash
# This script gathers graded student submission and produces both a
# CSV file (for upload to grades.cs.umd.edu) and directory of
# distribution-ready copies of students' graded files.
# usage : -> [nothing]
# Print invocation help to STDOUT via a heredoc.
usage() {
cat <<EOF
Usage: $0 [directory with student submissions]
The assignment-specific variables ASSIGN, SUBPARTS, SUBTOTALS must be
configured to proper values inside this script.
EOF
}
###
# Input verification

# Exit if not given a submissions directory as input.
if [[ $# -ne 1 || ! -d $1 ]]
then >&2 echo "You must provide the graded submissions directory."
     usage
     exit 1
else # $1 is a directory, which should contain all graded student
     # submissions (directories of the form 'studentid__N').
     for d in "$1"/*
     do # Fix: the original pattern used the range [a-Z], which is invalid
        # or locale-dependent in ERE; [[:alnum:]] is the portable spelling.
        # The pattern is also anchored so a name that merely CONTAINS
        # 'x__1' no longer slips through.
        if ! grep -Eq "^[[:alnum:]]+__[0-9]+$" <<<"$(basename "$d")"
        then >&2 echo "Non-submission found in given directory: '$d'"
             usage
             exit 2
        fi
     done
     SUBS="$1"
fi
# The following variables define which files are graded in each
# student submission:
#
#  ASSIGN is the grades.cs.umd.edu 'short assignment' name;
#  SUBPARTS is an array of literal file names or regexes for
#    each graded file;
#  SUBTOTALS are the total amount of points for each corresponding SUBPART.
#
# If some files points are worth less than others, use the DIVISORS
# array to divide each resp. subparts graded points by some
# integer. An empty DIVISORS array is the same as all 1s. See
# ASSIGN=A4 for an example use of DIVISORS.
# (Defaults are intentionally empty; the sanity check below aborts the
# run if they are not configured.)
ASSIGN=
SUBPARTS=()
SUBTOTALS=()
DIVISORS=()
# ASSIGN='A4'
# SUBPARTS=('(calendar|a[ssign]*3).*.rkt' '(editor|83).rkt' '(index|87).*.rkt' 'chips.rkt')
# SUBTOTALS=(100 110 110 100)
# DIVISORS=(2 2 2 1)
# ASSIGN='A5'
# SUBPARTS=('.*nvad.*\.rkt' '.*ist.*\.rkt')
# SUBTOTALS=(100 94)
# ASSIGN='A6'
# SUBPARTS=('.*nvad.*\.rkt' '.*ist.*\.rkt')
# SUBTOTALS=(140 60)
ASSIGN='A7'
SUBPARTS=('^abs\.rkt$' 'invader.*\.rkt')
SUBTOTALS=(55 180)
# ASSIGN='A8'
# SUBPARTS=('ft.rkt' 'ml.rkt')
# SUBTOTALS=(80 140)
# ASSIGN='A9'
# SUBPARTS=('^.*.rkt$')
# SUBTOTALS=(30)
# ASSIGN='A10'
# SUBPARTS=('tml.rkt')
# SUBTOTALS=(100)
# ASSIGN='A11'
# SUBPARTS=('.*.rkt')
# SUBTOTALS=(45)

# Calculate totals for sanity checks later.
NPARTS=${#SUBPARTS[@]}
TOTAL=0
for ((i=0;i<$NPARTS;i++))
do divisor=${DIVISORS[$i]}
   # ${divisor:-1}: an unset DIVISORS entry counts as dividing by 1.
   TOTAL=$(($TOTAL + (${SUBTOTALS[$i]} / ${divisor:-1})))
done
if [[ $NPARTS -eq 0 || $TOTAL -eq 0 ]]
then >&2 echo "No assignment subparts configured!"
     >&2 echo "You need to edit the required variables in the script."
     usage
     exit 1
fi
#
###
###
# Output configuration

# The current term (used in URL generation)
CURRTERM=fall2017
# The class name (used in URL generation)
CLASSNM=cmsc131A
URLBASE="https://cs.umd.edu/class/${CURRTERM}/${CLASSNM}"
# The generated CSV for upload to grades.cs.umd.edu.
# (${ASSIGN,,} lowercases the short assignment name.)
CSVFILE="${ASSIGN,,}-grades.csv"
# The generated directory of distribution-ready student files (with
# generated names). This directory shares the assignment short name,
# and must be uploaded to the following www-public class directory
# (with privileges 0771, so students can't list the directory).
DISTRDIR="${ASSIGN}"
#
###
###
# Utilities

# For more information at runtime, set positive VERBOSITY.
VERBOSITY=0

# errcho : [message...] -> [nothing]
# Echo the given message to STDERR, but only when VERBOSITY is positive.
function errcho {
    if [[ $VERBOSITY -gt 0 ]]
    # Fix: "$*" is quoted so callers passing regex-like text (e.g. grader
    # comment patterns such as ';;>.*') are not glob-expanded by the shell.
    then >&2 echo "$*"
    fi
}
# text_of_wxme : [racket-file-path] -> [racket-file-path]
# If the given racket file is in the GRacket editor format, create
# a file of the form "$(basename "$1")${NO_WXME_SUFFIX}.rkt" with images,
# comment boxes, etc. removed. Returns either the converted file's
# path or the given path if no need for conversion.
# OUTPUT: The name of the non-GRacket formatted file via STDOUT.
NO_WXME_SUFFIX='-no-wxme'
function text_of_wxme {
    # A wxme (GRacket editor) file announces itself on its first line.
    if [[ -s $1 && $(head -n 1 "$1") =~ .*wxme.* ]]
    then local textf="$(sed "s/.rkt/${NO_WXME_SUFFIX}.rkt/" <<<"$1")"
         errcho "WXME file: $1"
         errcho "$(head -n 1 "$1")"
         # Use racket's wxme library to stream the plain-text content of
         # the editor-format file into $textf.
         racket -e "(require wxme)
(call-with-input-file \"$1\"
(λ (inp)
(call-with-output-file \"$textf\"
(λ (outp)
(when (is-wxme-stream? inp)
(copy-port (wxme-port->text-port inp) outp)))
#:exists 'truncate)))"
         echo "$textf"
    else echo "$1"
    fi
}
#
###
###
# `grep'ing through graded files

# graded_comments : [path] -> [grader-comments]
# Get all grader comments (lines marked ';;>') from the given file.
# Output: all grader comments via STDOUT
function graded_comments {
    grep -o ";;>.*" "$1"
}

# graded_grade : [path] -> [grade]
# What is the assigned grade in the given file?
# (Matches the numerator of a ';;> N/M' marker.)
# Output: the assigned grade via STDOUT, if found, otherwise nothing.
function graded_grade {
    grep -oP ';;> \K[0-9]+(?=/[0-9]+.*)' "$1"
}

# graded_students : [path] -> [student-ids]
# Who are the listed student authors of the given file?
# (Reads the first grader comment, which is expected to list the authors.)
# Output: the student names via STDOUT, if found, otherwise nothing.
function graded_students {
    graded_comments "$1" | head -n 1 | \
        grep -oP ';;> \K([[:alnum:]]| )+(?=[Nn]/[Aa])*'
}

# graded_hash : [path] -> [hash-of-grader-comments]
# Create a hash from the grader comments of the given file.
# Assumes that grader comments are unique. This is verified
# during `graded_hash_file' generation.
# Output: the hash via STDOUT.
function graded_hash {
    graded_comments "$1" | md5sum | awk '{ print $1 }'
}
#
###
###
# Core functionality
# graded_hash_file : [path] -> [hash-file-path]
# Create a distribution-ready file with a generated name. If any
# duplicate file names exist in the distribution directory, print
# duplicates and exit (can only be caused by duplicate grader
# comments).
# Output: the distribution-ready file path
function graded_hash_file {
    local ghash="$(graded_hash "$1")"
    # Create the distribution directory lazily on first use.
    [[ ! -d $DISTRDIR ]] && mkdir "${DISTRDIR}"
    local hashf="${DISTRDIR}/${ghash}.rkt"
    if [[ -f $hashf ]]
    then # Same hash: OK only if it is byte-for-byte the same file.
         if [[ "$(cat "$1" | md5sum)" == "$(cat "$hashf" | md5sum)" ]]
         then echo "$hashf"
         else errcho "Duplicate grader comments found (for $1 and $hashf)"
              errcho $(graded_comments "$1")
              exit 2
         fi
    else cp "$1" "$hashf" && echo "$hashf"
    fi
}
# interactive_grade : [file] [max-grade] -> [grade]
# Interactively searches for a grade in the given file $1, with a
# maximum grade of $2. Falls back to prompting the grader (optionally
# opening DrRacket) when no numeric grade marker is found.
# Output: The found or entered grade, via STDOUT.
function interactive_grade {
    errcho "    Looking for grade in '$1'"
    local grade=$(graded_grade "$1")
    [[ -n $grade ]] && errcho "    found grade: $grade"
    case $grade in
        ''|*[!0-9]*) # Grade is not a valid number!
            local resp
            # Offer to open file in DrRacket to manually look for the grade.
            read -e -p "
    Error: No grade found in $1, look for it manually [Y|n]? " resp
            case "$resp" in
                n|N) errcho "Not looking!" ;;
                *) errcho "Looking!" ; drracket "$1" ;;
            esac
            pushd $(dirname "$1") &>/dev/null
            >&2 echo -n "    Available files: "
            ls >&2
            read -e -p "    Manually enter grade [0/$2] or new file name: " resp
            popd &>/dev/null
            # If a file is given, attempt to grade that file.
            # If a numeric grade is manually given, return that.
            case "$resp" in
                ''|*[!0-9]*) [[ -f $resp ]] && interactive_grade "$resp" $2 || echo 0 ;;
                *) echo "$resp" ;;
            esac ;;
        *) echo "$grade" ;;
    esac
}
# auto_file : [submission-dir] [regex-pattern] -> [path]
# Attempts to automatically choose the correct file for the current
# sub-part inside the given directory $1 based on the given regex
# pattern $2. If there are zero or too many matching files, outputs
# nothing. Matching is case-insensitive (via the ${var,,} lowercasing).
# Output: If 1 matching file, the relative file path on STDOUT.
function auto_file {
    pushd "$1" &>/dev/null
    local auto_files=()
    # This search will have to be deeper for java source directories.
    for f in *.rkt
    do if [[ ${f,,} == ${2,,} || ${f,,} =~ ${2,,} ]]
       then auto_files+=("$f")
       fi
    done
    if [[ 0 -eq ${#auto_files[@]} ]] # 0 matches
    then errcho "    No matching ($2) files in $1"
    elif [[ 1 -eq ${#auto_files[@]} && -f ${auto_files[0]} ]] # 1 match
    then errcho "    Matching ($2) file: '${auto_files[0]}'"
         echo "${auto_files[0]}"
         # NOTE(review): the bare [[ ... ]] below evaluates a test whose
         # result is discarded -- it only documents the 2+ case.
    else [[ 1 -lt ${#auto_files[@]} ]] # 2+ matches
         errcho "    Too many matching files: '${auto_files[@]}'"
    fi
    popd &>/dev/null
}
# interactive_file : [submission-dir] [regex-pattern] -> [path]
# Interactively choose a file for the current subpart in the given
# submission directory $1 that should have matched the given regex
# pattern $2 (but didn't).
# NOTE: relies on $part_name being set by the caller (grade_parts) --
# bash's dynamic scoping makes the caller's local visible here.
# Output: The relative file path on STDOUT.
function interactive_file {
    local resp
    pushd "$1" &>/dev/null
    >&2 echo -n "
    Available files: "
    ls >&2
    read -e -p "    Enter file for $2 [skip if none]: " resp
    if [[ -z $resp ]]
    then errcho "    Missing ${part_name} for submission $1"
    else if [[ -f "$resp" ]]
         then echo "$resp"
         else errcho "    Not a ${part_name} file: $resp"
         fi
    fi
    popd &>/dev/null
}
# grade_parts : [submission-dir] -> [nothing]
# Grade each file of a student submission given a relative path to the
# submission's directory. Produces no output to be parsed, only
# logging and information messages.
# Side effects: appends one CSV line per student author to $CSVFILE and
# copies distribution-ready files into $DISTRDIR (via graded_hash_file).
function grade_parts {
    local students=()
    local urls=()
    local sub="$1"
    local grade=0
    for ((i=0;i<$NPARTS;i++))
    {
        local part_pattern="${SUBPARTS[$i]}"
        local sub_total="${SUBTOTALS[$i]}"
        local divisor="${DIVISORS[$i]}"
        # Select the proper file for this part of the assignment
        local file_name="$(auto_file "$sub" "$part_pattern")"
        if [[ ! -f "${sub}/${file_name}" ]]
        then file_name="$(interactive_file "${sub}" "${part_pattern}")"
        fi
        local file_path="${sub}/${file_name}"
        if [[ -f $file_path ]]
        then echo -n "."    # progress dots for the grader watching the run
             # Handle DrRacket wxme encoded files
             local txt_path="$(text_of_wxme "$file_path")"
             # Get the grade
             local subgrade=$(if [[ -z $txt_path ]]
                              then echo 0
                              else interactive_grade "$txt_path" $sub_total
                              fi)
             echo -n "."
             # look for student user ids for this submission
             if [[ 0 -eq ${#students[@]} ]]
             then students=($(graded_students "$txt_path"))
             fi
             echo -n "."
             # create hash file and URL
             local hashf="$(graded_hash_file "$txt_path")"
             echo -n "."
             urls+=("${URLBASE}/${hashf}")
             # An unset DIVISORS entry counts as a divisor of 1.
             grade="$((($subgrade / ${divisor:-1}) + $grade))"
        else errcho "    No valid file found for $part_pattern in $sub"
        fi
    }
    # last chance if no student ids found
    if [[ 0 -eq ${#students[@]} ]]
    then errcho "    No students found in '$sub', handle manually"
    fi
    echo -n "."
    # assigned grade sanity checks: 0 < $grade <= $TOTAL
    if [[ 0 -eq $grade ]]
    then errcho "    Actually a 0 for $sub (students: ${students[@]})...?"
    elif [[ $TOTAL -lt $grade ]]
    then errcho "    Grade $grade is greater than maximum $TOTAL for $sub (students: ${students[@]})..."
    fi
    # write CSV lines for each student
    for student in "${students[@]}"
    do echo "${student},${ASSIGN},${grade},${urls[@]}" >> "${CSVFILE}"
    done
    echo " wrote ${#students[@]} grade(s) for ${students[@]}"
}
#
###
###
# Main

# Delete any existing grades CSV, hashed files, or textified temporaries,
# so reruns start from a clean slate.
rm -rf "${CSVFILE}" "${DISTRDIR}" ${SUBS}/*/*${NO_WXME_SUFFIX}.rkt

# Cue up all student submissions to total.
set -- ${SUBS}/*
NSUBS=$#

# Grade each submission, with a [current/total] progress prefix.
while [[ $# -gt 0 ]]
do printf "[%02d/%02d] %s" $(($NSUBS - $# + 1)) $NSUBS "$1"
   grade_parts "$1"
   shift
done
|
# Build and stage the CUP2D 'blowfish' application for a smarties run.
# NOTE(review): assumes SMARTIES_ROOT and RUNDIR are exported by the
# calling launch script -- confirm before running standalone.

# compile executable, assumes that smarties and CUP2D are in the same directory:
COMPILEDIR=${SMARTIES_ROOT}/../CubismUP_2D/makefiles
# Setting SKIPMAKE=true skips the (slow) rebuild.
if [[ "${SKIPMAKE}" != "true" ]] ; then
make -C ${COMPILEDIR} blowfish -j4
fi

# copy executable:
cp ${COMPILEDIR}/blowfish ${RUNDIR}/exec

# copy simulation settings files:
cp runArguments* ${RUNDIR}/

# command line args to find app-required settings, each to be used for fixed
# number of steps so as to increase sim fidelity as training progresses
export EXTRA_LINE_ARGS=" --nStepPappSett 262144,262144,262144,0 \
--appSettings runArguments00.sh,runArguments01.sh,runArguments02.sh,runArguments03.sh "

# heavy application, needs dedicated processes
export MPI_RANKS_PER_ENV=1

# (kept for reference) alternative step settings and library paths:
#SETTINGS+=" --nStepPappSett 2097152,1048576,524288,0 "
#export LD_LIBRARY_PATH=/cluster/home/novatig/VTK-7.1.0/Build/lib/:$LD_LIBRARY_PATH
#cp ${HOME}/CubismUP_2D/makefiles/blowfish ${BASEPATH}${RUNFOLDER}/
def sortArray(arr):
    """Sort ``arr`` in place using selection sort and return it.

    On each pass the smallest remaining element is located and swapped
    into the next slot of the sorted prefix.
    """
    n = len(arr)
    for pos in range(n):
        smallest = pos
        for candidate in range(pos + 1, n):
            if arr[candidate] < arr[smallest]:
                smallest = candidate
        # A self-swap is a no-op, so skipping it changes nothing observable.
        if smallest != pos:
            arr[pos], arr[smallest] = arr[smallest], arr[pos]
    return arr
#!/bin/bash
# Compare a test's output file against its oracle (ignoring each file's
# final statistics line), then verify the optimization quality encoded in
# that final line.
#
# Usage: <script> <oracleFile> <outputFile>
# Exits 0 on success, 1 on any failure.
allInputs=( "$@" ) ;
oracleFile=${allInputs[0]} ;
outputFile=${allInputs[1]} ;
echo "Checking $oracleFile against $outputFile" ;
# Both files must exist before anything can be compared.
if ! test -f "$oracleFile" ; then
  echo " Test failed" ;
  echo " File \"$oracleFile\" is missing" ;
  exit 1;
fi
if ! test -f "$outputFile" ; then
  echo " Test failed" ;
  echo " File \"$outputFile\" is missing" ;
  exit 1;
fi
# Copy the output and oracle files to temporary files so the originals are
# untouched; clean the copies up on every exit path (they used to leak).
outFileToCheck="$(mktemp)" ;
oracleFileToCheck="$(mktemp)" ;
trap 'rm -f "$outFileToCheck" "$oracleFileToCheck"' EXIT
cp "$oracleFile" "$oracleFileToCheck" ;
cp "$outputFile" "$outFileToCheck" ;
# Strip out the last lines to both files (they hold run statistics that
# are compared separately below).
../misc/remove_last_line.sh "$oracleFileToCheck" ;
../misc/remove_last_line.sh "$outFileToCheck" ;
# Check the output
diffOut=$(diff "$oracleFileToCheck" "$outFileToCheck") ;
if test "$diffOut" != "" ; then
  # BUG FIX: the failure message referenced the undefined $fileName;
  # report the actual output file instead.
  echo " Test failed because output $outputFile isn't correct" ;
  echo " Output differences can be found in \"diff\"" ;
  mkdir -p diff ;
  # $i may be inherited from a caller's loop; fall back to the PID so
  # standalone/concurrent runs don't all write "diff/_diff_output".
  echo "$diffOut" > "diff/${i:-$$}_diff_output" ;
  exit 1;
fi
echo "Test passed!" ;
# Check the optimization: field 4 of the last line is the number of CAT
# invocations; the generated bitcode must not exceed the oracle's count.
maxInvocations=$(tail -n 1 "$oracleFile" | awk '{print $4}') ;
currentInvocations=$(tail -n 1 "$outputFile" | awk '{print $4}') ;
enoughOpt=$(echo "$currentInvocations <= $maxInvocations" | bc) ;
if test "$enoughOpt" == "0" ; then
  echo " Test failed because there are too many CAT invocations left in the generated bitcode" ;
  echo " The maximum number of CAT invocations are $maxInvocations and the generated bitcode has $currentInvocations" ;
  exit 1;
fi
echo "Test passed!" ;
|
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

# Runs the Go test suite, optionally rebuilding everything first.
set -e
if [ "$1" = "--no-env" ]; then
	# caller has already run env.bash
	shift
else
	. ./env.bash
fi
unset MAKEFLAGS  # single-threaded make
unset CDPATH	# in case user has it set
# no core files, please
ulimit -c 0
# allow make.bash to avoid double-build of everything
rebuild=true
if [ "$1" = "--no-rebuild" ]; then
	rebuild=false
	shift
fi
# xcd: announce a test section and enter $GOROOT/src/<dir>.
# `builtin cd` sidesteps any user-defined cd wrapper function.
xcd() {
	printf '\n--- cd %s\n' "$1"
	builtin cd "$GOROOT"/src/$1
}
# Rebuild and reinstall all packages first unless --no-rebuild was given.
if $rebuild; then
	(xcd pkg
	gomake clean
	time gomake install
	# FIX: was `exit $i` -- $i is never set at this point, so a failed
	# rebuild exited with status 0 and the suite kept going. Propagate the
	# subshell's status with $? like every other section below.
	) || exit $?
fi

(xcd pkg
gomake testshort
) || exit $?

(xcd pkg/sync;
GOMAXPROCS=10 gomake testshort
) || exit $?

(xcd cmd/ebnflint
time gomake test
) || exit $?

# cgo tests: skipped on arm, and (for stdio/test) on windows hosts.
[ "$GOARCH" == arm ] ||
[ "$GOHOSTOS" == windows ] ||
(xcd ../misc/cgo/stdio
gomake clean
./test.bash
) || exit $?

[ "$GOARCH" == arm ] ||
(xcd ../misc/cgo/life
gomake clean
./test.bash
) || exit $?

[ "$GOARCH" == arm ] ||
[ "$GOHOSTOS" == windows ] ||
(xcd ../misc/cgo/test
gomake clean
gotest
) || exit $?

(xcd pkg/exp/ogle
gomake clean
time gomake ogle
) || exit $?

(xcd ../doc/progs
time ./run
) || exit $?

[ "$GOARCH" == arm ] || # uses network, fails under QEMU
(xcd ../doc/codelab/wiki
gomake clean
gomake
gomake test
) || exit $?

# Build-only sanity checks for auxiliary tools.
for i in ../misc/dashboard/builder ../misc/goplay
do
	(xcd $i
	gomake clean
	gomake
	) || exit $?
done

[ "$GOARCH" == arm ] ||
(xcd ../test/bench
./timing.sh -test
) || exit $?

[ "$GOHOSTOS" == windows ] ||
(xcd ../test
./run
) || exit $?

echo
echo ALL TESTS PASSED
|
/*
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
*/
package dns
import (
"fmt"
"time"
"github.com/miekg/dns"
"github.com/pinterest/bender"
protocol "github.com/pinterest/bender/dns"
)
// Tester is a load tester for DNS.
// NOTE(review): the previous comment said "DHCPv6" -- a copy-paste
// leftover; every field and method here drives the miekg/dns client.
type Tester struct {
	Target string         // address ("host:port") of the DNS server under test
	Timeout time.Duration // applied to dial, read and write (see Before)
	client *dns.Client    // shared client, created once in Before
}
// Before is called once before the first test; it builds the shared DNS
// client, applying t.Timeout to dialing, reading and writing alike.
func (t *Tester) Before(options interface{}) error {
	client := new(dns.Client)
	client.ReadTimeout = t.Timeout
	client.DialTimeout = t.Timeout
	client.WriteTimeout = t.Timeout
	t.client = client
	return nil
}
// After is called after all tests are finished; nothing to tear down.
func (t *Tester) After(_ interface{}) {}

// BeforeEach is called before every test; no per-request setup is needed.
func (t *Tester) BeforeEach(_ interface{}) error {
	return nil
}

// AfterEach is called after every test; no per-request cleanup is needed.
func (t *Tester) AfterEach(_ interface{}) {}
// validator checks that a DNS response belongs to the request that
// produced it by comparing message IDs.
func validator(request, response *dns.Msg) error {
	if request.Id != response.Id {
		// FIX: the got/want values were swapped -- the *response's* ID is
		// the invalid one, and the request's ID is what was expected.
		return fmt.Errorf("invalid response id: %d, want: %d", response.Id, request.Id)
	}
	return nil
}
// RequestExecutor returns a request executor that sends each request with
// the shared client (built in Before) against t.Target and validates the
// response ID via validator.
func (t *Tester) RequestExecutor(options interface{}) (bender.RequestExecutor, error) {
	return protocol.CreateExecutor(t.client, validator, t.Target), nil
}
|
<gh_stars>0
const Discord = require('discord.js')
module.exports = {
name: '8ball',
description: 'Ask a question, Get an answer',
aliases: ['ask','q'],
async execute (message, args) {
try{
if (!args[0]) {
const logMessage = new Discord.RichEmbed()
.setColor(0x00e6e6)
.setDescription("You forgot to ask your question.. Try again with ?8ball [question]")
message.channel.send(logMessage);
} else {
let AnswerMessage = message.client.eris.getRandomMessage('8ballCommand', 'okay')
let logMessage = new Discord.RichEmbed()
.setTitle("8Ball Response")
.setColor(0x00AAAA)
.setDescription(AnswerMessage)
message.channel.send(logMessage);
}
}catch (e) {
let logMessage = new Discord.RichEmbed()
.setTitle("Can't Answer Question")
.setColor(0xAA00AA)
.setDescription(e)
message.channel.send(message.client.eris.getRandomMessage('8ballCommand', 'error'), logMessage)
}
}
}
/* Author : Tommin */
/* Date : May 25 2018 */
|
#! /usr/bin/bash
# Emits a single OWL functional-syntax document (on stdout) from an RF2
# SNOMED Snapshot OWL refset, appending an rdfs:label annotation (the FSN)
# for every active concept.
# Note: Join assumes the file is sorted
# Run this in an RF2 SNOMED Snapshot/Terminology directory
OWLFILE=sct2_sRefset_OWLExpressionSnapshot_*.txt
DESCFILE=sct2_Description_Snapshot-*.txt
CIDFILE=$(mktemp)
RAWOWLFILE=$(mktemp)
for OWLFILE in sct2_sRefset_OWLExpressionSnapshot_*.txt; do
[ -e "$OWLFILE" ] || continue
for DESCFILE in sct2_Description_Snapshot-*.txt; do
[ -e "$DESCFILE" ] || continue
# Extract all the OWL -- only works on Snapshot
# field 3 is active flag, field 7 is actual OWL
cut -f3,7 $OWLFILE | grep ^1 | cut -f2 > $RAWOWLFILE
# 1: Emit prefixes
grep ^Prefix $RAWOWLFILE
# 2: Emit ontology header (sans closing paren)
# Note: This assumes only ONE paren -- '$' doesn't work on Mac sed
grep -h ^Ontology $RAWOWLFILE | sed "s/)//"
# 3: Emit everything else
grep -v ^Ontology $RAWOWLFILE | grep -v ^Prefix
# Add FSNs
# Extract all of the active concept identifiers
cut -f3,6 $OWLFILE | grep ^1 | cut -f2 | sort > $CIDFILE
# We also need the root concept, which is NOT the subject of any refset entries
echo "138875005" >> $CIDFILE
# Build AnnotationAssertion lines: keep active FSN descriptions
# (typeId 900000000000003001), escape quotes, join against the active
# concept ids, then rewrite "<id> <lang> <term>" into OWL syntax.
#    1           2         *3*      4       *5*        *6*        *7*    *8*     9
#    id efectiveTime  active moduleId conceptId languageCode typeId term caseSignificanceId
cut -f3,5-8 $DESCFILE | grep ^1 | cut -f2-5 | grep "	900000000000003001	" | cut -f1,2,4 | sort | sed 's/\"/\\\"/g' | join - $CIDFILE | sed "s/^\([^ ]*\) \([^ ]*\) \(.*\)/AnnotationAssertion(rdfs:label :\1 \"\3\"@\2)/"
# Emit closing bracket
echo ")"
rm $CIDFILE $RAWOWLFILE
done
done
|
def f(n):
    """Recursively compute the sum 0 + 1 + ... + n (the n-th triangular number)."""
    return 0 if n == 0 else n + f(n - 1)

print(f(4))
.data
value1: .word 18
value2: .word 10
.text
main:
    # FIX: use memory operands, not $-immediates -- `mov $value1, %eax`
    # loaded the *address* of value1, so the compare was on addresses.
    mov value1, %eax           # eax = value1
    mov value2, %ebx           # ebx = value2
    cmp %eax, %ebx             # AT&T order: computes ebx - eax (value2 - value1)
    jb greater                 # FIX: was `ja`. CF=1 means value2 < value1,
                               # i.e. value1 is the max -> take the greater path.
    jmp result                 # otherwise ebx already holds the maximum
greater:                       # value1 > value2
    mov %eax, %ebx             # replace value2 with value1
result:                        # return the maximum of the two numbers in %eax
    mov %ebx, %eax
    ret
#!/usr/bin/env bash
# Builds a release dub binary and packages it as
# bin/dub-<version>-<os>-<arch>.tar.gz.
set -eux -o pipefail
VERSION=$(git describe --abbrev=0 --tags)
ARCH="${ARCH:-64}"
CUSTOM_FLAGS=()
# Pick OS-specific linker flags.
unameOut="$(uname -s)"
case "$unameOut" in
    Linux*)
        OS=linux
        CUSTOM_FLAGS+=("-L--export-dynamic")
        ;;
    Darwin*)
        OS=osx
        CUSTOM_FLAGS+=("-L-dead_strip")
        ;;
    *) echo "Unknown OS: $unameOut"; exit 1
esac
# When building with LDC (ldmd wrapper), enable full LTO.
if [[ $(basename "$DMD") =~ ldmd.* ]] ; then
    CUSTOM_FLAGS+=("-flto=full")
    # ld.gold is required on Linux
    if [ ${OS:-} == "linux" ] ; then
        CUSTOM_FLAGS+=("-linker=gold")
    fi
fi
case "$ARCH" in
    64) ARCH_SUFFIX="x86_64";;
    32) ARCH_SUFFIX="x86";;
    *) echo "Unknown ARCH: $ARCH"; exit 1
esac
archiveName="dub-$VERSION-$OS-$ARCH_SUFFIX.tar.gz"
echo "Building $archiveName"
DFLAGS="-release -m$ARCH ${CUSTOM_FLAGS[@]}" DMD="$(command -v $DMD)" ./build.sh
tar cvfz "bin/$archiveName" -C bin dub
|
#!/bin/bash
# Builds a MySQL replication test topology and introduces heterogeneity
# (mixed binlog formats, an intermediate master, permanent lag) for testing.
# create test cluster: M-19S
make scale n=15
make load_schema
make discover
# change ROW based replication to MIXED for some replicas
for i in {10..12}; do
docker exec -it replica${i} mysql -e "STOP SLAVE; SET @@GLOBAL.BINLOG_FORMAT=MIXED; START SLAVE;"
done
# add an intermediate master: replica6 now replicates from replica5
docker exec -it replica6 mysql -e "STOP SLAVE; CHANGE MASTER TO MASTER_HOST='replica5', MASTER_PORT=3306, MASTER_USER='repl', MASTER_PASSWORD='repl', MASTER_AUTO_POSITION=1; START SLAVE;"
# add permanent replication lag (one hour) on replica9
docker exec -it replica9 mysql -e "STOP SLAVE; CHANGE MASTER TO MASTER_DELAY = 3600; START SLAVE;"
|
import React, { useEffect, useState } from 'react';
import axios from 'axios';
function App() {
const [stocks, setStocks] = useState([]);
useEffect(() => {
const getStockData = async () => {
const stocksReq = await axios.get("https://api.iextrading.com/1.0/tops?symbols=GOOGL,MSFT,AAPL")
setStocks(stocksReq.data);
}
getStockData();
}, [])
return (
<>
<h1>Stock Visualizer</h1>
<ul>
{stocks.map(stock => (
<li key={stock.symbol}>
{stock.symbol}: ${stock.price}
({stock.change})
</li>
))}
</ul>
</>
)
}
export default App; |
<reponame>thi4go/dcrtimegui<gh_stars>1-10
// Result codes for a submitted digest/file (presumably mirroring the
// dcrtime backend's status values -- confirm against the API).
export const SUCCESS = 0;
export const FILE_ALREADY_EXISTS = 1;
export const FILE_DOES_NOT_EXIST = 2;
export const DISABLED = 3;
export const INVALID = 4;
// Sentinel for "no digest entered yet".
export const EMPTY_DIGEST = "";
// Block-explorer base URL, chosen by the configured network.
export const DCRDATA_URL =
process.env.REACT_APP_NETWORK === "testnet"
? "https://testnet.dcrdata.org"
: "https://explorer.dcrdata.org";
|
#! /bin/bash
# Runs one TurtleBot3 DQN training session: kill any stale ROS processes on
# the robot (TB3) and this laptop, bring the robot up over SSH, run the
# stage-1 DQN launch file, then tear everything down again.

TB3="tb304@192.168.1.104"

# Kill every ROS-related process on the TB3 and on this laptop.
# (Extracted: this identical kill sequence used to be pasted twice.)
cleanup() {
    # kill in TB3 (same processes, same order as before)
    for proc in bash bringup.sh python hlds_laser_publisher \
                turtlebot3_diagnostics sshd raspi_camera.sh \
                roslaunch raspicam_node
    do ssh ${TB3} "killall -9 ${proc}"
    done
    # Kill in LAPTOP
    killall -9 ssh
    killall -9 roslaunch
    killall -9 rosout
    killall -9 rosmaster
}

# start from a clean slate
cleanup
source /opt/ros/kinetic/setup.bash
source ~/catkin_ws/devel/setup.bash
# run master
roscore &
sleep 5
# bringup in TB3
ssh ${TB3} 'source /opt/ros/kinetic/setup.bash && export ROS_MASTER_URI=http://192.168.1.124:11311 && export ROS_HOSTNAME=192.168.1.104 && export TURTLEBOT3_MODEL=burger && source ~/catkin_ws/devel/setup.bash && ~/catkin_ws/src/tb3_shell_scripts/./bringup.sh' &
sleep 20
# machine learning
roslaunch turtlebot3_dqn turtlebot3_dqn_stage_1_robot.launch
sleep 10
# tear everything down once training exits
cleanup
echo "Machine learning complete"
|
#!/bin/sh
# Run the priority-queue test binary and diff its output against the
# expected transcript (-b ignores whitespace differences); any mismatch
# or failure aborts via `set -e`.
set -e
TEST=./pq_test
# Windows builds produce an .exe suffix.
if [ -e ./pq_test.exe ]; then
TEST=./pq_test.exe
fi
$TEST | diff -b $srcdir/pq_expected.txt -
|
<reponame>smagill/opensphere-desktop<gh_stars>10-100
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, vJAXB 2.1.10 in JDK 6
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2010.01.26 at 12:20:41 PM MST
//
package net.opengis.wms_130;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element ref="{http://www.opengis.net/wms}AddressType"/>
* <element ref="{http://www.opengis.net/wms}Address"/>
* <element ref="{http://www.opengis.net/wms}City"/>
* <element ref="{http://www.opengis.net/wms}StateOrProvince"/>
* <element ref="{http://www.opengis.net/wms}PostCode"/>
* <element ref="{http://www.opengis.net/wms}Country"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
"addressType",
"address",
"city",
"stateOrProvince",
"postCode",
"country"
})
@XmlRootElement(name = "ContactAddress")
public class ContactAddress {
    // JAXB-generated binding for the WMS 1.3.0 <ContactAddress> element.
    // NOTE: regenerating from the schema will overwrite manual edits here.

    /** Kind of address (e.g. postal); required by the schema. */
    @XmlElement(name = "AddressType", required = true)
    protected String addressType;
    /** Street address line; required by the schema. */
    @XmlElement(name = "Address", required = true)
    protected String address;
    @XmlElement(name = "City", required = true)
    protected String city;
    @XmlElement(name = "StateOrProvince", required = true)
    protected String stateOrProvince;
    @XmlElement(name = "PostCode", required = true)
    protected String postCode;
    @XmlElement(name = "Country", required = true)
    protected String country;

    /**
     * Gets the value of the addressType property.
     *
     * @return possible object is {@link String }
     */
    public String getAddressType() {
        return addressType;
    }

    /**
     * Sets the value of the addressType property.
     *
     * @param value allowed object is {@link String }
     */
    public void setAddressType(String value) {
        this.addressType = value;
    }

    /**
     * Gets the value of the address property.
     *
     * @return possible object is {@link String }
     */
    public String getAddress() {
        return address;
    }

    /**
     * Sets the value of the address property.
     *
     * @param value allowed object is {@link String }
     */
    public void setAddress(String value) {
        this.address = value;
    }

    /**
     * Gets the value of the city property.
     *
     * @return possible object is {@link String }
     */
    public String getCity() {
        return city;
    }

    /**
     * Sets the value of the city property.
     *
     * @param value allowed object is {@link String }
     */
    public void setCity(String value) {
        this.city = value;
    }

    /**
     * Gets the value of the stateOrProvince property.
     *
     * @return possible object is {@link String }
     */
    public String getStateOrProvince() {
        return stateOrProvince;
    }

    /**
     * Sets the value of the stateOrProvince property.
     *
     * @param value allowed object is {@link String }
     */
    public void setStateOrProvince(String value) {
        this.stateOrProvince = value;
    }

    /**
     * Gets the value of the postCode property.
     *
     * @return possible object is {@link String }
     */
    public String getPostCode() {
        return postCode;
    }

    /**
     * Sets the value of the postCode property.
     *
     * @param value allowed object is {@link String }
     */
    public void setPostCode(String value) {
        this.postCode = value;
    }

    /**
     * Gets the value of the country property.
     *
     * @return possible object is {@link String }
     */
    public String getCountry() {
        return country;
    }

    /**
     * Sets the value of the country property.
     *
     * @param value allowed object is {@link String }
     */
    public void setCountry(String value) {
        this.country = value;
    }
}
|
package org.hiro.map;
import org.hiro.Util;
import java.util.LinkedList;
import java.util.List;
/*
* Coordinate data type
* 座標
*/
/**
 * Two-dimensional coordinate value type.
 *
 * <p>Equality and hashing are based on the (x, y) pair; z is fixed at 0.
 */
public class TwoDimensionsCoordinate implements AbstractCoordinate {

    private int x;
    private int y;

    /** Creates the origin (0, 0). */
    public TwoDimensionsCoordinate() {
        this.x = 0;
        this.y = 0;
    }

    TwoDimensionsCoordinate(int x, int y) {
        this.x = x;
        this.y = y;
    }

    /**
     * Returns a random coordinate strictly inside the rectangle that starts
     * at {@code position} and spans {@code size} (borders excluded).
     *
     * @throws IllegalArgumentException if either argument is not a
     *         {@link TwoDimensionsCoordinate}
     */
    @Override
    public AbstractCoordinate random(AbstractCoordinate position, AbstractCoordinate size) {
        if (!(position instanceof TwoDimensionsCoordinate)) {
            throw new IllegalArgumentException("");
        }
        if (!(size instanceof TwoDimensionsCoordinate)) {
            throw new IllegalArgumentException("");
        }
        int _x = ((TwoDimensionsCoordinate) position).x + Util.rnd(((TwoDimensionsCoordinate) size).x - 2) + 1;
        // BUG FIX: the y offset was previously based on position.x (copy-paste
        // error), producing random points anchored at the wrong row.
        int _y = ((TwoDimensionsCoordinate) position).y + Util.rnd(((TwoDimensionsCoordinate) size).y - 2) + 1;
        return new TwoDimensionsCoordinate(_x, _y);
    }

    /**
     * Returns a new coordinate equal to the component-wise sum of this
     * coordinate and {@code coordinate}.
     *
     * @throws RuntimeException if {@code coordinate} is not a
     *         {@link TwoDimensionsCoordinate}
     */
    @Override
    public AbstractCoordinate add(AbstractCoordinate coordinate) {
        // BUG FIX: the instanceof test was inverted, so the method threw for
        // exactly the type it is meant to accept (and would have fallen
        // through to a ClassCastException for any other type).
        if (!(coordinate instanceof TwoDimensionsCoordinate)) {
            throw new RuntimeException("型制限");
        }
        TwoDimensionsCoordinate c = (TwoDimensionsCoordinate) coordinate;
        return new TwoDimensionsCoordinate(this.x + c.x, this.y + c.y);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj instanceof TwoDimensionsCoordinate) {
            TwoDimensionsCoordinate c = (TwoDimensionsCoordinate) obj;
            return this.x == c.x && this.y == c.y;
        } else {
            return false;
        }
    }

    @Override
    public int hashCode() {
        return this.y * 31 + this.x;
    }

    /** Sets both components at once. */
    public void setDimensions(int x, int y) {
        this.x = x;
        this.y = y;
    }

    /** Returns the eight neighbouring coordinates (Moore neighbourhood). */
    @Override
    public List<AbstractCoordinate> near() {
        List<AbstractCoordinate> result = new LinkedList<>();
        result.add(new TwoDimensionsCoordinate(this.x - 1, this.y - 1));
        result.add(new TwoDimensionsCoordinate(this.x - 1, this.y));
        result.add(new TwoDimensionsCoordinate(this.x - 1, this.y + 1));
        result.add(new TwoDimensionsCoordinate(this.x, this.y - 1));
        result.add(new TwoDimensionsCoordinate(this.x, this.y + 1));
        result.add(new TwoDimensionsCoordinate(this.x + 1, this.y - 1));
        result.add(new TwoDimensionsCoordinate(this.x + 1, this.y));
        result.add(new TwoDimensionsCoordinate(this.x + 1, this.y + 1));
        return result;
    }

    @Override
    public int getX() {
        return this.x;
    }

    @Override
    public int getY() {
        return this.y;
    }

    /** Always 0: this coordinate type has no third dimension. */
    @Override
    public int getZ() {
        return 0;
    }

    @Override
    public void setX(int x) {
        this.x = x;
    }

    @Override
    public void setY(int y) {
        this.y = y;
    }

    /** No-op: this coordinate type has no third dimension. */
    @Override
    public void setZ(int z) {
    }
}
|
<reponame>bhadresh3/AngularSpring
import { Component, OnInit } from '@angular/core';
import { Person } from '../person';
import { ActivatedRoute, Router } from '@angular/router';
import { PersonService } from '../person-service';
@Component({
selector: 'app-person-form',
templateUrl: './person-form.component.html',
styleUrls: ['./person-form.component.scss']
})
export class PersonFormComponent implements OnInit {

  // Model bound to the template's form fields.
  person: Person;

  constructor(
    private route: ActivatedRoute,
    private router: Router,
    private personService: PersonService
  ) {
    this.person = new Person();
  }

  ngOnInit(): void {
  }

  // Persist the form's person, then return to the list view.
  onSubmit() {
    this.personService.save(this.person).subscribe(() => this.routChange());
  }

  routChange() {
    this.router.navigate(['/people']);
  }
}
|
#!/bin/sh
# -*- mode: Python -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":"
# bash code here; finds a suitable python interpreter and execs this file.
# prefer unqualified "python" if suitable:
python -c 'import sys; sys.exit(not (0x020700b0 < sys.hexversion < 0x03000000))' 2>/dev/null \
&& exec python "$0" "$@"
for pyver in 2.7; do
which python$pyver > /dev/null 2>&1 && exec python$pyver "$0" "$@"
done
echo "No appropriate python interpreter found." >&2
exit 1
":"""
from __future__ import with_statement

import cmd
import codecs
import ConfigParser
import csv
import getpass
import optparse
import os
import platform
import sys
import traceback
import warnings
import webbrowser

from StringIO import StringIO
from contextlib import contextmanager
from glob import glob
from uuid import UUID

# Hard interpreter requirement: this file is Python-2.7-only code
# (print statements, old-style except clauses, ConfigParser, StringIO).
if sys.version_info[0] != 2 or sys.version_info[1] != 7:
    sys.exit("\nCQL Shell supports only Python 2.7\n")

# see CASSANDRA-10428
if platform.python_implementation().startswith('Jython'):
    sys.exit("\nCQL Shell does not run on Jython\n")
UTF8 = 'utf-8'
CP65001 = 'cp65001'  # Win utf-8 variant

description = "CQL Shell for Apache Cassandra"
version = "5.0.1"

readline = None
try:
    # check if tty first, cause readline doesn't check, and only cares
    # about $TERM. we don't want the funky escape code stuff to be
    # output if not a tty.
    if sys.stdin.isatty():
        import readline
except ImportError:
    pass

CQL_LIB_PREFIX = 'cassandra-driver-internal-only-'

#CASSANDRA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
# NOTE(review): unlike stock cqlsh, this copy requires $CASSANDRA_HOME to be
# set and will raise KeyError at startup if it is not.
CASSANDRA_PATH = os.environ['CASSANDRA_HOME']
CASSANDRA_CQL_HTML_FALLBACK = 'https://cassandra.apache.org/doc/cql3/CQL-3.2.html'

# Prefer local CQL documentation when present, else fall back to the web copy.
if os.path.exists(CASSANDRA_PATH + '/doc/cql3/CQL.html'):
    # default location of local CQL.html
    CASSANDRA_CQL_HTML = 'file://' + CASSANDRA_PATH + '/doc/cql3/CQL.html'
elif os.path.exists('/usr/share/doc/cassandra/CQL.html'):
    # fallback to package file
    CASSANDRA_CQL_HTML = 'file:///usr/share/doc/cassandra/CQL.html'
else:
    # fallback to online version
    CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK

# On Linux, the Python webbrowser module uses the 'xdg-open' executable
# to open a file/URL. But that only works, if the current session has been
# opened from _within_ a desktop environment. I.e. 'xdg-open' will fail,
# if the session's been opened via ssh to a remote box.
#
# Use 'python' to get some information about the detected browsers.
# >>> import webbrowser
# >>> webbrowser._tryorder
# >>> webbrowser._browser
#
if len(webbrowser._tryorder) == 0:
    # no browser at all: only the online docs can be offered
    CASSANDRA_CQL_HTML = CASSANDRA_CQL_HTML_FALLBACK
elif webbrowser._tryorder[0] == 'xdg-open' and os.environ.get('XDG_DATA_DIRS', '') == '':
    # only on Linux (some OS with xdg-open)
    # no desktop session detected: demote xdg-open to last resort
    webbrowser._tryorder.remove('xdg-open')
    webbrowser._tryorder.append('xdg-open')

# use bundled libs for python-cql and thrift, if available. if there
# is a ../lib dir, use bundled libs there preferentially.
ZIPLIB_DIRS = [os.path.join(CASSANDRA_PATH, 'lib')]
myplatform = platform.system()
is_win = myplatform == 'Windows'

# Workaround for supporting CP65001 encoding on python < 3.3 (https://bugs.python.org/issue13216)
if is_win and sys.version_info < (3, 3):
    codecs.register(lambda name: codecs.lookup(UTF8) if name == CP65001 else None)

if myplatform == 'Linux':
    ZIPLIB_DIRS.append('/usr/share/cassandra/lib')

if os.environ.get('CQLSH_NO_BUNDLED', ''):
    ZIPLIB_DIRS = ()
def find_zip(libprefix):
    """Locate a bundled zip whose basename starts with ``libprefix``.

    Searches each directory in ZIPLIB_DIRS and returns the lexically
    greatest match (probably the highest version, if multiple), or None
    when no bundled copy exists.
    """
    for ziplibdir in ZIPLIB_DIRS:
        matches = glob(os.path.join(ziplibdir, libprefix + '*.zip'))
        if matches:
            return max(matches)
    return None
# Put the bundled cassandra driver (and its third-party dependencies) on
# sys.path ahead of any system-installed copies.
cql_zip = find_zip(CQL_LIB_PREFIX)
if cql_zip:
    # zip name encodes the driver version; path inside the zip includes it
    ver = os.path.splitext(os.path.basename(cql_zip))[0][len(CQL_LIB_PREFIX):]
    sys.path.insert(0, os.path.join(cql_zip, 'cassandra-driver-' + ver))
third_parties = ('futures-', 'six-')

for lib in third_parties:
    lib_zip = find_zip(lib)
    if lib_zip:
        sys.path.insert(0, lib_zip)

warnings.filterwarnings("ignore", r".*blist.*")

# Fail with an actionable message if the driver still cannot be imported.
try:
    import cassandra
except ImportError, e:
    sys.exit("\nPython Cassandra driver not installed, or not on PYTHONPATH.\n"
             'You might try "pip install cassandra-driver".\n\n'
             'Python: %s\n'
             'Module load path: %r\n\n'
             'Error: %s\n' % (sys.executable, sys.path, e))
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.cqltypes import cql_typename
from cassandra.marshal import int64_unpack
from cassandra.metadata import (ColumnMetadata, KeyspaceMetadata,
TableMetadata, protect_name, protect_names)
from cassandra.policies import WhiteListRoundRobinPolicy
from cassandra.query import SimpleStatement, ordered_dict_factory, TraceUnavailable
from cassandra.util import datetime_from_timestamp
# cqlsh should run correctly when run out of a Cassandra source tree,
# out of an unpacked Cassandra tarball, and after a proper package install.
cqlshlibdir = os.path.join(CASSANDRA_PATH, 'pylib')
if os.path.isdir(cqlshlibdir):
sys.path.insert(0, cqlshlibdir)
from cqlshlib import cql3handling, cqlhandling, pylexotron, sslhandling, cqlshhandling
from cqlshlib.copyutil import ExportTask, ImportTask
from cqlshlib.displaying import (ANSI_RESET, BLUE, COLUMN_NAME_COLORS, CYAN,
RED, WHITE, FormattedValue, colorme)
from cqlshlib.formatting import (DEFAULT_DATE_FORMAT, DEFAULT_NANOTIME_FORMAT,
DEFAULT_TIMESTAMP_FORMAT, CqlType, DateTimeFormat,
format_by_type, formatter_for)
from cqlshlib.tracing import print_trace, print_trace_session
from cqlshlib.util import get_file_encoding_bomsize, trim_if_present
# Connection and display defaults; most can be overridden via cqlshrc or
# command-line options.
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 9042
DEFAULT_SSL = False
DEFAULT_CONNECT_TIMEOUT_SECONDS = 5
DEFAULT_REQUEST_TIMEOUT_SECONDS = 10

DEFAULT_FLOAT_PRECISION = 5
DEFAULT_DOUBLE_PRECISION = 5
DEFAULT_MAX_TRACE_WAIT = 10

# libedit-based readline (e.g. macOS) uses a different binding syntax for tab.
if readline is not None and readline.__doc__ is not None and 'libedit' in readline.__doc__:
    DEFAULT_COMPLETEKEY = '\t'
else:
    DEFAULT_COMPLETEKEY = 'tab'

# Populated later, once the CQL version in use is known.
cqldocs = None
cqlruleset = None
epilog = """Connects to %(DEFAULT_HOST)s:%(DEFAULT_PORT)d by default. These
defaults can be changed by setting $CQLSH_HOST and/or $CQLSH_PORT. When a
host (and optional port number) are given on the command line, they take
precedence over any defaults.""" % globals()
parser = optparse.OptionParser(description=description, epilog=epilog,
usage="Usage: %prog [options] [host [port]]",
version='cqlsh ' + version)
parser.add_option("-C", "--color", action='store_true', dest='color',
help='Always use color output')
parser.add_option("--no-color", action='store_false', dest='color',
help='Never use color output')
parser.add_option("--browser", dest='browser', help="""The browser to use to display CQL help, where BROWSER can be:
- one of the supported browsers in https://docs.python.org/2/library/webbrowser.html.
- browser path followed by %s, example: /usr/bin/google-chrome-stable %s""")
parser.add_option('--ssl', action='store_true', help='Use SSL', default=False)
parser.add_option('--no_compact', action='store_true', help='No Compact', default=False)
parser.add_option("-u", "--username", help="Authenticate as user.")
parser.add_option("-p", "--password", help="Authenticate using password.")
parser.add_option('-k', '--keyspace', help='Authenticate to the given keyspace.')
parser.add_option("-f", "--file", help="Execute commands from FILE, then exit")
parser.add_option('--debug', action='store_true',
help='Show additional debugging information')
parser.add_option("--encoding", help="Specify a non-default encoding for output." +
" (Default: %s)" % (UTF8,))
parser.add_option("--cqlshrc", help="Specify an alternative cqlshrc file location.")
parser.add_option('--cqlversion', default=None,
help='Specify a particular CQL version, '
'by default the highest version supported by the server will be used.'
' Examples: "3.0.3", "3.1.0"')
parser.add_option("--protocol-version", type="int", default=None,
help='Specify a specific protcol version otherwise the client will default and downgrade as necessary')
parser.add_option("-e", "--execute", help='Execute the statement and quit.')
parser.add_option("--connect-timeout", default=DEFAULT_CONNECT_TIMEOUT_SECONDS, dest='connect_timeout',
help='Specify the connection timeout in seconds (default: %default seconds).')
parser.add_option("--request-timeout", default=DEFAULT_REQUEST_TIMEOUT_SECONDS, dest='request_timeout',
help='Specify the default request timeout in seconds (default: %default seconds).')
parser.add_option("-t", "--tty", action='store_true', dest='tty',
help='Force tty mode (command prompt).')
parser.add_option("-d", "--decompression", default=None, help='')
optvalues = optparse.Values()
(options, arguments) = parser.parse_args(sys.argv[1:], values=optvalues)
# BEGIN history/config definition
HISTORY_DIR = os.path.expanduser(os.path.join('~', '.cassandra'))

# Resolve the cqlshrc path: honor --cqlshrc when given (warning if missing),
# otherwise default to ~/.cassandra/cqlshrc.
if hasattr(options, 'cqlshrc'):
    CONFIG_FILE = options.cqlshrc
    if not os.path.exists(CONFIG_FILE):
        print '\nWarning: Specified cqlshrc location `%s` does not exist. Using `%s` instead.\n' % (CONFIG_FILE, HISTORY_DIR)
        CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')
else:
    CONFIG_FILE = os.path.join(HISTORY_DIR, 'cqlshrc')

HISTORY = os.path.join(HISTORY_DIR, 'cqlsh_history')
if not os.path.exists(HISTORY_DIR):
    try:
        os.mkdir(HISTORY_DIR)
    except OSError:
        print '\nWarning: Cannot create directory at `%s`. Command history will not be saved.\n' % HISTORY_DIR

# Migrate legacy dotfile locations (~/.cqlshrc, ~/.cqlsh_history) unless a
# file already exists at the new location.
OLD_CONFIG_FILE = os.path.expanduser(os.path.join('~', '.cqlshrc'))
if os.path.exists(OLD_CONFIG_FILE):
    if os.path.exists(CONFIG_FILE):
        print '\nWarning: cqlshrc config files were found at both the old location (%s) and \
the new location (%s), the old config file will not be migrated to the new \
location, and the new location will be used for now. You should manually \
consolidate the config files at the new location and remove the old file.' \
            % (OLD_CONFIG_FILE, CONFIG_FILE)
    else:
        os.rename(OLD_CONFIG_FILE, CONFIG_FILE)

OLD_HISTORY = os.path.expanduser(os.path.join('~', '.cqlsh_history'))
if os.path.exists(OLD_HISTORY):
    os.rename(OLD_HISTORY, HISTORY)
# END history/config definition
# Lookup-failure exception hierarchy: each names the kind of schema object
# (or precondition) that could not be found or satisfied.
class NoKeyspaceError(Exception):
    # no keyspace selected where one is required
    pass

class KeyspaceNotFound(Exception):
    pass

class ColumnFamilyNotFound(Exception):
    pass

class IndexNotFound(Exception):
    pass

class MaterializedViewNotFound(Exception):
    pass

class ObjectNotFound(Exception):
    pass

class VersionNotSupported(Exception):
    pass

class UserTypeNotFound(Exception):
    pass

class FunctionNotFound(Exception):
    pass

class AggregateNotFound(Exception):
    pass
class DecodeError(Exception):
    """Raised when a stored value cannot be decoded for display.

    Attributes:
        thebytes -- the raw value that failed
        err      -- the underlying error (exception or message)
        colname  -- the column the value belongs to, when known
    """
    verb = 'decode'

    def __init__(self, thebytes, err, colname=None):
        self.thebytes = thebytes
        self.err = err
        self.colname = colname

    def __str__(self):
        return str(self.thebytes)

    def message(self):
        if self.colname is None:
            what = 'value %r' % (self.thebytes,)
        else:
            what = 'value %r (for column %r)' % (self.thebytes, self.colname)
        return 'Failed to %s %s : %s' % (self.verb, what, self.err)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.message())
class FormatError(DecodeError):
    # Same payload as DecodeError, but raised when a decoded value cannot be
    # *formatted* for display; only the verb in the message changes.
    verb = 'format'
def full_cql_version(ver):
    """Normalize a CQL version string.

    Pads ``ver`` with ``.0`` components until it has three numeric parts,
    then returns ``(padded_string, version_tuple)`` where the tuple holds
    the three ints followed by any ``-suffix`` ('' when absent), e.g.
    '3.1' -> ('3.1.0', (3, 1, 0, '')).
    """
    while ver.count('.') < 2:
        ver += '.0'
    ver_parts = ver.split('-', 1) + ['']
    # list(map(...)) is identical on Python 2 and stays correct on Python 3,
    # where the original bare map() (a lazy iterator) cannot be added to a list.
    vertuple = tuple(list(map(int, ver_parts[0].split('.'))) + [ver_parts[1]])
    return ver, vertuple
def format_value(val, cqltype, encoding, addcolor=False, date_time_format=None,
                 float_precision=None, colormap=None, nullval=None):
    """Render a single value for display.

    DecodeError placeholders are shown as the repr of their raw bytes
    (colorized as an error when addcolor is set); everything else is
    delegated to format_by_type.
    """
    if isinstance(val, DecodeError):
        raw = repr(val.thebytes)
        return colorme(raw, colormap, 'error') if addcolor else FormattedValue(raw)
    return format_by_type(val, cqltype=cqltype, encoding=encoding, colormap=colormap,
                          addcolor=addcolor, nullval=nullval, date_time_format=date_time_format,
                          float_precision=float_precision)
def show_warning_without_quoting_line(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning replacement that omits echoing the offending
    source line (by forcing line='') and silently tolerates a broken or
    closed output stream."""
    out = sys.stderr if file is None else file
    try:
        out.write(warnings.formatwarning(message, category, filename, lineno, line=''))
    except IOError:
        pass
# Install the quieter warning printer and always surface schema-structure
# warnings from the CQL handling layer (never deduplicate them).
warnings.showwarning = show_warning_without_quoting_line
warnings.filterwarnings('always', category=cql3handling.UnexpectedTableStructure)
def insert_driver_hooks():
    """Monkey-patch the installed cassandra driver for cqlsh's display needs.

    Must run once at startup, before any connection is made.
    """

    class DateOverFlowWarning(RuntimeWarning):
        pass

    # Native datetime types blow up outside of datetime.[MIN|MAX]_YEAR. We will fall back to an int timestamp
    def deserialize_date_fallback_int(byts, protocol_version):
        timestamp_ms = int64_unpack(byts)
        try:
            return datetime_from_timestamp(timestamp_ms / 1000.0)
        except OverflowError:
            warnings.warn(DateOverFlowWarning("Some timestamps are larger than Python datetime can represent. "
                                              "Timestamps are displayed in milliseconds from epoch."))
            return timestamp_ms

    cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int)

    if hasattr(cassandra, 'deserializers'):
        # drop the optimized deserializer so the fallback above is actually used
        del cassandra.deserializers.DesDateType

    # Return cassandra.cqltypes.EMPTY instead of None for empty values
    cassandra.cqltypes.CassandraType.support_empty_values = True
class FrozenType(cassandra.cqltypes._ParameterizedType):
"""
Needed until the bundled python driver adds FrozenType.
"""
typename = "frozen"
num_subtypes = 1
@classmethod
def deserialize_safe(cls, byts, protocol_version):
subtype, = cls.subtypes
return subtype.from_binary(byts)
@classmethod
def serialize_safe(cls, val, protocol_version):
subtype, = cls.subtypes
return subtype.to_binary(val, protocol_version)
class Shell(cmd.Cmd):
    """Interactive cqlsh command interpreter (REPL), built on cmd.Cmd."""

    # Optional user-supplied prompt prefix; shown on its own line when set.
    custom_prompt = os.getenv('CQLSH_PROMPT', '')
    # Bug fix: compare string contents with !=, not identity ('is not'),
    # which only worked by accident of CPython string interning.
    if custom_prompt != '':
        custom_prompt += "\n"
    default_prompt = custom_prompt + "cqlsh> "
    continue_prompt = " ... "
    keyspace_prompt = custom_prompt + "cqlsh:%s> "
    keyspace_continue_prompt = "%s ... "
    show_line_nums = False
    debug = False
    stop = False
    last_hist = None
    shunted_query_out = None
    use_paging = True
    default_page_size = 100
    def __init__(self, hostname, port, color=False,
                 username=None, password=None, encoding=None, stdin=None, tty=True,
                 completekey=DEFAULT_COMPLETEKEY, browser=None, use_conn=None,
                 cqlver=None, keyspace=None,
                 tracing_enabled=False, expand_enabled=False,
                 no_compact=False,
                 display_nanotime_format=DEFAULT_NANOTIME_FORMAT,
                 display_timestamp_format=DEFAULT_TIMESTAMP_FORMAT,
                 display_date_format=DEFAULT_DATE_FORMAT,
                 display_float_precision=DEFAULT_FLOAT_PRECISION,
                 display_double_precision=DEFAULT_DOUBLE_PRECISION,
                 display_timezone=None,
                 max_trace_wait=DEFAULT_MAX_TRACE_WAIT,
                 ssl=False,
                 single_statement=None,
                 request_timeout=DEFAULT_REQUEST_TIMEOUT_SECONDS,
                 protocol_version=None,
                 connect_timeout=DEFAULT_CONNECT_TIMEOUT_SECONDS,
                 decompression=None):
        """Set up the shell: authentication, the cluster connection,
        display options, and input/output streams.

        When use_conn is supplied, an existing Cluster object is reused and
        this shell does not own (and will not shut down) the connection;
        otherwise a new Cluster is created from hostname/port.
        """
        cmd.Cmd.__init__(self, completekey=completekey)
        self.hostname = hostname
        self.port = port
        self.decompression = decompression
        self.auth_provider = None
        if username:
            # Prompt for the password interactively rather than requiring
            # it on the command line.
            if not password:
                password = getpass.getpass()
            self.auth_provider = PlainTextAuthProvider(username=username, password=password)
        self.username = username
        self.keyspace = keyspace
        self.ssl = ssl
        self.tracing_enabled = tracing_enabled
        self.page_size = self.default_page_size
        self.expand_enabled = expand_enabled
        if use_conn:
            self.conn = use_conn
        else:
            kwargs = {}
            if protocol_version is not None:
                kwargs['protocol_version'] = protocol_version
            # Pin the connection to the one contact host so results come
            # from a predictable coordinator.
            self.conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=cqlver,
                                auth_provider=self.auth_provider,
                                no_compact=no_compact,
                                ssl_options=sslhandling.ssl_settings(hostname, CONFIG_FILE) if ssl else None,
                                load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
                                control_connection_timeout=connect_timeout,
                                connect_timeout=connect_timeout,
                                **kwargs)
        self.owns_connection = not use_conn
        if keyspace:
            self.session = self.conn.connect(keyspace)
        else:
            self.session = self.conn.connect()
        if browser == "":
            browser = None
        self.browser = browser
        self.color = color
        self.display_nanotime_format = display_nanotime_format
        self.display_timestamp_format = display_timestamp_format
        self.display_date_format = display_date_format
        self.display_float_precision = display_float_precision
        self.display_double_precision = display_double_precision
        self.display_timezone = display_timezone
        self.session.default_timeout = request_timeout
        # Rows come back as ordered dicts so columns keep their SELECT order.
        self.session.row_factory = ordered_dict_factory
        self.session.default_consistency_level = cassandra.ConsistencyLevel.ONE
        self.get_connection_versions()
        self.set_expanded_cql_version(self.connection_versions['cql'])
        self.current_keyspace = keyspace
        self.max_trace_wait = max_trace_wait
        self.session.max_trace_wait = max_trace_wait
        self.tty = tty
        self.encoding = encoding
        self.check_windows_encoding()
        self.output_codec = codecs.lookup(encoding)
        # Buffer for a (possibly multi-line) statement being entered.
        self.statement = StringIO()
        self.lineno = 1
        self.in_comment = False
        self.prompt = ''
        if stdin is None:
            stdin = sys.stdin
        if tty:
            self.reset_prompt()
            self.report_connection()
            print 'Use HELP for help.'
        else:
            # Non-interactive input: report errors with line numbers.
            self.show_line_nums = True
        self.stdin = stdin
        self.query_out = sys.stdout
        self.consistency_level = cassandra.ConsistencyLevel.ONE
        self.serial_consistency_level = cassandra.ConsistencyLevel.SERIAL
        self.empty_lines = 0
        self.statement_error = False
        self.single_statement = single_statement
@property
def is_using_utf8(self):
# utf8 encodings from https://docs.python.org/{2,3}/library/codecs.html
return self.encoding.replace('-', '_').lower() in ['utf', 'utf_8', 'u8', 'utf8', CP65001]
    def check_windows_encoding(self):
        # Warn when running interactively on Windows with a UTF-8 shell
        # encoding but a console codepage other than 65001 — output would be
        # mangled until the user runs 'chcp 65001'.
        if is_win and os.name == 'nt' and self.tty and \
                self.is_using_utf8 and sys.stdout.encoding != CP65001:
            self.printerr("\nWARNING: console codepage must be set to cp65001 "
                          "to support {} encoding on Windows platforms.\n"
                          "If you experience encoding problems, change your console"
                          " codepage with 'chcp 65001' before starting cqlsh.\n".format(self.encoding))
def set_expanded_cql_version(self, ver):
ver, vertuple = full_cql_version(ver)
self.cql_version = ver
self.cql_ver_tuple = vertuple
def cqlver_atleast(self, major, minor=0, patch=0):
return self.cql_ver_tuple[:3] >= (major, minor, patch)
    def myformat_value(self, val, cqltype=None, **kwargs):
        """Format one cell value for display, recording (not raising) any
        decode/format problems so the rest of the row still renders."""
        if isinstance(val, DecodeError):
            self.decoding_errors.append(val)
        try:
            dtformats = DateTimeFormat(timestamp_format=self.display_timestamp_format,
                                       date_format=self.display_date_format, nanotime_format=self.display_nanotime_format,
                                       timezone=self.display_timezone)
            # Doubles get their own precision setting; everything else uses
            # the float precision.
            precision = self.display_double_precision if cqltype is not None and cqltype.type_name == 'double' else self.display_float_precision
            try:
                # Best-effort snappy decompression of blob columns; any
                # failure (missing module, non-compressed data, cqltype is
                # None) falls through to normal formatting.
                if self.decompression == 'snappy' and cqltype.type_name == 'blob':
                    import snappy
                    cqltype = None
                    val = snappy.decompress(val).decode('utf8')
            except Exception:
                pass
            return format_value(val, cqltype=cqltype, encoding=self.output_codec.name,
                                addcolor=self.color, date_time_format=dtformats,
                                float_precision=precision, **kwargs)
        except Exception, e:
            # Fall back to showing the raw value tagged as a format error.
            err = FormatError(val, e)
            self.decoding_errors.append(err)
            return format_value(err, cqltype=cqltype, encoding=self.output_codec.name, addcolor=self.color)
def myformat_colname(self, name, table_meta=None):
column_colors = COLUMN_NAME_COLORS.copy()
# check column role and color appropriately
if table_meta:
if name in [col.name for col in table_meta.partition_key]:
column_colors.default_factory = lambda: RED
elif name in [col.name for col in table_meta.clustering_key]:
column_colors.default_factory = lambda: CYAN
elif name in table_meta.columns and table_meta.columns[name].is_static:
column_colors.default_factory = lambda: WHITE
return self.myformat_value(name, colormap=column_colors)
    def report_connection(self):
        """Print the connection banner: host line, then version line."""
        self.show_host()
        self.show_version()
    def show_host(self):
        # "Connected to <cluster> at <host>:<port>."
        print "Connected to %s at %s:%d." % \
            (self.applycolor(self.get_cluster_name(), BLUE),
             self.hostname,
             self.port)
    def show_version(self):
        vers = self.connection_versions.copy()
        vers['shver'] = version
        # system.Versions['cql'] apparently does not reflect changes with
        # set_cql_version.
        vers['cql'] = self.cql_version
        print "[cqlsh %(shver)s | Cassandra %(build)s | CQL spec %(cql)s | Native protocol v%(protocol)s]" % vers
    def show_session(self, sessionid, partial_session=False):
        """Print the query trace events recorded for one tracing session."""
        print_trace_session(self, self.session, sessionid, partial_session)
def get_connection_versions(self):
result, = self.session.execute("select * from system.local where key = 'local'")
vers = {
'build': result['release_version'],
'cql': result['cql_version'],
}
vers['protocol'] = self.conn.protocol_version
self.connection_versions = vers
def get_keyspace_names(self):
return map(str, self.conn.metadata.keyspaces.keys())
def get_columnfamily_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).tables.keys())
def get_materialized_view_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).views.keys())
def get_index_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(str, self.get_keyspace_meta(ksname).indexes.keys())
def get_column_names(self, ksname, cfname):
if ksname is None:
ksname = self.current_keyspace
layout = self.get_table_meta(ksname, cfname)
return [unicode(col) for col in layout.columns]
def get_usertype_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return self.get_keyspace_meta(ksname).user_types.keys()
def get_usertype_layout(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
ks_meta = self.get_keyspace_meta(ksname)
try:
user_type = ks_meta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type %r not found" % typename)
return zip(user_type.field_names, user_type.field_types)
def get_userfunction_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).functions.values())
def get_useraggregate_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return map(lambda f: f.name, self.get_keyspace_meta(ksname).aggregates.values())
def get_cluster_name(self):
return self.conn.metadata.cluster_name
def get_partitioner(self):
return self.conn.metadata.partitioner
def get_keyspace_meta(self, ksname):
if ksname not in self.conn.metadata.keyspaces:
raise KeyspaceNotFound('Keyspace %r not found.' % ksname)
return self.conn.metadata.keyspaces[ksname]
def get_keyspaces(self):
return self.conn.metadata.keyspaces.values()
def get_ring(self, ks):
self.conn.metadata.token_map.rebuild_keyspace(ks, build_if_absent=True)
return self.conn.metadata.token_map.tokens_to_hosts_by_ks[ks]
def get_table_meta(self, ksname, tablename):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if tablename not in ksmeta.tables:
if ksname == 'system_auth' and tablename in ['roles', 'role_permissions']:
self.get_fake_auth_table_meta(ksname, tablename)
else:
raise ColumnFamilyNotFound("Column family %r not found" % tablename)
else:
return ksmeta.tables[tablename]
def get_fake_auth_table_meta(self, ksname, tablename):
# may be using external auth implementation so internal tables
# aren't actually defined in schema. In this case, we'll fake
# them up
if tablename == 'roles':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'roles')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['is_superuser'] = ColumnMetadata(table_meta, 'is_superuser', cassandra.cqltypes.BooleanType)
table_meta.columns['can_login'] = ColumnMetadata(table_meta, 'can_login', cassandra.cqltypes.BooleanType)
elif tablename == 'role_permissions':
ks_meta = KeyspaceMetadata(ksname, True, None, None)
table_meta = TableMetadata(ks_meta, 'role_permissions')
table_meta.columns['role'] = ColumnMetadata(table_meta, 'role', cassandra.cqltypes.UTF8Type)
table_meta.columns['resource'] = ColumnMetadata(table_meta, 'resource', cassandra.cqltypes.UTF8Type)
table_meta.columns['permission'] = ColumnMetadata(table_meta, 'permission', cassandra.cqltypes.UTF8Type)
else:
raise ColumnFamilyNotFound("Column family %r not found" % tablename)
def get_index_meta(self, ksname, idxname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if idxname not in ksmeta.indexes:
raise IndexNotFound("Index %r not found" % idxname)
return ksmeta.indexes[idxname]
def get_view_meta(self, ksname, viewname):
if ksname is None:
ksname = self.current_keyspace
ksmeta = self.get_keyspace_meta(ksname)
if viewname not in ksmeta.views:
raise MaterializedViewNotFound("Materialized view %r not found" % viewname)
return ksmeta.views[viewname]
def get_object_meta(self, ks, name):
if name is None:
if ks and ks in self.conn.metadata.keyspaces:
return self.conn.metadata.keyspaces[ks]
elif self.current_keyspace is None:
raise ObjectNotFound("%r not found in keyspaces" % (ks))
else:
name = ks
ks = self.current_keyspace
if ks is None:
ks = self.current_keyspace
ksmeta = self.get_keyspace_meta(ks)
if name in ksmeta.tables:
return ksmeta.tables[name]
elif name in ksmeta.indexes:
return ksmeta.indexes[name]
elif name in ksmeta.views:
return ksmeta.views[name]
raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))
def get_usertypes_meta(self):
data = self.session.execute("select * from system.schema_usertypes")
if not data:
return cql3handling.UserTypesMeta({})
return cql3handling.UserTypesMeta.from_layout(data)
def get_trigger_names(self, ksname=None):
if ksname is None:
ksname = self.current_keyspace
return [trigger.name
for table in self.get_keyspace_meta(ksname).tables.values()
for trigger in table.triggers.values()]
def reset_statement(self):
self.reset_prompt()
self.statement.truncate(0)
self.empty_lines = 0
def reset_prompt(self):
if self.current_keyspace is None:
self.set_prompt(self.default_prompt, True)
else:
self.set_prompt(self.keyspace_prompt % self.current_keyspace, True)
def set_continue_prompt(self):
if self.empty_lines >= 3:
self.set_prompt("Statements are terminated with a ';'. You can press CTRL-C to cancel an incomplete statement.")
self.empty_lines = 0
return
if self.current_keyspace is None:
self.set_prompt(self.continue_prompt)
else:
spaces = ' ' * len(str(self.current_keyspace))
self.set_prompt(self.keyspace_continue_prompt % spaces)
self.empty_lines = self.empty_lines + 1 if not self.lastcmd else 0
    @contextmanager
    def prepare_loop(self):
        """Context manager around the main REPL loop: wires up readline tab
        completion on entry (interactive sessions only) and restores the
        previous completer on exit."""
        readline = None
        if self.tty and self.completekey:
            try:
                import readline
            except ImportError:
                if is_win:
                    print "WARNING: pyreadline dependency missing. Install to enable tab completion."
                pass
            else:
                old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                # libedit-based readline (e.g. macOS) uses a different
                # key-binding syntax than GNU readline.
                if readline.__doc__ is not None and 'libedit' in readline.__doc__:
                    readline.parse_and_bind("bind -e")
                    readline.parse_and_bind("bind '" + self.completekey + "' rl_complete")
                    readline.parse_and_bind("bind ^R em-inc-search-prev")
                else:
                    readline.parse_and_bind(self.completekey + ": complete")
        try:
            yield
        finally:
            if readline is not None:
                readline.set_completer(old_completer)
    def get_input_line(self, prompt=''):
        """Read one line of input (interactive or piped), tracking lineno.

        Raises EOFError at end of piped input.
        """
        if self.tty:
            try:
                self.lastcmd = raw_input(prompt).decode(self.encoding)
            except UnicodeDecodeError:
                # Keep the shell alive on undecodable input; remind Windows
                # users about the codepage requirement.
                self.lastcmd = ''
                traceback.print_exc()
                self.check_windows_encoding()
            line = self.lastcmd + '\n'
        else:
            self.lastcmd = self.stdin.readline()
            line = self.lastcmd
            if not len(line):
                raise EOFError
        self.lineno += 1
        return line
    def use_stdin_reader(self, until='', prompt=''):
        """Yield input lines up to (but not including) the terminator line."""
        until += '\n'
        while True:
            try:
                newline = self.get_input_line(prompt=prompt)
            except EOFError:
                return
            if newline == until:
                return
            yield newline
    def cmdloop(self):
        """
        Adapted from cmd.Cmd's version, because there is literally no way with
        cmd.Cmd.cmdloop() to tell the difference between "EOF" showing up in
        input and an actual EOF.
        """
        with self.prepare_loop():
            while not self.stop:
                try:
                    if self.single_statement:
                        # -e/--execute mode: run the one statement, then quit.
                        line = self.single_statement
                        self.stop = True
                    else:
                        line = self.get_input_line(self.prompt)
                    self.statement.write(line)
                    # onecmd returns true when the buffered statement is
                    # complete and has been handled.
                    if self.onecmd(self.statement.getvalue()):
                        self.reset_statement()
                except EOFError:
                    self.handle_eof()
                except CQL_ERRORS, cqlerr:
                    self.printerr(cqlerr.message.decode(encoding='utf-8'))
                except KeyboardInterrupt:
                    # Ctrl-C cancels the in-progress statement, not the shell.
                    self.reset_statement()
                    print
    def onecmd(self, statementtext):
        """
        Returns true if the statement is complete and was handled (meaning it
        can be reset).
        """
        try:
            statements, endtoken_escaped = cqlruleset.cql_split_statements(statementtext)
        except pylexotron.LexingError, e:
            if self.show_line_nums:
                self.printerr('Invalid syntax at char %d' % (e.charnum,))
            else:
                self.printerr('Invalid syntax at line %d, char %d'
                              % (e.linenum, e.charnum))
            # Echo the offending line with a caret under the bad character.
            statementline = statementtext.split('\n')[e.linenum - 1]
            self.printerr(' %s' % statementline)
            self.printerr(' %s^' % (' ' * e.charnum))
            return True
        while statements and not statements[-1]:
            statements = statements[:-1]
        if not statements:
            return True
        # An unterminated final statement means more input is expected.
        if endtoken_escaped or statements[-1][-1][0] != 'endtoken':
            self.set_continue_prompt()
            return
        for st in statements:
            try:
                self.handle_statement(st, statementtext)
            except Exception, e:
                if self.debug:
                    traceback.print_exc()
                else:
                    self.printerr(e)
        return True
    def handle_eof(self):
        """On EOF: run any pending buffered statement, then exit the shell."""
        if self.tty:
            print
        statement = self.statement.getvalue()
        if statement.strip():
            if not self.onecmd(statement):
                self.printerr('Incomplete statement at end of file')
        self.do_exit()
    def handle_statement(self, tokens, srcstr):
        # Concat multi-line statements and insert into history
        if readline is not None:
            nl_count = srcstr.count("\n")
            new_hist = srcstr.replace("\n", " ").rstrip()
            if nl_count > 1 and self.last_hist != new_hist:
                readline.add_history(new_hist.encode(self.encoding))
                self.last_hist = new_hist
        cmdword = tokens[0][1]
        if cmdword == '?':
            cmdword = 'help'
        # cqlsh-specific commands have a matching do_<command> handler;
        # anything else is sent to the server as plain CQL.
        custom_handler = getattr(self, 'do_' + cmdword.lower(), None)
        if custom_handler:
            parsed = cqlruleset.cql_whole_parse_tokens(tokens, srcstr=srcstr,
                                                      startsymbol='cqlshCommand')
            if parsed and not parsed.remainder:
                # successful complete parse
                return custom_handler(parsed)
            else:
                return self.handle_parse_error(cmdword, tokens, parsed, srcstr)
        return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
def handle_parse_error(self, cmdword, tokens, parsed, srcstr):
if cmdword.lower() in ('select', 'insert', 'update', 'delete', 'truncate',
'create', 'drop', 'alter', 'grant', 'revoke',
'batch', 'list'):
# hey, maybe they know about some new syntax we don't. type
# assumptions won't work, but maybe the query will.
return self.perform_statement(cqlruleset.cql_extract_orig(tokens, srcstr))
if parsed:
self.printerr('Improper %s command (problem at %r).' % (cmdword, parsed.remainder[0]))
else:
self.printerr('Improper %s command.' % cmdword)
def do_use(self, parsed):
ksname = parsed.get_binding('ksname')
success, _ = self.perform_simple_statement(SimpleStatement(parsed.extract_orig()))
if success:
if ksname[0] == '"' and ksname[-1] == '"':
self.current_keyspace = self.cql_unprotect_name(ksname)
else:
self.current_keyspace = ksname.lower()
def do_select(self, parsed):
tracing_was_enabled = self.tracing_enabled
ksname = parsed.get_binding('ksname')
stop_tracing = ksname == 'system_traces' or (ksname is None and self.current_keyspace == 'system_traces')
self.tracing_enabled = self.tracing_enabled and not stop_tracing
statement = parsed.extract_orig()
self.perform_statement(statement)
self.tracing_enabled = tracing_was_enabled
    def perform_statement(self, statement):
        """Execute one CQL statement with the shell's consistency and paging
        settings, printing server warnings and trace output afterwards.

        Returns the success flag from perform_simple_statement.
        """
        stmt = SimpleStatement(statement, consistency_level=self.consistency_level, serial_consistency_level=self.serial_consistency_level, fetch_size=self.page_size if self.use_paging else None)
        success, future = self.perform_simple_statement(stmt)
        if future:
            if future.warnings:
                self.print_warnings(future.warnings)
            if self.tracing_enabled:
                try:
                    for trace in future.get_all_query_traces(max_wait_per=self.max_trace_wait, query_cl=self.consistency_level):
                        print_trace(self, trace)
                except TraceUnavailable:
                    # Trace rows may not be fully written yet; show whatever
                    # partial sessions exist.
                    msg = "Statement trace did not complete within %d seconds; trace data may be incomplete." % (self.session.max_trace_wait,)
                    self.writeresult(msg, color=RED)
                    for trace_id in future.get_query_trace_ids():
                        self.show_session(trace_id, partial_session=True)
                except Exception, err:
                    self.printerr("Unable to fetch query trace: %s" % (str(err),))
        return success
def parse_for_select_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
name = self.cql_unprotect_name(parsed.get_binding('cfname', None))
try:
return self.get_table_meta(ks, name)
except ColumnFamilyNotFound:
try:
return self.get_view_meta(ks, name)
except MaterializedViewNotFound:
raise ObjectNotFound("%r not found in keyspace %r" % (name, ks))
def parse_for_update_meta(self, query_string):
try:
parsed = cqlruleset.cql_parse(query_string)[1]
except IndexError:
return None
ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
return self.get_table_meta(ks, cf)
    def perform_simple_statement(self, statement):
        """Run one statement; returns (success, response_future).

        Errors are printed, not raised, so the REPL keeps going. Also
        refreshes local schema metadata when the cluster has not reached
        schema agreement.
        """
        if not statement:
            return False, None
        future = self.session.execute_async(statement, trace=self.tracing_enabled)
        result = None
        try:
            result = future.result()
        except CQL_ERRORS, err:
            self.printerr(unicode(err.__class__.__name__) + u": " + err.message.decode(encoding='utf-8'))
        except Exception:
            import traceback
            self.printerr(traceback.format_exc())
        # Even if statement failed we try to refresh schema if not agreed (see CASSANDRA-9689)
        if not future.is_schema_agreed:
            try:
                self.conn.refresh_schema_metadata(5)  # will throw exception if there is a schema mismatch
            except Exception:
                self.printerr("Warning: schema version mismatch detected; check the schema versions of your "
                              "nodes in system.local and system.peers.")
                self.conn.refresh_schema_metadata(-1)
        if result is None:
            return False, None
        # Choose table metadata for formatting based on the statement kind.
        if statement.query_string[:6].lower() == 'select':
            self.print_result(result, self.parse_for_select_meta(statement.query_string))
        elif statement.query_string.lower().startswith("list users") or statement.query_string.lower().startswith("list roles"):
            self.print_result(result, self.get_table_meta('system_auth', 'roles'))
        elif statement.query_string.lower().startswith("list"):
            self.print_result(result, self.get_table_meta('system_auth', 'role_permissions'))
        elif result:
            # CAS INSERT/UPDATE
            self.writeresult("")
            self.print_static_result(result, self.parse_for_update_meta(statement.query_string))
        self.flush_output()
        return True, future
    def print_result(self, result, table_meta):
        """Print a whole result set, paging interactively when the server
        returned a multi-page result and we are on a tty."""
        self.decoding_errors = []
        self.writeresult("")
        if result.has_more_pages and self.tty:
            num_rows = 0
            while True:
                if result.current_rows:
                    num_rows += len(result.current_rows)
                    self.print_static_result(result, table_meta)
                if result.has_more_pages:
                    # Wait for the user before fetching the next page.
                    raw_input("---MORE---")
                    result.fetch_next_page()
                else:
                    break
        else:
            num_rows = len(result.current_rows)
            self.print_static_result(result, table_meta)
        self.writeresult("(%d rows)" % num_rows)
        if self.decoding_errors:
            # Show at most two decode errors to avoid flooding the terminal.
            for err in self.decoding_errors[:2]:
                self.writeresult(err.message(), color=RED)
            if len(self.decoding_errors) > 2:
                self.writeresult('%d more decoding errors suppressed.'
                                 % (len(self.decoding_errors) - 2), color=RED)
def print_static_result(self, result, table_meta):
if not result.column_names and not table_meta:
return
column_names = result.column_names or table_meta.columns.keys()
formatted_names = [self.myformat_colname(name, table_meta) for name in column_names]
if not result.current_rows:
# print header only
self.print_formatted_result(formatted_names, None)
return
cql_types = []
if result.column_types:
ks_name = table_meta.keyspace_name if table_meta else self.current_keyspace
ks_meta = self.conn.metadata.keyspaces.get(ks_name, None)
cql_types = [CqlType(cql_typename(t), ks_meta) for t in result.column_types]
#if self.decompression is None:
formatted_values = [map(self.myformat_value, [row[column] for column in column_names], cql_types) for row in result.current_rows]
#if self.decompression == 'snappy':
# import snappy
# cql_types_tmp=[]
# for col in cql_types:
# if col.type_name == CqlType('blob', ks_meta).type_name:
# cql_types_tmp.append(None)
# else:
# cql_types_tmp.append(col)
# formatted_values = [map(self.myformat_value, [snappy.decompress(row[column]) if cql_types_tmp[0] == None else row[column] for column in column_names], cql_types_tmp) for row in result.current_rows]
#else:
# formatted_values = [map(self.myformat_value, [row[column] for column in column_names], cql_types) for row in result.current_rows]
if self.expand_enabled:
self.print_formatted_result_vertically(formatted_names, formatted_values)
else:
self.print_formatted_result(formatted_names, formatted_values)
    def print_formatted_result(self, formatted_names, formatted_values):
        """Print rows as the classic ASCII table: header, rule, then rows.

        formatted_values of None means "header only" (empty result set).
        """
        # determine column widths
        widths = [n.displaywidth for n in formatted_names]
        if formatted_values is not None:
            for fmtrow in formatted_values:
                for num, col in enumerate(fmtrow):
                    widths[num] = max(widths[num], col.displaywidth)
        # print header
        header = ' | '.join(hdr.ljust(w, color=self.color) for (hdr, w) in zip(formatted_names, widths))
        self.writeresult(' ' + header.rstrip())
        self.writeresult('-%s-' % '-+-'.join('-' * w for w in widths))
        # stop if there are no rows
        if formatted_values is None:
            self.writeresult("")
            return
        # print row data
        for row in formatted_values:
            line = ' | '.join(col.rjust(w, color=self.color) for (col, w) in zip(row, widths))
            self.writeresult(' ' + line)
        self.writeresult("")
    def print_formatted_result_vertically(self, formatted_names, formatted_values):
        """EXPAND mode: print each row as a list of column-name/value pairs."""
        max_col_width = max([n.displaywidth for n in formatted_names])
        max_val_width = max([n.displaywidth for row in formatted_values for n in row])
        # for each row returned, list all the column-value pairs
        for row_id, row in enumerate(formatted_values):
            self.writeresult("@ Row %d" % (row_id + 1))
            self.writeresult('-%s-' % '-+-'.join(['-' * max_col_width, '-' * max_val_width]))
            for field_id, field in enumerate(row):
                column = formatted_names[field_id].ljust(max_col_width, color=self.color)
                # NOTE(review): each value is padded only to its own
                # displaywidth (a no-op); max_val_width looks like the
                # intended pad width — confirm before changing.
                value = field.ljust(field.displaywidth, color=self.color)
                self.writeresult(' ' + " | ".join([column, value]))
            self.writeresult('')
def print_warnings(self, warnings):
if warnings is None or len(warnings) == 0:
return
self.writeresult('')
self.writeresult('Warnings :')
for warning in warnings:
self.writeresult(warning)
self.writeresult('')
    def emptyline(self):
        # Unlike cmd.Cmd's default, an empty line does NOT repeat the
        # previous command.
        pass
    def parseline(self, line):
        # this shouldn't be needed
        raise NotImplementedError
    def complete(self, text, state):
        """readline completion entry point: state 0 computes the candidate
        list, subsequent states walk through it."""
        if readline is None:
            return
        if state == 0:
            try:
                self.completion_matches = self.find_completions(text)
            except Exception:
                if debug_completion:
                    import traceback
                    traceback.print_exc()
                else:
                    raise
        try:
            return self.completion_matches[state]
        except IndexError:
            # No more candidates.
            return None
def find_completions(self, text):
curline = readline.get_line_buffer()
prevlines = self.statement.getvalue()
wholestmt = prevlines + curline
begidx = readline.get_begidx() + len(prevlines)
stuff_to_complete = wholestmt[:begidx]
return cqlruleset.cql_complete(stuff_to_complete, text, cassandra_conn=self,
debug=debug_completion, startsymbol='cqlshCommand')
def set_prompt(self, prompt, prepend_user=False):
if prepend_user and self.username:
self.prompt = "%s@%s" % (self.username, prompt)
return
self.prompt = prompt
def cql_unprotect_name(self, namestr):
if namestr is None:
return
return cqlruleset.dequote_name(namestr)
def cql_unprotect_value(self, valstr):
if valstr is not None:
return cqlruleset.dequote_value(valstr)
    def print_recreate_keyspace(self, ksdef, out):
        """Write CQL that recreates the given keyspace to the out stream."""
        out.write(ksdef.export_as_string())
        out.write("\n")
    def print_recreate_columnfamily(self, ksname, cfname, out):
        """
        Output CQL commands which should be pasteable back into a CQL session
        to recreate the given table.
        Writes output to the given out stream.
        """
        out.write(self.get_table_meta(ksname, cfname).export_as_string())
        out.write("\n")
    def print_recreate_index(self, ksname, idxname, out):
        """
        Output CQL commands which should be pasteable back into a CQL session
        to recreate the given index.
        Writes output to the given out stream.
        """
        out.write(self.get_index_meta(ksname, idxname).export_as_string())
        out.write("\n")
    def print_recreate_materialized_view(self, ksname, viewname, out):
        """
        Output CQL commands which should be pasteable back into a CQL session
        to recreate the given materialized view.
        Writes output to the given out stream.
        """
        out.write(self.get_view_meta(ksname, viewname).export_as_string())
        out.write("\n")
    def print_recreate_object(self, ks, name, out):
        """
        Output CQL commands which should be pasteable back into a CQL session
        to recreate the given object (ks, table or index).
        Writes output to the given out stream.
        """
        out.write(self.get_object_meta(ks, name).export_as_string())
        out.write("\n")
    def describe_keyspaces(self):
        """DESCRIBE KEYSPACES: print all keyspace names, columnized."""
        print
        cmd.Cmd.columnize(self, protect_names(self.get_keyspace_names()))
        print
    def describe_keyspace(self, ksname):
        """DESCRIBE KEYSPACE: print the full recreation CQL for a keyspace."""
        print
        self.print_recreate_keyspace(self.get_keyspace_meta(ksname), sys.stdout)
        print
    def describe_columnfamily(self, ksname, cfname):
        """DESCRIBE TABLE: print the recreation CQL for one table."""
        if ksname is None:
            ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        self.print_recreate_columnfamily(ksname, cfname, sys.stdout)
        print
    def describe_index(self, ksname, idxname):
        """DESCRIBE INDEX: print the recreation CQL for one index."""
        print
        self.print_recreate_index(ksname, idxname, sys.stdout)
        print
    def describe_materialized_view(self, ksname, viewname):
        """DESCRIBE MATERIALIZED VIEW: print the recreation CQL for one view."""
        if ksname is None:
            ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        self.print_recreate_materialized_view(ksname, viewname, sys.stdout)
        print
    def describe_object(self, ks, name):
        """DESCRIBE <name>: print recreation CQL for whatever `name` is."""
        print
        self.print_recreate_object(ks, name, sys.stdout)
        print
    def describe_columnfamilies(self, ksname):
        """DESCRIBE TABLES: list tables in one keyspace, or grouped by keyspace."""
        print
        if ksname is None:
            for k in self.get_keyspaces():
                name = protect_name(k.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(k.name)))
                print
        else:
            cmd.Cmd.columnize(self, protect_names(self.get_columnfamily_names(ksname)))
            print
    def describe_functions(self, ksname):
        """DESCRIBE FUNCTIONS: list user-defined function names."""
        print
        if ksname is None:
            for ksmeta in self.get_keyspaces():
                name = protect_name(ksmeta.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                self._columnize_unicode(ksmeta.functions.keys())
        else:
            ksmeta = self.get_keyspace_meta(ksname)
            self._columnize_unicode(ksmeta.functions.keys())
    def describe_function(self, ksname, functionname):
        """DESCRIBE FUNCTION: print every overload of one user-defined function."""
        if ksname is None:
            ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        ksmeta = self.get_keyspace_meta(ksname)
        # Overloads share a name, so collect all matches.
        functions = filter(lambda f: f.name == functionname, ksmeta.functions.values())
        if len(functions) == 0:
            raise FunctionNotFound("User defined function %r not found" % functionname)
        print "\n\n".join(func.export_as_string() for func in functions)
        print
    def describe_aggregates(self, ksname):
        """DESCRIBE AGGREGATES: list user-defined aggregate names."""
        print
        if ksname is None:
            for ksmeta in self.get_keyspaces():
                name = protect_name(ksmeta.name)
                print 'Keyspace %s' % (name,)
                print '---------%s' % ('-' * len(name))
                self._columnize_unicode(ksmeta.aggregates.keys())
        else:
            ksmeta = self.get_keyspace_meta(ksname)
            self._columnize_unicode(ksmeta.aggregates.keys())
    def describe_aggregate(self, ksname, aggregatename):
        """DESCRIBE AGGREGATE: print every overload of one user-defined aggregate."""
        if ksname is None:
            ksname = self.current_keyspace
        if ksname is None:
            raise NoKeyspaceError("No keyspace specified and no current keyspace")
        print
        ksmeta = self.get_keyspace_meta(ksname)
        aggregates = filter(lambda f: f.name == aggregatename, ksmeta.aggregates.values())
        if len(aggregates) == 0:
            raise FunctionNotFound("User defined aggregate %r not found" % aggregatename)
        print "\n\n".join(aggr.export_as_string() for aggr in aggregates)
        print
def describe_usertypes(self, ksname):
print
if ksname is None:
for ksmeta in self.get_keyspaces():
name = protect_name(ksmeta.name)
print 'Keyspace %s' % (name,)
print '---------%s' % ('-' * len(name))
self._columnize_unicode(ksmeta.user_types.keys(), quote=True)
else:
ksmeta = self.get_keyspace_meta(ksname)
self._columnize_unicode(ksmeta.user_types.keys(), quote=True)
def describe_usertype(self, ksname, typename):
if ksname is None:
ksname = self.current_keyspace
if ksname is None:
raise NoKeyspaceError("No keyspace specified and no current keyspace")
print
ksmeta = self.get_keyspace_meta(ksname)
try:
usertype = ksmeta.user_types[typename]
except KeyError:
raise UserTypeNotFound("User type %r not found" % typename)
print usertype.export_as_string()
def _columnize_unicode(self, name_list, quote=False):
    """
    Used when columnizing identifiers that may contain unicode
    """
    # cmd.Cmd.columnize() expects byte strings, so encode each name first.
    names = [n.encode('utf-8') for n in name_list]
    if quote:
        # Wrap identifiers in quotes when they need CQL protection.
        names = protect_names(names)
    cmd.Cmd.columnize(self, names)
    print
def describe_cluster(self):
    # Print cluster name and partitioner; when connected to a non-system
    # keyspace, also print token-range -> replica-endpoint ownership.
    print '\nCluster: %s' % self.get_cluster_name()
    p = trim_if_present(self.get_partitioner(), 'org.apache.cassandra.dht.')
    print 'Partitioner: %s\n' % p
    # TODO: snitch?
    # snitch = trim_if_present(self.get_snitch(), 'org.apache.cassandra.locator.')
    # print 'Snitch: %s\n' % snitch
    if self.current_keyspace is not None and self.current_keyspace != 'system':
        print "Range ownership:"
        ring = self.get_ring(self.current_keyspace)
        for entry in ring.items():
            # entry is (token, [replica hosts]); right-align the token value.
            print ' %39s [%s]' % (str(entry[0].value), ', '.join([host.address for host in entry[1]]))
    print
def describe_schema(self, include_system=False):
    """Dump re-creation CQL for every keyspace (system ones only on request)."""
    print
    for ks in self.get_keyspaces():
        if not include_system and ks.name in cql3handling.SYSTEM_KEYSPACES:
            continue
        self.print_recreate_keyspace(ks, sys.stdout)
        print
def do_describe(self, parsed):
    """
    DESCRIBE [cqlsh only]
    (DESC may be used as a shorthand.)
    Outputs information about the connected Cassandra cluster, or about
    the data objects stored in the cluster. Use in one of the following ways:
    DESCRIBE KEYSPACES
    Output the names of all keyspaces.
    DESCRIBE KEYSPACE [<keyspacename>]
    Output CQL commands that could be used to recreate the given keyspace,
    and the objects in it (such as tables, types, functions, etc.).
    In some cases, as the CQL interface matures, there will be some metadata
    about a keyspace that is not representable with CQL. That metadata will not be shown.
    The '<keyspacename>' argument may be omitted, in which case the current
    keyspace will be described.
    DESCRIBE TABLES
    Output the names of all tables in the current keyspace, or in all
    keyspaces if there is no current keyspace.
    DESCRIBE TABLE [<keyspace>.]<tablename>
    Output CQL commands that could be used to recreate the given table.
    In some cases, as above, there may be table metadata which is not
    representable and which will not be shown.
    DESCRIBE INDEX <indexname>
    Output the CQL command that could be used to recreate the given index.
    In some cases, there may be index metadata which is not representable
    and which will not be shown.
    DESCRIBE MATERIALIZED VIEW <viewname>
    Output the CQL command that could be used to recreate the given materialized view.
    In some cases, there may be materialized view metadata which is not representable
    and which will not be shown.
    DESCRIBE CLUSTER
    Output information about the connected Cassandra cluster, such as the
    cluster name, and the partitioner and snitch in use. When you are
    connected to a non-system keyspace, also shows endpoint-range
    ownership information for the Cassandra ring.
    DESCRIBE [FULL] SCHEMA
    Output CQL commands that could be used to recreate the entire (non-system) schema.
    Works as though "DESCRIBE KEYSPACE k" was invoked for each non-system keyspace
    k. Use DESCRIBE FULL SCHEMA to include the system keyspaces.
    DESCRIBE TYPES
    Output the names of all user-defined-types in the current keyspace, or in all
    keyspaces if there is no current keyspace.
    DESCRIBE TYPE [<keyspace>.]<type>
    Output the CQL command that could be used to recreate the given user-defined-type.
    DESCRIBE FUNCTIONS
    Output the names of all user-defined-functions in the current keyspace, or in all
    keyspaces if there is no current keyspace.
    DESCRIBE FUNCTION [<keyspace>.]<function>
    Output the CQL command that could be used to recreate the given user-defined-function.
    DESCRIBE AGGREGATES
    Output the names of all user-defined-aggregates in the current keyspace, or in all
    keyspaces if there is no current keyspace.
    DESCRIBE AGGREGATE [<keyspace>.]<aggregate>
    Output the CQL command that could be used to recreate the given user-defined-aggregate.
    DESCRIBE <objname>
    Output CQL commands that could be used to recreate the entire object schema,
    where object can be either a keyspace or a table or an index or a materialized
    view (in this order).
    """
    # The token right after DESCRIBE picks which describe_* helper runs.
    what = parsed.matched[1][1].lower()
    if what == 'functions':
        self.describe_functions(self.current_keyspace)
    elif what == 'function':
        ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        functionname = self.cql_unprotect_name(parsed.get_binding('udfname'))
        self.describe_function(ksname, functionname)
    elif what == 'aggregates':
        self.describe_aggregates(self.current_keyspace)
    elif what == 'aggregate':
        ksname = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        aggregatename = self.cql_unprotect_name(parsed.get_binding('udaname'))
        self.describe_aggregate(ksname, aggregatename)
    elif what == 'keyspaces':
        self.describe_keyspaces()
    elif what == 'keyspace':
        ksname = self.cql_unprotect_name(parsed.get_binding('ksname', ''))
        if not ksname:
            # No explicit keyspace: fall back to the USE'd one, if any.
            ksname = self.current_keyspace
            if ksname is None:
                self.printerr('Not in any keyspace.')
                return
        self.describe_keyspace(ksname)
    elif what in ('columnfamily', 'table'):
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        cf = self.cql_unprotect_name(parsed.get_binding('cfname'))
        self.describe_columnfamily(ks, cf)
    elif what == 'index':
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        idx = self.cql_unprotect_name(parsed.get_binding('idxname', None))
        self.describe_index(ks, idx)
    elif what == 'materialized' and parsed.matched[2][1].lower() == 'view':
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        mv = self.cql_unprotect_name(parsed.get_binding('mvname'))
        self.describe_materialized_view(ks, mv)
    elif what in ('columnfamilies', 'tables'):
        self.describe_columnfamilies(self.current_keyspace)
    elif what == 'types':
        self.describe_usertypes(self.current_keyspace)
    elif what == 'type':
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        ut = self.cql_unprotect_name(parsed.get_binding('utname'))
        self.describe_usertype(ks, ut)
    elif what == 'cluster':
        self.describe_cluster()
    elif what == 'schema':
        self.describe_schema(False)
    elif what == 'full' and parsed.matched[2][1].lower() == 'schema':
        self.describe_schema(True)
    elif what:
        # Fallback: DESCRIBE <objname> -- probe keyspace/table/index/MV bindings
        # in that order and describe whichever object matched.
        ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
        name = self.cql_unprotect_name(parsed.get_binding('cfname'))
        if not name:
            name = self.cql_unprotect_name(parsed.get_binding('idxname', None))
        if not name:
            name = self.cql_unprotect_name(parsed.get_binding('mvname', None))
        self.describe_object(ks, name)
do_desc = do_describe
def do_copy(self, parsed):
    r"""
    COPY [cqlsh only]
    COPY x FROM: Imports CSV data into a Cassandra table
    COPY x TO: Exports data from a Cassandra table in CSV format.
    COPY <table_name> [ ( column [, ...] ) ]
    FROM ( '<file_pattern_1, file_pattern_2, ... file_pattern_n>' | STDIN )
    [ WITH <option>='value' [AND ...] ];
    File patterns are either file names or valid python glob expressions, e.g. *.csv or folder/*.csv.
    COPY <table_name> [ ( column [, ...] ) ]
    TO ( '<filename>' | STDOUT )
    [ WITH <option>='value' [AND ...] ];
    Available common COPY options and defaults:
    DELIMITER=','           - character that appears between records
    QUOTE='"'               - quoting character to be used to quote fields
    ESCAPE='\'              - character to appear before the QUOTE char when quoted
    HEADER=false            - whether to ignore the first line
    NULL=''                 - string that represents a null value
    DATETIMEFORMAT=         - timestamp strftime format
    '%Y-%m-%d %H:%M:%S%z'     defaults to time_format value in cqlshrc
    MAXATTEMPTS=5           - the maximum number of attempts per batch or range
    REPORTFREQUENCY=0.25    - the frequency with which we display status updates in seconds
    DECIMALSEP='.'          - the separator for decimal values
    THOUSANDSSEP=''         - the separator for thousands digit groups
    BOOLSTYLE='True,False'  - the representation for booleans, case insensitive, specify true followed by false,
    for example yes,no or 1,0
    NUMPROCESSES=n          - the number of worker processes, by default the number of cores minus one
    capped at 16
    CONFIGFILE=''           - a configuration file with the same format as .cqlshrc (see the Python ConfigParser
    documentation) where you can specify WITH options under the following optional
    sections: [copy], [copy-to], [copy-from], [copy:ks.table], [copy-to:ks.table],
    [copy-from:ks.table], where <ks> is your keyspace name and <table> is your table
    name. Options are read from these sections, in the order specified
    above, and command line options always override options in configuration files.
    Depending on the COPY direction, only the relevant copy-from or copy-to sections
    are used. If no configfile is specified then .cqlshrc is searched instead.
    RATEFILE=''             - an optional file where to print the output statistics
    Available COPY FROM options and defaults:
    CHUNKSIZE=5000          - the size of chunks passed to worker processes
    INGESTRATE=100000       - an approximate ingest rate in rows per second
    MINBATCHSIZE=10         - the minimum size of an import batch
    MAXBATCHSIZE=20         - the maximum size of an import batch
    MAXROWS=-1              - the maximum number of rows, -1 means no maximum
    SKIPROWS=0              - the number of rows to skip
    SKIPCOLS=''             - a comma separated list of column names to skip
    MAXPARSEERRORS=-1       - the maximum global number of parsing errors, -1 means no maximum
    MAXINSERTERRORS=1000    - the maximum global number of insert errors, -1 means no maximum
    ERRFILE=''              - a file where to store all rows that could not be imported, by default this is
    import_ks_table.err where <ks> is your keyspace and <table> is your table name.
    PREPAREDSTATEMENTS=True - whether to use prepared statements when importing, by default True. Set this to
    False if you don't mind shifting data parsing to the cluster. The cluster will also
    have to compile every batch statement. For large and oversized clusters
    this will result in a faster import but for smaller clusters it may generate
    timeouts.
    TTL=3600                - the time to live in seconds, by default data will not expire
    Available COPY TO options and defaults:
    ENCODING='utf8'         - encoding for CSV output
    PAGESIZE='1000'         - the page size for fetching results
    PAGETIMEOUT=10          - the page timeout in seconds for fetching results
    BEGINTOKEN=''           - the minimum token string to consider when exporting data
    ENDTOKEN=''             - the maximum token string to consider when exporting data
    MAXREQUESTS=6           - the maximum number of requests each worker process can work on in parallel
    MAXOUTPUTSIZE='-1'      - the maximum size of the output file measured in number of lines,
    beyond this maximum the output file will be split into segments,
    -1 means unlimited.
    FLOATPRECISION=5        - the number of digits displayed after the decimal point for cql float values
    DOUBLEPRECISION=12      - the number of digits displayed after the decimal point for cql double values
    When entering CSV data on STDIN, you can use the sequence "\."
    on a line by itself to end the data input.
    """
    ks = self.cql_unprotect_name(parsed.get_binding('ksname', None))
    if ks is None:
        ks = self.current_keyspace
        if ks is None:
            raise NoKeyspaceError("Not in any keyspace.")
    table = self.cql_unprotect_name(parsed.get_binding('cfname'))
    columns = parsed.get_binding('colnames', None)
    if columns is not None:
        columns = map(self.cql_unprotect_name, columns)
    else:
        # default to all known columns
        columns = self.get_column_names(ks, table)
    fname = parsed.get_binding('fname', None)
    if fname is not None:
        fname = self.cql_unprotect_value(fname)
    # Collect WITH key=value pairs into a plain dict; keys are lowercased.
    copyoptnames = map(str.lower, parsed.get_binding('optnames', ()))
    copyoptvals = map(self.cql_unprotect_value, parsed.get_binding('optvals', ()))
    opts = dict(zip(copyoptnames, copyoptvals))
    # Direction (FROM/TO) decides import vs. export task.
    direction = parsed.get_binding('dir').upper()
    if direction == 'FROM':
        task = ImportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
    elif direction == 'TO':
        task = ExportTask(self, ks, table, columns, fname, opts, self.conn.protocol_version, CONFIG_FILE)
    else:
        raise SyntaxError("Unknown direction %s" % direction)
    task.run()
def do_show(self, parsed):
    """
    SHOW [cqlsh only]
    Displays information about the current cqlsh session. Can be called in
    the following ways:
    SHOW VERSION
    Shows the version and build of the connected Cassandra instance, as
    well as the versions of the CQL spec and the Thrift protocol that
    the connected Cassandra instance understands.
    SHOW HOST
    Shows where cqlsh is currently connected.
    SHOW SESSION <sessionid>
    Pretty-prints the requested tracing session.
    """
    what = parsed.get_binding('what').lower()
    if what == 'version':
        self.get_connection_versions()
        self.show_version()
        return
    if what == 'host':
        self.show_host()
        return
    if what.startswith('session'):
        session_id = parsed.get_binding('sessionid').lower()
        self.show_session(UUID(session_id))
        return
    self.printerr('Wait, how do I show %r?' % (what,))
def do_source(self, parsed):
    """
    SOURCE [cqlsh only]
    Executes a file containing CQL statements. Gives the output for each
    statement in turn, if any, or any errors that occur along the way.
    Errors do NOT abort execution of the CQL source file.
    Usage:
    SOURCE '<file>';
    That is, the path to the file to be executed must be given inside a
    string literal. The path is interpreted relative to the current working
    directory. The tilde shorthand notation ('~/mydir') is supported for
    referring to $HOME.
    See also the --file option to cqlsh.
    """
    fname = parsed.get_binding('fname')
    fname = os.path.expanduser(self.cql_unprotect_value(fname))
    try:
        # Detect a BOM if present and open with the discovered encoding,
        # skipping past the BOM bytes.
        encoding, bom_size = get_file_encoding_bomsize(fname)
        f = codecs.open(fname, 'r', encoding)
        f.seek(bom_size)
    except IOError, e:
        self.printerr('Could not open %r: %s' % (fname, e))
        return
    username = self.auth_provider.username if self.auth_provider else None
    password = self.auth_provider.password if self.auth_provider else None
    # Run the file through a nested non-tty Shell that reuses this
    # session's connection and mirrors all of its display settings.
    subshell = Shell(self.hostname, self.port, color=self.color,
                     username=username, password=password,
                     encoding=self.encoding, stdin=f, tty=False, use_conn=self.conn,
                     cqlver=self.cql_version, keyspace=self.current_keyspace,
                     tracing_enabled=self.tracing_enabled,
                     display_nanotime_format=self.display_nanotime_format,
                     display_timestamp_format=self.display_timestamp_format,
                     display_date_format=self.display_date_format,
                     display_float_precision=self.display_float_precision,
                     display_double_precision=self.display_double_precision,
                     display_timezone=self.display_timezone,
                     max_trace_wait=self.max_trace_wait, ssl=self.ssl,
                     request_timeout=self.session.default_timeout,
                     connect_timeout=self.conn.connect_timeout)
    subshell.cmdloop()
    f.close()
def do_capture(self, parsed):
    """
    CAPTURE [cqlsh only]
    Begins capturing command output and appending it to a specified file.
    Output will not be shown at the console while it is captured.
    Usage:
    CAPTURE '<file>';
    CAPTURE OFF;
    CAPTURE;
    That is, the path to the file to be appended to must be given inside a
    string literal. The path is interpreted relative to the current working
    directory. The tilde shorthand notation ('~/mydir') is supported for
    referring to $HOME.
    Only query result output is captured. Errors and output from cqlsh-only
    commands will still be shown in the cqlsh session.
    To stop capturing output and show it in the cqlsh session again, use
    CAPTURE OFF.
    To inspect the current capture configuration, use CAPTURE with no
    arguments.
    """
    fname = parsed.get_binding('fname')
    if fname is None:
        # Bare CAPTURE: just report the current capture state.
        if self.shunted_query_out is not None:
            print "Currently capturing query output to %r." % (self.query_out.name,)
        else:
            print "Currently not capturing query output."
        return
    if fname.upper() == 'OFF':
        if self.shunted_query_out is None:
            self.printerr('Not currently capturing output.')
            return
        # Close the capture file and restore the stashed stream and
        # color setting saved when capturing was turned on.
        self.query_out.close()
        self.query_out = self.shunted_query_out
        self.color = self.shunted_color
        self.shunted_query_out = None
        del self.shunted_color
        return
    if self.shunted_query_out is not None:
        self.printerr('Already capturing output to %s. Use CAPTURE OFF'
                      ' to disable.' % (self.query_out.name,))
        return
    fname = os.path.expanduser(self.cql_unprotect_value(fname))
    try:
        f = open(fname, 'a')
    except IOError, e:
        self.printerr('Could not open %r for append: %s' % (fname, e))
        return
    # Stash the live stream/color so CAPTURE OFF can restore them; color
    # is disabled because ANSI escapes would pollute the capture file.
    self.shunted_query_out = self.query_out
    self.shunted_color = self.color
    self.query_out = f
    self.color = False
    print 'Now capturing query output to %r.' % (fname,)
def do_tracing(self, parsed):
    """
    TRACING [cqlsh]
    Enables or disables request tracing.
    TRACING ON
    Enables tracing for all further requests.
    TRACING OFF
    Disables tracing.
    TRACING
    TRACING with no arguments shows the current tracing status.
    """
    toggle = SwitchCommand("TRACING", "Tracing")
    self.tracing_enabled = toggle.execute(self.tracing_enabled, parsed, self.printerr)
def do_expand(self, parsed):
    """
    EXPAND [cqlsh]
    Enables or disables expanded (vertical) output.
    EXPAND ON
    Enables expanded (vertical) output.
    EXPAND OFF
    Disables expanded (vertical) output.
    EXPAND
    EXPAND with no arguments shows the current value of expand setting.
    """
    toggle = SwitchCommand("EXPAND", "Expanded output")
    self.expand_enabled = toggle.execute(self.expand_enabled, parsed, self.printerr)
def do_consistency(self, parsed):
"""
CONSISTENCY [cqlsh only]
Overrides default consistency level (default level is ONE).
CONSISTENCY <level>
Sets consistency level for future requests.
Valid consistency levels:
ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_ONE, LOCAL_QUORUM, EACH_QUORUM, SERIAL and LOCAL_SERIAL.
SERIAL and LOCAL_SERIAL may be used only for SELECTs; will be rejected with updates.
CONSISTENCY
CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print 'Current consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.consistency_level])
return
self.consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print 'Consistency level set to %s.' % (level.upper(),)
def do_serial(self, parsed):
"""
SERIAL CONSISTENCY [cqlsh only]
Overrides serial consistency level (default level is SERIAL).
SERIAL CONSISTENCY <level>
Sets consistency level for future conditional updates.
Valid consistency levels:
SERIAL, LOCAL_SERIAL.
SERIAL CONSISTENCY
SERIAL CONSISTENCY with no arguments shows the current consistency level.
"""
level = parsed.get_binding('level')
if level is None:
print 'Current serial consistency level is %s.' % (cassandra.ConsistencyLevel.value_to_name[self.serial_consistency_level])
return
self.serial_consistency_level = cassandra.ConsistencyLevel.name_to_value[level.upper()]
print 'Serial consistency level set to %s.' % (level.upper(),)
def do_login(self, parsed):
    """
    LOGIN [cqlsh only]
    Changes login information without requiring restart.
    LOGIN <username> (<password>)
    Login using the specified username. If password is specified, it will be used
    otherwise, you will be prompted to enter.
    """
    username = parsed.get_binding('username')
    password = parsed.get_binding('password')
    if password is None:
        password = getpass.getpass()
    else:
        # Strip the surrounding quote characters of the CQL string literal.
        password = password[1:-1]
    auth_provider = PlainTextAuthProvider(username=username, password=password)
    # Build a fresh connection pinned to the current host so a failed
    # login leaves the existing session untouched.
    conn = Cluster(contact_points=(self.hostname,), port=self.port, cql_version=self.conn.cql_version,
                   protocol_version=self.conn.protocol_version,
                   auth_provider=auth_provider,
                   ssl_options=self.conn.ssl_options,
                   load_balancing_policy=WhiteListRoundRobinPolicy([self.hostname]),
                   control_connection_timeout=self.conn.connect_timeout,
                   connect_timeout=self.conn.connect_timeout)
    if self.current_keyspace:
        session = conn.connect(self.current_keyspace)
    else:
        session = conn.connect()
    # Copy session properties
    session.default_timeout = self.session.default_timeout
    session.row_factory = self.session.row_factory
    session.default_consistency_level = self.session.default_consistency_level
    session.max_trace_wait = self.session.max_trace_wait
    # Update after we've connected in case we fail to authenticate
    self.conn = conn
    self.auth_provider = auth_provider
    self.username = username
    self.session = session
def do_exit(self, parsed=None):
    """
    EXIT/QUIT [cqlsh only]
    Exits cqlsh.
    """
    self.stop = True
    # Only tear down the connection if this shell created it (a SOURCE
    # subshell shares its parent's connection and must not close it).
    if self.owns_connection:
        self.conn.shutdown()
do_quit = do_exit
def do_clear(self, parsed):
    """
    CLEAR/CLS [cqlsh only]
    Clears the console.
    """
    import subprocess
    command = 'cls' if is_win else 'clear'
    subprocess.call(command, shell=True)
do_cls = do_clear
def do_debug(self, parsed):
    # Undocumented command: drop into the Python debugger at the prompt.
    import pdb
    pdb.set_trace()
def get_help_topics(self):
    """Names of all documented do_* commands, minus hidden aliases."""
    topics = []
    for attr in dir(self):
        # Only commands with a docstring are considered documented.
        if attr.startswith('do_') and getattr(self, attr, None).__doc__:
            topics.append(attr[3:])
    for hidden in ('quit',):
        topics.remove(hidden)
    return topics
def columnize(self, slist, *a, **kw):
    """Columnize the given identifiers, upper-cased and sorted."""
    upper_sorted = sorted(u.upper() for u in slist)
    return cmd.Cmd.columnize(self, upper_sorted, *a, **kw)
def do_help(self, parsed):
    """
    HELP [cqlsh only]
    Gives information about cqlsh commands. To see available topics,
    enter "HELP" without any arguments. To see help on a topic,
    use "HELP <topic>".
    """
    topics = parsed.get_binding('topic', ())
    if not topics:
        # Bare HELP: list both shell-only topics and CQL topics.
        shell_topics = [t.upper() for t in self.get_help_topics()]
        self.print_topics("\nDocumented shell commands:", shell_topics, 15, 80)
        cql_topics = [t.upper() for t in cqldocs.get_help_topics()]
        self.print_topics("CQL help topics:", cql_topics, 15, 80)
        return
    for t in topics:
        if t.lower() in self.get_help_topics():
            # Shell command: its help text is the do_* method docstring.
            doc = getattr(self, 'do_' + t.lower()).__doc__
            self.stdout.write(doc + "\n")
        elif t.lower() in cqldocs.get_help_topics():
            # CQL topic: open the matching anchor of the online CQL docs.
            urlpart = cqldocs.get_help_topic(t)
            if urlpart is not None:
                url = "%s#%s" % (CASSANDRA_CQL_HTML, urlpart)
                if len(webbrowser._tryorder) == 0:
                    # No browser available (headless host): show the URL instead.
                    self.printerr("*** No browser to display CQL help. URL for help topic %s : %s" % (t, url))
                elif self.browser is not None:
                    webbrowser.get(self.browser).open_new_tab(url)
                else:
                    webbrowser.open_new_tab(url)
        else:
            self.printerr("*** No help on %s" % (t,))
def do_unicode(self, parsed):
    """
    Textual input/output
    When control characters, or other characters which can't be encoded
    in your current locale, are found in values of 'text' or 'ascii'
    types, it will be shown as a backslash escape. If color is enabled,
    any such backslash escapes will be shown in a different color from
    the surrounding text.
    Unicode code points in your data will be output intact, if the
    encoding for your locale is capable of decoding them. If you prefer
    that non-ascii characters be shown with Python-style "\\uABCD"
    escape sequences, invoke cqlsh with an ASCII locale (for example,
    by setting the $LANG environment variable to "C").
    """
    # Help-only topic: HELP UNICODE prints the docstring above; there is
    # intentionally no command body.
def do_paging(self, parsed):
    """
    PAGING [cqlsh]
    Enables or disables query paging.
    PAGING ON
    Enables query paging for all further queries.
    PAGING OFF
    Disables paging.
    PAGING
    PAGING with no arguments shows the current query paging status.
    """
    # PAGING also accepts a bare number, which enables paging at that size.
    self.use_paging, requested = SwitchCommandWithValue(
        "PAGING", "Query paging", value_type=int).execute(self.use_paging, parsed, self.printerr)
    if self.use_paging:
        if requested is not None:
            self.page_size = requested
        print("Page size: {}".format(self.page_size))
    else:
        self.page_size = self.default_page_size
def applycolor(self, text, color=None):
    """Wrap text in the given ANSI color code when color output is active."""
    if color and self.color:
        return color + text + ANSI_RESET
    return text
def writeresult(self, text, color=None, newline=True, out=None):
    # Write query output (or any value) to `out`, defaulting to the
    # current capture target; non-strings are stringified and unicode is
    # encoded with the shell's configured encoding before writing.
    if out is None:
        out = self.query_out
    # convert Exceptions, etc to text
    if not isinstance(text, (unicode, str)):
        text = unicode(text)
    if isinstance(text, unicode):
        text = text.encode(self.encoding)
    to_write = self.applycolor(text, color) + ('\n' if newline else '')
    out.write(to_write)
def flush_output(self):
    # Flush pending query output (stdout or the active CAPTURE file).
    self.query_out.flush()
def printerr(self, text, color=RED, newline=True, shownum=None):
    """Write an error message (optionally prefixed file:line) to stderr."""
    self.statement_error = True
    show_nums = self.show_line_nums if shownum is None else shownum
    if show_nums:
        text = '%s:%d:%s' % (self.stdin.name, self.lineno, text)
    self.writeresult(text, color, newline=newline, out=sys.stderr)
class SwitchCommand(object):
command = None
description = None
def __init__(self, command, desc):
self.command = command
self.description = desc
def execute(self, state, parsed, printerr):
switch = parsed.get_binding('switch')
if switch is None:
if state:
print "%s is currently enabled. Use %s OFF to disable" \
% (self.description, self.command)
else:
print "%s is currently disabled. Use %s ON to enable." \
% (self.description, self.command)
return state
if switch.upper() == 'ON':
if state:
printerr('%s is already enabled. Use %s OFF to disable.'
% (self.description, self.command))
return state
print 'Now %s is enabled' % (self.description,)
return True
if switch.upper() == 'OFF':
if not state:
printerr('%s is not enabled.' % (self.description,))
return state
print 'Disabled %s.' % (self.description,)
return False
class SwitchCommandWithValue(SwitchCommand):
    """The same as SwitchCommand except it also accepts a value in place of ON.
    This returns a tuple of the form: (SWITCH_VALUE, PASSED_VALUE)
    eg: PAGING 50 returns (True, 50)
        PAGING OFF returns (False, None)
        PAGING ON returns (True, None)
    The value_type must match for the PASSED_VALUE, otherwise it will return None.
    """

    def __init__(self, command, desc, value_type=int):
        SwitchCommand.__init__(self, command, desc)
        self.value_type = value_type

    def execute(self, state, parsed, printerr):
        on_off_result = SwitchCommand.execute(self, state, parsed, printerr)
        switch = parsed.get_binding('switch')
        try:
            # A bare value (e.g. PAGING 50) implies enabling the switch.
            passed = self.value_type(switch)
            return (True, passed)
        except (ValueError, TypeError):
            # Not a value (ON/OFF/absent): keep the plain switch result.
            return (on_off_result, None)
def option_with_default(cparser_getter, section, option, default=None):
    """Fetch a config value via the given getter; fall back to default on error."""
    try:
        value = cparser_getter(section, option)
    except ConfigParser.Error:
        return default
    return value
def raw_option_with_default(configs, section, option, default=None):
    """
    Same (almost) as option_with_default() but won't do any string interpolation.
    Useful for config values that include '%' symbol, e.g. time format string.
    """
    try:
        value = configs.get(section, option, raw=True)
    except ConfigParser.Error:
        value = default
    return value
def should_use_color():
    """Decide whether colorized output makes sense for this terminal."""
    if not sys.stdout.isatty() or os.environ.get('TERM', '') in ('dumb', ''):
        return False
    try:
        import subprocess
        tput = subprocess.Popen(['tput', 'colors'], stdout=subprocess.PIPE)
        out, _ = tput.communicate()
        if int(out.strip()) < 8:
            return False
    except (OSError, ImportError, ValueError):
        # oh well, we tried. at least we know there's a $TERM and it's
        # not "dumb".
        pass
    return True
def read_options(cmdlineargs, environment):
    # Build option defaults from the cqlshrc config file, then let
    # command-line flags, CQLSH_HOST/CQLSH_PORT, and positional arguments
    # override them (in increasing order of precedence).
    configs = ConfigParser.SafeConfigParser()
    configs.read(CONFIG_FILE)
    # A raw (non-interpolating) parser is needed for values that may
    # legitimately contain '%', e.g. passwords.
    rawconfigs = ConfigParser.RawConfigParser()
    rawconfigs.read(CONFIG_FILE)
    optvalues = optparse.Values()
    optvalues.username = option_with_default(configs.get, 'authentication', 'username')
    optvalues.password = option_with_default(rawconfigs.get, 'authentication', 'password')
    optvalues.keyspace = option_with_default(configs.get, 'authentication', 'keyspace')
    optvalues.browser = option_with_default(configs.get, 'ui', 'browser', None)
    optvalues.completekey = option_with_default(configs.get, 'ui', 'completekey',
                                               DEFAULT_COMPLETEKEY)
    optvalues.color = option_with_default(configs.getboolean, 'ui', 'color')
    optvalues.time_format = raw_option_with_default(configs, 'ui', 'time_format',
                                                    DEFAULT_TIMESTAMP_FORMAT)
    optvalues.nanotime_format = raw_option_with_default(configs, 'ui', 'nanotime_format',
                                                        DEFAULT_NANOTIME_FORMAT)
    optvalues.date_format = raw_option_with_default(configs, 'ui', 'date_format',
                                                    DEFAULT_DATE_FORMAT)
    optvalues.float_precision = option_with_default(configs.getint, 'ui', 'float_precision',
                                                    DEFAULT_FLOAT_PRECISION)
    optvalues.double_precision = option_with_default(configs.getint, 'ui', 'double_precision',
                                                     DEFAULT_DOUBLE_PRECISION)
    optvalues.field_size_limit = option_with_default(configs.getint, 'csv', 'field_size_limit', csv.field_size_limit())
    optvalues.max_trace_wait = option_with_default(configs.getfloat, 'tracing', 'max_trace_wait',
                                                   DEFAULT_MAX_TRACE_WAIT)
    optvalues.timezone = option_with_default(configs.get, 'ui', 'timezone', None)
    optvalues.debug = False
    optvalues.file = None
    optvalues.ssl = option_with_default(configs.getboolean, 'connection', 'ssl', DEFAULT_SSL)
    optvalues.no_compact = False
    optvalues.encoding = option_with_default(configs.get, 'ui', 'encoding', UTF8)
    optvalues.tty = option_with_default(configs.getboolean, 'ui', 'tty', sys.stdin.isatty())
    optvalues.protocol_version = option_with_default(configs.getint, 'protocol', 'version', None)
    optvalues.cqlversion = option_with_default(configs.get, 'cql', 'version', None)
    optvalues.connect_timeout = option_with_default(configs.getint, 'connection', 'timeout', DEFAULT_CONNECT_TIMEOUT_SECONDS)
    optvalues.request_timeout = option_with_default(configs.getint, 'connection', 'request_timeout', DEFAULT_REQUEST_TIMEOUT_SECONDS)
    optvalues.execute = None
    optvalues.decompression = None
    # Command-line flags override the cqlshrc-derived defaults above.
    (options, arguments) = parser.parse_args(cmdlineargs, values=optvalues)
    hostname = option_with_default(configs.get, 'connection', 'hostname', DEFAULT_HOST)
    port = option_with_default(configs.get, 'connection', 'port', DEFAULT_PORT)
    try:
        options.connect_timeout = int(options.connect_timeout)
    except ValueError:
        parser.error('"%s" is not a valid connect timeout.' % (options.connect_timeout,))
        options.connect_timeout = DEFAULT_CONNECT_TIMEOUT_SECONDS
    try:
        options.request_timeout = int(options.request_timeout)
    except ValueError:
        parser.error('"%s" is not a valid request timeout.' % (options.request_timeout,))
        options.request_timeout = DEFAULT_REQUEST_TIMEOUT_SECONDS
    # Environment beats config file; positional args beat the environment.
    hostname = environment.get('CQLSH_HOST', hostname)
    port = environment.get('CQLSH_PORT', port)
    if len(arguments) > 0:
        hostname = arguments[0]
    if len(arguments) > 1:
        port = arguments[1]
    # Batch modes (--file / --execute) disable interactive (tty) behavior.
    if options.file or options.execute:
        options.tty = False
    if options.execute and not options.execute.endswith(';'):
        options.execute += ';'
    if optvalues.color in (True, False):
        options.color = optvalues.color
    else:
        # Color was not set explicitly: disable it for file input,
        # otherwise probe the terminal's capabilities.
        if options.file is not None:
            options.color = False
        else:
            options.color = should_use_color()
    if options.cqlversion is not None:
        options.cqlversion, cqlvertup = full_cql_version(options.cqlversion)
        if cqlvertup[0] < 3:
            parser.error('%r is not a supported CQL version.' % options.cqlversion)
    options.cqlmodule = cql3handling
    try:
        port = int(port)
    except ValueError:
        parser.error('%r is not a valid port number.' % port)
    return options, hostname, port
def setup_cqlruleset(cqlmodule):
    # Install the CQL grammar from the given module as the global ruleset,
    # then graft on cqlsh-specific syntax rules and tab-completers.
    global cqlruleset
    cqlruleset = cqlmodule.CqlRuleSet
    cqlruleset.append_rules(cqlshhandling.cqlsh_extra_syntax_rules)
    for rulename, termname, func in cqlshhandling.cqlsh_syntax_completers:
        cqlruleset.completer_for(rulename, termname)(func)
    cqlruleset.commands_end_with_newline.update(cqlshhandling.my_commands_ending_with_newline)
def setup_cqldocs(cqlmodule):
    # Point the global HELP-topic provider at the given CQL module's docs.
    global cqldocs
    cqldocs = cqlmodule.cqldocs
def init_history():
    """Load the readline history file and tune completion delimiters.

    Removes the single-quote from readline's completer delimiters (so a
    quoted identifier completes as one token) and adds '.' (so
    keyspace.table names split at the dot).
    """
    if readline is not None:
        try:
            readline.read_history_file(HISTORY)
        except IOError:
            # No history file yet (or it is unreadable) -- start fresh.
            pass
        delims = readline.get_completer_delims()
        # Bug fix: str.replace() returns a new string, it does not mutate
        # in place. The original discarded the result, so "'" was never
        # actually removed from the delimiter set.
        delims = delims.replace("'", "")
        delims += '.'
        readline.set_completer_delims(delims)
def save_history():
    """Persist readline history; ignore I/O errors (e.g. unwritable $HOME)."""
    if readline is None:
        return
    try:
        readline.write_history_file(HISTORY)
    except IOError:
        pass
def main(options, hostname, port):
    """Entry point: install rulesets/docs/history, resolve the display
    timezone, construct a Shell connected to hostname:port and run its
    command loop. Exits the process via sys.exit on connection or
    input-file errors; batch mode exits 2 on a failed statement.
    """
    setup_cqlruleset(options.cqlmodule)
    setup_cqldocs(options.cqlmodule)
    init_history()
    csv.field_size_limit(options.field_size_limit)

    # Input source: None means interactive; otherwise open --file with its
    # detected encoding and skip past the BOM.
    if options.file is None:
        stdin = None
    else:
        try:
            encoding, bom_size = get_file_encoding_bomsize(options.file)
            stdin = codecs.open(options.file, 'r', encoding)
            stdin.seek(bom_size)
        except IOError, e:
            sys.exit("Can't open %r: %s" % (options.file, e))

    if options.debug:
        sys.stderr.write("Using CQL driver: %s\n" % (cassandra,))
        sys.stderr.write("Using connect timeout: %s seconds\n" % (options.connect_timeout,))
        sys.stderr.write("Using '%s' encoding\n" % (options.encoding,))
        sys.stderr.write("Using ssl: %s\n" % (options.ssl,))

    # create timezone based on settings, environment or auto-detection.
    # Note: a valid TZ environment variable overrides the cqlshrc setting,
    # since the second try runs after the first.
    timezone = None
    if options.timezone or 'TZ' in os.environ:
        try:
            import pytz
            if options.timezone:
                try:
                    timezone = pytz.timezone(options.timezone)
                except Exception:
                    sys.stderr.write("Warning: could not recognize timezone '%s' specified in cqlshrc\n\n" % (options.timezone))
            if 'TZ' in os.environ:
                try:
                    timezone = pytz.timezone(os.environ['TZ'])
                except Exception:
                    sys.stderr.write("Warning: could not recognize timezone '%s' from environment value TZ\n\n" % (os.environ['TZ']))
        except ImportError:
            sys.stderr.write("Warning: Timezone defined and 'pytz' module for timezone conversion not installed. Timestamps will be displayed in UTC timezone.\n\n")

    # try auto-detect timezone if tzlocal is installed
    if not timezone:
        try:
            from tzlocal import get_localzone
            timezone = get_localzone()
        except ImportError:
            # we silently ignore and fallback to UTC unless a custom timestamp format (which likely
            # does contain a TZ part) was specified
            if options.time_format != DEFAULT_TIMESTAMP_FORMAT:
                sys.stderr.write("Warning: custom timestamp format specified in cqlshrc, but local timezone could not be detected.\n" +
                                 "Either install Python 'tzlocal' module for auto-detection or specify client timezone in your cqlshrc.\n\n")

    try:
        shell = Shell(hostname,
                      port,
                      color=options.color,
                      username=options.username,
                      password=options.password,
                      stdin=stdin,
                      tty=options.tty,
                      completekey=options.completekey,
                      browser=options.browser,
                      protocol_version=options.protocol_version,
                      cqlver=options.cqlversion,
                      keyspace=options.keyspace,
                      no_compact=options.no_compact,
                      display_timestamp_format=options.time_format,
                      display_nanotime_format=options.nanotime_format,
                      display_date_format=options.date_format,
                      display_float_precision=options.float_precision,
                      display_double_precision=options.double_precision,
                      display_timezone=timezone,
                      max_trace_wait=options.max_trace_wait,
                      ssl=options.ssl,
                      single_statement=options.execute,
                      request_timeout=options.request_timeout,
                      connect_timeout=options.connect_timeout,
                      encoding=options.encoding,
                      decompression=options.decompression)
    except KeyboardInterrupt:
        sys.exit('Connection aborted.')
    except CQL_ERRORS, e:  # Python 2 'except Type, name' syntax, as used file-wide
        sys.exit('Connection error: %s' % (e,))
    except VersionNotSupported, e:
        sys.exit('Unsupported CQL version: %s' % (e,))
    if options.debug:
        shell.debug = True

    shell.cmdloop()
    save_history()

    # Batch mode (-f/--execute): a statement error yields exit status 2.
    batch_mode = options.file or options.execute
    if batch_mode and shell.statement_error:
        sys.exit(2)
# always call this regardless of module name: when a sub-process is spawned
# on Windows then the module name is not __main__, see CASSANDRA-9304
insert_driver_hooks()

if __name__ == '__main__':
    main(*read_options(sys.argv[1:], os.environ))

# vim: set ft=python et ts=4 sw=4 :
|
import { OakActionAndRequest } from './model';
import { OakSimulator } from './simulator';
import { OakActionExperiment } from './simulator-model';
/**
 * Bundles a simulator with the recorded action/request pairs and the
 * action experiments collected against it.
 */
export class OakExperiment {
  // Field initializers replace the explicit constructor: a fresh simulator
  // and empty recording buffers per instance.
  simulator: OakSimulator = new OakSimulator();
  actionAndrequests: OakActionAndRequest[] = [];
  actionExperiment: OakActionExperiment[] = [];
}
|
#!/bin/bash
# Ask the user for an income figure and print the tax owed.
# Brackets: below 5000 -> no tax; 5000..30000 -> 10%; above 30000 -> 20%.

echo "Input your income (only number, no currency!): "
read -r income

# Reject anything that is not a non-negative whole number; the original
# script passed raw input straight into a numeric test, which errors out
# (or misbehaves) on empty or non-numeric input.
if ! [[ $income =~ ^[0-9]+$ ]]; then
  echo "Error: income must be a non-negative whole number." >&2
  exit 1
fi

# Calculate and display taxes depending on the income bracket.
if (( income < 5000 )); then
  echo "No tax for you!"
elif (( income <= 30000 )); then
  tax=$(( income / 10 )); echo "Your tax is: $tax"
else
  tax=$(( income / 5 )); echo "Your tax is: $tax"
fi
exit 0
|
import {observable} from "mobx";
/** Chess-board state for the knight demo: tracks and validates knight moves. */
export class Game {
  @observable knightPosition = [1, 7];

  /** Unconditionally place the knight at the given square. */
  moveKnight(toX: number, toY: number): void {
    this.knightPosition = [toX, toY];
  }

  /** True when (toX, toY) is a legal knight move from the current square. */
  canMoveKnight(toX: number, toY: number) {
    const [fromX, fromY] = this.knightPosition;
    const dx = Math.abs(toX - fromX);
    const dy = Math.abs(toY - fromY);
    // A knight moves 2 squares on one axis and 1 on the other.
    return (dx === 2 && dy === 1) || (dx === 1 && dy === 2);
  }
}
|
package com.katus.data;
import com.katus.exception.InvalidParamException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
/**
* @author <NAME>
* @version 1.0, 2021-10-09
*/
/**
 * Decorates a base {@link Record} with regression results: coefficients
 * (beta, intercept first), a fitted prediction, and an R-square value.
 * x/y accessors delegate to the wrapped record.
 */
public abstract class AbstractResultRecordWithInfo<R extends Record> implements Record, Coefficient, Prediction, RSquare, Cloneable {
    private static final Logger logger = LoggerFactory.getLogger(AbstractResultRecordWithInfo.class);

    /** Wrapped base record supplying x/y values. */
    protected R record;
    /** Regression coefficients; index 0 is the intercept. */
    protected double[] beta = new double[0];
    /** Fitted value; Constants.NO_DATA until predict() runs. */
    protected double prediction = Constants.NO_DATA;
    /** Goodness-of-fit; Constants.NO_DATA until set. */
    protected double rSquare = Constants.NO_DATA;

    public abstract String put();

    /** Recomputes the fitted value: beta[0] + sum over i of beta[i+1] * x[i]. */
    public void predict() {
        double fitted = beta(0);
        for (int i = 0; i < xSize(); i++) {
            fitted += beta(i + 1) * x(i);
        }
        this.prediction = fitted;
    }

    /** Replaces the coefficients, then recomputes the fitted value. */
    public void predict(double[] beta) {
        setBeta(beta);
        predict();
    }

    public void setBaseRecord(R record) {
        this.record = record;
    }

    public R getBaseRecord() {
        return record;
    }

    @Override
    public double[] beta() {
        return beta;
    }

    @Override
    public double beta(int index) {
        requireBetaIndex(index);
        return beta[index];
    }

    @Override
    public int betaSize() {
        return beta.length;
    }

    @Override
    public void setBeta(int index, double beta) {
        requireBetaIndex(index);
        this.beta[index] = beta;
    }

    @Override
    public void setBeta(double[] beta) {
        // One coefficient per predictor plus the intercept.
        if (beta.length != xSize() + 1) {
            logger.error("beta size is wrong");
            throw new InvalidParamException();
        }
        this.beta = beta;
    }

    /** Shared bounds check for coefficient access by index. */
    private void requireBetaIndex(int index) {
        if (index < 0 || index >= betaSize()) {
            logger.error("index of beta is out of range");
            throw new InvalidParamException();
        }
    }

    @Override
    public double y() {
        return record.y();
    }

    @Override
    public double x(int index) {
        return record.x(index);
    }

    @Override
    public double[] x() {
        return record.x();
    }

    @Override
    public double prediction() {
        return prediction;
    }

    @Override
    public double rSquare() {
        return rSquare;
    }

    @Override
    public void setRSquare(double rSquare) {
        this.rSquare = rSquare;
    }

    @Override
    @SuppressWarnings("unchecked")
    public AbstractResultRecordWithInfo<R> clone() throws CloneNotSupportedException {
        AbstractResultRecordWithInfo<R> copy = (AbstractResultRecordWithInfo<R>) super.clone();
        // Copy the coefficient array so clones do not share mutable state.
        copy.beta = Arrays.copyOf(beta(), betaSize());
        return copy;
    }
}
|
<gh_stars>0
/*
* ED_YES_NO
* English Data table for Yes/No values
* single table that contain "Yes" and "No" to translate our 0/1 flags
*/
create table ed_yes_no(
  id number(1) not null,
  descr varchar2(10) not null,
  constraint ed_yes_no_pk
    primary key(id)
    using index
    pctfree 10
    tablespace system  -- NOTE(review): index placed in SYSTEM tablespace; confirm intended
) tablespace system;   -- NOTE(review): application data in SYSTEM tablespace is unusual; verify

-- Seed the two flag translations.
insert into ed_yes_no values (0, 'No');
insert into ed_yes_no values (1, 'Yes');

grant select on ed_yes_no to public;
/*
* ED_DATA_LANGUAGE
 * English Data table for languages used for data naming in Marco Polo
 * maps each language id to its selection priority and a free-text description
*/
create table ed_data_language(
  id number not null,        -- language id (matches root tables' s_id values)
  priority number not null,  -- selection priority; 1 is picked first by the em_* builds
  about varchar2(1000),      -- free-text description
  constraint ed_data_language_pk
    primary key(id)
    using index
    pctfree 10
    tablespace system
)
tablespace system;

insert into ed_data_language values (1,1,'german, used by amexco and for demos');
insert into ed_data_language values (25,2,'english, whished to be used by someone');
insert into ed_data_language values (34,3,'french...');

grant select on dwh.ed_data_language to public;
/*
* ET_LANGUAGE
* English Tool table for Languages
* single table that contain the language list
*/
-- Language list with English display text (b.s_id = 25, b.typ = 1) and
-- Yes/No flags for where each language can be used.
create table et_language as
select s.s_id language_id, b.bez language
     , s.sysbez shortcut
     , decode(s.dialog, 1, 'Yes', 'No') is_dialog_language
     , decode(s.output, 1, 'Yes', 'No') is_output_language  -- fixed: was lowercase 'no', inconsistent with every other flag column
     , decode(s.gui, 1, 'Yes', 'No') is_interface_language
from root.sprachen s, root.bezeichnung b
where s.bez_id = b.bez_id
  and b.typ = 1
  and b.s_id = 25;

grant select on dwh.et_language to public;
/*
* ET_FOLDER_STATE
* English Tool table for Folder State
* single table that contain the folder state list
*/
-- Folder state list in English (b.s_id = 25), drawn from two status types
-- (1108, 1112); "state" combines the zero-padded numeric id and its name.
create table et_folder_state as
select unique s.sta_id state_id
     , s.sta shortcut
     , b.bez name
     , to_char(s.sta_id,'99990') || ' - ' || b.bez state
from root.bezeichnung b, root.status_werte s
where b.s_id = 25
  and b.typ in (1108, 1112)
  and s.typ = b.typ
  and b.bez_id = s.bez_id
  and s.sta_id > 0;

grant select on dwh.et_folder_state to public;
/*
* EM_CLIENT
* English Main table for Clients
* important table that contain all infos about clients
*/
-- Denormalised client dimension: one row per person (main address only,
-- a.haupt_flag = 1) with account, address, and decoded lookup columns.
-- "(+)=" is Oracle's old outer-join marker: lookup rows may be absent and
-- the client row is still kept. Most lookups are hard-wired to English
-- (s_id = 25); the client_type text uses the priority-1 ed_data_language.
create table em_client as
select unique p.pers_id person_id
     , nvl(ptype.bez, null) person_type
     , nvl(ctypedesc.bez, null) client_type
     , p.name name
     , nvl(lang.bez, null) language
     , nvl(curr.krzl, null) currency_code
     , nvl(currdesc.bez, null) currency
     , k.konto account
     , k.leiter leiter_id
     , k.attr1 code, k.attr2 attribute
     , nvl(atype.bez, null) address_type
     , a.ans title, a.adr1 address_1, a.adr2 address_2, a.adr3 address_3
     , a.PLZ ZIP, a.ort city, a.kant state
     , nvl(coun.krzl, null) country_code, nvl(coundesc.bez, null) country
     , nvl(stype.bez, null) sending_mode
     , k.tel phone, k.fax fax, k.handy mobile, k.tlx telex, k.email email, k.web web
     , a.tel phone_2, a.fax fax_2, a.tlx mobile_2, a.email email_2, a.web web_2
     , decode(haupt_flag, 1, 'Yes', 'No') is_main_address
     , decode(rch_flag, 1, 'Yes', 'No') is_invoicing_address
     , decode(mail_flag, 1, 'Yes', 'No') is_mailing_address
from ed_data_language d
   , root.personen p
   , root.persadr a
   , root.kunden k
   , root.bezeichnung ptype
   , root.bezeichnung lang
   , root.bezeichnung atype
   , root.land coun, root.bezeichnung coundesc
   , root.bezeichnung stype
   , root.kunden_typ ctype, root.bezeichnung ctypedesc
   , root.waehrung curr, root.bezeichnung currdesc
where d.priority = 1
  and k.pers_id = p.pers_id
  and a.pers_id = p.pers_id
  and a.haupt_flag = 1  -- main address only
  and atype.bez_id (+)= a.adr_typ and atype.bez_id (+)> 0 and atype.typ (+)= 1127 and atype.s_id (+)= 25
  and lang.bez_id (+)= p.s_id and lang.bez_id (+)> 0 and lang.typ (+)= 1 and lang.s_id (+)= 25
  and ptype.bez_id (+)= p.pers_typ and ptype.bez_id (+)> 0 and ptype.typ (+)= 1104 and ptype.s_id (+)= 25
  and coun.land_id (+)= a.land_id and coun.bez_id (+)> 0
  and coundesc.bez_id (+)= coun.bez_id and coundesc.bez_id (+)> 0 and coundesc.typ (+)= 10 and coundesc.s_id (+)= 25
  and stype.bez_id (+)= a.sta_id and stype.bez_id (+)> 0 and stype.typ (+)= 1111 and stype.s_id (+)= 25
  and ctype.k_typ_id (+)= k.k_typ_id and ctype.k_typ_id (+)> 0
  and ctypedesc.bez_id(+)= ctype.bez_id and ctypedesc.bez_id(+)> 0 and ctypedesc.typ(+)= 24 and ctypedesc.s_id = d.id
  and curr.whr_id (+)= k.whr_id and curr.whr_id (+)> 0
  and currdesc.bez_id (+)= curr.bez_id and currdesc.bez_id (+)> 0 and currdesc.typ (+)= 12 and currdesc.s_id (+)= 25;

alter table dwh.em_client add (
  constraint em_client_pk primary key (person_id) using index tablespace system );

grant select on dwh.em_client to public;
/*
* EM_CONTINGENT
* English Main table for Contingent
* important table that contain all infos about contingents
*/
-- Contingent fact table: host contingent -> season -> service part ->
-- contingent -> per-category place counts. Lookup text is resolved via the
-- priority-1 language in ed_data_language.
create table em_contingent as
select h.hostkont_id host_contingent_id, hdesc.bez host_contingent
     , s.sai_id season_id, sdesc.bez season
     , part.dl_id service_part_id, partdesc.bez service_part
     , part.dla_id offer_id
     , root.todate(part.a_zeit) starting_date
     , root.todate(part.e_zeit) end_date
     , kid client_id  -- NOTE(review): "kid" carries no table alias; confirm which table supplies it
     , c.kont_id contingent_id, cdesc.bez contingent
     , c.ref contingent_reference
     , d.kat_id category_id, catdesc.bez category
     , d.anz existing_places, d.busy occupied_places, d.minanz minimal_places
from ed_data_language dlang
   , root.host_kont h, root.bezeichnung hdesc
   , root.saison s, root.bezeichnung sdesc
   , root.dlg_parts part, root.bezeichnung partdesc
   , root.kontingent c, root.bezeichnung cdesc
   , root.kont_detail d
   , root.kategorie cat, root.bezeichnung catdesc
where dlang.priority = 1
  and d.kont_id = c.kont_id
  and d.hostkont_id = -1  -- detail rows not tied directly to a host contingent
  and h.hostkont_id = c.hostkont_id
  and hdesc.bez_id = h.bez_id and hdesc.typ = 38 and hdesc.s_id = dlang.id
  and part.dl_id = h.dl_id
  and partdesc.bez_id = part.bez_id and partdesc.typ = 26 and partdesc.s_id = dlang.id
  and s.sai_id = h.sai_id
  and sdesc.bez_id = s.bez_id and sdesc.typ = 9 and sdesc.s_id = dlang.id
  and cdesc.bez_id = c.bez_id and cdesc.typ = 39 and cdesc.s_id = dlang.id
  and c.hostkont_id = h.hostkont_id
  and cat.kat_id = d.kat_id and d.kat_id > 0
  and catdesc.bez_id = cat.bez_id and catdesc.typ = 16 and catdesc.s_id = dlang.id;

alter table dwh.em_contingent add (
  constraint em_contingent_pk primary key (contingent_id, category_id)
  using index tablespace system );

alter table em_contingent add
(
  constraint em_contingent_em_client_fk
  foreign key( client_id)
  references dwh.em_client( person_id)
);

grant select on dwh.em_contingent to public;
|
<gh_stars>10-100
# Null-object style base presenter: every label/status accessor returns an
# empty string, so subclasses only override the pieces they need.
class BiddingStatusPresenter::Base
  attr_reader :auction

  def initialize(auction)
    @auction = auction
  end

  # Blank defaults, generated in one place; each may be overridden.
  %i[
    start_label
    deadline_label
    relative_time
    label_class
    label
    tag_data_value_status
    tag_data_label_2
    tag_data_value_2
  ].each do |accessor|
    define_method(accessor) { '' }
  end
end
|
import dotty.tools.sbtplugin.DottyPlugin.autoImport._
import org.portablescala.sbtplatformdeps.PlatformDepsPlugin.autoImport._
import sbt._
import sbt.Keys._
/**
 * Central sbt dependency definitions for the build.
 *
 * Each value is wrapped in Def.setting so scalaVersion.value is in scope;
 * withDottyCompat rewrites a Scala 2.13 artifact coordinate for use from
 * Scala 3 (Dotty). The %%% operator selects the platform-specific artifact.
 */
object Dependencies {

  val laminar: Def.Initialize[Seq[ModuleID]] = Def.setting {
    Seq(
      ("com.raquo" %%% "laminar" % DependencyVersions.laminar).withDottyCompat(scalaVersion.value)
    )
  }

  // Test-only string diffing.
  val stringdiff: Def.Initialize[Seq[ModuleID]] = Def.setting {
    Seq(
      ("app.tulz" %%% "stringdiff" % DependencyVersions.stringdiff % Test).withDottyCompat(scalaVersion.value)
    )
  }

  // Test-only DOM test utilities.
  val domtestutils: Def.Initialize[Seq[ModuleID]] = Def.setting {
    Seq(
      ("com.raquo" %%% "domtestutils" % DependencyVersions.domtestutils % Test).withDottyCompat(scalaVersion.value)
    )
  }

  val cats: Def.Initialize[Seq[ModuleID]] = Def.setting {
    Seq(
      ("org.typelevel" %%% "cats-core" % DependencyVersions.cats).withDottyCompat(scalaVersion.value)
    )
  }

  // NOTE(review): circe's version is read as a setting (.value), unlike the
  // plain constants used above — confirm that asymmetry is intentional.
  val circe: Def.Initialize[Seq[ModuleID]] = Def.setting {
    Seq(
      ("io.circe" %%% "circe-core" % DependencyVersions.circe.value).withDottyCompat(scalaVersion.value),
      ("io.circe" %%% "circe-parser" % DependencyVersions.circe.value).withDottyCompat(scalaVersion.value)
    )
  }

  // No Dotty-compat shim applied to the remaining entries.
  val frontroute: Def.Initialize[Seq[ModuleID]] = Def.setting {
    Seq(
      "io.frontroute" %%% "frontroute" % DependencyVersions.frontroute
    )
  }

  val `embedded-files-macro`: Def.Initialize[Seq[ModuleID]] = Def.setting {
    Seq(
      "com.yurique" %%% "embedded-files-macro" % DependencyVersions.`embedded-files-macro`
    )
  }

  val sourcecode: Def.Initialize[Seq[ModuleID]] = Def.setting {
    Seq(
      "com.lihaoyi" %%% "sourcecode" % DependencyVersions.sourcecode
    )
  }
}
|
#!/usr/bin/env bash
set -e
here=$(dirname "$0")
SOLANA_ROOT="$(cd "$here"/..; pwd)"
# shellcheck source=net/common.sh
source "$here"/common.sh
# Prints the help screen (optionally prefixed with an error) and exits:
# status 0 when called bare, 1 when called with an error message.
usage() {
  exitcode=0
  if [[ -n "$1" ]]; then
    exitcode=1
    echo "Error: $*"
  fi
  cat <<EOF
usage: $0 [start|stop|restart|sanity] [command-specific options]

Operate a configured testnet

 start     - Start the network
 sanity    - Sanity check the network
 stop      - Stop the network
 restart   - Shortcut for stop then start
 logs      - Fetch remote logs from each network node
 startnode - Start an individual node (previously stopped with stopNode)
 stopnode  - Stop an individual node
 update    - Deploy a new software update to the cluster

 start-specific options:
   -T [tarFilename]            - Deploy the specified release tarball
   -t edge|beta|stable|vX.Y.Z  - Deploy the latest tarball release for the
                                 specified release channel (edge|beta|stable) or release tag
                                 (vX.Y.Z)
   -r / --skip-setup           - Reuse existing node/ledger configuration from a
                                 previous |start| (ie, don't run ./multinode-demo/setup.sh).
   -d / --debug                - Build/deploy the testnet with debug binaries
   -c clientType=numClients=extraArgs - Number of clientTypes to start. This options can be specified
                                 more than once. Defaults to bench-tps for all clients if not
                                 specified.
                                 Valid client types are:
                                     idle
                                     bench-tps
                                     bench-exchange
                                 User can optionally provide extraArgs that are transparently
                                 supplied to the client program as command line parameters.
                                 For example,
                                     -c bench-tps=2="--tx_count 25000"
                                 This will start 2 bench-tps clients, and supply "--tx_count 25000"
                                 to the bench-tps client.
   --client-delay-start        - Number of seconds to wait after validators have finished starting before starting client programs
                                 (default: $clientDelayStart)
   -n NUM_VALIDATORS           - Number of validators to apply command to.
   --gpu-mode GPU_MODE         - Specify GPU mode to launch validators with (default: $gpuMode).
                                 MODE must be one of
                                     on - GPU *required*, any vendor *
                                     off - No GPU, CPU-only
                                     auto - Use GPU if available, any vendor *
                                     cuda - GPU *required*, Nvidia CUDA only
                                     * Currently, Nvidia CUDA is the only supported GPU vendor
   --hashes-per-tick NUM_HASHES|sleep|auto
                               - Override the default --hashes-per-tick for the cluster
   --no-airdrop                - If set, disables airdrops. Nodes must be funded in genesis config when airdrops are disabled.
   --faucet-lamports NUM_LAMPORTS_TO_MINT
                               - Override the default 500000000000000000 lamports minted in genesis
   --internal-nodes-stake-lamports NUM_LAMPORTS_PER_NODE
                               - Amount to stake internal nodes.
   --internal-nodes-lamports NUM_LAMPORTS_PER_NODE
                               - Amount to fund internal nodes in genesis config.
   --external-accounts-file FILE_PATH
                               - A YML file with a list of account pubkeys and corresponding lamport balances
                                 in genesis config for external nodes
   --no-snapshot-fetch         - If set, disables booting validators from a snapshot
   --skip-poh-verify           - If set, validators will skip verifying
                                 the ledger they already have saved to disk at
                                 boot (results in a much faster boot)
   --no-deploy                 - Don't deploy new software, use the
                                 existing deployment
   --no-build                  - Don't build new software, deploy the
                                 existing binaries
   --deploy-if-newer           - Only deploy if newer software is
                                 available (requires -t or -T)
   --use-move                  - Build the move-loader-program and add it to the cluster
   --operating-mode development|softlaunch
                               - Specify whether or not to launch the cluster in "development" mode with all features enabled at epoch 0,
                                 or "softlaunch" mode with some features disabled at epoch 0 (default: development)

 sanity/start-specific options:
   -F                          - Discard validator nodes that didn't bootup successfully
   -o noInstallCheck           - Skip solana-install sanity
   -o rejectExtraNodes         - Require the exact number of nodes

 stop-specific options:
   none

 logs-specific options:
   none

 netem-specific options:
   --config                    - Netem configuration (as a double quoted string)
   --partition                 - Percentage of network that should be configured with netem
   --config-file               - Configuration file for partition and netem configuration
   --netem-cmd                 - Optional command argument to netem. Default is "add". Use "cleanup" to remove rules.

 update-specific options:
   --platform linux|osx|windows - Deploy the tarball using 'solana-install deploy ...' for the
                                  given platform (multiple platforms may be specified)
                                  (-t option must be supplied as well)

 startnode/stopnode-specific options:
   -i [ip address]             - IP Address of the node to start or stop

 Note: if RUST_LOG is set in the environment it will be propagated into the
       network nodes.
EOF
  exit $exitcode
}
# ---- Option state, with defaults ----
releaseChannel=
deployMethod=local        # one of: local | tar | skip (see prepare_deploy)
deployIfNewer=
sanityExtraArgs=
skipSetup=false
updatePlatforms=
nodeAddress=
numIdleClients=0
numBenchTpsClients=0
numBenchExchangeClients=0
benchTpsExtraArgs=
benchExchangeExtraArgs=
failOnValidatorBootupFailure=true
genesisOptions=
numValidatorsRequested=
externalPrimordialAccountsFile=
remoteExternalPrimordialAccountsFile=
internalNodesStakeLamports=
internalNodesLamports=
maybeNoSnapshot=""
maybeLimitLedgerSize=""
maybeSkipLedgerVerify=""
maybeDisableAirdrops=""
debugBuild=false
doBuild=true
gpuMode=auto
maybeUseMove=""
netemPartition=""
netemConfig=""
netemConfigFile=""
netemCommand="add"
clientDelayStart=0

# First positional argument is the sub-command; everything after it is options.
command=$1
[[ -n $command ]] || usage
shift
# First pass: consume long (--foo) options in place; collect everything else
# into shortArgs for the getopts pass below.
shortArgs=()
while [[ -n $1 ]]; do
  if [[ ${1:0:2} = -- ]]; then
    if [[ $1 = --hashes-per-tick ]]; then
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    elif [[ $1 = --slots-per-epoch ]]; then
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    elif [[ $1 = --target-lamports-per-signature ]]; then
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    elif [[ $1 = --faucet-lamports ]]; then
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    elif [[ $1 = --operating-mode ]]; then
      # Validate before forwarding to the genesis options.
      case "$2" in
      development|softlaunch)
        ;;
      *)
        echo "Unexpected operating mode: \"$2\""
        exit 1
        ;;
      esac
      genesisOptions="$genesisOptions $1 $2"
      shift 2
    elif [[ $1 = --no-snapshot-fetch ]]; then
      maybeNoSnapshot="$1"
      shift 1
    elif [[ $1 = --deploy-if-newer ]]; then
      deployIfNewer=1
      shift 1
    elif [[ $1 = --no-deploy ]]; then
      deployMethod=skip
      shift 1
    elif [[ $1 = --no-build ]]; then
      doBuild=false
      shift 1
    elif [[ $1 = --limit-ledger-size ]]; then
      maybeLimitLedgerSize="$1"
      shift 1
    elif [[ $1 = --skip-poh-verify ]]; then
      maybeSkipLedgerVerify="$1"
      shift 1
    elif [[ $1 = --skip-setup ]]; then
      skipSetup=true
      shift 1
    elif [[ $1 = --platform ]]; then
      updatePlatforms="$updatePlatforms $2"
      shift 2
    elif [[ $1 = --internal-nodes-stake-lamports ]]; then
      internalNodesStakeLamports="$2"
      shift 2
    elif [[ $1 = --internal-nodes-lamports ]]; then
      internalNodesLamports="$2"
      shift 2
    elif [[ $1 = --external-accounts-file ]]; then
      externalPrimordialAccountsFile="$2"
      remoteExternalPrimordialAccountsFile=/tmp/external-primordial-accounts.yml
      shift 2
    elif [[ $1 = --no-airdrop ]]; then
      maybeDisableAirdrops="$1"
      shift 1
    elif [[ $1 = --debug ]]; then
      debugBuild=true
      shift 1
    elif [[ $1 = --use-move ]]; then
      maybeUseMove=$1
      shift 1
    elif [[ $1 = --partition ]]; then
      netemPartition=$2
      shift 2
    elif [[ $1 = --config ]]; then
      netemConfig=$2
      shift 2
    elif [[ $1 == --config-file ]]; then
      netemConfigFile=$2
      shift 2
    elif [[ $1 == --netem-cmd ]]; then
      netemCommand=$2
      shift 2
    elif [[ $1 = --gpu-mode ]]; then
      gpuMode=$2
      case "$gpuMode" in
      on|off|auto|cuda)
        ;;
      *)
        echo "Unexpected GPU mode: \"$gpuMode\""
        exit 1
        ;;
      esac
      shift 2
    elif [[ $1 == --client-delay-start ]]; then
      clientDelayStart=$2
      shift 2
    else
      usage "Unknown long option: $1"
    fi
  else
    shortArgs+=("$1")
    shift
  fi
done
# Second pass: standard short options collected in shortArgs above.
while getopts "h?T:t:o:f:rc:Fn:i:d" opt "${shortArgs[@]}"; do
  case $opt in
  h | \?)
    usage
    ;;
  T)
    tarballFilename=$OPTARG
    [[ -r $tarballFilename ]] || usage "File not readable: $tarballFilename"
    deployMethod=tar
    ;;
  t)
    case $OPTARG in
    edge|beta|stable|v*)
      releaseChannel=$OPTARG
      deployMethod=tar
      ;;
    *)
      usage "Invalid release channel: $OPTARG"
      ;;
    esac
    ;;
  n)
    numValidatorsRequested=$OPTARG
    ;;
  r)
    skipSetup=true
    ;;
  o)
    case $OPTARG in
    rejectExtraNodes|noInstallCheck)
      sanityExtraArgs="$sanityExtraArgs -o $OPTARG"
      ;;
    *)
      usage "Unknown option: $OPTARG"
      ;;
    esac
    ;;
  c)
    # Parse a "clientType=numClients[=extraArgs]" tuple into the per-type
    # counters and extra-arg strings.
    getClientTypeAndNum() {
      if ! [[ $OPTARG == *'='* ]]; then
        echo "Error: Expecting tuple \"clientType=numClientType=extraArgs\" but got \"$OPTARG\""
        exit 1
      fi
      local keyValue
      IFS='=' read -ra keyValue <<< "$OPTARG"
      local clientType=${keyValue[0]}
      local numClients=${keyValue[1]}
      local extraArgs=${keyValue[2]}
      re='^[0-9]+$'
      if ! [[ $numClients =~ $re ]] ; then
        echo "error: numClientType must be a number but got \"$numClients\""
        exit 1
      fi
      case $clientType in
      idle)
        numIdleClients=$numClients
        # $extraArgs ignored for 'idle'
        ;;
      bench-tps)
        numBenchTpsClients=$numClients
        benchTpsExtraArgs=$extraArgs
        ;;
      bench-exchange)
        numBenchExchangeClients=$numClients
        benchExchangeExtraArgs=$extraArgs
        ;;
      *)
        echo "Unknown client type: $clientType"
        exit 1
        ;;
      esac
    }
    getClientTypeAndNum
    ;;
  F)
    failOnValidatorBootupFailure=false
    ;;
  i)
    nodeAddress=$OPTARG
    ;;
  d)
    debugBuild=true
    ;;
  *)
    usage "Error: unhandled option: $opt"
    ;;
  esac
done

loadConfigFile
netLogDir=
# Initializes the netLogDir global variable. Idempotent.
# Creates a timestamped log dir for this run and points the "log" symlink at it.
initLogDir() {
  [[ -z $netLogDir ]] || return 0
  netLogDir="$netDir"/log
  declare netLogDateDir
  netLogDateDir="$netDir"/log-$(date +"%Y-%m-%d_%H_%M_%S")
  # Migrate a pre-symlink plain directory out of the way.
  if [[ -d $netLogDir && ! -L $netLogDir ]]; then
    echo "Warning: moving $netLogDir to make way for symlink."
    mv "$netLogDir" "$netDir"/log.old
  elif [[ -L $netLogDir ]]; then
    rm "$netLogDir"
  fi
  mkdir -p "$netConfigDir" "$netLogDateDir"
  ln -sf "$netLogDateDir" "$netLogDir"
  echo "Log directory: $netLogDateDir"
}
# Optionally restrict the validator list to the first -n entries.
if [[ -n $numValidatorsRequested ]]; then
  truncatedNodeList=( "${validatorIpList[@]:0:$numValidatorsRequested}" )
  unset validatorIpList
  validatorIpList=( "${truncatedNodeList[@]}" )
fi

# Default to bench-tps on every client machine when no -c option was given;
# otherwise ensure we were not asked for more clients than machines exist.
numClients=${#clientIpList[@]}
numClientsRequested=$((numBenchTpsClients + numBenchExchangeClients + numIdleClients))
if [[ "$numClientsRequested" -eq 0 ]]; then
  numBenchTpsClients=$numClients
  numClientsRequested=$numClients
else
  if [[ "$numClientsRequested" -gt "$numClients" ]]; then
    # fixed typo in the error message: "then available" -> "than available"
    echo "Error: More clients requested ($numClientsRequested) than available ($numClients)"
    exit 1
  fi
fi
# Post a Buildkite annotation when running under CI; a no-op otherwise.
annotate() {
  if [[ -n $BUILDKITE ]]; then
    buildkite-agent annotate "$@"
  fi
}
# Annotate CI with the block explorer URL when a blockstreamer node exists.
annotateBlockexplorerUrl() {
  local blockstreamer=${blockstreamerIpList[0]}
  [[ -n $blockstreamer ]] || return 0
  annotate --style info --context blockexplorer-url "Block explorer: http://$blockstreamer/"
}
# Builds all binaries into farf/, running the build inside a Docker image
# when the host is not Linux or not a supported Ubuntu release.
build() {
  supported=("18.04")
  declare MAYBE_DOCKER=
  if [[ $(uname) != Linux || ! " ${supported[*]} " =~ $(lsb_release -sr) ]]; then
    # shellcheck source=ci/rust-version.sh
    source "$SOLANA_ROOT"/ci/rust-version.sh
    MAYBE_DOCKER="ci/docker-run.sh $rust_stable_docker_image"
  fi
  SECONDS=0
  (
    cd "$SOLANA_ROOT"
    echo "--- Build started at $(date)"
    set -x
    rm -rf farf
    buildVariant=
    if $debugBuild; then
      buildVariant=debug
    fi
    $MAYBE_DOCKER bash -c "
      set -ex
      scripts/cargo-install-all.sh farf \"$buildVariant\" \"$maybeUseMove\"
    "
  )
  echo "Build took $SECONDS seconds"
}
# Prepares a remote host: resets ~/solana (preserving node/ledger config when
# --skip-setup), optionally installs an ssh key, then rsyncs scripts over.
startCommon() {
  declare ipAddress=$1
  test -d "$SOLANA_ROOT"
  if $skipSetup; then
    # Shuffle the existing config out of the way while ~/solana is
    # recreated, then move it back in.
    ssh "${sshOptions[@]}" "$ipAddress" "
      set -x;
      mkdir -p ~/solana/config;
      rm -rf ~/config;
      mv ~/solana/config ~;
      rm -rf ~/solana;
      mkdir -p ~/solana ~/.cargo/bin;
      mv ~/config ~/solana/
    "
  else
    ssh "${sshOptions[@]}" "$ipAddress" "
      set -x;
      rm -rf ~/solana;
      mkdir -p ~/.cargo/bin
    "
  fi
  [[ -z "$externalNodeSshKey" ]] || ssh-copy-id -f -i "$externalNodeSshKey" "${sshOptions[@]}" "solana@$ipAddress"
  syncScripts "$ipAddress"
}
# rsync the repo's scripts (fetch-perf-libs.sh, scripts/, net/,
# multinode-demo/) to ~/solana/ on a remote node.
syncScripts() {
  # fixed: assign the parameter BEFORE using it; previously the echo ran
  # first and printed whatever $ipAddress leaked in from the caller's scope.
  declare ipAddress=$1
  echo "rsyncing scripts... to $ipAddress"
  rsync -vPrc -e "ssh ${sshOptions[*]}" \
    --exclude 'net/log*' \
    "$SOLANA_ROOT"/{fetch-perf-libs.sh,scripts,net,multinode-demo} \
    "$ipAddress":~/solana/ > /dev/null
}
# Starts the bootstrap validator and deploys binaries to it; other validators
# and clients later fetch the binaries from it. Runs synchronously; on
# failure dumps the log and exits 1.
startBootstrapLeader() {
  declare ipAddress=$1
  declare nodeIndex="$2"
  declare logFile="$3"
  echo "--- Starting bootstrap leader: $ipAddress"
  echo "start log: $logFile"

  # Deploy local binaries to bootstrap validator. Other validators and clients later fetch the
  # binaries from it
  (
    set -x
    startCommon "$ipAddress" || exit 1
    [[ -z "$externalPrimordialAccountsFile" ]] || rsync -vPrc -e "ssh ${sshOptions[*]}" "$externalPrimordialAccountsFile" \
      "$ipAddress:$remoteExternalPrimordialAccountsFile"
    case $deployMethod in
    tar)
      rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/bin/* "$ipAddress:~/.cargo/bin/"
      rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/solana-release/version.yml "$ipAddress:~/"
      ;;
    local)
      rsync -vPrc -e "ssh ${sshOptions[*]}" "$SOLANA_ROOT"/farf/bin/* "$ipAddress:~/.cargo/bin/"
      ssh "${sshOptions[@]}" -n "$ipAddress" "rm -f ~/version.yml; touch ~/version.yml"
      ;;
    skip)
      ;;
    *)
      usage "Internal error: invalid deployMethod: $deployMethod"
      ;;
    esac
    # Positional argument list consumed by remote-node.sh on the far side.
    ssh "${sshOptions[@]}" -n "$ipAddress" \
      "./solana/net/remote/remote-node.sh \
        $deployMethod \
        bootstrap-leader \
        $entrypointIp \
        $((${#validatorIpList[@]} + ${#blockstreamerIpList[@]} + ${#archiverIpList[@]})) \
        \"$RUST_LOG\" \
        $skipSetup \
        $failOnValidatorBootupFailure \
        \"$remoteExternalPrimordialAccountsFile\" \
        \"$maybeDisableAirdrops\" \
        \"$internalNodesStakeLamports\" \
        \"$internalNodesLamports\" \
        $nodeIndex \
        $numBenchTpsClients \"$benchTpsExtraArgs\" \
        $numBenchExchangeClients \"$benchExchangeExtraArgs\" \
        \"$genesisOptions\" \
        \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize\" \
        \"$gpuMode\" \
        \"$GEOLOCATION_API_KEY\" \
      "
  ) >> "$logFile" 2>&1 || {
    cat "$logFile"
    echo "^^^ +++"
    exit 1
  }
}
# Starts a validator/blockstreamer/archiver node asynchronously; appends the
# background pid to the global pids array and symlinks its log file by pid.
startNode() {
  declare ipAddress=$1
  declare nodeType=$2
  declare nodeIndex="$3"
  initLogDir
  declare logFile="$netLogDir/validator-$ipAddress.log"
  if [[ -z $nodeType ]]; then
    echo nodeType not specified
    exit 1
  fi
  if [[ -z $nodeIndex ]]; then
    echo nodeIndex not specified
    exit 1
  fi
  echo "--- Starting $nodeType: $ipAddress"
  echo "start log: $logFile"
  (
    set -x
    startCommon "$ipAddress"
    if [[ $nodeType = blockstreamer ]] && [[ -n $letsEncryptDomainName ]]; then
      #
      # Create/renew TLS certificate
      #
      declare localArchive=~/letsencrypt-"$letsEncryptDomainName".tgz
      if [[ -r "$localArchive" ]]; then
        timeout 30s scp "${sshOptions[@]}" "$localArchive" "$ipAddress:letsencrypt.tgz"
      fi
      ssh "${sshOptions[@]}" -n "$ipAddress" \
        "sudo -H /certbot-restore.sh $letsEncryptDomainName maintainers@solana.com"
      rm -f letsencrypt.tgz
      timeout 30s scp "${sshOptions[@]}" "$ipAddress:/letsencrypt.tgz" letsencrypt.tgz
      test -s letsencrypt.tgz # Ensure non-empty before overwriting $localArchive
      cp letsencrypt.tgz "$localArchive"
    fi
    # Same positional argument list as startBootstrapLeader, but with the
    # actual node type instead of "bootstrap-leader".
    ssh "${sshOptions[@]}" -n "$ipAddress" \
      "./solana/net/remote/remote-node.sh \
        $deployMethod \
        $nodeType \
        $entrypointIp \
        $((${#validatorIpList[@]} + ${#blockstreamerIpList[@]} + ${#archiverIpList[@]})) \
        \"$RUST_LOG\" \
        $skipSetup \
        $failOnValidatorBootupFailure \
        \"$remoteExternalPrimordialAccountsFile\" \
        \"$maybeDisableAirdrops\" \
        \"$internalNodesStakeLamports\" \
        \"$internalNodesLamports\" \
        $nodeIndex \
        $numBenchTpsClients \"$benchTpsExtraArgs\" \
        $numBenchExchangeClients \"$benchExchangeExtraArgs\" \
        \"$genesisOptions\" \
        \"$maybeNoSnapshot $maybeSkipLedgerVerify $maybeLimitLedgerSize\" \
        \"$gpuMode\" \
        \"$GEOLOCATION_API_KEY\" \
      "
  ) >> "$logFile" 2>&1 &
  declare pid=$!
  # Also expose the log under the background pid for easy correlation later.
  ln -sf "validator-$ipAddress.log" "$netLogDir/validator-$pid.log"
  pids+=("$pid")
}
# Starts one client program on a remote machine (ssh -f backgrounds the
# remote command). On failure dumps the log and exits 1.
startClient() {
  declare ipAddress=$1
  declare clientToRun="$2"
  declare clientIndex="$3"
  initLogDir
  declare logFile="$netLogDir/client-$clientToRun-$ipAddress.log"
  echo "--- Starting client: $ipAddress - $clientToRun"
  echo "start log: $logFile"
  (
    set -x
    startCommon "$ipAddress"
    ssh "${sshOptions[@]}" -f "$ipAddress" \
      "./solana/net/remote/remote-client.sh $deployMethod $entrypointIp \
        $clientToRun \"$RUST_LOG\" \"$benchTpsExtraArgs\" \"$benchExchangeExtraArgs\" $clientIndex"
  ) >> "$logFile" 2>&1 || {
    cat "$logFile"
    echo "^^^ +++"
    exit 1
  }
}
# Runs remote-sanity.sh on the bootstrap leader and, unless the first
# argument is non-empty, a reduced check on the blockstreamer (if any).
# Exits 1 on any failure; emits begin/complete metrics datapoints.
sanity() {
  declare skipBlockstreamerSanity=$1
  $metricsWriteDatapoint "testnet-deploy net-sanity-begin=1"
  declare ok=true
  declare bootstrapLeader=${validatorIpList[0]}
  declare blockstreamer=${blockstreamerIpList[0]}
  annotateBlockexplorerUrl
  echo "--- Sanity: $bootstrapLeader"
  (
    set -x
    # shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
    ssh "${sshOptions[@]}" "$bootstrapLeader" \
      "./solana/net/remote/remote-sanity.sh $bootstrapLeader $sanityExtraArgs \"$RUST_LOG\""
  ) || ok=false
  $ok || exit 1
  if [[ -z $skipBlockstreamerSanity && -n $blockstreamer ]]; then
    # If there's a blockstreamer node run a reduced sanity check on it as well
    echo "--- Sanity: $blockstreamer"
    (
      set -x
      # shellcheck disable=SC2029 # remote-client.sh args are expanded on client side intentionally
      ssh "${sshOptions[@]}" "$blockstreamer" \
        "./solana/net/remote/remote-sanity.sh $blockstreamer $sanityExtraArgs \"$RUST_LOG\""
    ) || ok=false
    $ok || exit 1
  fi
  $metricsWriteDatapoint "testnet-deploy net-sanity-complete=1"
}
# Publishes a solana-install update manifest for each requested --platform,
# pushing the manifest keypair to the bootstrap leader and running
# remote-deploy-update.sh there. Requires -t (release channel).
deployUpdate() {
  if [[ -z $updatePlatforms ]]; then
    echo "No update platforms"
    return
  fi
  if [[ -z $releaseChannel ]]; then
    echo "Release channel not specified (use -t option)"
    exit 1
  fi
  declare ok=true
  declare bootstrapLeader=${validatorIpList[0]}
  for updatePlatform in $updatePlatforms; do
    echo "--- Deploying solana-install update: $updatePlatform"
    (
      set -x
      scripts/solana-install-update-manifest-keypair.sh "$updatePlatform"
      timeout 30s scp "${sshOptions[@]}" \
        update_manifest_keypair.json "$bootstrapLeader:solana/update_manifest_keypair.json"
      # shellcheck disable=SC2029 # remote-deploy-update.sh args are expanded on client side intentionally
      ssh "${sshOptions[@]}" "$bootstrapLeader" \
        "./solana/net/remote/remote-deploy-update.sh $releaseChannel $updatePlatform"
    ) || ok=false
    $ok || exit 1
  done
}
# Resolves $nodeAddress to its node type and index, scanning validators,
# then blockstreamers, then archivers. The literal sentinels 'b' and 'r' in
# the list mark where each group begins. Sets the globals nodeIndex and
# nodeType; exits 1 if the address is unknown.
getNodeType() {
  echo "getNodeType: $nodeAddress"
  [[ -n $nodeAddress ]] || {
    echo "Error: nodeAddress not set"
    exit 1
  }
  nodeIndex=0 # <-- global
  nodeType=validator # <-- global
  for ipAddress in "${validatorIpList[@]}" b "${blockstreamerIpList[@]}" r "${archiverIpList[@]}"; do
    if [[ $ipAddress = b ]]; then
      nodeType=blockstreamer
      continue
    elif [[ $ipAddress = r ]]; then
      nodeType=archiver
      continue
    fi
    if [[ $ipAddress = "$nodeAddress" ]]; then
      echo "getNodeType: $nodeType ($nodeIndex)"
      return
    fi
    ((nodeIndex = nodeIndex + 1))
  done
  echo "Error: Unknown node: $nodeAddress"
  exit 1
}
# Fetch or build the software to deploy, per $deployMethod:
#   tar   - download the release tarball for $releaseChannel (when given) and
#           unpack it into $SOLANA_ROOT/solana-release
#   local - build from the working tree (unless --no-build)
#   skip  - deploy whatever is already in place
# With --deploy-if-newer (tar only), exits 0 early when the cluster already
# runs the fetched version.
prepare_deploy() {
  case $deployMethod in
  tar)
    if [[ -n $releaseChannel ]]; then
      rm -f "$SOLANA_ROOT"/solana-release.tar.bz2
      declare updateDownloadUrl=http://release.solana.com/"$releaseChannel"/solana-release-x86_64-unknown-linux-gnu.tar.bz2
      (
        set -x
        curl --retry 5 --retry-delay 2 --retry-connrefused \
          -o "$SOLANA_ROOT"/solana-release.tar.bz2 "$updateDownloadUrl"
      )
      tarballFilename="$SOLANA_ROOT"/solana-release.tar.bz2
    fi
    (
      set -x
      rm -rf "$SOLANA_ROOT"/solana-release
      (cd "$SOLANA_ROOT"; tar jxv) < "$tarballFilename"
      cat "$SOLANA_ROOT"/solana-release/version.yml
    )
    ;;
  local)
    if $doBuild; then
      build
    else
      echo "Build skipped due to --no-build"
    fi
    ;;
  skip)
    ;;
  *)
    usage "Internal error: invalid deployMethod: $deployMethod"
    ;;
  esac

  if [[ -n $deployIfNewer ]]; then
    if [[ $deployMethod != tar ]]; then
      echo "Error: --deploy-if-newer only supported for tar deployments"
      exit 1
    fi
    echo "Fetching current software version"
    (
      set -x
      rsync -vPrc -e "ssh ${sshOptions[*]}" "${validatorIpList[0]}":~/version.yml current-version.yml
    )
    cat current-version.yml
    # diff exits 0 when the files match; a mismatch means the cluster runs an
    # older version and the deploy should proceed.
    if ! diff -q current-version.yml "$SOLANA_ROOT"/solana-release/version.yml; then
      echo "Cluster software version is old.  Update required"
    else
      echo "Cluster software version is current.  No update required"
      exit 0
    fi
  fi
}
# Deploy the full network: bootstrap leader first, then the remaining
# validator/blockstreamer/archiver nodes, then the client instances.
# Runs a sanity pass and reports per-phase timing on completion.
deploy() {
  initLogDir

  echo "Deployment started at $(date)"
  $metricsWriteDatapoint "testnet-deploy net-start-begin=1"

  declare bootstrapLeader=true
  for nodeAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}" "${archiverIpList[@]}"; do
    nodeType=
    nodeIndex=
    # getNodeType sets nodeType/nodeIndex and, as a side effect of its search
    # loop, leaves the global ipAddress equal to nodeAddress — the
    # startBootstrapLeader/startNode calls below rely on that.
    getNodeType
    if $bootstrapLeader; then
      SECONDS=0
      declare bootstrapNodeDeployTime=
      startBootstrapLeader "$nodeAddress" $nodeIndex "$netLogDir/bootstrap-leader-$ipAddress.log"
      bootstrapNodeDeployTime=$SECONDS
      $metricsWriteDatapoint "testnet-deploy net-bootnode-leader-started=1"
      bootstrapLeader=false
      SECONDS=0
      pids=()
    else
      startNode "$ipAddress" $nodeType $nodeIndex
      # Stagger additional node start time. If too many nodes start simultaneously
      # the bootstrap node gets more rsync requests from the additional nodes than
      # it can handle.
      sleep 2
    fi
  done

  # Wait for the asynchronously-started nodes (pids populated by startNode).
  for pid in "${pids[@]}"; do
    declare ok=true
    wait "$pid" || ok=false
    if ! $ok; then
      echo "+++ validator failed to start"
      cat "$netLogDir/validator-$pid.log"
      if $failOnValidatorBootupFailure; then
        exit 1
      else
        echo "Failure is non-fatal"
      fi
    fi
  done

  $metricsWriteDatapoint "testnet-deploy net-validators-started=1"
  additionalNodeDeployTime=$SECONDS

  annotateBlockexplorerUrl

  sanity skipBlockstreamerSanity # skip sanity on blockstreamer node, it may not
                                 # have caught up to the bootstrap leader yet

  echo "--- Sleeping $clientDelayStart seconds after validators are started before starting clients"
  sleep "$clientDelayStart"

  SECONDS=0
  # Assign each client instance a role: bench-tps clients first, then
  # bench-exchange clients; any remainder stays idle.
  for ((i=0; i < "$numClients" && i < "$numClientsRequested"; i++)) do
    if [[ $i -lt "$numBenchTpsClients" ]]; then
      startClient "${clientIpList[$i]}" "solana-bench-tps" "$i"
    elif [[ $i -lt $((numBenchTpsClients + numBenchExchangeClients)) ]]; then
      startClient "${clientIpList[$i]}" "solana-bench-exchange" $((i-numBenchTpsClients))
    else
      startClient "${clientIpList[$i]}" "idle"
    fi
  done
  clientDeployTime=$SECONDS

  $metricsWriteDatapoint "testnet-deploy net-start-complete=1"

  # Record the software version that was deployed for the metrics dashboard.
  declare networkVersion=unknown
  case $deployMethod in
  tar)
    networkVersion="$(
      (
        set -o pipefail
        grep "^commit: " "$SOLANA_ROOT"/solana-release/version.yml | head -n1 | cut -d\ -f2
      ) || echo "tar-unknown"
    )"
    ;;
  local)
    networkVersion="$(git rev-parse HEAD || echo local-unknown)"
    ;;
  skip)
    ;;
  *)
    usage "Internal error: invalid deployMethod: $deployMethod"
    ;;
  esac
  $metricsWriteDatapoint "testnet-deploy version=\"${networkVersion:0:9}\""

  echo
  echo "+++ Deployment Successful"
  echo "Bootstrap leader deployment took $bootstrapNodeDeployTime seconds"
  echo "Additional validator deployment (${#validatorIpList[@]} validators, ${#blockstreamerIpList[@]} blockstreamer nodes, ${#archiverIpList[@]} archivers) took $additionalNodeDeployTime seconds"
  echo "Client deployment (${#clientIpList[@]} instances) took $clientDeployTime seconds"
  echo "Network start logs in $netLogDir"
}
# Stop the node at the given IP address by running cleanup.sh on it.
#   $1 - ipAddress of the node to stop
#   $2 - "true" to block until the stop completes; "false" to run in the
#        background (the ssh pid is appended to the global pids array)
stopNode() {
  local ipAddress=$1
  local block=$2
  initLogDir
  declare logFile="$netLogDir/stop-validator-$ipAddress.log"

  echo "--- Stopping node: $ipAddress"
  echo "stop log: $logFile"
  syncScripts "$ipAddress"
  (
    # Since cleanup.sh does a pkill, we cannot pass the command directly,
    # otherwise the process which is doing the killing will be killed because
    # the script itself will match the pkill pattern
    set -x
    # shellcheck disable=SC2029 # It's desired that PS4 be expanded on the client side
    ssh "${sshOptions[@]}" "$ipAddress" "PS4=\"$PS4\" ./solana/net/remote/cleanup.sh"
  ) >> "$logFile" 2>&1 &
  declare pid=$!
  # Also expose the log under the background pid so failures can be looked up
  # either by address or by pid.
  ln -sf "stop-validator-$ipAddress.log" "$netLogDir/stop-validator-$pid.log"
  if $block; then
    wait $pid
  else
    pids+=("$pid")
  fi
}
# Stop every node in the network (validators, blockstreamers, archivers and
# clients) concurrently, then wait for all of the stops to finish.
stop() {
  SECONDS=0
  $metricsWriteDatapoint "testnet-deploy net-stop-begin=1"

  declare loopCount=0
  pids=()
  for ipAddress in "${validatorIpList[@]}" "${blockstreamerIpList[@]}" "${archiverIpList[@]}" "${clientIpList[@]}"; do
    stopNode "$ipAddress" false
    # Stagger additional node stop time to avoid too many concurrent ssh
    # sessions
    ((loopCount++ % 4 == 0)) && sleep 2
  done

  echo --- Waiting for nodes to finish stopping
  for pid in "${pids[@]}"; do
    echo -n "$pid "
    wait "$pid" || true
  done
  echo

  $metricsWriteDatapoint "testnet-deploy net-stop-complete=1"
  echo "Stopping nodes took $SECONDS seconds"
}
# Ping each validator once and abort with a warning if any is unreachable.
checkPremptibleInstances() {
  # The validatorIpList nodes may be preemptible instances that can disappear at
  # any time.  Try to detect when a validator has been preempted to help the user
  # out.
  #
  # Of course this isn't airtight as an instance could always disappear
  # immediately after its successfully pinged.
  for ipAddress in "${validatorIpList[@]}"; do
    (
      set -x
      # tr maps "-" to "_" in the ping output.
      # NOTE(review): without `set -o pipefail` the pipeline's exit status is
      # tr's, which would mask a ping failure — confirm pipefail is enabled
      # earlier in this script.
      timeout 5s ping -c 1 "$ipAddress" | tr - _
    ) || {
      cat <<EOF
Warning: $ipAddress may have been preempted.
Run |./gce.sh config| to restart it
EOF
      exit 1
    }
  done
}
checkPremptibleInstances

# Dispatch on the requested $command (parsed earlier in this script).
case $command in
restart)
  prepare_deploy
  stop
  deploy
  ;;
start)
  prepare_deploy
  deploy
  ;;
sanity)
  sanity
  ;;
stop)
  stop
  ;;
update)
  deployUpdate
  ;;
stopnode)
  if [[ -z $nodeAddress ]]; then
    usage "node address (-i) not specified"
    exit 1
  fi
  stopNode "$nodeAddress" true
  ;;
startnode)
  if [[ -z $nodeAddress ]]; then
    usage "node address (-i) not specified"
    exit 1
  fi
  nodeType=
  nodeIndex=
  getNodeType
  startNode "$nodeAddress" $nodeType $nodeIndex
  ;;
logs)
  initLogDir
  # Pull the per-role logs from every remote node into $netLogDir.
  fetchRemoteLog() {
    declare ipAddress=$1
    declare log=$2
    echo "--- fetching $log from $ipAddress"
    (
      set -x
      timeout 30s scp "${sshOptions[@]}" \
        "$ipAddress":solana/"$log".log "$netLogDir"/remote-"$log"-"$ipAddress".log
    ) || echo "failed to fetch log"
  }
  fetchRemoteLog "${validatorIpList[0]}" faucet
  for ipAddress in "${validatorIpList[@]}"; do
    fetchRemoteLog "$ipAddress" validator
  done
  for ipAddress in "${clientIpList[@]}"; do
    fetchRemoteLog "$ipAddress" client
  done
  for ipAddress in "${blockstreamerIpList[@]}"; do
    fetchRemoteLog "$ipAddress" validator
  done
  for ipAddress in "${archiverIpList[@]}"; do
    fetchRemoteLog "$ipAddress" validator
  done
  ;;
netem)
  # With a config file: distribute it to validators (on "add") and drive
  # net-shaper.sh on every validator.  Without one: apply $netemConfig to the
  # first $netemPartition percent of validators (rounded up).
  if [[ -n $netemConfigFile ]]; then
    if [[ $netemCommand = "add" ]]; then
      for ipAddress in "${validatorIpList[@]}"; do
        "$here"/scp.sh "$netemConfigFile" solana@"$ipAddress":~/solana
      done
    fi
    for i in "${!validatorIpList[@]}"; do
      "$here"/ssh.sh solana@"${validatorIpList[$i]}" 'solana/scripts/net-shaper.sh' \
        "$netemCommand" ~solana/solana/"$netemConfigFile" "${#validatorIpList[@]}" "$i"
    done
  else
    # Number of nodes to affect: netemPartition percent of the validators,
    # rounded up and clamped to the list size.
    num_nodes=$((${#validatorIpList[@]}*netemPartition/100))
    if [[ $((${#validatorIpList[@]}*netemPartition%100)) -gt 0 ]]; then
      num_nodes=$((num_nodes+1))
    fi
    if [[ "$num_nodes" -gt "${#validatorIpList[@]}" ]]; then
      num_nodes=${#validatorIpList[@]}
    fi
    # Stop netem on all nodes
    for ipAddress in "${validatorIpList[@]}"; do
      "$here"/ssh.sh solana@"$ipAddress" 'solana/scripts/netem.sh delete < solana/netem.cfg || true'
    done
    # Start netem on required nodes
    for ((i=0; i<num_nodes; i++ )); do :
      "$here"/ssh.sh solana@"${validatorIpList[$i]}" "echo $netemConfig > solana/netem.cfg; solana/scripts/netem.sh add \"$netemConfig\""
    done
  fi
  ;;
*)
  echo "Internal error: Unknown command: $command"
  usage
  exit 1
esac
|
/**
 * Add two values.
 * @returns {number|null} x + y when both arguments are numbers, else null.
 */
export const sum = (x, y) => {
  const bothNumbers = typeof x === 'number' && typeof y === 'number';
  return bothNumbers ? x + y : null;
};
|
<gh_stars>1-10
import java.util.*;
import java.lang.*;
public class die_roll{
    /**
     * Reads two die rolls (y, w) from stdin and prints, as an already-reduced
     * fraction, the probability that a fresh six-sided roll is at least
     * max(y, w): i.e. (7 - max(y, w)) / 6.
     *
     * Replaces the original repetitive if/else chain (and its dead `m = 0`
     * initialisation) with a lookup table of the reduced fractions.
     */
    public static void main(String args[]){
        Scanner sc = new Scanner(System.in);
        int y = sc.nextInt();
        int w = sc.nextInt();
        int m = Math.max(y, w);      // the roll to beat or match
        int favorable = 7 - m;       // outcomes in 1..6 that are >= m
        // Reduced forms of favorable/6, indexed by `favorable` (index 0 unused).
        // Anything outside 1..5 falls through to "1/1", matching the original
        // chain's final else branch (covers favorable == 6 and bad input).
        String[] reduced = {"", "1/6", "1/3", "1/2", "2/3", "5/6"};
        if (favorable >= 1 && favorable <= 5) {
            System.out.print(reduced[favorable]);
        } else {
            System.out.print("1/1");
        }
    }
}
class AutocompleteSystem:
    """A keyword store supporting inserts and prefix search.

    Keywords live in a trie of nested dicts; the sentinel key '_end_'
    marks nodes where a complete keyword terminates.
    """

    def __init__(self):
        # Root of the trie; each node maps a character to a child node.
        self.trie = {}

    def insert(self, keyword):
        """Add `keyword` to the trie, creating nodes as needed."""
        node = self.trie
        for ch in keyword:
            node = node.setdefault(ch, {})
        node['_end_'] = '_end_'

    def search(self, prefix):
        """Return every stored keyword that begins with `prefix`."""
        node = self.trie
        for ch in prefix:
            if ch not in node:
                return []
            node = node[ch]
        matches = []
        self.search_helper(prefix, node, matches)
        return matches

    def search_helper(self, prefix, node, output):
        """Append to `output` every keyword reachable from `node`."""
        if '_end_' in node:
            output.append(prefix)
        for ch, child in node.items():
            if ch != '_end_':
                self.search_helper(prefix + ch, child, output)
# Run eeID.py over the datasets listed in curatingList.txt, passing the Gemma
# credentials taken from the environment ($GEMMAUSERNAME / $GEMMAPASSWORD).
python eeID.py 'curatingList.txt' $GEMMAUSERNAME $GEMMAPASSWORD;
const core = require('@actions/core');
const stringToJson = require('@cycjimmy/awesome-js-funcs/typeConversion/stringToJson').default;
const inputs = require('./inputs.json');
/**
 * Handle Branches Option
 *
 * Builds the branch-related option object for semantic-release from the
 * action's `branches` / `branch` inputs, accounting for the option rename
 * between semantic-release v15 (`branch`, a plain string) and v16+
 * (`branches`, a JSON structure).
 * @returns {{}|{branch: string}|{branches: *}} empty object when no input set
 */
exports.handleBranchesOption = () => {
  const branchesOption = {};
  const branches = core.getInput(inputs.branches);
  const branch = core.getInput(inputs.branch);

  core.debug(`branches input: ${branches}`);
  core.debug(`branch input: ${branch}`);

  // Major version of the semantic-release package actually installed.
  const semanticVersion = require('semantic-release/package.json').version;
  const semanticMajorVersion = Number(semanticVersion.replace(/\..+/g, ''));
  core.debug(`semanticMajorVersion: ${semanticMajorVersion}`);

  // older than v16
  if (semanticMajorVersion < 16) {
    if (!branch) {
      return branchesOption;
    }
    branchesOption.branch = branch;
    return branchesOption;
  }

  // above v16
  // The newer `branches` input takes precedence over the legacy `branch`.
  const strNeedConvertToJson = branches || branch || '';
  if (!strNeedConvertToJson) {
    return branchesOption;
  }

  const jsonOrStr = stringToJson('' + strNeedConvertToJson);
  core.debug(`Converted branches attribute: ${JSON.stringify(jsonOrStr)}`);
  branchesOption.branches = jsonOrStr;
  return branchesOption;
};
/**
* Handle DryRun Option
* @returns {{}|{dryRun: boolean}}
*/
exports.handleDryRunOption = () => {
const dryRun = core.getInput(inputs.dry_run);
switch (dryRun) {
case 'true':
return {dryRun: true};
case 'false':
return {dryRun: false};
default:
return {};
}
};
|
from flask import Flask, render_template
#import SQLALchemy
from flask_sqlalchemy import SQLAlchemy
#from models import Song, Item, Playlist
# Create the Flask application instance for this module.
app = Flask(__name__)
#set the SQLALCHEMY_DATABASE_URI key
# SQLite file used as the song library's backing store.
app.config['SQLALCHEMY_DATABASE_URI']='sqlite:///song_library.db'
# Modification tracking is unused here and adds overhead; keep it off.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): hard-coded secret key — fine for development; load it from the
# environment before deploying.
app.config['SECRET_KEY'] = 'you-will-never-guess'
#create an SQLAlchemy object named `db` and bind it to your app
db=SQLAlchemy(app)
#a simple initial greeting
@app.route('/')
@app.route('/index')
def greeting():
    """Render the landing page for both '/' and '/index'."""
    return render_template('greeting.html')
@app.errorhandler(404)
def not_found(e):
    """Render the custom 404 page.

    Args:
        e: the NotFound exception raised by Flask (unused).

    Returns:
        The rendered 404 template together with an explicit 404 status code.
        Without the status code Flask would serve the error page as 200 OK,
        misleading clients and crawlers.
    """
    return render_template("404.html"), 404
#uncomment the code below here when you are done creating database instance db and models
import routes |
/** Configuration options. */
export interface Options {
  /** Input format selector; 'netscape' is the only supported value. */
  parser?: 'netscape';
}
|
-- Named, geolocated entries.
CREATE TABLE example (
  -- Surrogate primary key.
  id INT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
  name VARCHAR(255) NOT NULL,
  description VARCHAR(255) NOT NULL,
  -- Coordinates stored with 6 decimal places (~0.1 m resolution).
  latitude DECIMAL(9,6) NOT NULL,
  longitude DECIMAL(9,6) NOT NULL
);
#!/bin/sh
# Build the pyrosim simulator and its bundled ODE 0.12 physics library.
#
# The original `&&` chains were broken: both `make $MAKEOPTS` invocations
# terminated a chain, so a failed ODE or simulator build was silently
# ignored and the script continued (and the initial `cd` was unchecked).
# `set -e` makes every step fatal on failure instead.
set -e

MAKEOPTS="-j2"

echo "Changing into simulator directory"
cd ./pyrosim/simulator/external

echo "Unpacking ode-0.12.tar.bz2..."
tar -xjf ode-0.12.tar.bz2
echo "done"

echo "Building ode-0.12..."
cd ode-0.12
./configure --enable-double-precision
# $MAKEOPTS is intentionally unquoted so "-j2" is passed as its own word.
make $MAKEOPTS
cd ../..
echo "done"

echo "Building simulator..."
make $MAKEOPTS
echo "done"
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2013:1812
#
# Security announcement date: 2013-12-11 05:36:35 UTC
# Script generation date: 2017-01-25 21:21:38 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - firefox.i686:24.2.0-1.el6_5
# - firefox-debuginfo.i686:24.2.0-1.el6_5
# - firefox.x86_64:24.2.0-1.el6_5
# - firefox-debuginfo.x86_64:24.2.0-1.el6_5
#
# Last versions recommended by security team:
# - firefox.i686:45.7.0-1.el6_8
# - firefox-debuginfo.i686:45.7.0-1.el6_8
# - firefox.x86_64:45.7.0-1.el6_8
# - firefox-debuginfo.x86_64:45.7.0-1.el6_8
#
# CVE List:
# - CVE-2013-5609
# - CVE-2013-5612
# - CVE-2013-5613
# - CVE-2013-5614
# - CVE-2013-5616
# - CVE-2013-5618
# - CVE-2013-6671
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

# Install the currently recommended firefox packages (newer than the versions
# the original advisory fixed).
sudo yum install firefox.i686-45.7.0 -y
sudo yum install firefox-debuginfo.i686-45.7.0 -y
sudo yum install firefox.x86_64-45.7.0 -y
sudo yum install firefox-debuginfo.x86_64-45.7.0 -y
|
import css from 'styled-jsx/css'; // eslint-disable-line import/no-unresolved
// Styles for the detail view: responsive root padding, section spacing,
// flex header row, bordered message panel and unbulleted item lists.
export const detailStyle = css`
  .root {
    padding: 24px;
  }
  @media screen and (min-width: 768px) {
    .root {
      padding: 40px;
    }
  }
  .section {
    margin-bottom: 64px;
  }
  .header {
    display: flex;
    align-items: center;
    justify-content: space-between;
  }
  .message {
    border: 1px solid #ccc;
    background: #eee;
    border-radius: 3px;
    padding: 24px;
    word-break: break-all;
  }
  .items {
    list-style-type: none;
    padding-left: 0;
  }
`;
// Styles for the tab menu: flex tab strip with hover/active/disabled states,
// a count badge, and a filler element that completes the bottom border.
export const tabMenuStyle = css`
  .tabs {
    display: flex;
    font-size: 16px;
    font-weight: 500;
    margin: 0;
    padding: 0;
  }
  .tabs li {
    list-style: none;
  }
  .tab {
    padding: 16px 20px;
    border: 1px solid #ccc;
    background: #eee;
    display: flex;
    align-items: center;
  }
  .tab:hover {
    background: #f8f8f8;
  }
  .tab + .tab {
    border-left: 0;
  }
  .tab.active {
    border-bottom-color: transparent;
    background: #fff;
  }
  .tab.disabled {
    color: #bbb;
    pointer-events: none;
  }
  .badge {
    background: #999;
    color: #fff;
    padding: 2px 8px;
    border-radius: 20px;
    font-size: 0.75em;
    margin-left: 8px;
  }
  .empty {
    flex: 1;
    border-bottom: 1px solid #ccc;
  }
`;
|
<gh_stars>1-10
from django.contrib import admin
from contact import models
@admin.register(models.Message)
class MessageAdmin(admin.ModelAdmin):
    """Read-only admin for contact-form messages.

    Staff can browse, search and filter messages here, but cannot add new
    ones, and all submitted fields are read-only.
    """

    # Free-text search over sender identity, location and content.
    search_fields = [
        "name",
        "email",
        "phone",
        "country",
        "city",
        "subject",
        "message",
    ]
    # Columns shown in the changelist.
    list_display = [
        "subject",
        "name",
        "email",
        "date_created",
        "is_read",
    ]
    list_filter = ["country", "city"]
    # Submitted data is displayed but never editable in the admin.
    readonly_fields = [
        "name",
        "email",
        "phone",
        "country",
        "city",
        "subject",
        "message",
    ]

    def has_add_permission(self, request, obj=None):
        # Messages only arrive via the public contact form; disallow manual adds.
        return False
|
<filename>src/main/java/it/smartio/version/BuildNumber.java
/*
* Copyright (c) 2001-2019 Territorium Online Srl / TOL GmbH. All Rights Reserved.
*
* This file contains Original Code and/or Modifications of Original Code as defined in and that are
* subject to the Territorium Online License Version 1.0. You may not use this file except in
* compliance with the License. Please obtain a copy of the License at http://www.tol.info/license/
* and read it before using this file.
*
* The Original Code and all software distributed under the License are distributed on an 'AS IS'
* basis, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, AND TERRITORIUM ONLINE HEREBY
* DISCLAIMS ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. Please see the License for
* the specific language governing rights and limitations under the License.
*/
package it.smartio.version;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
/**
 * The {@link BuildNumber} class computes a monotonically increasing build
 * number: the number of whole hours elapsed since 2016-01-01T00:00 UTC.
 */
public final class BuildNumber {

  /**
   * Milliseconds in one hour. The original name {@code HOURS} was misleading —
   * the value is a millisecond conversion factor, not a count of hours.
   */
  private static final long MILLIS_PER_HOUR = 1000L * 3600L;

  /** Reference instant (2016-01-01T00:00 UTC) from which build numbers are counted. */
  private static final OffsetDateTime START_TIMESTAMP =
      OffsetDateTime.of(LocalDate.of(2016, 1, 1), LocalTime.of(0, 0), ZoneOffset.UTC);

  /** Utility class; not instantiable. */
  private BuildNumber() {
  }

  /**
   * Returns the current build number.
   *
   * @return whole hours elapsed between the reference instant and now
   */
  public static long get() {
    return (System.currentTimeMillis() - START_TIMESTAMP.toInstant().toEpochMilli())
        / MILLIS_PER_HOUR;
  }
}
|
#!/usr/bin/env bash
# Use this script to test if a given TCP host/port are available
# Source https://github.com/adrian-gheorghe/wait
TIMEOUT=15
INDEX=0
INDEX_COMMAND=0
WAITS=()
COMMANDS=()
cmdname=$(basename $0);
# Print usage information and exit with failure.
usage()
{
  echo "Usage: $cmdname [[-w | --wait \"host:port\"] | [[-w | --wait \"ls -al /var/www\"] | [[-c | --command \"printenv\"] | [[-t | --timeout 15] | [-h | --help]]"
  exit 1
};
# Block until a single wait target succeeds.
#   $1 - the target, either "host:port" or an arbitrary shell command.
# Repeatedly invokes process(), which sets the global DONE to 1 once the
# target succeeds (and sleeps $TIMEOUT between attempts).
waitfor()
{
  DONE=0
  WAITCOMMAND=$1
  while [ "$DONE" -eq 0 ];
  do
    process "$WAITCOMMAND"
  done
}
# Test a single wait target once, sleeping $TIMEOUT seconds on failure.
#   $1 - target: a string containing ":" is treated as host:port and probed
#        with nc; anything else is eval'd as a shell command.
# Sets the global DONE=1 on success.
process()
{
  case "$1" in
    *:* )
      # Split "host:port" on the colon (word-splitting the substitution).
      hostport=(${1//:/ })
      HOST=${hostport[0]}
      PORT=${hostport[1]}
      nc -z "$HOST" "$PORT" > /dev/null 2>&1
      result=$?
      if [[ $result -eq 0 ]]; then
        echo "Host $HOST on $PORT is now accessible"
        DONE=1
      else
        echo "Sleeping $TIMEOUT seconds waiting for host"
        sleep $TIMEOUT
      fi
      ;;
    * )
      command=$(eval ${1})
      # $? below is the exit status of the assignment above (i.e. of the
      # eval'd command); the command must also have produced non-empty
      # output to count as a success.
      if [[ $command && ($? -eq 0) ]]; then
        echo "$1 returned $command"
        DONE=1
      else
        echo "Sleeping $TIMEOUT seconds waiting for command"
        sleep $TIMEOUT
      fi
      ;;
  esac
}
# Wait for every registered target in order, then run every registered
# command.
# NOTE(review): the trailing `exit 1` makes the script exit non-zero even
# when all waits and commands succeed — confirm whether callers depend on
# that before changing it.
main()
{
  for ((i = 0; i < ${#WAITS[@]}; i++))
  do
    waitfor "${WAITS[$i]}"
  done
  for ((i = 0; i < ${#COMMANDS[@]}; i++))
  do
    eval "${COMMANDS[$i]}"
  done
  exit 1
}
##### Main
# Parse command-line options into the WAITS / COMMANDS / TIMEOUT globals,
# then hand control to main().
while [ "$1" != "" ]; do
  case $1 in
    -w | --wait )    shift
                     WAITS["$INDEX"]="$1"
                     let "INDEX++"
                     ;;
    -c | --command ) shift
                     COMMANDS["$INDEX_COMMAND"]="$1"
                     let "INDEX_COMMAND++"
                     ;;
    -t | --timeout ) shift
                     TIMEOUT="$1"
                     ;;
    -h | --help )    usage
                     exit
                     ;;
    # Fix: the original called `echoerr`, which is not defined anywhere in
    # this script, so an invalid option produced "command not found" instead
    # of the intended message. Write directly to stderr instead.
    *)               echo "Invalid option: $1" >&2
                     usage
                     exit 1
                     ;;
  esac
  shift
done
main
import React from 'react';
import styled, { css, withTheme } from 'styled-components';
import { useSelector } from 'react-redux';
import { View, Text, Image } from 'react-native';
import { ScrollView, TouchableOpacity } from 'react-native-gesture-handler';
import LayoutWrapper from 'sharedUI/LayoutWrapper';
import FlexDiv from 'sharedUI/FlexDiv';
import StarButton from 'sharedUI/Button/StarButton';
import StyledIcon from 'sharedUI/Icon/StyledIcon';
import { device, rgba, replaceTemplate, truncate } from 'utils';
import { SCREENS, STRINGS, FAKE_PHOTO_MAIL_TEMPLATE } from 'configs';
const { EMAIL_CONTENT_TYPES } = STRINGS;

// ---- styled-components for the email details screen ----

// Top row: subject/category on the left, star button on the right.
const Header = styled.View`
  ${({ theme }) => theme.styles.flex('space-between', 'flex-start', 'row', true)}
  margin-bottom: 28px;
`;

// Sender row (avatar, names, date) laid out horizontally.
const SubHeader = styled.View`
  ${({ theme }) => theme.styles.flex(null, null, 'row', true)}
`;

// Container for the email body blocks.
const Content = styled.View`
  margin-top: 28px;
  width: 100%;
  flex: 1;
`;

// Email subject line.
const Title = styled.Text`
  ${({ theme }) => theme.styles.os.h3_alt}
  color: ${({ theme }) => theme.colors.charcoal};
  letter-spacing: 0.34px;
`;

// Mailbox/category chip under the subject.
const Category = styled.Text`
  background-color: ${({ theme }) => theme.colors.persianRed};
  color: ${({ theme }) => theme.colors.ghostWhite};
  font-family: ${({ theme }) => theme.fonts.cairo.semiBold};
  font-size: ${({ theme }) => theme.typo.sizes.subtitle};
  letter-spacing: 0.2px;
  padding: 4px 8px;
  margin-top: 8px;
`;

const Sender = styled.Text`
  ${({ theme }) => theme.styles.os.h3_alt}
  color: ${({ theme }) => theme.colors.charcoal};
  line-height: 16px;
  padding-top: 10px;
`;

// "à <recipients>" line under the sender name.
const Recipient = styled.Text`
  color: ${({ theme }) => theme.colors.charcoal};
  font-family: ${({ theme }) => theme.fonts.cairo.light};
  font-size: 12px;
  letter-spacing: 0.2px;
  line-height: 16px;
  padding-top: 2px;
`;

// NOTE: shadows the global Date constructor; only used as a JSX tag here.
const Date = styled.Text`
  color: ${({ theme }) => theme.colors.charcoal};
  font-family: ${({ theme }) => theme.fonts.cairo.extraLight};
  font-size: ${({ theme }) => theme.typo.sizes.subtitle};
  letter-spacing: 0.2px;
  margin-bottom: 20px;
`;

// Body text variants, one per EMAIL_CONTENT_TYPES entry.
const Message = styled.Text`
  padding-top: 4px;
  ${({ theme }) => theme.styles.os.body_alt}
  letter-spacing: 0.24px;
  line-height: 16px;
`;

const MessageBold = styled.Text`
  padding-top: 4px;
  ${({ theme }) => theme.styles.os.body_alt_bold}
  letter-spacing: 0.24px;
  line-height: 16px;
`;

const MessageLink = styled.Text`
  padding-top: 4px;
  ${({ theme }) => theme.styles.os.body_alt}
  letter-spacing: 0.24px;
  line-height: 16px;
  color: ${({ theme }) => theme.colors.slateBlue};
  text-decoration: underline;
`;

const MessageList = styled.Text`
  padding: 4px 24px 0 24px;
  ${({ theme }) => theme.styles.os.body_alt}
  letter-spacing: 0.24px;
  line-height: 16px;
`;
/**
 * Screen rendering a single email passed via `route.params.email`:
 * header (subject, category chip, star), sender block, then the email body
 * rendered block-by-block according to each block's content type.
 * GPS data from the redux `deviceData` slice is substituted into
 * PLACEHOLDER_GPS content blocks.
 */
const EmailDetailsScreen = ({ route, navigation, theme }) => {
  const { email } = route.params;
  // `object` is the subject; `content` is an ordered list of typed blocks.
  const { object, from, to, formatDate, content, star } = email;
  const { gps } = useSelector((state) => state.deviceData);
  const { lat, long, address } = gps;

  return (
    <LayoutWrapper screenName={route.name}>
      <ScrollView contentContainerStyle={theme.styles.styleSheet.scrollBodyEmail}>
        {/* Subject, category and (non-interactive) star button */}
        <Header>
          <FlexDiv alignItems="flex-start">
            <Title>{object}</Title>
            <Category>Boîte de réception</Category>
          </FlexDiv>
          <StarButton
            initialActive={star}
            width={28}
            height={28}
            useImg
            redPress
            noPress
            additionalStyle={css`
              top: 10px;
            `}
          />
        </Header>
        {/* Sender avatar, name, truncated recipient list and date */}
        <SubHeader>
          <StyledIcon
            type="PERSON"
            size={48}
            width={24}
            height={24}
            additionalStyle={theme.styles.avatar(
              rgba(theme.colors.persianRedAlpha, 0.4),
              theme.colors.white
            )}
          />
          <FlexDiv
            alignItems="flex-start"
            additionalStyle={css`
              flex: 1;
              margin-left: 8px;
            `}>
            <Sender>{from}</Sender>
            <Recipient>{truncate(`à ${to.join(', ')}`, 80)}</Recipient>
          </FlexDiv>
          <Date>{formatDate}</Date>
        </SubHeader>
        {/* Email body: map each typed content block to its component.
            NOTE(review): array indices are used as React keys — fine while a
            given email's content list is static; revisit if it can change. */}
        <Content>
          {content.map(({ type, content: data }, i) => {
            switch (type) {
              case EMAIL_CONTENT_TYPES.BOLD:
                return <MessageBold key={i}>{data}</MessageBold>;
              case EMAIL_CONTENT_TYPES.IMAGE:
                return (
                  <Image
                    key={i}
                    css={css`
                      width: ${device().width * 0.86}px;
                      height: ${device().height * 0.36}px;
                      margin-bottom: 12px;
                    `}
                    resizeMode="contain"
                    source={{ uri: replaceTemplate(FAKE_PHOTO_MAIL_TEMPLATE, data) }}
                  />
                );
              case EMAIL_CONTENT_TYPES.LINK:
                return <MessageLink key={i}>{data}</MessageLink>;
              case EMAIL_CONTENT_TYPES.LINK_WIKIHOW:
                return (
                  <TouchableOpacity
                    key={i}
                    activeOpacity={0.6}
                    onPress={() => navigation.navigate(SCREENS.INTERNET)}>
                    <MessageLink>{data}</MessageLink>
                  </TouchableOpacity>
                );
              case EMAIL_CONTENT_TYPES.LIST:
                return <MessageList key={i}>{data}</MessageList>;
              case EMAIL_CONTENT_TYPES.PLACEHOLDER_GPS:
                return (
                  <MessageBold key={i}>
                    {replaceTemplate(data, `{{ [${lat}, ${long}] - ${address} }}`)}
                  </MessageBold>
                );
              case EMAIL_CONTENT_TYPES.TEXT:
                return <Message key={i}>{data}</Message>;
              default:
                return null;
            }
          })}
        </Content>
      </ScrollView>
    </LayoutWrapper>
  );
};

export default withTheme(EmailDetailsScreen);
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.