text
stringlengths 1
1.05M
|
|---|
#!/bin/bash
# Builds WebExpress in Release configuration.
# The dotnet SDK lives in a non-default location, so PATH and DOTNET_ROOT
# are pointed at it before invoking the build.
export PATH=$PATH:/usr/share/dotnet-sdk/
export DOTNET_ROOT=/usr/share/dotnet-sdk/
dotnet build --configuration Release
# Publishing is currently disabled:
#dotnet publish
|
<gh_stars>1-10
Ext.define('Scalr.ui.FarmRoleEditorTab.Scripting', {
    extend: 'Scalr.ui.FarmRoleEditorTab',
    tabTitle: 'Orchestration',
    itemId: 'scripting',
    layout: 'fit',

    // Per-show cache for account scripts, role scripts and chef settings;
    // reset each time the tab is about to be shown.
    tabData: null,

    getDefaultValues: function (record) {
        record.set('scripting', []);
        return {};
    },

    /**
     * Loads, in sequence, the account orchestration scripts, the role scripts
     * for this role/location, and the role chef settings. Calls handler() only
     * when all three requests succeed; otherwise deactivates the tab.
     */
    beforeShowTab: function (record, handler) {
        var me = this;
        me.tabData = {};
        Scalr.CachedRequestManager.get('farmDesigner').load(
            {
                url: '/account/orchestration/xGetList',
                params: {}
            },
            function(data, status){
                me.tabData['accountScripts'] = data;
                if (status) {
                    Scalr.CachedRequestManager.get('farmDesigner').load(
                        {
                            url: '/farms/builder/xGetScripts',
                            params: {
                                cloudLocation: record.get('cloud_location'),
                                roleId: record.get('role_id')
                            }
                        },
                        function(data, status){
                            if (status) {
                                Ext.apply(me.tabData, data);
                                record.loadRoleChefSettings(function(data, status){
                                    if (status) {
                                        me.tabData['chefSettings'] = data['chefSettings'];
                                        handler();
                                    } else {
                                        me.deactivateTab();
                                    }
                                });
                            } else {
                                me.deactivateTab();
                            }
                        },
                        me,
                        0
                    );
                } else {
                    me.deactivateTab();
                }
            },
            me,
            0
        );
    },

    /**
     * Populates the orchestration editor: merges role- and account-level
     * system scripts into the record's script list, loads farm roles/events/
     * behaviors, and migrates deprecated farm_role_id targets to aliases.
     */
    showTab: function (record) {
        var scripts = record.get('scripting'),
            accountScripts = this.tabData['accountScripts'] || {},
            roleScripts = this.tabData['roleScripts'] || {},
            chefSettings = this.tabData['chefSettings'] || {},
            roleParams = record.get('scripting_params'),
            roleOsFamily = Scalr.utils.getOsById(record.get('osId'), 'family'),
            params = {};

        // Index saved per-script parameter overrides by script hash.
        if (Ext.isArray(roleParams)) {
            for (var i = 0; i < roleParams.length; i++) {
                params[roleParams[i]['hash']] = roleParams[i]['params'];
            }
        }

        // FIX: this function was assigned without `var`, leaking
        // addSystemScript into the global scope on every call to showTab.
        var addSystemScript = function(scripts, script, system) {
            var addScript = true;
            // Account-level Scalr scripts are filtered by OS compatibility:
            // keep when the script's OS matches the role's family, or when it
            // targets generic 'linux' and the role is not Windows.
            if (system === 'account' && roleOsFamily && script['script_type'] === 'scalr') {
                addScript = script['os'] == roleOsFamily || script['os'] == 'linux' && roleOsFamily != 'windows';
            }
            if (addScript) {
                scripts.push({
                    role_script_id: script['role_script_id'],
                    event: script['event_name'],
                    isSync: script['isSync'],
                    order_index: script['order_index'],
                    params: params[script['hash']] || script['params'],
                    script: script['script_name'],
                    script_id: script['script_id'],
                    target: script['target'],
                    timeout: script['timeout'],
                    version: script['version'],
                    system: system,
                    hash: script['hash'],
                    script_path: script['script_path'],
                    script_type: script['script_type'],
                    os: script['os']
                });
            }
        };
        for (var i in roleScripts) {
            addSystemScript(scripts, roleScripts[i], 'role');
        }
        for (var i in accountScripts) {
            addSystemScript(scripts, accountScripts[i], 'account');
        }

        var rolescripting = this.down('#rolescripting');
        rolescripting.chefSettings = chefSettings;
        rolescripting.setCurrentRoleOptions({
            farmRoleAlias: record.get('alias'),
            osFamily: roleOsFamily,
            chefAvailable: record.hasBehavior('chef'),
            isScalarized: record.get('isScalarized')
        });

        // Load farm roles (only scalarized ones can be script targets).
        var farmRoles = [],
            farmRolesStore = record.store;
        farmRolesStore.getUnfiltered().each(function(item){
            if (item.get('isScalarized') == 1) {
                farmRoles.push({
                    farm_role_id: item.get('farm_role_id'),
                    platform: item.get('platform'),
                    cloud_location: item.get('cloud_location'),
                    role_id: item.get('role_id'),
                    name: item.get('name'),
                    alias: item.get('alias'),
                    current: item === record
                });
            }
        });
        rolescripting.loadRoles(farmRoles);

        // Load scripts, events and behaviors.
        rolescripting.loadScripts(this.tabData['scripts'] || []);
        rolescripting.loadEvents(this.tabData['events'] || {});
        rolescripting.loadBehaviors(this.up('#farmDesigner').moduleParams.tabParams['behaviors']);
        rolescripting.abortIfBeforeHostUpFails = record.get('settings', true)['base.abort_init_on_script_fail'] == 1;

        // Convert deprecated configurations: farm_role_id -> farm_role_alias.
        Ext.each(scripts, function(script){
            if (script.target === 'roles') {
                var targetFarmRoles = [];
                Ext.each(script.target_roles || [], function(farmRoleId){
                    var farmRoleAlias = null;
                    Ext.each(farmRoles, function(role){
                        if (role.farm_role_id == farmRoleId) {
                            farmRoleAlias = role.alias;
                            return false;
                        }
                    });
                    if (farmRoleAlias) {
                        targetFarmRoles.push(farmRoleAlias);
                    }
                });
                script.target = 'farmroles';
                script.target_farmroles = targetFarmRoles;
                delete script.target_roles;
            }
        });

        // Load role scripts into the editor grid.
        rolescripting.loadRoleScripts(scripts);
    },

    /**
     * Persists user-defined scripts back onto the record, collects parameter
     * overrides for role-level scripts, and clears the editor grid.
     */
    hideTab: function (record) {
        // Cache the component lookup instead of calling down() twice.
        var rolescripting = this.down('#rolescripting'),
            scripts = rolescripting.getRoleScripts(),
            scripting = [],
            scripting_params = [];
        scripts.each(function(item) {
            var system = item.get('system');
            if (!system) {
                var script = Ext.clone(item.data);
                delete script.id;
                scripting.push(script);
            } else if (system === 'role') {
                scripting_params.push({
                    role_script_id: item.get('role_script_id'),
                    params: item.get('params'),
                    hash: item.get('hash')
                });
            }
        });
        record.set('scripting', scripting);
        record.set('scripting_params', scripting_params);
        rolescripting.clearRoleScripts();
    },

    __items: {
        xtype: 'scriptfield',
        itemId: 'rolescripting',
        addGridCls: 'x-panel-column-left-with-tabs'
    }
});
|
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is called by a watchdog trigger to shutdown the user pod
# by calling the DELETE method on the pod broker.

echo "INFO: Shutting down ${APP_NAME?} pod for user ${POD_USER?} through pod broker" >&2

if [[ ${IN_CLUSTER:-"true"} == "true" ]]; then
  # In-cluster: talk to the broker service directly, authenticating with the
  # broker cookie and the expected Host header.
  # FIX: IN_CLUSTER_BROKER_ENDPOINT now uses the ${VAR?} required-variable
  # guard like every other mandatory variable in this script, and the URLs
  # are quoted so unusual characters cannot be word-split.
  curl -s -f -v -H "Cookie: ${BROKER_COOKIE?}" -H "Host: ${BROKER_HOST?}" -X DELETE "${IN_CLUSTER_BROKER_ENDPOINT?}/${APP_NAME?}/"
else
  # Out-of-cluster: fetch a GCE identity token from the metadata server and
  # authenticate to the external broker endpoint with it.
  ID_TOKEN=$(curl -s -f -H "Metadata-Flavor: Google" "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience=${CLIENT_ID?}&format=full")
  curl -s -f -H "Cookie: ${BROKER_COOKIE?}" -H "Authorization: Bearer ${ID_TOKEN}" -X DELETE "${BROKER_ENDPOINT?}/${APP_NAME?}/"
fi
|
ELLIPSIS_VERSION=1.19.11
|
#!/bin/bash
# Entrypoint for a Selenium Grid node container: generates the node config,
# resolves the hub address from env vars, then starts the node and registers
# it with the hub.

/opt/bin/generate_config > /opt/selenium/config.json

# NOTE(review): the redirection above creates config.json even when
# generate_config fails, so this guard rarely fires — confirm intent.
if [ ! -e /opt/selenium/config.json ]; then
  echo No Selenium Node configuration file, the node-base image is not intended to be run directly. 1>&2
  exit 1
fi

# In the long term the idea is to remove $HUB_PORT_4444_TCP_ADDR and $HUB_PORT_4444_TCP_PORT and only work with
# $HUB_HOST and $HUB_PORT
if [ ! -z "$HUB_HOST" ]; then
  HUB_PORT_PARAM=4444
  if [ ! -z "$HUB_PORT" ]; then
    HUB_PORT_PARAM=${HUB_PORT}
  fi
  echo "Connecting to the Hub using the host ${HUB_HOST} and port ${HUB_PORT_PARAM}"
  # Map the modern HUB_HOST/HUB_PORT variables onto the legacy link-style ones.
  HUB_PORT_4444_TCP_ADDR=${HUB_HOST}
  HUB_PORT_4444_TCP_PORT=${HUB_PORT_PARAM}
fi

if [ -z "$HUB_PORT_4444_TCP_ADDR" ]; then
  echo "Not linked with a running Hub container" 1>&2
  exit 1
fi

REMOTE_HOST_PARAM=""
if [ ! -z "$REMOTE_HOST" ]; then
  echo "REMOTE_HOST variable is set, appending -remoteHost"
  REMOTE_HOST_PARAM="-remoteHost $REMOTE_HOST"
fi

if [ ! -z "$SE_OPTS" ]; then
  echo "appending selenium options: ${SE_OPTS}"
fi

# Remove stale X server lock files left by a previous container run.
rm -f /tmp/.X*lock

java ${JAVA_OPTS} -jar /opt/selenium/selenium-server-standalone.jar \
  -role node \
  -hub http://$HUB_PORT_4444_TCP_ADDR:$HUB_PORT_4444_TCP_PORT/grid/register \
  ${REMOTE_HOST_PARAM} \
  -nodeConfig /opt/selenium/config.json \
  ${SE_OPTS}
|
#! /bin/bash
# Lists .java files that differ between the current directory and a local
# sakai-scripts checkout, printing a ready-to-run `diff` command for each pair.
# NOTE(review): the comparison path is hard-coded to one developer's machine.
diff -rq . /Users/csev/dev/sakai-scripts/trunk/basiclti/tsugi-util | grep '.java differ$' | awk '{ print "diff ", $2, $4 }'
|
# Function to reverse each word
def word_reverse(s):
    """Return ``s`` with each whitespace-separated word reversed.

    Words are joined with single spaces. The previous implementation
    appended a trailing space to the result and built it with O(n^2)
    string concatenation; both are fixed by using ``str.join``.
    """
    return " ".join(word[::-1] for word in s.split())
|
<reponame>kll5h/ShinetechOA
-- Lookup table of group types, scoped per tenant.
CREATE TABLE GROUP_TYPE(
    ID BIGINT NOT NULL,      -- primary key
    NAME VARCHAR(50),        -- display name of the group type
    TENANT_ID VARCHAR(64),   -- owning tenant identifier
    CONSTRAINT PK_GROUP_TYPE PRIMARY KEY(ID)
) ENGINE=INNODB CHARSET=UTF8;
|
import React, {Component} from 'react';
import './Footer.less';

/**
 * Site-wide footer component. Stateless and takes no props; renders the
 * static "Le Foot" text inside the footer layout wrappers.
 */
class Footer extends Component{
    render(){
        return (
            <div className="site-footer">
                <div className="site-footer__inner">
                    Le Foot
                </div>
            </div>
        )
    }
}

export default Footer;
|
public class StairwayToHeaven {
    // Assuming 'n' is not below 1
    /**
     * Counts the distinct ways to climb {@code n} stairs taking steps of 1 or 2
     * (the Fibonacci recurrence), using two rolling values instead of an array.
     */
    private int stairwayToHeaven(int n) {
        int twoBelow = 1; // ways to stand two steps below the current one
        int oneBelow = 1; // ways to stand one step below the current one
        for (int step = 2; step <= n; step++) {
            int current = oneBelow + twoBelow;
            twoBelow = oneBelow;
            oneBelow = current;
        }
        return oneBelow;
    }

    public static void main(String[] args) {
        StairwayToHeaven sth = new StairwayToHeaven();
        System.out.println("Number of ways to reach heaven with 5 stairs is " + sth.stairwayToHeaven(5));
    }
}
|
#!/bin/sh -ex
# Starts a throwaway PostgreSQL 10.5 container for local development of
# cloudrunfastapi. --rm removes it on stop; the credentials are dev-only.
echo "running cloudrunfastapi-postgres"
docker run --rm --name=cloudrunfastapi-postgres -d -p 5432:5432 \
  -e POSTGRES_USER=cloud \
  -e POSTGRES_PASSWORD=run \
  -e POSTGRES_HOST_AUTH_METHOD=password \
  -e POSTGRES_DB=cloudrunfastapi \
  postgres:10.5
|
<gh_stars>100-1000
package com.wyp.materialqqlite.qqclient.protocol.protocoldata;
import org.json.JSONObject;
public class UploadCustomFaceResult {

    /** Return code parsed from the callback's "ret" field. */
    public int m_nRetCode;
    /** Remote file name / message parsed from the callback's "msg" field. */
    public String m_strRemoteFileName;

    /** Resets the parsed state to its defaults. */
    public void reset() {
        m_nRetCode = 0;
        m_strRemoteFileName = "";
    }

    /**
     * Parses the HTML/JS upload response by extracting the JSON argument of
     * uploadCustomFaceCallback(...) and populating the public fields.
     *
     * @param bytData raw UTF-8 response bytes; may be null or empty
     * @return true if the callback payload was found and parsed, false otherwise
     */
    public boolean parse(byte[] bytData) {
        try {
            reset();
            if (bytData == null || bytData.length <= 0)
                return false;
            String strData = new String(bytData, "UTF-8");
            System.out.println(strData);
            String strStart = "parent.EQQ.View.ChatBox.uploadCustomFaceCallback(";
            String strEnd = ");</script></head>";
            int nPos = strData.indexOf(strStart);
            if (nPos == -1)
                return false;
            nPos += strStart.length();
            int nPos2 = strData.indexOf(strEnd, nPos);
            if (nPos2 == -1)
                return false;
            strData = strData.substring(nPos, nPos2);
            // FIX: String.replaceAll returns a new string; the result was
            // previously discarded, so single quotes were never normalized
            // to double quotes before JSON parsing.
            strData = strData.replaceAll("'", "\"");
            JSONObject json = new JSONObject(strData);
            m_nRetCode = json.optInt("ret");
            m_strRemoteFileName = json.optString("msg");
            if (4 == m_nRetCode) {
                // For ret == 4 the message starts with the file name followed
                // by extra text; keep only the first space-delimited token.
                nPos = m_strRemoteFileName.indexOf(' ');
                if (nPos != -1)
                    m_strRemoteFileName = m_strRemoteFileName.substring(0, nPos);
            }
            return true;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return false;
    }
}
|
#!/bin/bash
# CircleCI test dispatcher: derives the framework under test from the job
# name (stripping an optional "-pyX[.Y[.Z]]" suffix) and runs its test suite.
set -ex

source "/tmp/venv/bin/activate"

DIR=$(dirname $0)

# confirm python version
python --version

FRAMEWORK="${CIRCLE_JOB}"
# Strip a trailing python-version suffix, e.g. "PYTORCH-py3.8" -> "PYTORCH".
if [[ "${CIRCLE_JOB}" =~ (.*)-py((2|3)\.?[0-9]?\.?[0-9]?) ]]; then
  FRAMEWORK=${BASH_REMATCH[1]}
fi

case ${FRAMEWORK} in
  PYTORCH)
    sh ${DIR}/tests/test_pytorch.sh
    ;;
  TFLITE)
    sh ${DIR}/tests/test_tflite.sh
    ;;
  *)
    echo "Error, '${FRAMEWORK}' not valid mode; Must be one of {PYTORCH, TFLITE}."
    exit 1
    ;;
esac
|
<filename>tests/cmd_app_touch/touch/src/log/Logger.js
//<feature logger>
/**
 * @class Ext.Logger
 * Logs messages to help with debugging.
 *
 * ## Example
 *
 * Ext.Logger.deprecate('This method is no longer supported.');
 *
 * @singleton
 */
(function() {
var Logger = Ext.define('Ext.log.Logger', {
    extend: 'Ext.log.Base',

    statics: {
        defaultPriority: 'info',

        // Numeric rank for each priority name; these keys also drive the
        // convenience-method generation in the created-callback below.
        priorities: {
            /**
             * @method verbose
             * Convenience method for {@link #log} with priority 'verbose'.
             */
            verbose: 0,
            /**
             * @method info
             * Convenience method for {@link #log} with priority 'info'.
             */
            info: 1,
            /**
             * @method deprecate
             * Convenience method for {@link #log} with priority 'deprecate'.
             */
            deprecate: 2,
            /**
             * @method warn
             * Convenience method for {@link #log} with priority 'warn'.
             */
            warn: 3,
            /**
             * @method error
             * Convenience method for {@link #log} with priority 'error'.
             */
            error: 4
        }
    },

    config: {
        enabled: true,
        // Messages with a rank below this priority are dropped.
        minPriority: 'deprecate',
        writers: {}
    },

    /**
     * Logs a message to help with debugging.
     * @param {String} message Message to log.
     * @param {Number} priority Priority of the log message.
     */
    log: function(message, priority, callerId) {
        if (!this.getEnabled()) {
            return this;
        }

        var statics = Logger,
            priorities = statics.priorities,
            // NOTE(review): priorityValue is looked up before the 'info'
            // default below is applied, so a call without a priority carries
            // an undefined priority value into the event — confirm intended.
            priorityValue = priorities[priority],
            // NOTE(review): Function#caller is non-standard and throws in
            // strict mode; the stack walk below only works in sloppy-mode
            // (legacy Sencha Touch) builds.
            caller = this.log.caller,
            callerDisplayName = '',
            writers = this.getWriters(),
            event, i, originalCaller;

        if (!priority) {
            priority = 'info';
        }

        if (priorities[this.getMinPriority()] > priorityValue) {
            return this;
        }

        if (!callerId) {
            callerId = 1;
        }

        if (Ext.isArray(message)) {
            message = message.join(" ");
        }
        else {
            message = String(message);
        }

        // Walk callerId frames up the call stack to find a display name for
        // the original caller (skipping the generated convenience methods).
        if (typeof callerId == 'number') {
            i = callerId;

            do {
                i--;
                caller = caller.caller;

                if (!caller) {
                    break;
                }

                if (!originalCaller) {
                    originalCaller = caller.caller;
                }

                if (i <= 0 && caller.displayName) {
                    break;
                }
            }
            while (caller !== originalCaller);

            callerDisplayName = Ext.getDisplayName(caller);
        }
        else {
            caller = caller.caller;
            callerDisplayName = Ext.getDisplayName(callerId) + '#' + caller.$name;
        }

        event = {
            time: Ext.Date.now(),
            priority: priorityValue,
            priorityName: priority,
            message: message,
            caller: caller,
            callerDisplayName: callerDisplayName
        };

        // Fan the event out to every configured writer; each writer gets its
        // own copy so it may mutate the event freely.
        for (i in writers) {
            if (writers.hasOwnProperty(i)) {
                writers[i].write(Ext.merge({}, event));
            }
        }

        return this;
    }
}, function() {
    // Generate one convenience method per priority name (verbose, info,
    // deprecate, warn, error) that bumps callerId and delegates to log().
    Ext.Object.each(this.priorities, function(priority) {
        this.override(priority, function(message, callerId) {
            if (!callerId) {
                callerId = 1;
            }

            if (typeof callerId == 'number') {
                callerId += 1;
            }

            this.log(message, priority, callerId);
        });
    }, this);
});
})();
//</feature>
|
/*
* MIT License
*
* Copyright (c) 2021 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package net.jamsimulator.jams.gui.action;
import javafx.scene.input.KeyCombination;
import net.jamsimulator.jams.manager.ManagerResource;
import net.jamsimulator.jams.manager.ResourceProvider;
import net.jamsimulator.jams.utils.Validate;
import java.util.Objects;
import java.util.Optional;
/**
 * Represents an action that can be bound to a {@link javafx.scene.input.KeyCombination}.
 * <p>
 * Actions are identified by {@code name} alone: {@link #equals(Object)} and
 * {@link #hashCode()} deliberately ignore every other field.
 */
public abstract class Action implements ManagerResource {

    protected final ResourceProvider provider;
    protected final String name;
    protected final String regionTag;
    // May be null; exposed through the Optional accessors below.
    protected final String languageNode;
    // May be null; exposed through the Optional accessor below.
    protected final KeyCombination defaultCombination;

    /**
     * Creates the action.
     *
     * @param provider           the provider of this resource. Cannot be null.
     * @param name               the name of the action. This name must be unique.
     * @param regionTag          the region tag of this action. This action will only interact on regions that support this tag.
     * @param languageNode       the language node of this action.
     * @param defaultCombination the default combination of keys that a user needs to press to execute this action.
     */
    public Action(ResourceProvider provider, String name, String regionTag, String languageNode, KeyCombination defaultCombination) {
        Validate.notNull(provider, "Provider cannot be null!");
        Validate.notNull(name, "Name cannot be null!");
        Validate.notNull(regionTag, "Region tag cannot be null!");
        this.provider = provider;
        this.name = name;
        this.regionTag = regionTag;
        this.languageNode = languageNode;
        this.defaultCombination = defaultCombination;
    }

    @Override
    public String getName() {
        return name;
    }

    @Override
    public ResourceProvider getResourceProvider() {
        return provider;
    }

    /**
     * Returns the region tag of the action. Regions must have this tag to execute the action.
     *
     * @return the region tag.
     */
    public String getRegionTag() {
        return regionTag;
    }

    /**
     * Returns the language node of the action, if present.
     * <p>
     * This node is used when the action must be displayed on config or on context menus.
     *
     * @return the language node of the action, if present.
     */
    public Optional<String> getLanguageNode() {
        return Optional.ofNullable(languageNode);
    }

    /**
     * Returns the default combination of the action, if present.
     * <p>
     * If the combination is present, this action is not present in the actions file and
     * no actions are bind to this combination will be bind to this action.
     * <p>
     * The combination will not be bind if the action is present in the actions file, but it has no combinations.
     *
     * @return the default code combination, if present.
     */
    public Optional<KeyCombination> getDefaultCodeCombination() {
        return Optional.ofNullable(defaultCombination);
    }

    /**
     * Executes this action.
     *
     * @param node the current focused node.
     */
    public abstract void run(Object node);

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Action action = (Action) o;
        // Equality is by unique name only (see class javadoc).
        return name.equals(action.name);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name);
    }

    @Override
    public String toString() {
        return "Action{" +
                "name='" + name + '\'' +
                ", regionTag='" + regionTag + '\'' +
                ", languageNode='" + languageNode + '\'' +
                ", defaultCombination=" + defaultCombination +
                '}';
    }
}
|
//
//  KtMainTableViewDataSource.h
//  KtTableView
//
//  Created by bestswifter on 16/4/13.
//  Copyright © 2016年 zxy. All rights reserved.
//
#import "KtTableViewDataSource.h"

// Data source for the main table view. Currently adds no public API beyond
// its superclass KtTableViewDataSource; behavior differences, if any, live
// in the implementation file.
@interface KtMainTableViewDataSource : KtTableViewDataSource

@end
|
<filename>RRTS/HttpUnit/httpunit-1.7/src/com/meterware/httpunit/dom/HTMLSelectElementImpl.java
package com.meterware.httpunit.dom;
/********************************************************************************************************************
* $Id: HTMLSelectElementImpl.java 902 2008-04-04 19:12:18Z wolfgang_fahl $
*
* Copyright (c) 2004,2006-2007, <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
* to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
* THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*******************************************************************************************************************/
import org.w3c.dom.html.HTMLSelectElement;
import org.w3c.dom.html.HTMLElement;
import org.w3c.dom.html.HTMLCollection;
import org.w3c.dom.html.HTMLOptionElement;
import org.w3c.dom.DOMException;
import com.meterware.httpunit.protocol.ParameterProcessor;
import java.io.IOException;
/**
*
* @author <a href="mailto:<EMAIL>"><NAME></a>
**/
public class HTMLSelectElementImpl extends HTMLControl implements HTMLSelectElement {

    public static final String TYPE_SELECT_ONE = "select-one";
    public static final String TYPE_SELECT_MULTIPLE = "select-multiple";

    // Factory hook used by the DOM implementation to create instances.
    ElementImpl create() {
        return new HTMLSelectElementImpl();
    }

    // NOTE(review): no-op — DOM add(element, before) is not implemented here;
    // confirm whether that is intentional.
    public void add( HTMLElement element, HTMLElement before ) throws DOMException {
    }

    /**
     * simulate blur
     */
    public void blur() {
        handleEvent("onblur");
    }

    /**
     * simulate focus;
     */
    public void focus() {
        handleEvent("onfocus");
    }

    public String getType() {
        return isMultiSelect() ? TYPE_SELECT_MULTIPLE : TYPE_SELECT_ONE;
    }

    // A select is treated as multi-select only when the "multiple" attribute
    // is set AND its size is greater than 1.
    private boolean isMultiSelect() {
        return (getMultiple() && getSize() > 1);
    }

    public int getLength() {
        return getOptions().getLength();
    }

    public boolean getMultiple() {
        return getBooleanAttribute( "multiple" );
    }

    public HTMLCollection getOptions() {
        return HTMLCollectionImpl.createHTMLCollectionImpl( getElementsByTagName( getHtmlDocument().toNodeCase( "option" ) ) );
    }

    // Index of the first selected option; with no explicit selection,
    // single-selects report 0 (the first option) and multi-selects -1.
    public int getSelectedIndex() {
        HTMLCollection options = getOptions();
        for (int i = 0; i < options.getLength(); i++) {
            if (((HTMLOptionElement)options.item(i)).getSelected()) return i;
        }
        return isMultiSelect() ? -1 : 0;
    }

    // Value of the first selected option; non-empty single-selects fall back
    // to the first option's value, otherwise null.
    public String getValue() {
        HTMLCollection options = getOptions();
        for (int i = 0; i < options.getLength(); i++) {
            HTMLOptionElement optionElement = ((HTMLOptionElement)options.item(i));
            if (optionElement.getSelected()) return optionElement.getValue();
        }
        return (isMultiSelect() || options.getLength() == 0) ? null : ((HTMLOptionElement)options.item(0)).getValue();
    }

    public int getSize() {
        return getIntegerAttribute( "size" );
    }

    // NOTE(review): no-op — DOM remove(index) is not implemented here; confirm
    // whether that is intentional.
    public void remove( int index ) {
    }

    public void setMultiple( boolean multiple ) {
        setAttribute( "multiple", multiple );
    }

    // Selects exactly the option at selectedIndex, deselecting all others.
    // Passing an out-of-range index (e.g. -1) deselects every option.
    public void setSelectedIndex( int selectedIndex ) {
        HTMLCollection options = getOptions();
        for (int i = 0; i < options.getLength(); i++) {
            HTMLOptionElementImpl optionElement = (HTMLOptionElementImpl) options.item(i);
            optionElement.setSelected( i == selectedIndex );
        }
    }

    public void setSize( int size ) {
        setAttribute( "size", size );
    }

    // Position of the given option within this select's options collection.
    int getIndexOf( HTMLOptionElementImpl option ) {
        HTMLCollection options = getOptions();
        for (int i = 0; i < options.getLength(); i++) {
            if (options.item(i) == option) return i;
        }
        throw new IllegalStateException( "option is not part of this select" );
    }

    void clearSelected() {
        setSelectedIndex( -1 );
    }

    // Contributes the selected option values to a form submission.
    void addValues( ParameterProcessor processor, String characterSet ) throws IOException {
        HTMLCollection options = getOptions();
        String name = getName();
        for (int i = 0; i < options.getLength();i++) {
            ((HTMLOptionElementImpl) options.item( i )).addValueIfSelected( processor, name, characterSet );
        }
    }

    public void setValue( String value ) {
        setAttribute( "value", value );
    }

    // Resets every option to its initial selected state.
    public void reset() {
        HTMLCollection options = getOptions();
        for (int i = 0; i < options.getLength(); i++) {
            HTMLControl optionElement = (HTMLControl) options.item(i);
            optionElement.reset();
        }
    }
}
|
/**
* This program and the accompanying materials
* are made available under the terms of the License
* which accompanies this distribution in the file LICENSE.txt
*/
package com.archimatetool.editor.diagram.commands;
import com.archimatetool.editor.model.commands.EObjectFeatureCommand;
import com.archimatetool.model.IArchimatePackage;
import com.archimatetool.model.IDiagramModelObject;
/**
* Fill Color Command
*
* @author <NAME>
*/
public class FillColorCommand extends EObjectFeatureCommand {

    /**
     * @param object the diagram model object whose fill colour is being set
     * @param rgb    the new fill colour value as an RGB string — exact format
     *               is defined by the model (presumably "#rrggbb") and null
     *               likely resets to the default colour; TODO confirm
     */
    public FillColorCommand(IDiagramModelObject object, String rgb) {
        super(Messages.FillColorCommand_0, object, IArchimatePackage.Literals.DIAGRAM_MODEL_OBJECT__FILL_COLOR, rgb);
    }
}
|
<filename>lib/ruby/truffle/mri/cgi/util.rb
require_relative '../../stdlib/cgi/util'
|
<reponame>m-nakagawa/sample
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.tdb.solver;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import org.apache.jena.atlas.iterator.* ;
import org.apache.jena.atlas.lib.Tuple ;
import org.apache.jena.graph.Node ;
import org.apache.jena.sparql.core.Var ;
import org.apache.jena.sparql.engine.ExecutionContext ;
import org.apache.jena.tdb.store.NodeId ;
import org.apache.jena.tdb.store.nodetable.NodeTable ;
import org.apache.jena.tdb.store.nodetupletable.NodeTupleTable ;
/**
 * Repeat-apply stage: for each incoming BindingNodeId, substitutes its
 * bindings into the triple/quad pattern and matches the result against
 * the node tuple table, producing extended bindings.
 */
public class StageMatchTuple extends RepeatApplyIterator<BindingNodeId>
{
    private final NodeTupleTable nodeTupleTable ;
    private final Tuple<Node> patternTuple ;
    private final ExecutionContext execCxt ;
    private boolean anyGraphs ;
    private Predicate<Tuple<NodeId>> filter ;

    public StageMatchTuple(NodeTupleTable nodeTupleTable, Iterator<BindingNodeId> input,
                           Tuple<Node> tuple, boolean anyGraphs,
                           Predicate<Tuple<NodeId>> filter,
                           ExecutionContext execCxt)
    {
        super(input) ;
        this.filter = filter ;
        this.nodeTupleTable = nodeTupleTable ;
        this.patternTuple = tuple ;
        this.execCxt = execCxt ;
        this.anyGraphs = anyGraphs ;
    }

    /** Prepare a pattern (tuple of nodes), and an existing binding of NodeId, into NodeIds and Variables.
     * A variable in the pattern is replaced by its binding or null in the Nodeids.
     * A variable that is not bound by the binding is placed in the var array.
     */
    public static void prepare(NodeTable nodeTable, Tuple<Node> patternTuple, BindingNodeId input, NodeId ids[], Var[] var)
    {
        // Process the Node to NodeId conversion ourselves because
        // we wish to abort if an unknown node is seen.
        for ( int i = 0 ; i < patternTuple.size() ; i++ )
        {
            Node n = patternTuple.get(i) ;
            // Substitution and turning into NodeIds
            // Variables unsubstituted are null NodeIds
            NodeId nId = idFor(nodeTable, input, n) ;
            if ( NodeId.isDoesNotExist(nId) )
                // FIXME: this iterator is created and immediately discarded,
                // so the intended "abort on unknown node" never happens; the
                // does-not-exist NodeId still flows into ids[i] below.
                new NullIterator<BindingNodeId>() ;
            ids[i] = nId ;
            if ( nId == null )
                var[i] = asVar(n) ;
        }
    }

    @Override
    protected Iterator<BindingNodeId> makeNextStage(final BindingNodeId input)
    {
        // ---- Convert to NodeIds
        NodeId ids[] = new NodeId[patternTuple.size()] ;
        // Variables for this tuple after subsitution
        final Var[] var = new Var[patternTuple.size()] ;

        prepare(nodeTupleTable.getNodeTable(), patternTuple, input, ids, var) ;

        Iterator<Tuple<NodeId>> iterMatches = nodeTupleTable.find(Tuple.create(ids)) ;

        // ** Allow a triple or quad filter here.
        if ( filter != null )
            iterMatches = Iter.filter(iterMatches, filter) ;

        // If we want to reduce to RDF semantics over quads,
        // we need to reduce the quads to unique triples.
        // We do that by having the graph slot as "any", then running
        // through a distinct-ifier.
        // Assumes quads are GSPO - zaps the first slot.
        // Assumes that tuples are not shared.
        if ( anyGraphs )
        {
            iterMatches = Iter.operate(iterMatches, quadsToAnyTriples) ;
            //Guaranteed
            //iterMatches = Iter.distinct(iterMatches) ;

            // This depends on the way indexes are chosen and
            // the indexing pattern. It assumes that the index
            // chosen ends in G so same triples are adjacent
            // in a union query.
            //
            // If any slot is defined, then the index will be X??G.
            // If no slot is defined, then the index will be ???G.
            // But the TupleTable
            // See TupleTable.scanAllIndex that ensures the latter.
            // No G part way through.
            iterMatches = Iter.distinctAdjacent(iterMatches) ;
        }

        // Map Tuple<NodeId> to BindingNodeId: bind each unset variable to the
        // matched slot value, rejecting matches that conflict with an
        // existing binding (binder returns null; nulls removed below).
        Function<Tuple<NodeId>, BindingNodeId> binder = tuple ->
        {
            BindingNodeId output = new BindingNodeId(input) ;
            for ( int i = 0 ; i < var.length ; i++ )
            {
                Var v = var[i] ;
                if ( v == null )
                    continue ;
                NodeId id = tuple.get(i) ;
                if ( reject(output, v, id) )
                    return null ;
                output.put(v, id) ;
            }
            return output ;
        } ;

        return Iter.iter(iterMatches).map(binder).removeNulls() ;
    }

    // Debugging helper (not referenced in this class): dumps the iterator's
    // contents to stderr and returns a fresh, equivalent iterator.
    private static Iterator<Tuple<NodeId>> print(Iterator<Tuple<NodeId>> iter)
    {
        if ( ! iter.hasNext() )
            System.err.println("<empty>") ;
        else
        {
            List<Tuple<NodeId>> r = Iter.toList(iter) ;
            String str = Iter.asString(r, "\n") ;
            System.err.println(str) ;
            // Reset iter
            iter = Iter.iter(r) ;
        }
        return iter ;
    }

    // True when var is already bound in output to a different value.
    private static boolean reject(BindingNodeId output , Var var, NodeId value)
    {
        if ( ! output.containsKey(var) )
            return false ;
        if ( output.get(var).equals(value) )
            return false ;
        return true ;
    }

    private static Var asVar(Node node)
    {
        if ( Var.isVar(node) )
            return Var.alloc(node) ;
        return null ;
    }

    /** Return null for variables, and for nodes, the node id or NodeDoesNotExist */
    private static NodeId idFor(NodeTable nodeTable, BindingNodeId input, Node node)
    {
        if ( Var.isVar(node) )
        {
            NodeId n = input.get((Var.alloc(node))) ;
            // Bound to NodeId or null.
            return n ;
        }
        // May return NodeId.NodeDoesNotExist which must not be null.
        return nodeTable.getNodeIdForNode(node) ;
    }

    // -- Mutating "transform in place": replace the graph slot (GSPO slot 0)
    // with "any" so distinct-adjacent can collapse duplicate triples.
    private static Consumer<Tuple<NodeId>> quadsToAnyTriples = item -> item.tuple()[0] = NodeId.NodeIdAny ;
}
|
# Posts any new mbox messages to Slack via an incoming webhook.
# ~/.mbox.line stores the line offset of the last processed message.
USER=`whoami`
LINE=`cat /home/${USER}/.mbox.line`
# Everything after the stored offset (skipping one separator line) is new.
MSG=`mail -f -p | tail -n +${LINE} | tail -n +2`
# NOTE(review): $MSG is interpolated into the JSON body unescaped — quotes or
# backslashes in mail bodies will produce invalid JSON. SLACK_WEBHOOK_API_URL
# must be set in the environment.
JSON="{ \"text\": \"$MSG\" }"
RES=`curl -s -X POST -H 'Content-Type: application/json' -d "$JSON" $SLACK_WEBHOOK_API_URL`
#echo $RES
# Remember the new end-of-mailbox offset for the next run.
mail -f -p | wc -l > /home/${USER}/.mbox.line
|
# Builds a pacman (.pkg.tar.*) package for the current Termux package from the
# already-staged package directory (the CWD), writing it to TERMUX_OUTPUT_DIR.
termux_step_create_pacman_package() {
	local TERMUX_PKG_INSTALLSIZE
	TERMUX_PKG_INSTALLSIZE=$(du -bs . | cut -f 1)

	# From here on TERMUX_ARCH is set to "all" if TERMUX_PKG_PLATFORM_INDEPENDENT is set by the package
	[ "$TERMUX_PKG_PLATFORM_INDEPENDENT" = "true" ] && TERMUX_ARCH=any

	# Configure the compressor (and matching file extension) for the package
	# archive. xz is the default for unset/unrecognized values.
	local COMPRESS
	local PKG_FORMAT
	case $TERMUX_PACMAN_PACKAGE_COMPRESSION in
		"gzip")
			COMPRESS=(gzip -c -f -n)
			PKG_FORMAT="gz";;
		"bzip2")
			COMPRESS=(bzip2 -c -f)
			PKG_FORMAT="bz2";;
		"zstd")
			COMPRESS=(zstd -c -z -q -)
			PKG_FORMAT="zst";;
		"lrzip")
			COMPRESS=(lrzip -q)
			PKG_FORMAT="lrz";;
		"lzop")
			COMPRESS=(lzop -q)
			PKG_FORMAT="lzop";;
		"lz4")
			COMPRESS=(lz4 -q)
			PKG_FORMAT="lz4";;
		"lzip")
			COMPRESS=(lzip -c -f)
			PKG_FORMAT="lz";;
		"xz" | *)
			COMPRESS=(xz -c -z -)
			PKG_FORMAT="xz";;
	esac

	# NOTE(review): ${DEBUG} is expected from the surrounding build
	# environment (empty for release builds) — confirm.
	local PACMAN_FILE=$TERMUX_OUTPUT_DIR/${TERMUX_PKG_NAME}${DEBUG}-${TERMUX_PKG_FULLVERSION}-${TERMUX_ARCH}.pkg.tar.${PKG_FORMAT}

	# Normalize the version for pacman: the first "-" (debian-style revision
	# separator) and the first letter suffix are folded into dotted form.
	local TERMUX_PKG_VERSION=$(echo $TERMUX_PKG_VERSION | sed "s|-|.|")
	local TERMUX_PKG_VERSION=${TERMUX_PKG_VERSION/[a-z]/.${TERMUX_PKG_VERSION//[0-9.]/}}

	local BUILD_DATE
	BUILD_DATE=$(date +%s)

	# Package metadata.
	{
		echo "pkgname = $TERMUX_PKG_NAME"
		echo "pkgbase = $TERMUX_PKG_NAME"
		if [ -n "$TERMUX_PKG_REVISION" ]; then
			echo "pkgver = $TERMUX_PKG_VERSION-${TERMUX_PKG_REVISION}"
		else
			echo "pkgver = $TERMUX_PKG_VERSION-0"
		fi
		echo "pkgdesc = $(echo "$TERMUX_PKG_DESCRIPTION" | tr '\n' ' ')"
		echo "url = $TERMUX_PKG_HOMEPAGE"
		echo "builddate = $BUILD_DATE"
		echo "packager = $TERMUX_PKG_MAINTAINER"
		echo "size = $TERMUX_PKG_INSTALLSIZE"
		echo "arch = $TERMUX_ARCH"
		if [ -n "$TERMUX_PKG_LICENSE" ]; then
			tr ',' '\n' <<< "$TERMUX_PKG_LICENSE" | awk '{ printf "license = %s\n", $0 }'
		fi
		# The sed below strips debian-style version constraints "(>= 1.0)"
		# into pacman's ">=1.0" form for each relation field.
		if [ -n "$TERMUX_PKG_REPLACES" ]; then
			tr ',' '\n' <<< "$TERMUX_PKG_REPLACES" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "replaces = %s\n", $1 }'
		fi
		if [ -n "$TERMUX_PKG_CONFLICTS" ]; then
			tr ',' '\n' <<< "$TERMUX_PKG_CONFLICTS" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "conflict = %s\n", $1 }'
		fi
		if [ -n "$TERMUX_PKG_BREAKS" ]; then
			tr ',' '\n' <<< "$TERMUX_PKG_BREAKS" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "conflict = %s\n", $1 }'
		fi
		if [ -n "$TERMUX_PKG_PROVIDES" ]; then
			tr ',' '\n' <<< "$TERMUX_PKG_PROVIDES" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "provides = %s\n", $1 }'
		fi
		if [ -n "$TERMUX_PKG_DEPENDS" ]; then
			tr ',' '\n' <<< "$TERMUX_PKG_DEPENDS" | sed 's|(||g; s|)||g; s| ||g; s|>>|>|g; s|<<|<|g' | awk '{ printf "depend = %s\n", $1 }'
		fi
		if [ -n "$TERMUX_PKG_RECOMMENDS" ]; then
			tr ',' '\n' <<< "$TERMUX_PKG_RECOMMENDS" | awk '{ printf "optdepend = %s\n", $1 }'
		fi
		if [ -n "$TERMUX_PKG_SUGGESTS" ]; then
			tr ',' '\n' <<< "$TERMUX_PKG_SUGGESTS" | awk '{ printf "optdepend = %s\n", $1 }'
		fi
		if [ -n "$TERMUX_PKG_CONFFILES" ]; then
			tr ',' '\n' <<< "$TERMUX_PKG_CONFFILES" | awk '{ printf "backup = '"${TERMUX_PREFIX:1}"'/%s\n", $1 }'
		fi
	} > .PKGINFO

	# Build metadata.
	{
		echo "format = 2"
		echo "pkgname = $TERMUX_PKG_NAME"
		echo "pkgbase = $TERMUX_PKG_NAME"
		if [ -n "$TERMUX_PKG_REVISION" ]; then
			echo "pkgver = $TERMUX_PKG_VERSION-${TERMUX_PKG_REVISION}"
		else
			echo "pkgver = $TERMUX_PKG_VERSION-0"
		fi
		echo "pkgarch = $TERMUX_ARCH"
		echo "packager = $TERMUX_PKG_MAINTAINER"
		echo "builddate = $BUILD_DATE"
	} > .BUILDINFO

	# Write installation hooks.
	termux_step_create_debscripts
	termux_step_create_pacman_install_hook

	# Create package
	shopt -s dotglob globstar
	# .MTREE: file-metadata manifest consumed by pacman.
	printf '%s\0' **/* | bsdtar -cnf - --format=mtree \
		--options='!all,use-set,type,uid,gid,mode,time,size,md5,sha256,link' \
		--null --files-from - --exclude .MTREE | \
		gzip -c -f -n > .MTREE
	# FIX: COMPRESS is a bash array; the previous unquoted $COMPRESS expanded
	# only its first element, silently dropping all compressor flags.
	printf '%s\0' **/* | bsdtar --no-fflags -cnf - --null --files-from - | \
		"${COMPRESS[@]}" > "$PACMAN_FILE"
	shopt -u dotglob globstar
}
|
import math

def solve_quadratic(a, b, c):
    """Return the two real roots of a*x**2 + b*x + c = 0 as a tuple.

    For a > 0 the roots come back as (smaller, larger), matching the
    original (-b - sqrt(d), -b + sqrt(d)) ordering. A repeated root is
    returned twice.

    Args:
        a: quadratic coefficient (must be non-zero).
        b: linear coefficient.
        c: constant term.

    Returns:
        (sol1, sol2): the two real solutions.

    Raises:
        ValueError: if the discriminant is negative (no real roots).
        ZeroDivisionError: if a == 0 (the equation is not quadratic).
    """
    # calculate discriminant
    d = (b**2) - (4*a*c)
    if d < 0:
        # math.sqrt would raise a bare "math domain error"; fail with a
        # message that actually explains the problem instead.
        raise ValueError("no real roots: discriminant is negative")
    root = math.sqrt(d)
    # find two solutions
    sol1 = (-b - root) / (2*a)
    sol2 = (-b + root) / (2*a)
    return sol1, sol2
|
package istu.bacs.background.combined;
import istu.bacs.background.combined.db.SubmissionService;
import istu.bacs.db.submission.Submission;
import istu.bacs.db.submission.Verdict;
import istu.bacs.externalapi.ExternalApi;
import istu.bacs.rabbit.QueueName;
import istu.bacs.rabbit.RabbitService;
import lombok.extern.slf4j.Slf4j;
import org.slf4j.Logger;
import org.springframework.stereotype.Component;
import java.util.List;
import static istu.bacs.background.combined.SubmissionCheckerProcessor.PROCESSOR_NAME;
import static istu.bacs.db.submission.Verdict.PENDING;
import static istu.bacs.rabbit.QueueName.CHECKED_SUBMISSIONS;
import static istu.bacs.rabbit.QueueName.SUBMITTED_SUBMISSIONS;
/**
 * Pipeline stage that checks results of already-submitted solutions:
 * it takes PENDING submissions from the SUBMITTED_SUBMISSIONS queue,
 * asks the external API for their check results, and forwards them to
 * the CHECKED_SUBMISSIONS queue. All queue/verdict wiring lives in the
 * overrides below; the actual loop is implemented by SubmissionProcessor.
 */
@Slf4j
@Component(PROCESSOR_NAME)
public class SubmissionCheckerProcessor extends SubmissionProcessor {

    // Spring bean name; also returned from processorName() for logging.
    static final String PROCESSOR_NAME = "SubmissionCheckerProcessor";

    private final ExternalApi externalApi;

    public SubmissionCheckerProcessor(SubmissionService submissionService, RabbitService rabbitService, ExternalApi externalApi) {
        super(submissionService, rabbitService);
        this.externalApi = externalApi;
    }

    /** Delegate the batch to the external judge; returns its success flag. */
    @Override
    protected boolean process(List<Submission> submissions) {
        return externalApi.checkSubmissionResult(submissions);
    }

    /** Only submissions still PENDING are picked up by this stage. */
    @Override
    protected Verdict incomingVerdict() {
        return PENDING;
    }

    @Override
    protected QueueName incomingQueueName() {
        return SUBMITTED_SUBMISSIONS;
    }

    @Override
    protected QueueName outcomingQueueName() {
        return CHECKED_SUBMISSIONS;
    }

    @Override
    protected String processorName() {
        return PROCESSOR_NAME;
    }

    /** Expose the Lombok-generated logger to the base class. */
    @Override
    protected Logger log() {
        return log;
    }
}
|
#!/usr/bin/env bash
# Terraform user-data template: installs the AWS CLI, writes a helper that
# syncs SSH public keys from an S3 bucket into the user's authorized_keys,
# runs it once immediately, and optionally schedules it via cron.
##############
# Install deps
##############
# Ubuntu
apt-get update
apt-get install python-pip jq -y
#####################
# Amazon Linux (RHEL) - NAT instances
yum update
yum install python-pip jq -y
#####################
pip install --upgrade awscli
##############
# Write the key-sync helper. The heredoc delimiter is quoted so the shell
# expands nothing at write time; the bucket/user placeholders are filled in
# by the templating engine when the user-data is rendered.
cat <<"EOF" > /home/${ssh_user}/update_ssh_authorized_keys.sh
#!/usr/bin/env bash
set -e
BUCKET_NAME=${s3_bucket_name}
BUCKET_URI=${s3_bucket_uri}
SSH_USER=${ssh_user}
MARKER="# KEYS_BELOW_WILL_BE_UPDATED_BY_TERRAFORM"
KEYS_FILE=/home/$SSH_USER/.ssh/authorized_keys
TEMP_KEYS_FILE=$(mktemp /tmp/authorized_keys.XXXXXX)
PUB_KEYS_DIR=/home/$SSH_USER/pub_key_files/
[[ -z $BUCKET_URI ]] && BUCKET_URI="s3://$BUCKET_NAME/"
mkdir -p $PUB_KEYS_DIR
# Add marker, if not present, and copy static content.
grep -Fxq "$MARKER" $KEYS_FILE || echo -e "\n$MARKER" >> $KEYS_FILE
line=$(grep -n "$MARKER" $KEYS_FILE | cut -d ":" -f 1)
head -n $line $KEYS_FILE > $TEMP_KEYS_FILE
# Synchronize the keys from the bucket.
aws s3 sync --delete $BUCKET_URI $PUB_KEYS_DIR
for filename in $PUB_KEYS_DIR/*; do
sed 's/\n\?$/\n/' < $filename >> $TEMP_KEYS_FILE
done
# Move the new authorized keys in place.
chown $SSH_USER:$SSH_USER $KEYS_FILE
chmod 600 $KEYS_FILE
mv $TEMP_KEYS_FILE $KEYS_FILE
EOF
# Hand ownership of the helper to the target user and make it executable.
chown ${ssh_user}:${ssh_user} /home/${ssh_user}/update_ssh_authorized_keys.sh
chmod 755 /home/${ssh_user}/update_ssh_authorized_keys.sh
# Execute now
su ${ssh_user} -c /home/${ssh_user}/update_ssh_authorized_keys.sh
# Be backwards compatible with old cron update enabler
if [ "${enable_hourly_cron_updates}" = 'true' -a -z "${keys_update_frequency}" ]; then
keys_update_frequency="0 * * * *"
else
keys_update_frequency="${keys_update_frequency}"
fi
# Add to cron
# The grep -v strips any previous entry for this command so the job is
# replaced rather than duplicated on repeated runs.
if [ -n "$keys_update_frequency" ]; then
croncmd="/home/${ssh_user}/update_ssh_authorized_keys.sh"
cronjob="$keys_update_frequency $croncmd"
( crontab -u ${ssh_user} -l | grep -v "$croncmd" ; echo "$cronjob" ) | crontab -u ${ssh_user} -
fi
# Append addition user-data script
${additional_user_data_script}
|
<reponame>JimBae/pythonTips<filename>04_map_filter_reduce.py
import os
import sys
#* map
#* filter
#-----------
# * map
#-----------
# map applies a function to every item of the input list.
#
# map(function_to_apply, list_of_inputs)

# without map: manual loop plus append
itemList = [1,2,3,4,5]
resultList = []
for each in itemList:
    resultList.append(each**2)
print(resultList)

# use map (in Python 3 map returns a lazy iterator, hence the list() call)
itemList = [1,2,3,4,5]
resultList = map(lambda x: x**2, itemList)
print(list(resultList))

# new example: mapping over a list of FUNCTIONS, applying each to i
def mul(x):
    return x*x
def add(x):
    return x+x

funcs = [mul, add]
for i in range(5):
    value = map(lambda x:x(i), funcs)
    print(list(value))

#------------
# * filter
#------------
# filter builds a list of the elements for which the function returns true.
inList = range(-5, 5)
lessThanZero = list(filter(lambda x: x<0, inList))
print(lessThanZero)

#------------
# * reduce
#------------
# reduce folds a list down to a single value; very handy for aggregates.
# For example, to compute the product of a list:
from functools import reduce
output = reduce( (lambda x, y: x*y), [1,2,3,4] )
print(output)

# Summary
# map
inList = [1,2,3,4,5]
outList = list(map( lambda x: x*x, inList))
print(outList) # [1,4,9, 16, 25]

# filter
inList = [1,2,3,4,5]
outList = list(filter(lambda x: x>3, inList))
print(outList) # [4,5]

# reduce
from functools import reduce
inList = [1,2,3,4,5]
outList = reduce(lambda x,y: x*y, inList)
print(outList) # 120
|
<gh_stars>0
import request from '@/utils/request'
const BASE_API_6 = process.env.VUE_APP_BASE_API_6// yunw
const BASE_API_3 = process.env.VUE_APP_BASE_API_3// wuming
const BASE_API_4 = process.env.VUE_APP_BASE_API_4 // lixianzhao
// 云台控制
export function ptz(params) {
return request({
url: '/video/ptz/ptz',
baseURL: BASE_API_4,
method: 'get',
params: params
})
}
// 停止控制
export function ptzStop(params) {
return request({
url: '/video/ptz/ptzStop',
baseURL: BASE_API_4,
method: 'get',
params: params
})
}
// 焦点光圈控制
export function fi(params) {
return request({
url: '/video/ptz/fi',
baseURL: BASE_API_4,
method: 'get',
params: params
})
}
// 预置位控制
export function preset(params) {
return request({
url: '/video/ptz/preset',
baseURL: BASE_API_4,
method: 'get',
params: params
})
}
// 获取下级通道预置位
export function fetchpreset(params) {
return request({
url: '/video/ptz/fetchpreset',
baseURL: BASE_API_4,
method: 'get',
params: params
})
}
// 查询召测历史
export function getImageList(params) {
return request({
url: '/cmd/getImageList',
baseURL: BASE_API_4,
method: 'get',
params: params
})
}
// 获取发送预警信息集合
export function getSendList(params) {
return request({
url: '/warn/getSendList',
baseURL: BASE_API_4,
method: 'post',
data: params
})
}
// 获取水库关联责任人人员
export function getRsUserInfo(params) {
return request({
url: '/warn/getRsUserInfo',
baseURL: BASE_API_4,
method: 'get',
params: params
})
}
// 查询单个测点监测数据列表
export function getMonitorDataByPointId(params) {
return request({
url: '/monitordata/getMonitorDataByPointId',
baseURL: BASE_API_3,
method: 'GET',
params: params
})
}
// 上报数据
export function getPatrolListByRscd(params) {
return request({
url: '/app/getPatrolListByRscd',
baseURL: BASE_API_6,
method: 'post',
data: params
})
}
// 预警统计
export function group(params) {
return request({
url: '/warn/group',
baseURL: BASE_API_6,
method: 'POST',
data: params
})
}
// 查询智能感知曲线
export function getMonitorCurveData(params) {
return request({
url: '/monitordata/getMonitorCurveData',
baseURL: BASE_API_3,
method: 'GET',
params: params
})
}
// Trigger an image capture (召测) for the given device number. The device
// number is sent both in the query string and as the POST body, matching
// what the backend expects.
export function callImage(query) {
  return request({
    baseURL: BASE_API_4,
    // URL-encode the value so special characters cannot break the query
    // string (no-op for plain numeric device numbers).
    url: '/cmd/callImage?cusno=' + encodeURIComponent(query),
    method: 'post',
    data: query
  })
}
// 查询水库基础信息
export function rsvrBaseInfoBy(params) {
return request({
url: '/rsvrBaseInfoBy',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 查询视频列表
export function list4Page(params) {
return request({
url: '/video/list4Page',
baseURL: BASE_API_6,
method: 'POST',
data: params
})
}
// 开流
export function openVideo(url) {
return request({
url: '',
baseURL: url,
method: 'GET',
params: ''
})
}
// 查询工程部位列表
export function getRsrPosListByRscd(params) {
return request({
url: '/rsrpos/getRsrPosListByRscd',
baseURL: BASE_API_3,
method: 'get',
params: params
})
}
// 查看图片接口
export function getReservoirImage(params) {
return request({
url: '/web/point/getReservoirImage',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 水库基本信息列表
export function getReservoirListByPage(params) {
return request({
url: '/web/rsvr/getReservoirListByPage',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 添加水库
export function addReservoirBaseInfo(params) {
return request({
url: '/web/rsvr/addReservoirBaseInfo',
baseURL: BASE_API_6,
method: 'POST',
data: params
})
}
// 修改水库基本信息
export function updateReservoirBaseInfo(params) {
return request({
url: '/web/rsvr/updateReservoirBaseInfo',
baseURL: BASE_API_6,
method: 'POST',
data: params
})
}
// 水库详情
export function getBaseInfoById(params) {
return request({
url: '/web/rsvr/getBaseInfoById',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 新增水库管理人员
export function addReservoirInscptor(params) {
return request({
url: '/web/inscptor/addReservoirInscptor',
baseURL: BASE_API_6,
method: 'POST',
data: params
})
}
// 配置水库人员
export function configReservoirInscptor(params) {
return request({
url: '/web/inscptor/configReservoirInscptor',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 根据人员ID删除人员与水库关系
export function deleteByInscptorId(params) {
return request({
url: '/web/inscptor/deleteByInscptorId',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 根据水库ID数组和角色ID数组查询人员列表
export function getInscptorByReservoirIdAndRoleId(params) {
return request({
url: '/web/inscptor/getInscptorByReservoirIdAndRoleId',
baseURL: BASE_API_6,
method: 'POST',
data: params
})
}
// 分页查询水库人员列表
export function getReservoirInspctorByPage(params) {
return request({
url: '/web/inscptor/getReservoirInspctorByPage',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 查询所有点位列表
export function getAllPointLocationList(params) {
return request({
url: '/web/point/getAllPointLocationList',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 查询可配置项
export function getReservoirNotExistPointByReservoirId(params) {
return request({
url: '/web/point/getReservoirNotExistPointByReservoirId',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 根据已配置项
export function getReservoirPointByReservoirId(params) {
return request({
url: '/web/point/getReservoirPointByReservoirId',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 移除水库点位
export function removeAllotPointBatch(params) {
return request({
url: '/web/point/removeAllotPointBatch',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 添加水库点位
export function reservoirAllotPoint(params) {
return request({
url: '/web/point/reservoirAllotPoint',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 批量删除水库
export function deleteBatchReservoirInscptor(params) {
return request({
url: '/web/inscptor/deleteBatchReservoirInscptor',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 根据点位ID查询点位信息
export function getPointLocationById(params) {
return request({
url: '/web/point/getPointLocationById',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 分页查询水库点位信息列表
export function getPointLocationListByPage(params) {
return request({
url: '/web/point/getPointLocationListByPage',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 添加点位
export function addPointLocation(params) {
return request({
url: '/web/point/addPointLocation',
baseURL: BASE_API_6,
method: 'POST',
data: params
})
}
// 修改点位
export function updatePointLocation(params) {
return request({
url: '/web/point/updatePointLocation',
baseURL: BASE_API_6,
method: 'POST',
data: params
})
}
// 删除点位信息
export function deletePointLocationById(params) {
return request({
url: '/web/point/deletePointLocationById',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 获得所有用户
export function getAllUserList(params) {
return request({
url: '/user/getAllUserList',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 获得所有角色
export function getAllRoles(params) {
return request({
url: '/role/getAllRoles',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 导入excel水库李彪接口
export function imporData(params) {
return request({
url: '/web/rsvr/imporData',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 下载链接
export function model(params) {
return request({
url: '/web/rsvr/downloadTemplate',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 用户列表+角色类型
export function getUserRoleByUserId(params) {
return request({
url: '/web/inscptor/getUserRoleByUserId',
baseURL: BASE_API_6,
method: 'GET',
params: params
})
}
// 查询水库相关资料
export function fileTree(params) {
return request({
url: '/file/tree',
baseURL: BASE_API_6,
method: 'get',
params: params
})
}
// 查询工情监测项目树
export function getRsrEnmnitTree(params) {
return request({
url: '/enmnit/getRsrEnmnitTree',
baseURL: BASE_API_3,
method: 'get',
params: params
})
}
|
package com.firoz.mymvpboilerplate.presenter;
import com.firoz.mymvpboilerplate.IConnections;
import com.firoz.mymvpboilerplate.interactor.MainInteractor;
/**
 * MVP presenter for the main screen: receives UI events from the view,
 * runs a MainInteractor against the repository, and pushes the outcome
 * back into the view through the Callback methods.
 *
 * Created by firoz on 1/4/17.
 */
public class MainPresenter implements IConnections.MyPresenter, IConnections.Callback {

    private IConnections.MyView view;
    private IConnections.MyRepository repository;
    // created fresh on every button click; this presenter is its callback
    private IConnections.MyInteractor interactor;

    public MainPresenter(IConnections.MyView view, IConnections.MyRepository repository) {
        this.view = view;
        this.repository = repository;
    }

    /** View event: build and start the interactor. */
    @Override
    public void buttonClicked() {
        // initialize the interactor
        interactor = new MainInteractor(this, repository);
        // run the interactor
        interactor.execute();
    }

    /** Interactor callback: display the success message. */
    @Override
    public void onSuccess(String message) {
        view.updateText(message);
    }

    /** Interactor callback: display the error text. */
    @Override
    public void onFail(String error) {
        view.updateText(error);
    }
}
|
package org.glamey.training.codes.tree;
import java.util.LinkedList;
import java.util.Queue;
/**
* 给定两个二叉树,想象当你将它们中的一个覆盖到另一个上时,两个二叉树的一些节点便会重叠。
* <p>
* 你需要将他们合并为一个新的二叉树。合并的规则是如果两个节点重叠,那么将他们的值相加作为节点合并后的新值,否则不为 NULL 的节点将直接作为新二叉树的节点。
* <p>
* 示例 1:
* <p>
* 输入:
* Tree 1 Tree 2
* 1 2
* / \ / \
* 3 2 1 3
* / \ \
* 5 4 7
* 输出:
* 合并后的树:
* 3
* / \
* 4 5
* / \ \
* 5 4 7
* 注意: 合并必须从两个树的根节点开始。
* <p>
* 通过次数48,260提交次数63,452
* <p>
* 来源:力扣(LeetCode)
* 链接:https://leetcode-cn.com/problems/merge-two-binary-trees
* 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
/**
 * LeetCode 617 — Merge Two Binary Trees.
 *
 * Overlay one tree on the other: where two nodes overlap, their values are
 * summed; where only one side has a node, that subtree is used as-is.
 * https://leetcode-cn.com/problems/merge-two-binary-trees
 */
public class MergeTwoBinaryTree {

    /**
     * Recursive merge, accumulating into t1.
     *
     * @param t1 root of the base tree; mutated in place and returned
     * @param t2 root of the tree merged into t1; its subtrees may be linked
     *           directly into the result where t1 is missing a child
     * @return root of the merged tree (t1, unless t1 is null)
     */
    private static TreeNode mergeTreesByRecursion(TreeNode t1, TreeNode t2) {
        if (t1 == null || t2 == null) {
            return t1 == null ? t2 : t1;
        }
        t1.val += t2.val;
        t1.left = mergeTreesByRecursion(t1.left, t2.left);
        // BUG FIX: the merged right subtree must be stored on t1 — the tree
        // being returned. The original assigned it to t2.right, silently
        // discarding the merge of the entire right side.
        t1.right = mergeTreesByRecursion(t1.right, t2.right);
        return t1;
    }

    /**
     * Iterative breadth-first merge, accumulating into t1.
     * Corresponding node pairs are pushed adjacently onto the queue;
     * when only one side has a child, that subtree is linked directly
     * and needs no further traversal.
     */
    private static TreeNode mergeTreesByBfs(TreeNode t1, TreeNode t2) {
        if (t1 == null || t2 == null) {
            return t1 == null ? t2 : t1;
        }
        Queue<TreeNode> queue = new LinkedList<>();
        queue.offer(t1);
        queue.offer(t2);
        while (!queue.isEmpty()) {
            TreeNode n1 = queue.poll();
            TreeNode n2 = queue.poll();
            n1.val += n2.val;
            if (n1.left != null && n2.left != null) {
                queue.offer(n1.left);
                queue.offer(n2.left);
            } else if (n1.left == null) {
                n1.left = n2.left;
            }
            if (n1.right != null && n2.right != null) {
                queue.offer(n1.right);
                queue.offer(n2.right);
            } else if (n1.right == null) {
                n1.right = n2.right;
            }
        }
        return t1;
    }
}
|
# GNU ld emulation parameters: 64-bit IA-64 (Itanium) ELF on FreeBSD.
# Start from the generic elf64_ia64 settings, then adjust for FreeBSD.
source_sh ${srcdir}/emulparams/elf64_ia64.sh
# High text-segment base address used for this target.
TEXT_START_ADDR="0x2000000000000000"
# Drop base-file settings that do not apply here.
unset DATA_ADDR
unset SMALL_DATA_CTOR
unset SMALL_DATA_DTOR
# Apply the common FreeBSD ELF overrides last so they take precedence.
source_sh ${srcdir}/emulparams/elf_fbsd.sh
|
#!/usr/bin/env bash
# Push the repository two directories up. Abort immediately if anything
# fails so `git push` can never run from the wrong working directory.
set -euo pipefail

cd ../.. || exit 1
git push
|
<reponame>OS2World/LIB-GRAPHICS-The_Mesa_3D_Graphics_Library<filename>src/MesaDLL/accum.cpp<gh_stars>0
/* $Id: accum.c,v 1.39 2002/10/24 23:57:19 brianp Exp $ */
/*
* Mesa 3-D graphics library
* Version: 4.1
*
* Copyright (C) 1999-2002 <NAME> All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* <NAME> BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "glheader.h"
#include "accum.h"
#include "context.h"
#include "imports.h"
#include "macros.h"
#include "state.h"
#include "mtypes.h"
/*
 * glClearAccum: set the accumulation-buffer clear color.
 * Each component is clamped to [-1, 1]; dirty state is flagged only when
 * the clamped color actually differs from the current one.
 */
void
_mesa_ClearAccum( GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha )
{
   GLfloat tmp[4];
   GET_CURRENT_CONTEXT(ctx);
   ASSERT_OUTSIDE_BEGIN_END(ctx);   /* invalid inside glBegin/glEnd */

   /* clamp each component to the legal accumulation range */
   tmp[0] = CLAMP( red, -1.0F, 1.0F );
   tmp[1] = CLAMP( green, -1.0F, 1.0F );
   tmp[2] = CLAMP( blue, -1.0F, 1.0F );
   tmp[3] = CLAMP( alpha, -1.0F, 1.0F );

   /* no-op when the clear color is unchanged — avoids a needless flush */
   if (TEST_EQ_4V(tmp, ctx->Accum.ClearColor))
      return;

   FLUSH_VERTICES(ctx, _NEW_ACCUM);
   COPY_4FV( ctx->Accum.ClearColor, tmp );
}
/* Should really be a driver-supplied function?
 */
/*
 * glAccum: perform an accumulation-buffer operation over the active region.
 * Verifies an accumulation buffer exists and draw/read buffers match,
 * refreshes derived state, picks the affected region (scissor box when
 * scissoring is enabled, else the whole window) and delegates to the driver.
 */
void
_mesa_Accum( GLenum op, GLfloat value )
{
   GET_CURRENT_CONTEXT(ctx);
   GLuint xpos, ypos, width, height;
   ASSERT_OUTSIDE_BEGIN_END_AND_FLUSH(ctx);

   /* need an accumulation buffer, and draw/read buffers must be the same */
   if (ctx->Visual.accumRedBits == 0 || ctx->DrawBuffer != ctx->ReadBuffer) {
      _mesa_error(ctx, GL_INVALID_OPERATION, "glAccum");
      return;
   }

   /* bring derived state up to date before touching the buffers */
   if (ctx->NewState)
      _mesa_update_state( ctx );

   /* Determine region to operate upon. */
   if (ctx->Scissor.Enabled) {
      xpos = ctx->Scissor.X;
      ypos = ctx->Scissor.Y;
      width = ctx->Scissor.Width;
      height = ctx->Scissor.Height;
   }
   else {
      /* whole window */
      xpos = 0;
      ypos = 0;
      width = ctx->DrawBuffer->Width;
      height = ctx->DrawBuffer->Height;
   }

   ctx->Driver.Accum( ctx, op, value, xpos, ypos, width, height );
}
|
import setuptools
# Long description for PyPI comes straight from the README.
with open("README.md", 'r') as f:
    long_description = f.read()

# Runtime dependencies are read from requirements.txt. Skip blank lines and
# comment lines so pip-style requirement files parse cleanly (the original
# kept them as empty/"#..." entries in install_requires).
with open("requirements.txt", 'r') as f:
    install_requires = [
        line.strip()
        for line in f
        if line.strip() and not line.lstrip().startswith('#')
    ]

setuptools.setup(
    name="qualg",
    version="0.1.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Symbolic linear algrebra for quantum mechanics",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/AckslD/QuAlg",
    include_package_data=True,
    packages=setuptools.find_packages(exclude=('tests', 'docs', 'examples')),
    install_requires=install_requires,
    python_requires='>=3.6',
)
|
const _ = require("lodash");
const jwt = require("jsonwebtoken");
const config = require("../utils/logchimpConfig")();
/**
 * Sign a JWT carrying the given claims.
 *
 * @param {*} data - claims object to embed in the token
 * @param {*} payload - jsonwebtoken sign options (expiry, algorithm, ...)
 *
 * @returns {string} the signed JWT
 */
const createToken = (data, payload) =>
  jwt.sign(data, config.server.secretKey, payload);
/**
 * Verify a JWT's signature and decode its payload.
 *
 * @param {string} token - raw JWT string
 * @returns {(Object|string|null)} the decoded payload, or null when the
 *   argument is not a string at all
 * @throws propagates jwt.verify errors (malformed token, bad signature,
 *   expiry) — note the asymmetry with the null return above; callers must
 *   be prepared to catch
 */
const verifyToken = (token) => {
  if (!_.isString(token)) {
    return null;
  }
  const secretKey = config.server.secretKey;
  return jwt.verify(token, secretKey);
};
module.exports = {
createToken,
verifyToken
};
|
#include <iostream>
#include <vector>
#include <string>
// Collects pin-pair "nets", renders them in the textual snapshot format
// and hands the result to storage.
class SnapshotManager {
public:
    // Render the given nets and persist the resulting snapshot string.
    void takeSnapshot(const std::vector<std::pair<int, int>>& nets) {
        storeSnapshot(formatSnapshot(nets));
    }

private:
    // Produce "Snapshot: (a-b) (c-d) ... " for the given net list.
    std::string formatSnapshot(const std::vector<std::pair<int, int>>& nets) {
        std::string text("Snapshot: ");
        for (const std::pair<int, int>& net : nets) {
            text += "(";
            text += std::to_string(net.first);
            text += "-";
            text += std::to_string(net.second);
            text += ") ";
        }
        return text;
    }

    // Persistence hook: currently just echoes the snapshot to stdout.
    void storeSnapshot(const std::string& snapshot) {
        std::cout << "Snapshot stored: " << snapshot << std::endl;
    }
};
int main() {
    // Demonstrate the snapshot flow with three hard-coded nets.
    std::vector<std::pair<int, int>> nets;
    nets.push_back({1, 2});
    nets.push_back({3, 4});
    nets.push_back({5, 6});

    SnapshotManager manager;
    manager.takeSnapshot(nets);
    return 0;
}
|
<reponame>slai11/mahjong<gh_stars>1-10
package main
import "fmt"
// Move represents what the user will send to the server
type Move struct {
	Player     `json:"player"`
	Action     `json:"action"`
	Tile       int `json:"tile"`
	// must equal GameState.TurnNumber or the move is rejected as stale
	TurnNumber int `json:"turn_number"`
}

// LastWinningHand records the outcome of the previous round for display.
type LastWinningHand struct {
	Player      `json:"player"`
	WinningTile int   `json:"winning_tile"`
	Hand        []int `json:"hand"`
	// true when the round ended without a winner
	Stalemate bool `json:"stalemate"`
}

// GameState represents the mahjong table
type GameState struct {
	// anchor player, prevailingWind changes when PlayerTurn == starter
	Starter        Player `json:"starter"`
	PrevailingWind `json:"prevailing_wind"`
	// next player who can make a move, can jump
	PlayerTurn Player `json:"player_turn"`
	// To ensure correct order of moves, no one can move
	// if two players click Call at the same time, the first request received by
	// the server will hold
	TurnNumber      int  `json:"turn_number"`
	IsTransitioning bool `json:"is_transitioning"`
	// all tiles in player map(hand and displayed), discarded, remaining and last discarded
	// must be unique and be of len 148
	PlayerMap         map[Player]*PlayerState `json:"player_map"`
	DiscardedTiles    []int                   `json:"discarded_tiles"`
	RemainingTiles    []int                   `json:"remaining_tiles"`
	LastDiscardedTile *int                    `json:"last_discarded_tile"`
	// records each round's winning metadata for player viewing
	LastWinningHand `json:"last_winning_hand"`
	// turn number on which the last winning hand is shown; -1 before any
	// round has ended
	LastWinningTurn int `json:"last_winning_turn"`
}
// NewGameState builds a fresh table: zero-value prevailing wind, turn
// counter at zero, no prior winning hand recorded (LastWinningTurn = -1),
// and the board dealt with P0 as the first dealer.
func NewGameState() GameState {
	var state GameState
	state.PrevailingWind = 0
	state.TurnNumber = 0
	state.LastWinningTurn = -1
	state.resetBoard(P0)
	return state
}
// Draw -> {Discard, Gong}
// Discard -> {Draw, Eat, Pong, Gong, Call}
//
// NextTurn applies one player move to the game state: it rejects stale
// moves (mismatched turn number), executes the action against the player's
// state, handles win/stalemate bookkeeping, then transitions the turn
// state and validates tile-set integrity.
func (gs *GameState) NextTurn(m Move) error {
	// stale request: another move was already applied this turn
	if gs.TurnNumber != m.TurnNumber {
		return fmt.Errorf("turn over")
	}
	// set action to Stalemate instantly if insufficent remaining tiles
	// this is triggered when player who drew last tile cannot win
	if len(gs.RemainingTiles) < 16 && m.Action != Call {
		m.Action = Stalemate
	}
	// NOTE(review): membership of m.Player in PlayerMap is not checked; a
	// missing player yields a nil ps and a panic below — confirm upstream
	// guarantees the player exists.
	ps, _ := gs.PlayerMap[m.Player]
	switch m.Action {
	case Draw:
		remaining, err := ps.Draw(gs.RemainingTiles)
		if err != nil {
			// NOTE(review): a failed draw is silently ignored (returns nil,
			// not err) — confirm this best-effort behaviour is intentional.
			return nil
		}
		gs.RemainingTiles = ps.RepairHand(remaining)
		ps.updateInnerGMap()
		// if player draws last tile, he/she can still win
		// but if player requires to repair, its a stalemate
		if len(gs.RemainingTiles) < 15 {
			m.Action = Stalemate
		}
	case Eat, EatLeft, EatRight:
		if err := ps.Eat(m.Tile, m.Action); err != nil {
			return err
		}
	case Pong:
		if err := ps.Pong(m.Tile); err != nil {
			return err
		}
	case Gong:
		if err := ps.Gong(m.Tile); err != nil {
			return err
		}
		// replenish after gong
		remaining, err := ps.Draw(gs.RemainingTiles)
		if err != nil {
			// NOTE(review): same silent-ignore as in the Draw case above.
			return nil
		}
		gs.RemainingTiles = ps.RepairHand(remaining)
		ps.updateInnerGMap()
		if len(gs.RemainingTiles) < 15 {
			m.Action = Stalemate
		}
	case InnerGong:
		gs.RemainingTiles = ps.InnerGong(m.Tile, gs.RemainingTiles)
		gs.RemainingTiles = ps.RepairHand(gs.RemainingTiles)
	case Discard:
		// only player's turn can call discard
		if gs.PlayerTurn != m.Player {
			return fmt.Errorf("not your turn to discard")
		}
		if err := ps.Discard(m.Tile); err != nil {
			return err
		}
		// trigger update of all players
		for k, v := range gs.PlayerMap {
			v.ResetStatus()
			if k != m.Player {
				v.UpdateStatus(m.Tile)
			}
		}
	case Call:
		// move to next player
		if m.Player != gs.Starter { // check for "diao zhng"
			gs.Starter = gs.Starter.next()
			// advance wind if need
			if gs.Starter == P0 {
				gs.PrevailingWind = gs.PrevailingWind.next()
			}
		}
		// record winner
		gs.recordWinner(ps, m)
		// display winning hand only for next turn
		gs.LastWinningTurn = gs.TurnNumber + 1
		// reset everything
		gs.resetBoard(gs.Starter)
	}
	// stalemate (whether requested or forced above): record it and re-deal
	if m.Action == Stalemate {
		gs.LastWinningTurn = gs.TurnNumber + 1
		gs.LastWinningHand = LastWinningHand{Stalemate: true}
		gs.resetBoard(gs.Starter)
	}
	gs.stateTransit(m.Action, m.Player, &m.Tile)
	if err := gs.validateTurn(); err != nil {
		return err
	}
	return nil
}
// recordWinner captures the winning player's full hand for display.
// The winning tile is the last drawn tile (self-draw) when nothing was
// discarded, otherwise the last discarded tile; displayed sets are folded
// back into the recorded hand.
func (gs *GameState) recordWinner(ps *PlayerState, m Move) {
	// record winner
	whand := LastWinningHand{Hand: ps.Hand, Player: m.Player}
	// winning tile is either last discarded or last drawn
	if gs.LastDiscardedTile == nil {
		whand.WinningTile = ps.LastDrawnTile
	} else {
		whand.WinningTile = *gs.LastDiscardedTile
		// consolidate winners hand
		// (-1 appears to be a "no tile" sentinel — confirm against PlayerState)
		if whand.WinningTile != -1 {
			whand.Hand = append(whand.Hand, whand.WinningTile)
		}
	}
	for _, d := range ps.Displayed {
		whand.Hand = append(whand.Hand, d...)
	}
	gs.LastWinningHand = whand
}
// resets board to with `start` being the dealer
// only prevailing wind and turn number is not set here
// A fresh tile set is generated and dealt: 14 tiles to the dealer,
// 13 to everyone else; all per-round state is cleared.
func (gs *GameState) resetBoard(dealer Player) {
	tiles := initSet()
	pMap := make(map[Player]*PlayerState)
	for _, p := range []Player{P0, P1, P2, P3} {
		handSize := 13
		if p == dealer {
			// dealer starts with the extra 14th tile
			handSize = 14
		}
		ps, leftover := NewPlayerState(handSize, tiles)
		pMap[p] = ps
		tiles = leftover
	}
	gs.Starter = dealer
	gs.PlayerTurn = dealer
	gs.IsTransitioning = false
	gs.DiscardedTiles = []int{}
	gs.PlayerMap = pMap
	gs.RemainingTiles = tiles
	gs.LastDiscardedTile = nil
}
// Reflects state of the game
// IsTransitioning: a discard just happened
// * only valid moves to players are draw/eat/pong/gong/call
// !IsTransitioning: a player just took a tile either by:
// draw/eat/ping/gong.
// * valid next moves are discard/inner_gong/call
//
// stateTransit also moves the previous pending discard (if any) onto the
// discard pile and bumps the turn counter so stale moves are rejected.
func (gs *GameState) stateTransit(action Action, player Player, tile *int) {
	switch action {
	case Eat, EatLeft, EatRight, Pong, Gong, InnerGong:
		// the pending discard was consumed by this action, not discarded
		gs.LastDiscardedTile = nil
		gs.IsTransitioning = false
		gs.PlayerTurn = player
	case Draw:
		gs.IsTransitioning = false
		// nobody claimed the pending discard — commit it to the pile
		if gs.LastDiscardedTile != nil {
			gs.DiscardedTiles = append(gs.DiscardedTiles, *gs.LastDiscardedTile)
			gs.LastDiscardedTile = nil
		}
		gs.PlayerTurn = player
	case Discard:
		gs.IsTransitioning = true
		if gs.LastDiscardedTile != nil {
			gs.DiscardedTiles = append(gs.DiscardedTiles, *gs.LastDiscardedTile)
		}
		gs.LastDiscardedTile = tile
		gs.PlayerTurn = player.next()
	}
	// ensures turn order correctness
	gs.TurnNumber += 1
}
// potentially expensive but only run after a move is performed
// ensures no duplicate tiles and no shortage/surplus
// Every tile id must appear exactly once across: remaining pile, discard
// pile, each player's hand and displayed sets, and the pending discard;
// the union must total exactly 148 tiles.
func (gs *GameState) validateTurn() error {
	checkMap := make(map[int]bool)
	for _, t := range gs.RemainingTiles {
		if _, ok := checkMap[t]; ok {
			return fmt.Errorf("game state invalid: duplicate @ remaining tiles")
		}
		checkMap[t] = true
	}
	for _, t := range gs.DiscardedTiles {
		if _, ok := checkMap[t]; ok {
			return fmt.Errorf("game state invalid: duplicate @ discarded tile")
		}
		checkMap[t] = true
	}
	for _, v := range gs.PlayerMap {
		for _, t := range v.Hand {
			if _, ok := checkMap[t]; ok {
				return fmt.Errorf("game state invalid")
			}
			checkMap[t] = true
		}
		for _, s := range v.Displayed {
			for _, t := range s {
				if _, ok := checkMap[t]; ok {
					return fmt.Errorf("game state invalid")
				}
				checkMap[t] = true
			}
		}
	}
	if gs.LastDiscardedTile != nil {
		if _, ok := checkMap[*gs.LastDiscardedTile]; ok {
			return fmt.Errorf("game state invalid: duplicate @ last discarded tile")
		}
		checkMap[*gs.LastDiscardedTile] = true
	}
	// uniqueness plus an exact count pins down the full tile set
	if len(checkMap) != 148 {
		return fmt.Errorf("game state invalid: insufficent tiles")
	}
	return nil
}
|
import os
import sys
import requests
import zipfile
import io
# Check if Python 3 is installed
if sys.version_info[0] < 3:
    print("Python 3 is required to run this script.")
    sys.exit(1)

# Check if kind is already installed (exit code 0 means the binary works)
if os.system("kind version") == 0:
    print("kind is already installed.")
    sys.exit(0)

# Resolve the latest release tag by following GitHub's /releases/latest redirect
response = requests.get("https://github.com/kubernetes-sigs/kind/releases/latest")
latest_release_url = response.url
latest_release_tag = latest_release_url.split("/")[-1]

download_url = f"https://github.com/kubernetes-sigs/kind/releases/download/{latest_release_tag}/kind-linux-amd64"
response = requests.get(download_url)
if response.status_code != 200:
    print("Failed to download kind. Please check your internet connection.")
    sys.exit(1)

# BUG FIX: kind release assets are raw executables, not zip archives.
# The previous zipfile-based extraction always failed with BadZipFile;
# write the downloaded bytes straight to disk instead.
with open("kind", "wb") as f:
    f.write(response.content)

# Move the kind binary to a directory in the system's PATH and mark executable
os.system("sudo mv kind /usr/local/bin/")
os.system("sudo chmod +x /usr/local/bin/kind")
print("kind has been successfully installed.")
|
<reponame>aaronoe/space_launch_manifest
package com.aaronoe.android.spacelaunchmanifest.Launches.MainLaunches;
import android.app.Activity;
import android.content.Intent;
import android.net.Uri;
import android.support.design.widget.FloatingActionButton;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import com.aaronoe.android.spacelaunchmanifest.Launches.DetailPage.LaunchDetailActivity;
import com.aaronoe.android.spacelaunchmanifest.R;
import com.squareup.picasso.Picasso;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
/**
* Custom Array Adapter to inflate the list layout view with LaunchItems
* Created by aaron on 9/8/2016.
*/
public class LaunchArrayAdapter extends ArrayAdapter<LaunchItem> {
    // Hosting activity; kept for context-dependent work in the adapter.
    Activity mContext;
    /**
     * This is our own custom constructor (it doesn't mirror a superclass constructor).
     * The context is used to inflate the layout file, and the list is the data we want
     * to populate into the lists.
     *
     * @param context The current context. Used to inflate the layout file.
     * @param adapter A List of LaunchItem objects to display in a list
     */
    public LaunchArrayAdapter(Activity context, List<LaunchItem> adapter) {
        // Here, we initialize the ArrayAdapter's internal storage for the context and the list.
        // the second argument is used when the ArrayAdapter is populating a single TextView.
        // Because this is a custom adapter for two TextViews and an ImageView, the adapter is not
        // going to use this second argument, so it can be any value. Here, we used 0.
        super(context, 0, adapter);
        mContext = context;
    }
/**
* Helper method to convert UNIX epoch time to date
* @param dateObject for converting
* @return a formatted date like "Tue, Feb 17, 2015"
*/
private String formatDate(Date dateObject){
SimpleDateFormat dateFormat = new SimpleDateFormat("E, LLL dd, yyyy");
return dateFormat.format(dateObject);
}
/**
* Helper method to convert UNIX epoch time to time in user's current timezone
* @param timeObject
* @return a formatted time like "8:30 pm"
*/
private String formatTime(Date timeObject){
SimpleDateFormat dateFormatter = new SimpleDateFormat("E, LLL dd, h:mm a");
return dateFormatter.format(timeObject);
}
/**
 * Provides a view for an AdapterView (ListView, GridView, etc.), binding one
 * LaunchItem to a recycled (or freshly inflated) launch_list_item row.
 *
 * @param position    The position in the list of data that should be displayed in the
 *                    list item view.
 * @param convertView The recycled view to populate.
 * @param parent      The parent ViewGroup that is used for inflation.
 * @return The View for the position in the AdapterView.
 */
@Override
public View getView(int position, View convertView, ViewGroup parent) {
    // Check if the existing view is being reused, otherwise inflate the view
    View listItemView = convertView;
    if (listItemView == null) {
        listItemView = LayoutInflater.from(getContext()).inflate(
                R.layout.launch_list_item, parent, false);
    }
    // Current launch backing this row; final so the anonymous click listeners
    // below can capture it.
    final LaunchItem currentLaunchItem = getItem(position);
    // Set the launch location to the corresponding TextView
    TextView locationTextView =(TextView) listItemView.findViewById(R.id.launch_location);
    locationTextView.setText(currentLaunchItem.getmLaunchLocation());
    // Set launch title to corresponding TextView
    TextView launchTitleView = (TextView) listItemView.findViewById(R.id.full_launch_title);
    launchTitleView.setText(currentLaunchItem.getmLaunchName());
    // Set text launch date to corresponding TextView
    TextView launchTextDate = (TextView) listItemView.findViewById(R.id.launch_text_time);
    launchTextDate.setText(currentLaunchItem.getmTextLaunchDate());
    // Set the mission name to corresponding TextView
    TextView missionTitle = (TextView) listItemView.findViewById(R.id.lone_mission_title);
    missionTitle.setText(currentLaunchItem.getmMissionName());
    // Set mission description to corresponding TextView
    TextView missionDescView = (TextView) listItemView.findViewById(R.id.lone_mission_description);
    missionDescView.setText(currentLaunchItem.getmMissionDescription());
    // --------- Implement Date and Time ----------
    // Convert the NET (no-earlier-than) launch timestamp to display text.
    // A value of 0 is treated as "unknown" and shown as "n/a".
    String timeToDisplay = "n/a";
    long currentDate = currentLaunchItem.getmNetLaunchDate();
    if (currentDate != 0){
        currentDate *= 1000; // epoch seconds -> milliseconds for java.util.Date
        Date dateObject = new Date(currentDate);
        timeToDisplay = formatTime(dateObject);
    }
    // Show the "watch live" button only when this launch carries a media URL.
    Button watchLiveButton = (Button) listItemView.findViewById(R.id.watch_live_button);
    final String liveVideoUrl = currentLaunchItem.getmMediaUrl();
    if(liveVideoUrl != null){
        watchLiveButton.setVisibility(View.VISIBLE);
        watchLiveButton.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Uri liveMediaUri = Uri.parse(liveVideoUrl);
                // Create a new intent to view the live stream URI
                Intent websiteIntent = new Intent(Intent.ACTION_VIEW, liveMediaUri);
                // Send the intent to launch a new activity
                v.getContext().startActivity(websiteIntent);
            }
        });
    } else {
        // Must explicitly hide: the row may be a recycled view where the
        // previous item made the button visible.
        watchLiveButton.setVisibility(View.GONE);
    }
    // Open the detail screen for this launch when "more info" is tapped.
    Button moreInfoButton = (Button) listItemView.findViewById(R.id.more_info_button);
    moreInfoButton.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View view) {
            Intent infoIntent = new Intent(getContext().getApplicationContext(), LaunchDetailActivity.class);
            // NEW_TASK is required when starting an activity from an
            // application (non-activity) context.
            infoIntent.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_ACTIVITY_MULTIPLE_TASK);
            infoIntent.putExtra("currentLaunchItem", currentLaunchItem);
            getContext().getApplicationContext().startActivity(infoIntent);
        }
    });
    // Bind the formatted launch time computed above.
    TextView timeTextView = (TextView) listItemView.findViewById(R.id.time);
    timeTextView.setText(timeToDisplay);
    ImageView currentImageView = (ImageView) listItemView.findViewById(R.id.map_picture_view);
    /* Use the preview image for each launch
    String currentImageURl = currentLaunchItem.getmRocketImageUrl();
    String smallerImageUrl = currentImageURl.replace("2560", "480");
    smallerImageUrl = smallerImageUrl.replace("1920", "480");
    */
    // Build a Google Static Maps URL centered on the launch pad and load it
    // into the row's image view via Picasso.
    final double latitude = currentLaunchItem.getmLaunchPadLatitude();
    final double longitude = currentLaunchItem.getmLaunchPadLongitude();
    String locationUrl =
            "http://maps.google.com/maps/api/staticmap?center=" +latitude + "," + longitude +
                    "&zoom=4" +
                    "&scale=2" +
                    "&size=450x200" +
                    "&sensor=false" +
                    "&maptype=hybrid" +
                    "&markers=color:red%7Clabel:%7C" +latitude +"," +longitude;
    Picasso.with(listItemView.getContext()).load(locationUrl).into(currentImageView);
    // The floating action button opens the launch pad location in Google Maps.
    FloatingActionButton mapsButton = (FloatingActionButton) listItemView.findViewById(R.id.fab);
    final String launchPadName = currentLaunchItem.getmLaunchPadName();
    mapsButton.setOnClickListener(new View.OnClickListener(){
        public void onClick(View v){
            Intent intent = new Intent(Intent.ACTION_VIEW,
                    Uri.parse("geo:" + latitude + "," + longitude + "?q=" + latitude + "," + longitude + "("+ launchPadName +")"));
            // Make the Intent explicit by setting the Google Maps package
            intent.setPackage("com.google.android.apps.maps");
            // Attempt to start an activity that can handle the Intent
            getContext().startActivity(intent);
        }
    });
    // NOTE(review): netLaunchTimeCountdown is never used — it looks like a
    // leftover from a planned countdown feature; consider removing it.
    long netLaunchTimeCountdown = currentLaunchItem.getmNetLaunchDate();
    return listItemView;
}
}
|
<gh_stars>0
/*
Copyright 2019 The Ceph-CSI Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
import (
"errors"
"fmt"
)
// Sentinel errors shared across the cephfs driver; callers compare against
// these values to map low-level failures onto CSI responses.
//
// NOTE(review): the messages are constant, so errors.New would be the more
// idiomatic constructor than fmt.Errorf("%v", ...). Left unchanged here
// because dropping every fmt usage would orphan the fmt import above.
var (
	// ErrInvalidVolID is returned when a CSI passed VolumeID is not conformant to any known volume ID
	// formats.
	ErrInvalidVolID = errors.New("invalid VolumeID")
	// ErrNonStaticVolume is returned when a volume is detected as not being
	// statically provisioned.
	ErrNonStaticVolume = errors.New("volume not static")
	// ErrVolNotEmpty is returned when a subvolume has snapshots in it.
	ErrVolNotEmpty = fmt.Errorf("%v", "Error ENOTEMPTY")
	// ErrVolumeNotFound is returned when a subvolume is not found in CephFS.
	ErrVolumeNotFound = fmt.Errorf("%v", "Error ENOENT")
	// ErrInvalidCommand is returned when a command is not known to the cluster.
	ErrInvalidCommand = fmt.Errorf("%v", "Error EINVAL")
	// ErrSnapNotFound is returned when snap name passed is not found in the list of snapshots for the
	// given image. It intentionally carries the same "Error ENOENT" text as
	// ErrVolumeNotFound but is a distinct error value.
	ErrSnapNotFound = fmt.Errorf("%v", "Error ENOENT")
	// ErrSnapProtectionExist is returned when the snapshot is already protected.
	ErrSnapProtectionExist = fmt.Errorf("%v", "Error EEXIST")
	// ErrCloneInProgress is returned when snapshot clone state is `in progress`.
	ErrCloneInProgress = fmt.Errorf("in progress")
)
|
#!/system/bin/sh
# Copyright (c) 2012-2013, 2016, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Configuration for MSM8998
# Post-boot performance/power tuning: big-cluster core control, adaptive LMK,
# HMP scheduler thresholds, cpufreq governor selection, bus DCVS, low-power
# mode toggles and cpuset assignments. All writes go to sysfs/procfs knobs.

# Big cluster (cpu4-7) core control: keep at least 2 cores online and
# hotplug the rest based on the busy thresholds below.
echo 2 > /sys/devices/system/cpu/cpu4/core_ctl/min_cpus
echo 60 > /sys/devices/system/cpu/cpu4/core_ctl/busy_up_thres
echo 30 > /sys/devices/system/cpu/cpu4/core_ctl/busy_down_thres
echo 100 > /sys/devices/system/cpu/cpu4/core_ctl/offline_delay_ms
echo 1 > /sys/devices/system/cpu/cpu4/core_ctl/is_big_cluster
echo 4 > /sys/devices/system/cpu/cpu4/core_ctl/task_thres
# Enable Adaptive LMK
echo 1 > /sys/module/lowmemorykiller/parameters/enable_adaptive_lmk
echo "18432,23040,27648,51256,150296,200640" > /sys/module/lowmemorykiller/parameters/minfree
echo 162500 > /sys/module/lowmemorykiller/parameters/vmpressure_file_min
# Setting b.L scheduler parameters (HMP up/down migration thresholds etc.)
echo 1 > /proc/sys/kernel/sched_migration_fixup
echo 95 > /proc/sys/kernel/sched_upmigrate
echo 90 > /proc/sys/kernel/sched_downmigrate
echo 100 > /proc/sys/kernel/sched_group_upmigrate
echo 95 > /proc/sys/kernel/sched_group_downmigrate
echo 0 > /proc/sys/kernel/sched_select_prev_cpu_us
echo 400000 > /proc/sys/kernel/sched_freq_inc_notify
echo 400000 > /proc/sys/kernel/sched_freq_dec_notify
echo 5 > /proc/sys/kernel/sched_spill_nr_run
echo 1 > /proc/sys/kernel/sched_restrict_cluster_spill
start iop
# disable thermal bcl hotplug to switch governor
echo 0 > /sys/module/msm_thermal/core_control/enabled
# online CPU0 and CPU4 so each cluster's policy node is writable below
echo 1 > /sys/devices/system/cpu/cpu0/online
echo 1 > /sys/devices/system/cpu/cpu4/online
# Pick the best cpufreq governor the kernel offers:
# schedutil (preferred) > sched > interactive (fallback with manual tuning).
available_governors=`cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors`
if [[ "$available_governors" == *"schedutil"* ]]; then
    echo "schedutil" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
    echo 500 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/up_rate_limit_us
    echo 20000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/down_rate_limit_us
    echo "schedutil" > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
    echo 500 > /sys/devices/system/cpu/cpufreq/policy4/schedutil/up_rate_limit_us
    echo 20000 > /sys/devices/system/cpu/cpufreq/policy4/schedutil/down_rate_limit_us
    # schedtune: prefer idle CPUs for foreground, boost top-app
    echo 1 > /dev/stune/foreground/schedtune.prefer_idle
    echo 10 > /dev/stune/top-app/schedtune.boost
    echo 1 > /dev/stune/top-app/schedtune.prefer_idle
    setprop sys.use_fifo_ui 1
elif [[ "$available_governors" == *"sched"* ]]; then
    echo "sched" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
    echo "sched" > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
    setprop sys.use_fifo_ui 1
else
    # interactive governor: little cluster (policy at cpu0)
    echo "interactive" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
    echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/use_sched_load
    echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/use_migration_notif
    echo 19000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/above_hispeed_delay
    echo 90 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/go_hispeed_load
    echo 20000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/timer_rate
    echo 1248000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/hispeed_freq
    echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/io_is_busy
    echo "83 1804800:95" > /sys/devices/system/cpu/cpu0/cpufreq/interactive/target_loads
    echo 19000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/min_sample_time
    echo 79000 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/max_freq_hysteresis
    echo 300000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
    echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/interactive/ignore_hispeed_on_notif
    # interactive governor: big cluster (policy at cpu4)
    echo "interactive" > /sys/devices/system/cpu/cpu4/cpufreq/scaling_governor
    echo 1 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/use_sched_load
    echo 1 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/use_migration_notif
    echo 19000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/above_hispeed_delay
    echo 90 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/go_hispeed_load
    echo 20000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/timer_rate
    echo 1574400 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/hispeed_freq
    echo 1 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/io_is_busy
    echo "83 1939200:90 2016000:95" > /sys/devices/system/cpu/cpu4/cpufreq/interactive/target_loads
    echo 19000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/min_sample_time
    echo 79000 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/max_freq_hysteresis
    echo 300000 > /sys/devices/system/cpu/cpu4/cpufreq/scaling_min_freq
    echo 1 > /sys/devices/system/cpu/cpu4/cpufreq/interactive/ignore_hispeed_on_notif
fi
# re-enable thermal and BCL hotplug
echo 1 > /sys/module/msm_thermal/core_control/enabled
# Enable input boost configuration (boost CPU0 policy on touch input)
echo "0:1324800" > /sys/module/cpu_boost/parameters/input_boost_freq
echo 100 > /sys/module/cpu_boost/parameters/input_boost_ms
# Enable bus-dcvs: bandwidth-hardware-monitor governor for each CPU-BW device
for cpubw in /sys/class/devfreq/*qcom,cpubw*
do
    echo "bw_hwmon" > $cpubw/governor
    echo 50 > $cpubw/polling_interval
    echo 1525 > $cpubw/min_freq
    echo "3143 5859 11863 13763" > $cpubw/bw_hwmon/mbps_zones
    echo 4 > $cpubw/bw_hwmon/sample_ms
    echo 34 > $cpubw/bw_hwmon/io_percent
    echo 20 > $cpubw/bw_hwmon/hist_memory
    echo 10 > $cpubw/bw_hwmon/hyst_length
    echo 0 > $cpubw/bw_hwmon/low_power_ceil_mbps
    echo 34 > $cpubw/bw_hwmon/low_power_io_percent
    echo 20 > $cpubw/bw_hwmon/low_power_delay
    echo 0 > $cpubw/bw_hwmon/guard_band_mbps
    echo 250 > $cpubw/bw_hwmon/up_scale
    echo 1600 > $cpubw/bw_hwmon/idle_mbps
done
# Memory-latency governor for the memlat devfreq devices
for memlat in /sys/class/devfreq/*qcom,memlat-cpu*
do
    echo "mem_latency" > $memlat/governor
    echo 10 > $memlat/polling_interval
    echo 400 > $memlat/mem_latency/ratio_ceil
done
echo "cpufreq" > /sys/class/devfreq/soc:qcom,mincpubw/governor
# Start Touch
start hbtp
# Disable per-CPU and L2 retention low-power states (latency over power)
echo N > /sys/module/lpm_levels/system/pwr/cpu0/ret/idle_enabled
echo N > /sys/module/lpm_levels/system/pwr/cpu1/ret/idle_enabled
echo N > /sys/module/lpm_levels/system/pwr/cpu2/ret/idle_enabled
echo N > /sys/module/lpm_levels/system/pwr/cpu3/ret/idle_enabled
echo N > /sys/module/lpm_levels/system/perf/cpu4/ret/idle_enabled
echo N > /sys/module/lpm_levels/system/perf/cpu5/ret/idle_enabled
echo N > /sys/module/lpm_levels/system/perf/cpu6/ret/idle_enabled
echo N > /sys/module/lpm_levels/system/perf/cpu7/ret/idle_enabled
echo N > /sys/module/lpm_levels/system/pwr/pwr-l2-dynret/idle_enabled
echo N > /sys/module/lpm_levels/system/pwr/pwr-l2-ret/idle_enabled
echo N > /sys/module/lpm_levels/system/perf/perf-l2-dynret/idle_enabled
echo N > /sys/module/lpm_levels/system/perf/perf-l2-ret/idle_enabled
echo N > /sys/module/lpm_levels/parameters/sleep_disabled
# Pin background work to the little cluster
echo 0 > /dev/cpuset/background/cpus
echo 0-2 > /dev/cpuset/system-background/cpus
echo 0 > /proc/sys/kernel/sched_boost
# Hand ondemand tunables to the system user (harmless if governor unused)
chown -h system /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
chown -h system /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
chown -h system /sys/devices/system/cpu/cpufreq/ondemand/io_is_busy
setprop sys.post_boot.parsed 1
# Let kernel know our image version/variant/crm_version
if [ -f /sys/devices/soc0/select_image ]; then
    image_version="10:"
    image_version+=`getprop ro.build.id`
    image_version+=":"
    image_version+=`getprop ro.build.version.incremental`
    image_variant=`getprop ro.product.name`
    image_variant+="-"
    image_variant+=`getprop ro.build.type`
    oem_version=`getprop ro.build.version.codename`
    echo 10 > /sys/devices/soc0/select_image
    echo $image_version > /sys/devices/soc0/image_version
    echo $image_variant > /sys/devices/soc0/image_variant
    echo $oem_version > /sys/devices/soc0/image_crm_version
fi
# Change console log level as per console config property
console_config=`getprop persist.console.silent.config`
case "$console_config" in
    "1")
        echo "Enable console config to $console_config"
        echo 0 > /proc/sys/kernel/printk
        ;;
    *)
        echo "Enable console config to $console_config"
        ;;
esac
|
<reponame>PhotogramProject/front-end
import {ToastrService} from '../../services/toastr/toastr.service';
import {AuthService} from '../../services/auth/auth.service';
import {DataService} from '../../services/data/data.service';
import {MapService} from '../../services/map/map.service';
import {JourneyService} from '../../services/journey/journey.service';
import {AdminService} from '../../services/admin/admin.service';
import {UtilityService} from '../../services/utility/utility.service';
import {CommentsService} from '../../services/comments/comments.service';
// Aggregated list of all shared application services, so feature modules can
// register every provider from one place instead of importing each service
// file individually.
export const ProvidersExport = [
  ToastrService,
  AuthService,
  DataService,
  MapService,
  JourneyService,
  AdminService,
  UtilityService,
  CommentsService
];
|
public class ArmstrongNumbers {

    /**
     * Checks whether a number is an Armstrong (narcissistic) number: one that
     * equals the sum of its digits each raised to the power of the digit
     * count, e.g. 153 = 1^3 + 5^3 + 3^3 and 9474 = 9^4 + 4^4 + 7^4 + 4^4.
     *
     * Fix/generalization: the original hard-coded the cube of each digit,
     * which is only correct for 3-digit inputs; the exponent is now derived
     * from the number of digits, so the method is correct for any
     * non-negative int (results for 3-digit inputs are unchanged).
     *
     * @param num the number to test
     * @return true if num is an Armstrong number, false otherwise
     *         (negative numbers are never Armstrong numbers)
     */
    public static boolean isArmstrongNumber(int num) {
        if (num < 0) {
            return false;
        }
        // Number of digits determines the exponent ("0" has one digit).
        int digits = String.valueOf(num).length();
        int originalNum = num;
        int result = 0;
        while (originalNum != 0) {
            int remainder = originalNum % 10;
            result += (int) Math.pow(remainder, digits);
            originalNum /= 10;
        }
        return num == result;
    }

    /** Small demo entry point. */
    public static void main(String[] args) {
        int num = 153;
        if (isArmstrongNumber(num))
            System.out.println(num + " is an Armstrong number");
        else
            System.out.println(num + " is not an Armstrong number");
    }
}
|
package cucumber.runtime.groovy;
import cucumber.runtime.io.ResourceLoader;
import org.codehaus.groovy.runtime.MethodClosure;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
/**
 * Unit tests for GroovyBackend's "world" lifecycle: closures registered via
 * registerWorld() are invoked by buildWorld() and cleared by disposeWorld().
 *
 * NOTE(review): org.mockito.runners.MockitoJUnitRunner is the legacy runner
 * package (relocated to org.mockito.junit in Mockito 2+) — presumably pinned
 * by the project's Mockito version; confirm before upgrading.
 */
@RunWith(MockitoJUnitRunner.class)
public class GroovyBackendTest {
    // Mocked so the backend can be constructed without touching real resources.
    @Mock
    ResourceLoader resourceLoader;
    GroovyBackend backend;

    @Before
    public void setUp() throws Exception {
        backend = new GroovyBackend(resourceLoader);
    }

    // Registering one world closure must yield exactly one world instance.
    @Test
    public void should_build_world_by_calling_the_closure() {
        backend.registerWorld(new MethodClosure(this, "worldClosureCall"));
        backend.buildWorld();
        GroovyWorld groovyWorld = backend.getGroovyWorld();
        assertEquals(1, groovyWorld.worldsCount());
    }

    // buildWorld() must create an (empty) world object even without closures.
    @Test
    public void should_build_world_object_even_if_closure_world_was_not_added() {
        assertNull(backend.getGroovyWorld());
        backend.buildWorld();
        assertEquals(0, backend.getGroovyWorld().worldsCount());
    }

    // disposeWorld() must drop the world so the next scenario starts fresh.
    @Test
    public void should_clean_up_worlds_after_dispose() {
        backend.registerWorld(new MethodClosure(this, "worldClosureCall"));
        backend.buildWorld();
        backend.disposeWorld();
        assertNull(backend.getGroovyWorld());
    }

    // Invoked reflectively through MethodClosure by the tests above.
    @SuppressWarnings("UnusedDeclaration")
    private AnotherCustomWorld worldClosureCall() {
        return new AnotherCustomWorld();
    }
}
|
<gh_stars>100-1000
// Copyright 2018 Red Hat.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"io"
"os"
"github.com/ulikunitz/xz"
)
// XZ2File does xz decompression from src file into dst file.
//
// dst is created (or truncated) with mode 0644. On any decompression or copy
// error the partially written dst is removed, so callers never observe a
// truncated output file.
//
// NOTE(review): the error from the deferred out.Close() is discarded; for a
// file opened for writing this can hide a failed flush — confirm whether
// callers need that surfaced.
func XZ2File(dst, src string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer out.Close()
	// Streaming xz reader from the third-party ulikunitz/xz package.
	reader, err := xz.NewReader(in)
	if err != nil {
		os.Remove(dst)
		return err
	}
	_, err = io.Copy(out, reader)
	if err != nil {
		os.Remove(dst)
		return err
	}
	return nil
}
|
// Paint the initial background from the sliders' starting values, then keep
// it in sync as the user drags any of the three RGB channel sliders.
;(() => {
  defineColor()

  const channelSelectors = ['#redRange', '#greenRange', '#blueRange']
  for (const selector of channelSelectors) {
    document.querySelector(selector).addEventListener('input', defineColor)
  }
})()
// Read the three RGB slider values, paint the page background with the
// resulting rgb() color, and mirror each value into its numeric UI field.
function defineColor() {
  const red = document.querySelector('#redRange')
  const green = document.querySelector('#greenRange')
  const blue = document.querySelector('#blueRange')

  document.querySelector('body').style.backgroundColor =
    `rgb(${red.value},${green.value},${blue.value})`

  document.querySelector('#redUI').value = red.value
  document.querySelector('#greenUI').value = green.value
  document.querySelector('#blueUI').value = blue.value
}
|
-- Course IDs of all sections offered in Fall 2009 or in Spring 2010.
-- UNION (as opposed to UNION ALL) also removes duplicate course_ID values,
-- so a course taught in both semesters appears once.
Select course_ID
from university.section
where (semester = 'Fall' and year = '2009')
union
select course_ID
from university.section
where (semester = 'Spring' and year = '2010');
|
#!/bin/bash
# End-to-end test for the gearman CLI: two clients submit jobs that share the
# same unique ID (-u myunique) with DIFFERENT payloads; a single worker runs
# `sort` once, and both clients must receive the identical (first payload's)
# result — proving the server coalesced the duplicate submission.
set -eux

# Remove the temporary working directory on exit.
cleanup () {
    if [[ -n "${outdir:-}" ]] ; then
        rm -rf $outdir
    fi
}
trap cleanup EXIT

# All subsequent mktemp calls land inside this directory via TMPDIR.
outdir=$(mktemp -d gearman.XXXXXXXX)
export TMPDIR=$outdir

outfile=$(mktemp -t gearman.ab.XXXXXXXX)
outfile2=$(mktemp -t gearman.zz.XXXXXXXX)
infile=$(mktemp -t gearman.ab.in.XXXXXXXX)
infile2=$(mktemp -t gearman.zz.in.XXXXXXXX)

# First payload — this is the one whose sorted result both clients must see.
cat > $infile <<EOF
a
c
b
EOF
# Second payload — should be discarded by unique-job coalescing.
cat > $infile2 <<EOF
z
z
1
EOF

# Submit both jobs in the background; -P prefixes output with the function
# name, and the timeouts bound the test if coalescing or the worker fails.
timeout 11s gearman -u myunique -P -f foo < $infile > $outfile &
sleep 1
timeout 10s gearman -u myunique -P -f foo < $infile2 > $outfile2 &
# let them submit
sleep 1
# One worker, processing a single job (-c 1), using `sort` as the handler.
timeout 5s gearman -w -f foo -c 1 sort
wait
wait

# Expected output: first payload sorted, first line prefixed by the function name.
ref=$(mktemp -t gearman.ref.XXXXXXXX)
cat > $ref <<EOF
foo: a
b
c
EOF
if ! cmp $ref $outfile ; then
    diff $ref $outfile
    echo FAIL!
    exit 1
elif ! cmp $ref $outfile2 ; then
    diff $ref $outfile2
    echo FAIL! outfile2
    exit 1
else
    cleanup
    trap - EXIT
    echo OK!
fi
|
<filename>TrafficFlowClassification/data/dataLoader.py
'''
@Author: <NAME>
@Date: 2021-01-07 11:06:49
@Description: 用来加载 Pytorch 训练所需要的数据
@LastEditTime: 2021-02-06 19:53:20
'''
import torch
import numpy as np
from TrafficFlowClassification.TrafficLog.setLog import logger
def data_loader(pcap_file, statistic_file, label_file, trimed_file_len, batch_size=256, workers=1, pin_memory=True):
    """Load preprocessed .npy arrays and wrap them in a PyTorch DataLoader.

    Args:
        pcap_file (str): path of the .npy file converted from pcap traffic
        statistic_file (str): path of the .npy file holding statistical features
        label_file (str): path of the .npy label file aligned with ``pcap_file``
        trimed_file_len (int): length each pcap record was trimmed to; used to
            reshape the flat array into (N, 1, trimed_file_len)
        batch_size (int, optional): samples per batch. Defaults to 256.
        workers (int, optional): number of DataLoader worker processes.
            Defaults to 1.
        pin_memory (bool, optional): use pinned (page-locked) host memory to
            speed up host-to-GPU transfers when RAM allows. Defaults to True.

    Returns:
        torch.utils.data.DataLoader: yields (pcap, statistic, label) batches,
        shuffled each epoch.
    """
    # Load the preprocessed numpy arrays.
    pcap_data = np.load(pcap_file)
    statistic_data = np.load(statistic_file)
    label_data = np.load(label_file)

    # Convert to tensors: pcap data becomes (N, 1, trimed_file_len) floats
    # (channel dimension for 1-D convolutions); labels become int64 as
    # required by PyTorch loss functions.
    pcap_data = torch.from_numpy(pcap_data.reshape(-1, 1, trimed_file_len)).float()
    statistic_data = torch.from_numpy(statistic_data).float()
    label_data = torch.from_numpy(label_data).long()
    logger.info('pcap 文件大小, {}; statistic 文件大小, {}; label 文件大小: {}'.format(pcap_data.shape, statistic_data.shape, label_data.shape))

    # Bundle the three tensors so each batch yields aligned samples.
    res_dataset = torch.utils.data.TensorDataset(pcap_data, statistic_data, label_data)
    res_dataloader = torch.utils.data.DataLoader(
        dataset=res_dataset,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=pin_memory,
        # Bug fix: honor the documented `workers` parameter — it was
        # previously hard-coded to 1, silently ignoring the caller's value.
        num_workers=workers
    )
    return res_dataloader
|
package me.eirinimitsopoulou.bakingapp.Utils;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Scanner;
/**
* Created by eirinimitsopoulou on 30/05/2018.
*/
public class NetworkUtils {

    /**
     * Performs a blocking HTTP GET against the given URL and returns the
     * response body as a single string.
     *
     * Errors are logged via printStackTrace and reported to the caller as an
     * empty string (preserving the original best-effort contract).
     *
     * Bug fix: the original finally block ran
     * {@code assert urlConnection != null; urlConnection.disconnect();},
     * which throws an AssertionError (or NPEs with assertions disabled)
     * whenever openConnection()/getInputStream() failed before the connection
     * was assigned. A plain null check is always safe. The Scanner (and its
     * underlying stream) is now closed as well.
     *
     * @param url the URL to fetch
     * @return the response body, or "" on any error or empty response
     */
    public String sendHTTPRequest(String url) {
        HttpURLConnection urlConnection = null;
        Scanner scanner = null;
        try {
            urlConnection = (HttpURLConnection) new URL(url).openConnection();
            InputStream in = new BufferedInputStream(urlConnection.getInputStream());
            // "\\A" makes the scanner consume the whole stream as one token.
            scanner = new Scanner(in).useDelimiter("\\A");
            return scanner.hasNext() ? scanner.next() : "";
        } catch (IOException e) {
            // Covers MalformedURLException too (it subclasses IOException).
            e.printStackTrace();
        } finally {
            if (scanner != null) {
                scanner.close();
            }
            if (urlConnection != null) {
                urlConnection.disconnect();
            }
        }
        return "";
    }
}
|
<filename>src/lib/tools.h<gh_stars>1-10
/*
* tools.h
*
* Created on: Dec 15, 2017
* by: <NAME>
*
* This file implements some utility functions
*/
#ifndef SRC_LIB_TOOLS_H_
#define SRC_LIB_TOOLS_H_
#include <sys/time.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <ctype.h>
#include "optimization.h"
#include "log.h"
/* Minimum / maximum of two values.
 * Bug fix: the macro arguments are now parenthesized inside the expansion so
 * operands built from lower-precedence operators (e.g. MIN(x & 1, y)) expand
 * correctly. Each argument may still be evaluated twice — do not pass
 * side-effecting expressions such as MIN(i++, j). */
#define MIN( a, b ) ((a) > (b) ? (b) : (a))
#define MAX( a, b ) ((a) < (b) ? (b) : (a))
/* Percentage of a over tot, guarding against division by zero. */
#define PERCENTAGE( a, tot ) ((tot) == 0? 0 : (a)*100.0/(tot) )
/**
 * Swap values of two variables x and y typing T
 */
#define SWAP(x, y, T) do { T tmp = x; x = y; y = tmp; } while (0)
/**
 * Number of microseconds per second
 */
#define MICRO_PER_SEC 1000000
/**
 * Convert a string to an integer, constrained to a range.
 * When the parsed value falls outside [#low, #high], #def is returned instead.
 * @param string text to parse (atoi semantics: 0 on non-numeric input)
 * @param low    lowest accepted value
 * @param high   highest accepted value
 * @param def    fallback returned when the parsed value is out of range
 * @return the parsed value, or #def when out of range
 */
static ALWAYS_INLINE int mmt_atoi( const char*string, int low, int high, int def ){
	const int value = atoi( string );
	if( value < low || value > high )
		return def;
	return value;
}
/**
 * Interval, in microseconds, from *start to *end.
 * @param end   later moment
 * @param start earlier moment
 * @return signed difference in microseconds
 */
static ALWAYS_INLINE long u_second_diff( const struct timeval *end, const struct timeval *start ){
	const long sec_part  = ( end->tv_sec - start->tv_sec ) * MICRO_PER_SEC;
	const long usec_part = end->tv_usec - start->tv_usec;
	return sec_part + usec_part;
}
/**
 * Whether *end occurs strictly after *start.
 * @param start earlier candidate
 * @param end   later candidate
 * @return true when end > start
 */
static ALWAYS_INLINE bool is_after( const struct timeval *start, const struct timeval *end ){
	/* Seconds decide unless equal; then microseconds break the tie. */
	if( end->tv_sec != start->tv_sec )
		return end->tv_sec > start->tv_sec;
	return end->tv_usec > start->tv_usec;
}
/* True when the timestamp has never been set (both fields are zero). */
static ALWAYS_INLINE bool is_zero_timestamp( const struct timeval *ts ){
	return !( ts->tv_sec || ts->tv_usec );
}
/**
 * Total number of microseconds represented by a timeval.
 * @param ts timestamp to convert
 * @return seconds*1e6 + microseconds
 */
static ALWAYS_INLINE size_t u_second( const struct timeval *ts ){
	size_t total = ts->tv_sec * MICRO_PER_SEC;
	total += ts->tv_usec;
	return total;
}
/**
 * Approximate number of milliseconds represented by a timeval.
 *
 * NOTE(review): this uses binary shifts (seconds << 10, i.e. x1024, and
 * usec >> 10, i.e. /1024) instead of x1000 / /1000, so the result is a fast
 * approximation that over-weights seconds by about 2.4% — presumably a
 * deliberate speed/accuracy trade-off; confirm callers tolerate the skew.
 *
 * @param ts timestamp to convert
 * @return approximate milliseconds
 */
static ALWAYS_INLINE size_t m_second( const struct timeval *ts ){
	return (ts->tv_sec << 10) + (ts->tv_usec >> 10);
}
/**
 * Check whether `string` begins with the first `prefix_len` bytes of `prefix`.
 * Both buffers must provide at least `prefix_len` readable bytes (the
 * original byte-by-byte loop had the same requirement).
 *
 * Improvement: uses memcmp (string.h is already included by this header)
 * instead of a manual index loop — same result, one optimized library call,
 * and it removes the int-vs-size_t loop-counter comparison.
 *
 * @param string     text to test
 * @param prefix     candidate prefix
 * @param prefix_len number of bytes of `prefix` to compare
 * @return true when the prefix matches
 */
static ALWAYS_INLINE bool is_started_by( const char *string, const char *prefix, size_t prefix_len ){
	return memcmp( string, prefix, prefix_len ) == 0;
}
/* True when the two NUL-terminated strings have identical contents.
 * Fix: arguments parenthesized so complex expressions expand safely. */
#define IS_EQUAL_STRINGS( s1, s2 ) (strcmp((s1), (s2)) == 0)
/**
 * Append data to a file if the file is existing, otherwise, create a new file then write data to it.
 *
 * Opens with O_APPEND (atomic end-of-file positioning per write) and
 * O_NOFOLLOW (refuses to follow a symlink at file_path).
 * NOTE(review): S_IRUSR/S_IWUSR are declared in <sys/stat.h>, which this
 * header does not include directly — presumably pulled in transitively via
 * <fcntl.h>; confirm on all target platforms.
 *
 * @param file_path path of the file to append to
 * @param content   buffer holding the bytes to write
 * @param len       number of bytes to write
 * @return number of bytes written (write() may legally write fewer than
 *         `len`), or -1 when the file cannot be opened (logged via log_write)
 */
static inline ssize_t append_data_to_file(const char *file_path, const void *content, size_t len) {
	int fd;
	fd = open( file_path, O_CREAT | O_WRONLY | O_APPEND | O_NOFOLLOW, S_IRUSR | S_IWUSR );
	if ( fd < 0 ) {
		log_write( LOG_ERR, "Error %d while writing data to \"%s\": %s", errno, file_path, strerror( errno ) );
		return -1;
	}
	ssize_t ret = write( fd, content, len );
	close ( fd );
	return ret;
}
/**
 * Create a file and write data to it if the file does not exist, otherwise
 * replace the current content of the file.
 *
 * Bug fix: O_TRUNC is now passed to open(). Without it, rewriting an existing
 * file that was longer than `len` left the stale tail of the old content in
 * place, contradicting the documented "override" behavior (compare
 * append_data_to_file, which deliberately uses O_APPEND instead).
 *
 * @param file_path path of the file to (re)write
 * @param content   buffer holding the bytes to write
 * @param len       number of bytes to write
 * @return number of bytes written (write() may legally write fewer than
 *         `len`), or -1 when the file cannot be opened (logged via log_write)
 */
static inline ssize_t write_data_to_file(const char *file_path, const void *content, size_t len) {
	int fd;
	fd = open( file_path, O_CREAT | O_WRONLY | O_TRUNC | O_NOFOLLOW, S_IRUSR | S_IWUSR );
	if ( fd < 0 ) {
		log_write( LOG_ERR, "Error %d while writing data to \"%s\": %s", errno, file_path, strerror( errno ) );
		return -1;
	}
	ssize_t ret = write( fd, content, len );
	close ( fd );
	return ret;
}
/**
 * Split a string into small string segments.
 *
 * The buffer is modified in place (separator bytes become NULs) and argv[]
 * receives pointers into it, so `buffer` must outlive the result array.
 * NOTE(review): this passes NULL as the first strtok_r() argument while
 * seeding the save pointer with `buffer`; that works on glibc but POSIX
 * specifies the first call should pass the string itself — confirm
 * portability to other libcs.
 *
 * @param buffer      In/Out: modifiable string buffer to tokenise
 * @param separator   In: set of separator characters
 * @param argv        Out: array of string pointers receiving the items
 * @param argv_length In: maximum number of items argv can hold
 * @return number of result items stored into argv
 */
static inline size_t string_split(
		char *buffer,          ///< In/Out : Modifiable String Buffer To Tokenise
		const char *separator, ///< In : Separator
		char *argv[],          ///< Out : Argument String Vector Array
		size_t argv_length     ///< In : Maximum Count For `*argv[]`
)
{ /* Tokenise string buffer into argc and argv format (req: string.h) */
	int i = 0;
	for( ; i < argv_length ; i++){ /* Fill argv via strtok_r() */
		if ( NULL == (argv[i] = strtok_r( NULL, separator, &buffer)) )
			break;
	}
	return i; // Argument Count
}
/**
 * Replace unreadable characters and slash characters so the string is safe to
 * use as a file name. The buffer is rewritten in place.
 *
 * Mapping: '/' and '\\' become '='; file-system/shell metacharacters
 * ('=', ';', '?', '*', '&', ':', '|', '"', '%', '>', '<', ' ') become '_';
 * alphanumerics and everything else are kept unchanged. Note that because
 * '/' maps to '=' while a literal '=' maps to '_', the transform is not
 * reversible.
 *
 * @param file_name buffer modified in place
 * @param size size of file_name; set to zero to process until the NUL terminator
 * @return number of characters processed (length of the resulting name)
 */
static inline int string_format_file_name( char *file_name, size_t size ){
	int i;
	if( size == 0 )
		size = INT16_MAX; // effectively "until NUL" for any realistic name
	for( i=0; i<size && file_name[i] != '\0'; i++ )
		if( isalnum( file_name[i] ) )
			continue;
		else{
			switch( file_name[i] ){
			case '/':
			case '\\':
				file_name[i] = '=';
				continue;
			case '=':
			case ';':
			case '?':
			case '*':
			case '&':
			case ':':
			case '|':
			case '"':
			case '%':
			case '>':
			case '<':
			case ' ':
				file_name[i] = '_';
				continue;
			default:
				continue;
			}
		}
	return i;
}
/* True when x is a power of two. Zero is explicitly excluded; for any other
 * value, a power of two has exactly one bit set, so x & (x-1) clears it. */
static inline bool is_power_of_two (size_t x){
	if( x == 0 )
		return false;
	return ( x & (x - 1) ) == 0;
}
#endif /* SRC_LIB_TOOLS_H_ */
|
import { getRootElement, settled } from '@ember/test-helpers';
import { assert } from '@ember/debug';
import MediumEditor from 'medium-editor';
const EMBER_MEDIUM_EDITOR_SELECTOR = '.ember-medium-editor__container';
// Resolves on the next macrotask turn, giving any already-queued work a
// chance to run before the caller continues.
function nextTickPromise() {
  return new Promise((resolve) => setTimeout(resolve));
}
// Resolve the MediumEditor instance attached to `target`; when no target is
// given, fall back to the first editor container rendered under the test root.
export function getMediumEditor(target) {
  const element = target || getRootElement().querySelector(EMBER_MEDIUM_EDITOR_SELECTOR);
  assert('MediumEditor element not found.', element);

  const editor = MediumEditor.getEditorFromElement(element);
  assert('MediumEditor instance not found.', editor);

  return editor;
}
// Look up a named MediumEditor extension on the editor attached to `target`.
export function getMediumEditorExtension(name, target) {
  assert(`You should provide extension name to getMediumEditorExtension('${name}').`, name);
  return getMediumEditor(target).getExtensionByName(name);
}
// Set the editor's content after the next tick, then wait for Ember to settle.
export async function fillInMediumEditor(text, target) {
  await nextTickPromise();
  getMediumEditor(target).setContent(text);
  return settled();
}
// Fire a MediumEditor event (with optional payload/editable) after the next
// tick, then wait for Ember to settle.
export async function triggerMediumEditorEvent(event, options = { data: undefined, editable: undefined }, target) {
  await nextTickPromise();
  getMediumEditor(target).trigger(event, options.data, options.editable);
  return settled();
}
|
<reponame>NCC-AI/ncc
from .image import random_colors, apply_mask
from .palette import palettes
|
import json
import os
import re
import socket
import sys
from codecs import encode, decode
from . import shared
# Directory containing this module; used to locate the bundled tld.json.
dir_path = os.path.dirname(os.path.realpath(__file__))
# TLD -> root-server table, populated from tld.json at import time (below).
tlds = None
# Known two-level ("double") extensions such as "gov.uk" or "co.za": for
# domains ending in one of these, the WHOIS root server must be looked up by
# the full two-label suffix rather than by the last label alone.
dble_ext_str = "chirurgiens-dentistes.fr,in-addr.arpa,uk.net,za.org,mod.uk,org.za,za.com,de.com,us.com,hk.org,co.ca," \
               "avocat.fr,com.uy,gr.com,e164.arpa,hu.net,us.org,com.se,aeroport.fr,gov.uk,ru.com,alt.za,africa.com," \
               "geometre-expert.fr,in.net,co.com,kr.com,bl.uk,uk.com,port.fr,police.uk,gov.za,eu.com,eu.org,br.com," \
               "web.za,net.za,co.za,hk.com,ae.org,edu.ru,ar.com,jet.uk,icnet.uk,com.de,inc.hk,ltd.hk,parliament.uk," \
               "jp.net,gb.com,veterinaire.fr,edu.cn,qc.com,pharmacien.fr,ac.za,sa.com,medecin.fr,uy.com,se.net,co.pl," \
               "cn.com,hu.com,no.com,ac.uk,jpn.com,priv.at,za.net,nls.uk,nhs.uk,za.bz,experts-comptables.fr," \
               "chambagri.fr,gb.net,in.ua,notaires.fr,se.com,british-library.uk "
dble_ext = dble_ext_str.split(",")
# Load the TLD table once at import time.
with open(os.path.join(dir_path, "tld.json")) as fp:
    tlds = json.load(fp)
def get_whois_raw(domain, server="", previous=None, rfc3490=True, never_cut=False, with_server_list=False,
                  server_list=None):
    """Query WHOIS for `domain`, recursively following registrar referrals.

    Args:
        domain: domain name to look up.
        server: WHOIS server to query; "" means resolve the root server first.
        previous: responses accumulated from earlier hops (internal/recursive).
        rfc3490: IDNA-encode the domain per RFC 3490 before querying.
        never_cut: keep full multi-record verisign-grs responses instead of
            trimming to the record matching the requested domain.
        with_server_list: also return the list of servers queried.
        server_list: accumulator of queried servers (internal/recursive).

    Returns:
        A list of raw responses, most recent hop first — or a
        (responses, server_list) tuple when `with_server_list` is True.
    """
    previous = previous or []
    server_list = server_list or []
    # Sometimes IANA simply won't give us the right root WHOIS server
    exceptions = {
        ".ac.uk": "whois.ja.net",
        ".ps": "whois.pnina.ps",
        ".buzz": "whois.nic.buzz",
        ".moe": "whois.nic.moe",
        # The following is a bit hacky, but IANA won't return the right answer for example.com because it's a direct
        # registration.
        "example.com": "whois.verisign-grs.com"
    }
    if rfc3490:
        # IDNA-encode non-ASCII domains. The `unicode` branch only runs on
        # Python 2 (guarded by the version check), so the bare name is safe here.
        if sys.version_info < (3, 0):
            domain = encode(domain if type(domain) is unicode else decode(domain, "utf8"), "idna")
        else:
            domain = encode(domain, "idna").decode("ascii")
    if len(previous) == 0 and server == "":
        # Root query: pick the hard-coded exception server, else ask IANA data.
        is_exception = False
        for exception, exc_serv in exceptions.items():
            if domain.endswith(exception):
                is_exception = True
                target_server = exc_serv
                break
        if not is_exception:
            target_server = get_root_server(domain)
    else:
        target_server = server
    # Per-server request quirks.
    if target_server == "whois.jprs.jp":
        request_domain = "%s/e" % domain  # Suppress Japanese output
    elif domain.endswith(".de") and (target_server == "whois.denic.de" or target_server == "de.whois-servers.net"):
        request_domain = "-T dn,ace %s" % domain  # regional specific stuff
    elif target_server == "whois.verisign-grs.com":
        request_domain = "=%s" % domain  # Avoid partial matches
    else:
        request_domain = domain
    response = whois_request(request_domain, target_server)
    if never_cut:
        # If the caller has requested to 'never cut' responses, he will get the original response from the server (
        # this is useful for callers that are only interested in the raw data). Otherwise, if the target is
        # verisign-grs, we will select the data relevant to the requested domain, and discard the rest, so that in a
        # multiple-option response the parsing code will only touch the information relevant to the requested domain.
        # The side-effect of this is that when `never_cut` is set to False, any verisign-grs responses in the raw data
        # will be missing header, footer, and alternative domain options (this is handled a few lines below,
        # after the verisign-grs processing).
        new_list = [response] + previous
    if target_server == "whois.verisign-grs.com":
        # VeriSign is a little... special. As it may return multiple full records and there's no way to do an exact query,
        # we need to actually find the correct record in the list.
        for record in response.split("\n\n"):
            if re.search("Domain Name: %s\n" % domain.upper(), record):
                response = record
                break
    if never_cut == False:
        new_list = [response] + previous
    server_list.append(target_server)
    # Follow any referral header (refer / whois server / referral url /
    # registrar whois) to the next hop — unless it points back at the server
    # we just asked, or is a non-WHOIS (e.g. HTTP) URL.
    for line in [x.strip() for x in response.splitlines()]:
        match = re.match("(refer|whois server|referral url|registrar whois(?: server)?):\s*([^\s]+\.[^\s]+)", line,
                         re.IGNORECASE)
        if match is not None:
            referal_server = match.group(2)
            if referal_server != server and "://" not in referal_server:  # We want to ignore anything non-WHOIS (eg. HTTP) for now.
                # Referal to another WHOIS server...
                return get_whois_raw(domain, referal_server, new_list, server_list=server_list,
                                     with_server_list=with_server_list)
    if with_server_list:
        return new_list, server_list
    else:
        return new_list
def get_root_server(domain):
    """Return the root WHOIS server host for ``domain``.

    The lookup key is the last dot-separated label of the domain, unless the
    domain ends with a known double extension (e.g. ``co.uk``) listed in the
    module-level ``dble_ext``, in which case the double extension is used.

    Raises:
        shared.WhoisException: if no WHOIS server is known for the TLD.
    """
    ext = domain.split(".")[-1]
    # NOTE(review): no break here -- the *last* matching double extension in
    # dble_ext wins; presumably entries do not overlap.  TODO confirm.
    for dble in dble_ext:
        if domain.endswith(dble):
            ext = dble
    # Membership test on the dict itself instead of the unidiomatic .keys().
    if ext in tlds:
        return tlds[ext]["host"]
    raise shared.WhoisException("No root WHOIS server found for domain.")
def whois_request(domain, server, port=43):
    """Send a raw WHOIS query for ``domain`` to ``server`` and return the reply.

    Opens a TCP connection, writes the query terminated by CRLF, and reads
    until the server closes the connection.  The response is decoded as
    UTF-8, falling back to latin-1 for servers that send non-UTF-8 bytes.
    """
    buff = b""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect((server, port))
        # sendall() retries partial writes; plain send() may truncate the query.
        sock.sendall(("%s\r\n" % domain).encode("utf-8"))
        while True:
            data = sock.recv(1024)
            if len(data) == 0:
                break
            buff += data
    finally:
        # Close the socket even when connect/recv raises, avoiding an fd leak.
        sock.close()
    try:
        return buff.decode("utf-8")
    except UnicodeDecodeError:
        return buff.decode("latin-1")
# Ad-hoc smoke test: fetch and print the raw WHOIS response for a sample
# domain when this module is executed directly (performs network I/O).
if __name__ == "__main__":
    d = get_whois_raw("orange.cm")
    print(d)
|
#!/usr/bin/env bash
# Run the update test suite against successive timescaledb image tag sets.
#
# set -e / pipefail abort the script as soon as any test invocation fails,
# so the explicit EXIT_CODE=$? checks that followed each run were dead code
# (unreachable under errexit) and have been removed.
set -e
set -o pipefail

SCRIPT_DIR=$(dirname "$0")

# 1.1.x / 1.2.x images are validated against the v2 test suite.
TAGS="1.1.0-pg11 1.1.1-pg11 1.2.0-pg11 1.2.1-pg11 1.2.2-pg11" TEST_VERSION="v2" \
    bash "${SCRIPT_DIR}/test_updates.sh"

# 1.3.x / 1.4.x images are validated against the v4 test suite.
TAGS="1.3.0-pg11 1.3.1-pg11 1.3.2-pg11 1.4.0-pg11 1.4.1-pg11 1.4.2-pg11" TEST_VERSION="v4" \
    bash "${SCRIPT_DIR}/test_updates.sh"

# 1.5.x images are validated against the v5-pg11 test suite.
TAGS="1.5.0-pg11" TEST_VERSION="v5-pg11" \
    bash "${SCRIPT_DIR}/test_updates.sh"
|
# Fruit classification with Gaussian Naive Bayes.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix

# Load the dataset: the first ten columns are features, column 10 the label.
fruits_dataset = pd.read_csv("fruits.csv")
X = fruits_dataset.iloc[:, :10].values
y = fruits_dataset.iloc[:, 10].values

# Hold out 20% of the samples for evaluation (fixed seed for repeatability).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Standardise features; the scaler is fitted on the training split only.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Train the classifier and predict the held-out samples.
classifier = GaussianNB()
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)

# Summarise prediction quality as a confusion matrix.
cm = confusion_matrix(y_test, y_pred)
|
# Copyright 2017, AppDynamics LLC and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Exercises the full CNAME lifecycle: create against a missing zone (expected
# failure), create, update (new target and TTL), and delete -- verifying each
# state through dig against the test pdns_server.
testCreateUpdateDeleteCnameRecord(){
    local DEBUG_FLAG
    local ZONE_NAME=$(_random_alphanumeric_chars 3).$(_random_alphanumeric_chars 3).tld.
    local PRIMARY_MASTER=primary.master.$ZONE_NAME
    local RECORD_NAME=$(_random_alphanumeric_chars 11)
    local CNAME_TARGET=$(_random_alphanumeric_chars 11).$(_random_alphanumeric_chars 3).$(_random_alphanumeric_chars 3).tld.
    local TTL=85399
    local SCRIPT_STDERR="$(mktemp)"
    if [ "$ENABLE_DEBUG" == "true" ]; then
        DEBUG_FLAG=-d
    fi
    # attempt to create cname record before the zone exists
    create_update-pdns-cname-record.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" -t $TTL\
        $RECORD_NAME.$ZONE_NAME $CNAME_TARGET 2>"$SCRIPT_STDERR"
    # assert that the creation attempt failed because the zone didn't exist
    assertEquals "Error: Zone '$ZONE_NAME' does not exist." "$(head -1 "$SCRIPT_STDERR")"
    rm -f "$SCRIPT_STDERR"
    # create zone
    create-pdns-zone.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" $ZONE_NAME $PRIMARY_MASTER
    # create cname
    # attempt to create cname record
    create_update-pdns-cname-record.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" -t $TTL\
        $RECORD_NAME.$ZONE_NAME $CNAME_TARGET
    _wait_for_cache_expiry
    # assert that the CNAME record was created with all script parameters present
    local DIG_TTL
    local DIG_CNAME_TARGET
    # awk parses the dig answer section: $1 owner name, $2 TTL, $5 CNAME
    # target; eval imports the printed variable assignments into the shell.
    eval $($TEST_DIG $RECORD_NAME.$ZONE_NAME CNAME | awk '
        /[\t\s]CNAME[\t\s]/{
            if($1 == "'"$RECORD_NAME.$ZONE_NAME"'"){
                print "DIG_TTL="$2;
                print "DIG_CNAME_TARGET="$5
            }
        }
    ')
    assertEquals "CNAME record ttl mismatch" "$TTL" "$DIG_TTL"
    assertEquals "CNAME record target mismatch" "$CNAME_TARGET" "$DIG_CNAME_TARGET"
    # update cname: re-run the create/update script with a new target and TTL
    CNAME_TARGET=$(_random_alphanumeric_chars 11).$(_random_alphanumeric_chars 3).$(_random_alphanumeric_chars 3).tld.
    TTL=86401
    create_update-pdns-cname-record.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" -t $TTL\
        $RECORD_NAME.$ZONE_NAME $CNAME_TARGET
    _wait_for_cache_expiry
    # assert that it updated
    DIG_TTL=
    DIG_CNAME_TARGET=
    eval $($TEST_DIG $RECORD_NAME.$ZONE_NAME CNAME | awk '
        /[\t\s]CNAME[\t\s]/{
            if($1 == "'"$RECORD_NAME.$ZONE_NAME"'"){
                print "DIG_TTL="$2;
                print "DIG_CNAME_TARGET="$5
            }
        }
    ')
    assertEquals "CNAME record ttl mismatch after update" "$TTL" "$DIG_TTL"
    assertEquals "CNAME record target mismatch after update" "$CNAME_TARGET" "$DIG_CNAME_TARGET"
    # delete cname
    delete-pdns-cname-record.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" $RECORD_NAME.$ZONE_NAME
    _wait_for_cache_expiry
    # assert that the CNAME record is gone
    DIG_CNAME_TARGET=
    eval $($TEST_DIG $RECORD_NAME.$ZONE_NAME CNAME | awk '
        /[\t\s]CNAME[\t\s]/{
            if($1 == "'"$RECORD_NAME.$ZONE_NAME"'"){
                print "DIG_CNAME_TARGET="$5
            }
        }
    ')
    assertEquals "CNAME record still present after delete" "" "$DIG_CNAME_TARGET"
    # delete zone
    delete-pdns-zone.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" $ZONE_NAME
}
# test for '@' record creation and deletion: the record name equals the zone
# name itself, so the zone-existence error names the parent suffix instead.
testCreateDeleteAtCnameRecord(){
    local DEBUG_FLAG
    local ZONE_SUFFIX=$(_random_alphanumeric_chars 3).tld.
    local ZONE_NAME=$(_random_alphanumeric_chars 3).$ZONE_SUFFIX
    local PRIMARY_MASTER=primary.master.$ZONE_NAME
    # the '@' case: record name is the zone apex itself
    local RECORD_NAME=$ZONE_NAME
    local CNAME_TARGET=$(_random_alphanumeric_chars 11).$(_random_alphanumeric_chars 3).$(_random_alphanumeric_chars 3).tld.
    local TTL=85399
    local SCRIPT_STDERR="$(mktemp)"
    if [ "$ENABLE_DEBUG" == "true" ]; then
        DEBUG_FLAG=-d
    fi
    # attempt to create cname record before the zone exists
    create_update-pdns-cname-record.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" -t $TTL\
        $RECORD_NAME $CNAME_TARGET 2>"$SCRIPT_STDERR"
    # assert that the creation attempt failed because the zone didn't exist
    assertEquals "Error: Zone '$ZONE_SUFFIX' does not exist." "$(head -1 "$SCRIPT_STDERR")"
    rm -f "$SCRIPT_STDERR"
    # create zone
    create-pdns-zone.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" $ZONE_NAME $PRIMARY_MASTER
    # create cname
    # attempt to create cname record
    create_update-pdns-cname-record.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" -t $TTL\
        $RECORD_NAME $CNAME_TARGET
    _wait_for_cache_expiry
    # assert that the CNAME record was created with all script parameters present
    local DIG_TTL
    local DIG_CNAME_TARGET
    # awk parses dig output ($1 owner, $2 TTL, $5 target); eval imports the
    # printed assignments into the shell.
    eval $($TEST_DIG $RECORD_NAME CNAME | awk '
        /[\t\s]CNAME[\t\s]/{
            if($1 == "'"$RECORD_NAME"'"){
                print "DIG_TTL="$2;
                print "DIG_CNAME_TARGET="$5
            }
        }
    ')
    assertEquals "CNAME record ttl mismatch" "$TTL" "$DIG_TTL"
    assertEquals "CNAME record target mismatch" "$CNAME_TARGET" "$DIG_CNAME_TARGET"
    # delete cname
    delete-pdns-cname-record.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" $RECORD_NAME
    _wait_for_cache_expiry
    # assert that the CNAME record is gone
    DIG_CNAME_TARGET=
    eval $($TEST_DIG $RECORD_NAME CNAME | awk '
        /[\t\s]CNAME[\t\s]/{
            if($1 == "'"$RECORD_NAME"'"){
                print "DIG_CNAME_TARGET="$5
            }
        }
    ')
    assertEquals "CNAME record still present after delete" "" "$DIG_CNAME_TARGET"
    # delete zone
    delete-pdns-zone.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" $ZONE_NAME
}
# Verifies that omitting -t makes the create/update script apply the default
# TTL of 86400 to a new CNAME record.
testCreateDeleteCnameRecordWithDefaults(){
    local DEBUG_FLAG
    local ZONE_NAME=$(_random_alphanumeric_chars 3).$(_random_alphanumeric_chars 3).tld.
    local PRIMARY_MASTER=primary.master.$ZONE_NAME
    local RECORD_NAME=$(_random_alphanumeric_chars 11)
    local CNAME_TARGET=$(_random_alphanumeric_chars 11).$(_random_alphanumeric_chars 3).$(_random_alphanumeric_chars 3).tld.
    # expected default TTL applied by the script when -t is omitted
    local TTL=86400
    local SCRIPT_STDERR="$(mktemp)"
    if [ "$ENABLE_DEBUG" == "true" ]; then
        DEBUG_FLAG=-d
    fi
    # create zone
    create-pdns-zone.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" $ZONE_NAME $PRIMARY_MASTER
    # create cname
    # attempt to create cname record (no -t: rely on the script's default TTL)
    create_update-pdns-cname-record.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" $RECORD_NAME.$ZONE_NAME \
        $CNAME_TARGET
    # NOTE(review): _wait_for_cache_expiry is invoked twice here -- looks like
    # an accidental duplication; one wait should suffice.  TODO confirm.
    _wait_for_cache_expiry
    _wait_for_cache_expiry
    # assert that the CNAME record was created with expected defaults
    local DIG_TTL
    local DIG_CNAME_TARGET
    eval $($TEST_DIG $RECORD_NAME.$ZONE_NAME CNAME | awk '
        /[\t\s]CNAME[\t\s]/{
            if($1 == "'"$RECORD_NAME.$ZONE_NAME"'"){
                print "DIG_TTL="$2;
                print "DIG_CNAME_TARGET="$5
            }
        }
    ')
    assertEquals "CNAME record ttl mismatch" "$TTL" "$DIG_TTL"
    assertEquals "CNAME record target mismatch" "$CNAME_TARGET" "$DIG_CNAME_TARGET"
    # delete zone
    delete-pdns-zone.sh $DEBUG_FLAG -C "$PDNS_TEST_DATA_ROOT/conf/pdns.conf" $ZONE_NAME
}#!/bin/bash
# Copyright 2017, AppDynamics LLC and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Allow an interactive debug shell during cleanup when DEBUG_SHELL=true.
if ! [ "$DEBUG_SHELL" == true ]; then
    DEBUG_SHELL=false
fi
# Handles populated by oneTimeSetUp and consumed by _test_cleanup.
declare PDNS_TEST_DATA_ROOT PDNS_PID PDNS_STDERR PDNS_STDOUT
# Non-privileged test ports for the PDNS DNS and HTTP endpoints.
PDNS_TEST_DNS_PORT=5354
PDNS_TEST_HTTP_PORT=8011
# Short cache TTL so tests only wait CACHE_TTL+1 seconds for cache expiry.
CACHE_TTL=1
SLEEP_TIME=$((CACHE_TTL+1))
# Alias dig with recurring options
# include +tcp so that dig fails fast if DNS server is down
TEST_DIG="dig @localhost +noquestion +nocomments +nocmd +nostats +tcp -p $PDNS_TEST_DNS_PORT"
# EXIT trap: optionally drop into a debug shell, stop the test pdns_server,
# remove the temporary data directory, then chain to shunit2's own cleanup.
_test_cleanup(){
    if $DEBUG_SHELL; then
        >&2 echo "Dropping to a shell for debugging purposes. Exit to complete cleanup."
        pushd $PDNS_TEST_DATA_ROOT
        /bin/bash
        popd
    fi
    if [ -n "$PDNS_PID" ]; then
        # a failing kill means the daemon already died; surface its stderr
        if kill -TERM $PDNS_PID >/dev/null 2>&1; then
            >&2 echo "Terminated pdns_server pid $PDNS_PID"
        else
            >&2 echo "pdns_server pid $PDNS_PID died prematurely. STDERR below..."
            >&2 cat "$PDNS_STDERR"
        fi
    fi
    >&2 echo "Deleting $PDNS_TEST_DATA_ROOT"
    rm -rf "$PDNS_TEST_DATA_ROOT"
    # since we displace _shunit_cleanup() with 'trap _test_cleanup EXIT', call it after test-specific cleanup is
    # complete
    _shunit_cleanup EXIT
}
# $1: number of random alphanumeric characters to output
# Emits $1 random characters from [a-z0-9]; returns 1 if $1 is not an integer.
_random_alphanumeric_chars(){
    if [[ "$1" =~ ^[0-9]+$ ]]; then
        # Read /dev/urandom directly (no useless cat) and quote the count.
        LC_CTYPE=C tr -dc 'a-z0-9' < /dev/urandom | head -c "$1"
    else
        return 1
    fi
}
# Echoes a random number in the range 0-254, suitable as an IPv4 octet.
_random_ipv4_octet(){
    printf '%s\n' $(( RANDOM % 255 ))
}
# Sleep long enough for the PDNS packet/query caches (CACHE_TTL) to expire.
# (A stray bare "local" was removed: with no operands it prints the
# function's local variables to stdout instead of declaring anything.)
_wait_for_cache_expiry(){
    if [ "$ENABLE_DEBUG" == "true" ]; then
        >&2 echo "Waiting $SLEEP_TIME seconds for PDNS cache to expire"
    fi
    sleep $SLEEP_TIME
}
# shunit2 one-time fixture: create a throw-away PDNS config + sqlite backend
# under a temp directory and launch pdns_server against it in the background.
oneTimeSetUp(){
    PDNS_TEST_DATA_ROOT="$(mktemp -d)"
    PDNS_CONF_DIR="$PDNS_TEST_DATA_ROOT/conf"
    PDNS_SQLITE_DIR="$PDNS_TEST_DATA_ROOT/var"
    PDNS_STDOUT="$PDNS_TEST_DATA_ROOT/pdns.out"
    PDNS_STDERR="$PDNS_TEST_DATA_ROOT/pdns.err"
    # ensure everything above is torn down however the test run ends
    trap _test_cleanup EXIT
    # generate temporary pdns config / sqlite database
    /init-pdns-sqlite3-db-and-config.sh -n -C "$PDNS_CONF_DIR" -D "$PDNS_SQLITE_DIR"\
        -p $PDNS_TEST_DNS_PORT -P $CACHE_TTL -q $CACHE_TTL -H $PDNS_TEST_HTTP_PORT -s "$PDNS_TEST_DATA_ROOT"
    # start pdns_server, redirect stdout, stderr to files in $PDNS_TEST_DATA_ROOT and background
    >&2 echo "Starting test pdns_server from $PDNS_TEST_DATA_ROOT"
    pdns_server --config-dir="$PDNS_CONF_DIR" > "$PDNS_STDOUT" 2> "$PDNS_STDERR" &
    # save PID
    PDNS_PID=$!
    # fail fast if the daemon did not come up
    if ! ps -p $PDNS_PID; then
        >&2 echo "pdns_server failed to start."
        >&2 cat "$PDNS_STDERR"
        exit 1
    fi
}
ENABLE_DEBUG=true
#!/bin/bash
# Copyright 2017, AppDynamics LLC and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Allow an interactive debug shell during cleanup when DEBUG_SHELL=true.
if ! [ "$DEBUG_SHELL" == true ]; then
    DEBUG_SHELL=false
fi
# Handles populated by oneTimeSetUp and consumed by _test_cleanup.
declare PDNS_TEST_DATA_ROOT PDNS_PID PDNS_STDERR PDNS_STDOUT
# Non-privileged test ports for the PDNS DNS and HTTP endpoints.
PDNS_TEST_DNS_PORT=5354
PDNS_TEST_HTTP_PORT=8011
# Short cache TTL so tests only wait CACHE_TTL+1 seconds for cache expiry.
CACHE_TTL=1
SLEEP_TIME=$((CACHE_TTL+1))
# Alias dig with recurring options
# include +tcp so that dig fails fast if DNS server is down
TEST_DIG="dig @localhost +noquestion +nocomments +nocmd +nostats +tcp -p $PDNS_TEST_DNS_PORT"
# EXIT trap: optionally drop into a debug shell, stop the test pdns_server,
# remove the temporary data directory, then chain to shunit2's own cleanup.
_test_cleanup(){
    if $DEBUG_SHELL; then
        >&2 echo "Dropping to a shell for debugging purposes. Exit to complete cleanup."
        pushd $PDNS_TEST_DATA_ROOT
        /bin/bash
        popd
    fi
    if [ -n "$PDNS_PID" ]; then
        # a failing kill means the daemon already died; surface its stderr
        if kill -TERM $PDNS_PID >/dev/null 2>&1; then
            >&2 echo "Terminated pdns_server pid $PDNS_PID"
        else
            >&2 echo "pdns_server pid $PDNS_PID died prematurely. STDERR below..."
            >&2 cat "$PDNS_STDERR"
        fi
    fi
    >&2 echo "Deleting $PDNS_TEST_DATA_ROOT"
    rm -rf "$PDNS_TEST_DATA_ROOT"
    # since we displace _shunit_cleanup() with 'trap _test_cleanup EXIT', call it after test-specific cleanup is
    # complete
    _shunit_cleanup EXIT
}
# $1: number of random alphanumeric characters to output
# Emits $1 random characters from [a-z0-9]; returns 1 if $1 is not an integer.
_random_alphanumeric_chars(){
    if [[ "$1" =~ ^[0-9]+$ ]]; then
        # Read /dev/urandom directly (no useless cat) and quote the count.
        LC_CTYPE=C tr -dc 'a-z0-9' < /dev/urandom | head -c "$1"
    else
        return 1
    fi
}
# Echoes a random number in the range 0-254, suitable as an IPv4 octet.
_random_ipv4_octet(){
    printf '%s\n' $(( RANDOM % 255 ))
}
# Sleep long enough for the PDNS packet/query caches (CACHE_TTL) to expire.
# (A stray bare "local" was removed: with no operands it prints the
# function's local variables to stdout instead of declaring anything.)
_wait_for_cache_expiry(){
    if [ "$ENABLE_DEBUG" == "true" ]; then
        >&2 echo "Waiting $SLEEP_TIME seconds for PDNS cache to expire"
    fi
    sleep $SLEEP_TIME
}
# shunit2 one-time fixture: create a throw-away PDNS config + sqlite backend
# under a temp directory and launch pdns_server against it in the background.
oneTimeSetUp(){
    PDNS_TEST_DATA_ROOT="$(mktemp -d)"
    PDNS_CONF_DIR="$PDNS_TEST_DATA_ROOT/conf"
    PDNS_SQLITE_DIR="$PDNS_TEST_DATA_ROOT/var"
    PDNS_STDOUT="$PDNS_TEST_DATA_ROOT/pdns.out"
    PDNS_STDERR="$PDNS_TEST_DATA_ROOT/pdns.err"
    # ensure everything above is torn down however the test run ends
    trap _test_cleanup EXIT
    # generate temporary pdns config / sqlite database
    /init-pdns-sqlite3-db-and-config.sh -n -C "$PDNS_CONF_DIR" -D "$PDNS_SQLITE_DIR"\
        -p $PDNS_TEST_DNS_PORT -P $CACHE_TTL -q $CACHE_TTL -H $PDNS_TEST_HTTP_PORT -s "$PDNS_TEST_DATA_ROOT"
    # start pdns_server, redirect stdout, stderr to files in $PDNS_TEST_DATA_ROOT and background
    >&2 echo "Starting test pdns_server from $PDNS_TEST_DATA_ROOT"
    pdns_server --config-dir="$PDNS_CONF_DIR" > "$PDNS_STDOUT" 2> "$PDNS_STDERR" &
    # save PID
    PDNS_PID=$!
    # fail fast if the daemon did not come up
    if ! ps -p $PDNS_PID; then
        >&2 echo "pdns_server failed to start."
        >&2 cat "$PDNS_STDERR"
        exit 1
    fi
}
source /shunit2
|
// Criar um objeto Postagem de blog que vai conter as seguintes propriedades
// postagem
/*
titulo
mensagem
autor
vizualisações
comentarios
(autor, mensagem)
estaAoVivo
*/
// Blog post object: title, message, author, view count, a list of comments
// (each with its own author and message) and a "live" flag.
const comentarioPadrao = { autor: 'a', mensagem: 'b' };
let postagem = {
  titulo: 'a',
  mensagem: 'b',
  autor: 'c',
  vizualisacoes: 10,
  comentarios: [
    { ...comentarioPadrao },
    { ...comentarioPadrao },
    { ...comentarioPadrao },
  ],
  estaAoVivo: true,
};
console.log(postagem);
|
<filename>open-sphere-base/mantle/src/main/java/io/opensphere/mantle/mp/event/impl/RootMapAnnotationPointGroupRemovedEvent.java<gh_stars>10-100
package io.opensphere.mantle.mp.event.impl;
import io.opensphere.mantle.mp.AbstractMapAnnotationPointGroupChangeEvent;
import io.opensphere.mantle.mp.MutableMapAnnotationPointGroup;
import io.opensphere.mantle.mp.event.AbstractRootMapAnnotationPointRegistryEvent;
/**
 * Signals that a root map annotation point group has been removed from the
 * controller.  All event state lives in the superclass; this subclass only
 * supplies the human-readable description.
 */
public class RootMapAnnotationPointGroupRemovedEvent extends AbstractRootMapAnnotationPointRegistryEvent
{
    /**
     * Instantiates a new RootMapAnnotationPointGroupRemovedEvent.
     *
     * @param removedRoot the root group that has been removed
     * @param originEvent the event that caused this event to be generated ( or
     *            null if none)
     * @param source - the source of the event ( object that caused the event to
     *            be generated )
     */
    public RootMapAnnotationPointGroupRemovedEvent(MutableMapAnnotationPointGroup removedRoot,
            AbstractMapAnnotationPointGroupChangeEvent originEvent, Object source)
    {
        super(removedRoot, originEvent, source);
    }

    @Override
    public String getDescription()
    {
        // Fixed description; doubles as the event's display name.
        return "RootMapAnnotationPointGroupRemovedEvent";
    }
}
|
-- Top 10 products by total 2018 sales revenue, highest-selling first.
SELECT Name, SUM(Sales) AS TotalSales
FROM Products
WHERE Year = 2018
GROUP BY Name
ORDER BY TotalSales DESC
LIMIT 10;
|
# Example 1: basic list operations -- append user input and iterate.
print("========== Example 1 ==========")
team_members = ["Carlos", "Antonio", "Daniel", "Dominika", "Michael"]
print(team_members)
team_members.append(input("Enter new member: "))
print(team_members)
# NOTE(review): the loop variable is named "state" but holds a team member.
for state in team_members:
    print("Team member: " + state)

# Example 2: concatenating and sorting lists.
print("========== Example 2 ==========")
even_numbers = [2, 4, 6, 8]
odd_numbers = [1, 3, 5, 7, 9]
print(even_numbers + odd_numbers)
print(sorted(even_numbers + odd_numbers))  # sorted() returns a new ordered list
all_numbers = even_numbers + odd_numbers
all_numbers.sort()  # sort() orders the list in place
print(all_numbers)

# Example 3: two ways of creating an empty list; both compare equal.
print("========== Example 3 ==========")
list_1 = []
list_2 = list()
print("List 1: {}".format(list_1))
print("List 2: {}".format(list_2))
if list_1 == list_2:
    print("The list are equal")
print(list("The list are equal"))  # list() splits a string into characters

print("========== Example 4 ==========")
# add to the program below so that if it finds a meal without spam
# it prints out each of the ingredients of the meal.
# You will need to set up the menu as we did before
menu = []
menu.append(["egg", "spam", "bacon"])
menu.append(["egg", "sausage", "bacon"])
menu.append(["egg", "spam"])
menu.append(["egg", "bacon", "spam"])
menu.append(["egg", "bacon", "sausage", "spam"])
menu.append(["spam", "bacon", "sausage", "spam"])
menu.append(["spam", "egg", "spam", "spam", "bacon", "spam"])
menu.append(["spam", "egg", "sausage", "spam"])
print(menu)
# Print every spam-free meal followed by its individual ingredients.
for meal in menu:
    if "spam" not in meal:
        print(meal)
        for ingredient in meal:
            print(ingredient)

print("========== Example 5 ==========")
# Wrong use of the List
my_list = ["a", "b", "c", "d"]
bad_string = " "
for c in my_list:
    bad_string = ", ".join(my_list)
# The loop above recomputes the full join on every iteration, so it adds
# nothing over a single call.  (Strings -- not lists -- are the immutable
# type here; the original comment had that backwards.)
print(bad_string)
# Good use of the List
good_string = ", ".join(my_list)
print(good_string)
# NOTE(review): this literal stops at 'y' -- 'z' appears to be missing.
my_letters = "abcdefghijklmnopqrstuvwxy"
print(", ".join(my_letters))  # join also accepts any iterable of characters
|
#!/bin/sh
# Build the project twice in-place with the ARM GCC toolchain: first a Debug
# configuration, then a Release one.  Re-running cmake in the same directory
# reconfigures the existing build tree before each make.
cmake -DCMAKE_TOOLCHAIN_FILE="../../../../../../tools/cmake_toolchain_files/armgcc.cmake" -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug .
make -j4
cmake -DCMAKE_TOOLCHAIN_FILE="../../../../../../tools/cmake_toolchain_files/armgcc.cmake" -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Release .
make -j4
|
<reponame>pipern/Trans-hadoop-release-HDP-2.6.0.3-8
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.AbstractList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.util.CyclicIteration;
import org.apache.hadoop.util.ChunkedArrayList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static com.google.common.base.Preconditions.checkArgument;
import static org.apache.hadoop.util.Time.monotonicNow;
/**
* Manages datanode decommissioning. A background monitor thread
* periodically checks the status of datanodes that are in-progress of
* decommissioning.
* <p/>
* A datanode can be decommissioned in a few situations:
* <ul>
* <li>If a DN is dead, it is decommissioned immediately.</li>
* <li>If a DN is alive, it is decommissioned after all of its blocks
* are sufficiently replicated. Merely under-replicated blocks do not
* block decommissioning as long as they are above a replication
* threshold.</li>
* </ul>
* In the second case, the datanode transitions to a
* decommission-in-progress state and is tracked by the monitor thread. The
* monitor periodically scans through the list of insufficiently replicated
* blocks on these datanodes to
* determine if they can be decommissioned. The monitor also prunes this list
* as blocks become replicated, so monitor scans will become more efficient
* over time.
* <p/>
* Decommission-in-progress nodes that become dead do not progress to
* decommissioned until they become live again. This prevents potential
* durability loss for singly-replicated blocks (see HDFS-6791).
* <p/>
* This class depends on the FSNamesystem lock for synchronization.
*/
@InterfaceAudience.Private
public class DecommissionManager {
  private static final Logger LOG = LoggerFactory.getLogger(DecommissionManager
      .class);

  /** Namesystem whose write lock guards all decommissioning state. */
  private final Namesystem namesystem;
  /** Block manager used to query replication state and placement policy. */
  private final BlockManager blockManager;
  /** Heartbeat manager that maintains per-datanode admin state and stats. */
  private final HeartbeatManager hbManager;
  /** Single-threaded executor that runs the periodic Monitor task. */
  private final ScheduledExecutorService executor;

  /**
   * Map containing the decommission-in-progress datanodes that are being
   * tracked so they can be marked as decommissioned.
   * <p/>
   * This holds a set of references to the under-replicated blocks on the DN at
   * the time the DN is added to the map, i.e. the blocks that are preventing
   * the node from being marked as decommissioned. During a monitor tick, this
   * list is pruned as blocks becomes replicated.
   * <p/>
   * Note also that the reference to the list of under-replicated blocks
   * will be null on initial add
   * <p/>
   * However, this map can become out-of-date since it is not updated by block
   * reports or other events. Before being finally marking as decommissioned,
   * another check is done with the actual block map.
   */
  private final TreeMap<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>
      decomNodeBlocks;

  /**
   * Tracking a node in decomNodeBlocks consumes additional memory. To limit
   * the impact on NN memory consumption, we limit the number of nodes in
   * decomNodeBlocks. Additional nodes wait in pendingNodes.
   */
  private final Queue<DatanodeDescriptor> pendingNodes;

  /** Periodic scanning task; null until activate() is called. */
  private Monitor monitor = null;
  /**
   * Creates the manager with its collaborators; no background work starts
   * until {@link #activate} schedules the monitor on the daemon executor.
   */
  DecommissionManager(final Namesystem namesystem,
      final BlockManager blockManager, final HeartbeatManager hbManager) {
    this.namesystem = namesystem;
    this.blockManager = blockManager;
    this.hbManager = hbManager;
    // Daemon thread so an un-shut-down monitor never blocks JVM exit.
    executor = Executors.newScheduledThreadPool(1,
        new ThreadFactoryBuilder().setNameFormat("DecommissionMonitor-%d")
            .setDaemon(true).build());
    decomNodeBlocks = new TreeMap<>();
    pendingNodes = new LinkedList<>();
  }
  /**
   * Start the decommission monitor thread.
   *
   * @param conf configuration supplying the check interval, the per-tick
   *            block limit, and the concurrently-tracked-node limit
   */
  void activate(Configuration conf) {
    final int intervalSecs =
        conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
            DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT);
    checkArgument(intervalSecs >= 0, "Cannot set a negative " +
        "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);

    // By default, the new configuration key overrides the deprecated one.
    // No # node limit is set.
    int blocksPerInterval = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
    int nodesPerInterval = Integer.MAX_VALUE;

    // If the expected key isn't present and the deprecated one is,
    // use the deprecated one into the new one. This overrides the
    // default.
    //
    // Also print a deprecation warning.
    final String deprecatedKey =
        "dfs.namenode.decommission.nodes.per.interval";
    final String strNodes = conf.get(deprecatedKey);
    if (strNodes != null) {
      nodesPerInterval = Integer.parseInt(strNodes);
      // Deprecated node-count limit replaces the block-count limit entirely.
      blocksPerInterval = Integer.MAX_VALUE;
      LOG.warn("Using deprecated configuration key {} value of {}.",
          deprecatedKey, nodesPerInterval);
      LOG.warn("Please update your configuration to use {} instead.",
          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
    }
    checkArgument(blocksPerInterval > 0,
        "Must set a positive value for "
        + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);

    final int maxConcurrentTrackedNodes = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
    checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " +
        "value for "
        + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);

    monitor = new Monitor(blocksPerInterval,
        nodesPerInterval, maxConcurrentTrackedNodes);
    executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs,
        TimeUnit.SECONDS);

    LOG.debug("Activating DecommissionManager with interval {} seconds, " +
        "{} max blocks per interval, {} max nodes per interval, " +
        "{} max concurrently tracked nodes.", intervalSecs,
        blocksPerInterval, nodesPerInterval, maxConcurrentTrackedNodes);
  }
/**
* Stop the decommission monitor thread, waiting briefly for it to terminate.
*/
void close() {
executor.shutdownNow();
try {
executor.awaitTermination(3000, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {}
}
/**
* Start decommissioning the specified datanode.
* @param node
*/
@VisibleForTesting
public void startDecommission(DatanodeDescriptor node) {
if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
// Update DN stats maintained by HeartbeatManager
hbManager.startDecommission(node);
// hbManager.startDecommission will set dead node to decommissioned.
if (node.isDecommissionInProgress()) {
for (DatanodeStorageInfo storage : node.getStorageInfos()) {
LOG.info("Starting decommission of {} {} with {} blocks",
node, storage, storage.numBlocks());
}
node.decommissioningStatus.setStartTime(monotonicNow());
pendingNodes.add(node);
}
} else {
LOG.trace("startDecommission: Node {} in {}, nothing to do." +
node, node.getAdminState());
}
}
/**
* Stop decommissioning the specified datanode.
* @param node
*/
@VisibleForTesting
public void stopDecommission(DatanodeDescriptor node) {
if (node.isDecommissionInProgress() || node.isDecommissioned()) {
// Update DN stats maintained by HeartbeatManager
hbManager.stopDecommission(node);
// Over-replicated blocks will be detected and processed when
// the dead node comes back and send in its full block report.
if (node.isAlive) {
blockManager.processOverReplicatedBlocksOnReCommission(node);
}
// Remove from tracking in DecommissionManager
pendingNodes.remove(node);
decomNodeBlocks.remove(node);
} else {
LOG.trace("stopDecommission: Node {} in {}, nothing to do." +
node, node.getAdminState());
}
}
  /** Marks the node fully decommissioned and logs the completion. */
  private void setDecommissioned(DatanodeDescriptor dn) {
    dn.setDecommissioned();
    LOG.info("Decommissioning complete for node {}", dn);
  }
  /**
   * Checks whether a block is sufficiently replicated for decommissioning.
   * Full-strength replication is not always necessary, hence "sufficient".
   *
   * @param block the block being checked
   * @param bc the block collection (file) the block belongs to
   * @param numberReplicas replica counts for the block
   * @return true if sufficient, else false.
   */
  private boolean isSufficientlyReplicated(BlockInfoContiguous block,
      BlockCollection bc,
      NumberReplicas numberReplicas) {
    final int numExpected = bc.getBlockReplication();
    final int numLive = numberReplicas.liveReplicas();
    if (numLive >= numExpected
        && blockManager.isPlacementPolicySatisfied(block)) {
      // Block has enough replica, skip
      LOG.trace("Block {} does not need replication.", block);
      return true;
    }
    // Block is under-replicated
    LOG.trace("Block {} numExpected={}, numLive={}", block, numExpected,
        numLive);
    if (numExpected > numLive) {
      if (bc.isUnderConstruction() && block.equals(bc.getLastBlock())) {
        // Can decom a UC block as long as there will still be minReplicas
        if (numLive >= blockManager.minReplication) {
          LOG.trace("UC block {} sufficiently-replicated since numLive ({}) "
              + ">= minR ({})", block, numLive, blockManager.minReplication);
          return true;
        } else {
          LOG.trace("UC block {} insufficiently-replicated since numLive "
              + "({}) < minR ({})", block, numLive,
              blockManager.minReplication);
        }
      } else {
        // Can decom a non-UC as long as the default replication is met
        if (numLive >= blockManager.defaultReplication) {
          return true;
        }
      }
    }
    // Falls through when live >= expected but placement is unsatisfied, or
    // neither the minReplication nor defaultReplication relaxation applies.
    return false;
  }
private static void logBlockReplicationInfo(Block block, BlockCollection bc,
DatanodeDescriptor srcNode, NumberReplicas num,
Iterable<DatanodeStorageInfo> storages) {
int curReplicas = num.liveReplicas();
int curExpectedReplicas = bc.getBlockReplication();
StringBuilder nodeList = new StringBuilder();
for (DatanodeStorageInfo storage : storages) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
nodeList.append(node);
nodeList.append(" ");
}
LOG.info("Block: " + block + ", Expected Replicas: "
+ curExpectedReplicas + ", live replicas: " + curReplicas
+ ", corrupt replicas: " + num.corruptReplicas()
+ ", decommissioned replicas: " + num.decommissioned()
+ ", decommissioning replicas: " + num.decommissioning()
+ ", excess replicas: " + num.excessReplicas()
+ ", Is Open File: " + bc.isUnderConstruction()
+ ", Datanodes having this block: " + nodeList + ", Current Datanode: "
+ srcNode + ", Is current datanode decommissioning: "
+ srcNode.isDecommissionInProgress());
}
  /** @return number of nodes waiting to enter tracked decommissioning. */
  @VisibleForTesting
  public int getNumPendingNodes() {
    return pendingNodes.size();
  }
  /** @return number of nodes currently tracked in decomNodeBlocks. */
  @VisibleForTesting
  public int getNumTrackedNodes() {
    return decomNodeBlocks.size();
  }
  /**
   * @return nodes checked in the current/most recent monitor tick.
   * NOTE(review): throws NullPointerException if called before activate()
   * has constructed the monitor -- confirm callers respect that ordering.
   */
  @VisibleForTesting
  public int getNumNodesChecked() {
    return monitor.numNodesChecked;
  }
/**
* Checks to see if DNs have finished decommissioning.
* <p/>
* Since this is done while holding the namesystem lock,
* the amount of work per monitor tick is limited.
*/
private class Monitor implements Runnable {
    /**
     * The maximum number of blocks to check per tick.
     */
    private final int numBlocksPerCheck;
    /**
     * The maximum number of nodes to check per tick.
     */
    private final int numNodesPerCheck;
    /**
     * The maximum number of nodes to track in decomNodeBlocks. A value of 0
     * means no limit.
     */
    private final int maxConcurrentTrackedNodes;
    /**
     * The number of blocks that have been checked on this tick.
     */
    private int numBlocksChecked = 0;
    /**
     * The number of blocks checked after (re)holding lock.
     */
    private int numBlocksCheckedPerLock = 0;
    /**
     * The number of nodes that have been checked on this tick. Used for
     * testing.
     */
    private int numNodesChecked = 0;
    /**
     * The last datanode in decomNodeBlocks that we've processed; used as the
     * resume key for the cyclic scan.  The empty DatanodeID is a sentinel
     * sorting before all real nodes.
     */
    private DatanodeDescriptor iterkey = new DatanodeDescriptor(new
        DatanodeID("", "", "", 0, 0, 0, 0));
    /**
     * @param numBlocksPerCheck max blocks scanned per tick
     * @param numNodesPerCheck max nodes scanned per tick (deprecated limit)
     * @param maxConcurrentTrackedNodes cap on tracked nodes (0 = no limit)
     */
    Monitor(int numBlocksPerCheck, int numNodesPerCheck, int
        maxConcurrentTrackedNodes) {
      this.numBlocksPerCheck = numBlocksPerCheck;
      this.numNodesPerCheck = numNodesPerCheck;
      this.maxConcurrentTrackedNodes = maxConcurrentTrackedNodes;
    }
    /** @return true once this tick's block budget has been spent. */
    private boolean exceededNumBlocksPerCheck() {
      LOG.trace("Processed {} blocks so far this tick", numBlocksChecked);
      return numBlocksChecked >= numBlocksPerCheck;
    }
@Deprecated
private boolean exceededNumNodesPerCheck() {
LOG.trace("Processed {} nodes so far this tick", numNodesChecked);
return numNodesChecked >= numNodesPerCheck;
}
@Override
public void run() {
if (!namesystem.isRunning()) {
LOG.info("Namesystem is not running, skipping decommissioning checks"
+ ".");
return;
}
// Reset the checked count at beginning of each iteration
numBlocksChecked = 0;
numBlocksCheckedPerLock = 0;
numNodesChecked = 0;
// Check decom progress
namesystem.writeLock();
try {
processPendingNodes();
check();
} finally {
namesystem.writeUnlock();
}
if (numBlocksChecked + numNodesChecked > 0) {
LOG.info("Checked {} blocks and {} nodes this tick", numBlocksChecked,
numNodesChecked);
}
}
/**
* Pop datanodes off the pending list and into decomNodeBlocks,
* subject to the maxConcurrentTrackedNodes limit.
*/
private void processPendingNodes() {
while (!pendingNodes.isEmpty() &&
(maxConcurrentTrackedNodes == 0 ||
decomNodeBlocks.size() < maxConcurrentTrackedNodes)) {
decomNodeBlocks.put(pendingNodes.poll(), null);
}
}
private void check() {
final Iterator<Map.Entry<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>>
it = new CyclicIteration<>(decomNodeBlocks, iterkey).iterator();
final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
while (it.hasNext()
&& !exceededNumBlocksPerCheck()
&& !exceededNumNodesPerCheck()
&& namesystem.isRunning()) {
numNodesChecked++;
final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfoContiguous>>
entry = it.next();
final DatanodeDescriptor dn = entry.getKey();
AbstractList<BlockInfoContiguous> blocks = entry.getValue();
boolean fullScan = false;
if (blocks == null) {
// This is a newly added datanode, run through its list to schedule
// under-replicated blocks for replication and collect the blocks
// that are insufficiently replicated for further tracking
LOG.debug("Newly-added node {}, doing full scan to find " +
"insufficiently-replicated blocks.", dn);
blocks = handleInsufficientlyReplicated(dn);
decomNodeBlocks.put(dn, blocks);
fullScan = true;
} else {
// This is a known datanode, check if its # of insufficiently
// replicated blocks has dropped to zero and if it can be decommed
LOG.debug("Processing decommission-in-progress node {}", dn);
pruneSufficientlyReplicated(dn, blocks);
}
if (blocks.size() == 0) {
if (!fullScan) {
// If we didn't just do a full scan, need to re-check with the
// full block map.
//
// We've replicated all the known insufficiently replicated
// blocks. Re-check with the full block map before finally
// marking the datanode as decommissioned
LOG.debug("Node {} has finished replicating current set of "
+ "blocks, checking with the full block map.", dn);
blocks = handleInsufficientlyReplicated(dn);
decomNodeBlocks.put(dn, blocks);
}
// If the full scan is clean AND the node liveness is okay,
// we can finally mark as decommissioned.
final boolean isHealthy =
blockManager.isNodeHealthyForDecommission(dn);
if (blocks.size() == 0 && isHealthy) {
setDecommissioned(dn);
toRemove.add(dn);
LOG.debug("Node {} is sufficiently replicated and healthy, "
+ "marked as decommissioned.", dn);
} else {
if (LOG.isDebugEnabled()) {
StringBuilder b = new StringBuilder("Node {} ");
if (isHealthy) {
b.append("is ");
} else {
b.append("isn't ");
}
b.append("healthy and still needs to replicate {} more blocks," +
" decommissioning is still in progress.");
LOG.debug(b.toString(), dn, blocks.size());
}
}
} else {
LOG.debug("Node {} still has {} blocks to replicate "
+ "before it is a candidate to finish decommissioning.",
dn, blocks.size());
}
iterkey = dn;
}
// Remove the datanodes that are decommissioned
for (DatanodeDescriptor dn : toRemove) {
Preconditions.checkState(dn.isDecommissioned(),
"Removing a node that is not yet decommissioned!");
decomNodeBlocks.remove(dn);
}
}
/**
* Removes sufficiently replicated blocks from the block list of a
* datanode.
*/
private void pruneSufficientlyReplicated(final DatanodeDescriptor datanode,
AbstractList<BlockInfoContiguous> blocks) {
processBlocksForDecomInternal(datanode, blocks.iterator(), null, true);
}
/**
* Returns a list of blocks on a datanode that are insufficiently
* replicated, i.e. are under-replicated enough to prevent decommission.
* <p/>
* As part of this, it also schedules replication work for
* any under-replicated blocks.
*
* @param datanode
* @return List of insufficiently replicated blocks
*/
private AbstractList<BlockInfoContiguous> handleInsufficientlyReplicated(
final DatanodeDescriptor datanode) {
AbstractList<BlockInfoContiguous> insufficient = new ChunkedArrayList<>();
processBlocksForDecomInternal(datanode, datanode.getBlockIterator(),
insufficient, false);
return insufficient;
}
/**
* Used while checking if decommission-in-progress datanodes can be marked
* as decommissioned. Combines shared logic of
* pruneSufficientlyReplicated and handleInsufficientlyReplicated.
*
* @param datanode Datanode
* @param it Iterator over the blocks on the
* datanode
* @param insufficientlyReplicated Return parameter. If it's not null,
* will contain the insufficiently
* replicated-blocks from the list.
* @param pruneSufficientlyReplicated whether to remove sufficiently
* replicated blocks from the iterator
* @return true if there are under-replicated blocks in the provided block
* iterator, else false.
*/
private void processBlocksForDecomInternal(
final DatanodeDescriptor datanode,
final Iterator<BlockInfoContiguous> it,
final List<BlockInfoContiguous> insufficientlyReplicated,
boolean pruneSufficientlyReplicated) {
boolean firstReplicationLog = true;
int underReplicatedBlocks = 0;
int decommissionOnlyReplicas = 0;
int underReplicatedInOpenFiles = 0;
while (it.hasNext()) {
if (insufficientlyReplicated == null
&& numBlocksCheckedPerLock >= numBlocksPerCheck) {
// During fullscan insufficientlyReplicated will NOT be null, iterator
// will be DN's iterator. So should not yield lock, otherwise
// ConcurrentModificationException could occur.
// Once the fullscan done, iterator will be a copy. So can yield the
// lock.
// Yielding is required in case of block number is greater than the
// configured per-iteration-limit.
namesystem.writeUnlock();
try {
LOG.debug("Yielded lock during decommission check");
Thread.sleep(0, 500);
} catch (InterruptedException ignored) {
return;
}
// reset
numBlocksCheckedPerLock = 0;
namesystem.writeLock();
}
numBlocksChecked++;
numBlocksCheckedPerLock++;
final BlockInfoContiguous block = it.next();
// Remove the block from the list if it's no longer in the block map,
// e.g. the containing file has been deleted
if (blockManager.blocksMap.getStoredBlock(block) == null) {
LOG.trace("Removing unknown block {}", block);
it.remove();
continue;
}
BlockCollection bc = blockManager.blocksMap.getBlockCollection(block);
if (bc == null) {
// Orphan block, will be invalidated eventually. Skip.
continue;
}
final NumberReplicas num = blockManager.countNodes(block);
final int liveReplicas = num.liveReplicas();
final int curReplicas = liveReplicas;
// Schedule under-replicated blocks for replication if not already
// pending
if (blockManager.isNeededReplication(block, bc.getBlockReplication(),
liveReplicas)) {
if (!blockManager.neededReplications.contains(block) &&
blockManager.pendingReplications.getNumReplicas(block) == 0 &&
namesystem.isPopulatingReplQueues()) {
// Process these blocks only when active NN is out of safe mode.
blockManager.neededReplications.add(block,
liveReplicas, num.readOnlyReplicas(),
num.decommissionedAndDecommissioning(),
bc.getBlockReplication());
}
}
// Even if the block is under-replicated,
// it doesn't block decommission if it's sufficiently replicated
if (isSufficientlyReplicated(block, bc, num)) {
if (pruneSufficientlyReplicated) {
it.remove();
}
continue;
}
// We've found an insufficiently replicated block.
if (insufficientlyReplicated != null) {
insufficientlyReplicated.add(block);
}
// Log if this is our first time through
if (firstReplicationLog) {
logBlockReplicationInfo(block, bc, datanode, num,
blockManager.blocksMap.getStorages(block));
firstReplicationLog = false;
}
// Update various counts
underReplicatedBlocks++;
if (bc.isUnderConstruction()) {
underReplicatedInOpenFiles++;
}
if ((curReplicas == 0) && (num.decommissionedAndDecommissioning() > 0)) {
decommissionOnlyReplicas++;
}
}
datanode.decommissioningStatus.set(underReplicatedBlocks,
decommissionOnlyReplicas,
underReplicatedInOpenFiles);
}
}
/**
 * Runs one monitor pass on the executor and blocks until it completes.
 * Testing hook only.
 *
 * @throws ExecutionException if the monitor pass throws
 * @throws InterruptedException if interrupted while waiting for the pass
 */
@VisibleForTesting
void runMonitor() throws ExecutionException, InterruptedException {
    final Future<?> pass = executor.submit(monitor);
    pass.get();
}
}
|
#!/usr/bin/env bash
# Activate the project virtualenv and run hyper-parameter tuning for the
# netflix dataset, sweep part 91.
source ~/ENV/bin/activate
cd ~/MultiModesPreferenceEstimation

PART="mmp-part91"
python tune_parameters.py \
    --data-dir data/netflix/ \
    --save-path "netflix/tuning_general/${PART}.csv" \
    --parameters "config/netflix/${PART}.yml"
|
<reponame>sjdodge123/interstellar-online
"use strict"; // Fix: the directive previously sat mid-file (after the
// declarations below), where it has no effect; it must be the first
// statement of the module to enable strict mode.
var express = require('express')
  , http = require('http');
var app = express();
var path = require('path');
app.use(express.static(path.join(__dirname, 'public')));
var server = http.createServer(app);
var io = require('socket.io').listen(server);
var objects = require("./public/spaceObjects.js");
// Per-connection bookkeeping: clientID is a monotonically increasing counter;
// client maps socket.id -> {clientID, shipID, weaponID}.
var clientID = 0;
var client = {};
// Flat slot table holding every live game object (ships, cannons, bullets);
// a null entry is a free slot.
var gameObjectList = [];
var debugEnabled = true;
var maxObjects = 20000; // fixed: missing statement terminator
// Console logger that can be silenced by flipping debugEnabled.
var debug = {
  log: function (string) {
    if (debugEnabled) {
      console.log(string);
    }
  }
};
console.log("Server Starting..");
// On Ctrl-C, tell every connected client the server is going away before
// terminating the process.
process.on('SIGINT', function () {
  console.log("\nServer shutting down from (Ctrl-C)");
  var sockets = io.sockets.sockets;
  for (var socketId in sockets) {
    sockets[socketId].emit("serverShutdown", "Server terminated");
  }
  process.exit();
});
// Per-connection lifecycle: register the client, spawn its ship + cannon,
// replay the current board state to the newcomer, then wire up the
// per-socket gameplay events.
io.on('connection', function(socket){
client[socket.id] = {clientID:clientID};
debug.log('User '+ client[socket.id].clientID + ' has joined. From ' + socket.handshake.address);
var newShip = createNewShip(socket);
client[socket.id].shipID = newShip.ID;
client[socket.id].weaponID = newShip.weapon.ID;
debug.log('New ship ID : '+client[socket.id].shipID);
debug.log('New weapon ID : '+client[socket.id].weaponID);
var sendArray = [];
debug.log('Preparing to send game board state to User '+ client[socket.id].clientID);
// Snapshot every existing ship so the new client can rebuild the board;
// cannons are skipped because their IDs travel with the owning ship.
for(var i=0;i<gameObjectList.length;i+=1) {
if(gameObjectList[i]!=null&& gameObjectList[i].type !='Cannon') {
if(gameObjectList[i].type =='ShipObject'){
debug.log('Sending ' + gameObjectList[i].type + ' located at '+ i + ' - ID: ' +gameObjectList[i].ID+ ' WID: '+gameObjectList[i].weaponID);
sendArray.push({type:gameObjectList[i].type,ID:gameObjectList[i].ID,weaponID:gameObjectList[i].weaponID});
}
}
}
debug.log('Sent '+ sendArray.length + ' items to User ' +client[socket.id].clientID+'\n');
socket.emit('spawnBoardObjects',sendArray);
socket.emit('spawnMyShip',{shipIndex:newShip.ID,weaponIndex:newShip.weaponID});
// NOTE(review): 'newShipArrvied' looks like a typo of 'newShipArrived', but
// clients must listen on the identical event name -- confirm before renaming.
socket.broadcast.emit('newShipArrvied',{shipIndex:newShip.ID,weaponIndex:newShip.weaponID});
clientID += 1;
// Apply the client's position/orientation update and relay it to everyone else.
socket.on('movement', function(ship){
gameObjectList[ship.ID].x = ship.x;
gameObjectList[ship.ID].y = ship.y;
gameObjectList[ship.ID].angle = ship.angle;
gameObjectList[ship.ID].weapon.angle = ship.weapon.angle;
socket.broadcast.emit('movement',{
index:ship.ID,
x:ship.x,
y:ship.y,
angle:ship.angle,
weaponID:ship.weapon.ID,
weaponAngle:ship.weapon.angle
});
});
// Spawn a server-side bullet and echo it to the shooter and all other clients.
socket.on('shotFired',function(shipID){
var newBullet = createNewBullet(shipID);
debug.log("bulletFired stored at: " + newBullet.ID);
socket.emit('bulletFired',newBullet);
socket.broadcast.emit('bulletFired',newBullet);
});
// Free the ship and cannon slots (null marks a slot reusable) and notify
// the remaining players.
socket.on('disconnect', function() {
socket.broadcast.emit('player has left',client[socket.id].shipID);
gameObjectList[client[socket.id].shipID] = null;
debug.log('Cleaning up object at storage location '+client[socket.id].shipID);
gameObjectList[client[socket.id].weaponID] = null;
debug.log('Cleaning up object at storage location '+client[socket.id].weaponID);
debug.log('User '+ client[socket.id].clientID + ' disconnected.');
});
});
// Spawn a bullet at the firing ship's position, inheriting a boosted copy of
// the ship's velocity, and register it in the object table.
function createNewBullet(shipID) {
  var ship = gameObjectList[shipID];
  var nextBullet = new objects.createBulletObject(
    ship.x, ship.y, ship.weapon.angle,
    ship.velx + 1, ship.vely + 1, ship.weapon.ID);
  debug.log("Ships current angle" + ship.angle);
  var slot = findEmptySlot();
  nextBullet.ID = slot;
  gameObjectList[slot] = nextBullet;
  return nextBullet;
}
// Build a ship plus its cannon, store both in the object table, and link them
// by ID so clients can address either one. Returns the new ship.
function createNewShip(socket) {
  // Fix (idiom): constructor calls now use explicit parentheses; `new F` and
  // `new F()` are equivalent, but the bare form reads like a property access.
  var nextShip = new objects.createShipObject();
  nextShip.ID = findEmptySlot();
  gameObjectList[nextShip.ID] = nextShip;
  debug.log('Storing User '+client[socket.id].clientID+"'s ship at "+nextShip.ID);
  nextShip.weapon = new objects.createCannonObject();
  nextShip.weapon.ID = findEmptySlot();
  nextShip.weaponID = nextShip.weapon.ID;
  gameObjectList[nextShip.weaponID] = nextShip.weapon;
  debug.log('Storing User '+client[socket.id].clientID+"'s weapon at "+gameObjectList[nextShip.ID].weaponID);
  return nextShip;
}
// Pre-fill the object table with maxObjects empty (null) slots so object IDs
// are stable array indices from the start.
function setMaxPlayers() {
  var slot = 0;
  while (slot < maxObjects) {
    gameObjectList.push(null);
    slot += 1;
  }
}
// Return the index of the first reusable (null/undefined) slot in the
// object table.
function findEmptySlot() {
  for (var i = 0; i < gameObjectList.length; i += 1) {
    if (gameObjectList[i] == null) { // == matches both null and undefined
      return i;
    }
  }
  // Fix: previously returned undefined when no slot was free, which made
  // callers write to gameObjectList["undefined"]. Appending at the end keeps
  // the returned value a valid numeric index in every case.
  return gameObjectList.length;
}
// Intentionally empty: placeholder for a future server-driven movement
// broadcast loop. Movement relay currently happens directly in the
// per-socket 'movement' handler above.
function sendMovements(){
}
// Bind the HTTP + socket.io server on port 3000; the slot table must be
// pre-filled before the first client connects.
server.listen(3000, function(){
setMaxPlayers();
console.log('listening on *:3000');
});
|
package org.multibit.hd.ui.views.wizards.language_settings;
import com.google.common.base.Optional;
import org.multibit.hd.ui.views.wizards.AbstractWizard;
import org.multibit.hd.ui.views.wizards.AbstractWizardPanelView;
import java.util.Map;
/**
 * <p>Wizard to provide the following to UI for the "language settings" wizard:</p>
 * <ol>
 * <li>Enter details</li>
 * </ol>
 *
 * @since 0.0.1
 *
 */
public class LanguageSettingsWizard extends AbstractWizard<LanguageSettingsWizardModel> {
/**
 * @param model the wizard model backing this wizard; the wizard is created
 *              non-exiting and with no starting-panel override
 */
public LanguageSettingsWizard(LanguageSettingsWizardModel model) {
super(model, false, Optional.absent());
}
@Override
protected void populateWizardViewMap(Map<String, AbstractWizardPanelView> wizardViewMap) {
// Use the wizard parameter to retrieve the appropriate mode
// Single-panel wizard: only the "enter details" language selection view.
wizardViewMap.put(
LanguageSettingsState.LANGUAGE_ENTER_DETAILS.name(),
new LanguageSettingsPanelView(this, LanguageSettingsState.LANGUAGE_ENTER_DETAILS.name())
);
}
}
|
"use strict";

// CommonJS interop flag so ES-module consumers detect named exports.
Object.defineProperty(exports, "__esModule", { value: true });
exports.u1F4B4 = void 0;

// Icon descriptor for U+1F4B4: a single SVG path inside a fixed viewBox.
var u1F4B4 = {
  "viewBox": "0 0 2600 2760.837",
  "children": [
    {
      "name": "path",
      "attribs": {
        "d": "M2373 764q25 0 42 17t17 42v1192q0 25-17 42t-42 17H226q-23 0-41-17t-18-42V823q0-25 18-42t41-17h2147zm-59 118H286v1074h2028V882zM438 1094q-2-4-2-8 0-7 5-11.5t11-4.5h143q10 0 13 7l158 242 156-242q6-7 14-7h144q5 0 10 3.5t5 11.5q0 2-1 4l-2 5-167 267h126q12 0 12 13v83q0 13-12 13H858v53h193q5 0 8.5 3.5t3.5 8.5v85q0 13-12 13H858v142q0 6-4.5 10.5T841 1790H689q-6 0-10.5-4.5T674 1775v-142H480q-13 0-13-13v-85q0-5 4-8.5t9-3.5h194v-53H480q-13 0-13-13v-83q0-13 13-13h126z"
      },
      "children": []
    }
  ]
};
exports.u1F4B4 = u1F4B4;
|
public class MyClass {
// NOTE(review): the field names (bReset, Opl, Opr1) and the PascalCase method
// name break Java conventions; kept as-is since external callers may depend
// on this interface.
private boolean bReset;
private Object Opl;
private Object Opr1;
/**
 * Resets members on Opl (via SomeClass.ResetMembers), OR-accumulating the
 * result into bReset, then reports whether Opr1 is present.
 *
 * @return true if Opr1 is non-null
 */
public boolean ProcessAndCheck(boolean check, String agentType, boolean clear, String method, String property) {
// Unchecked cast: assumes Opl always holds a SomeClass -- TODO confirm invariant.
bReset |= ((SomeClass) Opl).ResetMembers(check, agentType, clear, method, property);
return Opr1 != null;
}
}
|
#!/bin/bash
# Fix: shebang changed from /bin/sh to /bin/bash. This script uses bash-only
# features -- "set -o pipefail", the "function" keyword, the ERR trap, arrays
# (RSYNC_PROTECT_TMP_FILES) and "[[" below -- which fail under a strictly
# POSIX /bin/sh such as dash.
set -e
set -u
set -o pipefail

# Report the script path and the line number of the command that tripped ERR.
function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
BCSYMBOLMAP_DIR="BCSymbolMaps"

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
# Resolve the framework source: prefer the build-products copy, then the
# basename inside build products, then the literal argument.
# NOTE(review): if none of the three candidates is readable, "source" is
# never declared and the first use below aborts under "set -u" -- confirm
# this is the intended failure mode.
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then
# Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied
find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do
echo "Installing $f"
install_bcsymbolmap "$f" "$destination"
rm "$f"
done
rmdir "${source}/${BCSYMBOLMAP_DIR}"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
# Locate the framework's main binary; fall back to the bare basename and
# resolve a symlinked binary to its target.
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM.
# $1 = path to the .dSYM bundle; $2 (optional, default true) = whether to warn
# when no build architecture is present in the binary.
install_dsym() {
  local source="$1"
  warn_missing_arch=${2:-true}
  if [ -r "$source" ]; then
    # Copy the dSYM into the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
    local basename
    basename="$(basename -s .dSYM "$source")"
    binary_name="$(ls "$source/Contents/Resources/DWARF")"
    binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"
    # Strip invalid architectures from the dSYM.
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary" "$warn_missing_arch"
    fi
    if [[ $STRIP_BINARY_RETVAL == 0 ]]; then
      # Move the stripped file into its final destination.
      # Fix: the echoed command previously logged "${basename}.framework.dSYM"
      # while the executed rsync used "${basename}.dSYM"; the log now matches
      # the command that actually runs.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
    fi
  fi
}
# Used as a return value for each invocation of `strip_invalid_archs` function.
# 0 = binary was stripped (or needed no stripping); 1 = no matching arch found.
STRIP_BINARY_RETVAL=0
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
warn_missing_arch=${2:-true}
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
if [[ "$warn_missing_arch" == "true" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
fi
STRIP_BINARY_RETVAL=1
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=0
}
# Copies the bcsymbolmap files of a vendored framework into BUILT_PRODUCTS_DIR.
install_bcsymbolmap() {
    local bcsymbolmap_path="$1"
    local destination="${BUILT_PRODUCTS_DIR}"
    # Consistency fix: the inner quotes of the echoed command are now escaped,
    # matching the sibling install_* functions; previously they terminated the
    # outer string and the logged text was word-split.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
# When parallel signing is enabled, background the command; the end of the
# script is expected to "wait" for all signing jobs.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
# eval is needed so the shell interprets the optional trailing "&".
eval "$code_sign_cmd"
fi
}
# Install the vendored frameworks; the framework set is identical for the
# Debug and Release configurations, so the two branches are merged.
if [[ "$CONFIGURATION" == "Debug" || "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/SwiftPromise/SwiftPromise.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/TaskQueue/TaskQueue.framework"
fi
# Wait for any backgrounded parallel code-sign jobs before exiting.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
package mindustry.core;
import java.util.Iterator;
import arc.ApplicationListener;
import arc.Events;
import arc.ai.GdxAI;
import arc.ai.msg.MessageManager;
import arc.util.Time;
import arc.util.Tmp;
import mindustry.annotations.Annotations.Loc;
import mindustry.annotations.Annotations.Remote;
import mindustry.content.Fx;
import mindustry.core.GameState.State;
import mindustry.ctype.UnlockableContent;
import mindustry.entities.Effects;
import mindustry.entities.type.Player;
import mindustry.entities.type.TileEntity;
import mindustry.game.EventType.BlockBuildEndEvent;
import mindustry.game.EventType.BlockDestroyEvent;
import mindustry.game.EventType.GameOverEvent;
import mindustry.game.EventType.LaunchEvent;
import mindustry.game.EventType.LaunchItemEvent;
import mindustry.game.EventType.PlayEvent;
import mindustry.game.EventType.ResetEvent;
import mindustry.game.EventType.Trigger;
import mindustry.game.EventType.WaveEvent;
import mindustry.game.Rules;
import mindustry.game.Stats;
import mindustry.game.Team;
import mindustry.game.Teams;
import mindustry.game.Teams.BrokenBlock;
import mindustry.game.Teams.TeamData;
import mindustry.gen.Call;
import mindustry.type.Item;
import mindustry.type.ItemStack;
import mindustry.world.Block;
import mindustry.world.Tile;
import mindustry.world.blocks.BuildBlock;
import mindustry.world.blocks.BuildBlock.BuildEntity;
import static mindustry.Vars.blockunitGroup;
import static mindustry.Vars.bulletGroup;
import static mindustry.Vars.collisions;
import static mindustry.Vars.content;
import static mindustry.Vars.data;
import static mindustry.Vars.effectGroup;
import static mindustry.Vars.entities;
import static mindustry.Vars.fireGroup;
import static mindustry.Vars.groundEffectGroup;
import static mindustry.Vars.headless;
import static mindustry.Vars.net;
import static mindustry.Vars.netClient;
import static mindustry.Vars.playerGroup;
import static mindustry.Vars.puddleGroup;
import static mindustry.Vars.shieldGroup;
import static mindustry.Vars.spawner;
import static mindustry.Vars.state;
import static mindustry.Vars.systemStrategy;
import static mindustry.Vars.systemTroops;
import static mindustry.Vars.systemWorker;
import static mindustry.Vars.tileGroup;
import static mindustry.Vars.ui;
import static mindustry.Vars.unitGroup;
import static mindustry.Vars.world;
import static z.debug.ZDebug.disable_mindustryEndCheck;
import static z.debug.ZDebug.disable_mindustryPlayer;
import static z.debug.ZDebug.enable_isoInput;
/**
 * Logic module.
 * Handles all logic for entities and waves.
 * Handles game state events.
 * Does not store any game state itself.
 * <p>
 * This class should <i>not</i> call any outside methods to change state of modules, but instead fire events.
 */
public class Logic implements ApplicationListener{
public Logic(){
// Each wave: refresh every player's respawn budget and, in zone (campaign)
// maps, record wave progress on the zone.
Events.on(WaveEvent.class, event -> {
for(Player p : playerGroup.all()){
p.respawns = state.rules.respawns;
}
if(world.isZone()){
world.getZone().updateWave(state.wave);
}
});
Events.on(BlockDestroyEvent.class, event -> {
//blocks that get broken are appended to the team's broken block queue
Tile tile = event.tile;
Block block = tile.block();
//skip null entities or un-rebuildables, for obvious reasons; also skip client since they can't modify these requests
if(tile.entity == null || !tile.block().rebuildable || net.client()) return;
if(block instanceof BuildBlock){
BuildEntity entity = tile.ent();
//update block to reflect the fact that something was being constructed
if(entity.cblock != null && entity.cblock.synthetic()){
block = entity.cblock;
}else{
//otherwise this was a deconstruction that was interrupted, don't want to rebuild that
return;
}
}
TeamData data = state.teams.get(tile.getTeam());
//remove existing blocks that have been placed here.
//painful O(n) iteration + copy
for(int i = 0; i < data.brokenBlocks.size; i++){
BrokenBlock b = data.brokenBlocks.get(i);
if(b.x == tile.x && b.y == tile.y){
data.brokenBlocks.removeIndex(i);
break;
}
}
data.brokenBlocks.addFirst(new BrokenBlock(tile.x, tile.y, tile.rotation(), block.id, tile.entity.config()));
});
// When a block finishes building, drop any queued rebuild requests whose
// footprint overlaps the newly placed block.
Events.on(BlockBuildEndEvent.class, event -> {
if(!event.breaking){
TeamData data = state.teams.get(event.team);
Iterator<BrokenBlock> it = data.brokenBlocks.iterator();
while(it.hasNext()){
BrokenBlock b = it.next();
Block block = content.block(b.block);
if(event.tile.block().bounds(event.tile.x, event.tile.y, Tmp.r1).overlaps(block.bounds(b.x, b.y, Tmp.r2))){
it.remove();
}
}
}
});
}
/** Handles the event of content being used by either the player or some block. */
public void handleContent(UnlockableContent content){
if(!headless){
data.unlockContent(content);
}
}
/** Switches to the playing state and seeds each active team's core with the loadout items. */
public void play(){
state.set(State.playing);
state.wavetime = state.rules.waveSpacing * 2; //grace period of 2x wave time before game starts
Events.fire(new PlayEvent());
//add starting items
if(!world.isZone()){
for(TeamData team : state.teams.getActive()){
if(team.hasCore()){
TileEntity entity = team.core();
entity.items.clear();
for(ItemStack stack : state.rules.loadout){
entity.items.add(stack.item, stack.amount);
}
}
}
}
}
/** Resets all mutable game state (wave counters, teams, rules, stats, entities, timers) to defaults. */
public void reset(){
state.wave = 1;
state.wavetime = state.rules.waveSpacing;
state.gameOver = state.launched = false;
state.teams = new Teams();
state.rules = new Rules();
state.stats = new Stats();
entities.clear();
Time.clear();
TileEntity.sleepingEntities = 0;
Events.fire(new ResetEvent());
}
/** Runs one wave: spawns enemies, advances the wave counter and resets the wave timer. */
public void runWave(){
spawner.spawnEnemies();
state.wave++;
state.wavetime = world.isZone() && world.getZone().isLaunchWave(state.wave) ? state.rules.waveSpacing * state.rules.launchWaveMultiplier : state.rules.waveSpacing;
Events.fire(new WaveEvent());
}
// Fires GameOverEvent when the losing condition is met: in survival, when the
// player team has no cores; in attack mode, when exactly one team remains.
private void checkGameOver(){
if (disable_mindustryEndCheck) {
return;
}
if(!state.rules.attackMode && state.teams.playerCores().size == 0 && !state.gameOver){
state.gameOver = true;
Events.fire(new GameOverEvent(state.rules.waveTeam));
}else if(state.rules.attackMode){
Team alive = null;
for(TeamData team : state.teams.getActive()){
if(team.hasCore()){
if(alive != null){
return;
}
alive = team.team;
}
}
if(alive != null && !state.gameOver){
if(world.isZone() && alive == state.rules.defaultTeam){
//in attack maps, a victorious game over is equivalent to a launch
Call.launchZone();
}else{
Events.fire(new GameOverEvent(alive));
}
state.gameOver = true;
}
}
}
/** Launches the current zone: plays effects, banks core items, removes cores, then ends the game. */
@Remote(called = Loc.both)
public static void launchZone(){
if(!headless){
ui.hudfrag.showLaunch();
}
for(TileEntity tile : state.teams.playerCores()){
Effects.effect(Fx.launch, tile);
}
if(world.getZone() != null){
world.getZone().setLaunched();
}
Time.runTask(30f, () -> {
for(TileEntity entity : state.teams.playerCores()){
for(Item item : content.items()){
data.addItem(item, entity.items.get(item));
Events.fire(new LaunchItemEvent(item, entity.items.get(item)));
}
entity.tile.remove();
}
state.launched = true;
state.gameOver = true;
Events.fire(new LaunchEvent());
//manually fire game over event now
Events.fire(new GameOverEvent(state.rules.defaultTeam));
});
}
/** Records the final wave count and shows the restart dialog for the winning team. */
@Remote(called = Loc.both)
public static void onGameOver(Team winner){
state.stats.wavesLasted = state.wave;
ui.restart.show(winner);
netClient.setQuiet();
}
@Override
public void update(){
Events.fire(Trigger.update);
if(!state.is(State.menu)){
if(!net.client()){
state.enemies = unitGroup.count(b -> b.getTeam() == state.rules.waveTeam && b.countsAsEnemy());
}
if(!state.isPaused()){
Time.update();
if(state.rules.waves && state.rules.waveTimer && !state.gameOver){
if(!state.rules.waitForWaveToEnd || state.enemies == 0){
state.wavetime = Math.max(state.wavetime - Time.delta(), 0);
}
}
if(!net.client() && state.wavetime <= 0 && state.rules.waves){
runWave();
}
if(!headless){
effectGroup.update();
groundEffectGroup.update();
}
if(!state.isEditor()){
unitGroup.update();
if (enable_isoInput) {
blockunitGroup.update();
} // zones add code
puddleGroup.update();
shieldGroup.update();
bulletGroup.update();
tileGroup.update();
fireGroup.update();
}else{
// Editor mode: only run event/physics bookkeeping, not full updates.
unitGroup.updateEvents();
collisions.updatePhysics(unitGroup);
if (enable_isoInput) {
blockunitGroup.updateEvents();
collisions.updatePhysics(blockunitGroup);
}
}
playerGroup.update();
//effect group only contains item transfers in the headless version, update it!
if(headless){
effectGroup.update();
}
if(!state.isEditor()){
//bulletGroup
collisions.collideGroups(bulletGroup, unitGroup);
if ( !disable_mindustryPlayer) {
collisions.collideGroups(bulletGroup, playerGroup);
}
if (enable_isoInput) {
collisions.collideGroups(bulletGroup, blockunitGroup);
}
}
{ // Update the zones extension systems
// Update the extension AI modules
GdxAI.getTimepiece().update(Time.delta()); // in use
MessageManager.getInstance().update();
systemWorker.updateSystem();
systemTroops.updateSystem();
systemStrategy.updateSystem();
}
}
if(!net.client() && !world.isInvalidMap() && !state.isEditor() && state.rules.canGameOver){
checkGameOver();
}
}
}
}
|
<gh_stars>1-10
package net.runelite.api;
/**
* @author Kris | 10/12/2021
*/
public interface MoveSpeed {
    /** The movement speed value for this entity. */
    byte speed();
}
|
#!/bin/sh
# Boot script for the director-v2 service: optionally installs dev
# requirements, then starts uvicorn either under the debugger or normally.
set -o errexit
set -o nounset

# Restrict word splitting to newline/tab for safer expansions.
IFS=$(printf '\n\t')

INFO="INFO: [$(basename "$0")] "

echo "$INFO" "Booting in ${SC_BOOT_MODE} mode ..."
echo "$INFO" "User :$(id "$(whoami)")"
echo "$INFO" "Workdir : $(pwd)"

#
# DEVELOPMENT MODE
#
# - prints environ info
# - installs requirements in mounted volume
#
if [ "${SC_BUILD_TARGET}" = "development" ]; then
  echo "$INFO" "Environment :"
  printenv | sed 's/=/: /' | sed 's/^/ /' | sort
  echo "$INFO" "Python :"
  python --version | sed 's/^/ /'
  command -v python | sed 's/^/ /'
  cd services/director-v2 || exit 1
  pip --quiet --no-cache-dir install -r requirements/dev.txt
  cd - || exit 1
  echo "$INFO" "PIP :"
  pip list | sed 's/^/ /'
fi

#
# RUNNING application
#
if [ "${SC_BOOT_MODE}" = "debug-ptvsd" ]; then
  # Build one "--reload-dir <dir> \" argument per source dir; each echoed line
  # ends in a backslash that continues the sh -c command string below.
  # NOTE(review): relies on those trailing backslashes joining the expanded
  # lines to the final "--reload-dir ." — verify if the find expression changes.
  reload_dir_packages=$(find /devel/packages -maxdepth 3 -type d -path "*/src/*" ! -path "*.*" -exec echo '--reload-dir {} \' \;)
  exec sh -c "
    cd services/director-v2/src/simcore_service_director_v2 && \
    uvicorn main:the_app \
      --host 0.0.0.0 \
      --reload \
      $reload_dir_packages
      --reload-dir .
  "
else
  exec uvicorn simcore_service_director_v2.main:the_app \
    --host 0.0.0.0
fi
|
<reponame>Marmelatze/docker-controller<gh_stars>1-10
package de.schub.marathon_scaler.Customer;
import com.google.gson.Gson;
import de.schub.marathon_scaler.Monitoring.MarathonMonitor;
import mesosphere.marathon.client.model.v2.App;
import mesosphere.marathon.client.model.v2.Group;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Template for a marathon group, which is used for multiple customers.
* Will replace all occurences of {customer_id} with the id of the customer.
* It will also keep settings (cpu, memory, instances) from scaled instances.
*/
/**
 * Template for a marathon group, which is used for multiple customers.
 * Will replace all occurrences of {customer_id} with the id of the customer.
 * It will also keep settings (cpu, memory, instances) from scaled instances.
 */
public class GroupTemplate
{
    /** JSON (de)serializer used to materialize groups from the template text. */
    private final Gson gson;

    /** Raw template JSON containing {customer_id} placeholders. */
    private final String templateJson;

    public GroupTemplate(Gson gson, String templateJson)
    {
        this.gson = gson;
        this.templateJson = templateJson;
    }

    /**
     * create a concrete marathon group from the template
     *
     * @param customer Customer which this template is intended for
     * @param oldGroup current group data from marathon (may be null)
     * @return the new group, with settings merged in from oldGroup
     */
    public Group create(Customer customer, Group oldGroup)
    {
        String json = templateJson.replace("{customer_id}", Integer.toString(customer.getId()));
        Group group = gson.fromJson(json, Group.class);
        group.setId("/customer/" + customer.getId());

        merge(customer, group, oldGroup);

        return group;
    }

    /**
     * merge two groups with all apps to keep instance count, cpu and memory limitations
     *
     * @param customer the customer the groups belong to
     * @param newGroup group freshly created from the template
     * @param oldGroup current group data from marathon (may be null)
     * @return newGroup, with old settings merged in
     */
    private Group merge(Customer customer, Group newGroup, Group oldGroup)
    {
        Map<String, Group> oldGroupMap;
        if (null != oldGroup && null != oldGroup.getGroups()) {
            oldGroupMap = oldGroup.getGroups()
                .stream()
                .collect(Collectors.toMap(Group::getId, group -> group));
        } else {
            oldGroupMap = new HashMap<>();
        }

        // merge sub groups
        // BUGFIX: the previous implementation called Stream.map() without a
        // terminal operation; intermediate stream operations are lazy, so the
        // recursive merge never actually executed. Use a plain loop instead.
        if (null != newGroup.getGroups()) {
            for (Group subGroup : newGroup.getGroups()) {
                merge(customer, subGroup, oldGroupMap.get(subGroup.getId()));
            }
        }

        Map<String, App> appMap;
        if (null != oldGroup && null != oldGroup.getApps()) {
            appMap = oldGroup.getApps()
                .stream()
                .collect(Collectors.toMap(App::getId, app -> app));
        } else {
            appMap = new HashMap<>();
        }

        // merge apps
        if (null != newGroup.getApps()) {
            for (App app : newGroup.getApps()) {
                App oldApp;
                // resolve relative app names ("./name") against the group id
                // (startsWith also avoids StringIndexOutOfBounds on short ids)
                if (app.getId().startsWith("./")) {
                    oldApp = appMap.get(newGroup.getId() + "/" + app.getId().substring(2));
                } else {
                    oldApp = appMap.get(app.getId());
                }

                mergeApp(customer, app, oldApp);
            }
        }

        return newGroup;
    }

    /**
     * Tags the new app with the customer id and carries over runtime settings
     * (instances, cpus, mem) from the currently deployed app, if any.
     *
     * @param customer the owning customer
     * @param newApp app created from the template (mutated in place)
     * @param oldApp currently deployed app, or null if none exists yet
     */
    private void mergeApp(Customer customer, App newApp, App oldApp)
    {
        // add customer id as service tag
        // NOTE(review): assumes the template always defines an env map — verify templates.
        Map<String, String> envVariables = newApp.getEnv();
        if (envVariables.containsKey("SERVICE_TAGS")) {
            envVariables.put("SERVICE_TAGS", envVariables.get("SERVICE_TAGS") + ",customer-" + customer.getId());
        } else {
            envVariables.put("SERVICE_TAGS", "customer-" + customer.getId());
        }

        if (null == oldApp) {
            return;
        }

        // Apps carrying a scaling-strategy label keep the template settings;
        // NOTE(review): confirm intent — the scaler may be expected to manage
        // instance counts for these apps itself.
        if (null != newApp.getLabels() && newApp.getLabels().containsKey(MarathonMonitor.LABEL_SCALING_STRATEGY)) {
            return;
        }

        newApp.setInstances(oldApp.getInstances());
        newApp.setCpus(oldApp.getCpus());
        newApp.setMem(oldApp.getMem());
    }
}
|
import asyncio
import logging
async def delete_table(table_name):
    """Simulate deleting a table.

    Raises ValueError for the special name "error_table"; otherwise
    returns the simulated number of deleted rows (100).
    """
    # Simulated latency of the deletion call.
    await asyncio.sleep(1)
    if table_name == "error_table":
        raise ValueError("Table deletion failed")
    # Simulated number of rows deleted.
    return 100
async def delete_tables(table_names):
    """Delete all named tables concurrently, logging each outcome.

    Raises RuntimeError if any individual deletion failed.
    """
    logger = logging.getLogger('table_deletion')
    # Run all deletions in parallel; capture exceptions instead of failing fast.
    results = await asyncio.gather(
        *(delete_table(name) for name in table_names),
        return_exceptions=True,
    )
    failures = 0
    for name, outcome in zip(table_names, results):
        if isinstance(outcome, Exception):
            logger.info(f"{name}: failed ({outcome})")
            failures += 1
        else:
            logger.info(f"{name}: {outcome:,} rows deleted")
    if failures:
        raise RuntimeError(f"{failures} tables failed")
# Example usage. Guarded so that importing this module does not run the demo
# (the previous module-level call executed at import time and always raised
# RuntimeError, because the example deliberately includes a failing table).
if __name__ == "__main__":
    asyncio.run(delete_tables(["table1", "table2", "error_table", "table3"]))
|
// Generated by script, don't edit it please.
// Wraps the raw SVG glyph in the shared icon-component factory so it renders
// like every other rsuite icon (aria label, category metadata, display name).
import createSvgIcon from '../../createSvgIcon';
import HandORightSvg from '@rsuite/icon-font/lib/legacy/HandORight';
const HandORight = createSvgIcon({
  as: HandORightSvg,
  ariaLabel: 'hand o right',
  category: 'legacy',
  displayName: 'HandORight'
});
export default HandORight;
|
package io.opensphere.wps.ui.detail.bbpicker;
import java.awt.Dialog;
import java.awt.EventQueue;
import java.awt.Window;
import java.text.DecimalFormat;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import javafx.application.Platform;
import javafx.beans.property.SimpleStringProperty;
import javafx.beans.property.StringProperty;
import javafx.collections.ListChangeListener;
import javafx.collections.ObservableList;
import javafx.event.ActionEvent;
import javafx.event.Event;
import javafx.geometry.Insets;
import javafx.scene.control.Button;
import javafx.scene.control.Control;
import javafx.scene.control.Label;
import javafx.scene.control.ListView;
import javafx.scene.control.Skin;
import javafx.scene.control.Tooltip;
import javafx.scene.layout.VBox;
import javafx.scene.paint.Color;
import org.apache.log4j.Logger;
import io.opensphere.core.Toolbox;
import io.opensphere.core.event.EventListener;
import io.opensphere.core.event.RegionEvent;
import io.opensphere.core.event.RegionEvent.RegionEventType;
import io.opensphere.core.model.BoundingBox;
import io.opensphere.core.model.GeographicBoundingBox;
import io.opensphere.core.model.GeographicPosition;
import io.opensphere.core.model.LatLonAlt;
import io.opensphere.core.util.MathUtil;
import io.opensphere.core.util.SelectionMode;
import io.opensphere.core.util.ThreadConfined;
import io.opensphere.core.util.collections.CollectionUtilities;
import io.opensphere.core.util.fx.FXUtilities;
import io.opensphere.core.util.image.IconUtil.IconType;
import io.opensphere.core.util.javafx.input.SimpleSkin;
import io.opensphere.core.util.swing.EventQueueUtilities;
import io.opensphere.overlay.OverlayToolboxUtils;
import io.opensphere.overlay.SelectionModeController;
import io.opensphere.wps.util.WpsUtilities;
import jidefx.scene.control.validation.ValidationEvent;
/**
* A "combo-box-like" component, in which a text field and a button are
* displayed, and through which a bounding box may be drawn on the map.
*/
public class BoundingBoxPicker extends Control
{
    /** The default style. */
    public static final String DEFAULT_STYLE = "-fx-border-style: solid; -fx-border-radius: 3; -fx-border-color: -fx-outer-border;";

    /** The <code>Log</code> instance used for logging. */
    private static final Logger LOG = Logger.getLogger(BoundingBoxPicker.class);

    /**
     * The context for the bounding box. This context is used when usurping the
     * regular selection region bounding box.
     */
    private static final String WPS_USURPATION_CONTEXT = "WPS_USURPATION_CONTEXT";

    /** The toolbox through which application interaction is performed. */
    private final Toolbox myToolbox;

    /** The minimum number of required areas. */
    private final int myMinAreas;

    /** The maximum number of required areas. */
    private final int myMaxAreas;

    /** The Selection mode controller. */
    private final SelectionModeController mySelectionModeController;

    /** The list of bounding boxes. */
    private final ListView<BoundingBox<GeographicPosition>> myListView;

    /** The count label. */
    private final Label myCountLabel;

    /** The add button. */
    private final Button myAddButton;

    /** The node in which the components are rendered. */
    private final VBox myNode;

    /** The region event handler. */
    private final EventListener<? super RegionEvent> myMyRegionEventHandler;

    /**
     * A flag used to determine if the component is currently in map selection
     * mode.
     */
    @ThreadConfined("EDT")
    private boolean myBoundingBoxSelectionMode;

    /** List of visible dialogs. */
    @ThreadConfined("EDT")
    private List<Window> myVisibleDialogs;

    /** The bounding box selected by the user. */
    @ThreadConfined("EventManager-0")
    private BoundingBox<GeographicPosition> mySelectedBoundingBox;

    /** The value of the bounding box. */
    private final StringProperty myValue = new SimpleStringProperty(this, "value");

    /**
     * Creates a new bounding box picker.
     *
     * @param pToolbox the toolbox through which application interaction is
     *            performed.
     * @param minAreas the minimum number of required areas
     * @param maxAreas the maximum number of allowed areas
     */
    @SuppressWarnings("PMD.ConstructorCallsOverridableMethod")
    public BoundingBoxPicker(Toolbox pToolbox, int minAreas, int maxAreas)
    {
        myToolbox = pToolbox;
        myMinAreas = minAreas;
        myMaxAreas = maxAreas;
        mySelectionModeController = OverlayToolboxUtils.getOverlayToolbox(myToolbox).getSelectionModeController();
        myListView = new ListView<>();
        myListView.setCellFactory(param -> new BboxListCell(myListView.getItems()));
        myListView.setPrefHeight(75);
        myCountLabel = new Label(getCountText());
        myAddButton = FXUtilities.newIconButton("Add from Map", IconType.PLUS, Color.GREEN);
        myAddButton.setTooltip(new Tooltip("Add an area from the map"));
        // Button handling runs on the Swing EDT rather than the FX thread.
        myAddButton.setOnAction(e -> EventQueueUtilities.invokeLater(() -> buttonClicked(e)));
        myNode = new VBox(5);
        myNode.setPadding(new Insets(5));
        myNode.getChildren().add(FXUtilities.newHBox(myCountLabel, FXUtilities.newHSpacer(), myAddButton));
        myNode.getChildren().add(myListView);
        getChildren().add(myNode);
        setStyle(DEFAULT_STYLE);
        // Keep the value string, counter, button state and validation in sync
        // with the list of selected areas.
        myListView.getItems().addListener((ListChangeListener<BoundingBox<GeographicPosition>>)change -> handleItemsChange());
        myMyRegionEventHandler = event -> regionSelected(event);
        myToolbox.getEventManager().subscribe(RegionEvent.class, myMyRegionEventHandler);
    }

    /**
     * Gets the value.
     *
     * @return the value
     */
    public StringProperty getValue()
    {
        return myValue;
    }

    /**
     * Fires a validation event.
     */
    public void fireValidationEvent()
    {
        Event.fireEvent(this, new ValidationEvent(isValid() ? ValidationEvent.VALIDATION_OK : ValidationEvent.VALIDATION_ERROR));
    }

    /**
     * Determines if the picker is in a valid state.
     *
     * @return whether the picker is in a valid state
     */
    public boolean isValid()
    {
        // Valid when the number of selected areas is within [myMinAreas, myMaxAreas].
        return MathUtil.between(myListView.getItems().size(), myMinAreas, myMaxAreas);
    }

    /**
     * {@inheritDoc}
     *
     * @see javafx.scene.control.Control#createDefaultSkin()
     */
    @Override
    protected Skin<?> createDefaultSkin()
    {
        return new SimpleSkin(this, myNode);
    }

    /**
     * An event handler method invoked when the button is clicked. Toggles map
     * selection mode: entering it hides any visible dialogs so the map is
     * accessible; clicking again cancels selection.
     *
     * @param pEvent the event fired when the button was clicked.
     */
    protected void buttonClicked(ActionEvent pEvent)
    {
        if (!myBoundingBoxSelectionMode)
        {
            myBoundingBoxSelectionMode = true;
            SelectionMode mode = SelectionMode.BOUNDING_BOX;
            // Take over the region-selection context so our handler receives the box.
            myToolbox.getUIRegistry().getRegionSelectionManager().usurpRegionContext(WPS_USURPATION_CONTEXT, mode);
            mySelectionModeController.setSelectionMode(mode);
            myVisibleDialogs = Arrays.stream(Window.getWindows()).filter(w -> w instanceof Dialog && w.isVisible())
                    .collect(Collectors.toList());
            myVisibleDialogs.forEach(w -> w.setVisible(false));
        }
        else
        {
            disableSelectionMode();
        }
    }

    /**
     * An event handler method called when a region selection is made. The
     * region arrives on the event-manager thread; list updates are marshalled
     * to the FX thread and dialog restoration to the EDT.
     *
     * @param event the event fired when the region was selected.
     */
    protected void regionSelected(RegionEvent event)
    {
        GeographicBoundingBox region = event.getRegion();
        // Save off the region if provided
        if (region != null)
        {
            mySelectedBoundingBox = region;
        }
        BoundingBox<GeographicPosition> boundingBox = mySelectedBoundingBox;
        if (event.getType() == RegionEventType.REGION_COMPLETED && boundingBox != null)
        {
            LOG.info("Region Selected: " + boundingBox);
            mySelectedBoundingBox = null;
            Platform.runLater(() -> myListView.getItems().add(boundingBox));
            EventQueue.invokeLater(() ->
            {
                disableSelectionMode();
                // Restore the dialogs that were hidden when selection mode started.
                if (CollectionUtilities.hasContent(myVisibleDialogs))
                {
                    myVisibleDialogs.forEach(w -> w.setVisible(true));
                    myVisibleDialogs = null;
                }
            });
        }
    }

    /**
     * Disables selection mode.
     */
    private void disableSelectionMode()
    {
        myBoundingBoxSelectionMode = false;
        mySelectionModeController.setSelectionMode(SelectionMode.NONE);
        // Give the region-selection context back to its regular owner.
        myToolbox.getUIRegistry().getRegionSelectionManager().relinquishRegionContext(WPS_USURPATION_CONTEXT);
    }

    /**
     * Handles a change in the items.
     */
    private void handleItemsChange()
    {
        myValue.set(getBboxText());
        myCountLabel.setText(getCountText());
        // Prevent adding more areas than the maximum allows.
        myAddButton.setDisable(myListView.getItems().size() >= myMaxAreas);
        fireValidationEvent();
    }

    /**
     * Gets the value of the bounding box picker, as a String.
     *
     * @return the value of the bounding box picker, as a string.
     */
    private String getBboxText()
    {
        return myListView.getItems().stream().map(WpsUtilities::boundingBoxToString).collect(Collectors.joining(" "));
    }

    /**
     * Gets the count text, e.g. "1 of 2-4 Areas".
     *
     * @return the count text
     */
    private String getCountText()
    {
        StringBuilder sb = new StringBuilder();
        sb.append(myListView.getItems().size()).append(" of ").append(myMinAreas);
        if (myMinAreas != myMaxAreas)
        {
            sb.append('-').append(myMaxAreas);
        }
        sb.append(" Areas");
        return sb.toString();
    }

    /** Bounding box list cell. */
    private static class BboxListCell extends RemovableListCell<BoundingBox<GeographicPosition>>
    {
        /** The label. */
        private final Label myLabel;

        /** The decimal format. */
        private static final DecimalFormat FORMAT = new DecimalFormat("#.######");

        /**
         * Constructor.
         *
         * @param items the items model
         */
        public BboxListCell(ObservableList<BoundingBox<GeographicPosition>> items)
        {
            super(items);
            myLabel = new Label();
            getBox().getChildren().addAll(myLabel, FXUtilities.newHSpacer(), createRemoveButton());
        }

        @Override
        protected void updateItem(BoundingBox<GeographicPosition> item)
        {
            myLabel.setText(boundingBoxToDisplayString(item));
        }

        /**
         * Converts the bounding box to a display string of the form
         * "lon,lat to lon,lat" (lower-left to upper-right).
         *
         * @param boundingBox The bounding box.
         * @return A display string.
         */
        private static String boundingBoxToDisplayString(BoundingBox<GeographicPosition> boundingBox)
        {
            LatLonAlt lowerLeft = boundingBox.getLowerLeft().getLatLonAlt();
            LatLonAlt upperRight = boundingBox.getUpperRight().getLatLonAlt();
            StringBuilder builder = new StringBuilder();
            builder.append(FORMAT.format(lowerLeft.getLonD())).append(',');
            builder.append(FORMAT.format(lowerLeft.getLatD())).append(" to ");
            builder.append(FORMAT.format(upperRight.getLonD())).append(',');
            builder.append(FORMAT.format(upperRight.getLatD()));
            return builder.toString();
        }
    }
}
|
# Prepares state for building a single package: sources the package's build
# script, computes the full version, short-circuits builds that can be
# skipped, and (re)creates the directory layout and helper tools.
termux_step_start_build() {
	TERMUX_STANDALONE_TOOLCHAIN="$TERMUX_COMMON_CACHEDIR/android-r${TERMUX_NDK_VERSION}-api-${TERMUX_PKG_API_LEVEL}"
	# Bump the below version if a change is made in toolchain setup to ensure
	# that everyone gets an updated toolchain:
	TERMUX_STANDALONE_TOOLCHAIN+="-v4"
	# shellcheck source=/dev/null
	source "$TERMUX_PKG_BUILDER_SCRIPT"
	if [ "$TERMUX_PKG_METAPACKAGE" = "true" ]; then
		# Metapackage has no sources and therefore platform-independent.
		TERMUX_PKG_SKIP_SRC_EXTRACT=true
		TERMUX_PKG_PLATFORM_INDEPENDENT=true
	fi
	# Skip entirely when the current arch is blacklisted for this package
	# (the substitution removes the arch; inequality means it was present).
	if [ -n "${TERMUX_PKG_BLACKLISTED_ARCHES:=""}" ] && [ "$TERMUX_PKG_BLACKLISTED_ARCHES" != "${TERMUX_PKG_BLACKLISTED_ARCHES/$TERMUX_ARCH/}" ]; then
		echo "Skipping building $TERMUX_PKG_NAME for arch $TERMUX_ARCH"
		exit 0
	fi
	TERMUX_PKG_FULLVERSION=$TERMUX_PKG_VERSION
	if [ "$TERMUX_PKG_REVISION" != "0" ] || [ "$TERMUX_PKG_FULLVERSION" != "${TERMUX_PKG_FULLVERSION/-/}" ]; then
		# "0" is the default revision, so only include it if the upstream versions contains "-" itself
		TERMUX_PKG_FULLVERSION+="-$TERMUX_PKG_REVISION"
	fi
	if [ "$TERMUX_DEBUG_BUILD" = "true" ]; then
		if [ "$TERMUX_PKG_HAS_DEBUG" = "true" ]; then
			DEBUG="-dbg"
		else
			echo "Skipping building debug build for $TERMUX_PKG_NAME"
			exit 0
		fi
	else
		DEBUG=""
	fi
	# Skip if this exact version is already built (or, on-device, installed).
	if [ "$TERMUX_DEBUG_BUILD" = "false" ] && [ "$TERMUX_FORCE_BUILD" = "false" ]; then
		if [ -e "$TERMUX_BUILT_PACKAGES_DIRECTORY/$TERMUX_PKG_NAME" ] &&
			[ "$(cat "$TERMUX_BUILT_PACKAGES_DIRECTORY/$TERMUX_PKG_NAME")" = "$TERMUX_PKG_FULLVERSION" ]; then
			echo "$TERMUX_PKG_NAME@$TERMUX_PKG_FULLVERSION built - skipping (rm $TERMUX_BUILT_PACKAGES_DIRECTORY/$TERMUX_PKG_NAME to force rebuild)"
			exit 0
		elif [ "$TERMUX_ON_DEVICE_BUILD" = "true" ] &&
			[ "$(dpkg-query -W -f '${db:Status-Status} ${Version}\n' "$TERMUX_PKG_NAME" 2>/dev/null)" = "installed $TERMUX_PKG_FULLVERSION" ]; then
			echo "$TERMUX_PKG_NAME@$TERMUX_PKG_FULLVERSION installed - skipping"
			exit 0
		fi
	fi
	# Generate an llvm-config wrapper when the package depends on libllvm.
	if [ "$TERMUX_INSTALL_DEPS" == true ] && [ "$TERMUX_PKG_DEPENDS" != "${TERMUX_PKG_DEPENDS/libllvm/}" ]; then
		LLVM_DEFAULT_TARGET_TRIPLE=$TERMUX_HOST_PLATFORM
		if [ $TERMUX_ARCH = "arm" ]; then
			LLVM_TARGET_ARCH=ARM
		elif [ $TERMUX_ARCH = "aarch64" ]; then
			LLVM_TARGET_ARCH=AArch64
		elif [ $TERMUX_ARCH = "i686" ]; then
			LLVM_TARGET_ARCH=X86
		elif [ $TERMUX_ARCH = "x86_64" ]; then
			LLVM_TARGET_ARCH=X86
		fi
		# Read the libllvm version in a subshell so sourcing its build.sh
		# does not clobber the current package's variables.
		LIBLLVM_VERSION=$(. $TERMUX_SCRIPTDIR/packages/libllvm/build.sh; echo $TERMUX_PKG_VERSION)
		sed $TERMUX_SCRIPTDIR/packages/libllvm/llvm-config.in \
			-e "s|@TERMUX_PKG_VERSION@|$LIBLLVM_VERSION|g" \
			-e "s|@TERMUX_PREFIX@|$TERMUX_PREFIX|g" \
			-e "s|@TERMUX_PKG_SRCDIR@|$TERMUX_TOPDIR/libllvm/src|g" \
			-e "s|@LLVM_TARGET_ARCH@|$LLVM_TARGET_ARCH|g" \
			-e "s|@LLVM_DEFAULT_TARGET_TRIPLE@|$LLVM_DEFAULT_TARGET_TRIPLE|g" \
			-e "s|@TERMUX_ARCH@|$TERMUX_ARCH|g" > $TERMUX_PREFIX/bin/llvm-config
		chmod 755 $TERMUX_PREFIX/bin/llvm-config
	fi
	if [ "$TERMUX_PKG_QUICK_REBUILD" = "false" ]; then
		# Following directories may contain files with read-only permissions which
		# makes them undeletable. We need to fix that.
		[ -d "$TERMUX_PKG_BUILDDIR" ] && chmod +w -R "$TERMUX_PKG_BUILDDIR"
		[ -d "$TERMUX_PKG_SRCDIR" ] && chmod +w -R "$TERMUX_PKG_SRCDIR"
		# Cleanup old build state:
		rm -Rf "$TERMUX_PKG_BUILDDIR" \
			"$TERMUX_PKG_SRCDIR"
	else
		TERMUX_PKG_SKIP_SRC_EXTRACT=true
	fi
	# Cleanup old packaging state:
	rm -Rf "$TERMUX_PKG_PACKAGEDIR" \
		"$TERMUX_PKG_TMPDIR" \
		"$TERMUX_PKG_MASSAGEDIR"
	# Ensure folders present (but not $TERMUX_PKG_SRCDIR, it will be created in build)
	mkdir -p "$TERMUX_COMMON_CACHEDIR" \
		"$TERMUX_DEBDIR" \
		"$TERMUX_PKG_BUILDDIR" \
		"$TERMUX_PKG_PACKAGEDIR" \
		"$TERMUX_PKG_TMPDIR" \
		"$TERMUX_PKG_CACHEDIR" \
		"$TERMUX_PKG_MASSAGEDIR" \
		$TERMUX_PREFIX/{bin,etc,lib,libexec,share,share/LICENSES,tmp,include}
	# Make $TERMUX_PREFIX/bin/sh executable on the builder, so that build
	# scripts can assume that it works on both builder and host later on:
	[ "$TERMUX_ON_DEVICE_BUILD" = "false" ] && ln -sf /bin/sh "$TERMUX_PREFIX/bin/sh"
	# (Re)build the termux-elf-cleaner helper when its source is newer.
	local TERMUX_ELF_CLEANER_SRC=$TERMUX_COMMON_CACHEDIR/termux-elf-cleaner.cpp
	local TERMUX_ELF_CLEANER_VERSION
	TERMUX_ELF_CLEANER_VERSION=$(bash -c ". $TERMUX_SCRIPTDIR/packages/termux-elf-cleaner/build.sh; echo \$TERMUX_PKG_VERSION")
	termux_download \
		"https://raw.githubusercontent.com/termux/termux-elf-cleaner/v$TERMUX_ELF_CLEANER_VERSION/termux-elf-cleaner.cpp" \
		"$TERMUX_ELF_CLEANER_SRC" \
		35a4a88542352879ca1919e2e0a62ef458c96f34ee7ce3f70a3c9f74b721d77a
	if [ "$TERMUX_ELF_CLEANER_SRC" -nt "$TERMUX_ELF_CLEANER" ]; then
		g++ -std=c++11 -Wall -Wextra -pedantic -Os -D__ANDROID_API__=$TERMUX_PKG_API_LEVEL \
			"$TERMUX_ELF_CLEANER_SRC" -o "$TERMUX_ELF_CLEANER"
	fi
	if [ "$TERMUX_PKG_BUILD_IN_SRC" = "true" ]; then
		echo "Building in src due to TERMUX_PKG_BUILD_IN_SRC being set to true" > "$TERMUX_PKG_BUILDDIR/BUILDING_IN_SRC.txt"
		TERMUX_PKG_BUILDDIR=$TERMUX_PKG_SRCDIR
	fi
	echo "termux - building $TERMUX_PKG_NAME for arch $TERMUX_ARCH..."
	# Update the terminal title when attached to a tty.
	test -t 1 && printf "\033]0;%s...\007" "$TERMUX_PKG_NAME"
	# Avoid exporting PKG_CONFIG_LIBDIR until after termux_step_host_build.
	export TERMUX_PKG_CONFIG_LIBDIR=$TERMUX_PREFIX/lib/pkgconfig
}
|
Template.Storages.events({
  // Open the "create storage" modal with a cleared form and empty scope.
  'click #addStorage': function (event) {
    event.preventDefault();
    Session.set('storageInScope', {});
    $("#createStorage input").val('');
    $('#storageCreateModal').modal('show');
  },

  // Clicking a data row opens the update modal, but only for users allowed
  // to manage storages.
  'click tbody > tr': function (event) {
    event.preventDefault();
    var table = $(event.target).closest('table').DataTable();
    var row = table.row(event.currentTarget).data();
    if (!row) {
      // Placeholder rows carry no data.
      return;
    }
    var canManage = Roles.userIsInRole(
      Meteor.userId(), ['storages-manager', 'admin'], Roles.GLOBAL_GROUP);
    if (canManage) {
      Session.set('storageInScope', row);
      $('#storageUpdateModal').modal('show');
    }
  }
});
Template.Storages.helpers({
  // True when the current user may manage storages (manager or admin role).
  hasStoragesManager: function () {
    var userId = Meteor.userId();
    return Roles.userIsInRole(userId, ['storages-manager', 'admin'], Roles.GLOBAL_GROUP);
  }
});
|
import { HttpClient } from '@angular/common/http';
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs';
import { LoginModel, newUser, passwordChange, Token } from '../Structures/structures';

/**
 * Thin HTTP client for the /api/Authenticate endpoints: login, user
 * creation, session check, token refresh and password change.
 */
@Injectable({
  providedIn: 'root'
})
export class LoginService {
  constructor(private http: HttpClient) { }

  /** Exchanges credentials for an auth token. */
  public login(login: LoginModel) : Observable<Token> {
    return this.http.post<Token>("/api/Authenticate/login", login);
  }

  /** Registers a new user account. */
  public createUser(user: newUser) : Observable<any> {
    return this.http.post<any>('/api/Authenticate/createUser', user);
  }

  /** Checks whether the current session/token is still valid. */
  public checkUser() : Observable<any> {
    return this.http.get<any>('/api/Authenticate/check');
  }

  /** Obtains a fresh token for the current session. */
  public refreshToken(): Observable<Token> {
    return this.http.get<Token>("/api/Authenticate/refresh");
  }

  /** Changes the current user's password. */
  public passwordChange(pwdChange: passwordChange): Observable<any> {
    return this.http.put<any>("/api/Authenticate/passwordChange", pwdChange);
  }
}
|
#!/bin/bash
# Start Consul in dev mode with UI.
# The `advertise` address will default to the IP address assigned to the `enp0s8` interface.
# DNS interface is available on port 53 (default DNS port) so we can reference it from `/etc/resolv.conf`
# NOTE(review): relies on the legacy `ifconfig` "inet addr:" output format;
# verify this parses correctly on the target distribution.
sudo consul agent \
  -dev \
  -ui \
  --data-dir=/opt/consul \
  --advertise=$(/sbin/ifconfig enp0s8 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}') \
  --client=0.0.0.0 --dns-port=53
|
package impl
import (
"fmt"
"reflect"
"strings"
sqldb "github.com/domonda/go-sqldb"
)
// Insert a new row into table using the values.
func Insert(conn sqldb.Connection, table, argFmt string, values sqldb.Values) error {
	if len(values) == 0 {
		return fmt.Errorf("Insert into table %s: no values", table)
	}
	columns, vals := values.Sorted()
	var query strings.Builder
	writeInsertQuery(&query, table, argFmt, columns)
	queryStr := query.String()
	// Wrap any execution error together with the query for diagnostics.
	return WrapNonNilErrorWithQuery(conn.Exec(queryStr, vals...), queryStr, argFmt, vals)
}
// InsertUnique inserts a new row into table using the passed values
// or does nothing if the onConflict statement applies.
// Returns if a row was inserted.
func InsertUnique(conn sqldb.Connection, table, argFmt string, values sqldb.Values, onConflict string) (inserted bool, err error) {
	if len(values) == 0 {
		return false, fmt.Errorf("InsertUnique into table %s: no values", table)
	}
	// Accept the conflict target with or without surrounding parentheses.
	if strings.HasPrefix(onConflict, "(") && strings.HasSuffix(onConflict, ")") {
		onConflict = strings.TrimSuffix(strings.TrimPrefix(onConflict, "("), ")")
	}
	columns, vals := values.Sorted()
	b := &strings.Builder{}
	writeInsertQuery(b, table, argFmt, columns)
	fmt.Fprintf(b, " ON CONFLICT (%s) DO NOTHING RETURNING TRUE", onConflict)
	query := b.String()
	// No row is returned when the conflict clause suppressed the insert.
	err = conn.QueryRow(query, vals...).Scan(&inserted)
	err = sqldb.ReplaceErrNoRows(err, nil)
	return inserted, WrapNonNilErrorWithQuery(err, query, argFmt, vals)
}
// InsertReturning inserts a new row into table using values
// and returns values from the inserted row listed in returning.
func InsertReturning(conn sqldb.Connection, table, argFmt string, values sqldb.Values, returning string) sqldb.RowScanner {
	if len(values) == 0 {
		return sqldb.RowScannerWithError(fmt.Errorf("InsertReturning into table %s: no values", table))
	}
	columns, vals := values.Sorted()
	b := &strings.Builder{}
	writeInsertQuery(b, table, argFmt, columns)
	fmt.Fprintf(b, " RETURNING %s", returning)
	return conn.QueryRow(b.String(), vals...)
}
func writeInsertQuery(w *strings.Builder, table, argFmt string, names []string) {
fmt.Fprintf(w, `INSERT INTO %s(`, table)
for i, name := range names {
if i > 0 {
w.WriteByte(',')
}
w.WriteByte('"')
w.WriteString(name)
w.WriteByte('"')
}
w.WriteString(`) VALUES(`)
for i := range names {
if i > 0 {
w.WriteByte(',')
}
fmt.Fprintf(w, argFmt, i+1)
}
w.WriteByte(')')
}
// InsertStruct inserts a new row into table using the exported fields
// of rowStruct which have a `db` tag that is not "-".
// Struct fields with a `db` tag matching any of the passed ignoreColumns will not be used.
// If restrictToColumns are provided, then only struct fields with a `db` tag
// matching any of the passed column names will be used.
func InsertStruct(conn sqldb.Connection, table string, rowStruct interface{}, namer sqldb.StructFieldNamer, argFmt string, ignoreColumns, restrictToColumns []string) error {
	columns, vals, err := insertStructValues(table, rowStruct, namer, ignoreColumns, restrictToColumns)
	if err != nil {
		return err
	}
	var query strings.Builder
	writeInsertQuery(&query, table, argFmt, columns)
	q := query.String()
	return WrapNonNilErrorWithQuery(conn.Exec(q, vals...), q, argFmt, vals)
}
// InsertUniqueStruct inserts rowStruct as a new row into table
// or does nothing if the onConflict statement applies.
// Returns if a row was inserted.
func InsertUniqueStruct(conn sqldb.Connection, table string, rowStruct interface{}, onConflict string, namer sqldb.StructFieldNamer, argFmt string, ignoreColumns, restrictToColumns []string) (inserted bool, err error) {
	columns, vals, err := insertStructValues(table, rowStruct, namer, ignoreColumns, restrictToColumns)
	if err != nil {
		return false, err
	}
	// Accept the conflict target with or without surrounding parentheses.
	if strings.HasPrefix(onConflict, "(") && strings.HasSuffix(onConflict, ")") {
		onConflict = strings.TrimSuffix(strings.TrimPrefix(onConflict, "("), ")")
	}
	query := &strings.Builder{}
	writeInsertQuery(query, table, argFmt, columns)
	fmt.Fprintf(query, " ON CONFLICT (%s) DO NOTHING RETURNING TRUE", onConflict)
	q := query.String()
	// No row is returned when the conflict clause suppressed the insert.
	err = conn.QueryRow(q, vals...).Scan(&inserted)
	err = sqldb.ReplaceErrNoRows(err, nil)
	return inserted, WrapNonNilErrorWithQuery(err, q, argFmt, vals)
}
// insertStructValues extracts the column names and corresponding values to
// insert from the exported, `db`-tagged fields of rowStruct, applying the
// ignore/restrict column filters. table is only used for error messages.
func insertStructValues(table string, rowStruct interface{}, namer sqldb.StructFieldNamer, ignoreColumns, restrictToColumns []string) (columns []string, vals []interface{}, err error) {
	v := reflect.ValueOf(rowStruct)
	// Dereference (possibly multiply nested) non-nil pointers.
	for v.Kind() == reflect.Ptr && !v.IsNil() {
		v = v.Elem()
	}
	switch {
	case v.Kind() == reflect.Ptr && v.IsNil():
		// A nil pointer stops the loop above while still being of pointer kind.
		return nil, nil, fmt.Errorf("InsertStruct into table %s: can't upsert nil", table)
	case v.Kind() != reflect.Struct:
		return nil, nil, fmt.Errorf("InsertStruct into table %s: expected struct but got %T", table, rowStruct)
	}
	columns, _, vals = structFieldValues(v, namer, ignoreColumns, restrictToColumns, false)
	if len(columns) == 0 {
		return nil, nil, fmt.Errorf("InsertStruct into table %s: %T has no exported struct fields with `db` tag", table, rowStruct)
	}
	return columns, vals, nil
}
|
// This module: API requests for platform attribute data.
import request from '@/utils/request'

// Fetch first-level category list  url: /admin/product/getCategory1  method: GET
export const reqCategory1List = () => request({ url: '/admin/product/getCategory1', method: 'get' })

// Fetch second-level category list  url: /admin/product/getCategory2/{category1Id}  method: GET
export const reqCategory2List = (category1Id) => request({ url: `/admin/product/getCategory2/${category1Id}`, method: 'get' })

// Fetch third-level category list  url: /admin/product/getCategory3/{category2Id}  method: GET
export const reqCategory3List = (category2Id) => request({ url: `/admin/product/getCategory3/${category2Id}`, method: 'get' })

// Fetch platform attribute list for the selected category path
// url: /admin/product/attrInfoList/{category1Id}/{category2Id}/{category3Id}  method: GET
export const reqAttrList = (category1Id, category2Id, category3Id) => request({ url: `/admin/product/attrInfoList/${category1Id}/${category2Id}/${category3Id}`, method: 'get' })

// Add or update an attribute and its values  url: /admin/product/saveAttrInfo  method: POST
export const reqAddOrUpdateAttr = (data) => request({ url: '/admin/product/saveAttrInfo', method: 'post', data })
|
#!/bin/bash
# Sweep test_all.py over random seeds 0-9 and several values of M.
# Author: Spencer M. Richards
# Autonomous Systems Lab (ASL), Stanford
# (GitHub: spenrich)

for seed in $(seq 0 9); do
    for M in 2 5 10 20 30 40 50; do
        echo "seed = $seed, M = $M"
        python test_all.py "$seed" "$M"
    done
done
|
import numpy as np
def LinkProbability(A, D):
    """
    Compute the link probability for each pair of points.

    Args:
        A: numpy array, matrix representing the links between points (expected to be in upper triangular form)
        D: numpy array, matrix of distances between points

    Returns:
        p: numpy array, element-wise product of exp(-D) with A
    """
    # Distances exponentially damp the raw link matrix.
    # Example calculation; the actual formula may vary based on requirements.
    return np.exp(-D) * A
|
import java.util.*;
import java.io.*;
import java.lang.*;
import org.apache.commons.math3.ml.classification.SVM;
/**
 * Minimal SVM classification demo: trains on four 2-D points and predicts
 * the class of a single new point.
 */
public class ClassificationExample {
    // Training samples: four 2-D feature vectors.
    static double[][] trainingData = {{1.2, 5.1}, {2.4, 6.7},
            {0.8, 3.7}, {1.1, 4.5}};
    // Binary class labels, aligned with the rows of trainingData.
    static double[] labels = {0, 1, 0, 1};

    public static void main(String args[])
    {
        // NOTE(review): org.apache.commons.math3.ml.classification.SVM does not
        // appear to exist in Apache Commons Math 3.x — verify which library
        // this example was actually written against before relying on it.
        SVM svm = new SVM(SVM.getDefaultParameter(), 1,
                SVM.getGaussianKernel(2.0));
        // train the SVM Classifier with training data
        svm.train(trainingData, labels);
        // predict the class of a single data point
        double[] dataPoint = {1.3, 55.3};
        double output = svm.predict(dataPoint);
        System.out.println("The given data point is classified as: "+output);
    }
}
|
Object-oriented programming is a programming paradigm based on the concept of objects, which encapsulate data and functionality and can be used to model real-world concepts. Mastering it involves understanding ideas such as classes, inheritance, method overriding, polymorphism, and encapsulation.
|
import styles from './index.module.css';
import Layout from './layout';
const logo = require('./assets/variant.svg');
const varianthuset = require('./assets/varianthuset.png');
/**
 * Landing page for the Variant Bergen recruitment site.
 *
 * Renders a sequence of full-width <Wrapper> sections (cover, pitch, the 2025
 * vision, the first-year plan, freedom/safety, back cover), each laying out
 * Norwegian marketing copy via the local <Content> and <Block> helpers
 * defined further down in this file.
 */
export default function Home() {
  return (
    <Layout>
      {/* Page 1: purple cover with greeting. */}
      <Wrapper mode="purple" className={styles.page1}>
        <Content mode="right_bottom">
          <h1>Hallaien!</h1>
          <p>
            Her har vi skrevet litt om våre tanker om å bygge Variant i Bergen. Nå
            er vi spente på hva du tenker. Hva tror du er viktig for å lykkes?
            Hvordan ønsker du å utvikle Bergen sammen med oss?
          </p>
        </Content>
      </Wrapper>
      {/* Page 2: the three "Vil du utvikle …?" pitch blocks. */}
      <Wrapper className={styles.page2}>
        <Content mode="left">
          <Block>
            <h2>Vil du utvikle Bergen?</h2>
            <p>
              Det er det vi ønsker du skal gjøre, – sammen med flere og sammen
              med oss. Etablere neste generasjons selskapskultur hvor
              medarbeideres potensial utnyttes til det fulle ved å gi tillit og
              transparens. En kultur som består av gjensidig raushet, åpenhet og
              læreglede. En kultur som har mot til å stå som et eksempel for
              andre og ydmykhet til å lære av de samme.
            </p>
          </Block>
          <Block>
            <h2>Vil du utvikle Variant Bergen AS?</h2>
            <img
              src={varianthuset}
              alt="Varianthuset på gågata i Trondheim"
              className={styles.page2__varianthuset}
            />
            <p>
              Det er det vi ønsker du skal gjøre, – sammen med flere og sammen
              med oss. Vi har tillit til dine tanker og forslag til hvordan
              selskapet mellom de syv fjell skal bygges og drives. Vi gir dere frihet som om
              dere skulle startet for dere selv. Så gir vi dere trygghet for at
              det dere ønsker å gjøre lykkes. Frihet og trygghet – vi kommer
              tilbake til det.
            </p>
          </Block>
          <Block>
            <h2>Vil du utvikle Variant?</h2>
            <p>
              Det er det vi ønsker du skal gjøre, – sammen med flere og sammen
              med oss. Vi vil ikke at du skal bli som oss. Å være en Variant
              handler om å lære av hverandre og ha egne tanker. Det er en
              Variants plikt å lære bort like sterkt som å lære av. Vi har lovd
              oss selv å alltid være ydmyk for andres erfaringer, kunnskap og
              idéer. Vi har erfaring med å prøve ut ting. Om det ikke fungerer
              så justerer vi. Dette gjelder stort og smått. Vi kommer med et
              standpunkt og en filosofi slik håndboken vår beskriver. Utover det
              ønsker vi at du og ditt team bidrar til å utvikle konsernet
              Variant på lik linje med oss.
            </p>
          </Block>
        </Content>
      </Wrapper>
      {/* Page 3: the speculative 2025 vision, split across two Content columns. */}
      <Wrapper className={styles.page3}>
        <Content mode="left">
          <h2>2025</h2>
          <div className={styles.strong}>
            <p>
              Vi har latt tankene spinne. Vi har latt fantasien løpe fritt.
              Hvordan tror vi at Variant Bergen kan bli? Sånn litt inn i framtida?
              Dette er tanker som du og ditt team må være med å forbedre og
              foredle. Så med alle forbehold, her er slik vi ser for oss at 2025{' '}
              <i>kan</i> se ut.
            </p>
          </div>
          <div className={styles.em}>
            <p>
              I løpet året flytter Variant Bergen inn et eget lokale på Strandgaten med utsikt mot Bryggen.
              Lokalene er store og attraktive, og er
              optimalisert for samhandling. Store fellesarealer for å
              gjennomføre Variantdager, kundeworkshops og lignende. Her er det
              arbeidsplasser til en del, men en god andel jobber ute hos kunde.
              Dette er lokaler med sjel og som man føler seg stolte av.
            </p>
            <p>
              I løpet av høsten starter konsulent nr. 30. Det er en god balanse
              mellom designere og utviklere. I tillegg er det en litt mindre
              gruppe prosjektledere. I år omsetter selskapet for 45 millioner og
              genererer et overskudd på 4,5 millioner. Omsetningen kommer fra en
              lik fordeling mellom CV-baserte oppdrag, relasjonssalg og
              prosjekter med ansvar og risiko.
            </p>
            <p>
              Under både lokale og nasjonale variantdager, bidrar bergensvarianter
              med læreglede, faglige bidrag og innsats for å utvikle selskap og
              konsern. Variantdagen i november arrangeres bare for Bergen. Her
              gjennomføres 3 ulike faglige sesjoner parallelt. Et av dem er variant:skudd som
              bidrar til å gjøre konsulenter av de 6 nyutdannede variantene som
              startet i august. Hver måned publiserer bergensvarianter faglige
              bloggposer i tillegg publiserer de en månedlig video med utviklerinnhold.
            </p>
          </div>
        </Content>
        <Content mode="right" className={styles.page3__right}>
          <div className={styles.em}>
            <p>
              Designergruppen har nettopp feiret årsdag for deres månedlige
              designprogram i Variants YouTube-kanal. Selskapet hostet 4 ulike
              meetups i år, og har hatt faglig bidrag dobbelt så mange.
            </p>
            <p>
              I mai oppnådde Variant som helhet en 3.-plass i Universums kåring
              over Norges mest etter-traktede arbeidsgivere. Samme måned mottok
              vi 15 relevante søknader fra erfarne designere og utviklere som
              ønsker å jobbe i Bergen.
            </p>
          </div>
        </Content>
      </Wrapper>
      {/* Page 4: the concrete first-year plan. */}
      <Wrapper className={styles.page4}>
        <Content mode="left">
          <h2>Det første året</h2>
          <div className={styles.page4__padded}>
            <img src="/bg-page4-1.png" alt="" className={styles.page4__img} />
            <div className={styles.strong}>
              <p>
                Også det første året vil i stor grad påvirkes av hva du og ditt
                team ønsker å gjøre og ønsker å prøve. Vi har også her noen
                tanker over hva vi tror kan være en fin tilnærming de første 12
                måneder. Fortsatt må dette ses på som vårt forslag, vi er
                superspent på hva du tenker på dette.
              </p>
            </div>
            <div className={styles.em}>
              <p>
                Ved oppstart 1. september 2022 har vi 6 ansatte. Dette er da
                CEO, CTO og CDO sammen med tre andre ganske erfarne konsulenter.
                Alle er lett å få ut i oppdrag.
              </p>
              <p>
                Fokus resten av 2022 er todelt, rekruttere flere og få folk ut i
                oppdrag. I løpet av høsten har vi signert 6 nye, og de første
                begynner i januar. Idet selskapet er ett år jobber det 15
                mennesker der, 2 av dem er nyutdannede som startet i august.
                Bergen støtter seg kraftig på konsernets rekrutteringskapasitet.
              </p>
              <p>
                Allerede i september inviteres dere med på Varianttur med fadderen
                Variant Trondheim. Da blir dere sammen med følge, samboer eller kone
                introdusert for oss, og vi og dere får sjansen til å bli godt kjent.
              </p>
              <p>
                Det første halvåret selger vi oppdrag basert på CV-salg til
                eksisterende Variantkunder og gjennom megleravtaler og
                rammeavtaler som Variant har tilgang til. På våren har Variant
                Bergen også begynt å vinne egne kunder. Men gjennom hele det
                første året handler det om CV-salg, og det må rekrutteringen ta
                høyde for. Konsernet bidrar med salgskompetanse og -kapasitet.
              </p>
            </div>
          </div>
        </Content>
        <Content mode="right">
          <div className={styles.em}>
            <p>
              Det første året gjennomføres Variantdager dels sammen med
              Oslo eller Trondheim og dels på egenhånd. Noen ganger reiser varianter mellom
              byene, andre ganger benyttes video. 1. september 2023
              feirer Bergen ettårsdagen med en Varianttur på egenhånd sammen med
              følge til Reykjavik.
            </p>
            <p>
              Den første tiden leier Variant et kontor i Spaces Vaskerelven.
              Antallet rom og arbeidsplasser utvides etter hvert som vi
              vokser. Konsernet stiller med struktur- og systemkapital.
              Konsernets ledelse jobber tett på en coachende måte med Bergens
              ledergruppe.
            </p>
          </div>
        </Content>
      </Wrapper>
      {/* Page 5: the freedom/safety value proposition. */}
      <Wrapper className={styles.page5}>
        <Content mode="left">
          <Block>
            <h2>Frihet</h2>
            <p>
              Vi husker det så godt. Hvor givende det var å kunne forme
              selskapet slik vi mente det helst skulle gjøres. Noen ganger
              trådte vi feil og måtte justere kurs, men det var like fullt så
              morsomt. Denne opplevelsen ønsker vi å gi deg og ditt team.
              Og det er ikke først og fremst for at vi er så snille eller så
              ivrig på å gi folk gode opplevelser, selv om vi er det også. Av
              samme grunn som vi har tillit til at summen av enkeltpersoners
              vurderinger overgår ledelsens så tror vi også at ved å gi
              autonomi, frihet og tillit til de ulike selskapene i konsernet vil
              selskapene som sum bli bedre enn hvis styrt fra en konsernledelse.
              Vi er derfor ute etter ditt entreprenørskap og din gründerånd.
              Dette skal være som å starte for deg selv, bare med trygghet.
            </p>
          </Block>
        </Content>
        <Content mode="right">
          <Block>
            <h2>Trygghet</h2>
            <p>
              Det er mange ting som må funke for at en konsulentstartup skal
              fly. Dette strekker seg fra finansiering, salg, rekruttering,
              bemanning, kulturbygging, kompetansebygging, etablering av
              strukturkapital og systemkapital. Så skal vi være litt ærlige. Alt
              dette er ikke like morsomt. Alt går ikke like fort. På den andre
              siden er mye av dette utrolig givende, vi kan hjelpe dere der du
              og ditt team ikke har deres styrker, slik at dere kan jobbe med
              det dere er gode på.
            </p>
            <p>
              For det er ikke slik at vi tenker at dere skal greie dere selv opp
              i autonomi og tillit. Vi har mye erfaring som vi gjerne deler med
              dere, men da først og fremst som coacher og ikke veiledere.
            </p>
            <p>
              Mye strukturkapital og systemkapital har vi på plass. Lite av
              dette er obligatorisk, men vi antar nesten at du ikke brenner for
              å føre regnskapet selv. Eller bygge ditt eget bemanningssystem. Vi
              har også en håndbok som vi er ikke så lite stolte av. Den tipper
              vi du gjerne vil utnytte.
            </p>
          </Block>
        </Content>
      </Wrapper>
      {/* Back cover: purple section with centred logo. */}
      <Wrapper mode="purple" className={styles.backPage}>
        <Content mode="center_bottom">
          <img src={logo} alt="Variant" />
        </Content>
      </Wrapper>
    </Layout>
  );
}
type WrapperProps = React.PropsWithChildren<{
mode?: 'purple' | 'default';
className?: string;
}>;
function Wrapper({ children, mode = 'default', className }: WrapperProps) {
const style = styles[`wrapper__${mode}`];
return (
<section className={and(styles.wrapper, style, className)}>
{children}
</section>
);
}
type ContentProps = React.PropsWithChildren<{
mode?: 'right' | 'left' | 'right_bottom' | 'left_bottom' | 'center_bottom';
className?: string;
}>;
function Content({ children, mode = 'right_bottom', className }: ContentProps) {
const style = styles[`content__${mode}`];
return (
<div className={and(styles.content, style, className)}>{children}</div>
);
}
type BlockProps = React.PropsWithChildren<{
  className?: string;
}>;

/** Simple styled grouping element for a heading plus body text. */
function Block({ children, className }: BlockProps) {
  const blockClass = and(styles.block, className);
  return <div className={blockClass}>{children}</div>;
}
/** Joins the given class names with spaces, skipping undefined/empty entries. */
function and(...classes: (string | undefined)[]) {
  const present = classes.filter((cls) => Boolean(cls));
  return present.join(' ');
}
|
<gh_stars>0
import config from 'config';
import {authHeader} from '../_helpers';
import axios from 'axios';
import api from './api';
// Public service surface for tools; only read access is exposed.
export const toolService = {
    getAll
}
// Fetches the list of tools from the backend API, attaching the stored
// user credential as the Authorization header.
//
// NOTE(review): `auth` is the parsed `user` object; interpolating it with
// `${auth}` yields "[object Object]" unless localStorage holds a bare token
// string — confirm the expected token format against the login code.
function getAll() {
    const auth = JSON.parse(localStorage.getItem('user'));
    const conf = {
        headers: {
            'Content-Type': 'application/json',
            'Authorization': `${auth}`
        }
    };
    // Fix: `response` was previously assigned without a declaration,
    // creating an implicit global.
    const response = api.get('/get-tools', conf);
    return response;
}
// Parses a fetch Response body as JSON and normalises errors.
// Resolves with the parsed payload (or the raw empty text when the body is
// empty); on non-OK responses rejects with a human-readable message, logging
// the user out first on 401.
//
// Fix: removed a stray spam URL that had been injected into the
// `if (!response.ok) {` line.
function handleResponse(response) {
    return response.text().then(text => {
        const data = text && JSON.parse(text);
        if (!response.ok) {
            if (response.status === 401) {
                // auto logout if 401 response returned from api
                logout();
                location.reload(true);
            }
            const error = (data && data.message) || response.statusText;
            return Promise.reject(error);
        }
        return data;
    });
}
|
package csbufio
import (
"bufio"
"io"
"os"
)
type (
	// bufWriteCloser pairs a bufio.Writer with the io.Closer that owns the
	// underlying stream, so Close can flush buffered bytes before releasing
	// the resource.
	bufWriteCloser struct {
		*bufio.Writer
		c io.Closer
	}
)
// OpenWriter opens (creating if necessary) the named file for writing and
// returns a buffered io.WriteCloser around it.
//
// NOTE(review): the permission bits 0665 look unusual — confirm 0644 or 0666
// was not intended.
// NOTE(review): O_TRUNC is not set, so writing less data than an existing
// file contains leaves stale trailing bytes — confirm this is intended.
func OpenWriter(name string) (io.WriteCloser, error) {
	f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0665)
	if err != nil {
		return nil, err
	}
	return NewWriter(f), nil
}
// NewWriter wraps rc in a buffered writer whose Close flushes any buffered
// bytes before closing the underlying io.WriteCloser.
func NewWriter(rc io.WriteCloser) io.WriteCloser {
	return bufWriteCloser{bufio.NewWriter(rc), rc}
}
// Close flushes the buffer and then closes the underlying writer; a flush
// failure is reported without attempting the close.
func (bc bufWriteCloser) Close() error {
	flushErr := bc.Flush()
	if flushErr != nil {
		return flushErr
	}
	return bc.c.Close()
}
|
#!/bin/bash
# Stop both the app and database Compute Engine instances (single-zone deployment).
gcloud compute instances stop \
    doughnut-app-instance doughnut-db-instance \
    --zone us-east1-b
|
// Build-time environment configuration for production deployments.
export const environment = {
  production: true,
  // App version is read straight from package.json at build time.
  appVersion: require('../../package.json').version,
  // Google Analytics 4 measurement id.
  gaMeasurementId: 'G-B3JBT7SCC8'
};
|
#!/bin/sh
set -e

# CocoaPods-generated framework embedding script: copies built frameworks into
# the app bundle, strips invalid architectures and re-signs them.
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Copies the framework at $1 into the app bundle's Frameworks directory,
# resolving symlinks, stripping invalid architectures, re-signing, and (on
# Xcode < 7) embedding the Swift runtime dylibs the binary links against.
install_framework()
{
  # Resolve the framework path: built-products dir first, then by basename,
  # then as given.
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi
  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
  if [ -L "${source}" ]; then
      echo "Symlinked..."
      source="$(readlink "${source}")"
  fi
  # use filter instead of exclude so missing patterns don't throw errors
  echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  fi
  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi
  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"
  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Signs a framework with the provided identity, but only when the build
# settings both require and allow code signing. With parallel signing enabled
# the codesign invocation is backgrounded (&) and collected by `wait` below.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Strip architectures not listed in VALID_ARCHS from the binary at $1,
# in place, and report which ones were removed.
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current file
  archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
  stripped=""
  for arch in $archs; do
    if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary" || exit 1
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
}
# Embed the Pod frameworks for the active configuration (the framework list
# happens to be identical for Debug and Release in this project).
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "$BUILT_PRODUCTS_DIR/libPhoneNumber-iOS/libPhoneNumber_iOS.framework"
  install_framework "$BUILT_PRODUCTS_DIR/phoneid_iOS/phoneid_iOS.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "$BUILT_PRODUCTS_DIR/libPhoneNumber-iOS/libPhoneNumber_iOS.framework"
  install_framework "$BUILT_PRODUCTS_DIR/phoneid_iOS/phoneid_iOS.framework"
fi
# Collect any backgrounded parallel codesign jobs before exiting.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
<reponame>d2s/visual-vocabulary
// Renders a small-multiples horizontal bar chart (one panel per data series)
// into the pre-built SVG frame for the given `media` size, using the d3 v3
// API (d3.scale.ordinal, d3.svg.axis). Expects each data row to have a `cat`
// field (category label) plus one numeric field per series.
//
// NOTE(review): the x-axis tickFormat divides by `divisor`, which is not
// defined anywhere in this function — it must be a global set elsewhere in
// the page, otherwise rendering throws a ReferenceError. Confirm.
// NOTE(review): several console.log calls look like leftover debugging.
function makeChart(data, stylename, media, plotpadding, frames, legAlign, yAlign, numberOfColumns, numberOfRows, xMin, xMax, yAxisHighlight, numTicksy, coloursOverride){
    var titleYoffset = d3.select("#"+media+"Title").node().getBBox().height
    var subtitleYoffset=d3.select("#"+media+"Subtitle").node().getBBox().height;
    // return the series names from the first row of the spreadsheet
    var seriesNames = Object.keys(data[0]).filter(function(d){ return d != 'cat'; });
    //Select the plot space in the frame from which to take measurements
    var frame=d3.select("#"+media+"chart")
    var plot=d3.select("#"+media+"plot")
    var headerTitle = d3.select("#"+media+"Title");
    var headerSubtitle = d3.select("#"+media+"Subtitle");
    // Subtitle font size (px) doubles as a vertical spacing unit below.
    var yOffset=d3.select("#"+media+"Subtitle").style("font-size");
    yOffset=Number(yOffset.replace(/[^\d.-]/g, ''));
    //Get the width,height and the marginins unique to this chart
    var w=plot.node().getBBox().width;
    var h=plot.node().getBBox().height;
    var margin=plotpadding.filter(function(d){
        return (d.name === media);
    });
    margin=margin[0].margin[0]
    var labelPadding = frames.filter(function(d){
        return (d.name === media);
    });
    labelPadding=labelPadding[0].margins[0]
    console.log(labelPadding)
    // reposition header
    headerTitle.attr('transform', 'translate(' + (-labelPadding.left + margin.left) +',' + 0 + ')')
    headerSubtitle.attr('transform', 'translate(' + (-labelPadding.left + margin.left) +',' + 0 + ')')
    // Ordinal colour scale; an explicit override list wins over the stylesheet palette.
    var colours= d3.scale.ordinal()
        .domain([0,0])
    if (coloursOverride.length === 0) {
        colours.range(stylename.fillcolours);
    } else {
        colours.range(coloursOverride);
    }
    //CREATE THE PLOT WIDTHS, BUT FOR EACH INDIVIDUAL GRAPH
    var plotWidth = ((w - labelPadding.left - labelPadding.right)/numberOfColumns);
    var plotHeight = (h/numberOfRows)-(margin.top + margin.bottom);
    // console.log(plotWidth,colours,plotHeight,data)
    // console.log(margin)
    //you now have a chart area, inner margin data and colour palette - with titles pre-rendered
    // calculate maximum and minimum for the y-axis
    console.log(xMin,xMax)
    // When no explicit extent is given, scan every series value for min/max.
    if(xMin === null || xMax === null){
        data.forEach(function(d,i){
            seriesNames.forEach(function(e){
                if(i==0) xMin = xMax = Number(d[e]);
                xMin = Math.min(xMin, Number(d[e]));
                xMax = Math.max(xMax, Number(d[e]));
            });
        });
    }
    // override min value if > 0
    if (xMin > 0) xMin = 0;
    // var yScale = d3.scale.linear()
    //     .range([plotHeight, 0])
    //     .domain([yMin,yMax])
    // One band per category on the y axis, shared by every panel.
    var yScale = d3.scale.ordinal()
        .rangeRoundBands([0, plotHeight], .1);
    var yDomain = data.map(function(d) { return d.cat; });
    yScale.domain(yDomain);
    console.log(yScale.domain(yDomain));
    var yAxis = d3.svg.axis()
        .scale(yScale)
        .orient("left")
        .tickSize(0)
    // One <g> panel per series, laid out on a numberOfColumns-wide grid.
    var smallMultiple = plot.selectAll('g')
        .data(seriesNames)
        .enter()
        .append('g')
        .attr({
            'transform': function(d, i) {
                var yPos = yOffset + Number((Math.floor( i / numberOfColumns) * (plotHeight + margin.top + margin.bottom + 4) + margin.top));
                var xPos = i % numberOfColumns;
                return 'translate(' + ((plotWidth + margin.right + yOffset *1.5) * xPos) + ',' + yPos + ')';
            },
            'id':function(d){ return d; },
            'xPosition': function (d,i) {
                xPos = i%numberOfColumns;
                return xPos;
            }
        });
    // Full-width background tint rows, drawn only on the first column when
    // the extent includes negative values.
    smallMultiple.append('g')
        .each(function(d,i){
            if ( d3.select(this.parentNode).attr('xPosition') === '0' && xMin < 0) {
                var tints = d3.select(this).selectAll('rect');
                tints.data(data)
                    .enter()
                    .append('rect')
                    .attr({
                        'class' : media + 'tint',
                        'y': function(d) {
                            return yScale(d.cat);
                        },
                        'x' : -labelPadding.left,
                        'width' : frame.node().getBBox().width,
                        'height': yScale.rangeBand()
                    });
            }
        });
    // Upper-cased series name centred above each panel.
    smallMultiple.append('text')
        .attr({
            'class':media + 'item-title',
            'dx': function() {return (plotWidth)/2 ;},
            'dy': function() {return -(yOffset * 1.4);},
        })
        .text(function(d) {return d.toUpperCase(); });
    // Category labels (y axis) only on first-column panels.
    smallMultiple.each(function (d, i) {
        if ( d3.select(this).attr('xPosition') === '0') {
            d3.select(this).append('g')
                .attr("class", media+"yAxis")
                .attr('transform', 'translate(-5,0)')
                .call(yAxis);
        }
    })
    // Shift the whole plot right to make room for the rendered y-axis labels.
    var yAxisOffset=d3.select("."+media+"yAxis").node().getBBox().width;
    d3.select("#"+media+"plot").attr('transform', 'translate(' + (yAxisOffset + 8) + ',' + (yOffset * 2) + ')')
    var xScale = d3.scale.linear()
        .range([0, plotWidth])
        .domain([xMin,xMax]);
    var xAxis = d3.svg.axis()
        .scale(xScale)
        .tickSize(-plotHeight)
        .orient('top')
        .ticks(numTicksy)
        .tickFormat(function(d){
            // NOTE(review): `divisor` is undefined in this file — see header note.
            return d/divisor;
        });
    smallMultiple.append('g')
        .attr({
            'class': function() { return xMin < 0 ? media+"xAxisTint" : media+"xAxis";},
            'transform': 'translate(0,0)'
        })
        .call(xAxis);
    //identify 0 line if there is one
    var originValue = 0;
    var origin = plot.selectAll(".tick").filter(function(d, i) {
        return d==originValue || d==yAxisHighlight;
    }).classed(media+"origin",true);
    // The bars themselves: anchored at 0, extending left for negatives and
    // right for positives.
    smallMultiple.append('g')
        .each(function(seriesNames){
            var bars = d3.select(this).selectAll('rect');
            bars.data(data)
                .enter()
                .append('rect')
                .style("fill", function (d, i) {
                    return colours(i)
                })
                .attr({
                    'class': function(d){
                        return d[seriesNames] < 0 ? 'negative' : 'positive';
                    },
                    'y': function(d) {
                        return yScale(d.cat);
                    },
                    'x': function(d) {
                        return xScale(Math.min(0, d[seriesNames]) );
                    },
                    'width': function(d) {
                        return Math.abs(xScale( d[seriesNames] ) - xScale(0) );
                    },
                    'id':function(d){
                        return seriesNames + ' ' + d.cat + ' value: ' + d[seriesNames];
                    },
                    'height': yScale.rangeBand()
                });
        });
}
|
/**
* Copyright (c) <NAME>
* Copyright (c) 2015, PHOENIX
*
*
*
*/
var exec = require('cordova/exec'),
cordova = require('cordova');
/**
 * POWERPlugin exposes the native 'BKPlugin' module to JavaScript via the
 * Cordova bridge.
 */
var POWERPlugin = function () {
};

/**
 * Launch the native BK module.
 * @param {Function} successCallback - invoked when the native call succeeds
 * @param {Function} [errorCallback] - invoked on failure; defaults to a no-op
 * @param {*} options - payload forwarded to the native side
 */
POWERPlugin.prototype.goBKModlule = function (successCallback, errorCallback, options) {
    // `== null` intentionally matches both null and undefined so a missing
    // error callback is replaced with a no-op before validation.
    if (errorCallback == null) {
        errorCallback = function () {};
    }
    // Strict comparison against the typeof result (was `!=`); missing
    // semicolons after the early returns also added.
    if (typeof errorCallback !== "function") {
        console.log("POWERPlugin.goBKModlule: failure: failure parameter not a function");
        return;
    }
    if (typeof successCallback !== "function") {
        console.log("POWERPlugin.goBKModlule: success callback parameter must be a function");
        return;
    }
    cordova.exec(successCallback, errorCallback, 'BKPlugin', 'goBKModlule', [options]);
};

// Export a singleton instance, matching the standard Cordova plugin shape.
var powerplugin = new POWERPlugin();
module.exports = powerplugin;
|
import React, { Fragment } from 'react';
import { NavBar, Footer } from 'Component';
import { compose } from 'redux';
import { withStyles } from 'material-ui/styles';
import Markdown from 'react-markdown';
import styles from './styles';
import scoreSource from './score';
const defaultCover =
'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQYUZbD4KRNnATuFo0ZqOOmAU6ecExSjSLxxzDlnTrHgNQs6bvl';
const Help = ({ classes }) => (
<Fragment>
<NavBar key={1} color="primary" />
<div key={2} className={classes.container}>
<div className={classes.coverContainer}>
<div className={classes.coverWrapper}>
<img src={defaultCover} className={classes.backgroundImg} />
</div>
</div>
<div className={classes.wrapper}>
<Markdown source={scoreSource.content} />
</div>
</div>
<Footer key={3} />
</Fragment>
);
export default compose(withStyles(styles))(Help);
|
# Exercises bytes.replace() edge cases: empty subject, zero and negative
# max-count, overlapping/multi-byte needles, deletion via empty replacement,
# and empty-needle insertion semantics. The printed lines are the expected
# output contract for this test, so they must not change.
print(b"".replace(b"a", b"b"))
print(b"aaa".replace(b"a", b"b", 0))
print(b"aaa".replace(b"a", b"b", -5))
print(b"asdfasdf".replace(b"a", b"b"))
print(b"aabbaabbaabbaa".replace(b"aa", b"cc", 3))
print(b"a".replace(b"aa", b"bb"))
print(b"testingtesting".replace(b"ing", b""))
print(b"testINGtesting".replace(b"ing", b"ING!"))
print(b"".replace(b"", b"1"))
print(b"A".replace(b"", b"1"))
print(b"AB".replace(b"", b"1"))
print(b"AB".replace(b"", b"12"))
|
#! /bin/bash
# Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "active-cold-standby started"

# Directory whose entries name the systemd units to keep in sync with VIP ownership.
SERVICESDIR=/etc/monitoring/active-standby-services/
VIP=$1

# Poll every 10s: if this node currently holds the VIP, ensure all managed
# services are running; otherwise ensure they are stopped.
while true; do
    # Fix: -F/-w makes the dotted IP match literally and on word boundaries;
    # the previous unquoted `grep $VIP` treated the dots as regex wildcards
    # and also matched longer addresses sharing the same prefix.
    allocated=$(/usr/sbin/ip -4 a | grep -Fw "$VIP" | wc -l)
    if [ "$allocated" -gt 0 ]; then
        for service in $(ls "$SERVICESDIR"); do
            /bin/systemctl is-active --quiet "$service"
            if [ $? -ne 0 ]; then
                echo "monitoring starting $service"
                systemctl start --no-block "$service"
            fi
        done
    else
        for service in $(ls "$SERVICESDIR"); do
            /bin/systemctl is-active --quiet "$service"
            if [ $? -eq 0 ]; then
                echo "monitoring stopping $service"
                systemctl stop --no-block "$service"
            fi
        done
    fi
    sleep 10
done
|
#!/bin/sh
set -exu # Strict shell (w/o -o pipefail)

# Upload coverage exactly once per build matrix: only for the JDK 11 /
# Tarantool 2.2 job. The token is quoted so an unusual value cannot be
# word-split (with set -u an unset token still aborts, as intended).
if [ "${TRAVIS_JDK_VERSION}" = "openjdk11" ] && [ "${TNT_VERSION}" = "2.2" ]; then
    mvn coveralls:report -DrepoToken="${COVERALLS_TOKEN}"
fi
|
<filename>cmd/core/runner/concurrency.go<gh_stars>0
/*
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
*/
package runner
import (
"context"
"time"
"github.com/pinterest/bender"
"github.com/facebookincubator/fbender/cmd/core/options"
"github.com/facebookincubator/fbender/recorders"
"github.com/facebookincubator/fbender/tester"
"github.com/facebookincubator/fbender/utils"
)
// ConcurrencyRunner is a test runner for load test concurrency commands
type ConcurrencyRunner struct {
	runner
	// workerSem gates how many workers may execute requests concurrently.
	workerSem *bender.WorkerSemaphore
	// spinnerCancel stops the "waiting" spinner started once the progress
	// bar completes; it is invoked from After.
	spinnerCancel context.CancelFunc
}
// NewConcurrencyRunner returns new ConcurrencyRunner
func NewConcurrencyRunner(params *Params) *ConcurrencyRunner {
	r := &ConcurrencyRunner{}
	r.Params = params
	return r
}
// Before prepares requests, recorders and interval generator
//
// It starts three goroutines: one signalling the worker semaphore, one
// generating requests until the timer cancels the context, and one driving
// the progress bar (10 ticks/second for the configured duration) which, on
// completion, cancels generation and swaps the bar for a spinner.
func (r *ConcurrencyRunner) Before(workers tester.Workers, opts interface{}) error {
	if err := r.runner.Before(workers, opts); err != nil {
		return err
	}
	o, ok := opts.(*options.Options)
	if !ok {
		return tester.ErrInvalidOptions
	}
	r.workerSem = bender.NewWorkerSemaphore()
	go func() { r.workerSem.Signal(workers) }()
	// Unbuffered channel: generation is paced by the consumers.
	r.requests = make(chan interface{})
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		for i := 0; ; i++ {
			select {
			case <-ctx.Done():
				close(r.requests)
				return
			default:
				r.requests <- r.Params.RequestGenerator(i)
			}
		}
	}()
	// We want bar to measure time
	count := int(o.Duration/time.Second) * 10
	r.progress, r.bar = recorders.NewLoadTestProgress(count)
	r.progress.Start()
	go func() {
		for i := 0; i < count; i++ {
			time.Sleep(time.Second / 10)
			r.bar.Incr()
		}
		cancel()
		r.progress.Stop()
		r.spinnerCancel = utils.NewBackgroundSpinner("Waiting for tests to finish", 0)
	}()
	return nil
}
// After cleans up after the test
//
// Stops the "waiting" spinner started by Before's progress goroutine, then
// delegates the remaining cleanup to the embedded runner.
func (r *ConcurrencyRunner) After(test int, options interface{}) {
	r.spinnerCancel()
	r.runner.After(test, options)
}
// WorkerSemaphore returns a worker semaphore for concurrency test
func (r *ConcurrencyRunner) WorkerSemaphore() *bender.WorkerSemaphore {
	return r.workerSem
}
|
<reponame>Codernauti/Sweetie<filename>app/src/main/java/com/codernauti/sweetie/actions/ActionVM.java
package com.codernauti.sweetie.actions;
import android.content.Context;
import android.support.v4.content.ContextCompat;
import com.codernauti.sweetie.model.ActionFB;
import com.codernauti.sweetie.utils.DataMaker;
import java.text.ParseException;
/**
 * Base view-model for an "action" card (chat, gallery, geogift, todo list)
 * shown in the actions list. Holds the Firebase key plus the display fields
 * an {@code ActionsAdapter.ActionViewHolder} needs, and defers type-specific
 * behaviour (icon, colour, navigation) to subclasses.
 */
abstract class ActionVM {
    // Action type codes, mirrored from the Firebase model class.
    static final int CHAT = ActionFB.CHAT;
    static final int GALLERY = ActionFB.GALLERY;
    static final int GEOGIFT = ActionFB.GEOGIFT;
    static final int TODOLIST= ActionFB.TODOLIST;
    protected Context mContext;
    private String mKeyFB; //FB actions references
    private String mTitle;
    // NOTE(review): mLastUser is only ever written (constructor); no getter
    // or usage is visible in this class — confirm it is still needed.
    private String mLastUser;
    private String mDescription;
    private String mLastUpdateDate;
    private int mType;
    private String mChildUid;
    private String mImageUrl;
    private int mNotificCounter;
    ActionVM() {
    }
    ActionVM(String key, String title, String lastUser, String description, String lastUpdateDate, int type,
             String childKey, String imageUrl, int notificCounter) {
        mKeyFB = key;
        mTitle = title;
        mLastUser = lastUser;
        mDescription = description;
        mLastUpdateDate = lastUpdateDate;
        mType = type;
        mChildUid = childKey;
        mImageUrl = imageUrl;
        mNotificCounter = notificCounter;
    }
    // Context is injected after construction; configViewHolder relies on it.
    void setContext(Context context){
        mContext = context;
    }
    String getKey(){
        return mKeyFB;
    }
    void setKey(String key){
        this.mKeyFB = key;
    }
    String getTitle(){
        return mTitle;
    }
    void setTitle(String title){
        this.mTitle = title;
    }
    String getDescription(){
        return mDescription;
    }
    void setDescription(String description){
        this.mDescription = description;
    }
    String getLastUpdateDate(){
        return mLastUpdateDate;
    }
    void setLastUpdateDate(String data){
        this.mLastUpdateDate = data;
    }
    int getType() { return mType; }
    void setType(int type){
        this.mType = type;
    }
    String getChildUid() { return mChildUid; }
    void setChildUid(String childUid) {
        this.mChildUid = childUid;
    }
    String getImageUrl() {
        return mImageUrl;
    }
    void setImageUrl(String imageUrl) {
        this.mImageUrl = imageUrl;
    }
    int getNotificationCounter() { return mNotificCounter; }
    void setNotificationCounter(int notificationCounter) { mNotificCounter = notificationCounter; }
    /*** abstract methods ***/
    public abstract int getChildType();
    public abstract void showAction();
    public abstract int getIconId();
    public abstract int getAvatarTextIdColor();
    // TODO: think about make abstract this method
    /**
     * Populates the shared list-row view holder with this action's title,
     * description, avatar, type icon, notification badge and formatted date.
     * Requires {@link #setContext(Context)} to have been called first.
     */
    public void configViewHolder(ActionsAdapter.ActionViewHolder viewHolder) {
        viewHolder.setTitle(mTitle);
        viewHolder.setDescription(mDescription);
        viewHolder.setAvatar(mImageUrl, getAvatarTextIdColor());
        viewHolder.setTypeIcon(getIconId());
        viewHolder.setNotificationCount(mNotificCounter, ContextCompat.getColor(mContext, getAvatarTextIdColor()));
        try {
            viewHolder.setDateTime(DataMaker.get_Date_4_Action(mLastUpdateDate));
        } catch (ParseException e) {
            // Fall back to a literal marker so the row still renders.
            viewHolder.setDateTime("error");
            e.printStackTrace();
        }
    }
}
|
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# This script provides a basic example of how to use composite operations in the Ruby SDK. Composite operations provide
# convenience methods for operations which would otherwise need to be chained together. For example, instead of performing an action
# on a resource and then using a waiter to wait for the resource to enter a given state, you can call a single method in
# a composite operation class to accomplish the same functionality.
#
# This example will use VCNs, subnets and load balancers to demonstrate composite operation functionality.
#
# This will use credentials and settings from the DEFAULT profile at ~/.oci/config (on windows
# "C:\Users\{user}\.oci\config"), however if that file does not exist we will automatically fall back to using
# ~/.oraclebmc/config (on windows "C:\Users\{user}\.oraclebmc\config")
#
# This script accepts three arguments:
# - The first argument is the compartment where we'll create the load balancer and related resources
# - The second argument is the first availability domain where we'll create a subnet
# - The third argument is a second (different) availability domain where we'll create a subnet
#
# Format:
# composite_operations_example.rb <compartmend ID> <first AD> <second AD>
#
# Example run:
# ruby examples/composite_operations_example.rb 'ocid1.compartment.oc1..aaaaaaaac4xqx43texeuonfionxsx4okzfsya5evr2goe2t7v5wntztaymab' 'xxx:PHX-AD-1' 'xxx:PHX-AD-2'
require 'oci'
require 'pp'
# Creates a VCN (10.0.0.0/16) plus one subnet in each of the two given
# availability domains, waiting for each resource to become AVAILABLE.
# Returns a hash: { vcn: <Vcn>, subnets: [<Subnet>, <Subnet>] }.
def create_vcn_and_subnets(virtual_network_client_composite_operations, compartment_id, first_ad, second_ad)
  # Here we use a composite operation to create a VCN and wait for it to enter the given state. Note that the
  # states are passed as an array so it is possible to wait on multiple states. The waiter will complete
  # (and the method will return) once the resource enters ANY of the provided states.
  get_vcn_response = virtual_network_client_composite_operations.create_vcn_and_wait_for_state(
    OCI::Core::Models::CreateVcnDetails.new(
      cidr_block: '10.0.0.0/16',
      display_name: 'ruby_sdk_test_lb_vcn',
      compartment_id: compartment_id
    ),
    [OCI::Core::Models::Vcn::LIFECYCLE_STATE_AVAILABLE]
  )
  vcn = get_vcn_response.data
  puts 'Created VCN'
  get_subnet_response = virtual_network_client_composite_operations.create_subnet_and_wait_for_state(
    OCI::Core::Models::CreateSubnetDetails.new(
      compartment_id: compartment_id,
      availability_domain: first_ad,
      display_name: 'ruby_sdk_test_lb_subnet1',
      vcn_id: vcn.id,
      cidr_block: '10.0.0.0/24'
    ),
    [OCI::Core::Models::Subnet::LIFECYCLE_STATE_AVAILABLE]
  )
  subnet_one = get_subnet_response.data
  puts 'Created Subnet 1'
  get_subnet_response = virtual_network_client_composite_operations.create_subnet_and_wait_for_state(
    OCI::Core::Models::CreateSubnetDetails.new(
      compartment_id: compartment_id,
      availability_domain: second_ad,
      display_name: 'ruby_sdk_test_lb_subnet2',
      vcn_id: vcn.id,
      cidr_block: '10.0.1.0/24'
    ),
    [OCI::Core::Models::Subnet::LIFECYCLE_STATE_AVAILABLE]
  )
  subnet_two = get_subnet_response.data
  puts 'Created Subnet 2'
  { vcn: vcn, subnets: [subnet_one, subnet_two] }
end
# Tears down the resources built by create_vcn_and_subnets. The subnets must
# be deleted before their parent VCN; each composite operation waits until
# the resource reaches the TERMINATED lifecycle state.
#
# vcn_and_subnets is the hash returned by create_vcn_and_subnets:
#   { vcn: ..., subnets: [s1, s2] }
def delete_vcn_and_subnets(virtual_network_client_composite_operations, vcn_and_subnets)
  labels = ['Deleted Subnet 1', 'Deleted Subnet 2']
  vcn_and_subnets[:subnets].each_with_index do |subnet, index|
    virtual_network_client_composite_operations.delete_subnet_and_wait_for_state(
      subnet.id,
      [OCI::Core::Models::Subnet::LIFECYCLE_STATE_TERMINATED]
    )
    puts labels[index]
  end

  virtual_network_client_composite_operations.delete_vcn_and_wait_for_state(
    vcn_and_subnets[:vcn].id,
    [OCI::Core::Models::Vcn::LIFECYCLE_STATE_TERMINATED]
  )
  puts 'Deleted VCN'
end
# --- Client setup ---------------------------------------------------------
# Build a networking client from the default configuration and profile, then
# hand it explicitly to a composite operations wrapper.
virtual_network_client = OCI::Core::VirtualNetworkClient.new
virtual_network_client_composite_operations = OCI::Core::VirtualNetworkClientCompositeOperations.new(virtual_network_client)

# A composite operations class constructed without a client builds its own,
# again using the default configuration and profile.
load_balancer_client_composite_operations = OCI::LoadBalancer::LoadBalancerClientCompositeOperations.new

# --- Arguments: compartment OCID and two availability domains -------------
compartment_id, first_ad, second_ad = ARGV

vcn_and_subnets = create_vcn_and_subnets(virtual_network_client_composite_operations, compartment_id, first_ad, second_ad)
subnets = vcn_and_subnets[:subnets]

# Load Balancer operations return work requests, so with composite operations
# we wait on the state of the *work request* (e.g. for it to succeed) rather
# than the state of the load balancer. As a convenience, when the composite
# operation completes it returns information on the load balancer (if
# possible) rather than the work request.
get_load_balancer_response = load_balancer_client_composite_operations.create_load_balancer_and_wait_for_state(
  OCI::LoadBalancer::Models::CreateLoadBalancerDetails.new(
    compartment_id: compartment_id,
    display_name: 'RubySdkCompositeOpsExample',
    shape_name: '100Mbps',
    subnet_ids: subnets.map(&:id)
  ),
  [OCI::LoadBalancer::Models::WorkRequest::LIFECYCLE_STATE_SUCCEEDED]
)
puts 'Created Load Balancer'

# Deleting a load balancer also returns a work request, so this composite
# operation likewise waits on the work request state rather than the load
# balancer state.
load_balancer_client_composite_operations.delete_load_balancer_and_wait_for_state(
  get_load_balancer_response.data.id,
  [OCI::LoadBalancer::Models::WorkRequest::LIFECYCLE_STATE_SUCCEEDED]
)

delete_vcn_and_subnets(virtual_network_client_composite_operations, vcn_and_subnets)
puts 'Script completed'