text stringlengths 1 1.05M |
|---|
"""
App to get the bets
"""
import json
from flask import Flask, jsonify, request
from rq import Queue
from rq.job import Job
from worker import conn
from tasks import get_bets
q = Queue(connection=conn)
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the locally cached bets file as a JSON response."""
    with open('bets.json', 'r') as bets_file:
        cached_bets = json.load(bets_file)
    return jsonify(cached_bets)
@app.route('/bets', methods=['POST'])
def bets():
    """Enqueue a background job that fetches bets for the given user.

    Expects a JSON body with required ``username`` and ``pin`` fields and
    an optional ``period``.  Returns the RQ job id as plain text so the
    caller can poll ``/results/<job_key>``.
    """
    data = request.json or {}
    # Validate up front: indexing a missing key would raise KeyError and
    # surface to the client as an opaque HTTP 500.
    missing = [field for field in ('username', 'pin') if field not in data]
    if missing:
        return jsonify({'error': 'missing required fields', 'fields': missing}), 400
    job = q.enqueue(
        get_bets,
        data['username'],
        data['pin'],
        data.get('period'),
    )
    return job.id
@app.route('/results/<job_key>', methods=['GET'])
def get_results(job_key):
    """Report the outcome of a previously enqueued job.

    Returns the job result as JSON with 200 once finished; otherwise a
    202 to signal the caller should keep polling.
    """
    job = Job.fetch(job_key, connection=conn)
    if not job.is_finished:
        return "Nay!", 202
    return jsonify(job.result), 200
if __name__ == '__main__':
    # Listen on all interfaces; debug mode is for development only.
    app.run(debug=True, host='0.0.0.0')
|
#!/usr/bin/env bash
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
set -e

# Iterate FOMs recorded in m0play.db matching one request pattern and run
# fom_req.py for each (fom_sm_id, pid) pair.

# Choose one of the patterns
PATTERN="%WRITEV%" # IO write
#PATTERN="%READV%" # IO read
#PATTERN="%CAS_PUT%" # CAS put
#PATTERN="%CAS_GET%" # CAS get

# SQL limit settings
LIMIT_OFFSET=1000
LIMIT=10

echo "============================================================="
echo "Pattern: $PATTERN, limit offset: $LIMIT_OFFSET, limit: $LIMIT"
echo "============================================================="

# Each sqlite3 output row is "fom_sm_id|pid"; rows contain no whitespace,
# so the unquoted $(...) expansion splits on lines as intended.
for x in $(echo "select fom_sm_id, pid from fom_desc where req_opcode like '$PATTERN' limit $LIMIT_OFFSET,$LIMIT;" | sqlite3 m0play.db); do
    IFS='|' read -r -a args <<< "$x"
    echo "FOM id: ${args[0]}, pid: ${args[1]}"
    # Quote the expansions: an empty field would otherwise collapse to
    # nothing and glob characters would be expanded by the shell.
    python3 fom_req.py -f "${args[0]}" -p "${args[1]}"
    echo "-------------------------------------------------------------"
done
|
#!/usr/bin/env bash
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
set -eu
source codebuild/bin/utils.sh

# Destination bucket/prefix for released criterion baselines.
AWS_S3_URL="s3://s2n-tls-logs/release/"

# Build s2n and the rust bindings needed by the integrationv2 tests.
install_deps(){
    make install
    source "$HOME"/.cargo/env
    make -C bindings/rust
}

# There can be only one artifact config per batch job,
# so we're scripting the baseline upload steps here.
upload_artifacts(){
    cd tests/integrationv2/target/criterion
    echo "Creating zip ${AWS_S3_PATH}"
    zip -r "${AWS_S3_PATH}" ./*
    aws s3 cp "${AWS_S3_PATH}" "${AWS_S3_URL}"
    echo "S3 upload complete"
}

# NOTE(review): `return` is only valid when this file is *sourced*; if it
# is ever executed directly this line errors and, with `set -e`, aborts.
# Presumably CI sources this script — confirm, or switch to `exit 0`.
if [ -d "third-party-src" ]; then
    # Don't run against c.a.c.
    return 0
fi

# Fetch creds and the latest release number.
gh_login s2n_codebuild_PRs
get_latest_release

AWS_S3_PATH="integv2criterion_${INTEGV2_TEST}_${LATEST_RELEASE_VER}.zip"
# `|| true` guards the pipeline so a missing object doesn't kill `set -e`.
zip_count=$(aws s3 ls "${AWS_S3_URL}${AWS_S3_PATH}"|wc -l||true)
if [ "$zip_count" -eq 0 ]; then
    echo "File ${AWS_S3_URL}${AWS_S3_PATH} not found"
    install_deps
    TOX_TEST_NAME=${INTEGV2_TEST}.py make integrationv2
    upload_artifacts
else
    echo "Found existing artifact for ${LATEST_RELEASE_VER}, not rebuilding."
fi
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import java.util.Properties
import kafka.admin.ReassignPartitionsCommand.Throttle
import kafka.log.LogConfig
import kafka.log.LogConfig._
import kafka.server.{ConfigType, DynamicConfig}
import kafka.utils.CoreUtils._
import kafka.utils.TestUtils._
import kafka.utils.{CoreUtils, Logging, TestUtils}
import kafka.zk.{AdminZkClient, KafkaZkClient, ZooKeeperTestHarness}
import org.easymock.EasyMock._
import org.easymock.{Capture, CaptureType, EasyMock}
import org.junit.{Before, Test}
import org.junit.Assert.{assertEquals, assertNull}
import scala.collection.{Seq, mutable}
import scala.collection.JavaConverters._
import org.apache.kafka.common.TopicPartition
/**
 * Tests for ReassignPartitionsCommand's replication-throttle handling:
 * which replicas receive leader/follower throttles during a reassignment,
 * and how broker/topic throttle configs are set, updated and removed.
 */
class ReassignPartitionsCommandTest extends ZooKeeperTestHarness with Logging {

  // Incremented by the stubbed AdminZkClient overrides so each test can
  // assert how many config-change calls were made.
  var calls = 0

  @Test
  def shouldFindMovingReplicas() {
    val control = new TopicPartition("topic1", 1) -> Seq(100, 102)
    val assigner = new ReassignPartitionsCommand(null, null, null, null, null)

    //Given partition 0 moves from broker 100 -> 102. Partition 1 does not move.
    val existing = Map(new TopicPartition("topic1", 0) -> Seq(100, 101), control)
    val proposed = Map(new TopicPartition("topic1", 0) -> Seq(101, 102), control)

    class TestAdminZkClient(val zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
      override def changeTopicConfig(topic: String, configChange: Properties): Unit = {
        assertEquals(Set("0:102"), toReplicaSet(configChange.get(FollowerReplicationThrottledReplicasProp))) //Should only be follower-throttle the moving replica
        assertEquals(Set("0:100","0:101"), toReplicaSet(configChange.get(LeaderReplicationThrottledReplicasProp))) //Should leader-throttle all existing (pre move) replicas
        calls += 1
      }
      override def fetchEntityConfig(entityType: String, entityName: String): Properties = {new Properties}
    }

    val admin = new TestAdminZkClient(zkClient)
    assigner.assignThrottledReplicas(existing, proposed, admin)
    assertEquals(1, calls)
  }

  @Test
  def shouldFindMovingReplicasWhenProposedIsSubsetOfExisting() {
    val assigner = new ReassignPartitionsCommand(null, null, null, null, null)

    //Given we have more existing partitions than we are proposing
    val existingSuperset = Map(
      new TopicPartition("topic1", 0) -> Seq(100, 101),
      new TopicPartition("topic1", 1) -> Seq(100, 102),
      new TopicPartition("topic1", 2) -> Seq(100, 101),
      new TopicPartition("topic2", 0) -> Seq(100, 101, 102),
      new TopicPartition("topic3", 0) -> Seq(100, 101, 102)
    )
    val proposedSubset = Map(
      new TopicPartition("topic1", 0) -> Seq(101, 102),
      new TopicPartition("topic1", 1) -> Seq(102),
      new TopicPartition("topic1", 2) -> Seq(100, 101, 102)
    )

    class TestAdminZkClient(val zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
      override def changeTopicConfig(topic: String, configChange: Properties): Unit = {
        assertEquals(Set("0:102","2:102"), toReplicaSet(configChange.get(FollowerReplicationThrottledReplicasProp)))
        assertEquals(Set("0:100","0:101","2:100","2:101"), toReplicaSet(configChange.get(LeaderReplicationThrottledReplicasProp)))
        assertEquals("topic1", topic)
        calls += 1
      }
      override def fetchEntityConfig(entityType: String, entityName: String): Properties = {new Properties}
    }

    val admin = new TestAdminZkClient(zkClient)
    //Then replicas should assign correctly (based on the proposed map)
    assigner.assignThrottledReplicas(existingSuperset, proposedSubset, admin)
    assertEquals(1, calls)
  }

  @Test
  def shouldFindMovingReplicasMultiplePartitions() {
    val control = new TopicPartition("topic1", 2) -> Seq(100, 102)
    val assigner = new ReassignPartitionsCommand(null, null, null, null, null)

    //Given partitions 0 & 1 moves from broker 100 -> 102. Partition 2 does not move.
    val existing = Map(new TopicPartition("topic1", 0) -> Seq(100, 101), new TopicPartition("topic1", 1) -> Seq(100, 101), control)
    val proposed = Map(new TopicPartition("topic1", 0) -> Seq(101, 102), new TopicPartition("topic1", 1) -> Seq(101, 102), control)

    class TestAdminZkClient(val zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
      override def changeTopicConfig(topic: String, configChange: Properties): Unit = {
        assertEquals(Set("0:102","1:102"), toReplicaSet(configChange.get(FollowerReplicationThrottledReplicasProp))) //Should only be follower-throttle the moving replica
        assertEquals(Set("0:100","0:101","1:100","1:101"), toReplicaSet(configChange.get(LeaderReplicationThrottledReplicasProp))) //Should leader-throttle all existing (pre move) replicas
        calls += 1
      }
      override def fetchEntityConfig(entityType: String, entityName: String): Properties = {new Properties}
    }

    val admin = new TestAdminZkClient(zkClient)
    //When
    assigner.assignThrottledReplicas(existing, proposed, admin)
    assertEquals(1, calls)
  }

  @Test
  def shouldFindMovingReplicasMultipleTopics() {
    val control = new TopicPartition("topic1", 1) -> Seq(100, 102)
    val assigner = new ReassignPartitionsCommand(null, null, null, null, null)

    //Given topics 1 -> move from broker 100 -> 102, topics 2 -> move from broker 101 -> 100
    val existing = Map(new TopicPartition("topic1", 0) -> Seq(100, 101), new TopicPartition("topic2", 0) -> Seq(101, 102), control)
    val proposed = Map(new TopicPartition("topic1", 0) -> Seq(101, 102), new TopicPartition("topic2", 0) -> Seq(100, 102), control)

    //Then
    class TestAdminZkClient(val zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
      override def changeTopicConfig(topic: String, configChange: Properties): Unit = {
        topic match {
          case "topic1" =>
            assertEquals(Set("0:100","0:101"), toReplicaSet(configChange.get(LeaderReplicationThrottledReplicasProp)))
            assertEquals(Set("0:102"), toReplicaSet(configChange.get(FollowerReplicationThrottledReplicasProp)))
          case "topic2" =>
            assertEquals(Set("0:101","0:102"), toReplicaSet(configChange.get(LeaderReplicationThrottledReplicasProp)))
            assertEquals(Set("0:100"), toReplicaSet(configChange.get(FollowerReplicationThrottledReplicasProp)))
          case _ => fail(s"Unexpected topic $topic")
        }
        calls += 1
      }
      override def fetchEntityConfig(entityType: String, entityName: String): Properties = {new Properties}
    }

    val admin = new TestAdminZkClient(zkClient)
    //When
    assigner.assignThrottledReplicas(existing, proposed, admin)
    assertEquals(2, calls)
  }

  @Test
  def shouldFindMovingReplicasMultipleTopicsAndPartitions() {
    val assigner = new ReassignPartitionsCommand(null, null, null, null, null)

    //Given
    val existing = Map(
      new TopicPartition("topic1", 0) -> Seq(100, 101),
      new TopicPartition("topic1", 1) -> Seq(100, 101),
      new TopicPartition("topic2", 0) -> Seq(101, 102),
      new TopicPartition("topic2", 1) -> Seq(101, 102)
    )
    val proposed = Map(
      new TopicPartition("topic1", 0) -> Seq(101, 102), //moves to 102
      new TopicPartition("topic1", 1) -> Seq(101, 102), //moves to 102
      new TopicPartition("topic2", 0) -> Seq(100, 102), //moves to 100
      new TopicPartition("topic2", 1) -> Seq(101, 100) //moves to 100
    )

    //Then
    class TestAdminZkClient(val zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
      override def changeTopicConfig(topic: String, configChange: Properties): Unit = {
        topic match {
          case "topic1" =>
            assertEquals(Set("0:102","1:102"), toReplicaSet(configChange.get(FollowerReplicationThrottledReplicasProp)))
            assertEquals(Set("0:100","0:101","1:100","1:101"), toReplicaSet(configChange.get(LeaderReplicationThrottledReplicasProp)))
          case "topic2" =>
            assertEquals(Set("0:100","1:100"), toReplicaSet(configChange.get(FollowerReplicationThrottledReplicasProp)))
            assertEquals(Set("0:101","0:102","1:101","1:102"), toReplicaSet(configChange.get(LeaderReplicationThrottledReplicasProp)))
          case _ => fail(s"Unexpected topic $topic")
        }
        calls += 1
      }
      override def fetchEntityConfig(entityType: String, entityName: String): Properties = {new Properties}
    }

    val admin = new TestAdminZkClient(zkClient)
    //When
    assigner.assignThrottledReplicas(existing, proposed, admin)
    assertEquals(2, calls)
  }

  @Test
  def shouldFindTwoMovingReplicasInSamePartition() {
    val control = new TopicPartition("topic1", 1) -> Seq(100, 102)
    val assigner = new ReassignPartitionsCommand(null, null, null, null, null)

    //Given partition 0 has 2 moves from broker 102 -> 104 & 103 -> 105
    val existing = Map(new TopicPartition("topic1", 0) -> Seq(100, 101, 102, 103), control)
    val proposed = Map(new TopicPartition("topic1", 0) -> Seq(100, 101, 104, 105), control)

    // Then
    class TestAdminZkClient(val zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
      override def changeTopicConfig(topic: String, configChange: Properties) = {
        assertEquals(Set("0:104","0:105"), toReplicaSet(configChange.get(FollowerReplicationThrottledReplicasProp))) //Should only be follower-throttle the moving replicas
        assertEquals(Set("0:100","0:101","0:102","0:103"), toReplicaSet(configChange.get(LeaderReplicationThrottledReplicasProp))) //Should leader-throttle all existing (pre move) replicas
        calls += 1
      }
      override def fetchEntityConfig(entityType: String, entityName: String): Properties = {new Properties}
    }

    val admin = new TestAdminZkClient(zkClient)
    //When
    assigner.assignThrottledReplicas(existing, proposed, admin)
    assertEquals(1, calls)
  }

  @Test
  def shouldNotOverwriteEntityConfigsWhenUpdatingThrottledReplicas(): Unit = {
    val control = new TopicPartition("topic1", 1) -> Seq(100, 102)
    val assigner = new ReassignPartitionsCommand(null, null, null, null, null)
    val existing = Map(new TopicPartition("topic1", 0) -> Seq(100, 101), control)
    val proposed = Map(new TopicPartition("topic1", 0) -> Seq(101, 102), control)

    //Given partition there are existing properties
    val existingProperties = propsWith("some-key", "some-value")

    //Then the dummy property should still be there
    class TestAdminZkClient(val zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
      override def changeTopicConfig(topic: String, configChange: Properties): Unit = {
        assertEquals("some-value", configChange.getProperty("some-key"))
        calls += 1
      }
      override def fetchEntityConfig(entityType: String, entityName: String): Properties = {
        existingProperties
      }
    }

    val admin = new TestAdminZkClient(zkClient)
    //When
    assigner.assignThrottledReplicas(existing, proposed, admin)
    assertEquals(1, calls)
  }

  @Test
  def shouldSetQuotaLimit(): Unit = {
    //Given
    val existing = Map(new TopicPartition("topic1", 0) -> Seq(100, 101))
    val proposed = mutable.Map(new TopicPartition("topic1", 0) -> Seq(101, 102))

    //Setup
    val zk = stubZKClient(existing)
    val admin = createMock(classOf[AdminZkClient])
    val propsCapture: Capture[Properties] = newCapture(CaptureType.ALL)
    val assigner = new ReassignPartitionsCommand(zk, None, proposed, Map.empty, admin)
    expect(admin.fetchEntityConfig(anyString(), anyString())).andStubReturn(new Properties)
    expect(admin.changeBrokerConfig(anyObject().asInstanceOf[List[Int]], capture(propsCapture))).anyTimes()
    replay(admin)

    //When
    assigner.maybeLimit(Throttle(1000))

    //Then
    for (actual <- propsCapture.getValues.asScala) {
      assertEquals("1000", actual.getProperty(DynamicConfig.Broker.LeaderReplicationThrottledRateProp))
      assertEquals("1000", actual.getProperty(DynamicConfig.Broker.FollowerReplicationThrottledRateProp))
    }
    assertEquals(3, propsCapture.getValues.size) //3 brokers
  }

  @Test
  def shouldUpdateQuotaLimit(): Unit = {
    //Given
    val existing = Map(new TopicPartition("topic1", 0) -> Seq(100, 101))
    val proposed = mutable.Map(new TopicPartition("topic1", 0) -> Seq(101, 102))

    //Setup
    val zk = stubZKClient(existing)
    val admin = createMock(classOf[AdminZkClient])
    val propsCapture: Capture[Properties] = newCapture(CaptureType.ALL)
    val assigner = new ReassignPartitionsCommand(zk, None, proposed, Map.empty, admin)
    expect(admin.changeBrokerConfig(anyObject().asInstanceOf[List[Int]], capture(propsCapture))).anyTimes()

    //Expect the existing broker config to be changed from 10/100 to 1000
    val existingConfigs = CoreUtils.propsWith(
      (DynamicConfig.Broker.FollowerReplicationThrottledRateProp, "10"),
      (DynamicConfig.Broker.LeaderReplicationThrottledRateProp, "100")
    )
    expect(admin.fetchEntityConfig(is(ConfigType.Broker), is("100"))).andReturn(copyOf(existingConfigs))
    expect(admin.fetchEntityConfig(is(ConfigType.Broker), is("101"))).andReturn(copyOf(existingConfigs))
    expect(admin.fetchEntityConfig(is(ConfigType.Broker), is("102"))).andReturn(copyOf(existingConfigs))
    replay(admin)

    //When
    assigner.maybeLimit(Throttle(1000))

    //Then
    for (actual <- propsCapture.getValues.asScala) {
      assertEquals("1000", actual.getProperty(DynamicConfig.Broker.LeaderReplicationThrottledRateProp))
      assertEquals("1000", actual.getProperty(DynamicConfig.Broker.FollowerReplicationThrottledRateProp))
    }
    assertEquals(3, propsCapture.getValues.size) //three brokers
  }

  @Test
  def shouldNotOverwriteExistingPropertiesWhenLimitIsAdded(): Unit = {
    //Given
    val existing = Map(new TopicPartition("topic1", 0) -> Seq(100, 101))
    val proposed = mutable.Map(new TopicPartition("topic1", 0) -> Seq(101, 102))

    //Setup
    val zk = stubZKClient(existing)
    val admin = createMock(classOf[AdminZkClient])
    val propsCapture: Capture[Properties] = newCapture(CaptureType.ALL)
    val assigner = new ReassignPartitionsCommand(zk, None, proposed, Map.empty, admin)
    expect(admin.changeBrokerConfig(anyObject().asInstanceOf[List[Int]], capture(propsCapture))).anyTimes()

    //Given there is some existing config
    expect(admin.fetchEntityConfig(is(ConfigType.Broker), anyString())).andReturn(
      propsWith("useful.key", "useful.value")).atLeastOnce()
    replay(admin)

    //When
    assigner.maybeLimit(Throttle(1000))

    //Then other property remains
    for (actual <- propsCapture.getValues.asScala) {
      assertEquals("useful.value", actual.getProperty("useful.key"))
      assertEquals("1000", actual.getProperty(DynamicConfig.Broker.LeaderReplicationThrottledRateProp))
      assertEquals("1000", actual.getProperty(DynamicConfig.Broker.FollowerReplicationThrottledRateProp))
    }
    assertEquals(3, propsCapture.getValues.size) //3 brokers
  }

  @Test
  def shouldRemoveThrottleLimitFromAllBrokers(): Unit = {
    //Given 3 brokers, but with assignment only covering 2 of them
    val brokers = Seq(100, 101, 102)
    val status = mutable.Map(new TopicPartition("topic1", 0) -> ReassignmentCompleted)
    val existingBrokerConfigs = propsWith(
      (DynamicConfig.Broker.FollowerReplicationThrottledRateProp, "10"),
      (DynamicConfig.Broker.LeaderReplicationThrottledRateProp, "100"),
      ("useful.key", "value")
    )

    //Setup
    val zk = stubZKClient(brokers = brokers)
    val admin = createMock(classOf[AdminZkClient])
    val propsCapture: Capture[Properties] = newCapture(CaptureType.ALL)
    expect(admin.fetchEntityConfig(is(ConfigType.Topic), anyString())).andStubReturn(new Properties)
    expect(admin.changeBrokerConfig(anyObject().asInstanceOf[Seq[Int]], capture(propsCapture))).anyTimes()
    //Stub each invocation as EasyMock caches the return value which can be mutated
    expect(admin.fetchEntityConfig(is(ConfigType.Broker), is("100"))).andReturn(copyOf(existingBrokerConfigs))
    expect(admin.fetchEntityConfig(is(ConfigType.Broker), is("101"))).andReturn(copyOf(existingBrokerConfigs))
    expect(admin.fetchEntityConfig(is(ConfigType.Broker), is("102"))).andReturn(copyOf(existingBrokerConfigs))
    replay(admin)

    //When
    ReassignPartitionsCommand.removeThrottle(zk, status, Map.empty, admin)

    //Then props should have gone (dummy remains)
    for (capture <- propsCapture.getValues.asScala) {
      assertEquals("value", capture.get("useful.key"))
      assertNull(capture.get(DynamicConfig.Broker.FollowerReplicationThrottledRateProp))
      assertNull(capture.get(DynamicConfig.Broker.LeaderReplicationThrottledRateProp))
    }
    assertEquals(3, propsCapture.getValues.size) //3 brokers
  }

  @Test
  def shouldRemoveThrottleReplicaListBasedOnProposedAssignment(): Unit = {
    //Given two topics with existing config
    val status = mutable.Map(new TopicPartition("topic1", 0) -> ReassignmentCompleted,
      new TopicPartition("topic2", 0) -> ReassignmentCompleted)
    val existingConfigs = CoreUtils.propsWith(
      (LogConfig.LeaderReplicationThrottledReplicasProp, "1:100:2:100"),
      (LogConfig.FollowerReplicationThrottledReplicasProp, "1:101,2:101"),
      ("useful.key", "value")
    )

    //Setup
    val zk = stubZKClient(brokers = Seq(100, 101))
    val admin = createMock(classOf[AdminZkClient])
    val propsCapture: Capture[Properties] = newCapture(CaptureType.ALL)
    expect(admin.fetchEntityConfig(is(ConfigType.Broker), anyString())).andStubReturn(new Properties)
    expect(admin.fetchEntityConfig(is(ConfigType.Topic), is("topic1"))).andStubReturn(copyOf(existingConfigs))
    expect(admin.fetchEntityConfig(is(ConfigType.Topic), is("topic2"))).andStubReturn(copyOf(existingConfigs))
    //Should change both topics
    expect(admin.changeTopicConfig(is("topic1"), capture(propsCapture)))
    expect(admin.changeTopicConfig(is("topic2"), capture(propsCapture)))
    replay(admin)

    //When
    ReassignPartitionsCommand.removeThrottle(zk, status, Map.empty, admin)

    //Then props should have gone (dummy remains)
    for (actual <- propsCapture.getValues.asScala) {
      assertEquals("value", actual.getProperty("useful.key"))
      assertNull(actual.getProperty(LogConfig.LeaderReplicationThrottledReplicasProp))
      assertNull(actual.getProperty(LogConfig.FollowerReplicationThrottledReplicasProp))
    }
    assertEquals(2, propsCapture.getValues.size) //2 topics
  }

  //Override eq as is for brevity
  def is[T](v: T): T = EasyMock.eq(v)

  @Before
  def setup(): Unit = {
    // Reset the shared call counter before each test.
    calls = 0
  }

  // Builds an EasyMock KafkaZkClient stubbed with the given assignment and
  // broker list.
  def stubZKClient(existingAssignment: Map[TopicPartition, Seq[Int]] = Map[TopicPartition, Seq[Int]](),
                   brokers: Seq[Int] = Seq[Int]()): KafkaZkClient = {
    val zkClient = createMock(classOf[KafkaZkClient])
    expect(zkClient.getReplicaAssignmentForTopics(anyObject().asInstanceOf[Set[String]])).andStubReturn(existingAssignment)
    expect(zkClient.getAllBrokersInCluster).andStubReturn(brokers.map(TestUtils.createBroker(_, "", 1)))
    replay(zkClient)
    zkClient
  }

  // Splits a comma-separated "partition:broker" throttle list into a Set for
  // order-insensitive comparison.
  def toReplicaSet(throttledReplicasString: Any): Set[String] = {
    throttledReplicasString.toString.split(",").toSet
  }
}
|
<filename>src/lib/calculators/ann-output/weights-to-signals.ts
import { Matrix } from 'matrix-calculus';
import { Signals } from '../../signals';
import { Weights } from '../../weights/layers-pair-weights';
type Data = {
    weights: Weights;
    signals: Signals;
};

/**
 * Multiplies the transpose of the weight matrix by the signal matrix and
 * wraps the product in a new Signals instance.
 */
const weightsToSignals = ({ weights, signals }: Data): Signals => {
    const product: Matrix = weights
        .getMatrix()
        .transpose()
        .multiply(signals.getMatrix());
    return new Signals(product);
};

export default weightsToSignals;
|
#!/bin/sh
# Container entrypoint for AppDaemon: seeds default configuration on first
# run, patches appdaemon.yaml from environment variables, installs any
# user-supplied python requirements, then launches appdaemon.

CONF=/conf # user-mounted configuration volume
CONF_SRC=/usr/src/app/conf # defaults shipped in the image

# if configuration file doesn't exist, copy the default
if [ ! -f $CONF/appdaemon.yaml ]; then
    cp $CONF_SRC/appdaemon.yaml.example $CONF/appdaemon.yaml
fi
# if apps folder doesn't exist, copy the default
if [ ! -d $CONF/apps ]; then
    cp -r $CONF_SRC/apps $CONF/apps
fi
# if apps file doesn't exist, copy the default
if [ ! -f $CONF/apps/apps.yaml ]; then
    cp $CONF_SRC/apps/apps.yaml.example $CONF/apps/apps.yaml
fi
# if dashboards folder doesn't exist, copy the default
if [ ! -d $CONF/dashboards ]; then
    cp -r $CONF_SRC/dashboards $CONF/dashboards
fi
# if ENV HA_URL is set, change the value in appdaemon.yaml
# (the inner sed escapes \ / & so the value is safe inside the outer sed)
if [ -n "$HA_URL" ]; then
    sed -i "s/^ ha_url:.*/ ha_url: $(echo $HA_URL | sed -e 's/\\/\\\\/g; s/\//\\\//g; s/&/\\\&/g')/" $CONF/appdaemon.yaml
fi
# if ENV HA_KEY is set, change the value in appdaemon.yaml
if [ -n "$TOKEN" ]; then
    sed -i "s/^ token:.*/ token: $(echo $TOKEN | sed -e 's/\\/\\\\/g; s/\//\\\//g; s/&/\\\&/g')/" $CONF/appdaemon.yaml
fi
# if ENV DASH_URL is set, change the value in appdaemon.yaml
# NOTE(review): the insertion branch anchors on a "# Apps" marker line and
# writes literal \r\n sequences — presumably matching the example file's
# layout and line endings; confirm against appdaemon.yaml.example.
if [ -n "$DASH_URL" ]; then
    if grep -q "^ dash_url" $CONF/appdaemon.yaml; then
        sed -i "s/^ dash_url:.*/ dash_url: $(echo $DASH_URL | sed -e 's/\\/\\\\/g; s/\//\\\//g; s/&/\\\&/g')/" $CONF/appdaemon.yaml
    else
        sed -i "s/# Apps/HADashboard:\r\n dash_url: $(echo $DASH_URL | sed -e 's/\\/\\\\/g; s/\//\\\//g; s/&/\\\&/g')\r\n# Apps/" $CONF/appdaemon.yaml
    fi
fi
#check recursively under CONF for additional python dependencies defined in requirements.txt
find $CONF -name requirements.txt -exec pip3 install --upgrade -r {} \;
# Lets run it! ($EXTRA_CMD is intentionally unquoted so it word-splits
# into separate arguments)
exec appdaemon -c $CONF $EXTRA_CMD
|
#!/bin/sh
#Hugh O'Brien 2014, obrien.hugh@gmail.com
# Dump selected radiotap fields from a capture to a compressed CSV,
# post-processing through proc.py and collecting its stderr separately.
#if tshark is slow, but doesn't seem to be cpu or mem bound, check to see if it's trying to do dns lookups

# Exit with a non-zero status on error so callers can detect failure:
# a bare `exit` would propagate the status of the preceding echo (0).
[ -z "$1" ] && echo "specify input file" && exit 1
post_proc='proc.py'
[ ! -f "$post_proc" ] && echo "$post_proc not found" && exit 1

base="$(basename "$1")"
outfile="$base.csv.xz"
errfile="$base.csv.err"

tshark -r "$1" -T fields -E separator=',' -e frame.number -e radiotap.length -e radiotap.mactime -e radiotap.flags.preamble -e radiotap.datarate -e frame.len -e radiotap.dbm_antsignal | python "$post_proc" 2>"$errfile" | xz -9 --extreme --verbose > "$outfile"
cat "$errfile"
|
#!/bin/bash
# CI bootstrap for melody-bot: installs the `n` node version manager,
# configures the bot's git identity, optionally authenticates against an
# npm registry, then runs the command passed as arguments.
set -e

# install https://github.com/tj/n
sh -c "npm install -g n"
sh -c "n -V"

# adding git config
sh -c "git config --global user.email \"melody-bot@trivago.com\""
sh -c "git config --global user.name \"melody-bot\""

# some useful variables
export CURRENT_COMMIT_TEXT=$(git log --oneline --format=%B -n 1 HEAD)

# For test only
if [ -n "$NPM_AUTH_TOKEN" ]; then
    echo "ADDING TOKEN"
    echo "//${REGISTRY_URL}/:_authToken=${NPM_AUTH_TOKEN}" > .npmrc
    sh -c "npm whoami --registry https://${REGISTRY_URL}"
fi

# NOTE(review): "$*" joins all arguments into one string that the child
# shell re-parses, so per-argument quoting is lost. Presumably callers
# pass a single command string — confirm before changing to "$@".
sh -c "$*"
|
<gh_stars>10-100
package org.multibit.hd.core.dto;
import org.bitcoinj.core.Address;

import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;

import java.util.List;
import java.util.Objects;
import java.util.UUID;
/**
 * <p>DTO to provide the following to Contact API:</p>
 * <ul>
 * <li>Contact details</li>
 * </ul>
 *
 * <p>Equality and hash code are based solely on the contact's id.</p>
 *
 * @since 0.0.1
 *
 */
public class Contact {

  // The sole basis for equals/hashCode.
  private UUID id;

  private String name;

  private Optional<String> email = Optional.absent();
  private Optional<String> imagePath = Optional.absent();
  private Optional<Address> bitcoinAddress = Optional.absent();
  private Optional<String> extendedPublicKey = Optional.absent();
  private Optional<String> notes = Optional.absent();

  private List<String> tags = Lists.newArrayList();

  /**
   * @param id   The unique identifier
   * @param name The first name
   */
  public Contact(UUID id, String name) {
    this.id = id;
    this.name = name;
  }

  /**
   * @return The unique identifier for this contact
   */
  public UUID getId() {
    return id;
  }

  /**
   * @return The first name
   */
  public String getName() {
    return name;
  }

  public void setName(String name) {
    this.name = name;
  }

  /**
   * @return The optional email
   */
  public Optional<String> getEmail() {
    return email;
  }

  public void setEmail(String email) {
    this.email = Optional.fromNullable(email);
  }

  /**
   * @return The optional image file path
   */
  public Optional<String> getImagePath() {
    return imagePath;
  }

  public void setImagePath(String imagePath) {
    this.imagePath = Optional.fromNullable(imagePath);
  }

  /**
   * @return The Bitcoin address
   */
  public Optional<Address> getBitcoinAddress() {
    return bitcoinAddress;
  }

  public void setBitcoinAddress(Address bitcoinAddress) {
    this.bitcoinAddress = Optional.fromNullable(bitcoinAddress);
  }

  /**
   * @return The extended public key (HD address generator)
   */
  public Optional<String> getExtendedPublicKey() {
    return extendedPublicKey;
  }

  public void setExtendedPublicKey(String extendedPublicKey) {
    this.extendedPublicKey = Optional.fromNullable(extendedPublicKey);
  }

  /**
   * @return The notes associated with the contact
   */
  public Optional<String> getNotes() {
    return notes;
  }

  public void setNotes(String notes) {
    this.notes = Optional.fromNullable(notes);
  }

  /**
   * @return The tags associated with the contact
   */
  public List<String> getTags() {
    return tags;
  }

  public void setTags(List<String> tags) {
    Preconditions.checkNotNull(tags, "'tags' must be present");
    this.tags = tags;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;

    Contact contact = (Contact) o;
    // Identity is defined by id alone; Objects.equals is null-safe and
    // replaces the original double-negated ternary.
    return Objects.equals(id, contact.id);
  }

  @Override
  public int hashCode() {
    // Consistent with equals: null-safe hash of the id only.
    return Objects.hashCode(id);
  }

  @Override
  public String toString() {
    return "Contact{" +
      "id=" + id +
      ", name='" + name + '\'' +
      ", email=" + email +
      ", imagePath=" + imagePath +
      ", bitcoinAddress=****" +
      ", extendedPublicKey=****" +
      ", notes=****" +
      ", tags=****" +
      '}';
  }
}
|
#!/bin/bash
# Site build helper: warn about unused post images, then refresh vendored
# front-end assets from node_modules.

# Warn (but don't fail) when an image under assets/images/posts is not
# referenced by any post.
for file in assets/images/posts/*; do
    if ! grep -q "$file" _posts/*; then
        echo "WARNING: Image $file not used in any post"
    fi
done

npm install

# Copy the vendored libraries the theme expects into the asset tree.
cp ./node_modules/jquery/dist/jquery.min.js ./assets/js/jquery.min.js
cp ./node_modules/vanilla-fitvids/jquery.fitvids.js ./assets/js/jquery.fitvids.js

rm -rf ./assets/fonts/*
cp ./node_modules/font-awesome/css/font-awesome.min.css ./assets/css/font-awesome.min.css
cp -r ./node_modules/font-awesome/fonts/ ./assets/fonts/

rm -rf ./_sass/bourbon/*
cp -r ./node_modules/bourbon/core/ ./_sass/bourbon/

# node_modules is only needed as the source of the assets above.
rm -rf ./node_modules
<gh_stars>0
import axios from 'axios';
import {API_BASE_URL} from "../constants/ServerUrl";

// Pre-configured HTTP client for the backend API: all requests share the
// base URL and send/expect JSON.
const apiClient = axios.create({
    baseURL: API_BASE_URL,
    headers: {
        'Content-Type': 'application/json',
    },
});

export default apiClient;
|
/// <summary>Computes horizontal layout positions for repeated fields.</summary>
public class FieldManager
{
    /// <summary>Gap inserted between adjacent fields.</summary>
    public int MultiFieldSpacing { get; set; }

    /// <summary>
    /// Returns the x coordinate of the field at <paramref name="index"/>,
    /// laid out left-to-right starting at <paramref name="position"/> with
    /// <see cref="MultiFieldSpacing"/> between fields of equal width.
    /// </summary>
    public int CalculateFieldXPosition(int position, int fieldWidth, int index)
        => position + (fieldWidth + MultiFieldSpacing) * index;
}
# Artisan (Laravel CLI) shortcuts
alias art='php artisan'
alias art-migrate='php artisan migrate'
# NOTE(review): `migrate-new` is not a stock artisan command — presumably a
# project-specific one; confirm it exists in this project.
alias art-migrate-new='php artisan migrate-new'
|
/**
 * Best Time to Buy and Sell Stock (single transaction).
 *
 * Tracks the lowest price seen so far and the best achievable profit from
 * selling at the current price. Returns 0 for an empty array or when no
 * profitable transaction exists.
 */
fun maxProfit(prices: IntArray): Int {
    var lowestSoFar = Int.MAX_VALUE
    var bestProfit = 0
    for (current in prices) {
        if (current < lowestSoFar) {
            lowestSoFar = current
        } else if (current - lowestSoFar > bestProfit) {
            bestProfit = current - lowestSoFar
        }
    }
    return bestProfit
}
// Usage Example
val prices = intArrayOf(7, 1, 5, 3, 6, 4)
val maxProfit = maxProfit(prices)
println("The maximum profit is: $maxProfit") // buy at 1, sell at 6 -> 5
import tweepy
def hashtag_search(hashtag):
    """Return tweets matching *hashtag* from the Twitter search API.

    The credential values below are placeholders and must be replaced
    with real application keys before use.
    """
    consumer_key = "YOUR_CONSUMER_KEY"
    consumer_secret = "YOUR_CONSUMER_SECRET"
    access_token = "YOUR_ACCESS_TOKEN"
    access_token_secret = "YOUR_ACCESS_TOKEN_SECRET"

    # OAuth 1.0a handshake, then query the search endpoint.
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    client = tweepy.API(auth)
    return client.search(q=hashtag)
import re
from allegation.utils.query import OfficerQuery
from common.constants import ALLEGATION_LIMIT
from common.models import Officer, Allegation
# TODO: Consider refactoring this suggestion service with a better design,
# possibly reusing the strategy planned for the desktop version.
def wrap_as_suggestion(suggestion):
    """Wrap a model instance in a one-element suggestion list, or [] for falsy input."""
    if not suggestion:
        return []
    return [suggestion.as_suggestion_entry()]
def suggest_officer_star(query):
    """Suggest the officer whose badge (star) number equals *query*, if any."""
    try:
        star = float(query)
    except ValueError:
        # Non-numeric query: not a star-number search.
        return []
    first_match = Officer.objects.filter(star=star).first()
    return wrap_as_suggestion(first_match)
def get_crid_from_query(query):
    """Extract a complaint-register id (CRID) from a free-text query.

    Accepts forms like ``"123"``, ``"cr 123"`` or ``"crid123"``
    (case-insensitive) and returns the digit string, or ``''`` when the
    query is not CRID-like.
    """
    # Raw string: '\s' and '\d' in a plain literal are invalid escape
    # sequences (a DeprecationWarning since Python 3.6).
    pattern = re.compile(r'^(cr|crid)?(\s+)?(\d+)$')
    matcher = pattern.match(query.lower())
    return matcher.group(3) if matcher else ''
def suggest_crid(query):
    """Suggest the allegation whose CRID appears in *query*, if any."""
    crid = get_crid_from_query(query)
    # Prefetch related officers/categories so the suggestion entry can be
    # rendered without extra queries.
    first_match = (
        Allegation.objects.filter(crid=crid)
        .prefetch_related('officerallegation_set__officer',
                          'officerallegation_set__cat')
        .first()
    )
    return wrap_as_suggestion(first_match)
def suggest_officer_name(query):
    """Suggest officers matching *query* by name, most-complained-about first."""
    matches = (
        Officer.objects.filter(OfficerQuery.condition_by_name(query))
        .order_by('-allegations_count')[:ALLEGATION_LIMIT]
    )
    return [match.as_suggestion_entry() for match in matches]
def suggest(query):
    """Run every suggester against *query* and concatenate their results."""
    suggesters = (suggest_officer_name, suggest_crid, suggest_officer_star)
    return [entry for suggester in suggesters for entry in suggester(query)]
|
<filename>rules-core/src/main/java/org/okra/rules/support/serial/SerialRulesEngine.java
/*
*
*
* Copyright (c) 2021. - TinyZ.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.okra.rules.support.serial;
import org.okra.rules.core.RuleContext;
import org.okra.rules.core.RulesEngine;
import org.okra.rules.core.api.Rule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
/**
* Serial Rule Engine. Thread-safe. Make sure rule execute serial by unique key.
*
* @author TinyZ.
* @since 2019.03.06
*/
public abstract class SerialRulesEngine extends RulesEngine {

    private static final Logger LOG = LoggerFactory.getLogger(SerialRulesEngine.class);

    // One run worker per serial key; ConcurrentHashMap keeps lookup/creation
    // thread-safe across concurrent fire() calls.
    protected Map<Object, SerialRuleRunWorker> runWorkerMap = new ConcurrentHashMap<>();

    /**
     * Queue the rule on the worker bound to this context's serial key, so
     * rules sharing a key execute one at a time instead of firing directly.
     */
    @Override
    public void fire(Rule rule, RuleContext context) {
        SerialRuleRunWorker observer = getSerialRunObserver(context);
        observer.offer(this, rule, context);
    }

    /**
     * Callback used by the worker to actually execute a queued rule;
     * delegates to the parent engine's firing logic.
     */
    public void onFireImpl(Rule rule, RuleContext ctx) {
        super.fireImpl(rule, ctx);
    }

    // Look up (or lazily create) the worker for this context's serial key.
    // The key must be non-null.
    private SerialRuleRunWorker getSerialRunObserver(RuleContext context) {
        Object key = getSerialRunWorkerKey(context);
        Objects.requireNonNull(key);
        return runWorkerMap.computeIfAbsent(key, o -> newSerialRunWorker(context));
    }

    /** Drop the worker associated with this context's serial key. */
    public void invalidate(RuleContext context) {
        this.invalidate(getSerialRunWorkerKey(context));
    }

    /** Drop the worker associated with the given (non-null) key. */
    public void invalidate(Object key) {
        Objects.requireNonNull(key);
        runWorkerMap.remove(key);
    }

    /** Drop every cached worker. */
    public void invalidateAll() {
        runWorkerMap.clear();
    }

    /**
     * @return the key used to serialize rule execution for this context;
     *         must not be null.
     */
    public abstract Object getSerialRunWorkerKey(RuleContext context);

    /**
     * Create a new worker for the given context.
     * Should be implemented with the real worker-construction logic.
     */
    public abstract SerialRuleRunWorker newSerialRunWorker(RuleContext context);
}
|
package com.futureplatforms.kirin.generated.demo.hellokirin;
/**
 * Callback interface for the demo list screen (generated Kirin screen-module
 * binding).
 */
public interface IDumbListScreenModule {

    /**
     * Invoked when a row of the list is clicked.
     *
     * @param index zero-based position of the clicked row
     * @param label display text of the clicked row
     */
    void onListItemClick(int index, String label);
}
<filename>src/main/scala/com/tracy/competition/service/FileService.scala<gh_stars>0
package com.tracy.competition.service
import java.util
import java.util.List
import com.tracy.competition.domain.entity.File
/**
* @author Tracy
* @date 2021/2/9 13:34
*/
trait FileService {

  /**
   * Insert a new file record.
   *
   * @param file file
   */
  def insertFile(file: File): Unit

  /**
   * Find the files attached to a competition.
   *
   * @param competitionId competitionId
   * @return the files belonging to the competition
   */
  def findFileByCompetitionId(competitionId: String): util.List[File]

  /**
   * Delete the files attached to a competition.
   *
   * @param competitionId competitionId
   */
  def deleteFileByCompetitionId(competitionId: String): Unit

  /**
   * Look a file up by its file id.
   *
   * @param fileId fileId
   * @return the matching file
   */
  def findFileById(fileId: String): File

  /**
   * Delete a file by its file id.
   *
   * @param fileId fileId
   */
  def deleteFileById(fileId: String): Unit

  /**
   * Find the files attached to a notification (announcement).
   *
   * @param notificationId notificationId
   * @return the files belonging to the notification
   */
  def findFileByNotificationId(notificationId: String): util.List[File]
}
|
<reponame>MccCareplan/patientsmartapp<gh_stars>0
import { createAction, props } from '@ngrx/store';
// Action type strings for the dev-mode feature slice of the store.
export const SELECT = '[DevMode] Select';
export const EDIT = '[DevMode] Edit';
export const LOAD_DEVMODE_SUCCESS = '[DevMode] Load DevModes Success';
export const LOAD_DEVMODE_FAIL = '[DevMode] Load DevModes Failure';
export const EDIT_DEVMODE_SUCCESS = '[DevMode] Edit DevModes Success';
export const EDIT_DEVMODE_FAIL = '[DevMode] Edit DevModes Failure';

// Dispatched when the dev-mode flag has been loaded successfully.
export const LoadDevModesSuccessAction = createAction(
  LOAD_DEVMODE_SUCCESS,
  props<{ data: boolean }>()
);

// Dispatched when loading the dev-mode flag fails.
export const LoadDevModesFailureAction = createAction(
  LOAD_DEVMODE_FAIL,
  props<{ error: any }>()
);

// Dispatched to select (toggle) the current dev-mode value.
export const SelectAction = createAction(
  SELECT,
  props<{ data: boolean }>()
);

// Dispatched to request an edit of the dev-mode flag.
export const EditAction = createAction(
  EDIT,
  props<{ data: boolean }>()
);

// Dispatched when editing the dev-mode flag succeeds.
export const EditDevModesSuccessAction = createAction(
  EDIT_DEVMODE_SUCCESS,
  props<{ data: boolean }>()
);

// Dispatched when editing the dev-mode flag fails.
export const EditDevModesFailureAction = createAction(
  EDIT_DEVMODE_FAIL,
  props<{ error: any }>()
);
|
<filename>client/javascripts/actions/api/index.ts
import { ActionType as DDoSActionType, getDDoSDetection } from './ddos_detection'
import { ActionType as FilterActionType, getFilter } from './filter'
import { ActionType as FrowRateActionType, getFlowRate } from './flow_rate'
import { ActionType as PacketIntervalActionType, getPacketInterval } from './packet_interval'
import { ActionType as StatisticActionType, getStatistic } from './statistic'
// Single merged map of every API action-type constant.
// NOTE(review): "FrowRate" looks like a misspelling of "FlowRate" carried in
// from the import alias — confirm against ./flow_rate before renaming.
export const ActionType = {
  ...DDoSActionType,
  ...FilterActionType,
  ...FrowRateActionType,
  ...PacketIntervalActionType,
  ...StatisticActionType,
}

// Union of the individual action-type unions, mirroring the object above.
export type ActionType =
  | DDoSActionType
  | FilterActionType
  | FrowRateActionType
  | PacketIntervalActionType
  | StatisticActionType

// Shape of every dispatched API action.
export interface Action {
  type: ActionType
  payload: any
}

// All API action creators, bundled for convenient import elsewhere.
export const actions = {
  getDDoSDetection,
  getFilter,
  getFlowRate,
  getPacketInterval,
  getStatistic,
}
|
package main
import (
"context"
"fmt"
"log"
"os"
"strings"
"time"
"nhooyr.io/websocket"
)
const (
	// Name identifies this program in its log prefix, usage message and
	// ticker-request id.
	Name = "client"
)
// main parses "serverURL ticker1 [ticker2...tickern]" from the command line
// and loops forever asking the server for ticker updates, sleeping between
// attempts so a dead server is not hammered.
func main() {
	// Prefix every log line with "client:<pid>" so concurrent clients can
	// be told apart.
	myId := fmt.Sprintf("%s:%d ", Name, os.Getpid())
	log.SetPrefix(myId)
	log.SetFlags(log.Ltime)

	args := os.Args[1:]
	if len(args) < 2 {
		fmt.Println("Usage:", Name, "serverURL ticker1 [ticker2...tickern]")
		return
	}
	serverURL := args[0]
	args = args[1:] // Shift

	// Iterate on server timeouts and errors, but give it a breather if it does fail
	// so that we're not hammering against a dead or recovering server.
	for {
		err := askServer(myId, serverURL, args)
		if err != nil {
			log.Println("Warning:", err.Error())
		}
		time.Sleep(serverRetryConnectDelay) // Breather
	}
}
// askServer connects to the websocket server and sends a ticker update request
// message. It then reads server messages until an error or timeout occurs.
// Return on error or closed socket and let caller retry.
func askServer(myId, serverURL string, tickers []string) (err error) {
	log.Println("Asking", serverURL, "to watch:", strings.Join(tickers, ","))
	dialCtx, cancel := context.WithTimeout(context.Background(), serverDialTimeout)
	defer cancel()
	conn, _, err := websocket.Dial(dialCtx, serverURL, nil)
	if err != nil {
		return
	}
	defer conn.Close(websocket.StatusNormalClosure, "")

	// Connection is established. Create a parent connection context so that all
	// interested parties know when the connection dies.
	connCtx, connCancel := context.WithCancel(context.Background())
	defer connCancel()

	// Send the server our tickers of interest.
	out := &TickerRequest{Id: myId, Tickers: tickers}
	err = WriteMessage(connCtx, conn, out)
	if err != nil {
		return
	}

	// Fix: hand pingPong connCancel (not the dial context's cancel) so that a
	// ping failure actually cancels connCtx and unblocks the reader loop, as
	// pingPong's contract describes.
	go pingPong(connCtx, connCancel, conn) // Start application level ping/pong exchange

	// Loop forever reading server messages. A timeout is fatal as it means that the
	// ping/pong exchange has failed.
	for {
		ctx, cancel := context.WithTimeout(connCtx, pingpongInterval*2)
		in, err := ReadMessage(ctx, conn)
		cancel()
		if err != nil {
			return err
		}
		switch msg := in.(type) { // Dispatch on the messages we handle
		case *TickerChange:
			s := ""
			for _, change := range msg.Changes {
				s += fmt.Sprintf(" %s at $%0.2f", change.Symbol, change.Price)
			}
			log.Printf("Change notification(s): (%d)%s", len(msg.Changes), s)
		case *Pong:
			sentAt := time.Unix(msg.Seconds, msg.Nanos)
			latency := time.Since(sentAt) // idiomatic form of time.Now().Sub(sentAt)
			log.Println("Pong", msg.Sequence, "latency", latency)
		default:
			return fmt.Errorf("Unexpected Message Type %v", in)
		}
	}
}
// pingPong is started as a separate go routine which exchanges application-level Ping
// messages with the server until the context tells it to disappear. Any i/o error results
// in cancelling the parent context which notifies all interested parties - in this case
// really just the reader loop. The big-picture is that the websocket will fail if the
// ping exchange fails.
//
// Why is an application-level ping used rather than the intrinsic websocket ping? Mainly
// because the websocket package doesn't expose the Pong message making it impossible for
// a server to determine that a client is unresponsive. Unfortunate but true.
//
// Mind you, there is nothing magic about the websocket ping messages. They are just a
// different message type, so providing our own version in the application layer is not
// any less efficient and supporting it in the server is a mere matter of a few lines of
// code, so no big deal.
func pingPong(parentCtx context.Context, parentCancel context.CancelFunc, conn *websocket.Conn) {
	// Whatever the exit path, cancel the parent so the reader loop learns
	// the ping exchange is over.
	defer parentCancel()
	defer log.Println("pingPong Exit")
	for seq := 0; ; seq++ {
		select {
		case <-parentCtx.Done(): // Did the reader loop cancel the context?
			return
		case now := <-time.After(pingpongInterval): // Or is it time for a Ping?
			// Carry the send time so the Pong handler can compute latency.
			msg := &Ping{Sequence: seq,
				Seconds: now.Unix(), Nanos: int64(now.Nanosecond())}
			log.Println("Ping", seq)
			err := WriteMessage(parentCtx, conn, msg)
			if err != nil {
				log.Println("pingPong i/o error", err)
				return
			}
		}
	}
}
|
package main
import "fmt"
// nextCharacter returns word with every character replaced by the next
// character of the alphabet, wrapping 'z' -> 'a' and 'Z' -> 'A'.
//
// Bug fix: the original mapped both 'z' (122) and 'Z' (90) to ASCII 96,
// which is the backtick '`', instead of wrapping to 'a' / 'A'.
func nextCharacter(word string) string {
	newWord := ""
	for _, ch := range word {
		var next rune
		switch ch {
		case 'z':
			next = 'a' // wrap lowercase
		case 'Z':
			next = 'A' // wrap uppercase
		default:
			next = ch + 1
		}
		newWord += string(next)
	}
	return newWord
}
// main demonstrates nextCharacter on a couple of sample words.
func main() {
	arr := []string{"cat", "dog"}
	newArr := make([]string, len(arr))
	// replace each character by next character of the alphabet
	for index, word := range arr {
		newArr[index] = nextCharacter(word)
	}
	fmt.Println(newArr)
}
export class LoginRequest {
public authToken: String;
} |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux-x86
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/glfxc
OUTPUT_BASENAME=glfxc
PACKAGE_TOP_DIR=glfxc/
# Functions
# Abort the script if the previous command failed, preserving its exit code.
function checkReturnCode
{
    rc=$?
    if [ $rc != 0 ]
    then
        exit $rc
    fi
}
# Create a directory (and parents), optionally applying a permission mode.
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
    mkdir -p "$1"
    checkReturnCode
    if [ "$2" != "" ]
    then
        chmod $2 "$1"
        checkReturnCode
    fi
}
# Copy one file into the packaging staging area, optionally chmod-ing it.
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
    cp "$1" "$2"
    checkReturnCode
    if [ "$3" != "" ]
    then
        chmod $3 "$2"
        checkReturnCode
    fi
}

# Setup: recreate a clean temporary staging directory for this package.
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}

# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/glfxc/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755

# Generate tar file from the staged layout
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/glfxc.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/glfxc.tar *
checkReturnCode

# Cleanup: drop the staging directory; only the tar remains in dist.
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
const DrawCard = require('../../../drawcard.js');
// Shae (04029): once-per-phase-limited action to stand her for 1 gold
// during the challenge phase.
class Shae extends DrawCard {
    setupCardAbilities(ability) {
        this.action({
            title: 'Pay 1 gold to stand Shae',
            method: 'stand',
            phase: 'challenge',
            limit: ability.limit.perPhase(2)
        });
    }

    // `player` is the acting player; for a card action this is the card's
    // controller. Consistency fix: the original mixed `player` and
    // `this.controller` in the same method — they refer to the same player
    // here, so the acting player is used throughout.
    stand(player) {
        // Returning false aborts the action: cannot pay, or already standing.
        if(player.gold <= 0 || !this.kneeled) {
            return false;
        }
        this.game.addGold(player, -1);
        player.standCard(this);
        this.game.addMessage('{0} pays 1 gold to stand {1}', player, this);
    }
}

Shae.code = '04029';

module.exports = Shae;
|
def calculate_result(a, b):
    """Return the absolute difference between *a* and *b*."""
    # abs() states the intent directly instead of the manual conditional
    # `a - b if a > b else b - a`.
    return abs(a - b)
#!/usr/bin/env bash
# Run the BrokenLine test suites in a single truffle invocation: the Solidity
# unit tests first, then the JavaScript tests.
truffle test \
  ./test/contracts/BrokenLineTest.sol \
  ./test/BrokenLine.test.js
<filename>api/src/main/java/org/ednovo/gooru/domain/service/search/QuestionSearchResult.java
/////////////////////////////////////////////////////////////
// QuestionSearchResult.java
// gooru-api
// Created by Gooru on 2014
// Copyright (c) 2014 Gooru. All rights reserved.
// http://www.goorulearning.org/
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/////////////////////////////////////////////////////////////
package org.ednovo.gooru.domain.service.search;
import java.util.HashSet;
import java.util.Set;
/**
 * Search-result row describing a single question: its text, answer and hint
 * options, classification metadata and the quizzes grouping it.
 */
public class QuestionSearchResult extends SearchResult {

    /** Serialization id for the Serializable contract inherited via SearchResult. */
    private static final long serialVersionUID = -7871934464419763101L;

    private String questionText;      // question body shown to the user
    private Set<Object> answers;      // candidate answers; element shape defined upstream — TODO confirm
    private Set<Object> hints;        // hints attached to the question
    private String questionType;
    private Long questionId;
    private String concept;
    private String source;
    private String explanation;
    private String groupedQuizNames;  // names of quizzes containing this question
    private String groupedQuizIds;    // ids corresponding to groupedQuizNames
    private String gooruOid;          // Gooru object id of the question

    /** Creates an empty result with mutable, non-null answer/hint sets. */
    public QuestionSearchResult() {
        answers = new HashSet<Object>();
        hints = new HashSet<Object>();
    }

    // Plain accessors below; no additional behavior.

    public String getQuestionText() {
        return questionText;
    }

    public void setQuestionText(String questionText) {
        this.questionText = questionText;
    }

    public String getQuestionType() {
        return questionType;
    }

    public void setQuestionType(String questionType) {
        this.questionType = questionType;
    }

    public Long getQuestionId() {
        return questionId;
    }

    public String getConcept() {
        return concept;
    }

    public void setConcept(String concept) {
        this.concept = concept;
    }

    public void setQuestionId(Long questionId) {
        this.questionId = questionId;
    }

    public String getSource() {
        return source;
    }

    public void setSource(String source) {
        this.source = source;
    }

    public String getExplanation() {
        return explanation;
    }

    public void setExplanation(String explanation) {
        this.explanation = explanation;
    }

    public String getGroupedQuizNames() {
        return groupedQuizNames;
    }

    public void setGroupedQuizNames(String groupedQuizNames) {
        this.groupedQuizNames = groupedQuizNames;
    }

    public String getGroupedQuizIds() {
        return groupedQuizIds;
    }

    public void setGroupedQuizIds(String groupedQuizIds) {
        this.groupedQuizIds = groupedQuizIds;
    }

    public Set<Object> getAnswers() {
        return answers;
    }

    public void setAnswers(Set<Object> answers) {
        this.answers = answers;
    }

    public Set<Object> getHints() {
        return hints;
    }

    public void setHints(Set<Object> hints) {
        this.hints = hints;
    }

    public String getGooruOid() {
        return gooruOid;
    }

    public void setGooruOid(String gooruOid) {
        this.gooruOid = gooruOid;
    }
}
|
def is_palindrome(string):
    """Return True if *string* reads the same forwards and backwards."""
    # A palindrome is exactly a string that equals its own reversal.
    return string == string[::-1]
# ----------------------------------------------------------------------------
#
# Package : tektoncd/pipeline
# Version : v0.7.0
# Source repo : https://github.com/tektoncd/pipeline.git
# Tested on : rhel_7.6
# Script License : Apache License, Version 2.0
# Maintainer : Siddhesh Ghadi <Siddhesh.Ghadi@ibm.com>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
#!/bin/bash
# ----------------------------------------------------------------------------
# Prerequisites:
#
# Docker 17.05 or later must be installed and running.
#
# Go version 1.12.1 or later must be installed.
#
# Kubectl version 1.15.0 or later must be installed.
# Note: For kubectl version below 1.15.0, the “tkn” plugin may not be identified by kubectl.
# Hence, it is recommended to use 1.15.0 or later versions of kubectl.
#
# ----------------------------------------------------------------------------
set -e

# Base tooling
yum update -y
yum install git -y

# Go environment for module-less (GOPATH) builds
export GOPATH=${HOME}/go
export PATH=$GOPATH/bin:$GOROOT/bin:$PATH
export GO111MODULE=auto

# Install go packages
go get -u github.com/golang/dep/cmd/dep

# Official ko repo does not support multiplatform build
# PR: https://github.com/google/ko/pull/38
# Once the PR is merged, we can use "go get -u github.com/google/ko/cmd/ko"
mkdir -p ${GOPATH}/src/github.com/google&& cd $_
git clone -b multi-platform-wip https://github.com/jonjohnsonjr/ko.git
cd ko/cmd/ko/
go install

# Build tektoncd/pipeline
mkdir -p ${GOPATH}/src/github.com/tektoncd && cd $_
git clone --branch v0.7.0 https://github.com/tektoncd/pipeline.git
cd pipeline

# Download base dockerfiles
curl -o images/Dockerfile.build-base-ubi https://raw.githubusercontent.com/ppc64le/build-scripts/master/tektoncd-components/base-dockerfiles/Dockerfile.build-base-ubi
curl -o images/Dockerfile.busybox-ubi https://raw.githubusercontent.com/ppc64le/build-scripts/master/tektoncd-components/base-dockerfiles/Dockerfile.busybox-ubi
curl -o images/Dockerfile.cloud-sdk-docker-ubi https://raw.githubusercontent.com/ppc64le/build-scripts/master/tektoncd-components/base-dockerfiles/Dockerfile.cloud-sdk-docker-ubi

# Build required base images
docker build -t build/build-base:latest -f images/Dockerfile.build-base-ubi .
docker build -t ppc64le/busybox:ubi -f images/Dockerfile.busybox-ubi .
docker build -t cloud-sdk-docker:ubi -f images/Dockerfile.cloud-sdk-docker-ubi .

# Write .ko.yaml overrides pointing each command at a local base image
echo 'baseImageOverrides:
  github.com/tektoncd/pipeline/cmd/creds-init: localhost:5000/build/build-base:latest
  github.com/tektoncd/pipeline/cmd/git-init: localhost:5000/build/build-base:latest
  github.com/tektoncd/pipeline/cmd/bash: localhost:5000/ppc64le/busybox:ubi # image should have shell in $PATH
  github.com/tektoncd/pipeline/cmd/entrypoint: localhost:5000/ppc64le/busybox:ubi # image should have shell in $PATH
  github.com/tektoncd/pipeline/cmd/gsutil: localhost:5000/cloud-sdk-docker:ubi # image should have gsutil in $PATH
  github.com/tektoncd/pipeline/cmd/kubeconfigwriter: localhost:5000/build/build-base:latest # image should have gsutil in $PATH
  github.com/tektoncd/pipeline/cmd/controller: localhost:5000/build/build-base:latest # image should have gsutil in $PATH
  github.com/tektoncd/pipeline/cmd/imagedigestexporter: localhost:5000/build/build-base:latest # image should have gsutil in $PATH
  github.com/tektoncd/pipeline/cmd/nop: localhost:5000/build/build-base:latest # image should have gsutil in $PATH
  github.com/tektoncd/pipeline/cmd/pullrequest-init: localhost:5000/build/build-base:latest # image should have gsutil in $PATH
  github.com/tektoncd/pipeline/cmd/webhook: localhost:5000/build/build-base:latest # image should have gsutil in $PATH
' >.ko.yaml

# Create a local registry & push required base images
docker run -d --name registry -p 5000:5000 ppc64le/registry:2

# Push images
docker tag build/build-base:latest localhost:5000/build/build-base:latest
docker push localhost:5000/build/build-base:latest
docker tag ppc64le/busybox:ubi localhost:5000/ppc64le/busybox:ubi
docker push localhost:5000/ppc64le/busybox:ubi
docker tag cloud-sdk-docker:ubi localhost:5000/cloud-sdk-docker:ubi
docker push localhost:5000/cloud-sdk-docker:ubi

# Build & publish tektoncd-pipeline images
export KO_DOCKER_REPO=localhost:5000/ko.local
ko publish --platform=linux/ppc64le ./cmd/creds-init/
ko publish --platform=linux/ppc64le ./cmd/git-init
ko publish --platform=linux/ppc64le ./cmd/bash/
ko publish --platform=linux/ppc64le ./cmd/gsutil/
ko publish --platform=linux/ppc64le ./cmd/entrypoint/
ko publish --platform=linux/ppc64le ./cmd/kubeconfigwriter/
ko publish --platform=linux/ppc64le ./cmd/controller/
ko publish --platform=linux/ppc64le ./cmd/imagedigestexporter/
ko publish --platform=linux/ppc64le ./cmd/nop/
ko publish --platform=linux/ppc64le ./cmd/pullrequest-init/
ko publish --platform=linux/ppc64le ./cmd/webhook/
|
#! /bin/bash
# Restart the service stack from a clean slate, create the demo accounts and
# run one payment end-to-end, checking balances before and after.
sh kill-services.sh
sh start-services.sh
sh create-accounts.sh
sleep 1  # give the freshly started services a moment to come up
sh check-balances.sh
sh send-payment.sh
sh check-balances.sh
# Comment out the next command if you want to keep the services running
sh kill-services.sh
<gh_stars>1-10
import styled, { css } from "styled-components";
// Appearance flags for SubmitButtonBox: `big` switches the size preset,
// `yellow`/`blue` pick the colour variant (grey defaults when neither is set).
interface ISubmitButtonBoxProps {
  big: boolean | undefined;
  yellow?: boolean;
  blue?: boolean;
}

// Rounded, centred submit button. NOTE(review): rule order inside the
// template matters — the disabled variants come after the coloured variants
// so they override them; do not reorder the css blocks.
export const SubmitButtonBox = styled.button<ISubmitButtonBoxProps>`
  cursor: pointer;
  border: 1px solid ${(props) => props.theme.color.gray_color3};
  background-color: ${(props) => props.theme.color.gray_color1};
  color: ${(props) => props.theme.color.gray_color4};
  ${(props) =>
    props.big
      ? css`
          height: 50px;
          font-size: ${props.theme.fontSize.button_large};
        `
      : css`
          height: 34px;
          font-size: ${props.theme.fontSize.button_small};
        `};
  ${(props) =>
    props.yellow &&
    css`
      color: ${(props) => props.theme.color.gray_color1};
      background-color: ${(props) => props.theme.color.main_yellow_color};
      border: 0px;
    `}
  ${(props) =>
    props.blue &&
    css`
      color: ${(props) => props.theme.color.gray_color1};
      background-color: ${(props) => props.theme.color.main_blue_color};
      border: 0px;
    `};
  ${(props) =>
    props.yellow &&
    props.disabled &&
    css`
      background-color: ${(props) => props.theme.color.secondary_yellow_color};
      color: rgba(255, 255, 255, 0.5);
      cursor: default;
    `}
  ${(props) =>
    props.blue &&
    props.disabled &&
    css`
      background-color: ${(props) => props.theme.color.secondary_blue_color};
      color: rgba(255, 255, 255, 0.5);
      cursor: default;
    `}
  display: flex;
  align-items: center;
  justify-content: center;
  border-radius: 25px;
  padding: 13px 16px;
  font-weight: bold;
  a {
    width: 100%;
    height: 100%;
    display: flex;
    align-items: center;
    justify-content: center;
    text-decoration: none;
    color: ${(props) => props.theme.color.gray_color1};
  }
`;
|
import { ItachiUchiha } from '../models/ItachiUchiha';
import { Crows } from '../models/Crows';
import { CustomLoadingController } from './CustomLoadingController';
import { World } from '../models/World';
// Wires the demo scene together: the world, the Itachi character model and
// the crows, all loaded through the shared loading controller.
export class SceneController {
    world: World;
    itachi: ItachiUchiha;
    crows: Crows;

    constructor() {
        this.world = new World();
        // Both models share the singleton's GLTF loader so all assets go
        // through one loading pipeline.
        this.itachi = new ItachiUchiha(CustomLoadingController.Instance.gltfLoader);
        this.crows = new Crows(CustomLoadingController.Instance.gltfLoader);
    }
}
|
<reponame>anticipasean/girakkafunc
package cyclops.monads.function;
import cyclops.function.enhanced.Function1;
import cyclops.monads.AnyMs;
import cyclops.monads.KleisliM;
import cyclops.monads.WitnessType;
import cyclops.monads.AnyM;
import cyclops.reactor.container.transformer.FutureT;
import cyclops.reactor.container.transformer.ListT;
import cyclops.reactive.collection.container.mutable.ListX;
import java.util.function.Function;
/**
* Created by johnmcclean on 18/12/2016.
*/
@FunctionalInterface
public interface AnyMFunction1<W extends WitnessType<W>,T1,R> extends KleisliM<W,AnyM<W,T1>,R> {

    /**
     * Lift {@code fn} to operate across two monad witnesses:
     * {@code hktTransform} converts the input AnyM from witness W1 to W2,
     * then {@code fn} is mapped over the converted value.
     */
    static <W1 extends WitnessType<W1>,W2 extends WitnessType<W2>,T,R> Function1<AnyM<W1,T>,AnyM<W2,R>> liftAnyM(Function<? super T, ? extends R> fn,Function<AnyM<W1,T>,AnyM<W2,T>> hktTransform){
        return (T1)-> hktTransform.apply(T1).map(fn);
    }

    /** Lift a plain function into an AnyM-aware function for witness W. */
    static <W extends WitnessType<W>,T,R> AnyMFunction1<W, T,R> liftF(Function<? super T, ? extends R> fn){
        return AnyM.liftF(fn);
    }

    /** Map {@code fn} over a plain AnyM functor. */
    static <W extends WitnessType<W>,T,R> AnyM<W, R> mapF(Function<? super T, ? extends R> fn,AnyM<W, T> functor) {
        return functor.map(fn);
    }

    /** Map {@code fn} over a FutureT transformer. */
    static <W extends WitnessType<W>,T,R> FutureT<W,R> mapF(Function<? super T, ? extends R> fn, FutureT<W,T> future) {
        return future.map(fn);
    }

    /** Map {@code fn} over a ListT transformer. */
    static <W extends WitnessType<W>,T,R> ListT<W,R> mapF(Function<? super T, ? extends R> fn, ListT<W,T> list) {
        return list.map(fn);
    }

    /** Lift {@code fn} so its result is wrapped in a FutureT for the given witness. */
    static <W extends WitnessType<W>,T,R> Function1<T, FutureT<W,R>> liftFutureT(Function1<? super T, ? extends R> fn,W witness) {
        Function1<T, R> a = Function1.narrow(fn);
        Function1<T, FutureT<W, R>> x = a.functionOps().liftFuture().andThen(f -> AnyMs.liftM(f, witness));
        return x;
    }

    /** Lift {@code fn} so its result is wrapped in a singleton ListT for the given witness. */
    static <W extends WitnessType<W>,T,R> Function1<T, ListT<W,R>> liftListT(Function1<? super T, ? extends R> fn,W witness) {
        Function1<T,ListX<R>> f = i-> ListX.of(fn.apply(i));
        return f.andThen(l->AnyMs.liftM(l,witness));
    }
}
|
package com.threathunter.bordercollie.slot.compute.graph.node.propertyhandler.reduction;
import com.threathunter.bordercollie.slot.compute.cache.wrapper.CacheWrapperMeta;
import com.threathunter.bordercollie.slot.util.ClassBasedRegistry;
import com.threathunter.model.Property;
import com.threathunter.model.PropertyReduction;
import com.threathunter.variable.exception.NotSupportException;
import com.threathunter.variable.reduction.*;
import java.util.List;
/**
*
*/
/**
 * Factory for {@link ReductionHandler}s: maps a {@link PropertyReduction}
 * subtype to a generator able to build the matching handler. Concrete
 * generators register themselves in the static block; callers go through
 * {@link #generateReductionHandler}.
 */
public abstract class PropertyReductionHandlerGenerator<R extends PropertyReduction> {
    private static final ClassBasedRegistry<PropertyReduction, PropertyReductionHandlerGenerator> registry =
            new ClassBasedRegistry<>(PropertyReduction.class);

    static {
        registerReduction(LongPropertyReduction.class, LongReductionHandlerGenerator.class);
        registerReduction(DoublePropertyReduction.class, DoubleReductionHandlerGenerator.class);
        registerReduction(StringPropertyReduction.class, StringReductionHandlerGenerator.class);
        registerReduction(TopPropertyReduction.class, TopPropertyReductionHandlerGenerator.class);
    }

    /** Register a generator class for a reduction class (and its subclasses). */
    public static void registerReduction(Class<? extends PropertyReduction> c, Class<? extends PropertyReductionHandlerGenerator> g) {
        registry.register(c, g);
    }

    /**
     * Build the handler for reduction {@code r}, or return null when no
     * generator is registered for its class.
     */
    public static ReductionHandler generateReductionHandler(CacheWrapperMeta meta, List<Property> groupKeys, PropertyReduction r) {
        Class<? extends PropertyReductionHandlerGenerator> handlerClass = registry.get(r.getClass());
        if (handlerClass == null) {
            return null;
        }
        try {
            PropertyReductionHandlerGenerator handlerGenerator = handlerClass.newInstance();
            return handlerGenerator.generateHandler(meta, groupKeys, r);
        } catch (Exception e) {
            // Fix: message previously said "property mapping handler",
            // copied from the mapping generator; this is the reduction path.
            throw new RuntimeException("error in property reduction handler generation.", e);
        }
    }

    /** Build a handler for the concrete reduction type {@code r}. */
    public abstract ReductionHandler generateHandler(CacheWrapperMeta meta, List<Property> groupKeys, R r);

    public static class LongReductionHandlerGenerator extends PropertyReductionHandlerGenerator<LongPropertyReduction> {
        @Override
        public ReductionHandler generateHandler(CacheWrapperMeta meta, List<Property> groupKeys, LongPropertyReduction longPropertyReduction) {
            String type = longPropertyReduction.getType();
            if (type.equals("longgroup_count")) {
                if (meta.getIndexCount() == 0) {
                    return new LongReductionHandler.GlobalLongGroupCountReductionHandler(longPropertyReduction, meta, groupKeys);
                }
            }
            if (type.equals("longcount")) {
                if (meta.getIndexCount() == 0) {
                    return new LongReductionHandler.GlobalLongCountReductionHandler(longPropertyReduction, meta, groupKeys);
                }
                return new LongReductionHandler.LongCountReductionHandler(longPropertyReduction, meta, groupKeys);
            }
            if (type.equals("longsum")) {
                if (meta.getIndexCount() == 0) {
                    return new LongReductionHandler.GlobalLongSumReductionHandler(longPropertyReduction, meta, groupKeys);
                }
                return new LongReductionHandler.LongSumReductionHandler(longPropertyReduction, meta, groupKeys);
            }
            if (type.equals("longgroup_sum")) {
                if (meta.getIndexCount() == 0) {
                    return new LongReductionHandler.GlobalLongGroupSumReductionHandler(longPropertyReduction, meta, groupKeys);
                }
            }
            if (type.equals("longmin")) {
                if (meta.getIndexCount() == 1) {
                    return new LongReductionHandler.LongMinReductionHandler(longPropertyReduction, meta, groupKeys);
                }
            }
            if (type.equals("longlast")) {
                if (meta.getIndexCount() == 1) {
                    return new LongReductionHandler.LongLastReductionHandler(longPropertyReduction, meta, groupKeys);
                }
            }
            if (type.equals("longfirst")) {
                return new LongReductionHandler.LongFirstReductionHandler(longPropertyReduction, meta, groupKeys);
            }
            if (type.equals("longavg")) {
                return new LongReductionHandler.LongAvgReductionHandler(longPropertyReduction, meta, groupKeys);
            }
            if (type.equals("longstddev")) {
                return new LongReductionHandler.LongStddevReductionHandler(longPropertyReduction, meta, groupKeys);
            }
            if (type.equals("longcv")) {
                return new LongReductionHandler.LongCVReductionHandler(longPropertyReduction, meta, groupKeys);
            }
            throw new NotSupportException("reduction type is not support: " + longPropertyReduction.getType());
        }
    }

    public static class DoubleReductionHandlerGenerator extends PropertyReductionHandlerGenerator<DoublePropertyReduction> {
        @Override
        public ReductionHandler generateHandler(CacheWrapperMeta meta, List<Property> groupKeys, DoublePropertyReduction r) {
            if (r.getType().equals("doublesum")) {
                return new DoubleReductionHandler.DoubleSumReductionHandler(r, meta, groupKeys);
            }
            if (r.getType().equals("doublegroup_count")) {
                if (meta.getIndexCount() == 0) {
                    return new DoubleReductionHandler.GlobalDoubleGroupCountReductionHandler(r, meta, groupKeys);
                }
                return new DoubleReductionHandler.DoubleGroupCountReductionHandler(r, meta, groupKeys);
            }
            if (r.getType().equals("doublecount")) {
                if (meta.getIndexCount() == 0) {
                    return new DoubleReductionHandler.GlobalDoubleCountReductionHandler(r, meta, groupKeys);
                }
                return new DoubleReductionHandler.DoubleCountReductionHandler(r, meta, groupKeys);
            }
            if (r.getType().equals("doublestddev")) {
                return new DoubleReductionHandler.DoubleStddevReductionHandler(r, meta, groupKeys);
            }
            if (r.getType().equals("doublecv")) {
                return new DoubleReductionHandler.DoubleCVReductionHandler(r, meta, groupKeys);
            }
            // Fix: previously returned null on an unknown type while the Long
            // and String generators throw; throw consistently so unsupported
            // reductions fail loudly instead of surfacing as a null handler.
            throw new NotSupportException("reduction type is not support: " + r.getType());
        }
    }

    public static class StringReductionHandlerGenerator extends PropertyReductionHandlerGenerator<StringPropertyReduction> {
        @Override
        public ReductionHandler generateHandler(CacheWrapperMeta meta, List<Property> groupKeys, StringPropertyReduction r) {
            if (r.getType().equals("stringcount")) {
                return new StringReductionHandler.StringCountReductionHandler(r, meta, groupKeys);
            }
            if (r.getType().equals("stringdistinct_count")) {
                if (meta.getIndexCount() == 0) {
                    return new StringReductionHandler.GlobalStringDistinctCountReductionHandler(r, meta, groupKeys);
                }
                return new StringReductionHandler.StringDistinctCountReductionHandler(r, meta, groupKeys);
            }
            if (r.getType().equals("stringlistdistinct_count")) {
                return new StringReductionHandler.StringListDistinctCountReductionHandler(r, meta, groupKeys);
            }
            throw new NotSupportException("reduction type is not support: " + r.getType());
        }
    }

    public static class TopPropertyReductionHandlerGenerator extends PropertyReductionHandlerGenerator<TopPropertyReduction> {
        @Override
        public ReductionHandler generateHandler(CacheWrapperMeta meta, List<Property> groupKeys, TopPropertyReduction r) {
            if (r.getType().equals("doubledoubletop")) {
                return new TopReductionHandler.DoubleTopNReductionHandler(r, meta, groupKeys);
            }
            if (r.getType().equals("longlongtop")) {
                return new TopReductionHandler.LongTopNReductionHandler(r, meta, groupKeys);
            }
            if (r.getType().equals("doublemaptop")) {
                return new TopReductionHandler.DoubleKeyTopNReductionHandler(r, meta, groupKeys);
            }
            if (r.getType().equals("longmaptop")) {
                return new TopReductionHandler.LongKeyTopNReductionHandler(r, meta, groupKeys);
            }
            throw new NotSupportException("reduction type is not support: " + r.getType());
        }
    }
}
|
import size from 'lodash/size';
import range from 'lodash/range';
// Build pagination metadata for `list`: a window of at most 10 page links is
// shown, with the active page in the 6th slot of the window except when the
// window is pinned to the start (pages 1-10) or to the last 10 pages.
const pager = (list, currentPage = 1, pageSize = 10) => {
    const totalItems = size(list);
    const totalPages = Math.ceil(totalItems / pageSize);

    let startPage;
    let endPage;
    if (totalPages <= 10) {
        // Fewer than 10 pages: show them all.
        startPage = 1;
        endPage = totalPages;
    } else if (currentPage <= 6) {
        // Near the start: pin the window to pages 1-10.
        startPage = 1;
        endPage = 10;
    } else if (currentPage + 4 >= totalPages) {
        // Near the end: pin the window to the last 10 pages.
        startPage = totalPages - 9;
        endPage = totalPages;
    } else {
        // Middle: active page sits 6th in the window.
        startPage = currentPage - 5;
        endPage = currentPage + 4;
    }

    const startIndex = (currentPage - 1) * pageSize;
    const endIndex = Math.min(startIndex + pageSize - 1, totalItems - 1);

    return {
        totalItems,
        currentPage,
        pageSize,
        totalPages,
        startPage,
        endPage,
        startIndex,
        endIndex,
        pages: range(startPage, endPage + 1)
    };
};

export default pager;
|
#!/bin/bash
# Build libogg for iOS: check out the tagged release from the Xiph SVN,
# bootstrap it with autotools, then run ogg_configure.sh once per target
# architecture. Finally kick off the speex build.
XY_OGG_VERSION=1.3.2 # libogg version; update to the latest release as needed
XY_iPhone_SDK_VERSION=`xcrun --sdk iphoneos --show-sdk-version` # build against the iPhone SDK installed on this machine
echo $XY_iPhone_SDK_VERSION
mkdir -p libogg/$XY_OGG_VERSION # create the working directory
pushd libogg/$XY_OGG_VERSION # cd into it
ogg_svn_dir=libogg-$XY_OGG_VERSION # Xiph SVN tag naming convention, e.g. libogg-1.3.2
svn co http://svn.xiph.org/tags/ogg/$ogg_svn_dir # check the source out
pushd $ogg_svn_dir
./autogen.sh # bootstrap the autotools build files
make distclean # clean up between runs of this script
popd # back to the previous directory
popd
`pwd`/ogg_configure.sh i386 $XY_iPhone_SDK_VERSION `pwd`/libogg/$XY_OGG_VERSION/$ogg_svn_dir/configure
`pwd`/ogg_configure.sh x86_64 $XY_iPhone_SDK_VERSION `pwd`/libogg/$XY_OGG_VERSION/$ogg_svn_dir/configure
`pwd`/ogg_configure.sh armv6 $XY_iPhone_SDK_VERSION `pwd`/libogg/$XY_OGG_VERSION/$ogg_svn_dir/configure
`pwd`/ogg_configure.sh armv7 $XY_iPhone_SDK_VERSION `pwd`/libogg/$XY_OGG_VERSION/$ogg_svn_dir/configure
`pwd`/ogg_configure.sh armv7s $XY_iPhone_SDK_VERSION `pwd`/libogg/$XY_OGG_VERSION/$ogg_svn_dir/configure
`pwd`/ogg_configure.sh arm64 $XY_iPhone_SDK_VERSION `pwd`/libogg/$XY_OGG_VERSION/$ogg_svn_dir/configure
# speex
`pwd`/speex.sh
|
<gh_stars>0
/**
 * Minimal template compiler: walks the DOM subtree rooted at vm.$el,
 * rendering {{ interpolation }} text nodes and v-* directive attributes
 * from the view-model's data.
 */
class Compiler {
  constructor(vm) {
    this.el = vm.$el
    this.vm = vm
    this.compile(this.el)
  }

  // Compile a template subtree: handle text nodes and element nodes,
  // recursing into children.
  compile(el) {
    let childNodes = el.childNodes
    // childNodes is array-like; Array.from gives us forEach
    Array.from(childNodes).forEach(node => {
      if (this.isTextNode(node)) {
        // text node: process {{ }} interpolations
        this.compileText(node)
      } else if (this.isElementNode(node)) {
        // element node: process directive attributes
        this.compileElement(node)
      }
      // recurse into children, if any
      if (node.childNodes && node.childNodes.length) {
        this.compile(node)
      }
    })
  }

  // Compile an element node: scan its attributes for v-* directives.
  compileElement(node) {
    Array.from(node.attributes).forEach(attr => {
      let attrName = attr.name
      if (this.isDirective(attrName)) {
        // strip the "v-" prefix: v-text ==> text
        attrName = attrName.substr(2)
        let key = attr.value
        this.update(node, key, attrName)
      }
    })
  }

  // Dispatch to the updater for a directive, e.g. text -> textUpdater.
  update(node, key, attrName) {
    let updateFn = this[attrName + 'Updater']
    updateFn && updateFn(node, this.vm[key])
  }

  // v-text directive: set the node's text content.
  // Bug fix: was the misspelled `node.textContet`, which silently set a
  // useless expando property instead of updating the DOM.
  textUpdater(node, value) {
    node.textContent = value
  }

  // v-model directive. NOTE(review): mirrors v-text for now (no input
  // binding); same textContet -> textContent fix applied.
  modelUpdater(node, value) {
    node.textContent = value
  }

  // Compile a text node: replace the first {{ expr }} with the
  // view-model value for that key.
  compileText(node) {
    let reg = /\{\{(.+?)\}\}/
    let value = node.textContent
    if (reg.test(value)) {
      let key = RegExp.$1.trim()
      node.textContent = value.replace(reg, this.vm[key])
    }
  }

  // A directive is any attribute name starting with "v-".
  isDirective(attrName) {
    return attrName.startsWith('v-')
  }

  // nodeType === 3 is a text node.
  isTextNode(node) {
    return node.nodeType === 3
  }

  // nodeType === 1 is an element node.
  isElementNode(node) {
    return node.nodeType === 1
  }
}
# Find the values present in both lists, preserving list1's order.
list1 = [1, 2, 3, 4]
list2 = [2, 4, 6, 8]

# Keep every element of list1 that also appears in list2.
common_list = [element for element in list1 if element in list2]

# Show the shared elements.
print(common_list)
"""Project: Eskapade - A python-based package for data analysis.
Class: ApplyFuncToDf
Created: 2016/11/08
Description:
Algorithm to apply one or more functions to a (grouped) dataframe column
or to an entire dataframe.
Authors:
KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""
import collections
from eskapade import process_manager, DataStore, Link, StatusCode
class ApplyFuncToDf(Link):
    """Apply functions to data-frame.

    Applies one or more functions to a (grouped) dataframe column or an
    entire dataframe. In the latter case, this can be done row wise or
    column wise. By default the input dataframe is overwritten; configure
    store_key to keep the input and store the result under another key.
    """

    def __init__(self, **kwargs):
        """Initialize link instance.

        :param str read_key: data-store input key
        :param str store_key: data-store output key (empty: overwrite input)
        :param list apply_funcs: functions to apply (list of dicts)

            - 'func': function to apply
            - 'colout' (string): output column
            - 'colin' (string, optional): input column
            - 'entire' (boolean, optional): apply to the entire dataframe?
            - 'args' (tuple, optional): args for 'func'
            - 'kwargs' (dict, optional): kwargs for 'func'
            - 'groupby' (list, optional): column names to group by
            - 'groupbyColout' (string) output column after the split-apply-combine combination
        :param dict add_columns: columns to add to output (name, column)
        """
        Link.__init__(self, kwargs.pop('name', 'apply_func_to_dataframe'))

        # process and register keyword arguments
        self._process_kwargs(kwargs, read_key='', store_key='', apply_funcs=[], add_columns=None)
        self.check_extra_kwargs(kwargs)

    def initialize(self):
        """Initialize the link.

        :returns: initialization status code
        :rtype: StatusCode
        """
        self.check_arg_vals('read_key')
        if not self.apply_funcs:
            self.logger.warning('No functions to apply')
        return StatusCode.Success

    def execute(self):
        """Execute the link.

        Applies each configured function in order. An entry may target a
        grouped dataframe ('groupby'), store its result under its own
        'store_key', or write an output column ('colout') on the dataframe.

        :returns: execution status code
        :rtype: StatusCode
        """
        ds = process_manager.service(DataStore)
        assert self.read_key in ds, 'key "{key}" not in DataStore.'.format(key=self.read_key)
        df = ds[self.read_key]

        for arr in self.apply_funcs:
            # get func input
            keys = list(arr.keys())
            assert 'func' in keys, 'function input is insufficient.'
            func = arr['func']
            self.logger.debug('Applying function {function!s}.', function=func)
            args = ()
            kwargs = {}
            if 'kwargs' in keys:
                kwargs = arr['kwargs']
            if 'args' in keys:
                args = arr['args']

            # apply func
            if 'groupby' in keys:
                groupby = arr['groupby']
                if 'groupbyColout' in keys:
                    kwargs['groupbyColout'] = arr['groupbyColout']
                df = self.groupbyapply(df, groupby, func, *args, **kwargs)
            elif 'store_key' in keys:
                # the result of this entry goes to its own datastore key,
                # leaving the input dataframe untouched
                if 'entire' in keys:
                    result = func(df, *args, **kwargs)
                elif 'colin' in keys:
                    colin = arr['colin']
                    assert colin in df.columns
                    result = df[colin].apply(func, args=args, **kwargs)
                else:
                    result = df.apply(func, args=args, **kwargs)
                ds[arr['store_key']] = result
            else:
                assert 'colout' in keys, 'function input is insufficient'
                colout = arr['colout']
                if 'entire' in keys:
                    df[colout] = func(df, *args, **kwargs)
                elif 'colin' in keys:
                    colin = arr['colin']
                    if isinstance(colin, list):
                        for c in colin:
                            assert c in df.columns
                    else:
                        assert colin in df.columns
                    df[colout] = df[colin].apply(func, args=args, **kwargs)
                else:
                    df[colout] = df.apply(func, args=args, **kwargs)

        # add columns
        if self.add_columns is not None:
            for k, v in self.add_columns.items():
                df[k] = v

        # Store the result. Bug fix: the old check `self.store_key is None`
        # never matched the default store_key of '', so the frame was stored
        # under the key '' instead of overwriting the input as documented.
        if not self.store_key:
            ds[self.read_key] = df
        else:
            ds[self.store_key] = df

        return StatusCode.Success

    def add_apply_func(self, func, out_column, in_column='', *args, **kwargs):
        """Add function to be applied to dataframe.

        :param func: callable to apply
        :param str out_column: name of the output column
        :param str in_column: optional input column; when empty the function
            is applied to the entire dataframe
        :raises AssertionError: if func is not callable
        :raises TypeError: if the column names are not strings
        :raises RuntimeError: if no output column is given
        """
        # check inputs. Bug fix: collections.Callable was removed in
        # Python 3.10; the builtin callable() performs the same check.
        if not callable(func):
            self.logger.fatal('Specified function object is not callable.')
            raise AssertionError('functions in ApplyFuncToDf link must be callable objects')
        if not isinstance(out_column, str) or not isinstance(in_column, str):
            # bug fix: in_type/out_type were swapped in the log call
            self.logger.fatal('Types of specified column names are "{in_type}" and "{out_type}."',
                              in_type=type(in_column).__name__, out_type=type(out_column).__name__)
            raise TypeError('Column names in ApplyFuncToDf must be strings.')
        if not out_column:
            self.logger.fatal('No output column specified.')
            raise RuntimeError('An output column must be specified to apply function in ApplyFuncToDf.')

        # add function
        if in_column == '':
            self.apply_funcs.append({'func': func, 'colout': out_column, 'args': args, 'kwargs': kwargs})
        else:
            self.apply_funcs.append({'colin': in_column, 'func': func, 'colout': out_column, 'args': args,
                                     'kwargs': kwargs})

    def groupbyapply(self, df, groupby_columns, applyfunc, *args, **kwargs):
        """Apply a function to df grouped by groupby_columns.

        When 'groupbyColout' is passed in kwargs, the grouped result is
        assigned back to that column of df; otherwise the combined result
        is returned with a reset index.
        """
        if 'groupbyColout' not in kwargs:
            return df.groupby(groupby_columns).apply(applyfunc, *args, **kwargs).reset_index(drop=True)
        colout = kwargs.pop('groupbyColout')
        t = df.groupby(groupby_columns).apply(applyfunc, *args, **kwargs)
        # drop the groupby index levels so the result aligns with df's index
        for _ in range(0, len(groupby_columns)):
            t.index = t.index.droplevel()
        df[colout] = t
        return df
|
<reponame>LucianoZu/ardulink
/**
Copyright 2013 project Ardulink http://www.ardulink.org/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.github.pfichtner.ardulink.util;
import static org.zu.ardulink.util.Preconditions.checkArgument;
/**
* [ardulinktitle] [ardulinkversion]
* @author <NAME>
*
* [adsense]
*/
public class ProtoBuilder {

	/** Protocol tokens of the predefined Ardulink ALP commands. */
	public enum ALPProtocolKeys {
		POWER_PIN_SWITCH("ppsw"), //
		POWER_PIN_INTENSITY("ppin"), //
		DIGITAL_PIN_READ("dred"), //
		ANALOG_PIN_READ("ared"), //
		START_LISTENING_DIGITAL("srld"), //
		START_LISTENING_ANALOG("srla"), //
		STOP_LISTENING_DIGITAL("spld"), //
		STOP_LISTENING_ANALOG("spla");

		private String proto;

		private ALPProtocolKeys(String proto) {
			this.proto = proto;
		}
	}

	private final String command;
	private int pin;

	/** Starts a message for one of the predefined ALP commands. */
	public static ProtoBuilder alpProtocolMessage(ALPProtocolKeys command) {
		return new ProtoBuilder(command.proto);
	}

	/** Starts a message for a custom arduino command token. */
	public static ProtoBuilder arduinoCommand(String command) {
		return new ProtoBuilder(command);
	}

	private ProtoBuilder(String command) {
		this.command = command;
	}

	/** Selects the target pin; must not be negative. */
	public ProtoBuilder forPin(int pin) {
		checkArgument(pin >= 0, "Pin must not be negative but was %s", pin);
		this.pin = pin;
		return this;
	}

	/** Renders the message without a value part, e.g. "alp://dred/7\n". */
	public String withoutValue() {
		return "alp://" + command + "/" + pin + "\n";
	}

	/** Renders the message with a value part, e.g. "alp://ppin/7/42\n". */
	public String withValue(int value) {
		return "alp://" + command + "/" + pin + "/" + value + "\n";
	}
}
|
#!/bin/bash
# This is intended to be run inside the docker container as the command of the docker-compose.
# -e: abort on the first failing command; -x: echo each command before running it.
set -ex
# Run the suite with documentation-format output, matching both the
# conventional *_spec.rb naming and the legacy *_specs.rb naming.
bundle exec rspec -fd --pattern spec/**/*_spec.rb,spec/**/*_specs.rb
|
import React, { Component } from 'react';
import Chart from './Chart';
class Dashboard extends Component {
render() {
return (
<div>
<h1>Dashboard</h1>
<Chart />
</div>
);
}
}
export default Dashboard; |
<reponame>lengfangbing/min
import { Response, ServerRequest } from "./deps.ts";
import { Cookie } from "./cookie.ts";
/** HTTP methods accepted for route registration (lower-case). */
export type ReqMethod =
  | "get"
  | "post"
  | "put"
  | "delete"
  | "patch"
  | "options"
  | "head"
  | "connect"
  | "trace";

/** Parsed request body: `type` is the content kind, `value` the decoded payload. */
export type ReqBody = {
  type: string;
  value: string | number | Record<string, string | number | Uint8Array>;
};

/** Terminal request handler for a route. */
export type HandlerFunc = (req: Req, res: Res) => Promise<void> | void;

/** Continuation passed to middleware; resolves when downstream handling is done. */
export type NextFunc = () => Promise<void>;

/** Middleware: may inspect/modify req and res, and calls `next()` to continue. */
export type MiddlewareFunc = (
  req: Req,
  res: Res,
  next: NextFunc,
) => Promise<void> | void;

/** Convenience alias for a middleware chain. */
export type MethodFuncArgument = Array<MiddlewareFunc>;

/** Top-level application configuration. */
export interface AppConfig {
  server: ListenOptions;
}

/** Declarative route entry used when configuring the app. */
export interface RoutesConfig {
  url: string;
  method: string;
  func: HandlerFunc;
  middleware?: MethodFuncArgument;
}

/** Handler plus optional middleware attached to a registered route. */
export interface RouteHandlers {
  middleware?: Array<MiddlewareFunc>;
  handler: HandlerFunc;
}

/** Result of a successful route lookup for an incoming request. */
export interface RouteValue {
  query: Record<string, string>;
  url: string;
  params: Record<string, string>;
  handler: HandlerFunc;
  middleware: Array<MiddlewareFunc>;
}

/** A fully-resolved route. */
export interface SingleRoute {
  middleware: Array<MiddlewareFunc>;
  handler: HandlerFunc;
  paramsNames: Record<string, string>;
}

/** Node of the routing tree; `next` maps path segments to child nodes. */
export interface NewRoute {
  next: Record<string, NewRoute> | null;
  middleware: Array<MiddlewareFunc>;
  handler: HandlerFunc | null;
  paramsNames: Record<string, string>;
}

/** CORS configuration; `origin` may be computed per request. */
export interface CorsOptions {
  allowMethods?: Array<string>;
  allowHeaders?: Array<string>;
  origin?: string | ((req: Req) => Promise<string>);
  allowCredentials?: boolean;
  maxAge?: number;
  exposeHeaders?: Array<string>;
}

/** Server listen options; cert/key files apply when serving over TLS. */
export interface ListenOptions {
  port: number;
  hostname: string;
  certFile?: string;
  isHttps?: boolean;
  secure?: boolean;
  keyFile?: string;
}

/** Decomposed request URL: path, query, params, extension, etc. */
export interface RealUrl {
  url: string;
  query?: Record<string, string> | null;
  prefix?: string;
  params?: string | null;
  paramsName?: string;
  extName?: string;
}

/** Static-assets serving options. */
export type AssetsOptions = {
  // developing
  path: string;
  onerror: (e: Error) => void;
};

/** Assets config: either a root path or a full options object. */
export type AssetsArgument = string | AssetsOptions;

/** Framework view of an incoming request. */
export interface Req {
  query: Record<string, string>;
  body: ReqBody;
  url: string;
  method: ReqMethod;
  headers: Headers;
  request: ServerRequest;
  params: Record<string, string>;
  cookies: Map<string, string | boolean>;
  [key: string]: unknown;
}

/** Framework view of the outgoing response. */
export interface Res {
  response: Response;
  // the response body may be of any type
  // deno-lint-ignore no-explicit-any
  body: any;
  headers: Headers;
  status: number;
  done: boolean;
  redirect: (url: string) => void;
  render: (path: string) => Promise<void>;
  send: (req: Req, res: Res) => void;
  cookies: Cookie;
}

/** Shape of the framework's configuration file. */
export interface MinConfig {
  server: ListenOptions;
  routes: Array<RoutesConfig>;
  cors?: CorsOptions;
  assets?: string | Record<string, string | number | boolean>;
}

/** Payload passed to error reporters. */
export type ErrorMessage = {
  path: string;
  req: Req;
  error: string;
  position: string;
};
|
<gh_stars>0
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
require 'date'
require_relative 'upgrade_deployment_details'
# rubocop:disable Lint/UnneededCopDisableDirective, Metrics/LineLength
module OCI
  # Definition of the additional attributes for a Current Release upgrade.
  class GoldenGate::Models::UpgradeDeploymentCurrentReleaseDetails < GoldenGate::Models::UpgradeDeploymentDetails

    # Attribute mapping from ruby-style variable name to JSON key.
    def self.attribute_map
      {
        # rubocop:disable Style/SymbolLiteral
        'type': :'type'
        # rubocop:enable Style/SymbolLiteral
      }
    end

    # Attribute type mapping.
    def self.swagger_types
      {
        # rubocop:disable Style/SymbolLiteral
        'type': :'String'
        # rubocop:enable Style/SymbolLiteral
      }
    end

    # rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
    # rubocop:disable Metrics/MethodLength, Layout/EmptyLines, Style/SymbolLiteral

    # Initializes the object
    # @param [Hash] attributes Model attributes in the form of hash
    def initialize(attributes = {})
      return unless attributes.is_a?(Hash)

      # the discriminator is fixed for this subclass before delegating up
      attributes['type'] = 'CURRENT_RELEASE'

      super(attributes)
    end
    # rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity
    # rubocop:enable Metrics/MethodLength, Layout/EmptyLines, Style/SymbolLiteral

    # rubocop:disable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity, Layout/EmptyLines

    # Checks equality by comparing each attribute.
    # @param [Object] other the other object to be compared
    def ==(other)
      return true if equal?(other)

      self.class == other.class &&
        type == other.type
    end
    # rubocop:enable Metrics/CyclomaticComplexity, Metrics/AbcSize, Metrics/PerceivedComplexity, Layout/EmptyLines

    # @see the `==` method
    # @param [Object] other the other object to be compared
    def eql?(other)
      self == other
    end

    # rubocop:disable Metrics/AbcSize, Layout/EmptyLines

    # Calculates hash code according to all attributes.
    # @return [Fixnum] Hash code
    def hash
      [type].hash
    end
    # rubocop:enable Metrics/AbcSize, Layout/EmptyLines

    # rubocop:disable Metrics/AbcSize, Layout/EmptyLines

    # Builds the object from hash
    # @param [Hash] attributes Model attributes in the form of hash
    # @return [Object] Returns the model itself
    def build_from_hash(attributes)
      return nil unless attributes.is_a?(Hash)

      self.class.swagger_types.each_pair do |key, type|
        if type =~ /^Array<(.*)>/i
          # check to ensure the input is an array given that the attribute
          # is documented as an array but the input is not
          if attributes[self.class.attribute_map[key]].is_a?(Array)
            public_method("#{key}=").call(
              attributes[self.class.attribute_map[key]]
                .map { |v| OCI::Internal::Util.convert_to_type(Regexp.last_match(1), v) }
            )
          end
        elsif !attributes[self.class.attribute_map[key]].nil?
          public_method("#{key}=").call(
            OCI::Internal::Util.convert_to_type(type, attributes[self.class.attribute_map[key]])
          )
        end
        # or else data not found in attributes(hash), not an issue as the data can be optional
      end

      self
    end
    # rubocop:enable Metrics/AbcSize, Layout/EmptyLines

    # Returns the string representation of the object
    # @return [String] String presentation of the object
    def to_s
      to_hash.to_s
    end

    # Returns the object in the form of hash
    # @return [Hash] Returns the object in the form of hash
    def to_hash
      hash = {}
      self.class.attribute_map.each_pair do |attr, param|
        value = public_method(attr).call
        next if value.nil? && !instance_variable_defined?("@#{attr}")

        hash[param] = _to_hash(value)
      end
      hash
    end

    private

    # Outputs non-array value in the form of hash
    # For object, use to_hash. Otherwise, just return the value
    # @param [Object] value Any valid value
    # @return [Hash] Returns the value in the form of hash
    def _to_hash(value)
      if value.is_a?(Array)
        value.compact.map { |v| _to_hash(v) }
      elsif value.is_a?(Hash)
        {}.tap do |hash|
          value.each { |k, v| hash[k] = _to_hash(v) }
        end
      elsif value.respond_to? :to_hash
        value.to_hash
      else
        value
      end
    end
  end
end
# rubocop:enable Lint/UnneededCopDisableDirective, Metrics/LineLength
|
<reponame>fabiommendes/fgarcade
from arcade import SpriteList
def extend_sprite_list(lst: SpriteList, iterable):
    """
    Extend sprite list with elements on iterable.

    Appends every element of *iterable* to *lst*, in order, mirroring
    list.extend for SpriteList instances.
    """
    for element in iterable:
        lst.append(element)
def fix_all():
    """
    Monkey patch external libs.

    Installs extend_sprite_list as SpriteList.extend so sprite lists gain a
    list-like extend() method.
    """
    SpriteList.extend = extend_sprite_list
|
#!/bin/sh
# Benchmark driver: runs the base64 workload once per language/runtime,
# timing each via the ../xtime.rb wrapper and labelling it with echo.
echo Crystal
../xtime.rb ./base64_cr
echo Go
../xtime.rb ./base64_go
echo GccGo
../xtime.rb ./base64_go_gccgo
echo Cpp libcrypto
../xtime.rb ./base64_cpp
echo C
../xtime.rb ./base64_c
echo C aklomp
../xtime.rb ./base64_c_ak
echo Rust
../xtime.rb ./base64_rs
echo D
../xtime.rb ./base64_d
echo D Gdc
../xtime.rb ./base64_d_gdc
echo D Ldc
../xtime.rb ./base64_d_ldc
echo Nim Gcc
../xtime.rb ./base64_nim_gcc
echo Nim Clang
../xtime.rb ./base64_nim_clang
echo Julia
../xtime.rb julia test.jl
echo Scala
../xtime.rb scala Base64
echo Java
../xtime.rb java Base64Java
echo Kotlin
../xtime.rb java -jar Test-kt.jar
echo Javascript Node
../xtime.rb node test.js
echo Python PyPy
../xtime.rb pypy test.py
echo Python
../xtime.rb python test.py
echo Python3
../xtime.rb python3 test.py
echo Ruby
../xtime.rb ruby test.rb
echo Mono
../xtime.rb mono -O=all --gc=sgen test.exe
echo C# .Net Core
../xtime.rb dotnet bin/Release/netcoreapp3.0/base64.dll
echo Perl
../xtime.rb perl -Iperllib/lib/perl5 test.pl
echo Perl XS
../xtime.rb perl test-xs.pl
echo Tcl
../xtime.rb tclsh test.tcl
echo Php
../xtime.rb php test.php
echo V Gcc
../xtime.rb ./base64_v_gcc
echo V Clang
../xtime.rb ./base64_v_clang
|
<reponame>seidu626/vumi<gh_stars>100-1000
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.task import Clock
from twisted.trial.unittest import FailTest
from vumi.errors import ConfigError
from vumi.tests.helpers import VumiTestCase
from vumi.transports.tests.helpers import TransportHelper
from vumi.transports.smpp.pdu_utils import unpacked_pdu_opts
from vumi.transports.smpp.smpp_transport import SmppTransceiverTransport
from vumi.transports.smpp.tests.fake_smsc import FakeSMSC
class DefaultProcessorTestCase(VumiTestCase):
    """Tests for the default SMPP short-message processor configuration."""

    def setUp(self):
        # Fake SMSC endpoint plus a transport helper; the manual Clock lets
        # tests drive timed transport behaviour deterministically.
        self.fake_smsc = FakeSMSC()
        self.tx_helper = self.add_helper(
            TransportHelper(SmppTransceiverTransport))
        self.clock = Clock()

    @inlineCallbacks
    def get_transport(self, config):
        """Start a transport under the test clock, bound to the fake SMSC."""
        transport = yield self.tx_helper.get_transport(config, start=False)
        # swap in the manual clock before the worker starts scheduling
        transport.clock = self.clock
        yield transport.startWorker()
        self.clock.advance(0)
        yield self.fake_smsc.bind()
        returnValue(transport)

    @inlineCallbacks
    def test_data_coding_override_keys_ints(self):
        """
        If the keys of the data coding overrides config dictionary are not
        integers, they should be cast to integers.
        """
        config = {
            'system_id': 'foo',
            'password': '<PASSWORD>',
            'twisted_endpoint': self.fake_smsc.endpoint,
            'deliver_short_message_processor_config': {
                'data_coding_overrides': {
                    '0': 'utf-8'
                },
            },
        }
        transport = yield self.tx_helper.get_transport(config)
        # the string key '0' must have been cast to the int 0
        self.assertEqual(
            transport.deliver_sm_processor.data_coding_map.get(0), 'utf-8')

    @inlineCallbacks
    def test_data_coding_override_keys_invalid(self):
        """
        If the keys of the data coding overrides config dictionary can not be
        cast to integers, a config error with an appropriate message should
        be raised.
        """
        config = {
            'system_id': 'foo',
            'password': '<PASSWORD>',
            'twisted_endpoint': self.fake_smsc.endpoint,
            'deliver_short_message_processor_config': {
                'data_coding_overrides': {
                    'not-an-int': 'utf-8'
                },
            },
        }
        try:
            yield self.tx_helper.get_transport(config)
        except ConfigError as e:
            self.assertEqual(
                str(e),
                "data_coding_overrides keys must be castable to ints. "
                "invalid literal for int() with base 10: 'not-an-int'"
            )
        else:
            raise FailTest("Expected ConfigError to be raised")

    @inlineCallbacks
    def test_multipart_sar_reference_rollover(self):
        """
        If the multipart_sar_reference_rollover config value is set, then for
        multipart messages, the reference should rollover at that value.
        """
        config = {
            'system_id': 'foo',
            'password': '<PASSWORD>',
            'twisted_endpoint': self.fake_smsc.endpoint,
            'submit_short_message_processor_config': {
                'send_multipart_sar': True,
                'multipart_sar_reference_rollover': 0xFF,
            },
        }
        transport = yield self.get_transport(config)
        # push the sequence counter to the rollover boundary
        transport.service.sequence_generator.redis.set(
            'smpp_last_sequence_number', 0xFF)
        # long enough to be split into multiple message parts
        yield transport.submit_sm_processor.send_short_message(
            transport.service, 'test-id', '+1234', 'test message ' * 20,
            optional_parameters={})
        pdus = yield self.fake_smsc.await_pdus(2)
        msg_refs = [unpacked_pdu_opts(p)['sar_msg_ref_num'] for p in pdus]
        # both parts share one reference, wrapped back to 1 after 0xFF
        self.assertEqual(msg_refs, [1, 1])
|
<gh_stars>0
import React, { Component } from "react";
import PropTypes from "prop-types";
import { DragDropContext } from "react-beautiful-dnd";
import Board from "./Board";
import Sidebar from "./Sidebar";
import YearPicker from "./YearPicker";
import axios from "axios";
import API from "../../config/api";
// Send session cookies with every API request.
axios.defaults.withCredentials = true;

// REST endpoint roots for the resources this page manipulates.
const coursesUrl = `${API.courses}/`;
const boardsUrl = `${API.boards}/`;
const columnsUrl = `${API.columns}/`;
const cardsUrl = `${API.cards}/`;

// Template for the component's initial state. NOTE(review): this is a
// module-level object; instances should copy it rather than alias it.
const initialData = {
  board: {
    board: {},
    columns: {}
  },
  searchBody: [],
  searchValue: "",
  yearPickerVisible: false
};

// Term seasons, in the order they occur within an academic year.
const seasons = ["Fall", "Winter", "Spring", "Summer"];

// Human-readable label for a course, e.g. "CS 101: Intro".
const displayString = course => `${course.subject.symbol} ${course.catalog_num}: ${course.title}`;
class PageBody extends Component {
constructor(props) {
super(props);
this.onDragEnd = this.onDragEnd.bind(this);
this.state = initialData;
this.state.userId = this.props.id;
this.courses = [];
this.courseNames = [];
}
componentWillMount() {
this.initBoard();
this.getCourses();
}
// Gets board and decides whether returning or new user flow is used.
initBoard() {
axios.get(boardsUrl + `user/${this.state.userId}`)
.then(res => {
res = res.data;
if (res.ok) {
let board = res.body.board;
if (board) {
this.getColumns(board);
} else {
this.setState({yearPickerVisible: true});
}
} else {
console.log(res.message);
}
})
.catch(err => {
console.log(err);
});
}
/*
Returning User Flow
*/
// Finds existing columns for the user's board
getColumns(board) {
const boardId = board._id;
axios.get(columnsUrl + `board/${boardId}`)
.then(res => {
res = res.data;
if (res.ok) {
const columns = res.body.columns;
this.getCards(columns, board);
} else {
console.log(res.message);
}
})
.catch(err => {
console.log(err);
});
}
// Finds existing cards for each column and sets the board state
getCards(columns, board) {
let newBoard = {
board: board,
columns: {}
};
for (let column of columns) {
axios.get(cardsUrl + `column/${column._id}`)
.then(cardRes => {
cardRes = cardRes.data;
if (cardRes.ok) {
newBoard.columns[column._id] = {
column: column,
cards: cardRes.body.cards
};
if (Object.keys(newBoard.columns).length === columns.length) {
this.setState({board: newBoard});
}
} else {
console.log(cardRes.message);
}
})
.catch(err => {
console.log(err);
})
}
}
/*
New User Flow
*/
// Callback that fires on year picker submit
onYearPickerSubmit(startYear, endYear) {
this.postBoard(startYear.value, endYear.value);
this.setState({yearPickerVisible: false});
}
// Creates a new board for the user
postBoard(startYear, endYear) {
axios.post(boardsUrl + `user/${this.state.userId}`)
.then(res => {
res = res.data;
if (res.ok) {
const columnNames = this.getColumnNames(startYear, endYear);
this.addColumns({
board: res.body.board,
columns: {}
}, columnNames);
} else {
console.log(res.message);
}
})
.catch(err => {
console.log(err);
});
}
// Helper function to make column names out of year and season
columnName(year, seasonIndex) {
return `${year} ${seasons[seasonIndex]}`;
}
// Generates array of column names to create and POST
getColumnNames(startYear, endYear) {
let names = [this.columnName(startYear, 0)];
const endName = this.columnName(endYear + 1, 3);
while (names[names.length - 1] !== endName) {
const prevName = names[names.length - 1];
const prevSeason = prevName.slice(5);
const nextSeasonIndex = (seasons.indexOf(prevSeason) + 1) % seasons.length;
const year = parseInt(prevName.slice(0, 4), 10) + (nextSeasonIndex === 1 ? 1 : 0);
names.push(this.columnName(year, nextSeasonIndex));
}
return names;
}
// POSTs columns and sets board state
addColumns(board, termNames) {
let columnsAdded = 0;
const callback = columnRes => {
columnRes = columnRes.data;
if (columnRes.ok) {
const column = columnRes.body.column;
board.columns[column._id] = {
column: column,
cards: []
};
if (++columnsAdded === termNames.length) {
this.setState({board: board});
}
} else {
console.log(columnRes.message);
}
}
for (let term of termNames) {
axios.post(columnsUrl + `board/${board.board._id}`, {name: term})
.then(columnRes => callback(columnRes))
.catch(err => {
console.log(err);
});
}
}
/*
Course-handling and Search methods
*/
// Returns list of all courses
getCourses() {
axios.get(coursesUrl)
.then(res => {
res = res.data;
if (res.ok) {
const courses = res.body.courses;
this.courses = courses;
this.searchStrings = courses.map(course => displayString(course).toLowerCase());
this.populateSearchBody(this.state.searchValue);
} else {
console.log(res.message);
}
})
.catch(err => {
console.log(err);
});
}
// Filtering function, returns courses that match text field
onSearchChange(e) {
this.populateSearchBody(e.target.value);
}
populateSearchBody(searchValue) {
let courseResults = [],
nameResults = [];
if (searchValue) {
const courses = this.courses,
searchStrings = this.searchStrings,
termLowerCase = searchValue.toLowerCase();
let courseSet = new Set();
for (let i = 0; i < this.courses.length; i++) {
if (!courseSet.has(courses[i].title)) {
courseSet.add(courses[i].title);
const searchString = searchStrings[i];
if (searchString.includes(termLowerCase)) {
const course = {
_id: courses[i]._id,
course: courses[i]
};
if (searchString.indexOf(termLowerCase) === 0) {
courseResults.push(course);
} else {
nameResults.push(course);
}
}
}
}
}
const resultsSort = (a, b) => (displayString(a.course) > displayString(b.course)? 1:-1);
this.setState({
searchBody: courseResults.sort(resultsSort).concat(nameResults.sort(resultsSort)),
searchValue: searchValue
});
}
/*
Card Manipulation Methods
*/
// Required method for DragDropContext, handles moving cards between columns
onDragEnd(result) {
const { destination, source } = result;
if (!(destination && source)) { return; }
let columns = this.state.board.columns;
columns.searchBody = {
cards: this.state.searchBody
}
const sourceId = source.droppableId;
const destId = destination.droppableId;
let sourceColumnCards = columns[sourceId].cards;
let destColumnCards = columns[destId].cards;
const card = sourceColumnCards.splice(source.index, 1)[0];
destColumnCards.splice(destination.index, 0, card);
columns[sourceId].cards = sourceColumnCards;
columns[destId].cards = destColumnCards;
if (this.isSearchBody(destId) && columns.searchBody.cards.length <= 1) {
this.setState({searchBody: []});
} else {
this.setState({searchBody: columns.searchBody.cards});
}
Reflect.deleteProperty(columns, "searchBody");
let board = this.state.board;
board.columns = columns;
this.setState({board: board});
this.handleCardMove(card, sourceId, destId);
}
// Fires API requests when cards are moved to store state in database
handleCardMove(card, sourceId, destId) {
if (!this.isSearchBody(sourceId) && this.isSearchBody(destId)) {
axios.delete(cardsUrl + card._id)
.catch(err => {
console.log(err);
});
} else if (!this.isSearchBody(sourceId) && !this.isSearchBody(destId)) {
axios.patch(cardsUrl + `column/${destId}`, {
cardId: card._id
})
.catch(err => {
console.log(err);
});
} else if (this.isSearchBody(sourceId) && !this.isSearchBody(destId)) {
axios.post(cardsUrl + `column/${destId}`, {
course: card.course._id
})
.then(res => {
res = res.data;
if (res.ok) {
let board = this.state.board;
const cards = board.columns[destId].cards;
for (let i in cards) {
if (cards[i]._id === res.body.card.course) {
cards[i]._id = res.body.card._id;
board.columns[destId].cards = cards;
break;
}
}
this.setState({board: board});
} else {
console.log(res.message);
}
})
.catch(err => {
console.log(err);
});
}
}
// Helper function to differentiate the searchBody column
isSearchBody(id) {
return id === "searchBody";
}
render() {
return (
<div className="PageBody">
{this.state.yearPickerVisible?
<YearPicker onSubmit={this.onYearPickerSubmit.bind(this)}/> :
<DragDropContext onDragEnd={this.onDragEnd}>
<Board columns={this.state.board.columns}/>
<Sidebar value={this.state.searchValue}
column={this.state.searchBody}
onChange={e => this.onSearchChange(e)}/>
</DragDropContext>}
</div>
)
}
}
PageBody.propTypes = {
id: PropTypes.string
}
export default PageBody; |
#!/usr/bin/env sh
# abort on errors
set -e
# build
npm run docs:prepublish
# navigate into the build output directory
cd docs/.vuepress/dist
git init
git add -A
git commit -m 'deploy'
# if you are deploying to https://<USERNAME>.github.io/<REPO>
git push -f https://github.com/wdc-molfar/amqp-client master:gh-pages
cd - |
# Generated by Django 3.2 on 2021-05-19 05:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Category model and add Article.category pointing at it."""

    dependencies = [
        ('article', '0006_auto_20210519_1353'),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='分类名称', max_length=50, verbose_name='分类名称')),
                ('create_time', models.DateField(auto_now_add=True, help_text='创建时间', verbose_name='创建时间')),
                ('update_time', models.DateField(auto_now=True, help_text='更新时间', verbose_name='更新时间')),
                # NOTE(review): max_length has no effect on BooleanField — presumably a
                # leftover from a CharField; confirm and drop in a follow-up migration.
                ('status', models.BooleanField(default=1, help_text='是否显示, 0-不显示,1-显示', max_length=1, verbose_name='是否显示')),
            ],
            options={
                # newest categories first
                'db_table': 'resource_category',
                'ordering': ['-create_time'],
            },
        ),
        migrations.AddField(
            model_name='article',
            name='category',
            # deleting a category cascades to its articles
            field=models.ForeignKey(help_text='分类', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='articles', to='article.category', verbose_name='分类'),
        ),
    ]
|
<filename>frontend/src/component/Cart/ConfirmOrder.js
import React, { Fragment } from "react";
import CheckoutSteps from "../Cart/CheckoutSteps";
import { useSelector } from "react-redux";
import MetaData from "../layout/MetaData";
import "./ConfirmOrder.css";
import { Link } from "react-router-dom";
import { Typography } from "@material-ui/core";
const ConfirmOrder = ({ history }) => {
const { shippingInfo, cartItems } = useSelector((state) => state.cart);
const { user } = useSelector((state) => state.user);
const subtotal = cartItems.reduce(
(acc, item) => acc + item.quantity * item.price,
0
);
const shippingCharges = subtotal > 1000 ? 0 : 200;
const tax = subtotal * 0.18;
const totalPrice = subtotal + tax + shippingCharges;
const address = `${shippingInfo.address}, ${shippingInfo.city}, ${shippingInfo.state}, ${shippingInfo.pinCode}, ${shippingInfo.country}`;
const proceedToPayment = () => {
const data = {
subtotal,
shippingCharges,
tax,
totalPrice,
};
sessionStorage.setItem("orderInfo", JSON.stringify(data));
history.push("/process/payment");
};
return (
<Fragment>
<MetaData title="Confirm Order" />
<CheckoutSteps activeStep={1} />
<div className="confirmOrderPage">
<div>
<div className="confirmshippingArea">
<Typography>Shipping Info</Typography>
<div className="confirmshippingAreaBox">
<div>
<p>Name:</p>
<span>{user.name}</span>
</div>
<div>
<p>Phone:</p>
<span>{shippingInfo.phoneNo}</span>
</div>
<div>
<p>Address:</p>
<span>{address}</span>
</div>
</div>
</div>
<div className="confirmCartItems">
<Typography>Your Cart Items:</Typography>
<div className="confirmCartItemsContainer">
{cartItems &&
cartItems.map((item) => (
<div key={item.product}>
<img src={item.image} alt="Product" />
<Link to={`/product/${item.product}`}>
{item.name}
</Link>{" "}
<span>
{item.quantity} X ${item.price} ={" "}
<b>${item.price * item.quantity}</b>
</span>
</div>
))}
</div>
</div>
</div>
{/* */}
<div>
<div className="orderSummary">
<Typography>Order Summery</Typography>
<div>
<div>
<p>Subtotal:</p>
<span>${subtotal}</span>
</div>
<div>
<p>Shipping Charges:</p>
<span>${shippingCharges}</span>
</div>
<div>
<p>GST:</p>
<span>${tax}</span>
</div>
</div>
<div className="orderSummaryTotal">
<p>
<b>Total:</b>
</p>
<span>${totalPrice}</span>
</div>
<button onClick={proceedToPayment}>Proceed To Payment</button>
</div>
</div>
</div>
</Fragment>
);
};
export default ConfirmOrder;
|
<reponame>sheva007/imdb-crawler<gh_stars>0
import { Entity, PrimaryGeneratedColumn, CreateDateColumn, Column, UpdateDateColumn } from "typeorm";
// TypeORM entity holding one crawled IMDb movie record.
@Entity()
export class Movie {
    // Surrogate auto-increment primary key.
    @PrimaryGeneratedColumn()
    id: number;

    // IMDb identifier — presumably the "tt…" id (max 11 chars); confirm
    // against the crawler.
    @Column({
        type: "varchar",
        length: 11,
        nullable: false
    })
    imdb_id: string;

    @Column({
        type: "varchar",
        length: 300,
        nullable: false
    })
    title: string;

    // Release year kept as a 4-char string rather than a number.
    @Column({
        type: "varchar",
        length: 4,
        nullable: false
    })
    year: string;

    @Column({
        type: "varchar",
        length: 100,
        nullable: false
    })
    release: string;

    @Column({
        type: "float",
        nullable: false
    })
    rating: number;

    // Maintained automatically by TypeORM on every update / first insert.
    @UpdateDateColumn()
    lastCrawlingDate: Date;

    @CreateDateColumn()
    firstCrawlingDate: Date;
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
 * Render `value` as a decimal string of exactly `numDigits` characters,
 * zero-padded on the left; higher-order digits that do not fit are
 * truncated (e.g. (9876, 3) -> "876").
 *
 * Returns a heap-allocated string the caller must free(), or NULL if the
 * allocation fails.  `value` is assumed non-negative: `%` on a negative
 * operand would produce non-digit characters.
 */
char* convertToDigitsString(int value, int numDigits) {
    char* result = (char*)malloc((size_t)(numDigits + 1) * sizeof(char));
    if (result == NULL) {
        return NULL; /* propagate allocation failure instead of crashing */
    }
    result[numDigits] = '\0'; /* null-terminate up front */
    int i;
    for (i = 0; i < numDigits; i++) {
        /* fill from the right with the least-significant digit */
        result[numDigits - i - 1] = (char)('0' + (value % 10));
        value /= 10;
    }
    return result;
}
/* Demo driver: print two conversions and release the heap buffers
 * (the original leaked both strings and ignored allocation failure). */
int main() {
    char* padded = convertToDigitsString(123, 5);
    char* truncated = convertToDigitsString(9876, 3);
    if (padded == NULL || truncated == NULL) {
        free(padded);
        free(truncated);
        return 1;
    }
    printf("%s\n", padded);    /* Output: "00123" */
    printf("%s\n", truncated); /* Output: "876" (high digit truncated) */
    free(padded);
    free(truncated);
    return 0;
}
<filename>server/till/enrollment.js<gh_stars>0
/*
 * TILL ENROLLMENT MODULE
 *
 * SMS helpers for enrollment invites and referral-code notifications,
 * delivered through the Till endpoint configured in
 * SHOPIFY_CRM_TILL_URL.
 */
// DEFINE DEPENDENCIES
var request = require("request-json");
var url = require("url");
// DEFINE MODULE
// Public surface of this module (exported at the bottom of the file).
var tillEnrollment = {
	send: {
		enrollmentInvite: SendEnrollmentInvite,
		referralCodeNotification: SendReferralAlert
	}
};
// INITIATE INSTANCE
// Split the configured endpoint into base (scheme://host) and path,
// re-appending the query string when one is present.
var TILL_URL = url.parse(process.env.SHOPIFY_CRM_TILL_URL);
var TILL_BASE = TILL_URL.protocol + "//" + TILL_URL.host;
var TILL_PATH = TILL_URL.pathname;
if(TILL_URL.query != null) {
	TILL_PATH += "?"+TILL_URL.query;
};
/*
 * PRIVATE: CREATE CLIENT
 *
 * POSTs an enrollment question flow ({phone, questions, conclusion}) to
 * the Till endpoint.  Errors are logged, not thrown (best-effort).
 * NOTE(review): nothing in this file calls it — the question-flow code in
 * SendEnrollmentInvite is commented out; confirm before removing.
 */
async function _createClient(phones, questions, conclusion) {
	// NOTIFY PROGRESS
	console.log('till/enrollment/_createClient: ', phones, questions, conclusion);
	request.createClient(TILL_BASE).post(TILL_PATH, {
		phone: phones,
		questions: questions,
		conclusion: conclusion
	}, function(err, res, body) {
		if(err != null) {
			console.log('Till/_createClient error: ', err);
		}
		console.log(res, body);
		// NOTE(review): this `return` only exits the callback; the async
		// function itself resolves immediately with undefined.
		return;
	});
};
/*
 * PRIVATE: SEND ALERT
 *
 * POSTs an SMS payload ({phone, text}) to the Till endpoint.
 * Resolves with the HTTP status code on success, or undefined on a
 * transport error (failures are logged, not thrown, so alerting stays
 * best-effort for callers that `await` this).
 *
 * Fix: the original was `async` but resolved immediately with undefined —
 * the `return res.statusCode` inside the callback was lost.  Wrapping the
 * callback API in a Promise makes the awaited value meaningful.
 */
function _sendAlert(phones, message) {
	// NOTIFY
	console.log('till/enrollment/_sendAlert', phones, message);
	return new Promise(function(resolve) {
		request.createClient(TILL_BASE).post(TILL_PATH, {
			phone: phones,
			text: message
		}, function(err, res, body) {
			// NOTIFY
			console.log();
			if(err != null) {
				console.log('till/enrollment/_sendAlert error: ', err);
				resolve(undefined); // keep best-effort semantics: never reject
			} else {
				resolve(res.statusCode);
			}
		});
	});
};
/*
 * SEND REFERRAL ALERT
 *
 * Texts the newly enrolled member their referral link and code; the
 * business phone (BIZ_PHONE) is always copied on the alert.  Resolves
 * with whatever _sendAlert resolves with; errors are logged and result
 * in undefined.
 */
async function SendReferralAlert(phone, referralObject) {
	// NOTIFY PROGRESS
	// LOCAL VARIABLES
	var phones = [process.env.BIZ_PHONE];
	phones.push(phone);
	var referalUrl = "http://www.29kettle.com" + referralObject.defaultReferralCodeUrl;
	var message = "Welcome to SMS messages from 29 Kettle. Earn $5 towards your next online purchase when friends and family use your $5 off referal link: " + referalUrl + " or your referral code: " + referralObject.defaultReferralCode + " at checkout on their first order. Thanks for being a fan!";
	// EXECUTE
	try {
		return await _sendAlert(phones, message)
	} catch (error) {
		console.log('till/enrollment/SendReferralAlert error: ', error);
	}
};
/*
 * SEND ENROLLMENT INVITE
 *
 * Texts the enrollment-completion link.  NOTE(review): phones.push(phone)
 * is commented out below, so the invite currently goes ONLY to BIZ_PHONE,
 * not to the enrollee — confirm whether this is a leftover test setup.
 */
async function SendEnrollmentInvite(phone, enrollmenturl) {
	// NOTIFY PROGRESS
	console.log('till/enrollment/SendEnrollmentInvite', phone, enrollmenturl);
	// DEFINE LOCAL VARABIELS
	var phones = [process.env.BIZ_PHONE];
	//phones.push(phone);
	/*var questions = [{
		text: 'Welcome to SMS messages from 29 Kettle. We\'re excited you\'ve joined the Delight Circle Rewards Program. To finish your enrollment and start earning $5 everytime you refere someone to 29 Kettle, follow this link below: ' + enrollmenturl,
		tag: "enrollment",
		responses: ["HELP", 'END'],
		webhook: ""
	}];
	var conclusion = ""*/
	var message = 'Welcome to SMS messages from 29 Kettle. We\'re excited you\'ve joined the Delight Circle Rewards Program. To finish your enrollment and start earning $5 everytime you refere someone to 29 Kettle, follow this link below: ' + enrollmenturl;
	try {
		return await _sendAlert(phones, message)
	} catch (error) {
		console.log('till/enrollment/SendEnrollmentInvite error: ', error);
	}
	// RETURN
	// NOTE(review): reached only when the try block threw before returning.
	return true;
};
// EXPORT MODULE
module.exports = tillEnrollment; |
#!/bin/sh
# Build the DE10-Nano GHRD FPGA image with the Intel Quartus tools:
# assemble the project, then convert the .sof to .rbf per sof2rbf.cof.
#1 = path/to/intel/tools
$1/bin/quartus_asm DE10_NANO_SoC_GHRD.qpf &&
$1/bin/quartus_cpf -c sof2rbf.cof
|
/* eslint-disable @typescript-eslint/no-explicit-any */
import { Box } from "@material-ui/core";
/** Props for one tab's content panel. */
interface TabPanelProps {
  children?: React.ReactNode;
  /** Index of the tab this panel belongs to. */
  index: any;
  /** Currently selected tab index. */
  value: any;
}

/**
 * Accessible tab panel: hidden unless its index matches the selected
 * value, and children are only mounted for the active tab.  Ids pair
 * with the corresponding `form-tab-${index}` tab element.
 */
const TabPanel: React.FC<TabPanelProps> = props => {
  const { children, value, index, ...other } = props;

  return (
    <div
      // fix: "createformpanel" is not a valid ARIA role; the standard
      // role for tab content (matching the ids below) is "tabpanel".
      role="tabpanel"
      hidden={value !== index}
      id={`form-tabpanel-${index}`}
      aria-labelledby={`form-tab-${index}`}
      {...other}
    >
      {value === index && <Box p={3}>{children}</Box>}
    </div>
  );
};
|
#!/usr/bin/env bash
# Slurm batch job: install py-six@1.14.0 into the shared Spack (cpu)
# instance on Expanse, then refresh the Lmod module tree.
#SBATCH --job-name=py-six@1.14.0
#SBATCH --account=use300
#SBATCH --partition=shared
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=16
#SBATCH --mem=32G
#SBATCH --time=00:30:00
#SBATCH --output=%x.o%j.%N

# Job provenance: timestamps, system identity, and a checksum of this
# script as actually submitted.
declare -xr LOCAL_TIME="$(date +'%Y%m%dT%H%M%S%z')"
declare -xir UNIX_TIME="$(date +'%s')"

declare -xr SYSTEM_NAME='expanse'

declare -xr SPACK_VERSION='0.15.4'
declare -xr SPACK_INSTANCE_NAME='cpu'
declare -xr SPACK_INSTANCE_DIR="${HOME}/cm/shared/apps/spack/${SPACK_VERSION}/${SPACK_INSTANCE_NAME}"

declare -xr SLURM_JOB_SCRIPT="$(scontrol show job ${SLURM_JOB_ID} | awk -F= '/Command=/{print $2}')"
declare -xr SLURM_JOB_MD5SUM="$(md5sum ${SLURM_JOB_SCRIPT})"

declare -xr SCHEDULER_MODULE='slurm/expanse/20.02.3'

echo "${UNIX_TIME} ${SLURM_JOB_ID} ${SLURM_JOB_MD5SUM} ${SLURM_JOB_DEPENDENCY}"
echo ""
cat "${SLURM_JOB_SCRIPT}"

module purge
module load "${SCHEDULER_MODULE}"
module list
. "${SPACK_INSTANCE_DIR}/share/spack/setup-env.sh"

# Pin the python dependency to the exact hash built with this compiler.
declare -xr SPACK_PACKAGE='py-six@1.14.0'
declare -xr SPACK_COMPILER='gcc@10.2.0'
declare -xr SPACK_VARIANTS=''
declare -xr SPACK_DEPENDENCIES="^python@3.8.5/$(spack find --format '{hash:7}' python@3.8.5 % ${SPACK_COMPILER})"
declare -xr SPACK_SPEC="${SPACK_PACKAGE} % ${SPACK_COMPILER} ${SPACK_VARIANTS} ${SPACK_DEPENDENCIES}"

# Dump the effective environment and Spack configuration into the job log.
printenv
spack config get compilers
spack config get config
spack config get mirrors
spack config get modules
spack config get packages
spack config get repos
spack config get upstreams

# Concretize first so a bad spec fails fast, before the long install.
spack spec --long --namespaces --types "${SPACK_SPEC}"
if [[ "${?}" -ne 0 ]]; then
    echo 'ERROR: spack concretization failed.'
    exit 1
fi

time -p spack install --jobs "${SLURM_CPUS_PER_TASK}" --fail-fast --yes-to-all "${SPACK_SPEC}"
if [[ "${?}" -ne 0 ]]; then
    echo 'ERROR: spack install failed.'
    exit 1
fi

spack module lmod refresh --delete-tree -y

#sbatch --dependency="afterok:${SLURM_JOB_ID}" 'py-wheel@0.34.2.sh'

# Let the shared filesystem settle before any dependent job starts.
sleep 60
|
import React, { useState } from "react";
import wall from "../assets/img/bg-man-wall.jpg"; // bluish gray background color
// Here we import a helper function that will check if the email is valid
import { validateEmail } from "../utils/helpers";
function Form() {
// Create state variables for the fields in the form
// We are also setting their initial values to an empty string
const [email, setEmail] = useState("");
// const [userName, setUserName] = useState("");
const [message, setMessage] = useState("");
// const [password, setPassword] = useState("");
const [name, setName] = useState("");
const [errorMessage, setErrorMessage] = useState("");
const handleInputChange = (e) => {
// Getting the value and name of the input which triggered the change
const { target } = e;
const inputType = target.name;
const inputValue = target.value;
// Based on the input type, we set the state of either email, username, and password
if (inputType === "email") {
setEmail(inputValue);
} else if (inputType === "message") {
setMessage(inputValue);
} else if (inputType === "name") {
setName(inputValue);
}
// } else if (inputType === "userName") {
// setUserName(inputValue);}
// else {
// setName(inputValue);
// }
};
const handleFormSubmit = (e) => {
// Preventing the default behavior of the form submit (which is to refresh the page)
e.preventDefault();
// First we check to see if the email is not valid or if the userName is empty. If so we set an error message to be displayed on the page.
if (!validateEmail(email) || !message || !name) {
setErrorMessage("Email invalid or message/name is empty");
// We want to exit out of this code block if something is wrong so that the user can correct it
return;
// Then we check to see if the password is not valid. If so, we set an error message regarding the password.
};
// If everything goes according to plan, we want to clear out the input after a successful registration.
setMessage("");
setName("");
setEmail("");
};
return (
<footer id="ts-footer" className="mt-5">
<section
id="contact"
className="ts-block ts-separate-bg-element"
data-bg-image={wall}
data-bg-image-opacity=".1"
>
<div className="container">
<div className="ts-title">
<h2 className="ts-bubble-border">Contact</h2>
</div>
{/* ================== START FORM ================== */}
<div>
<form className="form">
{/* NAME */}
<div className="mb-3">
<label className="form-label">Full Name</label>
<input value={name} name="name" type="text" className="form-control" placeholder="Your First and Last Name" onChange={handleInputChange} />
</div>
{/* EMAIL */}
<div className="mb-3">
<label className="form-label">Email address</label>
<input value={email} name="email" type="email" className="form-control" placeholder="<EMAIL>" onChange={handleInputChange} />
</div>
{/* MESSAGE */}
<div className="mb-3">
<label className="form-label">Message</label>
<textarea value={message} name="message" type="text" className="form-control" placeholder="Leave me a message" onChange={handleInputChange} rows="3" />
</div>
<button type="button" onClick={handleFormSubmit} className="btn btn-primary mb-3">
Submit
</button>
</form>
{errorMessage && (
<div>
<p className="error-text">{errorMessage}</p>
</div>
)}
</div>
<hr className="ts-hr-light mb-5" />
{/* ================== END FORM ================== */}
{/* THESE ARE THE SOCIAL ICONS */}
<div className="row ts-xs-text-center ">
{/* <!-- start: ICON 1 --> */}
<div className="col-sm-6 col-md-3 mb-4">
<i
className="fas fa-map-marker-alt mb-4 footer-icon"
alt="Pink icon of a map pin, indicating the location of Candra Fried"
></i>
<h5>Location</h5>
<div className="ts-opacity__50">
<figure className="mb-0">Dallas, TX, USA</figure>
</div>
</div>
{/* <!-- start: ICON 1 --> */}
{/* <!-- start: ICON 2 --> */}
<div className="col-sm-6 col-md-3 mb-4">
<i
className="fas fa-envelope-open mb-4 footer-icon"
alt="A pink icon of an at symbol indicating the email of <NAME>"
></i>
<h5>Email</h5>
<div className="ts-opacity__50">
<figure className="mb-0"><EMAIL></figure>
</div>
{/* <!--end ts-opacity__50--> */}
</div>
{/* <!-- start: ICON 2 --> */}
{/* <!-- start: ICON 3 --> */}
<div className="col-sm-6 col-md-3 mb-4">
<i
className="fab fa-linkedin mb-4 footer-icon"
alt="A pink LinkedIn icon"
></i>
<h5>LinkedIn</h5>
<div className="ts-opacity__50">
<figure className="mb-0">
<a
href="https://www.linkedin.com/in/candracook/"
target="_blank"
rel="noreferrer"
>
View Profile
</a>
</figure>
</div>
{/* <!--end ts-opacity__50--> */}
</div>
{/* <!-- start: ICON 3 --> */}
{/* <!-- start: ICON 4 --> */}
<div className="col-sm-6 col-md-3 mb-4">
<i
className="fab fa-github mb-4 footer-icon"
alt="A pink LinkedIn icon"
></i>
<h5>Github</h5>
<div className="ts-opacity__50">
<figure className="mb-0">
<a
href="https://github.com/candracodes"
target="_blank"
rel="noreferrer"
>
View Profile
</a>
</figure>
</div>
{/* <!--end ts-opacity__50--> */}
</div>
</div>
{/* <!--end row--> */}
</div>
{/* <!--end container--> */}
</section>
{/* <!--end #contact--> */}
</footer>
);
}
export default Form;
|
#!/bin/bash
# Publish the rendered book (docs/) to the gh-pages branch from CI.
set -o errexit -o nounset

BASE_REPO=$PWD

update_website() {
    # Work in a sibling directory holding only the gh-pages branch.
    cd ..; mkdir gh-pages; cd gh-pages
    git init
    git config user.name "Sébastien Rochette"
    git config user.email "sebastienrochettefr@gmail.com"
    git config --global push.default simple
    git remote add upstream "https://$GH_TOKEN@github.com/ThinkR-open/building-shiny-apps-workflow.git"
    git fetch upstream
    git checkout gh-pages
    cp -fvr $BASE_REPO/docs/* .
    # Fix: 'git add *' skips dotfiles (e.g. .nojekyll) and aborts the
    # errexit script when the glob matches nothing; stage everything.
    git add -A
    git commit -a -m "Updating book from $BASE_REPO (${TRAVIS_BUILD_NUMBER})"
    git status
    git push
    git status
    cd ..
}

update_website
echo "test"
|
#!/usr/bin/env bash
# Para-chain integration-test helpers.  NODE1..NODE4 and CLI are exported
# by the surrounding docker-compose test harness.
PARA_CLI="docker exec ${NODE3} /root/chain33-para-cli"
PARA_CLI2="docker exec ${NODE2} /root/chain33-para-cli"
PARA_CLI1="docker exec ${NODE1} /root/chain33-para-cli"
PARA_CLI4="docker exec ${NODE4} /root/chain33-para-cli"

PARANAME="para"

# BSD sed (macOS) requires an explicit backup suffix for in-place edits.
xsedfix=""
if [ "$(uname)" == "Darwin" ]; then
    xsedfix=".bak"
fi
# Generate one config file per para node from the shared template and
# assign each node its own consensus auth account.
function para_init() {
    para_set_toml chain33.para33.toml
    para_set_toml chain33.para32.toml
    para_set_toml chain33.para31.toml
    para_set_toml chain33.para30.toml

    sed -i $xsedfix 's/^authAccount=.*/authAccount="1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4"/g' chain33.para33.toml
    sed -i $xsedfix 's/^authAccount=.*/authAccount="1JRNjdEqp4LJ5fqycUBm9ayCKSeeskgMKR"/g' chain33.para32.toml
    sed -i $xsedfix 's/^authAccount=.*/authAccount="1NLHPEcbTWWxxU3dGUZBhayjrCHD3psX7k"/g' chain33.para31.toml
    sed -i $xsedfix 's/^authAccount=.*/authAccount="1MCftFynyvG2F4ED5mdHYgziDxx6vDrScs"/g' chain33.para30.toml
}

# $1 = destination toml.  Copy the base template and patch title, test-net
# flag, block timing and RPC endpoints for the dockerized test network.
function para_set_toml() {
    cp chain33.para.toml "${1}"

    sed -i $xsedfix 's/^Title.*/Title="user.p.'''$PARANAME'''."/g' "${1}"
    sed -i $xsedfix 's/^# TestNet=.*/TestNet=true/g' "${1}"
    sed -i $xsedfix 's/^startHeight=.*/startHeight=0/g' "${1}"
    sed -i $xsedfix 's/^emptyBlockInterval=.*/emptyBlockInterval=4/g' "${1}"
    # rpc
    sed -i $xsedfix 's/^jrpcBindAddr=.*/jrpcBindAddr="0.0.0.0:8901"/g' "${1}"
    sed -i $xsedfix 's/^grpcBindAddr=.*/grpcBindAddr="0.0.0.0:8902"/g' "${1}"
    sed -i $xsedfix 's/^whitelist=.*/whitelist=["localhost","127.0.0.1","0.0.0.0"]/g' "${1}"
    sed -i $xsedfix 's/^ParaRemoteGrpcClient=.*/ParaRemoteGrpcClient="nginx:8803"/g' "${1}"
}
# Import each node's consensus auth key into that node's wallet.
function para_set_wallet() {
    echo "=========== # para set wallet ============="
    para_import_key "${PARA_CLI}" "0x6da92a632ab7deb67d38c0f6560bcfed28167998f6496db64c258d5e8393a81b"
    para_import_key "${PARA_CLI2}" "0x19c069234f9d3e61135fefbeb7791b149cdf6af536f26bebb310d4cd22c3fee4"
    para_import_key "${PARA_CLI1}" "0x7a80a1f75d7360c6123c32a78ecf978c1ac55636f87892df38d8b85a9aeff115"
    para_import_key "${PARA_CLI4}" "0xcacb1f5d51700aea07fca2246ab43b0917d70405c65edea9b5063d72eb5c6b71"
}

# $1 = node CLI wrapper, $2 = private key to import.
# Seed + unlock the wallet, import the key, disable auto-mining; any
# failed step aborts the whole test run.
function para_import_key() {
    echo "=========== # save seed to wallet ============="
    result=$(${1} seed save -p 1314 -s "tortoise main civil member grace happy century convince father cage beach hip maid merry rib" | jq ".isok")
    if [ "${result}" = "false" ]; then
        echo "save seed to wallet error seed, result: ${result}"
        exit 1
    fi

    echo "=========== # unlock wallet ============="
    result=$(${1} wallet unlock -p 1314 -t 0 | jq ".isok")
    if [ "${result}" = "false" ]; then
        exit 1
    fi

    echo "=========== # import private key ============="
    echo "key: ${2}"
    result=$(${1} account import_key -k "${2}" -l paraAuthAccount | jq ".label")
    if [ -z "${result}" ]; then
        exit 1
    fi

    echo "=========== # close auto mining ============="
    result=$(${1} wallet auto_mine -f 0 | jq ".isok")
    if [ "${result}" = "false" ]; then
        exit 1
    fi

    echo "=========== # wallet status ============="
    ${1} wallet status
}
# Fund the test accounts on the main chain, register the four para auth
# accounts via manage-config, and blacklist BTY as a token symbol.
function para_transfer() {
    echo "=========== # para chain transfer ============="
    para_transfer2account "1Q8hGLfoGe63efeWa8fJ4Pnukhkngt6poK"
    para_transfer2account "1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4"
    para_transfer2account "1JRNjdEqp4LJ5fqycUBm9ayCKSeeskgMKR"
    para_transfer2account "1NLHPEcbTWWxxU3dGUZBhayjrCHD3psX7k"
    para_transfer2account "1MCftFynyvG2F4ED5mdHYgziDxx6vDrScs"
    block_wait "${CLI}" 1

    echo "=========== # para chain send config ============="
    para_configkey "${CLI}" "paracross-nodes-user.p.${PARANAME}." "1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4"
    para_configkey "${CLI}" "paracross-nodes-user.p.${PARANAME}." "1JRNjdEqp4LJ5fqycUBm9ayCKSeeskgMKR"
    para_configkey "${CLI}" "paracross-nodes-user.p.${PARANAME}." "1NLHPEcbTWWxxU3dGUZBhayjrCHD3psX7k"
    para_configkey "${CLI}" "paracross-nodes-user.p.${PARANAME}." "1MCftFynyvG2F4ED5mdHYgziDxx6vDrScs"
    block_wait "${CLI}" 1

    txhash=$(para_configkey "${PARA_CLI}" "token-blacklist" "BTY")
    echo "txhash=$txhash"
    query_tx "${PARA_CLI}" "${txhash}"
}

# Send 10 coins from the genesis key to account $1.
function para_transfer2account() {
    echo "${1}"
    hash1=$(${CLI} send coins transfer -a 10 -n test -t "${1}" -k 4257D8692EF7FE13C68B65D6A52F03933DB2FA5CE8FAF210B5B8B80C721CED01)
    echo "${hash1}"
}

# Add config key $2 = value $3 through CLI wrapper $1, signing with the
# manager key; echoes the resulting tx hash for the caller to capture.
function para_configkey() {
    tx=$(${1} config config_tx -o add -k "${2}" -v "${3}")
    sign=$(${CLI} wallet sign -k 0xc34b5d9d44ac7b754806f761d3d4d2c4fe5214f6b074c19f069c4f5c2a29c8cc -d "${tx}")
    send=$(${CLI} wallet send -d "${sign}")
    echo "${send}"
}

# Poll until tx $2 is visible through CLI $1, retrying up to ~100 rounds
# with a short block wait between attempts; aborts the run on timeout.
function query_tx() {
    block_wait "${1}" 3
    local times=100
    while true; do
        ret=$(${1} tx query -s "${2}" | jq -r ".tx.hash")
        echo "query hash is ${2}, return ${ret} "
        if [ "${ret}" != "${2}" ]; then
            block_wait "${1}" 2
            times=$((times - 1))
            if [ $times -le 0 ]; then
                echo "query tx=$2 failed"
                exit 1
            fi
        else
            echo "query tx=$2 success"
            break
        fi
    done
}
# $1 = para CLI wrapper.  Pre-create then finish a 10000 "GD" token and
# verify owner/total/balance after each stage.
function token_create() {
    echo "=========== # para token test ============="
    echo "=========== # 1.token precreate ============="
    hash=$(${1} send token precreate -f 0.001 -i test -n guodunjifen -a 1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4 -p 0 -s GD -t 10000 -k 1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4)
    echo "${hash}"
    query_tx "${1}" "${hash}"

    ${1} token get_precreated
    owner=$(${1} token get_precreated | jq -r ".owner")
    if [ "${owner}" != "1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4" ]; then
        echo "wrong pre create owner"
        exit 1
    fi
    total=$(${1} token get_precreated | jq -r ".total")
    if [ "${total}" != 10000 ]; then
        echo "wrong pre create total"
        exit 1
    fi

    echo "=========== # 2.token finish ============="
    hash=$(${1} send token finish -f 0.001 -a 1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4 -s GD -k 0xc34b5d9d44ac7b754806f761d3d4d2c4fe5214f6b074c19f069c4f5c2a29c8cc)
    echo "${hash}"
    query_tx "${1}" "${hash}"

    ${1} token get_finish_created
    owner=$(${1} token get_finish_created | jq -r ".owner")
    if [ "${owner}" != "1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4" ]; then
        echo "wrong finish created owner"
        exit 1
    fi
    total=$(${1} token get_finish_created | jq -r ".total")
    if [ "${total}" != 10000 ]; then
        echo "wrong finish created total"
        exit 1
    fi

    ${1} token token_balance -a 1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4 -e token -s GD
    balance=$(${1} token token_balance -a 1KSBd17H7ZK8iT37aJztFB22XGwsPTdwE4 -e token -s GD | jq -r '.[]|.balance')
    if [ "${balance}" != "10000.0000" ]; then
        echo "wrong para token genesis create, should be 10000.0000"
        exit 1
    fi
}

# $1 = para CLI wrapper.  Exercise token transfer, send-to-exec and
# withdraw, asserting the expected balance after every step.
function token_transfer() {
    echo "=========== # 2.token transfer ============="
    hash=$(${1} send token transfer -a 11 -s GD -t 1GGF8toZd96wCnfJngTwXZnWCBdWHYYvjw -k 0x6da92a632ab7deb67d38c0f6560bcfed28167998f6496db64c258d5e8393a81b)
    echo "${hash}"
    query_tx "${1}" "${hash}"

    ${1} token token_balance -a 1GGF8toZd96wCnfJngTwXZnWCBdWHYYvjw -e token -s GD
    balance=$(${1} token token_balance -a 1GGF8toZd96wCnfJngTwXZnWCBdWHYYvjw -e token -s GD | jq -r '.[]|.balance')
    if [ "${balance}" != "11.0000" ]; then
        echo "wrong para token transfer, should be 11.0000"
        exit 1
    fi

    echo "=========== # 3.token send exec ============="
    hash=$(${1} send token send_exec -a 11 -s GD -e paracross -k 0x6da92a632ab7deb67d38c0f6560bcfed28167998f6496db64c258d5e8393a81b)
    echo "${hash}"
    query_tx "${1}" "${hash}"

    # $ ./build/chain33-cli exec addr -e user.p.para.paracross
    # 19WJJv96nKAU4sHFWqGmsqfjxd37jazqii
    ${1} token token_balance -a 19WJJv96nKAU4sHFWqGmsqfjxd37jazqii -e token -s GD
    balance=$(${1} token token_balance -a 19WJJv96nKAU4sHFWqGmsqfjxd37jazqii -e token -s GD | jq -r '.[]|.balance')
    if [ "${balance}" != "11.0000" ]; then
        echo "wrong para token send exec, should be 11.0000"
        exit 1
    fi

    echo "=========== # 4.token withdraw ============="
    hash=$(${1} send token withdraw -a 11 -s GD -e paracross -k 0x6da92a632ab7deb67d38c0f6560bcfed28167998f6496db64c258d5e8393a81b)
    echo "${hash}"
    query_tx "${1}" "${hash}"

    ${1} token token_balance -a 19WJJv96nKAU4sHFWqGmsqfjxd37jazqii -e token -s GD
    balance=$(${1} token token_balance -a 19WJJv96nKAU4sHFWqGmsqfjxd37jazqii -e token -s GD | jq -r '.[]|.balance')
    if [ "${balance}" != "0.0000" ]; then
        echo "wrong para token withdraw, should be 0.0000"
        exit 1
    fi
}
# Cross-chain asset transfer then partial withdraw; the main-chain balance
# held by the paracross contract should settle at 10 - 1.4 + 0.7 = 9.3.
function para_cross_transfer_withdraw() {
    echo "=========== # para cross transfer/withdraw test ============="
    paracrossAddr=1HPkPopVe3ERfvaAgedDtJQ792taZFEHCe
    ${CLI} account list
    ${CLI} send bty transfer -a 10 -n test -t $paracrossAddr -k 4257D8692EF7FE13C68B65D6A52F03933DB2FA5CE8FAF210B5B8B80C721CED01
    hash=$(${CLI} send para asset_transfer --title user.p.para. -a 1.4 -n test -t 12qyocayNF7Lv6C9qW4avxs2E7U41fKSfv -k 4257D8692EF7FE13C68B65D6A52F03933DB2FA5CE8FAF210B5B8B80C721CED01)
    echo "${hash}"
    # Give the para chain time to commit the transfer before withdrawing.
    sleep 15

    ${CLI} send para asset_withdraw --title user.p.para. -a 0.7 -n test -t 12qyocayNF7Lv6C9qW4avxs2E7U41fKSfv -k 4257D8692EF7FE13C68B65D6A52F03933DB2FA5CE8FAF210B5B8B80C721CED01
    local times=100
    while true; do
        acc=$(${CLI} account balance -e paracross -a 12qyocayNF7Lv6C9qW4avxs2E7U41fKSfv | jq -r ".balance")
        echo "account balance is ${acc}, expect 9.3 "
        if [ "${acc}" != "9.3000" ]; then
            block_wait "${CLI}" 2
            times=$((times - 1))
            if [ $times -le 0 ]; then
                echo "para_cross_transfer_withdraw failed"
                exit 1
            fi
        else
            echo "para_cross_transfer_withdraw success"
            break
        fi
    done
}

# Entry point for the para-chain "test" stage.
# NOTE(review): the "${1}" argument is accepted but unused — the token
# helpers are always driven through PARA_CLI.
function para_test() {
    echo "=========== # para chain test ============="
    token_create "${PARA_CLI}"
    token_transfer "${PARA_CLI}"
    para_cross_transfer_withdraw
}
function paracross() {
if [ "${2}" == "init" ]; then
para_init
elif [ "${2}" == "config" ]; then
para_transfer
para_set_wallet
elif [ "${2}" == "test" ]; then
para_test "${1}"
fi
if [ "${2}" == "forkInit" ]; then
para_init
elif [ "${2}" == "forkConfig" ]; then
para_transfer
para_set_wallet
elif [ "${2}" == "forkCheckRst" ]; then
checkParaBlockHashfun 30
fi
if [ "${2}" == "fork2Init" ]; then
para_init
elif [ "${2}" == "fork2Config" ]; then
para_transfer
para_set_wallet
elif [ "${2}" == "fork2CheckRst" ]; then
checkParaBlockHashfun 30
fi
}
|
<reponame>Lissone/lix-attendance<filename>packages/web/src/pages/client.tsx
/* eslint-disable no-console */
import Head from 'next/head'
import Link from 'next/link'
import { useEffect, useState } from 'react'
import { format, parseISO } from 'date-fns'
import ptBR from 'date-fns/locale/pt-BR'
import { BiSend, BiExit } from 'react-icons/bi'
import { useAuth } from '../hooks/useAuth'
import { api } from '../services/api'
import {
Container,
ChatContainer,
ChatContent,
Information,
AdminMessage,
ClientMessage,
ClosedConnection
} from '../styles/client'
// Minimal user shape shared by admins and clients.
interface User {
  id: string
  name: string
  email: string
  socket?: string
}

// One support-chat session between a client and (optionally) an admin.
interface Connection {
  id: string
  adminId: string | null
  clientId: string
  closedAt: string
  messages: Message[]
  admin: User
  client: User
}

interface Message {
  id?: string
  adminId: string
  clientId: string
  text: string
  // Display-only "HH:mm" string derived from createdAt.
  createdHour?: string
  createdAt?: string
  updatedAt?: string
}
/**
 * Client-side support-chat page: loads any previous messages for the
 * client's connection, then exchanges messages with the attending admin
 * in real time over the shared socket.
 */
export default function Client({ socket }: any) {
  const { user, updateClientConnection } = useAuth()

  const [connection, setConnection] = useState({} as Connection)
  const [text, setText] = useState('')

  // Load the message history once if the client already has a connection.
  useEffect(() => {
    if (user.connectionId) {
      getAllMessages()
    }
  }, [])

  // Re-bind the socket listeners whenever `connection` changes so the
  // handlers close over fresh state.
  useEffect(() => {
    socket.on('admin_connect_with_client', ({ admin }) => {
      setConnection({
        ...connection,
        admin
      })
    })

    socket.on('admin_close_connection_with_client', response => {
      setConnection(prevState => ({
        ...prevState,
        closedAt: response.connection.closedAt
      }))
    })

    socket.on('admin_send_to_client', ({ message }) => {
      const messageFormatted = {
        ...message,
        createdHour: format(parseISO(message.createdAt), 'HH:mm', {
          locale: ptBR
        })
      }

      const newMessages = [...connection.messages, messageFormatted]

      setConnection({
        ...connection,
        messages: newMessages
      })
    })

    return () => {
      socket.off('admin_connect_with_client')
      // Fix: this listener was re-registered on every effect run but never
      // removed, stacking duplicate close handlers over time.
      socket.off('admin_close_connection_with_client')
      socket.off('admin_send_to_client')
    }
  }, [connection])

  // Fetch the connection (with messages) and pre-format message times.
  async function getAllMessages() {
    try {
      const { data } = await api.get<Connection>(
        `/connections/${user.connectionId}`
      )

      const messagesFormatted = data.messages.map(message => ({
        ...message,
        createdHour: format(parseISO(message.createdAt), 'HH:mm', {
          locale: ptBR
        })
      }))

      setConnection({
        ...data,
        messages: messagesFormatted
      })
    } catch (err) {
      console.error(err)
    }
  }

  // Emit the typed message and optimistically append it to the thread.
  function handleSendMessage() {
    try {
      if (text.trim() === '') {
        setText('')
        return
      }

      const params = {
        connectionId: user.connectionId,
        clientId: user.id,
        adminId: connection.admin?.id,
        text
      }

      socket.emit('client_send_to_admin', params, connectionId => {
        updateClientConnection(connectionId)
      })

      // adminId is nulled locally so the message renders on the client side.
      const message = {
        ...params,
        adminId: null,
        createdHour: format(new Date(), 'HH:mm', {
          locale: ptBR
        })
      }

      let newMessages
      if (connection.messages) {
        newMessages = [...connection.messages, message]
      } else {
        newMessages = [message]
      }

      setConnection(prevState => ({
        ...prevState,
        messages: newMessages
      }))
      setText('')
    } catch (err) {
      console.error(err)
    }
  }

  // Ask the server to reopen a closed connection and clear the flag locally.
  function handleReopenConnection() {
    socket.emit('client_reopen_connection', user.connectionId)
    setConnection(prevState => ({
      ...prevState,
      closedAt: null
    }))
  }

  return (
    <>
      <Head>
        <title>Cliente help - LixAttendance</title>
      </Head>

      <Container>
        <ChatContainer>
          <header>
            <div>
              <span>Atendente</span>
              {connection.admin ? (
                <h2>{connection.admin.name}</h2>
              ) : (
                <h2>Aguardando atendimento...</h2>
              )}
            </div>
            <Link href="/">
              <a>
                <BiExit size={40} />
              </a>
            </Link>
          </header>

          <ChatContent>
            {connection.messages?.length <= 0 && (
              <Information>
                <h4>
                  Envie sua dúvida para nossos atendentes e aguarde eles
                  retornarem com a resposta
                </h4>
              </Information>
            )}

            {connection.messages?.map(message =>
              message.adminId === null || message.adminId === undefined ? (
                <ClientMessage key={message.id}>
                  <div>
                    <span>{message.text}</span>
                  </div>
                  <p>{message.createdHour}</p>
                </ClientMessage>
              ) : (
                <AdminMessage key={message.id}>
                  <div>
                    <span>{message.text}</span>
                  </div>
                  <p>{message.createdHour}</p>
                </AdminMessage>
              )
            )}

            {connection.closedAt && (
              <ClosedConnection>
                <h3>Admin fechou a conexão</h3>
                <span>
                  Se suas dúvidas ainda não foram respondidas, abra novamente a
                  conexão com seu admin responsável.
                </span>
                <button type="button" onClick={handleReopenConnection}>
                  Reabrir chamado
                </button>
              </ClosedConnection>
            )}
          </ChatContent>

          <footer>
            <input
              value={text}
              onChange={event => setText(event.target.value)}
              placeholder="Digite sua mensagem aqui"
              disabled={!!connection.closedAt}
            />
            <button
              type="button"
              onClick={handleSendMessage}
              disabled={!!connection.closedAt}
            >
              Enviar
              <BiSend size={20} />
            </button>
          </footer>
        </ChatContainer>
      </Container>
    </>
  )
}
|
<filename>pkg/labels/mocklabels/mock_test.go<gh_stars>10-100
package mocklabels_test
import (
"testing"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/labels/mocklabels"
)
// TestLabelMockTypes verifies that each typed mock label set converts to
// exactly the map exposed as its untyped counterpart: same keys and same
// values, checked in both directions.
func TestLabelMockTypes(t *testing.T) {
	for _, testcase := range []struct {
		labelType string
		typed     labels.Labels
		untyped   map[string]string
	}{{
		"Name",
		mocklabels.Name,
		mocklabels.NameMap,
	}, {
		"ClosedNameSelector",
		mocklabels.ClosedNameSelector,
		mocklabels.ClosedNameSelectorMap,
	}, {
		"Selectable",
		mocklabels.Selectable,
		mocklabels.SelectableMap,
	}} {
		typedStruct := labels.MustK8sMap(testcase.typed)

		// Every typed key must appear in the untyped map with the same value.
		for k, v := range typedStruct {
			untypedVal, ok := testcase.untyped[k]
			if !ok {
				t.Errorf("%s: key %s is missing in untyped labels", testcase.labelType, k)
				continue
			}
			if untypedVal != v {
				t.Errorf("%s: at key %s, typed value is %s, not %s", testcase.labelType, k, v, untypedVal)
			}
		}

		// And no untyped key may be absent from the typed map.
		// (Fixed message: it previously read "is expandable in untyped
		// labels", which misstated the failing direction.)
		for k := range testcase.untyped {
			if _, ok := typedStruct[k]; !ok {
				t.Errorf("%s: key %s is missing in typed labels", testcase.labelType, k)
			}
		}
	}
}
|
"use strict";
/**
 * @author <NAME> <<EMAIL>>
 * @copyright 2020
 * @license MIT
 */
// NOTE(review): compiled TypeScript output (see sourceMappingURL below);
// prefer editing the .ts source rather than this file directly.
// TS helper: Object.assign fallback emitted by the compiler.
var __assign = (this && this.__assign) || function () {
    __assign = Object.assign || function(t) {
        for (var s, i = 1, n = arguments.length; i < n; i++) {
            s = arguments[i];
            for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
                t[p] = s[p];
        }
        return t;
    };
    return __assign.apply(this, arguments);
};
// TS helper: interop shim for default imports from CommonJS modules.
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.ClickableWrapper = void 0;
var react_1 = __importDefault(require("react"));
var ClickableWrapper_hooks_1 = require("./ClickableWrapper-hooks");
// Renders children inside WrapperTag; when any click/double-click/keyboard
// handler is supplied, wires the handlers and makes the element focusable
// (tabIndex 0).  passthroughProps are merged last so callers can override.
exports.ClickableWrapper = function (props) {
    var children = props.children, WrapperTag = props.wrapperTag, passthroughProps = props.passthroughProps, onSingleClick = props.onSingleClick, onDoubleClick = props.onDoubleClick, onKeyboardClick = props.onKeyboardClick;
    var handleClick = ClickableWrapper_hooks_1.useClickHandler(onSingleClick, onDoubleClick);
    var handleKeyDown = ClickableWrapper_hooks_1.useKeyDownHandler(onKeyboardClick);
    var compProps = {};
    if (onSingleClick || onDoubleClick || onKeyboardClick) {
        compProps.onClick = handleClick;
        compProps.onKeyDown = handleKeyDown;
        compProps.tabIndex = 0;
    }
    var mergedProps = __assign(__assign({}, compProps), passthroughProps);
    return react_1.default.createElement(WrapperTag, __assign({}, mergedProps), children);
};
//# sourceMappingURL=ClickableWrapper.js.map
import { mount, shallow } from 'enzyme'
import React from 'react'
import MatchingPeopleListItem from './MatchingPeopleListItem'
// Snapshot guard: a shallow render must keep matching the stored snapshot.
it('matches last snapshot', () => {
  const wrapper = shallow(<MatchingPeopleListItem />)
  expect(wrapper).toMatchSnapshot()
})

// The last <span> is the close button — clicking it must invoke onClick.
it('calls onClick when close button clicked', () => {
  const onClick = jest.fn()
  const wrapper = mount(
    <MatchingPeopleListItem onClick={onClick} />
  )
  wrapper.find('span').last().simulate('click')
  expect(onClick).toHaveBeenCalled()
})
|
def identify_key_phrases(text):
    """Return the adjacent noun-noun bigrams of *text* as (word, word) tuples.

    Improvement over the original: ``nltk.pos_tag`` was called twice per
    bigram on isolated two-word inputs, which is both slow (one tagger run
    per pair) and less accurate (no sentence context).  Tagging the whole
    token sequence once and scanning adjacent pairs fixes both.
    """
    noun_tags = {"NN", "NNP", "NNPS", "NNS"}
    tokens = nltk.word_tokenize(text)
    tagged = nltk.pos_tag(tokens)  # one contextual tagging pass
    return [
        (first, second)
        for (first, tag1), (second, tag2) in zip(tagged, tagged[1:])
        if tag1 in noun_tags and tag2 in noun_tags
    ]
#!/usr/bin/env python
# PyQt tutorial 3
"""Minimal PySide window containing a Quit button wired to the application."""
import sys

from PySide import QtCore, QtGui

app = QtGui.QApplication(sys.argv)

window = QtGui.QWidget()
window.resize(200, 120)

# Renamed from `quit`, which shadowed the builtin quit().
quit_button = QtGui.QPushButton("Quit", window)
quit_button.setFont(QtGui.QFont("Times", 18, QtGui.QFont.Bold))
quit_button.setGeometry(10, 40, 180, 40)
# New-style signal/slot connection replaces the stringly-typed
# QObject.connect(..., SIGNAL("clicked()"), ..., SLOT("quit()")) form.
quit_button.clicked.connect(app.quit)

window.show()
sys.exit(app.exec_())
|
<!-- Schedule table. scope="col" added to header cells so screen readers
     associate each data cell with its column header (WAI-ARIA table guidance). -->
<table>
  <thead>
    <tr>
      <th scope="col">Name</th>
      <th scope="col">Date</th>
      <th scope="col">Time</th>
    </tr>
  </thead>
  <tbody>
    <tr>
      <td>John</td>
      <td>January 5</td>
      <td>8:00 am</td>
    </tr>
    <tr>
      <td>Marta</td>
      <td>February 12</td>
      <td>9:15 pm</td>
    </tr>
    <tr>
      <td>Steve</td>
      <td>March 29</td>
      <td>10:30 am</td>
    </tr>
  </tbody>
</table>
<filename>src/main/java/com/kvn/poi/imp/processor/ResolverAdaptor.java<gh_stars>10-100
package com.kvn.poi.imp.processor;
import java.lang.reflect.Field;
import com.kvn.poi.imp.anno.ExcelColum;
import com.kvn.poi.imp.anno.ExcelDateColum;
/**
 * Adapter that selects the appropriate field resolver based on the Excel
 * annotation present on a reflected field.
 *
 * @author wzy
 * @date 2017-07-12 15:30:52
 */
public class ResolverAdaptor {
    public static AbstractResolver<?> adapt(Field field) {
        // Plain column annotation takes precedence.
        final ExcelColum columnAnnotation = field.getAnnotation(ExcelColum.class);
        if (columnAnnotation != null) {
            return new DefaultFieldResolver(field, columnAnnotation);
        }
        // Date-specific column annotation.
        final ExcelDateColum dateAnnotation = field.getAnnotation(ExcelDateColum.class);
        if (dateAnnotation != null) {
            return new DateFieldResolver(field, dateAnnotation);
        }
        // No Excel annotation present: fall back to the shared no-op resolver.
        return EmptyResolver.SINGLE.INSTANCE;
    }
}
|
/* Copyright 2009 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#import <Cocoa/Cocoa.h>
@class MBProject;
// Controller for "Get Project Info" dialog.
@interface MBProjectInfoController : NSWindowController {
 @private
  IBOutlet NSTextField *nameField_; // static (not editable)
  IBOutlet NSTextField *pathField_; // static (not editable)
  IBOutlet NSTextField *portField_; // editable port value (presumably the dev server port — confirm)
  IBOutlet NSButton *clearDSOnLaunchCheckBox_; // per its name, toggles clearing the datastore on launch
  IBOutlet NSTextField *extraFlagsField_; // user-supplied extra launch flags
  IBOutlet NSTextView *fullFlagsField_; // merged flag summary shown to the user
  MBProject *project_; // the project whose info is displayed
}
// Designated initializer
- (id)initWithProject:(MBProject *)project;
// Called to update the flag summary, merging checkboxes, extra flags, etc.
- (IBAction)updateFlagsSummary:(id)sender;
// Ends the modal session, presumably signalling success to the caller.
- (IBAction)stopModalWithSuccess:(id)sender;
@end
|
#!/bin/sh
# Abort with an error message. Expanding the intentionally-unset variable
# __fail_fast with the ":?" operator makes the shell print "$1" to stderr
# and exit the (non-interactive) script with a non-zero status.
fail ()
{
    : "${__fail_fast:?$1}";
}
# Make the background of image $1 transparent, writing the result to $2.
#   $1  input image          $2  output image (must not already exist)
#   $3  $4  x/y coordinate of a background pixel (integers)
#   $5  flood-fill fuzz percentage (float)
mask_background ()
{
    command -v convert >/dev/null 2>&1 || fail "Couldn't locate convert command -- install ImageMagick"
    command -v composite >/dev/null 2>&1 || fail "Couldn't locate composite command -- install ImageMagick"
    [ -f "${1}" ] || fail "Couldn't find ${1}."
    [ -f "${2}" ] && fail "${2} already exists."
    # BUG FIX: the arguments were previously passed as printf's FORMAT string
    # (printf "${3}"), so values containing '%' were interpreted as format
    # directives. Pass them as data behind a literal '%s' format instead.
    printf '%s' "${3}" | grep -Eq '^\-?[[:digit:]]*$' || fail "Third argument must be an integer."
    printf '%s' "${4}" | grep -Eq '^\-?[[:digit:]]*$' || fail "Fourth argument must be an integer."
    printf '%s' "${5}" | grep -Eq '^\-?[[:digit:]]*\.?[[:digit:]]+$' || fail "Fifth argument must be a float."
    # Sample the background colour at the given coordinate.
    color=$(convert "${1}" -format "%[pixel:p{${3},${4}}]" info:-)
    # Flood-fill from that pixel, extract the fill as an alpha mask (blurred
    # and eroded slightly to soften the edge), and apply it to the original.
    convert "${1}" \
        -alpha off -bordercolor "${color}" -border 1 \
        \( +clone -fuzz "${5}%" -fill none -floodfill "+${3}+${4}" "${color}" \
        -alpha extract -geometry 200% -blur 0x0.5 -morphology erode square:1 -geometry 50% \) \
        -compose CopyOpacity -composite -shave 1 \
        "PNG32:${2}.transparent"
    composite "${1}" "${2}.transparent" -compose Dst_In -alpha Set "${2}" || fail "Couldn't copy original alpha channel."
    rm "${2}.transparent" || fail "Couldn't remove ${2}.transparent."
}
|
from typing import List
import numpy as np
from skimage.measure import label, regionprops
def count_regions(bImg: List[List[bool]]) -> int:
    """Return the number of connected foreground regions in a binary image.

    The nested boolean list is converted to an array and connected regions
    are labelled 1..N; the largest label is therefore the region count.
    """
    labeled_image = label(np.array(bImg))
    return labeled_image.max()
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
# Resolve GOPATH-relative locations and the kubed repository root.
GOPATH=$(go env GOPATH)
SRC=$GOPATH/src
BIN=$GOPATH/bin
ROOT=$GOPATH
REPO_ROOT=$GOPATH/src/github.com/appscode/kubed
# Shared build/publish helpers (detect_tag, hub_canary, hub_up, source_repo, ...).
source "$REPO_ROOT/hack/libbuild/common/lib.sh"
source "$REPO_ROOT/hack/libbuild/common/public_image.sh"
# "dev" by default; docker_push/docker_release below gate on this value.
APPSCODE_ENV=${APPSCODE_ENV:-dev}
IMG=kubed
# osm binary version that gets bundled into the image.
OSM_VER=${OSM_VER:-0.6.2}
DIST=$REPO_ROOT/dist
mkdir -p $DIST
# Re-export tag variables (TAG, TAG_STRATEGY, ...) recorded by a prior build.
if [ -f "$DIST/.tag" ]; then
	export $(cat $DIST/.tag | xargs)
fi
# Remove the staged kubed binary from the docker build context.
clean() {
	pushd $REPO_ROOT/hack/docker
	rm -rf kubed
	popd
}
# Build the kubed binary, record the release tag, and fetch the osm binary
# that will be bundled into the docker image.
build_binary() {
	pushd $REPO_ROOT
	./hack/builddeps.sh
	./hack/make.py build kubed
	detect_tag $REPO_ROOT/dist/.tag
	# Download the osm binary (the original comment said "restic", but the
	# code below fetches osm — kept as a review note).
	rm -rf $DIST/osm
	mkdir $DIST/osm
	cd $DIST/osm
	wget https://cdn.appscode.com/binaries/osm/${OSM_VER}/osm-alpine-amd64
	chmod +x osm-alpine-amd64
	popd
}
# Assemble the docker build context (kubed + osm binaries), generate a
# Dockerfile on the fly, build the image, then clean the context up.
build_docker() {
	pushd $REPO_ROOT/hack/docker
	cp $DIST/kubed/kubed-alpine-amd64 kubed
	chmod 755 kubed
	cp $DIST/osm/osm-alpine-amd64 osm
	chmod 755 osm
	cat >Dockerfile <<EOL
FROM alpine

RUN set -x \
  && apk add --update --no-cache ca-certificates

COPY osm /usr/bin/osm
COPY kubed /usr/bin/kubed
ENTRYPOINT ["kubed"]
EOL
	local cmd="docker build -t appscode/$IMG:$TAG ."
	echo $cmd; $cmd
	rm kubed osm Dockerfile
	popd
}
# Full build: compile the binary, then produce the docker image.
build() {
	build_binary
	build_docker
}
# Push a canary image. Refuses to run in prod or for git-tagged builds,
# which must go through docker_release instead.
docker_push() {
	if [ "$APPSCODE_ENV" = "prod" ]; then
		echo "Nothing to do in prod env. Are you trying to 'release' binaries to prod?"
		exit 1
	fi
	if [ "$TAG_STRATEGY" = "git_tag" ]; then
		echo "Are you trying to 'release' binaries to prod?"
		exit 1
	fi
	hub_canary
}
# Publish a release image. Only allowed in the prod environment and only
# for builds tagged from a git tag.
docker_release() {
	if [ "$APPSCODE_ENV" != "prod" ]; then
		echo "'release' only works in PROD env."
		exit 1
	fi
	if [ "$TAG_STRATEGY" != "git_tag" ]; then
		echo "'apply_tag' to release binaries and/or docker images."
		exit 1
	fi
	hub_up
}
source_repo $@
|
<filename>src/lucandra/LucandraTermDocs.java
/**
* Copyright T <NAME>
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lucandra;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import org.apache.log4j.Logger;
import org.apache.lucene.index.*;
/**
 * TermDocs/TermPositions implementation backed by a LucandraTermEnum.
 * Postings for the currently seeked term are materialized into
 * {@code termDocs} and walked with a simple cursor.
 */
public class LucandraTermDocs implements TermDocs, TermPositions
{
    // Reader this enumerator belongs to; used to resolve term enums in seek().
    private IndexReader indexReader;
    // Source of per-term posting data.
    private LucandraTermEnum termEnum;
    // Postings for the currently seeked term; null when the term is absent.
    private LucandraTermInfo[] termDocs;
    // Cursor into termDocs; -1 means "before the first posting".
    private int docPosition;
    // Positions of the current doc's term occurrences (refreshed by freq()).
    private int[] termPositionArray;
    // Cursor into termPositionArray used by nextPosition().
    private int termPosition;
    private static final Logger logger = Logger.getLogger(LucandraTermDocs.class);
    public LucandraTermDocs(IndexReader indexReader) throws IOException
    {
        this.indexReader = indexReader;
        termEnum = new LucandraTermEnum(indexReader);
    }
    // No underlying resources to release.
    public void close() throws IOException
    {
    }
    // Current document id; clamps a "before first" cursor to the first entry.
    public int doc()
    {
        if (docPosition < 0)
            docPosition = 0;
        return termDocs[docPosition].docId;
    }
    // Frequency of the term in the current doc. Side effect: resets the
    // position cursor so nextPosition() starts from the first occurrence.
    public int freq()
    {
        Integer freq = termDocs[docPosition].freq;
        termPositionArray = termDocs[docPosition].positions;
        termPosition = 0;
        return freq;
    }
    // Advance to the next posting; false when exhausted or no term seeked.
    public boolean next() throws IOException
    {
        if (termDocs == null)
            return false;
        return ++docPosition < termDocs.length;
    }
    // Bulk-read doc ids and frequencies into the supplied arrays; returns
    // the number of entries filled.
    public int read(int[] docs, int[] freqs) throws IOException
    {
        int i = 0;
        for (; (termDocs != null && docPosition < termDocs.length && i < docs.length); i++, docPosition++)
        {
            docs[i] = doc();
            freqs[i] = freq();
        }
        return i;
    }
    // Position this enumerator on an exact term; termDocs becomes null when
    // the term does not exist in the index.
    public void seek(Term term) throws IOException
    {
        if (termEnum.skipTo(term))
        {
            if (termEnum.term().equals(term))
            {
                termDocs = termEnum.getTermDocFreq();
            }
            else
            {
                termDocs = null;
            }
        }
        docPosition = -1;
    }
    // Adopt an existing term enum (or re-resolve it through the reader) and
    // load its postings.
    public void seek(TermEnum termEnum) throws IOException
    {
        if (termEnum instanceof LucandraTermEnum)
        {
            this.termEnum = (LucandraTermEnum) termEnum;
        }
        else
        {
            this.termEnum = (LucandraTermEnum) indexReader.terms(termEnum.term());
        }
        termDocs = this.termEnum.getTermDocFreq();
        if (logger.isDebugEnabled())
            logger.debug("seeked out " + termDocs.length);
        docPosition = -1;
    }
    // Load only the postings for the given doc keys; returns them directly.
    public LucandraTermInfo[] filteredSeek(Term term, List<ByteBuffer> docNums) throws IOException
    {
        termDocs = termEnum.loadFilteredTerms(term, docNums);
        docPosition = -1;
        return termDocs;
    }
    // this should be used to find a already loaded doc
    // NOTE(review): always rescans from the first posting (docPosition = 0)
    // rather than from the current cursor — correct for sorted postings but
    // O(n) per call; confirm callers do not rely on forward-only semantics.
    public boolean skipTo(int target) throws IOException
    {
        // find the target
        if (termDocs == null)
            return false;
        docPosition = 0;
        while (target > doc())
        {
            if (!next())
                return false;
        }
        return true;
    }
    // Payloads are not supported by this implementation.
    public byte[] getPayload(byte[] data, int offset) throws IOException
    {
        return null;
    }
    public int getPayloadLength()
    {
        return 0;
    }
    public boolean isPayloadAvailable()
    {
        return false;
    }
    // Next occurrence position within the current doc; requires a prior
    // freq() call to populate the position array. Returns -1 when absent.
    public int nextPosition() throws IOException
    {
        if (termPositionArray == null)
            return -1;
        int pos = termPositionArray[termPosition];
        termPosition++;
        if (logger.isDebugEnabled())
            logger.debug("Doc: " + doc() + ", Position: " + pos);
        return pos;
    }
}
|
<reponame>maxponte/chess
package com.company;
import java.util.ArrayList;
import java.util.List;
/**
 * Pawn movement rules: single/double forward pushes, diagonal captures,
 * and en passant.
 */
public class Pawn implements PieceType {
    // IF ANY OF THESE CHANGE, UPDATE PieceTypeSerializer.java
    // Forward direction: +1 for white (increasing rows), -1 for black.
    private int velocity;
    Color color;
    public Pawn(Color color) {
        this.velocity = color == Color.WHITE ? 1 : -1;
        this.color = color;
    }
    @Override
    public boolean isPawn() {
        return true;
    }
    /**
     * Validates a pawn move and returns the board effects it produces,
     * or null when the move is illegal.
     */
    @Override
    public List<Effect> move(Move mov, Board board) {
        Square src = mov.src;
        Square dst = mov.dst;
        // moving too fast
        if(mov.dy() != velocity && mov.dy() != 2*velocity) return null;
        // no double hops if not in starting position
        if(mov.dy() == 2 && src.row != 1) return null;
        if(mov.dy() == -2 && src.row != 6) return null;
        List<Effect> effects = new ArrayList<>();
        if(mov.dx() != 0) { // diagonal move
            if(mov.adx() > 1) return null;
            if (dst.piece == null) {
                // check for en passant
                Square enp = board.squares[dst.row - velocity][dst.col];
                if (enp.piece != null && enp.piece.isPawn()) {
                    Piece victim = enp.piece;
                    // must be done the next move after the target pawn moves 2 squares
                    if (victim.color() != color && victim.lastMoveID().intValue() == mov.id-1) {
                        effects.add(new Effect(enp, null, enp.piece, null));
                    } else {
                        return null;
                    }
                }
            }
        } else if(dst.piece != null) return null; // can't move forward into a piece
        effects.add(new Effect(src, dst, src.piece, dst.piece));
        return effects;
    }
    /**
     * Enumerates candidate destination squares (forward pushes, captures,
     * en passant) for the pawn on {@code s} and converts them into moves.
     */
    public List<Move> possible(Square s, Board b, int nextMoveID) {
        List<Square> sl = new ArrayList<>();
        if (s.row == 1 || s.row == 6) {
            Square v = b.getSquare(s.row + velocity*2, s.col);
            if(v != null) sl.add(v);
        }
        Square v = b.getSquare(s.row + velocity, s.col);
        if(v != null) sl.add(v);
        // Right-diagonal: capture if occupied, otherwise maybe en passant.
        if ((v = b.getSquare(s.row + velocity, s.col + 1)) != null) {
            if (v.piece != null) {
                sl.add(v);
            } else {
                // check for en passant
                Square ep = b.getSquare(s.row, s.col + 1);
                if (ep != null && (s.row == 3 || s.row == 4) && ep.piece != null && ep.piece.color() != color && ep.piece.lastMoveID() == nextMoveID - 1) {
                    sl.add(b.getSquare(s.row + velocity, s.col + 1));
                }
            }
        }
        // BUG FIX: this branch previously required v.piece != null in the
        // outer condition, added the occupied square TWICE, and made its
        // en passant path unreachable. It now mirrors the right-diagonal
        // branch above.
        if ((v = b.getSquare(s.row + velocity, s.col - 1)) != null) {
            if (v.piece != null) {
                sl.add(v);
            } else {
                // check for en passant
                Square ep = b.getSquare(s.row, s.col - 1);
                if (ep != null && (s.row == 3 || s.row == 4) && ep.piece != null && ep.piece.color() != color && ep.piece.lastMoveID() == nextMoveID - 1) {
                    sl.add(b.getSquare(s.row + velocity, s.col - 1));
                }
            }
        }
        return Move.generateMoves(s, sl, nextMoveID);
    }
    @Override
    public String toString() {
        return "P";
    }
    // Positional bonus table, indexed [row][col] from white's perspective.
    double[][] position = new double[][]{
            {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0},
            {5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0},
            {1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0},
            {0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5},
            {0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0},
            {0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5},
            {0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5},
            {0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}
    };
    /**
     * Base pawn value plus a position-dependent bonus. The table is mirrored
     * for black. NOTE(review): a negative nextMoveID multiplies the bonus by
     * 100 — presumably a special evaluation mode; confirm with callers.
     */
    public double score(Square s, int nextMoveID) {
        int row = s.piece.color() == Color.WHITE ? s.row : 7-s.row;
        double bonus = nextMoveID < 0 ? 100.0 : 1.0;
        return 10.0 + (bonus*position[row][s.col]);
    }
}
|
<filename>index.js
/*=== imports and exports ===*/
const { generateAPIKey } = require('./modules/generateAPIKey')
const { validateKey } = require('./modules/validateKey')
const { keyLimiter } = require('./modules/keyLimiter')
module.exports = {
generateAPIKey,
validateKey,
keyLimiter
}
|
<reponame>stjude/sjcloud-data-transfer
/**
* This code block is executed if we are running as an Electron application.
*/
/*
Todo:
- transition away from window globals and towards using VueApp backend plugin
- the various window.* objects below may be deleted, pending review
- only window.utils is called outside of this file
*/
window.dx = require('./bin/backend/dx');
window.queue = require('./bin/backend/queue');
window.logging = require('./bin/backend/logging-remote');
window.oauth = require('./bin/backend/oauth');
window.state = require('./bin/backend/state');
window.ui = require('./bin/backend/ui');
window.utils = require('./bin/backend/utils');
/*
 * Transition to using backend
 * methods specifically to a VueApp instance,
 * by defining the Vue plugin below
 */
window.backend = {
  install(Vue, options) {
    // $setBackend attaches the backend module table to the instance once.
    Vue.prototype.$setBackend = function() {
      if (this.backend) {
        // Fixed: throw an Error object instead of a bare string so callers
        // get a stack trace and `instanceof Error` checks work.
        throw new Error('The $root.backend has already been set.');
      } else {
        this.backend = {
          dx: require('./bin/backend/dx'),
          queue: require('./bin/backend/queue'),
          logging: require('./bin/backend/logging-remote'),
          oauth: require('./bin/backend/oauth'),
          state: require('./bin/backend/state'),
          ui: require('./bin/backend/ui'),
          utils: require('./bin/backend/utils'),
        };
      }
    };
  },
};
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.inbox = void 0;
var inbox = {
"viewBox": "0 0 1536 1792",
"children": [{
"name": "path",
"attribs": {
"d": "M1023 960h316q-1-3-2.5-8.5t-2.5-7.5l-212-496h-708l-212 496q-1 3-2.5 8.5t-2.5 7.5h316l95 192h320zM1536 990v482q0 26-19 45t-45 19h-1408q-26 0-45-19t-19-45v-482q0-62 25-123l238-552q10-25 36.5-42t52.5-17h832q26 0 52.5 17t36.5 42l238 552q25 61 25 123z"
}
}]
};
exports.inbox = inbox; |
#!/bin/bash
# Run dieharder RNG test number 102 (-d 102) against generator number 22
# (-g 22), with a fixed seed (-S) so the run is reproducible.
dieharder -d 102 -g 22 -S 2801043024
|
#!/usr/bin/env bash
# Fetch the workspace's custom emoji list from the Slack Web API.
# Replace "..." with a real OAuth bearer token before running.
curl -H "Authorization: Bearer ..." https://slack.com/api/emoji.list
#!/bin/bash
set -ue
# This will generate boot disk images by copying a source cloud image N
# times and optionally resizing each copy with qemu-img.
usage () {
    echo "usage: ${0##*/}: [--copy <orign cloud image>] [--size <disk size>] [--prefix <prefix>] [--number <number>] [--index <start index>]"
}
# BUG FIX: under `set -e` a failing getopt aborted the script before the
# old `if [ $? -ne 0 ]` check could run, so usage was never printed.
# Handle the failure on the same command list instead.
ARGS=$(getopt \
    -o c:s:p:n:i: \
    --long help,copy:,size:,prefix:,number:,index: -n ${0##*/} \
    -- "$@") || { usage >&2; exit 2; }
eval set -- "$ARGS"
while :; do
    case "$1" in
        --help)
            usage
            exit 0
            ;;
        -c|--copy)
            copy_from="$2"
            shift 2
            ;;
        -s|--size)
            disk_size="$2"
            shift 2
            ;;
        -p|--prefix)
            prefix="$2"
            shift 2
            ;;
        -n|--number)
            number="$2"
            shift 2
            ;;
        -i|--index)
            index_start="$2"
            shift 2
            ;;
        --) shift
            break
            ;;
    esac
done
# BUG FIX: use ${var:-} so `set -u` does not kill the script with an
# "unbound variable" error before these friendly messages can print.
[ -n "${prefix:-}" ] || { echo "ERROR: -p, prefix must be given"; exit 1; }
[ -n "${copy_from:-}" ] || { echo "ERROR: -c, copy must be given"; exit 1; }
# set default value
: ${number:=1}
: ${index_start:=1}
# loop work
for(( i=$index_start;i<$((number+index_start));i++))
do
    if [ -f "$prefix-$i.img" ]; then
        echo "WARNING: $prefix-$i.img already existed, skip"
        continue
    fi
    cp "$copy_from" "$prefix-$i.img"
    # Fixed: "created" was previously outside the quotes.
    echo "$prefix-$i.img created"
    # Resize only when a size was requested (guard written as `if` so the
    # failing test of an && list cannot become the loop's exit status).
    if [ -n "${disk_size:-}" ]; then
        qemu-img resize "$prefix-$i.img" "$disk_size"
    fi
done
def process_input_file(input_file, output_file):
    """Re-emit a tab-separated file with CRLF line terminators.

    Each input line must contain exactly three tab-separated fields
    (tag type, tag name, tag value); a malformed line surfaces through
    the generic error handler below.
    """
    try:
        with open(input_file, 'r') as file:
            data = file.readlines()

        processed_data = []
        for line in data:
            tag_type, tag_name, tag_value = line.strip().split('\t')
            processed_data.append('\t'.join([tag_type, tag_name, tag_value]) + '\r\n')

        # BUG FIX: newline='' disables newline translation so the explicit
        # '\r\n' terminators are not rewritten as '\r\r\n' on Windows.
        with open(output_file, 'w', newline='') as file:
            file.writelines(processed_data)
        print("Data processing and writing to output file successful.")
    except FileNotFoundError:
        print("Input file not found.")
    except Exception as e:
        print("An error occurred:", e)
# Example usage. Guarded so that importing this module no longer runs the
# example as an import-time side effect.
if __name__ == '__main__':
    input_file = "input.txt"
    output_file = "output.txt"
    process_input_file(input_file, output_file)
/**
*
*/
package redisson;
import java.util.concurrent.TimeUnit;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.redisson.Redisson;
import org.redisson.api.RBucket;
import org.redisson.api.RList;
import org.redisson.api.RedissonClient;
import org.redisson.client.codec.StringCodec;
import org.redisson.config.Config;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author dzh
* @date Sep 15, 2017 7:43:21 PM
* @since 0.0.1
*/
public class TestRedisson {
    // Client under test; recreated for every test method by before().
    private RedissonClient redis;
    static Logger LOG = LoggerFactory.getLogger(TestRedisson.class);
    /**
     * Creates a Redisson client connected to a local single-server Redis
     * before each test.
     */
    @Before
    public void before() {
        Config config = new Config();
        config.useSingleServer().setAddress("redis://127.0.0.1:6379");
        redis = Redisson.create(config);
    }
    /**
     * Runs after each test method finishes; shuts down the Redisson client
     * that connects to the server. (Translated from the original Chinese.)
     */
    @After
    public void after() {
        redis.shutdown(2, 15, TimeUnit.SECONDS);
    }
    // RBucket with a 6-second TTL: reads at 4s and 6s bracket the expiry;
    // then a plain bucket holding a custom value object.
    @Test
    public void testBucket() throws Exception {
        RBucket<String> bucket = redis.getBucket("str", new StringCodec("utf-8"));
        bucket.set("x", 6, TimeUnit.SECONDS);
        LOG.info("get {}", bucket.get());
        Thread.sleep(4000);
        LOG.info("get {} after {}s", bucket.get(), 4);
        Thread.sleep(2000);
        LOG.info("get {} after {}s", bucket.get(), 6);
        bucket.delete();
        RBucket<TestClazz> bucket1 = redis.getBucket("clazz");
        bucket1.set(new TestClazz("c", 3));
        LOG.info("get {}", bucket1.get());
        bucket1.delete();
    }
    // Basic RList operations: add, positional get, size, delete.
    @Test
    public void testList() throws Exception {
        RList<TestClazz> list = redis.getList("list");
        list.add(new TestClazz("a", 1));
        LOG.info("get {}", list.get(0));
        list.add(new TestClazz("b", 2));
        // LOG.info("get {}", list.get(0, 1));
        LOG.info("get {}", redis.getList("list"));
        LOG.info("size {}", list.size());
        list.delete();
    }
    // NOTE(review): despite the "1k" name this writes 10,000,000 keys —
    // a long-running load test rather than a unit test.
    @Test
    public void test1k() {
        long s0 = System.currentTimeMillis();
        for (int i = 0; i < 10000000; i++) {
            RBucket<String> bucket = redis.getBucket("test1k" + i, new StringCodec("utf-8"));
            bucket.set(String.valueOf(System.nanoTime()), 1, TimeUnit.HOURS);
        }
        long s1 = System.currentTimeMillis();
        LOG.info("cost {}ms", s1 - s0);
    }
    // Simple serializable value object used by the tests above.
    static class TestClazz {
        public String a;
        public int b;
        public TestClazz() {}
        public TestClazz(String a, int b) {
            this.a = a;
            this.b = b;
        }
        public String toString() {
            return a + ":" + b;
        }
    }
}
|
-- Names that may never be used as crate names (mostly Rust toolchain crates).
CREATE TABLE reserved_crate_names (
    name TEXT PRIMARY KEY
);
-- Unique on the canonicalized form (canon_crate_name is defined elsewhere),
-- so near-identical spellings cannot both be reserved.
CREATE UNIQUE INDEX ON reserved_crate_names (canon_crate_name(name));
INSERT INTO reserved_crate_names (name) VALUES
('alloc'), ('arena'), ('ast'), ('builtins'), ('collections'),
('compiler-builtins'), ('compiler-rt'), ('compiletest'), ('core'), ('coretest'),
('debug'), ('driver'), ('flate'), ('fmt_macros'), ('grammar'), ('graphviz'),
('macro'), ('macros'), ('proc_macro'), ('rbml'), ('rust-installer'), ('rustbook'),
('rustc'), ('rustc_back'), ('rustc_borrowck'), ('rustc_driver'), ('rustc_llvm'),
('rustc_resolve'), ('rustc_trans'), ('rustc_typeck'), ('rustdoc'), ('rustllvm'),
('rustuv'), ('serialize'), ('std'), ('syntax'), ('test'), ('unicode');
-- Trigger function: rejects a crate whose canonicalized name collides with
-- any reserved name.
CREATE FUNCTION ensure_crate_name_not_reserved() RETURNS trigger AS $$
BEGIN
    IF canon_crate_name(NEW.name) IN (
        SELECT canon_crate_name(name) FROM reserved_crate_names
    ) THEN
        RAISE EXCEPTION 'cannot upload crate with reserved name';
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Enforce the check on every insert or update of crates.
CREATE TRIGGER trigger_ensure_crate_name_not_reserved
BEFORE INSERT OR UPDATE ON crates
FOR EACH ROW EXECUTE PROCEDURE ensure_crate_name_not_reserved();
#!/usr/bin/env python3
from .network import ZeroconfEnumerator
from .local import HIDEnumerator
class DeviceEnumerator:
    """Aggregates network (Zeroconf) and local (HID) device enumeration."""

    def __init__(self):
        self._network = ZeroconfEnumerator()
        self._local = HIDEnumerator()
        # NOTE(review): only the local enumerator is explicitly started here;
        # presumably ZeroconfEnumerator starts itself on construction —
        # confirm, since stop() below stops both.
        self._local.start()

    def stop(self):
        # Shut down both enumerators.
        self._network.stop()
        self._local.stop()

    def connect(self, target):
        # Fan the request out to both transports; presumably each backend
        # ignores targets it does not own — confirm in the backends.
        self._network.connect(target)
        self._local.connect(target)

    def disconnect(self, target):
        self._network.disconnect(target)
        self._local.disconnect(target)
|
<reponame>oistvan32/PUBLIC-BOT
const superagent = require("superagent");
const got = require('got');
const Discord = require("discord.js");
module.exports = {
name: "meme",
category: "Fun",
descriptoin: "meme Parancs",
run: async (client, message, args) => {
got('https://www.reddit.com/r/memes/random/.json').then(response => {
const [list] = JSON.parse(response.body);
const [post] = list.data.children;
message.channel.send(
new Discord.MessageEmbed()
.setTitle(post.data.title)
.setURL(`https://reddit.com${post.data.permalink}`)
.setColor('RANDOM')
.setImage(post.data.url)
.setFooter(`👍 ${post.data.ups} 💬 ${post.data.num_comments}`)
);
}).catch(console.error);
}
} |
<filename>src/main/java/seedu/address/logic/parser/patient/SearchPatientApptStatusCommandParser.java
/* @@author wayneswq */
package seedu.address.logic.parser.patient;
import static seedu.address.commons.core.Messages.MESSAGE_INVALID_COMMAND_FORMAT;
import java.util.Arrays;
import seedu.address.logic.commands.patient.SearchPatientApptStatusCommand;
import seedu.address.logic.parser.Parser;
import seedu.address.logic.parser.exceptions.ParseException;
import seedu.address.model.appointment.AppointmentStatus;
import seedu.address.model.person.patient.PatientApptStatusContainsKeywordsPredicate;
/**
 * Parses input arguments and creates a new SearchPatientApptStatusCommand object
 */
public class SearchPatientApptStatusCommandParser implements Parser<SearchPatientApptStatusCommand> {
    /**
     * Parses {@code args} into a {@link SearchPatientApptStatusCommand} for
     * execution. The input must be exactly one keyword naming a known
     * appointment status.
     *
     * @throws ParseException if the user input does not conform the expected format
     */
    public SearchPatientApptStatusCommand parse(String args) throws ParseException {
        final String trimmedArgs = args.trim();
        if (trimmedArgs.isEmpty()) {
            throw new ParseException(
                    String.format(MESSAGE_INVALID_COMMAND_FORMAT, SearchPatientApptStatusCommand.MESSAGE_USAGE));
        }
        final String[] keywords = trimmedArgs.split("\\s+");
        // Reject multiple keywords, or a keyword that is not a valid status.
        if (keywords.length > 1 || !isValidApptStatus(keywords[0])) {
            throw new ParseException(
                    String.format(MESSAGE_INVALID_COMMAND_FORMAT,
                            SearchPatientApptStatusCommand.MESSAGE_INVALID_STATUS));
        }
        return new SearchPatientApptStatusCommand(
                new PatientApptStatusContainsKeywordsPredicate(Arrays.asList(keywords)));
    }
    /**
     * Returns true if {@code keyword} matches one of the AppointmentStatus
     * enum constants, ignoring case.
     */
    public boolean isValidApptStatus(String keyword) {
        return Arrays.stream(AppointmentStatus.values())
                .anyMatch(candidate -> candidate.toString().equalsIgnoreCase(keyword));
    }
}
|
package cloud.tianai.csv.converter;
import cloud.tianai.csv.CsvDataConverter;
import java.net.URI;
/**
 * CSV data converter for {@link URI} values. (Javadoc translated to English.)
 *
 * @author 天爱有情
 * @date 2019/11/19 20:10
 */
public class UriCsvDataConverter implements CsvDataConverter<URI> {
    @Override
    public String converter(Integer index, URI data) {
        // Delegates to URI#toString; a null data value would raise an NPE here.
        return data.toString();
    }
}
|
def compute_total_cost(price, quantity, shipping, tax):
    """Return the order total: item subtotal plus shipping and tax."""
    subtotal = price * quantity
    surcharges = shipping + tax
    return subtotal + surcharges
<gh_stars>10-100
package com.bitsys.common.http.message;
import java.util.Collection;
import org.apache.commons.lang3.StringUtils;
import com.bitsys.common.http.util.StringMatcherIgnoreCase;
import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
/**
 * Skeletal {@link HttpMessage} implementation that keeps headers in an
 * insertion-ordered multimap.
 */
public abstract class AbstractHttpMessage implements HttpMessage
{
    /** Backing store for this message's HTTP headers (insertion order kept). */
    private final ListMultimap<String, String> headers;

    /**
     * Constructs an <code>AbstractHttpMessage</code> with no headers.
     */
    public AbstractHttpMessage()
    {
        headers = LinkedListMultimap.create();
    }

    @Override
    public ListMultimap<String, String> getHeaders()
    {
        return headers;
    }

    @Override
    public Multimap<String, String> getHeader(final String name)
    {
        // Case-insensitive key filter, matching HTTP header-name semantics.
        return Multimaps.filterKeys(headers, new StringMatcherIgnoreCase(name));
    }

    @Override
    public String getHeaderValue(final String name)
    {
        final Collection<String> matchingValues = getHeader(name).values();
        if (matchingValues.isEmpty())
        {
            return null;
        }
        // According to RFC 2616, if multiple message-header fields with the
        // same field-name are present, it MUST be possible to combine them
        // into one pair whose value is the comma-separated list of values.
        return StringUtils.join(matchingValues, ',');
    }
}
|
#!/bin/bash
# Shared helpers for container networking (veth and dpdk connections).
source $(dirname ${0})/lxc_common_helpers.sh
# Supported connection types.
NET_TYPE_VETH="veth"
NET_TYPE_DPDK="dpdk"
# Hugepage-filesystem defaults pulled from the main lxc config file.
DPDK_HUGEFS_HOST_PATH_DEFAULT=$(get_lxc_config_option "lxc.dpdk.hugefs.host_path_default" ${main_config_file})
DPDK_HUGEFS_PATH_DEFAULT=$(get_lxc_config_option "lxc.dpdk.hugefs.path_default" ${main_config_file})
DPDK_HUGEFS_NR_HUGEPAGES=$(get_lxc_config_option "lxc.dpdk.hugefs.nr_hugepages" ${main_config_file})
DPDK_DEFAULT_PCI_DRIVER="igb_uio"
# Compose the container-side veth interface name for a connection.
#   $1 = connection name, $2 = container name
function lxc_get_veth_cn_end_name {
  local conn_name=${1}
  local cn_name=${2}
  # BUG FIX: previously echoed ${conn} — an unrelated variable leaked from
  # the caller's loop — instead of the ${conn_name} parameter; it only
  # worked because every caller happened to set a global `conn`.
  echo "veth-${cn_name}-${conn_name}-0"
}
# Compose the remote-side veth interface name for a connection.
#   $1 = connection name, $2 = container name
function lxc_get_veth_remote_end_name {
  local conn_name=${1}
  local cn_name=${2}
  # BUG FIX: previously echoed ${conn} (a caller's global) instead of the
  # ${conn_name} parameter — same defect as lxc_get_veth_cn_end_name.
  echo "veth-${cn_name}-${conn_name}-1"
}
# Apply per-connection settings inside the container: hwaddr/ip/name/up for
# veth connections; uio device nodes, optional kni node and hugepage mounts
# for dpdk connections.
#   $1 = lxc config file    $2 = container name
#   $3 = container init pid (empty => operate in the current namespace)
#   $4 = container rootfs mount point
function lxc_set_net_cn_end_options {
  local cfg_file=${1}
  local cn_name=${2}
  local cn_init_pid=${3}
  local root_fs_mount=${4}
  conn_list=$(get_lxc_config_option_list "wr.network.connection" ${cfg_file})
  [ -z "${conn_list}" ] && return 1
  # Going through each connection
  for conn in ${conn_list}; do
    type=$(get_lxc_config_option "wr.network.${conn}.type" ${cfg_file})
    if [ "${type}" == "${NET_TYPE_VETH}" ]; then
      nsenter_ext=""
      [ -n "${cn_init_pid}" ] && nsenter_ext="nsenter -n -t ${cn_init_pid} --"
      # At this point connection has been setup for "remote end" veth
      cn_eth_name=$(lxc_get_veth_cn_end_name ${conn} ${cn_name})
      hwaddr=$(get_lxc_config_option "wr.network.${conn}.hwaddr" ${cfg_file})
      if [ -n "${hwaddr}" ]; then
        ${nsenter_ext} ip link set ${cn_eth_name} address ${hwaddr}
        [ $? -ne 0 ] && lxc_log "Warning, ${conn}, cannot set hwaddrs ${hwaddr} to ${cn_eth_name}"
      fi
      ipv4=$(get_lxc_config_option "wr.network.${conn}.ipv4" ${cfg_file})
      if [ -n "${ipv4}" ]; then
        ${nsenter_ext} ip addr add ${ipv4} dev ${cn_eth_name}
        [ $? -ne 0 ] && lxc_log "Warning, ${conn}, cannot set ipv4 ${ipv4} to ${cn_eth_name}"
      fi
      new_name=$(get_lxc_config_option "wr.network.${conn}.name" ${cfg_file})
      if [ -n "${new_name}" ]; then
        ${nsenter_ext} ip link set ${cn_eth_name} name ${new_name}
        if [ $? -eq 0 ]; then
          cn_eth_name=${new_name}
        else
          lxc_log "Warning, ${conn}, cannot change name from ${cn_eth_name} to ${new_name}"
        fi
      fi
      flags=$(get_lxc_config_option "wr.network.${conn}.flags" ${cfg_file} | grep 'up')
      if [ -n "${flags}" ]; then
        ${nsenter_ext} ip link set ${cn_eth_name} up
        [ $? -ne 0 ] && lxc_log "Warning, ${conn}, cannot activate ${cn_eth_name}"
      fi
    elif [ "${type}" == "${NET_TYPE_DPDK}" ]; then
      nsenter_ext=""
      [ -n "${cn_init_pid}" ] && nsenter_ext="nsenter -n -m -p -t ${cn_init_pid} --"
      dpdk_pci_list=$(get_lxc_config_option "wr.network.${conn}.dpdk.pci" ${cfg_file} | sed 's/;/ /g')
      for pci_info in ${dpdk_pci_list}; do
        # pci_info has the following format: <pci address>,<dpdk uio driver>
        pci=$(${nsenter_ext} echo ${pci_info} | awk -F ',' '{print $1}')
        # Create uio dev
        sys_pci_uio_path=$(${nsenter_ext} find ${root_fs_mount}/sys/ -name "*uio*" | grep -F "${pci}" | grep "uio\/uio")
        pci_dev_major_minor=$(${nsenter_ext} cat ${sys_pci_uio_path}/dev | sed 's/:/ /g')
        ${nsenter_ext} rm ${root_fs_mount}/dev/$(basename ${sys_pci_uio_path}) > /dev/null 2>&1
        ${nsenter_ext} /bin/mknod ${root_fs_mount}/dev/$(basename ${sys_pci_uio_path}) c ${pci_dev_major_minor}
      done
      # Check any of additional dpdk kernel modules might need to have a
      # dev node created.
      dpdk_kernmod_list=$(get_lxc_config_option "wr.network.${conn}.dpdk.kernmod" ${cfg_file} | sed 's/;/ /g')
      # Right now only dpdk rte_kni requires /dev/kni to be created
      res=$(echo ${dpdk_kernmod_list} | grep -F 'rte_kni')
      if [ -n "${res}" ]; then
        sys_kni_path=$(${nsenter_ext} find ${root_fs_mount}/sys/ -name "kni" | grep -F "devices")
        kni_dev_major_minor=$(${nsenter_ext} cat ${sys_kni_path}/dev | sed 's/:/ /g')
        ${nsenter_ext} /bin/mknod ${root_fs_mount}/dev/kni c ${kni_dev_major_minor}
      fi
      # Now we mount hugepage fs into container. We support 2 modes:
      # "host" makes the host hugepage fs available into container; "private"
      # means the container will have its own hugepage mount.
      dpdk_hugefs_mount=$(get_lxc_config_option "wr.network.${conn}.dpdk.hugefs.mount" ${cfg_file})
      # Default is using host hugepage
      [ -z "${dpdk_hugefs_mount}" ] && dpdk_hugefs_mount="${HOST_CN_NAME}"
      dpdk_hugefs_path=$(get_lxc_config_option "wr.network.${conn}.dpdk.hugefs.path" ${cfg_file})
      [ -z "${dpdk_hugefs_path}" ] && dpdk_hugefs_path=${DPDK_HUGEFS_PATH_DEFAULT}
      if [ "${dpdk_hugefs_mount}" == "${HOST_CN_NAME}" ]; then
        # Can only share with host hugepage fs when this function is invoked from
        # container's hook functions.
        if [ -z "${cn_init_pid}" ]; then
          /bin/mount -o bind ${DPDK_HUGEFS_HOST_PATH_DEFAULT} ${root_fs_mount}/${dpdk_hugefs_path}
          res=$?
          # BUG FIX: was `[ $? -ne 0 ]`, but $? there is the exit status of
          # the `res=$?` assignment (always 0), so bind-mount failures were
          # silently ignored. Test the captured status instead.
          [ ${res} -ne 0 ] && { lxc_log "Error, cannot bind mount, ${res}"; return ${res}; }
        else
          lxc_log "Warning, do not support host shared hugepage filesystem"
        fi
      elif [ "${dpdk_hugefs_mount}" == "private" ]; then
        # Currently kernel does not support hugetlbfs min_size option yet. So for now just
        # use "size" option
        dpdk_hugefs_max_size=$(get_lxc_config_option "wr.network.${conn}.dpdk.hugefs.maxsize" ${cfg_file})
        [ -n "${dpdk_hugefs_max_size}" ] && dpdk_hugefs_options="${dpdk_hugefs_options} -o size=${dpdk_hugefs_max_size}"
        ${nsenter_ext} /bin/mount -t hugetlbfs ${dpdk_hugefs_options} none ${root_fs_mount}/${dpdk_hugefs_path}
        res=$?
        [ ${res} -ne 0 ] && { lxc_log "Error, cannot mount hugetlbfs, ${res}"; return ${res}; }
      fi
    fi
  done
  return 0
}
# Move each veth "cn end" into the container's network namespace, then apply
# the per-connection options; on failure the whole network setup is torn down.
#   $1 = lxc config file, $2 = container name
function lxc_setup_net_cn_end {
  local cfg_file=${1}
  local cn_name=${2}
  # Some basic checks
  [ ! -e "${cfg_file}" ] && return 1
  cn_init_pid=$(get_lxc_init_pid_from_cn_name ${cn_name})
  [ -z "${cn_init_pid}" ] && return 1
  which ip > /dev/null 2>&1
  [ $? -ne 0 ] && lxc_log "Error, ip util is not available" && return 1
  conn_list=$(get_lxc_config_option_list "wr.network.connection" ${cfg_file})
  [ -z "${conn_list}" ] && return 1
  # Going through each connection, at this point,
  # connection has been setup for "remote end" veth
  for conn in ${conn_list}; do
    type=$(get_lxc_config_option "wr.network.${conn}.type" ${cfg_file})
    if [ "${type}" == "${NET_TYPE_VETH}" ]; then
      cn_eth_name=$(lxc_get_veth_cn_end_name ${conn} ${cn_name})
      # Now switch "cn end" to correct namespace
      ip link set ${cn_eth_name} netns ${cn_init_pid}
      [ $? -ne 0 ] && lxc_log "Error, ${conn}, cannot switch ${cn_eth_name} to ${cn_init_pid} net namespace" \
        && return 1
    fi
  done
  # Apply hwaddr/ip/name/up (and dpdk) options; undo everything on failure.
  lxc_set_net_cn_end_options ${cfg_file} ${cn_name} ${cn_init_pid}
  if [ $? -ne 0 ]; then
    lxc_remove_net ${cfg_file} ${cn_name}
    return 1
  fi
  return 0
}
# Read config file, and for each connection setup the "remote end" only.
# On first failed to create connection, the function will invoke
# lxc_remove_net to clean up the mess.
#   ${1} - lxc container config file holding the wr.network.* options
#   ${2} - name of the container being launched
# Returns 0 on success (also when no connections are configured), 1 on failure.
function lxc_setup_net_remote_end {
    local cfg_file=${1}
    local cn_name=${2}
    local ret=0
    # Some basic checks
    [ ! -e "${cfg_file}" ] && return 1
    which ip > /dev/null 2>&1
    [ $? -ne 0 ] && lxc_log "Error, ip util is not available" && return 1
    # Get list of connections specified in cfg file.
    # Each connection is specified by options wr.network.connection
    conn_list=$(get_lxc_config_option_list "wr.network.connection" ${cfg_file})
    [ -z "${conn_list}" ] && return 0
    # Going through each connection
    for conn in ${conn_list}; do
        # Extract this connection specific info
        type=$(get_lxc_config_option "wr.network.${conn}.type" ${cfg_file})
        cn_pid=""
        if [ "${type}" == "${NET_TYPE_VETH}" ]; then
            remote_cn=$(get_lxc_config_option "wr.network.${conn}.remote.cn" ${cfg_file})
            remote_type=$(get_lxc_config_option "wr.network.${conn}.remote.type" ${cfg_file})
            remote_link=$(get_lxc_config_option "wr.network.${conn}.remote.link" ${cfg_file})
            cn_eth_name=$(lxc_get_veth_cn_end_name ${conn} ${cn_name})
            remote_eth_name=$(lxc_get_veth_remote_end_name ${conn} ${cn_name})
            # veth pipe has 2 ends: "cn end" and "remote end". "cn end"
            # will be in being launching container net namespace, which we do not
            # care here. "remote end" will be in net namespace of a container
            # (specified in config file) or of host.
            #
            # Here we will only configure "remote end" as other function
            # will configure "cn end". Configure for "host" is a bit special, so
            # we need to handle it separately.
            if [ "${remote_cn}" == "${HOST_CN_NAME}" ]; then
                # Need to create veth from host net namespace
                if [ -d "${host_proc_path}/1" ]; then
                    cn_pid="${host_proc_path}/1"
                else
                    lxc_log "Error, host proc path ${host_proc_path} does not exist."
                    ret=1
                    break
                fi
                nsenter -n -t ${cn_pid} -- ip link add name ${cn_eth_name} type veth peer name ${remote_eth_name}
                if [ $? -ne 0 ]; then
                    lxc_log "Error, ${conn}, cannot create veth [${cn_eth_name}, ${remote_eth_name}] in host net namespace"
                    ret=1
                    break
                fi
                # Need to park the "cn end" into Domain0 net namespace so that other
                # net namespace can easily see it.
                nsenter -n -t ${cn_pid} -- ip link set ${cn_eth_name} netns 1
                if [ $? -ne 0 ]; then
                    lxc_log "Error, ${conn}, cannot move ${cn_eth_name} into ${cn_pid} net namespace"
                    nsenter -n -t ${cn_pid} -- ip link delete ${remote_eth_name}
                    [ $? -ne 0 ] && lxc_log "Error, ${conn}, cannot delete [${cn_eth_name}, ${remote_eth_name}] in host net namespace"
                    ret=1
                    break
                fi
            elif [ -n "${remote_cn}" ]; then
                cn_pid=$(get_lxc_init_pid_from_cn_name ${remote_cn})
                if [ -n "${cn_pid}" ]; then
                    # No need to invoke nsenter here as we want to park "cn end" into Domain0
                    # net namespace, and at this point we are actually in Domain0 net namespace.
                    ip link add name ${cn_eth_name} type veth peer name ${remote_eth_name}
                    if [ $? -ne 0 ]; then
                        lxc_log "Error, ${conn}, cannot create veth pipe in Domain0 net namespace"
                        ret=1
                        break
                    fi
                    ip link set ${remote_eth_name} netns ${cn_pid}
                    if [ $? -ne 0 ]; then
                        lxc_log "Error, ${conn}, cannot move ${remote_eth_name} into ${cn_pid} net namespace"
                        ip link delete ${cn_eth_name}
                        [ $? -ne 0 ] && lxc_log "Error, ${conn}, cannot delete [${cn_eth_name}, ${remote_eth_name}] in host net namespace"
                        ret=1
                        break
                    fi
                else
                    # Remote container is not running; skip this connection.
                    continue
                fi
            fi
            if [ -n "${cn_pid}" ]; then
                nsenter_netns_ext="nsenter -n -t ${cn_pid} --"
                remote_flags=$(get_lxc_config_option "wr.network.${conn}.remote.flags" ${cfg_file})
                hwaddr=$(get_lxc_config_option "wr.network.${conn}.remote.hwaddr" ${cfg_file})
                if [ -n "${hwaddr}" ]; then
                    ${nsenter_netns_ext} ip link set ${remote_eth_name} address ${hwaddr}
                    [ $? -ne 0 ] && lxc_log "Warning, ${conn}, cannot set hwaddrs ${hwaddr} to ${remote_eth_name}"
                fi
                case "${remote_type}" in
                ovs)
                    # As openvswitch client controls ovs switch through sock under /var/run/openvswitch
                    # so its necessary to jump into mount namespace with option -m. Also there are rare cases
                    # ovs get into bad state and this causes ovs-vsctl not to return, so use timeout here. 30s
                    # seems to be reasonable.
                    res=$(nsenter -m -n -t ${cn_pid} -- find /sys/class/net -name ${remote_link})
                    if [ -z "${res}" ];then
                        timeout 30 nsenter -m -n -t ${cn_pid} -- ovs-vsctl add-br ${remote_link}
                        # Fixed: capture the status before testing it; "$?" expanded
                        # inside the log message would always print 0 (the status of
                        # the [ ] test itself).
                        res=$?
                        [ ${res} -ne 0 ] && lxc_log "Warning, ${conn}, ovs-vsctl cannot create bridge ${remote_link}, res=${res}"
                    fi
                    timeout 30 nsenter -m -n -t ${cn_pid} -- ovs-vsctl add-port ${remote_link} ${remote_eth_name}
                    res=$?
                    [ ${res} -ne 0 ] && lxc_log "Warning, ${conn}, ovs-vsctl cannot add ${remote_eth_name} to ${remote_link}, res=${res}"
                    ;;
                bridge)
                    res=$(nsenter -m -n -t ${cn_pid} -- find /sys/class/net -name ${remote_link})
                    if [ -z "${res}" ]; then
                        ${nsenter_netns_ext} brctl addbr ${remote_link}
                        res=$?
                        [ ${res} -ne 0 ] && lxc_log "Warning, ${conn}, brctl cannot create bridge ${remote_link}, res=${res}"
                    fi
                    ${nsenter_netns_ext} brctl addif ${remote_link} ${remote_eth_name}
                    [ $? -ne 0 ] && lxc_log "Warning, ${conn}, brctl cannot add ${remote_eth_name} to ${remote_link}"
                    ;;
                *)
                    # Here remote_link might be set inside config file. At this point remote_link must be empty.
                    # Reset here as remote_link will be used later.
                    remote_link=""
                    ;;
                esac
                ipv4=$(get_lxc_config_option "wr.network.${conn}.remote.ipv4" ${cfg_file})
                if [ -n "${ipv4}" ]; then
                    if [ -n "${remote_link}" ]; then
                        # When a bridge/ovs switch is in use the address goes onto it,
                        # not onto the veth end.
                        ${nsenter_netns_ext} ip addr add ${ipv4} dev ${remote_link}
                        [ $? -ne 0 ] && lxc_log "Warning, ${conn}, cannot set ipv4 ${ipv4} to ${remote_link}"
                    else
                        ${nsenter_netns_ext} ip addr add ${ipv4} dev ${remote_eth_name}
                        [ $? -ne 0 ] && lxc_log "Warning, ${conn}, cannot set ipv4 ${ipv4} to ${remote_eth_name}"
                    fi
                fi
                if [ "${remote_flags}" == "up" ]; then
                    ${nsenter_netns_ext} ip link set ${remote_eth_name} up
                    [ $? -ne 0 ] && lxc_log "Warning, ${conn}, cannot activate ${remote_eth_name}"
                    if [ -n "${remote_link}" ]; then
                        ${nsenter_netns_ext} ip link set ${remote_link} up
                        [ $? -ne 0 ] && lxc_log "Warning, ${conn}, cannot activate ${remote_link}"
                    fi
                fi
                # Up script will be executed in Domain0 all namespaces. Its up to the script
                # to decide different namespace to jump into.
                script_up=$(get_lxc_config_option "wr.network.${conn}.remote.script.up" ${cfg_file})
                if [ -x "${script_up}" ]; then
                    ${script_up} "${type}" "${cn_name}" "${remote_cn}" "${cn_pid}" "${remote_type}" "${remote_link}" "${remote_eth_name}"
                elif [ -n "${script_up}" ]; then
                    lxc_log "Warning, up script ${script_up} is not executed. Make sure it is executable"
                fi
            fi
        fi
        if [ "${type}" == "${NET_TYPE_DPDK}" ]; then
            # Set the number of hugepages.
            if [ -n "${DPDK_HUGEFS_NR_HUGEPAGES}" ]; then
                echo ${DPDK_HUGEFS_NR_HUGEPAGES} > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
            fi
            # Prepare hugepage filesystem used for sharing between containers.
            if [ -n "${DPDK_HUGEFS_HOST_PATH_DEFAULT}" ]; then
                res=$(cat /proc/mounts | grep -F "${DPDK_HUGEFS_HOST_PATH_DEFAULT}" | awk '{print $3}')
                if [ "${res}" != "hugetlbfs" ]; then
                    mount -t hugetlbfs none ${DPDK_HUGEFS_HOST_PATH_DEFAULT}
                fi
            fi
            # Get list of kernel modules required to be loaded
            dpdk_kernmod_list=$(get_lxc_config_option "wr.network.${conn}.dpdk.kernmod" ${cfg_file} | sed 's/;/ /g')
            for kermod in ${dpdk_kernmod_list}; do
                # Security, only load module that contains "uio" substring
                # except for rte_kni.
                if [ "${kermod}" != "rte_kni" ]; then
                    res=$(echo ${kermod} | grep -F "uio")
                    # NOTE(review): a non-uio module silently stops the module
                    # loading loop without failing the setup - confirm intended.
                    [ -z "${res}" ] && break
                fi
                res=$(cat /proc/modules | awk '{print $1}' | grep "^${kermod}")
                if [ -z "${res}" ]; then
                    /sbin/modprobe ${kermod} || { ret=1; break; }
                fi
            done
            dpdk_pci_list=$(get_lxc_config_option "wr.network.${conn}.dpdk.pci" ${cfg_file} | sed 's/;/ /g')
            for pci_info in ${dpdk_pci_list}; do
                # pci_info has the following format: <pci address>,<dpdk uio driver>
                # If driver is not provided then use ${DPDK_DEFAULT_PCI_DRIVER} as default.
                pci=$(echo ${pci_info} | awk -F ',' '{print $1}')
                [ -z "${pci}" ] && continue
                driver=$(echo ${pci_info} | awk -F ',' '{print $2}')
                [ -z "${driver}" ] && driver=${DPDK_DEFAULT_PCI_DRIVER}
                # If well known ${DPDK_DEFAULT_PCI_DRIVER} driver is required then make sure
                # ${DPDK_DEFAULT_PCI_DRIVER} module is load. This module might already explicitly
                # specified in dpdk_kernmod_list above. But load it here any way
                if [ "${driver}" == "${DPDK_DEFAULT_PCI_DRIVER}" ]; then
                    res=$(cat /proc/modules | awk '{print $1}' | grep "^${DPDK_DEFAULT_PCI_DRIVER}")
                    if [ -z "${res}" ]; then
                        /sbin/modprobe ${DPDK_DEFAULT_PCI_DRIVER} || { ret=1; break; }
                    fi
                fi
                # Make sure that this pci device is not used by any other driver
                if [ -e "/sys/bus/pci/devices/${pci}/driver/" -a ! -e "/sys/bus/pci/devices/${pci}/net/" ]; then
                    lxc_log "Error, pci device ${pci} is currently being used"
                    ret=1
                    break
                fi
                # NOTE(review): wrapper is passed ${driver} twice - confirm this
                # matches dpdk_nic_bind_wrapper.py's expected CLI.
                res=$($(dirname ${0})/dpdk_nic_bind_wrapper.py ${driver} -b ${driver} ${pci} 2>&1)
                [ -n "${res}" ] && { ret=1; break; }
            done
            # Up script will be executed in Domain0 all namespaces. Its up to the script
            # to decide different namespace to jump into.
            script_up=$(get_lxc_config_option "wr.network.${conn}.remote.script.up" ${cfg_file})
            if [ -x "${script_up}" ]; then
                ${script_up} "${type}" "${cn_name}" "${dpdk_pci_list}" "${dpdk_kernmod_list}"
            elif [ -n "${script_up}" ]; then
                lxc_log "Warning, up script ${script_up} is not executed. Make sure it is executable"
            fi
        fi
    done
    # Some connections failed creation, so cleanup
    if [ ${ret} -ne 0 ]; then
        lxc_log "Error, cannot setup network ${conn}"
        lxc_conn_setup_failed=${conn}
        lxc_remove_net ${cfg_file} ${cn_name}
        return 1
    fi
    return 0
}
# Undo the networking that lxc_setup_net_remote_end created for container
# ${2} according to config file ${1}: run the per-connection down script,
# detach the veth "remote end" from any ovs/bridge switch and delete the
# veth pipe; for dpdk connections unmount hugetlbfs, remove uio dev nodes
# and unbind pci devices from their dpdk drivers.
# Returns 0 on success, 1 when basic checks fail or a veth remote
# namespace cannot be found.
function lxc_remove_net {
    local cfg_file=${1}
    local cn_name=${2}
    # Some basic checks
    [ ! -e "${cfg_file}" ] && return 1
    which ip > /dev/null 2>&1
    [ $? -ne 0 ] && lxc_log "Error, ip util is not available" && return 1
    # Get list of connections specified in cfg file.
    # Each connection is specified by options wr.network.connection
    conn_list=$(get_lxc_config_option_list "wr.network.connection" ${cfg_file})
    [ -z "${conn_list}" ] && return 1
    # Going through each connection
    for conn in ${conn_list}; do
        # Extract this connection specific info
        type=$(get_lxc_config_option "wr.network.${conn}.type" ${cfg_file})
        cn_pid=""
        if [ "${type}" == "${NET_TYPE_VETH}" ]; then
            remote_cn=$(get_lxc_config_option "wr.network.${conn}.remote.cn" ${cfg_file})
            remote_type=$(get_lxc_config_option "wr.network.${conn}.remote.type" ${cfg_file})
            remote_link=$(get_lxc_config_option "wr.network.${conn}.remote.link" ${cfg_file})
            remote_eth_name="veth-${cn_name}-${conn}-1"
            # Get pid path of correct net namespace
            if [ "${remote_cn}" == "${HOST_CN_NAME}" ]; then
                if [ -d "${host_proc_path}/1" ]; then
                    cn_pid="${host_proc_path}/1"
                else
                    lxc_log "Error, host proc path ${host_proc_path} does not exist."
                    return 1
                fi
            elif [ -n "${remote_cn}" ]; then
                cn_pid=$(get_lxc_init_pid_from_cn_name ${remote_cn})
            fi
            if [ -n "${cn_pid}" ]; then
                # Down script will be executed in Domain 0 all namespaces. Its up to the script
                # to decide what namespace to jump into
                script_down=$(get_lxc_config_option "wr.network.${conn}.remote.script.down" ${cfg_file})
                if [ -x "${script_down}" ]; then
                    ${script_down} "${type}" "${cn_name}" "${remote_cn}" "${cn_pid}" "${remote_type}" "${remote_link}" "${remote_eth_name}"
                elif [ -n "${script_down}" ]; then
                    lxc_log "Warning, down script ${script_down} is not executed. Make sure it is executable"
                fi
                # Clean up any bridge that has "remote end" attached to it.
                case "${remote_type}" in
                ovs)
                    # As openvswitch client control ovs switch through sock under /var/run/openvswitch
                    # so its necessary to jump into mount namespace with option -m. Also there are rare cases
                    # ovs get into bad state, this causes ovs-vsctl to not return, so use timeout here. 30s
                    # seem to be reasonable.
                    timeout 30 nsenter -m -n -t ${cn_pid} -- ovs-vsctl del-port ${remote_link} ${remote_eth_name}
                    # Fixed: capture the status before testing it; "$?" expanded
                    # inside the log message would always print 0 (the status of
                    # the [ ] test itself).
                    res=$?
                    [ ${res} -ne 0 ] && lxc_log "Warning, ${conn}, ovs-vsctl cannot delete ${remote_eth_name} out of ${remote_link}, res=${res}"
                    ;;
                bridge)
                    nsenter -n -t ${cn_pid} -- brctl delif ${remote_link} ${remote_eth_name}
                    [ $? -ne 0 ] && lxc_log "Warning, ${conn}, brctl cannot delete ${remote_eth_name} out of ${remote_link}"
                    ;;
                *)
                    ;;
                esac
                # Now delete the veth pipe. The "cn end" will disappear after this call as well.
                nsenter -n -t ${cn_pid} -- ip link delete ${remote_eth_name}
                [ $? -ne 0 ] && lxc_log "Warning, ${conn}, cannot remove ${remote_eth_name}"
            else
                lxc_log "ERROR. cannot remove net specified by ${cfg_file} because cannot"
                lxc_log "find pid namespace of container ${remote_cn}"
                return 1
            fi
        fi
        if [ "${type}" == "${NET_TYPE_DPDK}" ]; then
            dpdk_pci_list=$(get_lxc_config_option "wr.network.${conn}.dpdk.pci" ${cfg_file} | sed 's/;/ /g')
            dpdk_kernmod_list=$(get_lxc_config_option "wr.network.${conn}.dpdk.kernmod" ${cfg_file} | sed 's/;/ /g')
            cn_pid=$(get_lxc_init_pid_from_cn_name ${cn_name})
            nsenter_ext=""
            [ -n "${cn_pid}" ] && nsenter_ext="nsenter -n -m -p -t ${cn_pid} --"
            # Down script will be executed in Domain 0 all namespaces. Its up to the script
            # to decide what namespace to jump into
            script_down=$(get_lxc_config_option "wr.network.${conn}.remote.script.down" ${cfg_file})
            if [ -x "${script_down}" ]; then
                ${script_down} "${type}" "${cn_name}" "${dpdk_pci_list}" "${dpdk_kernmod_list}"
            elif [ -n "${script_down}" ]; then
                lxc_log "Warning, down script ${script_down} is not executed. Make sure it is executable"
            fi
            if [ -n "${cn_pid}" ]; then
                dpdk_hugefs_path=$(get_lxc_config_option "wr.network.${conn}.dpdk.hugefs.path" ${cfg_file})
                [ -z "${dpdk_hugefs_path}" ] && dpdk_hugefs_path=${DPDK_HUGEFS_PATH_DEFAULT}
                ${nsenter_ext} umount ${dpdk_hugefs_path}
            fi
            for pci_info in ${dpdk_pci_list}; do
                # pci_info has the following format: <pci address>,<dpdk uio driver>
                pci=$(echo ${pci_info} | awk -F ',' '{print $1}')
                [ -z "${pci}" ] && continue
                driver=$(echo ${pci_info} | awk -F ',' '{print $2}')
                # If driver is not provided then use ${DPDK_DEFAULT_PCI_DRIVER} as default
                [ -z "${driver}" ] && driver=${DPDK_DEFAULT_PCI_DRIVER}
                if [ -n "${cn_pid}" ]; then
                    # Remove the /dev/uioN node the container saw for this device.
                    # (The redundant recomputation of ${pci} via an nsenter'd echo
                    # was dropped - it produced the value already extracted above.)
                    sys_pci_uio_path=$(${nsenter_ext} find ${root_fs_mount}/sys/ -name "*uio*" | grep -F "${pci}" | grep "uio\/uio")
                    ${nsenter_ext} rm /dev/$(basename ${sys_pci_uio_path}) > /dev/null 2>&1
                fi
                echo ${pci} >> /sys/bus/pci/drivers/${driver}/unbind
                [ $? -ne 0 ] && lxc_log "Warning, dpdk cannot unbind ${pci} from driver ${driver}"
            done
            # Some dpdk kernel modules caused dev nodes to be created. Remove them here.
            # Right now only dpdk rte_kni requires /dev/kni to be removed
            res=$(echo ${dpdk_kernmod_list} | grep -F 'rte_kni')
            [ -n "${res}" ] && ${nsenter_ext} rm /dev/kni > /dev/null 2>&1
            # If this dpdk connection failed to setup then there is no need to cleanup
            # the rest of connections.
            [ "${lxc_conn_setup_failed}" == "${conn}" ] && break
        fi
    done
    return 0
}
# Append the WindRiver-specific net hook section to the lxc container
# config file ${1}, unless an identical pre-mount hook entry is already
# present.  When the config declares a dpdk connection, the mount hook and
# a cgroup rule allowing uio character devices (major 249) are added too.
function lxc_add_net_hook_info_cfg {
    local cfg_file=${1}
    hook_script_path=$(dirname ${0})
    # Strip spaces/commas/tabs before matching so formatting differences
    # cannot hide an already-installed hook line.
    res=$(sed 's/[ ,\t]//g' ${cfg_file} | grep "^lxc.hook.pre-mount=${hook_script_path}/lxc_hook_net_pre-mount.sh")
    if [ -z "${res}" ]; then
        {
            echo
            echo "#################################################"
            echo "### Start WindRiver lxc net specific section ####"
            echo "lxc.hook.pre-mount = ${hook_script_path}/lxc_hook_net_pre-mount.sh"
        } >> ${cfg_file}
        # A dpdk-type connection additionally needs the mount hook and
        # access to uio devices from inside the container.
        conn_list=$(get_lxc_config_option_list "wr.network.connection" ${cfg_file})
        for conn in ${conn_list}; do
            type=$(get_lxc_config_option "wr.network.${conn}.type" ${cfg_file})
            if [ "${type}" == "${NET_TYPE_DPDK}" ]; then
                {
                    echo "lxc.hook.mount = ${hook_script_path}/lxc_hook_net_mount.sh"
                    echo "lxc.cgroup.devices.allow = c 249:* rwm"
                } >> ${cfg_file}
                break
            fi
        done
        {
            echo "### End WindRiver lxc net specific section ####"
            echo "#################################################"
            echo
        } >> ${cfg_file}
    fi
}
|
<reponame>Denis220795/Textokit<gh_stars>0
/*
* Copyright 2015 Textocat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.textocat.textokit.commons.util;
import java.util.BitSet;
/**
* @author <NAME>
*/
public class BitUtils {
private BitUtils() {
}
/**
* @param arg
* @param filter
* @return true only if arg contains all bits from filter
*/
public static boolean contains(BitSet arg, BitSet filter) {
for (int i = filter.nextSetBit(0); i >= 0; i = filter.nextSetBit(i + 1)) {
if (!arg.get(i)) {
return false;
}
}
return true;
}
}
|
module.exports = async d => {
let data = d.util.aoiFunc(d);
const Leref = await d.util.getUser(d, "608358453580136499");
data.result = Leref.avatarURL({format: 'png', size: 4096, dynamic: true});
return {
code: d.util.setCode(data)
}
} |
import React from 'react';
import ReactTooltip from 'react-tooltip';
import copy from '../../utils/copy';
import { Info } from '../icon/Info';
/**
 * Row of preset slippage-tolerance buttons (0.1% / 0.5% / 1%) with an info
 * tooltip. The currently selected value is highlighted; clicking a button
 * invokes `onChange` with that percentage.
 *
 * Fix: the highlight class previously used `cond && 'text-greenLight'`
 * inside a template literal, which renders the literal string "false" into
 * className for unselected buttons; replaced with a ternary falling back
 * to the empty string.
 */
export default function SlippageSelector({
  slippageTolerance,
  onChange,
}: {
  slippageTolerance: number;
  onChange: (slippage: number) => void;
}) {
  // Preset tolerance options, in percent.
  const validSlippages = [0.1, 0.5, 1];
  return (
    <>
      <fieldset className="flex items-center mb-4 pt-2">
        <label className="font-semibold text-xs text-center">Slippage: </label>
        <div>
          <div
            className="pl-1"
            data-type="dark"
            data-place="top"
            data-multiline={true}
            data-tip={copy.slippageCopy}
          >
            <Info />
          </div>
          <ReactTooltip />
        </div>
        <div className="border flex rounded-full ml-16 border-1 text-gray-400 border-gray-200">
          {validSlippages.map((slippage) => (
            <button
              key={slippage}
              className={`focus:outline-none text-xs hover:text-greenLight font-semibold rounded w-full py-1 px-2 mx-2 ${
                slippage === slippageTolerance ? 'text-greenLight' : ''
              }`}
              type="button"
              onClick={() => onChange(slippage)}
            >
              {slippage}%
            </button>
          ))}
        </div>
      </fieldset>
    </>
  );
}
|
from environs import Env

# The configuration package uses an __init__.py because users will import these
# methods from their own configs and we want a nicer import which looks
# like - from uvicore.configuration import env

# Shared module-level environs Env instance used to read environment variables.
env = Env()

# Not exported publicly; this module loads so early that uvicore.ioc is still None.
#from .configuration import Configuration

# Public API for import * and doc gens
# __all__ = [
#     'Env', 'env',
# ]
|
<reponame>artkoshelev/voyager
package httputil
import (
"context"
"fmt"
"net/http"
"time"
chimw "github.com/go-chi/chi/middleware"
"go.uber.org/zap"
)
// AccessLogger receives one callback per completed HTTP request with a
// summary of the request, the response, and the handling duration.
type AccessLogger interface {
	AccessLog(ctx context.Context, req AccessRequest, res AccessResponse, dur time.Duration)
}

// AccessRequest is a read-only view of the incoming request fields an
// access-log entry is built from.
type AccessRequest interface {
	// RequestID reports the chi middleware request id ("-" when none was set).
	RequestID() string
	RemoteAddr() string
	Proto() string
	Method() string
	RequestURI() string
	Host() string
	Referer() string
}

// AccessResponse exposes the status code and body size captured by the
// wrapped ResponseWriter.
type AccessResponse interface {
	Status() int
	BytesWritten() int
}
// AccessLog returns chi-compatible middleware that times each request,
// captures its response status/size via a wrapping ResponseWriter, and
// reports the result to l after the handler returns.
func AccessLog(l AccessLogger) func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		handler := func(w http.ResponseWriter, r *http.Request) {
			wrapped := chimw.NewWrapResponseWriter(w, r.ProtoMajor)

			start := time.Now()
			next.ServeHTTP(wrapped, r)
			elapsed := time.Since(start)

			id := chimw.GetReqID(r.Context())
			if id == "" {
				id = "-"
			}
			l.AccessLog(r.Context(), &accessRequest{id: id, req: r}, wrapped, elapsed)
		}
		return http.HandlerFunc(handler)
	}
}
// accessRequest adapts an *http.Request plus the chi request id to the
// AccessRequest interface.
type accessRequest struct {
	id  string        // request id; "-" when chi middleware supplied none
	req *http.Request // request being logged
}

// RequestID returns the id captured by the AccessLog middleware.
func (r *accessRequest) RequestID() string {
	return r.id
}

func (r *accessRequest) RemoteAddr() string {
	return r.req.RemoteAddr
}

func (r *accessRequest) Proto() string {
	return r.req.Proto
}

func (r *accessRequest) Method() string {
	return r.req.Method
}

func (r *accessRequest) RequestURI() string {
	return r.req.RequestURI
}

func (r *accessRequest) Host() string {
	return r.req.Host
}

func (r *accessRequest) Referer() string {
	return r.req.Referer()
}
var (
	// zapKindAccess tags every access-log entry so consumers can filter
	// access lines from other log kinds.
	zapKindAccess = zap.String("kind", "access")
)
// ZapAccessLogger implements AccessLogger by writing a structured "Access"
// entry with the *zap.Logger stored in the request context.
type ZapAccessLogger struct {
	// LogContextKey is the context key whose value must be a *zap.Logger.
	LogContextKey interface{}
}

// AccessLog emits one structured entry for the finished request. It panics
// when the context does not carry a *zap.Logger under LogContextKey, since
// that indicates a middleware wiring bug rather than a runtime condition.
func (l *ZapAccessLogger) AccessLog(ctx context.Context, req AccessRequest, res AccessResponse, dur time.Duration) {
	log, ok := ctx.Value(l.LogContextKey).(*zap.Logger)
	if !ok {
		panic(fmt.Sprintf("ZapAccessLogger's LogContextKey: `%v` doesn't point to a zap.Logger!", l.LogContextKey))
	}
	log.Info("Access",
		zapKindAccess,
		zap.String("request_reqid", req.RequestID()),
		zap.String("request_remote", req.RemoteAddr()),
		zap.String("request_proto", req.Proto()),
		zap.String("request_method", req.Method()),
		zap.String("request_url", req.RequestURI()),
		zap.String("request_host", req.Host()),
		zap.String("request_referer", req.Referer()),
		// Fixed: key was "responses_status", inconsistent with every other
		// "request_*"/"response_*" field name emitted here.
		zap.Int("response_status", res.Status()),
		zap.Int("response_length", res.BytesWritten()),
		zap.Duration("duration", dur),
	)
}
|
<reponame>hoangmirs/go-scraper
package main
import (
"os"
"os/signal"
"syscall"
"github.com/hoangmirs/go-scraper/bootstrap"
"github.com/hoangmirs/go-scraper/conf"
"github.com/hoangmirs/go-scraper/database"
"github.com/hoangmirs/go-scraper/workers/jobs"
"github.com/gocraft/work"
"github.com/gomodule/redigo/redis"
)
// Make a redis pool shared by the worker pool below. Connections are
// created lazily through database.GetRedisConnection and capped at 5
// active / 5 idle; Wait makes callers block instead of erroring when the
// pool is exhausted.
var redisPool = &redis.Pool{
	MaxActive: 5,
	MaxIdle:   5,
	Wait:      true,
	Dial:      database.GetRedisConnection,
}
// init runs the application bootstrap before main starts.
func init() {
	bootstrap.SetUp()
}
// main starts a gocraft/work worker pool with 5 concurrent workers,
// registers the scraper job, and shuts the pool down cleanly on
// SIGINT/SIGTERM.
func main() {
	workerPool := work.NewWorkerPool(jobs.Context{}, 5, conf.GetString("workerNamespace"), redisPool)
	workerPool.Middleware((*jobs.Context).ScraperLog)
	workerPool.JobWithOptions(conf.GetString("scraperJobName"), work.JobOptions{MaxFails: jobs.MaxFails}, (*jobs.Context).PerformScrape)
	workerPool.Start()

	// Block until an interrupt/termination signal arrives, then stop the pool.
	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
	<-stop

	workerPool.Stop()
}
|
#!/usr/bin/env bash
# Extract archives - use: extract <file>
# Credits to http://dotfiles.org/~pseup/.bashrc
#
# Picks the decompression tool from the file extension. Fixed: all
# expansions of $1 are now quoted so paths containing spaces or glob
# characters work correctly.
extract()
{
	if [ -f "$1" ]
	then
		case "$1" in
			*.tar.bz2) tar xjf "$1" ;;
			*.tar.gz) tar xzf "$1" ;;
			*.bz2) bunzip2 "$1" ;;
			*.rar) rar x "$1" ;;
			*.gz) gunzip "$1" ;;
			*.tar) tar xf "$1" ;;
			*.tbz2) tar xjf "$1" ;;
			*.tgz) tar xzf "$1" ;;
			*.zip) unzip "$1" ;;
			*.Z) uncompress "$1" ;;
			*.7z) 7z x "$1" ;;
			*) echo "'$1' cannot be extracted via extract()" ;;
		esac
	else
		echo "'$1' is not a valid file"
	fi
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.