text stringlengths 1 1.05M |
|---|
The content below is a collection of code snippets in several languages (Python, Bash, C, Java, JavaScript) — reusable components such as transports, scripts, parsers, and utility functions.
# -*- test-case-name: vumi.transports.cellulant.tests.test_cellulant_sms -*-
import json
from urllib import urlencode
from twisted.internet.defer import inlineCallbacks
from vumi.utils import http_request_full
from vumi import log
from vumi.config import ConfigDict, ConfigText
from vumi.transports.httprpc import HttpRpcTransport
class CellulantSmsTransportConfig(HttpRpcTransport.CONFIG_CLASS):
    """Cellulant transport config.

    Static configuration consumed by :class:`CellulantSmsTransport`,
    extending the base HTTP RPC transport config.
    """
    # Keyed by the outbound message's `from_addr`; each value supplies the
    # API username/password used when sending from that address.
    credentials = ConfigDict(
        "A dictionary where the `from_addr` is used for the key lookup and the"
        " returned value should be a dictionary containing the username and"
        " password.", required=True, static=True)
    outbound_url = ConfigText(
        "The URL to send outbound messages to.", required=True, static=True)
class CellulantSmsTransport(HttpRpcTransport):
    """
    HTTP transport for Cellulant SMS.

    Inbound messages arrive as HTTP requests and are validated in
    :meth:`handle_raw_inbound_message`; outbound messages are delivered by
    an HTTP GET to the configured outbound URL.
    """

    transport_type = 'sms'
    agent_factory = None  # For swapping out the Agent we use in tests.
    CONFIG_CLASS = CellulantSmsTransportConfig

    # Query parameters that must be present on every inbound request.
    EXPECTED_FIELDS = set(["SOURCEADDR", "DESTADDR", "MESSAGE", "ID"])
    # Parameters Cellulant may send that are accepted but unused.
    IGNORED_FIELDS = set(["channelID", "keyword", "CHANNELID", "serviceID",
                          "SERVICEID", "unsub", "transactionID"])

    # Error codes the Cellulant API can return for an outbound send,
    # mapped to human-readable descriptions used in nacks.
    KNOWN_ERROR_RESPONSE_CODES = {
        'E0': 'Insufficient HTTP Params passed',
        'E1': 'Invalid username or password',
        'E2': 'Credits have expired or run out',
        '1005': 'Suspect source address',
    }

    def validate_config(self):
        # Cache static config values for the handlers below.
        config = self.get_static_config()
        self._credentials = config.credentials
        self._outbound_url = config.outbound_url
        return super(CellulantSmsTransport, self).validate_config()

    @inlineCallbacks
    def handle_outbound_message(self, message):
        """Send an outbound SMS via HTTP GET and publish an ack or nack."""
        # Credentials are looked up by sender address; a missing entry
        # falls through to empty username/password.
        creds = self._credentials.get(message['from_addr'], {})
        username = creds.get('username', '')
        password = creds.get('password', '')
        params = {
            'username': username,
            'password': password,
            'source': message['from_addr'],
            'destination': message['to_addr'],
            'message': message['content'],
        }
        log.msg("Sending outbound message: %s" % (message,))
        url = '%s?%s' % (self._outbound_url, urlencode(params))
        log.msg("Making HTTP request: %s" % (url,))
        response = yield http_request_full(
            url, '', method='GET', agent_class=self.agent_factory)
        log.msg("Response: (%s) %r" % (response.code, response.delivered_body))
        content = response.delivered_body.strip()
        # we'll only send 1 message at a time and so the API can only
        # return this on a valid ack
        if content == '1':
            yield self.publish_ack(user_message_id=message['message_id'],
                                   sent_message_id=message['message_id'])
        else:
            # Any other body is a failure: map known codes to a
            # description, otherwise report the raw response body.
            error = self.KNOWN_ERROR_RESPONSE_CODES.get(
                content, 'Unknown response code: %s' % (content,))
            yield self.publish_nack(message['message_id'], error)

    @inlineCallbacks
    def handle_raw_inbound_message(self, message_id, request):
        """Validate an inbound HTTP request and publish it as a message."""
        values, errors = self.get_field_values(
            request, self.EXPECTED_FIELDS, self.IGNORED_FIELDS)
        if errors:
            # Malformed request: respond 400 with the error details.
            log.msg('Unhappy incoming message: %s' % (errors,))
            yield self.finish_request(message_id, json.dumps(errors), code=400)
            return
        log.msg(('CellulantSmsTransport sending from %(SOURCEADDR)s to '
                 '%(DESTADDR)s message "%(MESSAGE)s"') % values)
        yield self.publish_message(
            message_id=message_id,
            content=values['MESSAGE'],
            to_addr=values['DESTADDR'],
            from_addr=values['SOURCEADDR'],
            provider='vumi',
            transport_type=self.transport_type,
            transport_metadata={'transport_message_id': values['ID']},
        )
        # Acknowledge receipt to Cellulant with the generated message id.
        yield self.finish_request(
            message_id, json.dumps({'message_id': message_id}))
|
#!/bin/bash
#v1.1.0
#------------------------------------------------------------------------------
# tests the full package creation
#------------------------------------------------------------------------------
doTestCreateFullPackage(){
   cd "$product_instance_dir"
   doLog " INFO START : create-full-package.test"
   cat doc/txt/livy-rester/tests/pckg/create-full-package.test.txt
   doSpecCreateFullPackage
   doHelpCreateFullPackage
   export exit_code=0

   # Run once without an instance file, then once per named instance config.
   # BUG FIX: the per-run log labels used to say "create-relative-package",
   # copy-pasted from another test; they now name this test and number the runs.
   local run=0
   local inst
   for inst in "" ".tst.livy-rester" ".prd.livy-rester" ".git.livy-rester"; do
      run=$((run+1))
      if [[ -z "$inst" ]]; then
         bash src/bash/livy-rester/livy-rester.sh -a create-full-package
      else
         bash src/bash/livy-rester/livy-rester.sh -a create-full-package \
            -i "$product_instance_dir/met/$inst"
      fi
      export exit_code=$?
      doLog " create-full-package.test-$run exit_code: $exit_code "
      sleep "$sleep_interval"
      # Stop at the first failing run; exit_code carries the failure out.
      test $exit_code -ne 0 && return
   done
   doLog " INFO STOP : create-full-package.test"
}
#eof test doCreateFullPackage
|
// Format today's date as MM/DD/YYYY and log it.
const now = new Date();
const dd = `${now.getDate()}`.padStart(2, '0');
const mm = `${now.getMonth() + 1}`.padStart(2, '0'); // getMonth() is zero-based
const yyyy = now.getFullYear();
const todayDate = `${mm}/${dd}/${yyyy}`;
console.log(todayDate); // output: mm/dd/yyyy
#if defined(SUSE) || defined(FEDORA)
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/utsname.h>
#include "pool.h"
#include "repo.h"
#include "repo_rpmdb.h"
#include "repoinfo.h"
#include "repoinfo_config_yum.h"
#ifdef FEDORA
# define REPOINFO_PATH "/etc/yum.repos.d"
#endif
#ifdef SUSE
# define REPOINFO_PATH "/etc/zypp/repos.d"
#endif
/*
 * Expand $releasever and $basearch in a line from a yum .repo file.
 *
 * The substitution values are computed once and cached in statics; call
 * yum_substitute(pool, 0) to free the cached values.  When a substitution
 * happens, the returned string lives in pool temp space (pool_tmpjoin);
 * otherwise the input line is returned unchanged.
 */
char *
yum_substitute(Pool *pool, char *line)
{
  char *p, *p2;
  /* Cached substitution values, shared across calls. */
  static char *releaseevr;
  static char *basearch;

  if (!line)
    {
      /* NULL line: release the cached values and reset. */
      solv_free(releaseevr);
      releaseevr = 0;
      solv_free(basearch);
      basearch = 0;
      return 0;
    }
  p = line;
  while ((p2 = strchr(p, '$')) != 0)
    {
      if (!strncmp(p2, "$releasever", 11))
        {
          if (!releaseevr)
            {
              /* Derive $releasever from the installed package providing
               * "redhat-release": its EVR, truncated at the first '-'
               * (i.e. the version without the release part). */
              void *rpmstate;
              Queue q;

              queue_init(&q);
              rpmstate = rpm_state_create(pool, pool_get_rootdir(pool));
              rpm_installedrpmdbids(rpmstate, "Providename", "redhat-release", &q);
              if (q.count)
                {
                  void *handle;
                  char *p;
                  handle = rpm_byrpmdbid(rpmstate, q.elements[0]);
                  releaseevr = handle ? rpm_query(handle, SOLVABLE_EVR) : 0;
                  if (releaseevr && (p = strchr(releaseevr, '-')) != 0)
                    *p = 0;
                }
              rpm_state_free(rpmstate);
              queue_free(&q);
              if (!releaseevr)
                {
                  fprintf(stderr, "no installed package provides 'redhat-release', cannot determine $releasever\n");
                  exit(1);
                }
            }
          /* Splice the value in place of the variable; the joined string
           * is pool temp space, and scanning resumes after the value. */
          *p2 = 0;
          p = pool_tmpjoin(pool, line, releaseevr, p2 + 11);
          p2 = p + (p2 - line);
          line = p;
          p = p2 + strlen(releaseevr);
          continue;
        }
      if (!strncmp(p2, "$basearch", 9))
        {
          if (!basearch)
            {
              /* Derive $basearch from uname(2); i?86 machines are
               * normalized to i386 (second char forced to '3'). */
              struct utsname un;
              if (uname(&un))
                {
                  perror("uname");
                  exit(1);
                }
              basearch = strdup(un.machine);
              if (basearch[0] == 'i' && basearch[1] && !strcmp(basearch + 2, "86"))
                basearch[1] = '3';
            }
          *p2 = 0;
          p = pool_tmpjoin(pool, line, basearch, p2 + 9);
          p2 = p + (p2 - line);
          line = p;
          p = p2 + strlen(basearch);
          continue;
        }
      /* Unknown variable: skip the '$' and keep scanning. */
      p = p2 + 1;
    }
  return line;
}
/*
 * Parse every *.repo file under REPOINFO_PATH into an array of
 * struct repoinfo.  Stores the entry count in *nrepoinfosp and returns
 * the array (grown via solv_extend); returns 0 with count 0 when the
 * repos directory cannot be opened.
 */
struct repoinfo *
read_repoinfos_yum(Pool *pool, int *nrepoinfosp)
{
  const char *reposdir = REPOINFO_PATH;
  char buf[4096];
  char buf2[4096], *kp, *vp, *kpe;
  DIR *dir;
  FILE *fp;
  struct dirent *ent;
  int l, rdlen;
  struct repoinfo *repoinfos = 0, *cinfo;
  int nrepoinfos = 0;

  rdlen = strlen(reposdir);
  dir = opendir(reposdir);
  if (!dir)
    {
      *nrepoinfosp = 0;
      return 0;
    }
  while ((ent = readdir(dir)) != 0)
    {
      /* Skip hidden entries and anything not named "*.repo" or too long
       * to fit in the path buffer. */
      if (ent->d_name[0] == '.')
        continue;
      l = strlen(ent->d_name);
      if (l < 6 || rdlen + 2 + l >= sizeof(buf) || strcmp(ent->d_name + l - 5, ".repo") != 0)
        continue;
      snprintf(buf, sizeof(buf), "%s/%s", reposdir, ent->d_name);
      if ((fp = fopen(buf, "r")) == 0)
        {
          perror(buf);
          continue;
        }
      cinfo = 0;  /* no current [section] yet */
      while(fgets(buf2, sizeof(buf2), fp))
        {
          /* Trim trailing newline/whitespace, then leading whitespace. */
          l = strlen(buf2);
          if (l == 0)
            continue;
          while (l && (buf2[l - 1] == '\n' || buf2[l - 1] == ' ' || buf2[l - 1] == '\t'))
            buf2[--l] = 0;
          kp = buf2;
          while (*kp == ' ' || *kp == '\t')
            kp++;
          if (!*kp || *kp == '#')
            continue;  /* blank line or comment */
          if (strchr(kp, '$'))
            kp = yum_substitute(pool, kp);  /* expand $releasever/$basearch */
          if (*kp == '[')
            {
              /* "[alias]" opens a new repo section with defaults. */
              vp = strrchr(kp, ']');
              if (!vp)
                continue;
              *vp = 0;
              repoinfos = solv_extend(repoinfos, nrepoinfos, 1, sizeof(*repoinfos), 15);
              cinfo = repoinfos + nrepoinfos++;
              memset(cinfo, 0, sizeof(*cinfo));
              cinfo->alias = strdup(kp + 1);
              cinfo->type = TYPE_RPMMD;
              cinfo->autorefresh = 1;
              cinfo->priority = 99;
#ifndef FEDORA
              cinfo->repo_gpgcheck = 1;
#endif
              cinfo->metadata_expire = METADATA_EXPIRE;
              continue;
            }
          if (!cinfo)
            continue;  /* key/value line outside any section */
          /* Split "key = value", trimming whitespace around both parts. */
          vp = strchr(kp, '=');
          if (!vp)
            continue;
          for (kpe = vp - 1; kpe >= kp; kpe--)
            if (*kpe != ' ' && *kpe != '\t')
              break;
          if (kpe == kp)
            continue;
          vp++;
          while (*vp == ' ' || *vp == '\t')
            vp++;
          kpe[1] = 0;
          /* Known configuration keys. */
          if (!strcmp(kp, "name"))
            cinfo->name = strdup(vp);
          else if (!strcmp(kp, "enabled"))
            cinfo->enabled = *vp == '0' ? 0 : 1;
          else if (!strcmp(kp, "autorefresh"))
            cinfo->autorefresh = *vp == '0' ? 0 : 1;
          else if (!strcmp(kp, "gpgcheck"))
            cinfo->pkgs_gpgcheck = *vp == '0' ? 0 : 1;
          else if (!strcmp(kp, "repo_gpgcheck"))
            cinfo->repo_gpgcheck = *vp == '0' ? 0 : 1;
          else if (!strcmp(kp, "baseurl"))
            cinfo->baseurl = strdup(vp);
          else if (!strcmp(kp, "mirrorlist"))
            {
              /* A mirrorlist URL mentioning "metalink" is a metalink. */
              if (strstr(vp, "metalink"))
                cinfo->metalink = strdup(vp);
              else
                cinfo->mirrorlist = strdup(vp);
            }
          else if (!strcmp(kp, "path"))
            {
              if (vp && strcmp(vp, "/") != 0)
                cinfo->path = strdup(vp);
            }
          else if (!strcmp(kp, "type"))
            {
              if (!strcmp(vp, "yast2"))
                cinfo->type = TYPE_SUSETAGS;
              else if (!strcmp(vp, "rpm-md"))
                cinfo->type = TYPE_RPMMD;
              else if (!strcmp(vp, "plaindir"))
                cinfo->type = TYPE_PLAINDIR;
              else if (!strcmp(vp, "mdk"))
                cinfo->type = TYPE_MDK;
              else
                cinfo->type = TYPE_UNKNOWN;
            }
          else if (!strcmp(kp, "priority"))
            cinfo->priority = atoi(vp);
          else if (!strcmp(kp, "keeppackages"))
            cinfo->keeppackages = *vp == '0' ? 0 : 1;
        }
      fclose(fp);
      cinfo = 0;
    }
  closedir(dir);
  *nrepoinfosp = nrepoinfos;
  return repoinfos;
}
#endif
|
#!/bin/bash -e
# Start the "mobymac" VirtualBox VM headlessly.
case "$1" in
  boot)
    # wait a bit before starting the VM to prevent NFS mount issues...
    sleep 30
    ;;
esac
/Applications/VirtualBox.app/Contents/MacOS/VBoxManage startvm mobymac --type headless
|
import requests
import json

# Fetch today's Dow Jones index quote from the IEX batch endpoint.
API_URL = ("https://api.iextrading.com/1.0/stock/market/batch"
           "?symbols=DJIA&types=quote")

# Bound the request time and fail fast on HTTP errors instead of trying to
# parse an error page as JSON.
response = requests.get(API_URL, timeout=10)
response.raise_for_status()

# requests decodes JSON directly; no need for json.loads(response.text).
data = response.json()

# Access the "quote" property of the response data
quote = data["DJIA"]["quote"]

# Pull out the OHLCV fields of interest.
dow_data = {key: quote[key] for key in ("open", "close", "high", "low", "volume")}

# Print the Dow Jones index data
print(dow_data)
#!/bin/bash
set -e

# Create the application database owned by $POSTGRES_USER, then seed it.
# ON_ERROR_STOP makes psql exit non-zero on the first SQL error, which
# aborts this script because of `set -e`.
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" postgres <<-EOSQL
CREATE DATABASE $USER_DB OWNER $POSTGRES_USER;
EOSQL

# BUG FIX: quote "$USER_DB" so an unset/space-containing value fails loudly
# instead of silently shifting arguments.
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" "$USER_DB" < /dump/insert.sql
|
#!/bin/bash
# Upload release asset files to a GitHub Release identified by the workflow
# event payload. Requires GITHUB_TOKEN, GITHUB_REPOSITORY, GITHUB_EVENT_PATH.
if [[ -z "$GITHUB_TOKEN" ]]; then
  echo "ERROR: the GITHUB_TOKEN env variable wasn't set"
  exit 1
fi

# A file glob of assets to upload. The docker entrypoint arg is "inputs.assets".
ASSETS_GLOB=$1
AUTH_HEADER="Authorization: token ${GITHUB_TOKEN}"
RELEASE_ID=$(jq --raw-output '.release.id' "$GITHUB_EVENT_PATH")

# Upload each asset file to the GitHub Release
for asset_file in $ASSETS_GLOB; do
  filename=$(basename "$asset_file")
  # BUG FIX: the name query parameter previously expanded "$(unknown)"
  # (running a nonexistent command); it must be the asset's basename,
  # which was computed above but never used.
  upload_url="https://uploads.github.com/repos/${GITHUB_REPOSITORY}/releases/${RELEASE_ID}/assets?name=${filename}"
  echo "Uploading asset: $asset_file"
  touch curl_log
  response_code=$(curl \
    -sSL \
    -XPOST \
    -H "${AUTH_HEADER}" \
    --upload-file "${asset_file}" \
    --header "Content-Type:application/octet-stream" \
    --write-out "%{http_code}" \
    --output curl_log \
    "$upload_url")
  # Quote the status code so an empty value fails the test instead of
  # producing a syntax error.
  if [ "$response_code" -ge 400 ]; then
    echo "ERROR: curl upload failed with status code $response_code"
    cat curl_log && rm curl_log
    exit 1
  fi
done
|
// Handle clicks anywhere in the document.
// jQuery's .click(fn) is shorthand for .on("click", fn); the explicit
// form is used here.
$(document).on("click", function () {
  // JavaScript function goes here...
});
<gh_stars>0
/*
* Copyright (C) 2018-2019 <NAME> (www.helger.com)
* philip[at]helger[dot]com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.helger.aufnahme.simple;
import com.helger.commons.ValueEnforcer;
import com.helger.commons.equals.EqualsHelper;
import com.helger.commons.hashcode.HashCodeGenerator;
import com.helger.commons.state.EChange;
import com.helger.commons.string.ToStringGenerator;
import com.helger.commons.type.ObjectType;
import javax.annotation.Nonnull;
import javax.annotation.concurrent.NotThreadSafe;
/**
* <p>Default implementation of {@link com.helger.aufnahme.simple.IStichprobeDeadwood}</p>
* <p>This class was initially automatically created</p>
*
*
* @author JDMCodeGenerator
*/
@NotThreadSafe
public class StichprobeDeadwood
  implements IStichprobeDeadwood
{
  /** Object type descriptor for this class. */
  public static final ObjectType OT = new ObjectType("StichprobeDeadwood");

  // Decomposition degree class of the sample.
  private EDecompositionDegreeClass m_eDoD;
  // Tree kind of the sample.
  private ETreeKind m_eTreeKind;
  // Length value.
  private int m_nLength;
  // BHD value (presumably Brusthöhendurchmesser / diameter at breast
  // height — TODO confirm against the domain model).
  private int m_nBHD;

  /** Empty constructor; all fields remain at their defaults. */
  public StichprobeDeadwood() {
  }

  /**
   * Constructor setting all fields via the (validating) setters.
   *
   * @param eDoD decomposition degree class, never <code>null</code>
   * @param eTreeKind tree kind, never <code>null</code>
   * @param nLength length value
   * @param nBHD BHD value
   */
  public StichprobeDeadwood(@Nonnull final EDecompositionDegreeClass eDoD,
                            @Nonnull final ETreeKind eTreeKind,
                            final int nLength,
                            final int nBHD) {
    setDoD(eDoD);
    setTreeKind(eTreeKind);
    setLength(nLength);
    setBHD(nBHD);
  }

  /**
   * Copy constructor.
   *
   * @param aOther object to copy from, never <code>null</code>
   */
  public StichprobeDeadwood(@Nonnull final IStichprobeDeadwood aOther) {
    ValueEnforcer.notNull(aOther, "Other");
    setDoD(aOther.getDoD());
    setTreeKind(aOther.getTreeKind());
    setLength(aOther.getLength());
    setBHD(aOther.getBHD());
  }

  @Override
  public boolean equals(final Object o) {
    // Standard equals pattern: identity, exact-class check, then
    // field-by-field comparison.
    if (o == this) {
      return true;
    }
    if ((o == null)||(this.getClass()!= o.getClass())) {
      return false;
    }
    final StichprobeDeadwood rhs = ((StichprobeDeadwood) o);
    if (!EqualsHelper.equals(m_eDoD, rhs.m_eDoD)) {
      return false;
    }
    if (!EqualsHelper.equals(m_eTreeKind, rhs.m_eTreeKind)) {
      return false;
    }
    if (!EqualsHelper.equals(m_nLength, rhs.m_nLength)) {
      return false;
    }
    if (!EqualsHelper.equals(m_nBHD, rhs.m_nBHD)) {
      return false;
    }
    return true;
  }

  @Override
  public int hashCode() {
    // Must stay consistent with equals: hashes the same four fields.
    return new HashCodeGenerator(this).append(m_eDoD).append(m_eTreeKind).append(m_nLength).append(m_nBHD).getHashCode();
  }

  @Override
  public String toString() {
    return new ToStringGenerator(this).append("DoD", m_eDoD).append("treeKind", m_eTreeKind).append("length", m_nLength).append("BHD", m_nBHD).getToString();
  }

  @Nonnull
  public final EDecompositionDegreeClass getDoD() {
    return m_eDoD;
  }

  /**
   * Set the decomposition degree class.
   * Package-private; returns whether the value actually changed.
   */
  @Nonnull
  final EChange setDoD(@Nonnull final EDecompositionDegreeClass eDoD) {
    ValueEnforcer.notNull(eDoD, "DoD");
    if (eDoD.equals(m_eDoD)) {
      return EChange.UNCHANGED;
    }
    m_eDoD = eDoD;
    return EChange.CHANGED;
  }

  @Nonnull
  public final ETreeKind getTreeKind() {
    return m_eTreeKind;
  }

  /** Set the tree kind; returns whether the value actually changed. */
  @Nonnull
  final EChange setTreeKind(@Nonnull final ETreeKind eTreeKind) {
    ValueEnforcer.notNull(eTreeKind, "TreeKind");
    if (eTreeKind.equals(m_eTreeKind)) {
      return EChange.UNCHANGED;
    }
    m_eTreeKind = eTreeKind;
    return EChange.CHANGED;
  }

  public final int getLength() {
    return m_nLength;
  }

  /** Set the length; returns whether the value actually changed. */
  @Nonnull
  final EChange setLength(final int nLength) {
    if (nLength == m_nLength) {
      return EChange.UNCHANGED;
    }
    m_nLength = nLength;
    return EChange.CHANGED;
  }

  public final int getBHD() {
    return m_nBHD;
  }

  /** Set the BHD; returns whether the value actually changed. */
  @Nonnull
  final EChange setBHD(final int nBHD) {
    if (nBHD == m_nBHD) {
      return EChange.UNCHANGED;
    }
    m_nBHD = nBHD;
    return EChange.CHANGED;
  }
}
|
<gh_stars>1-10
/*
* button.c
*
* Created on: 2018. 3. 16.
* Author: <NAME>
*/
#include "button.h"
/* Configure PC12 as a floating (no pull) digital input for the button. */
void buttonInit(void)
{
  GPIO_InitTypeDef cfg;

  cfg.Pin   = GPIO_PIN_12;
  cfg.Mode  = GPIO_MODE_INPUT;
  cfg.Pull  = GPIO_NOPULL;
  cfg.Speed = GPIO_SPEED_LOW;
  HAL_GPIO_Init(GPIOC, &cfg);
}
/* Return true while the button input (PC12) reads high.
 * The channel argument is currently ignored — only one button is wired. */
bool buttonGetPressed(uint8_t ch)
{
  (void)ch;  /* unused for now */
  return HAL_GPIO_ReadPin(GPIOC, GPIO_PIN_12) == GPIO_PIN_SET;
}
|
<filename>Battleships/model/coordinates.py
"""
Module contains class coordinates
Class coordinates represent a pair of coordinates of a single cell on a 10x10 board
"""
class Coordinates:
    """
    Represents the coordinates of a single cell on a 10x10 board.

    ``x`` is a column letter in the range 'a'-'j' and ``y`` is a row
    number in the range 1-10.
    """

    # Valid column letters in board order; shared by next_x/prev_x instead
    # of rebuilding the list in each call.
    LETTERS = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j')

    def __init__(self, x: str, y: int):
        """
        Validate and store the coordinates.

        :param x: a single letter in range a-j
        :param y: an integer in range [1-10]
        :raises ValueError: if either coordinate is out of range
            (ValueError subclasses Exception, so existing ``except
            Exception`` handlers still work)
        """
        if 1 <= y <= 10 and x in self.LETTERS:
            self.x = x
            self.y = y
        else:
            raise ValueError('invalid coordinates values')

    def match(self, coordinates) -> bool:
        """Return True if *coordinates* refers to the same cell."""
        return coordinates.x == self.x and coordinates.y == self.y

    def next_y(self):
        """Return the next y value, or None at the edge (y == 10)."""
        return self.y + 1 if self.y < 10 else None

    def prev_y(self):
        """Return the previous y value, or None at the edge (y == 1)."""
        return self.y - 1 if self.y > 1 else None

    def next_x(self):
        """Return the next column letter, or None at the edge ('j')."""
        index = self.LETTERS.index(self.x)
        return self.LETTERS[index + 1] if index < 9 else None

    def prev_x(self):
        """Return the previous column letter, or None at the edge ('a')."""
        index = self.LETTERS.index(self.x)
        return self.LETTERS[index - 1] if index > 0 else None
|
#! /bin/bash
# Remove the mosquitto PHP extension from every installed PHP SAPI config.
echo "[jeedom-plugin-snips]"
echo "--------------------------------"
echo "[*] Start to remove dependencies."

if [[ -d "/etc/php5/" ]]; then
	echo "[*] Removing extension from PHP5"
	# BUG FIX: these conditions were negated ("! `... grep mosquitto`"),
	# so the extension line was only "removed" when it was already absent.
	# Mirror the PHP7 logic below: edit the ini only when it actually
	# references mosquitto.
	if [[ -d "/etc/php5/cli/" && `cat /etc/php5/cli/php.ini | grep "mosquitto"` ]]; then
		sed -i '/extension=mosquitto.so/d' /etc/php5/cli/php.ini
	fi
	if [[ -d "/etc/php5/fpm/" && `cat /etc/php5/fpm/php.ini | grep "mosquitto"` ]]; then
		sed -i '/extension=mosquitto.so/d' /etc/php5/fpm/php.ini
		service php5-fpm restart
	fi
	if [[ -d "/etc/php5/apache2/" && `cat /etc/php5/apache2/php.ini | grep "mosquitto"` ]]; then
		sed -i '/extension=mosquitto.so/d' /etc/php5/apache2/php.ini
	fi
fi

if [[ -d "/etc/php/7.0/" ]]; then
	echo "[*] Removing extension from PHP7.0"
	if [[ -d "/etc/php/7.0/cli/" && `cat /etc/php/7.0/cli/php.ini | grep "mosquitto"` ]]; then
		sed -i '/extension=mosquitto.so/d' /etc/php/7.0/cli/php.ini
	fi
	if [[ -d "/etc/php/7.0/apache2/" && `cat /etc/php/7.0/apache2/php.ini | grep "mosquitto"` ]]; then
		sed -i '/extension=mosquitto.so/d' /etc/php/7.0/apache2/php.ini
	fi
fi

echo "--------------------------------"
echo "[*] Uninstallation is done."
service apache2 restart
// Centralized user-facing message strings, grouped by concern.
// NOTE(review): several strings contain spelling errors ("Successfull",
// "fullfiled"); they are left untouched here because clients may match on
// the exact text — fix them in a coordinated change if needed.
module.exports = {
  // Messages returned in API responses.
  api: {
    SUCCESS: 'Request Successfull.',
    SERVER_ERROR: 'Error occurred on server. Please, report it back to team.',
    SOMETHING_WENT_WRONG: 'Something went wrong.',
    UNAUTHORIZED_USER: 'Unauthorized User',
    MISSING_QUERY_PARAMETER: 'Please Add Required Query Parameter',
    CREATED: 'Created!',
    UNSUCCESSFULL: 'Request not fullfiled',
    FOUND: 'Resource Successfully fetched !',
    NOT_FOUND: 'Resource Looking for is not found.',
    USER_FOUND: 'User Account Found',
    USER_NOT_FOUND: 'User Account not found!',
    JOB_FOUND: 'JOB Found',
    JOB_NOT_FOUND: 'JOB not found!',
    JOB_ALREADY_ASSIGNED: 'Job is already assigned to someone',
    MISSING_CREDENTIALS: 'Required Credentials not given',
    INVALID_CREDENTIALS: 'Credentials not matched',
    UPDATE_UNSUCCESSFULL: 'Unable to Update Requested Field',
    UPDATE_SUCCESSFULL: 'All details has been updated!',
    NO_NEW_UPDATE: 'No New Data requested for update',
    LINK_EXPIRED:
      'Given link has expired. Please repeat the process of generation of new Link otherwise contact our team',
  },
  // Messages used for input-validation failures.
  validations: {
    INVALID_UPDATE: "Requested fields can't be updated",
    INVALID_OPERATION: 'Requested fields can be added',
  },
};
|
import styled from "styled-components";

// Page-level heading: uppercase, brand color from the --main-color CSS
// custom property.
const Headline = styled.h1`
color: var(--main-color);
margin-bottom: 1rem;
text-transform: uppercase;
font-size: 2rem;
`;

export default Headline;
|
def max_profit_solution(prices):
    """Return the maximum profit from one buy followed by one later sell.

    Single pass: track the cheapest price seen so far and the best profit
    achievable by selling at the current price. Returns 0 for empty input
    or monotonically falling prices.
    """
    best_profit = 0
    cheapest = float('inf')
    for current in prices:
        if current < cheapest:
            cheapest = current
        elif current - cheapest > best_profit:
            best_profit = current - cheapest
    return best_profit
public static boolean isOdd(int num)
{
if (num % 2 == 0)
return false;
else
return true;
} |
# Import necessary modules
from PYB11Generator import *

# Create a PYB11 module
mod = PYB11Module(name='MyModule')

# Define the Vector and Scalar classes (assuming they are defined elsewhere)
mod.add_class('Vector')
mod.add_class('Scalar')

# Define the reinitialize function in the PYB11 module.
# NOTE(review): the "default values" here appear to be C++ type strings
# describing the bound signature (PYB11Generator convention), not runtime
# defaults — confirm against the PYB11Generator documentation.
@PYB11virtual
def reinitialize(self,
                 xmin = "const Vector&",
                 xmax = "const Vector&",
                 htarget = "const Scalar"):
    "Reinitialize to possibly more efficient based on the specified box (xmin,xmax) and htarget size"

# Generate the Python binding code
mod.generate()
#!/bin/bash
#
# runtests.sh
#
# Invoke selected or all unit tests under the tests/ subdir structure.
# run with "-h" for help.
#
# Author: John Randolph (jrand@google.com)
#
TESTSDIR="tests"
PYTHON=""
TOPDIR=""
VERBOSE=""
declare -a TESTS
# Print an error message to stderr and abort with exit status 1.
function die() {
  echo error: "$@" >&2
  exit 1
}
# Echo the arguments to stderr, but only when verbose mode (-v) is on.
function printVerbose() {
  if [ -n "$VERBOSE" ]; then
    echo "$@" >&2
  fi
}
# Print the usage text to stderr and exit successfully (used for -h and
# for unknown options).
function printUsageExit() {
  cat <<EOM >&2
Usage: $0 [-v] [-h] [test targets ...]
-v verbose
-h this help
test targets are supplied (relative paths to test binaries) they
will be run,
otherwise all *_test.py binaries will be run under the tests dir.
EOM
  exit 0
}
# Pick the system python binary matching the host OS X version.
# NOTE(review): only handles OS X 10.4-10.10 and dies on anything newer —
# this table is dated and will need extending for later releases.
function detectPython() {
  case `sw_vers -productVersion 2>/dev/null` in
    10.10*) PYTHON="python2.7" ;;
    10.9*) PYTHON="python2.7" ;;
    10.8*) PYTHON="python2.7" ;;
    10.7*) PYTHON="python2.7" ;;
    10.6*) PYTHON="python2.6" ;;
    10.5*) PYTHON="python2.5" ;;
    10.4*) PYTHON="python2.4" ;;
    *) die "Could not detect OS X version."
  esac
}
# Ensure we were invoked from the distribution root (where this script
# lives) and record that directory in TOPDIR.
function detectTopDir() {
  scriptname=$(basename "$0")
  if [ ! -x "./${scriptname}" ]; then
    die "Run $0 from the top of the munki distribution."
  fi
  TOPDIR=$(pwd)
}
# Verify the tests/ subdirectory exists under the distribution root.
function detectTestsDir() {
  [ -d "${TOPDIR}/${TESTSDIR}" ] || die "No tests to run."
}
function parseArgs() {
while getopts "vh" opt "$@" ; do
case "$opt" in
v) VERBOSE="1" ;;
h) printUsageExit ;;
*) printUsageExit ;;
esac
shift
done
if [[ $# -gt 0 ]]; then
TESTS=("$@")
fi
}
# main
parseArgs "$@"
detectPython
detectTopDir
detectTestsDir
cd "${TESTSDIR}"

# output a list of tests to run in format DIR\tFILENAME\n
# whether they are received from arguments or found with find.
(
# find specific tests
if [[ "$TESTS" ]]; then
  n=0
  while [[ $n -lt ${#TESTS[@]} ]]; do
    file="${TESTS[$n]}"
    if [[ ! -f "${file}" ]]; then
      die "Test ${file} does not exist."
    fi
    dir=`dirname "${file}"`
    echo -e "${dir}\t${file}"
    n=$[$n+1]
  done
# find all tests
else
  # Every *_test.py below the tests dir, grouped by directory.
  find . -type f -name '*_test.py' -print0 | \
  xargs -0 -n 1 dirname | uniq | \
  while read dir ; do
    printVerbose ====== Directory "$dir"
    for file in ${dir}/*_test.py; do
      echo -e "${dir}\t${file}"
    done
  done
fi
) | \
# process the list to run each test
(
# Run each test with PYTHONPATH pointing at its own directory; remember
# whether any test failed so the pipeline's exit status reflects it.
final_status=0
while read dir file ; do
  printVerbose === File "${file}"
  env PYTHONPATH="${TOPDIR}/${dir}" ${PYTHON} "${file}"
  rc="$?"
  if [[ "${rc}" != "0" ]]; then
    printVerbose === Exit status "${rc}"
    final_status=1
  fi
done ;
exit "${final_status}"
)
exit $?
|
<reponame>resetius/graphtoys
#include "pipeline.h"
/* Destroy the pipeline through its virtual free hook. */
void pl_free(struct Pipeline* pl) {
    pl->free(pl);
}
/* Bind a buffer to a storage slot via the pipeline's vtable. */
void pl_storage_assign(struct Pipeline* pl, int storage_id, int buffer_id)
{
    pl->storage_assign(pl, storage_id, buffer_id);
}
/* Bind a buffer to a uniform slot via the pipeline's vtable. */
void pl_uniform_assign(struct Pipeline* pl, int uniform_id, int buffer_id)
{
    pl->uniform_assign(pl, uniform_id, buffer_id);
}
/* Bind a buffer to a descriptor and return the implementation's result. */
int pl_buffer_assign(struct Pipeline* pl, int descriptor_id, int buffer_id)
{
    return pl->buffer_assign(pl, descriptor_id, buffer_id);
}
/* Select the texture used by subsequent draws, via the vtable. */
void pl_use_texture(struct Pipeline* pl, void* texture) {
    pl->use_texture(pl, texture);
}
/* Begin using the pipeline, via the vtable. */
void pl_start(struct Pipeline* pl) {
    pl->start(pl);
}
/* Issue a draw for the given buffer, via the vtable. */
void pl_draw(struct Pipeline* pl, int buffer_id) {
    pl->draw(pl, buffer_id);
}
|
#!/usr/bin/env bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Enable this line when developing a new end-to-end test
#set -Eexuo pipefail
set -o pipefail

if [[ -z $FLINK_DIR ]]; then
    echo "FLINK_DIR needs to point to a Flink distribution directory"
    exit 1
fi

# Detect the host OS once; several helpers below branch on OS_TYPE.
# BUG FIX: the fallback branch referenced ${unameOut}, which was never
# assigned (the `uname -s` output was only evaluated inline in the case
# expression); capture it in a variable first.
unameOut="$(uname -s)"
case "${unameOut}" in
    Linux*)     OS_TYPE=linux;;
    Darwin*)    OS_TYPE=mac;;
    CYGWIN*)    OS_TYPE=cygwin;;
    MINGW*)     OS_TYPE=mingw;;
    *)          OS_TYPE="UNKNOWN:${unameOut}"
esac

export EXIT_CODE=0
export TASK_SLOTS_PER_TM_HA=4

echo "Flink dist directory: $FLINK_DIR"

# Extract the first <version> element from the end-to-end pom.
FLINK_VERSION=$(cat ${END_TO_END_DIR}/pom.xml | sed -n 's/.*<version>\(.*\)<\/version>/\1/p')

# Resolve TEST_INFRA_DIR to a physical absolute path.
TEST_ROOT=`pwd -P`
TEST_INFRA_DIR="$END_TO_END_DIR/test-scripts/"
cd $TEST_INFRA_DIR
TEST_INFRA_DIR=`pwd -P`
cd $TEST_ROOT

NODENAME=${NODENAME:-`hostname -f`}

# REST_PROTOCOL and CURL_SSL_ARGS can be modified in common_ssl.sh if SSL is activated
# they should be used in curl command to query Flink REST API
REST_PROTOCOL="http"
CURL_SSL_ARGS=""
source "${TEST_INFRA_DIR}/common_ssl.sh"
# Report macOS memory usage per vm_stat category plus the physical total.
function print_mem_use_osx {
    declare -a mem_types=("active" "inactive" "wired down")
    used=""
    for mem_type in "${mem_types[@]}"
    do
        # vm_stat prints page counts with a trailing '.'; strip it
        # (rev|cut|rev) and convert 4 KiB pages to MB.
        used_type=$(vm_stat | grep "Pages ${mem_type}:" | awk '{print $NF}' | rev | cut -c 2- | rev)
        let used_type="(${used_type}*4096)/1024/1024"
        used="$used $mem_type=${used_type}MB"
    done
    # Total physical memory in MB.
    let mem=$(sysctl -n hw.memsize)/1024/1024
    echo "Memory Usage: ${used} total=${mem}MB"
}
# Report memory usage: vm_stat-based on macOS, free(1)-based elsewhere.
function print_mem_use {
    if [[ "$OS_TYPE" != "mac" ]]; then
        free -m | awk 'NR==2{printf "Memory Usage: used=%sMB total=%sMB %.2f%%\n", $3,$2,$3*100/$2 }'
    else
        print_mem_use_osx
    fi
}
# Keep pristine copies of the config files the tests mutate, so
# revert_default_config can restore them afterwards.
function backup_config() {
    for f in masters flink-conf.yaml; do
        cp "$FLINK_DIR/conf/$f" "$FLINK_DIR/conf/$f.bak"
    done
}
# Restore any config files backed up by backup_config and reset the
# SSL-related globals to their plain-HTTP defaults.
function revert_default_config() {
    for f in masters flink-conf.yaml; do
        if [ -f "$FLINK_DIR/conf/$f.bak" ]; then
            mv -f "$FLINK_DIR/conf/$f.bak" "$FLINK_DIR/conf/$f"
        fi
    done
    REST_PROTOCOL="http"
    CURL_SSL_ARGS=""
}
# Append a "key: value" entry to flink-conf.yaml.
# $1 = configuration key, $2 = value.
function set_conf() {
    CONF_NAME=$1
    VAL=$2
    echo "$CONF_NAME: $VAL" >> $FLINK_DIR/conf/flink-conf.yaml
}
# Rewrite an existing "key: old" line in flink-conf.yaml to "key: new".
# $1 = key, $2 = old value, $3 = new value.
# NOTE(review): the values are interpolated into the sed expression
# unescaped, so values containing '/' or regex metacharacters will break
# or mismatch — confirm callers only pass plain tokens.
function change_conf() {
    CONF_NAME=$1
    OLD_VAL=$2
    NEW_VAL=$3
    sed -i -e "s/${CONF_NAME}: ${OLD_VAL}/${CONF_NAME}: ${NEW_VAL}/" ${FLINK_DIR}/conf/flink-conf.yaml
}
# Write a fresh masters file and a ZooKeeper-HA flink-conf.yaml, wiping
# any previous HA recovery state under $TEST_DATA_DIR.
function create_ha_config() {
    # clean up the dir that will be used for zookeeper storage
    # (see high-availability.zookeeper.storageDir below)
    if [ -e $TEST_DATA_DIR/recovery ]; then
        echo "File ${TEST_DATA_DIR}/recovery exists. Deleting it..."
        rm -rf $TEST_DATA_DIR/recovery
    fi

    # create the masters file (only one currently).
    # This must have all the masters to be used in HA.
    echo "localhost:8081" > ${FLINK_DIR}/conf/masters

    # then move on to create the flink-conf.yaml
    # (the sed strips one leading space per heredoc line)
    sed 's/^ //g' > ${FLINK_DIR}/conf/flink-conf.yaml << EOL
#==============================================================================
# Common
#==============================================================================
jobmanager.rpc.address: localhost
jobmanager.rpc.port: 6123
jobmanager.heap.mb: 1024
taskmanager.heap.mb: 1024
taskmanager.numberOfTaskSlots: ${TASK_SLOTS_PER_TM_HA}
#==============================================================================
# High Availability
#==============================================================================
high-availability: zookeeper
high-availability.zookeeper.storageDir: file://${TEST_DATA_DIR}/recovery/
high-availability.zookeeper.quorum: localhost:2181
high-availability.zookeeper.path.root: /flink
high-availability.cluster-id: /test_cluster_one
#==============================================================================
# Web Frontend
#==============================================================================
rest.port: 8081
queryable-state.server.ports: 9000-9009
queryable-state.proxy.ports: 9010-9019
EOL
}
# Print this node's IP address(es), space-separated, on stdout.
function get_node_ip {
    local ip_addr

    if [[ ${OS_TYPE} == "linux" ]]; then
        ip_addr=$(hostname -I)
    elif [[ ${OS_TYPE} == "mac" ]]; then
        # macOS has no `hostname -I`; scrape ifconfig instead.
        ip_addr=$(
            ifconfig |
                grep -E "([0-9]{1,3}\.){3}[0-9]{1,3}" | # grep IPv4 addresses only
                grep -v 127.0.0.1 |                     # do not use 127.0.0.1 (to be consistent with hostname -I)
                awk '{ print $2 }' |                    # extract ip from row
                paste -sd " " -                         # combine everything to one line
        )
    else
        echo "Warning: Unsupported OS_TYPE '${OS_TYPE}' for 'get_node_ip'. Falling back to 'hostname -I' (linux)"
        ip_addr=$(hostname -I)
    fi
    echo ${ip_addr}
}
# Bring up a ZooKeeper-backed HA cluster: write the HA config, start a
# local ZooKeeper, then start the Flink cluster itself.
function start_ha_cluster {
    create_ha_config
    start_local_zk
    start_cluster
}
# Start the ZooKeeper instance(s) declared in conf/zoo.cfg, locally only.
function start_local_zk {
    # Parses the zoo.cfg and starts locally zk.

    # This is almost the same code as the
    # /bin/start-zookeeper-quorum.sh without the SSH part and only running for localhost.
    while read server ; do
        server=$(echo -e "${server}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//') # trim

        # match server.id=address[:port[:port]]
        if [[ $server =~ ^server\.([0-9]+)[[:space:]]*\=[[:space:]]*([^: \#]+) ]]; then
            id=${BASH_REMATCH[1]}
            address=${BASH_REMATCH[2]}

            # Refuse anything but localhost since we don't SSH anywhere.
            if [ "${address}" != "localhost" ]; then
                echo "[ERROR] Parse error. Only available for localhost. Expected address 'localhost' but got '${address}'"
                exit 1
            fi
            ${FLINK_DIR}/bin/zookeeper.sh start $id
        else
            echo "[WARN] Parse error. Skipping config entry '$server'."
        fi
    done < <(grep "^server\." "${FLINK_DIR}/conf/zoo.cfg")
}
# Poll the dispatcher's /taskmanagers REST endpoint until it answers with
# a non-empty taskmanager list, or exit 1 after the timeout.
function wait_dispatcher_running {
    # wait at most 10 seconds until the dispatcher is up
    local QUERY_URL="${REST_PROTOCOL}://${NODENAME}:8081/taskmanagers"
    local TIMEOUT=10
    for i in $(seq 1 ${TIMEOUT}); do
        # without the || true this would exit our script if the JobManager is not yet up
        QUERY_RESULT=$(curl ${CURL_SSL_ARGS} "$QUERY_URL" 2> /dev/null || true)

        # ensure the taskmanagers field is there at all and is not empty
        if [[ ${QUERY_RESULT} =~ \{\"taskmanagers\":\[.+\]\} ]]; then
            echo "Dispatcher REST endpoint is up."
            return
        fi

        echo "Waiting for dispatcher REST endpoint to come up..."
        sleep 1
    done
    echo "Dispatcher REST endpoint has not started within a timeout of ${TIMEOUT} sec"
    exit 1
}
# Start the local Flink cluster and block until the dispatcher REST
# endpoint reports a non-empty taskmanager list.
function start_cluster {
    "$FLINK_DIR"/bin/start-cluster.sh
    wait_dispatcher_running
}
# Launch the requested number ($1) of additional task managers.
function start_taskmanagers {
    tmnum=$1
    echo "Start ${tmnum} more task managers"
    for i in $(seq 1 $tmnum); do
        "$FLINK_DIR"/bin/taskmanager.sh start
    done
}
# Start one additional task manager and block until the REST API reports
# one more running TM than before. Exits 1 if the cluster is unreachable.
function start_and_wait_for_tm {
    tm_query_result=`query_running_tms`

    # we assume that the cluster is running
    if ! [[ ${tm_query_result} =~ \{\"taskmanagers\":\[.*\]\} ]]; then
        echo "Your cluster seems to be unresponsive at the moment: ${tm_query_result}" 1>&2
        exit 1
    fi

    running_tms=`query_number_of_running_tms`
    ${FLINK_DIR}/bin/taskmanager.sh start
    wait_for_number_of_running_tms $((running_tms+1))
}
# Fetch the raw /taskmanagers JSON from the REST endpoint (stdout).
function query_running_tms {
local url="${REST_PROTOCOL}://${NODENAME}:8081/taskmanagers"
curl ${CURL_SSL_ARGS} -s "${url}"
}
# Count registered TMs by counting occurrences of the "id" key in the JSON
# (grep -o emits one line per match, so wc -l counts occurrences).
function query_number_of_running_tms {
query_running_tms | grep -o "id" | wc -l
}
# Poll until exactly $1 task managers are reported running; exit 1 after
# 10 polls spaced 4 seconds apart (~40 sec total).
function wait_for_number_of_running_tms {
local TM_NUM_TO_WAIT=${1}
local TIMEOUT_COUNTER=10
local TIMEOUT_INC=4
local TIMEOUT=$(( $TIMEOUT_COUNTER * $TIMEOUT_INC ))
local TM_NUM_TEXT="Number of running task managers"
for i in $(seq 1 ${TIMEOUT_COUNTER}); do
local TM_NUM=`query_number_of_running_tms`
# equality check: waits for the exact target count, not "at least"
if [ $((TM_NUM - TM_NUM_TO_WAIT)) -eq 0 ]; then
echo "${TM_NUM_TEXT} has reached ${TM_NUM_TO_WAIT}."
return
else
echo "${TM_NUM_TEXT} ${TM_NUM} is not yet ${TM_NUM_TO_WAIT}."
fi
sleep ${TIMEOUT_INC}
done
echo "${TM_NUM_TEXT} has not reached ${TM_NUM_TO_WAIT} within a timeout of ${TIMEOUT} sec"
exit 1
}
# Scan all Flink log files for the word "error" (case-insensitive), after
# filtering out a whitelist of known-benign messages. On a hit, dumps the
# logs and sets EXIT_CODE=1 (does not exit immediately).
# NOTE: the first grep is `-rv` (recursive, inverted) so each later `grep -v`
# further removes whitelisted lines; the final `grep -ic` does the counting.
function check_logs_for_errors {
echo "Checking for errors..."
error_count=$(grep -rv "GroupCoordinatorNotAvailableException" $FLINK_DIR/log \
| grep -v "RetriableCommitFailedException" \
| grep -v "NoAvailableBrokersException" \
| grep -v "Async Kafka commit failed" \
| grep -v "DisconnectException" \
| grep -v "AskTimeoutException" \
| grep -v "Error while loading kafka-version.properties" \
| grep -v "WARN akka.remote.transport.netty.NettyTransport" \
| grep -v "WARN org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline" \
| grep -v "jvm-exit-on-fatal-error" \
| grep -v '^INFO:.*AWSErrorCode=\[400 Bad Request\].*ServiceEndpoint=\[https://.*\.s3\.amazonaws\.com\].*RequestType=\[HeadBucketRequest\]' \
| grep -v "RejectedExecutionException" \
| grep -v "An exception was thrown by an exception handler" \
| grep -v "java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/exceptions/YarnException" \
| grep -v "java.lang.NoClassDefFoundError: org/apache/hadoop/conf/Configuration" \
| grep -v "org.apache.flink.fs.shaded.hadoop3.org.apache.commons.beanutils.FluentPropertyBeanIntrospector - Error when creating PropertyDescriptor for public final void org.apache.flink.fs.shaded.hadoop3.org.apache.commons.configuration2.AbstractConfiguration.setProperty(java.lang.String,java.lang.Object)! Ignoring this property." \
| grep -v "Error while loading kafka-version.properties :null" \
| grep -v "Failed Elasticsearch item request" \
| grep -ic "error" || true)
if [[ ${error_count} -gt 0 ]]; then
echo "Found error in log files:"
cat $FLINK_DIR/log/*
EXIT_CODE=1
else
echo "No errors in log files."
fi
}
# Scan all Flink log files for "exception" (case-insensitive), after filtering
# out known-benign exceptions. On a hit, dumps the logs and sets EXIT_CODE=1.
function check_logs_for_exceptions {
echo "Checking for exceptions..."
exception_count=$(grep -rv "GroupCoordinatorNotAvailableException" $FLINK_DIR/log \
| grep -v "RetriableCommitFailedException" \
| grep -v "NoAvailableBrokersException" \
| grep -v "Async Kafka commit failed" \
| grep -v "DisconnectException" \
| grep -v "AskTimeoutException" \
| grep -v "WARN akka.remote.transport.netty.NettyTransport" \
| grep -v "WARN org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline" \
| grep -v '^INFO:.*AWSErrorCode=\[400 Bad Request\].*ServiceEndpoint=\[https://.*\.s3\.amazonaws\.com\].*RequestType=\[HeadBucketRequest\]' \
| grep -v "RejectedExecutionException" \
| grep -v "An exception was thrown by an exception handler" \
| grep -v "Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.yarn.exceptions.YarnException" \
| grep -v "Caused by: java.lang.ClassNotFoundException: org.apache.hadoop.conf.Configuration" \
| grep -v "java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/exceptions/YarnException" \
| grep -v "java.lang.NoClassDefFoundError: org/apache/hadoop/conf/Configuration" \
| grep -v "java.lang.Exception: Execution was suspended" \
| grep -v "java.io.InvalidClassException: org.apache.flink.formats.avro.typeutils.AvroSerializer" \
| grep -v "Caused by: java.lang.Exception: JobManager is shutting down" \
| grep -v "java.lang.Exception: Artificial failure" \
| grep -v "org.apache.flink.runtime.checkpoint.decline" \
| grep -v "org.elasticsearch.ElasticsearchException" \
| grep -v "Elasticsearch exception" \
| grep -ic "exception" || true)
if [[ ${exception_count} -gt 0 ]]; then
echo "Found exception in log files:"
cat $FLINK_DIR/log/*
EXIT_CODE=1
else
echo "No exceptions in log files."
fi
}
# Fail (EXIT_CODE=1) if any .out file under the log dir contains any character;
# e2e tests expect stdout/stderr of the daemons to be empty.
function check_logs_for_non_empty_out_files {
echo "Checking for non-empty .out files..."
if grep -ri "." $FLINK_DIR/log/*.out > /dev/null; then
echo "Found non-empty .out files:"
cat $FLINK_DIR/log/*.out
EXIT_CODE=1
else
echo "No non-empty .out files."
fi
}
# Full teardown: graceful cluster stop, then force-kill any surviving
# TaskManager/JobManager JVMs.
function shutdown_all {
stop_cluster
tm_kill_all
jm_kill_all
}
# Stop the Flink cluster; additionally stop ZooKeeper if a
# FlinkZooKeeperQuorumPeer JVM is still running (HA setups).
function stop_cluster {
"$FLINK_DIR"/bin/stop-cluster.sh
# stop zookeeper only if there are processes running
zookeeper_process_count=$(jps | grep -c 'FlinkZooKeeperQuorumPeer' || true)
if [[ ${zookeeper_process_count} -gt 0 ]]; then
echo "Stopping zookeeper..."
"$FLINK_DIR"/bin/zookeeper.sh stop
fi
}
# Block until the standalonesession log records a transition of job $1 from
# state $2 to state $3. NOTE: no timeout — loops forever if the transition
# never happens.
function wait_for_job_state_transition {
local job=$1
local initial_state=$2
local next_state=$3
echo "Waiting for job ($job) to switch from state ${initial_state} to state ${next_state} ..."
while : ; do
N=$(grep -o "($job) switched from state ${initial_state} to ${next_state}" $FLINK_DIR/log/*standalonesession*.log | tail -1)
if [[ -z $N ]]; then
sleep 1
else
break
fi
done
}
# Wait up to 10 seconds until `flink list -r` shows job $1 as running;
# exits the script with 1 on timeout.
function wait_job_running {
local TIMEOUT=10
for i in $(seq 1 ${TIMEOUT}); do
JOB_LIST_RESULT=$("$FLINK_DIR"/bin/flink list -r | grep "$1")
if [[ "$JOB_LIST_RESULT" == "" ]]; then
echo "Job ($1) is not yet running."
else
echo "Job ($1) is running."
return
fi
sleep 1
done
echo "Job ($1) has not started within a timeout of ${TIMEOUT} sec"
exit 1
}
# Block until the standalonesession log reports job $1 reaching globally
# terminal state $2 (e.g. FINISHED, CANCELED). No timeout.
function wait_job_terminal_state {
local job=$1
local terminal_state=$2
echo "Waiting for job ($job) to reach terminal state $terminal_state ..."
while : ; do
N=$(grep -o "Job $job reached globally terminal state $terminal_state" $FLINK_DIR/log/*standalonesession*.log | tail -1 || true)
if [[ -z $N ]]; then
sleep 1
else
break
fi
done
}
# Trigger a savepoint for job $1 into target directory $2.
function take_savepoint {
"$FLINK_DIR"/bin/flink savepoint $1 $2
}
# Cancel job $1 via the CLI.
function cancel_job {
"$FLINK_DIR"/bin/flink cancel $1
}
# Like check_result_hash_no_exit, but terminates the script with the
# checker's non-zero code on mismatch/failure.
function check_result_hash {
local error_code=0
check_result_hash_no_exit "$@" || error_code=$?
if [ "$error_code" != "0" ]
then
exit $error_code
fi
}
# Compare the md5 of the sorted concatenation of $2* output files against the
# expected hash $3; $1 is a label for messages.
# Returns 0 on match, 1 on mismatch, 2 if no md5 binary is available.
# Uses `md5 -q` (BSD/macOS) or `md5sum` (GNU), whichever exists.
function check_result_hash_no_exit {
local name=$1
local outfile_prefix=$2
local expected=$3
local actual
if [ "`command -v md5`" != "" ]; then
actual=$(LC_ALL=C sort $outfile_prefix* | md5 -q)
elif [ "`command -v md5sum`" != "" ]; then
actual=$(LC_ALL=C sort $outfile_prefix* | md5sum | awk '{print $1}')
else
echo "Neither 'md5' nor 'md5sum' binary available."
return 2
fi
if [[ "$actual" != "$expected" ]]
then
echo "FAIL $name: Output hash mismatch.  Got $actual, expected $expected."
echo "head hexdump of actual:"
head $outfile_prefix* | hexdump -c
return 1
else
echo "pass $name"
# Output files are left behind in /tmp
fi
return 0
}
# This function starts the given number of task managers and monitors their processes.
# If a task manager process goes away a replacement is started.
# Runs forever (every 5s); intended to be launched in the background and
# killed by the caller.
function tm_watchdog {
local expectedTm=$1
while true;
do
runningTm=`jps | grep -Eo 'TaskManagerRunner|TaskManager' | wc -l`;
count=$((expectedTm-runningTm))
# NOTE: a negative count (too many TMs) also triggers start_taskmanagers,
# whose loop then runs zero times — effectively a no-op.
if (( count != 0 )); then
start_taskmanagers ${count} > /dev/null
fi
sleep 5;
done
}
# Kills all job manager.
function jm_kill_all {
kill_all 'StandaloneSessionClusterEntrypoint'
}
# Kills all task manager.
function tm_kill_all {
kill_all 'TaskManagerRunner|TaskManager'
}
# Kills all processes that match the given name.
# NOTE: `wait` only reaps children of this shell; for externally started JVMs
# it fails, which the trailing `|| true` deliberately swallows.
function kill_all {
local pid=`jps | grep -E "${1}" | cut -d " " -f 1 || true`
kill ${pid} 2> /dev/null || true
wait ${pid} 2> /dev/null || true
}
# SIGKILL one randomly chosen TaskManager JVM (sort -R shuffles the jps output).
function kill_random_taskmanager {
KILL_TM=$(jps | grep "TaskManager" | sort -R | head -n 1 | awk '{print $1}')
kill -9 "$KILL_TM"
echo "TaskManager $KILL_TM killed."
}
# Enable the slf4j metric reporter by copying its jar into lib/ and setting
# the reporter class/interval in the Flink config.
# $1 - optional report interval (default "1 SECONDS").
function setup_flink_slf4j_metric_reporter() {
INTERVAL="${1:-1 SECONDS}"
cp $FLINK_DIR/opt/flink-metrics-slf4j-*.jar $FLINK_DIR/lib/
set_conf "metrics.reporter.slf4j.class" "org.apache.flink.metrics.slf4j.Slf4jReporter"
set_conf "metrics.reporter.slf4j.interval" "${INTERVAL}"
}
# Undo setup_flink_slf4j_metric_reporter's jar copy (config entries are left
# in place).
function rollback_flink_slf4j_metric_reporter() {
rm $FLINK_DIR/lib/flink-metrics-slf4j-*.jar
}
# Query a single job metric via REST and print its value (empty if absent).
# $1 - job id; $2 - metric name.
function get_job_metric {
local job_id=$1
local metric_name=$2
local json=$(curl ${CURL_SSL_ARGS} -s ${REST_PROTOCOL}://${NODENAME}:8081/jobs/${job_id}/metrics?get=${metric_name})
# extract the "value" field from the JSON response with sed
local metric_value=$(echo ${json} | sed -n 's/.*"value":"\(.*\)".*/\1/p')
echo ${metric_value}
}
# Print the latest numRecordsIn metric value logged for operator $1 of job
# $2 (default "General purpose test job") by the slf4j metric reporter,
# or 0 if no sample has been logged yet.
function get_metric_processed_records {
OPERATOR=$1
JOB_NAME="${2:-General purpose test job}"
N=$(grep ".${JOB_NAME}.$OPERATOR.numRecordsIn:" $FLINK_DIR/log/*taskexecutor*.log | sed 's/.* //g' | tail -1)
# FIX: quote $N — the unquoted form breaks the [ -z ] test (and can error)
# if the extracted value ever contains whitespace.
if [ -z "$N" ]; then
N=0
fi
echo $N
}
# Print how many numRecordsIn samples have been logged for operator $1 of
# job $2 (default "General purpose test job"); 0 if none.
function get_num_metric_samples {
OPERATOR=$1
JOB_NAME="${2:-General purpose test job}"
N=$(grep ".${JOB_NAME}.$OPERATOR.numRecordsIn:" $FLINK_DIR/log/*taskexecutor*.log | wc -l)
# FIX: quote $N so the [ -z ] test is well-formed for any value.
if [ -z "$N" ]; then
N=0
fi
echo $N
}
# Block until operator $1 of job $3 has processed at least $2 records
# (default 200) *after* the baseline sample count ($4, defaults to the
# current count) — i.e. records seen in a new/restored execution only.
function wait_oper_metric_num_in_records {
OPERATOR=$1
MAX_NUM_METRICS="${2:-200}"
JOB_NAME="${3:-General purpose test job}"
# FIX: '${JOB_NAME}' was single-quoted, so the literal string ${JOB_NAME}
# (not its value) was passed and the baseline was computed for a
# non-existent job. Use double quotes to expand the variable.
NUM_METRICS=$(get_num_metric_samples ${OPERATOR} "${JOB_NAME}")
OLD_NUM_METRICS=${4:-${NUM_METRICS}}
# monitor the numRecordsIn metric of the state machine operator in the second execution
# we let the test finish once the second restore execution has processed 200 records
while : ; do
NUM_METRICS=$(get_num_metric_samples ${OPERATOR} "${JOB_NAME}")
NUM_RECORDS=$(get_metric_processed_records ${OPERATOR} "${JOB_NAME}")
# only account for metrics that appeared in the second execution
if (( $OLD_NUM_METRICS >= $NUM_METRICS )) ; then
NUM_RECORDS=0
fi
if (( $NUM_RECORDS < $MAX_NUM_METRICS )); then
echo "Waiting for job to process up to ${MAX_NUM_METRICS} records, current progress: ${NUM_RECORDS} records ..."
sleep 1
else
break
fi
done
}
# Block until string $1 occurs at least $2 times in the *$3*.log files
# (default log group: "standalonesession"). No timeout.
# NOTE: function name keeps the historical "occurence" spelling — callers
# depend on it.
function wait_num_of_occurence_in_logs {
local text=$1
local number=$2
local logs
if [ -z "$3" ]; then
logs="standalonesession"
else
logs="$3"
fi
echo "Waiting for text ${text} to appear ${number} of times in logs..."
while : ; do
N=$(grep -o "${text}" $FLINK_DIR/log/*${logs}*.log | wc -l)
# FIX: quote $N so the [ -z ] test cannot break on unexpected output.
if [ -z "$N" ]; then
N=0
fi
if (( N < number )); then
sleep 1
else
break
fi
done
}
# Block until job $1 has completed at least $2 checkpoints, as reported in
# the standalonesession logs. No timeout.
function wait_num_checkpoints {
JOB=$1
NUM_CHECKPOINTS=$2
echo "Waiting for job ($JOB) to have at least $NUM_CHECKPOINTS completed checkpoints ..."
while : ; do
N=$(grep -o "Completed checkpoint [1-9]* for job $JOB" $FLINK_DIR/log/*standalonesession*.log | awk '{print $3}' | tail -1)
# FIX: quote $N — unquoted, a multi-word value would make the test error.
if [ -z "$N" ]; then
N=0
fi
if (( N < NUM_CHECKPOINTS )); then
sleep 1
else
break
fi
done
}
# Starts the timer. Note that nested timers are not supported.
# Uses bash's special SECONDS variable, which counts up after assignment.
function start_timer {
SECONDS=0
}
# prints the number of minutes and seconds that have elapsed since the last call to start_timer
function end_timer {
duration=$SECONDS
echo "$(($duration / 60)) minutes and $(($duration % 60)) seconds"
}
# Remove all daemon stdout capture files from the log directory.
function clean_stdout_files {
rm ${FLINK_DIR}/log/*.out
echo "Deleted all stdout files under ${FLINK_DIR}/log/"
}
# Expect a string to appear in the log files of the task manager before a given timeout
# $1: expected string
# $2: timeout in seconds
# Exits the script with 1 on timeout.
function expect_in_taskmanager_logs {
local expected="$1"
local timeout=$2
local i=0
# note: glob pattern, intentionally left unquoted in the grep call below
local logfile="${FLINK_DIR}/log/flink*taskexecutor*log"
while ! grep "${expected}" ${logfile} > /dev/null; do
sleep 1s
((i++))
if ((i > timeout)); then
echo "A timeout occurred waiting for '${expected}' to appear in the taskmanager logs"
exit 1
fi
done
}
# Block until the job's fullRestarts metric exceeds the baseline $1.
# $1 - restart count observed before inducing the failure; $2 - job id.
function wait_for_restart_to_complete {
local base_num_restarts=$1
local jobid=$2
local current_num_restarts=${base_num_restarts}
local expected_num_restarts=$((current_num_restarts + 1))
echo "Waiting for restart to happen"
# FIX: use -lt instead of an equality test so the loop also terminates
# when more than one restart happens between two polls (overshooting the
# expected count previously caused an infinite loop).
while [[ ${current_num_restarts} -lt ${expected_num_restarts} ]]; do
sleep 5
current_num_restarts=$(get_job_metric ${jobid} "fullRestarts")
# metric may be briefly unavailable right after a failure; fall back to
# the baseline so the loop keeps polling
if [[ -z ${current_num_restarts} ]]; then
current_num_restarts=${base_num_restarts}
fi
done
}
# Print the directory of the highest-numbered completed checkpoint under $1
# (version-sorts chk-N dirs and picks the newest that has a _metadata file).
function find_latest_completed_checkpoint {
local checkpoint_root_directory=$1
# a completed checkpoint must contain the _metadata file
local checkpoint_meta_file=$(ls -d ${checkpoint_root_directory}/chk-[1-9]*/_metadata | sort -Vr | head -n1)
echo "$(dirname "${checkpoint_meta_file}")"
}
# Run a command up to $1 times with $2 seconds between attempts.
# $1 - retry count; $2 - backoff seconds; $3.. - command and its arguments.
# Returns 0 on the first success, 1 if all attempts fail.
# NOTE: the command is re-split by word; arguments containing spaces will
# not survive the ${command} expansion.
function retry_times() {
local retriesNumber=$1
local backoff=$2
local command=${@:3}
for (( i = 0; i < ${retriesNumber}; i++ ))
do
if ${command}; then
return 0
fi
echo "Command: ${command} failed. Retrying..."
sleep ${backoff}
done
echo "Command: ${command} failed ${retriesNumber} times."
return 1
}
|
#include <catch.hpp>
#include <lorina/genlib.hpp>
#include <sstream>
#include <string>
using namespace lorina;
// Plain record capturing one parsed GENLIB gate, as delivered to
// test_reader::on_gate by the lorina parser.
struct gate
{
std::string name;             // gate name token
std::string expression;       // boolean expression (right of OUTPUT=)
double area;                  // gate area
std::vector<pin_spec> pins;   // parsed PIN specifications
std::string output_pin;       // name of the output pin (left of '=')
};
// Reader that records every parsed gate into an external vector so the
// tests can inspect the parser's callbacks.
struct test_reader : public genlib_reader
{
public:
explicit test_reader( std::vector<gate>& gates )
: gates( gates )
{}
// Callback invoked by read_genlib for each GATE definition; appends a
// gate record to the shared vector.
void on_gate( std::string const& name, std::string const& expression, double area, std::vector<pin_spec> const& pins, std::string const& output_pin ) const
{
gates.emplace_back( gate{name, expression, area, pins, output_pin} );
}
public:
std::vector<gate>& gates;  // not owned; outlives the reader in the tests
};
// Smoke test: a well-formed library (with comment line and optional PIN
// sections) parses successfully using the default genlib_reader.
TEST_CASE( "instantiate genlib_reader", "[genlib]")
{
std::string const genlib_file =
"# comment\n"
"GATE zero 0 O=0;\n"
"GATE one 0 O=1;\n"
"GATE inv1 1 O=!a; PIN * INV 1 999 1 1 1 1\n"
"GATE buf 2 O=a; PIN * NONINV 1 999 1.0 1.0 1.0 1.0\n"
;
text_diagnostics consumer;
diagnostic_engine diag( &consumer );
std::istringstream iss( genlib_file );
std::vector<gate> gate_definitions;
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::success );
}
// Malformed-input cases: each sub-scope feeds one broken library and checks
// the parser's return code (mostly parse_error; two cases are tolerated
// and return success).
TEST_CASE( "error cases", "[genlib]")
{
{
/* not all required fields have been specified */
std::string const genlib_file = "GATE zero";
std::istringstream iss( genlib_file );
diagnostic_consumer consumer;
diagnostic_engine diag( &consumer );
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::parse_error );
}
{
/* the keyword `GATE` in the beginning is missing */
std::string const genlib_file = "zero 0 O=0; PIN * INV 1 999 1 1 1 1";
std::istringstream iss( genlib_file );
diagnostic_consumer consumer;
diagnostic_engine diag( &consumer );
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::parse_error );
}
{
/* the expression is not terminated by a semicolon */
std::string const genlib_file = "GATE zero 0 O=0 PIN * INV 1 999 1 1 1 1";
std::istringstream iss( genlib_file );
diagnostic_consumer consumer;
diagnostic_engine diag( &consumer );
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::parse_error );
}
{
/* the pin specification does not start with the keyword `PIN` */
std::string const genlib_file = "GATE zero 0 O=0; a INV 1 999 1 1 1 1 1";
std::istringstream iss( genlib_file );
diagnostic_consumer consumer;
diagnostic_engine diag( &consumer );
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::parse_error );
}
{
/* the phase has been specified with an unknown keyword (NONINV is misspelled) */
/* note: this is tolerated by the parser and still returns success */
std::string const genlib_file = "GATE zero 0 O=0; PIN * NOINV 1 999 1 1 1 1\n";
std::istringstream iss( genlib_file );
diagnostic_consumer consumer;
diagnostic_engine diag( &consumer );
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::success );
}
{
/* the PIN spec is incomplete and not all tokens are consumed */
std::string const genlib_file = "GATE zero 0 O=0; PIN a 1";
std::istringstream iss( genlib_file );
diagnostic_consumer consumer;
diagnostic_engine diag( &consumer );
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::parse_error );
}
{
/* multiple PINs of which one generic */
std::string const genlib_file = "GATE and 0 O=a*b; PIN a NONINV 1 999 1 1 1 1 PIN * NONINV 1 999 1 1 1 1";
std::istringstream iss( genlib_file );
diagnostic_consumer consumer;
diagnostic_engine diag( &consumer );
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::parse_error );
}
{
/* misspelled GATE with two gates definition */
std::string const genlib_file = "GTE and 0 O=a*b; PIN * NONINV 1 999 1 1 1 1\n"
"GATE or 0 O=a+b; PIN * NONINV 1 999 1 1 1 1";
std::istringstream iss( genlib_file );
diagnostic_consumer consumer;
diagnostic_engine diag( &consumer );
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::parse_error );
}
{
/* empty genlib */
std::string const genlib_file = "";
std::istringstream iss( genlib_file );
diagnostic_consumer consumer;
diagnostic_engine diag( &consumer );
CHECK( read_genlib( iss, genlib_reader{}, &diag ) == return_code::success );
}
}
// End-to-end check of parsed gate fields (name, expression, area, pin count,
// output pin) for a five-gate library, including non-"O" output pins.
TEST_CASE( "read GENLIB format", "[genlib]")
{
std::string const genlib_file =
"GATE zero 0 O=0;\n"
"GATE one 0 O=1;\n"
"GATE inv1 1 O=!a; PIN * INV 1 999 1 1 1 1\n"
"GATE buf 2 Y=a; PIN * NONINV 1 999 1.0 1.0 1.0 1.0\n"
"GATE and2 2 Y=a * b; PIN * UNKNOWN 1.0 2.0 1.0 1.0 1.0 1.0;\n"
;
text_diagnostics consumer;
diagnostic_engine diag( &consumer );
std::istringstream iss( genlib_file );
std::vector<gate> gate_definitions;
test_reader reader( gate_definitions );
CHECK( read_genlib( iss, reader, &diag ) == return_code::success );
CHECK( gate_definitions.size() == 5u );
CHECK( gate_definitions[0u].name == "zero" );
CHECK( gate_definitions[0u].expression == "0" );
CHECK( gate_definitions[0u].area == 0.0 );
CHECK( gate_definitions[0u].pins.empty() );
CHECK( gate_definitions[0u].output_pin == "O" );
CHECK( gate_definitions[1u].name == "one" );
CHECK( gate_definitions[1u].expression == "1" );
CHECK( gate_definitions[1u].area == 0.0 );
CHECK( gate_definitions[1u].pins.empty() );
CHECK( gate_definitions[1u].output_pin == "O" );
CHECK( gate_definitions[2u].name == "inv1" );
CHECK( gate_definitions[2u].expression == "!a" );
CHECK( gate_definitions[2u].area == 1.0 );
CHECK( gate_definitions[2u].pins.size() == 1u );
CHECK( gate_definitions[2u].output_pin == "O" );
CHECK( gate_definitions[3u].name == "buf" );
CHECK( gate_definitions[3u].expression == "a" );
CHECK( gate_definitions[3u].area == 2.0 );
CHECK( gate_definitions[3u].pins.size() == 1u );
CHECK( gate_definitions[3u].output_pin == "Y" );
CHECK( gate_definitions[4u].name == "and2" );
CHECK( gate_definitions[4u].expression == "a * b" );
CHECK( gate_definitions[4u].area == 2.0 );
CHECK( gate_definitions[4u].pins.size() == 1u );
CHECK( gate_definitions[4u].output_pin == "Y" );
}
// Detailed PIN-field checks: named pins with distinct delay values, a
// multi-line gate definition, a commented-out gate, and a generic "*" pin.
TEST_CASE( "PIN specification", "[genlib]")
{
std::string const genlib_file =
"GATE and2 1 O=a*b; PIN a INV 1.0 2.0 1.1 1.2 1.3 1.4 \n"
"\tPIN b INV 1.0 2.0 1.0 1.0 1.0 1.0;\n"
"#GATE zero 0 O=0;\n"
"GATE and3 1 O=a*b*c; PIN * UNKNOWN 1.0 2.0 1.0 1.0 1.0 1.0;\n"
;
text_diagnostics consumer;
diagnostic_engine diag( &consumer );
std::istringstream iss( genlib_file );
std::vector<gate> gate_definitions;
test_reader reader( gate_definitions );
CHECK( read_genlib( iss, reader, &diag ) == return_code::success );
CHECK( gate_definitions.size() == 2u );
/* first gate */
CHECK( gate_definitions[0u].pins.size() == 2u );
CHECK( gate_definitions[0u].pins[0u].name == "a" );
CHECK( gate_definitions[0u].pins[0u].phase == phase_type::INV );
CHECK( gate_definitions[0u].pins[0u].input_load == 1.0 );
CHECK( gate_definitions[0u].pins[0u].max_load == 2.0 );
CHECK( gate_definitions[0u].pins[0u].rise_block_delay == 1.1 );
CHECK( gate_definitions[0u].pins[0u].rise_fanout_delay == 1.2 );
CHECK( gate_definitions[0u].pins[0u].fall_block_delay == 1.3 );
CHECK( gate_definitions[0u].pins[0u].fall_fanout_delay == 1.4 );
CHECK( gate_definitions[0u].pins[1u].name == "b" );
CHECK( gate_definitions[0u].pins[1u].phase == phase_type::INV );
CHECK( gate_definitions[0u].pins[1u].input_load == 1.0 );
CHECK( gate_definitions[0u].pins[1u].max_load == 2.0 );
CHECK( gate_definitions[0u].pins[1u].rise_block_delay == 1.0 );
CHECK( gate_definitions[0u].pins[1u].rise_fanout_delay == 1.0 );
CHECK( gate_definitions[0u].pins[1u].fall_block_delay == 1.0 );
CHECK( gate_definitions[0u].pins[1u].fall_fanout_delay == 1.0 );
/* second gate */
CHECK( gate_definitions[1u].pins.size() == 1u );
CHECK( gate_definitions[1u].pins[0u].name == "*" );
CHECK( gate_definitions[1u].pins[0u].phase == phase_type::UNKNOWN );
CHECK( gate_definitions[1u].pins[0u].input_load == 1.0 );
CHECK( gate_definitions[1u].pins[0u].max_load == 2.0 );
CHECK( gate_definitions[1u].pins[0u].rise_block_delay == 1.0 );
CHECK( gate_definitions[1u].pins[0u].rise_fanout_delay == 1.0 );
CHECK( gate_definitions[1u].pins[0u].fall_block_delay == 1.0 );
CHECK( gate_definitions[1u].pins[0u].fall_fanout_delay == 1.0 );
}
|
from typing import List, Dict, Union
def check_dependency_conflicts(package_dependencies: List[Dict[str, Union[str, List[str]]]]) -> List[str]:
    """Return the dependencies that are required by more than one package.

    Each entry of *package_dependencies* is a mapping with a ``"name"`` key
    and an ``"install_requires"`` list. The first package to mention a
    dependency "owns" it; every later package that also requires it adds
    one occurrence of that dependency to the returned list (so a dependency
    shared by N packages appears N-1 times, preserving encounter order).
    """
    owner_by_dependency: Dict[str, str] = {}
    conflicting: List[str] = []
    for pkg in package_dependencies:
        pkg_name = pkg["name"]
        for dep in pkg["install_requires"]:
            # setdefault records the first owner and returns it thereafter
            first_owner = owner_by_dependency.setdefault(dep, pkg_name)
            if first_owner != pkg_name:
                conflicting.append(dep)
    return conflicting
<gh_stars>1-10
//
// OceanLoad3D.cpp
// AxiSEM3D
//
// Created by <NAME> on 4/12/20.
// Copyright © 2020 <NAME>. All rights reserved.
//
// 3D ocean-load models
#include "OceanLoad3D.hpp"
#include "Quad.hpp"
#include "vicinity.hpp"
#include "mpi.hpp"
// apply to Quad
// apply to Quad
// Fill each surface Quad with sum(rho * depth) values from this 3D model.
// Two modes:
//  * !isSuperOnly(): every rank evaluates the model locally for its quads.
//  * isSuperOnly(): only the "super" (root of the infer group) holds the
//    model grid; each rank in turn ships its surface-edge coordinates to
//    the root, the root computes the values, and ships them back.
void OceanLoad3D::applyTo(std::vector<Quad> &quads) const {
if (!isSuperOnly()) {
for (Quad &quad: quads) {
// check surface
int surfEdge = quad.getSurfaceEdge();
if (surfEdge == -1) {
continue;
}
// cardinal coordinates
const eigen::DMatX3 &spz = computeEdgeSPZ(quad, surfEdge);
// compute values
eigen::DColX sumRD;
bool elemInScope = getSumRhoDepth(spz, quad.getNodalSZ(), sumRD);
// set values to quad
if (elemInScope) {
setSumRhoDepthToQuad(sumRD, quad);
}
}
} else {
mpi::enterInfer();
for (int irank = 0; irank < mpi::nproc(); irank++) {
// step 1: gather coords on infer and send to super
std::vector<eigen::DMatX3> spzAll;
std::vector<eigen::DMat24> szAll;
if (irank == mpi::rank()) {
// gather coords
// spzAll.reserve(quads.size());
// szAll.reserve(quads.size());
for (Quad &quad: quads) {
// check surface
int surfEdge = quad.getSurfaceEdge();
if (surfEdge == -1) {
continue;
}
spzAll.push_back(computeEdgeSPZ(quad, surfEdge));
szAll.push_back(quad.getNodalSZ());
}
// send coords to super
mpi::sendVecEigen(0, spzAll, 0);
mpi::sendVecEigen(0, szAll, 1);
}
// step 2: compute values on super and send back to infer
std::vector<eigen::DColX> sumRD_All;
std::vector<eigen::IColX> elemInScopeAll;
if (mpi::root()) {
// recv coords from infer
mpi::recvVecEigen(irank, spzAll, 0);
mpi::recvVecEigen(irank, szAll, 1);
// allocate values
int nQuad = (int)spzAll.size();
sumRD_All.reserve(nQuad);
// single vector holding one in-scope flag per quad
elemInScopeAll.push_back(eigen::IColX::Zero(nQuad));
// compute values
for (int iq = 0; iq < nQuad; iq++) {
eigen::DColX sumRD;
elemInScopeAll[0](iq) = getSumRhoDepth(spzAll[iq],
szAll[iq], sumRD);
sumRD_All.push_back(sumRD);
}
// send values to infer
mpi::sendVecEigen(irank, sumRD_All, 0);
mpi::sendVecEigen(irank, elemInScopeAll, 1);
}
// step 3: set values to quads on infer
if (irank == mpi::rank()) {
// recv values from super
mpi::recvVecEigen(0, sumRD_All, 0);
mpi::recvVecEigen(0, elemInScopeAll, 1);
// iq indexes the flattened surface-quad list built in step 1;
// must iterate quads in the same order and skip the same ones
int iq = 0;
for (Quad &quad: quads) {
// check surface
int surfEdge = quad.getSurfaceEdge();
if (surfEdge == -1) {
continue;
}
// set values to quads
if (elemInScopeAll[0](iq)) {
setSumRhoDepthToQuad(sumRD_All[iq], quad);
}
iq++;
}
}
// do irank one by one
mpi::barrier();
}
mpi::enterWorld();
}
}
// set sum(rho * depth) to quad
// set sum(rho * depth) to quad
// Unflatten the per-point column vector (points concatenated along rows,
// each point contributing pointNr rows) into per-edge-point columns and
// hand them to the quad's ocean-load container.
void OceanLoad3D::setSumRhoDepthToQuad(const eigen::DColX &sumRhoDepth,
Quad &quad) const {
// edge points
int surfEdge = quad.getSurfaceEdge();
const std::vector<int> &ipnts = vicinity::constants::gEdgeIPnt[surfEdge];
const eigen::IRowN &pointNr = quad.getPointNr();
// flattened to structured
eigen::arP_DColX sumRD;
int row = 0;
for (int ip = 0; ip < spectral::nPED; ip++) {
// nr = number of azimuthal samples stored for this edge point
int nr = pointNr(ipnts[ip]);
sumRD[ip] = sumRhoDepth.block(row, 0, nr, 1);
row += nr;
}
// set to Quad
quad.getOceanLoadPtr()->addSumRhoDepth(sumRD);
}
#include "StructuredGridO3D.hpp"
#include "sg_tools.hpp"
// build from inparam
// Build an OceanLoad3D model from the input parameter tree rooted at
// keyInparam. Currently only the "StructuredGridO3D" class is supported;
// any other class name yields nullptr.
std::shared_ptr<const OceanLoad3D> OceanLoad3D::
buildInparam(const ExodusMesh &exodusMesh, const LocalMesh &localMesh,
const std::string &modelName, const std::string &keyInparam) {
// short alias
const InparamYAML &gm = inparam::gInparamModel;
const std::string &root = keyInparam;
// class name
const std::string &className = gm.get<std::string>(root + ":class_name");
// init class
if (className == "StructuredGridO3D") {
// file name
const std::string &fname = gm.get<std::string>(root + ":nc_data_file");
////////////// coords //////////////
const std::string &rootc = root + ":coordinates";
// horizontal coordinate system options (filled by sg_tools)
bool sourceCentered = false, xy = false, ellipticity = false;
sg_tools::inparamHorizontal(gm, rootc, modelName, className,
sourceCentered, xy, ellipticity);
// variables: nc variable names for the two coordinate axes + shuffling
std::array<std::string, 2> crdVarNames;
std::array<int, 2> shuffleData;
sg_tools::inparamVarRank<2>(gm, rootc, modelName, className,
crdVarNames, shuffleData);
// units
double lengthUnit = 1., angleUnit = 1.;
sg_tools::inparamUnits(gm, rootc, xy, lengthUnit, angleUnit);
////////////// data //////////////
const std::string &rootd = root + ":data_sum_rho_depth";
const std::string &dataVarName = gm.get<std::string>(rootd + ":nc_var");
double factor = gm.get<double>(rootd + ":factor");
bool superOnly = gm.get<bool>(root + ":store_grid_only_on_leaders");
// construct
return std::make_shared
<const StructuredGridO3D>(modelName, fname, crdVarNames, shuffleData,
sourceCentered, xy, ellipticity,
lengthUnit, angleUnit, dataVarName, factor,
superOnly);
} else {
// other models
}
// unknown class
return nullptr;
}
|
// Table view controller backed by a flat array of Row models; tapping a
// row forwards to the model's runTap() action.
class MyTableViewController: UITableViewController {
// data source; expected to be populated in viewDidLoad
var rows: [Row] = []
override func viewDidLoad() {
super.viewDidLoad()
// Populate the rows array with Row objects
}
// delegate: dispatch the tap to the selected row's own handler
override func tableView(_ tableView: UITableView, didSelectRowAt indexPath: IndexPath) {
rows[indexPath.row].runTap()
}
// data source: one cell per Row
override func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int {
return rows.count
}
}
import React from 'react';
import styled from 'styled-components';
// Hero section: background image (prop.Image) under a dark→red gradient
// overlay; height is driven by the `height` prop, with a 400px fallback on
// very narrow screens.
const HeaderStyle = styled.section`
position: relative;
// border: 2px solid beige;
background-image: linear-gradient(
180deg,
rgba(0, 0, 0, 0.3) 2.23%,
rgba(230, 57, 74, 0.5) 82.96%
),
url(${(prop) => prop.Image});
background-repeat: no-repeat;
background-position: center;
background-size: cover;
height: ${(prop) => prop.height};
max-width: 100vw;
@media (max-width: 360px) {
height: 400px;
}
`;
// Title block pinned to the bottom-left of the hero; widens on narrow
// screens.
const Content = styled.div`
position: absolute;
bottom: 62px;
width: calc(40% - 10%);
left: 5%;
font-size: 20px;
h1 {
font-size: 22px !important;
text-transform: uppercase;
line-height: 29px;
letter-spacing: 1px;
}
@media (max-width: 360px) {
width: calc(50%);
}
`;
function Header({ Image, height, title, titleTop, titleBottom }) {
// const Content = styled.div`
// position: absolute;
// `;
return (
<>
<HeaderStyle Image={Image} height={height} className='header'>
<div className='container'>
<Content>
<div className='header-main'>
<div className='row'>
<div className='col-md-12'>
<div className='content'>
<h1>
{title ? title : null} {titleTop ? titleTop : null}
</h1>
</div>
{titleBottom ? <br /> : null}
{titleBottom ? titleBottom : null}
</div>
</div>
</div>
</Content>
</div>
</HeaderStyle>
</>
);
}
export default Header;
|
require 'spec_helper'
FIXTURES_PATH = File.dirname(__FILE__) + '/fixtures/'
describe LearnLinter do
# sanity check: the gem exposes a truthy VERSION constant
it 'has a version number' do
expect(LearnLinter::VERSION).not_to be false
end
describe '#lint_directory' do
#.learn validations only
# Expected lint_directory result hashes. Each fixture directory exercises
# exactly one validator family (.learn / license / readme / contributing);
# all keys outside that family are expected to be false.
let(:valid_learn) {
{:dot_learn =>
{:present_dotlearn => true, :valid_yaml => true, :valid_whitespace => true, :attributes => true},
:license =>
{:present_license => false, :valid_license => false},
:readme =>
{:present_readme => false, :valid_readme => false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
let(:present_learn_invalid_yaml){
{:dot_learn =>
{:present_dotlearn=>true, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
let(:present_learn_valid_yaml_invalid_whitespace){
{:dot_learn =>
{:present_dotlearn=>true, :valid_yaml=>true, :valid_whitespace=>false, :attributes => true},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
let(:missing_learn) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
# license validations only
let(:valid_license) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>true, :valid_license=>true},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
let(:invalid_license) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>true, :valid_license=>false},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
let(:missing_license) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
# readme validations only
let(:missing_readme) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
let(:present_and_valid_readme) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>true, :valid_readme=>true},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
let(:invalid_readme) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>true, :valid_readme=>false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
# contributing validations only
let(:valid_contributing) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => true, :valid_contributing => true}
}
}
let(:invalid_contributing) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => true, :valid_contributing => false}
}
}
let(:missing_contributing) {
{:dot_learn =>
{:present_dotlearn=>false, :valid_yaml=>false, :valid_whitespace=>false, :attributes => false},
:license =>
{:present_license=>false, :valid_license=>false},
:readme =>
{:present_readme=>false, :valid_readme=>false},
:contributing =>
{:present_contributing => false, :valid_contributing => false}
}
}
# Each example lints a dedicated fixture directory and compares the full
# report hash against the corresponding let-defined expectation.
context '.learn validations' do
it 'approves directory with valid .learn file' do
linter = LearnLinter.new(FIXTURES_PATH + 'valid_learn')
expect(linter.lint_directory).to eq(valid_learn)
end
it 'reports on invalid yaml in a .learn file' do
linter = LearnLinter.new(FIXTURES_PATH + 'present_learn_invalid_yaml')
expect(linter.lint_directory).to eq(present_learn_invalid_yaml)
end
it 'reports on invalid whitespace in a .learn file' do
# YAML parses but contains disallowed whitespace (e.g. tabs).
linter = LearnLinter.new(FIXTURES_PATH + 'present_learn_valid_yaml_invalid_whitespace')
expect(linter.lint_directory).to eq(present_learn_valid_yaml_invalid_whitespace)
end
it 'reports on a missing .learn file' do
linter = LearnLinter.new(FIXTURES_PATH + 'missing_learn')
expect(linter.lint_directory).to eq(missing_learn)
end
end
# License checks: present-and-valid, present-but-invalid, and missing.
context 'license validations' do
it 'approves a directory with a valid license' do
linter = LearnLinter.new(FIXTURES_PATH + 'valid_license')
expect(linter.lint_directory).to eq(valid_license)
end
it 'reports on an invalid license' do
linter = LearnLinter.new(FIXTURES_PATH + 'invalid_license')
expect(linter.lint_directory).to eq(invalid_license)
end
it 'reports on a missing license' do
linter = LearnLinter.new(FIXTURES_PATH + 'missing_license')
expect(linter.lint_directory).to eq(missing_license)
end
end
# README checks: presence, and validity of fenced/inline code snippets.
context 'readme validations' do
it 'reports on a missing README.md' do
linter = LearnLinter.new(FIXTURES_PATH + 'missing_readme')
expect(linter.lint_directory).to eq(missing_readme)
end
it 'approves a directory that contains a README.md' do
linter = LearnLinter.new(FIXTURES_PATH + 'present_and_valid_readme')
expect(linter.lint_directory).to eq(present_and_valid_readme)
end
it 'reports on a readme with invalid code snippets' do
linter = LearnLinter.new(FIXTURES_PATH + 'invalid_readme')
expect(linter.lint_directory).to eq(invalid_readme)
end
it 'does not error when code block has three backtics and no language designator' do
# Bare ``` fences (no language) are legal markdown and must pass.
linter = LearnLinter.new(FIXTURES_PATH + 'valid_readme_with_three_backtics')
expect(linter.lint_directory).to eq(present_and_valid_readme)
end
it 'errors when in-line code is written with three backtics instead of one' do
linter = LearnLinter.new(FIXTURES_PATH + 'invalid_readme_with_three_inline_backtics')
expect(linter.lint_directory).to eq(invalid_readme)
end
it 'does not error when multiple in-line code snippets are written on a single line' do
linter = LearnLinter.new(FIXTURES_PATH + 'valid_readme_multiple_inline_code_snippets')
expect(linter.lint_directory).to eq(present_and_valid_readme)
end
it 'outputs message reporting on line numbers of all invalid code snippets' do
# Checks both the report hash and the human-readable stdout diagnostics.
linter = LearnLinter.new(FIXTURES_PATH + 'invalid_code_snippets_testing_output')
expect(linter.lint_directory).to eq(invalid_readme)
expect{linter.lint_directory}.to output(/INVALID CODE SNIPPET - line 2: This is line two with an invalid code snippet ```too many backtics```/).to_stdout
expect{linter.lint_directory}.to output(/INVALID CODE SNIPPET - line 6: ```rubZ/).to_stdout
end
it 'errors when readme contains code block with three backtics, space, then language designator' do
linter = LearnLinter.new(FIXTURES_PATH + 'testing_linter')
expect(linter.lint_directory).to eq(invalid_readme)
end
end
# The "quiet" option should suppress all stdout reporting.
context 'when quiet' do
it 'does not emit output when given an option of "quiet" ' do
linter = LearnLinter.new(FIXTURES_PATH + 'invalid_readme', "quiet")
expect {linter.lint_directory}.to_not output.to_stdout
end
end
# CONTRIBUTING checks: present-and-valid, present-but-invalid, and missing.
context 'contributing validations' do
it 'approves a directory with a valid contributing file' do
linter = LearnLinter.new(FIXTURES_PATH + 'valid_contributing')
expect(linter.lint_directory).to eq(valid_contributing)
end
it 'reports on an invalid contribution file' do
linter = LearnLinter.new(FIXTURES_PATH + 'invalid_contributing')
expect(linter.lint_directory).to eq(invalid_contributing)
end
it 'reports on a missing contribution file' do
linter = LearnLinter.new(FIXTURES_PATH + 'missing_contributing')
expect(linter.lint_directory).to eq(missing_contributing)
end
end
end
end
|
# MIT License
# Copyright(c) 2020 Futurewei Cloud
#
# Permission is hereby granted,
# free of charge, to any person obtaining a copy of this software and associated documentation files(the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and / or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Seed a local Alcor-style deployment with sample resources, all scoped to
# project 3dda2801-d675-4688-a63f-dcda8d327f50. Payloads come from ./json/.
# NOTE(review): ports 9007/9008/9009 are assumed to be the node, security-group,
# and VPC/subnet/port managers respectively — confirm against the deployment.
# Create a VPC.
curl -X POST -H "Content-Type: application/json" --data @json/cvpc.json http://localhost:9009/project/3dda2801-d675-4688-a63f-dcda8d327f50/vpcs
# Create a subnet inside the VPC.
curl -X POST -H "Content-Type: application/json" --data @json/csn.json http://localhost:9009/project/3dda2801-d675-4688-a63f-dcda8d327f50/subnets
# Register a node.
curl -X POST -H "Content-Type: application/json" --data @json/node.json http://localhost:9007/v4/nodes
# Create a security group.
curl -X POST -H "Content-Type: application/json" --data @json/sg.json http://localhost:9008/v4/3dda2801-d675-4688-a63f-dcda8d327f50/security-groups
# Create a port.
curl -X POST -H "Content-Type: application/json" --data @json/cp1b.json http://localhost:9009/project/3dda2801-d675-4688-a63f-dcda8d327f50/ports
|
'use strict';
// Register `veiculoDelete` component, along with its associated controller and template
//
// Loads the vehicle identified by the `veiculoId` route parameter into scope,
// and exposes `confirm()` which deletes it and flips `$scope.success`.
angular.
module('veiculoDelete', ['ngRoute','core.veiculo']).
component('veiculoDelete', {
templateUrl: 'veiculo-delete/veiculo-delete.template.html',
controller: ['$scope', '$routeParams', 'VeiculoService',
function VeiculoDeleteController($scope, $routeParams, VeiculoService) {
// Fetch the vehicle details; the service callback yields an array,
// of which only the first element is used.
VeiculoService.getDetalheVeiculo($routeParams.veiculoId, function (value) {
$scope.veiculo = value[0];
});
// Invoked by the template's confirmation button to perform the delete.
$scope.confirm = function () {
VeiculoService.deleteVeiculo($routeParams.veiculoId, function (value) {
// Template is expected to react to this flag (e.g. show a message).
$scope.success = true;
});
}
}
]
});
|
#!/bin/sh
#
# Report the contents of the specified SQLite contact tracing client database
#
# Copyright Diomidis Spinellis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage: $0 [database-path]
# Defaults to the standard epidose client database location when no
# argument is given. The heredoc is quoted (<<\EOF) so no shell expansion
# occurs inside the SQL; sqlite3 dot-commands toggle column/header output.
cat <<\EOF | sqlite3 "${1:-/var/lib/epidose/client-database.db}"
SELECT 'Received (and retained) ephemeral id hashes';
SELECT '';
.mode column
.headers on
SELECT DATE(day, "unixepoch") AS Day, SUBSTR(HEX(ephid_hash), 1, 10) AS 'Ephid Hash',
ocount AS Count, srssi / ocount AS RSSI FROM DailyObservations;
.headers off
SELECT '';
SELECT '';
SELECT 'Stored (and retained) sent ephemeral ids';
SELECT '';
.headers on
SELECT DATETIME(epoch * 60 * 15, "unixepoch") AS Timestamp, epoch AS Epoch,
SUBSTR(HEX(seed), 1, 10) AS Seed, SUBSTR(HEX(ephid), 1, 10) AS Ephid
from EpochIds;
EOF
|
<filename>test/extra/Memory.java
package extra;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.TreeSet;
/**
 * Exercises allocation/collection behavior of several container types
 * (array, HashMap, LinkedList, TreeSet) by repeatedly filling, mutating,
 * and clearing them while checking element sums stay consistent.
 * Appears designed as a GC/memory stress test rather than a unit test.
 */
public class Memory {
// Inner-loop repetitions per container test; kept at 1 here.
private static final int ITERATION_COUNT=1;
// Value holder that records its global creation order in `index`,
// so a Comparator can break value ties deterministically (see testTreeSet).
private static class Item {
private static int instanceCount=0;
private final int index;
private final int val;
public Item(int i) { val = i; index = instanceCount++; }
public int value() { return val; }
public int index() { return index; }
}
// Trace hook, compiled out via the constant-false guard.
private static void traceFunc(String s) {
if (false) {
System.out.println(s);
}
}
// Minimal assertion helper: throws on failure instead of using a framework.
private static void expect(boolean v) {
if (! v) throw new RuntimeException();
}
// Sum of item values over an array.
private static int runningSum(Item[] items) {
int sum=0;
for (Item item : items) {
sum += item.value();
}
return sum;
}
// Sum of item values over any collection (overload for the container tests).
private static int runningSum(Collection<Item> items) {
int sum=0;
for (Item item : items) {
sum += item.value();
}
return sum;
}
// Fills a 1750-slot array with value groups (1000x1, 500x4, 250x9), then
// overwrites/restores a 300-slot window via System.arraycopy while
// churning allocations, verifying the expected sum at each step.
private static final void testArray() {
traceFunc("testArray()");
Item[] items = new Item[1750];
for (int iter=0; iter < ITERATION_COUNT; iter++) {
for (int i=0; i < 1000; i++) {
items[i] = new Item(1);
}
for (int i=0; i < 500; i++) {
items[i+1000] = new Item(4);
}
for (int i=0; i < 250; i++) {
items[i+1500] = new Item(9);
}
expect(runningSum(items) == (1000*1 + 500*4 + 250*9));
// Overwrite slots 900..1199 with zero-valued items.
Item[] zeroItems = new Item[300];
for (int i=0; i < 300; i++) {
zeroItems[i] = new Item(0);
}
System.arraycopy(zeroItems, 0, items, 900, zeroItems.length);
// Allocation churn: repeatedly replace slot 0 to generate garbage.
for (int i=0; i < 10000; i++) {
items[0] = new Item(1);
}
expect(runningSum(items) == (900*1 + 300*4 + 250*9));
// Rebuild the window with its original values (1 below index 1000, 4 above).
for (int i=0; i < 300; i++) {
zeroItems[i] = new Item((i+900) < 1000 ? 1 : 4);
}
for (int i=0; i < 10000; i++) {
items[0] = new Item(1);
}
expect(runningSum(items) == (900*1 + 300*4 + 250*9));
System.arraycopy(zeroItems, 0, items, 900, zeroItems.length);
expect(runningSum(items) == (1000*1 + 500*4 + 250*9));
// Drop all references so the items become collectable.
for (int i=0; i < 1750; i++) {
items[i] = null;
}
}
}
// Same fill/remove/refill pattern as testArray, keyed 0..1749 in a HashMap.
private static final void testHashMap() {
traceFunc("testHashMap()");
HashMap<Integer, Item> items = new HashMap<Integer, Item>();
for (int iter=0; iter < ITERATION_COUNT; iter++) {
for (int i=0; i < 1000; i++) {
items.put(i, new Item(1));
}
for (int i=0; i < 500; i++) {
items.put(i+1000, new Item(4));
}
for (int i=0; i < 250; i++) {
items.put(i+1500, new Item(9));
}
expect(runningSum(items.values()) == (1000*1 + 500*4 + 250*9));
for (int i = 900; i < 1200; i++) {
items.remove(i);
}
expect(runningSum(items.values()) == (900*1 + 300*4 + 250*9));
for (int i = 900; i < 1200; i++) {
items.put(i, new Item(i < 1000 ? 1 : 4));
}
expect(runningSum(items.values()) == (1000*1 + 500*4 + 250*9));
items.clear();
}
}
// Same pattern on a LinkedList; removal is by index, iterating downward
// so earlier removals don't shift the yet-to-be-removed positions.
private static final void testLinkedList() {
traceFunc("testLinkedList()");
LinkedList<Item> items = new LinkedList<Item>();
for (int iter=0; iter < ITERATION_COUNT; iter++) {
for (int i=0; i < 1000; i++) {
items.add(new Item(1));
}
for (int i=0; i < 500; i++) {
items.add(new Item(4));
}
for (int i=0; i < 250; i++) {
items.add(new Item(9));
}
expect(runningSum(items) == (1000*1 + 500*4 + 250*9));
for (int i = 1199; i >= 900; i--) {
items.remove(i);
}
expect(runningSum(items) == (900*1 + 300*4 + 250*9));
for (int i = 900; i < 1200; i++) {
items.add(new Item(i < 1000 ? 1 : 4));
}
expect(runningSum(items) == (1000*1 + 500*4 + 250*9));
items.clear();
}
}
// TreeSet needs unique ordering; ties on value are broken by creation index
// so duplicate values are all retained.
private static final void testTreeSet() {
traceFunc("testTreeSet()");
TreeSet<Item> items = new TreeSet<Item>(new Comparator<Item>() {
public int compare(Item i1, Item i2) {
int r = i1.value() - i2.value();
if (r == 0) {
return i1.index() - i2.index();
}
return r;
}
});
for (int iter=0; iter < ITERATION_COUNT; iter++) {
for (int i=0; i < 1000; i++) {
items.add(new Item(1));
}
for (int i=0; i < 500; i++) {
items.add(new Item(4));
}
for (int i=0; i < 250; i++) {
items.add(new Item(9));
}
expect(runningSum(items) == (1000*1 + 500*4 + 250*9));
items.clear();
}
}
// Runs each container test 10 times back-to-back.
public static void main(String args[]) {
for (int i=0; i < 10; i++) {
testArray();
testHashMap();
testLinkedList();
testTreeSet();
}
}
}
|
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
BINS=(
cmd/gendocs
cmd/genkubedocs
cmd/genman
cmd/genyaml
)
make -C "${KUBE_ROOT}" WHAT="${BINS[*]}"
kube::util::ensure-temp-dir
kube::util::gen-docs "${KUBE_TEMP}"
# Verify the list matches the expected list (diff should be empty)
if [[ "$(diff ${KUBE_ROOT}/docs/.generated_docs ${KUBE_TEMP}/docs/.generated_docs)" != "" ]]; then
echo "List of generated docs doesn't match a freshly built list. Please run hack/update-generated-docs.sh"
exit 1
fi
# Verify the files in the repo all contain the boilerplate instead of the actual
# content.
while read file; do
# Ignore docs/.generated_docs-- it should not have the boilerplate!
[[ "${file}" == "docs/.generated_docs" ]] && continue
# Search for "hack/generate-docs.sh" as a proxy for the boilerplate content,
# since the munger adds a bunch of other stuff.
if [[ "$(grep "hack/generate-docs.sh" "${KUBE_ROOT}/${file}")" == "" ]]; then
echo "${file} doesn't seem to have the correct boilerplate content for an autogenerated file."
echo "Please run hack/update-generated-docs.sh"
exit 1
fi
done <"${KUBE_ROOT}/docs/.generated_docs"
|
package cyclops.async.reactive.futurestream.react.lazy.sequence;
import static java.util.Arrays.asList;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasItems;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import com.oath.cyclops.ReactiveConvertableSequence;
import cyclops.container.Vector;
import cyclops.container.immutable.tuple.Tuple2;
import cyclops.container.immutable.tuple.Tuple3;
import cyclops.container.immutable.tuple.Tuple4;
import cyclops.function.Monoid;
import cyclops.async.reactive.futurestream.FutureStream;
import cyclops.async.reactive.futurestream.LazyReact;
import cyclops.pure.reactive.ReactiveSeq;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import org.junit.Before;
import org.junit.Test;
//see BaseSequentialSeqTest for in order tests
//see BaseSequentialSeqTest for in order tests
/**
 * FutureStream tests that do not depend on element ordering.
 * Each test builds a small sequential FutureStream via of(...) and
 * asserts sizes/membership rather than exact element order.
 */
public class LFSNoOrderTest {

    public static final LazyReact r = new LazyReact(10, 10);
    public static Executor ex = Executors.newFixedThreadPool(10);
    FutureStream<Integer> empty;
    FutureStream<Integer> nonEmpty;

    /** Builds a sequential FutureStream over the given elements. */
    public static <T> FutureStream<T> of(T... array) {
        return LazyReact.sequentialBuilder().of(array);
    }

    @Before
    public void setup() {
        empty = of();
        nonEmpty = of(1);
    }

    @Test
    public void stream() {
        assertThat(of(1, 2, 3).stream().to(ReactiveConvertableSequence::converter).listX(), hasItems(1, 2, 3));
    }

    protected Object value() {
        return "jello";
    }

    private int value2() {
        return 200;
    }

    @Test
    public void toStream() {
        List<Integer> list = of(1, 2, 3).<Integer>stream().collect(Collectors.toList());
        assertThat(list, equalTo(Arrays.asList(1, 2, 3)));
    }

    @Test
    public void batchBySize() {
        System.out.println(of(1, 2, 3, 4, 5, 6).grouped(3).collect(Collectors.toList()));
        // Six elements grouped in threes -> two groups.
        assertThat(of(1, 2, 3, 4, 5, 6).grouped(3).collect(Collectors.toList()).size(), is(2));
    }

    @Test
    public void limitWhileTest() {
        List<Integer> list = new ArrayList<>();
        // Retry until takeWhile yields a non-empty prefix.
        while (list.size() == 0) {
            list = of(1, 2, 3, 4, 5, 6).takeWhile(it -> it < 4).peek(it -> System.out.println(it)).collect(Collectors.toList());
        }
        assertThat(Arrays.asList(1, 2, 3, 4, 5, 6), hasItem(list.get(0)));
    }

    @Test
    public void testScanLeftStringConcat() {
        // scanLeft over n elements yields n+1 intermediate results.
        assertThat(of("a", "b", "c").scanLeft("", String::concat).toList().size(), is(4));
    }

    @Test
    public void testScanLeftSum() {
        assertThat(of("a", "ab", "abc").map(str -> str.length()).scanLeft(0, (u, t) -> u + t).toList().size(),
                   is(asList(0, 1, 3, 6).size()));
    }

    @Test
    public void testScanRightStringConcatMonoid() {
        assertThat(of("a", "b", "c").scanRight(Monoid.of("", String::concat)).toList().size(),
                   is(asList("", "c", "bc", "abc").size()));
    }

    @Test
    public void testScanRightStringConcat() {
        assertThat(of("a", "b", "c").scanRight("", String::concat).toList().size(),
                   is(asList("", "c", "bc", "abc").size()));
    }

    @Test
    public void testScanRightSum() {
        assertThat(of("a", "ab", "abc").map(str -> str.length()).scanRight(0, (t, u) -> u + t).toList().size(),
                   is(asList(0, 3, 5, 6).size()));
    }

    @Test
    public void testReverse() {
        assertThat(of(1, 2, 3).reverse().toList(), equalTo(asList(3, 2, 1)));
    }

    @Test
    public void testReverseList() {
        assertThat(LazyReact.sequentialBuilder().fromIterable(Arrays.asList(10, 400, 2, -1)).reverse().toList(),
                   equalTo(asList(-1, 2, 400, 10)));
    }

    @Test
    public void testReverseListLimit() {
        assertThat(LazyReact.sequentialBuilder().fromIterable(Arrays.asList(10, 400, 2, -1)).reverse().limit(2).toList(),
                   equalTo(asList(-1, 2)));
    }

    @Test
    public void testReverseRange() {
        assertThat(LazyReact.sequentialBuilder().fromStream(IntStream.range(0, 10).boxed()).reverse().toList(),
                   equalTo(asList(9, 8, 7, 6, 5, 4, 3, 2, 1, 0)));
    }

    @Test
    public void testCycle() {
        assertEquals(asList(1, 1, 1, 1, 1, 1), of(1).cycle().limit(6).toList());
    }

    @Test
    public void testIterable() {
        List<Integer> list = of(1, 2, 3).to().collection(LinkedList::new);
        for (Integer i : of(1, 2, 3)) {
            assertThat(list, hasItem(i));
        }
    }

    @Test
    public void testDuplicate() {
        Tuple2<ReactiveSeq<Integer>, ReactiveSeq<Integer>> copies = of(1, 2, 3, 4, 5, 6).duplicate();
        assertTrue(copies._1().anyMatch(i -> i == 2));
        assertTrue(copies._2().anyMatch(i -> i == 2));
    }

    @Test
    public void testTriplicate() {
        Tuple3<ReactiveSeq<Integer>, ReactiveSeq<Integer>, ReactiveSeq<Integer>> copies = of(1, 2, 3, 4, 5, 6).triplicate();
        assertTrue(copies._1().anyMatch(i -> i == 2));
        assertTrue(copies._2().anyMatch(i -> i == 2));
        assertTrue(copies._3().anyMatch(i -> i == 2));
    }

    @Test
    public void testQuadriplicate() {
        Tuple4<ReactiveSeq<Integer>, ReactiveSeq<Integer>, ReactiveSeq<Integer>, ReactiveSeq<Integer>> copies = of(1, 2, 3, 4, 5, 6).quadruplicate();
        assertTrue(copies._1().anyMatch(i -> i == 2));
        assertTrue(copies._2().anyMatch(i -> i == 2));
        assertTrue(copies._3().anyMatch(i -> i == 2));
        assertTrue(copies._4().anyMatch(i -> i == 2));
    }

    @Test
    public void testDuplicateFilter() {
        Tuple2<ReactiveSeq<Integer>, ReactiveSeq<Integer>> copies = of(1, 2, 3, 4, 5, 6).duplicate();
        // Three of the six elements are even.
        assertTrue(copies._1().filter(i -> i % 2 == 0).toList().size() == 3);
        assertTrue(copies._2().filter(i -> i % 2 == 0).toList().size() == 3);
    }

    @Test
    public void testTriplicateFilter() {
        Tuple3<ReactiveSeq<Integer>, ReactiveSeq<Integer>, ReactiveSeq<Integer>> copies = of(1, 2, 3, 4, 5, 6).triplicate();
        assertTrue(copies._1().filter(i -> i % 2 == 0).toList().size() == 3);
        assertTrue(copies._2().filter(i -> i % 2 == 0).toList().size() == 3);
        assertTrue(copies._3().filter(i -> i % 2 == 0).toList().size() == 3);
    }

    @Test
    public void testQuadriplicateFilter() {
        Tuple4<ReactiveSeq<Integer>, ReactiveSeq<Integer>, ReactiveSeq<Integer>, ReactiveSeq<Integer>> copies = of(1, 2, 3, 4, 5, 6).quadruplicate();
        assertTrue(copies._1().filter(i -> i % 2 == 0).toList().size() == 3);
        assertTrue(copies._2().filter(i -> i % 2 == 0).toList().size() == 3);
        assertTrue(copies._3().filter(i -> i % 2 == 0).toList().size() == 3);
        assertTrue(copies._4().filter(i -> i % 2 == 0).toList().size() == 3);
    }

    @Test
    public void testDuplicateLimit() {
        Tuple2<ReactiveSeq<Integer>, ReactiveSeq<Integer>> copies = of(1, 2, 3, 4, 5, 6).duplicate();
        assertTrue(copies._1().limit(3).toList().size() == 3);
        assertTrue(copies._2().limit(3).toList().size() == 3);
    }

    @Test
    public void testTriplicateLimit() {
        Tuple3<ReactiveSeq<Integer>, ReactiveSeq<Integer>, ReactiveSeq<Integer>> copies = of(1, 2, 3, 4, 5, 6).triplicate();
        assertTrue(copies._1().limit(3).toList().size() == 3);
        assertTrue(copies._2().limit(3).toList().size() == 3);
        assertTrue(copies._3().limit(3).toList().size() == 3);
    }

    @Test
    public void testQuadriplicateLimit() {
        Tuple4<ReactiveSeq<Integer>, ReactiveSeq<Integer>, ReactiveSeq<Integer>, ReactiveSeq<Integer>> copies = of(1, 2, 3, 4, 5, 6).quadruplicate();
        assertTrue(copies._1().limit(3).toList().size() == 3);
        assertTrue(copies._2().limit(3).toList().size() == 3);
        assertTrue(copies._3().limit(3).toList().size() == 3);
        assertTrue(copies._4().limit(3).toList().size() == 3);
    }

    @Test
    public void testCastException() {
        // Non-Integer elements fail the cast and are replaced by -10 via recover.
        of(1, "a", 2, "b", 3).peek(it -> System.out.println(" it is " + it))
                             .cast(Integer.class)
                             .recover(t -> {
                                 return -10;
                             })
                             .peek(i -> {
                                 System.out.println(i.getClass());
                             })
                             .peek(it -> System.out.println(it))
                             .toList()
                             .stream()
                             .map(i -> i.getClass())
                             .allMatch(c -> Integer.class.equals(c));
    }

    @Test
    public void testGroupByEager() {
        cyclops.container.HashMap<Integer, Vector<Integer>> map1 = of(1, 2, 3, 4).groupBy(i -> i % 2);
        assertThat(map1.getOrElse(0, Vector.empty()).listView(), hasItem(2));
        assertThat(map1.getOrElse(0, Vector.empty()).listView(), hasItem(4));
        assertThat(map1.getOrElse(1, Vector.empty()).listView(), hasItem(1));
        assertThat(map1.getOrElse(1, Vector.empty()).listView(), hasItem(3));
        assertEquals(2, map1.size());
    }

    @Test
    public void testJoin() {
        // Lengths are compared instead of content because order is unspecified.
        assertEquals("123".length(), of(1, 2, 3).join().length());
        assertEquals("1, 2, 3".length(), of(1, 2, 3).join(", ").length());
        assertEquals("^1|2|3$".length(), of(1, 2, 3).join("|", "^", "$").length());
    }

    @Test
    public void testSkipWhile() {
        Supplier<FutureStream<Integer>> s = () -> of(1, 2, 3, 4, 5);
        assertTrue(s.get().dropWhile(i -> false).toList().containsAll(asList(1, 2, 3, 4, 5)));
        assertEquals(asList(), s.get().dropWhile(i -> true).toList());
    }

    @Test
    public void testSkipUntil() {
        Supplier<FutureStream<Integer>> s = () -> of(1, 2, 3, 4, 5);
        assertEquals(asList(), s.get().dropUntil(i -> false).toList());
        assertTrue(s.get().dropUntil(i -> true).toList().containsAll(asList(1, 2, 3, 4, 5)));
    }

    @Test
    public void testSkipUntilWithNulls() {
        Supplier<FutureStream<Integer>> s = () -> of(1, 2, null, 3, 4, 5);
        assertTrue(s.get().dropUntil(i -> true).toList().containsAll(asList(1, 2, null, 3, 4, 5)));
    }

    @Test
    public void testSkipUntilInclusive() {
        Supplier<FutureStream<Integer>> s = () -> of(1, 2, 3, 4, 5);
        assertEquals(asList(), s.get().dropUntilInclusive(i -> false).toList());
        assertTrue(s.get().dropUntil(i -> true).toList().containsAll(asList(2, 3, 4, 5)));
    }

    @Test
    public void testSkipUntilWithNullsInclusive() {
        Supplier<FutureStream<Integer>> s = () -> of(1, 2, null, 3, 4, 5);
        assertTrue(s.get().dropUntilInclusive(i -> true).toList().containsAll(asList(2, null, 3, 4, 5)));
    }

    @Test
    public void testLimitWhile() {
        Supplier<FutureStream<Integer>> s = () -> of(1, 2, 3, 4, 5);
        assertEquals(asList(), s.get().takeWhile(i -> false).toList());
        assertTrue(s.get().takeWhile(i -> i < 3).toList().size() != 5);
        assertTrue(s.get().takeWhile(i -> true).toList().containsAll(asList(1, 2, 3, 4, 5)));
    }

    @Test
    public void testLimitUntil() {
        assertTrue(of(1, 2, 3, 4, 5).takeUntil(i -> false).toList().containsAll(asList(1, 2, 3, 4, 5)));
        assertFalse(of(1, 2, 3, 4, 5).takeUntil(i -> i % 3 == 0).toList().size() == 5);
        assertEquals(asList(), of(1, 2, 3, 4, 5).takeUntil(i -> true).toList());
    }

    @Test
    public void testLimitUntilWithNulls() {
        System.out.println(of(1, 2, null, 3, 4, 5).takeUntil(i -> false).toList());
        assertTrue(of(1, 2, null, 3, 4, 5).takeUntil(i -> false).toList().containsAll(asList(1, 2, null, 3, 4, 5)));
    }

    @Test
    public void testMinByMaxBy() {
        Supplier<FutureStream<Integer>> s = () -> of(1, 2, 3, 4, 5, 6);
        assertEquals(1, (int) s.get().maxBy(t -> Math.abs(t - 5)).orElse(-1));
        assertEquals(5, (int) s.get().minBy(t -> Math.abs(t - 5)).orElse(-1));
        // String comparison: "6" is lexicographically largest, "1" smallest.
        assertEquals(6, (int) s.get().maxBy(t -> "" + t).orElse(-1));
        assertEquals(1, (int) s.get().minBy(t -> "" + t).orElse(-1));
    }

    @Test
    public void testFoldLeft() {
        // Repeated to shake out any nondeterminism in the async pipeline.
        for (int i = 0; i < 100; i++) {
            Supplier<FutureStream<String>> s = () -> of("a", "b", "c");
            assertTrue(s.get().reduce("", String::concat).contains("a"));
            assertTrue(s.get().reduce("", String::concat).contains("b"));
            assertTrue(s.get().reduce("", String::concat).contains("c"));
            assertEquals(3, (int) s.get().map(str -> str.length()).foldLeft(0, (u, t) -> u + t));
            assertEquals(3, (int) s.get().map(str -> str.length()).foldRight(0, (t, u) -> u + t));
        }
    }

    @Test
    public void testFoldRight() {
        Supplier<FutureStream<String>> s = () -> of("a", "b", "c");
        assertTrue(s.get().foldRight("", String::concat).contains("a"));
        assertTrue(s.get().foldRight("", String::concat).contains("b"));
        assertTrue(s.get().foldRight("", String::concat).contains("c"));
        assertEquals(3, (int) s.get().map(str -> str.length()).foldRight(0, (t, u) -> u + t));
    }

    //tests converted from lazy-seq suite
    @Test
    public void flattenEmpty() throws Exception {
        assertTrue(this.<Stream<Integer>>of().to(FutureStream::flatten).toList().isEmpty());
    }

    @Test
    public void flatten() throws Exception {
        assertThat(of(Stream.of(1, 2)).to(FutureStream::flatten).toList().size(), equalTo(asList(1, 2).size()));
    }
}
|
// Firebase bootstrap: initializes the app and exports auth helpers plus the
// Firestore handle used throughout the app.
// NOTE(review): named imports { initializeApp, auth, firestore } from
// 'firebase/app' assume the bundler resolves them off the firebase
// namespace; the classic v7/v8 SDK exposes a default namespace export
// instead — confirm this matches the installed firebase version.
import {
  initializeApp,
  auth,
  firestore,
} from 'firebase/app';
// Side-effect imports register the individual Firebase services.
import 'firebase/auth';
import 'firebase/performance';
import 'firebase/analytics';
import 'firebase/firestore';
const firebaseConfig = {
  apiKey: '<KEY>',
  authDomain: 'investment-portfolio-manager.firebaseapp.com',
  databaseURL: 'https://investment-portfolio-manager.firebaseio.com',
  projectId: 'investment-portfolio-manager',
  storageBucket: 'investment-portfolio-manager.appspot.com',
  messagingSenderId: '398632099201',
  appId: '1:398632099201:web:6a472a4e5ff06297029c80',
  measurementId: 'G-5YC1Q0ZE9F',
};
initializeApp(firebaseConfig);
// Google sign-in provider reused by signInWithGoogle.
const googleAuthProvider = new auth.GoogleAuthProvider();
const db = firestore();
const signInWithGoogle = () => auth().signInWithPopup(googleAuthProvider);
// cb receives the current user (or null) whenever auth state changes.
const onAuthStateChanged = (cb) => auth().onAuthStateChanged(cb);
const signOut = () => auth().signOut();
export {
  onAuthStateChanged,
  signInWithGoogle,
  signOut,
  db,
};
|
<filename>app/src/main/java/com/hapramp/ui/activity/Splash.java
package com.hapramp.ui.activity;
import android.content.Intent;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import com.hapramp.notification.NotificationHandler;
import com.hapramp.preferences.DataStoreCachePreference;
import com.hapramp.preferences.HaprampPreferenceManager;
/**
 * Launch screen: warms up caches/notification channel, then routes the user
 * to home, login, or onboarding depending on stored preference state.
 */
public class Splash extends AppCompatActivity {
  @Override
  protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    // Warm the data-store cache and ensure the notification channel exists
    // before any navigation happens.
    DataStoreCachePreference.getInstance(this);
    NotificationHandler.createNotificationChannel(this);
    // Logged-in users go straight to the home screen.
    if (HaprampPreferenceManager.getInstance().isLoggedIn()) {
      navigateToHomePage();
      return;
    }
    // Otherwise: returning users see login, first-timers see onboarding.
    if (HaprampPreferenceManager.getInstance().isOnBoardingDone()) {
      navigateToLogin();
    } else {
      navigateToOnBoarding();
    }
  }

  // Each navigation helper finishes this activity so Splash never stays
  // on the back stack.
  private void navigateToHomePage() {
    Intent intent = new Intent(this, CommunitySelectionActivity.class);
    startActivity(intent);
    finish();
  }

  private void navigateToLogin() {
    Intent intent = new Intent(this, LoginActivity.class);
    startActivity(intent);
    finish();
  }

  private void navigateToOnBoarding() {
    Intent intent = new Intent(this, OnBoardingActivity.class);
    startActivity(intent);
    finish();
  }
}
|
#!/usr/bin/env bash
# Check gofmt
#
# Lists every non-vendored .go file that gofmt would reformat; exits 1 if any
# are found so CI fails until `make fmt` is run.
echo "==> Checking that code complies with gofmt requirements..."
# $(...) replaces the deprecated backticks; word splitting of the find output
# is intentional here so each file becomes a separate gofmt argument
# (paths with whitespace are not expected in a Go source tree).
gofmt_files=$(gofmt -l $(find . -name '*.go' | grep -v vendor))
if [[ -n ${gofmt_files} ]]; then
    echo 'gofmt needs running on the following files:'
    echo "${gofmt_files}"
    echo "You can use the command: \`make fmt\` to reformat code."
    exit 1
fi
exit 0
|
#!/usr/bin/env bash
##!/bin/bash
# todo check if root
# todo
#set -eou pipefail
# check docker
docker -v | grep -q version || {
printf "Docker does not appear to run, exiting.\n"
exit 1
}
# md5 command
# md5 command
# Portable md5 wrapper: prefers GNU/Linux `md5sum`, falls back to BSD/macOS
# `md5`, and aborts the script if neither is available. Arguments are passed
# through unchanged.
md5-sum() {
if command -v md5sum >/dev/null 2>&1; then
md5sum "$@"
elif command -v md5 >/dev/null 2>&1; then
md5 "$@"
else
printf "Error: no md5 command found\n"
exit 1
fi
}
# go to working dir
pwd="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" || exit 1
cd "$pwd" || exit 1
parent_dir=$(dirname "${pwd}")
inside_base_path="/opt/FileMaker/FileMaker Server/"
# parse config
source "$pwd"/../common/settings.sh
# find certificates
# todo: shorten
c_bundle=$(find . -name "*.ca-bundle")
if [[ -z $c_bundle ]]; then
c_bundle=$(get_setting "ca-bundle" ./config.txt)
check_many "$c_bundle"
if [[ -n $c_bundle ]]; then
cp -v "$c_bundle" . || exit 1
c_bundle=${c_bundle##*/}
fi
fi
c_cert=$(find . -name "*.crt")
if [[ -z $c_cert ]]; then
c_cert=$(get_setting "certificate" ./config.txt)
check_many "$c_cert"
if [[ -n $c_cert ]]; then
cp -v "$c_cert" . || exit 1
c_cert=${c_cert##*/}
fi
fi
c_key=$(find . -name "*.pem")
if [[ -z $c_key ]]; then
c_key=$(get_setting "key-file" ./config.txt)
check_many "$c_key"
if [[ -n $c_key ]]; then
cp -v "$c_key" . || exit 1
c_key=${c_key##*/}
fi
fi
# get settings from config
assisted_install=$(get_setting "assisted_install" ./config.txt)
check_setting "$assisted_install"
# todo not found
start_server=$(get_setting "start_server" ./config.txt)
remove_build_dir=$(get_setting "remove_build_dir" ./config.txt)
# Admin credentials come from the FileMaker assisted-install file itself.
admin_user=$(get_setting "Admin Console User" ./"$assisted_install")
admin_pass=$(get_setting "Admin Console Password" ./"$assisted_install")
# set instance id
#while [ $is_valid -eq 0 ] && [ $old_container -eq 1 ]; do
printf "Please enter a name or leave empty for an automatic ID to be assigned to this instance: "
read -r user_input
case $user_input in
"")
# todo while valid (check if exists)
# NOTE(review): `md5-sum` is presumably the md5/md5sum wrapper function
# defined earlier in this script (cut off above); also, `"$@"` forwards the
# script's own arguments into the hash command — confirm that is intended
# and not a leftover from the function body.
instance_id=$(uuidgen | md5-sum "$@" | cut -c-12) || {
printf "error while generating instance id\n"
exit 1
}
echo "id: " "$instance_id"
;;
*[!abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_.-]*)
echo >&2 "That ID is not allowed. Please use only characters [a-zA-Z0-9_.-]"
exit 1
;;
*)
# todo while valid (check if exists)
instance_id=$user_input
;;
esac
#done
# write to .env
# Truncates ../.env — this script owns the file; IMAGE= is appended later.
echo "ID=${instance_id}" >../.env
# Load paths
# Provides the `paths` array (volume name / host subpath pairs) used later.
source "$pwd"/../common/paths.sh
# download filemaker_server package
package_remove=0
# look for installer locally
package=$(find . -name "*.deb" -o -name "*.rpm")
# download from URL if not found locally
if [[ ! $package ]]; then
    printf "\ndownloading fms package ...\n"
    url=$(get_setting "url" ./config.txt)
    # HEAD-probe the URL first so we fail fast with the HTTP status
    # instead of saving an error page as the installer package.
    STATUS=$(curl -s --head --output /dev/null -w '%{http_code}' "$url")
    if [ ! "$STATUS" -eq 200 ]; then
        echo "Error while downloading fms package: Got a $STATUS from URL: $url ..."
        exit 1
    fi
    curl "${url}" -O || exit
    package_remove=1
fi
# find deb or rpm, set image_name according to installer package
package=$(find . -name "*.deb")
# NOTE(review): deb installs are tagged 19_3 while rpm installs below are
# tagged 19_2 — confirm this version mismatch is intentional.
image_name=ubuntu-fms-19_3
# todo pin version tag / digest
base_image=jrei/systemd-ubuntu:18.04
helper_script="helper_ubuntu.sh"
if [[ ! $package ]]; then
    package=$(find . -name "*.rpm")
    image_name=centos-fms-19_2
    # todo pin version tag / digest
    base_image=jrei/systemd-centos:7
    helper_script="helper_centos.sh"
fi
# Exactly one installer package may be present; bail out otherwise.
plines=$(wc -l <<<"$package")
if [[ $plines -gt 1 ]]; then
    printf "%s fmserver packages found, 1 expected\n" "$plines"
    exit 1
fi
# write to .env
echo "IMAGE=${image_name}" >>../.env
service_name=fms
container_name=fms-${instance_id}
build_image_name=fmsinstall
date=$(date +%Y-%m-%d)
source ../common/get_tz.sh
if ! timezone=$(get_tz); then
    # Bug fix: this was `>&2 echo "error getting timezone\n"`, which printed
    # a literal backslash-n (echo does not interpret escapes without -e);
    # printf interprets the newline and still writes to stderr.
    printf "error getting timezone\n" >&2
    exit 1
fi
# check if container names are in use
old_container=0
rm_service=0
docker ps -aq --filter "name=${container_name}" | grep -q . && old_container=1
# If a container with this name already exists, ask whether to replace it.
is_valid=0
while [ $is_valid -eq 0 ] && [ $old_container -eq 1 ]; do
    # todo reuse service
    echo Another "${container_name}" container already exists, remove and build a new image? [y/n]
    # Fix: read -r so a backslash in the answer is not treated as an escape
    # (shellcheck SC2162).
    read -r remove_service
    case $remove_service in
    Y | y)
        is_valid=1
        rm_service=1
        ;;
    N | n)
        is_valid=1
        rm_service=0
        ;;
    *)
        echo Please enter [y]es or [n]o
        ;;
    esac
done
if [ $old_container -eq 1 ] && [ $rm_service -eq 1 ]; then
    printf "\nstopping...\n"
    docker stop "${container_name}"
    printf "\nremoving...\n"
    docker rm "${container_name}"
elif [ $old_container -eq 1 ] && [ $rm_service -eq 0 ]; then
    printf "\n Exiting.\n"
    exit 0
fi
# Also clear out any leftover build container from a previous run.
if docker ps -aq --filter "name=${build_image_name}" | grep -q .; then
    echo another build container already exists, removing...
    # Fix: quote the expansion (SC2086); harmless here but consistent with
    # the rest of the script.
    docker stop "$build_image_name"
    docker rm "$build_image_name"
fi
# create bind volumes
# The `paths` array (from common/paths.sh) is laid out as alternating
# pairs: even index = docker volume name, odd index = host subpath under
# fms-data (as shown by the paths[$i] / paths[$i + 1] indexing below) —
# TODO confirm against common/paths.sh.
printf "\n\e[36mCreating directories on host...\e[39m\n"
for ((i = 1; i < "${#paths[@]}"; i += 2)); do
if [[ ! -d "$parent_dir/fms-data${paths[$i]}" ]]; then
mkdir -p -- "$parent_dir/fms-data${paths[$i]}"
fi
done
printf "\n\e[36mcreating docker volumes...\e[39m\n"
# Bind-mount style volumes: each named volume is backed by a directory
# under $parent_dir/fms-data.
for ((i = 0; i < "${#paths[@]}"; i += 2)); do
docker volume create --driver local -o o=bind -o type=none -o device="$parent_dir/fms-data${paths[$i + 1]}" "${paths[$i]}" || {
printf "error while creating docker volumes\n"
exit 1
}
done
printf "\n"
# run build container
# Detached systemd-capable container; certificates, credentials and install
# options are passed via environment variables consumed by the helper
# script, and every FileMaker state directory is a named volume so it
# survives the later `docker commit`.
docker run -d \
    --name "$build_image_name" \
    --cap-add=SYS_ADMIN \
    -e CERT_CERT="$c_cert" \
    -e CERT_BUNDLE="$c_bundle" \
    -e CERT_KEY="$c_key" \
    -e PACKAGE_REMOVE="$package_remove" \
    -e ASSISTED_INSTALL="$assisted_install" \
    -e FMS_ADMIN_USER="$admin_user" \
    -e FMS_ADMIN_PASS="$admin_pass" \
    -e TIMEZONE="$timezone" \
    --tmpfs /tmp \
    --tmpfs /run \
    --tmpfs /run/lock \
    -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
    -v "${pwd}":/root/build/ \
    -v fms-admin-conf-"${instance_id}":"/opt/FileMaker/FileMaker Server/Admin/conf":delegated \
    -v fms-conf-"${instance_id}":"/opt/FileMaker/FileMaker Server/conf":delegated \
    -v fms-data-backups-"${instance_id}":"/opt/FileMaker/FileMaker Server/Data/Backups":delegated \
    -v fms-data-databases-"${instance_id}":"/opt/FileMaker/FileMaker Server/Data/Databases":delegated \
    -v fms-data-preferences-"${instance_id}":"/opt/FileMaker/FileMaker Server/Data/Preferences":delegated \
    -v fms-data-scripts-"${instance_id}":"/opt/FileMaker/FileMaker Server/Data/Scripts":delegated \
    -v fms-dbserver-extensions-"${instance_id}":"/opt/FileMaker/FileMaker Server/Database Server/Extensions/":delegated \
    -v fms-http-dotconf-"${instance_id}":"/opt/FileMaker/FileMaker Server/HTTPServer/.conf":delegated \
    -v fms-http-conf-"${instance_id}":"/opt/FileMaker/FileMaker Server/HTTPServer/conf":delegated \
    -v fms-http-htdocs-"${instance_id}":"/opt/FileMaker/FileMaker Server/HTTPServer/htdocs":delegated \
    -v fms-http-logs-"${instance_id}":"/opt/FileMaker/FileMaker Server/HTTPServer/logs":delegated \
    -v fms-logs-"${instance_id}":"/opt/FileMaker/FileMaker Server/Logs":delegated \
    -v fms-webpub-conf-"${instance_id}":"/opt/FileMaker/FileMaker Server/Web Publishing/conf":delegated \
    "$base_image" || {
    printf "error while running build container\n"
    exit 1
}
# run install script inside build container
# Bug fix: the original ran `docker exec ...` and then tested `[ ! $? ]`.
# `test` with a single non-empty string ("0" or "1") is always true, so
# `[ ! $? ]` was always FALSE and install failures were never detected.
# Test the exit status of docker exec directly instead.
if ! docker exec "$build_image_name" /root/build/"$helper_script"; then
    printf "error while installing!\n"
    docker stop "$build_image_name"
    docker rm "$build_image_name"
    exit 1
fi
# check for flag file
# The helper script drops a `build_success` file on success; its absence
# means the install failed inside the container.
build_success=$(find . -name build_success)
if [[ ! $build_success ]]; then
    printf "build not successful\n"
    printf "stopping & removing build container ...\n"
    docker stop "$build_image_name"
    docker rm "$build_image_name"
    exit 1
fi
# remove flag file
rm "$build_success" || exit 1
# docker commit
printf "\ncommit build container to new image ...\n"
# Expose the FMS ports and blank the build-time secrets so credentials do
# not leak into the committed image's environment.
docker commit -c "EXPOSE 80" -c "EXPOSE 443" -c "EXPOSE 2399" -c "EXPOSE 5003" -c "EXPOSE 16000-16002" \
    --change "ENV CERT_CERT=''" \
    --change "ENV CERT_BUNDLE=''" \
    --change "ENV CERT_KEY=''" \
    --change "ENV PACKAGE_REMOVE=''" \
    --change "ENV ASSISTED_INSTALL=''" \
    --change "ENV FMS_ADMIN_USER=''" \
    --change "ENV FMS_ADMIN_PASS=''" \
    "${build_image_name}" "${image_name}":"${date}"
docker tag "${image_name}":"${date}" "${image_name}":latest
# remove build container
printf "\nremoving build container ...\n"
docker stop "$build_image_name" && docker rm "$build_image_name"
# todo
# backup/zip fms-data directory
# check if fms network exists
network=0
docker network ls -q --filter "name=^fms-net$" | grep -q . && network=1
case $network in
0)
    # Bug fix: this was `echo "...\n"`, which printed a literal
    # backslash-n; printf interprets the escape.
    printf "Network fms-net not found, will be created\n"
    compose_files="-f ../docker-compose.yml -f ../fms-network.yml"
    ;;
1)
    compose_files="-f ../docker-compose.yml"
    ;;
*)
    # Defensive: network is only ever 0 or 1 above.
    printf "error while looking for fms docker network: %s" "$(docker network ls -q --filter "name=fms-net")"
    exit 1
    ;;
esac
if [[ $start_server -eq 1 ]]; then
    printf "\nDone. Now starting your server ....\n"
    # $compose_files intentionally unquoted: it holds several words
    # (-f file -f file) that must be split into separate arguments.
    docker-compose $compose_files up -d $service_name
else
    # Strip the ../ prefixes so the hint is correct when run from the
    # project root.
    compose_files=$(sed 's/..\///g' <<<"$compose_files")
    printf "\nDone. You can now start your server with\e[36m ./tools/start_server\e[39m or \e[36mdocker-compose %s up [-d] %s\e[39m\n" "$compose_files" "$service_name"
fi
exit 0
|
# Create environment
# conda create -n algebra python=3.6
# Activate environment
# Bug fix: `source activate` is the deprecated pre-conda-4.4 form — the
# original comment even noted it didn't work. `conda activate` is the
# supported command (requires `conda init <shell>` to have been run once).
conda activate algebra
# Install your package
# Bug fix: the command was misspelled `ipip`.
pip install -e .
# Install other packages
pip install numpy
pip install yapf
|
def count_words(sentence):
    """Return the number of whitespace-separated words in *sentence*.

    ``str.split()`` with no argument splits on runs of whitespace and
    ignores leading/trailing whitespace, so an empty or all-blank string
    counts as 0 words.
    """
    return len(sentence.split())


sentence = "Hello World. This is a sentence."
word_count = count_words(sentence)
print(f"The sentence has {word_count} words.")
package com.yingnuo.web.servlet.admin.handle;
import com.google.gson.Gson;
import com.yingnuo.service.AdminService;
import com.yingnuo.service.UserService;
import javax.security.auth.login.LoginException;
import javax.servlet.RequestDispatcher;
import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.Map;
/**
* Created with IntelliJ IDEA.
* User: skyzc
* Date: 2019/12/5
* Time: 19:03
* To change this template use File | Settings | File Templates.
* Description:
*/
@WebServlet("/admin/delUserByUserId")
public class DelUserByUserIdServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
// 判断是否有 session
if (req.getSession().getAttribute("admin") == null){
System.out.println("session 中没有 admin,没有管理员登陆!请登录...");
RequestDispatcher dispatcher = req.getRequestDispatcher("/WEB-INF/views/admin/admin_login.jsp");
dispatcher.forward(req,resp);
return;
}
String user_id = req.getParameter("user_id");
System.out.println(user_id);
UserService userService = new UserService();
try {
Boolean msg = userService.deleteUserByUserId(user_id);
// 利用 GSON 返回json对象
PrintWriter out = resp.getWriter();
Map<String,Boolean> map = new HashMap<String,Boolean>();
map.put("msg",msg);
Gson gson = new Gson();
String json = gson.toJson(map);
out.println(json);
out.flush();
out.close();
} catch (LoginException e) {
e.printStackTrace();
}
}
} |
#!/bin/bash
# Display swap usage (summed from /proc/<pid>/smaps) for every process,
# plus an overall total. Reading another user's smaps needs privilege;
# unreadable processes report 0, same as before.
OVERALL=0
# Numeric /proc entries are exactly the process directories; this replaces
# the old `find | egrep` pipeline (egrep is deprecated).
for DIR in /proc/[0-9]*; do
    PID=${DIR##*/}
    PROGNAME=$(ps -p "$PID" -o comm --no-headers)
    # Sum every "Swap:" line of this process in a single awk pass instead
    # of the old per-line shell `let` loop; `+ 0` forces a numeric 0 when
    # the file is empty or unreadable.
    SUM=$(awk '/^Swap/ { total += $2 } END { print total + 0 }' "$DIR/smaps" 2>/dev/null)
    echo "PID=$PID - Swap used: $SUM - ($PROGNAME )"
    OVERALL=$((OVERALL + SUM))
done
echo "Overall swap used: $OVERALL"
|
#!/bin/bash
# Refuse to proceed while the staging area or a rootfs loopback device is
# still mounted; the user must run 'make delete' to tear them down first.
# (grep's matching lines are printed on purpose, showing what is mounted.)
set -e
# Bug fix: $PWD was unquoted, so a working directory containing spaces or
# glob characters broke the grep pattern / word splitting.
if mount | grep "$PWD/build"
then
    echo "Staging area still mounted - please run 'make delete' manually."
    exit 1
fi
if sudo losetup -a | grep rootfs
then
    echo "Loopback device still mounted - please run 'make delete' manually."
    exit 1
fi
|
<reponame>bizmaercq/eda-reporting<filename>raw/SMS BANKING/schema/table.ddl
-- SMS-banking service catalogue table, with audit columns (created/modified
-- by/on) referencing APPLICATION_USER. Storage clauses are an Oracle
-- export artifact and tied to the USERS tablespace.
CREATE TABLE "SMSUSR"."SMS_SERVICE"
( "ID" NUMBER(19,0) NOT NULL ENABLE,
"CREATED_ON" TIMESTAMP (6),
"MODIFIED_ON" TIMESTAMP (6),
"STATUS" VARCHAR2(255 CHAR),
"DESCRIPTION" VARCHAR2(255 CHAR),
"SERVICE_CODE" VARCHAR2(255 CHAR),
"SERVICE_NAME" VARCHAR2(255 CHAR),
"CREATED_BY" NUMBER(19,0),
"MODIFIED_BY" NUMBER(19,0),
PRIMARY KEY ("ID")
USING INDEX PCTFREE 10 INITRANS 2 MAXTRANS 255 COMPUTE STATISTICS
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "USERS" ENABLE,
CONSTRAINT "FKBF18F1C80E5AF" FOREIGN KEY ("CREATED_BY")
REFERENCES "SMSUSR"."APPLICATION_USER" ("ID") ENABLE,
CONSTRAINT "FKBF18F6ED2CA2E" FOREIGN KEY ("MODIFIED_BY")
REFERENCES "SMSUSR"."APPLICATION_USER" ("ID") ENABLE
) SEGMENT CREATION IMMEDIATE
PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 NOCOMPRESS LOGGING
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "USERS" ;
-- System-generated unique index backing the primary key.
CREATE UNIQUE INDEX "SMSUSR"."SYS_C00141094" ON "SMSUSR"."SMS_SERVICE" ("ID")
PCTFREE 10 INITRANS 2 MAXTRANS 255 COMPUTE STATISTICS
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "USERS" ;
-- NOTE(review): the CREATE TABLE above already declares PRIMARY KEY ("ID"),
-- so replaying this ALTER on the same table will fail with ORA-02260 —
-- presumably a double export; confirm before running this DDL as-is.
ALTER TABLE "SMSUSR"."SMS_SERVICE" ADD PRIMARY KEY ("ID")
USING INDEX PCTFREE 10 INITRANS 2 MAXTRANS 255 COMPUTE STATISTICS
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "USERS" ENABLE;
ALTER TABLE "SMSUSR"."SMS_SERVICE" MODIFY ("ID" NOT NULL ENABLE);
// Generated by the gRPC C++ plugin.
// If you make any local change, they will be lost.
// source: discovery.proto
#include "discovery.pb.h"
#include "discovery.grpc.pb.h"
#include <grpc++/impl/codegen/async_stream.h>
#include <grpc++/impl/codegen/async_unary_call.h>
#include <grpc++/impl/codegen/channel_interface.h>
#include <grpc++/impl/codegen/client_unary_call.h>
#include <grpc++/impl/codegen/method_handler_impl.h>
#include <grpc++/impl/codegen/rpc_service_method.h>
#include <grpc++/impl/codegen/service_type.h>
#include <grpc++/impl/codegen/sync_stream.h>
namespace discovery {
// NOTE: this file is generated by the gRPC C++ plugin from discovery.proto
// (see the header above) — do not hand-edit logic; regenerate instead.
// Fully-qualified method names; the index order is shared by the Stub
// constructor and the Service constructor below.
static const char* DiscoveryService_method_names[] = {
"/discovery.DiscoveryService/RegisterService",
"/discovery.DiscoveryService/Discover",
"/discovery.DiscoveryService/ListAllServices",
"/discovery.DiscoveryService/State",
};
// Creates a client stub bound to the given channel; `options` is accepted
// for interface compatibility but unused here.
std::unique_ptr< DiscoveryService::Stub> DiscoveryService::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) {
std::unique_ptr< DiscoveryService::Stub> stub(new DiscoveryService::Stub(channel));
return stub;
}
// Pre-builds one RpcMethod descriptor per RPC, all unary (NORMAL_RPC).
DiscoveryService::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel)
: channel_(channel), rpcmethod_RegisterService_(DiscoveryService_method_names[0], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_Discover_(DiscoveryService_method_names[1], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_ListAllServices_(DiscoveryService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_State_(DiscoveryService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
{}
// Each RPC gets a blocking call plus Async/PrepareAsync variants; the final
// bool to the reader factory is `start` (true = issue the call immediately).
::grpc::Status DiscoveryService::Stub::RegisterService(::grpc::ClientContext* context, const ::discovery::RegistryEntry& request, ::discovery::RegistryEntry* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_RegisterService_, context, request, response);
}
::grpc::ClientAsyncResponseReader< ::discovery::RegistryEntry>* DiscoveryService::Stub::AsyncRegisterServiceRaw(::grpc::ClientContext* context, const ::discovery::RegistryEntry& request, ::grpc::CompletionQueue* cq) {
return ::grpc::internal::ClientAsyncResponseReaderFactory< ::discovery::RegistryEntry>::Create(channel_.get(), cq, rpcmethod_RegisterService_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::discovery::RegistryEntry>* DiscoveryService::Stub::PrepareAsyncRegisterServiceRaw(::grpc::ClientContext* context, const ::discovery::RegistryEntry& request, ::grpc::CompletionQueue* cq) {
return ::grpc::internal::ClientAsyncResponseReaderFactory< ::discovery::RegistryEntry>::Create(channel_.get(), cq, rpcmethod_RegisterService_, context, request, false);
}
::grpc::Status DiscoveryService::Stub::Discover(::grpc::ClientContext* context, const ::discovery::RegistryEntry& request, ::discovery::RegistryEntry* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Discover_, context, request, response);
}
::grpc::ClientAsyncResponseReader< ::discovery::RegistryEntry>* DiscoveryService::Stub::AsyncDiscoverRaw(::grpc::ClientContext* context, const ::discovery::RegistryEntry& request, ::grpc::CompletionQueue* cq) {
return ::grpc::internal::ClientAsyncResponseReaderFactory< ::discovery::RegistryEntry>::Create(channel_.get(), cq, rpcmethod_Discover_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::discovery::RegistryEntry>* DiscoveryService::Stub::PrepareAsyncDiscoverRaw(::grpc::ClientContext* context, const ::discovery::RegistryEntry& request, ::grpc::CompletionQueue* cq) {
return ::grpc::internal::ClientAsyncResponseReaderFactory< ::discovery::RegistryEntry>::Create(channel_.get(), cq, rpcmethod_Discover_, context, request, false);
}
::grpc::Status DiscoveryService::Stub::ListAllServices(::grpc::ClientContext* context, const ::discovery::Empty& request, ::discovery::ServiceList* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ListAllServices_, context, request, response);
}
::grpc::ClientAsyncResponseReader< ::discovery::ServiceList>* DiscoveryService::Stub::AsyncListAllServicesRaw(::grpc::ClientContext* context, const ::discovery::Empty& request, ::grpc::CompletionQueue* cq) {
return ::grpc::internal::ClientAsyncResponseReaderFactory< ::discovery::ServiceList>::Create(channel_.get(), cq, rpcmethod_ListAllServices_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::discovery::ServiceList>* DiscoveryService::Stub::PrepareAsyncListAllServicesRaw(::grpc::ClientContext* context, const ::discovery::Empty& request, ::grpc::CompletionQueue* cq) {
return ::grpc::internal::ClientAsyncResponseReaderFactory< ::discovery::ServiceList>::Create(channel_.get(), cq, rpcmethod_ListAllServices_, context, request, false);
}
::grpc::Status DiscoveryService::Stub::State(::grpc::ClientContext* context, const ::discovery::StateRequest& request, ::discovery::StateResponse* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_State_, context, request, response);
}
::grpc::ClientAsyncResponseReader< ::discovery::StateResponse>* DiscoveryService::Stub::AsyncStateRaw(::grpc::ClientContext* context, const ::discovery::StateRequest& request, ::grpc::CompletionQueue* cq) {
return ::grpc::internal::ClientAsyncResponseReaderFactory< ::discovery::StateResponse>::Create(channel_.get(), cq, rpcmethod_State_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::discovery::StateResponse>* DiscoveryService::Stub::PrepareAsyncStateRaw(::grpc::ClientContext* context, const ::discovery::StateRequest& request, ::grpc::CompletionQueue* cq) {
return ::grpc::internal::ClientAsyncResponseReaderFactory< ::discovery::StateResponse>::Create(channel_.get(), cq, rpcmethod_State_, context, request, false);
}
// Server side: registers one unary handler per method; applications
// subclass DiscoveryService::Service and override the virtual methods.
DiscoveryService::Service::Service() {
AddMethod(new ::grpc::internal::RpcServiceMethod(
DiscoveryService_method_names[0],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< DiscoveryService::Service, ::discovery::RegistryEntry, ::discovery::RegistryEntry>(
std::mem_fn(&DiscoveryService::Service::RegisterService), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
DiscoveryService_method_names[1],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< DiscoveryService::Service, ::discovery::RegistryEntry, ::discovery::RegistryEntry>(
std::mem_fn(&DiscoveryService::Service::Discover), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
DiscoveryService_method_names[2],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< DiscoveryService::Service, ::discovery::Empty, ::discovery::ServiceList>(
std::mem_fn(&DiscoveryService::Service::ListAllServices), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
DiscoveryService_method_names[3],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< DiscoveryService::Service, ::discovery::StateRequest, ::discovery::StateResponse>(
std::mem_fn(&DiscoveryService::Service::State), this)));
}
DiscoveryService::Service::~Service() {
}
// Default implementations return UNIMPLEMENTED so a partially-overridden
// service still links and responds with a well-defined status.
::grpc::Status DiscoveryService::Service::RegisterService(::grpc::ServerContext* context, const ::discovery::RegistryEntry* request, ::discovery::RegistryEntry* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status DiscoveryService::Service::Discover(::grpc::ServerContext* context, const ::discovery::RegistryEntry* request, ::discovery::RegistryEntry* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status DiscoveryService::Service::ListAllServices(::grpc::ServerContext* context, const ::discovery::Empty* request, ::discovery::ServiceList* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status DiscoveryService::Service::State(::grpc::ServerContext* context, const ::discovery::StateRequest* request, ::discovery::StateResponse* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
} // namespace discovery
|
<reponame>MacCamintosh/Zelda30tribute
/**
* @fileoverview The main script for ace, which at one point stood for
* "adventure construction engine" and at this point just stands for ace.
*
* This file contains global constants and some global helper functions.
*
* @author <NAME> (<EMAIL>)
*/
/**
 * Handy DOM access.
 */
var $ = $ || function(id) { return document.getElementById(id); }
/**
 * A namespace to hang functions from.
 * @namespace
 */
var ace = ace || {};
ace.UNDER_CONSTRUCTION_MESSAGE = ' OH NO! MY DEMO IS\n' +
'SHOWING. THIS DUNGEON\n' +
' IS NOT YET COMPLETE.';
// Keyboard event keyCode values used by the input handling.
ace.KEY_LEFT = 37;
ace.KEY_UP = 38;
ace.KEY_RIGHT = 39;
ace.KEY_DOWN = 40;
ace.KEY_SPACE = 32;
ace.KEY_SHIFT = 16;
ace.KEY_ENTER = 13;
// MouseEvent.button values.
ace.MIDDLE_MOUSE_BUTTON = 1;
ace.LEFT_MOUSE_BUTTON = 0;
// A handy multiplier, like 10 * facingX['left']
ace.xMultByFacing = {
left: -1,
right: 1,
up: 0,
down: 0,
none: 0
};
// A handy multiplier for the y axis, mirroring ace.xMultByFacing.
// (up is +1 / down is -1, i.e. y grows upward in this engine —
// consistent with how ace.getFacing below treats dy.)
ace.yMultByFacing = {
  left: 0,
  right: 0,
  up: 1,
  down: -1,
  // Bug fix: xMultByFacing has a 'none' entry but this map did not, so
  // code indexing both maps by a 'none' facing got 0 on x but undefined
  // (NaN after arithmetic) on y.
  none: 0
};
// Maps a facing string to the facing 90° clockwise from it.
ace.clockwiseByFacing = {
left: 'up',
right: 'down',
up: 'right',
down: 'left'
};
// Maps a facing string to the facing 90° counter-clockwise from it.
ace.counterClockwiseByFacing = {
left: 'down',
right: 'up',
up: 'left',
down: 'right'
};
// Convenience accessor for clockwiseByFacing.
ace.getClockwiseFacing = function(facing) {
return ace.clockwiseByFacing[facing];
};
// Sentinel hitpoint values (not real HP counts).
ace.HITPOINTS_DEAD = -9999;
ace.HITPOINTS_DYING = -8888;
ace.ZINDEX_ALWAYS_BEHIND = 1;
// Each underworld room has 4 potential "doors". These
// constants record which type it is.
ace.OPEN = 0;
ace.WALL = 1;
ace.BOMBABLE = 2;
// World geometry, in tiles and pixels.
ace.TILE_SIZE = 16;
ace.OVERWORLD_ROOM_TILE_WIDTH = 16;
ace.OVERWORLD_ROOM_TILE_HEIGHT = 11;
ace.OVERWORLD_ROOM_PIXEL_WIDTH = ace.TILE_SIZE * ace.OVERWORLD_ROOM_TILE_WIDTH;
ace.OVERWORLD_ROOM_PIXEL_HEIGHT = ace.TILE_SIZE * ace.OVERWORLD_ROOM_TILE_HEIGHT;
ace.UNDERWORLD_ROOM_PIXEL_WIDTH = 256;
ace.UNDERWORLD_ROOM_PIXEL_HEIGHT = 176;
// Normalized light direction vectors (requires the gl-matrix vec3 API).
var vector = [-2, -2, 4];
vec3.normalize(vector, vector);
ace.OVERWORLD_LIGHT_DIRECTION = vector;
var vector2 = [0, -2, 6];
vec3.normalize(vector2, vector2);
ace.UNDERWORLD_LIGHT_DIRECTION = vector2;
ace.LIGHT_MAP_HEIGHT = 512;
ace.LIGHT_MAP_WIDTH = 512;
ace.QUARTER_LIGHT_MAP_WIDTH = ace.LIGHT_MAP_WIDTH / 4;
ace.HALF_LIGHT_MAP_WIDTH = ace.LIGHT_MAP_WIDTH / 2;
ace.DUNGEON_CANVAS_SIZE = 2048;
/**
 * Metadata about items that can be picked up.
 */
ace.selectableItemList = ['boomerang', 'bomb', 'bow', 'candle',
'whistle', 'meat', 'potion', 'wand'];
// Per-sprite item flags; items missing from this map have no flags.
ace.itemInfoBySpriteName = {
'itemwoodensword': {isOneHanded:true},
'bow': {isOneHanded:true},
'boomerang': {isOneHanded:true},
'boomerang_blue': {isOneHanded:true},
'raft': {isOneHanded:true},
'whistle': {isOneHanded:true}
};
/**
 * Converts an angle in degrees to radians.
 * @param {number} deg The angle, in degrees.
 * @return {number} The same angle, in radians.
 */
ace.radians = function(deg) {
  return deg * Math.PI / 180;
};
/**
 * Handy bind function.
 * Binds `fn` to `selfObj` with optional pre-bound arguments, in the style
 * of Closure's goog.bind. Re-binding an already-bound function merges the
 * argument lists via the boundArgs_/boundSelf_/boundFn_ markers it leaves
 * on the returned wrapper.
 * NOTE(review): the fallback context is `goog.global`, which only exists
 * if the Closure library is also loaded — confirm, otherwise a bind with
 * no selfObj will throw.
 */
ace.bind = function(fn, selfObj, var_args) {
var boundArgs = fn.boundArgs_;
if(arguments.length > 2) {
var args = Array.prototype.slice.call(arguments, 2);
if(boundArgs)args.unshift.apply(args, boundArgs);
boundArgs = args
}selfObj = fn.boundSelf_ || selfObj;
fn = fn.boundFn_ || fn;
var newfn;
var context = selfObj || goog.global;
if(boundArgs)newfn = function() {
// Prepend the bound args to whatever the caller passes at call time.
var args = Array.prototype.slice.call(arguments);
args.unshift.apply(args, boundArgs);
return fn.apply(context, args)
};
else newfn = function() {
return fn.apply(context, arguments)
};
// Record binding state so a later re-bind can unwrap this wrapper.
newfn.boundArgs_ = boundArgs;
newfn.boundSelf_ = selfObj;
newfn.boundFn_ = fn;
return newfn
};
/**
 * Handy base function, which allows for sub classing in the style of Google's
 * Closure library.
 * Called from a constructor as ace.base(this, arg1, ...) it invokes the
 * superclass constructor; called from a method as ace.base(this, 'method',
 * arg1, ...) it walks the superClass_ chain (set up by ace.inherits) to
 * invoke the superclass implementation of that method.
 * NOTE: relies on arguments.callee.caller, so it cannot run in ES5 strict
 * mode.
 */
ace.base = function(a, b) {
var c = arguments.callee.caller;
if(c.superClass_) {
// Called from a constructor: delegate to the parent constructor.
return c.superClass_.constructor.apply(a, Array.prototype.slice.call(arguments, 1))
}
// Called from a named method: find the caller on the prototype chain,
// then invoke the next implementation above it.
for(var d = Array.prototype.slice.call(arguments, 2), e = false, f = a.constructor;f;f = f.superClass_ && f.superClass_.constructor) {
if(f.prototype[b] === c) {
e = true
}else {
if(e) {
return f.prototype[b].apply(a, d)
}
}
}
if(a[b] === c) {
return a.constructor.prototype[b].apply(a, d)
}else {
throw Error("ace.base called from a method of one name to a method of a different name");
}
};
/**
 * Handy inherits function, which allows for sub classing in the style of Google's
 * Closure library.
 * Sets up childCtor's prototype chain via a temporary constructor (so the
 * parent constructor is not run) and records superClass_ for ace.base.
 */
ace.inherits = function(childCtor, parentCtor) {
function tempCtor() {
}
tempCtor.prototype = parentCtor.prototype;
childCtor.superClass_ = parentCtor.prototype;
childCtor.prototype = new tempCtor;
// Restore the constructor property clobbered by the prototype swap.
childCtor.prototype.constructor = childCtor
};
/**
 * Loads an image.
 * Returns the Image immediately; once it loads, an `imgData` ImageData
 * member is attached (via an offscreen canvas) so ace.getPixel can read
 * pixels. Until onload fires, img.imgData is undefined.
 * @param {string} url The url to load.
 */
ace.loadImage = function(url) {
var img = new Image();
// TODO(scottlininger): Is this bloating the RAM usage a bunch? Maybe should
// do some additional checking or add a param to control this.
img.onload = function() {
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var context = canvas.getContext('2d');
context.drawImage(img, 0, 0);
img.imgData = context.getImageData(0, 0, canvas.width, canvas.height);
};
img.src = url;
return img;
};
/**
 * Reads a handy pixel struct from an image. Works in conjunction with
 * ace.loadImage, which attaches the imgData member in its onload handler.
 * @param {Image} img The Image.
 * @param {number} x The x position.
 * @param {number} y The y position.
 * @return {Object|boolean} A struct with r, g, b, a and a premade 'r,g,b'
 *     string, or false if the image data hasn't finished loading.
 */
ace.getPixel = function(img, x, y) {
  var imgData = img.imgData;
  // Image not loaded yet (ace.loadImage's onload hasn't run).
  if (!imgData) { return false };
  // ImageData is row-major RGBA, four bytes per pixel.
  var index = (y * imgData.width + x) * 4;
  var r = imgData.data[index];
  var g = imgData.data[index + 1];
  var b = imgData.data[index + 2];
  var a = imgData.data[index + 3];
  // Bug fix: alpha was read but never returned; expose it alongside
  // r/g/b (backward compatible — existing fields unchanged).
  return { r: r, g: g, b: b, a: a, rgb: r + ',' + g + ',' + b };
}
/**
 * How far apart two numbers are on the number line.
 * @param {number} first The first number.
 * @param {number} second The second number.
 * @return {number} The absolute difference.
 */
ace.diff = function(first, second) {
  var delta = first - second;
  return Math.abs(delta);
};
/**
 * Clamps a value into the inclusive range [lo, hi].
 * @param {number} value The number to clamp.
 * @param {number} lo The lower bound.
 * @param {number} hi The upper bound.
 * @return {number} value limited to the range.
 */
ace.clamp = function(value, lo, hi) {
  var capped = Math.min(hi, value);
  return Math.max(lo, capped);
};
/**
 * Gets a facing number from a dx and dy.
 * Returns an 8-direction index 0..7: 0/4 for mostly-vertical movement
 * (0 when dy < 0, 4 when dy > 0), 2/6 for mostly-horizontal, and the odd
 * numbers 1/3/5/7 for the diagonals. Which compass direction each index
 * means depends on the engine's y-axis convention — presumably 0 = up;
 * confirm against the callers.
 * @param {number} dx The diff in x.
 * @param {number} dy The diff in y.
 */
ace.getFacing = function(dx, dy) {
if (dy == 0) {
// Avoid division by zero below; tiny epsilon keeps the sign logic intact.
dy = .00001;
}
var ratio = Math.abs(dx / dy);
// Thresholds .6 / 3.3 carve the circle into vertical / diagonal /
// horizontal sectors (tuning constants, not exact 22.5° boundaries).
if (ratio < .6) {
var facing = (dy < 0) ? 0 : 4;
return facing;
}
if (ratio > 3.3) {
var facing = (dx < 0) ? 6 : 2;
return facing;
}
if (dx > 0 && dy < 0) return 1;
if (dx > 0 && dy > 0) return 3;
if (dx < 0 && dy > 0) return 5;
return 7;
};
/**
 * Euclidean length of the vector (dx, dy).
 * @param {number} deltaX The diff in x.
 * @param {number} deltaY The diff in y.
 * @return {number} sqrt(dx^2 + dy^2).
 */
ace.distance = function(deltaX, deltaY) {
  return Math.sqrt(deltaX * deltaX + deltaY * deltaY);
};
/**
 * Gets a random integer between 0 and max, inclusive.
 * @param {number} max The largest value that can be rolled.
 * @return {number} An integer in [0, max].
 */
ace.randomInt = function(max) {
  var roll = Math.random() * (max + 1);
  return Math.floor(roll);
};
/**
 * An number that will be incremented to give a unique id to those elements
 * who want one. It's the responsibility of the element to increment this
 * when it grabs one.
 * @type {number}
 */
ace.nextId = 1;
/**
 * Returns a random facing string, each of the four directions being
 * equally likely.
 * @return {string} One of 'up', 'right', 'down' or 'left'.
 */
ace.randomFacing = function() {
  // randomInt(1000) is inclusive, so 1000 itself lands in the final bucket.
  var roll = ace.randomInt(1000);
  if (roll < 250) return 'up';
  if (roll < 500) return 'right';
  if (roll < 750) return 'down';
  return 'left';
};
/**
 * A hash of the opposite facing, keyed by facing, of course.
 */
ace.oppositeFacings = {
left: 'right',
right: 'left',
up: 'down',
down: 'up'
};
// Z-axis rotation (radians) for rendering a sprite with the given facing;
// 'down' is the unrotated orientation.
ace.rotZByFacing_ = {
'right': Math.PI / 2,
'left': -Math.PI / 2,
'down': 0,
'up': Math.PI
};
// Accessor for the private rotZByFacing_ table.
ace.getRotZByFacing = function(facing) {
return ace.rotZByFacing_[facing];
};
/**
 * Whether two facings are opposite one another. Useful for bouncing and stuff.
 * @param {string} facing1 The first facing.
 * @param {string} facing2 The other.
 * @return {boolean} Whether they are opposite.
 */
ace.areOppositeFacings = function(facing1, facing2) {
return (ace.oppositeFacings[facing1] == facing2);
};
|
<reponame>bink81/java-experiments
package patterns.factory.simple;
/**
 * Demo client for the simple-factory pattern: obtains two products from
 * the Product factory methods and prints each product's name.
 */
public class SimpleClient {
    public static void main(String[] args) {
        Product[] products = {Product.createProduct1(), Product.createProduct2()};
        for (Product product : products) {
            System.out.println(product.getName());
        }
    }
}
|
<filename>gateway/index.js
const express = require('express');
const app = express();
const path = require("path");
const uuid = require('uuid');
const {Storage} = require('@google-cloud/storage');
const fileUpload = require('express-fileupload');
app.use(express.urlencoded({ extended: true }));
app.use(fileUpload({
useTempFiles : true,
tempFileDir : '/tmp/'
}));
app.use(express.json());
app.use(express.static(path.join(__dirname,'./public')));
const {ExecutionsClient} = require('@google-cloud/workflows');
const client = new ExecutionsClient();
const projectId = process.env.GOOGLE_CLOUD_PROJECT;
const workflow = 'mail';
const storage = new Storage();
const bucketName = 'yarel-license-plate';
// Uploads the local file at fileLocation into the Cloud Storage bucket,
// stored under fileName, and logs the destination on success.
async function uploadImageFile(fileLocation, fileName) {
  const options = {destination: fileName};
  await storage.bucket(bucketName).upload(fileLocation, options);
  console.log(`${fileName} uploaded to ${bucketName}`);
}
// Serve the upload form for interactive use.
app.get('/', function(req,res){
res.sendFile(path.join(__dirname,'./public/index.html'));
});
// Accepts a car photo + destination email, uploads the photo to Cloud
// Storage under a fresh UUID name, then kicks off the 'mail' workflow
// with the upload details as its argument.
app.post('/', async (req, res) => {
  // Bug fix: the missing-email case used to `return console.error(...)`
  // without ever answering the request, leaving the client hanging;
  // reply with a 400 instead. (Also dropped the unused `name` const.)
  if (!req.body.to_email) {
    return res.status(400).send('missing dest email');
  }
  if (!req.files || Object.keys(req.files).length === 0) {
    return res.status(400).send('No files were uploaded.');
  }
  // Bug fix: these were assigned without const/let, making them implicit
  // globals shared (and racy) across concurrent requests.
  const fileLocation = req.files.carPhoto.tempFilePath;
  const imgFileName = uuid.v4() + '.jpg';
  await uploadImageFile(fileLocation, imgFileName).catch(console.error);
  // Execute workflow
  try {
    const createExecutionRes = await client.createExecution({
      execution: {
        argument: JSON.stringify({
          car_photos_bucket_name: bucketName,
          car_photo_file_name: imgFileName,
          to_email: req.body.to_email,
        }),
      },
      parent: client.workflowPath(projectId, 'europe-west4', workflow),
    });
    console.log(`Here: ${createExecutionRes}`);
    const executionName = createExecutionRes[0].name;
    console.log(`Created execution: ${executionName}`);
    res.send(`Request dispatched! Request ID: ${executionName}, Please check your mail!`)
  } catch (e) {
    console.error(`Error executing workflow: ${e} ${e.stack}`);
    res.send(`Error in dispatching request!`)
  }
});
// NOTE(review): this top-level `return` only works because CommonJS wraps
// each module in a function; it also runs after the routes are registered,
// so when the env var is missing the app logs and simply never listens —
// confirm that is the intended failure mode.
if (!projectId)
return console.error('ERROR: GOOGLE_CLOUD_PROJECT is required.');
const port = process.env.PORT || 8080;
app.listen(port, () => {
console.log(`helloworld: listening on port ${port}`);
});
/*
* Copyright 2017-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.incubator.net.virtual.impl;
import com.google.common.collect.Maps;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.onlab.junit.TestUtils;
import org.onlab.osgi.ServiceDirectory;
import org.onlab.osgi.TestServiceDirectory;
import org.onlab.packet.IpAddress;
import org.onosproject.TestApplicationId;
import org.onosproject.cluster.NodeId;
import org.onosproject.common.event.impl.TestEventDispatcher;
import org.onosproject.core.ApplicationId;
import org.onosproject.core.CoreService;
import org.onosproject.event.EventDeliveryService;
import org.onosproject.incubator.net.virtual.NetworkId;
import org.onosproject.incubator.net.virtual.VirtualNetwork;
import org.onosproject.incubator.net.virtual.VirtualNetworkMeterStore;
import org.onosproject.incubator.net.virtual.VirtualNetworkStore;
import org.onosproject.incubator.net.virtual.event.VirtualListenerRegistryManager;
import org.onosproject.incubator.net.virtual.impl.provider.VirtualProviderManager;
import org.onosproject.incubator.net.virtual.provider.AbstractVirtualProvider;
import org.onosproject.incubator.net.virtual.provider.VirtualMeterProvider;
import org.onosproject.incubator.net.virtual.provider.VirtualMeterProviderService;
import org.onosproject.incubator.net.virtual.provider.VirtualProviderRegistryService;
import org.onosproject.incubator.store.virtual.impl.DistributedVirtualNetworkStore;
import org.onosproject.incubator.store.virtual.impl.SimpleVirtualMeterStore;
import org.onosproject.net.DeviceId;
import org.onosproject.net.NetTestTools;
import org.onosproject.net.intent.FakeIntentManager;
import org.onosproject.net.intent.TestableIntentService;
import org.onosproject.net.meter.Band;
import org.onosproject.net.meter.DefaultBand;
import org.onosproject.net.meter.DefaultMeter;
import org.onosproject.net.meter.DefaultMeterFeatures;
import org.onosproject.net.meter.DefaultMeterRequest;
import org.onosproject.net.meter.Meter;
import org.onosproject.net.meter.MeterFeaturesKey;
import org.onosproject.net.meter.MeterId;
import org.onosproject.net.meter.MeterOperation;
import org.onosproject.net.meter.MeterOperations;
import org.onosproject.net.meter.MeterRequest;
import org.onosproject.net.meter.MeterState;
import org.onosproject.net.provider.ProviderId;
import org.onosproject.store.service.StorageService;
import org.onosproject.store.service.TestStorageService;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.*;
/**
* Virtual Network meter manager tests.
*/
public class VirtualNetworkMeterManagerTest extends VirtualNetworkTestUtil {
    // Identity under which the stub meter provider registers itself.
    private static final ProviderId PID = new ProviderId("of", "foo");
    private static final NodeId NID_LOCAL = new NodeId("local");
    private static final IpAddress LOCALHOST = IpAddress.valueOf("127.0.0.1");
    // System under test scaffolding: the virtual network manager and its store.
    private VirtualNetworkManager manager;
    private DistributedVirtualNetworkStore virtualNetworkManagerStore;
    private TestableIntentService intentService = new FakeIntentManager();
    private ServiceDirectory testDirectory;
    private VirtualProviderManager providerRegistryService;
    private EventDeliveryService eventDeliveryService;
    VirtualListenerRegistryManager listenerRegistryManager =
            VirtualListenerRegistryManager.getInstance();
    // Two independent virtual networks so tests can verify meter isolation.
    private VirtualNetwork vnet1;
    private VirtualNetwork vnet2;
    private SimpleVirtualMeterStore meterStore;
    private VirtualNetworkMeterManager meterManager1;
    private VirtualNetworkMeterManager meterManager2;
    private TestProvider provider = new TestProvider();
    private VirtualMeterProviderService providerService1;
    private VirtualMeterProviderService providerService2;
    private ApplicationId appId;
    // Fixtures: one meter per virtual device, plus their builder requests.
    private Meter m1;
    private Meter m2;
    private MeterRequest.Builder m1Request;
    private MeterRequest.Builder m2Request;
    // Meters the stub provider has been asked to install, keyed by meter id.
    private Map<MeterId, Meter> meters = Maps.newHashMap();
    @Before
    public void setUp() throws Exception {
        // Wire a distributed store onto in-memory test core/storage services.
        virtualNetworkManagerStore = new DistributedVirtualNetworkStore();
        CoreService coreService = new TestCoreService();
        TestStorageService storageService = new TestStorageService();
        TestUtils.setField(virtualNetworkManagerStore, "coreService", coreService);
        TestUtils.setField(virtualNetworkManagerStore, "storageService", storageService);
        virtualNetworkManagerStore.activate();
        meterStore = new SimpleVirtualMeterStore();
        providerRegistryService = new VirtualProviderManager();
        providerRegistryService.registerProvider(provider);
        manager = new VirtualNetworkManager();
        manager.store = virtualNetworkManagerStore;
        TestUtils.setField(manager, "coreService", coreService);
        eventDeliveryService = new TestEventDispatcher();
        NetTestTools.injectEventDispatcher(manager, eventDeliveryService);
        // eventDeliveryService.addSink(VirtualEvent.class, listenerRegistryManager);
        appId = new TestApplicationId("MeterManagerTest");
        // Publish every test double through the directory the manager resolves from.
        testDirectory = new TestServiceDirectory()
                .add(VirtualNetworkStore.class, virtualNetworkManagerStore)
                .add(CoreService.class, coreService)
                .add(VirtualProviderRegistryService.class, providerRegistryService)
                .add(EventDeliveryService.class, eventDeliveryService)
                .add(StorageService.class, storageService)
                .add(VirtualNetworkMeterStore.class, meterStore);
        TestUtils.setField(manager, "serviceDirectory", testDirectory);
        manager.activate();
        // Build two separate virtual topologies and a meter manager for each.
        vnet1 = setupVirtualNetworkTopology(manager, TID1);
        vnet2 = setupVirtualNetworkTopology(manager, TID2);
        meterManager1 = new VirtualNetworkMeterManager(manager, vnet1.id());
        meterManager2 = new VirtualNetworkMeterManager(manager, vnet2.id());
        providerService1 = (VirtualMeterProviderService)
                providerRegistryService.getProviderService(vnet1.id(), VirtualMeterProvider.class);
        providerService2 = (VirtualMeterProviderService)
                providerRegistryService.getProviderService(vnet2.id(), VirtualMeterProvider.class);
        assertTrue("provider should be registered",
                providerRegistryService.getProviders().contains(provider.id()));
        setupMeterTestVariables();
    }
    @After
    public void tearDown() {
        providerRegistryService.unregisterProvider(provider);
        assertFalse("provider should not be registered",
                providerRegistryService.getProviders().contains(provider.id()));
        manager.deactivate();
        NetTestTools.injectEventDispatcher(manager, null);
        virtualNetworkManagerStore.deactivate();
    }
    /** Test for meter submit(). */
    @Test
    public void testAddition() {
        meterManager1.submit(m1Request.add());
        assertTrue("The meter was not added",
                meterManager1.getAllMeters().size() == 1);
        assertThat(meterManager1.getMeter(VDID1, MeterId.meterId(1)), is(m1));
        // Meters are scoped per virtual network; vnet2 must stay empty.
        assertTrue("The meter shouldn't be added for vnet2",
                meterManager2.getAllMeters().size() == 0);
    }
    /** Test for meter remove(). */
    @Test
    public void testRemove() {
        meterManager1.submit(m1Request.add());
        meterManager1.withdraw(m1Request.remove(), m1.id());
        // Withdrawal is asynchronous: the meter first goes to PENDING_REMOVE ...
        assertThat(meterManager1.getMeter(VDID1, MeterId.meterId(1)).state(),
                is(MeterState.PENDING_REMOVE));
        // ... and is purged once the provider reports metrics without it.
        providerService1.pushMeterMetrics(m1.deviceId(), Collections.emptyList());
        assertTrue("The meter was not removed", meterManager1.getAllMeters().size() == 0);
        assertTrue("The meter shouldn't be added for vnet2",
                meterManager2.getAllMeters().size() == 0);
    }
    /** Test for meter submit with multiple devices. */
    @Test
    public void testMultipleDevice() {
        meterManager1.submit(m1Request.add());
        meterManager1.submit(m2Request.add());
        assertTrue("The meters were not added",
                meterManager1.getAllMeters().size() == 2);
        assertTrue("The meter shouldn't be added for vnet2",
                meterManager2.getAllMeters().size() == 0);
        assertThat(meterManager1.getMeter(VDID1, MeterId.meterId(1)), is(m1));
        assertThat(meterManager1.getMeter(VDID2, MeterId.meterId(1)), is(m2));
    }
    /** Test for meter features inside store. */
    @Test
    public void testMeterFeatures() {
        // NOTE(review): assertEquals is called as (actual, expected) here,
        // swapped from JUnit's (expected, actual) convention. The comparisons
        // still hold, but failure messages would read backwards.
        //Test for virtual network 1
        assertEquals(meterStore.getMaxMeters(vnet1.id(),
                MeterFeaturesKey.key(VDID1)), 255L);
        assertEquals(meterStore.getMaxMeters(vnet1.id(),
                MeterFeaturesKey.key(VDID2)), 2);
        //Test for virtual network 2
        assertEquals(meterStore.getMaxMeters(vnet2.id(),
                MeterFeaturesKey.key(VDID1)), 100);
        assertEquals(meterStore.getMaxMeters(vnet2.id(),
                MeterFeaturesKey.key(VDID2)), 10);
    }
    /** Set variables such as meters and request required for testing. */
    private void setupMeterTestVariables() {
        // A single DROP band shared by both meters and both requests.
        Band band = DefaultBand.builder()
                .ofType(Band.Type.DROP)
                .withRate(500)
                .build();
        m1 = DefaultMeter.builder()
                .forDevice(VDID1)
                .fromApp(appId)
                .withId(MeterId.meterId(1))
                .withUnit(Meter.Unit.KB_PER_SEC)
                .withBands(Collections.singletonList(band))
                .build();
        m2 = DefaultMeter.builder()
                .forDevice(VDID2)
                .fromApp(appId)
                .withId(MeterId.meterId(1))
                .withUnit(Meter.Unit.KB_PER_SEC)
                .withBands(Collections.singletonList(band))
                .build();
        m1Request = DefaultMeterRequest.builder()
                .forDevice(VDID1)
                .fromApp(appId)
                .withUnit(Meter.Unit.KB_PER_SEC)
                .withBands(Collections.singletonList(band));
        m2Request = DefaultMeterRequest.builder()
                .forDevice(VDID2)
                .fromApp(appId)
                .withUnit(Meter.Unit.KB_PER_SEC)
                .withBands(Collections.singletonList(band));
        // Distinct max-meter limits per (vnet, device) pair so
        // testMeterFeatures can verify per-network independence.
        meterStore.storeMeterFeatures(vnet1.id(),
                DefaultMeterFeatures.builder().forDevice(VDID1)
                        .withMaxMeters(255L)
                        .withBandTypes(new HashSet<>())
                        .withUnits(new HashSet<>())
                        .hasStats(false)
                        .hasBurst(false)
                        .withMaxBands((byte) 0)
                        .withMaxColors((byte) 0)
                        .build());
        meterStore.storeMeterFeatures(vnet1.id(),
                DefaultMeterFeatures.builder().forDevice(VDID2)
                        .withMaxMeters(2)
                        .withBandTypes(new HashSet<>())
                        .withUnits(new HashSet<>())
                        .hasBurst(false)
                        .hasStats(false)
                        .withMaxBands((byte) 0)
                        .withMaxColors((byte) 0)
                        .build());
        meterStore.storeMeterFeatures(vnet2.id(),
                DefaultMeterFeatures.builder().forDevice(VDID1)
                        .withMaxMeters(100L)
                        .withBandTypes(new HashSet<>())
                        .withUnits(new HashSet<>())
                        .hasStats(false)
                        .hasBurst(false)
                        .withMaxBands((byte) 0)
                        .withMaxColors((byte) 0)
                        .build());
        meterStore.storeMeterFeatures(vnet2.id(),
                DefaultMeterFeatures.builder().forDevice(VDID2)
                        .withMaxMeters(10)
                        .withBandTypes(new HashSet<>())
                        .withUnits(new HashSet<>())
                        .hasBurst(false)
                        .hasStats(false)
                        .withMaxBands((byte) 0)
                        .withMaxColors((byte) 0)
                        .build());
    }
    /** Stub provider that records pushed meter operations into {@code meters}. */
    private class TestProvider
            extends AbstractVirtualProvider
            implements VirtualMeterProvider {
        protected TestProvider() {
            super(PID);
        }
        @Override
        public void performMeterOperation(NetworkId networkId, DeviceId deviceId,
                                          MeterOperations meterOps) {
            // No-op: batch operations are not exercised by these tests.
        }
        @Override
        public void performMeterOperation(NetworkId networkId, DeviceId deviceId,
                                          MeterOperation meterOp) {
            meters.put(meterOp.meter().id(), meterOp.meter());
        }
    }
}
import React from 'react';
import { TableBody } from './table.body';
import { TableBodyRow, IRow } from './table.body.row';
import { TableHead } from './table.head';
import { TableHeadItem } from './table.head.item';
import { TableHeader, TAddiotinalLabel } from './table.header';
import { TableResponsiveContainer } from './table.responsive';
/** Column descriptor: `key` selects the row field, `label` is the header text. */
export interface ITableHead {
  key: string;
  // When omitted or empty, the column is rendered as an action column.
  label?: string;
}
/** Props for the composite Table component. */
interface IProps {
  // NOTE(review): spelling mirrors TAddiotinalLabel exported by './table.header'.
  addiotinalLabel?: TAddiotinalLabel;
  label: string;
  rows: IRow[];
  tableHeads: ITableHead[];
  // Name of the row field used as the unique React key for each row.
  dataKeyColumn: string;
}
/**
 * Renders a labelled, responsive table: header row from `tableHeads`, one
 * body row per entry in `rows`. Heads without a (non-empty) label are
 * treated as action columns.
 */
export const Table = ({
  addiotinalLabel,
  label,
  rows,
  tableHeads,
  dataKeyColumn,
}: IProps): JSX.Element => {
  // A column is an "action" column when its header has no visible label.
  const isActionColumn = ({ label: headLabel }: ITableHead): boolean =>
    typeof headLabel === 'undefined' || headLabel.length === 0;

  // Collect all column keys and the action-column subset in a single pass.
  const tableKeys: string[] = [];
  const actionKeys: string[] = [];
  for (const head of tableHeads) {
    tableKeys.push(head.key);
    if (isActionColumn(head)) {
      actionKeys.push(head.key);
    }
  }

  return (
    <>
      <TableHeader label={label} addiotinalLabel={addiotinalLabel} />
      <TableResponsiveContainer>
        <TableHead>
          {tableHeads.map(({ key, label: headLabel }) => (
            <TableHeadItem key={key} isAction={actionKeys.includes(key)}>
              {headLabel}
            </TableHeadItem>
          ))}
        </TableHead>
        <TableBody>
          {rows.map((row) => (
            <TableBodyRow
              key={row[dataKeyColumn] as string}
              keys={tableKeys}
              row={row}
              actionKeys={actionKeys}
              dataKeyColumn={dataKeyColumn}
            />
          ))}
        </TableBody>
      </TableResponsiveContainer>
    </>
  );
};
|
#!/bin/bash
####################################################################################################
#
# FILENAME: rebuild-scratch-org
#
# PURPOSE: Deletes then recreates a scratch org based on the SFDX source in this project.
#
# DESCRIPTION:  Executing this script will first delete the existing default scratch org for
# this project (if it exists), then create a new one using the source and config
# information defined in your dev-tools/lib/local-config.sh file.
#
# INSTRUCTIONS: Execute the following command from the root of your SFDX project directory.
# ./dev-tools/rebuild-scratch-org
#
####################################################################################################
#
##
###
#### LOAD SHARED FUNCTIONS LIBRARY #################################################################
###
##
#
# Make sure that the shared-functions.sh script exists.
if [ ! -r `dirname $0`/lib/shared-functions.sh ]; then
echo "\nFATAL ERROR: `tput sgr0``tput setaf 1`Could not load dev-tools/lib/shared-functions.sh. File not found.\n"
exit 1
fi
# Indicate the operation being carried out by this script.
REQUESTED_OPERATION="REBUILD_SCRATCH_ORG"
# Load the shared-functions.sh library. This action will also cause the
# config variables from dev-tools/lib/local-config.sh to be loaded.
source `dirname $0`/lib/shared-functions.sh
#
##
###
#### CONFIRM SCRIPT EXECUTION ######################################################################
###
##
#
confirmScriptExecution "Do you want to rebuild your scratch org?"
#
##
###
#### CREATE LOCAL VARIABLES ########################################################################
###
##
#
# The default version of this script does not require additional local
# variables. If your customized script does require them, this is where
# you would define and initialize them.
#
##
###
#### FUNCTION: assignPermset () ####################################################################
###
##
#
# Assigns the permission set named in $1 to the scratch org's Admin user.
# Aborts the whole script if the assignment fails.
assignPermset () {
  # Announce the step and echo the exact CLI invocation for the log.
  echoStepMsg "Assign the $1 permission set to the scratch org's Admin user"
  echo \
  "Executing force:user:permset:assign \\
    --permsetname "$1" \\
    --targetusername $SCRATCH_ORG_ALIAS \\
    --loglevel error\n"
  (cd $PROJECT_ROOT && exec sfdx force:user:permset:assign \
      --permsetname "$1" \
      --targetusername $SCRATCH_ORG_ALIAS \
      --loglevel error)
  if [ $? -ne 0 ]; then
    echoErrorMsg "Permission set \"$1\" could not be assigned to the admin user. Aborting Script."
    exit 1
  fi
}
#
##
###
#### FUNCTION: createScratchOrg () #################################################################
###
##
#
# Creates a fresh scratch org from the project's scratch-def.json, marks it
# as the default username (-s), with a 29-day duration (-d 29).
# Aborts the whole script on failure.
createScratchOrg() {
  echoStepMsg "Create a new $SCRATCH_ORG_ALIAS scratch org"
  echo "Executing force:org:create -f $SCRATCH_ORG_CONFIG -a $SCRATCH_ORG_ALIAS -v $DEV_HUB_ALIAS -s -d 29"
  (cd $PROJECT_ROOT && exec sfdx force:org:create -f $SCRATCH_ORG_CONFIG -a $SCRATCH_ORG_ALIAS -v $DEV_HUB_ALIAS -s -d 29)
  if [ $? -ne 0 ]; then
    echoErrorMsg "Scratch org could not be created. Aborting Script."
    exit 1
  fi
}
#
##
###
#### FUNCTION: deleteScratchOrg () #################################################################
###
##
#
# Deletes the current scratch org without prompting (-p). Intentionally has
# no error check: the org may simply not exist yet on a first run.
deleteScratchOrg () {
  echoStepMsg "Delete the $SCRATCH_ORG_ALIAS scratch org"
  echo "Executing force:org:delete -p -u $SCRATCH_ORG_ALIAS -v $DEV_HUB_ALIAS"
  (cd $PROJECT_ROOT && exec sfdx force:org:delete -p -u $SCRATCH_ORG_ALIAS -v $DEV_HUB_ALIAS )
}
#
##
###
#### FUNCTION: importData () #######################################################################
###
##
#
# Imports a data-tree plan file ($1) into the scratch org.
# Aborts the whole script if the import fails.
importData () {
  echoStepMsg "Import data from $1"
  # Echo the exact CLI invocation for the log.
  # BUGFIX: the echoed string previously ended with a stray ")" after
  # "--loglevel error", which did not match the command actually run.
  echo \
  "Executing force:data:tree:import \\
    --plan \"$1\" \\
    --targetusername $SCRATCH_ORG_ALIAS \\
    --loglevel error\n"
  (cd $PROJECT_ROOT && exec sfdx force:data:tree:import \
      --plan "$1" \
      --targetusername $SCRATCH_ORG_ALIAS \
      --loglevel error)
  if [ $? -ne 0 ]; then
    echoErrorMsg "Data import failed. Aborting Script."
    exit 1
  fi
}
#
##
###
#### FUNCTION: installPackage () ###################################################################
###
##
#
# Installs package $1 into the scratch org. $2 is the human-readable package
# name used in error output; $3 is the step description shown to the user.
# Prints the wall-clock duration of the installation.
installPackage () {
  # Describe, for the user, which package is being installed.
  echoStepMsg "$3"

  # Announce the CLI call and the start time (HH:MM:SS), then start the clock.
  echo "Executing force:package:install -i $1 -p 5 -w 10 -u $SCRATCH_ORG_ALIAS"
  echo "\n`tput bold`Package installation started at `date +%T``tput sgr0`\n"
  local installStart=$(date +%s)

  # Perform the installation; any failure aborts the whole rebuild.
  (cd $PROJECT_ROOT && exec sfdx force:package:install -i $1 -p 5 -w 10 -u $SCRATCH_ORG_ALIAS)
  if [ $? -ne 0 ]; then
    echoErrorMsg "$2 could not be installed. Aborting Script."
    exit 1
  fi

  # Announce the completion time and report the total duration in seconds.
  echo "\n`tput bold`Package installation completed at `date +%T``tput sgr0`"
  local installEnd=$(date +%s)
  local totalRuntime=$((installEnd-installStart))
  echo "Total runtime for package installation was $totalRuntime seconds."
}
#
##
###
#### FUNCTION: pushMetadata () #####################################################################
###
##
#
# Pushes the project's SFDX source to the new scratch org.
# Aborts the whole script if the push fails.
pushMetadata () {
  echoStepMsg "Push metadata to the new scratch org"
  echo "Executing force:source:push -u $SCRATCH_ORG_ALIAS"
  (cd $PROJECT_ROOT && exec sfdx force:source:push -u $SCRATCH_ORG_ALIAS)
  if [ $? -ne 0 ]; then
    echoErrorMsg "SFDX source could not be pushed to the scratch org. Aborting Script."
    exit 1
  fi
}
#
##
###
#### FUNCTION: validateScratchOrgDeletion () #######################################################
###
##
#
# Prints the SFDX local config so the user can visually confirm the deleted
# scratch org is no longer the project's default username.
validateScratchOrgDeletion () {
  echoStepMsg "Validate deletion of the scratch org"
  echo "Executing sfdx force:config:list to confirm deletion from project local config"
  (cd $PROJECT_ROOT && exec sfdx force:config:list)
}
#
##
###
#### FUNCTION: cleanup () #######################################################
###
##
#
# Removes the generated data.out directories (in ./temp and at the project
# root) left behind by the data-preparation step, if they exist.
cleanup () {
  echoStepMsg "Delete Temp files"
  if [ -d $PROJECT_ROOT/temp/data.out ]; then
    rm -R $PROJECT_ROOT/temp/data.out
  fi
  if [ -d $PROJECT_ROOT/data.out ]; then
    rm -R $PROJECT_ROOT/data.out
  fi
}
#
##
###
#### FUNCTION: prepareDataImport () #####################################################################
###
##
#
# Prepares the data import files: runs the wry:file:replace transform over
# ./data and moves the generated data.out folder into ./temp for the
# subsequent importData calls.
prepareDataImport () {
  echoStepMsg "Prepare data for import."
  cd $PROJECT_ROOT
  sfdx wry:file:replace -u $SCRATCH_ORG_ALIAS -i data
  # NOTE(review): assumes $PROJECT_ROOT/temp already exists and that the
  # plugin wrote ./data.out — neither is checked here. TODO confirm.
  mv data.out $PROJECT_ROOT/temp/
}
#
##
###
#### SCRATCH ORG SETUP (DELETE/CREATE/PUSH) ########################################################
###
##
#
# Reset the Step Message counter to reflect the number of TOTAL STEPS
# in this rebuild process (this customized script has 11 steps).
resetStepMsgCounter 11

# Delete the current scratch org.
deleteScratchOrg

# Create a new scratch org using the scratch-def.json locally configured for this project.
createScratchOrg

# Install any packages (managed or unmanaged).
# Template for calling this function:
#installPackage 04ti0000000TzXd "plantuml4force" "PlantUml"

# Assign any permission sets that were added by installed packages.
# Template for calling this function:
# assignPermset #PACKAGED_PERMSET_NAME#

# Push metadata to the new Scratch Org.
pushMetadata

# Assign any permission sets that were added by your Source Push.
assignPermset ExpenseManager
assignPermset Traveler

# Clean project before importing data.
cleanup

# Import data used during development. You may need to make multiple calls.
prepareDataImport
importData "$PROJECT_ROOT/data/CurrencyType-plan.json"
importData "$PROJECT_ROOT/data/UP2GO_ITE__CustomSettings__c-plan.json"
importData "$PROJECT_ROOT/temp/data.out/UP2GO_ITE__CompensationRate__c-plan.json"

# Clean project again to remove the generated temp data.
cleanup

# Adjust Admin user
sfdx force:data:record:update -s User -w "Name='User User'" -v "DefaultCurrencyIsoCode=EUR" -u $SCRATCH_ORG_ALIAS

# Run all tests.
# BUGFIX: this previously used `exec run-all-tests.sh`, which replaces the
# current shell process — nothing after that line (opening the org, the
# closing message, `exit 0`) ever ran. Invoke it as a normal command instead.
run-all-tests.sh

# Open scratch org (created with -s above, so it is the default username).
sfdx force:org:open
#
##
###
#### ECHO CLOSING MESSAGE ##########################################################################
###
##
#
echoScriptCompleteMsg \
"Rebuild of scratch org $SCRATCH_ORG_ALIAS completed successfully."

exit 0
##END##
/// Routes the tapped menu entry to the matching presenter callback.
///
/// - Parameters:
///   - menuCell: The menu option the user selected.
///   - presenter: The presenter that performs the corresponding action.
func handleUserSelection(menuCell: MenuOption, presenter: MenuPresenter) {
    // Exhaustive switch (no default): adding a new MenuOption case will fail
    // to compile until it is handled here.
    switch menuCell {
    case .addressBook:
        presenter.userSelectedAddressBook()
    case .import_:
        presenter.userSelectedImport()
    case .export:
        presenter.userSelectedExport()
    case .update:
        presenter.userSelectedUpdate()
    case .about:
        presenter.userSelectedAbout()
    case .validate:
        presenter.userSelectedValidate()
    }
}
// Source repository: lgoldstein/communitychest
/*
*
*/
package net.community.chest.jms.framework;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.MessageListener;
/**
* <P>Copyright 2010 as per GPLv2</P>
*
* @author <NAME>.
* @since Jun 8, 2010 1:49:06 PM
*/
/**
 * Skeletal {@link XMessageConsumer} implementation that stores the message
 * selector and listener in instance fields and expresses both no-argument
 * receive variants in terms of the abstract timed {@code receive(long)}.
 */
public abstract class AbstractMessageConsumer implements XMessageConsumer {
    /** Currently configured JMS message selector expression (may be {@code null}). */
    private String selector;
    /** Currently configured asynchronous message listener (may be {@code null}). */
    private MessageListener listener;

    protected AbstractMessageConsumer () {
        super();
    }

    /*
     * @see javax.jms.MessageConsumer#getMessageSelector()
     */
    @Override
    public String getMessageSelector () throws JMSException {
        return selector;
    }

    /*
     * @see net.community.chest.jms.framework.XMessageConsumer#setMessageSelector(java.lang.String)
     */
    @Override
    public void setMessageSelector (String msgSelector) throws JMSException {
        selector = msgSelector;
    }

    /*
     * @see javax.jms.MessageConsumer#getMessageListener()
     */
    @Override
    public MessageListener getMessageListener () throws JMSException {
        return listener;
    }

    /*
     * @see javax.jms.MessageConsumer#setMessageListener(javax.jms.MessageListener)
     */
    @Override
    public void setMessageListener (MessageListener msgListener) throws JMSException {
        listener = msgListener;
    }

    /*
     * @see javax.jms.MessageConsumer#receive()
     */
    @Override
    public Message receive () throws JMSException {
        // Approximate "block forever" with the largest representable timeout.
        return receive(Long.MAX_VALUE);
    }

    /*
     * @see javax.jms.MessageConsumer#receiveNoWait()
     */
    @Override
    public Message receiveNoWait () throws JMSException {
        // NOTE(review): delegates with timeout 0. In the standard JMS API a
        // zero timeout on receive(long) means "block indefinitely", so
        // subclasses must interpret 0 as "no wait" for this to honor
        // receiveNoWait semantics. TODO confirm against subclasses.
        return receive(0);
    }
}
|
#!/usr/bin/bash
# Create a Python 3.4 virtualenv in ./venv34 and install this package into it,
# building pycurl against OpenSSL with tools from MacPorts (/opt/local/bin).

# Fail fast if any step errors (previously a failed virtualenv creation would
# not stop the install step from running).
set -e

VENV="$PWD/venv34"

# Quote all paths so the script also works when $PWD contains spaces.
virtualenv-3.4 "$VENV"

# PYCURL_SSL_LIBRARY tells pycurl's setup.py which SSL backend to link against.
PATH="/opt/local/bin:$PATH" PYCURL_SSL_LIBRARY=openssl "$VENV/bin/python" setup.py install
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D
from tensorflow.keras import Model
# Load MNIST dataset (60k train / 10k test grayscale digit images).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Scale pixel intensities from [0, 255] down to [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0
# Reshape images to (28, 28, 1) so Conv2D receives an explicit channel axis.
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
# Build convolutional neural network
class CNN(Model):
    """Minimal convolutional classifier for (28, 28, 1) MNIST digits."""

    def __init__(self):
        super(CNN, self).__init__()
        # One conv stage followed by a small fully-connected head.
        # (Attribute names are kept: Keras tracks layers/weights by them.)
        self.conv1 = Conv2D(32, (3, 3), activation='relu')
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):
        """Forward pass: conv -> flatten -> dense(128) -> 10-way softmax."""
        features = self.d1(self.flatten(self.conv1(x)))
        return self.d2(features)
# Build and compile model.
model = CNN()
model.compile(optimizer='adam',
              # sparse_* loss: labels are integer class ids, not one-hot.
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train model on the full training set.
model.fit(x_train, y_train, epochs=10)
# Evaluate on the held-out test set.
model.evaluate(x_test, y_test)
import {
Component,
resolveComponent as _resolveComponent,
} from '@vue/runtime-core'
import { ActionBar, BottomNavigation, isKnownView, Tabs } from '.'
/**
 * Resolves a tag name to either the tag string (known native views), one of
 * the hardcoded built-in components, or whatever the runtime resolver finds.
 */
export function resolveComponent(name: string): Component | string | undefined {
  // In the standalone compiler everything is compiled as a component, because
  // elements are only registered at runtime (via registerElement). Returning
  // the plain string makes vue render the tag as a normal element with the
  // default slot as its children.
  if (isKnownView(name)) {
    return name
  }
  // todo: refactor to not have to hardcode all built-in components
  switch (name) {
    case 'ActionBar':
      return ActionBar
    case 'Tabs':
      return Tabs
    case 'BottomNavigation':
      return BottomNavigation
  }
  return _resolveComponent(name)
}
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2483-1
#
# Security announcement date: 2015-01-26 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:13 UTC
#
# Operating System: Ubuntu 14.10
# Architecture: i686
#
# Vulnerable packages fix on version:
# - libjasper1:1.900.1-debian1-2ubuntu0.2
#
# Last versions recommended by security team:
# - libjasper1:1.900.1-debian1-2ubuntu0.2
#
# CVE List:
# - CVE-2014-8137
# - CVE-2014-8138
# - CVE-2014-8157
# - CVE-2014-8158
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libjasper1=1.900.1-debian1-2ubuntu0.2 -y
|
import base64
from typing import Generator, Iterator


def stream_decode_response_base64(
    stream: Iterator[bytes],
) -> Generator[bytes, None, None]:
    """Lazily base64-decode a stream of byte chunks.

    Base64 maps 3 input bytes to 4 output characters, so a chunk can only be
    decoded on its own when its length is a multiple of 4. Any trailing
    remainder is buffered and prepended to the next chunk.

    BUGFIXES vs. the original:
    - ``from typing import ... bytes`` raised ImportError (``bytes`` is a
      builtin, not a typing export).
    - ``partial_chunk_buffer`` was checked but never filled, and every chunk
      was padded with ``=`` and decoded independently, corrupting the output
      whenever a chunk boundary fell inside a 4-character base64 quantum.

    Yields:
        The decoded bytes for each decodable span of the input.
    """
    partial_chunk_buffer = bytearray()
    for chunk in stream:
        if partial_chunk_buffer:
            # Prepend the undecoded remainder carried over from the last chunk.
            chunk = bytes(partial_chunk_buffer) + chunk
            partial_chunk_buffer.clear()
        remainder_len = len(chunk) % 4
        if remainder_len:
            # Hold back the incomplete base64 quantum for the next iteration.
            partial_chunk_buffer.extend(chunk[-remainder_len:])
            chunk = chunk[:-remainder_len]
        if chunk:
            yield base64.b64decode(chunk)
    if partial_chunk_buffer:
        # Stream ended mid-quantum: pad and decode best-effort, matching the
        # original implementation's padding behavior for truncated input.
        padding = b"=" * (-len(partial_chunk_buffer) % 4)
        yield base64.b64decode(bytes(partial_chunk_buffer) + padding)
package edu.unitn.pbam.androidproject.utilities;
import static edu.unitn.pbam.androidproject.utilities.Constants.DOCTYPE_BOOK;
import static edu.unitn.pbam.androidproject.utilities.Constants.DOCTYPE_MOVIE;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.json.JSONException;
import org.json.JSONObject;
import android.app.Activity;
import android.app.ProgressDialog;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.os.Bundle;
import android.util.Log;
import android.widget.Toast;
import com.facebook.FacebookRequestError;
import com.facebook.HttpMethod;
import com.facebook.Request;
import com.facebook.RequestAsyncTask;
import com.facebook.RequestBatch;
import com.facebook.Response;
import com.facebook.Session;
import com.facebook.SessionState;
import edu.unitn.pbam.androidproject.R;
import edu.unitn.pbam.androidproject.model.DList;
import edu.unitn.pbam.androidproject.model.Document;
import edu.unitn.pbam.androidproject.model.Movie;
public class Facebook {
    private final static String TAG = "Facebook";
    // When true, the document's cover image is staged on Facebook first and the
    // Open Graph object references the staged upload instead of a remote URL.
    private static final boolean UPLOAD_IMAGE = true;
    private static final String URL_SITE = "http://androidbooksandmovies.appspot.com/";
    // Publishing to a user's timeline requires this extended permission.
    private static final List<String> PERMISSIONS = Arrays
            .asList("publish_actions");
    private static Activity activity;
    // True while waiting for the user to grant publish permissions; the pending
    // document is re-shared from the session callback once the grant completes.
    private static boolean pendingPublishReauthorization = false;
    private static Document pendingDoc;
    private static int docType;
    private static ProgressDialog progressDialog;
    /** Session status callback: retries the pending share once the session opens. */
    private static Session.StatusCallback callback = new Session.StatusCallback() {
        @Override
        public void call(Session session, SessionState state,
                Exception exception) {
            Log.i(TAG, "callback");
            Log.i("Session state= ", state.toString());
            if (exception == null) {
                if (pendingPublishReauthorization && session.isOpened()){
                    //&& state == SessionState.OPENED)
                    shareDocument(pendingDoc, activity);
                }
            } else {
                Log.i(TAG, "call of Session.StatusCallback with exception "
                        + exception.getMessage());
            }
        }
    };
    /** Graph request callback: toasts success or failure of a plain feed post. */
    private static Request.Callback reqCallback = new Request.Callback() {
        public void onCompleted(Response response) {
            if (response != null) {
                FacebookRequestError error = response.getError();
                if (error != null) {
                    Log.e("onCompleted", error.getErrorMessage());
                    Toast.makeText(
                            activity.getApplicationContext(),
                            App.getAppContext().getResources()
                                    .getString(R.string.error),
                            Toast.LENGTH_LONG).show();
                } else {
                    Log.i("onCompleted", "OK");
                    Toast.makeText(
                            activity.getApplicationContext(),
                            App.getAppContext().getResources()
                                    .getString(R.string.doc_shared),
                            Toast.LENGTH_LONG).show();
                }
            } else {
                Toast.makeText(
                        activity.getApplicationContext(),
                        App.getAppContext().getResources()
                                .getString(R.string.error), Toast.LENGTH_LONG)
                        .show();
                Log.e("onCompleted", "null response");
            }
        }
    };
    /**
     * Shares the given document on the user's Facebook timeline.
     *
     * Requires an open session holding the "publish_actions" permission; when
     * the session is missing, closed, or lacks the permission, the share is
     * parked in {@code pendingDoc} and retried from the session status
     * callback after (re)authorization completes.
     */
    public static void shareDocument(Document doc, Activity activity) {
        Facebook.activity = activity;
        /*
         * if (pendingPublishReauthorization){ Toast.makeText(
         * activity.getApplicationContext(), App.getAppContext().getResources()
         * .getString(R.string.pending_share), Toast.LENGTH_SHORT).show();
         * return; }
         */
        // Bail out early when offline; the share would fail anyway.
        if (!Utils.isNetworkAvailable()) {
            Toast.makeText(
                    activity.getApplicationContext(),
                    App.getAppContext().getResources()
                            .getString(R.string.no_connetions),
                    Toast.LENGTH_SHORT).show();
            return;
        }
        Session session = Session.getActiveSession();
        if (session != null && session.getState().isOpened()) {
            List<String> permissions = session.getPermissions();
            if (!Utils.isSubsetOf(PERMISSIONS, permissions)) {
                // Session is open but missing publish rights: request them and
                // defer the share to the status callback.
                Session.NewPermissionsRequest newPermissionsRequest = new Session.NewPermissionsRequest(
                        activity, PERMISSIONS);
                pendingPublishReauthorization = true;
                pendingDoc = doc;
                session.requestNewPublishPermissions(newPermissionsRequest);
                Log.i("sessionCheck", "requestNewPublishPermissions");
                return;
            }
            Log.i("sessionCheck", "cofirm OPENED e PERMISSIONS");
            // publishDocAsMessage(doc);
            publishDoc(doc);
        } else {
            // No usable session: open one and let the callback retry the share.
            Log.i("sessionCheck", "NULL or notOpened session");
            pendingPublishReauthorization = true;
            pendingDoc = doc;
            Session.openActiveSession(activity, true, callback);
            /*
             * if (session == null) { Log.i("sessionCheck",
             * "NULL session, openActiveSession"); pendingPublishReauthorization
             * = true; pendingDoc = doc; Session.openActiveSession(activity,
             * true, callback); } else { Log.i("sessionCheck",
             * "notOpenedSession, openForRead"); pendingPublishReauthorization =
             * true; pendingDoc = doc; session.openForRead(new
             * Session.OpenRequest(activity) .setCallback(callback)); //
             * session.openForPublish(new Session.OpenRequest(activity). //
             * setCallback(callback).setRequestCode(100)); }
             */
        }
    }
    /**
     * Posts the document's string form to the user's feed as a plain message
     * (asynchronously), then clears the pending-reauthorization flag.
     */
    public static void publishDocAsMessage(Document doc) {
        Session session = Session.getActiveSession();
        Bundle postParams = new Bundle();
        postParams.putString("message", doc.toString());
        Request request = new Request(session, "me/feed", postParams,
                HttpMethod.POST, reqCallback);
        RequestAsyncTask task = new RequestAsyncTask(request);
        task.execute();
        pendingPublishReauthorization = false;
    }
/*
* Request: Staging image upload request If uploading an image, set up the
* first batch request to do this.
*/
    /**
     * Builds the batch request that stages the document's cover image on
     * Facebook (falling back to the default poster when no cover is set).
     * The batch entry name "imageUpload" lets the follow-on object request
     * reference the staged image's URI.
     */
    private static Request getImageRequest(Document doc) {
        Bundle imageParams = new Bundle();
        Bitmap image;
        if (doc.getCover() == null) {
            image = BitmapFactory.decodeResource(activity.getResources(),
                    R.drawable.poster_default);
        } else
            image = doc.getCover().getImage().getBitmap();
        imageParams.putParcelable("file", image);
        // Only failures are surfaced; success is handled by the later requests.
        Request.Callback imageCallback = new Request.Callback() {
            @Override
            public void onCompleted(Response response) {
                FacebookRequestError error = response.getError();
                if (error != null) {
                    progressDialog.dismiss();
                    Log.i(TAG, error.getErrorMessage());
                    Toast.makeText(
                            activity.getApplicationContext(),
                            App.getAppContext().getResources()
                                    .getString(R.string.error),
                            Toast.LENGTH_LONG).show();
                }
            }
        };
        Request imageRequest = new Request(Session.getActiveSession(),
                "me/staging_resources", imageParams, HttpMethod.POST,
                imageCallback);
        imageRequest.setBatchEntryName("imageUpload");
        return imageRequest;
    }
/*
* Request: Object request
*/
    /**
     * Builds the Open Graph object-creation request for the document
     * (video.movie or books.book depending on {@code docType}). Named
     * "objectCreate" in the batch so the follow-on action request can refer
     * to the created object's id. Returns {@code null} if assembling the
     * object JSON fails.
     */
    private static Request getObjectRequest(Document doc) {
        String graphPath;
        String url;
        // Build the public URL for this document on the companion web site.
        if (docType == DOCTYPE_MOVIE) {
            String urlInfo = doc.getUrlinfo();
            // Strip the trailing slash, then keep only the last path segment.
            String title = urlInfo.substring(0, urlInfo.length() - 1);
            int slashIndex = title.lastIndexOf("/");
            url = URL_SITE + "movies/"
                    + title.subSequence(slashIndex + 1, title.length());
        } else {
            url = URL_SITE + "books/?id=" + doc.getCode();
        }
        try {
            // Set up the JSON representing the document
            JSONObject document = new JSONObject();
            if (UPLOAD_IMAGE) {
                // "uri" result refers to the previous ("imageUpload") batch request
                document.put("image", "{result=imageUpload:$.uri}");
            } else {
                document.put("image", doc.getCover().getRemoteUrl());
            }
            document.put("title", doc.getTitle());
            document.put("url", url);
            document.put("description", doc.getDescription());
            JSONObject data = new JSONObject();
            // NOTE(review): the same hard-coded ISBN is sent for every
            // document — looks like placeholder/test data. TODO confirm.
            data.put("isbn", "0-553-57340-3");
            document.put("data", data);
            if (docType == DOCTYPE_MOVIE) {
                graphPath = "me/objects/video.movie";
            } else {
                graphPath = "me/objects/books.book";
            }
            // Set up object request parameters
            Bundle objectParams = new Bundle();
            objectParams.putString("object", document.toString());
            // Only failures are surfaced here; success is handled by the
            // follow-on action request's callback.
            Request.Callback objectCallback = new Request.Callback() {
                @Override
                public void onCompleted(Response response) {
                    if (response != null) {
                        Log.d(TAG, response.toString());
                    }
                    FacebookRequestError error = response.getError();
                    if (error != null) {
                        progressDialog.dismiss();// dismissProgressDialog();
                        Log.i(TAG, error.getErrorMessage());
                        Toast.makeText(
                                activity.getApplicationContext(),
                                App.getAppContext().getResources()
                                        .getString(R.string.error),
                                Toast.LENGTH_LONG).show();
                    }
                }
            };
            Request objectRequest = new Request(Session.getActiveSession(),
                    graphPath, objectParams, HttpMethod.POST, objectCallback);
            // Set the batch name so you can refer to the result
            // in the follow-on publish action request
            objectRequest.setBatchEntryName("objectCreate");
            return objectRequest;
        } catch (JSONException e) {
            Log.i(TAG, "JSON error " + e.getMessage());
            progressDialog.dismiss();
            Toast.makeText(
                    activity.getApplicationContext(),
                    App.getAppContext().getResources()
                            .getString(R.string.error), Toast.LENGTH_LONG)
                    .show();
        }
        return null;
    }
/**
 * Builds the Open Graph action request (wants-to-watch/read or rates) for
 * the given document. References the object created by the "objectCreate"
 * batch entry via {result=objectCreate:$.id}.
 */
private static Request getActionRequest(Document doc) {
    String graphPath;
    // Refer to the "id" in the result from the previous batch request
    Bundle actionParams = new Bundle();
    if (docType == DOCTYPE_MOVIE) {
        actionParams.putString("movie", "{result=objectCreate:$.id}");
    } else {
        actionParams.putString("book", "{result=objectCreate:$.id}");
    }
    actionParams.putString("fb:explicitly_shared", "true");
    Request.Callback actionCallback = new Request.Callback() {
        @Override
        public void onCompleted(Response response) {
            // FIX: guard against a null response before dereferencing it,
            // consistent with the object-request callback.
            if (response == null) {
                return;
            }
            FacebookRequestError error = response.getError();
            if (error != null) {
                progressDialog.dismiss();
                Log.e(TAG, error.getErrorMessage());
                Toast.makeText(
                        activity.getApplicationContext(),
                        App.getAppContext().getResources()
                                .getString(R.string.error),
                        Toast.LENGTH_LONG).show();
            } else {
                String actionId = null;
                try {
                    JSONObject graphResponse = response.getGraphObject()
                            .getInnerJSONObject();
                    actionId = graphResponse.getString("id");
                } catch (JSONException e) {
                    Log.i(TAG, "JSON error " + e.getMessage());
                }
                Log.i(TAG, "id=" + actionId);
                progressDialog.dismiss();
                Toast.makeText(
                        activity.getApplicationContext(),
                        App.getAppContext().getResources()
                                .getString(R.string.doc_shared),
                        Toast.LENGTH_LONG).show();
            }
        }
    };
    // Collect the ids of the lists this document belongs to, so we can tell
    // "to watch/read" apart from "already rated".
    List<DList> lists = doc.getLists();
    Set<Integer> listsID = new HashSet<Integer>();
    for (DList l : lists) {
        listsID.add((int) l.getId());
    }
    if (docType == DOCTYPE_MOVIE) {
        Log.i("isInstanceof", "movie.class");
        if (listsID.contains(Constants.DLIST_TOWATCH)) {
            graphPath = "me/video.wants_to_watch";
            Log.d(TAG, "toWatch");
        } else {
            double normValue = (doc.getRating() / 100.);
            actionParams.putDouble("value", doc.getRating());
            actionParams.putInt("scale", 100);
            actionParams.putDouble("normalized_value", normValue);
            graphPath = "me/video.rates";
            // graphPath = "me/video.watches";
        }
    } else {
        Log.i("isInstanceof", "book.class");
        if (listsID.contains(Constants.DLIST_TOREAD)) {
            Log.d(TAG, "toRead");
            graphPath = "me/books.wants_to_read";
        } else {
            // FIX: force floating-point division. "(100 - 1)" is an int, so
            // if getRating() returns an integral type the old expression
            // truncated to 0 for every rating below 100.
            // NOTE(review): books normalize as (rating-1)/99 while movies use
            // rating/100 — confirm whether that asymmetry is intentional.
            double normValue = (doc.getRating() - 1) / (100 - 1.0);
            // graphPath = "me/books.reads";
            // SimpleDateFormat s = new
            // SimpleDateFormat("yyyy-MM-dd-hh:mm:ss", Locale.ENGLISH);
            // String format = s.format(new Date());
            // actionParams.putString("timestamp", format);
            // actionParams.putDouble("percent_complete", 100.0);
            actionParams.putDouble("value", doc.getRating());
            actionParams.putInt("scale", 100);
            actionParams.putDouble("normalized_value", normValue);
            graphPath = "me/books.rates";
        }
    }
    Request actionRequest = new Request(Session.getActiveSession(),
            graphPath, actionParams, HttpMethod.POST, actionCallback);
    return actionRequest;
}
/**
 * Publishes the given document to Facebook as a batched set of requests:
 * optional image upload, Open Graph object creation, then the share action.
 */
public static void publishDoc(Document doc) {
    Log.i("publishDoc", "start publishing");
    final boolean isMovie = Movie.class.isInstance(doc);
    docType = isMovie ? DOCTYPE_MOVIE : DOCTYPE_BOOK;
    String docTypeName = App.getAppContext().getResources()
            .getString(isMovie ? R.string.movie : R.string.book);
    // Show a progress dialog for the whole batch; the request callbacks
    // dismiss it on completion or error.
    progressDialog = ProgressDialog.show(activity, "", App.getAppContext()
            .getResources().getString(R.string.progress_share)
            + docTypeName, true);
    RequestBatch batch = new RequestBatch();
    if (UPLOAD_IMAGE) {
        batch.add(getImageRequest(doc));
    }
    batch.add(getObjectRequest(doc));
    batch.add(getActionRequest(doc));
    batch.executeAsync();
}
}
|
// Build script: bundles ../src/test.test.tsx with webpack + babel into a
// randomly-named bundle under .snowblind/cache/.
import * as path from "path";
import pkg from "webpack";
const { webpack } = pkg;
import { fileURLToPath } from "url";

// ESM has no __dirname; reconstruct it from import.meta.url.
const __dirname = path.dirname(fileURLToPath(import.meta.url));
let file = path.join(__dirname, "../src/test.test.tsx");

const compiler = webpack({
	mode: "production",
	entry: file,
	output: {
		path: path.join(__dirname, ".snowblind/cache/"),
		// Random base36 name so repeated builds don't collide in the cache.
		filename: (Math.random() * 100).toString(36) + "_bundle_.js",
	},
	resolve: {
		extensions: [".tsx", ".ts", ".js", ".jsx"],
	},
	module: {
		rules: [
			{
				test: /\.tsx?$/,
				exclude: /(node_modules|bower_components)/,
				use: {
					loader: "babel-loader",
					options: {
						presets: [
							[
								"@babel/preset-react",
								{
									pragma: "Snowblind.make",
									pragmaFrag: "Snowblind.Fragment",
									useSpread: false,
								},
							],
						],
						minified: true,
						comments: false,
					},
				},
			},
		],
	},
});

compiler.run((err, stats) => {
	// FIX: on a fatal webpack error `stats` is undefined, so the old
	// unconditional stats.toJson() threw a TypeError and hid the real error.
	if (err) {
		console.error(err);
		return;
	}
	console.log(stats.toJson());
});
|
module.exports = {
useClient: true,
arguments: [
{
name: 'name',
autocomplete_target: 'deployment'
},
{
name: 'cluster',
optional: true
},
{
name: 'region',
optional: false
}
],
exec: function({args, client, screen}) {
var data = {
deployment: args.name,
region: args.region
};
if (args.cluster) {
data.cluster = args.cluster;
}
return client.post(args.region, '/deployment-clusters/fetch', data)
.then(result => {
screen.table(['Cluster', 'Region', 'Latest Version', 'Target Version', 'Current Version'])
.addRange(result, x => [x.cluster, x.region, x.latestVersion, x.targetVersion, x.currentVersion])
.output();
});
}
}
|
#!/bin/bash
# Bootstrap a root CA directory layout and generate a self-signed root
# certificate (valid 20 years) using the openssl.cnf in ../ca.
#
# FIX: abort on any error — previously a failed `cd` let chmod/openssl run
# in the wrong directory, and `mkdir` without -p failed on re-run.
set -euo pipefail

cd "$( dirname "${BASH_SOURCE[0]}" )/../ca"

# CA working directories; private/ holds the key and must not be world-readable.
mkdir -p certs crl newcerts private
chmod 700 private
touch index.txt
echo 1000 > serial

# Generate root key
# With key password
# openssl genrsa -aes256 -out private/ca.key.pem 4096
# Without key password
openssl genrsa -out private/ca.key.pem 4096
chmod 400 private/ca.key.pem

# Self-signed root certificate, SHA-256, v3_ca extensions, 7300 days (~20y).
openssl req -config openssl.cnf \
      -key private/ca.key.pem \
      -new -x509 -days 7300 -sha256 -extensions v3_ca \
      -out certs/ca.cert.pem
chmod 444 certs/ca.cert.pem
<gh_stars>1-10
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: lorawan-stack/api/application.proto
package ttnpb
import (
fmt "fmt"
_ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
types "github.com/gogo/protobuf/types"
golang_proto "github.com/golang/protobuf/proto"
go_thethings_network_lorawan_stack_v3_pkg_types "go.thethings.network/lorawan-stack/v3/pkg/types"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = golang_proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Application is the message that defines an Application in the network.
// NOTE: generated by protoc-gen-gogo from lorawan-stack/api/application.proto —
// regenerate rather than editing by hand.
type Application struct {
	// The identifiers of the application. These are public and can be seen by any authenticated user in the network.
	Ids *ApplicationIdentifiers `protobuf:"bytes,1,opt,name=ids,proto3" json:"ids,omitempty"`
	// When the application was created. This information is public and can be seen by any authenticated user in the network.
	CreatedAt *types.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
	// When the application was last updated. This information is public and can be seen by any authenticated user in the network.
	UpdatedAt *types.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
	// When the application was deleted. This information is public and can be seen by any authenticated user in the network.
	DeletedAt *types.Timestamp `protobuf:"bytes,8,opt,name=deleted_at,json=deletedAt,proto3" json:"deleted_at,omitempty"`
	// The name of the application.
	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
	// A description for the application.
	Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
	// Key-value attributes for this application. Typically used for organizing applications or for storing integration-specific data.
	Attributes map[string]string `protobuf:"bytes,6,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Contact information for this application. Typically used to indicate who to contact with technical/security questions about the application.
	// This field is deprecated. Use administrative_contact and technical_contact instead.
	ContactInfo           []*ContactInfo                 `protobuf:"bytes,7,rep,name=contact_info,json=contactInfo,proto3" json:"contact_info,omitempty"` // Deprecated: Do not use.
	AdministrativeContact *OrganizationOrUserIdentifiers `protobuf:"bytes,10,opt,name=administrative_contact,json=administrativeContact,proto3" json:"administrative_contact,omitempty"`
	TechnicalContact      *OrganizationOrUserIdentifiers `protobuf:"bytes,11,opt,name=technical_contact,json=technicalContact,proto3" json:"technical_contact,omitempty"`
	DevEuiCounter         uint32                         `protobuf:"varint,9,opt,name=dev_eui_counter,json=devEuiCounter,proto3" json:"dev_eui_counter,omitempty"`
	XXX_NoUnkeyedLiteral  struct{}                       `json:"-"`
	XXX_unrecognized      []byte                         `json:"-"`
	XXX_sizecache         int32                          `json:"-"`
}

// Standard gogo/protobuf plumbing (marshal/unmarshal/size/merge) — generated boilerplate.
func (m *Application) Reset()         { *m = Application{} }
func (m *Application) String() string { return proto.CompactTextString(m) }
func (*Application) ProtoMessage()    {}
func (*Application) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{0}
}
func (m *Application) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Application.Unmarshal(m, b)
}
func (m *Application) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Application.Marshal(b, m, deterministic)
}
func (m *Application) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Application.Merge(m, src)
}
func (m *Application) XXX_Size() int {
	return xxx_messageInfo_Application.Size(m)
}
func (m *Application) XXX_DiscardUnknown() {
	xxx_messageInfo_Application.DiscardUnknown(m)
}

var xxx_messageInfo_Application proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return the zero value when m is nil.
func (m *Application) GetIds() *ApplicationIdentifiers {
	if m != nil {
		return m.Ids
	}
	return nil
}
func (m *Application) GetCreatedAt() *types.Timestamp {
	if m != nil {
		return m.CreatedAt
	}
	return nil
}
func (m *Application) GetUpdatedAt() *types.Timestamp {
	if m != nil {
		return m.UpdatedAt
	}
	return nil
}
func (m *Application) GetDeletedAt() *types.Timestamp {
	if m != nil {
		return m.DeletedAt
	}
	return nil
}
func (m *Application) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}
func (m *Application) GetDescription() string {
	if m != nil {
		return m.Description
	}
	return ""
}
func (m *Application) GetAttributes() map[string]string {
	if m != nil {
		return m.Attributes
	}
	return nil
}

// Deprecated: Do not use.
func (m *Application) GetContactInfo() []*ContactInfo {
	if m != nil {
		return m.ContactInfo
	}
	return nil
}
func (m *Application) GetAdministrativeContact() *OrganizationOrUserIdentifiers {
	if m != nil {
		return m.AdministrativeContact
	}
	return nil
}
func (m *Application) GetTechnicalContact() *OrganizationOrUserIdentifiers {
	if m != nil {
		return m.TechnicalContact
	}
	return nil
}
func (m *Application) GetDevEuiCounter() uint32 {
	if m != nil {
		return m.DevEuiCounter
	}
	return 0
}
// Applications is a list wrapper message for Application.
// Generated by protoc-gen-gogo — regenerate rather than editing by hand.
type Applications struct {
	Applications         []*Application `protobuf:"bytes,1,rep,name=applications,proto3" json:"applications,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *Applications) Reset()         { *m = Applications{} }
func (m *Applications) String() string { return proto.CompactTextString(m) }
func (*Applications) ProtoMessage()    {}
func (*Applications) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{1}
}
func (m *Applications) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Applications.Unmarshal(m, b)
}
func (m *Applications) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Applications.Marshal(b, m, deterministic)
}
func (m *Applications) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Applications.Merge(m, src)
}
func (m *Applications) XXX_Size() int {
	return xxx_messageInfo_Applications.Size(m)
}
func (m *Applications) XXX_DiscardUnknown() {
	xxx_messageInfo_Applications.DiscardUnknown(m)
}

var xxx_messageInfo_Applications proto.InternalMessageInfo

// GetApplications is nil-receiver-safe: it returns nil when m is nil.
func (m *Applications) GetApplications() []*Application {
	if m != nil {
		return m.Applications
	}
	return nil
}
// IssueDevEUIResponse carries a single issued DevEUI (custom EUI64 type, so no
// generated getter exists for it). Generated by protoc-gen-gogo.
type IssueDevEUIResponse struct {
	DevEui               go_thethings_network_lorawan_stack_v3_pkg_types.EUI64 `protobuf:"bytes,1,opt,name=dev_eui,json=devEui,proto3,customtype=go.thethings.network/lorawan-stack/v3/pkg/types.EUI64" json:"dev_eui"`
	XXX_NoUnkeyedLiteral struct{}                                              `json:"-"`
	XXX_unrecognized     []byte                                                `json:"-"`
	XXX_sizecache        int32                                                 `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *IssueDevEUIResponse) Reset()         { *m = IssueDevEUIResponse{} }
func (m *IssueDevEUIResponse) String() string { return proto.CompactTextString(m) }
func (*IssueDevEUIResponse) ProtoMessage()    {}
func (*IssueDevEUIResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{2}
}
func (m *IssueDevEUIResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_IssueDevEUIResponse.Unmarshal(m, b)
}
func (m *IssueDevEUIResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_IssueDevEUIResponse.Marshal(b, m, deterministic)
}
func (m *IssueDevEUIResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_IssueDevEUIResponse.Merge(m, src)
}
func (m *IssueDevEUIResponse) XXX_Size() int {
	return xxx_messageInfo_IssueDevEUIResponse.Size(m)
}
func (m *IssueDevEUIResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_IssueDevEUIResponse.DiscardUnknown(m)
}

var xxx_messageInfo_IssueDevEUIResponse proto.InternalMessageInfo
// GetApplicationRequest selects one application and the fields to return.
// Generated by protoc-gen-gogo — regenerate rather than editing by hand.
type GetApplicationRequest struct {
	ApplicationIds *ApplicationIdentifiers `protobuf:"bytes,1,opt,name=application_ids,json=applicationIds,proto3" json:"application_ids,omitempty"`
	// The names of the application fields that should be returned.
	FieldMask            *types.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
	XXX_unrecognized     []byte           `json:"-"`
	XXX_sizecache        int32            `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *GetApplicationRequest) Reset()         { *m = GetApplicationRequest{} }
func (m *GetApplicationRequest) String() string { return proto.CompactTextString(m) }
func (*GetApplicationRequest) ProtoMessage()    {}
func (*GetApplicationRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{3}
}
func (m *GetApplicationRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetApplicationRequest.Unmarshal(m, b)
}
func (m *GetApplicationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetApplicationRequest.Marshal(b, m, deterministic)
}
func (m *GetApplicationRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetApplicationRequest.Merge(m, src)
}
func (m *GetApplicationRequest) XXX_Size() int {
	return xxx_messageInfo_GetApplicationRequest.Size(m)
}
func (m *GetApplicationRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetApplicationRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetApplicationRequest proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return nil when m is nil.
func (m *GetApplicationRequest) GetApplicationIds() *ApplicationIdentifiers {
	if m != nil {
		return m.ApplicationIds
	}
	return nil
}
func (m *GetApplicationRequest) GetFieldMask() *types.FieldMask {
	if m != nil {
		return m.FieldMask
	}
	return nil
}
// ListApplicationsRequest lists applications with optional collaborator
// filtering, field selection, ordering and pagination.
// Generated by protoc-gen-gogo — regenerate rather than editing by hand.
type ListApplicationsRequest struct {
	// By default we list all applications the caller has rights on.
	// Set the user or the organization (not both) to instead list the applications
	// where the user or organization is collaborator on.
	Collaborator *OrganizationOrUserIdentifiers `protobuf:"bytes,1,opt,name=collaborator,proto3" json:"collaborator,omitempty"`
	// The names of the application fields that should be returned.
	FieldMask *types.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
	// Order the results by this field path (must be present in the field mask).
	// Default ordering is by ID. Prepend with a minus (-) to reverse the order.
	Order string `protobuf:"bytes,3,opt,name=order,proto3" json:"order,omitempty"`
	// Limit the number of results per page.
	Limit uint32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"`
	// Page number for pagination. 0 is interpreted as 1.
	Page uint32 `protobuf:"varint,5,opt,name=page,proto3" json:"page,omitempty"`
	// Only return recently deleted applications.
	Deleted              bool     `protobuf:"varint,6,opt,name=deleted,proto3" json:"deleted,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *ListApplicationsRequest) Reset()         { *m = ListApplicationsRequest{} }
func (m *ListApplicationsRequest) String() string { return proto.CompactTextString(m) }
func (*ListApplicationsRequest) ProtoMessage()    {}
func (*ListApplicationsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{4}
}
func (m *ListApplicationsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListApplicationsRequest.Unmarshal(m, b)
}
func (m *ListApplicationsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListApplicationsRequest.Marshal(b, m, deterministic)
}
func (m *ListApplicationsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListApplicationsRequest.Merge(m, src)
}
func (m *ListApplicationsRequest) XXX_Size() int {
	return xxx_messageInfo_ListApplicationsRequest.Size(m)
}
func (m *ListApplicationsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ListApplicationsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ListApplicationsRequest proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return the zero value when m is nil.
func (m *ListApplicationsRequest) GetCollaborator() *OrganizationOrUserIdentifiers {
	if m != nil {
		return m.Collaborator
	}
	return nil
}
func (m *ListApplicationsRequest) GetFieldMask() *types.FieldMask {
	if m != nil {
		return m.FieldMask
	}
	return nil
}
func (m *ListApplicationsRequest) GetOrder() string {
	if m != nil {
		return m.Order
	}
	return ""
}
func (m *ListApplicationsRequest) GetLimit() uint32 {
	if m != nil {
		return m.Limit
	}
	return 0
}
func (m *ListApplicationsRequest) GetPage() uint32 {
	if m != nil {
		return m.Page
	}
	return 0
}
func (m *ListApplicationsRequest) GetDeleted() bool {
	if m != nil {
		return m.Deleted
	}
	return false
}
// CreateApplicationRequest creates an application and grants the given
// collaborator all rights on it.
// Generated by protoc-gen-gogo — regenerate rather than editing by hand.
type CreateApplicationRequest struct {
	Application *Application `protobuf:"bytes,1,opt,name=application,proto3" json:"application,omitempty"`
	// Collaborator to grant all rights on the newly created application.
	Collaborator         *OrganizationOrUserIdentifiers `protobuf:"bytes,2,opt,name=collaborator,proto3" json:"collaborator,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
	XXX_unrecognized     []byte                         `json:"-"`
	XXX_sizecache        int32                          `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *CreateApplicationRequest) Reset()         { *m = CreateApplicationRequest{} }
func (m *CreateApplicationRequest) String() string { return proto.CompactTextString(m) }
func (*CreateApplicationRequest) ProtoMessage()    {}
func (*CreateApplicationRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{5}
}
func (m *CreateApplicationRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateApplicationRequest.Unmarshal(m, b)
}
func (m *CreateApplicationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateApplicationRequest.Marshal(b, m, deterministic)
}
func (m *CreateApplicationRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateApplicationRequest.Merge(m, src)
}
func (m *CreateApplicationRequest) XXX_Size() int {
	return xxx_messageInfo_CreateApplicationRequest.Size(m)
}
func (m *CreateApplicationRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateApplicationRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CreateApplicationRequest proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return nil when m is nil.
func (m *CreateApplicationRequest) GetApplication() *Application {
	if m != nil {
		return m.Application
	}
	return nil
}
func (m *CreateApplicationRequest) GetCollaborator() *OrganizationOrUserIdentifiers {
	if m != nil {
		return m.Collaborator
	}
	return nil
}
// UpdateApplicationRequest updates the fields of an application selected by
// the field mask. Generated by protoc-gen-gogo — regenerate rather than editing.
type UpdateApplicationRequest struct {
	Application *Application `protobuf:"bytes,1,opt,name=application,proto3" json:"application,omitempty"`
	// The names of the application fields that should be updated.
	FieldMask            *types.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
	XXX_unrecognized     []byte           `json:"-"`
	XXX_sizecache        int32            `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *UpdateApplicationRequest) Reset()         { *m = UpdateApplicationRequest{} }
func (m *UpdateApplicationRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateApplicationRequest) ProtoMessage()    {}
func (*UpdateApplicationRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{6}
}
func (m *UpdateApplicationRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpdateApplicationRequest.Unmarshal(m, b)
}
func (m *UpdateApplicationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpdateApplicationRequest.Marshal(b, m, deterministic)
}
func (m *UpdateApplicationRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpdateApplicationRequest.Merge(m, src)
}
func (m *UpdateApplicationRequest) XXX_Size() int {
	return xxx_messageInfo_UpdateApplicationRequest.Size(m)
}
func (m *UpdateApplicationRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_UpdateApplicationRequest.DiscardUnknown(m)
}

var xxx_messageInfo_UpdateApplicationRequest proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return nil when m is nil.
func (m *UpdateApplicationRequest) GetApplication() *Application {
	if m != nil {
		return m.Application
	}
	return nil
}
func (m *UpdateApplicationRequest) GetFieldMask() *types.FieldMask {
	if m != nil {
		return m.FieldMask
	}
	return nil
}
// ListApplicationAPIKeysRequest lists an application's API keys with pagination.
// Generated by protoc-gen-gogo — regenerate rather than editing by hand.
type ListApplicationAPIKeysRequest struct {
	ApplicationIds *ApplicationIdentifiers `protobuf:"bytes,1,opt,name=application_ids,json=applicationIds,proto3" json:"application_ids,omitempty"`
	// Limit the number of results per page.
	Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
	// Page number for pagination. 0 is interpreted as 1.
	Page                 uint32   `protobuf:"varint,3,opt,name=page,proto3" json:"page,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *ListApplicationAPIKeysRequest) Reset()         { *m = ListApplicationAPIKeysRequest{} }
func (m *ListApplicationAPIKeysRequest) String() string { return proto.CompactTextString(m) }
func (*ListApplicationAPIKeysRequest) ProtoMessage()    {}
func (*ListApplicationAPIKeysRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{7}
}
func (m *ListApplicationAPIKeysRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListApplicationAPIKeysRequest.Unmarshal(m, b)
}
func (m *ListApplicationAPIKeysRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListApplicationAPIKeysRequest.Marshal(b, m, deterministic)
}
func (m *ListApplicationAPIKeysRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListApplicationAPIKeysRequest.Merge(m, src)
}
func (m *ListApplicationAPIKeysRequest) XXX_Size() int {
	return xxx_messageInfo_ListApplicationAPIKeysRequest.Size(m)
}
func (m *ListApplicationAPIKeysRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ListApplicationAPIKeysRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ListApplicationAPIKeysRequest proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return the zero value when m is nil.
func (m *ListApplicationAPIKeysRequest) GetApplicationIds() *ApplicationIdentifiers {
	if m != nil {
		return m.ApplicationIds
	}
	return nil
}
func (m *ListApplicationAPIKeysRequest) GetLimit() uint32 {
	if m != nil {
		return m.Limit
	}
	return 0
}
func (m *ListApplicationAPIKeysRequest) GetPage() uint32 {
	if m != nil {
		return m.Page
	}
	return 0
}
// GetApplicationAPIKeyRequest selects one API key of an application by its
// public key id. Generated by protoc-gen-gogo — regenerate rather than editing.
type GetApplicationAPIKeyRequest struct {
	ApplicationIds *ApplicationIdentifiers `protobuf:"bytes,1,opt,name=application_ids,json=applicationIds,proto3" json:"application_ids,omitempty"`
	// Unique public identifier for the API key.
	KeyId                string   `protobuf:"bytes,2,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *GetApplicationAPIKeyRequest) Reset()         { *m = GetApplicationAPIKeyRequest{} }
func (m *GetApplicationAPIKeyRequest) String() string { return proto.CompactTextString(m) }
func (*GetApplicationAPIKeyRequest) ProtoMessage()    {}
func (*GetApplicationAPIKeyRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{8}
}
func (m *GetApplicationAPIKeyRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetApplicationAPIKeyRequest.Unmarshal(m, b)
}
func (m *GetApplicationAPIKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetApplicationAPIKeyRequest.Marshal(b, m, deterministic)
}
func (m *GetApplicationAPIKeyRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetApplicationAPIKeyRequest.Merge(m, src)
}
func (m *GetApplicationAPIKeyRequest) XXX_Size() int {
	return xxx_messageInfo_GetApplicationAPIKeyRequest.Size(m)
}
func (m *GetApplicationAPIKeyRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetApplicationAPIKeyRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetApplicationAPIKeyRequest proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return the zero value when m is nil.
func (m *GetApplicationAPIKeyRequest) GetApplicationIds() *ApplicationIdentifiers {
	if m != nil {
		return m.ApplicationIds
	}
	return nil
}
func (m *GetApplicationAPIKeyRequest) GetKeyId() string {
	if m != nil {
		return m.KeyId
	}
	return ""
}
// CreateApplicationAPIKeyRequest creates an API key with the given name,
// rights and optional expiry. Generated by protoc-gen-gogo — regenerate
// rather than editing by hand.
type CreateApplicationAPIKeyRequest struct {
	ApplicationIds       *ApplicationIdentifiers `protobuf:"bytes,1,opt,name=application_ids,json=applicationIds,proto3" json:"application_ids,omitempty"`
	Name                 string                  `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	Rights               []Right                 `protobuf:"varint,3,rep,packed,name=rights,proto3,enum=ttn.lorawan.v3.Right" json:"rights,omitempty"`
	ExpiresAt            *types.Timestamp        `protobuf:"bytes,4,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
	XXX_unrecognized     []byte                  `json:"-"`
	XXX_sizecache        int32                   `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *CreateApplicationAPIKeyRequest) Reset()         { *m = CreateApplicationAPIKeyRequest{} }
func (m *CreateApplicationAPIKeyRequest) String() string { return proto.CompactTextString(m) }
func (*CreateApplicationAPIKeyRequest) ProtoMessage()    {}
func (*CreateApplicationAPIKeyRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{9}
}
func (m *CreateApplicationAPIKeyRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateApplicationAPIKeyRequest.Unmarshal(m, b)
}
func (m *CreateApplicationAPIKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateApplicationAPIKeyRequest.Marshal(b, m, deterministic)
}
func (m *CreateApplicationAPIKeyRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateApplicationAPIKeyRequest.Merge(m, src)
}
func (m *CreateApplicationAPIKeyRequest) XXX_Size() int {
	return xxx_messageInfo_CreateApplicationAPIKeyRequest.Size(m)
}
func (m *CreateApplicationAPIKeyRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateApplicationAPIKeyRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CreateApplicationAPIKeyRequest proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return the zero value when m is nil.
func (m *CreateApplicationAPIKeyRequest) GetApplicationIds() *ApplicationIdentifiers {
	if m != nil {
		return m.ApplicationIds
	}
	return nil
}
func (m *CreateApplicationAPIKeyRequest) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}
func (m *CreateApplicationAPIKeyRequest) GetRights() []Right {
	if m != nil {
		return m.Rights
	}
	return nil
}
func (m *CreateApplicationAPIKeyRequest) GetExpiresAt() *types.Timestamp {
	if m != nil {
		return m.ExpiresAt
	}
	return nil
}
// UpdateApplicationAPIKeyRequest updates the fields of an API key selected by
// the field mask. Generated by protoc-gen-gogo — regenerate rather than editing.
type UpdateApplicationAPIKeyRequest struct {
	ApplicationIds *ApplicationIdentifiers `protobuf:"bytes,1,opt,name=application_ids,json=applicationIds,proto3" json:"application_ids,omitempty"`
	ApiKey         *APIKey                 `protobuf:"bytes,2,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
	// The names of the api key fields that should be updated.
	FieldMask            *types.FieldMask `protobuf:"bytes,3,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
	XXX_unrecognized     []byte           `json:"-"`
	XXX_sizecache        int32            `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *UpdateApplicationAPIKeyRequest) Reset()         { *m = UpdateApplicationAPIKeyRequest{} }
func (m *UpdateApplicationAPIKeyRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateApplicationAPIKeyRequest) ProtoMessage()    {}
func (*UpdateApplicationAPIKeyRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{10}
}
func (m *UpdateApplicationAPIKeyRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpdateApplicationAPIKeyRequest.Unmarshal(m, b)
}
func (m *UpdateApplicationAPIKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpdateApplicationAPIKeyRequest.Marshal(b, m, deterministic)
}
func (m *UpdateApplicationAPIKeyRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpdateApplicationAPIKeyRequest.Merge(m, src)
}
func (m *UpdateApplicationAPIKeyRequest) XXX_Size() int {
	return xxx_messageInfo_UpdateApplicationAPIKeyRequest.Size(m)
}
func (m *UpdateApplicationAPIKeyRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_UpdateApplicationAPIKeyRequest.DiscardUnknown(m)
}

var xxx_messageInfo_UpdateApplicationAPIKeyRequest proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return nil when m is nil.
func (m *UpdateApplicationAPIKeyRequest) GetApplicationIds() *ApplicationIdentifiers {
	if m != nil {
		return m.ApplicationIds
	}
	return nil
}
func (m *UpdateApplicationAPIKeyRequest) GetApiKey() *APIKey {
	if m != nil {
		return m.ApiKey
	}
	return nil
}
func (m *UpdateApplicationAPIKeyRequest) GetFieldMask() *types.FieldMask {
	if m != nil {
		return m.FieldMask
	}
	return nil
}
// ListApplicationCollaboratorsRequest lists an application's collaborators
// with pagination. Generated by protoc-gen-gogo — regenerate rather than editing.
type ListApplicationCollaboratorsRequest struct {
	ApplicationIds *ApplicationIdentifiers `protobuf:"bytes,1,opt,name=application_ids,json=applicationIds,proto3" json:"application_ids,omitempty"`
	// Limit the number of results per page.
	Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
	// Page number for pagination. 0 is interpreted as 1.
	Page                 uint32   `protobuf:"varint,3,opt,name=page,proto3" json:"page,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard gogo/protobuf plumbing — generated boilerplate.
func (m *ListApplicationCollaboratorsRequest) Reset()         { *m = ListApplicationCollaboratorsRequest{} }
func (m *ListApplicationCollaboratorsRequest) String() string { return proto.CompactTextString(m) }
func (*ListApplicationCollaboratorsRequest) ProtoMessage()    {}
func (*ListApplicationCollaboratorsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{11}
}
func (m *ListApplicationCollaboratorsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListApplicationCollaboratorsRequest.Unmarshal(m, b)
}
func (m *ListApplicationCollaboratorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListApplicationCollaboratorsRequest.Marshal(b, m, deterministic)
}
func (m *ListApplicationCollaboratorsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListApplicationCollaboratorsRequest.Merge(m, src)
}
func (m *ListApplicationCollaboratorsRequest) XXX_Size() int {
	return xxx_messageInfo_ListApplicationCollaboratorsRequest.Size(m)
}
func (m *ListApplicationCollaboratorsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ListApplicationCollaboratorsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ListApplicationCollaboratorsRequest proto.InternalMessageInfo

// Getters below are nil-receiver-safe: they return the zero value when m is nil.
func (m *ListApplicationCollaboratorsRequest) GetApplicationIds() *ApplicationIdentifiers {
	if m != nil {
		return m.ApplicationIds
	}
	return nil
}
func (m *ListApplicationCollaboratorsRequest) GetLimit() uint32 {
	if m != nil {
		return m.Limit
	}
	return 0
}
func (m *ListApplicationCollaboratorsRequest) GetPage() uint32 {
	if m != nil {
		return m.Page
	}
	return 0
}
// GetApplicationCollaboratorRequest is the generated request message for
// fetching one collaborator (organization or user) of an application.
// Machine-generated from the .proto definition — do not edit by hand.
type GetApplicationCollaboratorRequest struct {
	ApplicationIds *ApplicationIdentifiers `protobuf:"bytes,1,opt,name=application_ids,json=applicationIds,proto3" json:"application_ids,omitempty"`
	Collaborator *OrganizationOrUserIdentifiers `protobuf:"bytes,2,opt,name=collaborator,proto3" json:"collaborator,omitempty"`
	// Generated bookkeeping fields: unknown wire data and cached size.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto plumbing; the XXX_* hooks delegate to the shared
// message-info table below.
func (m *GetApplicationCollaboratorRequest) Reset() { *m = GetApplicationCollaboratorRequest{} }
func (m *GetApplicationCollaboratorRequest) String() string { return proto.CompactTextString(m) }
func (*GetApplicationCollaboratorRequest) ProtoMessage() {}
func (*GetApplicationCollaboratorRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{12}
}
func (m *GetApplicationCollaboratorRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetApplicationCollaboratorRequest.Unmarshal(m, b)
}
func (m *GetApplicationCollaboratorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetApplicationCollaboratorRequest.Marshal(b, m, deterministic)
}
func (m *GetApplicationCollaboratorRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetApplicationCollaboratorRequest.Merge(m, src)
}
func (m *GetApplicationCollaboratorRequest) XXX_Size() int {
	return xxx_messageInfo_GetApplicationCollaboratorRequest.Size(m)
}
func (m *GetApplicationCollaboratorRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetApplicationCollaboratorRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetApplicationCollaboratorRequest proto.InternalMessageInfo

// Nil-safe generated getters.
func (m *GetApplicationCollaboratorRequest) GetApplicationIds() *ApplicationIdentifiers {
	if m != nil {
		return m.ApplicationIds
	}
	return nil
}
func (m *GetApplicationCollaboratorRequest) GetCollaborator() *OrganizationOrUserIdentifiers {
	if m != nil {
		return m.Collaborator
	}
	return nil
}
// SetApplicationCollaboratorRequest is the generated request message for
// setting (creating/updating) a collaborator of an application.
// Machine-generated from the .proto definition — do not edit by hand.
type SetApplicationCollaboratorRequest struct {
	ApplicationIds *ApplicationIdentifiers `protobuf:"bytes,1,opt,name=application_ids,json=applicationIds,proto3" json:"application_ids,omitempty"`
	Collaborator *Collaborator `protobuf:"bytes,2,opt,name=collaborator,proto3" json:"collaborator,omitempty"`
	// Generated bookkeeping fields: unknown wire data and cached size.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto plumbing; the XXX_* hooks delegate to the shared
// message-info table below.
func (m *SetApplicationCollaboratorRequest) Reset() { *m = SetApplicationCollaboratorRequest{} }
func (m *SetApplicationCollaboratorRequest) String() string { return proto.CompactTextString(m) }
func (*SetApplicationCollaboratorRequest) ProtoMessage() {}
func (*SetApplicationCollaboratorRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_57d90136b1f4f7b1, []int{13}
}
func (m *SetApplicationCollaboratorRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SetApplicationCollaboratorRequest.Unmarshal(m, b)
}
func (m *SetApplicationCollaboratorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SetApplicationCollaboratorRequest.Marshal(b, m, deterministic)
}
func (m *SetApplicationCollaboratorRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SetApplicationCollaboratorRequest.Merge(m, src)
}
func (m *SetApplicationCollaboratorRequest) XXX_Size() int {
	return xxx_messageInfo_SetApplicationCollaboratorRequest.Size(m)
}
func (m *SetApplicationCollaboratorRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SetApplicationCollaboratorRequest.DiscardUnknown(m)
}

var xxx_messageInfo_SetApplicationCollaboratorRequest proto.InternalMessageInfo

// Nil-safe generated getters.
func (m *SetApplicationCollaboratorRequest) GetApplicationIds() *ApplicationIdentifiers {
	if m != nil {
		return m.ApplicationIds
	}
	return nil
}
func (m *SetApplicationCollaboratorRequest) GetCollaborator() *Collaborator {
	if m != nil {
		return m.Collaborator
	}
	return nil
}
// Register the generated message (and map) types with both the standard
// golang/protobuf registry and the gogo registry under their fully qualified
// "ttn.lorawan.v3.*" names. Machine-generated — do not edit by hand.
func init() {
	proto.RegisterType((*Application)(nil), "ttn.lorawan.v3.Application")
	golang_proto.RegisterType((*Application)(nil), "ttn.lorawan.v3.Application")
	proto.RegisterMapType((map[string]string)(nil), "ttn.lorawan.v3.Application.AttributesEntry")
	golang_proto.RegisterMapType((map[string]string)(nil), "ttn.lorawan.v3.Application.AttributesEntry")
	proto.RegisterType((*Applications)(nil), "ttn.lorawan.v3.Applications")
	golang_proto.RegisterType((*Applications)(nil), "ttn.lorawan.v3.Applications")
	proto.RegisterType((*IssueDevEUIResponse)(nil), "ttn.lorawan.v3.IssueDevEUIResponse")
	golang_proto.RegisterType((*IssueDevEUIResponse)(nil), "ttn.lorawan.v3.IssueDevEUIResponse")
	proto.RegisterType((*GetApplicationRequest)(nil), "ttn.lorawan.v3.GetApplicationRequest")
	golang_proto.RegisterType((*GetApplicationRequest)(nil), "ttn.lorawan.v3.GetApplicationRequest")
	proto.RegisterType((*ListApplicationsRequest)(nil), "ttn.lorawan.v3.ListApplicationsRequest")
	golang_proto.RegisterType((*ListApplicationsRequest)(nil), "ttn.lorawan.v3.ListApplicationsRequest")
	proto.RegisterType((*CreateApplicationRequest)(nil), "ttn.lorawan.v3.CreateApplicationRequest")
	golang_proto.RegisterType((*CreateApplicationRequest)(nil), "ttn.lorawan.v3.CreateApplicationRequest")
	proto.RegisterType((*UpdateApplicationRequest)(nil), "ttn.lorawan.v3.UpdateApplicationRequest")
	golang_proto.RegisterType((*UpdateApplicationRequest)(nil), "ttn.lorawan.v3.UpdateApplicationRequest")
	proto.RegisterType((*ListApplicationAPIKeysRequest)(nil), "ttn.lorawan.v3.ListApplicationAPIKeysRequest")
	golang_proto.RegisterType((*ListApplicationAPIKeysRequest)(nil), "ttn.lorawan.v3.ListApplicationAPIKeysRequest")
	proto.RegisterType((*GetApplicationAPIKeyRequest)(nil), "ttn.lorawan.v3.GetApplicationAPIKeyRequest")
	golang_proto.RegisterType((*GetApplicationAPIKeyRequest)(nil), "ttn.lorawan.v3.GetApplicationAPIKeyRequest")
	proto.RegisterType((*CreateApplicationAPIKeyRequest)(nil), "ttn.lorawan.v3.CreateApplicationAPIKeyRequest")
	golang_proto.RegisterType((*CreateApplicationAPIKeyRequest)(nil), "ttn.lorawan.v3.CreateApplicationAPIKeyRequest")
	proto.RegisterType((*UpdateApplicationAPIKeyRequest)(nil), "ttn.lorawan.v3.UpdateApplicationAPIKeyRequest")
	golang_proto.RegisterType((*UpdateApplicationAPIKeyRequest)(nil), "ttn.lorawan.v3.UpdateApplicationAPIKeyRequest")
	proto.RegisterType((*ListApplicationCollaboratorsRequest)(nil), "ttn.lorawan.v3.ListApplicationCollaboratorsRequest")
	golang_proto.RegisterType((*ListApplicationCollaboratorsRequest)(nil), "ttn.lorawan.v3.ListApplicationCollaboratorsRequest")
	proto.RegisterType((*GetApplicationCollaboratorRequest)(nil), "ttn.lorawan.v3.GetApplicationCollaboratorRequest")
	golang_proto.RegisterType((*GetApplicationCollaboratorRequest)(nil), "ttn.lorawan.v3.GetApplicationCollaboratorRequest")
	proto.RegisterType((*SetApplicationCollaboratorRequest)(nil), "ttn.lorawan.v3.SetApplicationCollaboratorRequest")
	golang_proto.RegisterType((*SetApplicationCollaboratorRequest)(nil), "ttn.lorawan.v3.SetApplicationCollaboratorRequest")
}
// Register the compiled descriptor of the source .proto file with the
// golang/protobuf registry.
func init() {
	proto.RegisterFile("lorawan-stack/api/application.proto", fileDescriptor_57d90136b1f4f7b1)
}
// Register the compiled descriptor of the source .proto file with the gogo
// registry as well.
func init() {
	golang_proto.RegisterFile("lorawan-stack/api/application.proto", fileDescriptor_57d90136b1f4f7b1)
}
var fileDescriptor_57d90136b1f4f7b1 = []byte{
// 1172 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0xcf, 0x6f, 0x1b, 0x45,
0x14, 0xee, 0xd8, 0xb1, 0x13, 0x3f, 0xe7, 0x57, 0x17, 0x52, 0x56, 0x29, 0xb8, 0xee, 0x36, 0xaa,
0x4c, 0xc1, 0x6b, 0xe4, 0x12, 0x44, 0x2a, 0x55, 0xa9, 0x37, 0x84, 0xca, 0x0d, 0x10, 0x18, 0x08,
0x12, 0x8d, 0x8a, 0x35, 0xde, 0x1d, 0x6f, 0x46, 0xb6, 0x77, 0x97, 0xd9, 0xb1, 0x5b, 0x17, 0x71,
0xe9, 0x91, 0x0b, 0x52, 0x8f, 0x1c, 0xe0, 0x2f, 0x80, 0x03, 0xe2, 0x0f, 0xe8, 0x05, 0xa9, 0xe2,
0xc4, 0x99, 0x4a, 0xbd, 0x70, 0xe1, 0xca, 0xd5, 0x27, 0xb4, 0xb3, 0xeb, 0x78, 0x6d, 0xa7, 0x89,
0xd2, 0xa8, 0x81, 0x93, 0x67, 0x76, 0xbe, 0xf7, 0xde, 0xf7, 0xde, 0xbc, 0xf7, 0x79, 0xe0, 0x52,
0xcb, 0xe5, 0xe4, 0x2e, 0x71, 0x8a, 0xbe, 0x20, 0x66, 0xb3, 0x44, 0x3c, 0x56, 0x22, 0x9e, 0xd7,
0x62, 0x26, 0x11, 0xcc, 0x75, 0x74, 0x8f, 0xbb, 0xc2, 0x55, 0xe6, 0x85, 0x70, 0xf4, 0x08, 0xa8,
0x77, 0xaf, 0x2e, 0x57, 0x6c, 0x26, 0xf6, 0x3a, 0x75, 0xdd, 0x74, 0xdb, 0x25, 0xea, 0x74, 0xdd,
0x9e, 0xc7, 0xdd, 0x7b, 0xbd, 0x92, 0x04, 0x9b, 0x45, 0x9b, 0x3a, 0xc5, 0x2e, 0x69, 0x31, 0x8b,
0x08, 0x5a, 0x9a, 0x58, 0x84, 0x2e, 0x97, 0x8b, 0x31, 0x17, 0xb6, 0x6b, 0xbb, 0xa1, 0x71, 0xbd,
0xd3, 0x90, 0x3b, 0xb9, 0x91, 0xab, 0x08, 0x9e, 0xb7, 0x5d, 0xd7, 0x6e, 0xd1, 0x21, 0xaa, 0xc1,
0x68, 0xcb, 0xaa, 0xb5, 0x89, 0xdf, 0x8c, 0x10, 0x17, 0xc6, 0x11, 0x82, 0xb5, 0xa9, 0x2f, 0x48,
0xdb, 0x8b, 0x00, 0x2b, 0x93, 0x99, 0x9a, 0xae, 0x23, 0x88, 0x29, 0x6a, 0xcc, 0x69, 0x0c, 0x02,
0x1d, 0x50, 0x0f, 0x66, 0x51, 0x47, 0xb0, 0x06, 0xa3, 0xdc, 0x8f, 0x40, 0xb9, 0x49, 0x10, 0x67,
0xf6, 0x9e, 0x88, 0xce, 0xb5, 0xdf, 0xd2, 0x90, 0xad, 0x0c, 0xab, 0xa8, 0x18, 0x90, 0x64, 0x96,
0xaf, 0xa2, 0x3c, 0x2a, 0x64, 0xcb, 0x97, 0xf5, 0xd1, 0x6a, 0xea, 0x31, 0x64, 0x75, 0x18, 0xca,
0x98, 0xe9, 0x1b, 0xa9, 0x6f, 0x51, 0x62, 0x11, 0xe1, 0xc0, 0x58, 0x59, 0x03, 0x30, 0x39, 0x25,
0x82, 0x5a, 0x35, 0x22, 0xd4, 0x84, 0x74, 0xb5, 0xac, 0x87, 0x49, 0xeb, 0x83, 0xa4, 0xf5, 0xcf,
0x06, 0x49, 0xe3, 0x4c, 0x84, 0xae, 0x88, 0xc0, 0xb4, 0xe3, 0x59, 0x03, 0xd3, 0xe4, 0xd1, 0xa6,
0x11, 0x3a, 0x34, 0xb5, 0x68, 0x8b, 0x46, 0xa6, 0x33, 0x47, 0x9b, 0x46, 0xe8, 0x8a, 0x50, 0xce,
0xc3, 0x94, 0x43, 0xda, 0x54, 0x9d, 0xca, 0xa3, 0x42, 0xc6, 0x98, 0xee, 0x1b, 0x53, 0x3c, 0xa1,
0x96, 0xb1, 0xfc, 0xa8, 0x5c, 0x81, 0xac, 0x45, 0x7d, 0x93, 0x33, 0x2f, 0x48, 0x5b, 0x4d, 0x49,
0x4c, 0x90, 0x31, 0x4f, 0xaa, 0x7f, 0x2c, 0xe0, 0xf8, 0xa1, 0xf2, 0x00, 0x01, 0x10, 0x21, 0x38,
0xab, 0x77, 0x04, 0xf5, 0xd5, 0x74, 0x3e, 0x59, 0xc8, 0x96, 0xdf, 0x38, 0xa4, 0x8a, 0x7a, 0x65,
0x1f, 0xbd, 0xe9, 0x08, 0xde, 0x33, 0x56, 0xfb, 0x46, 0xf9, 0x7b, 0x54, 0x5a, 0x04, 0x6d, 0x85,
0x6b, 0xea, 0x4a, 0x39, 0xf7, 0xe5, 0x2e, 0x29, 0xde, 0x7f, 0xab, 0xb8, 0x76, 0xa7, 0xb0, 0x7e,
0x6d, 0xb7, 0x78, 0x67, 0x7d, 0xb0, 0x7d, 0xfd, 0xeb, 0xf2, 0x9b, 0xdf, 0xac, 0x5c, 0x09, 0x58,
0x3c, 0x46, 0x38, 0x16, 0x55, 0xb9, 0x05, 0xb3, 0xf1, 0x6e, 0x51, 0xa7, 0x25, 0x8b, 0xf3, 0xe3,
0x2c, 0x36, 0x42, 0x4c, 0xd5, 0x69, 0xb8, 0x06, 0xf4, 0x8d, 0xd4, 0x43, 0x94, 0x58, 0x04, 0x15,
0xe1, 0xac, 0x39, 0x3c, 0x50, 0x2c, 0x38, 0x47, 0xac, 0x36, 0x73, 0x98, 0x2f, 0x38, 0x11, 0xac,
0x4b, 0x6b, 0xd1, 0xa9, 0x0a, 0xb2, 0xc0, 0xc5, 0x71, 0xaf, 0xdb, 0xdc, 0x26, 0x0e, 0xbb, 0x2f,
0x93, 0xdb, 0xe6, 0x3b, 0x3e, 0xe5, 0xb1, 0x46, 0xc1, 0x4b, 0xa3, 0xce, 0x22, 0x0a, 0xca, 0x6d,
0x38, 0x2b, 0xa8, 0xb9, 0xe7, 0x30, 0x93, 0xb4, 0xf6, 0x03, 0x64, 0x9f, 0x27, 0xc0, 0xe2, 0xbe,
0x9f, 0x81, 0xef, 0xcb, 0xb0, 0x60, 0xd1, 0x6e, 0x8d, 0x76, 0x58, 0xcd, 0x74, 0x3b, 0x8e, 0xa0,
0x5c, 0xcd, 0xe4, 0x51, 0x61, 0x0e, 0xcf, 0x59, 0xb4, 0xbb, 0xd9, 0x61, 0x1b, 0xe1, 0xc7, 0xe5,
0xeb, 0xb0, 0x30, 0x76, 0x17, 0xca, 0x22, 0x24, 0x9b, 0xb4, 0x27, 0x67, 0x21, 0x83, 0x83, 0xa5,
0xf2, 0x32, 0xa4, 0xba, 0xa4, 0xd5, 0xa1, 0xb2, 0xa9, 0x33, 0x38, 0xdc, 0x5c, 0x4b, 0xbc, 0x8b,
0xb4, 0x6d, 0x98, 0x8d, 0x5d, 0xab, 0xaf, 0xac, 0xc3, 0x6c, 0x4c, 0x9c, 0x82, 0x81, 0x3a, 0xf0,
0x12, 0x62, 0x36, 0x78, 0xc4, 0x40, 0x6b, 0xc3, 0x4b, 0x55, 0xdf, 0xef, 0xd0, 0xf7, 0x68, 0x77,
0x73, 0xa7, 0x8a, 0xa9, 0xef, 0xb9, 0x8e, 0x4f, 0x95, 0xcf, 0x61, 0x3a, 0x4a, 0x47, 0xf2, 0x9a,
0x35, 0xae, 0x3f, 0x7e, 0x7a, 0xe1, 0xcc, 0x9f, 0x4f, 0x2f, 0xac, 0xda, 0xae, 0x2e, 0xf6, 0xa8,
0xd8, 0x63, 0x8e, 0xed, 0xeb, 0x0e, 0x15, 0x77, 0x5d, 0xde, 0x2c, 0x8d, 0x4e, 0x7f, 0xf7, 0x6a,
0xc9, 0x6b, 0xda, 0x25, 0xd1, 0xf3, 0xa8, 0xaf, 0x6f, 0xee, 0x54, 0xdf, 0x79, 0x1b, 0xa7, 0xc3,
0x2a, 0x68, 0x3f, 0x21, 0x58, 0xba, 0x49, 0x45, 0x9c, 0x0f, 0xfd, 0xaa, 0x43, 0x7d, 0xa1, 0x7c,
0x01, 0x0b, 0x31, 0x62, 0xb5, 0x93, 0xa8, 0xc3, 0x3c, 0x89, 0x23, 0xa4, 0x50, 0x0c, 0xc5, 0xf1,
0x99, 0x42, 0xf1, 0x7e, 0x00, 0xf9, 0x90, 0xf8, 0x4d, 0x9c, 0x69, 0x0c, 0x96, 0xda, 0x93, 0x04,
0xbc, 0xf2, 0x01, 0xf3, 0xe3, 0x84, 0xfd, 0x01, 0xe3, 0x4f, 0x82, 0x01, 0x68, 0xb5, 0x48, 0xdd,
0xe5, 0x44, 0xb8, 0x3c, 0xa2, 0x7b, 0xcc, 0x4e, 0x1a, 0x71, 0x71, 0x02, 0xa6, 0x8a, 0x05, 0x29,
0x97, 0x5b, 0x94, 0x4b, 0x35, 0xcb, 0x18, 0x1f, 0xf5, 0x8d, 0x2d, 0x5e, 0xc5, 0x67, 0x46, 0x8a,
0x51, 0x63, 0x16, 0x5e, 0x28, 0x8e, 0x7d, 0x90, 0xf2, 0x83, 0x53, 0x45, 0xf9, 0x13, 0x53, 0x54,
0x9c, 0x2d, 0xc6, 0x36, 0xa1, 0x73, 0x25, 0x07, 0xa9, 0x16, 0x6b, 0x33, 0x21, 0x35, 0x6c, 0x4e,
0xd6, 0xfc, 0x4a, 0x52, 0xfd, 0x7b, 0x1a, 0x87, 0x9f, 0x15, 0x05, 0xa6, 0x3c, 0x62, 0x53, 0x29,
0x5f, 0x73, 0x58, 0xae, 0x15, 0x35, 0xe8, 0x25, 0xa9, 0x81, 0x6a, 0x3a, 0x8f, 0x0a, 0x33, 0x78,
0xb0, 0xd5, 0x1e, 0x21, 0x50, 0x37, 0x64, 0x8c, 0x03, 0x1a, 0xe2, 0x26, 0x64, 0x63, 0x4c, 0xa3,
0xea, 0x1e, 0xd6, 0xd9, 0xb1, 0x0e, 0x88, 0x5b, 0x2a, 0xbb, 0x63, 0xf7, 0x94, 0x78, 0x8e, 0x7b,
0x8a, 0xf9, 0x1e, 0x71, 0xa6, 0xfd, 0x80, 0x40, 0xdd, 0x91, 0x7f, 0x0e, 0x2f, 0x32, 0x85, 0x13,
0x74, 0xf0, 0xcf, 0x08, 0x5e, 0x1b, 0xeb, 0xe0, 0xca, 0xc7, 0xd5, 0x2d, 0xda, 0xf3, 0x4f, 0x61,
0xf2, 0xf6, 0xdb, 0x25, 0x71, 0x78, 0xbb, 0x24, 0x87, 0xed, 0xa2, 0x7d, 0x87, 0xe0, 0xfc, 0xa8,
0x44, 0x84, 0x7c, 0x4f, 0x81, 0xee, 0x12, 0xa4, 0x9b, 0xb4, 0x57, 0x63, 0xd6, 0x40, 0x78, 0x9b,
0xb4, 0x57, 0xb5, 0xb4, 0x1f, 0x13, 0x90, 0x9b, 0x68, 0xd3, 0x53, 0x23, 0x35, 0x78, 0x35, 0x24,
0x0e, 0x7a, 0x35, 0xdc, 0x80, 0x74, 0xf8, 0xce, 0x52, 0x93, 0xf9, 0x64, 0x61, 0xbe, 0xbc, 0x34,
0x1e, 0x0e, 0x07, 0xa7, 0xc6, 0xd9, 0xbe, 0x31, 0xff, 0x10, 0x65, 0x67, 0x90, 0x8a, 0xb4, 0xd4,
0x03, 0x19, 0x26, 0xb2, 0x53, 0x36, 0x00, 0xe8, 0x3d, 0x8f, 0x71, 0xea, 0x07, 0xef, 0x99, 0xa9,
0xa3, 0xde, 0x33, 0x92, 0xe8, 0x2f, 0x28, 0x71, 0x03, 0xe1, 0x4c, 0x64, 0x57, 0x11, 0xda, 0x3f,
0x08, 0x72, 0x13, 0x53, 0x70, 0x6a, 0x15, 0x5a, 0x83, 0x69, 0xe2, 0xb1, 0x5a, 0xf0, 0x27, 0x1a,
0x8e, 0xc6, 0xb9, 0x09, 0x97, 0x92, 0x4a, 0xcc, 0x45, 0x9a, 0x78, 0x6c, 0x8b, 0xf6, 0xc6, 0x06,
0x2b, 0x79, 0x9c, 0xc1, 0xfa, 0x15, 0xc1, 0xa5, 0xb1, 0xc1, 0xda, 0x88, 0x29, 0xc3, 0xff, 0x75,
0xbc, 0x9e, 0x20, 0xb8, 0x38, 0x3a, 0x5e, 0x71, 0xd6, 0xa7, 0x40, 0xfa, 0x85, 0xca, 0xf1, 0xef,
0x08, 0x2e, 0x7e, 0xfa, 0x5f, 0x66, 0x77, 0xeb, 0xc0, 0xec, 0x5e, 0x9d, 0x7c, 0x15, 0x0f, 0x31,
0xcf, 0x4a, 0xc6, 0x58, 0x7d, 0xf4, 0x57, 0x0e, 0xdd, 0x2e, 0x1d, 0xe3, 0xc5, 0x25, 0x1c, 0xaf,
0x5e, 0x4f, 0xcb, 0xbe, 0xbd, 0xfa, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x98, 0xa7, 0xd2, 0x60,
0xc9, 0x0e, 0x00, 0x00,
}
|
#!/bin/bash
# Pretrain the joint ctrl_muniter model on Conceptual Captions image features
# plus multilingual Wikipedia text using the volta codebase, resuming from the
# latest checkpoint when one exists.
FAMILY=joint
MODEL=ctrl_muniter
MODEL_CONFIG=ctrl_muniter_base
# Cluster-specific dataset locations.
DATA=/science/image/nlp-datasets/emanuele/data
ANNOS=$DATA/conceptual_captions/annotations
WIKIS=$DATA/wikipedia/txt
FEATS=$DATA/conceptual_captions/resnet101_faster_rcnn_genome_imgfeats/volta
# Where checkpoints and training logs are written.
OUTPUT_DIR=/science/image/nlp-datasets/emanuele/checkpoints/mc-bert/${FAMILY}/${MODEL}/conceptual_captions_wikipedia
LOGGING_DIR=$HOME/projects/mc-bert/logs/${FAMILY}/${MODEL_CONFIG}/conceptual_captions_wikipedia
# Activate the shared conda environment by absolute path.
source activate /science/image/nlp-datasets/emanuele/envs/mc-bert
# NOTE(review): relative path assumes the script is launched from its own
# directory — confirm before running from elsewhere.
cd ../../../../code/volta
python train_concap_wiki.py \
  --bert_model bert-base-multilingual-cased --config_file config/${MODEL_CONFIG}.json \
  --train_x_batch_size 256 --train_m_batch_size 256 --gradient_accumulation_steps 4 \
  --max_x_seq_length 66 --max_m_seq_length 66 --m_pretrained bert-base-multilingual-cased \
  --learning_rate 1e-4 --adam_epsilon 1e-6 --adam_betas 0.9 0.999 --weight_decay 0.01 --warmup_proportion 0.1 --clip_grad_norm 5.0 \
  --objective 1 \
  --annotations_path $ANNOS --features_path $FEATS \
  --dataroot $WIKIS --lgs ALL --lg_sampling_factor 0.7 \
  --output_dir ${OUTPUT_DIR} \
  --logdir ${LOGGING_DIR} \
  --num_train_epochs 10 \
  --resume_file ${OUTPUT_DIR}/${MODEL_CONFIG}/pytorch_ckpt_latest.tar
conda deactivate
|
<filename>g2o_solver.h<gh_stars>0
/*
 * g2o_solver.h
 *
 *  Created on: Jul 29, 2021
 *      Author: zack
 */
#ifndef G2O_SOLVER_H_
#define G2O_SOLVER_H_
// NOTE(review): placeholder header — the trunk namespace is currently empty;
// presumably a g2o-based solver declaration is intended to live here.
namespace trunk{
} //namespace trunk
#endif /* G2O_SOLVER_H_ */
|
<reponame>tvaisanen/embeddedGraphWidget
/**
* Created by toni on 21.7.2017.
*/
define([
"components/elementStyles",
"configuration/configs"
], function (elementStyles, configs) {
var es = elementStyles;
QUnit.module("Unit Tests - components.elementStyles: ");
QUnit.test("addCategory()", function (assert) {
var newCategoryName = 'newCategory';
var categoryExistsBefore = es.categoryExists(newCategoryName);
es.addCategory({name: newCategoryName});
var categoryExistsAfter = es.categoryExists(newCategoryName);
assert.notOk(categoryExistsBefore, "category do not exist before addition");
assert.ok(categoryExistsAfter, "category after addition");
});
QUnit.test("categoryExists()", function (assert) {
// by default there is category 'generic'
var newCategoryName = "newCategory";
es.addCategory(newCategoryName);
var categoryGenericExists = es.categoryExists('generic');
var categoryNewExists = es.categoryExists(newCategoryName);
assert.ok(categoryGenericExists, "returns existing default category");
assert.ok(categoryNewExists, "returns existing new category");
});
QUnit.test("getDefaultStyle()", function (assert) {
var defaultCategory = es.getDefaultStyle();
assert.equal(defaultCategory, configs.style.generic, "generic style returns correctly");
});
QUnit.test("getStyle()", function (assert) {
var style = es.getStyle('generic');
assert.ok(style, configs.style.generic, "returns style");
});
QUnit.test("getStyleObject()", function (assert) {
var style = es.getStyleObject('generic');
assert.equal(style, configs.style.generic, "returns style");
});
QUnit.test("setStyle()", function (assert) {
var style = {
name: "newStyle",
style: {
lineColor: 'line-color-black',
lineWidth: 'line-width-12',
arrowShape: 'arrow-shape-triangle'
}
};
var expected = Object.values(style.style);
var returnedStyle = es.setStyle(style);
assert.deepEqual(returnedStyle, expected, "style is set and returned");
});
}); |
#!/usr/bin/env bash
# Generate AVOD mini-batches for the KITTI scratch_300_val split, then run
# evaluation (training is left commented out).
# NOTE(review): assumes the script is launched from one level below the repo root.
cd ..
python scripts/preprocessing/gen_mini_batches.py --dataset_dir Kitti/scratch_300_val/ --plane scratch_300_val
# train the model
#python avod/experiments/run_training.py --pipeline_config=avod/configs/pyramid_cars_with_aug_example_scratch_300_val.config --device='0' --data_split='train'
# evaluate the model
python avod/experiments/run_evaluation.py --pipeline_config=avod/configs/pyramid_cars_with_aug_example_scratch_300_val.config --device='0' --data_split='val' |
#!/bin/bash
# Provision Apache Directory Service 2.0 (ApacheDS M17) inside a Vagrant VM:
# run the interactive installer non-interactively, fix the service user,
# move its web ports off Tomcat's, open the LDAP port and start the service.
# Idempotent: does nothing if /opt/$VER already exists.
VER=apacheds-2.0.0-M17
ADS=apacheds-2.0.0-M17-64bit.bin
if [ -d "/opt/$VER" ]
then
  echo "Apache Directory Service 2.0 is already installed, nothing done!"
else
  # include.sh presumably defines $SETUP and wget_and_make_executable — confirm.
  source /vagrant/vagrant-setup/include.sh
  wget_and_make_executable http://apache.mirrors.timporter.net/directory/apacheds/dist/2.0.0-M17/ $ADS
  # Feed the installer's prompts: accept the license, then take defaults.
  echo -e "yes\n\n\n\n\n\n\n" | $SETUP/cache/$ADS
  # NOTE(review): the installer was executed from $SETUP/cache but this removes
  # $ADS relative to the current directory — presumably $SETUP/cache/$ADS was
  # intended; confirm.
  rm $ADS
  mv /etc/init.d/$VER-default /etc/init.d/apacheds
  # apacheds script gets @user@ as OS user to run the service and it does not work
  # replace RUN_AS_USER="@user@" with RUN_AS_USER="apacheds" to make it work
  perl -pi -e 's/RUN_AS_USER\x3D\x22\x40user\x40\x22/RUN_AS_USER\x3D\x22apacheds\x22/g' /opt/$VER/bin/apacheds
  # Change web server ports from 8080 to 8000 and 8443 to 8400 to avoid clashing with Tomcat
  perl -pi -e 's/ads-systemport: 8080/ads-systemport: 8000/g' /var/lib/$VER/default/conf/config.ldif
  perl -pi -e 's/ads-systemport: 8443/ads-systemport: 8400/g' /var/lib/$VER/default/conf/config.ldif
  # Add users partition
  # mkdir /var/lib/$VER/default/partitions/users
  # cat $SETUP/ldap/users.ldif >> /var/lib/$VER/default/conf/config.ldif
  # Allow LDAP traffic on ApacheDS's default port.
  iptables -A INPUT -p tcp --dport 10389 -j ACCEPT
  service iptables save
  systemctl restart iptables
  service apacheds start default
fi |
#!/bin/bash
#
# A helper script to wait for solr
#
# Usage: wait-for-solr.sh [--max-attempts count] [--wait-seconds seconds] [--solr-url url]
# Deprecated usage: wait-for-solr.sh [ max_attempts [ wait_seconds ] ]
set -euo pipefail

SCRIPT="$0"

if [[ "${VERBOSE:-}" = "yes" ]]; then
  set -x
fi

# Print an error message plus usage and exit non-zero.
function usage {
  echo "$1"
  echo "Usage: $SCRIPT [--max-attempts count] [--wait-seconds seconds ] [--solr-url url]"
  exit 1
}

# Defaults; overridable via options, and (for the port) the SOLR_PORT env var.
max_attempts=12
wait_seconds=5

if [[ -v SOLR_PORT ]] && ! grep -E -q '^[0-9]+$' <<<"$SOLR_PORT"; then
  echo "Invalid SOLR_PORT=$SOLR_PORT environment variable specified"
  exit 1
fi
solr_url="http://localhost:${SOLR_PORT:-8983}"

while (( $# > 0 )); do
  case "$1" in
    --help)
      cat <<EOM
Usage: $SCRIPT [options]
Options:
  --max-attempts count: number of attempts to check Solr is up. Default: $max_attempts
  --wait-seconds seconds: number of seconds to wait between attempts. Default: $wait_seconds
  --solr-url url: URL for Solr server to check. Default: $solr_url
EOM
      exit 0
      ;;
    --solr-url)
      solr_url="$2";
      shift 2
      ;;
    --max-attempts)
      max_attempts="$2";
      shift 2;
      ;;
    --wait-seconds)
      wait_seconds="$2";
      shift 2;
      ;;
    * )
      # Deprecated positional invocation, kept for backwards compatibility.
      # Fix: the second argument is documented as optional, so default it
      # instead of reading a possibly-unset $2 (which aborts under `set -u`),
      # and drop the unconditional `shift 2` (which fails under `set -e` when
      # only one argument was given). `break` leaves the loop either way.
      max_attempts=$1;
      wait_seconds=${2:-$wait_seconds};
      echo "WARNING: deprecated invocation. Use $SCRIPT [--max-attempts count] [--wait-seconds seconds]"
      break;
      ;;
  esac
done

# Validate the effective settings before polling.
grep -q -E '^[0-9]+$' <<<"$max_attempts" || usage "--max-attempts $max_attempts: not a number"
if (( max_attempts == 0 )); then
  echo "The --max-attempts argument should be >0"
  exit 1
fi
grep -q -E '^[0-9]+$' <<<"$wait_seconds" || usage "--wait-seconds $wait_seconds: not a number"
grep -q -E '^https?://' <<<"$solr_url" || usage "--solr-url $solr_url: not a URL"

((attempts_left=max_attempts))
# Poll the Solr root page until it responds, sleeping between attempts.
while (( attempts_left > 0 )); do
  if wget -q -O - "$solr_url" | grep -q -i solr; then
    break
  fi
  (( attempts_left-- ))
  if (( attempts_left == 0 )); then
    echo "Solr is still not running; giving up"
    exit 1
  fi
  # Singular/plural word choice for the progress message.
  if (( attempts_left == 1 )); then
    attempts=attempt
  else
    attempts=attempts
  fi
  echo "Solr is not running yet on $solr_url. $attempts_left $attempts left"
  sleep "$wait_seconds"
done
echo "Solr is running on $solr_url"
|
#!/bin/bash
# Translate stdin with a left-to-right Nematus ensemble, rescore the resulting
# n-best list with a right-to-left ensemble, rerank, and postprocess.
model_dir=`dirname $0`
# NOTE(review): script_dir is set but never used below — confirm it is needed.
script_dir=/cs/usr/bareluz/gabi_labs/nematus_clean/nematus/en-de/scripts/
#language-independent variables (toolkit locations)
. $model_dir/../vars
#language-dependent variables (source and target language)
. $model_dir/vars
# temporary files
# NOTE(review): these are only removed on the success path — consider
# `trap 'rm -f "$tmpfile_src" "$tmpfile_nbest"' EXIT` for cleanup on failure.
tmpfile_src=`mktemp`
tmpfile_nbest=`mktemp`
$model_dir/preprocess.sh > $tmpfile_src
#left-to-right n-best list
THEANO_FLAGS=mode=FAST_RUN,floatX=float32,device=$device python $nematus_home/nematus/translate.py < $tmpfile_src \
  -m $model_dir/model.l2r.ens{1,2,3,4}.npz \
  -k 50 -p 1 --n-best --suppress-unk > $tmpfile_nbest
#rescoring
$model_dir/../scripts/reverse_nbest.py < $tmpfile_nbest | \
  THEANO_FLAGS=mode=FAST_RUN,floatX=float32,device=$device python $nematus_home/nematus/rescore.py \
  -m $model_dir/model.r2l.ens{1,2,3,4}.npz \
  -b 40 -s $tmpfile_src | \
  $model_dir/../scripts/rerank_normalize.py 50 1 | \
  $model_dir/../scripts/reverse.py | \
  $model_dir/postprocess.sh
rm $tmpfile_src
rm $tmpfile_nbest
|
#!/bin/sh
set -e

# Print a visual separator followed by the given message.
log(){
  echo '-------------------------------------'
  echo "$*"
}

# Point the kustomize overlay at the freshly built image tag, apply it, and
# track the rollout with kubedog until ready or PLUGIN_TIMEOUT elapses.
deploy(){
  log "+ check config file: ${PLUGIN_CONFIG}"
  cd ${PLUGIN_CONFIG}
  IMAGE=$(cat ../../base/deployment.yaml | shyaml get-value spec.template.spec.containers.0.image)
  # Tag builds use the git tag; branch builds use the CI build number.
  if [ -n "${DRONE_TAG}" ]; then
    log "+ set tag & image: ${IMAGE%:*}:${DRONE_TAG}"
    kustomize edit set image ${IMAGE%:*}:${DRONE_TAG}
  else
    log "+ set image: ${IMAGE%:*}:${DRONE_BUILD_NUMBER}"
    kustomize edit set image ${IMAGE%:*}:${DRONE_BUILD_NUMBER}
  fi
  NAMESPACE=$(cat kustomization.yaml | shyaml get-value namespace)
  NAME=$(cat ../../base/deployment.yaml | shyaml get-value metadata.name)
  log "+ deploy [$NAME] to [$NAMESPACE] timeout: ${PLUGIN_TIMEOUT}s"
  kubectl apply -k . && kubedog rollout track deployment $NAME -n $NAMESPACE -t ${PLUGIN_TIMEOUT}
}

# When PLUGIN_CHECK is enabled, deploy only if this module appears in the
# checkList of env.yaml; otherwise deploy unconditionally.
# Fix: the original scanned env.yaml once before the check, discarded the
# result by unconditionally resetting IS_DEPLOY=false, and printed debug
# output for every non-matching entry — that dead code is removed. The
# comparison also used '==', a bashism, under #!/bin/sh; POSIX [ uses '='.
if ${PLUGIN_CHECK} ; then
  IS_DEPLOY=false
  FILES=$(cat env.yaml | shyaml get-values checkList)
  for element in $FILES
  do
    if [ "$element" = "${PLUGIN_MODNAME}" ]; then
      IS_DEPLOY=true
      break
    fi
  done
  if $IS_DEPLOY ; then
    deploy
  else
    log "+ skip module package deploy"
  fi
else
  log "+ skip check & start deploy"
  deploy
fi
|
package stincmale.server.netty4;

import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToMessageDecoder;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.ReferenceCounted;
import javax.annotation.concurrent.ThreadSafe;
import java.util.List;

/**
 * A {@link MessageToMessageDecoder} that wraps each inbound message of type {@code RQ} into a
 * {@link RequestWithMetadata}{@code <RQ>} carrying the {@link Metadata} produced by
 * {@link #createMetadata(ChannelHandlerContext, Object)}.
 *
 * @param <RQ> A type of the request this {@link io.netty.channel.ChannelInboundHandlerAdapter} expects.
 */
@Sharable
@ThreadSafe
public abstract class RequestMetadataDecoder<RQ> extends MessageToMessageDecoder<RQ> {
  protected RequestMetadataDecoder() {
  }

  /**
   * Wraps {@code request} together with freshly created {@link Metadata} and emits the wrapper.
   * <p>
   * NOTE(review): the original comment claimed {@link ReferenceCounted#retain() retain} is called
   * on the {@code request}; no retain is visible in this method, so it is presumably performed
   * inside {@link RequestWithMetadata}'s constructor (e.g. via
   * {@link ReferenceCountUtil#retain(Object)}) — confirm, because
   * {@link MessageToMessageDecoder} releases the input message after decoding.
   */
  @Override
  protected final void decode(final ChannelHandlerContext ctx, final RQ request, final List<Object> out) throws Exception {
    final Metadata metadata = createMetadata(ctx, request);
    final RequestWithMetadata<RQ> requestWithMetadata = new RequestWithMetadata<>(request, metadata);
    out.add(requestWithMetadata);
  }

  /**
   * Creates the metadata to attach to {@code request}; invoked once per inbound message from
   * {@link #decode(ChannelHandlerContext, Object, List)}.
   */
  protected abstract Metadata createMetadata(ChannelHandlerContext ctx, RQ request);
}
|
<reponame>guilhermedias/twu-biblioteca-guilherme<filename>src/com/twu/biblioteca/MenuOptionsConsts.java
package com.twu.biblioteca;

/**
 * Numeric codes and display names for the Biblioteca console menu options.
 * Created by gdias on 8/3/15.
 */
public class MenuOptionsConsts {
    // Option numbers entered by the user; 0 is the sentinel for unrecognised input.
    public static final int INVALID_OPTION_NUMBER = 0;
    public static final int LIST_RESOURCES_OPTION_NUMBER = 1;
    public static final int CHECKOUT_OPTION_NUMBER = 2;
    public static final int RETURN_BOOK_OPTION_NUMBER = 3;
    public static final int LOGIN_OPTION_NUMBER = 4;
    public static final int VERIFY_BOOK_OPTION_NUMBER = 5;
    public static final int MY_INFO_OPTION_NUMBER = 6;
    public static final int QUIT_OPTION_NUMBER = 7;
    // Human-readable labels shown in the menu, matching the numbers above.
    public static final String INVALID_OPTION_NAME = "Invalid option";
    public static final String LIST_RESOURCES_OPTION_NAME = "List resources";
    public static final String CHECKOUT_RESOURCE_OPTION_NAME = "Checkout resource";
    public static final String RETURN_RESOURCE_OPTION_NAME = "Return resource";
    public static final String LOGIN_OPTION_NAME = "Log in";
    public static final String VERIFY_BOOK_OPTION_NAME = "Verify book";
    public static final String MY_INFO_OPTION_NAME = "My info";
    public static final String QUIT_OPTION_NAME = "Quit";
}
|
<filename>src/views/order/orderList/components/addAddress/index.ts
import {defineComponent, onMounted, ref} from "vue";
import { EluiChinaAreaDht } from 'elui-china-area-dht'
import {ElMessage} from "element-plus";
import api from "@/views/order/api";
import {OrderInfoType} from "@/views/order/orderList/interface";
import {useForm} from "@/views/order/orderList/components/addAddress/form";

/**
 * Add-address dialog for an order's customer.
 * author xyy
 */
export default defineComponent({
    name: 'AddAddress',
    components: {
        EluiChinaAreaDht,
    },
    props: {
        dialog: {
            type: Object
        },
        orderInfo: {
            type: Object
        }
    },
    setup(props: any, {emit}) {
        const orderInfo: OrderInfoType = props.orderInfo
        const formRef = ref()
        const {formData, handleFormChange} = useForm()
        // Flat lookup table of Chinese administrative areas, keyed by area code.
        const chinaData = new EluiChinaAreaDht.ChinaArea().chinaAreaflat
        onMounted(() => {
            // Pre-fill the customer name from the order being edited.
            handleFormChange({customerName: orderInfo.customerName})
        })
        return {
            formRef,
            formData,
            handleDialogClose: () => {
                emit('close');
            },
            // Validate the form, then persist the new address via the API.
            handleDialogConfirm: () => {
                formRef.value.validate((val: boolean) => {
                    if (!val) return
                    ElMessage.closeAll()
                    const params: any = formData.form
                    params.customerNo = orderInfo.customerNo
                    // customerName is stripped from the payload; the API call
                    // is keyed on customerNo instead.
                    delete params.customerName
                    api.addCustomerAddress(params).then(res => {
                        if (res.status === 'ok') {
                            emit('close', true) // refresh after closing
                            ElMessage.success('添加成功')
                        } else {
                            ElMessage.error(res.message)
                        }
                    })
                })
            },
            // Map selected [province, city, county] area codes to their labels.
            handleAddressOnChange: (address: string[]) => {
                const data = {
                    addressProvince: chinaData[address[0]].label,
                    addressCity: chinaData[address[1]].label,
                    addressCountry: chinaData[address[2]].label
                }
                handleFormChange(data)
            }
        }
    }
}) |
<reponame>tanshuai/reference-wallet
# pyre-ignore-all-errors
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
import time
import typing
import uuid
from datetime import datetime, timedelta
from typing import Optional
from diem_utils.precise_amount import Amount
from diem_utils.types.currencies import DiemCurrency, Currencies
from diem_utils.types.liquidity.currency import Currency, CurrencyPair, CurrencyPairs
from wallet import services
from wallet import storage
from wallet.services import inventory, INVENTORY_ACCOUNT_NAME
from wallet.services.fx.fx import get_rate
from wallet.services.inventory import buy_funds, INVENTORY_COVER_CURRENCY
from wallet.services.transaction import (
internal_transaction,
validate_balance,
)
from wallet.storage import (
get_order,
update_order,
Order,
get_account,
get_user,
)
from wallet.types import (
OrderStatus,
Direction,
CoverStatus,
OrderId,
TransactionType,
BalanceError,
ConvertResult,
PaymentMethodAction,
OrderType,
)
import logging

# Fix: the original called logging.getLogger(__name__) without binding the
# result, which is a no-op; bind it as the module logger.
logger = logging.getLogger(__name__)

# Seconds slept to simulate payment-provider latency in this demo flow.
PAYMENT_PROCESSING_DUMMY_SLEEP_TIME = 3
def process_payment_method(
    payment_method: str, amount: int, action: PaymentMethodAction
):
    """Simulate submitting *payment_method* to a payment service provider.

    In a real scenario the charge token would be provided by the PSP; this
    very simplified simulation fabricates a fresh token for any non-empty
    payment method and returns ``None`` (failure) otherwise. ``amount`` and
    ``action`` are accepted for interface compatibility but not used by the
    simulation.
    """
    return str(uuid.uuid4()) if payment_method else None
def process_order_payment(order_id, payment_method, action: PaymentMethodAction):
    """Charge (or credit) the order's exchange amount against *payment_method*.

    Sleeps to simulate PSP latency, then records either the charge token with
    status ``Charged`` or the appropriate failure status on the order.
    Returns the charge token, or ``None`` when the payment failed.
    """
    order = get_order(order_id)
    # Simulated round-trip to the payment provider.
    time.sleep(PAYMENT_PROCESSING_DUMMY_SLEEP_TIME)
    charge_token = process_payment_method(payment_method, order.exchange_amount, action)

    if charge_token is None:
        failed_status = (
            OrderStatus.FailedCharge.value
            if action == PaymentMethodAction.Charge
            else OrderStatus.FailedCredit
        )
        update_order(
            order_id=order_id,
            order_status=failed_status,
            payment_method=payment_method,
        )
    else:
        update_order(
            order_id=order_id,
            charge_token=charge_token,
            order_status=OrderStatus.Charged.value,
            payment_method=payment_method,
        )
    return charge_token
def create_order(
    user_id: int,
    direction: Direction,
    amount: int,
    base_currency: Currencies,
    quote_currency: Currencies,
) -> Order:
    """Persist a new exchange order for *user_id*.

    Quotes the requested amount at the current FX rate, stamps a ten-minute
    expiration, and classifies diem-to-diem pairs as direct conversions
    rather than trades against the inventory.
    """
    base = Currency(base_currency)
    quote = Currency(quote_currency)
    conversion_rate = get_rate(base_currency=base, quote_currency=quote)

    request_amount = Amount().deserialize(amount)
    exchange_amount = request_amount * conversion_rate

    is_direct = CurrencyPair.is_diem_to_diem(CurrencyPair(base, quote))
    order_type = OrderType.DirectConvert if is_direct else OrderType.Trade

    return storage.create_order(
        user_id=user_id,
        amount=request_amount.serialize(),
        direction=direction,
        base_currency=base_currency.value,
        quote_currency=quote_currency.value,
        expiration_time=datetime.utcnow() + timedelta(minutes=10),
        exchange_amount=exchange_amount.serialize(),
        order_type=order_type.value,
    )
def process_order(order_id: OrderId, payment_method: str):
    """Execute the order — on a background worker when available, else inline."""
    if not services.run_bg_tasks():
        execute_order(order_id=order_id, payment_method=payment_method)
        return
    from ..background_tasks.background import async_execute_order

    async_execute_order.send(order_id, payment_method)
def execute_order(order_id: OrderId, payment_method: Optional[str] = None):
    """Charge the payment method (when given) and execute the order.

    Expired orders are dropped silently. Trade orders are settled against the
    inventory and, on success, scheduled for cover; all other orders go
    through direct conversion.
    """
    if order_expired(order_id):
        return

    order = get_order(order_id)

    if payment_method:
        process_payment_method(
            payment_method=payment_method,
            amount=order.amount,
            action=PaymentMethodAction.Charge,
        )

    if order.order_type != OrderType.Trade:
        execute_convert(order)
        return

    if not execute_trade(order):
        return

    # Cover the inventory position, in the background when workers are available.
    if services.run_bg_tasks():
        from ..background_tasks.background import async_cover_order

        async_cover_order.send(order_id)
    else:
        cover_order(order_id=order_id)
def execute_trade(order: Order):
    """Settle a trade order with a single internal ledger transaction.

    Buy: inventory -> user (topping up inventory first if its balance is
    insufficient). Sell: user -> inventory. Returns True on success,
    False when the transfer fails with a BalanceError.
    """
    inventory_account_id = get_account(account_name=INVENTORY_ACCOUNT_NAME).id
    user_account_id = get_user(order.user_id).account.id
    order_id = typing.cast(OrderId, order.id)
    base_diem_currency = DiemCurrency[order.base_currency]

    if Direction[order.direction] == Direction.Buy:
        sender_id = inventory_account_id
        receiver_id = user_account_id
        if not validate_balance(sender_id, order.amount, base_diem_currency):
            # replenish inventory from the cover currency before paying out
            buy_funds(CurrencyPairs[f"{base_diem_currency}_{INVENTORY_COVER_CURRENCY}"])
    else:
        sender_id = user_account_id
        receiver_id = inventory_account_id

    try:
        transaction = internal_transaction(
            sender_id=sender_id,
            receiver_id=receiver_id,
            amount=order.amount,
            currency=base_diem_currency,
            payment_type=TransactionType.INTERNAL,
        )
        # NOTE(review): passes the OrderStatus enum member itself, while other
        # call sites pass .value — confirm update_order accepts both forms.
        update_order(
            order_id=order_id,
            internal_ledger_tx=transaction.id,
            order_status=OrderStatus.Executed,
        )
        return True
    except BalanceError:
        logging.exception("execute trade")
        update_order(order_id=order_id, order_status=OrderStatus.FailedExecute)
        return False
def execute_convert(order: Order) -> ConvertResult:
    """Convert between two diem currencies via two internal transactions.

    Validates both the user's and the inventory's balances up front, then
    moves the source amount user -> inventory and the converted amount
    inventory -> user. The two transfers are not atomic: if the second one
    fails the order is marked FailedExecute after the first already settled.
    """
    inventory_account = get_account(account_name=INVENTORY_ACCOUNT_NAME).id
    user_account = get_user(order.user_id).account.id
    order_id = typing.cast(OrderId, order.id)
    from_amount = order.amount
    from_diem_currency = DiemCurrency[order.base_currency]
    to_amount = order.exchange_amount
    to_diem_currency = DiemCurrency[order.quote_currency]

    if not validate_balance(
        sender_id=user_account, amount=from_amount, currency=from_diem_currency
    ):
        return ConvertResult.InsufficientBalance

    if not validate_balance(
        sender_id=inventory_account, amount=to_amount, currency=to_diem_currency
    ):
        return ConvertResult.InsufficientInventoryBalance

    try:
        to_inventory_tx = internal_transaction(
            sender_id=user_account,
            receiver_id=inventory_account,
            amount=from_amount,
            currency=from_diem_currency,
            payment_type=TransactionType.INTERNAL,
        )
        from_inventory_tx = internal_transaction(
            sender_id=inventory_account,
            receiver_id=user_account,
            amount=to_amount,
            currency=to_diem_currency,
            payment_type=TransactionType.INTERNAL,
        )
        update_order(
            order_id=order_id,
            internal_ledger_tx=to_inventory_tx.id,
            correlated_tx=from_inventory_tx.id,
            order_status=OrderStatus.Executed,
        )
        return ConvertResult.Success
    except Exception:
        # broad catch is deliberate here: any transfer failure must flip the
        # order to FailedExecute rather than propagate
        logging.exception("execute convert")
        update_order(order_id=order_id, order_status=OrderStatus.FailedExecute)
        return ConvertResult.TransferFailure
def order_expired(order_id: OrderId):
    """Return True (marking the order Expired) if its expiration time passed."""
    order = get_order(order_id)
    if datetime.utcnow() <= order.order_expiration:
        return False
    update_order(order_id=order_id, order_status=OrderStatus.Expired)
    return True
def is_executed(order_id: OrderId):
    """Whether the order has reached the Executed status."""
    status = OrderStatus[get_order(order_id).order_status]
    return status == OrderStatus.Executed
def cover_order(order_id: OrderId):
    """Cover an executed order's inventory exposure.

    Direct conversions need no market cover and are marked Covered directly;
    trades are handed to the inventory module.
    """
    order = get_order(order_id)
    # NOTE(review): compares against .value here but against the bare enum in
    # execute_order() — confirm which form order.order_type actually holds.
    if order.order_type == OrderType.DirectConvert.value:
        update_order(order_id=order_id, cover_status=CoverStatus.Covered)
        return
    inventory.cover_order(order)
|
#!/bin/bash
usage() {
  # Print usage help for this package-deploy script and exit.
  cat <<EOF
Usage:
  $0 <dist-dir> unstable|testing|stable [nocommit]

Available "targets":
  unstable = beta versions which contain latest changes
  testing  = only used for testing a release
  stable   = stable releases only

If 'nocommit' is specified, the generated files will be kept in the temporary
directory and not commited to the OpenSuse build service.
This is useful for building a package locally.
EOF
  exit 0
}
# parse command line options
if [ -z "$2" ]; then
  usage
fi

DIR=$1
CMD=$2
NOCOMMIT=$3
VERSION=`cat $DIR/VER`

# Directory which contains the files that will be uploaded to the OBS. Path does not include "$DIR".
XTREEMFS_DIRECTORY="xtreemfs"

HOME_PROJECT_PREFIX="home:xtreemfs"
SELECTED_PROJECT=$HOME_PROJECT_PREFIX
SELECTED_PACKAGE="xtreemfs"

TMP_DIR=/tmp/xtreemfs-upload

if [ ! -f "/usr/bin/osc" ]; then
  echo "osc command not found - please install osc first!"
  exit 1
fi

# start from a clean scratch directory
if [ -d "$TMP_DIR" ]; then
  rm -rf "$TMP_DIR"
fi

cd $DIR

# unstable/testing share one package; everything else goes to the 1.5.x project
if [ $CMD = "unstable" -o $CMD = "testing" ]; then
  SELECTED_PROJECT=$HOME_PROJECT_PREFIX":"$CMD
  SELECTED_PACKAGE="xtreemfs-testing"
else
  SELECTED_PROJECT=$HOME_PROJECT_PREFIX:"1.5.x"
  SELECTED_PACKAGE="xtreemfs"
fi

# create a tmp dir, check out current build files, delete all files
mkdir -p $TMP_DIR
cd $TMP_DIR
osc co $SELECTED_PROJECT $SELECTED_PACKAGE
rm $TMP_DIR/$SELECTED_PROJECT/$SELECTED_PACKAGE/*
cd -

# copy all new files, add new and delete old files, check in project
cp $XTREEMFS_DIRECTORY/* $TMP_DIR/$SELECTED_PROJECT/$SELECTED_PACKAGE
osc addremove $TMP_DIR/$SELECTED_PROJECT/$SELECTED_PACKAGE/
if [ -z "$NOCOMMIT" ]; then
  osc ci -m "update" $TMP_DIR/$SELECTED_PROJECT/$SELECTED_PACKAGE/
  rm -rf $TMP_DIR
fi

if [ $CMD = "DISABLEDstable" ]; then
  # Determine subproject for given version number.
  # Fixed: this previously read the undefined variable $VER instead of $VERSION.
  subproject=$(echo "$VERSION" | awk -F. '{ if (NF == 3) print $1"."$2".x"; else print "no_stable_version" }')
  if [ "$subproject" = "no_stable_version" ]; then
    echo "Failed to determine the subproject for this stable release: $VERSION"
    echo
    echo "Check if the required version number format x.y.z was used."
    exit 1
  fi

  SELECTED_PROJECT=$HOME_PROJECT_PREFIX":"$subproject
  SELECTED_PACKAGE="xtreemfs"

  # Check if the determined subproject already exists.
  # Fixed: capture the exit status — $rc was tested below but never assigned.
  osc meta prj "$SELECTED_PROJECT" &>/dev/null
  rc=$?
  if [ $rc -ne 0 ]; then
    echo "The subproject '$SELECTED_PROJECT' does not exist yet. Create it first from the webinterface and see the docu for additional steps."
    exit 1
  fi

  echo "ERROR: Deploying stable packages is currently broken in this script. Talk to Nico Kruber first to find out what changes are required for this script and then fix it."
  exit 1

  # create release packages on the server
  osc meta pkg home:xtreemfs xtreemfs-$VERSION --file meta.xml

  # create a tmp dir, check out current build files
  mkdir -p $TMP_DIR
  cd $TMP_DIR

  # copy the source packes to the new packages
  osc co home:xtreemfs xtreemfs-$VERSION
  cd -
  cp $XTREEMFS_DIRECTORY/* $TMP_DIR/home:xtreemfs/xtreemfs-$VERSION

  # add and commit the new files
  osc add $TMP_DIR/home:xtreemfs/xtreemfs-$VERSION/*
  cd -

  cd $TMP_DIR/home:xtreemfs
  if [ -z "$NOCOMMIT" ]; then
    osc ci -m "imported xtreemfs $VERSION" xtreemfs-$VERSION
    cd -
    rm -rf $TMP_DIR
  fi
fi

if [ -n "$NOCOMMIT" ]; then
  cat <<EOF
The generated build service files were NOT commited to the OpenSuse build
service. Instead they are still present in: $TMP_DIR

You can use them to test building a package locally e.g.,
cd $TMP_DIR
cd $SELECTED_PROJECT/$SELECTED_PACKAGE
# Build the package for openSUSE_12.1:
osc build --ccache openSUSE_12.1 xtreemfs.spec
EOF
fi
|
/* eslint-disable */
/* tslint:disable */
/**
* This is an autogenerated file created by the Stencil compiler.
* It contains typing information for all components that exist in this project.
*/
import { HTMLStencilElement, JSXBase } from "@stencil/core/internal";
import { ChildType } from "@stencil/core/internal";
import { ChildType as ChildType1 } from "@stencil/core/internal/index";
// Prop typings for each custom element (autogenerated by the Stencil compiler).
export namespace Components {
    interface StencilVirtualScrollDemo {
    }
    interface VirtualScroll {
        "autoDetectSize"?: boolean;
        "itemCount": number;
        "itemHeight"?: number;
        "itemWidth"?: number;
        "renderItem": (i: number) => ChildType | Promise<ChildType>
        | ChildType[] | Promise<ChildType[]> | Promise<ChildType>[];
        "sameSize"?: boolean;
        "tick"?: any;
    }
    interface VirtualScrollDemo {
    }
    interface VirtualScrollList {
        "estimatedItemHeight": number;
        "itemCount": number;
        "itemHeights": number[];
        "itemWidth": number;
        "renderItem": (i: number) => ChildType | Promise<ChildType> | ChildType[] | Promise<ChildType[]> | Promise<ChildType>[];
    }
    interface VirtualScrollListDemo {
    }
}
// Global HTMLElement interfaces/constructors and tag-name map (autogenerated).
declare global {
    interface HTMLStencilVirtualScrollDemoElement extends Components.StencilVirtualScrollDemo, HTMLStencilElement {
    }
    var HTMLStencilVirtualScrollDemoElement: {
        prototype: HTMLStencilVirtualScrollDemoElement;
        new (): HTMLStencilVirtualScrollDemoElement;
    };
    interface HTMLVirtualScrollElement extends Components.VirtualScroll, HTMLStencilElement {
    }
    var HTMLVirtualScrollElement: {
        prototype: HTMLVirtualScrollElement;
        new (): HTMLVirtualScrollElement;
    };
    interface HTMLVirtualScrollDemoElement extends Components.VirtualScrollDemo, HTMLStencilElement {
    }
    var HTMLVirtualScrollDemoElement: {
        prototype: HTMLVirtualScrollDemoElement;
        new (): HTMLVirtualScrollDemoElement;
    };
    interface HTMLVirtualScrollListElement extends Components.VirtualScrollList, HTMLStencilElement {
    }
    var HTMLVirtualScrollListElement: {
        prototype: HTMLVirtualScrollListElement;
        new (): HTMLVirtualScrollListElement;
    };
    interface HTMLVirtualScrollListDemoElement extends Components.VirtualScrollListDemo, HTMLStencilElement {
    }
    var HTMLVirtualScrollListDemoElement: {
        prototype: HTMLVirtualScrollListDemoElement;
        new (): HTMLVirtualScrollListDemoElement;
    };
    interface HTMLElementTagNameMap {
        "stencil-virtual-scroll-demo": HTMLStencilVirtualScrollDemoElement;
        "virtual-scroll": HTMLVirtualScrollElement;
        "virtual-scroll-demo": HTMLVirtualScrollDemoElement;
        "virtual-scroll-list": HTMLVirtualScrollListElement;
        "virtual-scroll-list-demo": HTMLVirtualScrollListDemoElement;
    }
}
// JSX-side prop typings (mirrors Components; autogenerated).
declare namespace LocalJSX {
    interface StencilVirtualScrollDemo {
    }
    interface VirtualScroll {
        "autoDetectSize"?: boolean;
        "itemCount": number;
        "itemHeight"?: number;
        "itemWidth"?: number;
        "renderItem": (i: number) => ChildType | Promise<ChildType>
        | ChildType[] | Promise<ChildType[]> | Promise<ChildType>[];
        "sameSize"?: boolean;
        "tick"?: any;
    }
    interface VirtualScrollDemo {
    }
    interface VirtualScrollList {
        "estimatedItemHeight": number;
        "itemCount": number;
        "itemHeights": number[];
        "itemWidth": number;
        "renderItem": (i: number) => ChildType | Promise<ChildType> | ChildType[] | Promise<ChildType[]> | Promise<ChildType>[];
    }
    interface VirtualScrollListDemo {
    }
    interface IntrinsicElements {
        "stencil-virtual-scroll-demo": StencilVirtualScrollDemo;
        "virtual-scroll": VirtualScroll;
        "virtual-scroll-demo": VirtualScrollDemo;
        "virtual-scroll-list": VirtualScrollList;
        "virtual-scroll-list-demo": VirtualScrollListDemo;
    }
}
export { LocalJSX as JSX };
// Module augmentation: registers the tags with Stencil's JSX.IntrinsicElements.
declare module "@stencil/core" {
    export namespace JSX {
        interface IntrinsicElements {
            "stencil-virtual-scroll-demo": LocalJSX.StencilVirtualScrollDemo & JSXBase.HTMLAttributes<HTMLStencilVirtualScrollDemoElement>;
            "virtual-scroll": LocalJSX.VirtualScroll & JSXBase.HTMLAttributes<HTMLVirtualScrollElement>;
            "virtual-scroll-demo": LocalJSX.VirtualScrollDemo & JSXBase.HTMLAttributes<HTMLVirtualScrollDemoElement>;
            "virtual-scroll-list": LocalJSX.VirtualScrollList & JSXBase.HTMLAttributes<HTMLVirtualScrollListElement>;
            "virtual-scroll-list-demo": LocalJSX.VirtualScrollListDemo & JSXBase.HTMLAttributes<HTMLVirtualScrollListDemoElement>;
        }
    }
}
|
#!/bin/bash
input=/dev/stdin
output=/dev/stdout
# Run one XMI file through the NAF pipeline: XMI->NAF, tokenize (fr),
# POS-tag + lemmatize, NER, then NAF->XMI into the output file.
function run_file {
    local ifile=$1
    local ofile=$2
    # Fixed: removed useless 'cat', deduplicated a doubled 2>/dev/null on the
    # NER stage, and quoted the paths so filenames with spaces survive.
    /usr/bin/xmi2naf.py < "${ifile}" | \
    java -jar /usr/bin/ixa-pipe-tok-exec.jar tok -l fr --inputkaf 2>/dev/null | \
    java -jar /usr/bin/ixa-pipe-pos-exec.jar tag -m /usr/bin/pos.bin -lm /usr/bin/lemma.bin 2>/dev/null | \
    java -jar /usr/bin/ixa-pipe-nerc-exec.jar tag -m /usr/bin/nerc.bin 2>/dev/null | \
    /usr/bin/naf2xmi.py > "${ofile}"
}
# Parse --input/--output options; anything else is kept in POSITIONAL.
declare -a POSITIONAL
while [[ $# -gt 0 ]]; do
  key="$1"
  case $key in
    --input)
    input="$2"
    shift # past argument
    shift # past value
    ;;
    --output)
    output="$2"
    shift # past argument
    shift # past value
    ;;
    *)    # unknown option
    POSITIONAL+=("$1") # save it in an array for later
    shift # past argument
    ;;
  esac
done

if [ ! -d "${input}" ]; then
    echo "ERROR: --input must be a directory"
    exit 1
fi

if [ ! -d "${output}" ]; then
    mkdir "${output}"
    echo "Creating --output directory"
fi

# Process every .xmi file in the input directory.
# Fixed: iterate the glob directly instead of parsing 'ls' output, and quote
# all expansions so paths containing spaces work.
declare -a IFILES
IFILES=( "${input}"/*.xmi )

for i in "${IFILES[@]}"; do
    bfile=$(basename "$i")
    ifile="${input}/${bfile}"
    ofile="${output}/${bfile}"
    run_file "${ifile}" "${ofile}"
done

# Ship the type system alongside the converted files (best-effort).
cp -f /usr/bin/typesystem.xml "${output}" >& /dev/null
|
<filename>core/src/mindustry/ai/Pathfinder.java
package mindustry.ai;
import arc.Events;
import arc.func.Cons2;
import arc.math.geom.Geometry;
import arc.math.geom.Point2;
import arc.math.geom.Position;
import arc.struct.Array;
import arc.struct.GridBits;
import arc.struct.IntArray;
import arc.struct.IntQueue;
import arc.util.ArcAnnotate.Nullable;
import arc.util.Structs;
import arc.util.TaskQueue;
import arc.util.Time;
import arc.util.async.Threads;
import mindustry.annotations.Annotations.Struct;
import mindustry.game.EventType.ResetEvent;
import mindustry.game.EventType.TileChangeEvent;
import mindustry.game.EventType.WorldLoadEvent;
import mindustry.game.Team;
import mindustry.gen.PathTile;
import mindustry.world.Pos;
import mindustry.world.Tile;
import mindustry.world.meta.BlockFlag;
import z.ai.astar.FlatTiledAStar;
import z.ai.astar.FlatTiledNode;
import z.ai.astar.TiledSmoothableGraphPath;
import z.utils.FinalCons;
import static mindustry.Vars.indexer;
import static mindustry.Vars.net;
import static mindustry.Vars.spawner;
import static mindustry.Vars.state;
import static mindustry.Vars.world;
import static z.debug.ZDebug.enable_allPassable;
public class Pathfinder implements Runnable{
    /** Max pathfinding time spent per update pass. */
    private static final long maxUpdate = Time.millisToNanos(4);
    private static final int updateFPS = 60;
    /** Sleep interval of the pathfinding thread, in ms. */
    private static final int updateInterval = 1000 / updateFPS;
    /** Sentinel weight for tiles that cannot be traversed. */
    private static final int impassable = -1;

    /** tile data, see PathTileStruct */
    private int[][] tiles;
    /** unordered array of path data for iteration only. DO NOT iterate or access this in the main thread.*/
    private Array<PathData> list = new Array<>();
    /** Maps teams + flags to a valid path to get to that flag for that team. */
    private PathData[][] pathMap = new PathData[Team.all().length][PathTarget.all.length];
    /** Grid map of created path data that should not be queued again. */
    private GridBits created = new GridBits(Team.all().length, PathTarget.all.length);
    /** handles task scheduling on the update thread. */
    private TaskQueue queue = new TaskQueue();
    /** current pathfinding thread */
    private @Nullable Thread thread;
    /** Wires world lifecycle events: rebuilds all pathfinding state on world
     * load, stops the thread on reset, and re-packs tiles on tile change. */
    public Pathfinder(){
        Events.on(WorldLoadEvent.class, event -> {
            stop();

            //reset and update internal tile array
            tiles = new int[world.width()][world.height()];
            pathMap = new PathData[Team.all().length][PathTarget.all.length];
            created = new GridBits(Team.all().length, PathTarget.all.length);
            list = new Array<>();

            // zones add begin: per-squad/member targets reading the squad's current target tile
            for (int s = 0; s < squadTarget.length; s++) {
                for (int m = 0; m < squadTarget[s].length; m++) {
                    final int _squad = s, _member = m;
                    squadTarget[s][m] = new PathTarget((team, out) -> {
                        Tile _target = squadTargetTile[team.id][_squad][_member];
                        if (_target != null) {
                            out.add(_target.pos());
                        }
                    });
                }
            }
            squadTargetPos = new int[teamCount][squadCount][memberCount];
            for (int i = 0, len = extendCreated.length; i < len; i++) {
                extendCreated[i] = new GridBits(squadCount, memberCount);
            }
            extendPathMap = new PathData[teamCount][squadCount][memberCount];
            // A* graph init; blocks whose name starts with "road" are treated as walkable
            aStar.initGraph(world.getTiles(), t -> t.block().name.startsWith("road") ? false : true);
            // zones add end

            for(int x = 0; x < world.width(); x++){
                for(int y = 0; y < world.height(); y++){
                    tiles[x][y] = packTile(world.rawTile(x, y));
                }
            }

            // special preset which may help speed things up; this is optional
            preloadPath(state.rules.waveTeam, PathTarget.enemyCores);

            start();
        });

        Events.on(ResetEvent.class, event -> stop());

        Events.on(TileChangeEvent.class, event -> updateTile(event.tile));
    }
    /** Packs a tile into its internal representation (cost, owning team,
     * unused type byte, and passability: not solid and not deep liquid). */
    private int packTile(Tile tile){
        return PathTile.get(tile.cost, tile.getTeamID(), (byte)0, !tile.solid() && tile.floor().drownTime <= 0f);
    }

    /** Starts or restarts the pathfinding thread. */
    private void start(){
        stop();
        thread = Threads.daemon(this);
    }

    /** Stops the pathfinding thread and discards any queued tasks. */
    private void stop(){
        if(thread != null){
            thread.interrupt();
            thread = null;
        }
        queue.clear();
    }

    /** Debug helper: flow-field cost toward enemy cores for this team at
     * (x, y); 0 if that field was never created. */
    public int debugValue(Team team, int x, int y){
        if(pathMap[team.id][PathTarget.enemyCores.ordinal()] == null) return 0;
        return pathMap[team.id][PathTarget.enemyCores.ordinal()].weights[x][y];
    }
    /** Update a tile in the internal pathfinding grid. Causes a complete pathfinding recalculation.
     * Main thread: re-packs the tile and refreshes every path's target list;
     * the actual flow-field update is posted to the pathfinding thread. */
    public void updateTile(Tile tile){
        if(net.client()) return;

        int x = tile.x, y = tile.y;

        tile.getLinkedTiles(t -> {
            if(Structs.inBounds(t.x, t.y, tiles)){
                tiles[t.x][t.y] = packTile(t);
                // zones add begin: keep the A* road graph in sync with the block change
                aStar.updateTile(t.x, t.y, () -> tile.block().name.startsWith("road") ? false : true);
                // zones add end
            }
        });

        //can't iterate through array so use the map, which should not lead to problems
        for(PathData[] arr : pathMap){
            for(PathData path : arr){
                if(path != null){
                    synchronized(path.targets){
                        path.targets.clear();
                        path.target.getTargets(path.team, path.targets);
                    }
                }
            }
        }

        // zones add begin: same refresh for the squad/member extension paths
        for(PathData[][] arrTeam : extendPathMap){
            for(PathData[] arrSquad : arrTeam){
                for (PathData path : arrSquad) {
                    if(path != null){
                        synchronized(path.targets){
                            path.targets.clear();
                            path.target.getTargets(path.team, path.targets);
                        }
                    }
                }
            }
        }
        // zones add end

        queue.post(() -> {
            for(PathData data : list){
                updateTargets(data, x, y);
            }
        });
    }
    /** Thread implementation: drains queued tasks, spreads each path's
     * frontier within the per-frame time budget, then sleeps. Exits when
     * interrupted (see stop()) or when running as a network client. */
    @Override
    public void run(){
        while(true){
            if(net.client()) return;
            try{

                queue.run();

                //total update time no longer than maxUpdate
                for(PathData data : list){
                    updateFrontier(data, maxUpdate / list.size);
                }

                try{
                    Thread.sleep(updateInterval);
                }catch(InterruptedException e){
                    //stop looping when interrupted externally
                    return;
                }
            }catch(Throwable e){
                // keep the thread alive on unexpected errors; just log them
                e.printStackTrace();
            }
        }
    }
    /** Gets next tile to travel to. Main thread only.
     * Lazily creates the (team, target) flow field on first request and
     * returns the current tile until it is built; afterwards returns the
     * cheapest non-solid d8 neighbor, avoiding diagonal corner traps. */
    public Tile getTargetTile(Tile tile, Team team, PathTarget target){
        if(tile == null) return null;

        PathData data = pathMap[team.id][target.ordinal()];

        if(data == null){
            //if this combination is not found, create it on request
            if(!created.get(team.id, target.ordinal())){
                created.set(team.id, target.ordinal());

                //grab targets since this is run on main thread
                IntArray targets = target.getTargets(team, new IntArray());
                queue.post(() -> createPath(team, target, targets));
            }
            return tile;
        }

        // (removed a large commented-out "moveIndexer" experiment that lived here)

        int[][] values = data.weights;
        int value = values[tile.x][tile.y];
        Tile current = null;
        int tl = 0;
        for(Point2 point : Geometry.d8){
            int dx = tile.x + point.x, dy = tile.y + point.y;
            Tile other = world.tile(dx, dy);
            if(other == null) continue;

            if(values[dx][dy] < value && (current == null || values[dx][dy] < tl) && !other.solid() && other.floor().drownTime <= 0 &&
            !(point.x != 0 && point.y != 0 && (world.solid(tile.x + point.x, tile.y) || world.solid(tile.x, tile.y + point.y)))){ //diagonal corner trap

                current = other;
                tl = values[dx][dy];
            }
        }

        if(current == null || tl == impassable) return tile;

        return current;
    }
/** @return whether a tile can be passed through by this team. Pathfinding thread only.*/
private boolean passable(int x, int y, Team team){
if (enable_allPassable)
return true;
int tile = tiles[x][y];
return PathTile.passable(tile) || (PathTile.team(tile) != team.id && PathTile.team(tile) != (int)Team.derelict.id);
}
    /** Clears the frontier, increments the search and sets up all flow sources.
     * This only occurs for active teams. Skipped while a spread is still in
     * progress unless the changed tile was itself a target. */
    private void updateTargets(PathData path, int x, int y){
        if(!Structs.inBounds(x, y, path.weights)) return;

        if(path.weights[x][y] == 0){
            //this was a previous target
            path.frontier.clear();
        }else if(!path.frontier.isEmpty()){
            //skip if this path is processing
            return;
        }

        //assign impassability to the tile
        if(!passable(x, y, path.team)){
            path.weights[x][y] = impassable;
        }

        //increment search, clear frontier
        path.search++;
        path.frontier.clear();

        synchronized(path.targets){
            //add targets
            for(int i = 0; i < path.targets.size; i++){
                int pos = path.targets.get(i);
                int tx = Pos.x(pos), ty = Pos.y(pos);

                path.weights[tx][ty] = 0;
                path.searches[tx][ty] = (short)path.search;
                path.frontier.addFirst(pos);
            }
        }
    }
    /** Builds a flow field eagerly and spreads it fully (no time budget). */
    private void preloadPath(Team team, PathTarget target){
        updateFrontier(createPath(team, target, target.getTargets(team, new IntArray())), -1);
    }

    /** Created a new flowfield that aims to get to a certain target for a certain team.
     * Pathfinding thread only. */
    private PathData createPath(Team team, PathTarget target, IntArray targets){
        PathData path = new PathData(team, target, world.width(), world.height());
        list.add(path);
        pathMap[team.id][target.ordinal()] = path;

        //grab targets from passed array
        synchronized(path.targets){
            path.targets.clear();
            path.targets.addAll(targets);
        }

        //fill with impassables by default
        for(int x = 0; x < world.width(); x++){
            for(int y = 0; y < world.height(); y++){
                path.weights[x][y] = impassable;
            }
        }

        //add targets
        for(int i = 0; i < path.targets.size; i++){
            int pos = path.targets.get(i);
            path.weights[Pos.x(pos)][Pos.y(pos)] = 0;
            path.frontier.addFirst(pos);
        }

        return path;
    }
    /** Update the frontier for a path. Pathfinding thread only.
     * BFS-style spread of weights from the targets outward, bounded by
     * nsToRun nanoseconds (negative = unbounded). */
    private void updateFrontier(PathData path, long nsToRun){
        long start = Time.nanos();

        while(path.frontier.size > 0 && (nsToRun < 0 || Time.timeSinceNanos(start) <= nsToRun)){
            Tile tile = world.tile(path.frontier.removeLast());
            if(tile == null || path.weights == null) return; //something went horribly wrong, bail
            int cost = path.weights[tile.x][tile.y];

            //pathfinding overflowed for some reason, time to bail. the next block update will handle this, hopefully
            if(path.frontier.size >= world.width() * world.height()){
                path.frontier.clear();
                return;
            }

            if(cost != impassable){
                for(Point2 point : Geometry.d4){
                    int dx = tile.x + point.x, dy = tile.y + point.y;
                    Tile other = world.tile(dx, dy);

                    if(other != null && (path.weights[dx][dy] > cost + other.cost || path.searches[dx][dy] < path.search) && passable(dx, dy, path.team)){
                        if(other.cost < 0) throw new IllegalArgumentException("Tile cost cannot be negative! " + other);
                        path.frontier.addFirst(Pos.get(dx, dy));
                        path.weights[dx][dy] = cost + other.cost;
                        path.searches[dx][dy] = (short)path.search;
                    }
                }
            }
        }
    }
    /** A path target defines a set of targets for a path.*/
    static public class PathTarget{
        /** Enemy core blocks, plus ground spawn points when defending waves. */
        public static final PathTarget enemyCores = new PathTarget(0, (team, out) -> {
            for(Tile other : indexer.getEnemy(team, BlockFlag.core)){
                out.add(other.pos());
            }

            //spawn points are also enemies.
            if(state.rules.waves && team == state.rules.defaultTeam){
                for(Tile other : spawner.getGroundSpawns()){
                    out.add(other.pos());
                }
            }
        });
        /** Allied rally point blocks. */
        public static final PathTarget rallyPoints = new PathTarget(1, (team, out) -> {
            for(Tile other : indexer.getAllied(team, BlockFlag.rally)){
                out.add(other.pos());
            }
        });
        // (removed commented-out experimental targets: waveRallyPoints, moveIndexer)
        ;

        // explicit ordinal; -1 for dynamically created (squad) targets
        private final int ordinal;
        public int ordinal() {
            return ordinal;
        }

        public static final PathTarget[] all = new PathTarget[] {enemyCores, rallyPoints /*, moveIndexer*/};

        /** fills the output array with this team's target positions */
        private final Cons2<Team, IntArray> targeter;

        PathTarget(Cons2<Team, IntArray> targeter){
            this(-1, targeter);
        }

        PathTarget(int ordinal, Cons2<Team, IntArray> targeter){
            this.ordinal = ordinal;
            this.targeter = targeter;
        }

        /** Get targets. This must run on the main thread.*/
        public IntArray getTargets(Team team, IntArray out){
            targeter.get(team, out);
            return out;
        }
    }
    /** Data for a specific flow field to some set of destinations. */
    class PathData{
        /** Team this path is for. */
        final Team team;
        /** Flag that is being targeted. */
        final PathTarget target;
        /** costs of getting to a specific tile */
        final int[][] weights;
        /** search IDs of each position - the highest, most recent search is prioritized and overwritten */
        final short[][] searches;
        /** search frontier, these are Pos objects */
        final IntQueue frontier = new IntQueue();
        /** all target positions; these positions have a cost of 0, and must be synchronized on! */
        final IntArray targets = new IntArray();
        /** current search ID */
        int search = 1;

        PathData(Team team, PathTarget target, int width, int height){
            this.team = team;
            this.target = target;

            this.weights = new int[width][height];
            this.searches = new short[width][height];
            // rough pre-size for a typical frontier perimeter
            this.frontier.ensureCapacity((width + height) * 3);
        }
    }
    /** Holds a copy of tile data for a specific tile position.
     * Bit-packed into an int by the annotation processor (see packTile). */
    @Struct
    class PathTileStruct{
        //traversal cost
        byte cost;
        //team of block, if applicable (0 by default)
        byte team;
        //type of target; TODO remove
        byte type;
        //whether it's viable to pass this block
        boolean passable;
    }
    // zones add begin
    // astar add begin
    /** Compute an A* path asynchronously on the pathfinding thread; the result
     * is written into the (cleared) path object when the task runs. */
    public void getPathList(Tile from, Tile target, TiledSmoothableGraphPath<FlatTiledNode> path) {
        path.clear(); // thread safe code not must
        queue.post(() -> {
            aStar.getPath(from.x, from.y, target.x, target.y, path);
        });
    }

    /** Compute an A* path that stops at the rim of the target block instead of
     * its center.
     * @apiNote the worker lambda must not use static temp data (runs off-thread)
     * */
    public void getPathListNoLastNode(Tile from, Tile target, TiledSmoothableGraphPath<FlatTiledNode> path) {
        path.clear(); // thread safe code not must
        // (removed a commented-out earlier variant of the rim test)
        // current approach: mark a (size+2)^2 ring around the block; a node is a
        // valid endpoint when it lies inside the ring but is not one of the corners
        int offset = (target.block().size-1) / 2;
        final int startx = target.x - offset - 1;
        final int starty = target.y - offset - 1;
        final int sizex = target.block().size + 2;
        final int sizey = sizex;
        final int arrsize = sizex * sizey;
        final boolean[] arrContains = new boolean[arrsize];
        { // set the four corners to true (excluded as endpoints)
            arrContains[0] = true; //
            arrContains[sizex - 1] = true;
            arrContains[(sizey - 1) * sizex] = true;
            arrContains[sizex * sizey - 1] = true;
        }
        queue.post(() -> {
            aStar.getPathNoLastNode(from.x, from.y, target.x, target.y, path,
                    (curNode, endNode) -> {
                        int x = curNode.x - startx;
                        int y = (curNode.y - starty);
                        int index = x + y * sizex;
                        if (x < 0 || y < 0 || x >= sizex || y >= sizey) return false; // || index < 0 || index >= arrsize
                        else return !arrContains[index];
                    }
            );
        });
    }

    /** Fetch the A* graph node nearest to a world position (used for patrol auto-move). */
    public FlatTiledNode getGraphNode(float x, float y) {
        return aStar.getGraphNode(Math.round(x), Math.round(y));
    }
    // astar add end
// astar add end
    /** Gets next tile to travel to for a squad member. Main thread only.
     * Mirrors getTargetTile() but uses the per-(team, squad, member) flow
     * fields, rebuilding the field lazily when the member's target tile moves. */
    public Position getTargetMember(Tile tile, Team team, int squad, int member){
        if(tile == null) return null;

        PathData data = extendPathMap[team.id][squad][member];

        if(data == null){
            //if this combination is not found, create it on request
            if(!extendCreated[team.id].get(squad, member)){
                extendCreated[team.id].set(squad, member);

                //grab targets since this is run on main thread
                IntArray targets = squadTarget[squad][member].getTargets(team, new IntArray());
                queue.post(() -> createPathSquad(team, squad, member, squadTarget[squad][member], targets));
            }
            return tile;
        }

        // zones add begin
        // recompute on demand: adds latency on target change but avoids
        // unnecessary main-thread updates
        { // check whether the move-target path needs updating
            Tile moveIndexer = squadTargetTile[team.id][squad][member];
            if (moveIndexer != null && moveIndexer.pos() != squadTargetPos[team.id][squad][member]) {
                squadTargetPos[team.id][squad][member] = moveIndexer.pos();
                updateTile(moveIndexer, data);
                return tile;
            }
        }
        // zones add end

        int[][] values = data.weights;
        int value = values[tile.x][tile.y];
        Tile current = null;
        int tl = 0;
        for(Point2 point : Geometry.d8){
            int dx = tile.x + point.x, dy = tile.y + point.y;
            Tile other = world.tile(dx, dy);
            if(other == null) continue;

            if(values[dx][dy] < value && (current == null || values[dx][dy] < tl) && !other.solid() && other.floor().drownTime <= 0 &&
                    !(point.x != 0 && point.y != 0 && (world.solid(tile.x + point.x, tile.y) || world.solid(tile.x, tile.y + point.y)))){ //diagonal corner trap

                current = other;
                tl = values[dx][dy];
            }
        }

        if(current == null || tl == impassable) return tile;

        return current;
    }
    /** Squad-path variant of createPath():
     * Created a new flowfield that aims to get to a certain target for a certain team.
     * Pathfinding thread only. */
    private PathData createPathSquad(Team team, int squad, int member, PathTarget target, IntArray targets){
        PathData path = new PathData(team, target, world.width(), world.height());
        list.add(path);
        extendPathMap[team.id][squad][member] = path;

        //grab targets from passed array
        synchronized(path.targets){
            path.targets.clear();
            path.targets.addAll(targets);
        }

        //fill with impassables by default
        for(int x = 0; x < world.width(); x++){
            for(int y = 0; y < world.height(); y++){
                path.weights[x][y] = impassable;
            }
        }

        //add targets
        for(int i = 0; i < path.targets.size; i++){
            int pos = path.targets.get(i);
            path.weights[Pos.x(pos)][Pos.y(pos)] = 0;
            path.frontier.addFirst(pos);
        }

        return path;
    }
    /** Single-path variant of updateTile(): re-packs the changed tile and
     * refreshes only the given path's targets. Main thread only. */
    private void updateTile(Tile tile, PathData path){
        if(net.client()) return;

        int x = tile.x, y = tile.y;

        tile.getLinkedTiles(t -> {
            if(Structs.inBounds(t.x, t.y, tiles)){
                tiles[t.x][t.y] = packTile(t);
            }
        });

        //can't iterate through array so use the map, which should not lead to problems
        if(path != null){
            synchronized(path.targets){
                path.targets.clear();
                path.target.getTargets(path.team, path.targets);
            }

            queue.post(() -> {
                updateTargets(path, x, y);
            });
        }
    }
    public int teamCount = Team.all().length;
    public int squadCount = FinalCons.max_squad_count;
    public int memberCount = FinalCons.max_member_count;
    // private int movePos = -1;
    /** Grid map of created path data that should not be queued again. */
    private GridBits[] extendCreated = new GridBits[teamCount];
    /** Extended path data for squad formation movement, indexed [team][squad][member]. */
    private PathData[][][] extendPathMap = new PathData[teamCount][squadCount][memberCount];
    /** squad path target*/
    private PathTarget[][] squadTarget = new PathTarget[squadCount][memberCount];
    /** unit move target*/
    private int[][][] squadTargetPos = new int[teamCount][squadCount][memberCount];
    /** unit move target tile*/
    public Tile[][][] squadTargetTile = new Tile[teamCount][squadCount][memberCount];

    // A* road pathfinding implementation
    private FlatTiledAStar aStar = new FlatTiledAStar();

    // NOTE(review): appears unused (a commented-out duplicate exists above) —
    // candidate for removal; verify no reflective access first.
    private int movePos = -1;
    // zones add end
}
|
class CredentialType
{
    /**
     * Returns the unique name identifying this form type.
     *
     * @return string
     */
    public function getName()
    {
        return 'classcentral_credentialbundle_credentialtype';
    }
}
#!/bin/bash
#
# Copyright 2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Print the short usage summary to stderr and exit with status 1.
# Called on option-parsing and validation errors.
usage() {
    echo "Usage: $0
-f|--filesystem <Name of Volume's Source Filesystem>
-l|--path <full Path of Volume in Primary Filesystem>
-F|--fileset <name of source fileset>
-s|--size <size in GB>
-u|--username <Username of spectrum scale GUI user account.>
-p|--password <Password of spectrum scale GUI user account.>
-r|--guihost <Route host name used to route traffic to the spectrum scale GUI service.>
[-P|--pvname <name for pv>]
[-c|--storageclass <StorageClass for pv>]
[-a|--accessmode <AccessMode for pv>]
[-h|--help] " 1>&2
    exit 1
}
# Print the full help text (options plus worked examples) to stderr and exit
# with status 1. Shown for -h/--help and when no arguments are given.
fullUsage() {
    echo "Usage: $0
-f|--filesystem <Name of Volume's Source Filesystem>
-l|--path <full Path of Volume in Primary Filesystem>
-F|--fileset <name of source fileset>
-s|--size <size in GB>
-u|--username <Username of spectrum scale GUI user account.>
-p|--password <Password of spectrum scale GUI user account.>
-r|--guihost <HostName(or route) used to access IBM Spectrum Scale GUI service running on Primary Cluster.>
[-P|--pvname <name for pv>]
[-c|--storageclass <StorageClass for pv>]
[-a|--accessmode <AccessMode for pv>]
[-h|--help]
Example 1: Directory based static volume
This example shows how to create a volume from a directory '/mnt/fs1/staticpv' within the filesystem 'fs1'.
$0 --filesystem fs1 --path /mnt/fs1/staticpv --size 10 --pvname mystaticpv --guihost ibm-spectrum-scale-gui-ibm-spectrum-scale.apps.cluster.cp.fyre.ibm.com
Example 2: Fileset based volume
This example shows how to create a volume from a fileset 'fileset1' within the filesystem 'fs1'.
$0 --filesystem fs1 --fileset f1 --size 10 --pvname mystaticpv --guihost ibm-spectrum-scale-gui-ibm-spectrum-scale.apps.cluster.cp.fyre.ibm.com
Note: The Path specified for option --path must be valid gpfs path from primary filesystem." 1>&2
    exit 1
}
# Generate Yaml
# Generate the PersistentVolume manifest "<pvname>.yaml" in the current
# directory; refuses to overwrite an existing file (exit 2).
#   $1 - CSI volume handle
#   $2 - PV name
#   $3 - size in GiB
#   $4 - access mode (ReadWriteMany / ReadWriteOnce)
# Also reads the global STORAGECLASS line (possibly empty) prepared during
# argument validation.
generate_pv_yaml() {
    # 'local' keeps these from clobbering same-named script globals.
    local volhandle=$1
    local volname=$2
    local volsize=$3
    local accessmode=$4
    if [[ -f "${volname}.yaml" ]]; then
        echo "ERROR: File ${volname}.yaml already exist"
        exit 2
    fi
    # Nesting below must match the Kubernetes PersistentVolume schema.
    cat >"${volname}".yaml <<EOL
# -- ${volname}.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ${volname}
spec:
  capacity:
    storage: ${volsize}Gi
  accessModes:
    - ${accessmode}
  claimRef:
    name: pvc-${volname}
    namespace: ibm-spectrum-scale-csi
  csi:
    driver: spectrumscale.csi.ibm.com
    volumeHandle: ${volhandle}
  ${STORAGECLASS}
EOL
    echo "INFO: volumeHandle: ${volhandle}"
    echo "INFO: Successfully created ${volname}.yaml"
}
# Generate PVC manifest
# Generate the matching PersistentVolumeClaim manifest "pvc-<pvname>.yaml" in
# the current directory; refuses to overwrite an existing file (exit 2).
#   $1 - PV name (the claim is named pvc-<pvname>)
#   $2 - size in GiB
#   $3 - access mode (ReadWriteMany / ReadWriteOnce)
# Also reads the global STORAGECLASS line (possibly empty).
generate_pvc_yaml() {
    # 'local' keeps these from clobbering same-named script globals.
    local volname=$1
    local volsize=$2
    local accessmode=$3
    if [[ -f "pvc-${volname}.yaml" ]]; then
        echo "ERROR: File pvc-${volname}.yaml already exist"
        exit 2
    fi
    # Nesting below must match the Kubernetes PersistentVolumeClaim schema.
    cat >pvc-"${volname}".yaml <<EOL
# -- pvc-${volname}.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-${volname}
  namespace: ibm-spectrum-scale-csi
spec:
  accessModes:
    - ${accessmode}
  resources:
    requests:
      storage: ${volsize}Gi
  ${STORAGECLASS}
EOL
    echo "INFO: Successfully created pvc-${volname}.yaml"
}
# getopt(1) option specs; a trailing ':' means the option takes an argument.
SHORT=hf:l:F:s:P:c:a:u:p:r:
LONG=help,filesystem:,path:,fileset:,size:,pvname:,storageclass:,accessmode:,username:,password:,guihost:
# Scratch file collecting stderr of the curl/python helpers below.
ERROROUT="/tmp/csierror.out"
OPTS=$(getopt --options $SHORT --long $LONG --name "$0" -- "$@")
if [ $? != 0 ]; then
    echo "Failed to parse options...exiting." >&2
    usage
    exit 1
fi
# Invoked with no arguments at all: show the full help text.
[[ $# -lt 1 ]] && fullUsage
eval set -- "$OPTS"
# Walk the normalized option list and populate the corresponding globals.
while true; do
    case "$1" in
    -h | --help)
        fullUsage
        ;;
    -l | --path)
        VOLPATH="$2"
        shift 2
        ;;
    -f | --filesystem)
        FSNAME="$2"
        shift 2
        ;;
    -F | --fileset)
        FSETNAME="$2"
        shift 2
        ;;
    -s | --size)
        VOLSIZE="$2"
        shift 2
        ;;
    -P | --pvname)
        VOLNAME="$2"
        shift 2
        ;;
    -c | --storageclass)
        CLASS="$2"
        shift 2
        ;;
    -a | --accessmode)
        ACCESSMODE="$2"
        shift 2
        ;;
    -u | --username)
        USERNAME="$2"
        shift 2
        ;;
    -p | --password)
        PASSWORD="$2"
        shift 2
        ;;
    -r | --guihost)
        URL="$2"
        shift 2
        ;;
    --)
        shift
        break
        ;;
    *)
        usage
        exit 1
        ;;
    esac
done
# Secure username/password prompt if not passed with flag
if [ -z "$USERNAME" ]; then read -r -p "GUI Username: " USERNAME ; fi
# -s keeps the password off the terminal; the bare echo restores the newline.
if [ -z "$PASSWORD" ]; then read -r -p "GUI Password: " -s PASSWORD ; echo ; fi
# Pre-requisite check: the REST responses below are parsed with python3.
if ! python3 --version 1>/dev/null 2>${ERROROUT};
then
    echo "ERROR: Pre-requisite check failed. Python3 not found."
    exit 2
fi
# Check for mandatory Params
MPARAM=""
[[ -z "${FSNAME}" ]] && MPARAM="${MPARAM}--filesystem "
[[ -z "${VOLSIZE}" ]] && MPARAM="${MPARAM}--size "
if [ ! -z "$MPARAM" ]; then
    echo "ERROR: Mandatory parameter missing : $MPARAM"
    usage
fi
# Exactly one of --path / --fileset must identify the volume source.
# (The original test fired only when BOTH were supplied, yet reported them
# as missing; the actually-missing case fell through unchecked.)
if [[ -z "${VOLPATH}" && -z "${FSETNAME}" ]]; then
    echo "ERROR: Missing parameter. Either 'path' or 'fileset' is mandatory."
    usage
fi
if [[ ! -z "${VOLPATH}" && ! -z "${FSETNAME}" ]]; then
    echo "ERROR: Conflicting parameters. Specify only one of 'path' or 'fileset'."
    usage
fi
# --size must be a positive decimal integer (GiB).
if [[ ! ${VOLSIZE} =~ ^[1-9][0-9]*$ ]]; then
    echo "ERROR: Provided value for --size=${VOLSIZE} is not valid number."
    exit 2
fi
if [[ ${#VOLNAME} -ge 254 ]]; then
    echo "ERROR: pvname specified against option --pvname must be less than 254 characters."
    exit 2
fi
# No explicit --pvname: derive one as pv-<filesystem>-<last path component>,
# lower-cased so it can satisfy the DNS-1123 checks below.
if [ -z "${VOLNAME}" ]; then
    VOLNAME=${VOLPATH%/}
    VOLNAME=${VOLNAME##*/}
    VOLNAME="pv-${FSNAME}-${VOLNAME}"
    VOLNAME=${VOLNAME,,}
    if [[ ${#VOLNAME} -ge 254 ]]; then
        echo "ERROR: Specify name for pv using option --pvname."
        exit 2
    fi
    if ! [[ "${VOLNAME}" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ ]]; then
        echo "ERROR: Specify name for pv using option --pvname."
        exit 2
    fi
fi
# Re-validate (covers the user-supplied --pvname path as well).
if ! [[ "${VOLNAME}" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ ]]; then
    echo "ERROR: Invalid pv name specified. pv name must satisfy DNS-1123 label requirement."
    exit 2
fi
[[ -z "${ACCESSMODE}" ]] && ACCESSMODE="ReadWriteMany"
if ! [[ "$ACCESSMODE" == "ReadWriteMany" || "$ACCESSMODE" == "ReadWriteOnce" ]]; then
    echo "ERROR: Invalid access mode specified. Valid accessmode are ReadWriteMany and ReadWriteOnce."
    exit 2
fi
# When set, this whole line is injected verbatim into both manifests.
STORAGECLASS=""
if ! [[ -z "${CLASS}" ]]; then
    if ! [[ "${CLASS}" =~ ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ ]]; then
        echo "ERROR: Invalid storageClass name specified. storageClass name must satisfy DNS-1123 label requirement."
        exit 2
    fi
    STORAGECLASS="storageClassName: ${CLASS}"
fi
# Check if this is spectrum scale node
#if [[ ! -f /usr/lpp/mmfs/bin/mmlscluster ]]; then
#  echo "ERROR: Spectrum Scale cli's are not present on this node"
#  exit 2
#fi
# Start with a fresh error log for the REST calls below.
echo >${ERROROUT}
# Authentication and route validation.
# Probe the GUI endpoint once to distinguish bad credentials (HTTP 401)
# from an unreachable host (no HTTP status line captured at all).
response=$(curl -kv -u "${USERNAME}":"${PASSWORD}" -X GET \
    --header 'accept:application/json' \
    "https://${URL}:443/scalemgmt/v2/cluster" \
    2>&1 | grep -i 'HTTP/1.1 ' | awk '{print $3}'| sed -e 's/^[ \t]*//')
if [[ ${response} == 401 ]]; then
    echo "ERROR: Unauthorized. Incorrect username or password."
    exit 2
elif [[ -z ${response} ]]; then
    echo "ERROR: Could not resolve host ${URL}."
    exit 2
fi
# Get the Spectrum Scale cluster ID
clusterID=$(curl -k -u "${USERNAME}":"${PASSWORD}" -X GET \
    --header 'accept:application/json' \
    "https://${URL}:443/scalemgmt/v2/cluster" \
    2>${ERROROUT} | python3 -c "import sys, json; print(json.load(sys.stdin)['cluster']['clusterSummary']['clusterId'])" 2>>${ERROROUT})
# $? below is the status of python3, the last command of the pipeline.
if [[ $? -ne 0 ]] || [[ -z "$clusterID" ]]; then
    echo "ERROR: Failed to get the Spectrum Scale cluster ID."
    #cat ${ERROROUT}
    exit 2
fi
# Get the Fileystem ID
fileSystemID=$(curl -k -u "${USERNAME}":"${PASSWORD}" -X GET \
    --header 'accept:application/json' \
    "https://${URL}:443/scalemgmt/v2/filesystems/${FSNAME}" \
    2>${ERROROUT} | python3 -c "import sys, json; print(json.load(sys.stdin)['filesystems'][0]['uuid'])" 2>>${ERROROUT})
if [[ $? -ne 0 ]] || [[ -z "$fileSystemID" ]]; then
    echo "ERROR: Failed to get the Fileystem ID of ${FSNAME}"
    #cat ${ERROROUT}
    exit 2
fi
# TODO : Add check for kubernetes lable limit for value of VolumeHandle
# echo "FSETNAME=${FSETNAME}"
if [[ -z "${FSETNAME}" ]]; then
    # Verify the path exists and is a GPFS path.
    # The owner API takes a path relative to the filesystem root: count the
    # mount point's directory depth, prepend that many "../" components, then
    # URL-encode the slashes for use as a single path segment.
    mountpathDepth=$(curl -k -u "${USERNAME}":"${PASSWORD}" -X GET \
        --header 'accept:application/json' \
        "https://${URL}:443/scalemgmt/v2/filesystems/${FSNAME}" \
        2>${ERROROUT} | python3 -c "import sys, json; print(json.load(sys.stdin)['filesystems'][0]['mount']['mountPoint'])" | grep -o "[\/]" | wc -l)
    relativePath=""
    for ((i=1;i<=mountpathDepth;i++)); do relativePath+="../"; done
    relativePath+=$VOLPATH
    relativePath=${relativePath//\//%2F}
    response=$(curl -k -u "${USERNAME}":"${PASSWORD}" -X GET \
        --header 'accept:application/json' \
        "https://${URL}:443/scalemgmt/v2/filesystems/${FSNAME}/owner/${relativePath}" \
        2>${ERROROUT} | python3 -c "import sys, json; print(json.dumps(json.load(sys.stdin)['status']))" 2>>${ERROROUT})
    responseCode=$(echo "$response" | python3 -c "import sys, json; print(json.load(sys.stdin)['code'])")
    responseMsg=$(echo "$response" | python3 -c "import sys, json; print(json.load(sys.stdin)['message'])")
    # Map the API's error messages onto user-facing diagnostics.
    if [[ $responseCode != 200 ]]; then
        if [[ $responseMsg == "Path is not a valid GPFS path." ]]; then
            echo "ERROR: The Path (${VOLPATH}) is not gpfs path."
            exit 2
        elif [[ $responseMsg == "File not found" ]]; then
            echo "ERROR: Either Path (${VOLPATH}) does not exist or it is not a Directory/Softlink."
            exit 2
        else
            echo "ERROR: Failed to verify the path (${VOLPATH}). Check error log for details."
            echo "$responseMsg" > ${ERROROUT}
            exit 2
        fi
    fi
fi
# Generate Volume Handle
if [[ ! -z "${FSETNAME}" ]]; then
    # Fileset-based volume: both the fileset's numeric ID and its junction
    # (link) path are baked into the CSI volume handle.
    fsetId=$(curl -k -u "${USERNAME}":"${PASSWORD}" -X GET \
        --header 'accept:application/json' \
        "https://${URL}:443/scalemgmt/v2/filesystems/${FSNAME}/filesets/${FSETNAME}" \
        2>${ERROROUT} | python3 -c "import sys, json; print(json.load(sys.stdin)['filesets'][0]['config']['id'])" 2>>${ERROROUT})
    if [[ $? -ne 0 ]] || [[ -z "$fsetId" ]]; then
        echo "ERROR: Failed to get the fileset ID of ${FSETNAME}."
        #cat ${ERROROUT}
        exit 2
    fi
    fsetLinkPath=$(curl -k -u "${USERNAME}":"${PASSWORD}" -X GET \
        --header 'accept:application/json' \
        "https://${URL}:443/scalemgmt/v2/filesystems/${FSNAME}/filesets/${FSETNAME}" \
        2>${ERROROUT} | python3 -c "import sys, json; print(json.load(sys.stdin)['filesets'][0]['config']['path'])" 2>>${ERROROUT})
    if [[ $? -ne 0 ]] || [[ -z "$fsetLinkPath" ]]; then
        echo "ERROR: Failed to get the fileset link path of ${FSETNAME}."
        exit 2
    fi
    # The API reports "--" as the path of an unlinked fileset.
    if [[ "${fsetLinkPath}" == "--" ]]; then
        echo "ERROR: Fileset ${FSETNAME} is not linked."
        exit 2
    fi
    VolumeHandle="${clusterID};${fileSystemID};fileset=${fsetId};path=${fsetLinkPath}"
else
    # Directory-based volume: the user-supplied path goes in directly.
    VolumeHandle="${clusterID};${fileSystemID};path=${VOLPATH}"
fi
# Generate the yaml manifests and clean up the scratch error log.
generate_pv_yaml "${VolumeHandle}" "${VOLNAME}" "${VOLSIZE}" "${ACCESSMODE}"
generate_pvc_yaml "${VOLNAME}" "${VOLSIZE}" "${ACCESSMODE}"
rm -f ${ERROROUT}
exit 0
|
/////////////////////////////////////////////////////////////////////////////
// Name: src/common/imagpng.cpp
// Purpose: wxImage PNG handler
// Author: <NAME>
// Copyright: (c) <NAME>
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// ============================================================================
// declarations
// ============================================================================
// ----------------------------------------------------------------------------
// headers
// ----------------------------------------------------------------------------
// For compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#if wxUSE_IMAGE && wxUSE_LIBPNG
#include "wx/imagpng.h"
#include "wx/versioninfo.h"
#ifndef WX_PRECOMP
#include "wx/log.h"
#include "wx/intl.h"
#include "wx/palette.h"
#include "wx/stream.h"
#endif
#include "png.h"
// For memcpy
#include <string.h>
// ----------------------------------------------------------------------------
// local functions
// ----------------------------------------------------------------------------
// is the pixel with this value of alpha a fully opaque one?
static inline
bool IsOpaque(unsigned char a)
{
    // Only the maximum alpha value counts as fully opaque; anything less is
    // at least partially transparent.
    const unsigned char alphaMax = 0xff;
    return a == alphaMax;
}
// ============================================================================
// wxPNGHandler implementation
// ============================================================================
wxIMPLEMENT_DYNAMIC_CLASS(wxPNGHandler,wxImageHandler);
#if wxUSE_STREAMS
#ifndef PNGLINKAGEMODE
#ifdef PNGAPI
#define PNGLINKAGEMODE PNGAPI
#else
#define PNGLINKAGEMODE LINKAGEMODE
#endif
#endif
namespace
{

// VS: wxPNGInfoStruct declared below is a hack that needs some explanation.
//     First, let me describe what's the problem: libpng uses jmp_buf in
//     its png_struct structure. Unfortunately, this structure is
//     compiler-specific and may vary in size, so if you use libpng compiled
//     as DLL with another compiler than the main executable, it may not work.
//     Luckily, it is still possible to use setjmp() & longjmp() as long as the
//     structure is not part of png_struct.
//
//     Sadly, there's no clean way to attach user-defined data to png_struct.
//     There is only one customizable place, png_struct.io_ptr, which is meant
//     only for I/O routines and is set with png_set_read_fn or
//     png_set_write_fn. The hacky part is that we use io_ptr to store
//     a pointer to wxPNGInfoStruct that holds I/O structures _and_ jmp_buf.

struct wxPNGInfoStruct
{
    jmp_buf jmpbuf;      // longjmp() target for libpng error recovery
    bool verbose;        // report libpng warnings via wxLogWarning?

    // The wx stream being read from or written to, depending on direction.
    union
    {
        wxInputStream  *in;
        wxOutputStream *out;
    } stream;
};

// Recover our info structure from the pointer stashed in libpng's io_ptr.
#define WX_PNG_INFO(png_ptr) ((wxPNGInfoStruct*)png_get_io_ptr(png_ptr))

// This is another helper struct which is used to pass parameters to
// DoLoadPNGFile(). It allows us to use the usual RAII for freeing memory,
// which wouldn't be possible inside DoLoadPNGFile() because it uses
// setjmp/longjmp() functions for error handling, which are incompatible with
// C++ destructors.
struct wxPNGImageData
{
    wxPNGImageData()
    {
        lines = NULL;
        m_buf = NULL;
        info_ptr = (png_infop) NULL;
        png_ptr = (png_structp) NULL;
        ok = false;
    }

    // Allocate the row-pointer array and, when buf is NULL, an intermediate
    // RGBA buffer too. When buf is non-NULL the rows point straight into it
    // with 3 bytes (RGB) per pixel; otherwise rows point into our own buffer
    // with 4 bytes (RGBA) per pixel. Returns false on allocation failure.
    bool Alloc(png_uint_32 width, png_uint_32 height, unsigned char* buf)
    {
        lines = (unsigned char **)malloc(height * sizeof(unsigned char *));
        if ( !lines )
            return false;

        size_t w = width;
        // if RGB data will be written directly to wxImage buffer
        if (buf)
            w *= 3;
        else
        {
            // allocate intermediate RGBA buffer
            w *= 4;
            buf =
            m_buf = static_cast<unsigned char*>(malloc(w * height));
            if (!m_buf)
                return false;
        }

        // Each row starts w bytes after the previous one.
        lines[0] = buf;
        for (png_uint_32 i = 1; i < height; i++)
            lines[i] = lines[i - 1] + w;

        return true;
    }

    // Frees the buffers and the libpng read structures; this runs even when
    // DoLoadPNGFile() bails out via setjmp/longjmp, which is the whole point
    // of keeping the state in this struct.
    ~wxPNGImageData()
    {
        free(m_buf);
        free( lines );

        if ( png_ptr )
        {
            if ( info_ptr )
                png_destroy_read_struct( &png_ptr, &info_ptr, (png_infopp) NULL );
            else
                png_destroy_read_struct( &png_ptr, (png_infopp) NULL, (png_infopp) NULL );
        }
    }

    void DoLoadPNGFile(wxImage* image, wxPNGInfoStruct& wxinfo);

    unsigned char** lines;      // per-row pointers passed to png_read_image()
    unsigned char* m_buf;       // intermediate RGBA buffer, NULL if unused
    png_infop info_ptr;
    png_structp png_ptr;
    bool ok;                    // set to true only on fully successful load
};

} // anonymous namespace
// ----------------------------------------------------------------------------
// helper functions
// ----------------------------------------------------------------------------
extern "C"
{

// Stream I/O callbacks installed via png_set_read_fn()/png_set_write_fn():
// the wx stream to use lives in the wxPNGInfoStruct stored as libpng's io_ptr.
static void PNGLINKAGEMODE wx_PNG_stream_reader( png_structp png_ptr, png_bytep data,
                                                 png_size_t length )
{
    WX_PNG_INFO(png_ptr)->stream.in->Read(data, length);
}

static void PNGLINKAGEMODE wx_PNG_stream_writer( png_structp png_ptr, png_bytep data,
                                                 png_size_t length )
{
    WX_PNG_INFO(png_ptr)->stream.out->Write(data, length);
}

// Warning callback: forwards libpng warnings to wxLogWarning, but only when
// verbose mode was requested (or when no info struct is available at all).
static void
PNGLINKAGEMODE wx_PNG_warning(png_structp png_ptr, png_const_charp message)
{
    wxPNGInfoStruct *info = png_ptr ? WX_PNG_INFO(png_ptr) : NULL;
    if ( !info || info->verbose )
    {
        wxLogWarning( wxString::FromAscii(message) );
    }
}

// from pngerror.c
// so that the libpng doesn't send anything on stderr
static void
PNGLINKAGEMODE wx_PNG_error(png_structp png_ptr, png_const_charp message)
{
    wx_PNG_warning(NULL, message);

    // we're not using libpng built-in jump buffer (see comment before
    // wxPNGInfoStruct above) so we have to return ourselves, otherwise libpng
    // would just abort
    longjmp(WX_PNG_INFO(png_ptr)->jmpbuf, 1);
}

} // extern "C"
// ----------------------------------------------------------------------------
// LoadFile() helpers
// ----------------------------------------------------------------------------
// init the alpha channel for the image and fill it with 1s up to (x, y)
static
unsigned char *InitAlpha(wxImage *image, png_uint_32 x, png_uint_32 y)
{
    // Allocate the alpha channel now that we know the image needs one.
    image->SetAlpha();
    unsigned char *const alpha = image->GetAlpha();

    // All pixels processed before (x, y) were opaque, so backfill the new
    // channel with full opacity up to that point.
    const png_uint_32 numOpaque = y * image->GetWidth() + x;
    memset(alpha, 0xff, numOpaque);

    // Hand back the write position for the pixel at (x, y).
    return alpha + numOpaque;
}
// ----------------------------------------------------------------------------
// reading PNGs
// ----------------------------------------------------------------------------
bool wxPNGHandler::DoCanRead( wxInputStream& stream )
{
    // Every PNG file starts with the signature bytes "\211PNG"; reading just
    // those four is enough to recognize the format. (It's ok to modify the
    // stream position here.)
    unsigned char signature[4];
    if ( !stream.Read(signature, WXSIZEOF(signature)) )
        return false;

    return memcmp(signature, "\211PNG", WXSIZEOF(signature)) == 0;
}
// convert data from RGB to wxImage format
// Convert decoded RGBA rows to wxImage format: RGB in the pixel buffer plus
// a separate alpha plane, which is only allocated if some pixel actually
// needs it.
static
void CopyDataFromPNG(wxImage *image,
                     unsigned char **lines,
                     png_uint_32 width,
                     png_uint_32 height)
{
    // allocated on demand if we have any non-opaque pixels
    unsigned char *alpha = NULL;

    unsigned char *ptrDst = image->GetData();
    {
        for ( png_uint_32 y = 0; y < height; y++ )
        {
            // Source rows hold 4 bytes per pixel (RGBA).
            const unsigned char *ptrSrc = lines[y];
            for ( png_uint_32 x = 0; x < width; x++ )
            {
                unsigned char r = *ptrSrc++;
                unsigned char g = *ptrSrc++;
                unsigned char b = *ptrSrc++;
                unsigned char a = *ptrSrc++;

                // the first time we encounter a transparent pixel we must
                // allocate alpha channel for the image
                if ( !IsOpaque(a) && !alpha )
                    alpha = InitAlpha(image, x, y);

                if ( alpha )
                    *alpha++ = a;

                *ptrDst++ = r;
                *ptrDst++ = g;
                *ptrDst++ = b;
            }
        }
    }
}
// temporarily disable the warning C4611 (interaction between '_setjmp' and
// C++ object destruction is non-portable) - I don't see any dtors here
#ifdef __VISUALC__
#pragma warning(disable:4611)
#endif /* VC++ */
// This function uses wxPNGImageData to store some of its "local" variables in
// order to avoid clobbering these variables by longjmp(): having them inside
// the stack frame of the caller prevents this from happening. It also
// "returns" its result via wxPNGImageData: use its "ok" field to check
// whether loading succeeded or failed.
// Decode the PNG from wxinfo's input stream into *image. On success sets
// this->ok to true; on any failure it simply returns (possibly via the
// setjmp below) and ~wxPNGImageData() releases all resources.
void
wxPNGImageData::DoLoadPNGFile(wxImage* image, wxPNGInfoStruct& wxinfo)
{
    png_uint_32 width, height = 0;
    int bit_depth, color_type;

    image->Destroy();

    png_ptr = png_create_read_struct
              (
                PNG_LIBPNG_VER_STRING,
                NULL,
                wx_PNG_error,
                wx_PNG_warning
              );
    if (!png_ptr)
        return;

    // NB: please see the comment near wxPNGInfoStruct declaration for
    //     explanation why this line is mandatory
    png_set_read_fn( png_ptr, &wxinfo, wx_PNG_stream_reader);

    info_ptr = png_create_info_struct( png_ptr );
    if (!info_ptr)
        return;

    // Any libpng error from here on longjmp()s back to this point (see
    // wx_PNG_error); returning leaves "ok" false and lets the destructor
    // clean everything up.
    if (setjmp(wxinfo.jmpbuf))
        return;

    png_read_info( png_ptr, info_ptr );
    png_get_IHDR( png_ptr, info_ptr, &width, &height, &bit_depth, &color_type, NULL, NULL, NULL );

    // Ask libpng to normalize all input variants towards 8-bit RGB(A):
    // expand palette/low-bit-depth data, convert grey to RGB, reduce 16-bit
    // channels to 8 and undo bit packing.
    png_set_expand(png_ptr);
    png_set_gray_to_rgb(png_ptr);
    png_set_strip_16( png_ptr );
    png_set_packing( png_ptr );

    image->Create((int)width, (int)height, (bool) false /* no need to init pixels */);
    if (!image->IsOk())
        return;

    // Images with an alpha channel (or a tRNS chunk, which png_set_expand
    // turns into alpha) must be decoded into an intermediate RGBA buffer and
    // copied afterwards; plain RGB decodes straight into the wxImage buffer.
    const bool needCopy =
        (color_type & PNG_COLOR_MASK_ALPHA) ||
        png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS);

    if (!Alloc(width, height, needCopy ? NULL : image->GetData()))
        return;

    png_read_image( png_ptr, lines );
    png_read_end( png_ptr, info_ptr );

#if wxUSE_PALETTE
    // Preserve the source palette on the wxImage when there was one.
    if (color_type == PNG_COLOR_TYPE_PALETTE)
    {
        png_colorp palette = NULL;
        int numPalette = 0;

        (void) png_get_PLTE(png_ptr, info_ptr, &palette, &numPalette);

        unsigned char* r = new unsigned char[numPalette];
        unsigned char* g = new unsigned char[numPalette];
        unsigned char* b = new unsigned char[numPalette];

        for (int j = 0; j < numPalette; j++)
        {
            r[j] = palette[j].red;
            g[j] = palette[j].green;
            b[j] = palette[j].blue;
        }

        image->SetPalette(wxPalette(numPalette, r, g, b));
        delete[] r;
        delete[] g;
        delete[] b;
    }
#endif // wxUSE_PALETTE

    // set the image resolution if it's available
    png_uint_32 resX, resY;
    int unitType;
    if (png_get_pHYs(png_ptr, info_ptr, &resX, &resY, &unitType)
        == PNG_INFO_pHYs)
    {
        wxImageResolution res = wxIMAGE_RESOLUTION_CM;

        switch (unitType)
        {
            default:
                wxLogWarning(_("Unknown PNG resolution unit %d"), unitType);
                wxFALLTHROUGH;

            case PNG_RESOLUTION_UNKNOWN:
                image->SetOption(wxIMAGE_OPTION_RESOLUTIONX, resX);
                image->SetOption(wxIMAGE_OPTION_RESOLUTIONY, resY);

                res = wxIMAGE_RESOLUTION_NONE;
                break;

            case PNG_RESOLUTION_METER:
                /*
                Convert meters to centimeters.
                Use a string to not lose precision (converting to cm and then
                to inch would result in integer rounding error).
                If an app wants an int, GetOptionInt will convert and round
                down for them.
                */
                image->SetOption(wxIMAGE_OPTION_RESOLUTIONX,
                    wxString::FromCDouble((double) resX / 100.0, 2));
                image->SetOption(wxIMAGE_OPTION_RESOLUTIONY,
                    wxString::FromCDouble((double) resY / 100.0, 2));
                break;
        }

        image->SetOption(wxIMAGE_OPTION_RESOLUTIONUNIT, res);
    }

    // loaded successfully, now init wxImage with this data
    if (needCopy)
        CopyDataFromPNG(image, lines, width, height);

    // This will indicate to the caller that loading succeeded.
    ok = true;
}
bool
wxPNGHandler::LoadFile(wxImage *image,
                       wxInputStream& stream,
                       bool verbose,
                       int WXUNUSED(index))
{
    // All the real work, and all resource management, happens inside
    // wxPNGImageData; its "ok" flag tells us whether decoding succeeded.
    wxPNGInfoStruct wxinfo;
    wxinfo.verbose = verbose;
    wxinfo.stream.in = &stream;

    wxPNGImageData data;
    data.DoLoadPNGFile(image, wxinfo);

    if ( data.ok )
        return true;

    // Loading failed: report it and don't leave a half-initialized image
    // behind.
    if ( verbose )
    {
        wxLogError(_("Couldn't load a PNG image - file is corrupted or not enough memory."));
    }

    if ( image->IsOk() )
    {
        image->Destroy();
    }

    return false;
}
// ----------------------------------------------------------------------------
// SaveFile() palette helpers
// ----------------------------------------------------------------------------
typedef wxLongToLongHashMap PaletteMap;
// Combine a colour and its alpha into a single hash-map key: the 24-bit RGB
// key produced by wxImageHistogram shifted up 8 bits with alpha in the low
// byte.
static unsigned long PaletteMakeKey(const png_color_8& clr)
{
    return (wxImageHistogram::MakeKey(clr.red, clr.green, clr.blue) << 8) | clr.alpha;
}
// Return the palette index of the given colour, or wxNOT_FOUND if the
// palette doesn't contain it.
static long PaletteFind(const PaletteMap& palette, const png_color_8& clr)
{
    const PaletteMap::const_iterator it = palette.find(PaletteMakeKey(clr));
    if ( it == palette.end() )
        return wxNOT_FOUND;

    return it->second;
}
// Return the palette index of the given colour, first appending it to the
// palette if it wasn't there yet.
static long PaletteAdd(PaletteMap *palette, const png_color_8& clr)
{
    const unsigned long key = PaletteMakeKey(clr);
    const PaletteMap::const_iterator it = palette->find(key);
    if (it != palette->end())
        return it->second;

    // New colour: its index is the current palette size.
    const size_t newIndex = palette->size();
    (*palette)[key] = newIndex;
    return newIndex;
}
// ----------------------------------------------------------------------------
// writing PNGs
// ----------------------------------------------------------------------------
// Encode *image as PNG onto the given stream. Honours the various
// wxIMAGE_OPTION_PNG_* options (format, bit depth, filter, compression
// tuning) and translates a wx mask or alpha channel into PNG transparency.
// Returns false (logging when verbose) on any failure.
bool wxPNGHandler::SaveFile( wxImage *image, wxOutputStream& stream, bool verbose )
{
    wxPNGInfoStruct wxinfo;
    wxinfo.verbose = verbose;
    wxinfo.stream.out = &stream;

    png_structp png_ptr = png_create_write_struct
                          (
                            PNG_LIBPNG_VER_STRING,
                            NULL,
                            wx_PNG_error,
                            wx_PNG_warning
                          );
    if (!png_ptr)
    {
        if (verbose)
        {
            wxLogError(_("Couldn't save PNG image."));
        }
        return false;
    }

    png_infop info_ptr = png_create_info_struct(png_ptr);
    if (info_ptr == NULL)
    {
        png_destroy_write_struct( &png_ptr, (png_infopp)NULL );
        if (verbose)
        {
            wxLogError(_("Couldn't save PNG image."));
        }
        return false;
    }

    // Any libpng error below longjmp()s back here (see wx_PNG_error).
    // NOTE(review): only png_ptr is destroyed in this and the later malloc
    // failure path, not info_ptr — this looks like it leaks the info struct;
    // confirm against png_destroy_write_struct() docs.
    if (setjmp(wxinfo.jmpbuf))
    {
        png_destroy_write_struct( &png_ptr, (png_infopp)NULL );
        if (verbose)
        {
            wxLogError(_("Couldn't save PNG image."));
        }
        return false;
    }

    // NB: please see the comment near wxPNGInfoStruct declaration for
    //     explanation why this line is mandatory
    png_set_write_fn( png_ptr, &wxinfo, wx_PNG_stream_writer, NULL);

    const int iHeight = image->GetHeight();
    const int iWidth = image->GetWidth();

    const bool hasPngFormatOption = image->HasOption(wxIMAGE_OPTION_PNG_FORMAT);

    int iColorType = hasPngFormatOption
                        ? image->GetOptionInt(wxIMAGE_OPTION_PNG_FORMAT)
                        : wxPNG_TYPE_COLOUR;

    bool bHasAlpha = image->HasAlpha();
    bool bHasMask = image->HasMask();

    // Use a palette when explicitly requested, or when the image already has
    // one and no explicit format option was given.
    bool bUsePalette = iColorType == wxPNG_TYPE_PALETTE
#if wxUSE_PALETTE
        || (!hasPngFormatOption && image->HasPalette() )
#endif
    ;

    png_color_8 mask = { 0, 0, 0, 0, 0 };

    if (bHasMask)
    {
        mask.red = image->GetMaskRed();
        mask.green = image->GetMaskGreen();
        mask.blue = image->GetMaskBlue();
    }

    // First pass over the pixels: collect distinct colours into the palette,
    // falling back to true colour if there are more than
    // PNG_MAX_PALETTE_LENGTH of them.
    PaletteMap palette;
    if (bUsePalette)
    {
        png_color png_rgb [PNG_MAX_PALETTE_LENGTH];
        png_byte png_trans[PNG_MAX_PALETTE_LENGTH];

        const unsigned char *pColors = image->GetData();
        const unsigned char* pAlpha = image->GetAlpha();

        if (bHasMask && !pAlpha)
        {
            // Mask must be first
            PaletteAdd(&palette, mask);
        }

        for (int y = 0; y < iHeight; y++)
        {
            for (int x = 0; x < iWidth; x++)
            {
                png_color_8 rgba;

                rgba.red = *pColors++;
                rgba.green = *pColors++;
                rgba.blue = *pColors++;
                rgba.gray = 0;
                rgba.alpha = (pAlpha && !bHasMask) ? *pAlpha++ : 0;

                // save in our palette
                long index = PaletteAdd(&palette, rgba);

                if (index < PNG_MAX_PALETTE_LENGTH)
                {
                    // save in libpng's palette
                    png_rgb[index].red = rgba.red;
                    png_rgb[index].green = rgba.green;
                    png_rgb[index].blue = rgba.blue;
                    png_trans[index] = rgba.alpha;
                }
                else
                {
                    // Too many colours to fit a palette.
                    // NOTE(review): this break only leaves the inner loop, so
                    // the remaining rows are still scanned — harmless but
                    // wasted work.
                    bUsePalette = false;
                    break;
                }
            }
        }

        if (bUsePalette)
        {
            png_set_PLTE(png_ptr, info_ptr, png_rgb, palette.size());

            if (bHasMask && !pAlpha)
            {
                // The mask colour was inserted first, so entry 0 is the one
                // and only transparent palette entry.
                wxASSERT(PaletteFind(palette, mask) == 0);
                png_trans[0] = 0;
                png_set_tRNS(png_ptr, info_ptr, png_trans, 1, NULL);
            }
            else if (pAlpha && !bHasMask)
            {
                png_set_tRNS(png_ptr, info_ptr, png_trans, palette.size(), NULL);
            }
        }
    }

    /*
    If saving palettised was requested but it was decided we can't use a
    palette then reset the colour type to RGB.
    */
    if (!bUsePalette && iColorType == wxPNG_TYPE_PALETTE)
    {
        iColorType = wxPNG_TYPE_COLOUR;
    }

    bool bUseAlpha = !bUsePalette && (bHasAlpha || bHasMask);

    // Map the wx colour type onto libpng's colour type constant.
    int iPngColorType;

    if (bUsePalette)
    {
        iPngColorType = PNG_COLOR_TYPE_PALETTE;
        iColorType = wxPNG_TYPE_PALETTE;
    }
    else if ( iColorType==wxPNG_TYPE_COLOUR )
    {
        iPngColorType = bUseAlpha ? PNG_COLOR_TYPE_RGB_ALPHA
                                  : PNG_COLOR_TYPE_RGB;
    }
    else
    {
        iPngColorType = bUseAlpha ? PNG_COLOR_TYPE_GRAY_ALPHA
                                  : PNG_COLOR_TYPE_GRAY;
    }

    // Forward the optional filter/compression tuning knobs to libpng.
    if (image->HasOption(wxIMAGE_OPTION_PNG_FILTER))
        png_set_filter( png_ptr, PNG_FILTER_TYPE_BASE, image->GetOptionInt(wxIMAGE_OPTION_PNG_FILTER) );

    if (image->HasOption(wxIMAGE_OPTION_PNG_COMPRESSION_LEVEL))
        png_set_compression_level( png_ptr, image->GetOptionInt(wxIMAGE_OPTION_PNG_COMPRESSION_LEVEL) );

    if (image->HasOption(wxIMAGE_OPTION_PNG_COMPRESSION_MEM_LEVEL))
        png_set_compression_mem_level( png_ptr, image->GetOptionInt(wxIMAGE_OPTION_PNG_COMPRESSION_MEM_LEVEL) );

    if (image->HasOption(wxIMAGE_OPTION_PNG_COMPRESSION_STRATEGY))
        png_set_compression_strategy( png_ptr, image->GetOptionInt(wxIMAGE_OPTION_PNG_COMPRESSION_STRATEGY) );

    if (image->HasOption(wxIMAGE_OPTION_PNG_COMPRESSION_BUFFER_SIZE))
        png_set_compression_buffer_size( png_ptr, image->GetOptionInt(wxIMAGE_OPTION_PNG_COMPRESSION_BUFFER_SIZE) );

    int iBitDepth = !bUsePalette && image->HasOption(wxIMAGE_OPTION_PNG_BITDEPTH)
                        ? image->GetOptionInt(wxIMAGE_OPTION_PNG_BITDEPTH)
                        : 8;

    png_set_IHDR( png_ptr, info_ptr, image->GetWidth(), image->GetHeight(),
                  iBitDepth, iPngColorType,
                  PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_BASE,
                  PNG_FILTER_TYPE_BASE);

    // iElements ends up as the number of bytes per pixel in the row buffer
    // filled below.
    int iElements;
    png_color_8 sig_bit;

    if ( iPngColorType & PNG_COLOR_MASK_COLOR )
    {
        sig_bit.red =
        sig_bit.green =
        sig_bit.blue = (png_byte)iBitDepth;
        iElements = 3;
    }
    else // grey
    {
        sig_bit.gray = (png_byte)iBitDepth;
        iElements = 1;
    }

    if ( bUseAlpha )
    {
        sig_bit.alpha = (png_byte)iBitDepth;
        iElements++;
    }

    if ( iBitDepth == 16 )
        iElements *= 2;

    // save the image resolution if we have it
    int resX, resY;
    switch ( GetResolutionFromOptions(*image, &resX, &resY) )
    {
        case wxIMAGE_RESOLUTION_INCHES:
            {
                const double INCHES_IN_METER = 10000.0 / 254;
                resX = int(resX * INCHES_IN_METER);
                resY = int(resY * INCHES_IN_METER);
            }
            break;

        case wxIMAGE_RESOLUTION_CM:
            resX *= 100;
            resY *= 100;
            break;

        case wxIMAGE_RESOLUTION_NONE:
            break;

        default:
            wxFAIL_MSG( wxT("unsupported image resolution units") );
    }

    if ( resX && resY )
        png_set_pHYs( png_ptr, info_ptr, resX, resY, PNG_RESOLUTION_METER );

    png_set_sBIT( png_ptr, info_ptr, &sig_bit );
    png_write_info( png_ptr, info_ptr );
    png_set_shift( png_ptr, &sig_bit );
    png_set_packing( png_ptr );

    // Single row buffer reused for every scanline.
    unsigned char *
        data = (unsigned char *)malloc( image->GetWidth() * iElements );
    if ( !data )
    {
        png_destroy_write_struct( &png_ptr, (png_infopp)NULL );
        return false;
    }

    const unsigned char *
        pAlpha = (const unsigned char *)(bHasAlpha ? image->GetAlpha() : NULL);

    const unsigned char *pColors = image->GetData();

    // Second pass: convert each RGB(A) pixel into the chosen output format
    // and stream it out row by row.
    for (int y = 0; y != iHeight; ++y)
    {
        unsigned char *pData = data;
        for (int x = 0; x != iWidth; x++)
        {
            png_color_8 clr;
            clr.red = *pColors++;
            clr.green = *pColors++;
            clr.blue = *pColors++;
            clr.gray = 0;
            clr.alpha = (bUsePalette && pAlpha) ? *pAlpha++ : 0; // use with wxPNG_TYPE_PALETTE only

            switch ( iColorType )
            {
                default:
                    wxFAIL_MSG( wxT("unknown wxPNG_TYPE_XXX") );
                    wxFALLTHROUGH;

                case wxPNG_TYPE_COLOUR:
                    *pData++ = clr.red;
                    if ( iBitDepth == 16 )
                        *pData++ = 0;
                    *pData++ = clr.green;
                    if ( iBitDepth == 16 )
                        *pData++ = 0;
                    *pData++ = clr.blue;
                    if ( iBitDepth == 16 )
                        *pData++ = 0;
                    break;

                case wxPNG_TYPE_GREY:
                    {
                        // where do these coefficients come from? maybe we
                        // should have image options for them as well?
                        unsigned uiColor =
                            (unsigned) (76.544*(unsigned)clr.red +
                                        150.272*(unsigned)clr.green +
                                        36.864*(unsigned)clr.blue);

                        *pData++ = (unsigned char)((uiColor >> 8) & 0xFF);
                        if ( iBitDepth == 16 )
                            *pData++ = (unsigned char)(uiColor & 0xFF);
                    }
                    break;

                case wxPNG_TYPE_GREY_RED:
                    *pData++ = clr.red;
                    if ( iBitDepth == 16 )
                        *pData++ = 0;
                    break;

                case wxPNG_TYPE_PALETTE:
                    *pData++ = (unsigned char) PaletteFind(palette, clr);
                    break;
            }

            if ( bUseAlpha )
            {
                // Mask colour wins over the alpha channel: matching pixels
                // become fully transparent.
                unsigned char uchAlpha = 255;
                if ( bHasAlpha )
                    uchAlpha = *pAlpha++;

                if ( bHasMask )
                {
                    if ( (clr.red == mask.red)
                            && (clr.green == mask.green)
                            && (clr.blue == mask.blue) )
                        uchAlpha = 0;
                }

                *pData++ = uchAlpha;
                if ( iBitDepth == 16 )
                    *pData++ = 0;
            }
        }

        png_bytep row_ptr = data;
        png_write_rows( png_ptr, &row_ptr, 1 );
    }

    free(data);
    png_write_end( png_ptr, info_ptr );
    png_destroy_write_struct( &png_ptr, (png_infopp)&info_ptr );

    return true;
}
#ifdef __VISUALC__
#pragma warning(default:4611)
#endif /* VC++ */
#endif // wxUSE_STREAMS
// Report the libpng version the handler was built against.
/*static*/ wxVersionInfo wxPNGHandler::GetLibraryVersionInfo()
{
    // The version string seems to always have a leading space and a trailing
    // new line, get rid of them both ("+ 1" skips the space).
    wxString str = png_get_header_version(NULL) + 1;
    str.Replace("\n", "");

    return wxVersionInfo("libpng",
                         PNG_LIBPNG_VER_MAJOR,
                         PNG_LIBPNG_VER_MINOR,
                         PNG_LIBPNG_VER_RELEASE,
                         str);
}
#endif // wxUSE_LIBPNG
|
#!/bin/sh
# Launch the four-leg controller from the directory containing this script so
# that the relative module path below resolves correctly.
RELATIVE_DIR=$(dirname "$0")
# Quote the path (it may contain spaces) and bail out if the cd fails rather
# than running main.py from the wrong directory.
cd "$RELATIVE_DIR" || exit 1
sudo python ./four_leg_control/main.py
from typing import Union
class Bank:
    """A minimal in-memory bank with numbered accounts.

    Errors are reported by returning descriptive strings rather than by
    raising exceptions, preserving the existing caller contract.
    """

    def __init__(self):
        # Maps account number -> {'name': str, 'balance': float}.
        self.accounts = {}

    def create_account(self, name: str, initial_deposit: float) -> int:
        """Open an account for *name* with *initial_deposit* and return its number.

        Account numbers are assigned sequentially starting at 1.
        """
        account_number = len(self.accounts) + 1
        self.accounts[account_number] = {'name': name, 'balance': initial_deposit}
        return account_number

    def deposit(self, account_number: int, amount: float) -> Union[float, str]:
        """Add *amount* to the account; return the new balance.

        Returns the string "Account not found" for an unknown account.
        (The annotation was previously ``-> float``, which missed the
        error-string return path.)
        """
        account = self.accounts.get(account_number)
        if account is None:
            return "Account not found"
        account['balance'] += amount
        return account['balance']

    def withdraw(self, account_number: int, amount: float) -> Union[float, str]:
        """Remove *amount* if funds suffice; return the new balance.

        Returns "Insufficient funds" or "Account not found" on error.
        """
        account = self.accounts.get(account_number)
        if account is None:
            return "Account not found"
        if account['balance'] < amount:
            return "Insufficient funds"
        account['balance'] -= amount
        return account['balance']

    def check_balance(self, account_number: int) -> Union[float, str]:
        """Return the account's balance, or "Account not found"."""
        account = self.accounts.get(account_number)
        if account is None:
            return "Account not found"
        return account['balance']
<filename>src/seed/date.js
'use strict';
let moment = require('moment');
let random = require('../util/random');
const {wrap} = require('../util/hooks');
// Random timestamp seed.
//
// date(start, end)      -> random epoch-milliseconds between start and end;
//                          if `start` is an array, a random element is picked
//                          and converted instead.
// date.toSecond(...)    -> same value rounded up to epoch-seconds.
// date.format(fmt, ...) -> the timestamp rendered through moment's format().
let date = wrap(function (start = 0, end = Date.now()) {
    if (Array.isArray(start)) {
        // Pick one of the supplied candidates and normalize it to millis.
        const picked = start[random.int(0, start.length - 1)];
        return moment(picked).toDate().getTime();
    }
    const lo = moment(start).toDate().getTime();
    const hi = moment(end).toDate().getTime();
    return random.int(lo, hi);
});
date.toSecond = wrap(function (...args) {
    return Math.ceil(date(...args) / 1000);
});
date.format = wrap((fmt, start, end) => moment(date(start, end)).format(fmt));
module.exports = date;
|
import React from 'react';
import ReactDOM from 'react-dom';
import { shallow, mount } from 'enzyme';
import List from '../List';
import Section from '../Section';
import ListSpacingContext from '../contexts/listSpacing';
// Smoke test: <List> must mount into a real DOM node without throwing.
it('renders without crashing', () => {
  const div = document.createElement('div');
  const element = <List title="Title">Hello world</List>;
  ReactDOM.render(element, div);
});

// <List> should render a ListSpacingContext.Consumer and forward the context
// value (here: true) into a <Section> as verticalSpacing, with bodySpacing
// forced off.
it('consumes context to render a <Section> with spacing configs', () => {
  const wrapper = shallow(<List>Foo Bar</List>);
  expect(wrapper.is(ListSpacingContext.Consumer)).toBeTruthy();
  // Invoke the consumer's render-prop manually with a context value of true.
  const renderedElement = wrapper.prop('children')(true);
  expect(renderedElement.type).toBe(Section);
  expect(renderedElement.props.verticalSpacing).toBe(true);
  expect(renderedElement.props.bodySpacing).toBe(false);
});

// The children end up inside a <ul> nested in the root <Section>.
it('renders a <ul> inside root <Section>', () => {
  const wrapper = mount(<List>Foo Bar</List>);
  expect(wrapper.find(Section).find('ul').exists()).toBeTruthy();
  expect(wrapper.find(Section).find('ul').text()).toBe('Foo Bar');
});

// Each `variant` prop value maps to a gyp-list--<variant> class on the
// Section; "normal" is the default when no variant is given.
it('renders in variants with "normal" as default', () => {
  const wrapper = mount(<List>Foo Bar</List>);
  expect(wrapper.find(Section).hasClass('gyp-list--normal')).toBeTruthy();
  wrapper.setProps({ variant: 'setting' });
  expect(wrapper.find(Section).hasClass('gyp-list--setting')).toBeTruthy();
  wrapper.setProps({ variant: 'button' });
  expect(wrapper.find(Section).hasClass('gyp-list--button')).toBeTruthy();
});

// Props not consumed by <List> itself (e.g. id, verticalSpacing) must be
// passed through to the wrapper <Section> untouched.
it('passes unknown props to wrapper <Section>', () => {
  const wrapper = mount(
    <List
      id="foo"
      verticalSpacing={false}
    />
  );
  expect(wrapper.find(Section).props()).toEqual(
    expect.objectContaining({
      id: 'foo',
      verticalSpacing: false,
    })
  );
});
|
<filename>src/test/java/com/kvn/poi/exp/function/FunctionRegisterTest.java
package com.kvn.poi.exp.function;
import org.junit.Test;
import static org.junit.Assert.*;
/**
 * Smoke test for {@code FunctionRegister}.
 *
 * Created by wangzhiyuan on 2018/9/4
 */
public class FunctionRegisterTest {

    /**
     * Registering the built-in functions must complete without throwing.
     * No assertions are made beyond that; this guards against registration
     * errors (e.g. duplicate names) at startup.
     */
    @Test
    public void registerInternalFunction() {
        FunctionRegister.registerInternalFunction();
    }
}
#!/usr/bin/env bash
# Slurm batch job: run the structure-based T3E annotation pipeline on one
# node (1 task, 10 CPUs, 24h wall time). Output goes to <jobname>-<jobid>.log.
# NOTE: the #SBATCH lines below are Slurm directives, not plain comments —
# do not edit them casually.
#SBATCH --job-name=T3E
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=10
#SBATCH --time=24:00:00
#SBATCH --output=%x-%j.log
python Structure-based_annotation.py -i T3E.fasta -o T3E_prediction
|
#!/bin/sh
# CocoaPods-generated script: copies the built Pod frameworks into the app
# bundle's Frameworks folder, re-signs them, and embeds any Swift runtime
# dylibs they link against.
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Location of the Swift standard-library dylibs for the current platform.
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Copies a framework ($1, a path relative to BUILT_PRODUCTS_DIR) into the
# app's Frameworks folder, resolving symlinks, then code-signs it and embeds
# the Swift runtime dylibs it links against.
install_framework()
{
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  else
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  fi
  local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
  if [ -L "${source}" ]; then
      echo "Symlinked..."
      source="$(readlink "${source}")"
  fi
  # use filter instead of exclude so missing patterns don't throw errors
  echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"
  # Embed linked Swift runtime libraries
  local basename
  basename="$(basename "$1" | sed -E s/\\..+// && exit ${PIPESTATUS[0]})"
  local swift_runtime_libs
  # otool -LX lists linked dylibs; keep only @rpath/libswift* entries and
  # strip them down to bare dylib names.
  swift_runtime_libs=$(xcrun otool -LX "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/${basename}.framework/${basename}" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
  for lib in $swift_runtime_libs; do
    echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
    rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
    code_sign_if_enabled "${destination}/${lib}"
  done
}
# Signs a framework ($1) with the expanded code-sign identity, unless code
# signing is disabled or not required by the build settings.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
    /usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
  fi
}
# Both configurations embed the same frameworks; dispatch on the build
# configuration name.
case "$CONFIGURATION" in
  Debug|Release)
    install_framework 'Pods-CCNetWorking_Example/AFNetworking.framework'
    install_framework 'Pods-CCNetWorking_Example/CCNetWorking.framework'
    ;;
esac
|
package api
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/ovh/cds/engine/api/group"
"github.com/ovh/cds/engine/api/pipeline"
"github.com/ovh/cds/engine/api/project"
"github.com/ovh/cds/engine/api/secret"
"github.com/ovh/cds/engine/api/test"
"github.com/ovh/cds/engine/api/test/assets"
"github.com/ovh/cds/engine/api/worker"
"github.com/ovh/cds/engine/api/workermodel"
"github.com/ovh/cds/engine/api/workflow"
"github.com/ovh/cds/sdk"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test_DeleteAllWorkerModels wipes every worker, worker model and worker
// model pattern from the test database. Other tests in this file call it
// first to guarantee a clean state.
func Test_DeleteAllWorkerModels(t *testing.T) {
	api, _, _, end := newTestAPI(t)
	defer end()
	// Load and delete all worker
	workers, err := worker.LoadAll(context.Background(), api.mustDB())
	require.NoError(t, err, "unable to load workers")
	for _, w := range workers {
		assert.NoError(t, worker.Delete(api.mustDB(), w.ID))
	}
	// Load and delete all worker models
	models, err := workermodel.LoadAll(context.Background(), api.mustDB(), nil)
	require.NoError(t, err)
	for _, m := range models {
		assert.NoError(t, workermodel.Delete(api.mustDB(), m.ID))
	}
	// Load and delete all worker model patterns
	modelPatterns, err := workermodel.LoadPatterns(api.mustDB())
	require.NoError(t, err)
	for _, wmp := range modelPatterns {
		assert.NoError(t, workermodel.DeletePattern(api.mustDB(), wmp.ID))
	}
}
// Test_postWorkerModelAsAdmin checks that an admin user can create a Docker
// worker model in the shared.infra group and that the created model echoes
// back the command and environment it was posted with.
func Test_postWorkerModelAsAdmin(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, _, end := newTestAPI(t)
	defer end()
	_, jwtRaw := assets.InsertAdminUser(t, api.mustDB())
	groupShared, err := group.LoadByName(context.TODO(), api.mustDB(), sdk.SharedInfraGroupName)
	require.NoError(t, err)
	model := sdk.Model{
		Name:    "Test1",
		GroupID: groupShared.ID,
		Type:    sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Shell: "sh -c",
			Cmd:   "worker --api={{.API}}",
			Envs: map[string]string{
				"CDS_TEST": "THIS IS A TEST",
			},
		},
	}
	// Send POST model request
	uri := api.Router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwtRaw, "POST", uri, model)
	w := httptest.NewRecorder()
	api.Router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	var newModel sdk.Model
	require.NoError(t, json.Unmarshal(w.Body.Bytes(), &newModel))
	assert.Equal(t, groupShared.ID, newModel.GroupID)
	assert.Equal(t, "worker --api={{.API}}", newModel.ModelDocker.Cmd, "Main worker command is not good")
	assert.Equal(t, "THIS IS A TEST", newModel.ModelDocker.Envs["CDS_TEST"], "Worker model envs are not good")
}
// Test_addWorkerModelWithPrivateRegistryAsAdmin creates a Docker model that
// uses a private registry and verifies that the API never returns the real
// registry password — only sdk.PasswordPlaceholder.
func Test_addWorkerModelWithPrivateRegistryAsAdmin(t *testing.T) {
	api, _, _, end := newTestAPI(t)
	defer end()
	//Loading all models
	models, errlw := workermodel.LoadAll(context.Background(), api.mustDB(), nil)
	if errlw != nil {
		t.Fatalf("Error getting models : %s", errlw)
	}
	//Delete all of them
	for _, m := range models {
		if err := workermodel.Delete(api.mustDB(), m.ID); err != nil {
			t.Fatalf("Error deleting model : %s", err)
		}
	}
	//Create admin user
	u, jwt := assets.InsertAdminUser(t, api.mustDB())
	assert.NotZero(t, u)
	assert.NotZero(t, jwt)
	g, err := group.LoadByName(context.TODO(), api.mustDB(), "shared.infra")
	if err != nil {
		t.Fatalf("Error getting group : %s", err)
	}
	model := sdk.Model{
		Name:    "Test1",
		GroupID: g.ID,
		Type:    sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Shell: "sh -c",
			Cmd:   "worker --api={{.API}}",
			Envs: map[string]string{
				"CDS_TEST": "THIS IS A TEST",
			},
			Private:  true,
			Username: "test",
			Password: "<PASSWORD>",
		},
		RegisteredCapabilities: sdk.RequirementList{
			{
				Name:  "capa1",
				Type:  sdk.BinaryRequirement,
				Value: "1",
			},
		},
	}
	//Prepare request
	uri := api.Router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w := httptest.NewRecorder()
	api.Router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	var newModel sdk.Model
	assert.NoError(t, json.Unmarshal(w.Body.Bytes(), &newModel))
	test.Equal(t, "worker --api={{.API}}", newModel.ModelDocker.Cmd, "Main worker command is not good")
	test.Equal(t, "THIS IS A TEST", newModel.ModelDocker.Envs["CDS_TEST"], "Worker model envs are not good")
	test.Equal(t, sdk.PasswordPlaceholder, newModel.ModelDocker.Password, "Worker model password returned are not placeholder")
}
// Test_WorkerModelUsage builds a full project/pipeline/workflow whose job
// requires a worker model, then checks the model-usage endpoint reports that
// single pipeline as using the model.
func Test_WorkerModelUsage(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, db, router, end := newTestAPI(t)
	defer end()
	u, jwt := assets.InsertAdminUser(t, db)
	assert.NotZero(t, u)
	grName := sdk.RandomString(10)
	gr := assets.InsertTestGroup(t, db, grName)
	test.NotNil(t, gr)
	model := sdk.Model{
		Name:    sdk.RandomString(10),
		GroupID: gr.ID,
		Type:    sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Shell: "sh -c",
			Cmd:   "worker --api={{.API}}",
			Envs: map[string]string{
				"CDS_TEST": "THIS IS A TEST",
			},
		},
	}
	test.NoError(t, workermodel.Insert(db, &model))
	pkey := sdk.RandomString(10)
	proj := assets.InsertTestProject(t, db, api.Cache, pkey, pkey)
	require.NoError(t, group.InsertLinkGroupUser(context.TODO(), db, &group.LinkGroupUser{
		GroupID:            proj.ProjectGroups[0].Group.ID,
		AuthentifiedUserID: u.ID,
		Admin:              true,
	}))
	pip := sdk.Pipeline{
		ProjectID:  proj.ID,
		ProjectKey: proj.Key,
		Name:       "pip1",
	}
	test.NoError(t, pipeline.InsertPipeline(db, &pip))
	//Insert Stage
	stage := &sdk.Stage{
		Name:          "stage_Test_0",
		PipelineID:    pip.ID,
		BuildOrder:    1,
		Enabled:       true,
		Prerequisites: []sdk.Prerequisite{},
	}
	pip.Stages = append(pip.Stages, *stage)
	t.Logf("Insert Stage %s for Pipeline %s of Project %s", stage.Name, pip.Name, proj.Name)
	test.NoError(t, pipeline.InsertStage(db, stage))
	//Insert Action
	t.Logf("Insert Action script on Stage %s for Pipeline %s of Project %s", stage.Name, pip.Name, proj.Name)
	// The job requirement references the model as "<group>/<model>"; this is
	// what ties the pipeline to the worker model for the usage endpoint.
	job := &sdk.Job{
		Action: sdk.Action{
			Name:    "NewAction",
			Enabled: true,
			Requirements: []sdk.Requirement{
				{
					Name:  fmt.Sprintf("%s/%s", grName, model.Name),
					Type:  sdk.ModelRequirement,
					Value: fmt.Sprintf("%s/%s", grName, model.Name),
				},
			},
		},
		Enabled: true,
	}
	errJob := pipeline.InsertJob(db, job, stage.ID, &pip)
	test.NoError(t, errJob)
	assert.NotZero(t, job.PipelineActionID)
	assert.NotZero(t, job.Action.ID)
	proj, _ = project.LoadByID(db, proj.ID,
		project.LoadOptions.WithApplications,
		project.LoadOptions.WithPipelines,
		project.LoadOptions.WithEnvironments,
		project.LoadOptions.WithGroups,
	)
	wf := sdk.Workflow{
		Name:       "workflow1",
		ProjectID:  proj.ID,
		ProjectKey: proj.Key,
		WorkflowData: sdk.WorkflowData{
			Node: sdk.Node{
				Name: "root",
				Context: &sdk.NodeContext{
					PipelineID: pip.ID,
				},
			},
		},
	}
	test.NoError(t, workflow.Insert(context.Background(), db, api.Cache, *proj, &wf))
	//Prepare request
	vars := map[string]string{
		"permGroupName": gr.Name,
		"permModelName": model.Name,
	}
	uri := router.GetRoute("GET", api.getWorkerModelUsageHandler, vars)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "GET", uri, vars)
	//Do the request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	// Exactly one pipeline (pip1) should be reported as using the model.
	var pipelines []sdk.Pipeline
	test.NoError(t, json.Unmarshal(w.Body.Bytes(), &pipelines))
	test.NotNil(t, pipelines)
	test.Equal(t, 1, len(pipelines))
	test.Equal(t, "pip1", pipelines[0].Name)
	test.Equal(t, proj.Key, pipelines[0].ProjectKey)
}
// Test_postWorkerModelWithWrongRequest posts a series of invalid payloads
// (missing type, name, group, cmd, and finally a non-model body) and expects
// a 400 Bad Request for each.
func Test_postWorkerModelWithWrongRequest(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, router, end := newTestAPI(t)
	defer end()
	//Create admin user
	u, jwt := assets.InsertAdminUser(t, api.mustDB())
	assert.NotZero(t, u)
	assert.NotZero(t, jwt)
	g, err := group.LoadByName(context.TODO(), api.mustDB(), "shared.infra")
	if err != nil {
		t.Fatalf("Error getting group : %s", err)
	}
	//Type is mandatory
	model := sdk.Model{
		Name: "Test1",
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Cmd:   "worker",
		},
		GroupID: g.ID,
	}
	//Prepare request
	uri := api.Router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	w := httptest.NewRecorder()
	api.Router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 400, w.Code)
	t.Logf("Body: %s", w.Body.String())
	//Name is mandatory
	model = sdk.Model{
		GroupID: g.ID,
		Type:    sdk.Docker,
	}
	//Prepare request
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w = httptest.NewRecorder()
	api.Router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 400, w.Code)
	t.Logf("Body: %s", w.Body.String())
	//GroupID is mandatory
	model = sdk.Model{
		Name: "Test1",
		Type: sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Cmd:   "worker",
		},
	}
	//Prepare request
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w = httptest.NewRecorder()
	api.Router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 400, w.Code)
	t.Logf("Body: %s", w.Body.String())
	//Cmd is mandatory
	model = sdk.Model{
		Name: "Test1",
		Type: sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
		},
	}
	//Prepare request
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w = httptest.NewRecorder()
	api.Router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 400, w.Code)
	t.Logf("Body: %s", w.Body.String())
	//SendBadRequest
	//Prepare request
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, "blabla")
	//Do the request
	w = httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 400, w.Code)
	t.Logf("Body: %s", w.Body.String())
}
// Test_postWorkerModelAsAGroupMember verifies that a plain (non-admin) group
// member cannot create a worker model in that group: the API must answer 403.
func Test_postWorkerModelAsAGroupMember(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, router, end := newTestAPI(t)
	defer end()
	//Create group
	g := &sdk.Group{
		Name: sdk.RandomString(10),
	}
	//Create user
	u, jwt := assets.InsertLambdaUser(t, api.mustDB(), g)
	assert.NotZero(t, u)
	assert.NotZero(t, jwt)
	model := sdk.Model{
		Name:    "Test1",
		GroupID: g.ID,
		Type:    sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Cmd:   "worker",
			Shell: "sh",
		},
	}
	//Prepare request
	uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri, "Route route found")
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 403, w.Code, "Status code should be 403 because only a group admin can create a model")
	t.Logf("Body: %s", w.Body.String())
}
// Test_postWorkerModelAsAGroupAdmin verifies that even a group admin gets a
// 403 when posting a model that has no pattern and is not restricted.
func Test_postWorkerModelAsAGroupAdmin(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, router, end := newTestAPI(t)
	defer end()
	//Create group
	g := &sdk.Group{
		Name: sdk.RandomString(10),
	}
	//Create user
	u, jwt := assets.InsertLambdaUser(t, api.mustDB(), g)
	assets.SetUserGroupAdmin(t, api.mustDB(), g.ID, u.ID)
	model := sdk.Model{
		Name:    "Test1",
		GroupID: g.ID,
		Type:    sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Cmd:   "worker",
			Shell: "sh",
		},
	}
	//Prepare request
	uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 403, w.Code, "Status code should equal 403 because the worker model haven't pattern and is not restricted")
	t.Logf("Body: %s", w.Body.String())
}
// Test_postWorkerModelAsAGroupAdminWithRestrict verifies that a group admin
// CAN create a model when it is flagged Restricted (no pattern needed).
func Test_postWorkerModelAsAGroupAdminWithRestrict(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, router, end := newTestAPI(t)
	defer end()
	//Create group
	g := &sdk.Group{
		Name: sdk.RandomString(10),
	}
	//Create user
	u, jwt := assets.InsertLambdaUser(t, api.mustDB(), g)
	assets.SetUserGroupAdmin(t, api.mustDB(), g.ID, u.ID)
	model := sdk.Model{
		Name:       "Test1",
		GroupID:    g.ID,
		Type:       sdk.Docker,
		Restricted: true,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Shell: "sh -c",
			Cmd:   "worker --api={{.API}}",
		},
	}
	//Prepare request
	uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code, "Status code should equal 200")
	var newModel sdk.Model
	assert.NoError(t, json.Unmarshal(w.Body.Bytes(), &newModel))
	test.Equal(t, "worker --api={{.API}}", newModel.ModelDocker.Cmd, "Main worker command is not good")
}
// Test_postWorkerModelAsAGroupAdminWithoutRestrictWithPattern verifies that
// a group admin can create a non-restricted model by referencing a registered
// pattern, and that the pattern's commands override the ones posted.
func Test_postWorkerModelAsAGroupAdminWithoutRestrictWithPattern(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, router, end := newTestAPI(t)
	defer end()
	//Create group
	g := &sdk.Group{
		Name: sdk.RandomString(10),
	}
	//Create user
	u, jwt := assets.InsertLambdaUser(t, api.mustDB(), g)
	assert.NotZero(t, u)
	assert.NotZero(t, jwt)
	assets.SetUserGroupAdmin(t, api.mustDB(), g.ID, u.ID)
	pattern := sdk.ModelPattern{
		Name: "test",
		Type: sdk.Openstack,
		Model: sdk.ModelCmds{
			PreCmd: "apt-get install curl -y",
			Cmd:    "./worker",
		},
	}
	test.NoError(t, workermodel.InsertPattern(api.mustDB(), &pattern))
	model := sdk.Model{
		Name:        "Test1",
		GroupID:     g.ID,
		Type:        sdk.Openstack,
		PatternName: "test",
		ModelVirtualMachine: sdk.ModelVirtualMachine{
			Image:  "Debian 7",
			Flavor: "vps-ssd-1",
			Cmd:    "worker --api={{.API}}",
		},
	}
	//Prepare request
	uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code, "Status code should equal 200")
	// The commands from the pattern, not the posted ones, must be stored.
	var newModel sdk.Model
	assert.NoError(t, json.Unmarshal(w.Body.Bytes(), &newModel))
	test.Equal(t, "./worker", newModel.ModelVirtualMachine.Cmd, "Main worker command is not good")
	test.Equal(t, "apt-get install curl -y", newModel.ModelVirtualMachine.PreCmd, "Pre worker command is not good")
}
// Test_postWorkerModelAsAWrongGroupMember verifies that an admin of group g
// cannot create a model owned by a different group g1: the API must answer 403.
func Test_postWorkerModelAsAWrongGroupMember(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, router, end := newTestAPI(t)
	defer end()
	//Create group
	g := &sdk.Group{
		Name: sdk.RandomString(10),
	}
	//Create group
	g1 := &sdk.Group{
		Name: sdk.RandomString(10),
	}
	require.NoError(t, group.Insert(context.TODO(), api.mustDB(), g1))
	//Create user
	u, jwt := assets.InsertLambdaUser(t, api.mustDB(), g)
	assets.SetUserGroupAdmin(t, api.mustDB(), g.ID, u.ID)
	model := sdk.Model{
		Name:    "Test1",
		GroupID: g1.ID,
		Type:    sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Cmd:   "worker",
			Shell: "sh",
		},
	}
	//Prepare request
	uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 403, w.Code, "Status code should be 403 because only a group admin can create a model")
	t.Logf("Body: %s", w.Body.String())
}
// Test_putWorkerModel creates a restricted model as a group admin, then
// updates it (rename Test1 -> Test1bis) through the PUT handler and expects
// a 200.
func Test_putWorkerModel(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, router, end := newTestAPI(t)
	defer end()
	//Create group
	g := &sdk.Group{
		Name: sdk.RandomString(10),
	}
	//Create user
	u, jwt := assets.InsertLambdaUser(t, api.mustDB(), g)
	assets.SetUserGroupAdmin(t, api.mustDB(), g.ID, u.ID)
	model := sdk.Model{
		Name:       "Test1",
		GroupID:    g.ID,
		Type:       sdk.Docker,
		Restricted: true,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Shell: "sh -c",
			Cmd:   "worker",
		},
	}
	//Prepare request
	uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	t.Logf("Body: %s", w.Body.String())
	// Refresh `model` with the server-side representation (IDs etc.).
	json.Unmarshal(w.Body.Bytes(), &model)
	model2 := sdk.Model{
		Name:       "Test1bis",
		GroupID:    g.ID,
		Type:       sdk.Docker,
		Restricted: true,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Cmd:   "worker",
			Shell: "sh -c",
		},
	}
	//Prepare request
	vars := map[string]string{
		"permGroupName": g.Name,
		"permModelName": model.Name,
	}
	uri = router.GetRoute("PUT", api.putWorkerModelHandler, vars)
	test.NotEmpty(t, uri)
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "PUT", uri, model2)
	//Do the request
	w = httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	t.Logf("Body: %s", w.Body.String())
}
func Test_putWorkerModelWithPassword(t *testing.T) {
Test_DeleteAllWorkerModels(t)
api, _, router, end := newTestAPI(t)
defer end()
//Create group
g := &sdk.Group{
Name: sdk.RandomString(10),
}
//Create user
u, jwt := assets.InsertLambdaUser(t, api.mustDB(), g)
assets.SetUserGroupAdmin(t, api.mustDB(), g.ID, u.ID)
model := sdk.Model{
Name: "Test1",
GroupID: g.ID,
Type: sdk.Docker,
Restricted: true,
ModelDocker: sdk.ModelDocker{
Image: "buildpack-deps:jessie",
Shell: "sh -c",
Cmd: "worker",
Private: true,
Username: "test",
Password: "<PASSWORD>",
},
RegisteredCapabilities: sdk.RequirementList{
{
Name: "capa1",
Type: sdk.BinaryRequirement,
Value: "1",
},
},
}
//Prepare request
uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
test.NotEmpty(t, uri)
req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
//Do the request
w := httptest.NewRecorder()
router.Mux.ServeHTTP(w, req)
assert.Equal(t, 200, w.Code)
t.Logf("Body: %s", w.Body.String())
json.Unmarshal(w.Body.Bytes(), &model)
model2 := sdk.Model{
Name: "Test1bis",
GroupID: g.ID,
Type: sdk.Docker,
Restricted: true,
ModelDocker: sdk.ModelDocker{
Image: "buildpack-deps:jessie",
Cmd: "worker",
Shell: "sh -c",
Private: true,
Username: "test",
Password: <PASSWORD>,
},
RegisteredCapabilities: sdk.RequirementList{
{
Name: "capa1",
Type: sdk.BinaryRequirement,
Value: "1",
},
{
Name: "capa2",
Type: sdk.BinaryRequirement,
Value: "2",
},
},
}
//Prepare request
vars := map[string]string{
"permGroupName": g.Name,
"permModelName": model.Name,
}
uri = router.GetRoute("PUT", api.putWorkerModelHandler, vars)
test.NotEmpty(t, uri)
req = assets.NewJWTAuthentifiedRequest(t, jwt, "PUT", uri, model2)
//Do the request
w = httptest.NewRecorder()
router.Mux.ServeHTTP(w, req)
assert.Equal(t, 200, w.Code)
var resp sdk.Model
test.NoError(t, json.Unmarshal(w.Body.Bytes(), &resp))
test.Equal(t, sdk.PasswordPlaceholder, resp.ModelDocker.Password, "Worker model should not return password, but placeholder")
wm, errL := workermodel.LoadByNameAndGroupIDWithClearPassword(api.mustDB(), resp.Name, resp.GroupID)
test.NoError(t, errL)
pw, errPw := secret.DecryptValue(wm.ModelDocker.Password)
test.NoError(t, errPw)
test.Equal(t, "testpw", pw)
}
// Test_deleteWorkerModel creates a restricted model as a group admin and
// then deletes it through the DELETE handler, expecting a 204 No Content.
func Test_deleteWorkerModel(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, router, end := newTestAPI(t)
	defer end()
	//Create group
	g := &sdk.Group{
		Name: sdk.RandomString(10),
	}
	//Create user
	u, jwt := assets.InsertLambdaUser(t, api.mustDB(), g)
	assets.SetUserGroupAdmin(t, api.mustDB(), g.ID, u.ID)
	model := sdk.Model{
		Name:       "Test1",
		GroupID:    g.ID,
		Type:       sdk.Docker,
		Restricted: true,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Cmd:   "worker",
			Shell: "sh -c",
		},
	}
	//Prepare request
	uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	t.Logf("Body: %s", w.Body.String())
	// Refresh `model` with the server-side representation before deleting.
	json.Unmarshal(w.Body.Bytes(), &model)
	//Prepare request
	vars := map[string]string{
		"permGroupName": g.Name,
		"permModelName": model.Name,
	}
	uri = router.GetRoute("DELETE", api.deleteWorkerModelHandler, vars)
	test.NotEmpty(t, uri)
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "DELETE", uri, nil)
	//Do the request
	w = httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 204, w.Code)
	t.Logf("Body: %s", w.Body.String())
}
// Test_getWorkerModel creates a model as admin and then fetches it back via
// the list endpoint filtered by ?name=Test1, expecting a 200.
func Test_getWorkerModel(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, _, router, end := newTestAPI(t)
	defer end()
	//Create admin user
	u, jwt := assets.InsertAdminUser(t, api.mustDB())
	assert.NotZero(t, u)
	assert.NotZero(t, jwt)
	g, err := group.LoadByName(context.TODO(), api.mustDB(), "shared.infra")
	if err != nil {
		t.Fatalf("Error getting group : %s", err)
	}
	model := sdk.Model{
		Name:    "Test1",
		GroupID: g.ID,
		Type:    sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Shell: "sh -c",
			Cmd:   "worker",
		},
	}
	//Prepare request
	uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, model)
	//Do the request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	t.Logf("Body: %s", w.Body.String())
	//Prepare request
	uri = router.GetRoute("GET", api.getWorkerModelsHandler, nil)
	test.NotEmpty(t, uri)
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "GET", uri+"?name=Test1", nil)
	//Do the request
	w = httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	t.Logf("Body: %s", w.Body.String())
}
// Test_getWorkerModels checks visibility rules of the model list endpoints:
// an admin sees all three models, a member of g1 sees only g1's two models,
// and the per-group endpoint returns only the requested group's model.
func Test_getWorkerModels(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, db, router, end := newTestAPI(t)
	defer end()
	_, jwtAdmin := assets.InsertAdminUser(t, api.mustDB())
	g1 := &sdk.Group{Name: sdk.RandomString(10)}
	g2 := assets.InsertGroup(t, db)
	_, jwtGroupMember := assets.InsertLambdaUser(t, api.mustDB(), g1)
	// Names are prefixed A/B/C so the expected result ordering is stable.
	m1 := sdk.Model{
		Name:    "A" + sdk.RandomString(10),
		GroupID: g1.ID,
		Type:    sdk.Docker,
	}
	require.NoError(t, workermodel.Insert(db, &m1))
	m2 := sdk.Model{
		Name:    "B" + sdk.RandomString(10),
		GroupID: g1.ID,
		Type:    sdk.Docker,
	}
	require.NoError(t, workermodel.Insert(db, &m2))
	m3 := sdk.Model{
		Name:    "C" + sdk.RandomString(10),
		GroupID: g2.ID,
		Type:    sdk.Docker,
	}
	require.NoError(t, workermodel.Insert(db, &m3))
	// getWorkerModelsHandler by admin
	uri := router.GetRoute(http.MethodGet, api.getWorkerModelsHandler, nil)
	test.NotEmpty(t, uri)
	req := assets.NewJWTAuthentifiedRequest(t, jwtAdmin, http.MethodGet, uri, nil)
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	results := []sdk.Model{}
	json.Unmarshal(w.Body.Bytes(), &results)
	require.Equal(t, 3, len(results))
	assert.Equal(t, m1.Name, results[0].Name)
	assert.Equal(t, m2.Name, results[1].Name)
	assert.Equal(t, m3.Name, results[2].Name)
	// getWorkerModelsHandler by group member
	uri = router.GetRoute(http.MethodGet, api.getWorkerModelsHandler, nil)
	test.NotEmpty(t, uri)
	req = assets.NewJWTAuthentifiedRequest(t, jwtGroupMember, http.MethodGet, uri, nil)
	w = httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	json.Unmarshal(w.Body.Bytes(), &results)
	require.Equal(t, 2, len(results))
	assert.Equal(t, m1.Name, results[0].Name)
	assert.Equal(t, m2.Name, results[1].Name)
	// getWorkerModelsForGroupHandler
	uri = router.GetRoute(http.MethodGet, api.getWorkerModelsForGroupHandler, map[string]string{
		"permGroupName": g2.Name,
	})
	test.NotEmpty(t, uri)
	req = assets.NewJWTAuthentifiedRequest(t, jwtAdmin, http.MethodGet, uri, nil)
	w = httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	assert.Equal(t, 200, w.Code)
	json.Unmarshal(w.Body.Bytes(), &results)
	require.Equal(t, 1, len(results))
	assert.Equal(t, m3.Name, results[0].Name)
}
// This test creates a worker model then an action that will use it.
// Next the model group and name will be updated and we want to check if the
// action's model requirement was updated to the new "<group>/<name>" path.
func Test_renameWorkerModel(t *testing.T) {
	Test_DeleteAllWorkerModels(t)
	api, db, router, end := newTestAPI(t)
	defer end()
	// create new group
	g1 := assets.InsertTestGroup(t, db, sdk.RandomString(10))
	// create new group
	g2 := assets.InsertTestGroup(t, db, sdk.RandomString(10))
	// create admin user
	u, jwt := assets.InsertAdminUser(t, api.mustDB())
	assert.NotZero(t, u)
	assert.NotZero(t, jwt)
	// prepare post model request
	uri := router.GetRoute("POST", api.postWorkerModelHandler, nil)
	test.NotEmpty(t, uri)
	initialName := sdk.RandomString(10)
	req := assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, sdk.Model{
		Name:    initialName,
		GroupID: g1.ID,
		Type:    sdk.Docker,
		ModelDocker: sdk.ModelDocker{
			Image: "buildpack-deps:jessie",
			Cmd:   "worker",
			Shell: "sh",
		},
	})
	// send post model request
	w := httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	// check created model
	assert.Equal(t, 200, w.Code)
	var result sdk.Model
	assert.NoError(t, json.Unmarshal(w.Body.Bytes(), &result))
	assert.Equal(t, g1.Name, result.Group.Name)
	assert.Equal(t, initialName, result.Name)
	// prepare post action request
	uri = router.GetRoute("POST", api.postActionHandler, nil)
	test.NotEmpty(t, uri)
	actionName := sdk.RandomString(10)
	// model requirement value is "<group>/<model> --privileged"
	modelPath := fmt.Sprintf("%s/%s --privileged", result.Group.Name, result.Name)
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "POST", uri, sdk.Action{
		Name:    actionName,
		GroupID: &g1.ID,
		Requirements: []sdk.Requirement{{
			Type:  sdk.ModelRequirement,
			Name:  modelPath,
			Value: modelPath,
		}},
	})
	// send post action request
	w = httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	// check created action
	assert.Equal(t, 201, w.Code)
	var action sdk.Action
	assert.NoError(t, json.Unmarshal(w.Body.Bytes(), &action))
	assert.Equal(t, g1.Name, action.Group.Name)
	assert.Equal(t, actionName, action.Name)
	assert.Equal(t, 1, len(action.Requirements))
	assert.Equal(t, modelPath, action.Requirements[0].Value)
	// prepare put model request
	uri = router.GetRoute("PUT", api.putWorkerModelHandler, map[string]string{
		"permGroupName": result.Group.Name,
		"permModelName": result.Name,
	})
	test.NotEmpty(t, uri)
	newName := sdk.RandomString(10)
	result.Name = newName
	result.GroupID = g2.ID
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "PUT", uri, result)
	// send put model request
	w = httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	// check updated model
	assert.Equal(t, 200, w.Code)
	assert.NoError(t, json.Unmarshal(w.Body.Bytes(), &result))
	assert.Equal(t, g2.Name, result.Group.Name)
	assert.Equal(t, newName, result.Name)
	// prepare get action request
	uri = router.GetRoute("GET", api.getActionHandler, map[string]string{
		"permGroupName":  action.Group.Name,
		"permActionName": action.Name,
	})
	test.NotEmpty(t, uri)
	req = assets.NewJWTAuthentifiedRequest(t, jwt, "GET", uri, nil)
	// send get action request
	w = httptest.NewRecorder()
	router.Mux.ServeHTTP(w, req)
	// check action: its requirement must now reference the renamed model
	updatedModelPath := fmt.Sprintf("%s/%s --privileged", result.Group.Name, result.Name)
	assert.Equal(t, 200, w.Code)
	assert.NoError(t, json.Unmarshal(w.Body.Bytes(), &action))
	assert.Equal(t, g1.Name, action.Group.Name)
	assert.Equal(t, actionName, action.Name)
	assert.Equal(t, 1, len(action.Requirements))
	assert.Equal(t, updatedModelPath, action.Requirements[0].Value)
}
|
<filename>src/components/Text/Text.types.ts
import type { TextColor } from 'src/Colors';
import type { PropsWithTypedChildren, WithTestID } from 'src/types';
import type { TextProps as RNTextProps } from 'react-native';
/** Visual variants a Text component can render as. */
export enum TextVariant {
  Hero = 'hero',
  Heading = 'heading',
  Paragraph = 'paragraph',
  Caption = 'caption',
  Chef = 'chef',
  Quote = 'quote',
}

/** Horizontal text alignment options. */
export enum TextAlign {
  Left = 'left',
  Center = 'center',
  Right = 'right',
}

/** Supported font weights. */
export enum FontWeight {
  Regular = 'Regular',
  Bold = 'Bold',
}

/** Size scale; which sizes are valid depends on the chosen variant. */
export enum TextSize {
  Hero1 = 'hero1',
  Hero2 = 'hero2',
  Heading1 = 'heading1',
  Heading2 = 'heading2',
  Heading3 = 'heading3',
  Heading4 = 'heading4',
  Paragraph1 = 'paragraph1',
  Paragraph2 = 'paragraph2',
  Basic = 'basic',
}

/** Props shared by every variant (plus an optional testID). */
export type FontProps = WithTestID<{
  align?: TextAlign;
  color?: TextColor;
  uppercase?: boolean;
}>;

// Caption/Chef/Quote only come in the basic size and regular weight.
type BasicText = FontProps & {
  variant: TextVariant.Caption | TextVariant.Chef | TextVariant.Quote;
  size?: TextSize.Basic;
  weight?: FontWeight.Regular;
};

// Hero requires an explicit hero size; weight stays regular.
type HeroText = FontProps & {
  variant: TextVariant.Hero;
  size: TextSize.Hero1 | TextSize.Hero2;
  weight?: FontWeight.Regular;
};

// Heading requires one of the four heading sizes; weight stays regular.
type HeadingText = FontProps & {
  variant: TextVariant.Heading;
  size:
    | TextSize.Heading1
    | TextSize.Heading2
    | TextSize.Heading3
    | TextSize.Heading4;
  weight?: FontWeight.Regular;
};

// Paragraph requires both a paragraph size and an explicit weight.
type ParagraphText = FontProps & {
  variant: TextVariant.Paragraph;
  size: TextSize.Paragraph1 | TextSize.Paragraph2;
  weight: FontWeight.Regular | FontWeight.Bold;
};

/**
 * Discriminated union of all Text prop shapes (discriminated on `variant`),
 * each merged with React Native's TextProps; children must be a string.
 */
export type TextProps = PropsWithTypedChildren<
  | (RNTextProps & BasicText)
  | (RNTextProps & HeroText)
  | (RNTextProps & HeadingText)
  | (RNTextProps & ParagraphText),
  string
>;
|
#!/usr/bin/env bash
set -euo pipefail
GH_REPO="https://github.com/godotengine/godot"
REPO="https://downloads.tuxfamily.org/godotengine"
TOOL_NAME="godot"
TOOL_TEST="godot --version"
# Print an error message prefixed with the plugin name and abort the
# script with a non-zero exit status.
fail() {
  echo -e "asdf-$TOOL_NAME: $*"
  exit 1
}
curl_opts=(-fsSL)
# Sort version strings read from stdin in natural (numeric) order.
# Standard asdf-plugin boilerplate: the sed pass stashes the original
# string in the hold space and normalizes separators so pre-release
# suffixes sort before the plain release, sort orders up to five
# dot-separated numeric fields, and awk restores the original string.
sort_versions() {
  sed 'h; s/[+-]/./g; s/.p\([[:digit:]]\)/.z\1/; s/$/.z/; G; s/\n/ /' |
    LC_ALL=C sort -t. -k 1,1 -k 2,2n -k 3,3n -k 4,4n -k 5,5n | awk '{print $2}'
}
# List version tags from the upstream GitHub repository, one per line,
# with the leading "v" and trailing "-stable" suffix stripped so the
# output matches the version strings users install.
list_github_tags() {
  git ls-remote --tags --refs "$GH_REPO" |
    grep -o 'refs/tags/.*' | cut -d/ -f3- |
    sed 's/^v//;s/-stable$//'
}
# asdf hook: print every installable version.
# Godot tags every release on GitHub, so the tag list is authoritative.
list_all_versions() {
  list_github_tags
}
# Download the Godot release archive for a version.
#   $1 = version string (without the "v"/"-stable" decorations)
#   $2 = destination path for the downloaded zip
# Fails (via fail) on unsupported platforms or download errors.
download_release() {
  local version filename platform url
  version="$1"
  filename="$2"

  # Map the host OS to the platform suffix Godot uses in its archive
  # names. Previously an unrecognized OS left $platform unset, which
  # aborted later with a cryptic "unbound variable" error under set -u;
  # fail explicitly instead.
  case "$(uname)" in
    Linux) platform='x11.64' ;;
    Darwin) platform='osx.universal' ;;
    *) fail "Unsupported platform: $(uname)" ;;
  esac

  url="$REPO/${version}/Godot_v${version}-stable_${platform}.zip"

  echo "* Downloading $TOOL_NAME release $version..."
  # -C - resumes a partial download if one exists.
  curl "${curl_opts[@]}" -o "$filename" -C - "$url" || fail "Could not download $url"
}
# asdf hook: install a downloaded release.
#   $1 = install type (only "version" is supported)
#   $2 = version string
#   $3 = installation directory
install_version() {
  local install_type="$1"
  local version="$2"
  local install_path="$3"
  local platform

  # Platform suffix must match the one used by download_release.
  # BUG FIX: this branch previously set 'osx.univeral' (misspelled), so
  # the mv below could never find the extracted binary on macOS. Also
  # fail explicitly on unsupported platforms instead of tripping set -u.
  case "$(uname)" in
    Linux) platform='x11.64' ;;
    Darwin) platform='osx.universal' ;;
    *) fail "Unsupported platform: $(uname)" ;;
  esac

  if [ "$install_type" != "version" ]; then
    fail "asdf-$TOOL_NAME supports release installs only"
  fi

  local release_file="$install_path/$TOOL_NAME-$version.zip"
  (
    mkdir -p "$install_path/bin"
    download_release "$version" "$release_file"
    unzip -qq "$release_file" -d "$install_path" || fail "Could not extract $release_file"
    # The archive contains a single executable named after the version
    # and platform; move it to the conventional bin/ location.
    mv "$install_path/Godot_v${version}-stable_${platform}" "$install_path/bin/godot"
    rm "$release_file"

    # Sanity check: the command named in TOOL_TEST must now be executable.
    local tool_cmd
    tool_cmd="$(echo "$TOOL_TEST" | cut -d' ' -f1)"
    test -x "$install_path/bin/$tool_cmd" || fail "Expected $install_path/bin/$tool_cmd to be executable."

    echo "$TOOL_NAME $version installation was successful!"
  ) || (
    # Clean up the half-installed directory before reporting failure.
    rm -rf "$install_path"
    fail "An error occurred while installing $TOOL_NAME $version."
  )
}
|
package com.dam.authentication.rest.message;
import java.util.UUID;
import org.springframework.http.HttpStatus;
/**
 * REST response returned when a token has been validated successfully.
 * Carries the validated token id and the owning user id on top of the
 * generic HTTP 200 / "User validated." status from {@link RestResponse}.
 */
public class TokenValidationResponse extends RestResponse {

    private UUID tokenId;
    private Long userId;

    /**
     * Builds an OK response for a validated user.
     *
     * @param userId  id of the user the token belongs to
     * @param tokenId id of the token that was validated
     */
    public TokenValidationResponse(Long userId, UUID tokenId) {
        super(HttpStatus.OK, "OK", "User validated.");
        this.userId = userId;
        this.tokenId = tokenId;
    }

    public UUID getTokenId() {
        return tokenId;
    }

    public void setTokenId(UUID tokenId) {
        this.tokenId = tokenId;
    }

    public Long getUserId() {
        return userId;
    }

    public void setUserId(Long userId) {
        this.userId = userId;
    }
}
|
<reponame>wongoo/alipay-sdk-java-all
package com.alipay.api.response;
import com.alipay.api.internal.mapping.ApiField;
import com.alipay.api.AlipayResponse;
/**
 * ALIPAY API: alipay.multimedia.resource.masstoken.get response.
 *
 * @author <NAME>
 * @since 1.0, 2021-12-08 23:30:24
 */
public class AlipayMultimediaResourceMasstokenGetResponse extends AlipayResponse {

    private static final long serialVersionUID = 2782361765186287611L;

    /** Creation timestamp of the token, in seconds. */
    @ApiField("create_time")
    private Long createTime;

    /** Expiry timestamp of the token, in seconds. */
    @ApiField("dead_time")
    private Long deadTime;

    /**
     * Token obtained from the AFTS system, used as the authentication
     * parameter for upload/download operations against AFTS. Sample
     * values are for reference only and cannot be used directly.
     */
    @ApiField("mass_token")
    private String massToken;

    public void setCreateTime(Long createTime) {
        this.createTime = createTime;
    }

    public Long getCreateTime() {
        return this.createTime;
    }

    public void setDeadTime(Long deadTime) {
        this.deadTime = deadTime;
    }

    public Long getDeadTime() {
        return this.deadTime;
    }

    public void setMassToken(String massToken) {
        this.massToken = massToken;
    }

    public String getMassToken() {
        return this.massToken;
    }
}
|
<reponame>andreibarabas/Instabug-React-Native<gh_stars>1-10
package com.instabug.reactlibrary;
import android.os.Handler;
import android.os.Looper;
import com.facebook.react.bridge.Callback;
import com.facebook.react.bridge.ReactApplicationContext;
import com.facebook.react.bridge.ReactContextBaseJavaModule;
import com.facebook.react.bridge.ReactMethod;
import com.instabug.chat.Replies;
import com.instabug.library.Feature;
import com.instabug.reactlibrary.utils.InstabugUtil;
import com.instabug.reactlibrary.utils.MainThreadHandler;
import javax.annotation.Nonnull;
/**
 * React Native bridge module exposing the Instabug Replies (in-app chat)
 * SDK to JavaScript under the module name "IBGReplies". Every exposed
 * method dispatches to the Android main thread before touching the SDK,
 * and SDK exceptions are caught and logged so a bridge call can never
 * crash the host app.
 */
public class RNInstabugRepliesModule extends ReactContextBaseJavaModule {

    public RNInstabugRepliesModule(ReactApplicationContext reactApplicationContext) {
        super(reactApplicationContext);
    }

    /** Name under which this module is registered on the JS side. */
    @Nonnull
    @Override
    public String getName() {
        return "IBGReplies";
    }

    /**
     * Enable or disable the Replies feature as a whole.
     *
     * @param isEnabled true to enable Replies, false to disable
     */
    @ReactMethod
    public void setEnabled(final boolean isEnabled) {
        MainThreadHandler.runOnMainThread(new Runnable() {
            @Override
            public void run() {
                try {
                    if (isEnabled) {
                        Replies.setState(Feature.State.ENABLED);
                    } else {
                        Replies.setState(Feature.State.DISABLED);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
    }

    /**
     * Report whether the current user has any chats.
     *
     * @param callback invoked with a single boolean argument
     */
    @ReactMethod
    public void hasChats(final Callback callback) {
        MainThreadHandler.runOnMainThread(new Runnable() {
            @Override
            public void run() {
                boolean hasChats = Replies.hasChats();
                callback.invoke(hasChats);
            }
        });
    }

    /** Present the Replies (chat list) UI. */
    @ReactMethod
    public void show() {
        MainThreadHandler.runOnMainThread(new Runnable() {
            @Override
            public void run() {
                Replies.show();
            }
        });
    }

    /**
     * Set whether new in app notification received will play a small sound notification
     * or not (Default is {@code false})
     *
     * @param shouldPlaySound desired state of conversation sounds
     * @since 4.1.0
     */
    @ReactMethod
    public void setInAppNotificationSound(final boolean shouldPlaySound) {
        MainThreadHandler.runOnMainThread(new Runnable() {
            @Override
            public void run() {
                try {
                    Replies.setInAppNotificationSound(shouldPlaySound);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
    }

    /**
     * Get current unread count of messages for this user.
     *
     * @param messageCountCallback invoked with the number of unread
     *        messages (0 if the SDK call throws)
     */
    @ReactMethod
    public void getUnreadRepliesCount(final Callback messageCountCallback) {
        MainThreadHandler.runOnMainThread(new Runnable() {
            @Override
            public void run() {
                int unreadMessages = 0;
                try {
                    unreadMessages = Replies.getUnreadRepliesCount();
                } catch (Exception e) {
                    e.printStackTrace();
                }
                messageCountCallback.invoke(unreadMessages);
            }
        });
    }

    /**
     * Enable/disable chat notifications.
     *
     * @param isChatNotificationEnable whether chat notification is required or not
     */
    @ReactMethod
    public void setInAppNotificationEnabled(final boolean isChatNotificationEnable) {
        MainThreadHandler.runOnMainThread(new Runnable() {
            @Override
            public void run() {
                try {
                    Replies.setInAppNotificationEnabled(isChatNotificationEnable);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
    }

    /**
     * Register a handler fired when a new reply arrives. Note that the
     * supplied callback is not invoked directly: on each new reply an
     * {@code IBG_ON_NEW_REPLY_RECEIVED_CALLBACK} event is emitted to JS
     * through {@code InstabugUtil.sendEvent}, matching the event-emitter
     * pattern used by the JS wrapper.
     *
     * @param onNewReplyReceivedCallback kept for bridge API symmetry;
     *        delivery happens via the React Native event emitter
     */
    @ReactMethod
    public void setOnNewReplyReceivedHandler(final Callback onNewReplyReceivedCallback) {
        MainThreadHandler.runOnMainThread(new Runnable() {
            @Override
            public void run() {
                try {
                    Runnable onNewReplyReceivedRunnable = new Runnable() {
                        @Override
                        public void run() {
                            InstabugUtil.sendEvent(getReactApplicationContext(), Constants.IBG_ON_NEW_REPLY_RECEIVED_CALLBACK, null);
                        }
                    };
                    Replies.setOnNewReplyReceivedCallback(onNewReplyReceivedRunnable);
                } catch (java.lang.Exception exception) {
                    exception.printStackTrace();
                }
            }
        });
    }
}
|
#include <iostream>
#include <vector>
#include <mutex>
#include <algorithm>
// Throw std::invalid_argument when the given pointer is null.
// Wrapped in do { } while (0) so the macro expands to a single
// statement and is safe as the unbraced body of an if/else (the bare
// if-block form would swallow a following `else`).
#define VALIDATE_NOT_NULL(ptr) \
    do { \
        if ((ptr) == nullptr) { \
            throw std::invalid_argument("Null pointer exception"); \
        } \
    } while (0)
// Thread-safe registry of plugin names. All public methods take the
// internal mutex, so concurrent callers are serialized.
class PluginManager {
private:
    std::vector<std::string> plugins;  // registered plugin names, in insertion order
    std::mutex mutex;                  // guards all access to `plugins`

public:
    // Register a plugin name (duplicates are allowed).
    // BUG FIX: the previous VALIDATE_NOT_NULL(&plugin) checked the
    // address of a reference, which is never null in well-formed C++ —
    // the check was dead code and has been removed.
    void addPlugin(const std::string& plugin) {
        std::lock_guard<std::mutex> lock(mutex);
        plugins.push_back(plugin);
    }

    // Remove every occurrence of the given plugin name; no-op when the
    // name is not registered. (erase-remove idiom)
    void removePlugin(const std::string& plugin) {
        std::lock_guard<std::mutex> lock(mutex);
        plugins.erase(std::remove(plugins.begin(), plugins.end(), plugin), plugins.end());
    }

    // Return a snapshot copy of the registered plugins; safe to iterate
    // without holding the lock.
    std::vector<std::string> getPlugins() {
        std::lock_guard<std::mutex> lock(mutex);
        return plugins;
    }
};
int main() {
PluginManager manager;
// Add plugins
manager.addPlugin("Plugin A");
manager.addPlugin("Plugin B");
manager.addPlugin("Plugin C");
// Get and print plugins
std::vector<std::string> retrievedPlugins = manager.getPlugins();
for (const auto& plugin : retrievedPlugins) {
std::cout << "Plugin: " << plugin << std::endl;
}
// Remove a plugin
manager.removePlugin("Plugin B");
// Get and print plugins after removal
retrievedPlugins = manager.getPlugins();
for (const auto& plugin : retrievedPlugins) {
std::cout << "Plugin: " << plugin << std::endl;
}
return 0;
} |
#!/usr/bin/python3
"""An improved version of PswdProtHello that uses objects."""
#Classes
#===============================================================================
class User(object):
    """A user identified by a name and a password."""

    def __init__(self, name, pswd):
        """Store the user's credentials.

        NOTE(review): the password is kept in plain text, which is fine
        for this toy example but never acceptable for real auth.
        """
        self.name = name
        self.pswd = pswd

    def __eq__(self, user):
        """Two users are equal when both name and password match.

        BUG FIX: returns NotImplemented for non-User operands so that
        ``user == "something"`` evaluates to False via Python's default
        fallback instead of raising AttributeError.
        """
        if not isinstance(user, User):
            return NotImplemented
        return self.name == user.name and self.pswd == user.pswd

    def __repr__(self):
        """Debug representation; the password is deliberately omitted."""
        return "User(name={!r})".format(self.name)

    def say_hello(self):
        """Print a greeting addressed to this user."""
        print("Hello {}!".format(self.name))
# Entry Point
# ==============================================================================
# The single registered account that a login attempt is checked against.
registered_user = User("Dylan", "cheetah")

# Collect credentials interactively; prompt order (name, then password)
# is part of the observable behavior.
login_attempt = User(input("Name: "), input("Pswd: "))

if login_attempt == registered_user:
    login_attempt.say_hello()
else:
    print("***Access Denied***")
|
#!/usr/bin/env bash
# Early cleanup: validate the environment, then reclaim disk space by
# pruning container images. Commands are echoed (set -x) for CI logs.
set -x

# Shared helpers; early_cleanup_validation below comes from validation.sh.
source logging.sh
source common.sh
source validation.sh

# Presumably checks host prerequisites before cleanup — verify against
# validation.sh.
early_cleanup_validation

# Remove all unused images to free space.
# NOTE(review): `podman image prune --all` prompts for confirmation
# unless `--force` is passed — confirm this script is only run where an
# interactive prompt (or its default) is acceptable.
sudo podman image prune --all
|
<gh_stars>0
package main
import (
"errors"
"fmt"
"log"
"github.com/dmies/adventOfGo/filehandler"
)
// FindNumbersThatSumTo2020 checks an expense report ([]int) for two
// distinct entries that sum to 2020 and returns them (earlier entry
// first), or an error when no such pair exists.
//
// Uses a set of previously seen values for O(n) time instead of the
// original O(n^2) double scan; the returned pair is identical to the
// nested-loop version's, since the first match it could report always
// pairs an element with a later one.
func FindNumbersThatSumTo2020(expenseReport []int) (int, int, error) {
	seen := make(map[int]bool, len(expenseReport))
	for _, value := range expenseReport {
		complement := 2020 - value
		if seen[complement] {
			return complement, value, nil
		}
		seen[value] = true
	}
	return 0, 0, errors.New("couldn't find numbers that sum up to 2020")
}
// FindThreeNumbersThatSumTo2020 checks an expense report ([]int) for
// three distinct entries that sum to 2020 and returns them in index
// order, or an error when no such triple exists.
//
// The loops use strictly increasing indices (i < j < k), which visits
// each candidate triple once instead of six times; the first match in
// the original full i/j/k scan always had increasing indices, so the
// returned values are unchanged. The third-element target is hoisted
// out of the innermost loop.
func FindThreeNumbersThatSumTo2020(expenseReport []int) (int, int, int, error) {
	n := len(expenseReport)
	for i := 0; i < n-2; i++ {
		for j := i + 1; j < n-1; j++ {
			target := 2020 - expenseReport[i] - expenseReport[j]
			for k := j + 1; k < n; k++ {
				if expenseReport[k] == target {
					return expenseReport[i], expenseReport[j], expenseReport[k], nil
				}
			}
		}
	}
	return 0, 0, 0, errors.New("couldn't find three numbers that sum up to 2020")
}
// Part1 answers the first puzzle: find the pair of report entries that
// sum to 2020 and return their product, propagating any lookup error.
func Part1(expenseReport []int) (int, error) {
	first, second, err := FindNumbersThatSumTo2020(expenseReport)
	if err != nil {
		return 0, err
	}
	return first * second, nil
}
// Part2 answers the second puzzle: find the three report entries that
// sum to 2020 and return their product, propagating any lookup error.
func Part2(expenseReport []int) (int, error) {
	first, second, third, err := FindThreeNumbersThatSumTo2020(expenseReport)
	if err != nil {
		return 0, err
	}
	return first * second * third, nil
}
// main reads the puzzle input (one integer per line) and prints the
// answers for both parts, aborting the program on any error.
func main() {
	report, err := filehandler.ImportNumberPerLineList("./input.txt")
	if err != nil {
		log.Fatal(err)
	}

	answer1, err := Part1(report)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("day 01, part1 %v\n", answer1)

	answer2, err := Part2(report)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("day 01, part2 %v\n", answer2)
}
|
def dec2bin(num):
    """Convert a non-negative integer to its binary string representation.

    Digits are generated least-significant first and joined in reverse.
    BUG FIX: the original loop returned "" for input 0; 0 now yields "0".

    Args:
        num: a non-negative integer.

    Returns:
        The binary representation of ``num`` as a string of '0'/'1'
        characters with no prefix (e.g. 13 -> "1101").
    """
    if num == 0:
        return "0"
    digits = []
    while num > 0:
        digits.append(str(num % 2))
        num //= 2
    # join is linear; repeated string += would be quadratic
    return "".join(reversed(digits))
# Demo: 13 in binary is "1101".
print(dec2bin(13))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.