text stringlengths 1 1.05M |
|---|
import { Employee } from './employee.model';
export class Department {
public _id: string;
public _key: string;
public _rev: string;
public tenant: string;
public name: string;
public collaborators:Employee[];
public manager: Employee;
public addressID: string;
public topDepartment: string;
public attachedTo: string;
constructor(id: string, key: string, rev: string, $tenant: string, $name: string, $collaborators: Employee[], $manager: Employee, $addressID: string, $topDepartment:string, $attachedTo:string) {
this._id = id;
this._key = key;
this._rev = rev;
this.tenant = $tenant;
this.name = $name;
this.collaborators = $collaborators;
this.manager = $manager;
this.addressID = $addressID;
this.topDepartment = $topDepartment;
this.attachedTo = $attachedTo;
}
} |
package interfaces;
import exceptions.BillNotFoundException;
import exceptions.UnableToAddProductToBillException;
import exceptions.UnableToCalculatePriceException;
import exceptions.UnableToCloseBillException;
import models.Bill;
import models.Product;
/**
 * Business-logic contract for bills: adding products, pricing,
 * closing and printing.
 */
public interface IBillLogic {
    /**
     * Adds the product identified by {@code name} to {@code bill}.
     *
     * @param bill the bill to extend
     * @param name the product name to resolve and add
     * @return the product that was added
     * @throws UnableToAddProductToBillException if the product cannot be added
     */
    Product addProductToBill(Bill bill, String name)
            throws UnableToAddProductToBillException;

    /**
     * Closes {@code bill} so it accepts no further changes.
     *
     * @throws UnableToCloseBillException if the bill cannot be closed
     */
    void closeBill(Bill bill) throws UnableToCloseBillException;

    /**
     * Computes the total price of {@code bill}.
     *
     * @return the total amount
     * @throws UnableToCalculatePriceException if the price cannot be computed
     */
    double calculateAmountPrice(Bill bill)
            throws UnableToCalculatePriceException;

    /**
     * Renders {@code bill} as a printable string.
     *
     * @throws BillNotFoundException if the bill does not exist
     */
    String printBill(Bill bill) throws BillNotFoundException;
}
|
// Doxygen-generated navigation index for Layer.hpp.
// Each entry is [display name, documentation page, member-index page id]
// (null when the symbol has no member index). Do not edit by hand.
var _layer_8hpp =
[
  [ "InputSlot", "classarmnn_1_1_input_slot.xhtml", "classarmnn_1_1_input_slot" ],
  [ "OutputSlot", "classarmnn_1_1_output_slot.xhtml", "classarmnn_1_1_output_slot" ],
  [ "Layer", "classarmnn_1_1_layer.xhtml", "classarmnn_1_1_layer" ],
  [ "BindableLayer", "classarmnn_1_1_bindable_layer.xhtml", "classarmnn_1_1_bindable_layer" ],
  [ "LayerPriority", "_layer_8hpp.xhtml#a419086ecb4dc9d0f9e5d8933c87e2ea2", null ]
];
<filename>src/components/icon/assets/tokenKeyword.tsx
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
// THIS IS A GENERATED FILE. DO NOT MODIFY MANUALLY. @see scripts/compile-icons.js
import * as React from 'react';
interface SVGRProps {
title?: string;
titleId?: string;
}
// Generated SVG icon component for the "keyword" token type.
// `title`/`titleId` let callers attach an accessible name to the icon;
// remaining props are spread onto the <svg> root.
const EuiIconTokenKeyword = ({
  title,
  titleId,
  ...props
}: React.SVGProps<SVGSVGElement> & SVGRProps) => (
  <svg
    xmlns="http://www.w3.org/2000/svg"
    width={16}
    height={16}
    viewBox="0 0 16 16"
    aria-labelledby={titleId}
    {...props}
  >
    {title ? <title id={titleId}>{title}</title> : null}
    <path d="M10.33 10.28c-.12.04-.29.07-.42.07-.23 0-.43-.08-.53-.3l-.63-1.34 2.32-2.81H9.3L7.76 7.93h-.09L8.22 4H6.59l-1.05 7.5h1.63l.27-1.94h.1l.43 1.12c.27.71.74.92 1.33.92.23 0 .6-.04.86-.11l.17-1.21z" />
  </svg>
);

// Generic export name expected by the icon registry/loader.
export const icon = EuiIconTokenKeyword;
|
#!/usr/bin/env bash
set -eux

# Invoke the architecture-specific dump-ruby27 Lambda and save its response
# to out.json.
# Usage: ./script.sh <arch>  (an underscore in <arch> is mapped to a hyphen,
# since the function-naming convention uses hyphens).
ARCH=$1
# Fixed misspelled variable name (was FUNCION_NAME).
FUNCTION_NAME=dump-ruby27-${ARCH/_/-}
aws lambda invoke \
  --region us-east-1 \
  --function-name "$FUNCTION_NAME" \
  --payload '{}' out.json
|
#!/usr/bin/env bash
# Bash and Dash compatible.
# Installs prerequisites, renders a Click router template and launches
# Click on DPDK. Re-executes itself under sudo when not already root.

TEMPLATE_DIR=/proj/edgect/templates/
BURST=0

if test $# -eq 0; then
    echo "ERROR: Not enough arguments given."
    "$0" -h
    exit 1
fi

TEMPLATE_GIVEN=false
# NOTE(review): "$@" collapses into a single string here; kept this way
# because dash has no arrays, but arguments containing spaces will not
# survive the sudo re-exec below.
ARGS="$@"

# Parse options; anything that is not an option is the template name.
while test $# -gt 0; do
    case "$1" in
        -h|--help)
            echo "$0: Install necessary packages, configure click and start DPDK click"
            echo "Usage: $0 [-p|--path path_to_templates] template_name"
            echo -e "\t-h, --help\tProduce this help message and exit."
            echo -e "\t-p, --path\tLook for Click templates in this directory."
            echo -e "\t\t\tWithout this option, script will search for templates in ${TEMPLATE_DIR}"
            echo -e "\t-b, --burst\tProvide a burst parameter to DPDK."
            echo -e "\t\t\tAllows controlling number of packets to send to the NIC."
            exit 0
            ;;
        -p|--path)
            shift
            if test $# -gt 0; then
                TEMPLATE_DIR=$1
            else
                echo "ERROR: No path given to -p|--path option"
                exit 1
            fi
            shift
            ;;
        -b|--burst)
            shift
            if test $# -gt 0; then
                BURST=$1
            else
                echo "ERROR: No burst value provided"
                exit 1
            fi
            shift
            ;;
        *)
            # Positional argument: the template name (only one allowed).
            if $TEMPLATE_GIVEN; then
                echo "ERROR: Given multiple templates?"
                "$0" -h
                exit 1
            fi
            TEMPLATE_GIVEN=true
            TEMPLATE=$1
            shift
            ;;
    esac
done

# Validate the template selection before doing anything destructive.
if ! $TEMPLATE_GIVEN; then
    echo "ERROR: No template given."
    exit 1
fi
if [ ! -d "$TEMPLATE_DIR" ]; then
    echo "ERROR: No such directory: \"$TEMPLATE_DIR\""
    exit 1
fi
if [ ! -d "$TEMPLATE_DIR/$TEMPLATE" ]; then
    echo "ERROR: No such template directory found: \"$TEMPLATE_DIR/$TEMPLATE\""
    exit 1
fi
if [ ! -f "$TEMPLATE_DIR/$TEMPLATE/vrouter.template" ]; then
    echo "ERROR: Click template ($TEMPLATE_DIR/$TEMPLATE/vrouter.template) not found."
    exit 1
fi

echo "INFO: Given template $TEMPLATE and dir $TEMPLATE_DIR"

# If we weren't given permission, we're taking it anyway!
# BAD!, but hey, our script is a special snowflake.
# (Check if we're running with the right privs, and if not, try running ourselves again with perms.)
# (Do dash compliant $EUID check)
if [ `id -u` -ne 0 ]; then
    echo "INFO: Rerunning script with sudo privs."
    echo "sudo $0 $ARGS"
    exec sudo "$0" $ARGS
fi

# Install the Python helpers needed by updateClickConfig.py, then render
# the template into /tmp where the config generator expects it.
apt-get update
apt-get install python-netaddr python-netifaces -y;
cp $TEMPLATE_DIR/$TEMPLATE/vrouter.template /tmp
python /proj/edgect/exp_scripts/updateClickConfig.py --burst $BURST

# Kill possibly lingering instances.
# If we end up killing click, we need to wait for clean up or we can't start a new click.
# (e.g. without the pause, starting click will fail and we can't run this script back-to-back)
KILL_COUNT=`pkill --euid 0 -c click`
if [ $KILL_COUNT -gt 0 ]; then
    echo "INFO: Killed $KILL_COUNT click processes"
    # Hack - surely a better way:
    sleep 5
fi

# /click is the Click control socket; remove any stale one, then launch
# Click detached with all output captured in /tmp/click.log.
rm -f /click
click --dpdk -c 0xffffff -n 4 -- -u /click /tmp/vrouter.click >/tmp/click.log 2>&1 < /dev/null &
|
#!/bin/bash
# Fetch the localization tutorial bag file (ROS recording) from the
# AASS lab server into the current directory.
wget http://aass.oru.se/Research/Learning/data/tutorials/localization.bag
|
<filename>development/src/main/java/net/community/chest/net/proto/text/ssh/SSHPacketHeader.java<gh_stars>1-10
/*
*
*/
package net.community.chest.net.proto.text.ssh;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Serializable;
import net.community.chest.CoVariantReturn;
import net.community.chest.io.encode.ElementEncoder;
import net.community.chest.lang.PubliclyCloneable;
/**
* <P>Copyright as per GPLv2</P>
*
* @author <NAME>.
* @since Jul 2, 2009 2:36:17 PM
*/
/**
 * SSH binary packet header: a 32-bit packet length followed by a one-byte
 * padding length. {@link #read(InputStream)} and {@link #write(OutputStream)}
 * cover exactly these two fields; the payload bytes are carried separately
 * (see {@link #getPayloadData()}) and are NOT serialized by this class.
 */
public class SSHPacketHeader implements ElementEncoder<SSHPacketHeader>,
        PubliclyCloneable<SSHPacketHeader>,
        Serializable {
    // Serialization version pin for the Serializable contract.
    private static final long serialVersionUID = -8162865449387179771L;

    public SSHPacketHeader ()
    {
        super();
    }

    // Total packet length as read from the wire (uint32 on the wire,
    // stored in a signed int).
    private int _length;

    public int getLength ()
    {
        return _length;
    }

    public void setLength (int length)
    {
        _length = length;
    }

    // Padding length: a single byte on the wire, widened to short.
    private short _padLength;

    public short getPadLength ()
    {
        return _padLength;
    }

    public void setPadLength (short padLength)
    {
        _padLength = padLength;
    }

    /**
     * @return packet length minus padding minus one — presumably the one
     * accounts for the pad-length byte itself; confirm against SSHProtocol.
     * May be negative if the fields are inconsistent; no validation is done.
     */
    public int getPayloadLength ()
    {
        return getLength() - getPadLength() - 1;
    }

    // NOTE !!! not read/written - also, array length not necessarily exactly of payload length
    private byte[] _payloadData;

    public byte[] getPayloadData ()
    {
        return _payloadData;
    }

    public void setPayloadData (byte[] payloadData)
    {
        _payloadData = payloadData;
    }

    /*
     * @see net.community.chest.io.encode.ElementEncoder#read(java.io.InputStream)
     */
    @Override
    public SSHPacketHeader read (InputStream in) throws IOException
    {
        // Wire order: uint32 length, then one pad-length byte.
        setLength(SSHProtocol.readUint32(in));
        setPadLength(SSHProtocol.readByte(in));
        return this;
    }

    // Convenience constructor that populates the header from the stream.
    // NOTE(review): calls the overridable read() from a constructor, which
    // is fragile if this class is subclassed.
    public SSHPacketHeader (InputStream in) throws IOException
    {
        read(in);
    }

    /*
     * @see net.community.chest.io.encode.ElementEncoder#write(java.io.OutputStream)
     */
    @Override
    public void write (OutputStream out) throws IOException
    {
        SSHProtocol.writeUint32(out, getLength());
        // Mask to the low byte so the short is emitted as a single octet.
        out.write(getPadLength() & 0x00FF);
    }

    /*
     * @see java.lang.Object#clone()
     */
    @Override
    @CoVariantReturn
    public SSHPacketHeader clone () throws CloneNotSupportedException
    {
        return getClass().cast(super.clone());
    }

    /*
     * @see java.lang.Object#equals(java.lang.Object)
     */
    // Equality considers only length and pad length; payload data is
    // intentionally excluded (see the NOTE on _payloadData above).
    @Override
    public boolean equals (Object obj)
    {
        if (!(obj instanceof SSHPacketHeader))
            return false;
        if (this == obj)
            return true;

        final SSHPacketHeader h=(SSHPacketHeader) obj;
        return (h.getLength() == getLength())
            && (h.getPadLength() == getPadLength())
            ;
    }

    /*
     * @see java.lang.Object#hashCode()
     */
    // Consistent with equals(): derived from the same two fields.
    @Override
    public int hashCode ()
    {
        return getLength() + getPadLength();
    }

    /*
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString ()
    {
        return "Length=" + getLength() + ";Pad=" + getPadLength();
    }
}
|
import styled from 'styled-components'
import Header from '../components/header'
import Layout from '../components/layout'
import Message from '../components/message'
import SEO from '../components/seo'
import type { NextPage } from 'next'
// Heading style for the 404 message.
const Title = styled.h1`
  font-size: 2rem;
  font-weight: 900;
  margin: 10px 24px 0;
`

// Body-copy style for the 404 explanation text.
const Description = styled.p`
  font-size: 1.2rem;
  margin: 10px 24px 0;
`

// Custom Next.js 404 page: standard layout/header wrapping a localised
// (Japanese) "page not found" message.
const NotFound: NextPage = () => (
  <Layout>
    <SEO title="404 Not found" />
    <Header />
    <Message>
      <Title>404 Not found</Title>
      <Description>
        おさがしのページを見つけることができませんでした。
      </Description>
    </Message>
  </Layout>
)

export default NotFound
|
#!/usr/bin/env bash

# Resolve the directory containing this script.
# Fixed: ${BASH_SOURCE[0]} needs braces — "$BASH_SOURCE[0]" expands
# $BASH_SOURCE and then appends a literal "[0]" to the path.
SCRIPT_DIR="$(
  cd "$(dirname "${BASH_SOURCE[0]}")"
  pwd
)"

# version.sh defines ${VERSION}; path quoted in case of spaces.
. "${SCRIPT_DIR}/version.sh"

# Re-tag the locally built builder image with its canonical registry name
# and publish it.
docker tag "kubevirt/builder:${VERSION}" "docker.io/kubevirt/builder:${VERSION}"
docker push "docker.io/kubevirt/builder:${VERSION}"
|
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from .api import api_router, root_router
from .app_settings import get_settings # noqa
# Application-wide settings object (get_settings is imported above).
settings = get_settings()

# FastAPI entry point; the OpenAPI schema is served under the versioned API prefix.
app = FastAPI(title=settings.PROJECT_NAME, openapi_url=f"{settings.API_V1_STR}/openapi.json")

# Set all CORS enabled origins
# NOTE(review): allow_origin_regex expects a regex *string* — confirm
# BACKEND_CORS_ORIGINS is a pattern and not a list of origins.
if settings.BACKEND_CORS_ORIGINS:
    app.add_middleware(
        CORSMiddleware,
        allow_origin_regex=settings.BACKEND_CORS_ORIGINS,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

# Unversioned root endpoints first, then the versioned API under its prefix.
app.include_router(root_router)
app.include_router(api_router, prefix=settings.API_V1_STR)
|
#!/bin/bash
# CI step: copy the assisted-test-infra checkout to a remote Packet/Equinix
# host over SSH, build the environment there and run the tests. All remote
# output is piped through sed to mask anything that looks like a pull secret.

set -o nounset
set -o errexit
set -o pipefail

echo "************ baremetalds assisted setup command ************"

# Fetch packet basic configuration
# shellcheck source=/dev/null
source "${SHARED_DIR}/packet-conf.sh"

export CI_CREDENTIALS_DIR=/var/run/assisted-installer-bot

# Copy assisted source from current directory to the remote server
tar -czf - . | ssh "${SSHOPTS[@]}" "root@${IP}" "cat > /root/assisted.tar.gz"

# Prepare configuration and run
scp "${SSHOPTS[@]}" "${CLUSTER_PROFILE_DIR}/pull-secret" "root@${IP}:pull-secret"

# For non-local runs, point the remote at the hosted assisted service and
# ship the matching credentials. Only "production" is recognised here.
if [ "${ENVIRONMENT}" != "local" ]; then
    if [ "${ENVIRONMENT}" = "production" ]; then
        remote_service_url="https://api.openshift.com"
        pull_secret_file="${CI_CREDENTIALS_DIR}/prod-pull-secret"
    else
        echo "Unknown environment ${ENVIRONMENT}"
        exit 1
    fi

    scp "${SSHOPTS[@]}" "${CI_CREDENTIALS_DIR}/offline-token" "root@${IP}:offline-token"
    scp "${SSHOPTS[@]}" "${pull_secret_file}" "root@${IP}:pull-secret"

    echo "export REMOTE_SERVICE_URL=${remote_service_url}" >> "${SHARED_DIR}/assisted-additional-config"
    echo "export NO_MINIKUBE=true" >> "${SHARED_DIR}/assisted-additional-config"
    echo "export MAKEFILE_TARGET='create_full_environment test_parallel'" >> "${SHARED_DIR}/assisted-additional-config"
    WORKER_DISK_SIZE=$(echo 120G | numfmt --from=iec)
    echo "export WORKER_DISK=${WORKER_DISK_SIZE}" >> "${SHARED_DIR}/assisted-additional-config"
fi

# Additional mechanism to inject assisted additional variables directly
# from a multistage step configuration.
# Backward compatible with the previous approach based on creating the
# assisted-additional-config file from a multistage step command
if [[ -n "${ASSISTED_CONFIG:-}" ]]; then
    readarray -t config <<< "${ASSISTED_CONFIG}"
    for var in "${config[@]}"; do
        if [[ ! -z "${var}" ]]; then
            echo "export ${var}" >> "${SHARED_DIR}/assisted-additional-config"
        fi
    done
fi

if [[ -e "${SHARED_DIR}/assisted-additional-config" ]]
then
    scp "${SSHOPTS[@]}" "${SHARED_DIR}/assisted-additional-config" "root@${IP}:assisted-additional-config"
fi

# Remote provisioning + test run. Hard-killed (SIGKILL) after 175 minutes.
# Unescaped ${...} below expand locally (Prow variables); \${...} and \$(...)
# expand on the remote host.
timeout -s 9 175m ssh "${SSHOPTS[@]}" "root@${IP}" bash - << EOF |& sed -e 's/.*auths\{0,1\}".*/*** PULL_SECRET ***/g'

set -xeuo pipefail

# Some Packet images have a file /usr/config left from the provisioning phase.
# The problem is that sos expects it to be a directory. Since we don't care
# about the Packet provisioner, remove the file if it's present.
test -f /usr/config && rm -f /usr/config || true

echo 'baseurl=http://vault.centos.org/\$contentdir/\$releasever/BaseOS/\$basearch/os/' >> /etc/yum.repos.d/CentOS-Linux-BaseOS.repo
echo 'baseurl=http://vault.centos.org/\$contentdir/\$releasever/extras/\$basearch/os/' >> /etc/yum.repos.d/CentOS-Linux-Extras.repo
echo 'baseurl=http://vault.centos.org/\$contentdir/\$releasever/AppStream/\$basearch/os/' >> /etc/yum.repos.d/CentOS-Linux-AppStream.repo

dnf install -y git sysstat sos jq
systemctl start sysstat

mkdir -p /tmp/artifacts

REPO_DIR="/home/assisted"
mkdir -p "\${REPO_DIR}"
mkdir -p "\${REPO_DIR}"/minikube_home
echo "export MINIKUBE_HOME=\${REPO_DIR}/minikube_home" >> /root/config

# NVMe makes it faster
NVME_DEVICE="/dev/nvme0n1"
if [ -e "\$NVME_DEVICE" ];
then
  mkfs.xfs -f "\${NVME_DEVICE}"
  mount "\${NVME_DEVICE}" "\${REPO_DIR}"
fi

tar -xzvf assisted.tar.gz -C "\${REPO_DIR}"
chown -R root:root "\${REPO_DIR}"
cd "\${REPO_DIR}"

set +x
echo "export PULL_SECRET='\$(cat /root/pull-secret)'" >> /root/config
echo "export OFFLINE_TOKEN='\$(cat /root/offline-token)'" >> /root/config
set -x

# Save Prow variables that might become handy inside the Packet server
echo "export CI=true" >> /root/config
echo "export OPENSHIFT_CI=true" >> /root/config
echo "export REPO_NAME=${REPO_NAME:-}" >> /root/config
echo "export JOB_TYPE=${JOB_TYPE:-}" >> /root/config
echo "export PULL_NUMBER=${PULL_NUMBER:-}" >> /root/config
echo "export RELEASE_IMAGE_LATEST=${RELEASE_IMAGE_LATEST}" >> /root/config

# Override default images
echo "export SERVICE=${ASSISTED_SERVICE_IMAGE}" >> /root/config
echo "export AGENT_DOCKER_IMAGE=${ASSISTED_AGENT_IMAGE}" >> /root/config
echo "export CONTROLLER_IMAGE=${ASSISTED_CONTROLLER_IMAGE}" >> /root/config
echo "export INSTALLER_IMAGE=${ASSISTED_INSTALLER_IMAGE}" >> /root/config

# Most jobs and tests don't require this image, so this allows it as optional
if [ "${PROVIDER_IMAGE}" != "${ASSISTED_CONTROLLER_IMAGE}" ];
then
  echo "export PROVIDER_IMAGE=${PROVIDER_IMAGE}" >> /root/config
fi

# expr command's return value is 1 in case of a false expression. We don't want to exit in this case.
set +e
IS_REHEARSAL=\$(expr "${REPO_OWNER:-}" = "openshift" "&" "${REPO_NAME:-}" = "release")
set -e

if [ "${JOB_TYPE:-}" = "presubmit" ] && (( ! \${IS_REHEARSAL} )); then
  if [ "${REPO_NAME:-}" = "assisted-service" ]; then
    echo "export SERVICE_BRANCH=${PULL_PULL_SHA:-master}" >> /root/config
  fi
else
  # Periodics run against latest release
  echo "export OPENSHIFT_INSTALL_RELEASE_IMAGE=${RELEASE_IMAGE_LATEST}" >> /root/config
fi

IMAGES=(${ASSISTED_AGENT_IMAGE} ${ASSISTED_CONTROLLER_IMAGE} ${ASSISTED_INSTALLER_IMAGE} ${RELEASE_IMAGE_LATEST})
CI_REGISTRIES=\$(for image in \${IMAGES}; do echo \${image} | cut -d'/' -f1; done | sort -u | paste -sd "," -)

echo "export PUBLIC_CONTAINER_REGISTRIES=quay.io,\${CI_REGISTRIES}" >> /root/config

echo "export ASSISTED_SERVICE_HOST=${IP}" >> /root/config

echo "export CHECK_CLUSTER_VERSION=True" >> /root/config

echo "export TEST_TEARDOWN=false" >> /root/config
echo "export TEST_FUNC=test_install" >> /root/config

echo "export INSTALLER_KUBECONFIG=\${REPO_DIR}/build/kubeconfig" >> /root/config

if [[ -e /root/assisted-additional-config ]]; then
  cat /root/assisted-additional-config >> /root/config
fi

source /root/config

# TODO: remove once we finished moving to the new dockerfile
export TEST_INFRA_DOCKERFILE=Dockerfile.assisted-test-infra

make \${MAKEFILE_TARGET:-create_full_environment run test_parallel}

EOF

# Optional post-install hook provided by the step configuration.
if [[ -n "${POST_INSTALL_COMMANDS:-}" ]]; then
    echo "${POST_INSTALL_COMMANDS}" > "${SHARED_DIR}/assisted-post-install.sh"
    scp "${SSHOPTS[@]}" "${SHARED_DIR}/assisted-post-install.sh" "root@${IP}:assisted-post-install.sh"
fi

# Post-installation commands
ssh "${SSHOPTS[@]}" "root@${IP}" bash - << EOF |& sed -e 's/.*auths\{0,1\}".*/*** PULL_SECRET ***/g'
set -xeuo pipefail
cd /home/assisted
source /root/config
echo "export KUBECONFIG=/home/assisted/build/kubeconfig" >> /root/.bashrc
export KUBECONFIG=/home/assisted/build/kubeconfig
if [[ -e "/root/assisted-post-install.sh" ]]; then
    source "/root/assisted-post-install.sh"
fi
EOF
|
public class DiceOddEven {
    /**
     * Reports whether a fixed dice roll is odd or even.
     */
    public static void main(String[] args) {
        final int diceNumber = 3;

        // Determining if the dice roll is odd or even
        final String verdict = (diceNumber % 2 == 0)
                ? "The number is even."
                : "The number is odd.";
        System.out.println(verdict);
    }
}
#!/bin/bash
# download <url> <basename>: fetch <basename>.tgz from <url> unless it is
# already present, then extract it unless the <basename> directory exists.
# Fixed: all expansions quoted so names containing spaces/globs are safe.
download() {
    FILE="$2.tgz"
    if [ -f "$FILE" ]; then
        echo "File $FILE exists. Extracting.."
        if [ -d "$2" ]; then
            echo "File already extracted"
        else
            tar -zvxf "$FILE"
        fi
    else
        echo "File $FILE does not exist. Downloading solr from $1..."
        curl -O "$1"
        tar -zxf "$FILE"
    fi
    echo "Downloaded!"
}
SOLR_VERSION="6.6.1"
SOLR_DIR="solr-$SOLR_VERSION"
# NOTE(review): SOLR_CORE is never used below — confirm whether a caller
# or sourcing script depends on it before removing.
SOLR_CORE="testannotations"

# Default SAS_HOME to the current directory when no argument is given.
if [ -z "$1" ]; then
    SAS_HOME="$(pwd)"
else
    SAS_HOME="$1"
fi

download "http://archive.apache.org/dist/lucene/solr/$SOLR_VERSION/solr-$SOLR_VERSION.tgz" "solr-$SOLR_VERSION"

# Fixed: abort if cd fails instead of running ./bin/solr from the wrong
# directory; paths quoted throughout.
cd "$SOLR_DIR" || exit 1
./bin/solr -e cloud -noprompt
./bin/solr create_collection -c test -d "$SAS_HOME/src/main/resources/solr" -shards 2
|
<reponame>f96q/kptboard<filename>spec/controllers/users/sessions_controller_spec.rb
require 'rails_helper'
# Placeholder spec for Users::SessionsController — no examples defined yet.
describe Users::SessionsController do
end
|
package pkg
import (
"context"
"github.com/sanjid133/gopher-love/pkg/system"
"github.com/sanjid133/gopher-love/util"
"os"
"path/filepath"
"strings"
)
// Love abstracts a repository-hosting platform that can list an
// organisation's repositories and "send love" (a reaction) to them.
type Love interface {
	// Initialize prepares the implementation with its secrets and
	// returns the ready-to-use instance.
	Initialize(config *system.SecretConfig) (Love, error)
	//Decode(url string) (*Repository, error)
	// GetOrgRepos lists all repositories belonging to org.
	GetOrgRepos(org string) ([]*Repository, error)
	// IsLoved reports whether repo has already received love.
	IsLoved(repo *Repository) (bool, error)
	// SendLove posts the love/reaction to repo.
	SendLove(repo *Repository) error
}
// LoveBag is a source of repositories to love, backed by a dependency
// file inside a project directory (see DetectManager).
type LoveBag interface {
	// Initialize binds the bag to a project directory.
	Initialize(directory string) LoveBag
	// File returns the dependency/marker file name this bag reads.
	File() string
	// Read parses the file into the repositories it references.
	Read() ([]*Repository, error)
}
// Repository identifies a hosted source repository and, optionally,
// a mirror location.
type Repository struct {
	Platform  string // platform id resolved from the host (see util.GetPlatform)
	Owner     string // organisation or user segment of the URL
	Name      string // repository name segment of the URL
	Url       string // URL with the https:// prefix stripped (see UrlToRepo)
	HasMirror bool
	MirrorUrl string
}
// DetectManager returns the name of the first registered manager whose
// marker file exists inside directory, or "" when none matches.
// NOTE(review): iterates the package-level `manager` registry (defined
// elsewhere); Go map iteration order is random, so when several managers
// match, the winner is arbitrary.
func DetectManager(ctx context.Context, directory string) string {
	//managers := GetAllRegistereredManager()
	for m := range manager {
		file := manager[m](ctx).File()
		if _, err := os.Stat(filepath.Join(directory, file)); err == nil {
			return m
		}
	}
	return ""
}
// UrlToRepo parses a repository URL of the form "<host>/<owner>/<name>"
// (with an optional http:// or https:// scheme) into a Repository.
// Missing trailing components are left empty.
func UrlToRepo(url string) *Repository {
	// Strip the scheme so parts[0] is the host. The original handled only
	// "https://"; plain "http://" URLs previously leaked the scheme into
	// the host segment (and thus into Platform).
	url = strings.TrimPrefix(url, "https://")
	url = strings.TrimPrefix(url, "http://")
	repo := &Repository{
		Url: url,
	}
	parts := strings.Split(url, "/")
	if len(parts) > 0 {
		// Platform is resolved from the host (e.g. "github.com").
		repo.Platform = util.GetPlatform(parts[0])
	}
	if len(parts) > 1 {
		repo.Owner = parts[1]
	}
	if len(parts) > 2 {
		repo.Name = parts[2]
	}
	return repo
}
// reaction is the pool of emoji shortcodes used when sending love.
var reaction = []string{
	":kissing_heart:",
	":kissing_closed_eyes:",
	":kissing_smiling_eyes:",
	":yellow_heart:",
	":blue_heart:",
	":purple_heart:",
	":heart:",
	":green_heart:",
	":heartpulse:",
	":heartbeat:",
	":two_hearts:",
	":revolving_hearts:",
	":sparkling_heart:",
	":gift_heart:",
}
|
#!/bin/sh
# Container entrypoint: wait ${JHIPSTER_SLEEP} seconds (gives dependencies
# such as the database time to come up), then replace this shell with the JVM.
echo "The application will start in ${JHIPSTER_SLEEP}s..." && sleep ${JHIPSTER_SLEEP}
# /dev/./urandom keeps SecureRandom from blocking on entropy in containers.
exec java ${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom -cp /app/resources/:/app/classes/:/app/libs/* "com.research.demo.ResearchDemoApp" "$@"
|
#!/bin/bash
source @includes.sh

# Fixed: the help text previously advertised [jpg] as the default output
# format while the code defaults to png; the text now matches the code.
echo '###################################################'
echo '# Description: Extract image sequence from a video'
echo '# Usage: $ ./videoToImageSequence.sh /path/to/video.mov [png]'
echo '# Param 1: Video file'
echo '# Param 2 [Optional]: Output file format [png]'
echo '# Requires: ffmpeg'
echo '###################################################'
echoNewline

################################################################################
################################################################################
# check parameters & set defaults

if [[ $1 == "" ]] ; then
    echoError '1st arg must be a video file'
    exit 1
fi

# Default output format; keep the help text above in sync with this value.
outputFormat="png"
if [[ $2 == "" ]] ; then
    echoInfo "[Optional]: Using default image format of $outputFormat"
else
    outputFormat=$2
fi

################################################################################
################################################################################
# get filename

filename=$1
# NOTE(review): `extension` (helper from @includes.sh) result is unused here.
extension=$(extension "$filename")
echoInfo "Saving images from video: $filename"

# create output directory (-p: don't fail when re-running on the same video)
newDir="$filename-frames"
mkdir -p "$newDir"
echoInfo "Created directory: $newDir"

# do conversion (paths quoted so filenames with spaces work)
if [ "$outputFormat" = "jpg" ]; then
    # jpeg quality info: http://stackoverflow.com/questions/10225403/how-can-i-extract-a-good-quality-jpeg-image-from-an-h264-video-file-with-ffmpeg
    ffmpeg -i "$filename" -qscale:v 1 "$newDir/ffout%08d.jpg"
else
    ffmpeg -i "$filename" "$newDir/ffout%08d.png"
fi

################################################################################
################################################################################
# complete

echoSuccess "Extracted video frames: \n# $newDir"
|
/*
* Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "otbWrapperOutputProcessXMLParameter.h"
#include "otbWrapperChoiceParameter.h"
#include "otbWrapperListViewParameter.h"
#include "otbWrapperDirectoryParameter.h"
#include "otbWrapperEmptyParameter.h"
#include "otbWrapperInputFilenameParameter.h"
#include "otbWrapperInputFilenameListParameter.h"
#include "otbWrapperOutputFilenameParameter.h"
#include "otbWrapperInputVectorDataParameter.h"
#include "otbWrapperInputVectorDataListParameter.h"
#include "otbWrapperOutputVectorDataParameter.h"
#include "otbWrapperRadiusParameter.h"
#include "otbWrapperStringListParameter.h"
#include "otbWrapperInputImageParameter.h"
#include "otbWrapperInputImageListParameter.h"
#include "otbWrapperComplexInputImageParameter.h"
#include "otbWrapperOutputImageParameter.h"
#include "otbWrapperComplexOutputImageParameter.h"
#include "otbWrapperRAMParameter.h"
#include "itksys/SystemTools.hxx"
namespace otb
{
namespace Wrapper
{
// Constructor: registers "outxml" as an optional, initially inactive
// output-role parameter used to dump the application state to an XML file.
OutputProcessXMLParameter::OutputProcessXMLParameter()
  : m_Node(nullptr)
  , m_Appli()
{
  this->SetKey("outxml");
  this->SetName("Save otb application to xml file");
  this->SetDescription("Save otb application to xml file");
  this->SetMandatory(false);
  this->SetActive(false);
  this->SetRole(Role_Output);
}
// Trivial destructor: nothing is released here; m_Node only points into
// XML trees built (and owned) locally inside Write().
OutputProcessXMLParameter::~OutputProcessXMLParameter()
{
}
// Map an ImagePixelType enumerator to the text written into the XML file.
// Unknown enumerators fall back to "float", exactly as the original
// switch default did.
std::string
OutputProcessXMLParameter::pixelTypeToString(ImagePixelType pixType)
{
  switch (pixType)
    {
    case ImagePixelType_uint8:
      return "uint8";
    case ImagePixelType_int16:
      return "int16";
    case ImagePixelType_uint16:
      return "uint16";
    case ImagePixelType_int32:
      return "int32";
    case ImagePixelType_uint32:
      return "uint32";
    case ImagePixelType_float:
      return "float";
    case ImagePixelType_double:
      return "double";
    default:
      return "float";
    }
}
// Create a child element named `name` under `parent`; when `value` is
// non-empty, attach it as the element's text content. Returns the new
// element (the document tree takes ownership through LinkEndChild).
TiXmlElement* OutputProcessXMLParameter::AddChildNodeTo(TiXmlElement *parent, std::string name, std::string value)
{
  TiXmlElement *child = new TiXmlElement(name.c_str());
  parent->LinkEndChild(child);
  if (!value.empty())
    {
    child->LinkEndChild(new TiXmlText(value.c_str()));
    }
  return child;
}
// Serialize the whole application (name, doc, parameter values) into the
// XML file set via SetFileName(). Throws via itkExceptionMacro when the
// filename is empty or does not end in ".xml".
void
OutputProcessXMLParameter::Write(Application::Pointer app)
{
  // Check if the filename is not empty
  if(m_FileName.empty())
    itkExceptionMacro("The XML output FileName is empty, please set the filename via the method SetFileName");

  // Check that the right extension is given : expected .xml */
  if (itksys::SystemTools::GetFilenameLastExtension(m_FileName) != ".xml")
    {
    itkExceptionMacro(<<itksys::SystemTools::GetFilenameLastExtension(m_FileName)
                      <<" is a wrong Extension FileName : Expected .xml");
    }

  // start creating XML file
  TiXmlDocument doc;

  TiXmlDeclaration* decl = new TiXmlDeclaration( "1.0", "", "" );
  doc.LinkEndChild( decl );

  // Root element carries the OTB version used to produce the file.
  TiXmlElement * n_OTB = new TiXmlElement( "OTB");
  doc.LinkEndChild( n_OTB );

  std::string version = OTB_VERSION_STRING;
  AddChildNodeTo(n_OTB, "version", version);

  // Parse application
  TiXmlElement *n_App = ParseApplication(app);
  n_OTB->LinkEndChild(n_App);

  // Finally, write xml contents to file
  doc.SaveFile( m_FileName.c_str() );
}
// Build the <application> element: name, description, documentation
// (including tags), then all parameter values via the recursive
// ParseGroup() walk. m_Appli/m_Node are temporary state shared with
// ParseGroup() and reset before returning.
TiXmlElement*
OutputProcessXMLParameter::ParseApplication(Application::Pointer app)
{
  m_Appli = app;

  TiXmlElement * n_App = new TiXmlElement("application");
  m_Node = n_App;

  AddChildNodeTo(n_App, "name", app->GetName());
  AddChildNodeTo(n_App, "descr", app->GetDescription());

  // Documentation sub-tree.
  TiXmlElement *n_AppDoc;
  n_AppDoc = AddChildNodeTo(n_App, "doc");
  AddChildNodeTo(n_AppDoc, "name", app->GetDocName());
  AddChildNodeTo(n_AppDoc, "longdescr", app->GetDocLongDescription());
  AddChildNodeTo(n_AppDoc, "authors", app->GetDocAuthors());
  AddChildNodeTo(n_AppDoc, "limitations", app->GetDocLimitations());
  AddChildNodeTo(n_AppDoc, "seealso", app->GetDocSeeAlso());

  // One <tag> child per documentation tag.
  TiXmlElement *n_DocTags;
  n_DocTags = AddChildNodeTo(n_AppDoc, "tags");
  std::vector<std::string> docTagList = app->GetDocTags();
  std::vector<std::string>::iterator tagIt;
  for(tagIt = docTagList.begin(); tagIt!= docTagList.end(); ++tagIt)
    {
    std::string tag = *tagIt;
    AddChildNodeTo(n_DocTags, "tag", tag);
    }

  // recursive call to ParseGroup(), starting with "" (i.e. GetParameterList())
  this->ParseGroup(std::string(""));

  // reset temporary members
  m_Appli = nullptr;
  m_Node = nullptr;

  return n_App;
}
// Recursively walk the parameter group identified by `group` ("" means the
// application's root parameter list) and append one <parameter> element to
// m_Node for every enabled, user-set input parameter. Group and Choice
// parameters trigger recursion instead of (or in addition to) emission.
void
OutputProcessXMLParameter::ParseGroup(const std::string& group)
{
  std::string prefix(group);
  ParameterGroup::Pointer paramGroup = m_Appli->GetParameterList();
  if (!group.empty())
    {
    prefix += '.';
    Parameter* rawParam = paramGroup->GetParameterByKey(group);
    ParameterGroup* rawParamAsGroup = dynamic_cast<ParameterGroup*>(rawParam);
    if (rawParamAsGroup)
      {
      paramGroup = rawParamAsGroup;
      }
    else
      {
      itkExceptionMacro("Function ParseGroup() expected a group parameter for key "<<group);
      }
    }

  std::vector<std::string> paramList = paramGroup->GetParametersKeys(false);

  // Iterate through parameters
  for (std::vector<std::string>::const_iterator it = paramList.begin(); it!= paramList.end(); ++it)
    {
    std::string key = prefix + *it;
    Parameter *param = paramGroup->GetParameterByKey(*it);
    std::string paramName = param->GetName();
    ParameterType type = m_Appli->GetParameterType(key);
    std::string typeAsString = paramGroup->GetParameterTypeAsString(type);

    // if param is a Group, inspect this group with a recursive call
    if (type == ParameterType_Group)
      {
      this->ParseGroup(key);
      }
    else
      {
      // Only user-set, enabled input parameters are written out.
      bool paramExists = m_Appli->HasUserValue(key) &&
                         m_Appli->IsParameterEnabled(key) &&
                         m_Appli->GetParameterRole(key) == Role_Input;
      // Never serialize the outxml parameter itself.
      if ( type == ParameterType_OutputProcessXML )
        {
        paramExists = false;
        }
      std::string emptyValue;
      if (type == ParameterType_Empty)
        {
        EmptyParameter* eParam = dynamic_cast<EmptyParameter *> (param);
        if(eParam!=nullptr)
          {
          //Don't use m_Appli->HasUserValue which returns false always because of
          //EmptyParameter::HasValue() is false for EmptyParameter
          if(eParam->HasUserValue())
            {
            paramExists = true;
            emptyValue = "false";
            if( eParam->GetActive() )
              {
              emptyValue = "true";
              }
            }
          }
        }
      // RAM is always written, even without an explicit user value.
      if(type == ParameterType_RAM)
        {
        paramExists = true;
        }

      // if parameter doesn't have any value then skip it
      if (paramExists)
        {
        std::vector<std::string> values;
        std::string value;
        bool hasValueList = false;
        std::string pixelTypeAsString;

        // Extract the value (or value list) in a type-appropriate way.
        if (type == ParameterType_OutputImage)
          {
          OutputImageParameter *imgParam = dynamic_cast<OutputImageParameter *>(param);
          if(imgParam!=nullptr)
            {
            value = imgParam->GetFileName();
            ImagePixelType pixType = imgParam->GetPixelType();
            pixelTypeAsString = pixelTypeToString(pixType);
            }
          }
        else if( type == ParameterType_InputImageList || type == ParameterType_InputFilenameList ||
                 type == ParameterType_InputVectorDataList || type == ParameterType_StringList ||
                 type == ParameterType_ListView )
          {
          values = m_Appli->GetParameterStringList(key);
          hasValueList = true;
          }
        else if (type == ParameterType_Int || type == ParameterType_Radius || type == ParameterType_RAM )
          {
          value = m_Appli->GetParameterAsString(key);
          }
        else if(type == ParameterType_Float)
          {
          // Full float precision so a round-trip read reproduces the value.
          std::ostringstream oss;
          oss << std::setprecision(std::numeric_limits<float>::digits10+1);
          oss << m_Appli->GetParameterFloat( key );
          value = oss.str();
          }
        else if ( type == ParameterType_String || type == ParameterType_InputFilename ||
                  type == ParameterType_Directory || type == ParameterType_InputImage ||
                  type == ParameterType_ComplexInputImage || type == ParameterType_InputVectorData ||
                  type == ParameterType_Choice || type == ParameterType_OutputVectorData ||
                  type == ParameterType_OutputFilename || type == ParameterType_Bool)
          {
          value = m_Appli->GetParameterString(key);
          }
        else if(key == "rand")
          {
          std::ostringstream strm;
          strm << m_Appli->GetParameterInt("rand");
          value = strm.str();
          }
        else if (typeAsString == "Empty")
          {
          //Nothing to do. copy emptyValue
          value = emptyValue;
          }
        else if (type == ParameterType_InputProcessXML)
          {
          continue;
          }

        //get only file name
        /*
        if(type == ParameterType_InputFilename || type == ParameterType_InputImage ||
           type == ParameterType_ComplexInputImage || type == ParameterType_InputVectorData ||
           type == ParameterType_OutputVectorData || type == ParameterType_OutputFilename)
          {
          unsigned found = value.find_last_of("/\\");
          //std::cerr << " path: " << value.substr(0,found) << '\n';
          value = value.substr(found+1);
          }
        else
          if(type == ParameterType_InputImageList || type == ParameterType_InputFilenameList ||
             type == ParameterType_InputVectorDataList)
            {
            std::vector<std::string>::iterator strIt;
            for(strIt = values.begin(); strIt != values.end(); ++strIt)
              {
              std::string val = *strIt;
              unsigned found = val.find_last_of("/\\");
              *strIt = val.substr(found+1);
              }
            }
        */

        //parameter node in xml
        TiXmlElement * n_Parameter = new TiXmlElement("parameter");
        const char * mandatory = "false";
        if( param->GetMandatory() )
          mandatory = "true";
        n_Parameter->SetAttribute("mandatory", mandatory);

        //setting parameter key as child node in parameter
        AddChildNodeTo(n_Parameter, "key", key);
        AddChildNodeTo(n_Parameter, "type", typeAsString);
        AddChildNodeTo(n_Parameter, "name", paramName);

        if(type == ParameterType_OutputImage || type == ParameterType_ComplexOutputImage)
          {
          AddChildNodeTo(n_Parameter, "pixtype", pixelTypeAsString);
          }
        if(!hasValueList)
          {
          AddChildNodeTo(n_Parameter, "value", value);
          }
        else
          {
          // List-valued parameters get a <values> wrapper with one <value> each.
          TiXmlElement *n_Values = AddChildNodeTo(n_Parameter, "values");
          std::vector<std::string>::iterator strIt;
          for(strIt = values.begin(); strIt != values.end(); ++strIt)
            {
            AddChildNodeTo(n_Values, "value",*strIt);
            }
          }
        m_Node->LinkEndChild(n_Parameter);
        }

      // dig into Choice parameter
      if (type == ParameterType_Choice)
        {
        std::string choiceGroup(key);
        choiceGroup += '.';
        choiceGroup += m_Appli->GetParameterString(key);
        this->ParseGroup(choiceGroup);
        }
      }
    }
}
} //end namespace wrapper
} //end namespace otb
|
#!/usr/bin/env bash
set -xeo pipefail
# Poll the given command line (all arguments joined into one shell string)
# once per second until it succeeds, giving up after 45 attempts. Always
# returns 0 — callers must re-check the condition if they need certainty.
# Fixed: the loop counter was misleadingly named WAIT_MAX (it is a counter,
# not a maximum) and leaked into the global scope.
function waitfor {
    local WAIT_COUNT=0
    until sh -c "$*" &> /dev/null || [ $WAIT_COUNT -eq 45 ]; do
        sleep 1
        (( WAIT_COUNT = WAIT_COUNT + 1 ))
    done
}
# EXIT-trap handler: dump everything useful for debugging a failed run
# (pods, operator/webhook logs, demo workload state, webhook secrets).
function finish {
    # NOTE(review): `history` is normally empty in non-interactive shells,
    # so this line likely prints nothing useful — consider $BASH_COMMAND
    # captured via a DEBUG trap instead.
    echo "The last command was: $(history 1 | awk '{print $2}')"
    kubectl get pods
    kubectl describe pods
    kubectl logs deployment/vault-operator
    kubectl logs --all-containers statefulset/vault
    kubectl logs -n vswh deployment/vault-secrets-webhook
    kubectl describe deployment/hello-secrets
    kubectl describe rs hello-secrets
    kubectl describe pod hello-secrets
    kubectl logs deployment/hello-secrets --all-containers
    kubectl get secret -n vswh -o yaml
}
# Dump state and logs of the security-context variant of the webhook test
# deployment (hello-secrets-seccontext) for debugging.
function check_webhook_seccontext {
  kubectl describe deployment/hello-secrets-seccontext
  kubectl describe rs hello-secrets-seccontext
  kubectl describe pod hello-secrets-seccontext
  kubectl logs deployment/hello-secrets-seccontext --all-containers
}
trap finish EXIT
# Smoke test the pure Vault Helm chart first
helm upgrade --install --wait vault ./charts/vault --set unsealer.image.tag=latest
helm delete vault
kubectl delete secret bank-vaults
# Create a resource quota in the default namespace
kubectl create quota bank-vaults --hard=cpu=4,memory=8G,pods=10,services=10,replicationcontrollers=10,secrets=15,persistentvolumeclaims=10
# Install the operators and companion
helm dependency build ./charts/vault-operator
helm upgrade --install vault-operator ./charts/vault-operator \
--set image.tag=latest \
--set image.pullPolicy=IfNotPresent \
--set etcd-operator.enabled=true \
--set etcd-operator.deployments.backupOperator=false \
--set etcd-operator.deployments.restoreOperator=false \
--wait
# Install common RBAC setup for CRs
kubectl apply -f operator/deploy/rbac.yaml
# First test: HA setup with etcd
kubectl apply -f operator/deploy/cr-etcd-ha.yaml
waitfor kubectl get etcdclusters.etcd.database.coreos.com/etcd-cluster
kubectl wait --for=condition=available --timeout=120s etcdclusters.etcd.database.coreos.com/etcd-cluster
kubectl wait --for=condition=healthy --timeout=180s vault/vault
kubectl delete -f operator/deploy/cr-etcd-ha.yaml
kubectl delete secret vault-unseal-keys
kubectl delete pvc --all
kubectl delete deployment vault-operator-etcd-operator-etcd-operator # the etcd operator is also unused from this point
# Second test: test the external secrets watcher work and match as expected
kubectl apply -f deploy/test-external-secrets-watch-deployment.yaml
kubectl wait --for=condition=healthy --timeout=120s vault/vault
test x`kubectl get pod vault-0 -o jsonpath='{.metadata.annotations.vault\.banzaicloud\.io/watched-secrets-sum}'` = "x"
kubectl delete -f deploy/test-external-secrets-watch-deployment.yaml
kubectl delete secret vault-unseal-keys
kubectl apply -f deploy/test-external-secrets-watch-secrets.yaml
kubectl apply -f deploy/test-external-secrets-watch-deployment.yaml
kubectl wait --for=condition=healthy --timeout=120s vault/vault
test x`kubectl get pod vault-0 -o jsonpath='{.metadata.annotations.vault\.banzaicloud\.io/watched-secrets-sum}'` = "xbac8dfa8bdf03009f89303c8eb4a6c8f2fd80eb03fa658f53d6d65eec14666d4"
kubectl delete -f deploy/test-external-secrets-watch-deployment.yaml
kubectl delete -f deploy/test-external-secrets-watch-secrets.yaml
kubectl delete secret vault-unseal-keys
# Third test: Raft HA setup
kubectl apply -f operator/deploy/cr-raft.yaml
kubectl wait --for=condition=healthy --timeout=120s vault/vault
kubectl delete -f operator/deploy/cr-raft.yaml
kubectl delete secret vault-unseal-keys
kubectl delete pvc --all
# Fourth test: HSM setup with SoftHSM
kubectl apply -f operator/deploy/cr-hsm-softhsm.yaml
kubectl wait --for=condition=healthy --timeout=120s vault/vault
kubectl delete -f operator/deploy/cr-hsm-softhsm.yaml
kubectl delete secret vault-unseal-keys
kubectl delete pvc --all
# Fifth test: single node cluster with defined PriorityClass via vaultPodSpec and vaultConfigurerPodSpec
kubectl create clusterrolebinding oidc-reviewer --clusterrole=system:service-account-issuer-discovery --group=system:unauthenticated
kubectl apply -f operator/deploy/priorityclass.yaml
kubectl apply -f operator/deploy/cr-priority.yaml
kubectl wait --for=condition=healthy --timeout=120s vault/vault
# Leave this instance for further tests
# Run a client tests
# Give bank-vaults some time to let the Kubernetes auth backend configuration happen
sleep 20
# Run an internal client which tries to read from Vault with the configured Kubernetes auth backend
kurun run cmd/examples/main.go
# Only kind is configured to be able to run this test
kubectl delete -f operator/deploy/cr-priority.yaml
kubectl delete -f operator/deploy/priorityclass.yaml
kubectl delete secret vault-unseal-keys
kubectl delete pvc --all
# Sixth test: Run the OIDC authenticated client test
kubectl create namespace vswh # create the namespace beforehand, because we need the CA cert here as well
kubectl apply -f operator/deploy/cr-oidc.yaml
kubectl wait --for=condition=healthy --timeout=120s vault/vault
kurun apply -f hack/oidc-pod.yaml
waitfor "kubectl get pod/oidc -o json | jq -e '.status.phase == \"Succeeded\"'"
# Run the webhook test, the hello-secrets deployment should be successfully mutated
helm upgrade --install vault-secrets-webhook ./charts/vault-secrets-webhook \
--set image.tag=latest \
--set image.pullPolicy=IfNotPresent \
--set configMapMutation=true \
--set configmapFailurePolicy=Fail \
--set podsFailurePolicy=Fail \
--set secretsFailurePolicy=Fail \
--set env.VAULT_ENV_IMAGE=banzaicloud/vault-env:latest \
--namespace vswh \
--wait
kubectl apply -f deploy/test-secret.yaml
test "$(kubectl get secrets sample-secret -o jsonpath='{.data.\.dockerconfigjson}' | base64 --decode | jq -r '.auths[].username')" = "dockerrepouser"
test "$(kubectl get secrets sample-secret -o jsonpath='{.data.\.dockerconfigjson}' | base64 --decode | jq -r '.auths[].password')" = "dockerrepopassword"
test "$(kubectl get secrets sample-secret -o jsonpath='{.data.inline}' | base64 --decode)" = "Inline: secretId AWS_ACCESS_KEY_ID"
kubectl apply -f deploy/test-configmap.yaml
test "$(kubectl get cm sample-configmap -o jsonpath='{.data.aws-access-key-id}')" = "secretId"
test "$(kubectl get cm sample-configmap -o jsonpath='{.data.aws-access-key-id-formatted}')" = "AWS key in base64: c2VjcmV0SWQ="
test "$(kubectl get cm sample-configmap -o jsonpath='{.binaryData.aws-access-key-id-binary}')" = "secretId"
test "$(kubectl get cm sample-configmap -o jsonpath='{.data.aws-access-key-id-inline}')" = "AWS_ACCESS_KEY_ID: secretId AWS_SECRET_ACCESS_KEY: s3cr3t"
kubectl apply -f deploy/test-deployment-seccontext.yaml
kubectl wait --for=condition=available deployment/hello-secrets-seccontext --timeout=120s
check_webhook_seccontext
kubectl delete -f deploy/test-deployment-seccontext.yaml
kubectl apply -f deploy/test-deployment.yaml
kubectl wait --for=condition=available deployment/hello-secrets --timeout=120s
|
import sys
class RegistrationSystem:
    # Coordinates a GUI window and a Registration window: hides whichever is
    # visible and then either runs the main program or exits.
    # NOTE(review): GUI and Registration are not imported in this module as
    # shown — presumably defined elsewhere; confirm.

    def __init__(self):
        self.gui = GUI()
        self.reg = Registration()

    def hide_gui(self):
        # Hide the main GUI window.
        self.gui.hide()

    def hide_registration(self):
        # Hide the registration window.
        self.reg.hide()

    def run_main_program(self):
        # Placeholder for main program logic
        pass

    def start_registration_system(self):
        # Hide the GUI first if it is currently visible.
        if not self.gui.isHidden():
            self.hide_gui()
        # NOTE(review): the `else` below binds to THIS `if` — when the
        # registration window is already hidden the process exits instead of
        # running the main program. Confirm this is intended.
        if not self.reg.isHidden():
            self.hide_registration()
            self.run_main_program()
        else:
            sys.exit()
# NOTE(review): `global` at module level is a legal no-op — it declares
# nothing and has no effect. Confirm whether it belongs inside a function.
global current_exit_code
from typing import List
def extract_attributes(tag: str) -> List[str]:
    """Extract the attribute tokens from a single HTML/XML tag string.

    Everything between the first space (i.e. after the tag name) and the
    closing ``'>'`` is split on whitespace; surrounding single quotes are
    stripped from each token.

    :param tag: a tag such as ``"<p class id>"``
    :return: list of attribute tokens; empty when the tag has no attributes
    """
    attributes: List[str] = []
    start = tag.find(' ')
    if start == -1:
        # No space after the tag name -> no attributes present.
        return attributes
    end = tag.find('>')
    if end == -1:
        # Bug fix: a missing '>' previously made tag[start:-1] silently drop
        # the last character; treat the whole remainder as attribute text.
        end = len(tag)
    # Bug fix: the previous version split tag[start:end] (which already
    # excludes the tag name) and then discarded element 0 with [1:],
    # losing the first attribute.
    for pair in tag[start:end].split():
        attributes.append(pair.strip('\''))
    return attributes
#!/bin/bash
# Launch a DC/OS cluster via dcos-launch, retrying exactly once if the
# CloudFormation stack rolls back.
#   $1 - launch config file   (default /build/config.yaml)
#   $2 - cluster info output  (default /build/cluster_info.json)
LAUNCH_CONFIG_FILE=${1:-/build/config.yaml}
CLUSTER_INFO_FILE=${2:-/build/cluster_info.json}
set -e
LAUNCH_SUCCESS="False"
RETRY_LAUNCH="True"
while [ x"${LAUNCH_SUCCESS}" == x"False" ]; do
  rm -f ${CLUSTER_INFO_FILE} # dcos-launch complains if the file already exists
  dcos-launch create --config-path=${LAUNCH_CONFIG_FILE} --info-path=${CLUSTER_INFO_FILE}
  # On the retryable (first) pass tolerate a failing `wait` so the rollback
  # check below gets a chance to run; on the final pass fail fast.
  if [ x"$RETRY_LAUNCH" == x"True" ]; then
    set +e
  else
    set -e
  fi
  dcos-launch wait --info-path=${CLUSTER_INFO_FILE} 2>&1 | tee dcos-launch-wait-output.stdout
  # Grep exits with an exit code of 1 if no lines are matched. We thus need to
  # disable exit on errors.
  set +e
  ROLLBACK_FOUND=$(grep -o "Exception: StackStatus changed unexpectedly to: ROLLBACK_IN_PROGRESS" dcos-launch-wait-output.stdout)
  if [ -n "${ROLLBACK_FOUND}" ]; then
    if [ x"${RETRY_LAUNCH}" == x"False" ]; then
      set -e
      echo "Cluster launch failed"
      exit 1
    fi
    # TODO: This would be a good place to add some form of alerting!
    # We could add a cluster_failure.sh callback, for example.
    # We only retry once!
    RETRY_LAUNCH="False"
    set -e
    # We need to wait for the current stack to be deleted
    dcos-launch delete --info-path=${CLUSTER_INFO_FILE}
    rm -f ${CLUSTER_INFO_FILE}
    echo "Cluster creation failed. Retrying after 30 seconds"
    sleep 30
  else
    LAUNCH_SUCCESS="True"
  fi
done
set -e
# Print the cluster info.
echo "Printing ${CLUSTER_INFO_FILE}..."
cat ${CLUSTER_INFO_FILE}
|
module MechanicalTurk
  # Common base for MechanicalTurk controllers: enforces CanCan(Can)
  # authorization against a class-less (non-model) resource on every action.
  class BaseController < ApplicationController
    authorize_resource :class => false
  end
end
|
#!/usr/bin/env bash
# Prepare the FineGym dataset: activate the conda env, fetch the annotation
# files if missing, then download the source videos from YouTube.
# set up environment
source activate afsd
pip install --upgrade youtube-dl mmcv
set -e
DATA_DIR="./finegym"
ANNO_DIR="./finegym/annotations"
if [[ ! -d "${ANNO_DIR}" ]]; then
  echo "${ANNO_DIR} does not exist. Creating";
  mkdir -p ${ANNO_DIR}
  # downloading the annotation files
  echo "Downloading annotations..."
  wget https://sdolivia.github.io/FineGym/resources/dataset/finegym_annotation_info_v1.0.json -O $ANNO_DIR/annotation.json
  wget https://sdolivia.github.io/FineGym/resources/dataset/gym99_train_element_v1.0.txt -O $ANNO_DIR/gym99_train_org.txt
  wget https://sdolivia.github.io/FineGym/resources/dataset/gym99_val_element.txt -O $ANNO_DIR/gym99_val_org.txt
fi
echo "Downloading videos..."
python download.py ${ANNO_DIR}/annotation.json ${DATA_DIR}/videos
package com.deskbill.controller;
import javax.swing.JDialog;
import javax.swing.JOptionPane;
import com.deskbill.domain.Sort;
import com.deskbill.services.SortService;
import com.deskbill.view.AbstractOperationSortDialog;
/**
 * Dialog controller for creating a new category ("sort").
 */
public class AddSortController extends AbstractOperationSortDialog {

    private static final long serialVersionUID = -7986296623248796274L;

    /**
     * Builds the "add category" dialog on top of the shared operation dialog.
     *
     * @param dialog the owning dialog
     */
    public AddSortController(JDialog dialog) {
        super(dialog);
        titleLabel.setText("添加分类");
        super.setTitle("添加分类");
    }

    /**
     * Validates the form, persists the new category via {@link SortService}
     * and reports the result; the dialog is closed on success.
     */
    @Override
    public void confirm() {
        // Parent-category validation is currently disabled (it used to reject
        // the placeholder entry "=请选择=" in parentBox).
        final String name = snameTxt.getText().trim();
        final String description = sdescArea.getText();

        // A category name is mandatory.
        if (name == null || name.isEmpty()) {
            JOptionPane.showMessageDialog(this, "请填写分类名称");
            return;
        }

        final Sort sort = new Sort();
        sort.setType(parentBox.getSelectedItem().toString());
        sort.setSname(name);
        sort.setSdesc(description);
        sort.setState(1);

        if (new SortService().addSort(sort) > 0) {
            JOptionPane.showMessageDialog(this, "添加分类成功", "操作成功", JOptionPane.PLAIN_MESSAGE);
            this.dispose();
        } else {
            JOptionPane.showMessageDialog(this, "添加分类失败", "操作失败", JOptionPane.ERROR_MESSAGE);
        }
    }
}
|
<filename>convertallunits.py
# Convert Total Annihilation 3DO unit models (located via *.fbi spec files)
# to Supreme Commander SCM format, one output directory per unit under ./UNITS.
import argparse
import glob
import json
import os
import scm.supcom_exporter
import subprocess
import sys

cwd = os.getcwd()

parser = argparse.ArgumentParser()
parser.add_argument('--input-spec', help=r'search spec for fbi files for units to convert, eg "d:\temp\ccdata\UNITS\*.fbi"')
parser.add_argument('--converter-cmd', help='path to 3do2scm.exe executable, eg "c:\\3do2scm.exe"', required=False, default=os.path.join(cwd,"3do2scm.exe"))
parser.add_argument('--tadata-paths', help='paths under which to search for 3do files, eg "d:\\temp\\totala1 d:\\temp\\ccdata"', nargs='+')
args = parser.parse_args()

# All converted models go under ./UNITS/<unit-name>/
units_dir = os.path.join(cwd,'UNITS')
if not os.path.exists(units_dir):
    os.mkdir(units_dir)

for fn in glob.glob(args.input_spec):
    print("----", fn)
    unit,_ = os.path.splitext(os.path.basename(fn))
    target_dir = os.path.join(units_dir,unit)
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)
    # The exporter writes into the current directory, so chdir per unit.
    os.chdir(target_dir)
    # Convert both the live model and its destroyed ("_dead") variant.
    for suffix in ("", "_dead"):
        try:
            # NOTE(review): passing a list together with shell=True is
            # platform-dependent (on POSIX only the first element is run by
            # the shell) — confirm this is intended for the target platform.
            json_bytes = subprocess.check_output([args.converter_cmd, unit+suffix] + args.tadata_paths, stderr=None, shell=True)
            if json_bytes:
                _3do_data = json.loads(json_bytes)
                scm.supcom_exporter.export(_3do_data)
        except subprocess.CalledProcessError as e:
            print("Unable to convert model {}: {}".format(unit+suffix, e))
    # Restore the working directory before processing the next unit.
    os.chdir(cwd)
|
# Render the shared room_category partial for the requested record.
json.partial! "room_categories/room_category", room_category: @room_category
|
def compress_string(input_str):
    """Run-length encode ``input_str`` as ``<count><char>`` pairs.

    The encoded form is returned only when it is strictly shorter than the
    input; otherwise the input is returned unchanged.

    :param input_str: string to compress (may be empty)
    :return: the compressed string, or the original if compression does
        not shrink it
    """
    # Bug fix: the previous version indexed input_str[-1] unconditionally
    # and raised IndexError on an empty string.
    if not input_str:
        return input_str
    compressed = ""
    count = 1
    for i in range(1, len(input_str)):
        if input_str[i] == input_str[i - 1]:
            count += 1
        else:
            # Run ended: flush "<count><char>" and start counting the new run.
            compressed += str(count) + input_str[i - 1]
            count = 1
    # Flush the final run.
    compressed += str(count) + input_str[-1]
    return compressed if len(compressed) < len(input_str) else input_str
#!/usr/bin/env bash
# Copyright (c) Microsoft. All rights reserved.

# ANSI color codes used by the logging helpers in this script.
COL_NO="\033[0m" # no color / reset
COL_ERR="\033[1;31m" # light red
COL_H1="\033[1;33m" # yellow
COL_H2="\033[1;36m" # light cyan
# Print a yellow section header ("### <text>") to stdout.
header() {
  echo -e "${COL_H1}\n### $1 ${COL_NO}"
}
# Print an error message in red to stdout.
error() {
  echo -e "${COL_ERR}$1 ${COL_NO}"
}
# Verify the 'npm' CLI is on the PATH; print installation guidance and exit 1
# otherwise. Rewritten to match the sibling checks (docker/git): the previous
# `set +e` / `set -e` dance was unnecessary and — unlike the siblings — left
# errexit force-enabled for the caller as a side effect.
check_dependency_npm() {
  if ! which npm >/dev/null 2>&1 ; then
    echo "ERROR: 'npm' command not found."
    echo "Install Node.js and npm and make sure the 'npm' command is in the PATH."
    echo "Node.js and npm installation: https://www.npmjs.com/get-npm"
    exit 1
  fi
}
# Ensure the Docker CLI is available; abort with install guidance otherwise.
check_dependency_docker() {
  which docker >/dev/null 2>&1 && return 0
  echo "ERROR: 'docker' command not found."
  echo "Install Docker and make sure the 'docker' command is in the PATH."
  echo "Docker installation: https://www.docker.com/community-edition#/download"
  exit 1
}
# Ensure the git CLI is available; abort with install guidance otherwise.
check_dependency_git() {
  if which git >/dev/null 2>&1 ; then
    return 0
  fi
  echo "ERROR: 'git' command not found."
  echo "Install git and make sure the 'git' command is in the PATH."
  echo "Git installation: https://git-scm.com"
  exit 1
}
|
from nose.tools import raises
from apmserver import ServerBaseTest, SecureServerBaseTest
from requests.exceptions import SSLError
import requests
import zlib
import gzip
try:
from StringIO import StringIO
except ImportError:
import io
class Test(ServerBaseTest):
    # Functional tests against a locally running apm-server over plain HTTP.
    transactions_url = 'http://localhost:8200/v1/transactions'

    def test_ok(self):
        # A valid transactions payload is accepted with 202.
        transactions = self.get_transaction_payload()
        r = requests.post(self.transactions_url, data=transactions,
                          headers={'Content-Type': 'application/json'})
        assert r.status_code == 202, r.status_code

    def test_empty(self):
        # An empty JSON body is rejected with 400.
        transactions = {}
        r = requests.post(self.transactions_url, json=transactions)
        assert r.status_code == 400, r.status_code

    def test_not_existent(self):
        # Unknown endpoints return 404.
        transactions = {}
        invalid_url = 'http://localhost:8200/transactionX'
        r = requests.post(invalid_url, json=transactions)
        assert r.status_code == 404, r.status_code

    def test_method_not_allowed(self):
        # Only POST is supported on the transactions endpoint.
        r = requests.get(self.transactions_url)
        assert r.status_code == 405, r.status_code

    def test_bad_json(self):
        # Structurally invalid payloads are rejected with 400.
        r = requests.post(self.transactions_url, json="not json")
        assert r.status_code == 400, r.status_code

    def test_healthcheck(self):
        healtcheck_url = 'http://localhost:8200/healthcheck'
        r = requests.get(healtcheck_url)
        assert r.status_code == 200, r.status_code

    def test_gzip(self):
        # A gzip-compressed payload with Content-Encoding: gzip is accepted.
        transactions = self.get_transaction_payload()
        out = ""
        # Py2/Py3 shim: StringIO on Py2, io.BytesIO on Py3.
        # NOTE(review): bare except relies on NameError when the StringIO
        # import failed at module load — fragile; consider io.BytesIO always.
        try:
            out = StringIO()
        except:
            out = io.BytesIO()
        with gzip.GzipFile(fileobj=out, mode="w") as f:
            # Same shim for str vs bytes payloads.
            try:
                f.write(transactions)
            except:
                f.write(bytes(transactions, 'utf-8'))
        r = requests.post(self.transactions_url, data=out.getvalue(),
                          headers={'Content-Encoding': 'gzip', 'Content-Type': 'application/json'})
        assert r.status_code == 202, r.status_code

    def test_deflat(self):
        # A zlib-compressed payload with Content-Encoding: deflate is accepted.
        transactions = self.get_transaction_payload()
        compressed_data = None
        try:
            compressed_data = zlib.compress(transactions)
        except:
            compressed_data = zlib.compress(bytes(transactions, 'utf-8'))
        r = requests.post(self.transactions_url, data=compressed_data,
                          headers={'Content-Encoding': 'deflate', 'Content-Type': 'application/json'})
        assert r.status_code == 202, r.status_code

    def test_gzip_error(self):
        # Uncompressed data declared as gzip must be rejected.
        data = self.get_transaction_payload()
        r = requests.post(self.transactions_url, data=data,
                          headers={'Content-Encoding': 'gzip', 'Content-Type': 'application/json'})
        assert r.status_code == 400, r.status_code

    def test_deflate_error(self):
        # Uncompressed data declared as deflate must be rejected.
        data = self.get_transaction_payload()
        r = requests.post(self.transactions_url, data=data,
                          headers={'Content-Encoding': 'deflate', 'Content-Type': 'application/json'})
        assert r.status_code == 400, r.status_code
class SecureTest(SecureServerBaseTest):
    # Tests against the TLS-enabled apm-server.

    def test_https_ok(self):
        # With verify=False the (self-signed) certificate is accepted
        # and the payload goes through.
        transactions = self.get_transaction_payload()
        r = requests.post("https://localhost:8200/v1/transactions",
                          data=transactions,
                          headers={'Content-Type': 'application/json'},
                          verify=False)
        assert r.status_code == 202, r.status_code

    @raises(SSLError)
    def test_https_verify(self):
        # With default certificate verification the request must fail.
        transactions = self.get_transaction_payload()
        requests.post("https://localhost:8200/v1/transactions",
                      data=transactions,
                      headers={'Content-Type': 'application/json'})
|
#!/bin/sh
# Names of the IPv4 ipsets referenced by the rules below.
IPSET_LANIPLIST="laniplist"
IPSET_VPSIPLIST="vpsiplist"
IPSET_SHUNTLIST="shuntlist"
IPSET_GFW="gfwlist"
IPSET_CHN="chnroute"
IPSET_BLACKLIST="blacklist"
IPSET_BLACKLIST2="blacklist2"
IPSET_BLACKLIST3="blacklist3"
IPSET_WHITELIST="whitelist"
# IPv6 counterparts.
IPSET_LANIPLIST_6="laniplist6"
IPSET_VPSIPLIST_6="vpsiplist6"
IPSET_SHUNTLIST_6="shuntlist6"
IPSET_GFW6="gfwlist6"
IPSET_CHN6="chnroute6"
IPSET_BLACKLIST_6="blacklist_6"
IPSET_BLACKLIST2_6="blacklist2_6"
IPSET_BLACKLIST3_6="blacklist3_6"
IPSET_WHITELIST_6="whitelist_6"
# 1 = also install IPv6 TPROXY rules (checked as PROXY_IPV6 == "1" below).
PROXY_IPV6=0
# Default insertion position for rules added to PSW_OUTPUT.
FORCE_INDEX=2
# Shortcuts for the iptables/ip6tables tables being manipulated.
ipt_n="iptables -t nat"
ipt_m="iptables -t mangle"
ip6t_n="ip6tables -t nat"
ip6t_m="ip6tables -t mangle"
# Firewall include file path from UCI (may be empty).
FWI=$(uci -q get firewall.passwall.path 2>/dev/null)
# Emit "<flag> <value>" for an iptables match, or nothing when the value is
# empty, the flag is empty, or the value is the full port range "1:65535"
# (matching everything needs no clause).
factor() {
	case "$1" in
	""|"1:65535")
		echo ""
		;;
	*)
		if [ -n "$2" ]; then
			echo "$2 $1"
		else
			echo ""
		fi
		;;
	esac
}
# Build an ipset destination match for set $1; pass "!" as $2 to negate.
dst() {
	printf '%s\n' "-m set $2 --match-set $1 dst"
}
# Build an iptables comment match from $1; spaces become underscores so the
# name survives shell word splitting when the rule string is expanded.
comment() {
	echo "-m comment --comment '$(echo $1 | sed 's/ /_/g')'"
}
# Return the line number of the LAST rule in <chain> whose listing matches
# <pattern>, or <default> (0) when none match.
# Usage: RULE_LAST_INDEX <iptables-cmd> <chain> <pattern> [default]
RULE_LAST_INDEX() {
	[ $# -ge 3 ] || {
		echolog "索引列举方式不正确(iptables),终止执行!"
		exit 1
	}
	local ipt_tmp=${1}; shift
	local chain=${1}; shift
	local list=${1}; shift
	local default=${1:-0}; shift
	# Last matching rule number from `<cmd> -n -L <chain> --line-numbers`.
	local _index=$($ipt_tmp -n -L $chain --line-numbers 2>/dev/null | grep "$list" | sed -n '$p' | awk '{print $1}')
	echo "${_index:-${default}}"
}
# Build the iptables action for port $1: plain REDIRECT by default,
# or TPROXY / MARK when $2 selects them.
REDIRECT() {
	case "$2" in
	TPROXY)
		echo "-j TPROXY --tproxy-mark 0x1/0x1 --on-port $1"
		;;
	MARK)
		echo "-j MARK --set-mark $1"
		;;
	*)
		echo "-j REDIRECT --to-ports $1"
		;;
	esac
}
# Map a proxy mode to the IPv4 iptables match/target fragment:
# $1 = mode, $2 = redirect port, $3 = action kind (e.g. TPROXY), see REDIRECT().
get_redirect_ipt() {
	local _mode="$1"
	local _port="$2"
	local _kind="$3"
	case "$_mode" in
	disable)
		echo "-j RETURN" ;;
	global)
		echo "$(REDIRECT $_port $_kind)" ;;
	gfwlist)
		echo "$(dst $IPSET_GFW) $(REDIRECT $_port $_kind)" ;;
	chnroute)
		echo "$(dst $IPSET_CHN !) $(REDIRECT $_port $_kind)" ;;
	returnhome)
		echo "$(dst $IPSET_CHN) $(REDIRECT $_port $_kind)" ;;
	esac
}
# Map a proxy mode to the IPv6 ip6tables match/target fragment:
# $1 = mode, $2 = redirect port, $3 = action kind (see REDIRECT()).
get_redirect_ip6t() {
	local _mode="$1"
	local _port="$2"
	local _kind="$3"
	if [ "$_mode" = "disable" ]; then
		echo "-j RETURN"
	elif [ "$_mode" = "global" ]; then
		echo "$(REDIRECT $_port $_kind)"
	elif [ "$_mode" = "gfwlist" ]; then
		echo "$(dst $IPSET_GFW6) $(REDIRECT $_port $_kind)"
	elif [ "$_mode" = "chnroute" ]; then
		echo "$(dst $IPSET_CHN6 !) $(REDIRECT $_port $_kind)"
	elif [ "$_mode" = "returnhome" ]; then
		echo "$(dst $IPSET_CHN6) $(REDIRECT $_port $_kind)"
	fi
}
# Human-readable (Chinese) label for a proxy mode, used in log messages.
get_action_chain_name() {
	case "$1" in
	disable) echo "不代理" ;;
	global) echo "全局代理" ;;
	gfwlist) echo "防火墙列表" ;;
	chnroute) echo "中国列表以外" ;;
	returnhome) echo "中国列表" ;;
	esac
}
# Print the reserved/private IPv4 ranges that must bypass the proxy,
# one CIDR per line.
gen_laniplist() {
	printf '%s\n' \
		0.0.0.0/8 \
		10.0.0.0/8 \
		100.64.0.0/10 \
		127.0.0.0/8 \
		169.254.0.0/16 \
		172.16.0.0/12 \
		192.168.0.0/16 \
		224.0.0.0/4 \
		240.0.0.0/4
}
# Print the reserved/special IPv6 ranges that must bypass the proxy,
# one CIDR per line.
gen_laniplist_6() {
	printf '%s\n' \
		::/128 \
		::1/128 \
		::ffff:0:0/96 \
		::ffff:0:0:0/96 \
		64:ff9b::/96 \
		100::/64 \
		2001::/32 \
		2001:20::/28 \
		2001:db8::/32 \
		2002::/16 \
		fc00::/7 \
		fe80::/10 \
		ff00::/8
}
# Install the access-control (per-client) proxy rules from the UCI
# "@acl_rule" sections, then append the global default TCP/UDP rules.
load_acl() {
	local items=$(get_enabled_anonymous_secs "@acl_rule")
	[ -n "$items" ] && {
		# NOTE(review): "udp_proxy_mod" is missing the trailing "e", so
		# udp_proxy_mode is NOT declared local here — confirm intended.
		local item enabled remarks ip mac tcp_proxy_mode udp_proxy_mod
		local tcp_node udp_node tcp_no_redir_ports udp_no_redir_ports tcp_redir_ports udp_redir_ports
		local TCP_NODE UDP_NODE TCP_NODE_TYPE UDP_NODE_TYPE ipt_tmp is_tproxy tcp_port udp_port msg msg2
		echolog "访问控制:"
		for item in $items; do
			unset ip mac tcp_port udp_port is_tproxy msg
			# Import this section's options as shell variables.
			eval $(uci -q show "${CONFIG}.${item}" | cut -d'.' -sf 3-)
			[ -z "${ip}${mac}" ] && continue
			tcp_proxy_mode=${tcp_proxy_mode:-default}
			udp_proxy_mode=${udp_proxy_mode:-default}
			tcp_no_redir_ports=${tcp_no_redir_ports:-default}
			udp_no_redir_ports=${udp_no_redir_ports:-default}
			tcp_redir_ports=${tcp_redir_ports:-default}
			udp_redir_ports=${udp_redir_ports:-default}
			# "default" inherits the corresponding global setting.
			[ "$tcp_proxy_mode" = "default" ] && tcp_proxy_mode=$TCP_PROXY_MODE
			[ "$udp_proxy_mode" = "default" ] && udp_proxy_mode=$UDP_PROXY_MODE
			[ "$tcp_no_redir_ports" = "default" ] && tcp_no_redir_ports=$TCP_NO_REDIR_PORTS
			[ "$udp_no_redir_ports" = "default" ] && udp_no_redir_ports=$UDP_NO_REDIR_PORTS
			[ "$tcp_redir_ports" = "default" ] && tcp_redir_ports=$TCP_REDIR_PORTS
			[ "$udp_redir_ports" = "default" ] && udp_redir_ports=$UDP_REDIR_PORTS
			#echolog "访问控制:${item}..."
			[ -n "$ip" ] && msg="IP:$ip,"
			[ -n "$mac" ] && msg="${msg:+${msg}和}MAC:$mac,"
			ipt_tmp=$ipt_n
			[ "$tcp_proxy_mode" != "disable" ] && {
				[ "$TCP_NODE" != "nil" ] && {
					tcp_port=$TCP_REDIR_PORT
					eval TCP_NODE_TYPE=$(echo $(config_n_get $TCP_NODE type) | tr 'A-Z' 'a-z')
					# brook in client mode must use TPROXY instead of REDIRECT.
					[ "$TCP_NODE_TYPE" == "brook" ] && [ "$(config_n_get $TCP_NODE protocol client)" == "client" ] && is_tproxy=1
					#[ "$TCP_NODE_TYPE" == "trojan-go" ] && is_tproxy=1
					msg2="${msg}使用TCP节点 [$(get_action_chain_name $tcp_proxy_mode)]"
					if [ -n "${is_tproxy}" ]; then
						msg2="${msg2}(TPROXY:${tcp_port})代理"
						ipt_tmp=$ipt_m && is_tproxy="TPROXY"
					else
						msg2="${msg2}(REDIRECT:${tcp_port})代理"
					fi
					# Exempt the configured "no redirect" TCP ports first.
					[ "$tcp_no_redir_ports" != "disable" ] && {
						$ipt_tmp -A PSW $(comment "$remarks") $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -p tcp -m multiport --dport $tcp_no_redir_ports -j RETURN
						$ip6t_m -A PSW $(comment "$remarks") $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -p tcp -m multiport --dport $tcp_no_redir_ports -j RETURN
						msg2="${msg2}[$?]除${tcp_no_redir_ports}外的"
					}
					msg2="${msg2}所有端口"
					# NOTE(review): 1.2.3.4 looks like a magic probe address — confirm its purpose.
					$ipt_tmp -A PSW $(comment "$remarks") -p tcp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -d 1.2.3.4 $(REDIRECT $tcp_port $is_tproxy)
					$ipt_tmp -A PSW $(comment "$remarks") -p tcp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $tcp_redir_ports "-m multiport --dport") $(dst $IPSET_SHUNTLIST) $(REDIRECT $tcp_port $is_tproxy)
					$ipt_tmp -A PSW $(comment "$remarks") -p tcp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $tcp_redir_ports "-m multiport --dport") $(dst $IPSET_BLACKLIST) $(REDIRECT $tcp_port $is_tproxy)
					$ipt_tmp -A PSW $(comment "$remarks") -p tcp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $tcp_redir_ports "-m multiport --dport") $(get_redirect_ipt $tcp_proxy_mode $tcp_port $is_tproxy)
					if [ "$PROXY_IPV6" == "1" ]; then
						$ip6t_m -A PSW $(comment "$remarks") -p tcp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $tcp_redir_ports "-m multiport --dport") $(dst $IPSET_SHUNTLIST_6) $(REDIRECT $tcp_port TPROXY)
						$ip6t_m -A PSW $(comment "$remarks") -p tcp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $tcp_redir_ports "-m multiport --dport") $(dst $IPSET_BLACKLIST_6) $(REDIRECT $tcp_port TPROXY)
						$ip6t_m -A PSW $(comment "$remarks") -p tcp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $tcp_redir_ports "-m multiport --dport") $(get_redirect_ip6t $tcp_proxy_mode $tcp_port TPROXY)
					fi
				}
				echolog " - ${msg2}"
			}
			# Stop processing this client's TCP traffic in later rules.
			$ipt_tmp -A PSW $(comment "$remarks") $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -p tcp -j RETURN
			$ip6t_m -A PSW $(comment "$remarks") $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -p tcp -j RETURN
			[ "$udp_proxy_mode" != "disable" ] && {
				msg2="${msg}使用UDP节点 [$(get_action_chain_name $udp_proxy_mode)]"
				[ "$UDP_NODE" != "nil" ] && {
					udp_port=$UDP_REDIR_PORT
					msg2="${msg2}(TPROXY:${udp_port})代理"
					[ "$udp_no_redir_ports" != "disable" ] && {
						$ipt_m -A PSW $(comment "$remarks") $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -p udp -m multiport --dport $udp_no_redir_ports -j RETURN
						$ip6t_m -A PSW $(comment "$remarks") $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -p udp -m multiport --dport $udp_no_redir_ports -j RETURN
						msg2="${msg2}[$?]除${udp_no_redir_ports}外的"
					}
					msg2="${msg2}所有端口"
					# NOTE(review): protocol is "tcp" here although this is the UDP branch — confirm.
					$ipt_m -A PSW $(comment "$remarks") -p tcp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -d 1.2.3.4 $(REDIRECT $udp_port TPROXY)
					$ipt_m -A PSW $(comment "$remarks") -p udp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $udp_redir_ports "-m multiport --dport") $(dst $IPSET_SHUNTLIST) $(REDIRECT $udp_port TPROXY)
					$ipt_m -A PSW $(comment "$remarks") -p udp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $udp_redir_ports "-m multiport --dport") $(dst $IPSET_BLACKLIST) $(REDIRECT $udp_port TPROXY)
					$ipt_m -A PSW $(comment "$remarks") -p udp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $udp_redir_ports "-m multiport --dport") $(get_redirect_ipt $udp_proxy_mode $udp_port TPROXY)
					if [ "$PROXY_IPV6" == "1" ]; then
						$ip6t_m -A PSW $(comment "$remarks") -p udp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $udp_redir_ports "-m multiport --dport") $(dst $IPSET_SHUNTLIST_6) $(REDIRECT $udp_port TPROXY)
						$ip6t_m -A PSW $(comment "$remarks") -p udp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $udp_redir_ports "-m multiport --dport") $(dst $IPSET_BLACKLIST_6) $(REDIRECT $udp_port TPROXY)
						$ip6t_m -A PSW $(comment "$remarks") -p udp $(factor $ip "-s") $(factor $mac "-m mac --mac-source") $(factor $udp_redir_ports "-m multiport --dport") $(get_redirect_ip6t $udp_proxy_mode $udp_port TPROXY)
					fi
				}
				echolog " - ${msg2}"
			}
			# Stop processing this client's UDP traffic in later rules.
			$ipt_m -A PSW $(comment "$remarks") $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -p udp -j RETURN
			$ip6t_m -A PSW $(comment "$remarks") $(factor $ip "-s") $(factor $mac "-m mac --mac-source") -p udp -j RETURN
		done
	}
	# Load the default TCP proxy-mode rules (applied to all remaining clients).
	local ipt_tmp=$ipt_n
	local is_tproxy msg
	if [ "$TCP_PROXY_MODE" != "disable" ]; then
		[ "$TCP_NO_REDIR_PORTS" != "disable" ] && {
			$ipt_tmp -A PSW $(comment "默认") -p tcp -m multiport --dport $TCP_NO_REDIR_PORTS -j RETURN
			$ip6t_m -A PSW $(comment "默认") -p tcp -m multiport --dport $TCP_NO_REDIR_PORTS -j RETURN
		}
		ipt_tmp=$ipt_n
		unset is_tproxy msg
		[ "$TCP_NODE" != "nil" ] && {
			local TCP_NODE_TYPE=$(echo $(config_n_get $TCP_NODE type) | tr 'A-Z' 'a-z')
			# brook in client mode must use TPROXY instead of REDIRECT.
			[ "$TCP_NODE_TYPE" == "brook" ] && [ "$(config_n_get $TCP_NODE protocol client)" == "client" ] && is_tproxy=1
			#[ "$TCP_NODE_TYPE" == "trojan-go" ] && is_tproxy=1
			msg="TCP默认代理:使用TCP节点 [$(get_action_chain_name $TCP_PROXY_MODE)]"
			if [ -n "$is_tproxy" ]; then
				ipt_tmp=$ipt_m && is_tproxy="TPROXY"
				msg="${msg}(TPROXY:${TCP_REDIR_PORT})代理"
			else
				msg="${msg}(REDIRECT:${TCP_REDIR_PORT})代理"
			fi
			[ "$TCP_NO_REDIR_PORTS" != "disable" ] && msg="${msg}除${TCP_NO_REDIR_PORTS}外的"
			msg="${msg}所有端口"
			$ipt_tmp -A PSW $(comment "默认") -p tcp -d 1.2.3.4 $(REDIRECT $TCP_REDIR_PORT $is_tproxy)
			$ipt_tmp -A PSW $(comment "默认") -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_SHUNTLIST) $(REDIRECT $TCP_REDIR_PORT $is_tproxy)
			$ipt_tmp -A PSW $(comment "默认") -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_BLACKLIST) $(REDIRECT $TCP_REDIR_PORT $is_tproxy)
			$ipt_tmp -A PSW $(comment "默认") -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(get_redirect_ipt $TCP_PROXY_MODE $TCP_REDIR_PORT $is_tproxy)
			if [ "$PROXY_IPV6" == "1" ]; then
				$ip6t_m -A PSW $(comment "默认") -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_SHUNTLIST_6) $(REDIRECT $TCP_REDIR_PORT TPROXY)
				$ip6t_m -A PSW $(comment "默认") -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_BLACKLIST_6) $(REDIRECT $TCP_REDIR_PORT TPROXY)
				$ip6t_m -A PSW $(comment "默认") -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(get_redirect_ip6t $TCP_PROXY_MODE $TCP_REDIR_PORT TPROXY)
			fi
			echolog "${msg}"
		}
	fi
	$ipt_n -A PSW $(comment "默认") -p tcp -j RETURN
	$ipt_m -A PSW $(comment "默认") -p tcp -j RETURN
	$ip6t_m -A PSW $(comment "默认") -p tcp -j RETURN
	# Load the default UDP proxy-mode rules.
	if [ "$UDP_PROXY_MODE" != "disable" ]; then
		[ "$UDP_NO_REDIR_PORTS" != "disable" ] && {
			$ipt_m -A PSW $(comment "默认") -p udp -m multiport --dport $UDP_NO_REDIR_PORTS -j RETURN
			$ip6t_m -A PSW $(comment "默认") -p udp -m multiport --dport $UDP_NO_REDIR_PORTS -j RETURN
		}
		[ "$UDP_NODE" != "nil" ] && {
			msg="UDP默认代理:使用UDP节点 [$(get_action_chain_name $UDP_PROXY_MODE)](TPROXY:${UDP_REDIR_PORT})代理"
			[ "$UDP_NO_REDIR_PORTS" != "disable" ] && msg="${msg}除${UDP_NO_REDIR_PORTS}外的"
			msg="${msg}所有端口"
			$ipt_m -A PSW $(comment "默认") -p udp -d 1.2.3.4 $(REDIRECT $UDP_REDIR_PORT TPROXY)
			$ipt_m -A PSW $(comment "默认") -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_SHUNTLIST) $(REDIRECT $UDP_REDIR_PORT TPROXY)
			$ipt_m -A PSW $(comment "默认") -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_BLACKLIST) $(REDIRECT $UDP_REDIR_PORT TPROXY)
			$ipt_m -A PSW $(comment "默认") -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(get_redirect_ipt $UDP_PROXY_MODE $UDP_REDIR_PORT TPROXY)
			if [ "$PROXY_IPV6" == "1" ]; then
				$ip6t_m -A PSW $(comment "默认") -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_SHUNTLIST_6) $(REDIRECT $UDP_REDIR_PORT TPROXY)
				$ip6t_m -A PSW $(comment "默认") -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_BLACKLIST_6) $(REDIRECT $UDP_REDIR_PORT TPROXY)
				$ip6t_m -A PSW $(comment "默认") -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(get_redirect_ip6t $UDP_PROXY_MODE $UDP_REDIR_PORT TPROXY)
			fi
			echolog "${msg}"
		}
	fi
	$ipt_m -A PSW $(comment "默认") -p udp -j RETURN
	$ip6t_m -A PSW $(comment "默认") -p udp -j RETURN
	unset is_tproxy msg
}
# Add the IPs of load-balanced (haproxy) backends to the direct-connect
# ipset so proxied traffic never loops back through the proxy.
filter_haproxy() {
	# Literal IPv4 lbss entries are bulk-loaded via `ipset -R`.
	uci show $CONFIG | grep "@haproxy_config" | grep "lbss=" | cut -d "'" -f 2 | grep -E "([0-9]{1,3}[\.]){3}[0-9]{1,3}" | awk -F ":" '{print $1}' | sed -e "/^$/d" | sed -e "s/^/add $IPSET_VPSIPLIST &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	# Hostname entries are resolved to IPv4 first.
	for host in $(uci show $CONFIG | grep "@haproxy_config" | grep "lbss=" | cut -d "'" -f 2 | grep -v -E "([0-9]{1,3}[\.]){3}[0-9]{1,3}" | awk -F ":" '{print $1}'); do
		ipset -q add $IPSET_VPSIPLIST $(get_host_ip ipv4 $host 1)
	done
	echolog "加入负载均衡的节点到ipset[$IPSET_VPSIPLIST]直连完成"
}
# Add every configured node's server address (IPv4 and IPv6 literals) to the
# direct-connect ipsets so the proxy servers themselves are reached directly.
filter_vpsip() {
	uci show $CONFIG | grep ".address=" | cut -d "'" -f 2 | grep -E "([0-9]{1,3}[\.]){3}[0-9]{1,3}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_VPSIPLIST &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	uci show $CONFIG | grep ".address=" | cut -d "'" -f 2 | grep -E "([[a-f0-9]{1,4}(:[a-f0-9]{1,4}){7}|[a-f0-9]{1,4}(:[a-f0-9]{1,4}){0,7}::[a-f0-9]{0,4}(:[a-f0-9]{1,4}){0,7}])" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_VPSIPLIST_6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	echolog "加入所有节点到ipset[$IPSET_VPSIPLIST]直连完成"
}
# Insert per-node rules into PSW_OUTPUT so traffic addressed to the proxy
# server itself bypasses (or is re-routed around) the proxy chain.
# $1 = node section, $2 = stream (TCP/UDP), $3 = optional local redirect port.
filter_node() {
	local proxy_node=${1}
	local stream=$(echo ${2} | tr 'A-Z' 'a-z')
	local proxy_port=${3}
	# Inner helper: install the PSW_OUTPUT rule for a single node.
	filter_rules() {
		local node=${1}
		local stream=${2}
		local _proxy=${3}
		local _port=${4}
		local is_tproxy ipt_tmp ip6t_tmp msg msg2
		if [ -n "$node" ] && [ "$node" != "nil" ]; then
			local type=$(echo $(config_n_get $node type) | tr 'A-Z' 'a-z')
			local address=$(config_n_get $node address)
			local port=$(config_n_get $node port)
			ipt_tmp=$ipt_n
			ip6t_tmp=$ip6t_m
			# UDP always uses TPROXY; so does brook in client mode.
			[ "$stream" == "udp" ] && is_tproxy=1
			[ "$type" == "brook" ] && [ "$(config_n_get $node protocol client)" == "client" ] && is_tproxy=1
			#[ "$type" == "trojan-go" ] && is_tproxy=1
			if [ -n "$is_tproxy" ]; then
				ipt_tmp=$ipt_m
				ip6t_tmp=$ip6t_m
				msg="TPROXY"
			else
				msg="REDIRECT"
			fi
		else
			echolog " - 节点配置不正常,略过"
			return 0
		fi
		local ADD_INDEX=$FORCE_INDEX
		for _ipt in 4 6; do
			[ "$_ipt" == "4" ] && _ipt=$ipt_tmp
			[ "$_ipt" == "6" ] && _ipt=$ip6t_tmp
			# Skip when a rule for this address:port already exists.
			$_ipt -n -L PSW_OUTPUT | grep -q "${address}:${port}"
			if [ $? -ne 0 ]; then
				unset dst_rule
				local dst_rule=$(REDIRECT 1 MARK)
				msg2="按规则路由(${msg})"
				# In the nat table, re-route to the local redirect port instead.
				[ "$_ipt" == "$ipt_m" -o "$_ipt" == "$ip6t_m" ] || {
					dst_rule=$(REDIRECT $_port)
					msg2="套娃使用(${msg}:${port} -> ${_port})"
				}
				# Default: connect to the node directly (RETURN).
				[ -n "$_proxy" ] && [ "$_proxy" == "1" ] && [ -n "$_port" ] || {
					# NOTE(review): chain name "PSW_OUT_PUT" differs from
					# "PSW_OUTPUT" used elsewhere — possible typo; confirm.
					ADD_INDEX=$(RULE_LAST_INDEX "$_ipt" PSW_OUT_PUT "$IPSET_VPSIPLIST" $FORCE_INDEX)
					dst_rule=" -j RETURN"
					msg2="直连代理"
				}
				$_ipt -I PSW_OUTPUT $ADD_INDEX $(comment "${address}:${port}") -p $stream -d $address --dport $port $dst_rule 2>/dev/null
			else
				msg2="已配置过的节点,"
			fi
		done
		msg="[$?]$(echo ${2} | tr 'a-z' 'A-Z')${msg2}使用链${ADD_INDEX},节点(${type}):${address}:${port}"
		#echolog " - ${msg}"
	}
	local proxy_protocol=$(config_n_get $proxy_node protocol)
	local proxy_type=$(echo $(config_n_get $proxy_node type nil) | tr 'A-Z' 'a-z')
	[ "$proxy_type" == "nil" ] && echolog " - 节点配置不正常,略过!:${proxy_node}" && return 0
	if [ "$proxy_protocol" == "_balancing" ]; then
		#echolog " - 多节点负载均衡(${proxy_type})..."
		# Balancing node: add a rule for every member node.
		proxy_node=$(config_n_get $proxy_node balancing_node)
		for _node in $proxy_node; do
			filter_rules "$_node" "$stream"
		done
	elif [ "$proxy_protocol" == "_shunt" ]; then
		#echolog " - 按请求目的地址分流(${proxy_type})..."
		# Shunt node: prefer the main node, else a real (non-direct) default.
		local default_node=$(config_n_get $proxy_node default_node _direct)
		local main_node=$(config_n_get $proxy_node main_node nil)
		if [ "$main_node" != "nil" ]; then
			filter_rules $main_node $stream
		else
			if [ "$default_node" != "_direct" ] && [ "$default_node" != "_blackhole" ]; then
				filter_rules $default_node $stream
			fi
		fi
	# The ":<<!" ... "!" span below is a heredoc no-op, i.e. disabled code
	# that used to add rules for every shunt branch.
	:<<!
	local default_node_address=$(get_host_ip ipv4 $(config_n_get $default_node address) 1)
	local default_node_port=$(config_n_get $default_node port)
	local shunt_ids=$(uci show $CONFIG | grep "=shunt_rules" | awk -F '.' '{print $2}' | awk -F '=' '{print $1}')
	for shunt_id in $shunt_ids; do
		#local shunt_proxy=$(config_n_get $proxy_node "${shunt_id}_proxy" 0)
		local shunt_proxy=0
		local shunt_node=$(config_n_get $proxy_node "${shunt_id}" nil)
		[ "$shunt_node" != "nil" ] && {
			[ "$shunt_proxy" == 1 ] && {
				local shunt_node_address=$(get_host_ip ipv4 $(config_n_get $shunt_node address) 1)
				local shunt_node_port=$(config_n_get $shunt_node port)
				[ "$shunt_node_address" == "$default_node_address" ] && [ "$shunt_node_port" == "$default_node_port" ] && {
					shunt_proxy=0
				}
			}
			filter_rules "$(config_n_get $proxy_node $shunt_id)" "$stream" "$shunt_proxy" "$proxy_port"
		}
	done
!
	else
		#echolog " - 普通节点(${proxy_type})..."
		filter_rules "$proxy_node" "$stream"
	fi
}
# Hijack local DNS: force every UDP/53 query passing through PSW to the
# router's own resolver on port 53.
dns_hijack() {
	local hijack_rule="-p udp --dport 53 -j REDIRECT --to-ports 53"
	$ipt_n -I PSW $hijack_rule
	echolog "强制转发本机DNS端口 UDP/53 的请求[$?]"
}
# Build every ipset and iptables/ip6tables chain used for transparent
# proxying: create+populate the sets, set up PSW/PSW_OUTPUT in nat and mangle,
# install policy-routing for fwmark 1, wire up router-self TCP/UDP proxying,
# then load the per-client ACLs.
add_firewall_rule() {
	echolog "开始加载防火墙规则..."
	# IPv4 sets (idempotent via `-!`).
	ipset -! create $IPSET_LANIPLIST nethash
	ipset -! create $IPSET_VPSIPLIST nethash
	ipset -! create $IPSET_SHUNTLIST nethash
	ipset -! create $IPSET_GFW nethash
	ipset -! create $IPSET_CHN nethash
	ipset -! create $IPSET_BLACKLIST nethash
	ipset -! create $IPSET_BLACKLIST2 nethash
	ipset -! create $IPSET_BLACKLIST3 nethash
	ipset -! create $IPSET_WHITELIST nethash
	# IPv6 counterparts.
	ipset -! create $IPSET_LANIPLIST_6 nethash family inet6
	ipset -! create $IPSET_VPSIPLIST_6 nethash family inet6
	ipset -! create $IPSET_SHUNTLIST_6 nethash family inet6
	ipset -! create $IPSET_GFW6 nethash family inet6
	ipset -! create $IPSET_CHN6 nethash family inet6
	ipset -! create $IPSET_BLACKLIST_6 nethash family inet6
	ipset -! create $IPSET_BLACKLIST2_6 nethash family inet6
	ipset -! create $IPSET_BLACKLIST3_6 nethash family inet6
	ipset -! create $IPSET_WHITELIST_6 nethash family inet6
	# Populate the shunt sets from every configured shunt_rules section
	# (IPv4 pass then IPv6 pass, selected by regex).
	local shunt_ids=$(uci show $CONFIG | grep "=shunt_rules" | awk -F '.' '{print $2}' | awk -F '=' '{print $1}')
	for shunt_id in $shunt_ids; do
		config_n_get $shunt_id ip_list | tr -s "\r\n" "\n" | sed -e "/^$/d" | grep -E "(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}" | sed -e "s/^/add $IPSET_SHUNTLIST &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	done
	for shunt_id in $shunt_ids; do
		config_n_get $shunt_id ip_list | tr -s "\r\n" "\n" | sed -e "/^$/d" | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "s/^/add $IPSET_SHUNTLIST_6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	done
	# Bulk-load rule files into the sets via `ipset -R` (restore format).
	cat $RULES_PATH/chnroute | sed -e "/^$/d" | sed -e "s/^/add $IPSET_CHN &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	cat $RULES_PATH/proxy_ip | sed -e "/^$/d" | grep -E "(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}" | sed -e "s/^/add $IPSET_BLACKLIST &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	[ -f "$RULES_PATH/proxy_ip2" ] && cat $RULES_PATH/proxy_ip2 | grep -E "(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_BLACKLIST2 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	[ -f "$RULES_PATH/proxy_ip3" ] && cat $RULES_PATH/proxy_ip3 | grep -E "(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_BLACKLIST3 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	cat $RULES_PATH/direct_ip | sed -e "/^$/d" | grep -E "(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}" | sed -e "s/^/add $IPSET_WHITELIST &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	cat $RULES_PATH/chnroute6 | sed -e "/^$/d" | sed -e "s/^/add $IPSET_CHN6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	cat $RULES_PATH/proxy_ip | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_BLACKLIST_6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	[ -f "$RULES_PATH/proxy_ip2" ] && cat $RULES_PATH/proxy_ip2 | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_BLACKLIST2_6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	[ -f "$RULES_PATH/proxy_ip3" ] && cat $RULES_PATH/proxy_ip3 | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_BLACKLIST3_6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	cat $RULES_PATH/direct_ip | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | sed -e "/^$/d" | sed -e "s/^/add $IPSET_WHITELIST_6 &/g" | awk '{print $0} END{print "COMMIT"}' | ipset -! -R
	# Seed the LAN set; failure here means ipset itself is unusable.
	ipset -! -R <<-EOF
		$(gen_laniplist | sed -e "s/^/add $IPSET_LANIPLIST /")
	EOF
	[ $? -eq 0 ] || {
		echolog "系统不兼容,终止执行!"
		return 1
	}
	ipset -! -R <<-EOF
		$(gen_laniplist_6 | sed -e "s/^/add $IPSET_LANIPLIST_6 /")
	EOF
	# [ $? -eq 0 ] || {
	# 	echolog "系统不兼容IPv6,终止执行!"
	# 	return 1
	# }
	# Keep LAN-interface subnets (v4 and v6) direct.
	local lan_ifname lan_ip lan_ip6
	lan_ifname=$(uci -q -p /var/state get network.lan.ifname)
	[ -n "$lan_ifname" ] && {
		lan_ip=$(ip address show $lan_ifname | grep -w "inet" | awk '{print $2}')
		lan_ip6=$(ip address show $lan_ifname | grep -w "inet6" | awk '{print $2}')
		#echolog "本机IPv4网段互访直连:${lan_ip}"
		#echolog "本机IPv6网段互访直连:${lan_ip6}"
		[ -n "$lan_ip" ] && ipset -! -R <<-EOF
			$(echo $lan_ip | sed -e "s/ /\n/g" | sed -e "s/^/add $IPSET_LANIPLIST /")
		EOF
		[ -n "$lan_ip6" ] && ipset -! -R <<-EOF
			$(echo $lan_ip6 | sed -e "s/ /\n/g" | sed -e "s/^/add $IPSET_LANIPLIST_6 /")
		EOF
	}
	# Never proxy the ISP-assigned IPv4 DNS servers.
	local ISP_DNS=$(cat $RESOLVFILE 2>/dev/null | grep -E -o "[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+" | sort -u | grep -v 0.0.0.0 | grep -v 127.0.0.1)
	[ -n "$ISP_DNS" ] && {
		#echolog "处理 ISP DNS 例外..."
		for ispip in $ISP_DNS; do
			ipset -! add $IPSET_WHITELIST $ispip >/dev/null 2>&1 &
			#echolog " - 追加到白名单:${ispip}"
		done
	}
	# Same for the IPv6 DNS servers.
	# FIX: the original tested/iterated $ISP_DNS here instead of $ISP_DNS6,
	# so IPv6 resolvers were never whitelisted (and v4 addresses were fed
	# into the inet6 set).
	local ISP_DNS6=$(cat $RESOLVFILE 2>/dev/null | grep -E "([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}" | awk -F % '{print $1}' | awk -F " " '{print $2}'| sort -u )
	[ -n "$ISP_DNS6" ] && {
		#echolog "处理 ISP IPv6 DNS 例外..."
		for ispip6 in $ISP_DNS6; do
			ipset -! add $IPSET_WHITELIST_6 $ispip6 >/dev/null 2>&1 &
			#echolog " - 追加到白名单:${ispip6}"
		done
	}
	# Keep all proxy-node server IPs direct (async, output discarded).
	filter_vpsip > /dev/null 2>&1 &
	filter_haproxy > /dev/null 2>&1 &
	# PSW / PSW_OUTPUT chains, nat table (REDIRECT path): bail out early for
	# LAN, node servers, whitelist, and already-marked packets.
	$ipt_n -N PSW
	$ipt_n -A PSW $(dst $IPSET_LANIPLIST) -j RETURN
	$ipt_n -A PSW $(dst $IPSET_VPSIPLIST) -j RETURN
	$ipt_n -A PSW $(dst $IPSET_WHITELIST) -j RETURN
	$ipt_n -A PSW -m mark --mark 0xff -j RETURN
	$ipt_n -N PSW_OUTPUT
	$ipt_n -A PSW_OUTPUT $(dst $IPSET_LANIPLIST) -j RETURN
	$ipt_n -A PSW_OUTPUT $(dst $IPSET_VPSIPLIST) -j RETURN
	$ipt_n -A PSW_OUTPUT $(dst $IPSET_WHITELIST) -j RETURN
	$ipt_n -A PSW_OUTPUT -m mark --mark 0xff -j RETURN
	# Same chains in the mangle table (TPROXY path).
	$ipt_m -N PSW
	$ipt_m -A PSW $(dst $IPSET_LANIPLIST) -j RETURN
	$ipt_m -A PSW $(dst $IPSET_VPSIPLIST) -j RETURN
	$ipt_m -A PSW $(dst $IPSET_WHITELIST) -j RETURN
	$ipt_m -A PSW -m mark --mark 0xff -j RETURN
	$ipt_m -N PSW_OUTPUT
	$ipt_m -A PSW_OUTPUT $(dst $IPSET_LANIPLIST) -j RETURN
	$ipt_m -A PSW_OUTPUT $(dst $IPSET_VPSIPLIST) -j RETURN
	$ipt_m -A PSW_OUTPUT $(dst $IPSET_WHITELIST) -j RETURN
	$ipt_m -A PSW_OUTPUT -m mark --mark 0xff -j RETURN
	# Policy routing: fwmark 1 packets are delivered to the local TPROXY.
	ip rule add fwmark 1 lookup 100
	ip route add local 0.0.0.0/0 dev lo table 100
	local NODE_TYPE=$(echo $(config_n_get $TCP_NODE type) | tr 'A-Z' 'a-z')
	local ipv6_tproxy=$(config_t_get global_other ipv6_tproxy 0)
	# Quote the operands so the test is well-formed when a value is empty.
	if [ "$NODE_TYPE" == "xray" ] && [ "$ipv6_tproxy" == "1" ]; then
		PROXY_IPV6=1
		echolog "节点类型:$NODE_TYPE,开启实验性IPv6透明代理(TProxy)..."
	else
		# FIX: the original `[ $enble_ipv6=="1" ]` is a one-word test that is
		# always true (and references an undefined variable); only warn when
		# the user actually requested IPv6 TProxy.
		[ "$ipv6_tproxy" == "1" ] && echolog "节点类型:$NODE_TYPE,暂不支持IPv6透明代理(TProxy)..."
	fi
	#$ip6t_n -N PSW
	#$ip6t_n -A PREROUTING -j PSW
	#$ip6t_n -N PSW_OUTPUT
	#$ip6t_n -A OUTPUT -p tcp -j PSW_OUTPUT
	# IPv6 mangle chains are always created; actual proxy rules are gated on
	# PROXY_IPV6 further below.
	$ip6t_m -N PSW
	$ip6t_m -A PSW $(dst $IPSET_LANIPLIST_6) -j RETURN
	$ip6t_m -A PSW $(dst $IPSET_VPSIPLIST_6) -j RETURN
	$ip6t_m -A PSW $(dst $IPSET_WHITELIST_6) -j RETURN
	$ip6t_m -A PSW -m mark --mark 0xff -j RETURN
	$ip6t_m -A PREROUTING -j PSW
	$ip6t_m -N PSW_OUTPUT
	$ip6t_m -A PSW_OUTPUT $(dst $IPSET_LANIPLIST_6) -j RETURN
	$ip6t_m -A PSW_OUTPUT $(dst $IPSET_VPSIPLIST_6) -j RETURN
	$ip6t_m -A PSW_OUTPUT $(dst $IPSET_WHITELIST_6) -j RETURN
	$ip6t_m -A PSW_OUTPUT -m mark --mark 0xff -j RETURN
	$ip6t_m -A OUTPUT -j PSW_OUTPUT
	ip -6 rule add fwmark 1 table 100
	ip -6 route add local ::/0 dev lo table 100
	# Router-self TCP proxying.
	if [ "$TCP_NODE" != "nil" ]; then
		local ipt_tmp=$ipt_n
		local dns_l="PSW_OUTPUT"
		local dns_r=$(REDIRECT $TCP_REDIR_PORT)
		local blist_r=$(REDIRECT $TCP_REDIR_PORT)
		local p_r=$(get_redirect_ipt $LOCALHOST_TCP_PROXY_MODE $TCP_REDIR_PORT)
		TCP_NODE_TYPE=$(echo $(config_n_get $TCP_NODE type) | tr 'A-Z' 'a-z')
		echolog "加载路由器自身 TCP 代理..."
		# brook client mode cannot use REDIRECT — switch to mangle/TPROXY.
		if [ "$TCP_NODE_TYPE" == "brook" ] && [ "$(config_n_get $TCP_NODE protocol client)" == "client" ]; then
			echolog " - 启用 TPROXY 模式"
			ipt_tmp=$ipt_m
			dns_l="PSW"
			dns_r="$(REDIRECT $TCP_REDIR_PORT TPROXY)"
			blist_r=$(REDIRECT 1 MARK)
			p_r=$(get_redirect_ipt $LOCALHOST_TCP_PROXY_MODE 1 MARK)
		fi
		# Force upstream DNS over the TCP proxy unless the server is on LAN.
		_proxy_tcp_access() {
			[ -n "${2}" ] || return 0
			ipset -q test $IPSET_LANIPLIST ${2}
			[ $? -eq 0 ] && {
				echolog " - 上游 DNS 服务器 ${2} 已在直接访问的列表中,不强制向 TCP 代理转发对该服务器 TCP/${3} 端口的访问"
				return 0
			}
			local ADD_INDEX=$FORCE_INDEX
			$ipt_tmp -I $dns_l $ADD_INDEX -p tcp -d ${2} --dport ${3} $dns_r
			[ "$ipt_tmp" == "$ipt_m" ] && $ipt_tmp -I PSW_OUTPUT $ADD_INDEX -p tcp -d ${2} --dport ${3} $(REDIRECT 1 MARK)
			echolog " - [$?]将上游 DNS 服务器 ${2}:${3} 加入到路由器自身代理的 TCP 转发链${ADD_INDEX}"
		}
		[ "$use_tcp_node_resolve_dns" == 1 ] && hosts_foreach DNS_FORWARD _proxy_tcp_access 53
		$ipt_tmp -A OUTPUT -p tcp -j PSW_OUTPUT
		[ "$TCP_NO_REDIR_PORTS" != "disable" ] && {
			$ipt_tmp -A PSW_OUTPUT -p tcp -m multiport --dport $TCP_NO_REDIR_PORTS -j RETURN
			$ipt_tmp -A PSW_OUTPUT -p tcp -m multiport --sport $TCP_NO_REDIR_PORTS -j RETURN
			$ip6t_m -A PSW_OUTPUT -p tcp -m multiport --dport $TCP_NO_REDIR_PORTS -j RETURN
			$ip6t_m -A PSW_OUTPUT -p tcp -m multiport --sport $TCP_NO_REDIR_PORTS -j RETURN
			echolog " - [$?]不代理TCP 端口:$TCP_NO_REDIR_PORTS"
		}
		# 1.2.3.4 is the fake-IP probe address; then shunt/blacklist/mode rules.
		$ipt_tmp -A PSW_OUTPUT -p tcp -d 1.2.3.4 $blist_r
		$ipt_tmp -A PSW_OUTPUT -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_SHUNTLIST) $blist_r
		$ipt_tmp -A PSW_OUTPUT -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_BLACKLIST) $blist_r
		$ipt_tmp -A PSW_OUTPUT -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $p_r
		if [ "$PROXY_IPV6" == "1" ]; then
			$ip6t_m -A PSW_OUTPUT -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_SHUNTLIST_6) $(REDIRECT 1 MARK)
			$ip6t_m -A PSW_OUTPUT -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_BLACKLIST_6) $(REDIRECT 1 MARK)
			$ip6t_m -A PSW_OUTPUT -p tcp $(factor $TCP_REDIR_PORTS "-m multiport --dport") $(get_redirect_ip6t $LOCALHOST_TCP_PROXY_MODE 1 MARK)
		fi
	fi
	# Hook PSW into nat PREROUTING, after adbyby's chain when present.
	local PR_INDEX=$(RULE_LAST_INDEX "$ipt_n" PREROUTING ADBYBY)
	if [ "$PR_INDEX" == "0" ]; then
		PR_INDEX=$(RULE_LAST_INDEX "$ipt_n" PREROUTING prerouting_rule)
	else
		echolog "发现 adbyby 规则链,adbyby 规则优先..."
	fi
	PR_INDEX=$((PR_INDEX + 1))
	$ipt_n -I PREROUTING $PR_INDEX -p tcp -j PSW
	echolog "使用链表 PREROUTING 排列索引${PR_INDEX}[$?]"
	# if [ "$PROXY_IPV6" == "1" ]; then
	# 	local msg="IPv6 配置不当,无法代理"
	# 	$ip6t_n -A PSW -p tcp $(REDIRECT $TCP_REDIR_PORT)
	# 	$ip6t_n -A PSW_OUTPUT -p tcp $(REDIRECT $TCP_REDIR_PORT)
	# 	msg="${msg},转发 IPv6 TCP 流量到节点[$?]"
	# 	echolog "$msg"
	# fi
	# Apply filter_node to every node used by an enabled Socks instance.
	[ "$SOCKS_ENABLED" = "1" ] && {
		local ids=$(uci show $CONFIG | grep "=socks" | awk -F '.' '{print $2}' | awk -F '=' '{print $1}')
		#echolog "分析 Socks 服务所使用节点..."
		local id enabled node port msg num
		for id in $ids; do
			enabled=$(config_n_get $id enabled 0)
			[ "$enabled" == "1" ] || continue
			node=$(config_n_get $id node nil)
			port=$(config_n_get $id port 0)
			msg="Socks 服务 [:${port}]"
			if [ "$node" == "nil" ] || [ "$port" == "0" ]; then
				msg="${msg} 未配置完全,略过"
			elif [ "$(echo $node | grep ^tcp)" ]; then
				#eval "node=\${TCP_NODE}"
				#msg="${msg} 使用与 TCP 代理自动切换${num} 相同的节点,延后处理"
				continue
			else
				filter_node $node TCP > /dev/null 2>&1 &
				filter_node $node UDP > /dev/null 2>&1 &
			fi
			#echolog " - ${msg}"
		done
	}
	# Apply filter_node to the active TCP/UDP nodes (UDP may alias TCP's node).
	local node port stream switch
	for stream in TCP UDP; do
		eval "node=\${${stream}_NODE}"
		eval "port=\${${stream}_REDIR_PORT}"
		#echolog "分析 $stream 代理自动切换..."
		[ "$node" == "tcp" ] && [ "$stream" == "UDP" ] && {
			eval "node=\${TCP_NODE}"
			eval "port=\${TCP_REDIR_PORT}"
		}
		if [ "$node" != "nil" ]; then
			filter_node $node $stream $port > /dev/null 2>&1 &
		fi
	done
	# Router-self UDP proxying (always TPROXY in the mangle table).
	if [ "$UDP_NODE" != "nil" ]; then
		echolog "加载路由器自身 UDP 代理..."
		local UDP_NODE_TYPE=$(echo $(config_n_get $UDP_NODE type) | tr 'A-Z' 'a-z')
		local ADD_INDEX=$FORCE_INDEX
		_proxy_udp_access() {
			[ -n "${2}" ] || return 0
			ipset -q test $IPSET_LANIPLIST ${2}
			[ $? == 0 ] && {
				echolog " - 上游 DNS 服务器 ${2} 已在直接访问的列表中,不强制向 UDP 代理转发对该服务器 UDP/${3} 端口的访问"
				return 0
			}
			$ipt_m -I PSW $ADD_INDEX -p udp -d ${2} --dport ${3} $(REDIRECT $UDP_REDIR_PORT TPROXY)
			$ipt_m -I PSW_OUTPUT $ADD_INDEX -p udp -d ${2} --dport ${3} $(REDIRECT 1 MARK)
			echolog " - [$?]将上游 DNS 服务器 ${2}:${3} 加入到路由器自身代理的 UDP 转发链${ADD_INDEX}"
		}
		[ "$use_udp_node_resolve_dns" == 1 ] && hosts_foreach DNS_FORWARD _proxy_udp_access 53
		$ipt_m -A OUTPUT -p udp -j PSW_OUTPUT
		[ "$UDP_NO_REDIR_PORTS" != "disable" ] && {
			$ipt_m -A PSW_OUTPUT -p udp -m multiport --dport $UDP_NO_REDIR_PORTS -j RETURN
			$ipt_m -A PSW_OUTPUT -p udp -m multiport --sport $UDP_NO_REDIR_PORTS -j RETURN
			$ip6t_m -A PSW_OUTPUT -p udp -m multiport --dport $UDP_NO_REDIR_PORTS -j RETURN
			$ip6t_m -A PSW_OUTPUT -p udp -m multiport --sport $UDP_NO_REDIR_PORTS -j RETURN
			echolog " - [$?]不代理 UDP 端口:$UDP_NO_REDIR_PORTS"
		}
		$ipt_m -A PSW_OUTPUT -p udp -d 1.2.3.4 $(REDIRECT 1 MARK)
		$ipt_m -A PSW_OUTPUT -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_SHUNTLIST) $(REDIRECT 1 MARK)
		$ipt_m -A PSW_OUTPUT -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_BLACKLIST) $(REDIRECT 1 MARK)
		$ipt_m -A PSW_OUTPUT -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(get_redirect_ipt $LOCALHOST_UDP_PROXY_MODE 1 MARK)
		if [ "$PROXY_IPV6" == "1" ]; then
			$ip6t_m -A PSW_OUTPUT -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_SHUNTLIST_6) $(REDIRECT 1 MARK)
			$ip6t_m -A PSW_OUTPUT -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(dst $IPSET_BLACKLIST_6) $(REDIRECT 1 MARK)
			$ip6t_m -A PSW_OUTPUT -p udp $(factor $UDP_REDIR_PORTS "-m multiport --dport") $(get_redirect_ip6t $LOCALHOST_UDP_PROXY_MODE 1 MARK)
		fi
	fi
	$ipt_m -A PREROUTING -j PSW
	# Load per-client access-control rules.
	load_acl
	# dns_hijack "force"
	echolog "防火墙规则加载完成!"
}
# Tear down everything add_firewall_rule created: remove the jump rules from
# the built-in chains (repeatedly, in case of duplicate insertions), flush and
# delete the PSW chains, drop the policy routes, then destroy the LAN/VPS/
# whitelist ipsets. The commented-out sets are intentionally preserved so a
# reload does not have to re-download/rebuild them.
del_firewall_rule() {
	# Number of PSW jump rules currently in nat PREROUTING; used as the
	# iteration count for deleting every jump rule below.
	ib_nat_exist=$($ipt_n -nL PREROUTING | grep -c PSW)
	if [ ! -z "$ib_nat_exist" ];then
		until [ "$ib_nat_exist" = 0 ]
		do
			# Errors are ignored: some of these jumps may not exist.
			$ipt_n -D PREROUTING -p tcp -j PSW 2>/dev/null
			$ipt_n -D OUTPUT -p tcp -j PSW_OUTPUT 2>/dev/null
			$ipt_m -D PREROUTING -j PSW 2>/dev/null
			$ipt_m -D OUTPUT -p tcp -j PSW_OUTPUT 2>/dev/null
			$ipt_m -D OUTPUT -p udp -j PSW_OUTPUT 2>/dev/null
			#$ip6t_n -D PREROUTING -j PSW 2>/dev/null
			#$ip6t_n -D OUTPUT -p tcp -j PSW_OUTPUT 2>/dev/null
			$ip6t_m -D PREROUTING -j PSW 2>/dev/null
			$ip6t_m -D OUTPUT -j PSW_OUTPUT 2>/dev/null
			ib_nat_exist=$(expr $ib_nat_exist - 1)
		done
	fi
	# Flush (-F) then delete (-X) each custom chain; flush must succeed first.
	$ipt_n -F PSW 2>/dev/null && $ipt_n -X PSW 2>/dev/null
	$ipt_n -F PSW_OUTPUT 2>/dev/null && $ipt_n -X PSW_OUTPUT 2>/dev/null
	$ipt_m -F PSW 2>/dev/null && $ipt_m -X PSW 2>/dev/null
	$ipt_m -F PSW_OUTPUT 2>/dev/null && $ipt_m -X PSW_OUTPUT 2>/dev/null
	#$ip6t_n -F PSW 2>/dev/null && $ip6t_n -X PSW 2>/dev/null
	#$ip6t_n -F PSW_OUTPUT 2>/dev/null && $ip6t_n -X PSW_OUTPUT 2>/dev/null
	$ip6t_m -F PSW 2>/dev/null && $ip6t_m -X PSW 2>/dev/null
	$ip6t_m -F PSW_OUTPUT 2>/dev/null && $ip6t_m -X PSW_OUTPUT 2>/dev/null
	# Remove the fwmark-1 policy routing (v4 and v6).
	ip rule del fwmark 1 lookup 100 2>/dev/null
	ip route del local 0.0.0.0/0 dev lo table 100 2>/dev/null
	ip -6 rule del fwmark 1 table 100 2>/dev/null
	ip -6 route del local ::/0 dev lo table 100 2>/dev/null
	# Destroy only the cheap-to-rebuild sets; destruction runs in background.
	ipset -F $IPSET_LANIPLIST >/dev/null 2>&1 && ipset -X $IPSET_LANIPLIST >/dev/null 2>&1 &
	ipset -F $IPSET_VPSIPLIST >/dev/null 2>&1 && ipset -X $IPSET_VPSIPLIST >/dev/null 2>&1 &
	#ipset -F $IPSET_SHUNTLIST >/dev/null 2>&1 && ipset -X $IPSET_SHUNTLIST >/dev/null 2>&1 &
	#ipset -F $IPSET_GFW >/dev/null 2>&1 && ipset -X $IPSET_GFW >/dev/null 2>&1 &
	#ipset -F $IPSET_CHN >/dev/null 2>&1 && ipset -X $IPSET_CHN >/dev/null 2>&1 &
	#ipset -F $IPSET_BLACKLIST >/dev/null 2>&1 && ipset -X $IPSET_BLACKLIST >/dev/null 2>&1 &
	#ipset -F $IPSET_BLACKLIST2 >/dev/null 2>&1 && ipset -X $IPSET_BLACKLIST2 >/dev/null 2>&1 &
	#ipset -F $IPSET_BLACKLIST3 >/dev/null 2>&1 && ipset -X $IPSET_BLACKLIST3 >/dev/null 2>&1 &
	ipset -F $IPSET_WHITELIST >/dev/null 2>&1 && ipset -X $IPSET_WHITELIST >/dev/null 2>&1 &
	ipset -F $IPSET_LANIPLIST_6 >/dev/null 2>&1 && ipset -X $IPSET_LANIPLIST_6 >/dev/null 2>&1 &
	ipset -F $IPSET_VPSIPLIST_6 >/dev/null 2>&1 && ipset -X $IPSET_VPSIPLIST_6 >/dev/null 2>&1 &
	#ipset -F $IPSET_SHUNTLIST_6 >/dev/null 2>&1 && ipset -X $IPSET_SHUNTLIST_6 >/dev/null 2>&1 &
	#ipset -F $IPSET_GFW6 >/dev/null 2>&1 && ipset -X $IPSET_GFW6 >/dev/null 2>&1 &
	#ipset -F $IPSET_CHN6 >/dev/null 2>&1 && ipset -X $IPSET_CHN6 >/dev/null 2>&1 &
	#ipset -F $IPSET_BLACKLIST_6 >/dev/null 2>&1 && ipset -X $IPSET_BLACKLIST_6 >/dev/null 2>&1 &
	#ipset -F $IPSET_BLACKLIST2_6 >/dev/null 2>&1 && ipset -X $IPSET_BLACKLIST2_6 >/dev/null 2>&1 &
	#ipset -F $IPSET_BLACKLIST3_6 >/dev/null 2>&1 && ipset -X $IPSET_BLACKLIST3_6 >/dev/null 2>&1 &
	ipset -F $IPSET_WHITELIST_6 >/dev/null 2>&1 && ipset -X $IPSET_WHITELIST_6 >/dev/null 2>&1 &
	echolog "删除相关防火墙规则完成。"
}
# Destroy every ipset owned by this script, IPv4 and IPv6 variants alike.
# Each set is flushed first and only then destroyed; like the original
# inline version, each pair runs as a background job.
flush_ipset() {
	local set_name
	for set_name in \
		$IPSET_LANIPLIST $IPSET_VPSIPLIST $IPSET_SHUNTLIST \
		$IPSET_GFW $IPSET_CHN \
		$IPSET_BLACKLIST $IPSET_BLACKLIST2 $IPSET_BLACKLIST3 \
		$IPSET_WHITELIST \
		$IPSET_LANIPLIST_6 $IPSET_VPSIPLIST_6 $IPSET_SHUNTLIST_6 \
		$IPSET_GFW6 $IPSET_CHN6 \
		$IPSET_BLACKLIST_6 $IPSET_BLACKLIST2_6 $IPSET_BLACKLIST3_6 \
		$IPSET_WHITELIST_6; do
		ipset -F $set_name >/dev/null 2>&1 && ipset -X $set_name >/dev/null 2>&1 &
	done
}
# Reset the firewall include file to an empty shell-script stub.
flush_include() {
	printf '#!/bin/sh\n' >$FWI
}
# Rebuild the firewall include file so that a firewall reload re-runs this
# service (the include simply reloads passwall).
gen_include() {
	flush_include
	echo "/etc/init.d/passwall reload" >>$FWI
	return 0
}
# Install all firewall rules, then write the include file so the rules are
# re-applied on firewall reloads.
start() {
	add_firewall_rule
	gen_include
}
# Remove every rule/chain this script created and reset the include file.
stop() {
	del_firewall_rule
	flush_include
}
# Command dispatcher: the script is invoked as `<script> <action>`.
# Unknown actions are silently ignored.
case $1 in
flush_ipset)
	flush_ipset
	;;
stop)
	stop
	;;
start)
	start
	;;
*) ;;
esac
|
<gh_stars>0
import {ItemNode} from '../models/node/itemNode'
import {Project} from "../models/node/project";
import {Epic} from "../models/node/epic";
import {Issue} from "../models/node/issue";
import {SubTask} from "../models/node/subTask";
import {Commit} from "../models/commit";
import {ItemNodeType} from "../models/node/itemNodeType";
import {LoggedHours} from "../models/loggedHours";
/**
* @param {!{projects: !Array<number>, issues: !Array<number>, authors1: !Array<string>, authors2: !Array<string>}} data
* @param {!{projects: !Array<!Project>, issues: !Array<!Issue>}} generatedData
* @returns {!ItemNode}
*/
/**
 * Builds a filtered ItemNode tree (root -> projects -> epics/issues -> sub-tasks)
 * from the generated data, keeping only the projects/issues selected in `data`
 * and only commits/logged hours authored by `data.authors1`, then aggregates
 * per-node sums bottom-up.
 * NOTE(review): `data.authors2` is declared but never read here — confirm
 * whether it should also filter something.
 * @param {!{projects: !Array<number>, issues: !Array<number>, authors1: !Array<string>, authors2: !Array<string>}} data
 * @param {!{projects: !Array<!Project>, issues: !Array<!Issue>}} generatedData
 * @returns {!ItemNode} the root node ("All projects") with sums filled in
 */
export function selecteItemNodes(data, generatedData) {
  /**
   * @type {ItemNodeType}
   */
  const itemNodeType = ItemNodeType;
  /**
   * Converts all raw issues/epics into ItemNode instances keyed by nodeId.
   * @param allIssues
   * @return {!Object<string, !ItemNode>}
   */
  function getAllIssue(allIssues) {
    const nodeItemsMapper = {};
    allIssues.forEach(issueData => {
      convertNode(issueData, nodeItemsMapper);
    });
    return nodeItemsMapper;
  }
  /**
   * Walks parent links up to the topmost ancestor.
   * @param {!ItemNode} item
   */
  function getHighestIssueParent(item) {
    return item.parentNode ? getHighestIssueParent(item.parentNode) : item;
  }
  /**
   * Converts one raw node (issue or epic) and registers it in the mapper.
   * Unknown node types yield null and are not registered.
   * @param {!Object} rawNode
   * @param {!Object<string, !ItemNode>} nodeItemsMapper
   */
  function convertNode(rawNode, nodeItemsMapper) {
    let itemNode = null;
    if (rawNode.type === itemNodeType.ISSUE) {
      itemNode = convertIssue(rawNode, nodeItemsMapper);
    } else if (rawNode.type === itemNodeType.EPIC) {
      itemNode = convertEpic(rawNode, nodeItemsMapper);
    }
    if (itemNode) {
      convertLoggedHours(itemNode, rawNode);
      nodeItemsMapper[itemNode.nodeId] = itemNode;
    }
    return itemNode;
  }
  /**
   * Copies logged hours from the raw node, keeping only authors1 entries,
   * and pre-computes their sum on the node.
   * @param {!ItemNode} itemNode
   * @param {!Object} rawNode
   */
  function convertLoggedHours(itemNode, rawNode) {
    itemNode.loggedHours = rawNode.loggedHours
      .filter(lH => data.authors1.includes(lH.authorName))
      .map(lH => new LoggedHours(lH.hours, lH.authorName));
    itemNode.loggedHoursSum = itemNode.loggedHours.reduce((currentSum, loggedHour) => loggedHour.hours + currentSum, 0);
  }
  /**
   * Converts a raw issue and its sub-tasks (no-op if already converted).
   * @param {!Object} rawIssue
   * @param {!Object<string, !ItemNode>} nodeItemsMapper
   * @returns {Issue}
   */
  function convertIssue(rawIssue, nodeItemsMapper) {
    if (!nodeItemsMapper[rawIssue.nodeId]) {
      let issue = new Issue(rawIssue.nodeId);
      issue.children = rawIssue.children.map(sT => {
        const subTask = new SubTask(sT.nodeId, issue);
        nodeItemsMapper[sT.nodeId] = subTask;
        convertLoggedHours(subTask, sT);
        return subTask;
      });
      return issue;
    }
  }
  /**
   * Converts a raw epic and (recursively) its children (no-op if already
   * converted).
   * @param {!Object} rawEpic
   * @param {!Object<string, !ItemNode>} nodeItemsMapper
   * @returns {Epic}
   */
  function convertEpic(rawEpic, nodeItemsMapper) {
    if (!nodeItemsMapper[rawEpic.nodeId]) {
      let epic = new Epic(rawEpic.nodeId);
      epic.children = rawEpic.children.map(c => {
        let child = nodeItemsMapper[c.nodeId];
        if (!child) {
          child = convertNode(c, nodeItemsMapper);
          child.parentNode = epic;
        }
        return child;
      });
      return epic;
    }
  }
  /**
   * Builds Project nodes for the selected project ids, copying only commits
   * authored by authors1.
   * @returns {!Array<!Project>}
   */
  function getSelectedProjectsWithCommits() {
    return generatedData.projects
      .filter(project => data.projects.some(id => id === project.nodeId))
      .map(p => {
        const project = new Project(p.nodeId, p.name);
        project.commits = p.commits
          .filter(c => data.authors1.includes(c.authorName))
          .map(c => {
            const commit = new Commit(c.commitId, c.node, project, c.authorName, c.linesAdded, c.linesDeleted);
            commit.files = c.files;
            commit.build = c.build;
            return commit;
          });
        return project;
      });
  }
  /**
   * Attaches each project commit to its referenced issue and makes the
   * issues' topmost ancestors the project's children.
   * @param {!Project} project
   * @param {!Object<string, !ItemNode>} nodeItemsMapper
   */
  function addCommitsToIssues(project, nodeItemsMapper) {
    const projectIssues = [];
    project.commits.forEach(commit => {
      const issue = nodeItemsMapper[commit.node.nodeId];
      if (issue) {
        issue.setCommit(commit);
        commit.node = issue;
        const highestParent = getHighestIssueParent(issue);
        if (!projectIssues.includes(highestParent)) {
          projectIssues.push(highestParent);
        }
      }
    });
    project.children = projectIssues;
  }
  /**
   * Re-points each direct child of the project at the project itself.
   * @param {!Project} project
   * @param {!Object<string, !ItemNode>} nodeItemsMapper
   */
  function updateDirectProjectChildrenParent(project, nodeItemsMapper) {
    project.children.forEach(child => child.parentNode = project);
  }
  /**
   * Bottom-up aggregation of commits, touched files, logged hours and line
   * counts; prunes nodes with no work recorded.
   * @param {!ItemNode} itemNode
   */
  function updateChildrenSum(itemNode) {
    itemNode.children.forEach(updateChildrenSum);
    itemNode.commitsIncludingChildren = itemNode.children
      .reduce((currentCommits, child) => child.commitsIncludingChildren.concat(currentCommits), itemNode.commits);
    itemNode.allFilesTouched = itemNode.commitsIncludingChildren
      .reduce((currentFiles, commit) => {
        commit.files.forEach(file => {
          if (!currentFiles.includes(file)) {
            currentFiles.push(file);
          }
        });
        return currentFiles;
      }, []);
    itemNode.loggedHoursSumWithChilds = itemNode.children
      .reduce((currentSum, child) => child.loggedHoursSumWithChilds + currentSum, itemNode.loggedHoursSum);
    itemNode.commitsReferencedIncludeChildren = itemNode.children
      .reduce((currentSum, child) => child.commitsReferencedIncludeChildren + currentSum, itemNode.commitsReferenced);
    itemNode.linesAddedIncludeChildren = itemNode.children
      .reduce((currentSum, child) => child.linesAddedIncludeChildren + currentSum, itemNode.linesAdded);
    itemNode.linesDeletedIncludeChildren = itemNode.children
      .reduce((currentSum, child) => child.linesDeletedIncludeChildren + currentSum, itemNode.linesDeleted);
    // Drop nodes with neither line changes nor logged hours from their parent.
    if (itemNode.linesAddedIncludeChildren + itemNode.linesDeletedIncludeChildren === 0 || itemNode.loggedHoursSumWithChilds === 0) {
      if (itemNode.parentNode) {
        itemNode.parentNode.children = itemNode.parentNode.children.filter(c => c !== itemNode);
      }
    }
  }
  /**
   * Keeps only the approved issues plus all their ancestors and descendants.
   * @param {!Object<string, !ItemNode>} nodeItemsMapper
   */
  function filterApprovedNodeIds(nodeItemsMapper) {
    const mapper = {};
    data.issues.forEach(issueId => {
      mapper[issueId] = nodeItemsMapper[issueId];
      let parent = mapper[issueId].parentNode;
      while (parent) {
        mapper[parent.nodeId] = parent;
        // Fix: walk up via parentNode. The original assigned `parent.nodeId`,
        // which broke the ancestor walk after one step and inserted an
        // `undefined` key into the mapper.
        parent = parent.parentNode;
      }
      setApprovedChildrens(mapper, mapper[issueId].children);
    });
    return mapper;
  }
  /**
   * Recursively registers every descendant in the approval mapper.
   * @param {!Object<string, !ItemNode>} mapper
   * @param {!Array<!ItemNode>} itemNodes
   */
  function setApprovedChildrens(mapper, itemNodes) {
    itemNodes.forEach(itemNode => {
      setApprovedChildrens(mapper, itemNode.children);
      mapper[itemNode.nodeId] = itemNode;
    });
  }
  /**
   * Orchestrates the full pipeline and returns the synthetic root node.
   * @return {!ItemNode}
   */
  function extractFilteredNodes() {
    const projects = getSelectedProjectsWithCommits();
    const nodeItemsMapper = filterApprovedNodeIds(getAllIssue(generatedData.issues));
    projects.forEach(project => addCommitsToIssues(project, nodeItemsMapper));
    projects.forEach(project => updateDirectProjectChildrenParent(project, nodeItemsMapper));
    projects.forEach(project => project.commits = []);
    const root = new ItemNode(-1, null, [], projects);
    root.name = "All projects";
    root.type = "-";
    projects.forEach(p => p.parentNode = root);
    updateChildrenSum(root);
    return root;
  }
  return extractFilteredNodes();
}
// logged hours
|
<filename>lib/mochizuki/logging.rb
# frozen_string_literal: true
require 'logger'
module Mochizuki
  # Mixin that equips any including class/module with a class-level logger.
  module Logging
    # Ruby include hook: whoever includes Logging is extended with
    # ClassMethods, exposing `.logger` at the class level.
    def self.included(host)
      host.extend ClassMethods
    end

    module ClassMethods
      # Lazily-created logger writing to standard output; built once and
      # reused on subsequent calls.
      def logger
        @logger = Logger.new($stdout) if @logger.nil?
        @logger
      end
    end
  end
end

module Mochizuki
  # Including Logging here gives the Mochizuki module itself a logger.
  include Logging
end
|
#!/usr/bin/env bash
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Restrict files created by this launcher: no group write, no world access.
umask 027
# Print the launcher's usage text to stdout and terminate with exit code 1.
showUsage() {
	cat <<USAGE
Usage: $0 <mode> <name> <max-memory> "<vmarg-list>" <app-classname> <app-args>... 
 <mode>: fg run as foreground process in current shell
 bg run as background process in new shell
 debug run as foreground process in current shell in debug mode (suspend=n)
 debug-suspend run as foreground process in current shell in debug mode (suspend=y)
 NOTE: for all debug modes environment variable DEBUG_ADDRESS may be set to 
 override default debug address of 127.0.0.1:18001
 <name>: application name used for naming console window
 <max-memory>: maximum memory heap size in MB (e.g., 768M or 2G). Use empty "" if default
 should be used. This will generally be upto 1/4 of the physical memory available
 to the OS.
 <vmarg-list>: pass-thru args (e.g., "-Xmx512M -Dmyvar=1 -DanotherVar=2") - use
 empty "" if vmargs not needed
 <app-classname>: application classname (e.g., ghidra.GhidraRun )
 <app-args>...: arguments to be passed to the application
 
 Example:
 $0 debug Ghidra 768M "" ghidra.GhidraRun
USAGE
	exit 1
}
# ---- Positional argument parsing -------------------------------------------
# $1=mode, $2=application name, $3=max heap, $4=quoted vmarg list,
# $5=main class; everything after that is passed through to the application.
VMARG_LIST=
ARGS=()
INDEX=0
WHITESPACE="[[:space:]]"
for AA in "$@"
do
	INDEX=$(expr $INDEX + 1)
	case "$INDEX" in
		1)
			MODE=$AA
			;;
		2)
			APPNAME=$AA
			;;
		3)
			MAXMEM=$AA
			;;
		4)
			if [ "$AA" != "" ]; then
				VMARG_LIST=$AA
			fi
			;;
		5)
			CLASSNAME=$AA
			;;
		*)
			# Preserve quoted arguments
			if [[ $AA =~ $WHITESPACE ]]; then
				AA="\"$AA\""
			fi
			ARGS[${#ARGS[@]}]=$AA
			;;
	esac
done
# Verify that required number of args were provided
if [[ ${INDEX} -lt 5 ]]; then
	echo "Incorrect launch usage - missing argument(s)"
	showUsage
	exit 1
fi
# ---- Locate the installation layout ----------------------------------------
# launch.properties alongside this script means a packaged release; otherwise
# we are running from a development checkout built by Eclipse.
SUPPORT_DIR="${0%/*}"
if [ -f "${SUPPORT_DIR}/launch.properties" ]; then
	# Production Environment
	INSTALL_DIR="${SUPPORT_DIR}/.."
	CPATH="${INSTALL_DIR}/Ghidra/Framework/Utility/lib/Utility.jar"
	LS_CPATH="${SUPPORT_DIR}/LaunchSupport.jar"
	DEBUG_LOG4J="${SUPPORT_DIR}/debug.log4j.xml"
else
	# Development Environment
	INSTALL_DIR="${SUPPORT_DIR}/../../../.."
	CPATH="${INSTALL_DIR}/Ghidra/Framework/Utility/bin/main"
	LS_CPATH="${INSTALL_DIR}/GhidraBuild/LaunchSupport/bin/main"
	DEBUG_LOG4J="${INSTALL_DIR}/Ghidra/RuntimeScripts/Common/support/debug.log4j.xml"
	if ! [ -d "${LS_CPATH}" ]; then
		echo "Ghidra cannot launch in development mode because Eclipse has not compiled its class files."
		exit 1
	fi
fi
# Make sure some kind of java is on the path. It's required to run the LaunchSupport program.
if ! [ -x "$(command -v java)" ] ; then
	echo "Java runtime not found. Please refer to the Ghidra Installation Guide's Troubleshooting section."
	exit 1
fi
# Get the JDK that will be used to launch Ghidra
JAVA_HOME="$(java -cp "${LS_CPATH}" LaunchSupport "${INSTALL_DIR}" -jdk_home -save)"
if [ ! $? -eq 0 ]; then
	# No JDK has been setup yet. Let the user choose one.
	java -cp "${LS_CPATH}" LaunchSupport "${INSTALL_DIR}" -jdk_home -ask
	# Now that the user chose one, try again to get the JDK that will be used to launch Ghidra
	JAVA_HOME="$(java -cp "${LS_CPATH}" LaunchSupport "${INSTALL_DIR}" -jdk_home -save)"
	if [ ! $? -eq 0 ]; then
		echo
		echo "Failed to find a supported JDK. Please refer to the Ghidra Installation Guide's Troubleshooting section."
		exit 1
	fi
fi
# ---- Assemble the JVM command line -----------------------------------------
JAVA_CMD="${JAVA_HOME}/bin/java"
# Get the configurable VM arguments from the launch properties
VMARG_LIST+=" $(java -cp "${LS_CPATH}" LaunchSupport "${INSTALL_DIR}" -vmargs)"
# Add extra macOS VM arguments
if [ "$(uname -s)" = "Darwin" ]; then
	VMARG_LIST+=" -Xdock:name=${APPNAME}"
fi
# Set Max Heap Size if specified
if [ "${MAXMEM}" != "" ]; then
	VMARG_LIST+=" -Xmx${MAXMEM}"
fi
# Resolve the launch mode: debug modes attach a JDWP agent (suspend=y for
# debug-suspend), bg defers to background execution below.
BACKGROUND=false
if [ "${MODE}" = "debug" ] || [ "${MODE}" = "debug-suspend" ]; then
	SUSPEND=n
	if [ "${DEBUG_ADDRESS}" = "" ]; then
		DEBUG_ADDRESS=127.0.0.1:18001
	fi
	if [ "${MODE}" = "debug-suspend" ]; then
		SUSPEND=y
	fi
	VMARG_LIST+=" -Dlog4j.configuration=\"${DEBUG_LOG4J}\""
	VMARG_LIST+=" -agentlib:jdwp=transport=dt_socket,server=y,suspend=${SUSPEND},address=${DEBUG_ADDRESS}"
elif [ "${MODE}" = "fg" ]; then
	:
elif [ "${MODE}" = "bg" ]; then
	BACKGROUND=true
else
	echo "Incorrect launch usage - invalid launch mode: ${MODE}"
	exit 1
fi
# ---- Launch the JVM ---------------------------------------------------------
# Background mode detaches and suppresses output; foreground mode propagates
# the JVM's exit status.
if [ "${BACKGROUND}" = true ]; then
	eval "\"${JAVA_CMD}\" ${VMARG_LIST} -showversion -cp \"${CPATH}\" ghidra.GhidraLauncher ${CLASSNAME} ${ARGS[@]}" &>/dev/null &
	# If our process dies immediately, output something so the user knows to run in debug mode.
	# Otherwise they'll never see any error output from background mode.
	# Doing a kill -0 sends a no-op signal, which can be used to see if the process is still alive.
	PID=$!
	sleep 1
	if ! kill -0 ${PID} &>/dev/null; then
		echo "Exited with error. Run in foreground (fg) mode for more details."
		exit 1
	fi
	exit 0
else
	eval "\"${JAVA_CMD}\" ${VMARG_LIST} -showversion -cp \"${CPATH}\" ghidra.GhidraLauncher ${CLASSNAME} ${ARGS[@]}"
	exit $?
fi
|
#!/bin/sh
# CocoaPods-generated framework embedding script (top-level setup).
# NOTE(review): despite the #!/bin/sh shebang this uses bash features
# (`function`, `set -o pipefail`, `trap ... ERR`, arrays) — relies on
# /bin/sh being bash, as on macOS build hosts; confirm for other platforms.
set -e
set -u
set -o pipefail

# Report the failing line number when any command errors out.
function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

# Allow opting in to parallel code-signing; default off.
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
BCSYMBOLMAP_DIR="BCSymbolMaps"

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
# Locates the framework (in BUILT_PRODUCTS_DIR or at the given path), installs
# any bundled .bcsymbolmap files, rsyncs the framework into the target's
# Frameworks folder, strips architectures not being built, and re-signs it.
install_framework()
{
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi
  # NOTE(review): when none of the branches above matched, "source" is unset
  # and the expansion below aborts under `set -u` -- confirm callers always
  # pass a readable framework path.

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then
    # Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied
    find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do
      echo "Installing $f"
      install_bcsymbolmap "$f" "$destination"
      rm "$f"
    done
    rmdir "${source}/${BCSYMBOLMAP_DIR}"
  fi

  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"

  # The copied binary may live at the bundle root, or be a symlink inside it.
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  elif [ -L "${binary}" ]; then
    echo "Destination binary is symlinked..."
    dirname="$(dirname "${binary}")"
    binary="${dirname}/$(readlink "${binary}")"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies and strips a vendored dSYM
# Mirrors install_framework: rsync the dSYM bundle into DERIVED_FILES_DIR,
# strip architectures not being built, then move the result into
# DWARF_DSYM_FOLDER_PATH (or touch a placeholder when stripping failed).
install_dsym() {
  local source="$1"
  warn_missing_arch=${2:-true}
  if [ -r "$source" ]; then
    # Copy the dSYM into the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"

    local basename
    basename="$(basename -s .dSYM "$source")"
    binary_name="$(ls "$source/Contents/Resources/DWARF")"
    binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"

    # Strip invalid architectures from the dSYM.
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary" "$warn_missing_arch"
    fi

    if [[ $STRIP_BINARY_RETVAL == 0 ]]; then
      # Move the stripped file into its final destination.
      # (Fixed: the echoed command previously logged "${basename}.framework.dSYM",
      # which did not match the "${basename}.dSYM" path the rsync below copies.)
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      mkdir -p "${DWARF_DSYM_FOLDER_PATH}"
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
    fi
  fi
}
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# Strip invalid architectures
# Removes from "binary" (in place, via lipo) every architecture that is not in
# the build's ARCHS list. Sets STRIP_BINARY_RETVAL to 1 when the binary shares
# no architecture with the build (optionally warning), 0 otherwise.
strip_invalid_archs() {
  binary="$1"
  warn_missing_arch=${2:-true}
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    if [[ "$warn_missing_arch" == "true" ]]; then
      echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    fi
    STRIP_BINARY_RETVAL=1
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=0
}
# Copies the bcsymbolmap files of a vendored framework
# Copies a single .bcsymbolmap file into BUILT_PRODUCTS_DIR.
install_bcsymbolmap() {
  local bcsymbolmap_path="$1"
  local destination="${BUILT_PRODUCTS_DIR}"
  # Fixed: the inner quotes of the logged command are now escaped, matching the
  # sibling functions; previously they terminated the string and the echoed
  # command did not reflect what is executed below.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
# No-ops when no identity is configured or code signing is disabled/forbidden
# by the build settings.
code_sign_if_enabled() {
  # Chained [ ] && [ ] replaces the obsolescent `-a` operator (ShellCheck
  # SC2166); ${CODE_SIGNING_ALLOWED:-} also tolerates the variable being
  # unset, which would otherwise abort the script under `set -u`.
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" ] && [ "${CODE_SIGNING_REQUIRED:-}" != "NO" ] && [ "${CODE_SIGNING_ALLOWED:-}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"

    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      # Run signing in the background; the end of the script waits for all jobs.
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Embed the pods frameworks; the Debug and Release lists are identical, so a
# single combined condition covers both configurations.
if [[ "$CONFIGURATION" == "Debug" || "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/Yoga/yoga.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/YogaKit/YogaKit.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  # Background codesign jobs may have been spawned; wait for all of them.
  wait
fi
|
#!/usr/bin/env bash
# Download the FiraCode font family into the user's local font directory and
# refresh the font cache.
fonts_dir="${HOME}/.local/share/fonts"
if [ ! -d "${fonts_dir}" ]; then
    echo "mkdir -p $fonts_dir"
    mkdir -p "${fonts_dir}"
else
    echo "Found fonts dir $fonts_dir"
fi
for type in Bold Light Medium Regular Retina; do
    # Reuse ${fonts_dir} instead of re-spelling the path (the two spellings
    # could previously drift apart).
    file_path="${fonts_dir}/FiraCode-${type}.ttf"
    file_url="https://github.com/tonsky/FiraCode/blob/master/distr/ttf/FiraCode-${type}.ttf?raw=true"
    if [ ! -e "${file_path}" ]; then
        echo "wget -O $file_path $file_url"
        wget -O "${file_path}" "${file_url}"
    else
        echo "Found existing file $file_path"
    fi;
done
# Rebuild the font cache so newly installed fonts are picked up immediately.
echo "fc-cache -f"
fc-cache -f
|
<reponame>Crazywolfovo/ioDemo
package com.ioDemo.nio.NIOWebServer.connector;
/**
 * Shared HTTP/1.1 string constants and response helpers for the NIO web
 * server connector.
 */
public class ConnectorUtils {

    public static final String PROTOCOL = "HTTP/1.1";
    public static final String CARRIAGE = "\r";
    public static final String NEWLINE = "\n";
    public static final String SPACE = " ";

    /**
     * Renders an HTTP status line followed by the blank line terminating the
     * response headers, e.g. {@code "HTTP/1.1 200 OK\r\n\r\n"}.
     */
    public static String renderStatus(HttpStatus status) {
        StringBuilder line = new StringBuilder();
        line.append(PROTOCOL)
            .append(SPACE)
            .append(status.getStatus())
            .append(SPACE)
            .append(status.getMsg())
            .append(CARRIAGE).append(NEWLINE)
            .append(CARRIAGE).append(NEWLINE);
        return line.toString();
    }
}
|
<gh_stars>0
package com.mrh0.createaddition.blocks.chunkloader;
/*
import java.util.List;
import com.simibubi.create.foundation.tileEntity.SmartTileEntity;
import com.simibubi.create.foundation.tileEntity.TileEntityBehaviour;
import net.minecraft.world.level.block.entity.BlockEntityType;
public class ChunkLoaderTileEntity extends SmartTileEntity {
public ChunkLoaderTileEntity(BlockEntityType<?> tileEntityTypeIn) {
super(tileEntityTypeIn);
}
@Override
public void addBehaviours(List<TileEntityBehaviour> behaviours) {
}
}
*/ |
// Sorts `arr` in place by comparing each element's value at `key`;
// returns the same (mutated) array, like Array.prototype.sort.
const sortByKey = (arr, key) =>
  arr.sort((left, right) => {
    if (left[key] > right[key]) return 1;
    if (left[key] < right[key]) return -1;
    return 0;
  });
#!/usr/bin/env bash
#source venv/bin/activate
# Retry the database deploy until it succeeds, then hand the process over to
# gunicorn (`exec` replaces this shell so signals reach the server directly).
while true; do
    # Test the command's exit status directly instead of comparing the
    # stringified "$?" afterwards.
    if flask deploy; then
        break
    fi
    echo Deploy command failed, retrying in 5 secs...
    sleep 5
done
exec gunicorn -b :5000 --access-logfile - --error-logfile - flasky:app
|
# Clean previous build artifacts, then register and fetch all git submodules.
bash clean.sh
git submodule init
git submodule update
|
#!/bin/bash
# after installing xsettingsd or gnome-settings-daemon on kde
# gtk applications may have their buttons missing
# Restore the standard window-button layout (minimize/maximize/close, right side).
gsettings set org.gnome.desktop.wm.preferences button-layout ':minimize,maximize,close'
|
<reponame>quintel/etengine
# Read-only inspection pages for Input records.
class Inspect::InputsController < Inspect::BaseController
  layout 'application'

  before_action :find_input, only: [:show]

  # Lists every input, ordered by key.
  def index
    @inputs = Input.all.sort_by { |input| input.key }
  end

  # Renders a single input; @input is loaded by the before_action.
  def show
  end

  private

  # Looks up the input by id, rendering the not-found page when it is unknown.
  def find_input
    @input = Input.get(params[:id]) || render_not_found('input')
  end
end
|
//#####################################################################
// Copyright 2002-2007, <NAME>, <NAME>, <NAME>, <NAME>.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
// Class CONSERVATION_ENO_RF
//#####################################################################
#include <PhysBAM_Tools/Grids_Uniform_Advection/ADVECTION_SEPARABLE_UNIFORM.h>
#include <PhysBAM_Tools/Grids_Uniform_Arrays/ARRAYS_ND.h>
#include <PhysBAM_Fluids/PhysBAM_Compressible/Conservation_Law_Solvers/CONSERVATION_ENO_RF.h>
#include <PhysBAM_Fluids/PhysBAM_Compressible/Conservation_Law_Solvers/EIGENSYSTEM.h>
#include <PhysBAM_Fluids/PhysBAM_Compressible/Conservation_Law_Solvers/CONSERVATION.h>
using namespace PhysBAM;
//#####################################################################
// Function Conservation_Solver
//#####################################################################
// psi is size (1,m) - U is size 3 by (-2,m+3) with 3 ghost cells - Fx is size 3 by (1,m)
// Dispatches on the runtime `order` member to the compile-time templated
// helper; only ENO orders 1-3 are supported.
// NOTE(review): outflow_boundaries is forwarded but U_flux is not passed to
// the helper -- confirm callers do not rely on it through this entry point.
template<class T_GRID,int d> void CONSERVATION_ENO_RF<T_GRID,d>::
Conservation_Solver(const int m,const T dx,const ARRAY<bool,VECTOR<int,1> >& psi,const ARRAY<TV_DIMENSION,VECTOR<int,1> >& U,ARRAY<TV_DIMENSION,VECTOR<int,1> >& Fx,EIGENSYSTEM<T,TV_DIMENSION>& eigensystem,EIGENSYSTEM<T,TV_DIMENSION>& eigensystem_explicit,
const VECTOR<bool,2>& outflow_boundaries,ARRAY<TV_DIMENSION,VECTOR<int,1> >* U_flux)
{
switch(order){
case 1:Conservation_Solver_Helper<1>(m,dx,psi,U,Fx,eigensystem,eigensystem_explicit,outflow_boundaries);break;
case 2:Conservation_Solver_Helper<2>(m,dx,psi,U,Fx,eigensystem,eigensystem_explicit,outflow_boundaries);break;
case 3:Conservation_Solver_Helper<3>(m,dx,psi,U,Fx,eigensystem,eigensystem_explicit,outflow_boundaries);break;
default:PHYSBAM_FATAL_ERROR();}
}
//#####################################################################
// Function Conservation_Solver_Helper
//#####################################################################
// 1D ENO conservation-law solver with Roe-Fix flux splitting:
// builds divided-difference tables of the state U and flux F, transforms them
// into the eigensystem's characteristic fields, reconstructs a numerical flux
// of order `eno_order` at each cell face, and differences the face fluxes
// into Fx for every active (psi) cell.
template<class T_GRID,int d> template<int eno_order> void CONSERVATION_ENO_RF<T_GRID,d>::
Conservation_Solver_Helper(const int m,const T dx,const ARRAY<bool,VECTOR<int,1> >& psi,const ARRAY<TV_DIMENSION,VECTOR<int,1> >& U,ARRAY<TV_DIMENSION,VECTOR<int,1> >& Fx,EIGENSYSTEM<T,TV_DIMENSION>& eigensystem,EIGENSYSTEM<T,TV_DIMENSION>& eigensystem_explicit,
const VECTOR<bool,2>& outflow_boundaries)
{
int k,i,j;
// divided differences
// DU/DF hold up to eno_order levels of divided differences per component.
ARRAY<VECTOR<T,eno_order> ,VECTOR<int,2> > DU(1,d,-2,m+3),DF(1,d,-2,m+3);
ARRAY<TV_DIMENSION,VECTOR<int,1> > F(-2,m+3);eigensystem.Flux(m,U,F);
for(i=-2;i<=m+3;i++) for(k=1;k<=d;k++){DU(k,i)(1)=U(i)(k);DF(k,i)(1)=F(i)(k);}
for(j=2;j<=eno_order;j++) for(k=1;k<=d;k++) for(i=-2;i<=m+4-j;i++){DU(k,i)(j)=(DU(k,i+1)(j-1)-DU(k,i)(j-1))/(j*dx);DF(k,i)(j)=(DF(k,i+1)(j-1)-DF(k,i)(j-1))/(j*dx);}
// calculate the fluxes
ARRAY<bool,VECTOR<int,1> > psi_ghost(0,m+1);ARRAY<bool,VECTOR<int,1> >::Put(psi,psi_ghost); // ghost points for the if statement below
ARRAY<TV_DIMENSION,VECTOR<int,1> > flux(0,m); // fluxes to the right of each point
ARRAY<T,VECTOR<int,1> > lambda(1,d),lambda_left(1,d),lambda_right(1,d);
MATRIX<T,d,d> L,R;
ARRAY<VECTOR<T,eno_order> ,VECTOR<int,2> > LDU(1,d,-2,m+3),LDF(1,d,-2,m+3);
ARRAY<VECTOR<T,eno_order> ,VECTOR<int,2> > *Dstate_ptr,*Dflux_ptr;
for(i=0;i<=m;i++) if(psi_ghost(i) || psi_ghost(i+1)){ // compute flux
// eigensystem
eigensystem.Eigenvalues(U,i,lambda,lambda_left,lambda_right);
if(!eigensystem.All_Eigenvalues_Same()){
eigensystem.Eigenvectors(U,i,L,R);
// transfer the divided differences into the characteristic fields
for(j=1;j<=eno_order;j++) for(int ii=i+1-j;ii<=i+1;ii++) if(ii >= -2 && ii <= m+4-j) for(k=1;k<=d;k++){
LDU(k,ii)(j)=LDF(k,ii)(j)=0;
for(int kk=1;kk<=d;kk++){LDU(k,ii)(j)+=L(k,kk)*DU(kk,ii)(j);LDF(k,ii)(j)+=L(k,kk)*DF(kk,ii)(j);}}
Dstate_ptr=&LDU;Dflux_ptr=&LDF;}
else{Dstate_ptr=&DU;Dflux_ptr=&DF;}
ARRAY<VECTOR<T,eno_order> ,VECTOR<int,2> > &Dstate=*Dstate_ptr,&Dflux=*Dflux_ptr;
// find a flux in each characteristic field
// When the left/right eigenvalues agree in sign the flux is upwinded;
// otherwise both one-sided fluxes are dissipated by alpha and averaged
// (the Roe-Fix entropy fix).
T flux_total;
if(eno_order == 1) for(k=1;k<=d;k++){
if(lambda_left(k)*lambda_right(k) > 0)
if(lambda(k) > 0) flux_total=Dflux(k,i)(1);
else flux_total=Dflux(k,i+1)(1);
else{
T alpha=Alpha(lambda_left,lambda_right,k,d);
T flux_left=Dflux(k,i)(1)+alpha*Dstate(k,i)(1);
T flux_right=Dflux(k,i+1)(1)-alpha*Dstate(k,i+1)(1);
flux_total=(T).5*(flux_left+flux_right);}
if(!eigensystem.All_Eigenvalues_Same()) for(int kk=1;kk<=d;kk++) flux(i)(kk)+=flux_total*R(k,kk);
else flux(i)(k)=flux_total;}
else if(eno_order == 2) for(k=1;k<=d;k++){
if(lambda_left(k)*lambda_right(k) > 0)
if(lambda(k) > 0) flux_total=ADVECTION_SEPARABLE_UNIFORM<GRID<TV>,T>::ENO(dx,Dflux(k,i)(1),Dflux(k,i-1)(2),Dflux(k,i)(2));
else flux_total=ADVECTION_SEPARABLE_UNIFORM<GRID<TV>,T>::ENO(dx,Dflux(k,i+1)(1),-Dflux(k,i+1)(2),-Dflux(k,i)(2));
else{
T alpha=Alpha(lambda_left,lambda_right,k,d);
T flux_left=ADVECTION_SEPARABLE_UNIFORM<GRID<TV>,T>::ENO(dx,Dflux(k,i)(1)+alpha*Dstate(k,i)(1),Dflux(k,i-1)(2)+alpha*Dstate(k,i-1)(2),Dflux(k,i)(2)+alpha*Dstate(k,i)(2));
T flux_right=ADVECTION_SEPARABLE_UNIFORM<GRID<TV>,T>::ENO(dx,Dflux(k,i+1)(1)-alpha*Dstate(k,i+1)(1),-(Dflux(k,i+1)(2)-alpha*Dstate(k,i+1)(2)),-(Dflux(k,i)(2)-alpha*Dstate(k,i)(2)));
flux_total=(T).5*(flux_left+flux_right);}
if(!eigensystem.All_Eigenvalues_Same()) for(int kk=1;kk<=d;kk++) flux(i)(kk)+=flux_total*R(k,kk);
else flux(i)(k)=flux_total;}
else if(eno_order == 3) for(k=1;k<=d;k++){
if(lambda_left(k)*lambda_right(k) > 0)
if(lambda(k) > 0) flux_total=ADVECTION_SEPARABLE_UNIFORM<GRID<TV>,T>::ENO(dx,Dflux(k,i)(1),Dflux(k,i-1)(2),Dflux(k,i)(2),Dflux(k,i-2)(3),Dflux(k,i-1)(3),Dflux(k,i)(3));
else flux_total=ADVECTION_SEPARABLE_UNIFORM<GRID<TV>,T>::ENO(dx,Dflux(k,i+1)(1),-Dflux(k,i+1)(2),-Dflux(k,i)(2),Dflux(k,i+1)(3),Dflux(k,i)(3),Dflux(k,i-1)(3));
else{
T alpha=Alpha(lambda_left,lambda_right,k,d);
T flux_left=ADVECTION_SEPARABLE_UNIFORM<GRID<TV>,T>::ENO(dx,Dflux(k,i)(1)+alpha*Dstate(k,i)(1),Dflux(k,i-1)(2)+alpha*Dstate(k,i-1)(2),Dflux(k,i)(2)+alpha*Dstate(k,i)(2),
Dflux(k,i-2)(3)+alpha*Dstate(k,i-2)(3),Dflux(k,i-1)(3)+alpha*Dstate(k,i-1)(3),Dflux(k,i)(3)+alpha*Dstate(k,i)(3));
T flux_right=ADVECTION_SEPARABLE_UNIFORM<GRID<TV>,T>::ENO(dx,Dflux(k,i+1)(1)-alpha*Dstate(k,i+1)(1),-(Dflux(k,i+1)(2)-alpha*Dstate(k,i+1)(2)),-(Dflux(k,i)(2)-alpha*Dstate(k,i)(2)),
Dflux(k,i+1)(3)-alpha*Dstate(k,i+1)(3),Dflux(k,i)(3)-alpha*Dstate(k,i)(3),Dflux(k,i-1)(3)-alpha*Dstate(k,i-1)(3));
flux_total=(T).5*(flux_left+flux_right);}
if(!eigensystem.All_Eigenvalues_Same()) for(int kk=1;kk<=d;kk++) flux(i)(kk)+=flux_total*R(k,kk);
else flux(i)(k)=flux_total;}}
// difference the fluxes
T one_over_dx=1/dx;
for(i=1;i<=m;i++) if(psi_ghost(i)) Fx(i)=(flux(i)-flux(i-1))*one_over_dx;
if(save_fluxes) for(i=0;i<=m;i++) if(psi_ghost(i) || psi_ghost(i+1)) flux_temp(i)=flux(i);
}
//#####################################################################
// Explicit template instantiations for the supported grid types and state
// dimensions 1-6; P(...) lets a comma-containing argument pass through the
// macro as a single parameter.
#define INSTANTIATION_HELPER(T_GRID) \
template class CONSERVATION_ENO_RF<T_GRID,1>; \
template class CONSERVATION_ENO_RF<T_GRID,2>; \
template class CONSERVATION_ENO_RF<T_GRID,3>; \
template class CONSERVATION_ENO_RF<T_GRID,4>; \
template class CONSERVATION_ENO_RF<T_GRID,5>; \
template class CONSERVATION_ENO_RF<T_GRID,6>;
#define P(...) __VA_ARGS__
INSTANTIATION_HELPER(P(GRID<VECTOR<float,1> >))
INSTANTIATION_HELPER(P(GRID<VECTOR<float,2> >))
INSTANTIATION_HELPER(P(GRID<VECTOR<float,3> >))
#ifndef COMPILE_WITHOUT_DOUBLE_SUPPORT
INSTANTIATION_HELPER(P(GRID<VECTOR<double,1> >))
INSTANTIATION_HELPER(P(GRID<VECTOR<double,2> >))
INSTANTIATION_HELPER(P(GRID<VECTOR<double,3> >))
#endif
|
// Manual Jest mock replacing the store hooks (presumably react-redux's
// useSelector/useDispatch -- confirm against the jest moduleNameMapper).
export const useSelector = jest.fn();
export const useDispatch = jest.fn();
|
<gh_stars>0
/**
 * Abstract observable product: holds a name and a price, and notifies the
 * registered clients whenever the price changes (Observer-pattern subject;
 * attach/detach/broadcast are left to concrete subclasses).
 *
 * @author (<NAME>)
 * @version (2018-05-14)
 */
public abstract class AbstractProduto
{
    // Product name.
    private String nome;
    // Product price; every change is broadcast to the registered clients.
    private int valor;

    public void setNome(String nome){
        this.nome = nome;
    }

    public String getNome(){
        return this.nome;
    }

    // Updates the price and immediately notifies all registered clients.
    public void setValor(int valor){
        this.valor = valor;
        this.notificar();
    }

    public int getValor(){
        return this.valor;
    }

    // Observer management: register, unregister and broadcast to clients.
    abstract void adicionar(AbstractCliente cliente);
    abstract void remover(AbstractCliente cliente);
    abstract void notificar();
}
|
<gh_stars>0
package fuzz
import (
"context"
"io"
"sync"
"time"
api "github.com/osrg/gobgp/v3/api"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Dumper is a fuzz worker that repeatedly dumps the global RIB through the
// gRPC ListPath API.
type Dumper struct {
	worker
}

// Compile-time check that *Dumper satisfies the Worker interface.
var _ Worker = (*Dumper)(nil)
// NewDumper builds a Dumper that runs `concurrent` goroutines against the
// given GoBGP client, dumping every 100ms until ctx is cancelled.
func NewDumper(ctx context.Context, client api.GobgpApiClient,
	concurrent int, logger *zap.SugaredLogger, name string, wg *sync.WaitGroup) *Dumper {
	w := worker{
		cli:      client,
		workers:  concurrent,
		ctx:      ctx,
		wg:       wg,
		interval: 100 * time.Millisecond,
		log:      logger,
		Name:     name,
	}
	return &Dumper{worker: w}
}
// Once performs a single ListPath dump of the global IPv4 unicast RIB and
// drains the response stream. Failures are logged; a Canceled status (the
// parent context ended) is only logged at debug level.
func (d *Dumper) Once() {
	ctx, cancel := context.WithTimeout(d.ctx, 10*time.Second)
	defer cancel()
	stream, err := d.cli.ListPath(ctx, &api.ListPathRequest{
		TableType: api.TableType_GLOBAL,
		Family: &api.Family{
			Afi:  api.Family_AFI_IP,
			Safi: api.Family_SAFI_UNICAST,
		},
		Name:     "",
		Prefixes: nil,
		SortType: api.ListPathRequest_PREFIX,
	})
	if err != nil {
		st, ok := status.FromError(err)
		if !ok {
			d.log.Errorf("failed to list rib: %v\n", err)
			return
		}
		if st.Code() == codes.Canceled {
			d.log.Debugf("grpc stub %s", st.Code())
			return
		}
		// Fixed: this path previously fell through to stream.Recv() with a
		// nil stream and panicked; always bail out when ListPath failed.
		d.log.Errorf("failed to list rib: %s", st.Code())
		return
	}
	for {
		_, err := stream.Recv()
		if err == io.EOF {
			break
		} else if err != nil {
			if st, ok := status.FromError(err); ok {
				d.log.Debugf("expected grpc return: %s", st.Code())
			}
			break
		}
	}
}
// Loop starts d.workers goroutines that each call Once on every tick of
// d.interval until the shared context is cancelled. Every goroutine is
// registered on the WaitGroup so callers can wait for a clean shutdown.
func (d *Dumper) Loop() {
	for i := 0; i < d.workers; i++ {
		d.wg.Add(1)
		go func(id int) {
			defer d.wg.Done()
			ticker := time.NewTicker(d.interval)
			// Release the ticker's resources on exit (previously leaked).
			defer ticker.Stop()
			for {
				select {
				case <-ticker.C:
					d.Once()
				case <-d.ctx.Done():
					d.log.Infof("context done returned from %s-%d", d.Name, id)
					return
				}
			}
		}(i)
	}
}
|
/**
* Copyright (c) 2012-2013 <NAME>
*
* This file is part of css.java.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.jknack.css.expression;
import static org.apache.commons.lang3.Validate.notNull;
/**
 * A CSS numeric value: a number plus an optional unit, e.g. {@code 12px},
 * {@code 1.5em}, {@code 50%}. The raw token is split into its numeric part
 * and unit suffix at construction time.
 */
public class NumberExpression extends AbstractExpression {

  /**
   * The CSS units recognised by this expression; {@link #NONE} is used for
   * plain numbers and unrecognised suffixes.
   */
  public enum Unit {
    NONE(""),

    PERCENTAGE("%"),

    EM("em"),

    EX("ex"),

    CH("ch"),

    REM("rem"),

    VW("vw"),

    VH("vh"),

    V_MIN("vmin"),

    V_MAX("vmax"),

    CM("cm"),

    MM("mm"),

    IN("in"),

    PX("px"),

    PT("pt"),

    PC("pc"),

    DEG("deg"),

    RAD("rad"),

    GRAD("grad"),

    TURN("turn"),

    SECONDS("s"),

    MILLIS("ms"),

    DPI("dpi"),

    DPCM("dpcm"),

    DPPX("dppx"),

    HZ("hz"),

    KHZ("khz");

    private String unit;

    private Unit(final String unit) {
      this.unit = notNull(unit, "The unit is required.");
    }

    public String unit() {
      return unit;
    }

    /**
     * Looks up a unit by its textual suffix, case-insensitively.
     * Returns {@link #NONE} for {@code null} or unknown suffixes
     * (equalsIgnoreCase(null) is simply false for every candidate).
     */
    public static Unit of(final String sunit) {
      for (Unit unit : values()) {
        if (unit.unit.equalsIgnoreCase(sunit)) {
          return unit;
        }
      }
      return NONE;
    }

    @Override
    public String toString() {
      return unit;
    }
  }

  private Unit unit;

  private Number number;

  public NumberExpression(final String value) {
    String[] parts = split(value);
    try {
      // Prefer the integral representation; fall back to double for "1.5" etc.
      number = Integer.parseInt(parts[0]);
    } catch (NumberFormatException ex) {
      number = Double.parseDouble(parts[0]);
    }
    this.unit = Unit.of(parts[1]);
  }

  /**
   * Splits a raw token into {numeric part, unit suffix}; the suffix slot is
   * {@code null} when the token is purely numeric. A leading sign is kept
   * with the numeric part so signed values such as {@code -5px} parse
   * correctly (previously the sign terminated the numeric part, producing an
   * empty number and an uncaught NumberFormatException).
   */
  private static String[] split(final String value) {
    int start = 0;
    if (value.length() > 0 && (value.charAt(0) == '-' || value.charAt(0) == '+')) {
      start = 1;
    }
    for (int i = start; i < value.length(); i++) {
      char ch = value.charAt(i);
      if (!Character.isDigit(ch) && ch != '.') {
        return new String[]{value.substring(0, i), value.substring(i) };
      }
    }
    return new String[]{value, null };
  }

  public Number number() {
    return number;
  }

  public Unit unit() {
    return unit;
  }

  @Override
  public String toString() {
    StringBuilder buffer = new StringBuilder();
    buffer.append(number);
    buffer.append(unit);
    return buffer.toString();
  }
}
|
var http = require('http');
var express = require('express');
var path = require('path');
var datalayer = require('./server/datalayer');
var bookkeeping = require('./server/bookkeeping');
var port = process.env.PORT || 8081;
var staticDir = process.env.STATIC_DIR;

// Wire the Mongo-backed data layer into the bookkeeping app, then serve the
// configured static directory alongside it.
var dataService = datalayer('mongodb://127.0.0.1:27017/bookkeeping-test');
var app = bookkeeping(dataService);
app.use(express.static(`${__dirname}/${staticDir}`));

http.createServer(app).listen(port, '0.0.0.0', function () {
  console.log(`Express server listening on port ${port} serving static content from ${staticDir}`);
});
|
awk -F '|' '{for(i=1;i<=NF;i++) s+=$i; print s; s=0}' c.txt |
def calculateAverageScore(scores):
    """Return the rounded mean of ``scores``.

    With four or more scores, the single lowest and single highest scores are
    dropped before averaging (trimmed mean); with fewer, all scores count.
    Raises ``ZeroDivisionError`` for an empty list, as before.
    """
    if len(scores) < 4:
        # Too few scores to trim outliers: plain rounded mean.
        return round(sum(scores) / len(scores))
    # Sort a copy: the original mutated the caller's list via scores.sort().
    trimmed = sorted(scores)[1:-1]
    return round(sum(trimmed) / len(trimmed))
public static <T extends Comparable<T>> T maxNumber(T n1, T n2)
{
if (n1.compareTo(n2) > 0)
return n1;
return n2;
} |
import moduleList from '../../../utils/module.js'
/**
 * Resolve a comma-separated id string to the matching group descriptors at
 * the requested level of the module tree.
 *
 * @param {String} progroupIds comma-separated group ids, e.g. "1,2,3"
 * @param {Number} num level to resolve: 2 = top-level groups, 3 = children
 * @returns {Array} matching descriptors; empty for any other level
 */
export function getGroup(progroupIds, num = 3) {
  const groupIds = progroupIds ? progroupIds.split(',').filter(item => item != '') : []
  let groupArray = []
  // Flatten every third-level group out of the module tree.
  moduleList.data.forEach(element => {
    if (element.children.length > 0) {
      groupArray = groupArray.concat(element.children)
    }
  })
  let filterGroupIds = []
  switch (num) {
    case 2: {
      // Braced so the lexical `const` is scoped to this case; the stray
      // console.log debug statement was removed.
      const matched = moduleList.data.filter(item => groupIds.indexOf(item.id.toString()) !== -1)
      matched.forEach(element => {
        filterGroupIds.push({ id: element.id, label: element.label })
      })
      break
    }
    case 3:
      filterGroupIds = groupArray.filter(item => groupIds.indexOf(item.id.toString()) !== -1)
      break
  }
  return filterGroupIds
}
/**
 * Return all third-level child groups of a second-level group.
 *
 * @param {*} groupId id of the second-level group (string or number)
 * @returns {Array} the group's children, or [] when the id is unknown
 */
export function getGroupChild (groupId) {
  const _idx = moduleList.data.findIndex(item => item.id === Number(groupId))
  // Guard the miss case: findIndex yields -1 and data[-1] is undefined,
  // which previously threw a TypeError on `.children`.
  if (_idx === -1) {
    return []
  }
  return moduleList.data[_idx].children
}
|
<gh_stars>0
import { keyframes } from 'styled-components';
// Props mixin for styled-components that can toggle their entrance animation.
export interface AnimateProps {
animate?: boolean;
}
// Slide in from 30vw left of the resting position while fading in; the
// element stays hidden for the first 20% of the animation.
export const slideInFromLeft = keyframes`
0% {
transform: translateX(-30vw);
visibility: hidden;
opacity: 0;
}
20% {
visibility: hidden;
opacity: 0;
}
100% {
visibility: visible;
opacity: 1;
transform: translateX(0vw);
}
`;
|
<reponame>zhaosiwen1949/malagu<gh_stars>0
import * as program from 'commander';
import { HookExecutor } from '../hook/hook-executor';
program
    .name('malagu deploy')
    .usage('[options]')
    // Fixed user-facing typo: "deploy a applicaton".
    .description('deploy an application')
    .parse(process.argv);

// Run the deploy hooks; report failures and set a non-zero exit code instead
// of leaving an unhandled promise rejection behind.
(async () => {
    const hookExecutor = new HookExecutor();
    await hookExecutor.executeDeployHooks();
})().catch(error => {
    console.error(error);
    process.exit(1);
});
|
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { MatButtonModule } from '@angular/material/button';
import { MatExpansionModule } from '@angular/material/expansion';
import { MatFormFieldModule } from '@angular/material/form-field';
import { MatIconModule } from '@angular/material/icon';
import { MatInputModule } from '@angular/material/input';
import { MatOptionModule } from '@angular/material/core';
import { MatProgressSpinnerModule } from '@angular/material/progress-spinner';
import { MatRadioModule } from '@angular/material/radio'
import { MatSelectModule } from '@angular/material/select';
import { ReactiveFormsModule, FormsModule } from '@angular/forms';
import { StoreModule } from '@ngrx/store';
import { EffectsModule } from '@ngrx/effects';
import { SidePanelComponent } from './side-panel.component';
import { SidePanelStoreEffects } from './store/effects';
import * as fromSidePanel from './store/reducers';
// Feature module for the side panel: declares and exports the component and
// registers its NgRx feature state ('sidePanel') and effects.
@NgModule({
  declarations: [
    SidePanelComponent
  ],
  imports: [
    CommonModule,
    FormsModule,
    MatButtonModule,
    MatExpansionModule,
    MatFormFieldModule,
    MatIconModule,
    MatInputModule,
    MatOptionModule,
    MatProgressSpinnerModule,
    MatRadioModule,
    MatSelectModule,
    ReactiveFormsModule,
    StoreModule.forFeature('sidePanel', fromSidePanel.reducer),
    EffectsModule.forFeature([SidePanelStoreEffects])
  ],
  exports: [
    SidePanelComponent
  ]
})
export class SidePanelModule { }
|
// 10539. 수빈이와 수열
// 2019.05.22
// 구현
#include<iostream>
#include<vector>
using namespace std;
int main()
{
int n;
cin >> n;
vector<int> v(n);
for (int i = 0; i < n; i++)
{
cin >> v[i];
}
vector<int> ans;
ans.push_back(v[0]);
int sum = v[0];
// 문제에서 주어진대로 수열의 값을 구한다.
for (int i = 1; i < n; i++)
{
int tmp = v[i] * (i + 1);
ans.push_back(tmp - sum);
sum += (tmp - sum);
}
for (int i = 0; i < n; i++)
{
cout << ans[i] << " ";
}
cout << endl;
return 0;
}
|
#!/usr/bin/env bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# If we fail for any reason a message will be displayed
# Prints all arguments as an error on stderr and aborts with status 1.
die() {
	echo "ERROR: $*" >&2
	exit 1
}
# Install the yq yaml query package from the mikefarah github repo
# Install via binary download, as we may not have golang installed at this point
function install_yq() {
	GOPATH=${GOPATH:-${HOME}/go}
	local yq_path="${GOPATH}/bin/yq"
	local yq_pkg="github.com/mikefarah/yq"
	# Nothing to do when a yq binary is already installed.
	[ -x "${GOPATH}/bin/yq" ] && return

	read -r -a sysInfo <<< "$(uname -sm)"

	case "${sysInfo[0]}" in
	"Linux" | "Darwin")
		# ${var,} lowercases the first character (bash 4+).
		goos="${sysInfo[0],}"
		;;
	*)
		# Fixed: the pattern was previously the quoted string "*", which only
		# matches a literal asterisk, so unsupported systems skipped this die.
		die "OS ${sysInfo[0]} not supported"
		;;
	esac

	case "${sysInfo[1]}" in
	"aarch64")
		goarch=arm64
		;;
	"ppc64le")
		goarch=ppc64le
		;;
	"x86_64")
		goarch=amd64
		;;
	"s390x")
		goarch=s390x
		;;
	*)
		# Fixed: same quoted-"*" pattern bug as the OS case above.
		die "Arch ${sysInfo[1]} not supported"
		;;
	esac

	mkdir -p "${GOPATH}/bin"

	# Check curl
	if ! command -v "curl" >/dev/null; then
		die "Please install curl"
	fi

	# Workaround to get latest release from github (to not use github token).
	# Get the redirection to latest release on github.
	yq_latest_url=$(curl -Ls -o /dev/null -w %{url_effective} "https://${yq_pkg}/releases/latest")
	# The redirected url should include the latest release version
	# https://github.com/mikefarah/yq/releases/tag/<VERSION-HERE>
	yq_version=$(basename "${yq_latest_url}")

	local yq_url="https://${yq_pkg}/releases/download/${yq_version}/yq_${goos}_${goarch}"
	curl -o "${yq_path}" -LSs ${yq_url}
	chmod +x ${yq_path}

	if ! command -v "${yq_path}" >/dev/null; then
		die "Cannot not get ${yq_path} executable"
	fi
}

install_yq
|
// Copyright 2017 The TIE Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Unit tests for TestSuite domain objects.
*/
// Jasmine specs for the TestSuite domain object: a suite wraps an id, a
// human-readable name and a list of TestCase objects built by
// TestCaseObjectFactory.
describe('TestSuiteObjectFactory', function() {
  var TestCaseObjectFactory;
  var TestSuiteObjectFactory;

  // Resolve both factories from the tieData module before each spec.
  beforeEach(module('tieData'));
  beforeEach(inject(function($injector) {
    TestSuiteObjectFactory = $injector.get('TestSuiteObjectFactory');
    TestCaseObjectFactory = $injector.get('TestCaseObjectFactory');
  }));

  describe('getId', function() {
    it('should correctly retrieve the ID of the test suite', function() {
      var suite = TestSuiteObjectFactory.create({
        id: 'ID',
        humanReadableName: 'human readable name',
        testCases: []
      });
      expect(suite.getId()).toBe('ID');
    });
  });

  describe('getHumanReadableName', function() {
    it('should correctly retrieve the human-readable name', function() {
      var suite = TestSuiteObjectFactory.create({
        id: 'ID',
        humanReadableName: 'human readable name',
        testCases: []
      });
      expect(suite.getHumanReadableName()).toBe('human readable name');
    });
  });

  describe('getTestCases', function() {
    it('should correctly retrieve the array of test cases', function() {
      var suite1 = TestSuiteObjectFactory.create({
        id: 'ID',
        humanReadableName: 'human readable name',
        testCases: []
      });
      expect(suite1.getTestCases()).toEqual([]);

      // Raw test-case dicts are wrapped into TestCaseObjectFactory instances.
      var suite2 = TestSuiteObjectFactory.create({
        id: 'ID',
        humanReadableName: 'human readable name',
        testCases: [{
          input: 'abc',
          allowedOutputs: ['def']
        }]
      });
      expect(suite2.getTestCases().length).toBe(1);
      expect(suite2.getTestCases()[0] instanceof TestCaseObjectFactory)
        .toBe(true);
      expect(suite2.getTestCases()[0].getInput()).toEqual('abc');
    });
  });
});
|
def find_max(arr):
    """Return the largest element of ``arr``, or ``None`` when it is empty.

    Uses the builtin ``max`` instead of the original hand-rolled scan; the
    result is identical for any non-empty sequence of comparable values.
    """
    if len(arr) == 0:
        return None
    return max(arr)
<filename>client/template.js
import Visualizer from './classes/visualizer'
// Shared visualizer settings plus the live particle list.
const state = {
  particles: [],         // active Particle instances, populated by init()
  count: 10,             // number of particles created by init()
  speed: 1.5,            // max initial velocity per axis
  size: 25,              // max initial particle radius
  hue: 0,                // current HSL hue; advanced by 10 on each beat
  saturation: 80,        // HSL saturation (%)
  lightness: 80,         // HSL lightness (%)
  colorRate: 1,          // NOTE(review): unused in the visible code -- confirm
  stroke: 'inverse',     // particle outline scheme: light|dark|match|inverse
  background: 'inverse', // page background scheme: light|dark|match|inverse
  shrink: false          // toggled each bar; flips vibrate() grow/shrink
}
// Audio-reactive particle visualizer: subscribes to the analysis events
// (tatum/segment/beat/bar/section) exposed by the Visualizer base class and
// drives the shared particle state.
export default class Template extends Visualizer {
  constructor() {
    super({ volumeSmoothing: 100 })
    // Populate state.particles for the current canvas size.
    init(this.sketch.width, this.sketch.height)
  }
  hooks() {
    this.sync.on('tatum', tatum => {
    })
    // Small size pulse on every segment, scaled by the current volume.
    this.sync.on('segment', segment => {
      state.particles.forEach(particle => {
        particle.vibrate(5, this.sync.volume, this.sketch.ctx)
      })
    })
    // Stronger pulse plus a 10-degree hue step on every beat.
    this.sync.on('beat', beat => {
      state.particles.forEach(particle => {
        particle.vibrate(10, this.sync.volume, this.sketch.ctx)
      })
      if (state.hue >= 360) {
        state.hue = 0
      }
      state.hue += 10
      this.sketch.ctx.fillStyle = `hsl(${state.hue}, ${state.saturation}%, ${state.lightness}%)`
    })
    // Each bar flips the grow/shrink direction used by vibrate().
    this.sync.on('bar', bar => {
      state.shrink = !state.shrink
      state.particles.forEach(particle => {
        particle.vibrate(5, this.sync.volume, this.sketch.ctx)
      })
    })
    this.sync.on('section', section => {
    })
  }
  paint({ ctx, height, width, now }) {
    // this.sync.volume
    // this.sync.tatum
    // this.sync.segment
    // this.sync.beat
    // this.sync.bar
    // this.sync.section
    // Page background: fixed, matching the current hue, or its inverse.
    if (state.background === 'light') {
      document.body.style.backgroundColor = 'white'
    }
    if (state.background === 'dark') {
      document.body.style.backgroundColor = 'black'
    }
    if (state.background === 'match') {
      document.body.style.backgroundColor = `hsl(${state.hue}, ${state.saturation}%, ${state.lightness}%)`
    }
    if (state.background === 'inverse') {
      document.body.style.backgroundColor = `hsl(${360 - state.hue}, ${100 - state.saturation}%, ${100 - state.lightness}%)`
    }
    // Advance and redraw every particle each frame. Note Particle.update is
    // declared with a single ctx parameter; the extra width/height arguments
    // here are ignored.
    state.particles.forEach(particle => {
      particle.update(ctx, width, height)
    })
  }
}
// A bouncing circle. Each particle caches the canvas bounds it was created
// with and reads global color/behavior settings from the module-level state.
class Particle {
  constructor(width, height) {
    this.x = Math.random() * width
    this.y = Math.random() * height
    this.canvasWidth = width
    this.canvasHeight = height
    this.radius = Math.random() * state.size
    this.speedX = Math.random() * state.speed
    this.speedY = Math.random() * state.speed
  }

  // Renders the particle with the shared hue and the configured stroke mode.
  draw(ctx) {
    // vibrate() can shrink the radius to or below zero; clamp before drawing.
    if (this.radius <= 0) {
      this.radius = 1
    }
    ctx.beginPath()
    ctx.arc(this.x, this.y, this.radius, 0, Math.PI * 2)
    ctx.fillStyle = `hsl(${state.hue}, ${state.saturation}%, ${state.lightness}%)`
    ctx.fill()
    if (state.stroke === 'light') {
      ctx.strokeStyle = 'white'
    }
    if (state.stroke === 'dark') {
      ctx.strokeStyle = 'black'
    }
    if (state.stroke === 'match') {
      ctx.strokeStyle = `hsl(${state.hue}, ${state.saturation}%, ${state.lightness}%)`
    }
    if (state.stroke === 'inverse') {
      ctx.strokeStyle = `hsl(${360 - state.hue}, ${100 - state.saturation}%, ${100 - state.lightness}%)`
    }
    ctx.stroke()
  }

  // Advances one step, bouncing off the cached canvas bounds, then redraws.
  update(ctx) {
    this.x += this.speedX
    this.y += this.speedY
    if (this.x + this.radius > this.canvasWidth
        || this.x - this.radius < 0) {
      this.speedX = -this.speedX
    }
    if (this.y + this.radius > this.canvasHeight
        || this.y - this.radius < 0) {
      this.speedY = -this.speedY
    }
    this.draw(ctx)
  }

  // Pulses the radius by amount * volume (shrinking while state.shrink is
  // set) and redraws immediately.
  // NOTE(review): ctx defaults to null, but update() ends in draw(ctx) which
  // dereferences it -- callers must always pass a real context. Confirm.
  vibrate(amount, volume = 1, ctx = null) {
    const sign = state.shrink ? -1 : 1
    this.radius += sign * amount * volume
    this.update(ctx)
  }
}
// Fills the shared pool with `state.count` particles sized to the canvas.
function init(width, height) {
  state.particles.push(
    ...Array.from({ length: state.count }, () => new Particle(width, height))
  )
}
// Clears the canvas, empties the particle pool and repopulates it.
// BUG FIX: the previous version referenced undefined module globals `ctx`
// and `canvas` (ReferenceError on first call) and invoked init() without
// dimensions. Callers must now supply the drawing context and canvas size.
// (No caller exists in this file; the function was unreachable/broken.)
function reInit(ctx, width, height) {
  ctx.clearRect(0, 0, width, height)
  state.particles.length = 0
  init(width, height)
}
import { push } from 'react-router-redux'
import api from '../../common/api'
import * as _ from 'lodash';
import { RESPONSE_CREATE_TASKS } from '../create'
import { RESPONSE_UPDATE_TASKS } from '../update'
import { RESPONSE_REMOVE_TASK, REQUEST_REMOVE_TASK } from '../remove'
// Action types owned by this task-list duck.
export const REQUEST_TASKS = 'TASKS/LIST/REQUEST_TASKS'
export const RESPONSE_TASKS = 'TASKS/LIST/RESPONSE_TASKS'
export const ERROR_TASKS = 'TASKS/LIST/ERROR_TASKS'

// Initial reducer state: empty list, loading flag raised until first fetch.
let initialState = {
  tasks: [],
  loading: true,
  error: null
}
/**
 * Task-list reducer: maintains the task collection plus loading/error flags.
 * Create/update/remove actions from the sibling ducks are folded in here so
 * the list stays consistent without a refetch.
 */
export default function reducer(state = initialState, action = {}) {
  // Returns `tasks` with any entry whose id matches `task.id` removed.
  const withoutTask = (tasks, task) => tasks.filter((t) => t.id !== task.id)

  switch (action.type) {
    case RESPONSE_CREATE_TASKS:
      return { ...state, tasks: [...state.tasks, action.payload] }
    case RESPONSE_UPDATE_TASKS:
      // Replace the stored task with the freshly updated payload.
      return { ...state, tasks: [...withoutTask(state.tasks, action.payload), action.payload] }
    case RESPONSE_REMOVE_TASK:
      return { ...state, tasks: withoutTask(state.tasks, action.payload), loading: false }
    case REQUEST_REMOVE_TASK:
    case REQUEST_TASKS:
      return { ...state, loading: true }
    case RESPONSE_TASKS:
      return { ...state, loading: false, tasks: action.payload };
    case ERROR_TASKS:
      return { ...state, loading: false, tasks: [], error: action.error }
    default:
      return state;
  }
}
// Thunk: fetches the task list from the API.
// On failure redirects to the root route and records the failure.
export const load = () => (dispatch) => {
  dispatch({ type: REQUEST_TASKS })
  const url = "task"
  api.get(url)
    .then((response) => {
      dispatch({ type: RESPONSE_TASKS, payload: response.data })
    })
    .catch((error) => {
      dispatch(push('/'));
      // BUG FIX: the caught error was discarded and replaced by the literal
      // string "error"; surface the real message (falling back to "error"
      // so existing consumers still get a non-empty string).
      dispatch({ type: ERROR_TASKS, error: (error && error.message) || "error" })
    })
}
// Thunk: navigate to the task-creation form.
export const goToCreate = () => (dispatch) => {
  dispatch(push('/task/new'))
}
// Thunk: navigate to the edit form for the task with the given id.
// (Dropped the unused second thunk argument -- getState was never used.)
export const goToEdit = (id) => (dispatch) => {
  dispatch(push(`/task/update/${id}`))
}
#!/bin/bash
# Adds the ordering-service hosts to /etc/hosts.
# You may instead add the hosts to /etc/hosts manually.
source ../setup/manage_hosts.sh

# Re-register each orderer host: drop any stale entry, then add a fresh one.
# (Replaces four copy-pasted remove/add pairs; also quotes the variable.)
for HOSTNAME in orderer2.acme.com orderer3.acme.com orderer4.acme.com orderer5.acme.com; do
    removehost "$HOSTNAME" &> /dev/null
    addhost "$HOSTNAME"
done
#include <errno.h>
#include "double.h"
#include "quasi_cube.h"
/* Prints the measure of the quasi-cube r to out, labelled "area". */
static void quasi_cube_measure_fprint(FILE * out, const quasi_cube * r)
{
  double r_area;
  r_area = quasi_cube_measure(r);
  fprintf(out, "area : %g\n", r_area);
}
/* Prints the centroid of r as a raw coordinate array on one line.
 * NOTE(review): r_centroid holds 3 doubles, yet double_array_fprint is asked
 * to print r->dim_embedded entries -- confirm dim_embedded <= 3 or size the
 * buffer from the quasi_cube API. */
static void quasi_cube_centroid_fprint(FILE * out, const quasi_cube * r)
{
  double r_centroid[3];
  quasi_cube_centroid(r_centroid, r);
  fputs("centroid : ", out);
  double_array_fprint(out, r->dim_embedded, r_centroid, "--raw");
  fputc('\n', out);
}
/* Reads a quasi-cube description from stdin and prints its measure and
 * centroid to stdout. Returns errno on a scan failure, 0 on success.
 * NOTE(review): errno is used as the sole error indicator after
 * quasi_cube_fscan; this assumes errno is zero on entry and that fscan sets
 * it on failure -- confirm against the quasi_cube API. Also confirm that
 * quasi_cube_free tolerates the value fscan returns on failure. */
int main()
{
  quasi_cube * r;
  FILE * in, * out;
  out = stdout;
  in = stdin;
  r = quasi_cube_fscan(in);
  if (errno)
  {
    perror("Problem in quasi-cube scanning");
    quasi_cube_free(r);
    return errno;
  }
  quasi_cube_measure_fprint(out, r);
  quasi_cube_centroid_fprint(out, r);
  quasi_cube_free(r);
  return 0;
}
|
<reponame>aws-samples/performance-testing-framework-for-apache-kafka
// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import { Construct } from 'constructs';
import { Aws, StackProps, Duration } from 'aws-cdk-lib';
import { aws_iam as iam } from 'aws-cdk-lib';
import { aws_stepfunctions as sfn } from 'aws-cdk-lib';
import { aws_stepfunctions_tasks as tasks } from 'aws-cdk-lib';
import { aws_lambda as lambda } from 'aws-cdk-lib';
import * as batch from '@aws-cdk/aws-batch-alpha';
import { IntegrationPattern } from 'aws-cdk-lib/aws-stepfunctions';
/** Configuration for the credit-depletion workflow construct. */
export interface CreditDepletionParameters extends StackProps {
  /** Name of the MSK cluster whose burst credits are to be depleted. */
  clusterName: string,
  /** AWS Batch job definition used to run the depletion producers. */
  jobDefinition: batch.IJobDefinition,
  /** AWS Batch queue the depletion jobs are submitted to. */
  jobQueue: batch.IJobQueue,
  /** Base command-line arguments passed to the depletion container. */
  commandParameters: string[],
  /** Step Functions input forwarded to the Batch submit task. */
  payload: sfn.TaskInput
}
/**
 * Builds a Step Functions state machine that depletes an MSK cluster's
 * network/storage burst credits before a performance test run: it drives
 * Batch producer jobs until cluster throughput first exceeds an upper
 * threshold and then (optionally) falls below a lower threshold, at which
 * point the depletion jobs are terminated.
 *
 * The state graph is wired bottom-up: terminal states first, then the
 * choice/wait loops that lead to them.
 */
export class CreditDepletion extends Construct {
  stateMachine: sfn.StateMachine;

  constructor(scope: Construct, id: string, props: CreditDepletionParameters) {
    super(scope, id);

    const fail = new sfn.Fail(this, 'FailDepletion');
    const succeed = new sfn.Succeed(this, 'SucceedDepletion');

    // the number of jobs is determined before all jobs are manually failed, so if there were already failed or succeeded jobs, the depletion exited early and needs to be retried
    const checkAllJobsRunning = new sfn.Choice(this, 'CheckAllJobsRunning')
      .when(sfn.Condition.numberGreaterThan('$.cluster_throughput.Payload.succeededPlusFailedJobs', 0), fail)
      .otherwise(succeed);

    // Lambda that terminates the Batch array job running the depletion.
    const terminateCreditDepletionLambda = new lambda.Function(this, 'TerminateCreditDepletionLambda', {
      runtime: lambda.Runtime.NODEJS_14_X,
      code: lambda.Code.fromAsset('lambda'),
      timeout: Duration.seconds(5),
      handler: 'manage-infrastructure.terminateDepletionJob',
    });

    terminateCreditDepletionLambda.addToRolePolicy(
      new iam.PolicyStatement({
        actions: ['batch:TerminateJob'],
        resources: ['*']
      })
    );

    const terminateCreditDepletion = new tasks.LambdaInvoke(this, 'TerminateCreditDepletion', {
      lambdaFunction: terminateCreditDepletionLambda,
      inputPath: '$.depletion_job',
      resultPath: 'DISCARD'
    });

    terminateCreditDepletion.next(checkAllJobsRunning);

    // Lambda that samples the cluster's current throughput from CloudWatch.
    const queryClusterThroughputLambda = new lambda.Function(this, 'QueryClusterThroughputLambda', {
      runtime: lambda.Runtime.NODEJS_14_X,
      code: lambda.Code.fromAsset('lambda'),
      timeout: Duration.seconds(5),
      handler: 'manage-infrastructure.queryMskClusterThroughput',
      environment: {
        MSK_CLUSTER_NAME: props.clusterName
      }
    });

    queryClusterThroughputLambda.addToRolePolicy(
      new iam.PolicyStatement({
        actions: ['cloudwatch:GetMetricData', 'batch:DescribeJobs'],
        resources: ['*']
      })
    );

    // Poll loop #2: wait until throughput drops below the lower threshold.
    const queryClusterThroughputLowerThan = new tasks.LambdaInvoke(this, 'QueryClusterThroughputLowerThan', {
      lambdaFunction: queryClusterThroughputLambda,
      inputPath: '$',
      resultPath: '$.cluster_throughput'
    });

    const waitThroughputLowerThan = new sfn.Wait(this, 'WaitThroughputLowerThan', {
      time: sfn.WaitTime.duration(Duration.minutes(1))
    });

    waitThroughputLowerThan.next(queryClusterThroughputLowerThan);

    const checkThroughputLowerThan = new sfn.Choice(this, 'CheckThroughputLowerThanThreshold')
      .when(sfn.Condition.numberGreaterThan('$.cluster_throughput.Payload.succeededPlusFailedJobs', 0), terminateCreditDepletion)
      .when(
        sfn.Condition.and(
          sfn.Condition.numberLessThanJsonPath('$.cluster_throughput.Payload.clusterMbInPerSec', '$.test_specification.depletion_configuration.lower_threshold.mb_per_sec'),
          // sfn.Condition.numberLessThanJsonPath('$.cluster_throughput.Payload.brokerMbInPerSecStddev', '$.test_specification.depletion_configuration.lower_threshold.max_broker_stddev')
        ),
        terminateCreditDepletion
      )
      .otherwise(waitThroughputLowerThan);

    queryClusterThroughputLowerThan.next(checkThroughputLowerThan);

    // Poll loop #1: wait until throughput first exceeds the upper threshold.
    const queryClusterThroughputExceeded = new tasks.LambdaInvoke(this, 'QueryClusterThroughputExceeded', {
      lambdaFunction: queryClusterThroughputLambda,
      inputPath: '$',
      resultPath: '$.cluster_throughput'
    });

    const waitThroughputExceeded = new sfn.Wait(this, 'WaitThroughputExceeded', {
      time: sfn.WaitTime.duration(Duration.minutes(1))
    });

    waitThroughputExceeded.next(queryClusterThroughputExceeded);

    // A lower threshold is optional; without one, terminate right after the
    // upper threshold has been exceeded.
    const checkLowerThanPresent = new sfn.Choice(this, 'CheckLowerThanPresent')
      .when(sfn.Condition.isPresent('$.test_specification.depletion_configuration.lower_threshold.mb_per_sec'), waitThroughputLowerThan)
      .otherwise(terminateCreditDepletion);

    const checkThroughputExceeded = new sfn.Choice(this, 'CheckThroughputExceeded')
      .when(sfn.Condition.numberGreaterThan('$.cluster_throughput.Payload.succeededPlusFailedJobs', 0), terminateCreditDepletion)
      .when(sfn.Condition.numberGreaterThanJsonPath('$.cluster_throughput.Payload.clusterMbInPerSec', '$.test_specification.depletion_configuration.upper_threshold.mb_per_sec'), checkLowerThanPresent)
      .otherwise(waitThroughputExceeded);

    queryClusterThroughputExceeded.next(checkThroughputExceeded);

    // Submits the array of depletion producer jobs to Batch.
    const submitCreditDepletion = new tasks.BatchSubmitJob(this, 'SubmitCreditDepletion', {
      jobName: 'RunCreditDepletion',
      jobDefinitionArn: props.jobDefinition.jobDefinitionArn,
      jobQueueArn: props.jobQueue.jobQueueArn,
      arraySize: sfn.JsonPath.numberAt('$.current_test.parameters.num_jobs'),
      containerOverrides: {
        command: [...props.commandParameters, '--command', 'deplete-credits' ]
      },
      payload: props.payload,
      integrationPattern: IntegrationPattern.REQUEST_RESPONSE,
      inputPath: '$',
      resultPath: '$.depletion_job',
    });

    submitCreditDepletion.next(waitThroughputExceeded);

    // Rewrites the test parameters so the submitted jobs deplete credits.
    const updateDepletionParameterLambda = new lambda.Function(this, 'UpdateDepletionParameterLambda', {
      runtime: lambda.Runtime.PYTHON_3_8,
      code: lambda.Code.fromAsset('lambda'),
      timeout: Duration.seconds(5),
      handler: 'test-parameters.update_parameters_for_depletion',
    });

    const updateDepletionParameter = new tasks.LambdaInvoke(this, 'UpdateDepletionParameter', {
      lambdaFunction: updateDepletionParameterLambda,
      outputPath: '$.Payload',
    });

    updateDepletionParameter.next(submitCreditDepletion);

    // Entry point: skip the whole workflow when no depletion is configured.
    const checkDepletion = new sfn.Choice(this, 'CheckDepletionRequired')
      .when(sfn.Condition.isNotPresent('$.test_specification.depletion_configuration'), succeed)
      .otherwise(updateDepletionParameter);

    this.stateMachine = new sfn.StateMachine(this, 'DepleteCreditsStateMachine', {
      definition: checkDepletion,
      stateMachineName: `${Aws.STACK_NAME}-credit-depletion`,
    });
  }
}
|
'use strict';
// Worker script that deliberately throws on every message: `foo` is null,
// so assigning to `foo.bar` raises a TypeError inside the handler.
// NOTE(review): presumably a fixture for testing worker error reporting --
// confirm intent before "fixing" it.
self.addEventListener('message', function () {
  var foo = null;
  foo.bar = 'baz';
});
<filename>proxymm.go
package luar
// Metamethods.
// Errors in metamethod will yield a call to RaiseError.
// It is not possible to return an error / bool / message to the caller when
// metamethods are called via Lua operators (e.g. __newindex).
// TODO: Replicate Go/Lua error messages in RaiseError.
import (
"fmt"
"math"
"math/cmplx"
"reflect"
"github.com/nwidger/golua/lua"
)
// channel__index implements the __index metamethod for channel proxies. It
// exposes the pseudo-methods "recv", "send" and "close", and falls back to
// Go methods declared on the channel's (possibly named) type.
func channel__index(L *lua.State) int {
	v, t := valueOfProxy(L, 1)
	name := L.ToString(2)
	switch name {
	case "recv":
		f := func(L *lua.State) int {
			// Blocks until a value arrives; returns nothing once the
			// channel is closed and drained (ok == false).
			val, ok := v.Recv()
			if ok {
				GoToLuaProxy(L, val)
				return 1
			}
			return 0
		}
		L.PushGoFunction(f)
	case "send":
		f := func(L *lua.State) int {
			// Convert the Lua argument to the channel's element type;
			// raise a Lua error on a type mismatch.
			val := reflect.New(t.Elem())
			err := LuaToGo(L, 1, val.Interface())
			if err != nil {
				L.RaiseError(fmt.Sprintf("channel requires %v value type", t.Elem()))
			}
			v.Send(val.Elem())
			return 0
		}
		L.PushGoFunction(f)
	case "close":
		f := func(L *lua.State) int {
			v.Close()
			return 0
		}
		L.PushGoFunction(f)
	default:
		// Not a channel pseudo-method: look up a regular Go method.
		pushGoMethod(L, name, v)
	}
	return 1
}
// complex__index exposes the "real" and "imag" pseudo-fields of a complex
// proxy, falling back to Go methods on the underlying type.
func complex__index(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	name := L.ToString(2)
	switch name {
	case "real":
		L.PushNumber(real(v.Complex()))
	case "imag":
		L.PushNumber(imag(v.Complex()))
	default:
		pushGoMethod(L, name, v)
	}
	return 1
}

// interface__index resolves attribute access on an interface proxy to a Go
// method of the underlying concrete value.
func interface__index(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	name := L.ToString(2)
	pushGoMethod(L, name, v)
	return 1
}
// TODO: Should map[string] and struct allow direct method calls? Check if first letter is uppercase?

// map__index implements __index for map proxies: first tries a map lookup
// with the key converted to the map's key type; if that fails and the key
// is a (non-numeric) string, falls back to a Go method lookup.
func map__index(L *lua.State) int {
	v, t := valueOfProxy(L, 1)
	key := reflect.New(t.Key())
	err := LuaToGo(L, 2, key.Interface())
	if err == nil {
		key = key.Elem()
		val := v.MapIndex(key)
		if val.IsValid() {
			GoToLuaProxy(L, val)
			return 1
		}
	}
	if !L.IsNumber(2) && L.IsString(2) {
		name := L.ToString(2)
		pushGoMethod(L, name, v)
		return 1
	}
	if err != nil {
		L.RaiseError(fmt.Sprintf("map requires %v key", t.Key()))
	}
	return 0
}

// map__ipairs iterates the map's positive-integer keys in ascending order,
// mimicking Lua's ipairs over a (sparse-tolerant) sequence.
func map__ipairs(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	keys := v.MapKeys()
	intKeys := map[uint64]reflect.Value{}
	// Filter integer keys.
	for _, k := range keys {
		if k.Kind() == reflect.Interface {
			k = k.Elem()
		}
		switch unsizedKind(k) {
		case reflect.Int64:
			i := k.Int()
			if i > 0 {
				intKeys[uint64(i)] = k
			}
		case reflect.Uint64:
			intKeys[k.Uint()] = k
		}
	}
	idx := uint64(0)
	// Closure-based iterator: stops at the first missing index, like ipairs.
	iter := func(L *lua.State) int {
		idx++
		if _, ok := intKeys[idx]; !ok {
			return 0
		}
		GoToLuaProxy(L, idx)
		val := v.MapIndex(intKeys[idx])
		GoToLuaProxy(L, val)
		return 2
	}
	L.PushGoFunction(iter)
	return 1
}

// map__newindex implements __newindex: converts both key and value to the
// map's declared types, raising a Lua error on either mismatch.
func map__newindex(L *lua.State) int {
	v, t := valueOfProxy(L, 1)
	key := reflect.New(t.Key())
	err := LuaToGo(L, 2, key.Interface())
	if err != nil {
		L.RaiseError(fmt.Sprintf("map requires %v key", t.Key()))
	}
	key = key.Elem()
	val := reflect.New(t.Elem())
	err = LuaToGo(L, 3, val.Interface())
	if err != nil {
		L.RaiseError(fmt.Sprintf("map requires %v value type", t.Elem()))
	}
	val = val.Elem()
	v.SetMapIndex(key, val)
	return 0
}

// map__pairs iterates all map entries in Go's (unspecified) key order.
// NOTE(review): the key snapshot is taken once; concurrent map mutation
// during iteration is not guarded -- confirm callers never do that.
func map__pairs(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	keys := v.MapKeys()
	idx := -1
	n := v.Len()
	iter := func(L *lua.State) int {
		idx++
		if idx == n {
			return 0
		}
		GoToLuaProxy(L, keys[idx])
		val := v.MapIndex(keys[idx])
		GoToLuaProxy(L, val)
		return 2
	}
	L.PushGoFunction(iter)
	return 1
}
// number__add implements __add: operands are promoted to their common kind
// (uint/int/float/complex) and the sum is pushed with type preserved.
func number__add(L *lua.State) int {
	v1, t1 := luaToGoValue(L, 1)
	v2, t2 := luaToGoValue(L, 2)
	var result interface{}
	switch commonKind(v1, v2) {
	case reflect.Uint64:
		result = v1.Uint() + v2.Uint()
	case reflect.Int64:
		result = v1.Int() + v2.Int()
	case reflect.Float64:
		result = valueToNumber(L, v1) + valueToNumber(L, v2)
	case reflect.Complex128:
		result = valueToComplex(L, v1) + valueToComplex(L, v2)
	}
	pushNumberValue(L, result, t1, t2)
	return 1
}

// number__div implements __div. Integer kinds use Go integer division.
// NOTE(review): integer division by zero panics in Go rather than raising a
// Lua error -- confirm this is the intended behavior.
func number__div(L *lua.State) int {
	v1, t1 := luaToGoValue(L, 1)
	v2, t2 := luaToGoValue(L, 2)
	var result interface{}
	switch commonKind(v1, v2) {
	case reflect.Uint64:
		result = v1.Uint() / v2.Uint()
	case reflect.Int64:
		result = v1.Int() / v2.Int()
	case reflect.Float64:
		result = valueToNumber(L, v1) / valueToNumber(L, v2)
	case reflect.Complex128:
		result = valueToComplex(L, v1) / valueToComplex(L, v2)
	}
	pushNumberValue(L, result, t1, t2)
	return 1
}

// number__lt implements __lt for ordered kinds (complex has no ordering,
// so that case is intentionally absent).
func number__lt(L *lua.State) int {
	v1, _ := luaToGoValue(L, 1)
	v2, _ := luaToGoValue(L, 2)
	switch commonKind(v1, v2) {
	case reflect.Uint64:
		L.PushBoolean(v1.Uint() < v2.Uint())
	case reflect.Int64:
		L.PushBoolean(v1.Int() < v2.Int())
	case reflect.Float64:
		L.PushBoolean(valueToNumber(L, v1) < valueToNumber(L, v2))
	}
	return 1
}

// number__mod implements __mod; floats use math.Mod, integers use Go's %.
func number__mod(L *lua.State) int {
	v1, t1 := luaToGoValue(L, 1)
	v2, t2 := luaToGoValue(L, 2)
	var result interface{}
	switch commonKind(v1, v2) {
	case reflect.Uint64:
		result = v1.Uint() % v2.Uint()
	case reflect.Int64:
		result = v1.Int() % v2.Int()
	case reflect.Float64:
		result = math.Mod(valueToNumber(L, v1), valueToNumber(L, v2))
	}
	pushNumberValue(L, result, t1, t2)
	return 1
}

// number__mul implements __mul across the four numeric kinds.
func number__mul(L *lua.State) int {
	v1, t1 := luaToGoValue(L, 1)
	v2, t2 := luaToGoValue(L, 2)
	var result interface{}
	switch commonKind(v1, v2) {
	case reflect.Uint64:
		result = v1.Uint() * v2.Uint()
	case reflect.Int64:
		result = v1.Int() * v2.Int()
	case reflect.Float64:
		result = valueToNumber(L, v1) * valueToNumber(L, v2)
	case reflect.Complex128:
		result = valueToComplex(L, v1) * valueToComplex(L, v2)
	}
	pushNumberValue(L, result, t1, t2)
	return 1
}

// number__pow implements __pow. Note integer operands are converted to
// float64 first (math.Pow), matching Lua's float exponentiation semantics.
func number__pow(L *lua.State) int {
	v1, t1 := luaToGoValue(L, 1)
	v2, t2 := luaToGoValue(L, 2)
	var result interface{}
	switch commonKind(v1, v2) {
	case reflect.Uint64:
		result = math.Pow(float64(v1.Uint()), float64(v2.Uint()))
	case reflect.Int64:
		result = math.Pow(float64(v1.Int()), float64(v2.Int()))
	case reflect.Float64:
		result = math.Pow(valueToNumber(L, v1), valueToNumber(L, v2))
	case reflect.Complex128:
		result = cmplx.Pow(valueToComplex(L, v1), valueToComplex(L, v2))
	}
	pushNumberValue(L, result, t1, t2)
	return 1
}

// number__sub implements __sub across the four numeric kinds.
func number__sub(L *lua.State) int {
	v1, t1 := luaToGoValue(L, 1)
	v2, t2 := luaToGoValue(L, 2)
	var result interface{}
	switch commonKind(v1, v2) {
	case reflect.Uint64:
		result = v1.Uint() - v2.Uint()
	case reflect.Int64:
		result = v1.Int() - v2.Int()
	case reflect.Float64:
		result = valueToNumber(L, v1) - valueToNumber(L, v2)
	case reflect.Complex128:
		result = valueToComplex(L, v1) - valueToComplex(L, v2)
	}
	pushNumberValue(L, result, t1, t2)
	return 1
}

// number__unm implements unary minus. Unsigned negation wraps (Go two's
// complement semantics).
// NOTE(review): in the final else branch, v.Float() panics unless the
// result is a float kind; this relies on integer results always taking the
// isNewType/complex branches -- confirm luaToGoValue never yields a
// predeclared integer type here.
func number__unm(L *lua.State) int {
	v1, t1 := luaToGoValue(L, 1)
	var result interface{}
	switch unsizedKind(v1) {
	case reflect.Uint64:
		result = -v1.Uint()
	case reflect.Int64:
		result = -v1.Int()
	case reflect.Float64, reflect.String:
		result = -valueToNumber(L, v1)
	case reflect.Complex128:
		result = -v1.Complex()
	}
	v := reflect.ValueOf(result)
	if unsizedKind(v1) == reflect.Complex128 {
		makeValueProxy(L, v.Convert(t1), cComplexMeta)
	} else if isNewType(t1) {
		makeValueProxy(L, v.Convert(t1), cNumberMeta)
	} else {
		L.PushNumber(v.Float())
	}
	return 1
}
// From Lua's specs: "A metamethod only is selected when both objects being
// compared have the same type and the same metamethod for the selected
// operation." Thus both arguments must be proxies for this function to be
// called. No need to check for type equality: Go's "==" operator will do it for
// us.
func proxy__eq(L *lua.State) int {
	var a1 interface{}
	_ = LuaToGo(L, 1, &a1)
	var a2 interface{}
	_ = LuaToGo(L, 2, &a2)
	L.PushBoolean(a1 == a2)
	return 1
}

// proxy__gc removes the collected proxy's entry from the global proxy map
// (under lock) so the Go value can be garbage-collected too.
func proxy__gc(L *lua.State) int {
	proxyId := *(*uintptr)(L.ToUserdata(1))
	proxymu.Lock()
	delete(proxyMap, proxyId)
	proxymu.Unlock()
	return 0
}

// proxy__tostring renders the proxied Go value with fmt's default %v verb.
func proxy__tostring(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	L.PushString(fmt.Sprintf("%v", v))
	return 1
}
// slice__index implements __index for slice/array proxies. Integer keys are
// 1-based element access; string keys expose the pseudo-methods "append",
// "cap" and "slice" (slices only), falling back to Go methods.
func slice__index(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	for v.Kind() == reflect.Ptr {
		// For arrays.
		v = v.Elem()
	}
	if L.IsNumber(2) {
		idx := L.ToInteger(2)
		if idx < 1 || idx > v.Len() {
			L.RaiseError("slice/array get: index out of range")
		}
		// Convert the 1-based Lua index to Go's 0-based indexing.
		v := v.Index(idx - 1)
		GoToLuaProxy(L, v)
	} else if L.IsString(2) {
		name := L.ToString(2)
		if v.Kind() == reflect.Array {
			// Arrays get no append/cap/slice pseudo-methods.
			pushGoMethod(L, name, v)
			return 1
		}
		switch name {
		case "append":
			f := func(L *lua.State) int {
				// Variadic append: convert each Lua argument to the
				// element type, then return a new slice proxy.
				narg := L.GetTop()
				args := []reflect.Value{}
				for i := 1; i <= narg; i++ {
					elem := reflect.New(v.Type().Elem())
					err := LuaToGo(L, i, elem.Interface())
					if err != nil {
						L.RaiseError(fmt.Sprintf("slice requires %v value type", v.Type().Elem()))
					}
					args = append(args, elem.Elem())
				}
				newslice := reflect.Append(v, args...)
				makeValueProxy(L, newslice, cSliceMeta)
				return 1
			}
			L.PushGoFunction(f)
		case "cap":
			L.PushInteger(int64(v.Cap()))
		case "slice":
			L.PushGoFunction(slicer(L, v, cSliceMeta))
		default:
			pushGoMethod(L, name, v)
		}
	} else {
		L.RaiseError("non-integer slice/array index")
	}
	return 1
}

// slice__ipairs returns an ipairs-style iterator over the slice/array,
// reporting 1-based indices to Lua.
func slice__ipairs(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	n := v.Len()
	idx := -1
	iter := func(L *lua.State) int {
		idx++
		if idx == n {
			return 0
		}
		GoToLuaProxy(L, idx+1) // report as 1-based index
		val := v.Index(idx)
		GoToLuaProxy(L, val)
		return 2
	}
	L.PushGoFunction(iter)
	return 1
}

// slice__newindex implements 1-based element assignment, converting the
// value to the element type and range-checking the index.
func slice__newindex(L *lua.State) int {
	v, t := valueOfProxy(L, 1)
	for v.Kind() == reflect.Ptr {
		// For arrays.
		v = v.Elem()
		t = t.Elem()
	}
	idx := L.ToInteger(2)
	val := reflect.New(t.Elem())
	err := LuaToGo(L, 3, val.Interface())
	if err != nil {
		L.RaiseError(fmt.Sprintf("slice requires %v value type", t.Elem()))
	}
	val = val.Elem()
	if idx < 1 || idx > v.Len() {
		L.RaiseError("slice/array set: index out of range")
	}
	v.Index(idx - 1).Set(val)
	return 0
}

// slicemap__len implements __len for slice, array and map proxies.
func slicemap__len(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	for v.Kind() == reflect.Ptr {
		// For arrays.
		v = v.Elem()
	}
	L.PushInteger(int64(v.Len()))
	return 1
}
// Lua accepts concatenation with string and number.
// string__concat implements __concat; the result keeps the named string
// type of whichever operand has one, otherwise it is a plain Lua string.
func string__concat(L *lua.State) int {
	v1, t1 := luaToGoValue(L, 1)
	v2, t2 := luaToGoValue(L, 2)
	s1 := valueToString(L, v1)
	s2 := valueToString(L, v2)
	result := s1 + s2
	if t1 == t2 || isPredeclaredType(t2) {
		v := reflect.ValueOf(result)
		makeValueProxy(L, v.Convert(t1), cStringMeta)
	} else if isPredeclaredType(t1) {
		v := reflect.ValueOf(result)
		makeValueProxy(L, v.Convert(t2), cStringMeta)
	} else {
		L.PushString(result)
	}
	return 1
}

// string__index gives 1-based *byte* access for integer keys (each byte is
// returned as a one-character string), exposes the "slice" pseudo-method,
// and falls back to Go methods for other string keys.
func string__index(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	if L.IsNumber(2) {
		idx := L.ToInteger(2)
		if idx < 1 || idx > v.Len() {
			L.RaiseError("index out of range")
		}
		v := v.Index(idx - 1).Convert(reflect.TypeOf(""))
		GoToLuaProxy(L, v)
	} else if L.IsString(2) {
		name := L.ToString(2)
		if name == "slice" {
			L.PushGoFunction(slicer(L, v, cStringMeta))
		} else {
			pushGoMethod(L, name, v)
		}
	} else {
		L.RaiseError("non-integer string index")
	}
	return 1
}

// string__ipairs iterates the string's *runes* (not bytes), reporting
// 1-based indices and one-rune strings.
func string__ipairs(L *lua.State) int {
	v, _ := valueOfProxy(L, 1)
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	r := []rune(v.String())
	n := len(r)
	idx := -1
	iter := func(L *lua.State) int {
		idx++
		if idx == n {
			return 0
		}
		GoToLuaProxy(L, idx+1) // report as 1-based index
		GoToLuaProxy(L, string(r[idx]))
		return 2
	}
	L.PushGoFunction(iter)
	return 1
}

// string__len implements __len, returning the byte length (like Lua's #).
func string__len(L *lua.State) int {
	v1, _ := luaToGoValue(L, 1)
	L.PushInteger(int64(v1.Len()))
	return 1
}

// string__lt implements __lt via Go's lexicographic string comparison.
func string__lt(L *lua.State) int {
	v1, _ := luaToGoValue(L, 1)
	v2, _ := luaToGoValue(L, 2)
	L.PushBoolean(v1.String() < v2.String())
	return 1
}
// struct__index resolves field access on a struct (or *struct) proxy;
// unexported or missing fields fall back to a Go method lookup on the
// original (possibly pointer) value so pointer-receiver methods resolve.
func struct__index(L *lua.State) int {
	v, t := valueOfProxy(L, 1)
	name := L.ToString(2)
	vp := v
	if t.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	field := v.FieldByName(name)
	if !field.IsValid() || !field.CanSet() {
		// No such exported field, try for method.
		pushGoMethod(L, name, vp)
	} else {
		GoToLuaProxy(L, field)
	}
	return 1
}

// struct__newindex assigns to an exported struct field, converting the Lua
// value to the field's type and raising a Lua error on unknown fields or
// type mismatches.
// NOTE(review): for a non-pointer struct proxy, field.Set likely panics
// (unaddressable value) -- confirm proxies of bare structs are rejected
// upstream.
func struct__newindex(L *lua.State) int {
	v, t := valueOfProxy(L, 1)
	name := L.ToString(2)
	if t.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	field := v.FieldByName(name)
	if !field.IsValid() {
		L.RaiseError(fmt.Sprintf("no field named `%s` for type %s", name, v.Type()))
	}
	val := reflect.New(field.Type())
	err := LuaToGo(L, 3, val.Interface())
	if err != nil {
		L.RaiseError(fmt.Sprintf("struct field %v requires %v value type, error with target: %v", name, field.Type(), err))
	}
	field.Set(val.Elem())
	return 0
}
|
<reponame>luckfamousa/DicomDeidentify
package de.stereotypez
import spray.json.{JsString, JsValue, RootJsonFormat}
import spray.json.DefaultJsonProtocol._
// Enumeration of de-identification action codes (DICOM PS3.15-style).
object ActionCode extends Enumeration {
  type ActionCode = Value
  val `X`, `Z`, `C`, `D`, `K`, `U`, `X/Z/U*`, `X/Z/D`, `X/Z`, `X/D`, `Z/D` = Value

  /** Parses a textual code into its ActionCode value.
    *
    * @throws UnknownActionCodeException if the code is not recognised.
    */
  def of(s: String): ActionCode = s match {
    case "X" => `X`
    case "Z" => `Z`
    case "C" => `C`
    case "D" => `D`
    case "K" => `K`
    case "U" => `U`
    case "X/Z/U*" => `X/Z/U*`
    case "X/Z/D" => `X/Z/D`
    case "X/Z" => `X/Z`
    case "X/D" => `X/D`
    case "Z/D" => `Z/D`
    case c => throw new UnknownActionCodeException(s"Code '$c' is unknown.")
  }

  // BUG FIX: a previous `override def toString()` defined here on the
  // *object* pattern-matched `this` (the enclosing object) against the
  // individual Values, so no case could ever match and every call threw
  // UnknownActionCodeException. Enumeration#Value already renders each
  // value by its declared name (e.g. "X/Z/U*"), which is exactly what the
  // JSON writer below relies on, so the broken override has been removed.

  /** spray-json (de)serialisation: values round-trip through their names. */
  implicit object ActionCodeFormat extends RootJsonFormat[ActionCode] {
    override def read(json: JsValue): ActionCode = ActionCode.of(json.convertTo[String])
    override def write(obj: ActionCode): JsValue = JsString(obj.toString)
  }
}
// Raised when a textual action code cannot be mapped to an ActionCode value.
class UnknownActionCodeException(msg: String) extends RuntimeException(msg)
|
import { Title, ImageLink, Content, SubmitEditPost, WarningMessage } from '../components/EditPostForm'
import { Container } from 'react-bootstrap'
import { useEffect, useState } from 'react'
import getContent from '../functions/getContent'
function EditContent(props){
const docId = props.docId
const [imageSrc, setImageSrc] = useState('')
const [titleText, setTitleText] = useState('')
const [contentText, setContentText] = useState('')
const [warningMessage, setWarningMessage] = useState('')
useEffect(() => {
//Get the post data from the server
getContent(docId)
.then(response => {
console.log(response)
setTitleText(response.title)
setImageSrc(response.imageSrc)
setContentText(response.content)
})
},[docId])
return(
<Container className="mt-5">
<Title title={titleText} setTitle={setTitleText}></Title>
<ImageLink imageSrc={imageSrc} setImageSrc={setImageSrc}></ImageLink>
<Content content={contentText} setContent={setContentText}></Content>
<SubmitEditPost docId={docId} setWarningMessage={setWarningMessage} title={titleText} imageSrc={imageSrc} content={contentText}></SubmitEditPost>
<WarningMessage warningMessage={warningMessage}></WarningMessage>
</Container>
)
}
export default EditContent |
# Administrative management of user accounts.
class Admin::UsersController < Admin::BaseController
  # Lists all users, optionally restricted to those with prime access.
  def index
    @users = params[:prime_only] ? User.with_prime_access : User.all
  end

  def show
    @user = User.find params[:id]
  end

  # Soft-deletes a user: flags the record rather than destroying the row.
  def destroy
    @user = User.find params[:id]
    name = @user.name
    @user.update deleted: true
    flash[:notice] = "#{name} has been deleted."
    redirect_to admin_users_path
  end

  # Masquerades as the given user for a limited time (MASQ_TIMEOUT window).
  def masq
    session[:masq] = User::MASQ_TIMEOUT.from_now.to_i
    session[:uid] = User.find(params[:id]).id
    redirect_to root_path
  end

  # Flips the user's prime flag.
  def toggle_prime
    # BUG FIX: find_by returned nil for an unknown id and then crashed with
    # NoMethodError on toggle!; use find (raises RecordNotFound -> 404) for
    # consistency with the other actions.
    user = User.find params[:id]
    user.toggle! :prime
    redirect_to admin_users_path
  end
end
|
#!/bin/bash
# Verifies that the committed PHP stub files match the freshly generated
# ones. Only runs on the coverage CI job (COVERAGE=true).
cd "$(git rev-parse --show-toplevel)" || exit 1

# check_stub <committed> <generated> <label>
# Prints "<label> file ok" on a match, otherwise shows the diff and fails
# the build. (Replaces two copy-pasted cmp/if blocks; `exit -1` was also
# non-portable -- POSIX exit status must be 0-255, so use 1.)
check_stub() {
    local committed="$1" generated="$2" label="$3"
    if cmp --silent "$committed" "$generated"; then
        echo "$label file ok";
    else
        echo "$label file not up to date";
        diff "$committed" "$generated"
        exit 1;
    fi
}

if [ "${COVERAGE}" = "true" ]; then
    tools/generate_stubs.sh
    check_stub stubs/const.php tools/stubs/output/const.php "const"
    check_stub stubs/functions.php tools/stubs/output/functions.php "functions"
fi
<reponame>dbirulia/android-prework
package com.example.dbirulia.simpletodoapp.utils;
import android.content.ContentValues;
import android.content.Context;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.database.sqlite.SQLiteOpenHelper;
import com.example.dbirulia.simpletodoapp.models.ToDoItem;
import java.util.ArrayList;
public class DBHelper extends SQLiteOpenHelper {
public static final String DATABASE_NAME = "ToDoList.db";
public static final String TODOLIST_TABLE_NAME = "todos";
public static final String TODO_COLUMN_ID = "id";
public static final String TODO_COLUMN_NAME = "name";
public static final String TODO_COLUMN_DETAILS = "details";
public static final String TODO_COLUMN_PRIORITY = "priority";
public static final String TODO_COLUMN_DUEDATE = "duedate";
public DBHelper(Context context)
{
super(context, DATABASE_NAME , null, 1);
}
@Override
public void onCreate(SQLiteDatabase db) {
// TODO Auto-generated method stub
db.execSQL(
"CREATE TABLE IF NOT EXISTS todos " +
"(id integer primary key, name text, details text, priority text, duedate int)"
);
}
@Override
public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
// TODO Auto-generated method stub
db.execSQL("DROP TABLE IF EXISTS todos");
onCreate(db);
}
public long insertToDoItem (String name, String details, String priority, Integer duedate)
{
SQLiteDatabase db = this.getWritableDatabase();
ContentValues contentValues = new ContentValues();
contentValues.put("name", name);
contentValues.put("details", details);
contentValues.put("priority", priority);
contentValues.put("duedate", duedate);
long id = db.insert("todos", null, contentValues);
return id;
}
public boolean updateToDoItem (Long id, String name, String details, String priority, Integer duedate)
{
SQLiteDatabase db = this.getWritableDatabase();
ContentValues contentValues = new ContentValues();
contentValues.put("name", name);
contentValues.put("details", details);
contentValues.put("priority", priority);
contentValues.put("duedate", duedate);
db.update("todos", contentValues, "id = ? ", new String[] { Long.toString(id) } );
return true;
}
public Integer deleteToDoItem (Long id)
{
SQLiteDatabase db = this.getWritableDatabase();
return db.delete("todos",
"id = ? ",
new String[] { Long.toString(id) });
}
public ArrayList<ToDoItem> getAllToDos()
{
ArrayList<ToDoItem> todos_list = new ArrayList<ToDoItem>();
SQLiteDatabase db = this.getReadableDatabase();
Cursor res = db.rawQuery("select * from todos", null);
res.moveToFirst();
while(res.isAfterLast() == false){
String name = res.getString(res.getColumnIndex(TODO_COLUMN_NAME));
long id = res.getLong(res.getColumnIndex(TODO_COLUMN_ID));
String details = res.getString(res.getColumnIndex(TODO_COLUMN_DETAILS));
String priority = res.getString(res.getColumnIndex(TODO_COLUMN_PRIORITY));
int duedate = res.getInt(res.getColumnIndex(TODO_COLUMN_DUEDATE));
todos_list.add(new ToDoItem(id, name, details, priority, duedate));
res.moveToNext();
}
return todos_list;
}
} |
<gh_stars>0
import Sprite from "./Sprite";
import { getTimeStamp } from "../utils";
// Sprite-sheet frame dimensions of the restart button, in pixels.
const defaultConfig = {
  WIDTH: 36,
  HEIGHT: 32,
} as const;

// Total length of the restart (retry) intro animation, in milliseconds.
const RESTART_ANIM_DURATION = 875;
// Pause on the first frame before the animation starts, in milliseconds.
const LOGO_PAUSE_DURATION = 875;
/**
 * Animated restart (retry) button sprite shown on game over. Plays a short
 * frame-by-frame intro animation driven by requestAnimationFrame.
 */
export default class RestartButton extends Sprite<typeof defaultConfig> {
  /** Duration of one animation frame in milliseconds. */
  msPerFrame: number;

  // Retry animation bookkeeping.
  frameTimeStamp: number;
  animTimer: number;
  gameOverRafId?: number;
  flashTimer: number;
  flashCounter: number;

  /** RestartButton */
  constructor(canvas: HTMLCanvasElement) {
    super(canvas, "RESTART");
    // Sprite-sheet x/y offset of each animation frame.
    this.phases = [
      [0, 0],
      [36, 0],
      [72, 0],
      [108, 0],
      [144, 0],
      [180, 0],
      [216, 0],
      [252, 0],
    ];
    this.msPerFrame = RESTART_ANIM_DURATION / this.phases.length;
    // Retry animation.
    this.frameTimeStamp = 0;
    this.animTimer = 0;
    this.flashTimer = 0;
    this.flashCounter = 0;
  }

  /** Centers the button within its container (slightly below center). */
  init() {
    this.config = defaultConfig;
    this.x = (this.container.width - this.config.WIDTH) / 2;
    this.y = (this.container.height - this.config.HEIGHT) / 2 + 15;
  }

  /**
   * Advances the intro animation one tick and reschedules itself via
   * requestAnimationFrame until the last frame has been drawn.
   * (Loose `==` comparisons replaced with strict `===` -- same values,
   * idiomatic strict-mode TypeScript.)
   */
  update() {
    const now = getTimeStamp();
    const deltaTime = now - (this.frameTimeStamp || now);
    this.frameTimeStamp = now;
    this.animTimer += deltaTime;
    this.flashTimer += deltaTime;

    // Restart Button
    if (this.currentPhase === 0 && this.animTimer > LOGO_PAUSE_DURATION) {
      // Hold the first frame for the initial pause, then start animating.
      this.animTimer = 0;
      this.draw();
      this.nextPhase();
    } else if (
      this.currentPhase > 0 &&
      this.currentPhase < this.phases.length
    ) {
      if (this.animTimer >= this.msPerFrame) {
        this.draw();
        this.nextPhase();
      }
    } else if (this.currentPhase === this.phases.length) {
      // Animation finished: stop scheduling frames and rewind state.
      this.reset();
      return;
    }

    this.gameOverRafId = requestAnimationFrame(this.update.bind(this));
  }

  /** Cancels any pending animation frame and rewinds all timers/phases. */
  reset() {
    if (this.gameOverRafId) {
      cancelAnimationFrame(this.gameOverRafId);
      this.gameOverRafId = undefined;
    }
    this.animTimer = 0;
    this.frameTimeStamp = 0;
    this.flashTimer = 0;
    this.flashCounter = 0;
    this.resetPhase();
  }
}
|
const http = require('http');
const port = 8000;

// Minimal JSON endpoint: every request is answered with the current
// timestamp in ISO-8601 form.
const server = http.createServer((request, reply) => {
  const payload = JSON.stringify({ date: new Date().toISOString() });
  reply.writeHead(200, { 'Content-Type': 'application/json' });
  reply.write(payload);
  reply.end();
});

server.listen(port);
console.log(`Server running at http://localhost:${port}`);
#!/bin/bash
set -e

# Install build prerequisites for the ESP32 toolchain.
# Fix: -y keeps the script non-interactive — under `set -e` a declined
# apt-get prompt previously aborted the whole setup.
sudo apt-get install -y git wget make libncurses-dev flex bison gperf python python-serial

# Download and unpack the prebuilt Xtensa cross-compiler.
mkdir -p ~/esp-toolchain
cd ~/esp-toolchain
wget https://dl.espressif.com/dl/xtensa-esp32-elf-linux64-1.22.0-61-gab8375a-5.2.0.tar.gz
tar -xzf xtensa-esp32-elf-linux64-1.22.0-61-gab8375a-5.2.0.tar.gz

# Persist the environment variables.
# Fix: the IDF_PATH line was single-quoted, so $(pwd) was re-expanded each
# time ~/.profile was sourced (usually from $HOME), pointing IDF_PATH at
# the wrong directory. Bake the absolute path in now instead.
echo "export IDF_PATH=$(pwd)/esp-idf" >> ~/.profile
echo 'export PATH=$PATH:$HOME/esp-toolchain/xtensa-esp32-elf/bin' >> ~/.profile
source ~/.profile
'use strict';
/* eslint-disable no-unused-vars */
const { inspect } = require('util');
const { Command, CommandError, SimplicityEmbed } = require('@structures');
const { Constants, Util } = require('@util');
// Bot token, used below to scrub accidental token leaks out of eval output.
// Defaults to '' so the helpers stay safe when DISCORD_TOKEN is unset.
const token = process.env.DISCORD_TOKEN || '';
const { code, isEmpty, isPromise } = Util;
// Wrap a string in a js code block and redact every occurrence of the token.
// Fix: guarding on a non-empty token avoids `new RegExp('')` matching at
// every position (and the previous crash on `token.length` when the env
// variable was missing).
const value = (s) => {
  const block = code(s, 'js');
  return token ? block.replace(new RegExp(token, 'g'), () => '*'.repeat(token.length)) : block;
};
// Synchronously run a shell command and return its stdout; referenced from
// eval'd expressions only.
const exec = (c) => require('child_process').execSync(c).toString();
/**
 * The Eval command class — owner-only live code evaluation for debugging.
 * @extends Command
 */
class Eval extends Command {
  /**
   * Creates an instance of EvalCommand.
   * @param {Client} client The Client.
   */
  constructor(client) {
    super(client, 'eval', {
      aliases: ['compile', 'ev', 'evaluate', 'exec', 'execute'],
      args: [
        {
          full: true,
          missingError: 'You need to input an expression for me to evaluate.',
          required: true,
          type: 'string',
        },
      ],
      category: 'dev',
      requirements: { ownerOnly: true },
    });
  }

  /**
   * What gets ran when the command is called.
   * @param {CommandContext} ctx The context of the command.
   * @param {string} expr The expression to be evaluated.
   * @returns {Promise<Message>} The reply from the command.
   */
  async run(ctx, expr) {
    // These locals look unused, but they are intentionally brought into
    // scope so the eval'd expression can reference them directly
    // (e.g. `guild.name` or `send(...)`) — do not remove them.
    const {
      args,
      author,
      botLanguages,
      command,
      client,
      channel,
      database,
      emoji,
      guild,
      language,
      member,
      message,
      prefix,
      query,
      send,
      t,
    } = ctx;
    let res;
    // Strip a surrounding triple-backtick code fence (optional language tag).
    const toEval = expr.replace(/(^`{3}(\w+)?|`{3}$)/g, '');
    // Await the result, time it, inspect non-strings one level deep, and
    // pass everything through value() so the bot token gets redacted.
    const cleanResult = async (evaluated, hrStart) => {
      const resolved = await Promise.resolve(evaluated);
      const hrDiff = process.hrtime(hrStart);
      const inspected = typeof resolved === 'string' ? resolved : inspect(resolved, { depth: 0, showHidden: true });
      const cleanEvaluated = value(this.clean(inspected));
      const executedIn = `Executed in ${hrDiff[0] > 0 ? `${hrDiff[0]}s ` : ''}${hrDiff[1] / 1000000}ms`;
      return `${isPromise(evaluated) ? 'Promise ' : ''}Result (${executedIn}): ${cleanEvaluated}`;
    };
    try {
      const hrStart = process.hrtime();
      const evaluated = eval(toEval);
      res = await cleanResult(evaluated, hrStart);
    } catch (err) {
      // Top-level `await` is invalid inside plain eval: retry wrapped in an
      // async IIFE (expression body for one-liners, block body otherwise).
      if (['await is only valid in async function', 'await is not defined'].includes(err.message)) {
        try {
          const hrStart = process.hrtime();
          if (toEval.trim().split('\n').length === 1) {
            res = await cleanResult(eval(`(async () => ${toEval})()`), hrStart);
          } else res = await cleanResult(eval(`(async () => {\n${toEval}\n})()`), hrStart);
        } catch (er) {
          res = `Error: ${value(this.clean(er))}`;
        }
      } else res = `Error: ${value(this.clean(err))}`;
    } finally {
      // Always post the outcome, then give the invoking owner 30 seconds to
      // delete both the result and the invoking message via a reaction.
      const msg = await send(res);
      const permissions = channel.permissionsFor(guild.me);
      if (permissions.has('ADD_REACTIONS') && permissions.has('MANAGE_MESSAGES')) {
        await msg.react(emoji('CANCEL', { id: true }));
        const filter = (r, u) => r.me && message.author.id === u.id;
        const collector = await msg.createReactionCollector(filter, { errors: ['time'], max: 1, time: 30000 });
        collector.on('collect', async () => {
          if (!msg.deleted) await msg.delete().catch(() => null);
          if (!message.deleted) await message.delete().catch(() => null);
        });
        collector.on('end', async () => {
          if (!msg.deleted) await msg.reactions.removeAll().catch(() => null);
        });
      }
    }
  }

  /**
   * Cleans blank space from the eval response.
   * Inserts a zero-width space after backticks and @ so the output cannot
   * break out of its code block or ping users/roles.
   * @param {string} text The text to clean.
   * @returns {string} The text cleaned.
   * @private
   */
  clean(text) {
    const blankSpace = String.fromCharCode(8203);
    return typeof text === 'string' ? text.replace(/`/g, `\`${blankSpace}`).replace(/@/g, `@${blankSpace}`) : text;
  }
}
module.exports = Eval;
|
import os
import json
cached_results_folder = "/path/to/cached_results"
filter_file = lambda x: x  # Placeholder for the filter_file function


def count_syscalls(fname: str) -> int:
    """Return the number of syscalls cached for ``fname``.

    Looks up ``syscalls_<filtered-name>`` inside ``cached_results_folder``;
    the file is expected to hold a JSON list of syscall names.

    Returns -1 when the cache file is missing or contains invalid JSON.
    """
    ffname = os.path.join(cached_results_folder, "syscalls_%s" % filter_file(fname))
    try:
        # json.load streams straight from the handle — no need to slurp the
        # file into a string first (also drops the dead initial list).
        with open(ffname, "r") as ff:
            return len(json.load(ff))
    except (FileNotFoundError, json.JSONDecodeError):
        return -1
def anagrams_count(word):
    """Return the number of distinct anagrams (permutations) of ``word``.

    Repeated letters must not create duplicate arrangements, so the count
    is ``len(word)!`` divided by the factorial of each letter's multiplicity.

    Bug fix: the previous recursive version dropped the current first
    letter when recursing on the remaining ones, undercounting (it returned
    4 for "abc" instead of 6). The closed-form formula is correct and runs
    in O(n) instead of exponential time.
    """
    import math
    from collections import Counter

    total = math.factorial(len(word))
    for repeats in Counter(word).values():
        # Integer division is exact: the multinomial coefficient is whole.
        total //= math.factorial(repeats)
    return total
// Global upload state: true while a classification request is in flight.
let isLoading = false;
// Cached DOM handles for the setup form, previews and the hidden file input.
const setupSection = document.getElementById('setup-section');
const setupHeader = document.getElementById('setup-header');
const token = document.getElementById('token');
const url = document.getElementById('url');
const resultCanvas = document.getElementById('image-upload');
const resultPreview = document.getElementById('result-preview');
const imagePreview = document.getElementById('image-preview');
const uploadInput = document.getElementById('upload-input');
// Promise-based delay helper, used when polling for results.
const sleep = (milliseconds) => {
  return new Promise(resolve => setTimeout(resolve, milliseconds))
}
// Currently selected image file and the SAS upload URL from the backend.
let imgFile = null;
let sasUrl = null;
// Pre-fill the token/url fields from ?token= and ?url= query parameters.
const getToken = () => {
  const locationURL = new URL(window.location.href);
  const tokenParam = locationURL.searchParams.get('token');
  const urlParam = locationURL.searchParams.get('url');
  token.value = tokenParam || '';
  url.value = urlParam || '';
};
// Small delay so the form fields exist before they are populated.
setTimeout(() => {
  getToken();
}, 500);
// ------------------------------------------------
//
// Show & hide setup section
//
// ------------------------------------------------
// Clicking the header collapses/expands the setup panel.
setupHeader.addEventListener('click', () => {
  setupSection.classList.toggle('hidden');
  setupSection.classList.toggle('up');
});
// ------------------------------------------------
//
// Upload img & Get results actions
//
// ------------------------------------------------
const uploadImgButton = document.getElementById('upload-img-btn');
const getResultsButton = document.getElementById('get-results-btn');
// "Upload" simply proxies to the hidden <input type="file">.
uploadImgButton.addEventListener('click', (e) => {
  e.preventDefault();
  uploadInput.click();
});
// "Get results" sends the prepared canvas once a file has been chosen.
getResultsButton.addEventListener('click', (e) => {
  e.preventDefault();
  if (!imgFile) {
    return;
  }
  uploadImage(resultCanvas, imgFile.name);
});
// ------------------------------------------------
//
// Drag & drop files
//
// ------------------------------------------------
// Handle a file dropped anywhere on the window: remember it, render the
// previews and enable the results button once the image has loaded.
function handleDroppedFile(e) {
  e.preventDefault();
  if (!isLoading) {
    saveFormdata();
    let files = extractFiles(e.dataTransfer);
    imgFile = null;
    if (files.length > 0) {
      // Only the first dropped file is used.
      imgFile = files[0];
      updateImage(imgFile, () => {
        getResultsButton.disabled = false;
      });
      imagePreview.classList.add('uploaded');
      // Disabled until the async image load above completes.
      getResultsButton.disabled = true;
    }
  }
  cleanupDragData(e.dataTransfer);
  return false;
}
// Handle a file chosen through the hidden <input type="file">.
// `this` is the input element, so this must remain a regular function.
function handleFileUpload(e) {
  console.log('reading file')
  let reader = new FileReader();
  let file = this.files[0];
  // Ignore empty selections and re-selecting the very same File object.
  if (!file || Object.is(file, imgFile)) {
    return;
  }
  // Watchdog: warn if the FileReader never fires its load event.
  let timeout = setTimeout(() => {
    alert('FileReader not functioning');
  }, 500);
  imgFile = null;
  reader.onload = () => {
    clearTimeout(timeout);
    imgFile = file;
    updateImage(file, () => {
      getResultsButton.disabled = false;
    });
    imagePreview.classList.add('uploaded');
    // Disabled until the async image load above completes.
    getResultsButton.disabled = true;
  };
  reader.readAsDataURL(file);
}
// Allow drag sequences to begin over the window by suppressing the
// browser's default handling.
function handleDragEnter(event) {
  event.preventDefault();
  return true;
}
// Icon highlighted while a file is dragged over the window.
const dropIcon = document.getElementById('drop-icon');

// Keep the dragover default suppressed so dropping stays allowed, and
// light up the drop icon unless an upload is already running.
function handleDragOver(event) {
  event.preventDefault();
  if (!isLoading) {
    dropIcon.classList.add('uploaded');
  }
  return true;
}
// Counterpart of handleDragEnter; only suppresses the default handling.
function handleDragLeave(event) {
  event.preventDefault();
  return true;
}
// Normalize a DataTransfer into a plain array of File objects, supporting
// both the modern `items` interface and the legacy `files` list.
function extractFiles(dataTransfer) {
  const files = [];
  if (dataTransfer.items) {
    for (let index = 0; index < dataTransfer.items.length; index++) {
      const entry = dataTransfer.items[index];
      if (entry.kind === 'file') {
        files.push(entry.getAsFile());
      }
    }
  } else {
    for (let index = 0; index < dataTransfer.files.length; index++) {
      files.push(dataTransfer.files[index]);
    }
  }
  return files;
}
// Release the drag payload once the drop has been handled, using whichever
// clearing API the browser's DataTransfer exposes.
function cleanupDragData(dataTransfer) {
  if (dataTransfer.items) {
    dataTransfer.items.clear();
    return;
  }
  dataTransfer.clearData();
}
// ------------------------------------------------
//
// Layout actions
//
// ------------------------------------------------
// Re-render the previews for the currently selected file, if any.
const resetImage = () => {
  if (imgFile) {
    updateImage(imgFile, () => {
      getResultsButton.disabled = false;
    });
  }
}
// ------------------------------------------------
//
// Persistence & session
//
// ------------------------------------------------
const imgParamName = document.getElementById('image-param-name');
const imgWidth = document.getElementById('input-width');
const imgHeight = document.getElementById('input-height');
// Persist the setup form to localStorage so it survives reloads.
function saveFormdata() {
  localStorage.setItem('token', token.value);
  localStorage.setItem('url', url.value);
  localStorage.setItem('image-param-name', imgParamName.value);
  localStorage.setItem('input-width', imgWidth.value);
  localStorage.setItem('input-height', imgHeight.value);
}
// Restore the setup form from localStorage (empty strings when unset).
function restoreFormdata() {
  token.value = localStorage.getItem('token') || '';
  url.value = localStorage.getItem('url') || '';
  imgParamName.value = localStorage.getItem('image-param-name') || '';
  imgWidth.value = localStorage.getItem('input-width') || '';
  imgHeight.value = localStorage.getItem('input-height') || '';
}
// ------------------------------------------------
//
// View & UI
//
// ------------------------------------------------
// Draw `img` centered over a black background filling the whole canvas;
// `w`/`h` are the target draw size of the image in canvas pixels.
function renderImage(ctx, img, w, h) {
  let cw = ctx.canvas.width;
  let ch = ctx.canvas.height;
  ctx.clearRect(0, 0, cw, ch);
  ctx.fillStyle = 'rgb(0, 0, 0)';
  ctx.fillRect(0, 0, cw, ch);
  ctx.save();
  ctx.drawImage(img, (cw - w) * 0.5, (ch - h) * 0.5, w, h);
  ctx.restore();
}
// Render the chosen file into both canvases: a display preview capped at
// 320px on its shorter side, and the upload canvas at the exact
// width/height configured in the form. `callback` fires after drawing.
function updateImage(file, callback) {
  let canvasBig = document.getElementById('image-big');
  let canvasUpload = resultCanvas;
  let ctxBig = canvasBig.getContext('2d');
  let ctxUpload = canvasUpload.getContext('2d');
  let img = new Image;
  let imageWidth = imgWidth.value;
  let imageHeight = imgHeight.value;
  canvasUpload.setAttribute('width', imageWidth);
  canvasUpload.setAttribute('height', imageHeight);
  img.src = URL.createObjectURL(file);
  img.onload = () => {
    // Scale so the smaller dimension fits 320px; never upscale.
    let bigScale = Math.min(img.width, img.height);
    bigScale = Math.min(320 / bigScale, 1);
    let w = img.width * bigScale;
    let h = img.height * bigScale;
    renderImage(ctxBig, img, w, h);
    renderImage(ctxUpload, img, imageWidth, imageHeight);
    callback();
  }
}
// Show a status line under the preview; `isResult` toggles the result
// styling on the preview container.
function printStatus(message, isResult) {
  const statusNode = document.getElementById('result');
  resultPreview.classList.toggle('result', isResult);
  statusNode.textContent = message;
}
// ------------------------------------------------
//
// API interaction
//
// ------------------------------------------------
// Ask the backend for an upload SAS URL, push the image to blob storage,
// then start polling for the classification result.
function uploadImage(canvasElement, filename) {
  const config = {
    params : {
      'id' : filename,
      // Fix: this previously referenced an undefined placeholder
      // (anonymization residue) and was a syntax error; the access token
      // comes from the setup form field.
      'pToken': token.value,
      'pUrl': url.value,
      'pInputName' : imgParamName.value,
      'pResizeDimensions' : {
        'imgWidth' : imgWidth.value,
        // NOTE(review): "imgHeigth" is misspelled but kept as-is — the
        // backend presumably reads this exact key; confirm before renaming.
        'imgHeigth' : imgHeight.value
      }
    }
  }
  axios.post('https://img-classifier-python.azurewebsites.net/api/UploadImage', JSON.stringify(config))
    .then((res) => {
      /* Store returned SAS URL to upload image to */
      sasUrl = res.data.url
      // Fix: the blob upload and result polling previously ran
      // immediately after this request was *sent*, so sasUrl was still
      // null on the first attempt. Sequence them after the URL arrives.
      return _uploadBlob(sasUrl, imgFile);
    })
    .then(() => {
      console.log(imgFile)
      _getResults(imgFile.name)
    })
    .catch(error => {
      printStatus('Bad robot!');
      console.log(error)
    });
}
// Poll the results endpoint for `id` until a prediction is available,
// retrying every five seconds while the prediction field is empty.
function _getResults(id){
  // Fix: resultsUrl previously leaked into the global scope (no
  // declaration) and would throw outright in strict mode.
  const resultsUrl = 'https://img-classifier-python.azurewebsites.net/api/GetResults?id=' + id
  printStatus('querying...')
  axios.get(resultsUrl)
    .then((res) => {
      if (res.data.prediction === ""){
        // No prediction yet — wait and poll again.
        printStatus('...')
        sleep(5000).then(() => {
          _getResults(id)
          console.log('retrying')
          printStatus('querying...')
        })
      } else {
        console.log(res.data.prediction);
        printStatus(res.data.prediction);
      }
    }).catch(error => {
      printStatus('Bad robot!');
      console.log(error)
    });
}
// PUT the file to Azure Blob Storage through the presigned SAS URL.
// Resolves after logging success; failures are reported in the UI.
function _uploadBlob(sasUrlp, file) {
  const requestConfig = {
    headers: {
      'Content-Type': 'application/octet-stream',
      'x-ms-version': '2017-04-17',
      'x-ms-blob-type': 'BlockBlob',
      'x-ms-blob-content-type': file.type
    }
  };
  return axios.put(sasUrlp, file, requestConfig)
    .then((res) => {
      console.log('Image uploaded!');
    })
    .catch((err) => {
      printStatus('Bad robot!');
      console.log(err);
    });
}
// ------------------------------------------------
//
// Main & startup
//
// ------------------------------------------------
// Wire up global drag & drop plus the hidden file input, then restore any
// previously saved form settings.
window.addEventListener('drop', handleDroppedFile, false);
window.addEventListener('dragenter', handleDragEnter, false);
window.addEventListener('dragover', handleDragOver, false);
// Fix: handleDragLeave was registered on 'dragenter' (a second time), so
// the 'dragleave' event was never observed at all.
window.addEventListener('dragleave', handleDragLeave, false);
uploadInput.addEventListener('change', handleFileUpload, false);
restoreFormdata();
import { Toast } from 'quasar';
import PROVIDERS from './providers';
// Backend endpoint returning exchange rates for a given provider.
const API = 'http://egust.000webhostapp.com/currency_api/get_rates.php?api_version=2.0&provider=';
export default {
  // Generic passthrough: commit an arbitrary mutation with a payload.
  commitAction({ commit }, { action, payload = {} }) {
    // console.log('commitAction', {action, payload})
    commit(action, payload);
  },
  // Fetch rates for `src` (or the currently selected source), serving from
  // the local cache while the provider's timeout window has not expired.
  updateSourceRates({ commit, state }, src = null) {
    if (src) {
      commit('setSource', { src });
    }
    const source = src || state.source;
    const cached = state.cache[source] || {};
    const provider = PROVIDERS[source];
    // Cache still fresh: notify and skip the network round trip.
    if (cached.timeout && Date.now() < cached.timeout) {
      Toast.create({
        html: `Fetched from cache: <b>${provider.title}</b>`,
        image: provider.icon,
        bgColor: '#ffa000',
      });
      return;
    }
    // NOTE(review): fetch/JSON failures are not handled here — a network
    // error is silently dropped; confirm whether an error toast is wanted.
    fetch(API + source)
      .then(response => response.json())
      .then((json) => {
        // Expiry derives from the provider's advertised timeout (seconds)
        // relative to the payload's updated_at timestamp.
        json.timeout = (json.updated_at + provider.timeout) * 1000;
        json.fetched = Date.now();
        commit('updateCache', json);
        Toast.create({
          html: `Rates from <b>${provider.title}</b> have been updated`,
          image: provider.icon,
          bgColor: '#29b6f6',
        });
      });
  },
};
|
def short_words(mylist):
    """Return the words from ``mylist`` that are shorter than 5 characters."""
    # Renamed the accumulator: it previously shadowed the function's name.
    result = []
    for word in mylist:
        if len(word) < 5:
            result.append(word)
    return result


if __name__ == "__main__":
    # Fix: the original module-level call referenced an undefined ``mylist``
    # and raised NameError on import; demonstrate with a concrete example
    # instead, and only when run as a script.
    print(short_words(["hi", "hello", "tiny", "world"]))
/*
* entry
* @Author: JiangBao
* @Date: 2018-08-22 17:05:10
* @Last Modified by: JiangBao
* @Last Modified time: 2018-08-24 15:47:47
*/
import React from 'react';
import ReactDOM from 'react-dom';
import './index.css';
import Notification from './notification';
/* eslint-disable */
// Lazily created singleton notification container handle.
let notification;
// Default time (ms) a toast notice stays visible.
const DURATION = 1500;
/**
 * create notification dom
 * Mounts a <Notification/> into a fresh <div> appended to <body> and
 * returns a small handle for adding notices and tearing the mount down.
 */
function createNotification() {
  const div = document.createElement('div');
  document.body.appendChild(div);
  // This local deliberately shadows the module-level `notification`.
  const notification = ReactDOM.render(<Notification />, div);
  return {
    addNotice(notice) {
      return notification.addNotice(notice);
    },
    destroy() {
      ReactDOM.unmountComponentAtNode(div);
      document.body.removeChild(div);
    },
  };
}
// Public API: Toast(content[, duration]) shows a notice, creating the
// shared container on first use.
const Toast = (content, duration = DURATION) => {
  if (!notification) {
    notification = createNotification();
  }
  return notification.addNotice({ content, duration });
};
export default Toast;
|
# Print "Counter: 0" through "Counter: 5", one line per value.
counter=0
while [ "$counter" -le 5 ]
do
  echo Counter: $counter
  counter=$((counter + 1))
done
|
package com.example.expedia.fragment;
import android.app.Activity;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.v4.app.Fragment;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import com.example.expedia.R;
import com.example.expedia.datamanager.HttpConnection;
import com.example.expedia.datamanager.LoginCallback;
import com.google.android.gms.tasks.OnCompleteListener;
import com.google.android.gms.tasks.Task;
import com.google.firebase.iid.FirebaseInstanceId;
import com.google.firebase.iid.InstanceIdResult;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import java.util.Objects;
/**
 * A simple {@link Fragment} subclass.
 * Email/password login screen. Also fetches the device's FCM registration
 * token so it can accompany the login request for push notifications.
 */
public class LogInFragment extends Fragment {
    private String email, password, json, fcmToken;
    private Activity activity;
    private EditText etEmail, etPassword;
    private LoginCallback callback;
    private HttpConnection httpConnection;

    public LogInFragment() {
        // Required empty public constructor
    }

    @Override
    public View onCreateView(@NonNull LayoutInflater inflater, ViewGroup container,
                             Bundle savedInstanceState) {
        View view = inflater.inflate(R.layout.fragment_log_in, container, false);
        // Fetch the FCM token asynchronously.
        // NOTE(review): fcmToken stays null until this completes, so a very
        // fast login tap sends a null FCM value — confirm the backend
        // tolerates that.
        FirebaseInstanceId.getInstance().getInstanceId()
                .addOnCompleteListener(new OnCompleteListener<InstanceIdResult>() {
                    @Override
                    public void onComplete(@NonNull Task<InstanceIdResult> task) {
                        if (!task.isSuccessful()) {
                            Log.w("TAG", "getInstanceId failed", task.getException());
                            return;
                        }
                        // Get new Instance ID token
                        fcmToken = Objects.requireNonNull(task.getResult()).getToken();
                        // Log and toast
                        String msg = getString(R.string.msg_token_fmt, fcmToken);
                        Log.e("FirebaseId", msg);
                    }
                });
        activity = getActivity();
        // Social login buttons are placeholders: they only show a toast.
        ImageView ivLogin_facebook = view.findViewById(R.id.login_facebook_btn);
        ivLogin_facebook.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Toast.makeText(activity, "페이스북 로그인", Toast.LENGTH_SHORT).show();
            }
        });
        ImageView ivLogin_google = view.findViewById(R.id.login_google_btn);
        ivLogin_google.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Toast.makeText(activity, "구글 로그인", Toast.LENGTH_SHORT).show();
            }
        });
        etEmail = view.findViewById(R.id.login_email);
        etPassword = view.findViewById(R.id.login_password);
        Button BtnLog_in = view.findViewById(R.id.login_btn);
        BtnLog_in.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Serialize credentials (plus the FCM token) into the JSON
                // body expected by the login endpoint.
                email = etEmail.getText().toString();
                password = etPassword.getText().toString();
                Gson body = new Gson();
                JsonObject object = new JsonObject();
                object.addProperty("Email", email);
                object.addProperty("Pw", password);
                object.addProperty("FCM",fcmToken);
                json = body.toJson(object);
                Log.e("input",json);
                sendData();
            }
        });
        TextView tvForgotPassword = view.findViewById(R.id.find_password);
        tvForgotPassword.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Toast.makeText(activity, "비밀번호 찾기", Toast.LENGTH_SHORT).show();
            }
        });
        return view;
    }

    // POST the login JSON on a background thread; LoginCallback delivers
    // the server response back to the UI.
    private void sendData() {
        httpConnection = new HttpConnection(getContext(), "login", "post", json);
        callback = new LoginCallback(activity);
        new Thread() {
            public void run() {
                httpConnection.requestWebServer(callback);
            }
        }.start();
    }
}
|
#!/usr/bin/bash
#SBATCH --ntasks 12 --nodes 1 --mem 96G --time 2:00:00 -p short --out logs/run_genemark.log
# Run GeneMark gene prediction per scaffold in parallel on a soft-masked
# assembly, then merge the per-scaffold GTFs with scaffold-prefixed IDs.
module load parallel
module load genemarkESET
if [ $SLURM_CPUS_ON_NODE ]; then
CPU=$SLURM_CPUS_ON_NODE
fi
# NOTE(review): CPU above looks vestigial — CPUS below is what parallel uses.
CPUS=$SLURM_CPUS_ON_NODE
TMPDIR=genemark_run
OUTFILE=genemark.gtf
# config.txt must define MASKED (genome FASTA) and GENEMARK (gmes .mod file).
if [[ -f "config.txt" ]]; then
source config.txt
else
echo "Need a config file"
exit
fi
if [[ -z $MASKED || ! -f $MASKED ]]; then
echo "NEED TO EDIT CONFIG FILE TO SPECIFY THE INPUT GENOME AS VARIABLE: MASKED=GENOMEFILEFFASTA"
exit
fi
if [[ -z $GENEMARK || ! -f $GENEMARK ]]; then
echo "need to edit config and provide GENEMARK variable to point to gmes.mod file"
exit
fi
# Split the assembly into one FASTA per scaffold (first run only).
if [ ! -d $TMPDIR ]; then
mkdir -p $TMPDIR
pushd $TMPDIR
bp_seqretsplit.pl ../$MASKED
popd
fi
cmdfile=genemark.jobs
# Build one gmhmme3 command per scaffold lacking a GTF, run them in
# parallel, then merge. NOTE(review): the parallel run and the merge only
# happen when $cmdfile is absent; a stale job file skips everything.
if [ ! -f $cmdfile ]; then
for file in $TMPDIR/*.fa;
do
scaf=$(basename $file .fa)
GTF="$TMPDIR/$scaf.gtf"
if [ ! -f $GTF ]; then
#reformat_fasta.pl --up --soft_mask --native --in $file --out $TMPDIR/$scaf.masked
probuild --reformat_fasta --mask_soft 100 --up --allow_x --letters_per_line 60 --in $file --out $TMPDIR/$scaf.masked
#unlink $file
# Rewrite the sequence header to the bare scaffold name.
perl -i -p -e "s/^>(\S+)/>$scaf/" $TMPDIR/$scaf.masked
echo "gmhmme3 -s $scaf -f gtf -m $GENEMARK -o $GTF $TMPDIR/$scaf.masked"
fi
done > $cmdfile
parallel -j$CPUS -a $cmdfile
# Prefix gene/transcript IDs with the scaffold so IDs are unique genome-wide.
perl -p -e 'if( ! /^#/ ) { my @row = split(/\t/,$_); my $scaf = $row[0]; $row[-1] =~ s/gene_id\s+"([^"]+)"; transcript_id\s+"([^"]+)"/gene_id "$scaf.$1"; transcript_id "$scaf.$2"/; $_ = join("\t",@row)}' $TMPDIR/*.gtf > genemark.gtf
fi
|
import argparse
import json
import os
import pickle
from pathlib import Path
from random import shuffle
from typing import Tuple, List
from itertools import chain
from tqdm import tqdm
def shop_user_lists(path):
    """Split the annotation files under ``path`` into shop and user image ids.

    Each JSON file carries a ``source`` field; the returned pair of lists
    holds the file stems (image ids) for the "shop" and "user" sources.
    """
    shop_images_list = []
    user_images_list = []
    print('make shop- and user-list:')
    for file in tqdm(os.listdir(path)):
        with open(os.path.join(path, file), mode='r') as json_file:
            anno_file = json.load(json_file)
        source = anno_file.get('source')
        stem = Path(file).stem
        if source == 'shop':
            shop_images_list.append(stem)
        if source == 'user':
            user_images_list.append(stem)
    return shop_images_list, user_images_list
def create_category_dict(coco_file: dict) -> dict:
    """Map each image_id to the list of category_ids annotated on it."""
    pairs = [(anno.get('image_id'), anno.get('category_id'))
             for anno in tqdm(coco_file.get('annotations'))]
    category_dict = {}
    for image_id, category_id in tqdm(pairs):
        category_dict.setdefault(image_id, []).append(category_id)
    return category_dict
def create_all_positive_pairs(coco_file, shop, user):
    """Return unique (user_image, shop_image, category_id) positive pairs.

    A pair is positive when a user annotation and a shop annotation share
    the same non-zero style, the same pair_id and the same category_id.

    Fix: the original nested loop rescanned every annotation for every
    user annotation (accidental O(n^2)); indexing the shop annotations by
    their (style, pair_id, category_id) key makes this one pass each way.
    The de-duplicated result set is identical.
    """
    user_set = set(user)
    shop_set = set(shop)
    print('creating all positive pairs:')
    # Index shop-side annotations by the matching key.
    shop_index = {}
    for anno in coco_file.get('annotations'):
        if anno.get('image_id') in shop_set:
            key = (anno.get('style'), anno.get('pair_id'), anno.get('category_id'))
            shop_index.setdefault(key, set()).add(anno.get('image_id'))
    positive_pairs = []
    for anno in tqdm(coco_file.get('annotations')):
        if anno.get('image_id') in user_set and anno.get('style') != 0:
            key = (anno.get('style'), anno.get('pair_id'), anno.get('category_id'))
            for shop_image in shop_index.get(key, ()):
                positive_pairs.append((anno.get('image_id'), shop_image, anno.get('category_id')))
    return list(set(positive_pairs))
def create_negative_pairs(coco_file, shop, user, positive_pairs):
    """Sample negative (user, shop) pairs, capped at ``len(positive_pairs)``.

    A negative pair shares pair_id and category_id but has a *different*
    style, so the garments look related without being the same item. The
    category slot is recorded as None to mark the pair as negative.

    NOTE(review): O(n^2) scan like the positive-pair builder; the early
    break merely keeps the negative set the same size as the positive one,
    so the sample is biased toward annotations early in the file.
    """
    negative_pairs = []
    user_set = set(user)
    shop_set = set(shop)
    print('creating all negative pairs:')
    for i in tqdm(coco_file.get('annotations')):
        if len(negative_pairs) >= len(positive_pairs):
            break
        else:
            if i.get('image_id') in user_set:
                for j in coco_file.get('annotations'):
                    if j.get('image_id') in shop_set:
                        if i.get('style') != j.get('style'):
                            if i.get('pair_id') == j.get('pair_id'):
                                if i.get('category_id') == j.get('category_id'):
                                    negative_pairs.append((i.get('image_id'), j.get('image_id'), None))
    return list(set(negative_pairs))
def load_pairs(pairs_pkl: str) -> List[Tuple[str, str]]:
    """Deserialize and return the pickled pair list stored at ``pairs_pkl``."""
    with open(pairs_pkl, 'rb') as handle:
        return pickle.load(handle)
def main():
    """Build shuffled, interleaved positive/negative training pairs.

    For the chosen split (train/validation/test) this loads the COCO-style
    annotation file, derives — and caches as pickles — the shop/user image
    lists plus the positive/negative pairs, then writes the interleaved
    result to data/processed/<set>_pairs.pkl.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-s',
                        '--set',
                        help='choose between training and validation',
                        choices=['train', 'validation', 'test'],
                        nargs='?',
                        default='validation')
    args = parser.parse_args()
    file = os.path.join('data', 'processed', 'deepfashion2_coco_' + args.set + '.json')
    with open(file, mode='r') as json_file:
        coco_file = json.load(json_file)
    # Shop/user image-id lists: reuse cached pickles when both exist.
    shop_pkl = Path(os.path.join('data', 'processed', args.set + '_shop_list.pkl'))
    user_pkl = Path(os.path.join('data', 'processed', args.set + '_user_list.pkl'))
    if shop_pkl.is_file() and user_pkl.is_file():
        with open(shop_pkl, 'rb') as f:
            shop = pickle.load(f)
        with open(user_pkl, 'rb') as f:
            user = pickle.load(f)
    else:
        shop, user = shop_user_lists(os.path.join('data', 'raw', args.set, 'annos'))
        with open(shop_pkl, 'wb') as f:
            pickle.dump(shop, f)
        with open(user_pkl, 'wb') as f:
            pickle.dump(user, f)
    # Positive pairs: cached likewise.
    positive_pkl = Path(os.path.join('data', 'processed', args.set + '_positive_pairs.pkl'))
    if positive_pkl.is_file():
        with open(positive_pkl, 'rb') as f:
            positive_pairs = pickle.load(f)
    else:
        positive_pairs = create_all_positive_pairs(coco_file, shop, user)
        with open(positive_pkl, 'wb') as f:
            pickle.dump(positive_pairs, f)
    # negative_pairs = create_negative_pairs(coco_file, shop, user, positive_pairs)
    # with open(os.path.join('data', 'processed', args.set + '_negative_pairs.pkl'), 'wb') as f:
    # pickle.dump(negative_pairs, f)
    # Negative pairs: cached likewise.
    negative_pkl = Path(os.path.join('data', 'processed', args.set + '_negative_pairs.pkl'))
    if negative_pkl.is_file():
        with open(negative_pkl, 'rb') as f:
            negative_pairs = pickle.load(f)
    else:
        negative_pairs = create_negative_pairs(coco_file, shop, user, positive_pairs)
        with open(negative_pkl, 'wb') as f:
            pickle.dump(negative_pairs, f)
    # Shuffle both lists, then interleave positive/negative so training
    # batches alternate (zip truncates to the shorter list).
    shuffle(positive_pairs)
    shuffle(negative_pairs)
    training_pairs = list(chain(*zip(positive_pairs, negative_pairs)))
    with open(os.path.join('data', 'processed', args.set + '_pairs.pkl'), 'wb') as f:
        pickle.dump(training_pairs, f)
    print('length pos pairs: ', len(positive_pairs))
    print(positive_pairs[:10])
    print('length neg pairs: ', len(negative_pairs))
    print(negative_pairs[:10])
    print('length pairs: ', len(training_pairs))
    print(training_pairs[:10])


if __name__ == '__main__':
    main()
|
// Type-surface probe: assigning WeakSet's static members verifies that the
// runtime (or the lib typings) exposes `length`, `name` and `prototype`.
var _;
_ = WeakSet.length;
_ = WeakSet.name;
_ = WeakSet.prototype;
|
package com.donfyy.crowds.dagger;
import com.donfyy.crowds.L2Fragment;
import dagger.Module;
import dagger.android.ContributesAndroidInjector;
/**
 * Dagger module that makes {@link L2Fragment} injectable through
 * dagger.android's {@code AndroidInjector} machinery.
 */
@Module
public abstract class L2FragmentModule {
    /**
     * Generates an {@code AndroidInjector<L2Fragment>} subcomponent.
     * Renamed from {@code L2Fragment()} to follow the lowerCamelCase
     * method convention; the binding is keyed on the return type, so the
     * method name is free to change.
     */
    @ContributesAndroidInjector
    abstract L2Fragment contributeL2Fragment();
}
|
<filename>spec/spec_helper.rb
require 'sqlite3'
require 'active_record'
require 'i18n'
require 'active_support/core_ext'
require 'byebug'
require 'simple_slug'
# Allow the non-standard :uk default locale without declaring it available.
I18n.enforce_available_locales = false
I18n.default_locale = :uk
# Specs run against a throwaway in-memory SQLite database.
ActiveRecord::Base.establish_connection(
  adapter: 'sqlite3',
  database: ':memory:'
)
# ActiveRecord::Base.logger = Logger.new(STDOUT)
RSpec.configure do |config|
  config.before(:suite) do
    ActiveRecord::Migration.verbose = false
    # Schema for the sluggable model under test plus the slug-history table
    # that simple_slug writes to.
    ActiveRecord::Schema.define do
      create_table :rspec_active_record_bases, force: true do |t|
        t.string :name
        t.string :slug, limit: 191
        t.string :slug_en, limit: 191
        t.timestamps
      end
      create_table :simple_slug_history_slugs, force: true do |t|
        t.string :slug, null: false, limit: 191
        t.string :locale, limit: 10
        t.integer :sluggable_id, null: false
        t.string :sluggable_type, limit: 50, null: false
        t.timestamps
      end
    end
  end
end

# Minimal model exercising SimpleSlug in the specs.
class RspecActiveRecordBase < ActiveRecord::Base
  include SimpleSlug::ModelAddition
end
var Adapter,
__hasProp = {}.hasOwnProperty,
__slice = [].slice;
Adapter = (function() {
var dataValues, lastDataId;
function Adapter() {}
Adapter.prototype.name = "native";
// Invoke `callback` once the DOM is ready — immediately if it already is.
// Legacy cross-browser implementation (compiled from CoffeeScript): uses
// DOMContentLoaded / readystatechange / load where available, plus the
// old-IE doScroll polling trick as a fallback.
Adapter.prototype.domReady = function(callback) {
  var add, doc, done, init, poll, pre, rem, root, top, win, _ref;
  done = false;
  top = true;
  win = window;
  doc = document;
  // Document already parsed: run the callback synchronously.
  if ((_ref = doc.readyState) === "complete" || _ref === "loaded") {
    return callback();
  }
  root = doc.documentElement;
  // Select the DOM level 2 API or the legacy IE attachEvent API.
  add = (doc.addEventListener ? "addEventListener" : "attachEvent");
  rem = (doc.addEventListener ? "removeEventListener" : "detachEvent");
  pre = (doc.addEventListener ? "" : "on");
  // Shared handler: unbinds itself and fires the callback exactly once.
  init = function(e) {
    if (e.type === "readystatechange" && doc.readyState !== "complete") {
      return;
    }
    (e.type === "load" ? win : doc)[rem](pre + e.type, init, false);
    if (!done) {
      done = true;
      return callback();
    }
  };
  // IE fallback: doScroll throws until the document is ready, so retry
  // every 50ms and hand over to init() once it succeeds.
  poll = function() {
    var e;
    try {
      root.doScroll("left");
    } catch (_error) {
      e = _error;
      setTimeout(poll, 50);
      return;
    }
    return init("poll");
  };
  if (doc.readyState !== "complete") {
    if (doc.createEventObject && root.doScroll) {
      try {
        // Only poll in the top window, never inside frames.
        top = !win.frameElement;
      } catch (_error) {}
      if (top) {
        poll();
      }
    }
    doc[add](pre + "DOMContentLoaded", init, false);
    doc[add](pre + "readystatechange", init, false);
    return win[add](pre + "load", init, false);
  }
};
// Build DOM nodes from an HTML string; returns them wrapped as an array.
Adapter.prototype.create = function(htmlString) {
  var div;
  div = document.createElement("div");
  div.innerHTML = htmlString;
  return this.wrap(div.childNodes);
};
// Normalize any input (null, selector string, NodeList, single node or
// array) into a plain array of elements.
Adapter.prototype.wrap = function(element) {
  var el;
  if (!element) {
    element = [];
  } else if (typeof element === "string") {
    // Selector string: resolve against <body>; no match yields [].
    element = this.find(document.body, element);
    element = element ? [element] : [];
  } else if (element instanceof NodeList) {
    // Copy the live NodeList into a static array.
    element = (function() {
      var _i, _len, _results;
      _results = [];
      for (_i = 0, _len = element.length; _i < _len; _i++) {
        el = element[_i];
        _results.push(el);
      }
      return _results;
    })();
  } else if (!(element instanceof Array)) {
    element = [element];
  }
  return element;
};
// First raw DOM element of a wrapped value (undefined when empty).
Adapter.prototype.unwrap = function(element) {
  return this.wrap(element)[0];
};
// Tag name of the (unwrapped) element, e.g. "DIV".
Adapter.prototype.tagName = function(element) {
  return this.unwrap(element).tagName;
};
// Get (2 args) or set (3 args) an attribute on the element.
Adapter.prototype.attr = function(element, attr, value) {
  if (arguments.length === 3) {
    return this.unwrap(element).setAttribute(attr, value);
  } else {
    return this.unwrap(element).getAttribute(attr);
  }
};
// Per-element data store, keyed by an auto-assigned "data-id" attribute.
lastDataId = 0;
dataValues = {};
// Get (2 args) or set (3 args) a named data value on the element; reads
// fall back to a "data-<dasherized-name>" attribute and cache the result.
Adapter.prototype.data = function(element, name, value) {
  var dataId;
  dataId = this.attr(element, "data-id");
  if (!dataId) {
    // First access for this element: assign an id and a fresh store.
    dataId = ++lastDataId;
    this.attr(element, "data-id", dataId);
    dataValues[dataId] = {};
  }
  if (arguments.length === 3) {
    return dataValues[dataId][name] = value;
  } else {
    value = dataValues[dataId][name];
    if (value != null) {
      return value;
    }
    value = this.attr(element, "data-" + (Opentip.prototype.dasherize(name)));
    if (value) {
      dataValues[dataId][name] = value;
    }
    return value;
  }
};
// querySelector / querySelectorAll relative to the given element.
Adapter.prototype.find = function(element, selector) {
  return this.unwrap(element).querySelector(selector);
};
Adapter.prototype.findAll = function(element, selector) {
  return this.unwrap(element).querySelectorAll(selector);
};
// Replace the element's content; with `escape` set, the content is
// inserted as a plain text node instead of HTML markup.
Adapter.prototype.update = function(element, content, escape) {
  element = this.unwrap(element);
  if (escape) {
    element.innerHTML = "";
    return element.appendChild(document.createTextNode(content));
  } else {
    return element.innerHTML = content;
  }
};
// Append `child` to `element` (either may be wrapped).
Adapter.prototype.append = function(element, child) {
  var unwrappedChild, unwrappedElement;
  unwrappedChild = this.unwrap(child);
  unwrappedElement = this.unwrap(element);
  return unwrappedElement.appendChild(unwrappedChild);
};
// Detach the element from the DOM, if it currently has a parent.
Adapter.prototype.remove = function(element) {
  var parentNode;
  element = this.unwrap(element);
  parentNode = element.parentNode;
  if (parentNode != null) {
    return parentNode.removeChild(element);
  }
};
// CSS class helpers.
Adapter.prototype.addClass = function(element, className) {
  return this.unwrap(element).classList.add(className);
};
Adapter.prototype.removeClass = function(element, className) {
  return this.unwrap(element).classList.remove(className);
};
// Apply a map of inline style properties to the element (own keys only).
Adapter.prototype.css = function(element, properties) {
  var key, value, _results;
  element = this.unwrap(this.wrap(element));
  _results = [];
  for (key in properties) {
    if (!__hasProp.call(properties, key)) continue;
    value = properties[key];
    _results.push(element.style[key] = value);
  }
  return _results;
};
// Measure the element's offset size. Hidden elements report 0x0, so such
// elements are temporarily shown (absolute + hidden + block), measured,
// and their inline styles restored afterwards.
Adapter.prototype.dimensions = function(element) {
  var dimensions, revert;
  element = this.unwrap(this.wrap(element));
  dimensions = {
    width: element.offsetWidth,
    height: element.offsetHeight
  };
  if (!(dimensions.width && dimensions.height)) {
    revert = {
      position: element.style.position || '',
      visibility: element.style.visibility || '',
      display: element.style.display || ''
    };
    this.css(element, {
      position: "absolute",
      visibility: "hidden",
      display: "block"
    });
    dimensions = {
      width: element.offsetWidth,
      height: element.offsetHeight
    };
    this.css(element, revert);
  }
  return dimensions;
};
Adapter.prototype.scrollOffset = function() {
return [window.pageXOffset || document.documentElement.scrollLeft || document.body.scrollLeft, window.pageYOffset || document.documentElement.scrollTop || document.body.scrollTop];
};
Adapter.prototype.viewportDimensions = function() {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight
};
};
// Resolve the page-relative mouse position from a DOM event.
// Falls back to window.event (legacy IE) and, when pageX/pageY are missing,
// derives the position from client coordinates plus the scroll offset.
Adapter.prototype.mousePosition = function(e) {
var pos;
pos = {
x: 0,
y: 0
};
if (e == null) {
e = window.event;
}
if (e == null) {
return;
}
try {
// pageX/pageY already include scroll; clientX/clientY do not.
// NOTE(review): a coordinate of exactly 0 is falsy and falls through to the
// next branch — a quirk of this truthiness check, preserved as-is.
if (e.pageX || e.pageY) {
pos.x = e.pageX;
pos.y = e.pageY;
} else if (e.clientX || e.clientY) {
pos.x = e.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
pos.y = e.clientY + document.body.scrollTop + document.documentElement.scrollTop;
}
} catch (_error) {
// Swallow access errors from exotic event objects and return the default.
e = _error;
}
return pos;
};
// Compute an element's position relative to the document by walking the
// offsetParent chain, subtracting intermediate containers' scroll offsets.
Adapter.prototype.offset = function(element) {
var offset;
element = this.unwrap(element);
offset = {
top: element.offsetTop,
left: element.offsetLeft
};
// The while condition intentionally ASSIGNS: climb the chain until
// offsetParent is null (the document root).
while (element = element.offsetParent) {
offset.top += element.offsetTop;
offset.left += element.offsetLeft;
if (element !== document.body) {
offset.top -= element.scrollTop;
offset.left -= element.scrollLeft;
}
}
return offset;
};
Adapter.prototype.observe = function(element, eventName, observer) {
  // Register a bubbling-phase DOM event listener.
  var node = this.unwrap(element);
  return node.addEventListener(eventName, observer, false);
};
Adapter.prototype.stopObserving = function(element, eventName, observer) {
  // Remove a listener previously registered with observe() (same phase flag).
  var node = this.unwrap(element);
  return node.removeEventListener(eventName, observer, false);
};
// Minimal XMLHttpRequest wrapper for loading tooltip content.
// options: { url (required), method (default "GET"),
//            onSuccess(responseText), onError(message), onComplete() }.
// NOTE(review): only HTTP status 200 counts as success; other 2xx codes are
// reported through onError.
Adapter.prototype.ajax = function(options) {
var e, request, _ref, _ref1;
if (options.url == null) {
throw new Error("No url provided");
}
// Feature-detect the XHR implementation; the ActiveXObject fallbacks keep
// legacy Internet Explorer working.
if (window.XMLHttpRequest) {
request = new XMLHttpRequest;
} else if (window.ActiveXObject) {
try {
request = new ActiveXObject("Msxml2.XMLHTTP");
} catch (_error) {
e = _error;
try {
request = new ActiveXObject("Microsoft.XMLHTTP");
} catch (_error) {
e = _error;
}
}
}
if (!request) {
throw new Error("Can't create XMLHttpRequest");
}
request.onreadystatechange = function() {
// readyState 4 = DONE: the transfer finished (successfully or not).
if (request.readyState === 4) {
try {
if (request.status === 200) {
if (typeof options.onSuccess === "function") {
options.onSuccess(request.responseText);
}
} else {
if (typeof options.onError === "function") {
options.onError("Server responded with status " + request.status);
}
}
} catch (_error) {
// A throwing callback is reported through onError rather than escaping.
e = _error;
if (typeof options.onError === "function") {
options.onError(e.message);
}
}
// onComplete always fires last, success or failure.
return typeof options.onComplete === "function" ? options.onComplete() : void 0;
}
};
// Normalize the method to uppercase, defaulting to GET, then fire the request.
request.open((_ref = (_ref1 = options.method) != null ? _ref1.toUpperCase() : void 0) != null ? _ref : "GET", options.url);
return request.send();
};
Adapter.prototype.clone = function(object) {
  // Shallow copy of an object's own enumerable properties.
  var copy = {};
  for (var key in object) {
    if (Object.prototype.hasOwnProperty.call(object, key)) {
      copy[key] = object[key];
    }
  }
  return copy;
};
Adapter.prototype.extend = function() {
  // Copy own enumerable properties of each source onto the first argument,
  // left to right (later sources win).  Returns the mutated target.
  var target = arguments[0];
  for (var i = 1; i < arguments.length; i++) {
    var source = arguments[i];
    for (var key in source) {
      if (Object.prototype.hasOwnProperty.call(source, key)) {
        target[key] = source[key];
      }
    }
  }
  return target;
};
return Adapter;
})();
Opentip.addAdapter(new Adapter);
|
#! /usr/bin/env python
# -*- coding:UTF-8 -*-
import pymysql
"""时间空间使用效率比较强"""
def fetchsome(cursor, arraysize=1000):
    """Yield rows from `cursor` one at a time, fetching in batches.

    Pulls `arraysize` rows per round-trip via fetchmany(), keeping memory
    bounded while avoiding one network fetch per row.
    """
    while True:
        batch = cursor.fetchmany(arraysize)
        if not batch:
            return
        for row in batch:
            yield row
if __name__ == '__main__':
    # Connect, stream the City table through fetchsome(), and print each row.
    # BUG FIX: `print r` is Python-2-only syntax and is a SyntaxError under
    # Python 3 (which pymysql targets); also close the connection when done.
    connection = pymysql.connect(user='root', password='<PASSWORD>', database='world')
    try:
        cursor = connection.cursor()
        cursor.execute('SELECT * from City')
        for row in fetchsome(cursor):
            print(row)
    finally:
        connection.close()
|
# Train and evaluate a small CNN on MNIST.
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.utils import to_categorical
from keras.datasets import mnist

# Load MNIST, scale pixels to [0, 1], and add the channel axis Conv2D expects.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
# One-hot encode the 10 digit classes.
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# Build the model: conv -> pool -> dropout -> dense classifier.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))  # regularization: drop 25% of activations during training
model.add(Flatten())
model.add(Dense(512, activation='relu'))
# BUG FIX: original read activation='softmax)) — an unterminated string
# literal (missing closing quote), which is a SyntaxError.
model.add(Dense(10, activation='softmax'))  # output layer: class probabilities

# Compile the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train the model
model.fit(x_train, y_train, batch_size=128, epochs=5, validation_data=(x_test, y_test))
# Evaluate the model
loss, accuracy = model.evaluate(x_test, y_test)
print(f'Test accuracy: {accuracy * 100:.2f}%')
/*====================================================================================================*/
/*====================================================================================================*/
#include "stm32f4xx.h"
#include "stm32f4xx_spi.h"
#include "mpu6500.h"
#include "main.h"
#include "functions.h"
/*====================================================================================================*/
/*====================================================================================================*/
/*====================================================================================================*/
/*====================================================================================================*
**函數 : MPU9250_ReadReg
**功能 : 讀暫存器
**輸入 : ReadAddr
**輸出 : ReadData
**使用 : MPU9250_ReadReg(ReadAddr, &DeviceID);
**====================================================================================================*/
/*====================================================================================================*/
/**
 * Read one MPU9250 register over SPI.
 * SPIx     : SPI1 or SPI4 (selects which chip-select line is driven)
 * ReadAddr : register address; bit7 is set on the wire to mark a read
 * ReadData : out parameter receiving the register value
 */
void MPU9250_ReadReg(SPI_TypeDef* SPIx, u8 ReadAddr, u8 *ReadData )
{
  /* Assert the software chip-select for the bus in use. */
  if (SPIx == SPI1)
    GPIO_ResetBits(GPIOA, GPIO_Pin_15);
  else if (SPIx == SPI4)
    GPIO_ResetBits(GPIOE, GPIO_Pin_15);

  /* First transfer sends the read command; the dummy 0xFF clocks data out. */
  SPIx_ReadWriteByte(SPIx, 0x80 | ReadAddr);
  *ReadData = SPIx_ReadWriteByte(SPIx, 0xFF);

  /* Release chip-select. */
  if (SPIx == SPI1)
    GPIO_SetBits(GPIOA, GPIO_Pin_15);
  else if (SPIx == SPI4)
    GPIO_SetBits(GPIOE, GPIO_Pin_15);
}
/*====================================================================================================*/
/*====================================================================================================*
**函數 : MPU9250_WriteReg
**功能 : 寫暫存器
**輸入 : WriteAddr, WriteData
**輸出 : None
**使用 : MPU9250_WriteReg(WriteAddr, WriteData);
**====================================================================================================*/
/*====================================================================================================*/
/**
 * Write one byte to an MPU9250 register over SPI.
 * SPIx      : SPI1 or SPI4 (selects which chip-select line is driven)
 * WriteAddr : register address (bit7 clear = write command)
 * WriteData : value to store
 */
void MPU9250_WriteReg(SPI_TypeDef* SPIx, u8 WriteAddr, u8 WriteData )
{
  /* Assert the software chip-select for the bus in use. */
  if (SPIx == SPI1)
    GPIO_ResetBits(GPIOA, GPIO_Pin_15);
  else if (SPIx == SPI4)
    GPIO_ResetBits(GPIOE, GPIO_Pin_15);

  SPIx_ReadWriteByte(SPIx, WriteAddr);
  SPIx_ReadWriteByte(SPIx, WriteData);

  /* Release chip-select. */
  if (SPIx == SPI1)
    GPIO_SetBits(GPIOA, GPIO_Pin_15);
  else if (SPIx == SPI4)
    GPIO_SetBits(GPIOE, GPIO_Pin_15);
}
/*=====================================================================================================*/
/*=====================================================================================================*
**函數 : MPU9250_ReadRegs
**功能 : 連續讀暫存器
**輸入 : ReadAddr, *ReadBuf, Bytes
**輸出 : None
**使用 : MPU9250_ReadRegs(MPU6500_ACCEL_XOUT_H, ReadBuf, 14);
**=====================================================================================================*/
/*=====================================================================================================*/
/**
 * Burst-read `Bytes` consecutive registers starting at `ReadAddr`.
 * NOTE(review): relies on the sensor streaming successive registers after a
 * single address byte (MPU9250 burst-read behavior) — confirm in datasheet.
 * SPIx     : SPI1 or SPI4
 * ReadAddr : first register address (read bit added on the wire)
 * ReadBuf  : destination buffer, at least `Bytes` long
 */
void MPU9250_ReadRegs(SPI_TypeDef* SPIx, u8 ReadAddr, u8 *ReadBuf, u8 Bytes )
{
  u8 i;

  /* Assert chip-select. */
  if (SPIx == SPI1)
    GPIO_ResetBits(GPIOA, GPIO_Pin_15);
  else if (SPIx == SPI4)
    GPIO_ResetBits(GPIOE, GPIO_Pin_15);

  //Delay_us(1);
  /* Send the read command once, then clock out each byte with 0xFF. */
  SPIx_ReadWriteByte(SPIx, 0x80 | ReadAddr);
  for (i = 0; i < Bytes; i++)
    ReadBuf[i] = SPIx_ReadWriteByte(SPIx, 0xFF);

  /* Release chip-select. */
  if (SPIx == SPI1)
    GPIO_SetBits(GPIOA, GPIO_Pin_15);
  else if (SPIx == SPI4)
    GPIO_SetBits(GPIOE, GPIO_Pin_15);
}
/*====================================================================================================*/
/*====================================================================================================*
**函數 : MPU9250_Config
**功能 : 初始化 MPU9250
**輸入 : None
**輸出 : None
**使用 : MPU9250_Config();
**====================================================================================================*/
/*====================================================================================================*/
/**
 * Configure the GPIO pins and SPI peripheral used to reach the MPU9250.
 *  - SPI1: AF pins PA5/PA7 and PB4, software chip-select on PA15
 *  - SPI4: AF pins PE2/PE5/PE6, software chip-select on PE15
 * (pin roles follow the AF mappings below — confirm against the schematic)
 *
 * BUG FIX: both chip-select init blocks assigned GPIO_Mode_OUT (a mode
 * constant) to the GPIO_OType field; the correct output-type constant is
 * GPIO_OType_PP (push-pull).
 */
void MPU9250_Config(SPI_TypeDef* SPIx)
{
  GPIO_InitTypeDef GPIO_InitStruct;
  SPI_InitTypeDef SPI_InitStruct;

  if(SPIx == SPI1)
  {
    /* Enable GPIOA/GPIOB and SPI1 clocks. */
    RCC_AHB1PeriphClockCmd(RCC_AHB1Periph_GPIOA, ENABLE);
    RCC_AHB1PeriphClockCmd(RCC_AHB1Periph_GPIOB, ENABLE);
    RCC_APB2PeriphClockCmd(RCC_APB2Periph_SPI1, ENABLE);

    /* Route PA5, PB4, PA7 to the SPI1 alternate function. */
    GPIO_PinAFConfig(GPIOA, GPIO_PinSource5, GPIO_AF_SPI1);
    GPIO_PinAFConfig(GPIOB, GPIO_PinSource4, GPIO_AF_SPI1);
    GPIO_PinAFConfig(GPIOA, GPIO_PinSource7, GPIO_AF_SPI1);

    /* Chip-select PA15: plain push-pull output (fixed GPIO_OType value). */
    GPIO_InitStruct.GPIO_Pin   = GPIO_Pin_15;
    GPIO_InitStruct.GPIO_Mode  = GPIO_Mode_OUT;
    GPIO_InitStruct.GPIO_OType = GPIO_OType_PP;
    GPIO_InitStruct.GPIO_PuPd  = GPIO_PuPd_UP;
    GPIO_InitStruct.GPIO_Speed = GPIO_Speed_50MHz;
    GPIO_Init(GPIOA, &GPIO_InitStruct);

    /* PB4: alternate function. */
    GPIO_InitStruct.GPIO_Pin   = GPIO_Pin_4;
    GPIO_InitStruct.GPIO_Mode  = GPIO_Mode_AF;
    GPIO_InitStruct.GPIO_OType = GPIO_OType_PP;
    GPIO_InitStruct.GPIO_PuPd  = GPIO_PuPd_UP;
    GPIO_InitStruct.GPIO_Speed = GPIO_Speed_50MHz;
    GPIO_Init(GPIOB, &GPIO_InitStruct);

    /* PA5 and PA7: alternate function. */
    GPIO_InitStruct.GPIO_Pin   = GPIO_Pin_5 | GPIO_Pin_7 ;
    GPIO_InitStruct.GPIO_Mode  = GPIO_Mode_AF;
    GPIO_InitStruct.GPIO_OType = GPIO_OType_PP;
    GPIO_InitStruct.GPIO_PuPd  = GPIO_PuPd_UP;
    GPIO_InitStruct.GPIO_Speed = GPIO_Speed_50MHz;
    GPIO_Init(GPIOA, &GPIO_InitStruct);
  }
  else if(SPIx == SPI4)
  {
    /* Enable GPIOE and SPI4 clocks. */
    RCC_AHB1PeriphClockCmd(RCC_AHB1Periph_GPIOE, ENABLE);
    RCC_APB2PeriphClockCmd(RCC_APB2Periph_SPI4, ENABLE);

    /* Route PE2, PE5, PE6 to the SPI4 alternate function. */
    GPIO_PinAFConfig(GPIOE, GPIO_PinSource2, GPIO_AF_SPI4);
    GPIO_PinAFConfig(GPIOE, GPIO_PinSource5, GPIO_AF_SPI4);
    GPIO_PinAFConfig(GPIOE, GPIO_PinSource6, GPIO_AF_SPI4);

    /* Chip-select PE15: plain push-pull output (fixed GPIO_OType value). */
    GPIO_InitStruct.GPIO_Pin   = GPIO_Pin_15;
    GPIO_InitStruct.GPIO_Mode  = GPIO_Mode_OUT;
    GPIO_InitStruct.GPIO_OType = GPIO_OType_PP;
    GPIO_InitStruct.GPIO_PuPd  = GPIO_PuPd_UP;
    GPIO_InitStruct.GPIO_Speed = GPIO_Speed_50MHz;
    GPIO_Init(GPIOE, &GPIO_InitStruct);

    /* PE5, PE6, PE2: alternate function. */
    GPIO_InitStruct.GPIO_Pin   = GPIO_Pin_5 | GPIO_Pin_6 | GPIO_Pin_2;
    GPIO_InitStruct.GPIO_Mode  = GPIO_Mode_AF;
    GPIO_InitStruct.GPIO_OType = GPIO_OType_PP;
    GPIO_InitStruct.GPIO_PuPd  = GPIO_PuPd_UP;
    GPIO_InitStruct.GPIO_Speed = GPIO_Speed_50MHz;
    GPIO_Init(GPIOE, &GPIO_InitStruct);
  }

  /* SPI: full-duplex master, 8-bit frames, CPOL=1/CPHA=2nd edge (mode 3),
   * software NSS, MSB first, APB2 clock divided by 8 (the original comment
   * claimed "/2 = 45MHz", which did not match the prescaler). */
  SPI_InitStruct.SPI_Direction = SPI_Direction_2Lines_FullDuplex;
  SPI_InitStruct.SPI_Mode = SPI_Mode_Master;
  SPI_InitStruct.SPI_DataSize = SPI_DataSize_8b;
  SPI_InitStruct.SPI_CPOL = SPI_CPOL_High;
  SPI_InitStruct.SPI_CPHA = SPI_CPHA_2Edge;
  SPI_InitStruct.SPI_NSS = SPI_NSS_Soft;
  SPI_InitStruct.SPI_BaudRatePrescaler = SPI_BaudRatePrescaler_8;
  SPI_InitStruct.SPI_FirstBit = SPI_FirstBit_MSB;
  SPI_InitStruct.SPI_CRCPolynomial = 7;
  SPI_Init(SPIx, &SPI_InitStruct);
  SPI_Cmd(SPIx, ENABLE);
}
/*====================================================================================================*/
/*====================================================================================================*
**函數 : MPU9250_Init
**功能 : 初始化 MPU9250
**輸入 : None
**輸出 : None
**使用 : MPU9250_Init();
**====================================================================================================*/
/*====================================================================================================*/
#define MPU9250_InitRegNum 10
/* Initialize the MPU9250 by writing the configuration table below.
 * Each row is {value, register}; rows are written IN ORDER with a 1 ms
 * pause between writes (device reset first, then clock source, sensor
 * enables, ranges and rates).  Do not reorder the table. */
void MPU9250_Init(SPI_TypeDef* SPIx)
{
u8 i = 0;
u8 MPU6500_Init_Data[MPU9250_InitRegNum][2] = {
{0x80, MPU6500_PWR_MGMT_1}, // Reset Device
{0x01, MPU6500_PWR_MGMT_1}, // Clock Source
{0x00, MPU6500_PWR_MGMT_2}, // Enable Acc & Gyro
{0x07, MPU6500_CONFIG}, //
{0x18, MPU6500_GYRO_CONFIG}, // +-2000dps
{0x18, MPU6500_ACCEL_CONFIG}, // +-16G
{0x08, MPU6500_ACCEL_CONFIG_2}, // Set Acc Data Rates
{0x30, MPU6500_INT_PIN_CFG}, //
{0x40, MPU6500_I2C_MST_CTRL}, // I2C Speed 348 kHz
{0x00, MPU6500_USER_CTRL}, // Enable AUX
// Set Slave to Read AK8963
//{0x8C, MPU6500_I2C_SLV0_ADDR}, // AK8963_I2C_ADDR ( 7'b000_1100 )
//{0x00, MPU6500_I2C_SLV0_REG}, // AK8963_WIA ( 0x00 )
//{0x81, MPU6500_I2C_SLV0_CTRL}, // Enable
//{0x01, MPU6500_I2C_MST_DELAY_CTRL}
};
// 1 ms settle time between register writes.
for(i=0; i<MPU9250_InitRegNum; i++) {
MPU9250_WriteReg(SPIx,MPU6500_Init_Data[i][1], MPU6500_Init_Data[i][0]);
Delayms(1);
}
}
/*====================================================================================================*/
/*====================================================================================================*
**函數 : MPU9250_Check
**功能 : MPU9250 Check
**輸入 : None
**輸出 : Status
**使用 : Status = MPU9250_Check();
**====================================================================================================*/
/*====================================================================================================*/
/* Probe the sensor: read WHO_AM_I and compare against the expected id.
 * Returns SUCCESS when the id matches MPU6500_Device_ID, ERROR otherwise.
 * The commented-out section would additionally probe the AK8963
 * magnetometer through the auxiliary I2C master. */
u8 MPU9250_Check(SPI_TypeDef* SPIx)
{
u8 DeviceID = 0x00;
/* MPU6500 */
DeviceID = 0x00;
//printf("%d\r\n",DeviceID);
MPU9250_ReadReg(SPIx,MPU6500_WHO_AM_I, &DeviceID);
printf("%d\r\n",DeviceID); /* NOTE(review): debug print left in — consider removing for release builds */
if(DeviceID != MPU6500_Device_ID)
return ERROR;
/* AK8975 */
/* DeviceID = 0x00;
MPU9250_WriteReg(MPU6500_I2C_SLV0_ADDR, 0x8C); // Set AK8963_I2C_ADDR = 7'b000_1100
Delay_1us(10);
MPU9250_WriteReg(MPU6500_I2C_SLV0_REG, AK8963_WIA); // Set Write Reg
MPU9250_WriteReg(MPU6500_I2C_SLV0_CTRL, 0x81); // Start Read
Delay_1ms(1);
MPU9250_ReadReg(MPU6500_EXT_SENS_DATA_00, &DeviceID); // Read Data
if(DeviceID != AK8963_Device_ID)
return ERROR;*/
return SUCCESS;
}
/*====================================================================================================*/
/*====================================================================================================*
**函數 : MPU9250_Read
**功能 : 讀取感測器資料
**輸入 : *ReadBuf
**輸出 : None
**使用 : MPU9250_Read(ReadBuf);
**====================================================================================================*/
/*====================================================================================================*/
/* Burst-read the 14-byte sensor frame starting at MPU6500_ACCEL_XOUT_H
 * into ReadBuf (caller must provide >= 14 bytes).
 * NOTE(review): per the MPU6500 register map this presumably covers
 * accel XYZ, temperature and gyro XYZ, two bytes each — confirm. */
void MPU9250_Read(SPI_TypeDef* SPIx, u8 *ReadBuf)
{
MPU9250_ReadRegs(SPIx, MPU6500_ACCEL_XOUT_H, ReadBuf, 14);
}
/*====================================================================================================*/
/*====================================================================================================*/
/**
 * Transfer one byte full-duplex on SPIx: send TxData, return the byte
 * received.  Returns 0 if either the TXE or RXNE wait exceeds 200 polls
 * (callers cannot distinguish a timeout from a genuine 0x00 — pre-existing
 * limitation of this interface, kept for compatibility).
 *
 * BUG FIX: the original RXNE wait was `while (...);` — the stray semicolon
 * made it an UNBOUNDED busy-wait, and the retry/timeout block that followed
 * executed exactly once, unconditionally.  The timeout now guards the wait
 * as intended, matching the TXE loop above.
 */
u8 SPIx_ReadWriteByte(SPI_TypeDef* SPIx, u8 TxData)
{
  u8 retry = 0;

  /* Wait for the transmit buffer to empty, with a bounded spin. */
  while (SPI_I2S_GetFlagStatus(SPIx, SPI_I2S_FLAG_TXE) == RESET)
  {
    retry++;
    if (retry > 200) return 0;
  }

  SPI_I2S_SendData(SPIx, TxData);

  /* Wait for the received byte, with the same bounded spin. */
  retry = 0;
  while (SPI_I2S_GetFlagStatus(SPIx, SPI_I2S_FLAG_RXNE) == RESET)
  {
    retry++;
    if (retry > 200) return 0;
  }

  return SPI_I2S_ReceiveData(SPIx);
}
|
export { default, lte } from 'ember-handlebars-conditions/helpers/lte';
|
interface GenericObject<T> {
[Key: string]: T;
}
export default GenericObject; |
// In-memory fixture data: one record per team.
// Fields: _id (string key), companyId (owning company id), members
// (employee ids), teamName, leader (employee id, also listed in members
// in these fixtures), quote, teamPic (image URL), tasks (task ids).
export const teams = [
{
_id: "1",
companyId: "1",
members: ["2", "3", "7", "8"],
teamName: "Team 1",
leader: "2",
quote: "The best of the best",
teamPic:
"https://cdn.vox-cdn.com/thumbor/YagQ2QhkHIkJyjsiVZfgGpJlAYw=/1400x1400/filters:format(jpeg)/cdn.vox-cdn.com/uploads/chorus_asset/file/18288482/chrome_2019_07_08_14_17_05.jpg",
tasks: ["1", "2"],
},
{
_id: "2",
companyId: "2",
members: ["5", "9"],
teamName: "Team 2",
leader: "5",
quote: "The worst of the best",
teamPic:
"https://www.thechronicleherald.ca/media/photologue/photos/cache/freelance-atlantic-skies-the-sun-is-currently-in-a-stage-of-solar-mini_rs3wJ07_large.jpg",
tasks: ["3"],
},
{
_id: "3",
companyId: "2",
members: ["6", "10"],
teamName: "Team 3",
leader: "6",
quote: "The best of the worst",
teamPic:
"https://upload.wikimedia.org/wikipedia/commons/e/e1/FullMoon2010.jpg",
tasks: [],
},
];
/** Return the shared in-memory team list (a live reference, not a copy). */
export function getTeams() {
return teams;
}
/** Look up a team by its _id; returns undefined when no team matches. */
export function getTeamById(id) {
  for (const team of teams) {
    if (team._id === id) {
      return team;
    }
  }
  return undefined;
}
/** All teams belonging to the given company (possibly an empty array). */
export function getTeamsByCompanyId(id) {
  const matches = [];
  for (const team of teams) {
    if (team.companyId === id) {
      matches.push(team);
    }
  }
  return matches;
}
/**
 * Remove the team with the given id from the store and return it;
 * returns undefined when no team matches.
 *
 * BUG FIX: the original did `teams.indexOf(undefined)` on a miss, which
 * returns -1, and `splice(-1, 1)` then deleted the LAST team in the list.
 * A miss is now a no-op.
 */
export function deleteTeam(id) {
  const index = teams.findIndex((t) => t._id === id);
  if (index === -1) {
    return undefined;
  }
  const [removed] = teams.splice(index, 1);
  return removed;
}
/**
 * Upsert a team: update the matching record in place, or insert a new one
 * with a timestamp-derived id.  Returns the stored record.
 */
export function saveTeam(team) {
  const record = getTeamById(team._id) || {};
  Object.assign(record, {
    companyId: team.companyId,
    members: team.members,
    teamName: team.teamName,
    leader: team.leader,
    quote: team.quote,
    teamPic: team.teamPic,
    tasks: team.tasks,
  });
  if (!record._id) {
    record._id = Date.now().toString();
    teams.push(record);
  }
  return record;
}
/*export function saveTask(task) {
let taskInDb = tasks.find((m) => m._id === task._id) || {};
taskInDb.companyId = task.companyId;
taskInDb.name = task.name;
taskInDb.estimatedTime = task.estimatedTime;
taskInDb.usedTime = task.usedTime;
taskInDb.assignedToId = task.assignedToId;
taskInDb.assignedById = task.assignedById;
taskInDb.teamId = task.teamId;
taskInDb.taskDetail = task.taskDetail;
if (!taskInDb._id) {
taskInDb._id = Date.now().toString();
tasks.push(taskInDb);
}
return taskInDb;
}*/
|
const expression = (3 + (4 * (5 - (6 / 2))));
const syntaxTree = {
type: ' + ',
value: 3,
left: {
type: ' * ',
value: 4,
left: 5,
right: {
type: '/',
value: 6,
left: 2
}
}
}
console.log(syntaxTree); |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-622-1
#
# Security announcement date: 2016-09-15 00:00:00 UTC
# Script generation date: 2017-01-01 21:09:18 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: armv7l
#
# Vulnerable packages fixed in version:
# - tomcat6:6.0.45+dfsg-1~deb7u2
#
# Last versions recommended by security team:
# - tomcat6:6.0.45+dfsg-1~deb7u2
#
# CVE List:
# - CVE-2016-1240
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade tomcat6 to the patched version (no-op if already up to date).
sudo apt-get install --only-upgrade tomcat6=6.0.45+dfsg-1~deb7u2 -y
|
import * as sample from "./sample";
import * as tictactoe from "./tictactoe";

/**
 * Barrel module: exposes each game implementation under a single
 * namespace object, keyed by module name.
 */
export default { sample, tictactoe };
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.