text stringlengths 1 1.05M |
|---|
#!/bin/bash
# LoadLeveler batch job: runs an MPI-parallel Python post-processing step
# (ground-motion estimates from a SeisSol surface output file).
# NOTE(review): 'testcasefolder' is assigned empty and never used below.
testcasefolder=
#@ initialdir =
# DO NOT USE environment = COPY_ALL
#@ job_type = parallel
#@ class = test
#@ island_count = 1
#@ node = 20
#@tasks_per_node = 1
#@ wall_clock_limit = 0:30:00
#@ network.MPI = sn_all,not_shared,us
#@ energy_policy_tag = di73yeq_ept
#@ minimize_time_to_solution = yes
#@ output = job$(jobid).out
#@ error = job$(jobid).err
#@ notification=always
#@ notify_user=yourmail
#@ queue
# Load the site environment and switch to the MPI-enabled Anaconda Python.
. /etc/profile
. /etc/profile.d/modules.sh
export MP_SINGLE_THREAD=no
export OMP_NUM_THREADS=28
# Bind each MPI task to OMP_NUM_THREADS cores.
export MP_TASK_AFFINITY=core:$OMP_NUM_THREADS
. /etc/profile
. /etc/profile.d/modules.sh
module unload python mpi.ibm
module load python/2.7_anaconda_mpi
# One MPI task per node; the script itself spawns 28 worker processes (--MP).
poe python TuSeisSolScripts/onHdf5/ComputeGroundMotionsEstimatesFromSurfaceMPI.py SimulationsResults-NZ/RES-NZ-easi5_090318-29mio/NZ-surface.xdmf --MP 28
#if you dont have mpi4py installed, run on 1 node
#module load python
#poe python TuSeisSolScripts/onHdf5/ComputeGroundMotionsEstimatesFromSurfaceMPI.py SimulationsResults-NZ/RES-NZ-easi5_090318-29mio/NZ-surface.xdmf --MP 28 --noMPI
|
#!/usr/bin/env bash
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Build the TensorRT development container image.
# Defaults below can be overridden with the options parsed in the loop.
arg_dockerfile=docker/ubuntu.Dockerfile
arg_imagename=tensorrt-ubuntu
arg_osversion=18.04
arg_cudaversion=11.1
arg_cudnnversion=8.0
arg_help=0

# Parse options. Value options consume their argument with an extra 'shift';
# the trailing 'shift' consumes the option name itself.
while [[ "$#" -gt 0 ]]; do case $1 in
  --file) arg_dockerfile="$2"; shift;;
  --tag) arg_imagename="$2"; shift;;
  --os) arg_osversion="$2"; shift;;
  --cuda) arg_cudaversion="$2"; shift;;
  --cudnn) arg_cudnnversion="$2"; shift;;
  -h|--help) arg_help=1;;
  *) echo "Unknown parameter passed: $1"; echo "For help type: $0 --help"; exit 1;;
esac; shift; done

if [ "$arg_help" -eq "1" ]; then
  echo "Usage: $0 [options]"
  echo " --help or -h : Print this help menu."
  echo " --file <dockerfile> : Docker file to use for build."
  echo " --tag <imagename> : Image name for the generated container."
  echo " --os <version> : OS version to use."
  echo " --cuda <version> : CUDA version to use."
  echo " --cudnn <version> : cuDNN version to use."
  exit
fi

# Build the argument list as an array so values with spaces survive intact
# (the original flat string relied on unquoted word-splitting).
docker_args=(
  -f "$arg_dockerfile"
  --build-arg "OS_VERSION=$arg_osversion"
  --build-arg "CUDA_VERSION=$arg_cudaversion"
  --build-arg "CUDNN_VERSION=$arg_cudnnversion"
  --build-arg "uid=$(id -u)"
  --build-arg "gid=$(id -g)"
  --tag="$arg_imagename"
  .
)
echo "Building container:"
echo "> docker build ${docker_args[*]}"
docker build "${docker_args[@]}"
|
<filename>clop_default_test.go<gh_stars>100-1000
package clop
import (
"bytes"
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// Test_DefautlValue exercises the `default` struct tag: explicit command-line
// arguments must override defaults, defaults must be applied when no flags
// are passed, and the -h help output must echo every default value.
func Test_DefautlValue(t *testing.T) {
	type defaultExample struct {
		Int          int       `clop:"--int" default:"1"`
		Float64      float64   `clop:"--float64" default:"3.64"`
		Float32      float32   `clop:"--float32" default:"3.32"`
		SliceString  []string  `clop:"--slice-string" default:"[\"one\", \"two\"]"`
		SliceInt     []int     `clop:"--slice-int" default:"[1,2,3,4,5]"`
		SliceFloat64 []float64 `clop:"--slice-float64" default:"[1.1,2.2,3.3,4.4,5.5]"`
		Name         []int     `clop:"-e" usage:"slice test" valid:"required" default:"[1,2]"`
	}
	type tool struct {
		Rate string `clop:"-r; --rate" usage:"rate" default:"8000"`
	}

	// Explicit arguments take precedence over default values.
	{
		got := defaultExample{}
		p := New([]string{"--slice-string", "333", "--slice-string", "4444", "-e", "3", "-e", "4"}).SetExit(false)
		err := p.Bind(&got)
		assert.Equal(t, got.SliceString, []string{"333", "4444"})
		assert.Equal(t, got.Name, []int{3, 4})
		assert.NoError(t, err)
	}

	// A scalar default satisfies binding with no arguments at all.
	{
		tol := tool{}
		p := New([]string{}).SetExit(false)
		err := p.Bind(&tol)
		assert.NoError(t, err)
	}

	// With no arguments, every field falls back to its tagged default.
	{
		got := defaultExample{}
		need := defaultExample{
			Int:          1,
			Float64:      3.64,
			Float32:      3.32,
			SliceString:  []string{"one", "two"},
			SliceInt:     []int{1, 2, 3, 4, 5},
			SliceFloat64: []float64{1.1, 2.2, 3.3, 4.4, 5.5},
			Name:         []int{1, 2},
		}
		p := New([]string{}).SetExit(false)
		p.Bind(&got)
		assert.Equal(t, got, need)
	}

	// The -h help text must contain every default value verbatim.
	{
		got := defaultExample{}
		var out bytes.Buffer
		p := New([]string{"-h"}).SetExit(false).SetOutput(&out)
		err := p.Bind(&got)
		assert.NoError(t, err)
		needTest := []string{
			`1`,
			`3.64`,
			`3.32`,
			`["one", "two"]`,
			`[1,2,3,4,5]`,
			`[1.1,2.2,3.3,4.4,5.5]`,
		}
		helpMessage := out.String()
		for _, v := range needTest {
			pos := strings.Index(helpMessage, v)
			assert.NotEqual(t, pos, -1, fmt.Sprintf("search (%s) not found", v))
		}
	}
}
|
# Import necessary modules
from wtforms import SelectField
from wtforms.validators import DataRequired
from flask_sqlalchemy import SQLAlchemy
# Define the ModelSelectField class
class ModelSelectField(SelectField):
    """A SelectField whose choices are built from all rows of a SQLAlchemy model.

    Choices are computed once at construction time as ``(str(id), str(row))``
    pairs. Pass ``model=None`` (the default) to get an empty choice list.
    """

    def __init__(self, label=None, validators=None, model=None, **kwargs):
        super(ModelSelectField, self).__init__(label, validators, **kwargs)
        self.model = model
        self.choices = self._query_model_instances()

    def _query_model_instances(self):
        # Assumes `model` is a Flask-SQLAlchemy model exposing `.query`
        # (TODO confirm with callers). The original also instantiated a fresh,
        # unused SQLAlchemy() object here on every call; that has been removed.
        if self.model:
            instances = self.model.query.all()
            return [(str(instance.id), str(instance)) for instance in instances]
        return []
# Example usage
from flask_wtf import FlaskForm
from wtforms import SubmitField
# Define a form using ModelSelectField
class MyForm(FlaskForm):
    # NOTE(review): `Category` is not defined or imported in this module; it
    # must come from the application's models for this example to run.
    category = ModelSelectField(model=Category, validators=[DataRequired()])
    submit = SubmitField('Submit')
def get_item_count(results: dict) -> int:
    """Return the integer value of the 'count' field, or 0 when absent."""
    return int(results.get('count', 0))
#!/bin/bash
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test lightweight spawn stats generation in Bazel
#
set -eu
# Load the test setup defined in the parent directory
CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${CURRENT_DIR}/../integration_test_setup.sh" \
|| { echo "integration_test_setup.sh not found!" >&2; exit 1; }
# Writes a minimal BUILD file with a single genrule so every test below runs
# exactly one spawn/action.
function set_up() {
cat > BUILD <<EOF
genrule(
name = "foo",
cmd = "echo hello > \$@",
outs = ["foo.txt"],
)
EOF
}
# Verifies that the spawn-statistics line (matching /process/) appears in the
# build output BEFORE the "Build complete" message.
function test_order() {
# Ensure the new stats are printed before Build completed
bazel build :foo 2>&1 | tee ${TEST_log} | sed -n '/process/,$p' | grep "Build complete" || fail "Expected \"process\" to be followed by \"Build completed\""
}
# Single execution of Bazel
# Runs one 'bazel build' and asserts its " process" statistics line matches.
# Globals:   TEST_log (truncated and rewritten by each call)
# Arguments: $1 - flags for 'bazel build' (intentionally word-split)
#            $2 - regex the statistics line must match
function statistics_single() {
  local flags=$1 # flags to pass to Bazel
  local expect=$2 # string to expect
  echo "Starting single run for $flags $expect" &> "$TEST_log"
  # $flags stays unquoted on purpose so multiple flags split into words;
  # backticks replaced by $() and log path quoted.
  local output
  output=$(bazel build :foo $flags 2>&1 | tee "${TEST_log}" | grep " process" | tr -d '\r')
  if ! [[ $output =~ ${expect} ]]; then
    fail "bazel ${flags}: Want |${expect}|, got |${output}| "
  fi
  echo "Done $flags $expect" &> "$TEST_log"
}
# The plain local strategy must report exactly one local spawn.
function test_local() {
statistics_single "--spawn_strategy=local" ": 1 local"
}
# The linux-sandbox strategy is Linux-only; on other platforms the test is a
# silent no-op.
function test_local_sandbox() {
if [[ "$PLATFORM" == "linux" ]]; then
statistics_single "--spawn_strategy=linux-sandbox" ": 1 linux-sandbox"
fi
}
# We are correctly resetting the counts
function test_repeat() {
flags="--spawn_strategy=local"
statistics_single $flags ": 1 local"
# After a clean, the rebuild must again count exactly one spawn, proving the
# statistics reset between invocations.
bazel clean $flags
statistics_single $flags ": 1 local"
}
# Locally cached results are not yet displayed
function test_localcache() {
flags="--spawn_strategy=local"
# We are correctly resetting the counts
statistics_single $flags ": 1 local"
# The second identical build is served from the local cache: zero processes.
statistics_single $flags "0 processes."
}
run_suite "bazel statistics tests"
|
#!/bin/bash
# This script can be used to rebuild a frontend project and refresh/restart the docker container in one go.
# Database will be dropped on each run of this script (-v removes volumes).
# Abort at the first failing step — previously a failed 'build' still
# restarted the (stale) containers via 'up -d'.
set -euo pipefail
docker-compose rm -f -s -v
docker-compose build
docker-compose up -d
|
<gh_stars>0
from re import compile
from lazy import lazy
from resource_identifier.config import ZAM_CORE_CONFIG
def _determine_resource_catagory(result):
    """Classify a resource as asset or shot from its four product path parts.

    Mutates ``result`` in place: pops product_part_01..04 and writes
    'project', optionally 'episode', and either asset or shot fields.
    """
    part_1 = result.pop('product_part_01')
    part_2 = result.pop('product_part_02')
    part_3 = result.pop('product_part_03')
    part_4 = result.pop('product_part_04')
    result['project'] = part_1

    # An episode marker ('ep') in part 2 shifts the type/name parts right.
    if 'ep' in part_2.lower():
        result['episode'] = part_2
        type_part, name_part = part_3, part_4
    else:
        type_part, name_part = part_2, part_3

    if type_part in ZAM_CORE_CONFIG.config['ZAM_ASSET_TYPES']:
        result['category'] = 'asset'
        result['asset_type'] = type_part
        result['asset_name'] = name_part
    else:
        result['category'] = 'shot'
        result['sequence'] = type_part
        result['shot'] = name_part
def _determine_resource_type(result):
if result['sub_path']:
result['resource_type'] = result['sub_path'].split('/')[1]
else:
result['resource_type'] = None
class ZamResouceIdentifierBaseSolverMixin(object):
    """Base solver mixin: parses a resource-identifier string against the
    configured regex syntax tree and post-processes the result dict."""

    # Hooks applied to the result dict before/after regex parsing.
    pre_parse_functions = []
    post_parse_functions = [_determine_resource_catagory,
                            _determine_resource_type]

    @lazy
    def short_url(self) -> str:
        # Not implemented yet.
        return ''

    @lazy
    def full_url(self) -> str:
        # Builds the full URL from the configured pattern; site is fixed here.
        pattern = ZAM_CORE_CONFIG.config['ZAM_RESOURCE_IDENTIFIER_FULL_URL']
        return pattern.format(zam_core_api_version=ZAM_CORE_CONFIG.config['ZAM_CORE_API_VERSION'],
                              site='beijing',
                              resource=self)

    def post_parse_process(self, result):
        for func in __class__.post_parse_functions:
            func(result)

    def pre_parse_process(self, result):
        for func in __class__.pre_parse_functions:
            func(result)

    @lazy
    def parse(self):
        """Recursively match self (used as a string) against the configured
        regex component tree and return a dict of named components.

        Raises ValueError when a component fails to match its regex.
        """
        def traverse_parse(string, regex_component):
            if (not regex_component):
                return
            result[regex_component['name']] = string
            if string and regex_component['regex'] and regex_component['component_mappings']:
                regex = compile(regex_component['regex'])
                match = regex.match(string)
                if match:
                    # Each capture group maps onto the corresponding child
                    # component and is parsed recursively.
                    for string_component, regex_component in zip(match.groups(), regex_component['component_mappings']):
                        traverse_parse(string_component, regex_component)
                else:
                    raise ValueError('Resource ID not match Regex: \n{}\n{}'.format(
                        string, regex_component['regex']))

        result = dict()
        regex_config = ZAM_CORE_CONFIG.config['ZAM_RESOURCE_IDENTIFIER_SYNTAX']
        self.pre_parse_process(result)
        traverse_parse(self, regex_config)
        # Fix: removed leftover debug `print(result)` that spammed stdout on
        # every parse.
        self.post_parse_process(result)
        return result
class ZamResourceIdentifierDataBaseSolverMixin(object):
    """Solver mixin stub: database lookup for a resource (not implemented)."""

    def database_id(self) -> str:
        # TODO: return the database id for this resource.
        return ''


class ZamResourceIdentifierFilePathSolverMixin(object):
    """Solver mixin stub: filesystem location for a resource (not implemented)."""

    def file_path(self) -> str:
        # TODO: return the file path for this resource.
        return ''


class ZamResouceIdentifierRestApiSolverMixin(object):
    """Solver mixin stub: REST endpoint for a resource (not implemented)."""

    def rest_api(self) -> str:
        # TODO: return the REST API URL for this resource.
        return ''
|
package common
import (
"errors"
"github.com/cjx2328/gocms/pkg/convert"
"github.com/gin-gonic/gin"
)
// GetQueryToStrE returns the query-string value for key, or an error when the
// key is absent from the request.
func GetQueryToStrE(c *gin.Context, key string) (string, error) {
	value, ok := c.GetQuery(key)
	if !ok {
		return "", errors.New("没有这个值传入")
	}
	return value, nil
}
// GetQueryToStr returns the query-string value for key. When the key is
// absent or the value is empty, the first element of defaultValues (or the
// zero string) is returned instead.
func GetQueryToStr(c *gin.Context, key string, defaultValues ...string) string {
	fallback := ""
	if len(defaultValues) > 0 {
		fallback = defaultValues[0]
	}
	value, err := GetQueryToStrE(c, key)
	if err != nil || value == "" {
		return fallback
	}
	return value
}
// GetQueryToUintE returns the query-string value for key converted to uint,
// or an error when the key is absent or the value is not a valid number.
func GetQueryToUintE(c *gin.Context, key string) (uint, error) {
	value, err := GetQueryToStrE(c, key)
	if err != nil {
		return 0, err
	}
	return convert.ToUintE(value)
}
// GetQueryToUint returns the query-string value for key as a uint, falling
// back to the first element of defaultValues (or 0) on any lookup or
// conversion error.
func GetQueryToUint(c *gin.Context, key string, defaultValues ...uint) uint {
	var fallback uint
	if len(defaultValues) > 0 {
		fallback = defaultValues[0]
	}
	value, err := GetQueryToUintE(c, key)
	if err != nil {
		return fallback
	}
	return value
}
// GetQueryToUint64E returns the query-string value for key converted to
// uint64, or an error when the key is absent or the value is not numeric.
func GetQueryToUint64E(c *gin.Context, key string) (uint64, error) {
	value, err := GetQueryToStrE(c, key)
	if err != nil {
		return 0, err
	}
	return convert.ToUint64E(value)
}
// QueryToUint
func GetQueryToUint64(c *gin.Context,key string,defaultValues ...uint64) uint64 {
var defaultValue uint64
if len(defaultValues)>0{
defaultValue=defaultValues[0]
}
val,err:=GetQueryToUint64E(c,key)
if err!=nil {
return defaultValue
}
return val
} |
<?php
// Database connection bootstrap.
//
// SECURITY: credentials were hard-coded; they are now read from the
// environment, keeping the original literals as fallbacks so existing
// deployments behave identically when no DB_* variables are set.
$servername = getenv("DB_HOST") ?: "localhost";
$username = getenv("DB_USER") ?: "root";
$password = getenv("DB_PASSWORD") ?: "mypassword";
$dbname = getenv("DB_NAME") ?: "db_name";

// Create connection
$conn = new mysqli($servername, $username, $password, $dbname);

// Check connection; abort the request with a diagnostic on failure.
if ($conn->connect_error) {
    die("Connection failed: " . $conn->connect_error);
}
?>
package net.autoitemswitch.events;
public interface TickListener {
public void onTick();
}
|
/*
* Copyright (c) 2015, EURECOM (www.eurecom.fr)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are those
* of the authors and should not be interpreted as representing official policies,
* either expressed or implied, of the FreeBSD Project.
*/
/** @brief Intertask Interface Signal Dumper
Allows users to connect their itti_analyzer to this process and dump
signals exchanged between tasks.
@author <NAME> <<EMAIL>>
*/
#define _GNU_SOURCE // required for pthread_setname_np()
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <error.h>
#include <sched.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/select.h>
#include <sys/types.h>
#include <arpa/inet.h>
#include <sys/eventfd.h>
#include "assertions.h"
#include "liblfds611.h"
#include "itti_types.h"
#include "intertask_interface.h"
#include "intertask_interface_dump.h"
#include "dynamic_memory_check.h"
#if OAI_EMU
# include "vcd_signal_dumper.h"
#endif
/* Debug level bitmask; OR in 0x1/0x2/0x4/0x8 to enable the matching traces. */
static const int itti_dump_debug = 0;   // 0x8 | 0x4 | 0x2;
/* Level-gated debug trace / unconditional error trace, both to stdout. */
#define ITTI_DUMP_DEBUG(m, x, args...) do { if ((m) & itti_dump_debug) fprintf(stdout, "[ITTI_DUMP][D]"x, ##args); } \
  while(0)
#define ITTI_DUMP_ERROR(x, args...) do { fprintf(stdout, "[ITTI_DUMP][E]"x, ##args); } \
  while(0)

/* One message queued for dumping (heap copy of payload + framing metadata). */
typedef struct itti_dump_queue_item_s {
  MessageDef *data;           /* heap copy of the message payload */
  uint32_t data_size;         /* size of *data in bytes */
  uint32_t message_number;    /* sequence number serialized into the frame */
  uint32_t message_type;      /* ITTI_DUMP_* frame type */
  uint32_t message_size;
} itti_dump_queue_item_t;

/* State for one connected itti_analyzer client. */
typedef struct {
  int sd;                     /* client socket, -1 when the slot is free */
  uint32_t last_message_number;
} itti_client_desc_t;

/* Global state shared with the dump thread. */
typedef struct itti_desc_s {
  /*
   * Asynchronous thread that write to file/accept new clients
   */
  pthread_t itti_acceptor_thread;
  pthread_attr_t attr;
  /*
   * List of messages to dump.
   * * * NOTE: we limit the size of this queue to retain only the last exchanged
   * * * messages. The size can be increased by setting up the ITTI_QUEUE_MAX_ELEMENTS
   * * * in mme_default_values.h or by putting a custom in the configuration file.
   */
  struct lfds611_ringbuffer_state *itti_message_queue;
  int nb_connected;
  /*
   * Event fd used to notify new messages (semaphore)
   */
  int event_fd;
  int itti_listen_socket;
  itti_client_desc_t itti_clients[ITTI_DUMP_MAX_CON];
} itti_desc_t;

/* Wire frame of a dumped message: socket header + signal header + payload. */
typedef struct {
  itti_socket_header_t socket_header;
  itti_signal_header_t signal_header;
  /*
   * Message payload is added here, this struct is used as an header
   */
} itti_dump_message_t;

typedef struct {
  itti_socket_header_t socket_header;
} itti_statistic_message_t;

/* End-of-frame markers appended after each payload. */
static const itti_message_types_t itti_dump_xml_definition_end = ITTI_DUMP_XML_DEFINITION_END;
static const itti_message_types_t itti_dump_message_type_end = ITTI_DUMP_MESSAGE_TYPE_END;

static itti_desc_t itti_dump_queue;             /* singleton dump-thread state */
static FILE *dump_file = NULL;                  /* optional on-disk dump target */
static int itti_dump_running = 1;               /* cleared to stop the dump thread */
static volatile uint32_t pending_messages = 0;  /* ring-buffer occupancy counter */
/*------------------------------------------------------------------------------*/
/* Sends one queued message to a connected analyzer socket, framed as
 * itti_dump_message_t + payload + end marker. Returns the number of bytes
 * sent, or -1 on send failure. */
static int
itti_dump_send_message (
  int sd,
  itti_dump_queue_item_t * message)
{
  itti_dump_message_t *new_message;
  ssize_t bytes_sent = 0,
    total_sent = 0;
  uint8_t *data_ptr;

  /* Fix: validate arguments BEFORE use — the original computed `size` from
   * message->data_size before asserting message != NULL. */
  AssertFatal (sd > 0, "Socket descriptor (%d) is invalid!\n", sd);
  AssertFatal (message != NULL, "Message is NULL!\n");
  /*
   * Allocate memory for message header and payload
   */
  size_t size = sizeof (itti_dump_message_t) + message->data_size + sizeof (itti_message_types_t);

  new_message = malloc (size);
  AssertFatal (new_message != NULL, "New message allocation failed!\n");
  /*
   * Preparing the header
   */
  new_message->socket_header.message_size = size;
  new_message->socket_header.message_type = ITTI_DUMP_MESSAGE_TYPE;
  /*
   * Adds message number in unsigned decimal ASCII format
   */
  snprintf (new_message->signal_header.message_number_char, sizeof (new_message->signal_header.message_number_char), MESSAGE_NUMBER_CHAR_FORMAT, message->message_number);
  new_message->signal_header.message_number_char[sizeof (new_message->signal_header.message_number_char) - 1] = '\n';
  /*
   * Appends message payload, then the end-of-frame marker
   */
  memcpy (&new_message[1], message->data, message->data_size);
  memcpy (((void *)&new_message[1]) + message->data_size, &itti_dump_message_type_end, sizeof (itti_message_types_t));
  data_ptr = (uint8_t *) & new_message[0];

  /* Partial sends are possible (non-blocking socket): loop until complete. */
  do {
    bytes_sent = send (sd, &data_ptr[total_sent], size - total_sent, 0);

    if (bytes_sent < 0) {
      ITTI_DUMP_ERROR ("[%d] Failed to send %zu bytes to socket (%d:%s)\n", sd, size, errno, strerror (errno));
      free_wrapper (new_message);
      return -1;
    }

    total_sent += bytes_sent;
  } while (total_sent != size);

  free_wrapper (new_message);
  return total_sent;
}
/* Writes one queued message to the dump file (if open), using the same
 * header/payload/end-marker framing as the socket path. Returns 1 when a
 * record was written, 0 when there is no dump file or message. */
static int
itti_dump_fwrite_message (
  itti_dump_queue_item_t * message)
{
  itti_dump_message_t new_message_header;

  if ((dump_file != NULL) && (message != NULL)) {
    new_message_header.socket_header.message_size = message->message_size + sizeof (itti_dump_message_t) + sizeof (itti_message_types_t);
    new_message_header.socket_header.message_type = message->message_type;
    /* Message number serialized as fixed-width decimal ASCII, '\n'-terminated. */
    snprintf (new_message_header.signal_header.message_number_char, sizeof (new_message_header.signal_header.message_number_char), MESSAGE_NUMBER_CHAR_FORMAT, message->message_number);
    new_message_header.signal_header.message_number_char[sizeof (new_message_header.signal_header.message_number_char) - 1] = '\n';
    fwrite (&new_message_header, sizeof (itti_dump_message_t), 1, dump_file);
    fwrite (message->data, message->data_size, 1, dump_file);
    fwrite (&itti_dump_message_type_end, sizeof (itti_message_types_t), 1, dump_file);
    /* Flush so the dump stays readable even if the process dies abruptly. */
    fflush (dump_file);
    return (1);
  }

  return (0);
}
/* Sends the XML message-definition blob to a freshly connected analyzer,
 * framed as itti_socket_header_t + XML + end marker.
 * Returns 0 on success, -1 on send failure. */
static int
itti_dump_send_xml_definition (
  const int sd,
  const char *message_definition_xml,
  const uint32_t message_definition_xml_length)
{
  itti_socket_header_t *itti_dump_message;
  /*
   * Allocate memory for message header and payload
   */
  size_t itti_dump_message_size;
  ssize_t bytes_sent = 0,
    total_sent = 0;
  uint8_t *data_ptr;

  AssertFatal (sd > 0, "Socket descriptor (%d) is invalid!\n", sd);
  AssertFatal (message_definition_xml != NULL, "Message definition XML is NULL!\n");
  itti_dump_message_size = sizeof (itti_socket_header_t) + message_definition_xml_length + sizeof (itti_message_types_t);
  itti_dump_message = calloc (1, itti_dump_message_size);
  /* Fix: the calloc result was previously dereferenced without a NULL check. */
  AssertFatal (itti_dump_message != NULL, "ITTI dump message allocation failed!\n");
  ITTI_DUMP_DEBUG (0x2, "[%d] Sending XML definition message of size %zu to observer peer\n", sd, itti_dump_message_size);
  itti_dump_message->message_size = itti_dump_message_size;
  itti_dump_message->message_type = ITTI_DUMP_XML_DEFINITION;
  /*
   * Copying message definition
   */
  memcpy (&itti_dump_message[1], message_definition_xml, message_definition_xml_length);
  memcpy (((void *)&itti_dump_message[1]) + message_definition_xml_length, &itti_dump_xml_definition_end, sizeof (itti_message_types_t));
  data_ptr = (uint8_t *) & itti_dump_message[0];

  /* Partial sends are possible (non-blocking socket): loop until complete. */
  do {
    bytes_sent = send (sd, &data_ptr[total_sent], itti_dump_message_size - total_sent, 0);

    if (bytes_sent < 0) {
      ITTI_DUMP_ERROR ("[%d] Failed to send %zu bytes to socket (%d:%s)\n", sd, itti_dump_message_size, errno, strerror (errno));
      free_wrapper (itti_dump_message);
      return -1;
    }

    total_sent += bytes_sent;
  } while (total_sent != itti_dump_message_size);

  free_wrapper (itti_dump_message);
  return 0;
}
/* Ring-buffer item destructor: frees a queued item's payload (attributed to
 * its originating task) and then the item itself. Safe to call with NULL. */
static void
itti_dump_user_data_delete_function (
  void *user_data,
  void *user_state)
{
  itti_dump_queue_item_t *item = (itti_dump_queue_item_t *) user_data;
  task_id_t task_id = TASK_UNKNOWN;
  int result;

  (void)user_state;             // UNUSED

  if (item == NULL) {
    return;
  }

  if (item->data != NULL) {
    /* Attribute the free to the task that originated the message. */
    task_id = ITTI_MSG_ORIGIN_ID (item->data);
    result = itti_free (task_id, item->data);
    AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result);
  }

  result = itti_free (task_id, item);
  AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result);
}
/* Enqueues one item on the lock-free ring buffer and signals the dump thread
 * via the eventfd. When the ring is full the oldest unread entry is
 * overwritten (data loss for observers) and no new event is signalled, so
 * pending_messages stays balanced with eventfd counts. Returns 0. */
static int
itti_dump_enqueue_message (
  itti_dump_queue_item_t * new,
  uint32_t message_size,
  uint32_t message_type)
{
  struct lfds611_freelist_element *new_queue_element = NULL;
  int overwrite_flag;

  AssertFatal (new != NULL, "Message to queue is NULL!\n");
#if OAI_EMU
  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME (VCD_SIGNAL_DUMPER_FUNCTIONS_ITTI_DUMP_ENQUEUE_MESSAGE, VCD_FUNCTION_IN);
#endif
  new->message_type = message_type;
  new->message_size = message_size;
  ITTI_DUMP_DEBUG (0x1, " itti_dump_enqueue_message: lfds611_ringbuffer_get_write_element\n");
  /* NOTE(review): lfds611 both returns the element and writes it through the
   * out-parameter — passing &new_queue_element here mirrors that API;
   * confirm against liblfds611 headers. */
  new_queue_element = lfds611_ringbuffer_get_write_element (itti_dump_queue.itti_message_queue, &new_queue_element, &overwrite_flag);

  if (overwrite_flag != 0) {
    // no free element available: overwrite a non read one => data loss!
    void *old = NULL;

    lfds611_freelist_get_user_data_from_element (new_queue_element, &old);
    ITTI_DUMP_DEBUG (0x4, " overwrite_flag set, freeing old data %p %p\n", new_queue_element, old);
    itti_dump_user_data_delete_function (old, NULL);
  }

  lfds611_freelist_set_user_data_in_element (new_queue_element, new);
  lfds611_ringbuffer_put_write_element (itti_dump_queue.itti_message_queue, new_queue_element);

  if (overwrite_flag == 0) {
    {
      ssize_t write_ret;
      eventfd_t sem_counter = 1;

      /*
       * Call to write for an event fd must be of 8 bytes
       */
      write_ret = write (itti_dump_queue.event_fd, &sem_counter, sizeof (sem_counter));
      AssertFatal (write_ret == sizeof (sem_counter), "Write to dump event failed (%d/%d)!\n", (int)write_ret, (int)sizeof (sem_counter));
    }
    // add one to pending_messages, atomically
    __sync_fetch_and_add (&pending_messages, 1);
  }

  ITTI_DUMP_DEBUG (0x2, " Added element to queue %p %p, pending %u, type %u\n", new_queue_element, new, pending_messages, message_type);
#if OAI_EMU
  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME (VCD_SIGNAL_DUMPER_FUNCTIONS_ITTI_DUMP_ENQUEUE_MESSAGE, VCD_FUNCTION_OUT);
#endif
  return 0;
}
/* Closes the dump thread's file descriptors and terminates the calling
 * (dump) thread. Does not return. */
static void
itti_dump_socket_exit (
  void)
{
  close (itti_dump_queue.event_fd);
  close (itti_dump_queue.itti_listen_socket);
  /*
   * Leave the thread as we detected end signal
   */
  pthread_exit (NULL);
}
/* Drains the ring buffer: each pending item is written to the dump file and
 * sent to every connected analyzer, then freed. flush_all != 0 drains until
 * the ring is empty; otherwise exactly one element is consumed. Returns 1 if
 * at least one consumer (file or client) exists, 0 otherwise — in which case
 * nothing is consumed. */
static int
itti_dump_flush_ring_buffer (
  int flush_all)
{
  struct lfds611_freelist_element *element = NULL;
  void *user_data;
  int j;
  int consumer;

  /*
   * Check if there is a least one consumer
   */
  consumer = 0;

  if (dump_file != NULL) {
    consumer = 1;
  } else {
    for (j = 0; j < ITTI_DUMP_MAX_CON; j++) {
      if (itti_dump_queue.itti_clients[j].sd > 0) {
        consumer = 1;
        break;
      }
    }
  }

  if (consumer > 0) {
    do {
      /*
       * Acquire the ring element
       */
      lfds611_ringbuffer_get_read_element (itti_dump_queue.itti_message_queue, &element);
      // subtract one from pending_messages, atomically
      __sync_fetch_and_sub (&pending_messages, 1);

      if (element == NULL) {
        /* Empty ring: expected when draining; fatal when an eventfd wake-up
         * promised data. */
        if (flush_all != 0) {
          flush_all = 0;
        } else {
          AssertFatal (0, "Dump event with no data!\n");
        }
      } else {
        /*
         * Retrieve user part of the message
         */
        lfds611_freelist_get_user_data_from_element (element, &user_data);
        ITTI_DUMP_DEBUG (0x2, " removed element from queue %p %p, pending %u\n", element, user_data, pending_messages);

        /* Exit marker: hand the element back and terminate the dump thread. */
        if (((itti_dump_queue_item_t *) user_data)->message_type == ITTI_DUMP_EXIT_SIGNAL) {
          lfds611_ringbuffer_put_read_element (itti_dump_queue.itti_message_queue, element);
          itti_dump_socket_exit ();
        }

        /*
         * Write message to file
         */
        itti_dump_fwrite_message ((itti_dump_queue_item_t *) user_data);

        /*
         * Send message to remote analyzer
         */
        for (j = 0; j < ITTI_DUMP_MAX_CON; j++) {
          if (itti_dump_queue.itti_clients[j].sd > 0) {
            itti_dump_send_message (itti_dump_queue.itti_clients[j].sd, (itti_dump_queue_item_t *) user_data);
          }
        }

        itti_dump_user_data_delete_function (user_data, NULL);
        lfds611_freelist_set_user_data_in_element (element, NULL);
        /*
         * We have finished with this element, reinsert it in the ring buffer
         */
        lfds611_ringbuffer_put_read_element (itti_dump_queue.itti_message_queue, element);
      }
    } while (flush_all);
  }

  return (consumer);
}
/* Registers a newly accepted analyzer connection: finds a free client slot,
 * sends the XML message definitions, and records the socket. Returns 0 on
 * success; closes the socket and returns -1 on send failure or when all
 * ITTI_DUMP_MAX_CON slots are in use. */
static int
itti_dump_handle_new_connection (
  int sd,
  const char *xml_definition,
  uint32_t xml_definition_length)
{
  if (itti_dump_queue.nb_connected < ITTI_DUMP_MAX_CON) {
    uint8_t i;

    for (i = 0; i < ITTI_DUMP_MAX_CON; i++) {
      /*
       * Let's find a place to store the new client
       */
      if (itti_dump_queue.itti_clients[i].sd == -1) {
        break;
      }
    }

    ITTI_DUMP_DEBUG (0x2, " Found place to store new connection: %d\n", i);
    AssertFatal (i < ITTI_DUMP_MAX_CON, "No more connection available (%d/%d) for socked %d!\n", i, ITTI_DUMP_MAX_CON, sd);
    ITTI_DUMP_DEBUG (0x2, " Socket %d accepted\n", sd);

    /*
     * Send the XML message definition
     */
    if (itti_dump_send_xml_definition (sd, xml_definition, xml_definition_length) < 0) {
      AssertError (0, {
      }
      , "Failed to send XML definition!\n");
      close (sd);
      return -1;
    }

    itti_dump_queue.itti_clients[i].sd = sd;
    itti_dump_queue.nb_connected++;
  } else {
    ITTI_DUMP_DEBUG (0x2, " Socket %d rejected\n", sd);
    /*
     * We have reached max number of users connected...
     * * * Reject the connection.
     */
    close (sd);
    return -1;
  }

  return 0;
}
/* Dump-thread main loop: listens on ITTI_PORT for analyzer connections,
 * waits on the eventfd for queued messages, and flushes/forwards them.
 * arg_p is the XML message-definition string sent to each new client.
 * Fix vs. original: the socket-creation error message read " ocket ...". */
static void *
itti_dump_socket (
  void *arg_p)
{
  uint32_t message_definition_xml_length;
  char *message_definition_xml;
  int rc;
  int itti_listen_socket,
    max_sd;
  int on = 1;
  fd_set read_set,
    working_set;
  struct sockaddr_in servaddr;  /* socket address structure */
  struct timeval *timeout_p = NULL;

  ITTI_DUMP_DEBUG (0x2, " Creating TCP dump socket on port %u\n", ITTI_PORT);
  message_definition_xml = (char *)arg_p;
  AssertFatal (message_definition_xml != NULL, "Message definition XML is NULL!\n");
  message_definition_xml_length = strlen (message_definition_xml) + 1;

  if ((itti_listen_socket = socket (AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) {
    ITTI_DUMP_ERROR (" Socket creation failed (%d:%s)\n", errno, strerror (errno));
    pthread_exit (NULL);
  }

  /*
   * Allow socket reuse
   */
  rc = setsockopt (itti_listen_socket, SOL_SOCKET, SO_REUSEADDR, (char *)&on, sizeof (on));

  if (rc < 0) {
    ITTI_DUMP_ERROR (" setsockopt SO_REUSEADDR failed (%d:%s)\n", errno, strerror (errno));
    close (itti_listen_socket);
    pthread_exit (NULL);
  }

  /*
   * Set socket to be non-blocking.
   * * * NOTE: sockets accepted will inherit this option.
   */
  rc = ioctl (itti_listen_socket, FIONBIO, (char *)&on);

  if (rc < 0) {
    ITTI_DUMP_ERROR (" ioctl FIONBIO (non-blocking) failed (%d:%s)\n", errno, strerror (errno));
    close (itti_listen_socket);
    pthread_exit (NULL);
  }

  memset (&servaddr, 0, sizeof (servaddr));
  servaddr.sin_family = AF_INET;
  servaddr.sin_addr.s_addr = htonl (INADDR_ANY);
  servaddr.sin_port = htons (ITTI_PORT);

  if (bind (itti_listen_socket, (struct sockaddr *)&servaddr, sizeof (servaddr)) < 0) {
    ITTI_DUMP_ERROR (" Bind failed (%d:%s)\n", errno, strerror (errno));
    pthread_exit (NULL);
  }

  if (listen (itti_listen_socket, 5) < 0) {
    ITTI_DUMP_ERROR (" Listen failed (%d:%s)\n", errno, strerror (errno));
    pthread_exit (NULL);
  }

  FD_ZERO (&read_set);
  /*
   * Add the listener
   */
  FD_SET (itti_listen_socket, &read_set);
  /*
   * Add the event fd
   */
  FD_SET (itti_dump_queue.event_fd, &read_set);
  /*
   * Max of both sd
   */
  max_sd = itti_listen_socket > itti_dump_queue.event_fd ? itti_listen_socket : itti_dump_queue.event_fd;
  itti_dump_queue.itti_listen_socket = itti_listen_socket;

  /*
   * Loop waiting for incoming connects or for incoming data
   * * * on any of the connected sockets.
   */
  while (1) {
    int desc_ready;
    int client_socket = -1;
    int i;

    memcpy (&working_set, &read_set, sizeof (read_set));
    timeout_p = NULL;
    /*
     * No timeout: select blocks till a new event has to be handled
     * * * on sd's.
     */
    rc = select (max_sd + 1, &working_set, NULL, NULL, timeout_p);

    if (rc < 0) {
      ITTI_DUMP_ERROR (" select failed (%d:%s)\n", errno, strerror (errno));
      pthread_exit (NULL);
    } else if (rc == 0) {
      /*
       * Timeout. NOTE(review): unreachable while timeout_p stays NULL; kept
       * in case a select timeout is reintroduced.
       */
      if (itti_dump_flush_ring_buffer (1) == 0) {
        if (itti_dump_running) {
          ITTI_DUMP_DEBUG (0x4, " No messages consumers, waiting ...\n");
          usleep (100 * 1000);
        } else {
          itti_dump_socket_exit ();
        }
      }
    }

    desc_ready = rc;

    for (i = 0; i <= max_sd && desc_ready > 0; i++) {
      if (FD_ISSET (i, &working_set)) {
        desc_ready -= 1;

        if (i == itti_dump_queue.event_fd) {
          /*
           * Notification of new element to dump from other tasks
           */
          eventfd_t sem_counter;
          ssize_t read_ret;

          /*
           * Read will always return 1 for kernel versions > 2.6.30
           */
          read_ret = read (itti_dump_queue.event_fd, &sem_counter, sizeof (sem_counter));

          if (read_ret < 0) {
            ITTI_DUMP_ERROR (" Failed read for semaphore: %s\n", strerror (errno));
            pthread_exit (NULL);
          }

          AssertFatal (read_ret == sizeof (sem_counter), "Failed to read from dump event FD (%d/%d)!\n", (int)read_ret, (int)sizeof (sem_counter));

          if (itti_dump_flush_ring_buffer (0) == 0) {
            /* No consumer yet: re-arm the eventfd so the element is retried
             * on the next loop iteration. */
            if (itti_dump_running) {
              ITTI_DUMP_DEBUG (0x4, " No messages consumers, waiting ...\n");
              usleep (100 * 1000);
              {
                ssize_t write_ret;

                sem_counter = 1;
                /*
                 * Call to write for an event fd must be of 8 bytes
                 */
                write_ret = write (itti_dump_queue.event_fd, &sem_counter, sizeof (sem_counter));
                AssertFatal (write_ret == sizeof (sem_counter), "Failed to write to dump event FD (%d/%d)!\n", (int)write_ret, (int)sem_counter);
              }
            } else {
              itti_dump_socket_exit ();
            }
          } else {
            ITTI_DUMP_DEBUG (0x1, " Write element to file\n");
          }
        } else if (i == itti_listen_socket) {
          /* Drain all pending connections (listener is non-blocking). */
          do {
            client_socket = accept (itti_listen_socket, NULL, NULL);

            if (client_socket < 0) {
              if (errno == EWOULDBLOCK || errno == EAGAIN) {
                /*
                 * No more new connection
                 */
                ITTI_DUMP_DEBUG (0x2, " No more new connection\n");
                continue;
              } else {
                ITTI_DUMP_ERROR (" accept failed (%d:%s)\n", errno, strerror (errno));
                pthread_exit (NULL);
              }
            }

            if (itti_dump_handle_new_connection (client_socket, message_definition_xml, message_definition_xml_length) == 0) {
              /*
               * The socket has been accepted.
               * * * We have to update the set to include this new sd.
               */
              FD_SET (client_socket, &read_set);

              if (client_socket > max_sd)
                max_sd = client_socket;
            }
          } while (client_socket != -1);
        } else {
          /*
           * For now the MME itti dumper should not receive data
           * * * other than connection oriented (CLOSE).
           */
          uint8_t j;

          ITTI_DUMP_DEBUG (0x2, " Socket %d disconnected\n", i);
          /*
           * Close the socket and update info related to this connection
           */
          close (i);

          for (j = 0; j < ITTI_DUMP_MAX_CON; j++) {
            if (itti_dump_queue.itti_clients[j].sd == i)
              break;
          }

          /*
           * In case we don't find the matching sd in list of known
           * * * connections -> assert.
           */
          AssertFatal (j < ITTI_DUMP_MAX_CON, "Connection index not found (%d/%d) for socked %d!\n", j, ITTI_DUMP_MAX_CON, i);
          /*
           * Re-initialize the socket to -1 so we can accept new
           * * * incoming connections.
           */
          itti_dump_queue.itti_clients[j].sd = -1;
          itti_dump_queue.itti_clients[j].last_message_number = 0;
          itti_dump_queue.nb_connected--;
          /*
           * Remove the socket from the FD set and update the max sd
           */
          FD_CLR (i, &read_set);

          if (i == max_sd) {
            if (itti_dump_queue.nb_connected == 0) {
              /*
               * No more new connection max_sd = itti_listen_socket
               */
              max_sd = itti_listen_socket;
            } else {
              while (FD_ISSET (max_sd, &read_set) == 0) {
                max_sd -= 1;
              }
            }
          }
        }
      }
    }
  }

  return NULL;
}
/*------------------------------------------------------------------------------*/
/* Public entry point: copies a message and queues it for dumping. Called by
 * sender tasks; becomes a no-op once itti_dump_running is cleared.
 * Always returns 0. */
int
itti_dump_queue_message (
  task_id_t sender_task,
  message_number_t message_number,
  MessageDef * message_p,
  const char *message_name,
  const uint32_t message_size)
{
  if (itti_dump_running) {
    itti_dump_queue_item_t *new;

    AssertFatal (message_name != NULL, "Message name is NULL!\n");
    AssertFatal (message_p != NULL, "Message is NULL!\n");
#if OAI_EMU
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME (VCD_SIGNAL_DUMPER_FUNCTIONS_ITTI_DUMP_ENQUEUE_MESSAGE_malloc, VCD_FUNCTION_IN);
#endif
    /* NOTE(review): itti_malloc results are used unchecked below —
     * presumably it asserts internally on failure; confirm against its
     * implementation. */
    new = itti_malloc (sender_task, TASK_MAX, sizeof (itti_dump_queue_item_t));
#if OAI_EMU
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME (VCD_SIGNAL_DUMPER_FUNCTIONS_ITTI_DUMP_ENQUEUE_MESSAGE_malloc, VCD_FUNCTION_OUT);
#endif
#if OAI_EMU
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME (VCD_SIGNAL_DUMPER_FUNCTIONS_ITTI_DUMP_ENQUEUE_MESSAGE_malloc, VCD_FUNCTION_IN);
#endif
    new->data = itti_malloc (sender_task, TASK_MAX, message_size);
#if OAI_EMU
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME (VCD_SIGNAL_DUMPER_FUNCTIONS_ITTI_DUMP_ENQUEUE_MESSAGE_malloc, VCD_FUNCTION_OUT);
#endif
    /* The queue owns a private copy of the payload. */
    memcpy (new->data, message_p, message_size);
    new->data_size = message_size;
    new->message_number = message_number;
    itti_dump_enqueue_message (new, message_size, ITTI_DUMP_MESSAGE_TYPE);
  }

  return 0;
}
/* This function should be called by each thread that will use the ring buffer.
 * It registers the calling thread with the lock-free lfds611 ring buffer;
 * the library requires per-thread registration before a thread may
 * enqueue or dequeue elements. */
void
itti_dump_thread_use_ring_buffer (
  void)
{
  lfds611_ringbuffer_use (itti_dump_queue.itti_message_queue);
}
/*
 * Initialise the ITTI message dumper.
 *
 * messages_definition_xml: XML description of all message types; written at
 *                          the head of the dump file and served to clients.
 * dump_file_name:          optional path of a binary dump file (may be NULL).
 *
 * Creates the ring buffer, the wake-up eventfd, the client table and the
 * acceptor thread.  Returns 0 on success; any unrecoverable error asserts.
 */
int
itti_dump_init (
  const char *const messages_definition_xml,
  const char *const dump_file_name)
{
  int i;
  int ret;
  struct sched_param scheduler_param;

  scheduler_param.sched_priority = sched_get_priority_min (SCHED_FIFO) + 1;

  if (dump_file_name != NULL) {
    dump_file = fopen (dump_file_name, "wb");

    if (dump_file == NULL) {
      /* Non-fatal: file dumping is optional, socket clients still work. */
      ITTI_DUMP_ERROR (" can not open dump file \"%s\" (%d:%s)\n", dump_file_name, errno, strerror (errno));
    } else {
      /*
       * Output the XML message definitions at the head of the dump file.
       */
      uint32_t message_size = strlen (messages_definition_xml) + 1;
      itti_socket_header_t header;

      header.message_size = sizeof (itti_socket_header_t) + message_size + sizeof (itti_message_types_t);
      header.message_type = ITTI_DUMP_XML_DEFINITION;
      fwrite (&header, sizeof (itti_socket_header_t), 1, dump_file);
      fwrite (messages_definition_xml, message_size, 1, dump_file);
      fwrite (&itti_dump_xml_definition_end, sizeof (itti_message_types_t), 1, dump_file);
      fflush (dump_file);
    }
  }

  memset (&itti_dump_queue, 0, sizeof (itti_desc_t));
  ITTI_DUMP_DEBUG (0x2, " Creating new ring buffer for itti dump of %u elements\n", ITTI_QUEUE_MAX_ELEMENTS);

  if (lfds611_ringbuffer_new (&itti_dump_queue.itti_message_queue, ITTI_QUEUE_MAX_ELEMENTS, NULL, NULL) != 1) {
    /*
     * Always assert on this condition
     */
    AssertFatal (0, " Failed to create ring buffer!\n");
  }

  /* Semaphore-mode eventfd used to signal the dump thread of new items. */
  itti_dump_queue.event_fd = eventfd (0, EFD_SEMAPHORE);

  if (itti_dump_queue.event_fd == -1) {
    /*
     * Always assert on this condition
     */
    AssertFatal (0, "eventfd failed: %s!\n", strerror (errno));
  }

  itti_dump_queue.nb_connected = 0;

  /* Mark every client slot as free. */
  for (i = 0; i < ITTI_DUMP_MAX_CON; i++) {
    itti_dump_queue.itti_clients[i].sd = -1;
    itti_dump_queue.itti_clients[i].last_message_number = 0;
  }

  /*
   * BUGFIX: pthread_attr_init/pthread_attr_setschedpolicy/
   * pthread_attr_setschedparam/pthread_create return 0 on success and a
   * POSITIVE error number on failure; they never return a negative value
   * and do not set errno.  The original 'ret < 0' checks could never fire
   * and 'strerror(errno)' would have reported an unrelated error.
   */
  ret = pthread_attr_init (&itti_dump_queue.attr);

  if (ret != 0) {
    AssertFatal (0, "pthread_attr_init failed (%d:%s)!\n", ret, strerror (ret));
  }

  ret = pthread_attr_setschedpolicy (&itti_dump_queue.attr, SCHED_FIFO);

  if (ret != 0) {
    /* Message fixed: the policy being set is SCHED_FIFO, not SCHED_IDLE. */
    AssertFatal (0, "pthread_attr_setschedpolicy (SCHED_FIFO) failed (%d:%s)!\n", ret, strerror (ret));
  }

  ret = pthread_attr_setschedparam (&itti_dump_queue.attr, &scheduler_param);

  if (ret != 0) {
    AssertFatal (0, "pthread_attr_setschedparam failed (%d:%s)!\n", ret, strerror (ret));
  }

  ret = pthread_create (&itti_dump_queue.itti_acceptor_thread, &itti_dump_queue.attr, &itti_dump_socket, (void *)messages_definition_xml);

  if (ret != 0) {
    AssertFatal (0, "pthread_create failed (%d:%s)!\n", ret, strerror (ret));
  }

  pthread_setname_np (itti_dump_queue.itti_acceptor_thread, "ITTI acceptor");
  return 0;
}
/* Stop the message dumper: signals the acceptor thread via a zeroed
 * EXIT item, joins it, then releases the dump file and the ring buffer.
 * Must be called at most once, after itti_dump_init(). */
void
itti_dump_exit (
  void)
{
  void *arg;
  itti_dump_queue_item_t *new;

  /* The exit marker item carries no payload; type alone is significant. */
  new = itti_malloc (TASK_UNKNOWN, TASK_UNKNOWN, sizeof (itti_dump_queue_item_t));
  memset (new, 0, sizeof (itti_dump_queue_item_t));
  /*
   * Set a flag to stop recording messages BEFORE waking the dump thread,
   * so no further items are enqueued behind the exit signal.
   */
  itti_dump_running = 0;
  /*
   * Send the exit signal to the dumper thread.
   */
  itti_dump_enqueue_message (new, 0, ITTI_DUMP_EXIT_SIGNAL);
  ITTI_DUMP_DEBUG (0x2, " waiting for dumper thread to finish\n");
  /*
   * Wait for the thread to terminate before freeing resources it may
   * still be using.
   */
  pthread_join (itti_dump_queue.itti_acceptor_thread, &arg);
  ITTI_DUMP_DEBUG (0x2, " dumper thread correctly exited\n");

  if (dump_file != NULL) {
    /*
     * Flush and close the dump file (fclose flushes buffered data).
     */
    fclose (dump_file);
    dump_file = NULL;
  }

  if (itti_dump_queue.itti_message_queue) {
    /* User-data callback frees any items still sitting in the ring. */
    lfds611_ringbuffer_delete (itti_dump_queue.itti_message_queue, itti_dump_user_data_delete_function, NULL);
  }
}
|
package apps;

import org.jooby.Jooby;

/**
 * Jooby application that exposes the routes declared by
 * {@code Controller1100}. The instance-initializer block runs during
 * construction, registering the controller before the app starts.
 */
public class App1100 extends Jooby {
  {
    // Mount all MVC routes of Controller1100 on this application.
    use(Controller1100.class);
  }
}
|
class CustomCollection:
    """A minimal list-backed collection: ordered, duplicates allowed."""

    def __init__(self):
        # Backing list; keeps insertion order.
        self.elements = []

    def add(self, element):
        """Append ``element`` to the collection (duplicates allowed)."""
        self.elements.append(element)

    def remove(self, element):
        """Remove the first occurrence of ``element``; no-op if absent."""
        try:
            self.elements.remove(element)
        except ValueError:
            pass

    def contains(self, element):
        """Return ``True`` if ``element`` is in the collection."""
        return element in self.elements

    def __iter__(self):
        """Iterate over elements in insertion order."""
        return iter(self.elements)

    def __len__(self):
        """Number of stored elements (including duplicates)."""
        return len(self.elements)
import { HttpBackend, HttpClient, HttpClientModule } from '@angular/common/http';
import { HttpClientTestingModule, HttpTestingController } from '@angular/common/http/testing';
import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { NoopAnimationsModule } from '@angular/platform-browser/animations';
import { RouterTestingModule } from '@angular/router/testing';
import { CoreModule } from '../../../../../core/src/core/core.module';
import { getGitHubAPIURL, GITHUB_API_URL } from '../../../../../core/src/core/github.helpers';
import { SharedModule } from '../../../../../core/src/shared/shared.module';
import { TabNavService } from '../../../../../core/src/tab-nav.service';
import { generateCfStoreModules } from '../../../../test-framework/cloud-foundry-endpoint-service.helper';
import { CloudFoundrySharedModule } from '../../../shared/cf-shared.module';
import { CfOrgSpaceDataService } from '../../../shared/data-services/cf-org-space-service.service';
import { ApplicationEnvVarsHelper } from '../application/application-tabs-base/tabs/build-tab/application-env-vars.service';
import { CreateApplicationModule } from '../create-application/create-application.module';
import {
DeployApplicationOptionsStepComponent,
} from './deploy-application-options-step/deploy-application-options-step.component';
import {
DeployApplicationStepSourceUploadComponent,
} from './deploy-application-step-source-upload/deploy-application-step-source-upload.component';
import { CommitListWrapperComponent } from './deploy-application-step2-1/commit-list-wrapper/commit-list-wrapper.component';
import { DeployApplicationStep21Component } from './deploy-application-step2-1/deploy-application-step2-1.component';
import {
DeployApplicationFsComponent,
} from './deploy-application-step2/deploy-application-fs/deploy-application-fs.component';
import { DeployApplicationStep2Component } from './deploy-application-step2/deploy-application-step2.component';
import { DeployApplicationStep3Component } from './deploy-application-step3/deploy-application-step3.component';
import { ApplicationDeploySourceTypes } from './deploy-application-steps.types';
import { DeployApplicationComponent } from './deploy-application.component';
import { GithubProjectExistsDirective } from './github-project-exists.directive';
describe('DeployApplicationComponent', () => {
  let component: DeployApplicationComponent;
  let fixture: ComponentFixture<DeployApplicationComponent>;

  beforeEach(async(() => {
    TestBed.configureTestingModule({
      declarations: [
        DeployApplicationComponent,
        DeployApplicationStep2Component,
        DeployApplicationStep21Component,
        DeployApplicationStep3Component,
        DeployApplicationOptionsStepComponent,
        DeployApplicationStepSourceUploadComponent,
        DeployApplicationFsComponent,
        CommitListWrapperComponent,
        GithubProjectExistsDirective,
      ],
      providers: [
        CfOrgSpaceDataService,
        ApplicationEnvVarsHelper,
        { provide: GITHUB_API_URL, useFactory: getGitHubAPIURL },
        HttpClient,
        {
          provide: HttpBackend,
          useClass: HttpTestingController
        },
        TabNavService,
        ApplicationDeploySourceTypes
      ],
      imports: [
        ...generateCfStoreModules(),
        SharedModule,
        CoreModule,
        RouterTestingModule,
        CreateApplicationModule,
        NoopAnimationsModule,
        // BUGFIX: HttpClientModule was listed twice in this array; the
        // duplicate entry has been removed.
        HttpClientModule,
        HttpClientTestingModule,
        CloudFoundrySharedModule
      ]
    })
      .compileComponents();
  }));

  beforeEach(() => {
    fixture = TestBed.createComponent(DeployApplicationComponent);
    component = fixture.componentInstance;
    fixture.detectChanges();
  });

  it('should create', () => {
    expect(component).toBeTruthy();
  });
});
|
<filename>src/main/java/com/lovecws/mumu/netty/serialization/protobuf/news/SinaFinanceNewsOrBuilder.java<gh_stars>0
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: news.proto
package com.lovecws.mumu.netty.serialization.protobuf.news;
/**
 * Accessor interface for the generated {@code SinaFinanceNews} protobuf
 * message (source: news.proto). For every {@code string} field protoc emits
 * a String getter plus a raw UTF-8 {@code ByteString} getter.
 *
 * NOTE(review): generated code -- do not hand-edit; regenerate from
 * news.proto instead ("sumary" mirrors a typo in the .proto field name).
 */
public interface SinaFinanceNewsOrBuilder extends
    // @@protoc_insertion_point(interface_extends:com.lovecws.mumu.netty.serialization.protobuf.news.SinaFinanceNews)
    com.google.protobuf.MessageOrBuilder {

  /**
   * <code>string htitle = 1;</code>
   */
  java.lang.String getHtitle();
  /**
   * <code>string htitle = 1;</code>
   */
  com.google.protobuf.ByteString
      getHtitleBytes();

  /**
   * <code>string keywords = 2;</code>
   */
  java.lang.String getKeywords();
  /**
   * <code>string keywords = 2;</code>
   */
  com.google.protobuf.ByteString
      getKeywordsBytes();

  /**
   * <code>string description = 3;</code>
   */
  java.lang.String getDescription();
  /**
   * <code>string description = 3;</code>
   */
  com.google.protobuf.ByteString
      getDescriptionBytes();

  /**
   * <code>string url = 4;</code>
   */
  java.lang.String getUrl();
  /**
   * <code>string url = 4;</code>
   */
  com.google.protobuf.ByteString
      getUrlBytes();

  /**
   * <code>string sumary = 5;</code>
   */
  java.lang.String getSumary();
  /**
   * <code>string sumary = 5;</code>
   */
  com.google.protobuf.ByteString
      getSumaryBytes();

  /**
   * <code>string content = 6;</code>
   */
  java.lang.String getContent();
  /**
   * <code>string content = 6;</code>
   */
  com.google.protobuf.ByteString
      getContentBytes();

  /**
   * <code>string logo = 7;</code>
   */
  java.lang.String getLogo();
  /**
   * <code>string logo = 7;</code>
   */
  com.google.protobuf.ByteString
      getLogoBytes();

  /**
   * <code>string title = 8;</code>
   */
  java.lang.String getTitle();
  /**
   * <code>string title = 8;</code>
   */
  com.google.protobuf.ByteString
      getTitleBytes();

  /**
   * <code>string pubDate = 9;</code>
   */
  java.lang.String getPubDate();
  /**
   * <code>string pubDate = 9;</code>
   */
  com.google.protobuf.ByteString
      getPubDateBytes();

  /**
   * <code>string mediaName = 10;</code>
   */
  java.lang.String getMediaName();
  /**
   * <code>string mediaName = 10;</code>
   */
  com.google.protobuf.ByteString
      getMediaNameBytes();

  /**
   * <code>string mediaUrl = 11;</code>
   */
  java.lang.String getMediaUrl();
  /**
   * <code>string mediaUrl = 11;</code>
   */
  com.google.protobuf.ByteString
      getMediaUrlBytes();

  /**
   * <code>string category = 12;</code>
   */
  java.lang.String getCategory();
  /**
   * <code>string category = 12;</code>
   */
  com.google.protobuf.ByteString
      getCategoryBytes();

  /**
   * <code>string type = 13;</code>
   */
  java.lang.String getType();
  /**
   * <code>string type = 13;</code>
   */
  com.google.protobuf.ByteString
      getTypeBytes();
}
|
<gh_stars>0
package com.estafet.boostcd.feature.api.scheduler;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import com.estafet.boostcd.feature.api.dao.RepoDAO;
import com.estafet.boostcd.feature.api.jms.CommitProducer;
import com.estafet.boostcd.feature.api.model.Feature;
import com.estafet.boostcd.feature.api.model.Repo;
import com.estafet.boostcd.feature.api.model.RepoCommit;
import com.estafet.boostcd.feature.api.service.FeatureService;
import com.estafet.boostcd.feature.api.service.GitService;
/**
 * Runs every 60 seconds and publishes commit messages onto the commit JMS
 * queue via {@link CommitProducer}:
 * <ul>
 *   <li>the latest commits of every registered repository, and</li>
 *   <li>the commits already matched to features that are still incomplete
 *       (re-sent so downstream consumers can re-evaluate them).</li>
 * </ul>
 */
@Component
public class CommitScheduler {

	@Autowired
	private RepoDAO repoDAO;

	@Autowired
	private FeatureService featureService;

	@Autowired
	private CommitProducer commitProducer;

	@Autowired
	private GitService gitService;

	// fixedRate = 60000 ms: a new run starts one minute after the previous
	// run STARTED (not after it finished).
	@Scheduled(fixedRate = 60000)
	public void execute() {
		// Forward the most recent commits of every known repo.
		for (Repo repo : repoDAO.getRepos()) {
			for (RepoCommit commit : gitService.getLastestRepoCommits(repo.getName())) {
				commitProducer.sendMessage(commit.getCommitMessage());
			}
		}
		// Re-send commits matched to features that are not yet complete.
		for (Feature feature : featureService.getIncompleteFeatures()) {
			for (RepoCommit matched : feature.getMatched()) {
				commitProducer.sendMessage(matched.getCommitMessage());
			}
		}
	}
}
|
#!/bin/bash -e
# pi-gen stage script: install DCU-central-control-app into the image rootfs.
# Expects pi-gen to provide ROOTFS_DIR, FIRST_USER_NAME and the on_chroot
# helper in the environment.

# BUGFIX: shebang options ('-e') are ignored when the script is invoked as
# 'bash script.sh', so enable errexit explicitly as well.
set -e

app_name="DCU-central-control-app"

# Copy the latest DCU-central-control-app from github in the /home/pi folder
pushd files
find "${app_name}" -type f -exec install -D "{}" "${ROOTFS_DIR}/home/${FIRST_USER_NAME}/{}" \;
popd

# Configure ownership: everything under the first user's home belongs to them.
on_chroot << EOF
chown -R ${FIRST_USER_NAME}:${FIRST_USER_NAME} /home/${FIRST_USER_NAME}
EOF

# Install and enable the ccu-app systemd service inside the image.
install -m 644 files/ccu-app.service "${ROOTFS_DIR}/etc/systemd/system/"
on_chroot << EOF
systemctl enable ccu-app
EOF
|
#!/bin/sh
#Author raony
#PBS -N blast_nt2
#PBS -e blast_2.err
#PBS -o blast_2.log
#PBS -q fila64
#PBS -l select=1:ncpus=24
# PBS starts jobs in $HOME; switch to the directory the job was submitted from.
cd $PBS_O_WORKDIR
# BLASTN of FASTA chunk 01 against the local NCBI 'nt' database, e-value
# cutoff 1e-5, using all 24 requested cores; 'time' reports the wall clock.
time /home/raonyguimaraes/programs/ncbi-blast-2.3.0+/bin/blastn -db /home/raonyguimaraes/blast_db/nt -evalue 1e-5 -query splits/R_2013_11_30_15_23_49_user_CTL-148-Metagenoma_Mauro_Eduardo2.CTLT_PGM.IonXpress_014_Sem28S2_COI2_18S2_Q20_good.01.fasta -out /home/raonyguimaraes/experiment/blast_bench/output/blast_output_2.out -num_threads 24
<?php
// Print every r-element combination of the first n entries of $arr,
// one combination per line (delegates to the recursive helper).
function find_combinations($arr, $n, $r)
{
    // Start with an empty working buffer at depth 0, candidate index 0.
    combinationUtil($arr, 0, array(), 0, $r, $n);
}
// Recursive worker that enumerates combinations.
// $arr   - candidate pool, $n usable elements
// $index - next free slot in $data (how many items are chosen so far)
// $data  - combination built so far (passed by value: PHP copies arrays)
// $i     - index of the next candidate element
// $r     - target combination size
function combinationUtil($arr, $index, $data,
                         $i, $r, $n)
{
    // A full combination is ready: print "e1 e2 ... er \n"
    // (trailing space kept to match the original output format).
    if ($index == $r) {
        foreach (array_slice($data, 0, $r) as $chosen) {
            echo $chosen . " ";
        }
        echo "\n";
        return;
    }

    // No candidates left to place.
    if ($i >= $n) {
        return;
    }

    // Branch 1: include $arr[$i] at the current slot.
    $data[$index] = $arr[$i];
    combinationUtil($arr, $index + 1, $data,
                    $i + 1, $r, $n);

    // Branch 2: exclude $arr[$i] (same slot, next candidate).
    combinationUtil($arr, $index, $data,
                    $i + 1, $r, $n);
}
// Driver Code: print all 2-element combinations of {A, B, C, D}.
$arr = array('A', 'B', 'C', 'D');
$r = 2;              // combination size
$n = count($arr);    // number of usable elements
find_combinations($arr, $n, $r);
// This code is contributed by anuj_67.
?>
def optimize(x, y):
    """Return the smaller of ``x`` and ``y`` (``x`` wins ties, like min())."""
    return x if x <= y else y


print(optimize(4, 6))
#!/bin/bash
set -e
# Watchdog loop: every 30 seconds run the zombie-monitor check.
# NOTE(review): relies on a `log` helper function and /check_zombie_mons.py
# being provided elsewhere in the image -- neither is defined in this file.
function watch_mon_health {
while true; do
log "Checking for zombie mons"
# '|| true' keeps the watchdog alive (and survives 'set -e') when the
# check script exits non-zero.
/check_zombie_mons.py || true
log "Sleep 30 sec"
sleep 30
done
}
|
import math

# Prompt the user for the cylinder's dimensions (float() raises ValueError
# on non-numeric input -- intentionally unhandled in this simple script).
radius = float(input("Enter the radius of the cylinder: "))
height = float(input("Enter the height of the cylinder: "))

# Volume of a cylinder: V = pi * r^2 * h
volume = math.pi * radius**2 * height

# Display the volume rounded to one decimal place.
print("\nThe Volume of the cylinder is {:.1f} cubic units\n".format(volume))
<filename>pages/login/login.js
var api = require('../../config/api.js');
var util = require('../../utils/util.js');
var user = require('../../utils/user.js');
var app = getApp();
// Login page: WeChat phone-number authorisation plus a "stay" retention
// pop-up. Every interaction is reported to the sensors analytics SDK.
Page({
  data: {
    heightTop: '',             // nav offset (status-bar top + capsule height), set in onShow
    type: false,               // true when opened with ?type=...; changes where "give up" navigates
    loginStayPop: false,       // retention pop-up visibility
    closeStayPropFlag: false,  // true once the pop-up has been shown and dismissed
    token: wx.getStorageSync('accesstoken') || '',
    checkTken: ''
  },
  onLoad(options) {
    if (this.options.type) {
      this.setData({
        type: true
      })
    }
  },
  // Abandon login: navigate away, remember when the user closed the login
  // page, and (when triggered by a tap) report the click source.
  stayGiveUpLogin(e) {
    const {
      type
    } = this.data
    if (type) {
      wx.switchTab({
        url: '/pages/index/index'
      });
    } else {
      wx.navigateBack({
        delta: 1
      })
    }
    let closeLoginTime = util.getnewDateSeconds()
    wx.setStorageSync('UserCloseLoginTime', closeLoginTime)
    // BUGFIX: cancelBack() calls this method without an event object;
    // dereferencing e.currentTarget unconditionally threw a TypeError.
    if (e) {
      this.registerClickFun(e.currentTarget.dataset.typsmsg)
    }
  },
  // Report a registration/login click to analytics.
  registerClickFun(msg) {
    app.sensors.track('registerClick', {
      click_source: msg
    })
  },
  // Back button: show the retention pop-up the first time, leave afterwards.
  cancelBack(e) {
    const {
      closeStayPropFlag
    } = this.data
    if (!closeStayPropFlag) {
      this.setData({
        loginStayPop: true
      })
    } else {
      this.stayGiveUpLogin()
    }
    this.registerClickFun(e.currentTarget.dataset.typsmsg)
  },
  // Dismiss the retention pop-up and remember that it was shown.
  closeLoginStayPop(e) {
    this.setData({
      loginStayPop: false,
      closeStayPropFlag: true
    })
    this.registerClickFun(e.currentTarget.dataset.typsmsg)
  },
  // Open the user-agreement / privacy web view for the tapped document id.
  goPage(e) {
    console.log(e.currentTarget.dataset.id);
    var url = '/pages/webViewList/userAgreement/userAgreement?type=' + e.currentTarget.dataset.id
    wx.navigateTo({
      url
    });
  },
  // Phone-number authorisation button handler.
  getPhoneNumber(e) {
    let {checkTken} = this.data;
    this.registerClickFun(e.target.dataset.typsmsg)
    // Calls wx.login directly to obtain a fresh code when none is cached.
    if (e.detail.errMsg == "getPhoneNumber:ok") {
      let _iv = e.detail.iv;
      let _encryptedData = e.detail.encryptedData
      user.checkLogin().catch(() => {
        if (!wx.getStorageSync('code')) {
          wx.login({
            success: function (res) {
              if (res) {
                user.loginByWeixin('微信授权登录-页面', _iv, _encryptedData, res.code).then(res => {
                  if (wx.getStorageSync('accessOption')) {
                    util.getCustomer(wx.getStorageSync('accessOption'))
                  }
                  // BUGFIX: removed stray 'resolve(true)' -- there is no
                  // enclosing Promise executor here, so it threw a
                  // ReferenceError, skipped the success toast and routed a
                  // successful login into the .catch error toast.
                  wx.showToast({
                    title: "微信授权成功",
                    icon: "none",
                    duration: 2000,
                  });
                  wx.navigateBack({
                    delta: 1
                  })
                }).catch((err) => {
                  util.showErrorToast('微信登录失败');
                });
              }
            }
          })
        } else {
          // A cached code exists: log in without requesting a new one.
          user.loginByWeixin('微信授权登录-页面', _iv, _encryptedData).then(res => {
            if (wx.getStorageSync('accessOption')) {
              util.getCustomer(wx.getStorageSync('accessOption'))
            }
            wx.showToast({
              title: "微信授权成功",
              icon: "none",
              duration: 2000,
            });
            wx.navigateBack({
              delta: 1
            })
          }).catch((err) => {
            util.showErrorToast('微信登录失败');
          });
        }
      })
    } else {
      // User denied the phone-number authorisation prompt.
      app.sensors.track('loginClose', {})
      wx.showToast({
        title: "用户取消授权",
        icon: "none",
        duration: 2000,
      });
    }
  },
  onReady: function () { },
  onShow: function () {
    // Compute the nav-bar offset from global capsule metrics.
    const {
      top,
      height
    } = app.globalData
    this.setData({
      heightTop: (top + height)
    })
    // Pre-fetch a login code so getPhoneNumber can use the cached one.
    wx.login({
      success: function (res) {
        wx.setStorageSync('code', res.code)
      },
      fail: function (err) {
      }
    });
  },
  onHide: function () {
  },
  onUnload: function () {
    // Page closed without logging in.
    app.sensors.track('loginClose', {})
  },
  // Switch to password/phone login.
  accountLogin: function (e) {
    wx.navigateTo({
      url: "/pages/iphoneLogin/iphoneLogin"
    });
    this.registerClickFun(e.currentTarget.dataset.typsmsg)
  }
})
#!/bin/bash
# Uninstall docker from this (master) VM and from every worker VM listed in
# ip.txt (space-separated IPs), reached over ssh with the given key.
#
# Usage: ./uninstall.sh -k <keyname-in-~/.ssh>

POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
    -k|--keyname)
    KEYNAME="$2"
    shift # past argument
    shift # past value
    ;;
    *)
    # BUGFIX: without a default arm an unrecognised argument was never
    # shifted, so the while-loop spun forever. Preserve it and move on.
    POSITIONAL+=("$1")
    shift
    ;;
esac
done
set -- "${POSITIONAL[@]}" # restore positional parameters

# uninstall for first vm (master vm)
sudo systemctl stop docker
sudo apt-get purge -y docker-engine docker docker.io docker-ce
sudo apt-get autoremove -y --purge docker-engine docker docker.io docker-ce
sudo apt-get autoclean
sudo rm -rf /var/lib/docker
sudo rm /etc/apparmor.d/docker
sudo groupdel docker

# BUGFIX: the original compared $(cat ip.txt) against cat's error message,
# which goes to stderr and never lands in the variable. Test the file itself.
if [ ! -f ip.txt ]; then
    echo "can't find ip.txt (program exit)"
    exit 1
fi
input_ip=$(cat ip.txt)

# Count the IPs listed in ip.txt.
total_ip=0
IFS=' ' read -ra ADDR <<< "$input_ip"
for i in "${ADDR[@]}"; do
    ((total_ip++))
done
if [ "${total_ip}" -le 0 ]; then
    # Message fixed: the old text said "installation done" for an empty list.
    echo "no worker IPs found in ip.txt"
    exit 1
fi

# The ssh key is required to reach the worker VMs.
if [ -z "$KEYNAME" ]; then
    echo "Please input your key name (more detail see readme)"
    exit 1
fi

# Uninstall docker on every worker VM.
for i in $input_ip; do
    ssh -i ~/.ssh/"$KEYNAME" ubuntu@"$i" "sudo systemctl stop docker"
    ssh -i ~/.ssh/"$KEYNAME" ubuntu@"$i" "sudo apt-get purge -y docker-engine docker docker.io docker-ce"
    ssh -i ~/.ssh/"$KEYNAME" ubuntu@"$i" "sudo apt-get autoremove -y --purge docker-engine docker docker.io docker-ce"
    ssh -i ~/.ssh/"$KEYNAME" ubuntu@"$i" "sudo apt-get autoclean"
    ssh -i ~/.ssh/"$KEYNAME" ubuntu@"$i" "sudo rm -rf /var/lib/docker"
    ssh -i ~/.ssh/"$KEYNAME" ubuntu@"$i" "sudo rm /etc/apparmor.d/docker"
    ssh -i ~/.ssh/"$KEYNAME" ubuntu@"$i" "sudo groupdel docker"
done
# Typo fixed: "unistallation" -> "uninstallation".
echo "uninstallation done"
|
<filename>netresources.h
// Header for enumerating Windows network resources via the WNet API family.
#include <windows.h>
#include <winnetwk.h>

// Link against mpr.lib, which provides the WNet* entry points (MSVC only).
#pragma comment(lib, "mpr.lib")

// Walks the network-resource tree; returns TRUE on success.
// NOTE(review): the definition lives in a separate .c/.cpp file.
BOOL WINAPI enumerateResources();
|
#!/usr/bin/env bash
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
set -o errexit # Exit the script if any statement fails.
set -o nounset # Exit the script if any uninitialized variable is used.
# Where to clone the SDK from.
# BUGFIX: GitHub no longer serves the unencrypted git:// protocol, and the
# old default '${CLONE_URL:- 'url'}' also embedded a stray leading space in
# the value. Default to https:// with no padding.
CLONE_URL=${CLONE_URL:-'https://github.com/alexa/avs-device-sdk.git'}

PORT_AUDIO_FILE="pa_stable_v190600_20161030.tgz"
PORT_AUDIO_DOWNLOAD_URL="http://www.portaudio.com/archives/$PORT_AUDIO_FILE"
TEST_MODEL_DOWNLOAD="https://github.com/Sensory/alexa-rpi/blob/master/models/spot-alexa-rpi-31000.snsr"
BUILD_TESTS=${BUILD_TESTS:-'true'}

# Installation layout, all overridable from the environment.
CURRENT_DIR="$(pwd)"
INSTALL_BASE=${INSTALL_BASE:-"$CURRENT_DIR"}
SOURCE_FOLDER=${SDK_LOC:-''}
THIRD_PARTY_FOLDER=${THIRD_PARTY_LOC:-'third-party'}
BUILD_FOLDER=${BUILD_FOLDER:-'build'}
SOUNDS_FOLDER=${SOUNDS_FOLDER:-'sounds'}
DB_FOLDER=${DB_FOLDER:-'db'}

SOURCE_PATH="$INSTALL_BASE/$SOURCE_FOLDER"
THIRD_PARTY_PATH="$INSTALL_BASE/$THIRD_PARTY_FOLDER"
BUILD_PATH="$INSTALL_BASE/$BUILD_FOLDER"
SOUNDS_PATH="$INSTALL_BASE/$SOUNDS_FOLDER"
DB_PATH="$INSTALL_BASE/$DB_FOLDER"
CONFIG_DB_PATH="$DB_PATH"
UNIT_TEST_MODEL_PATH="$INSTALL_BASE/avs-device-sdk/KWD/inputs/SensoryModels/"
UNIT_TEST_MODEL="$THIRD_PARTY_PATH/alexa-rpi/models/spot-alexa-rpi-31000.snsr"
INPUT_CONFIG_FILE="$SOURCE_PATH/avs-device-sdk/Integration/AlexaClientSDKConfig.json"
OUTPUT_CONFIG_FILE="$BUILD_PATH/Integration/AlexaClientSDKConfig.json"
TEMP_CONFIG_FILE="$BUILD_PATH/Integration/tmp_AlexaClientSDKConfig.json"
TEST_SCRIPT="$INSTALL_BASE/test.sh"
LIB_SUFFIX="a"
ANDROID_CONFIG_FILE=""

# Default device serial number if nothing is specified
DEVICE_SERIAL_NUMBER="123456"
# Default device manufacturer name
DEVICE_MANUFACTURER_NAME=${DEVICE_MANUFACTURER_NAME:-"Test Manufacturer"}
# Default device description
DEVICE_DESCRIPTION=${DEVICE_DESCRIPTION:-"Test Device"}

GSTREAMER_AUDIO_SINK="autoaudiosink"
# Download, unpack and compile PortAudio under $THIRD_PARTY_PATH.
# JACK support is disabled; the library is consumed from its build tree
# (no 'make install').
build_port_audio() {
  # build port audio
  echo
  echo "==============> BUILDING PORT AUDIO =============="
  echo
  pushd $THIRD_PARTY_PATH
  # -c resumes a partial download if the script is re-run.
  wget -c $PORT_AUDIO_DOWNLOAD_URL
  tar zxf $PORT_AUDIO_FILE
  pushd portaudio
  ./configure --without-jack
  make
  popd
  popd
}
# Detect the build platform.
# Prints "Raspberry pi" or "Windows mingw64" on stdout; prints nothing for
# unsupported systems (the caller treats an empty result as unsupported).
get_platform() {
  local uname_str
  uname_str=$(uname -a)

  if [[ "$uname_str" == "Linux "* ]] && [[ -f /etc/os-release ]]
  then
    # Read the distro ID directly with grep (the original piped cat into
    # grep and declared an unused 'result' variable).
    local sys_id
    sys_id=$(grep "^ID=" /etc/os-release)

    if [[ "$sys_id" == "ID=raspbian" ]]
    then
      echo "Raspberry pi"
    fi
  elif [[ "$uname_str" == "MINGW64"* ]]
  then
    echo "Windows mingw64"
  fi
}
# Print the installer's usage text to stdout (one printf, one line per arg).
show_help() {
  printf '%s\n' \
    'Usage: setup.sh <config-json-file> [OPTIONS]' \
    'The <config-json-file> can be downloaded from developer portal and must contain the following:' \
    ' "clientId": "<OAuth client ID>"' \
    ' "productId": "<your product name for device>"' \
    '' \
    'Optional parameters' \
    ' -s <serial-number> If nothing is provided, the default device serial number is 123456' \
    ' -a <file-name> The file that contains Android installation configurations (e.g. androidConfig.txt)' \
    ' -d <description> The description of the device.' \
    ' -m <manufacturer> The device manufacturer name.' \
    ' -h Display this help and exit'
}
# Require at least the config JSON file argument.
if [[ $# -lt 1 ]]; then
    show_help
    exit 1
fi

CONFIG_JSON_FILE=$1
if [ ! -f "$CONFIG_JSON_FILE" ]; then
    echo "Config json file not found!"
    show_help
    exit 1
fi
shift 1

# Parse the optional flags (meanings documented in show_help).
OPTIONS=s:a:m:d:h
while getopts "$OPTIONS" opt ; do
    case $opt in
        s )
            DEVICE_SERIAL_NUMBER="$OPTARG"
            ;;
        a )
            ANDROID_CONFIG_FILE="$OPTARG"
            if [ ! -f "$ANDROID_CONFIG_FILE" ]; then
                echo "Android config file is not found!"
                exit 1
            fi
            source $ANDROID_CONFIG_FILE
            ;;
        d )
            DEVICE_DESCRIPTION="$OPTARG"
            ;;
        m )
            DEVICE_MANUFACTURER_NAME="$OPTARG"
            ;;
        h )
            show_help
            exit 1
            ;;
    esac
done

# BUGFIX: the original pattern '[0-9a-zA-Z_]+' was unanchored, so any value
# CONTAINING one word character passed (e.g. 'ab!!'). Anchor it so the whole
# serial number must be alphanumeric/underscore.
if [[ ! "$DEVICE_SERIAL_NUMBER" =~ ^[0-9a-zA-Z_]+$ ]]; then
    echo 'Device serial number is invalid!'
    exit 1
fi
# The target platform for the build.
PLATFORM=${PLATFORM:-$(get_platform)}

# Source the platform-specific hooks (install_dependencies,
# run_os_specifics, CMAKE_PLATFORM_SPECIFIC, ...) for the detected platform;
# abort for anything unsupported.
if [ "$PLATFORM" == "Raspberry pi" ]
then
  source pi.sh
elif [ "$PLATFORM" == "Windows mingw64" ]
then
  source mingw.sh
else
  # PLATFORM may be user-supplied; compare case-insensitively for Android.
  PLATFORM_LOWER=$(echo "${PLATFORM}" | tr '[:upper:]' '[:lower:]')
  if [ "$PLATFORM_LOWER" == "android" ]
  then
    PLATFORM="Android"
    source android.sh
  else
    echo "The installation script doesn't support current system. (System: $(uname -a))"
    exit 1
  fi
fi
echo "################################################################################"
echo "################################################################################"
echo ""
echo ""
echo "AVS Device SDK $PLATFORM Script - Terms and Agreements"
echo ""
echo ""
echo "The AVS Device SDK is dependent on several third-party libraries, environments, "
echo "and/or other software packages that are installed using this script from "
echo "third-party sources (\"External Dependencies\"). These are terms and conditions "
echo "associated with the External Dependencies "
echo "(available at https://github.com/alexa/avs-device-sdk/wiki/Dependencies) that "
echo "you need to agree to abide by if you choose to install the External Dependencies."
echo ""
echo ""
echo "If you do not agree with every term and condition associated with the External "
echo "Dependencies, enter \"QUIT\" in the command line when prompted by the installer."
echo "Else enter \"AGREE\"."
echo ""
echo ""
echo "################################################################################"
echo "################################################################################"
# Ask the user to accept the third-party dependency terms.
# BUGFIX: 'read -r' keeps backslashes literal, and the test used an unquoted
# '[ $input == ... ]' which crashed with a syntax error on empty input
# (plain Enter).
read -r input
input=$(echo "$input" | awk '{print tolower($0)}')

if [ "$input" == 'quit' ]
then
  exit 1
elif [ "$input" == 'agree' ]
then
  echo "################################################################################"
  echo "Proceeding with installation"
  echo "################################################################################"
else
  # Anything other than AGREE/QUIT (including empty input) aborts.
  echo "################################################################################"
  echo 'Unknown option'
  echo "################################################################################"
  exit 1
fi
# First run (no build dir yet): install dependencies, create the layout,
# clone the SDK and build. Subsequent runs just rebuild the two targets.
if [ ! -d "$BUILD_PATH" ]
then
  # Make sure required packages are installed
  echo "==============> INSTALLING REQUIRED TOOLS AND PACKAGE ============"
  echo
  install_dependencies
  # create / paths
  echo
  echo "==============> CREATING PATHS AND GETTING SOUND FILES ============"
  echo
  mkdir -p $SOURCE_PATH
  mkdir -p $THIRD_PARTY_PATH
  mkdir -p $SOUNDS_PATH
  mkdir -p $DB_PATH
  # Platform hook sourced earlier (pi.sh / mingw.sh / android.sh).
  run_os_specifics
  if [ ! -d "${SOURCE_PATH}/avs-device-sdk" ]
  then
    #get sdk
    echo
    echo "==============> CLONING SDK =============="
    echo
    cd $SOURCE_PATH
    git clone --single-branch $CLONE_URL avs-device-sdk
  fi
  # make the SDK
  echo
  echo "==============> BUILDING SDK =============="
  echo
  mkdir -p $BUILD_PATH
  cd $BUILD_PATH
  # CMAKE_PLATFORM_SPECIFIC is an array filled in by the platform hook.
  cmake "$SOURCE_PATH/avs-device-sdk" \
  -DCMAKE_BUILD_TYPE=DEBUG \
  "${CMAKE_PLATFORM_SPECIFIC[@]}"
  cd $BUILD_PATH
  make SampleApp -j2
  make PreviewAlexaClient -j2
else
  # Build dir already exists: incremental rebuild only.
  cd $BUILD_PATH
  make SampleApp -j2
  make PreviewAlexaClient -j2
fi
echo
echo "==============> SAVING CONFIGURATION FILE =============="
echo
# Create configuration file with audioSink configuration at the beginning of the file
cat << EOF > "$OUTPUT_CONFIG_FILE"
{
"gstreamerMediaPlayer":{
"audioSink":"$GSTREAMER_AUDIO_SINK"
},
EOF
cd $INSTALL_BASE
# Generate the device-specific part of AlexaClientSDKConfig.json.
# NOTE(review): this passes the literal file name 'config.json' rather than
# "$CONFIG_JSON_FILE" captured from $1 earlier -- confirm this is
# intentional, as it silently ignores the path the user supplied unless it
# is ./config.json.
bash genConfig.sh config.json $DEVICE_SERIAL_NUMBER $CONFIG_DB_PATH $SOURCE_PATH/avs-device-sdk $TEMP_CONFIG_FILE \
-DSDK_CONFIG_MANUFACTURER_NAME="$DEVICE_MANUFACTURER_NAME" -DSDK_CONFIG_DEVICE_DESCRIPTION="$DEVICE_DESCRIPTION"
# Delete first line from temp file to remove opening bracket
sed -i -e "1d" $TEMP_CONFIG_FILE
# Append temp file to configuration file
cat $TEMP_CONFIG_FILE >> $OUTPUT_CONFIG_FILE
# Delete temp file
rm $TEMP_CONFIG_FILE
echo
echo "==============> FINAL CONFIGURATION =============="
echo
cat $OUTPUT_CONFIG_FILE
generate_start_script
generate_test_script
echo " **** Completed Configuration/Build ***"
|
<reponame>agus-setiawan-desu/brapi-Java-TestServer<gh_stars>1-10
package org.brapi.test.BrAPITestServer.service;
import java.util.ArrayList;
import java.util.List;
import org.springframework.data.domain.Page;
import org.springframework.data.domain.PageRequest;
import org.springframework.data.domain.Pageable;
import org.springframework.data.domain.Sort;
import io.swagger.model.IndexPagination;
import io.swagger.model.Metadata;
/**
 * Helpers for translating BrAPI paging metadata to and from Spring Data
 * {@link Pageable} requests.
 */
public class PagingUtility {

	/**
	 * Fills in totalPages from totalCount and pageSize.
	 *
	 * BUGFIX: the original '(totalCount / pageSize) + 1' over-counted by one
	 * page whenever totalCount was an exact multiple of pageSize (e.g. 10
	 * items / page size 5 reported 3 pages), and threw ArithmeticException
	 * for pageSize 0. Ceiling division is used instead; a non-positive page
	 * size yields 0 pages.
	 */
	public static void calculateMetaData(Metadata metaData) {
		int totalCount = metaData.getPagination().getTotalCount();
		int pageSize = metaData.getPagination().getPageSize();
		int totalPages = pageSize > 0 ? (totalCount + pageSize - 1) / pageSize : 0;
		metaData.getPagination().setTotalPages(totalPages);
	}

	/**
	 * Builds an unsorted page request from (possibly null/empty) metadata.
	 */
	public static Pageable getPageRequest(Metadata metaData) {
		if (metaData == null) {
			metaData = new Metadata();
		}
		if (metaData.getPagination() == null) {
			metaData.setPagination(new IndexPagination());
		}
		return getPageRequest(metaData, null);
	}

	/**
	 * Builds a page request from the metadata's pagination block, falling
	 * back to page 0 / size 1000 when the client supplied nothing usable.
	 *
	 * @param sort optional sort order; omitted from the request when null
	 */
	public static Pageable getPageRequest(Metadata metaData, Sort sort) {
		int page = 0;
		if (metaData.getPagination().getCurrentPage() != null && metaData.getPagination().getCurrentPage() >= 0) {
			page = metaData.getPagination().getCurrentPage();
		}
		int pageSize = 1000;
		if (metaData.getPagination().getPageSize() != null && metaData.getPagination().getPageSize() > 0) {
			pageSize = metaData.getPagination().getPageSize();
		}
		if (sort == null) {
			return PageRequest.of(page, pageSize);
		}
		return PageRequest.of(page, pageSize, sort);
	}

	/**
	 * Copies paging information from a Spring Data {@link Page} result into
	 * the BrAPI metadata.
	 *
	 * NOTE: reassigning a null {@code metaData} parameter only protects the
	 * statements below from an NPE; the caller's reference is unchanged.
	 */
	public static void calculateMetaData(Metadata metaData, Page<?> page) {
		if (metaData == null) {
			metaData = new Metadata();
		}
		if (metaData.getPagination() == null) {
			metaData.setPagination(new IndexPagination());
		}
		// metaData.getPagination().setPageSize(page.getNumberOfElements());
		metaData.getPagination().setCurrentPage(page.getNumber());
		metaData.getPagination().setTotalCount((int) page.getTotalElements());
		metaData.getPagination().setTotalPages((int) page.getTotalPages());
	}

	/**
	 * Returns the sub-list for the page described by {@code metadata},
	 * updating the metadata's counts in the process. Returns the input list
	 * unchanged when either argument is null; returns an empty list when the
	 * requested page starts beyond the end of the list.
	 */
	public static <T> List<T> paginateSimpleList(List<T> list, Metadata metadata) {
		if (list != null && metadata != null) {
			metadata.getPagination().setTotalCount(list.size());
			calculateMetaData(metadata);
			List<T> subList = new ArrayList<>();
			int fromIndex = metadata.getPagination().getCurrentPage() * metadata.getPagination().getPageSize();
			int toIndex = fromIndex + metadata.getPagination().getPageSize();
			if (fromIndex < list.size()) {
				if (toIndex >= list.size()) {
					toIndex = list.size();
				}
				subList = list.subList(fromIndex, toIndex);
			}
			return subList;
		}
		return list;
	}
}
|
import requests, json, copy
from rave_python.rave_base import RaveBase
from rave_python.rave_misc import checkIfParametersAreComplete, generateTransactionReference
from rave_python.rave_exceptions import ServerError, IncompletePaymentDetailsError, SubaccountCreationError, PlanStatusError
class SubAccount(RaveBase) :
    def __init__(self, publicKey, secretKey, production, usingEnv):
        # All subaccount endpoints exchange JSON payloads.
        self.headers = {
            'content-type': 'application/json'
        }
        # Base class handles key storage, environment and endpoint mapping.
        super(SubAccount, self).__init__(publicKey, secretKey, production, usingEnv)
def _preliminaryResponseChecks(self, response, TypeOfErrorToRaise, name):
# Check if we can obtain a json
try:
responseJson = response.json()
except:
raise ServerError({"error": True, "name": name, "errMsg": response})
# Check if the response contains data parameter
if not responseJson.get("data", None):
raise TypeOfErrorToRaise({"error": True, "name": name, "errMsg": responseJson.get("message", "Server is down")})
# Check if it is returning a 200
if not response.ok:
errMsg = responseJson["data"].get("message", None)
raise TypeOfErrorToRaise({"error": True, "errMsg": errMsg})
return responseJson
    def _handleCreateResponse(self, response, accountDetails):
        """Parse a subaccount-creation response.

        Returns ``{"error": False, "id": ..., "data": ...}`` on success;
        raises SubaccountCreationError otherwise.
        """
        responseJson = self._preliminaryResponseChecks(response, SubaccountCreationError, accountDetails["business_email"])
        if responseJson["status"] == "success":
            return {"error": False, "id": responseJson["data"].get("id", None), "data": responseJson["data"]}
        else:
            raise SubaccountCreationError({"error": True, "data": responseJson["data"]})
    # This makes and handles all status/listing requests for subaccounts
    # (GET by default, POST when isPostRequest is True).
    def _handleAccountStatusRequests(self, type, endpoint, isPostRequest=False, data=None):
        """Issue the request and return ``{"error": False, "returnedData": ...}``.

        Raises ServerError when the body is not JSON and PlanStatusError on
        a non-2xx HTTP status.
        """
        # Checks if it is a post request
        if isPostRequest:
            response = requests.post(endpoint, headers=self.headers, data=json.dumps(data))
        else:
            response = requests.get(endpoint, headers=self.headers)

        # Checks if the body can be parsed as json
        try:
            responseJson = response.json()
        except:
            raise ServerError({"error": True, "errMsg": response.text })

        # Checks if it returns a 2xx code
        if response.ok:
            return {"error": False, "returnedData": responseJson}
        else:
            raise PlanStatusError(type, {"error": True, "returnedData": responseJson })
#function to create a payment plan
#Params: accountDetails - a dict containing account_bank, account_number, business_name, business_email, business_contact, business_contact_mobile, business_mobile, split_type, split_value
#if duration is not passed, any subscribed customer will be charged #indefinitely
def create(self, accountDetails):
# Performing shallow copy of planDetails to avoid public exposing payload with secret key
accountDetails = copy.copy(accountDetails)
accountDetails.update({"seckey": self._getSecretKey()})
requiredParameters = ["account_bank", "account_number", "business_name", "business_email", "business_contact", "business_contact_mobile", "business_mobile", "split_type", "split_value"]
checkIfParametersAreComplete(requiredParameters, accountDetails)
endpoint = self._baseUrl + self._endpointMap["subaccount"]["create"]
response = requests.post(endpoint, headers=self.headers, data=json.dumps(accountDetails))
return self._handleCreateResponse(response, accountDetails)
#gets all subaccounts connected to a merchant's account
def all(self):
endpoint = self._baseUrl + self._endpointMap["subaccount"]["list"] + "?seckey="+self._getSecretKey()
return self._handleAccountStatusRequests("List", endpoint)
def fetch(self, subaccount_id):
if not subaccount_id:
return "No subaccount id supplied. Kindly pass one in"
endpoint = self._baseUrl + self._endpointMap["subaccount"]["fetch"] + "/" +str(subaccount_id) + "?seckey="+self._getSecretKey()
return self._handleAccountStatusRequests("Fetch", endpoint)
def edit(self, Subaccount_id, newData={}):
if not id:
return "Plan id was not supplied. Kindly supply one"
endpoint = self._baseUrl + self._endpointMap["subaccount"]["update"]
data = {
"seckey": self._getSecretKey(),
"account_number": newData.get("account_number", None),
"account_bank": newData.get("account_bank", None),
"business_name": newData.get("business_name", None),
"business_email": newData.get("business_email", None),
"split_type": newData.get("split_type", None),
"split_value": newData.get("split_value", None),
}
return self._handlePlanStatusRequests("Edit", endpoint, isPostRequest=True, data=data)
def cancel(self, subaccount_id):
if not subbacount_id:
return "Subaccount id was not supplied. Kindly supply one"
endpoint = self._baseUrl + self._endpointMap["subaccount"]["delete"]
data = {
"seckey": self._getSecretKey(),
"id": subaccount_id,
}
return self._handlePlanStatusRequests("Cancel", endpoint, isPostRequest=True, data=data)
|
<reponame>AlekseyGoremykin/koa-api-starter
const Joi = require('@hapi/joi');
const { v4: uuidv4 } = require('uuid');
const validate = require('middlewares/validate');
const writerService = require('resources/writers/writer.service');
// Validation schema for a single book: non-empty title, genre restricted
// to the two supported values.
const bookSchema = Joi.object({
  title: Joi.string()
    .trim()
    .required()
    .messages({
      'string.empty': 'title is required',
    }),
  genre: Joi.string()
    .required()
    .valid('novel', 'poem'),
});
// Payload for bulk replacement: an optional array of books.
const booksSchema = Joi.object({
  books: Joi.array()
    .items(bookSchema),
});
// Used by routes that accept no body (e.g. DELETE).
const emptySchema = Joi.object();
// Middleware: rejects the request with 400 (and an errors payload) when
// the :writerId path param does not reference an existing writer;
// otherwise passes control to the next handler.
async function validator(ctx, next) {
  const { writerId } = ctx.params;
  const isWriterExists = await writerService.exists({
    _id: writerId,
  });
  if (!isWriterExists) {
    // Body is set before ctx.throw so the error response carries details.
    ctx.body = {
      errors: {
        writerId: ['Writer is not found'],
      },
    };
    ctx.throw(400);
  }
  await next();
}
// POST handler: append one validated book (with a freshly generated _id)
// to the writer's embedded books array and echo the book back.
async function handleCreate(ctx) {
  const { writerId } = ctx.params;
  const book = ctx.validatedData;
  book._id = uuidv4();
  // $push is atomic at the document level, so concurrent appends are safe.
  await writerService.atomic.update(
    { _id: writerId },
    { $push: { books: book } },
  );
  ctx.body = book;
}
// PUT handler: replace the writer's entire books array with the validated
// list; every book gets a new _id (existing ids are not preserved).
async function handleUpdateAll(ctx) {
  const { writerId } = ctx.params;
  const { books } = ctx.validatedData;
  // books is optional in the schema — treat a missing array as empty.
  const newBooks = (books || []).map((book) => ({ ...book, _id: uuidv4() }));
  await writerService.atomic.update(
    { _id: writerId },
    { $set: { books: newBooks } },
  );
  ctx.body = newBooks;
}
// DELETE handler: remove one book by _id from the writer's books array.
// NOTE(review): unlike the POST/PUT routes, the DELETE route is registered
// without the writer-existence `validator`, so a missing writer silently
// yields an empty success body — confirm this is intentional.
async function handleDelete(ctx) {
  const { writerId, bookId } = ctx.params;
  await writerService.atomic.update(
    { _id: writerId },
    { $pull: { books: { _id: bookId } } },
  );
  ctx.body = {};
}
// Mounts the nested book routes under a writer resource; `validate` runs
// before `validator`, so schema errors short-circuit the DB lookup.
module.exports.register = (router) => {
  router.post('/:writerId/books', validate(bookSchema), validator, handleCreate);
  router.put('/:writerId/books', validate(booksSchema), validator, handleUpdateAll);
  router.delete('/:writerId/books/:bookId', validate(emptySchema), handleDelete);
};
|
// Fail fast if this bundle is evaluated before the A-Frame library —
// every module below registers components against the global AFRAME.
if (typeof AFRAME === 'undefined') {
  throw new Error('Component attempted to register before AFRAME was available.');
}
// Each require() is executed purely for its side effect of registering
// a component/system; nothing is exported from them here.
require('./altspace');
require('./altspace-cursor-collider');
require('./altspace-tracked-controls');
require('./native-components');
require('./native-resources');
require('./sync');
require('./sync-system');
require('./sync-transform');
require('./sync-color');
require('./sync-n-sound');
require('./sync-n-skeleton-parent');
require('./one-per-user');
require('./instantiator');
require('./wire');
|
<reponame>cschladetsch/KAI
#pragma once
#include <KAI/Executor/Operation.h>
#include <KAI/Executor/Continuation.h>
#include <KAI/Executor/SignedContinuation.h>
#include <KAI/Executor/Compiler.h>
|
<filename>src/rasperi_opengl_reference_rasterizer/rasperi_opengl_sky_box_shader.h<gh_stars>0
/* ---------------------------------------------------------------- *
<NAME> <<EMAIL>>
The definition of kuu::rasperi::OpenGLSkyBox class
* ---------------------------------------------------------------- */
#pragma once
#include <memory>
#include <glad/glad.h>
#include <glm/vec2.hpp>
#include <glm/mat4x4.hpp>
namespace kuu
{
namespace rasperi
{
/* ---------------------------------------------------------------- *
* ---------------------------------------------------------------- */
/* ---------------------------------------------------------------- *
 * Sky-box shader wrapper. Callers set the two matrices and call
 * use() before issuing sky-box draw calls. All OpenGL state/handles
 * live in the hidden Impl (PIMPL keeps GL types out of this header).
 * ---------------------------------------------------------------- */
class OpenGLSkyBoxShader
{
public:
    OpenGLSkyBoxShader();
    // Activates the shader program; presumably also uploads viewMatrix /
    // projectionMatrix as uniforms — confirm in the .cpp implementation.
    void use();
    glm::mat4 viewMatrix;
    glm::mat4 projectionMatrix;
private:
    struct Impl;                 // opaque implementation (GL program etc.)
    std::shared_ptr<Impl> impl;  // shared so copies reuse the same program
};
} // namespace rasperi
} // namespace kuu
|
# Evaluate the fine-tuned LM on WikiText-103 validation data with the
# noun/verb-fill augmentation (batch size 1, drop last partial batch);
# results go to the eval-outputs directory named after the configuration.
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-shuffled-N/13-model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-shuffled-N/13-1024+0+512-N-VB-fill-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function remove_all_but_nouns_and_verbs_fill_first_two_thirds_sixth --eval_function last_sixth_eval
use std::collections::HashMap;
/// One segment of a parsed parameter description. String/Number variants
/// carry the observed key -> value samples for that segment.
enum Part {
    Bool,
    Uuid,
    String(HashMap<String, String>),
    Number(HashMap<String, i32>),
}
/// Describes how a string-valued field is shaped.
///
/// BUG FIX: `Debug` is derived on both enums below — `main` prints the
/// result vector with `{:?}`, which previously failed to compile because
/// neither type implemented `Debug`.
#[derive(Debug)]
enum StringDescriptor {
    /// A UUID of the given version (e.g. 4).
    Uuid(usize),
    /// Literal patterns observed for the field.
    Patterns(Vec<String>),
}

/// The descriptor ultimately produced for each parameter.
#[derive(Debug)]
enum ValueDescriptor {
    Bool,
    String(StringDescriptor),
}
/// Placeholder for real pattern inference: currently just wraps the raw
/// keys as literal patterns.
fn search_for_patterns(keys: Vec<String>) -> StringDescriptor {
    // Implement pattern search logic here
    StringDescriptor::Patterns(keys)
}
/// Builds a descriptor for every part whose corresponding name in `pp`
/// starts with the "blst_param_" prefix; other parts are skipped.
///
/// BUG FIX: the previous version indexed `pp[i]` and panicked whenever
/// `pp` was shorter than `parts`; pairing with `zip` makes the extra
/// entries on either side simply ignored.
fn process_parts(parts: Vec<Part>, pp: Vec<&str>) -> Vec<ValueDescriptor> {
    let mut result = Vec::new();
    for (part, name) in parts.iter().zip(pp.iter()) {
        if !name.starts_with("blst_param_") {
            continue;
        }
        match part {
            Part::Bool => result.push(ValueDescriptor::Bool),
            // UUIDs are fixed to version 4 here.
            Part::Uuid => result.push(ValueDescriptor::String(StringDescriptor::Uuid(4))),
            Part::String(hm) => {
                let keys = hm.keys().cloned().collect();
                result.push(ValueDescriptor::String(search_for_patterns(keys)));
            }
            // NOTE(review): numeric parts are also funneled through the
            // string-pattern path — presumably intentional, worth confirming.
            Part::Number(hm) => {
                let keys = hm.keys().cloned().collect();
                result.push(ValueDescriptor::String(search_for_patterns(keys)));
            }
        }
    }
    result
}
/// Demo driver: only the first two names carry the "blst_param_" prefix,
/// so only Bool and Uuid descriptors are produced and printed.
fn main() {
    let parts = vec![
        Part::Bool,
        Part::Uuid,
        Part::String(HashMap::new()),
        Part::Number(HashMap::new()),
    ];
    let pp = vec!["blst_param_bool", "blst_param_uuid", "other_string", "other_number"];
    let result = process_parts(parts, pp);
    println!("{:?}", result);
}
"use strict";
/**
* Sample React Native App
* https://github.com/facebook/react-native
*
* Generated with the TypeScript template
* https://github.com/emin93/react-native-template-typescript
*
* @format
*/
// TypeScript-compiler interop helper (emitted, do not hand-edit): wraps a
// CommonJS module so `import * as X` works — copies own properties and
// exposes the module itself under "default".
var __importStar = (this && this.__importStar) || function (mod) {
    if (mod && mod.__esModule) return mod;
    var result = {};
    if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
    result["default"] = mod;
    return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
var react_1 = __importStar(require("react"));
var react_native_1 = require("react-native");
var instructions = react_native_1.Platform.select({
ios: 'Press Cmd+R to reload,\n' + 'Cmd+D or shake for dev menu',
android: 'Double tap R on your keyboard to reload,\n' +
'Shake or press menu button for dev menu',
});
// Root component (compiled from App.tsx): shows the welcome text plus a
// counter wired to a button via the useState hook.
exports.App = function () {
    var _a = react_1.useState(0), count = _a[0], setCount = _a[1];
    return (react_1.default.createElement(react_native_1.View, { style: styles.container },
        react_1.default.createElement(react_native_1.Text, { style: styles.welcome }, "Welcome to React Native!"),
        react_1.default.createElement(react_native_1.Text, { style: styles.instructions }, "To get started, edit App.tsx"),
        react_1.default.createElement(react_native_1.Text, { style: styles.instructions }, count),
        react_1.default.createElement(react_native_1.Button, { title: "increment", onPress: function () { return setCount(count + 1); } })));
};
// Static style sheet for the App component above.
var styles = react_native_1.StyleSheet.create({
    container: {
        flex: 1,
        justifyContent: 'center',
        alignItems: 'center',
        backgroundColor: '#F5FCFF',
    },
    welcome: {
        fontSize: 20,
        textAlign: 'center',
        margin: 10,
    },
    instructions: {
        textAlign: 'center',
        color: '#333333',
        marginBottom: 5,
    },
});
|
<filename>JavaScript Algorithms and Data Structures Certification (300 hours)/ES6 Challenges/14. Use Destructuring Assignment to Assign Variables from Arrays.js
// Use destructuring assignment to swap the values of a and b so that a
// receives the value stored in b, and b receives the value stored in a.
// Value of a should be 6, after swapping.
// Value of b should be 8, after swapping.
// You should use array destructuring to swap a and b.
let a = 8, b = 6;
// change code below this line
// Array destructuring swaps in one statement, no temp variable needed.
[a, b] = [b, a];
// change code above this line
console.log(a); // should be 6
console.log(b); // should be 8
|
#!/bin/bash
# Emit artifactory download URLs for contrail packages.
#   $1 (FILE)    - file containing one release/build id per line
#   $2 (RELEASE) - optional single release id; when given, FILE is ignored
# BUG FIX: the test was written `[ -z "${RELEASE}"]` (no space before the
# closing bracket), which made `[` fail at runtime and always take the
# else branch. Expansions are now quoted and `read -r` is used so
# backslashes in input lines are preserved.
FILE=$1
RELEASE=$2
export FILE
export RELEASE
input=$FILE
base="http://svl-artifactory.juniper.net/artifactory/contrail-static-prod"
if [ -z "${RELEASE}" ]
then
  while IFS= read -r line
  do
    echo "${base}/${line}/contrail-vrouter-agent.tgz"
    echo "${base}/${line}/control-node.tgz"
  done < "$input"
else
  echo "${base}/${RELEASE}/contrail-vrouter-agent.tgz"
  echo "${base}/${RELEASE}/control-node.tgz"
fi
|
<filename>controller/annotations/service/abortOnClose.go
package service
import (
"github.com/haproxytech/client-native/v2/models"
"github.com/haproxytech/kubernetes-ingress/controller/utils"
)
// AbortOnClose handles the "abortonclose" annotation for a single
// backend: it parses the annotation value and toggles the corresponding
// HAProxy backend option.
type AbortOnClose struct {
	name    string          // annotation name this handler answers to
	backend *models.Backend // backend whose Abortonclose field is updated
}

// NewAbortOnClose builds a handler bound to annotation name n and backend b.
func NewAbortOnClose(n string, b *models.Backend) *AbortOnClose {
	return &AbortOnClose{name: n, backend: b}
}

// GetName returns the annotation name this handler processes.
func (a *AbortOnClose) GetName() string {
	return a.name
}
// Process applies the annotation value to the backend: a parseable truthy
// input enables abortonclose, anything else (including an empty input)
// disables it. A malformed non-empty value is reported as an error.
func (a *AbortOnClose) Process(input string) error {
	enabled := false
	if input != "" {
		value, err := utils.GetBoolValue(input, "abortonclose")
		if err != nil {
			return err
		}
		enabled = value
	}
	status := "disabled"
	if enabled {
		status = "enabled"
	}
	a.backend.Abortonclose = status
	return nil
}
|
<reponame>borisbsu/cloudwatch-logback-appender
package com.j256.cloudwatchlogbackappender;
import ch.qos.logback.classic.pattern.ClassicConverter;
import ch.qos.logback.classic.spi.ILoggingEvent;
/**
 * Get the value of a JVM system property, the name of which is the {option}.
 * (Despite the class's historical description, this reads System.getProperty,
 * not environment variables.)
 *
 * @author graywatson
 */
public class SystemPropertyConverter extends ClassicConverter {

	// Name of the JVM system property to look up; taken from the first
	// converter option at start() time.
	private String propertyName;

	@Override
	public void start() {
		super.start();
		propertyName = getFirstOption();
	}

	@Override
	public String convert(ILoggingEvent event) {
		// Literal "null" mirrors logback's convention for missing values,
		// both when no option was configured and when the property is unset.
		if (propertyName == null) {
			return "null";
		} else {
			return System.getProperty(propertyName, "null");
		}
	}
}
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<h1>Welcome!</h1>
<p>This is a test page.</p>
</body>
</html> |
import pandas as pd
def sum_nested_series(df):
# Create a new DataFrame to store the sums
sum_df = pd.DataFrame(columns=df.columns, index=df.index)
# Iterate over each column in the input DataFrame
for col in df.columns:
# Apply a lambda function to sum the values in each nested Series
sum_df[col] = df[col].apply(lambda x: sum(x))
return sum_df |
<gh_stars>0
package com.rudikershaw.gitbuildhook;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import org.apache.maven.it.Verifier;
import org.eclipse.jgit.api.Git;
import org.eclipse.jgit.storage.file.FileRepositoryBuilder;
import org.junit.Test;
/** Unit and integration tests for the GitBuildHookMojo. */
/** Unit and integration tests for the GitBuildHookMojo. */
public class GitConfigMojoTest extends AbstractMojoTest {

    /**
     * Runs a full `install` on a temp copy of the test-project-configure
     * project, then asserts the plugin wrote both the core.hooksPath and
     * the custom.config.name entries into the repository's git config.
     *
     * @throws Exception if a temp project cannot be created for testing.
     */
    @Test
    public void testConfigureGitHooksDirectory() throws Exception {
        moveToTempTestDirectory("test-project-configure", "pom.xml");

        final File rootFolder = getFolder().getRoot();
        assertTrue(rootFolder.exists());

        // Execute the maven build in the temp project and require a clean log.
        final Verifier verifier = getVerifier(rootFolder.toString());
        verifier.executeGoal("install");
        verifier.verifyErrorFreeLog();
        verifier.assertFilePresent(".git");
        verifier.resetStreams();

        // Open the initialised repo and inspect the configuration the
        // plugin is expected to have written.
        final FileRepositoryBuilder repoBuilder = new FileRepositoryBuilder();
        repoBuilder.findGitDir(rootFolder);
        try (Git git = Git.open(repoBuilder.getGitDir())) {
            assertEquals("hooks-path/", git.getRepository().getConfig().getString("core", null, "hooksPath"));
            assertEquals("custom", git.getRepository().getConfig().getString("custom", "config", "name"));
        }
    }
}
|
<filename>jack/tests/test.cpp
/*
Copyright (C) 2005 <NAME> for GRAME
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/** @file jack_test.c
*
* @brief This client test the jack API.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <string.h>
#include <getopt.h>
#include <math.h>
#include <assert.h>
#include <stdarg.h>
#include <jack/jack.h>
#include <jack/intclient.h>
#include <jack/transport.h>
#if defined(WIN32) && !defined(M_PI)
// Fallback for Windows builds where <math.h> does not define M_PI.
// BUG FIX: the previous value was 3.151592653 — a typo for pi (3.1415...),
// which skewed the generated sine test signal on Windows.
#define M_PI 3.14159265358979323846
#endif
#ifdef WIN32
#define jack_sleep(val) Sleep((val))
#else
#define jack_sleep(val) usleep((val) * 1000)
#endif
typedef struct
{
jack_nframes_t ft; // running counter frame time
jack_nframes_t fcs; // from sycle start...
jack_nframes_t lft; // last frame time...
}
FrameTimeCollector;
FILE *file;
FrameTimeCollector* framecollect;
FrameTimeCollector perpetualcollect;
FrameTimeCollector lastperpetualcollect;
int frames_collected = 0;
// ports
jack_port_t *output_port1;
jack_port_t *output_port1b;
jack_port_t *input_port2;
jack_port_t *output_port2;
jack_port_t *input_port1;
// clients
jack_client_t *client1;
jack_client_t *client2;
const char *client_name1;
const char *client_name2;
unsigned long sr; // sample rate
// for time -t option
int time_to_run = 0;
int time_before_exit = 1;
// standard error count
int t_error = 0;
int reorder = 0; // graph reorder callback
int RT = 0; // is real time or not...
int FW = 0; // freewheel mode
int init_clbk = 0; // init callback
int port_rename_clbk = 0; // portrename callback
int i, j, k = 0;
int port_callback_reg = 0;
jack_nframes_t cur_buffer_size, old_buffer_size, cur_pos;
int activated = 0;
int count1, count2 = 0; // for freewheel
int xrun = 0;
int have_xrun = 0; // msg to tell the process1 function to write a special thing in the frametime file.
int process1_activated = -1; // to control processing...
int process2_activated = -1; // to control processing...
unsigned long int index1 = 0;
unsigned long int index2 = 0;
jack_default_audio_sample_t *signal1; // signal source d'emission
jack_default_audio_sample_t *signal2; // tableau de reception
jack_transport_state_t ts;
jack_position_t pos;
jack_position_t request_pos;
int silent_error = 0; // jack silent mode
int verbose_mode = 0;
int transport_mode = 1;
jack_nframes_t input_ext_latency = 0; // test latency for PHY devices
jack_nframes_t output_ext_latency = 0; // test latency for PHY devices
int sync_called = 0;
int starting_state = 1;
int linecount = 0; // line counter for log file of sampleframe counter --> for graph function.
int linebuf = 0; // reminders for graph analysis
int linetransport = 0;
int linefw = 0;
int lineports = 0;
int linecl2 = 0;
int client_register = 0;
/**
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
Callbacks & basics functions
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*/
// Print command-line help to stderr and terminate with status 1.
void usage()
{
    fprintf (stderr, "\n\n"
             "usage: jack_test \n"
             " [ --time OR -t time_to_run (in seconds) ]\n"
             " [ --quiet OR -q (quiet mode : without jack server errors) ]\n"
             " [ --verbose OR -v (verbose mode : no details on tests done. Only main results & errors) ]\n"
             " [ --transport OR -k (Do not test transport functions.) ]\n"
             " --realtime OR -R (jack is in rt mode)\n\n\n"
            );
    exit(1);
}
// printf-style logger gated on the global verbose_mode flag; messages go
// to stderr. A no-op when verbose mode is off.
void Log(const char *fmt, ...)
{
    if (!verbose_mode) {
        return;
    }
    va_list args;
    va_start(args, fmt);
    vfprintf(stderr, fmt, args);
    va_end(args);
}
// Snapshot client1's three time readings (last frame time, current frame
// time, frames since cycle start) into the given collector.
void Collect(FrameTimeCollector* TheFrame)
{
    TheFrame->lft = jack_last_frame_time(client1);
    TheFrame->ft = jack_frame_time(client1);
    TheFrame->fcs = jack_frames_since_cycle_start(client1);
}
// Thread-init callback: records (via init_clbk) that it fired, and logs
// the id of the thread it ran on.
void Jack_Thread_Init_Callback(void *arg)
{
#ifdef WIN32
    Log("Init callback has been successfully called from thread = %x. (msg from callback)\n", GetCurrentThread());
#else
    Log("Init callback has been successfully called from thread = %x. (msg from callback)\n", pthread_self());
#endif
    init_clbk = 1;
}
// Freewheel callback: mirrors the starting/stopping flag into global FW
// (process1 suspends its frame-time logging while FW != 0).
void Jack_Freewheel_Callback(int starting, void *arg)
{
    Log("Freewheel callback has been successfully called with value %i. (msg from callback)\n", starting);
    FW = starting;
}
// Client (de)registration callback: keeps a running count of registered
// clients in the global client_register.
void Jack_Client_Registration_Callback(const char* name, int val, void *arg)
{
    Log("Client registration callback name = %s has been successfully called with value %i. (msg from callback)\n", name, val);
    if (val)
        client_register++;
    else
        client_register--;
}
// Port-rename callback: records that it fired (port_rename_clbk) so the
// test in main can assert on it. Always accepts the rename (returns 0).
int Jack_Port_Rename_Callback(jack_port_id_t port, const char* old_name, const char* new_name, void *arg)
{
    Log("Rename callback has been successfully called with old_name '%s' and new_name '%s'. (msg from callback)\n", old_name, new_name);
    port_rename_clbk = 1;
    return 0;
}
// Buffer-size callback: refreshes the cached cur_buffer_size global.
// NOTE(review): the nframes argument is ignored and the size is re-queried
// from client1 instead — presumably equivalent, but worth confirming.
int Jack_Update_Buffer_Size(jack_nframes_t nframes, void *arg)
{
    cur_buffer_size = jack_get_buffer_size(client1);
    Log("Buffer size = %d (msg from callback)\n", cur_buffer_size);
    return 0;
}
// Xrun callback: counts xruns and raises have_xrun so process1 marks the
// next logged frame-time line.
int Jack_XRun_Callback(void *arg)
{
    xrun++;
    have_xrun = 1;
    Log("Xrun has been detected ! (msg from callback)\n");
    return 0;
}
// Graph-order callback: simply counts invocations in the reorder global.
int Jack_Graph_Order_Callback(void *arg)
{
    reorder++;
    return 0;
}
// Sample-rate callback: logs the new rate; no state is updated here.
int Jack_Sample_Rate_Callback(jack_nframes_t nframes, void *arg)
{
    Log("Sample rate : %i.\n", nframes);
    return 0;
}
// Error callback: forwards jack server error messages to stderr unless
// quiet mode (silent_error) is enabled.
void Jack_Error_Callback(const char *msg)
{
    if (silent_error == 0) {
        fprintf(stderr, "error : %s (msg from callback)\n", msg);
    }
}
// Shutdown callback: the server kicked this client out; abort the test.
void jack_shutdown(void *arg)
{
    printf("Jack_test has been kicked out by jackd !\n");
    exit(1);
}

// Info-shutdown callback: the server itself failed; abort with its reason.
void jack_info_shutdown(jack_status_t code, const char* reason, void *arg)
{
    printf("JACK server failure : %s\n", reason);
    exit(1);
}
// Port (de)registration callback: counts events in port_callback_reg.
void Jack_Port_Register(jack_port_id_t port, int mode, void *arg)
{
    port_callback_reg++;
}

// Port connect/disconnect callback: log-only, no state changes.
void Jack_Port_Connect(jack_port_id_t a, jack_port_id_t b, int connect, void* arg)
{
    Log("PortConnect src = %ld dst = %ld onoff = %ld (msg from callback)\n", a, b, connect);
}
// Slow-sync callback for the transport tests. While starting_state is
// non-zero it "holds" (returns 0 = not ready) so the test can observe the
// JackTransportStarting phase; once starting_state is cleared it releases
// (returns 1 = ready). sync_called counts Starting-state invocations.
int Jack_Sync_Callback(jack_transport_state_t state, jack_position_t *pos, void *arg)
{
    int res = 0;

    switch (state) {

        case JackTransportStarting:
            sync_called++;
            if (starting_state == 0) {
                Log("sync callback : Releasing status : now ready...\n");
                res = 1;
            } else {
                // Only log the hold once to keep the output readable.
                if (sync_called == 1) {
                    Log("sync callback : Holding status...\n");
                }
                res = 0;
            }
            break;

        case JackTransportStopped:
            Log("sync callback : JackTransportStopped...\n");
            res = 0;
            break;

        default:
            res = 0;
            break;
    }

    return res;
}
/**
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
processing functions
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
* Proccess1 is for client1
* 4 modes, activated with process1_activated
*
* -1 : idle mode
* 0 : write zeros to output 1
* 1 : write continuously signal1 (sinusoidal test signal) to output1
* 3 : mode for summation test. While record (done by process2) is not running, write signal1 to both out1 & out1b.
* when record begin (index2 > 0), write signal1 in phase opposition to out1 & out2
* 5 : Frames Time checking mode : write the array containing the three values of frame_time, frames cycles start and
* last frame time during 150 cycles.
*/
/**
 * Audio callback for client1 (modes selected via process1_activated; see
 * the mode table in the comment block above). Independently of the mode,
 * while freewheel is off it appends one frame-time-delta line per cycle
 * to `file`, marking cycles that follow an xrun with 2*buffer_size.
 */
int process1(jack_nframes_t nframes, void *arg)
{
    if (FW == 0) {
        Collect(&perpetualcollect);
        if (have_xrun) {
            fprintf(file, "%i %i\n", (perpetualcollect.ft - lastperpetualcollect.ft), (2*cur_buffer_size));
            have_xrun = 0;
        } else {
            fprintf(file, "%i 0\n", (perpetualcollect.ft - lastperpetualcollect.ft));
        }
        linecount++;
        lastperpetualcollect.ft = perpetualcollect.ft;
    }

    jack_default_audio_sample_t *out1;
    jack_default_audio_sample_t *out1b;

    activated++; // counter of callback activation

    // Mode 1: stream the 48000-sample test signal continuously to output 1.
    if (process1_activated == 1) {
        out1 = (jack_default_audio_sample_t *) jack_port_get_buffer (output_port1, nframes);
        for (jack_nframes_t p = 0; p < nframes; p++) {
            out1[p] = signal1[index1];
            index1++;
            if (index1 == 48000)
                index1 = 0;
        }
    }

    // Mode 3 (summation test): write signal1 to out1; out1b carries the
    // same signal until recording starts (index2 != 0), then its negation
    // so the server-side sum of the two ports cancels to zero.
    if (process1_activated == 3) {
        out1 = (jack_default_audio_sample_t *) jack_port_get_buffer (output_port1, nframes);
        out1b = (jack_default_audio_sample_t *) jack_port_get_buffer (output_port1b, nframes);
        for (jack_nframes_t p = 0; p < nframes; p++) {
            out1[p] = signal1[index1];
            if (index2 != 0) {
                out1b[p] = ( -1 * signal1[index1]);
            } else {
                out1b[p] = signal1[index1];
            }
            index1++;
            if (index1 == 48000)
                index1 = 0;
        }
    }

    // Mode 0: silence.
    if (process1_activated == 0) {
        out1 = (jack_default_audio_sample_t *) jack_port_get_buffer (output_port1, nframes);
        memset (out1, 0, sizeof (jack_default_audio_sample_t) * nframes); // write zeros to the output
    }

    // Mode 5: collect frame-time triples for ~800 cycles, then go idle.
    if (process1_activated == 5) {
        Collect(&framecollect[frames_collected]);
        frames_collected++;
        if (frames_collected > 798) {
            process1_activated = -1;
        }
    }
    return 0;
}
/**
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
* Proccess2 is for client2
* 3 modes, activated with process1_activated
*
* -1 : idle mode
* 0 : idle mode
* 1 : record in2 into signal2.(for first transmit test)
* 2 : record in2 into signal2 while send signal1 in out2. used dor Tie data test.
* 3 : record in2 into sigal2 for summation data test.
* In records modes, at the end of the record (signal2 is full), it stop the test, setting both activation states to -1.
*/
/**
 * Audio callback for client2 (modes via process2_activated; see the mode
 * table above). Recording modes fill the 96000-sample signal2 buffer and
 * deactivate themselves once it is full.
 */
int process2(jack_nframes_t nframes, void *arg)
{
    jack_default_audio_sample_t *out2;
    jack_default_audio_sample_t *in2;

    // Mode 1: record input so it can be compared against process1's output.
    if (process2_activated == 1) {
        in2 = (jack_default_audio_sample_t *) jack_port_get_buffer (input_port2, nframes);
        for (unsigned int p = 0; p < nframes; p++) {
            signal2[index2] = in2[p];
            if (index2 == 95999) {
                // Buffer full: stop both clients' active modes.
                process2_activated = 0;
                process1_activated = 0;
                //index2 = 0;
            } else {
                index2++;
            }
        }
    }

    // Mode 2 (tie test): send signal1 on out2 and record it straight back,
    // which also includes the jack loop latency.
    if (process2_activated == 2) {
        out2 = (jack_default_audio_sample_t *) jack_port_get_buffer (output_port2, nframes);
        in2 = (jack_default_audio_sample_t *) jack_port_get_buffer (input_port2, nframes);
        for (unsigned int p = 0; p < nframes; p++) {
            out2[p] = signal1[index1];
            index1++;
            if (index1 == 48000)
                index1 = 0;
            signal2[index2] = in2[p];
            if (index2 == 95999) {
                process2_activated = -1;
                //index2 = 0;
            } else {
                index2++;
            }
        }
    }

    // Mode 3: record the server-side sum of signal1 and its phase-opposed
    // copy (produced by process1 mode 3); expected result is silence.
    if (process2_activated == 3) {
        in2 = (jack_default_audio_sample_t *) jack_port_get_buffer (input_port2, nframes);
        for (unsigned int p = 0; p < nframes;p++) {
            signal2[index2] = in2[p];
            if (index2 == 95999) {
                process2_activated = 0;
                process1_activated = 0;
                //index2 = 0;
            } else {
                index2++;
            }
        }
    }
    return 0;
}
// Alternate thread model
// Alternate thread model: plain pass-through, copies input_port1's buffer
// to output_port1 unchanged.
static int _process (jack_nframes_t nframes)
{
    jack_default_audio_sample_t *in, *out;
    in = (jack_default_audio_sample_t *)jack_port_get_buffer (input_port1, nframes);
    out = (jack_default_audio_sample_t *)jack_port_get_buffer (output_port1, nframes);
    memcpy (out, in,
            sizeof (jack_default_audio_sample_t) * nframes);
    return 0;
}
// Dedicated processing thread for the alternate (jack_cycle_wait) model:
// waits for each cycle, logs the frame-time delta between cycles, runs
// _process and signals completion. Loops until the client is closed.
static void* jack_thread(void *arg)
{
    jack_client_t* client = (jack_client_t*) arg;
    jack_nframes_t last_thread_time = jack_frame_time(client);

    while (1) {
        jack_nframes_t frames = jack_cycle_wait(client);
        jack_nframes_t current_thread_time = jack_frame_time(client);
        jack_nframes_t delta_time = current_thread_time - last_thread_time;
        Log("jack_thread : delta_time = %ld\n", delta_time);
        int status = _process(frames);
        last_thread_time = current_thread_time;
        // Signal the end of this cycle with _process's status.
        jack_cycle_signal (client, status);
    }

    return 0;
}
// To test callback exiting
// Callback used to test self-termination: after 10 invocations it returns
// -1, which asks jack to deactivate the callback.
// BUG FIX: process3_call is an int but was printed with %ld — a varargs
// type mismatch (undefined behavior on LP64 platforms); now %d.
int process3(jack_nframes_t nframes, void *arg)
{
    static int process3_call = 0;

    if (process3_call++ > 10) {
        Log("process3 callback : exiting...\n");
        return -1;
    } else {
        Log("calling process3 callback : process3_call = %d\n", process3_call);
        return 0;
    }
}
// Callback validating jack_frame_time(): the delta between consecutive
// cycles should equal the buffer size within a 10% tolerance; deviations
// are reported on stdout. (Statics are initialised on first invocation —
// valid here because this file compiles as C++.)
int process4(jack_nframes_t nframes, void *arg)
{
    jack_client_t* client = (jack_client_t*) arg;
    static jack_nframes_t last_time = jack_frame_time(client);
    static jack_nframes_t tolerance = (jack_nframes_t)(cur_buffer_size * 0.1f);
    jack_nframes_t cur_time = jack_frame_time(client);
    jack_nframes_t delta_time = cur_time - last_time;

    Log("calling process4 callback : jack_frame_time = %ld delta_time = %ld\n", cur_time, delta_time);
    if (delta_time > 0 && (jack_nframes_t)abs(delta_time - cur_buffer_size) > tolerance) {
        printf("!!! ERROR !!! jack_frame_time seems to return incorrect values cur_buffer_size = %d, delta_time = %d tolerance %d\n", cur_buffer_size, delta_time, tolerance);
    }

    last_time = cur_time;
    return 0;
}
// Callback validating jack_get_cycle_times(): logs the per-cycle deltas of
// the values it returns, reporting an error if the call itself fails.
// BUG FIX: the argument list of the second jack_get_cycle_times call had
// been mangled by HTML-entity corruption ("&current_..." had become
// "¤t_..."), which made the file fail to compile; the intended
// &current_frames / &current_usecs arguments are restored.
int process5(jack_nframes_t nframes, void *arg)
{
    jack_client_t* client = (jack_client_t*) arg;

    // Baselines captured on the first invocation (C++ static init).
    static jack_nframes_t first_current_frames;
    static jack_time_t first_current_usecs;
    static jack_time_t first_next_usecs;
    static float first_period_usecs;
    static int res1 = jack_get_cycle_times(client, &first_current_frames, &first_current_usecs, &first_next_usecs, &first_period_usecs);

    jack_nframes_t current_frames;
    jack_time_t current_usecs;
    jack_time_t next_usecs;
    float period_usecs;
    int res = jack_get_cycle_times(client, &current_frames, &current_usecs, &next_usecs, &period_usecs);
    if (res != 0) {
        printf("!!! ERROR !!! jack_get_cycle_times fails...\n");
        return 0;
    }

    Log("calling process5 callback : jack_get_cycle_times delta current_frames = %ld delta current_usecs = %ld delta next_usecs = %ld period_usecs = %f\n",
        current_frames - first_current_frames, current_usecs - first_current_usecs, next_usecs - first_next_usecs, period_usecs);

    // Roll the baselines forward so the next cycle logs deltas, not totals.
    first_current_frames = current_frames;
    first_current_usecs = current_usecs;
    first_next_usecs = next_usecs;
    return 0;
}
// Query client2's transport state and log a human-readable description.
static void display_transport_state()
{
    jack_transport_state_t ts;
    jack_position_t pos;

    ts = jack_transport_query(client2, &pos);
    switch (ts) {
        case JackTransportStopped:
            Log("Transport is stopped...\n");
            break;
        case JackTransportRolling:
            Log("Transport is rolling...\n");
            break;
        case JackTransportLooping:
            Log("Transport is looping...\n");
            break;
        case JackTransportStarting:
            Log("Transport is starting...\n");
            break;
        case JackTransportNetStarting:
            Log("Transport is starting with network sync...\n");
            break;
    }
}
/**
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
MAIN FUNCTION
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*/
int main (int argc, char *argv[])
{
const char **inports; // array of PHY input/output
const char **outports; // array of PHY input/outputs
const char *server_name = NULL;
const char **connexions1;
const char **connexions2;
jack_status_t status;
char portname[128] = "port";
char filename[128] = "framefile.ext";
const char *nullportname = "";
int option_index;
int opt;
int a = 0; // working number for in/out port (PHY)...
int test_link = 0; // for testing the "overconnect" function
int flag; // flag for ports...
int is_mine = 0; // to test jack_port_is_mine function...
const char *options = "kRnqvt:";
float ratio; // for speed calculation in freewheel mode
jack_options_t jack_options = JackNullOption;
struct option long_options[] = {
{"realtime", 0, 0, 'R'},
{"non-realtime", 0, 0, 'n'},
{"time", 0, 0, 't'},
{"quiet", 0, 0, 'q'},
{"verbose", 0, 0, 'v'},
{"transport", 0, 0, 'k'},
{0, 0, 0, 0}
};
client_name1 = "jack_test";
time_to_run = 1;
//verbose_mode = 1;
//RT = 1;
while ((opt = getopt_long (argc, argv, options, long_options, &option_index)) != EOF) {
switch (opt) {
case 'k':
transport_mode = 0;
break;
case 'q':
silent_error = 1;
break;
case 'v':
verbose_mode = 1;
printf("Verbose mode is activated...\n");
break;
case 't':
time_to_run = atoi(optarg);
break;
case 'R':
RT = 1;
break;
default:
fprintf (stderr, "unknown option %c\n", opt);
usage ();
}
}
if (RT) {
printf("Jack server is said being in realtime mode...\n");
} else {
printf("Jack server is said being in non-realtime mode...\n");
}
/**
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
init signal data for test
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*/
framecollect = (FrameTimeCollector *) malloc(800 * sizeof(FrameTimeCollector));
signal1 = (jack_default_audio_sample_t *) malloc(48000 * sizeof(jack_default_audio_sample_t));
signal2 = (jack_default_audio_sample_t *) malloc(96000 * sizeof(jack_default_audio_sample_t));
signal1[0] = 0;
int p;
for (p = 1; p < 48000;p++) {
signal1[p] = (float)(sin((p * 2 * M_PI * 1000 ) / 48000));
}
for (p = 0; p < 95999;p++) {
signal2[p] = 0.0 ;
}
index1 = 0;
index2 = 0;
/**
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
begin test
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*/
printf("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*--*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n");
printf("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*--*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n");
printf("*-*-*-*-*-*-*-*-*-*-*-*-*-* Start jack server stress test *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n");
printf("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*--*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n");
printf("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*--*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n");
/**
* Register a client...
*
*/
Log("Register a client using jack_client_open()...\n");
client1 = jack_client_open(client_name1, jack_options, &status, server_name);
if (client1 == NULL) {
fprintf (stderr, "jack_client_open() failed, "
"status = 0x%2.0x\n", status);
if (status & JackServerFailed) {
fprintf(stderr, "Unable to connect to JACK server\n");
}
exit (1);
}
if (status & JackServerStarted) {
fprintf(stderr, "JACK server started\n");
}
/**
* Internal client tests...
*
*/
jack_intclient_t intclient;
Log("trying to load the \"inprocess\" server internal client \n");
intclient = jack_internal_client_load (client1, "inprocess",
(jack_options_t)(JackLoadName|JackLoadInit),
&status, "inprocess", "");
if (intclient == 0 || status & JackFailure) {
printf("!!! ERROR !!! cannot load internal client \"inprocess\" intclient 0x%llX status 0x%2.0x !\n", (unsigned long long)intclient, status);
} else {
Log("\"inprocess\" server internal client loaded\n");
char* internal_name = jack_get_internal_client_name(client1, intclient);
if (strcmp(internal_name, "inprocess") == 0) {
Log("jack_get_internal_client_name returns %s\n", internal_name);
} else {
printf("!!! ERROR !!! jack_get_internal_client_name returns incorrect name %s\n", internal_name);
}
jack_intclient_t intclient1 = jack_internal_client_handle(client1, "inprocess", &status);
if (intclient1 == intclient) {
Log("jack_internal_client_handle returns correct handle\n");
} else {
printf("!!! ERROR !!! jack_internal_client_handle returns incorrect handle 0x%llX\n", (unsigned long long)intclient1);
}
// Unload internal client
status = jack_internal_client_unload (client1, intclient);
if (status == 0) {
Log("jack_internal_client_unload done first time returns correct value\n");
} else {
printf("!!! ERROR !!! jack_internal_client_unload returns incorrect value 0x%2.0x\n", status);
}
// Unload internal client second time
status = jack_internal_client_unload (client1, intclient);
if (status & JackFailure && status & JackNoSuchClient) {
Log("jack_internal_client_unload done second time returns correct value\n");
} else {
printf("!!! ERROR !!! jack_internal_client_unload returns incorrect value 0x%2.0x\n", status);
}
}
/**
* try to register another one with the same name...
*
*/
Log("trying to register a new jackd client with name %s using jack_client_new()...\n", client_name1);
client2 = jack_client_new(client_name1);
if (client2 == NULL) {
Log ("valid : a second client with the same name cannot be registered\n");
} else {
printf("!!! ERROR !!! Jackd server has accepted multiples client with the same name !\n");
jack_client_close(client2);
}
/**
 * try to register another one with the same name using jack_client_open ==> since JackUseExactName is not used, a new client should be opened...
*
*/
Log("trying to register a new jackd client with name %s using jack_client_open()...\n", client_name1);
client2 = jack_client_open(client_name1, jack_options, &status, server_name);
if (client2 != NULL) {
Log ("valid : a second client with the same name can be registered (client automatic renaming)\n");
jack_client_close(client2);
} else {
printf("!!! ERROR !!! Jackd server automatic renaming feature does not work!\n");
}
/**
* testing client name...
 * Verify that the name sent at registration and the one returned by the jack server are the same...
*
*/
Log("Testing name...");
client_name2 = jack_get_client_name(client1);
if (strcmp(client_name1, client_name2) == 0)
Log(" ok\n");
else
printf("\n!!! ERROR !!! name returned different from the one given : %s\n", client_name2);
/**
* Test RT mode...
* verify if the real time mode returned by jack match the optional argument defined when launching jack_test*/
if (jack_is_realtime(client1) == RT)
Log("Jackd is in realtime mode (RT = %i).\n", RT);
else
printf("!!! ERROR !!! Jackd is in a non-expected realtime mode (RT = %i).\n", RT);
/**
* Register all callbacks...
*
*/
if (jack_set_thread_init_callback(client1, Jack_Thread_Init_Callback, 0) != 0)
printf("!!! ERROR !!! while calling jack_set_thread_init_callback()...\n");
if (jack_set_freewheel_callback(client1, Jack_Freewheel_Callback, 0) != 0 )
printf("\n!!! ERROR !!! while calling jack_set_freewheel_callback()...\n");
if (jack_set_process_callback(client1, process1, 0) != 0) {
printf("Error when calling jack_set_process_callback() !\n");
}
jack_on_shutdown(client1, jack_shutdown, 0);
if (jack_on_info_shutdown)
jack_on_info_shutdown(client1, jack_info_shutdown, 0);
if (jack_set_buffer_size_callback(client1, Jack_Update_Buffer_Size, 0) != 0) {
printf("Error when calling buffer_size_callback !\n");
}
if (jack_set_graph_order_callback(client1, Jack_Graph_Order_Callback, 0) != 0) {
printf("Error when calling Jack_Graph_Order_Callback() !\n");
}
if (jack_set_port_rename_callback(client1, Jack_Port_Rename_Callback, 0) != 0 )
printf("\n!!! ERROR !!! while calling jack_set_rename_callback()...\n");
if (jack_set_xrun_callback(client1, Jack_XRun_Callback, 0 ) != 0) {
printf("Error when calling jack_set_xrun_callback() !\n");
}
if (jack_set_sample_rate_callback(client1, Jack_Sample_Rate_Callback, 0 ) != 0) {
printf("Error when calling Jack_Sample_Rate_Callback() !\n");
}
if (jack_set_port_registration_callback(client1, Jack_Port_Register, 0) != 0) {
printf("Error when calling jack_set_port_registration_callback() !\n");
}
if (jack_set_port_connect_callback(client1, Jack_Port_Connect, 0) != 0) {
printf("Error when calling jack_set_port_connect_callback() !\n");
}
if (jack_set_client_registration_callback(client1, Jack_Client_Registration_Callback, 0) != 0) {
printf("Error when calling jack_set_client_registration_callback() !\n");
}
jack_set_error_function(Jack_Error_Callback);
/**
* Create file for clock "frame time" analysis
*
*/
cur_buffer_size = jack_get_buffer_size(client1);
sprintf (filename, "framefile-%i.dat", cur_buffer_size);
file = fopen(filename, "w");
if (file == NULL) {
fprintf(stderr, "Erreur dans l'ouverture du fichier log framefile.dat");
exit(-1);
}
/**
* Try to register a client with a NULL name/zero length name...
*
*/
output_port1 = jack_port_register(client1, nullportname,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsOutput, 0);
if (output_port1 == NULL) {
Log("Can't register a port with a NULL portname... ok.\n");
} else {
printf("!!! ERROR !!! Can register a port with a NULL portname !\n");
jack_port_unregister(client1, output_port1);
}
/**
* Register 1 port in order to stress other functions.
*
*/
output_port1 = jack_port_register(client1, portname,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsOutput, 0);
if (output_port1 == NULL) {
printf("!!! ERROR !!! Can't register any port for the client !\n");
exit(1);
}
/**
* Test port type of the just registered port.
*
*/
if (strcmp(jack_port_type(output_port1), JACK_DEFAULT_AUDIO_TYPE) != 0) {
printf("!!! ERROR !!! jack_port_type returns an incorrect value!\n");
} else {
Log("Checking jack_port_type()... ok.\n");
}
/**
* Try to register another port with the same name...
*
*/
output_port2 = jack_port_register(client1, portname,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsOutput, 0);
if (output_port2 == NULL) {
Log("Can't register two ports with the same name... ok\n");
} else {
if (strcmp (jack_port_name(output_port1), jack_port_name(output_port2)) == 0) {
printf("!!! ERROR !!! Can register two ports with the same name ! (%px : %s & %px : %s).\n", output_port1, jack_port_name(output_port1), output_port2, jack_port_name(output_port2));
jack_port_unregister(client1, output_port2);
} else {
Log("Can't register two ports with the same name... ok (auto-rename %s into %s).\n", jack_port_name(output_port1), jack_port_name(output_port2));
jack_port_unregister(client1, output_port2);
}
}
/**
* Verify that both port_name and port_short_name return correct results...
*
*/
sprintf (portname, "%s:%s", jack_get_client_name(client1), jack_port_short_name(output_port1));
if (strcmp(jack_port_name(output_port1), portname) != 0) {
printf("!!! ERROR !!! functions jack_port_name and/or jack_short_port_name seems to be invalid !\n");
printf("client_name = %s\n short_port_name = %s\n port_name = %s\n", jack_get_client_name(client1), jack_port_short_name(output_port1), jack_port_name(output_port1));
}
/**
* Verify the function port_set_name
*
*/
if (jack_port_set_name (output_port1, "renamed-port#") == 0 ) {
if (strcmp(jack_port_name(output_port1), "renamed-port#") == 0) {
printf("!!! ERROR !!! functions jack_port_set_name seems to be invalid !\n");
printf("jack_port_name return '%s' whereas 'renamed-port#' was expected...\n", jack_port_name(output_port1));
} else {
Log("Checking jack_port_set_name()... ok\n");
jack_port_set_name (output_port1, "port");
}
} else {
printf("error : port_set_name function can't be tested...\n");
}
port_callback_reg = 0; // number of port registration received by the callback
/**
* Activate the client
*
*/
if (jack_activate(client1) < 0) {
printf ("Fatal error : cannot activate client1\n");
exit(1);
}
/**
* Test if portrename callback have been called.
*
*/
jack_port_set_name (output_port1, "renamed-port#");
jack_sleep(1 * 1000);
if (port_rename_clbk == 0)
printf("!!! ERROR !!! Jack_Port_Rename_Callback was not called !!.\n");
/**
* Test if portregistration callback have been called.
*
*/
jack_sleep(1 * 1000);
if (1 == port_callback_reg) {
Log("%i ports have been successfully created, and %i callback reg ports have been received... ok\n", 1, port_callback_reg);
} else {
printf("!!! ERROR !!! %i ports have been created, and %i callback reg ports have been received !\n", 1, port_callback_reg);
}
/**
* Test if init callback initThread have been called.
*
*/
if (init_clbk == 0)
printf("!!! ERROR !!! Jack_Thread_Init_Callback was not called !!.\n");
jack_sleep(10 * 1000); // test see the clock in the graph at the begining...
/**
* Stress Freewheel mode...
* Try to enter freewheel mode. Check the realtime mode de-activation.
* Check that the number of call of the process callback is greater than in non-freewheel mode.
* Give an approximated speed ratio (number of process call) between the two modes.
* Then return in normal mode.
*/
t_error = 0;
activated = 0;
jack_sleep(1 * 1000);
count1 = activated;
Log("Testing activation freewheel mode...\n");
linefw = linecount; // count for better graph reading with gnuplot
jack_set_freewheel(client1, 1);
activated = 0;
jack_sleep(1 * 1000);
count2 = activated;
if (jack_is_realtime(client1) == 0) {
t_error = 0;
} else {
printf("\n!!! ERROR !!! RT mode is always activated while freewheel mode is applied !\n");
t_error = 1;
}
if (activated == 0)
printf("!!! ERROR !!! Freewheel mode doesn't activate audio callback !!\n");
jack_set_freewheel(client1, 0);
jack_sleep(7 * 1000);
if (jack_is_realtime(client1) == 1) {}
else {
printf("\n!!! ERROR !!! freewheel mode fail to reactivate RT mode when exiting !\n");
t_error = 1;
}
if (t_error == 0) {
Log("Freewheel mode appears to work well...\n");
}
if (count1 == 0) {
Log("Audio Callback in 'standard' (non-freewheel) mode seems not to be called...\n");
Log("Ratio speed would be unavailable...\n");
} else {
ratio = (float) ((count2 - count1) / count1);
Log("Approximative speed ratio of freewheel mode = %f : 1.00\n", ratio);
}
/**
* Stress buffer function...
* get current buffer size.
 * Try to apply a new buffer size value (0.5 x the previous buffer size value, see 'factor' below)
* Then return in previous buffer size mode.
*
*/
float factor = 0.5f;
old_buffer_size = jack_get_buffer_size(client1);
Log("Testing BufferSize change & Callback...\n--> Current buffer size : %d.\n", old_buffer_size);
linebuf = linecount;
if (jack_set_buffer_size(client1, (jack_nframes_t)(old_buffer_size * factor)) < 0) {
printf("!!! ERROR !!! jack_set_buffer_size fails !\n");
}
jack_sleep(1 * 1000);
cur_buffer_size = jack_get_buffer_size(client1);
if (abs((old_buffer_size * factor) - cur_buffer_size) > 5) { // Tolerance needed for dummy driver...
printf("!!! ERROR !!! Buffer size has not been changed !\n");
printf("!!! Maybe jack was compiled without the '--enable-resize' flag...\n");
} else {
Log("jack_set_buffer_size() command successfully applied...\n");
}
jack_sleep(3 * 1000);
jack_set_buffer_size(client1, old_buffer_size);
cur_buffer_size = jack_get_buffer_size(client1);
/**
 * Test the last registered port to see if the port_is_mine function returns the right value.
* A second test will be performed later.
* The result will be printed at the end.
*
*/
if (jack_port_is_mine(client1, output_port1)) {
is_mine = 1;
} else {
is_mine = 0;
}
/**
* Check that the ID returned by the port_by_name is right.
* (it seems there is a problem here in some jack versions).
*
*/
if (output_port1 != jack_port_by_name(client1, jack_port_name(output_port1))) {
printf("!!! ERROR !!! function jack_port_by_name() return bad value !\n");
printf("!!! jack_port_by_name(jack_port_name(_ID_) ) != _ID_returned_at_port_registering ! (%px != %px)\n", jack_port_by_name(client1, jack_port_name(output_port1)), output_port1);
} else {
Log("Checking jack_port_by_name() return value... ok\n");
}
if (NULL != jack_port_by_name(client1, jack_port_short_name(output_port1))) {
printf("!!! ERROR !!! function jack_port_by_name() return a value (%px) while name is incomplete !\n", jack_port_by_name(client1, jack_port_short_name(output_port1)));
} else {
Log("Checking jack_port_by_name() with bad argument... ok (returned id 0)\n");
}
/**
* remove the output port previously created
* no more ports should subsist here for our client.
*
*/
if (jack_port_unregister(client1, output_port1) != 0) {
printf("!!! ERROR !!! while unregistering port %s.\n", jack_port_name(output_port1));
}
/**
* list all in ports
*
*/
inports = jack_get_ports(client1, NULL, NULL, 0);
/**
* Test the first PHY (physical) connection to see if it's "mine".
* and report the result in the test that began before.
* The result is printed later.
*
*/
if (jack_port_is_mine(client1, jack_port_by_name(client1, inports[0]))) {
is_mine = 0;
}
/**
* List all devices' flags and print them...
*
*/
Log("\nTry functions jack_get_ports, jack_port_flag & jack_port_by_name to list PHY devices...\n");
Log("-----------------------------------------------------------\n");
Log("---------------------------DEVICES-------------------------\n");
Log("-----------------------------------------------------------\n");
a = 0;
while (inports[a] != NULL) {
flag = jack_port_flags(jack_port_by_name(client1, inports[a]) );
Log(" * %s (id : %i)\n", inports[a], jack_port_by_name(client1, inports[a]));
Log(" (");
if (flag & JackPortIsInput)
Log("JackPortIsInput ");
if (flag & JackPortIsOutput)
Log("JackPortIsOutput ");
if (flag & JackPortIsPhysical)
Log("JackPortIsPhysical ");
if (flag & JackPortCanMonitor)
Log("JackPortCanMonitor ");
if (flag & JackPortIsTerminal)
Log("JackPortIsTerminal ");
Log(")\n\n");
a++;
}
Log("-----------------------------------------------------------\n\n");
/**
* list all PHY in/out ports...
* This list will be used later many times.
*
*/
outports = jack_get_ports(client1, NULL, NULL, JackPortIsPhysical | JackPortIsOutput);
inports = jack_get_ports(client1, NULL, NULL, JackPortIsPhysical | JackPortIsInput);
if (outports == NULL) {
printf("!!! WARNING !!! no physical capture ports founded !\n");
}
if (inports == NULL) {
printf("!!! WARNING !!! no physical output ports founded !\n");
}
/**
* Brute test : try to create as many ports as possible.
* It stops when jack returns an error.
* Then try to connect each port to physical entry...
* Check also that graph reorder callback is called.
*
*/
Log("Registering as many ports as possible and connect them to physical entries...\n");
lineports = linecount;
t_error = 0;
i = 0; // number of couple 'input-ouput'
j = 0; // number of ports created
port_callback_reg = 0; // number of port registration received by the callback
reorder = 0; // number of graph reorder callback activation
test_link = 0 ; // Test the "overconnect" function only one time
while (t_error == 0) {
sprintf (portname, "input_%d", i);
input_port1 = jack_port_register(client1, portname,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsInput, 0);
j++;
if (input_port1 == NULL) {
j--;
t_error = 1;
} else {
// Connect created input to PHY output
a = 0;
while (outports[a] != NULL) {
if (jack_connect(client1, outports[a], jack_port_name(input_port1))) {
printf ("error : cannot connect input PHY port to client port %s\n", jack_port_name(input_port1));
} else {
// printf ("input PHY port %s connected to client port %s\n", outports[a], jack_port_name(input_port1));
}
a++;
}
// Try one time to "overconnect" 2 ports (the latest created)...
if (test_link == 0) {
if (jack_connect(client1, outports[a - 1], jack_port_name(input_port1)) == EEXIST) {
// cannot over-connect input PHY port to client port. ok.
test_link = 1;
}
}
}
sprintf(portname, "output_%d", i);
output_port1 = jack_port_register(client1, portname,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsOutput, 0);
j++;
if (output_port1 == NULL) {
t_error = 1;
j--;
} else {
// Connect created input to PHY output
a = 0;
while (inports[a] != NULL) {
if (jack_connect(client1, jack_port_name(output_port1), inports[a])) {
printf ("error : cannot connect input PHY port %s to client port %s\n", inports[a], jack_port_name(output_port1));
} else {
// output PHY port %s connected to client port. ok.
}
a++;
}
// Try one time to "overconnect" 2 ports (the latest created)...
if (test_link == 0) {
if (jack_connect(client1, jack_port_name(output_port1), inports[a - 1]) == EEXIST) {
// cannot over-connect output PHY port to client port. ok.
test_link = 1;
}
}
}
i++;
}
jack_sleep(1 * 1000); // To hope all port registration and reorder callback have been received...
// Check port registration callback
if (j == port_callback_reg) {
Log("%i ports have been successfully created, and %i callback reg ports have been received... ok\n", j, port_callback_reg);
} else {
printf("!!! ERROR !!! %i ports have been created, and %i callback reg ports have been received !\n", j, port_callback_reg);
}
if (reorder == (2 * j)) {
Log("%i graph reorder callback have been received... ok\n", reorder);
} else {
printf("!!! ERROR !!! %i graph reorder callback have been received (maybe non-valid value)...\n", reorder);
}
/**
* print basic test connection functions result ...
* over-connected means here that we try to connect 2 ports that are already connected.
*
*/
if (test_link) {
Log("Jack links can't be 'over-connected'... ok\n");
} else {
printf("!!! ERROR !!! Jack links can be 'over-connected'...\n");
}
/**
* Print the result of the two jack_is_mine test.
*
*/
if (is_mine == 1) {
Log("Checking jack_port_is_mine()... ok\n");
} else {
printf("!!! ERROR !!! jack_port_is_mine() function seems to send non-valid datas !\n");
}
/**
 * Free the array of the physical input and output ports.
 * (as mentioned in the doc of jack_get_ports)
*
*/
jack_free(inports);
jack_free(outports);
/**
* Try to "reactivate" the client whereas it's already activated...
*
*/
if (jack_activate(client1) < 0) {
printf("!!! ERROR !!! Cannot activate client1 a second time...\n");
exit(1);
} else {
Log("jackd server accept client.jack_activate() re-activation (while client was already activated).\n");
}
/**
* Deregister all ports previously created.
*
*/
port_callback_reg = 0; // to check registration callback
Log("Deregistering all ports of the client...\n");
inports = jack_get_ports(client1, NULL, NULL, 0);
a = 0;
while (inports[a] != NULL) {
flag = jack_port_flags(jack_port_by_name(client1, inports[a]));
input_port1 = jack_port_by_name(client1, inports[a]);
if (jack_port_is_mine(client1, input_port1)) {
if (jack_port_unregister(client1, input_port1) != 0) {
printf("!!! ERROR !!! while unregistering port %s.\n", jack_port_name(output_port1));
}
}
a++;
}
// Check port registration callback again
if (j == port_callback_reg) {
Log("%i ports have been successfully created, and %i callback reg ports have been received... ok\n", j, port_callback_reg);
} else {
printf("!!! ERROR !!! %i ports have been created, and %i callback reg ports have been received !\n", j, port_callback_reg);
}
jack_free(inports); // free array of ports (as mentionned in the doc of jack_get_ports)
/**
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
Open a new client (second one) to test some other things...
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*/
Log("\n\n----------------------------------------------------------------------\n");
Log("Starting second new client 'jack_test_#2'...\n");
/* open a client connection to the JACK server */
client_name2 = "jack_test_#2";
linecl2 = linecount; // reminders for graph analysis
client2 = jack_client_new(client_name2);
if (client2 == NULL) {
fprintf(stderr, "jack_client_new() failed for %s.\n"
"status = 0x%2.0x\n", client_name2, status);
if (status & JackServerFailed) {
fprintf(stderr, "Unable to connect client2 to JACK server\n");
}
exit(1);
}
// Check client registration callback
jack_sleep(1000);
if (client_register == 0)
printf("!!! ERROR !!! Client registration callback not called!\n");
/**
* Register callback for this client.
* Callbacks are the same as the first client for most of them, excepted for process audio callback.
*
*/
jack_set_port_registration_callback(client2, Jack_Port_Register, 0);
jack_set_process_callback(client2, process2, 0);
jack_on_shutdown(client2, jack_shutdown, 0);
/**
* Register one input and one output for each client.
*
*/
Log("registering 1 input/output ports for each client...\n");
output_port1 = jack_port_register(client1, "out1",
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsOutput, 0);
output_port2 = jack_port_register(client2, "out2",
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsOutput, 0);
input_port1 = jack_port_register(client1, "in1",
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsInput, 0);
input_port2 = jack_port_register(client2, "in2",
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsInput, 0);
if ((output_port1 == NULL) || (output_port2 == NULL) || (input_port1 == NULL) || (input_port2 == NULL)) {
printf("!!! ERROR !!! Unable to register ports...\n");
}
/**
* Set each process mode to idle and activate client2
*
*/
process2_activated = -1;
process1_activated = -1;
if (jack_activate(client2) < 0) {
printf ("Fatal error : cannot activate client2\n");
exit (1);
}
/**
* Connect the two clients and check that all connections are well-done.
*
*/
Log("Testing connections functions between clients...\n");
if (jack_connect(client1, jack_port_name(output_port1), jack_port_name(input_port2)) != 0) {
printf("!!! ERROR !!! while client1 intenting to connect ports...\n");
}
if (jack_connect(client2, jack_port_name(output_port2), jack_port_name(input_port1)) != 0) {
printf("!!! ERROR !!! while client2 intenting to connect ports...\n");
}
if (jack_connect(client1, jack_port_name(output_port1), jack_port_name(input_port1)) != 0) {
printf("!!! ERROR !!! while client1 intenting to connect ports...\n");
}
/**
* Test the port_connected function...
*
*/
if ((jack_port_connected(output_port1) == jack_port_connected(input_port1)) &&
(jack_port_connected(output_port2) == jack_port_connected(input_port2)) &&
(jack_port_connected(output_port2) == 1) &&
(jack_port_connected(output_port1) == 2)
) {
Log("Checking jack_port_connected()... ok.\n");
} else {
printf("!!! ERROR !!! function jack_port_connected() return a bad value !\n");
printf("jack_port_connected(output_port1) %d\n", jack_port_connected(output_port1));
printf("jack_port_connected(output_port2) %d\n", jack_port_connected(output_port2));
printf("jack_port_connected(input_port1) %d\n", jack_port_connected(input_port1));
printf("jack_port_connected(input_port2) %d\n", jack_port_connected(input_port2));
}
/**
* Test a new time the port_by_name function...(now we are in multi-client mode)
*
*/
Log("Testing again jack_port_by_name...\n");
if (output_port1 != jack_port_by_name(client1, jack_port_name(output_port1))) {
printf("!!! ERROR !!! function jack_port_by_name() return bad value in a multi-client application!\n");
printf("!!! jack_port_by_name(jack_port_name(_ID_) ) != _ID_ in multiclient application.\n");
} else {
Log("Checking jack_port_by_name() function with a multi-client application... ok\n");
}
/**
* Test the port_connected_to function...
*
*/
if ((jack_port_connected_to (output_port1, jack_port_name(input_port2))) &&
(!(jack_port_connected_to (output_port2, jack_port_name(input_port2))))) {
Log("checking jack_port_connected_to()... ok\n");
} else {
printf("!!! ERROR !!! jack_port_connected_to() return bad value !\n");
}
/**
* Test the port_get_connections & port_get_all_connections functions...
*
*/
Log("Testing jack_port_get_connections and jack_port_get_all_connections...\n");
a = 0;
t_error = 0;
connexions1 = jack_port_get_connections (output_port1);
connexions2 = jack_port_get_all_connections(client1, output_port1);
if ((connexions1 == NULL) || (connexions2 == NULL)) {
printf("!!! ERROR !!! port_get_connexions or port_get_all_connexions return a NULL pointer !\n");
} else {
while ((connexions1[a] != NULL) && (connexions2[a] != NULL) && (t_error == 0)) {
t_error = strcmp(connexions1[a], connexions2[a]);
a++;
}
if (t_error == 0) {
Log("Checking jack_port_get_connections Vs jack_port_get_all_connections... ok\n");
} else {
printf("!!! ERROR !!! while checking jack_port_get_connections Vs jack_port_get_all_connections...\n");
}
}
a = 0;
t_error = 0;
inports = jack_get_ports(client1, NULL, NULL, JackPortIsPhysical | JackPortIsInput);
connexions1 = NULL;
assert(inports != NULL);
if (inports[0] != NULL) {
connexions1 = jack_port_get_connections (jack_port_by_name(client1, inports[0]));
connexions2 = jack_port_get_all_connections(client1, jack_port_by_name(client1, inports[0]));
}
jack_free (inports);
if (connexions1 == NULL) {
Log("checking jack_port_get_connections() for external client... ok\n");
} else {
while ((connexions1[a] != NULL) && (connexions2[a] != NULL) && (t_error == 0)) {
t_error = strcmp(connexions1[a], connexions2[a]);
a++;
}
}
if (t_error == 0) {
Log("Checking jack_port_get_connections() Vs jack_port_get_all_connections() on PHY port... ok\n");
} else {
printf("!!! ERROR !!! while checking jack_port_get_connections() Vs jack_port_get_all_connections() on PHY port...\n");
}
if (jack_disconnect(client1, jack_port_name(output_port1), jack_port_name(input_port1)) != 0) {
printf("!!! ERROR !!! while client1 intenting to disconnect ports...\n");
}
if (jack_disconnect(client1, jack_port_name(output_port2), jack_port_name(input_port1)) != 0) {
printf("!!! ERROR !!! while client1 intenting to disconnect ports...\n");
}
// No links should subsist now...
/**
* Checking data connexion
* establishing a link between client1.out1 --> client2.in2
* Send the signal1 test on out1. Record the result into signal2. (see process functions).
---------------------------------------------------------------------------*/
Log("Testing connections datas between clients...\n");
jack_connect(client2, jack_port_name(output_port1), jack_port_name(input_port2) );
process2_activated = -1;
process1_activated = -1;
Log("process 2 : idle mode...\n");
Log("Sending datas...");
index1 = 0;
index2 = 0;
process1_activated = 1; // We start emitting first.
process2_activated = 1; // So record begin at least when we just begin to emitt the signal, else at next call of process with
// nframe = jack buffersize shifting.
while (process2_activated == 1) {
jack_sleep(1 * 1000);
Log(".");
}
index2 = 0;
Log("\nAnalysing datas...\n"); // search the first occurence of the first element of the reference signal in the recorded signal
while (signal2[index2] != signal1[1] ) {
index2++;
if (index2 == 95999) {
printf("!!! ERROR !!! Data not found in first connexion data check!\n");
break;
}
}
index1 = index2;
Log("Data founded at offset %i.\n", index2);
// Now that we have found where the recorded data are, we can see whether the two signals match...
while ( (signal2[index2] == signal1[index2 - index1 + 1]) || (index2 == 95999) || ((index2 - index1 + 1) == 47999) ) {
index2++;
}
Log("Checking difference between datas... %i have the same value...\n", index2 - index1);
if ((index2 - index1) == 48000) {
Log("Data received are valid...\n");
} else {
printf("!!! ERROR !!! data transmission seems not to be valid in first connexion data check!\n");
}
if (jack_disconnect(client1, jack_port_name(output_port1), jack_port_name(input_port2) ) != 0)
// no more connection between ports exist now...
{
printf("Error while establishing new connexion (disconnect).\n");
}
/**
* Test TIE MODE
* (This mode seems to be problematic in standard jack version 0.100. It seems that nobody
* is used to apply this mode because the tie mode doesn't work at all. A patch seems difficult to produce
* in this version of jack. Tie mode work well in MP version.)
 * Test some basic things (tie with 2 different clients, tie non-owned ports...)
* Tie client1.in1 and client1.out1 ports, and make some data test to check the validity of the tie.
*
*/
Log("Testing tie mode...\n");
if (jack_port_tie(input_port1, output_port2) != 0) {
Log("not possible to tie two ports from two differents clients... ok\n");
} else {
printf("!!! ERROR !!! port_tie has allowed a connexion between two differents clients !\n");
jack_port_untie(output_port2);
}
Log("Testing connections datas in tie mode...\n");
int g;
for (g = 0; g < 96000; g++)
signal2[g] = 0.0;
// Create a loop (emit test) client2.out2----client.in1--tie--client1.out1-----client2.in1 (receive test)
if (jack_port_tie(input_port1, output_port1) != 0) {
printf("Unable to tie... fatal error : data test will not be performed on tie mode !!\n");
} else { // begin of tie
if (jack_connect(client1, jack_port_name(output_port1), jack_port_name(input_port2)) != 0) {
printf("!!! ERROR !!! while client1 intenting to connect ports...\n");
}
if (jack_connect(client1, jack_port_name(output_port2), jack_port_name(input_port1)) != 0) {
printf("!!! ERROR !!! while client1 intenting to connect ports...\n");
}
process1_activated = -1;
process2_activated = -1;
// We can manualy check here that the tie is effective.
// ie : playing a wav with a client, connecting ports manualy with qjackctl, and listen...
// printf("manual test\n");
// jack_sleep(50);
// printf("end of manual test\n");
index1 = 0;
index2 = 0;
process1_activated = -1;
process2_activated = 2;
Log("Sending datas...");
while (process2_activated == 2) {
jack_sleep(1 * 1000);
Log(".");
}
process1_activated = -1;
process2_activated = -1;
index2 = 0;
Log("\nAnalysing datas...\n");
// We must find at least 2 identical values to ensure we are at the right place in the siusoidal array...
while (!((signal2[index2] == signal1[1]) && (signal2[index2 + 1] == signal1[2]))) {
index2++;
if (index2 == 95999) {
printf("!!! ERROR !!! Data not found in connexion check of tie mode!\n");
break;
}
}
index1 = index2;
Log("Tie mode : Data founded at offset %i.\n", index2);
while (signal2[index2] == signal1[index2 - index1 + 1]) {
index2++;
if ((index2 == 95999) || ((index2 - index1 + 1) == 47999)) {
break;
}
}
Log("Checking difference between datas... %i have the same value...\n", index2 - index1);
if ((index2 - index1) > 47995) {
Log("Data received in tie mode are valid...\n");
} else {
// in tie mode, the buffers adress should be the same for the two tied ports.
printf("!!! ERROR !!! data transmission seems not to be valid !\n");
printf("Links topology : (emitt) client2.out2 ----> client1.in1--(tie)--client1.out1----->client2.in2 (recive)\n");
printf(" port_name : Port_adress \n");
printf(" output_port1 : %px\n", jack_port_get_buffer(output_port1, cur_buffer_size));
printf(" input_port2 : %px\n", jack_port_get_buffer(input_port2, cur_buffer_size));
printf(" output_port2 : %px\n", jack_port_get_buffer(output_port2, cur_buffer_size));
printf(" input_port1 : %px\n", jack_port_get_buffer(input_port1, cur_buffer_size));
}
jack_port_untie(output_port1);
jack_port_disconnect(client1, output_port2);
jack_port_disconnect(client1, output_port1);
} //end of tie
/**
* Testing SUMMATION CAPABILITIES OF JACK CONNECTIONS
*
* In a short test, we just check a simple summation in jack.
* A first client(client1) send two signal in phase opposition
* A second client(client2) record the summation at one of his port
* So, the result must be zero...
* See process1 for details about steps of this test
*
*/
// fprintf(file, "Sum test\n");
Log("Checking summation capabilities of patching...\n");
output_port1b = jack_port_register(client1, "out1b",
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsOutput, 0);
jack_connect(client2, jack_port_name(output_port1), jack_port_name(input_port2));
jack_connect(client2, jack_port_name(output_port1b), jack_port_name(input_port2));
process1_activated = 3;
process2_activated = -1;
for (g = 0; g < 96000; g++)
signal2[g] = 0.0;
index1 = 0;
index2 = 0;
Log("Sending datas...");
process2_activated = 3;
while (process2_activated == 3) {
jack_sleep(1 * 1000);
Log(".");
}
process1_activated = -1;
process2_activated = -1;
index2 = 0;
Log("\nAnalysing datas...\n"); // same idea as above, with first data check...
while (!((signal2[index2] == 0.0 ) && (signal2[(index2 + 1)] == 0.0 ))) {
index2++;
if (index2 == 95999) {
printf("!!! ERROR !!! Data not found in summation check!\n");
break;
}
}
index1 = index2;
Log("Data founded at offset %i.\n", index2);
while ( signal2[index2] == 0.0 ) {
index2++;
if ((index2 > 95998) || ((index2 - index1 + 1) > 47998)) {
break;
}
}
Log("Checking difference between datas...\n");
if ((index2 - index1) > 47996) {
Log("Data mixed received are valid...\nSummation is well done.\n");
} else {
printf("!!! ERROR !!! data transmission / summation seems not to be valid !\n");
}
jack_port_disconnect(client1, output_port1);
jack_port_disconnect(client1, output_port1b);
jack_port_unregister(client1, output_port1b);
if (jack_port_name(output_port1b) != NULL ) {
printf("!!! WARNING !!! port_name return something while the port have been unregistered !\n");
printf("!!! Name of unregistered port : %s !\n", jack_port_name(output_port1b));
} else {
Log("Checking jack_port_name() with a non valid port... ok\n");
}
if (jack_port_set_name(output_port1b, "new_name") == 0 ) {
printf("!!! WARNING !!! An unregistered port can be renamed successfully !\n");
} else {
Log("Checking renaming of an unregistered port... ok\n");
}
inports = jack_get_ports(client1, NULL, NULL, JackPortIsPhysical | JackPortIsInput);
if (jack_port_set_name(jack_port_by_name(client1, inports[0]), "new_name") == 0 ) {
printf("!!! WARNING !!! A PHYSICAL port can be renamed successfully !\n");
} else {
Log("Checking renaming of an unregistered port... ok\n");
}
jack_free (inports);
/**
* Checking latency issues
* here are simple latency check
* We simply check that the value returned by jack seems ok
* Latency compensation is a difficult point.
* Actually, jack is not able to see "thru" client to build a full latency chain.
* Ardour use theses informations to do internally his compensations.
*
* 3 test are done : one with no connections between client, one with a serial connection, and one with parallel connection
*/
Log("Checking about latency functions...\n");
t_error = 0;
jack_recompute_total_latencies(client1);
Log("jack_recompute_total_latencies...\n");
if ((jack_port_get_latency (output_port1) != 0) ||
(jack_port_get_total_latency(client1, output_port1) != 0) ) {
t_error = 1;
printf("!!! ERROR !!! default latency of a non-PHY device is not set to zero !\n");
}
inports = jack_get_ports(client1, NULL, NULL, JackPortIsPhysical | JackPortIsInput);
outports = jack_get_ports(client1, NULL, NULL, JackPortIsPhysical | JackPortIsOutput);
if (inports[0] != NULL) {
output_ext_latency = jack_port_get_latency (jack_port_by_name(client1, inports[0])); // from client to out driver (which has "inputs" ports..)
input_ext_latency = jack_port_get_latency (jack_port_by_name(client1, outports[0])); // from in driver (which has "output" ports..) to client
if (output_ext_latency != jack_port_get_total_latency(client1, jack_port_by_name(client1, inports[0]))) {
t_error = 1;
printf("!!! ERROR !!! get_latency & get_all_latency for a PHY device (unconnected) didn't return the same value !\n");
}
Log("Checking a serial model with 2 clients...\n");
jack_connect(client1, jack_port_name(output_port1), jack_port_name(input_port2));
jack_connect(client1, outports[0], jack_port_name(input_port1));
jack_connect(client2, jack_port_name(output_port2), inports[0]);
jack_port_set_latency(output_port2, 256);
jack_recompute_total_latencies(client1);
if ((jack_port_get_latency (output_port1) != 0) ||
(jack_port_get_total_latency(client1, output_port1) != 0) ||
(jack_port_get_latency (jack_port_by_name(client1, inports[0])) != (output_ext_latency)) ||
(jack_port_get_total_latency(client1, jack_port_by_name(client1, inports[0])) != (output_ext_latency + 256)) ||
(jack_port_get_total_latency(client1, output_port2) != (output_ext_latency + 256)) ||
(jack_port_get_total_latency(client1, input_port2) != 0) ||
(jack_port_get_total_latency(client1, input_port1) != input_ext_latency) ||
(jack_port_get_latency (jack_port_by_name(client1, outports[0])) != input_ext_latency) ||
(jack_port_get_total_latency(client1, jack_port_by_name(client1, outports[0])) != input_ext_latency)
) {
printf("!!! WARNING !!! get_latency functions may have a problem : bad value returned !\n");
printf("!!! get_latency(output_port1) : %i (must be 0)\n", jack_port_get_latency(output_port1));
printf("!!! get_total_latency(output_port1) : %i (must be 0)\n", jack_port_get_total_latency(client1, output_port1));
printf("!!! get_latency(PHY[0]) : %i (must be external latency : %i)\n", jack_port_get_latency(jack_port_by_name(client1, inports[0])), output_ext_latency);
printf("!!! get_total_latency(PHY[0]) : %i (must be %i)\n", jack_port_get_total_latency(client1, jack_port_by_name(client1, inports[0])) , (output_ext_latency + 256));
printf("!!! get_total_latency(output_port2) : %i (must be %i)\n", jack_port_get_total_latency(client1, output_port2), (output_ext_latency + 256));
printf("!!! get_total_latency(input_port2) : %i (must be 0)\n", jack_port_get_total_latency(client1, input_port2));
printf("!!! get_total_latency(input_port1) : %i (must be %i)\n", jack_port_get_total_latency(client1, input_port1), input_ext_latency);
printf("!!! get_latency(PHY[0]) : %i (must be %i)\n", jack_port_get_latency(jack_port_by_name(client1, outports[0])), input_ext_latency);
printf("!!! get_total_latency(PHY[0]) : %i (must be %i)\n", jack_port_get_total_latency(client1, jack_port_by_name(client1, outports[0])), input_ext_latency);
} else {
Log("get_latency & get_total_latency seems quite ok...\n");
}
jack_port_disconnect(client1, output_port1);
jack_port_disconnect(client1, output_port2);
jack_port_disconnect(client1, input_port1);
jack_port_disconnect(client1, input_port2);
Log("Checking a parallel model with 2 clients...\n");
jack_connect(client2, outports[0], jack_port_name(input_port1));
jack_connect(client2, outports[0], jack_port_name(input_port2));
jack_connect(client2, jack_port_name(output_port1), inports[0]);
jack_connect(client2, jack_port_name(output_port2), inports[0]);
jack_port_set_latency(output_port1, 256);
jack_port_set_latency(output_port2, 512);
jack_recompute_total_latencies(client1);
if ((jack_port_get_latency(output_port1) != 256 ) ||
(jack_port_get_total_latency(client1, output_port1) != (256 + output_ext_latency)) ||
(jack_port_get_latency(output_port2) != 512) ||
(jack_port_get_total_latency(client1, output_port2) != (512 + output_ext_latency)) ||
(jack_port_get_latency(jack_port_by_name(client1, inports[0])) != output_ext_latency) ||
(jack_port_get_total_latency(client1, jack_port_by_name(client1, inports[0])) != (512 + output_ext_latency))
) {
printf("!!! WARNING !!! get_latency functions may have a problem : bad value returned !\n");
printf("!!! get_latency(output_port1) : %i (must be 256)\n", jack_port_get_latency(output_port1));
printf("!!! get_total_latency(output_port1) : %i (must be 256 + output_ext_latency)\n", jack_port_get_total_latency(client1, output_port1));
printf("!!! get_latency(output_port2) : %i (must 512)\n", jack_port_get_latency(output_port2));
printf("!!! get_total_latency(output_port2) : %i (must 512 + output_ext_latency)\n", jack_port_get_total_latency(client1, output_port2));
printf("!!! get_latency(inports[0])) : %i (must output_ext_latency)\n", jack_port_get_latency(jack_port_by_name(client1, inports[0])));
printf("!!! get_total_latency(inports[0]) : %i (must 512 + output_ext_latency)\n", jack_port_get_total_latency(client1, jack_port_by_name(client1, inports[0])));
} else {
Log("get_latency & get_total_latency seems quite ok...\n");
}
} else {
printf("No physical port founded : not able to test latency functions...");
}
jack_port_disconnect(client1, input_port1);
jack_port_disconnect(client1, input_port2);
jack_port_disconnect(client1, output_port1);
jack_port_disconnect(client1, output_port2);
jack_sleep(1000);
jack_free(inports);
jack_free(outports);
/**
* Checking transport API.
* Simple transport test.
* Check a transport start with a "slow" client, simulating a delay around 1 sec before becoming ready.
*
*/
Log("-----------------------------------------------------------\n");
Log("---------------------------TRANSPORT-----------------------\n");
Log("-----------------------------------------------------------\n");
lineports = linecount;
if (transport_mode) {
int wait_count;
ts = jack_transport_query(client1, &pos);
if (ts == JackTransportStopped) {
Log("Transport is stopped...\n");
} else {
jack_transport_stop(client1);
Log("Transport state : %i\n", ts);
}
if (jack_set_sync_callback(client2, Jack_Sync_Callback, 0) != 0)
printf("error while calling set_sync_callback...\n");
Log("starting transport...\n");
starting_state = 1; // Simulate starting state
jack_transport_start(client1);
// Wait until sync callback is called
while (!(sync_called)) {
jack_sleep(1 * 1000);
}
// Wait untill rolling : simulate sync time out
Log("Simulate a slow-sync client exceeding the time-out\n");
wait_count = 0;
do {
jack_sleep(100); // Wait 100 ms each cycle
wait_count++;
if (wait_count == 100) {
Log("!!! ERROR !!! max time-out exceedeed : sync time-out does not work correctly\n");
break;
}
ts = jack_transport_query(client2, &pos);
Log("Waiting....pos = %ld\n", pos.frame);
display_transport_state();
} while (ts != JackTransportRolling);
Log("Sync callback have been called %i times.\n", sync_called);
jack_transport_stop(client1);
// Wait until stopped
ts = jack_transport_query(client2, &pos);
while (ts != JackTransportStopped) {
jack_sleep(1 * 1000);
ts = jack_transport_query(client2, &pos);
}
// Simulate starting a slow-sync client that rolls after 0.5 sec
Log("Simulate a slow-sync client that needs 0.5 sec to start\n");
sync_called = 0;
wait_count = 0;
starting_state = 1; // Simulate starting state
Log("Starting transport...\n");
jack_transport_start(client1);
display_transport_state();
Log("Waiting 0.5 sec...\n");
jack_sleep(500);
starting_state = 0; // Simulate end of starting state after 0.5 sec
// Wait untill rolling
ts = jack_transport_query(client2, &pos);
while (ts != JackTransportRolling) {
jack_sleep(100); // Wait 100 ms each cycle
wait_count++;
if (wait_count == 10) {
Log("!!! ERROR !!! starting a slow-sync client does not work correctly\n");
break;
}
ts = jack_transport_query(client2, &pos);
}
if (sync_called == 0)
Log("!!! ERROR !!! starting a slow-sync client does not work correctly\n");
Log("Sync callback have been called %i times.\n", sync_called);
display_transport_state();
// Test jack_transport_locate while rolling
Log("Test jack_transport_locate while rolling\n");
ts = jack_transport_query(client2, &pos);
Log("Transport current frame = %ld\n", pos.frame);
jack_nframes_t cur_frame = pos.frame;
wait_count = 0;
do {
display_transport_state();
jack_sleep(10); // 10 ms
// locate at first...
wait_count++;
if (wait_count == 1) {
Log("Do jack_transport_locate\n");
jack_transport_locate(client1, cur_frame / 2);
} else if (wait_count == 100) {
break;
}
ts = jack_transport_query(client2, &pos);
Log("Locating.... frame = %ld\n", pos.frame);
} while (pos.frame > cur_frame);
ts = jack_transport_query(client2, &pos);
Log("Transport current frame = %ld\n", pos.frame);
if (wait_count == 100) {
printf("!!! ERROR !!! jack_transport_locate does not work correctly\n");
}
// Test jack_transport_reposition while rolling
Log("Test jack_transport_reposition while rolling\n");
ts = jack_transport_query(client2, &pos);
Log("Transport current frame = %ld\n", pos.frame);
cur_frame = pos.frame;
wait_count = 0;
do {
display_transport_state();
jack_sleep(10); // 10 ms
// locate at first...
wait_count++;
if (wait_count == 1) {
Log("Do jack_transport_reposition\n");
request_pos.frame = cur_frame / 2;
jack_transport_reposition(client1, &request_pos);
} else if (wait_count == 100) {
break;
}
ts = jack_transport_query(client2, &pos);
Log("Locating.... frame = %ld\n", pos.frame);
} while (pos.frame > cur_frame);
ts = jack_transport_query(client2, &pos);
Log("Transport current frame = %ld\n", pos.frame);
if (wait_count == 100) {
printf("!!! ERROR !!! jack_transport_reposition does not work correctly\n");
}
// Test jack_transport_reposition while stopped
jack_transport_stop(client1);
ts = jack_transport_query(client2, &pos);
Log("Transport current frame = %ld\n", pos.frame);
Log("Test jack_transport_reposition while stopped\n");
wait_count = 0;
request_pos.frame = 10000;
jack_transport_reposition(client1, &request_pos);
do {
display_transport_state();
jack_sleep(100); // 100 ms
if (wait_count++ == 10)
break;
ts = jack_transport_query(client2, &pos);
Log("Locating.... frame = %ld\n", pos.frame);
} while (pos.frame != 10000);
ts = jack_transport_query(client2, &pos);
Log("Transport current frame = %ld\n", pos.frame);
if (pos.frame != 10000) {
printf("!!! ERROR !!! jack_transport_reposition does not work correctly\n");
}
jack_transport_stop(client1);
/* Tell the JACK server that we are ready to roll. Our
* process() callback will start running now. */
} else {
printf("Transport check is disabled...\n");
}
time_before_exit = time_to_run;
while (time_before_exit != 0) {
jack_sleep (1 * 1000);
time_before_exit--;
}
if (jack_deactivate(client2) != 0) {
printf("!!! ERROR !!! jack_deactivate does not return 0 for client2 !\n");
}
if (jack_deactivate(client1) != 0) {
printf("!!! ERROR !!! jack_deactivate does not return 0 for client1 !\n");
}
/**
* Checking jack_frame_time.
*/
Log("Testing jack_frame_time...\n");
jack_set_process_callback(client1, process4, client1);
jack_activate(client1);
jack_sleep(2 * 1000);
/**
* Checking jack_get_cycle_times.
*/
Log("Testing jack_get_cycle_times...\n");
jack_deactivate(client1);
jack_set_process_callback(client1, process5, client1);
jack_activate(client1);
jack_sleep(3 * 1000);
/**
* Checking alternate thread model
*/
Log("Testing alternate thread model...\n");
jack_deactivate(client1);
jack_set_process_callback(client1, NULL, NULL); // remove callback
jack_set_process_thread(client1, jack_thread, client1);
jack_activate(client1);
jack_sleep(2 * 1000);
/**
* Checking callback exiting : when the return code is != 0, the client is desactivated.
*/
Log("Testing callback exiting...\n");
jack_deactivate(client1);
jack_set_process_thread(client1, NULL, NULL); // remove thread callback
jack_set_process_callback(client1, process3, 0);
jack_activate(client1);
jack_sleep(3 * 1000);
/**
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
Closing program
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
*/
if (jack_deactivate(client2) != 0) {
printf("!!! ERROR !!! jack_deactivate does not return 0 for client2 !\n");
}
if (jack_deactivate(client1) != 0) {
printf("!!! ERROR !!! jack_deactivate does not return 0 for client1 !\n");
}
if (jack_client_close(client2) != 0) {
printf("!!! ERROR !!! jack_client_close does not return 0 for client2 !\n");
}
if (jack_client_close(client1) != 0) {
printf("!!! ERROR !!! jack_client_close does not return 0 for client1 !\n");
}
if (xrun == 0) {
Log("No Xrun have been detected during this test... cool !\n");
} else {
printf("%i Xrun have been detected during this session (seen callback messages to see where are the problems).\n", xrun);
}
free(framecollect);
free(signal1);
free(signal2);
Log("Exiting jack_test...\n");
fclose(file);
printf("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*--*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n");
sprintf (filename, "framegraph-%i.gnu", cur_buffer_size);
file = fopen(filename, "w");
if (file == NULL) {
fprintf(stderr, "Erreur dans l'ouverture du fichier");
exit( -1);
}
fprintf(file, "reset\n");
fprintf(file, "set terminal png transparent nocrop enhanced\n");
fprintf(file, "set output 'framegraph-%i-1.png'\n", cur_buffer_size);
fprintf(file, "set title \"Frame time evolution during jack_test run\"\n");
fprintf(file, "set yrange [ %i.00000 : %i.0000 ] noreverse nowriteback\n", cur_buffer_size - (cur_buffer_size / 8), cur_buffer_size + (cur_buffer_size / 8));
fprintf(file, "set xrange [ 0.00000 : %i.0000 ] noreverse nowriteback\n" , linecount - 1);
fprintf(file, "set ylabel \"Frametime evolution (d(ft)/dt)\"\n");
fprintf(file, "set xlabel \"FrameTime\"\n");
fprintf(file, "set label \"| buf.siz:%i | fr.wl:%i | rg.ports:%i | 2nd.client:%i | trsprt:%i |\" at graph 0.01, 0.04\n", linebuf, linefw, lineports, linecl2, linetransport);
fprintf(file, "plot 'framefile-%i.dat' using 2 with impulses title \"Xruns\",'framefile-%i.dat' using 1 with line title \"Sampletime variation at %i\"\n", cur_buffer_size, cur_buffer_size, cur_buffer_size);
fprintf(file, "set output 'framegraph-%i-2.png'\n", cur_buffer_size);
fprintf(file, "set title \"Frame time evolution during jack_test run\"\n");
fprintf(file, "set yrange [ %i.00000 : %i.0000 ] noreverse nowriteback\n", (int) (cur_buffer_size / 2), (int) (2*cur_buffer_size + (cur_buffer_size / 8)));
fprintf(file, "set xrange [ 0.00000 : %i.0000 ] noreverse nowriteback\n" , linecount - 1);
fprintf(file, "set ylabel \"Frametime evolution (d(ft)/dt)\"\n");
fprintf(file, "set xlabel \"FrameTime\"\n");
fprintf(file, "set label \"| buf.siz:%i | fr.wl:%i | rg.ports:%i | 2nd.client:%i | trsprt:%i |\" at graph 0.01, 0.04\n", linebuf, linefw, lineports, linecl2, linetransport);
fprintf(file, "plot 'framefile-%i.dat' using 2 with impulses title \"Xruns\",'framefile-%i.dat' using 1 with line title \"Sampletime variation at %i\"\n", cur_buffer_size, cur_buffer_size, cur_buffer_size);
fclose(file);
return 0;
}
|
#!/bin/sh
# Apple Documentation Reference: https://apple.co/3Gfe95W
#
# Builds the RadioBrowserKit API documentation with jazzy into DOC_DIR.
#
# To make this script executable, run in terminal:
#   chmod +x build_docs.sh

# Fail on any command error and on use of unset variables
# (pipefail is intentionally omitted: not portable under /bin/sh).
set -eu

PROJECT_NAME="RadioBrowserKit"
DOC_DIR="Documentation/API/"
ROOT_URL="http://de1.api.radio-browser.info/json/"

# Fail early with a clear message instead of a confusing "command not found".
command -v jazzy >/dev/null 2>&1 || {
  printf 'error: jazzy is not installed (try: gem install jazzy)\n' >&2
  exit 1
}

# Create the output directory
mkdir -p "${DOC_DIR}"

# Build the documentation.
# The --documentation glob is quoted on purpose: jazzy expands it itself,
# so the shell must pass it through literally.
jazzy \
  --clean \
  --author "Woodbytes" \
  --author_url "https://woodbytes.me" \
  --github_url "https://github.com/phranck/${PROJECT_NAME}" \
  --output "${DOC_DIR}" \
  --swift-build-tool spm \
  --build-tool-arguments -Xswiftc,-swift-version,-Xswiftc,5 \
  --theme fullwidth \
  --module "${PROJECT_NAME}" \
  --root-url "${ROOT_URL}" \
  --documentation="Documentation/*.md"
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
import { CheckConfiguration, FormattedCheckResult } from './ruleresults';
export class CheckMessageTransformer {
    /**
     * Annotates check results in place with the pass/fail message supplied
     * by their matching configuration.
     *
     * For each check result, the configuration sharing its `id` is looked up
     * (ids are assumed unique across `checkConfigurations`). When a match
     * exists, the result's `message` is replaced by `passMessage()` or
     * `failMessage()` according to the check outcome — but only when that
     * callback is defined; otherwise the existing message is kept. Results
     * with no matching configuration are left untouched.
     *
     * @param checks check results to annotate (mutated in place)
     * @param checkConfigurations configurations providing message callbacks
     */
    public addMessagesToChecks(checks: FormattedCheckResult[], checkConfigurations: CheckConfiguration[]): void {
        for (const checkResult of checks) {
            // find() short-circuits on the first match, unlike the previous
            // filter().pop() which always scanned the entire array.
            const checkConfig = checkConfigurations.find(config => config.id === checkResult.id);
            if (checkConfig === undefined) {
                continue;
            }
            if (checkResult.result) {
                checkResult.message = checkConfig.passMessage ? checkConfig.passMessage() : checkResult.message;
            } else {
                checkResult.message = checkConfig.failMessage ? checkConfig.failMessage() : checkResult.message;
            }
        }
    }
}
|
<filename>yupc-backend/yupc-auth/yupc-auth-server/src/main/java/com/github/yupc/auth/service/ApplicationService.java
package com.github.yupc.auth.service;
import com.github.yupc.auth.common.jwt.TokenVo;
/**
 * Token-issuing service for registered applications.
 *
 * @author yupc
 * @createTime 2017-12-13 23:04
 */
public interface ApplicationService {

    /**
     * Issues an access token for the given application credentials.
     *
     * @param appId     the application identifier
     * @param appSecret the secret paired with {@code appId}
     * @return the issued token wrapper
     */
    TokenVo applyToken(String appId, String appSecret) ;
}
|
import { Test, TestingModule } from '@nestjs/testing';
import { CharactersController } from './characters.controller';
import { CharactersService } from './characters.service';
// Unit tests for CharactersController, wired through Nest's TestingModule.
describe('Characters Controller', () => {
  let charactersController: CharactersController;

  // Build a fresh testing module (controller + its service) before every
  // test so no state leaks between cases.
  beforeEach(async () => {
    const characters: TestingModule = await Test.createTestingModule({
      controllers: [CharactersController],
      providers: [CharactersService]
    }).compile();
    charactersController = characters.get<CharactersController>(CharactersController);
  });

  describe('Characters list', () => {
    it('Should return a list with all of house characters', () => {
      // NOTE(review): findOne('1') is asserted synchronously — confirm it
      // does not return a Promise, otherwise this expectation needs await.
      expect(charactersController.findOne('1')).toMatchObject([
        {"id": 1, "name": "<NAME>"}, {"id": 2, "name": "Aemon"}, {"id": 3, "name": "<NAME>"}
      ])
    });
  });
});
#!/bin/bash
# Start (or re-attach to) a preconfigured tmux workspace.
#
# If a tmux server is already running, simply attach to it. Otherwise a new
# detached session is created and populated with three windows:
#   1 music : ncmpcpp + cava visualizer + pfetch
#   2 proc  : gotop + htop + ranger
#   3 edit  : nvim plus a plain shell
# before finally attaching.

SESSION=work

pid="$(pidof tmux)"
# exec
if test -n "$pid"; then
    tmux attach
else
    tmux new -d -s "$SESSION"
    # window 1: music player, visualizer and system fetch
    tmux new-window -t "$SESSION:1" -n 'music'
    tmux send-keys "ncmpcpp" C-m
    tmux split-window -h
    tmux select-pane -t 1
    tmux resize-pane -R 10
    tmux split-window -v
    tmux resize-pane -D 1
    tmux send-keys "cava" C-m
    tmux select-pane -t 3
    tmux send-keys "pfetch" C-m
    # window 2: process monitors and file manager
    tmux new-window -t "$SESSION:2" -n 'proc'
    tmux send-keys "gotop" C-m
    tmux split-window -h
    tmux select-pane -t 1
    tmux resize-pane -R 5
    tmux split-window -v
    tmux resize-pane -D 1
    tmux send-keys "htop" C-m
    tmux select-pane -t 3
    tmux send-keys "ranger" C-m
    # window 3: editor with a shell beside it
    tmux new-window -t "$SESSION:3" -n 'edit'
    tmux send-keys "nvim" C-m
    tmux split-window -h
    tmux resize-pane -R 40
    tmux select-pane -t 2
    tmux send-keys C-m
    # land on the music window when attaching
    tmux select-window -t "$SESSION:1"
    tmux attach-session -t "$SESSION"
fi
|
/** @file */
#ifndef CROW_WARN_H
#define CROW_WARN_H
#include <igris/buffer.h>
namespace crow
{
    /// Emit a warning with the given message text.
    /// Declaration only — the definition lives in a crow source file.
    void warn(igris::buffer msg);
}
#endif
<filename>packages/marketing/src/components/pages/Privacy/Privacy.tsx
import React from 'react';
import sharedClasses from '../../../common.module.css';
/**
 * Static marketing page rendering the Crypto Crowdfund privacy policy.
 *
 * Pure presentational component: no props, no state, no side effects — it
 * only emits the policy text using the shared typography CSS classes.
 */
export default function Privacy() {
  return (
    <div>
      <h1 className={sharedClasses.h1}>Privacy Policy</h1>
      <section>
        <h2 className={sharedClasses.h2}>Respecting Privacy</h2>
        <p className={sharedClasses.p}>
          Privacy is a fundamental right, and at Crypto Crowdfund and its group
          companies, it is treated as such. This document sets forth our current
          policy in this regard.
        </p>
      </section>
      <section>
        <h2 className={sharedClasses.h2}>Sources of Personal Data</h2>
        <p className={sharedClasses.p}>
          Our business is founded on the principle that people matter. Respect
          for people is the foundation of our business. Respect for people
          includes respecting their right to privacy.
        </p>
        <p className={sharedClasses.p}>
          We are engaged in the business of using innovative ways to bring
          health to all in India. Our website seeks to bring cases of those in
          need of healthcare and other essential services to the attention of
          those in a position to make a difference and the opportunity to do so.
          The users of our website are those in need and those seeking to help.
        </p>
        <p className={sharedClasses.p}>
          In the conduct of our business, we collect and process personal data
          of our users and visitors. Personal data is collected in the course of
          transacting on the website. Personal data is also collected by Crypto
          Crowdfund and its affiliates outside of website through in-person
          meetings, participation in conferences and other business forums,
          transaction or promotion of business, and with respect to job
          applicants, through job application materials and interviews. Personal
          data is also collected through social media; from solicited and
          unsolicited communications; responses to promotional and other
          materials, among others. If your personal data is available with us,
          more likely than not, it has been shared by you with us.
        </p>
        <p className={sharedClasses.p}>
          Data collection technologies built into our websites,
          telecommunication systems, digital advertising, social media, etc. are
          additional sources of personal data collection. In addition to
          personal data that a user voluntarily furnishes, these electronic
          systems also collect personal data without the knowledge of the user,
          including user’s preferences, frequency of access, IP addresses, type
          of browser, communication device or operating systems used, and
          geographic location, etc.
        </p>
        <p className={sharedClasses.p}>
          Separately, we also collect and process personal data of our employees
          and independent contractors at the time of, in the course of and
          following their retention.
        </p>
      </section>
      <section>
        <h2 className={sharedClasses.h2}>Processing of Personal Data</h2>
        <p className={sharedClasses.p}>
          We acquire, maintain and process personal data where it is necessary
          for the pursuit of legitimate business interests, balancing this
          interest against the data subject’s interests and fundamental rights.
          Legitimate interests include establishing and maintaining
          relationships, servicing their anticipated and actual needs, and for
          administrative purposes. Towards that end, we may use various contact
          management, analytics and processing software and other tools and
          techniques, including marketing by use of electronic means.
        </p>
        <p className={sharedClasses.p}>
          We may share personal data with one or more of our group companies for
          performance of business functions and for internal analytics. Some of
          our group companies or service providers may be located in
          jurisdictions that are outside of the jurisdiction of the personal
          data subject, or in jurisdictions that offer a lower level of privacy
          or data protection. In such circumstances, we have put in place
          appropriate safeguards in accordance with applicable law and in line
          with this policy.
        </p>
        <p className={sharedClasses.p}>
          We may also process personal data where it is necessary for entering
          into or performing under a contract. Finally, in certain
          circumstances, we may process personal data on the basis of consent of
          the data subject for specific purposes.
        </p>
        <p className={sharedClasses.p}>
          We may also use or disclose personal data if we are required by law to
          do so or if we reasonably believe that use or disclosure is necessary
          to protect our rights and/or to comply with judicial or regulatory
          proceedings, a court order or other legal process. We do not otherwise
          disclose or sell to any third party any personal data that we collect
          or which comes into our possession without the consent of the data
          subject. The only exception to the foregoing is where any part of our
          business is sold to a third party, in which case the acquiror of the
          business will likely also acquire personal data in our possession or
          control.
        </p>
      </section>
      <section>
        <h2 className={sharedClasses.h2}>Right to Object and Unsubscribe</h2>
        <p className={sharedClasses.p}>
          You have the right to object to your personal data being used for
          direct marketing by us. You may opt out by adjusting your
          communication preferences by logging into your profile and turning off
          the communication or by writing to
          <a href="mailto:<EMAIL>" className={sharedClasses.link}>
            {' <EMAIL> '}
          </a>
          . You also have the right to unsubscribe from electronic
          communications by utilizing the unsubscribe feature contained in our
          emails. Note that the unsubscribe feature contained in an email may
          unsubscribe you from only certain (but not all) kinds of electronic or
          non-electronic communications originating from us.
        </p>
        <p className={sharedClasses.p}>
          Also, by continuing to the next step, you are willingly giving Crypto
          Crowdfund, permission to contact you or communicate to you and all the
          contacts given by you via whatsapp, email, sms, and other modes of
          notification.
        </p>
      </section>
      <section>
        <h2 className={sharedClasses.h2}>Notice to EU Data Subjects</h2>
        <p className={sharedClasses.p}>
          As noted above, we process personal data with the consent of the data
          subject, to enter into or perform under a contract between us and the
          data subject or a business represented by the data subject, to comply
          with legal obligations, or where we have a legitimate interest to do
          so.
        </p>
        <p className={sharedClasses.p}>
          As noted above, we process personal data with the consent of the data
          subject, to enter into or perform under a contract between us and the
          data subject or a business represented by the data subject, to comply
          with legal obligations, or where we have a legitimate interest to do
          so. Further, as noted above, subject to the exceptions permitted by
          law, a data subject has the right to access her or his information,
          rectify or update such information, erase such information (subject to
          our need to retain certain information for legal or other purposes),
          receive a copy of the data subject’s information, and to object to or
          restrict the processing of such subject’s personal data. The data
          subject may do so by writing to us at{' '}
          <a href="mailto:<EMAIL>" className={sharedClasses.link}>
            {' <EMAIL> '}
          </a>
        </p>
      </section>
    </div>
  );
}
|
<filename>SmokingProject-master/app/src/main/java/com/example/kakyunglee/smokingproject/activity/dto/NoticeListDTO.java
package com.example.kakyunglee.smokingproject.activity.dto;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
/**
 * Serializable container for a list of notices.
 */
public class NoticeListDTO implements Serializable {

    // Serialization version marker.
    private static final long serialVersionUID = 1L;

    // Backing list of notices; kept public to preserve the existing API.
    public List<NoticeDTO> noticeLists = new ArrayList<NoticeDTO>();

    /**
     * Prints the contents of every notice in the list to stdout.
     * (Name kept for backward compatibility; it returns nothing.)
     */
    public void getName() {
        for (int i = 0; i < noticeLists.size(); i++) {
            System.out.println(noticeLists.get(i).getContents());
        }
    }
}
|
import './global-utilities';
import { parseUtcDate } from './utilities';
import { Injectable } from '@angular/core';
import {
BackendService,
CaseType,
Column,
ColumnFilter,
ColumnFilters,
ColumnSort,
CurrentFilter,
CurrentPagination,
CurrentSorter,
FilterChangedArgs,
FieldType,
GridOption,
OdataOption,
Pagination,
PaginationChangedArgs,
SearchTerm,
SortChangedArgs,
SortDirection,
SortDirectionString
} from './../models/index';
import { OdataService } from './odata.service';
// Debounce timer handle for filter typing. NOTE(review): module-level, so it is
// shared across all GridOdataService instances — presumably only one grid uses
// this service at a time; confirm before reusing the service on multiple grids.
let timer: any;
// Delay (ms) after the user stops typing before the filter query is executed.
const DEFAULT_FILTER_TYPING_DEBOUNCE = 750;
// Default OData "$top" value (items per page) when no page size is supplied.
const DEFAULT_ITEMS_PER_PAGE = 25;
// Fallback page size when a pagination-changed event carries no pageSize.
const DEFAULT_PAGE_SIZE = 20;
@Injectable()
export class GridOdataService implements BackendService {
  private _currentFilters: CurrentFilter[];
  private _currentPagination: CurrentPagination;
  private _currentSorters: CurrentSorter[];
  private _columnDefinitions: Column[];
  private _grid: any;
  odataService: OdataService;
  options: OdataOption;
  pagination: Pagination | undefined;
  defaultOptions: OdataOption = {
    top: DEFAULT_ITEMS_PER_PAGE,
    orderBy: '',
    caseType: CaseType.pascalCase
  };

  constructor() {
    this.odataService = new OdataService();
  }

  /** Getter for the Grid Options pulled through the Grid Object */
  private get _gridOptions(): GridOption {
    return (this._grid && this._grid.getOptions) ? this._grid.getOptions() : {};
  }

  /** Build and return the full OData query string from the current service state. */
  buildQuery(): string {
    return this.odataService.buildQuery();
  }

  /**
   * Initialize the service with options, optional pagination and grid instance.
   * Pagination page size (when provided) overrides the default "$top".
   */
  init(options: OdataOption, pagination?: Pagination, grid?: any): void {
    this._grid = grid;
    const mergedOptions = { ...this.defaultOptions, ...options };
    if (pagination && pagination.pageSize) {
      mergedOptions.top = pagination.pageSize;
    }
    this.odataService.options = { ...mergedOptions, top: mergedOptions.top || this.defaultOptions.top };
    this.options = this.odataService.options;
    this.pagination = pagination;

    // save current pagination as Page 1 and page size as "top"
    this._currentPagination = {
      pageNumber: 1,
      pageSize: this.odataService.options.top || this.defaultOptions.top
    };

    if (grid && grid.getColumns) {
      this._columnDefinitions = (options && options.columnDefinitions) || grid.getColumns();
      this._columnDefinitions = this._columnDefinitions.filter((column: Column) => !column.excludeFromQuery);
    }
  }

  /** Merge new OData options into the existing ones. */
  updateOptions(serviceOptions?: OdataOption) {
    this.options = { ...this.options, ...serviceOptions };
  }

  /** Remove a single column filter from the OData query by field name. */
  removeColumnFilter(fieldName: string): void {
    this.odataService.removeColumnFilter(fieldName);
  }

  /** Get the Filters that are currently used by the grid */
  getCurrentFilters(): CurrentFilter[] {
    return this._currentFilters;
  }

  /** Get the Pagination that is currently used by the grid */
  getCurrentPagination(): CurrentPagination {
    return this._currentPagination;
  }

  /** Get the Sorters that are currently used by the grid */
  getCurrentSorters(): CurrentSorter[] {
    return this._currentSorters;
  }

  /*
   * Reset the pagination options
   */
  resetPaginationOptions() {
    this.odataService.updateOptions({
      skip: 0
    });
  }

  /** Persist a single column filter (field name, raw value, search terms). */
  saveColumnFilter(fieldName: string, value: string, terms?: any[]) {
    this.odataService.saveColumnFilter(fieldName, value, terms);
  }

  /*
   * FILTERING
   */
  processOnFilterChanged(event: Event, args: FilterChangedArgs): Promise<string> {
    const serviceOptions: GridOption = args.grid.getOptions();
    const backendApi = serviceOptions.backendServiceApi;

    if (backendApi === undefined) {
      throw new Error('Something went wrong in the GridOdataService, "backendServiceApi" is not initialized');
    }

    // only add a delay when user is typing, on select dropdown filter it will execute right away
    let debounceTypingDelay = 0;
    if (event && (event.type === 'keyup' || event.type === 'keydown')) {
      debounceTypingDelay = backendApi.filterTypingDebounce || DEFAULT_FILTER_TYPING_DEBOUNCE;
    }

    // keep current filters & always save it as an array (columnFilters can be an object when it is dealt by SlickGrid Filter)
    this._currentFilters = this.castFilterToColumnFilter(args.columnFilters);

    const promise = new Promise<string>((resolve, reject) => {
      // reset Pagination, then build the OData query which we will use in the WebAPI callback
      // wait a minimum user typing inactivity before processing any query
      clearTimeout(timer);
      timer = setTimeout(() => {
        // loop through all columns to inspect filters & set the query
        this.updateFilters(args.columnFilters);

        this.resetPaginationOptions();
        resolve(this.odataService.buildQuery());
      }, debounceTypingDelay);
    });

    return promise;
  }

  /*
   * PAGINATION
   */
  processOnPaginationChanged(event: Event, args: PaginationChangedArgs) {
    const pageSize = +(args.pageSize || DEFAULT_PAGE_SIZE);
    this.updatePagination(args.newPage, pageSize);

    // build the OData query which we will use in the WebAPI callback
    return this.odataService.buildQuery();
  }

  /*
   * SORTING
   */
  processOnSortChanged(event: Event, args: SortChangedArgs) {
    const sortColumns = (args.multiColumnSort) ? args.sortCols : new Array({ sortCol: args.sortCol, sortAsc: args.sortAsc });

    // loop through all columns to inspect sorters & set the query
    this.updateSorters(sortColumns);

    // build the OData query which we will use in the WebAPI callback
    return this.odataService.buildQuery();
  }

  /**
   * loop through all columns to inspect filters & update backend service filteringOptions
   * @param columnFilters
   * @param isUpdatedByPreset true when the filters come from grid "presets" (then column defs are looked up by id)
   */
  updateFilters(columnFilters: ColumnFilters | CurrentFilter[], isUpdatedByPreset?: boolean) {
    let searchBy = '';
    const searchByArray: string[] = [];

    // loop through all columns to inspect filters
    for (const columnId in columnFilters) {
      if (columnFilters.hasOwnProperty(columnId)) {
        const columnFilter = columnFilters[columnId];

        // if user defined some "presets", then we need to find the filters from the column definitions instead
        let columnDef: Column | undefined;
        if (isUpdatedByPreset && Array.isArray(this._columnDefinitions)) {
          columnDef = this._columnDefinitions.find((column: Column) => {
            return column.id === columnFilter.columnId;
          });
        } else {
          columnDef = columnFilter.columnDef;
        }
        if (!columnDef) {
          throw new Error('[Backend Service API]: Something went wrong in trying to get the column definition of the specified filter (or preset filters). Did you make a typo on the filter columnId?');
        }

        let fieldName = columnDef.queryField || columnDef.queryFieldFilter || columnDef.field || columnDef.name || '';
        const fieldType = columnDef.type || 'string';
        const searchTerms = (columnFilter ? columnFilter.searchTerms : null) || [];
        let fieldSearchValue = (Array.isArray(searchTerms) && searchTerms.length === 1) ? searchTerms[0] : '';
        if (typeof fieldSearchValue === 'undefined') {
          fieldSearchValue = '';
        }

        if (typeof fieldSearchValue !== 'string' && !searchTerms) {
          // fixed typo in message ("ODdata" -> "OData") and closed the example braces
          throw new Error(`OData filter searchTerm property must be provided as type "string", if you use filter with options then make sure your IDs are also string. For example: filter: {model: Filters.select, collection: [{ id: "0", value: "0" }, { id: "1", value: "1" }]}`);
        }

        fieldSearchValue = '' + fieldSearchValue; // make sure it's a string
        const matches = fieldSearchValue.match(/^([<>!=\*]{0,2})(.*[^<>!=\*])([\*]?)$/); // group 1: Operator, 2: searchValue, 3: last char is '*' (meaning starts with, ex.: abc*)
        const operator = columnFilter.operator || ((matches) ? matches[1] : '');
        let searchValue = (!!matches) ? matches[2] : '';
        const lastValueChar = (!!matches) ? matches[3] : (operator === '*z' ? '*' : '');
        const bypassOdataQuery = columnFilter.bypassBackendQuery || false;

        // no need to query if search value is empty
        if (fieldName && searchValue === '' && searchTerms.length === 0) {
          this.removeColumnFilter(fieldName);
          continue;
        }

        // escaping the search value
        searchValue = searchValue.replace(`'`, `''`); // escape single quotes by doubling them
        searchValue = encodeURIComponent(searchValue); // encode URI of the final search value

        // extra query arguments
        if (bypassOdataQuery) {
          // push to our temp array and also trim white spaces
          if (fieldName) {
            this.saveColumnFilter(fieldName, fieldSearchValue, searchTerms);
          }
        } else {
          searchBy = '';

          // titleCase the fieldName so that it matches the WebApi names
          if (this.odataService.options.caseType === CaseType.pascalCase) {
            fieldName = String.titleCase(fieldName || '');
          }

          // when having more than 1 search term (then check if we have a "IN" or "NOT IN" filter search)
          if (searchTerms && searchTerms.length > 1) {
            const tmpSearchTerms = [];

            if (operator === 'IN') {
              // example:: (Stage eq "Expired" or Stage eq "Renewal")
              for (let j = 0, lnj = searchTerms.length; j < lnj; j++) {
                tmpSearchTerms.push(`${fieldName} eq '${searchTerms[j]}'`);
              }
              searchBy = tmpSearchTerms.join(' or ');
              searchBy = `(${searchBy})`;
            } else if (operator === 'NIN' || operator === 'NOTIN' || operator === 'NOT IN') {
              // example:: (Stage ne "Expired" and Stage ne "Renewal")
              for (let k = 0, lnk = searchTerms.length; k < lnk; k++) {
                tmpSearchTerms.push(`${fieldName} ne '${searchTerms[k]}'`);
              }
              searchBy = tmpSearchTerms.join(' and ');
              searchBy = `(${searchBy})`;
            }
          } else if (operator === '*' || operator === 'a*' || operator === '*z' || lastValueChar !== '') {
            // first/last character is a '*' will be a startsWith or endsWith
            searchBy = (operator === '*' || operator === '*z')
              ? `endswith(${fieldName}, '${searchValue}')`
              : `startswith(${fieldName}, '${searchValue}')`;
          } else if (fieldType === FieldType.date) {
            // date field needs to be UTC and within DateTime function
            const dateFormatted = parseUtcDate(searchValue, true);
            if (dateFormatted) {
              searchBy = `${fieldName} ${this.mapOdataOperator(operator)} DateTime'${dateFormatted}'`;
            }
          } else if (fieldType === FieldType.string) {
            // string field needs to be in single quotes
            if (operator === '') {
              searchBy = `substringof('${searchValue}', ${fieldName})`;
            } else {
              searchBy = `${fieldName} ${this.mapOdataOperator(operator)} '${searchValue}'`;
            }
          } else {
            // any other field type (or undefined type)
            searchValue = fieldType === FieldType.number ? searchValue : `'${searchValue}'`;
            searchBy = `${fieldName} ${this.mapOdataOperator(operator)} ${searchValue}`;
          }

          // push to our temp array and also trim white spaces
          if (searchBy !== '') {
            searchByArray.push(String.trim(searchBy));
            this.saveColumnFilter(fieldName || '', fieldSearchValue, searchTerms);
          }
        }
      }
    }

    // update the service options with filters for the buildQuery() to work later
    this.odataService.updateOptions({
      filter: (searchByArray.length > 0) ? searchByArray.join(' and ') : '',
      skip: undefined
    });
  }

  /**
   * Update the pagination component with it's new page number and size
   * @param newPage
   * @param pageSize
   */
  updatePagination(newPage: number, pageSize: number) {
    this._currentPagination = {
      pageNumber: newPage,
      pageSize
    };
    this.odataService.updateOptions({
      top: pageSize,
      skip: (newPage - 1) * pageSize
    });
  }

  /**
   * loop through all columns to inspect sorters & update backend service orderBy
   * @param sortColumns sorters coming from a grid sort event
   * @param presetSorters sorters coming from grid "presets" (mutually exclusive with sortColumns)
   */
  updateSorters(sortColumns?: ColumnSort[], presetSorters?: CurrentSorter[]) {
    let sortByArray: any[] = [];
    const sorterArray: CurrentSorter[] = [];

    if (!sortColumns && presetSorters) {
      // make the presets the current sorters, also make sure that all direction are in lowercase for OData
      sortByArray = presetSorters;
      sortByArray.forEach((sorter) => sorter.direction = sorter.direction.toLowerCase() as SortDirectionString);

      // display the correct sorting icons on the UI, for that it requires (columnId, sortAsc) properties
      const tmpSorterArray = sortByArray.map((sorter) => {
        const columnDef = this._columnDefinitions.find((column: Column) => column.id === sorter.columnId);

        sorterArray.push({
          columnId: columnDef ? ((columnDef.queryField || columnDef.queryFieldSorter || columnDef.field || columnDef.id) + '') : (sorter.columnId + ''),
          direction: sorter.direction
        });

        // return only the column(s) found in the Column Definitions ELSE null
        if (columnDef) {
          return {
            columnId: sorter.columnId,
            sortAsc: sorter.direction.toUpperCase() === SortDirection.ASC
          };
        }
        return null;
      });
      this._grid.setSortColumns(tmpSorterArray);
    } else if (sortColumns && !presetSorters) {
      // build the SortBy string, it could be multisort, example: customerNo asc, purchaserName desc
      if (sortColumns && sortColumns.length === 0) {
        sortByArray = new Array(this.defaultOptions.orderBy); // when empty, use the default sort
      } else {
        if (sortColumns) {
          for (const columnDef of sortColumns) {
            if (columnDef.sortCol) {
              let fieldName = (columnDef.sortCol.queryField || columnDef.sortCol.queryFieldSorter || columnDef.sortCol.field || columnDef.sortCol.id) + '';
              let columnFieldName = (columnDef.sortCol.field || columnDef.sortCol.id) + '';
              if (this.odataService.options.caseType === CaseType.pascalCase) {
                fieldName = String.titleCase(fieldName);
                columnFieldName = String.titleCase(columnFieldName);
              }

              sorterArray.push({
                columnId: columnFieldName,
                direction: columnDef.sortAsc ? 'asc' : 'desc'
              });
            }
          }
          sortByArray = sorterArray;
        }
      }
    }

    // transform the sortby array into a CSV string for OData
    sortByArray = sortByArray as CurrentSorter[];
    const csvString = sortByArray.map((sorter) => `${sorter.columnId} ${sorter.direction.toLowerCase()}`).join(',');
    this.odataService.updateOptions({
      orderBy: (this.odataService.options.caseType === CaseType.pascalCase) ? String.titleCase(csvString) : csvString
    });

    // keep current Sorters and update the service options with the new sorting
    this._currentSorters = sortByArray as CurrentSorter[];

    // build the OData query which we will use in the WebAPI callback
    return this.odataService.buildQuery();
  }

  //
  // private functions
  // -------------------

  /**
   * Cast provided filters (could be in multiple format) into an array of ColumnFilter
   * @param columnFilters
   */
  private castFilterToColumnFilter(columnFilters: ColumnFilters | CurrentFilter[]): CurrentFilter[] {
    // keep current filters & always save it as an array (columnFilters can be an object when it is dealt by SlickGrid Filter)
    const filtersArray: ColumnFilter[] = ((typeof columnFilters === 'object') ? Object.keys(columnFilters).map(key => columnFilters[key]) : columnFilters) as CurrentFilter[];

    return filtersArray.map((filter) => {
      const tmpFilter: CurrentFilter = { columnId: filter.columnId || '' };
      if (filter.operator) {
        tmpFilter.operator = filter.operator;
      }
      if (Array.isArray(filter.searchTerms)) {
        tmpFilter.searchTerms = filter.searchTerms;
      }
      return tmpFilter;
    });
  }

  /**
   * Mapper for mathematical operators (ex.: <= is "le", > is "gt")
   * @param string operator
   * @returns string map
   */
  private mapOdataOperator(operator: string) {
    let map = '';
    switch (operator) {
      case '<':
        map = 'lt';
        break;
      case '<=':
        map = 'le';
        break;
      case '>':
        map = 'gt';
        break;
      case '>=':
        map = 'ge';
        break;
      case '<>':
      case '!=':
        map = 'ne';
        break;
      case '=':
      case '==':
      default:
        map = 'eq';
        break;
    }
    return map;
  }
}
|
<reponame>TY980910/Light-Musicbox<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-08-24 21:51:57
"""
网易云音乐 Menu
"""
from __future__ import print_function, unicode_literals, division, absolute_import
import time
# import curses
import threading
import sys
import os
import signal
import webbrowser
import locale
import hashlib
from collections import namedtuple
from future.builtins import range, str
from api import NetEase
from player import Player
# from ui import Ui
# from osdlyrics import show_lyrics_new_process
from config import Config
from utils import notify
from storage import Storage
from cache import Cache
import logger
# Use the user's default locale so wide/CJK characters render correctly.
locale.setlocale(locale.LC_ALL, "")
# Module-level logger.
log = logger.getLogger(__name__)
def carousel(left, right, x):
    """Wrap ``x`` into the inclusive range [left, right].

    A value past ``right`` wraps around to ``left``, a value below
    ``left`` wraps around to ``right``; anything in range is unchanged.
    """
    return left if x > right else (right if x < left else x)
# Keyboard shortcut help table shown by the "帮助" (help) menu entry.
# Each row is [key, English label (padded for display), Chinese label].
shortcut = [
    ["j", "Down ", "下移"],
    ["k", "Up ", "上移"],
    ["h", "Back ", "后退"],
    ["l", "Forward ", "前进"],
    ["u", "Prev page ", "上一页"],
    ["d", "Next page ", "下一页"],
    ["f", "Search ", "快速搜索"],
    ["[", "Prev song ", "上一曲"],
    ["]", "Next song ", "下一曲"],
    [" ", "Play/Pause", "播放/暂停"],
    ["?", "Shuffle ", "手气不错"],
    ["=", "Volume+ ", "音量增加"],
    ["-", "Volume- ", "音量减少"],
    ["m", "Menu ", "主菜单"],
    ["p", "Present/History ", "当前/历史播放列表"],
    ["i", "Music Info ", "当前音乐信息"],
    ["Shift+p", "Playing Mode ", "播放模式切换"],
    ["Shift+a", "Enter album ", "进入专辑"],
    ["a", "Add ", "添加曲目到打碟"],
    ["z", "DJ list ", "打碟列表(退出后清空)"],
    ["s", "Star ", "添加到本地收藏"],
    ["c", "Collection", "本地收藏列表"],
    ["r", "Remove ", "删除当前条目"],
    ["Shift+j", "Move Down ", "向下移动当前条目"],
    ["Shift+k", "Move Up ", "向上移动当前条目"],
    [",", "Like ", "喜爱"],
    ["Shfit+c", "Cache ", "缓存歌曲到本地"],
    [".", "Trash FM ", "删除 FM"],
    ["/", "Next FM ", "下一 FM"],
    ["q", "Quit ", "退出"],
    ["w", "Quit&Clear", "退出并清除用户信息"],
]
class Menu(object):
def __init__(self):
self.config = Config()
self.datatype = "main"
self.title = "网易云音乐"
self.datalist = [
"排行榜",
"艺术家",
"新碟上架",
"精选歌单",
"我的歌单",
"主播电台",
"每日推荐歌曲",
"每日推荐歌单",
"私人FM",
"搜索",
"帮助",
]
self.offset = 0
self.index = 0
self.storage = Storage()
self.storage.load()
self.collection = self.storage.database["collections"]
self.player = Player()
self.player.playing_song_changed_callback = self.song_changed_callback
self.cache = Cache()
# self.ui = Ui()
self.api = NetEase()
# self.screen = curses.initscr()
# self.screen.keypad(1)
self.step = 10
self.stack = []
self.djstack = []
self.at_playing_list = False
self.enter_flag = True
# signal.signal(signal.SIGWINCH, self.change_term)
# signal.signal(signal.SIGINT, self.send_kill)
self.menu_starts = time.time()
self.countdown_start = time.time()
self.countdown = -1
self.is_in_countdown = False
self.keyword = ''
@property
def user(self):
return self.storage.database["user"]
@property
def account(self):
return self.user["username"]
@property
def md5pass(self):
return self.user["password"]
@property
def userid(self):
return self.user["user_id"]
@property
def username(self):
return self.user["nickname"]
def login(self):
if self.account and self.md5pass:
account, md5pass = self.account, self.md5pass
else:
#modified
account = str(input('name:'))
password = str(input('password:'))
md5pass = hashlib.md5(password.encode("utf-8")).hexdigest()
resp = self.api.login(account, md5pass)
if resp["code"] == 200:
userid = resp["account"]["id"]
nickname = resp["profile"]["nickname"]
self.storage.login(account, md5pass, userid, nickname)
print('that is right........')
return True
else:
self.storage.logout()
# x = self.ui.build_login_error()
# if x != ord("1"):
# return False
return self.login()
def to_login(self,username,passwd):
if self.account and self.md5pass:
account, md5pass = self.account, self.md5pass
else:
# modified
# account = str(input('name:'))
# password = str(input('password:'))
account = username
password = <PASSWORD>
md5pass = hashlib.md5(password.encode("utf-8")).hexdigest()
resp = self.api.login(account, md5pass)
if resp["code"] == 200:
userid = resp["account"]["id"]
nickname = resp["profile"]["nickname"]
self.storage.login(account, md5pass, userid, nickname)
print('that is right........')
return True
else:
self.storage.logout()
# x = self.ui.build_login_error()
# if x != ord("1"):
# return False
# return self.login()
return False
def search(self, category):
# self.ui.screen.timeout(-1)
SearchArg = namedtuple("SearchArg", ["prompt", "api_type", "post_process"])
category_map = {
"songs": SearchArg("搜索歌曲:", 1, lambda datalist: datalist),
"albums": SearchArg("搜索专辑:", 10, lambda datalist: datalist),
"artists": SearchArg("搜索艺术家:", 100, lambda datalist: datalist),
"playlists": SearchArg("搜索网易精选集:", 1000, lambda datalist: datalist),
}
prompt, api_type, post_process = category_map[category]
# keyword = self.ui.get_param(prompt)
# keyword = str(input('Input the song\'s name:'))
keyword = self.keyword
if not keyword:
return []
data = self.api.search(keyword, api_type)
if not data:
return data
datalist = post_process(data.get(category, []))
return self.api.dig_info(datalist, category)
def change_term(self, signum, frame):
self.ui.screen.clear()
self.ui.screen.refresh()
def send_kill(self, signum, fram):
self.player.stop()
self.cache.quit()
self.storage.save()
# curses.endwin()
sys.exit()
def update_alert(self, version):
latest = Menu().check_version()
if latest != version and latest != 0:
notify("MusicBox Update is available", 1)
time.sleep(0.5)
notify(
"NetEase-MusicBox installed version:"
+ version
+ "\nNetEase-MusicBox latest version:"
+ latest,
0,
)
def check_version(self):
# 检查更新 && 签到
try:
mobile = self.api.daily_task(is_mobile=True)
pc = self.api.daily_task(is_mobile=False)
if mobile["code"] == 200:
notify("移动端签到成功", 1)
if pc["code"] == 200:
notify("PC端签到成功", 1)
data = self.api.get_version()
return data["info"]["version"]
except KeyError as e:
return 0
def start_fork(self, version):
pid = os.fork()
if pid == 0:
Menu().update_alert(version)
else:
Menu().start()
def play_pause(self):
if self.player.is_empty:
return
if not self.player.playing_flag:
self.player.resume()
else:
self.player.pause()
def next_song(self):
if self.player.is_empty:
return
self.player.next()
def previous_song(self):
if self.player.is_empty:
return
self.player.prev()
def start(self):
# while True:
# print('input 1:login,2:search,100:break')
# num = int(input('Please input your choice:'))
# print('you input {}'.format(num))
# if (num == 1):
# print('username before: {}'.format(self.user))
# myplaylist = self.request_api(self.api.user_playlist, self.userid)
# print(myplaylist)
# print('username: {}'.format(self.user))
# elif num == 2:
# datalist = self.search('songs')
# print('search result:')
# for idxx,val in enumerate(datalist):
# print('{}:{}-{}'.format(idxx,val['song_name'],val['artist']))
# if idxx > 10:
# break;
# elif num == 100:
# break
#############################################################afer:
def print_info():
print('----------------------------')
print('1:清空信息并退出')
print('2:上移')
print('3:下移')
print('4:搜索')
print('5:播放')
print('6:登录')
print('7:个人歌单')
print('100:直接退出')
print('----------------------------')
while True:
datatype = self.datatype
title = self.title
datalist = self.datalist
offset = self.offset
idx = self.index
step = self.step
print_info()
key = int(input('请输入你的选择:'))
if key == 100:
print('正在退出....')
self.player.stop()
self.storage.save()
break
elif key == 1:
self.api.logout()
print('正在退出....')
self.player.stop()
break
elif key == 2:
if idx == offset:
if offset == 0:
continue
self.offset -= step
# 移动光标到最后一列
self.index = offset - 1
else:
self.index = carousel(
offset, min(len(datalist), offset + step) - 1, idx - 1
)
self.menu_starts = time.time()
elif key == 3:
if idx == min(len(datalist), offset + step) - 1:
if offset + step >= len(datalist):
continue
self.offset += step
# 移动光标到第一列
self.index = offset + step
else:
self.index = carousel(
offset, min(len(datalist), offset + step) - 1, idx + 1
)
self.menu_starts = time.time()
elif key == 4:
self.index = 0
self.offset = 0
idx = 1;
SearchCategory = namedtuple("SearchCategory", ["type", "title"])
idx_map = {
0: SearchCategory("playlists", "精选歌单搜索列表"),
1: SearchCategory("songs", "歌曲搜索列表"),
2: SearchCategory("artists", "艺术家搜索列表"),
3: SearchCategory("albums", "专辑搜索列表"),
}
self.datatype, self.title = idx_map[idx]
self.datalist = self.search(self.datatype)
print('search result:')
for idxx,val in enumerate(self.datalist):
print('{}:{}-{}'.format(idxx,val['song_name'],val['artist']))
if idxx > 10:
break;
which_one = int(input('输入想要播放的序号:'))
while which_one > 10 or which_one < 0:
which_one = int(input('序号不合理,重新输入:'))
self.player.new_player_list('songs',self.title,self.datalist,-1)
self.idx = which_one
self.player.play_or_pause(self.idx,self.at_playing_list)
elif key == 5:
print('当前的歌单:')
cnt = 0
for key in self.player.songs.keys():
print('{}.{}----{}'.format(cnt,self.player.songs[key]['song_name'],self.player.songs[key]['artist']))
cnt += 1
if cnt > 10:
break
which_one = int(input('输入想要播放的序号:'))
while which_one > 10 or which_one < 0:
which_one = int(input('序号不合理,重新输入:'))
self.idx = which_one
self.player.play_or_pause(self.idx,self.at_playing_list)
elif key == 6:
myplaylist = self.request_api(self.api.user_playlist, self.userid)
self.datatype = 'top_playlists'
myplaylist = self.api.dig_info(myplaylist, self.datatype)
notify('登录成功')
elif key == 7:
myplaylist = self.request_api(self.api.user_playlist, self.userid)
self.datatype = 'top_playlists'
myplaylist = self.api.dig_info(myplaylist, self.datatype)
print('{}的歌单:'.format(self.username))
for x,y in enumerate(myplaylist):
print('{}.{}'.format(x,y['playlist_name']))
def get_songs_info(self,search_info,choice):
self.keyword = search_info
if choice<0 or choice>3:
notify('选择有误')
return
idx = choice;
SearchCategory = namedtuple("SearchCategory", ["type", "title"])
idx_map = {
0: SearchCategory("playlists", "精选歌单搜索列表"),
1: SearchCategory("songs", "歌曲搜索列表"),
2: SearchCategory("artists", "艺术家搜索列表"),
3: SearchCategory("albums", "专辑搜索列表"),
}
self.datatype, self.title = idx_map[idx]
self.datalist = self.search(self.datatype)
res = []
if choice == 1:
for idxx, val in enumerate(self.datalist):
res.append('{}(歌曲名)-{}(艺术家))'.format(val['song_name'], val['artist']))
if idxx > 10:
break;
elif choice == 2:
for idxx, val in enumerate(self.datalist):
res.append('艺术家:{}'.format(val['artists_name']))
if idxx > 10:
break;
elif choice == 3:
# print(self.datalist)
for idxx, val in enumerate(self.datalist):
res.append('{}(专辑)-{}(艺术家)'.format(val['albums_name'], val['artists_name']))
if idxx > 10:
break;
else:
pass
return res
def play_which_song(self,which):
# self.player.new_player_list('songs', self.title, self.datalist, -1)
# # self.idx = which
# self.player.play_or_pause(self.idx, self.at_playing_list)
# print('self.at...',self.at_playing_list)
self.player.new_player_list("songs", self.title, self.datalist, -1)
# self.player.end_callback = None
self.player.play_or_pause(which, self.at_playing_list)
# self.at_playing_list = True
def now_total_time(self):
return self.player.process_location,self.player.process_length
def dispatch_enter(self, idx):
# The end of stack
netease = self.api
datatype = self.datatype
title = self.title
datalist = self.datalist
offset = self.offset
index = self.index
self.stack.append([datatype, title, datalist, offset, index])
if idx >= len(self.datalist):
return False
if datatype == "main":
self.choice_channel(idx)
# 该艺术家的热门歌曲
elif datatype == "artists":
artist_name = datalist[idx]["artists_name"]
artist_id = datalist[idx]["artist_id"]
self.datatype = "artist_info"
self.title += " > " + artist_name
self.datalist = [
{"item": "{}的热门歌曲".format(artist_name), "id": artist_id},
{"item": "{}的所有专辑".format(artist_name), "id": artist_id},
]
elif datatype == "artist_info":
self.title += " > " + datalist[idx]["item"]
artist_id = datalist[0]["id"]
if idx == 0:
self.datatype = "songs"
songs = netease.artists(artist_id)
self.datalist = netease.dig_info(songs, "songs")
elif idx == 1:
albums = netease.get_artist_album(artist_id)
self.datatype = "albums"
self.datalist = netease.dig_info(albums, "albums")
elif datatype == "djchannels":
radio_id = datalist[idx]["id"]
programs = netease.djprograms(radio_id)
self.title += " > " + datalist[idx]["name"]
self.datatype = "songs"
self.datalist = netease.dig_info(programs, "songs")
# 该专辑包含的歌曲
elif datatype == "albums":
album_id = datalist[idx]["album_id"]
songs = netease.album(album_id)
self.datatype = "songs"
self.datalist = netease.dig_info(songs, "songs")
self.title += " > " + datalist[idx]["albums_name"]
# 精选歌单选项
elif datatype == "recommend_lists":
data = self.datalist[idx]
self.datatype = data["datatype"]
self.datalist = netease.dig_info(data["callback"](), self.datatype)
self.title += " > " + data["title"]
# 全站置顶歌单包含的歌曲
elif datatype in ["top_playlists", "playlists"]:
playlist_id = datalist[idx]["playlist_id"]
songs = netease.playlist_detail(playlist_id)
self.datatype = "songs"
self.datalist = netease.dig_info(songs, "songs")
self.title += " > " + datalist[idx]["playlist_name"]
# 分类精选
elif datatype == "playlist_classes":
# 分类名称
data = self.datalist[idx]
self.datatype = "playlist_class_detail"
self.datalist = netease.dig_info(data, self.datatype)
self.title += " > " + data
# 某一分类的详情
elif datatype == "playlist_class_detail":
# 子类别
data = self.datalist[idx]
self.datatype = "top_playlists"
log.error(data)
self.datalist = netease.dig_info(netease.top_playlists(data), self.datatype)
self.title += " > " + data
# 歌曲评论
elif datatype in ["songs", "fmsongs"]:
song_id = datalist[idx]["song_id"]
comments = self.api.song_comments(song_id, limit=100)
try:
hotcomments = comments["hotComments"]
comcomments = comments["comments"]
except KeyError:
hotcomments = comcomments = []
self.datalist = []
for one_comment in hotcomments:
self.datalist.append(
"(热评 %s❤️ ️)%s:%s"
% (
one_comment["likedCount"],
one_comment["user"]["nickname"],
one_comment["content"],
)
)
for one_comment in comcomments:
self.datalist.append(one_comment["content"])
self.datatype = "comments"
self.title = "网易云音乐 > 评论:%s" % datalist[idx]["song_name"]
self.offset = 0
self.index = 0
# 歌曲榜单
elif datatype == "toplists":
songs = netease.top_songlist(idx)
self.title += " > " + self.datalist[idx]
self.datalist = netease.dig_info(songs, "songs")
self.datatype = "songs"
# 搜索菜单
elif datatype == "search":
self.index = 0
self.offset = 0
SearchCategory = namedtuple("SearchCategory", ["type", "title"])
idx_map = {
0: SearchCategory("playlists", "精选歌单搜索列表"),
1: SearchCategory("songs", "歌曲搜索列表"),
2: SearchCategory("artists", "艺术家搜索列表"),
3: SearchCategory("albums", "专辑搜索列表"),
}
self.datatype, self.title = idx_map[idx]
self.datalist = self.search(self.datatype)
else:
self.enter_flag = False
def show_playing_song(self):
if self.player.is_empty:
return
if not self.at_playing_list:
self.stack.append(
[self.datatype, self.title, self.datalist, self.offset, self.index]
)
self.at_playing_list = True
self.datatype = self.player.info["player_list_type"]
self.title = self.player.info["player_list_title"]
self.datalist = [self.player.songs[i] for i in self.player.info["player_list"]]
self.index = self.player.info["idx"]
self.offset = self.index // self.step * self.step
def song_changed_callback(self):
if self.at_playing_list:
self.show_playing_song()
def fm_callback(self):
# log.debug('FM CallBack.')
data = self.get_new_fm()
self.player.append_songs(data)
if self.datatype == "fmsongs":
if self.player.is_empty:
return
self.datatype = self.player.info["player_list_type"]
self.title = self.player.info["player_list_title"]
self.datalist = []
for i in self.player.info["player_list"]:
self.datalist.append(self.player.songs[i])
self.index = self.player.info["idx"]
self.offset = self.index // self.step * self.step
if not self.player.playing_flag:
switch_flag = False
self.player.play_or_pause(self.index, switch_flag)
def request_api(self, func, *args):
result = func(*args)
if result:
return result
if not self.login():
print('you really need to login')
notify("You need to log in")
return False
return func(*args)
def get_new_fm(self):
data = self.request_api(self.api.personal_fm)
if not data:
return []
return self.api.dig_info(data, "fmsongs")
    def choice_channel(self, idx):
        """Enter the main-menu channel selected by `idx` (0-10).

        Resets the cursor/offset, then populates datalist/datatype/title
        for the chosen channel. Several channels go through request_api()
        because they require a logged-in session.
        """
        self.offset = 0
        self.index = 0
        if idx == 0:
            self.datalist = self.api.toplists
            self.title += " > 排行榜"
            self.datatype = "toplists"
        elif idx == 1:
            artists = self.api.top_artists()
            self.datalist = self.api.dig_info(artists, "artists")
            self.title += " > 艺术家"
            self.datatype = "artists"
        elif idx == 2:
            albums = self.api.new_albums()
            self.datalist = self.api.dig_info(albums, "albums")
            self.title += " > 新碟上架"
            self.datatype = "albums"
        elif idx == 3:
            # Static two-entry submenu; each entry carries its own callback.
            self.datalist = [
                {
                    "title": "全站置顶",
                    "datatype": "top_playlists",
                    "callback": self.api.top_playlists,
                },
                {
                    "title": "分类精选",
                    "datatype": "playlist_classes",
                    "callback": lambda: [],
                },
            ]
            self.title += " > 精选歌单"
            self.datatype = "recommend_lists"
        elif idx == 4:
            myplaylist = self.request_api(self.api.user_playlist, self.userid)
            self.datatype = "top_playlists"
            self.datalist = self.api.dig_info(myplaylist, self.datatype)
            self.title += " > " + self.username + " 的歌单"
        elif idx == 5:
            self.datatype = "djchannels"
            self.title += " > 主播电台"
            self.datalist = self.api.djchannels()
        elif idx == 6:
            self.datatype = "songs"
            self.title += " > 每日推荐歌曲"
            myplaylist = self.request_api(self.api.recommend_playlist)
            # NOTE(review): request_api returns False on failure, not -1 —
            # this guard looks ineffective; confirm intended sentinel.
            if myplaylist == -1:
                return
            self.datalist = self.api.dig_info(myplaylist, self.datatype)
        elif idx == 7:
            myplaylist = self.request_api(self.api.recommend_resource)
            self.datatype = "top_playlists"
            self.title += " > 每日推荐歌单"
            self.datalist = self.api.dig_info(myplaylist, self.datatype)
        elif idx == 8:
            self.datatype = "fmsongs"
            self.title += " > 私人FM"
            self.datalist = self.get_new_fm()
        elif idx == 9:
            self.datatype = "search"
            self.title += " > 搜索"
            self.datalist = ["歌曲", "艺术家", "专辑", "网易精选集"]
        elif idx == 10:
            self.datatype = "help"
            self.title += " > 帮助"
            self.datalist = shortcut
|
import * as React from 'react';
import { observer } from 'mobx-react';
import { polygonHull } from 'd3-polygon';
import * as _ from 'lodash';
import { css } from '@patternfly/react-styles';
import styles from '@patternfly/react-styles/css/components/Topology/topology-components';
import CollapseIcon from '@patternfly/react-icons/dist/esm/icons/compress-alt-icon';
import NodeLabel from '../nodes/labels/NodeLabel';
import { Layer } from '../layers';
import { GROUPS_LAYER } from '../../const';
import { hullPath, maxPadding, useCombineRefs, useHover } from '../../utils';
import { BadgeLocation, isGraph, Node, NodeShape, NodeStyle, PointTuple } from '../../types';
import {
useDragNode,
useSvgAnchor,
WithContextMenuProps,
WithDndDropProps,
WithDragNodeProps,
WithSelectionProps
} from '../../behavior';
import { CollapsibleGroupProps } from './types';
// Props for the expanded (non-collapsed) default group renderer.
// Behavior-mixin props (drag, selection, drop, context menu, collapse)
// are merged in as optional via the Partial<...> intersection below.
type DefaultGroupExpandedProps = {
  className?: string;
  element: Node; // the group node whose children define the hull
  droppable?: boolean;
  canDrop?: boolean;
  dropTarget?: boolean;
  dragging?: boolean;
  hover?: boolean; // externally forced hover state; overrides internal hover detection
  label?: string; // Defaults to element.getLabel()
  secondaryLabel?: string;
  showLabel?: boolean; // Defaults to true
  truncateLength?: number; // Defaults to 13
  badge?: string;
  badgeColor?: string;
  badgeTextColor?: string;
  badgeBorderColor?: string;
  badgeClassName?: string;
  badgeLocation?: BadgeLocation;
  labelIconClass?: string; // Icon to show in label
  labelIcon?: string;
  labelIconPadding?: number;
} & Partial<CollapsibleGroupProps & WithDragNodeProps & WithSelectionProps & WithDndDropProps & WithContextMenuProps>;
type PointWithSize = [number, number, number];
// Pick the label anchor for a group hull: the point(s) with the largest Y
// (within a 5px tolerance band). When several qualify, X is the midpoint of
// their horizontal extent and the size is the largest of theirs.
// export for testing only
export function computeLabelLocation(points: PointWithSize[]): PointWithSize {
  const threshold = 5;
  let lowPoints: PointWithSize[];
  for (const point of points) {
    const delta = !lowPoints ? Infinity : Math.round(point[1]) - Math.round(lowPoints[0][1]);
    if (delta > threshold) {
      // Strictly lower on screen: restart the candidate band.
      lowPoints = [point];
    } else if (Math.abs(delta) <= threshold) {
      lowPoints.push(point);
    }
  }

  let minX = lowPoints[0][0];
  let maxX = lowPoints[0][0];
  let maxSize = lowPoints[0][2];
  for (const [x, , size] of lowPoints) {
    if (x < minX) {
      minX = x;
    }
    if (x > maxX) {
      maxX = x;
    }
    if (size > maxSize) {
      maxSize = size;
    }
  }
  return [(minX + maxX) / 2, lowPoints[0][1], maxSize];
}
// Renders an expanded group as a convex hull around its visible children,
// with an optional label (badge, icon, collapse action) below the hull.
const DefaultGroupExpanded: React.FunctionComponent<DefaultGroupExpandedProps> = ({
  className,
  element,
  collapsible,
  selected,
  onSelect,
  hover,
  label,
  secondaryLabel,
  showLabel = true,
  truncateLength,
  dndDropRef,
  droppable,
  canDrop,
  dropTarget,
  onContextMenu,
  contextMenuOpen,
  dragging,
  dragNodeRef,
  badge,
  badgeColor,
  badgeTextColor,
  badgeBorderColor,
  badgeClassName,
  badgeLocation,
  labelIconClass,
  labelIcon,
  labelIconPadding,
  onCollapseChange
}) => {
  const [hovered, hoverRef] = useHover();
  const [labelHover, labelHoverRef] = useHover();
  const dragLabelRef = useDragNode()[1];
  const refs = useCombineRefs<SVGPathElement>(hoverRef, dragNodeRef);
  // External `hover` prop wins over locally detected hover state.
  const isHover = hover !== undefined ? hover : hovered;
  const anchorRef = useSvgAnchor();
  const outlineRef = useCombineRefs(dndDropRef, anchorRef);
  // Cached across renders so the hull/label stay frozen while droppable.
  const labelLocation = React.useRef<PointWithSize>();
  const pathRef = React.useRef<string>();
  // Alternate styling for every other level of group nesting.
  let parent = element.getParent();
  let altGroup = false;
  while (!isGraph(parent)) {
    altGroup = !altGroup;
    parent = parent.getParent();
  }
  // cast to number and coerce
  const padding = maxPadding(element.getStyle<NodeStyle>().padding ?? 17);
  const hullPadding = (point: PointWithSize | PointTuple) => (point[2] || 0) + padding;
  // Recompute the hull unless we're a droppable target with a cached path
  // (keeps the outline stable while something is dragged over the group).
  if (!droppable || !pathRef.current || !labelLocation.current) {
    const children = element.getNodes().filter(c => c.isVisible());
    if (children.length === 0) {
      return null;
    }
    // Circles contribute their center + radius; other shapes all 4 corners.
    const points: (PointWithSize | PointTuple)[] = [];
    _.forEach(children, c => {
      if (c.getNodeShape() === NodeShape.circle) {
        const bounds = c.getBounds();
        const { width, height } = bounds;
        const { x, y } = bounds.getCenter();
        const radius = Math.max(width, height) / 2;
        points.push([x, y, radius] as PointWithSize);
      } else {
        // add all 4 corners
        const { width, height, x, y } = c.getBounds();
        points.push([x, y, 0] as PointWithSize);
        points.push([x + width, y, 0] as PointWithSize);
        points.push([x, y + height, 0] as PointWithSize);
        points.push([x + width, y + height, 0] as PointWithSize);
      }
    });
    // polygonHull needs > 2 points; otherwise use the raw points directly.
    const hullPoints: (PointWithSize | PointTuple)[] =
      points.length > 2 ? polygonHull(points as PointTuple[]) : (points as PointTuple[]);
    if (!hullPoints) {
      return null;
    }
    // change the box only when not dragging
    pathRef.current = hullPath(hullPoints as PointTuple[], hullPadding);
    // Compute the location of the group label.
    labelLocation.current = computeLabelLocation(hullPoints as PointWithSize[]);
  }
  const groupClassName = css(
    styles.topologyGroup,
    className,
    altGroup && 'pf-m-alt-group',
    canDrop && 'pf-m-highlight',
    dragging && 'pf-m-dragging',
    selected && 'pf-m-selected'
  );
  const innerGroupClassName = css(
    styles.topologyGroup,
    className,
    altGroup && 'pf-m-alt-group',
    canDrop && 'pf-m-highlight',
    dragging && 'pf-m-dragging',
    selected && 'pf-m-selected',
    (isHover || labelHover) && 'pf-m-hover',
    canDrop && dropTarget && 'pf-m-drop-target'
  );
  return (
    <g ref={labelHoverRef} onContextMenu={onContextMenu} onClick={onSelect} className={groupClassName}>
      <Layer id={GROUPS_LAYER}>
        <g ref={refs} onContextMenu={onContextMenu} onClick={onSelect} className={innerGroupClassName}>
          <path ref={outlineRef} className={styles.topologyGroupBackground} d={pathRef.current} />
        </g>
      </Layer>
      {showLabel && (
        <NodeLabel
          className={styles.topologyGroupLabel}
          x={labelLocation.current[0]}
          y={labelLocation.current[1] + hullPadding(labelLocation.current) + 24}
          paddingX={8}
          paddingY={5}
          dragRef={dragNodeRef ? dragLabelRef : undefined}
          status={element.getNodeStatus()}
          secondaryLabel={secondaryLabel}
          truncateLength={truncateLength}
          badge={badge}
          badgeColor={badgeColor}
          badgeTextColor={badgeTextColor}
          badgeBorderColor={badgeBorderColor}
          badgeClassName={badgeClassName}
          badgeLocation={badgeLocation}
          labelIconClass={labelIconClass}
          labelIcon={labelIcon}
          labelIconPadding={labelIconPadding}
          onContextMenu={onContextMenu}
          contextMenuOpen={contextMenuOpen}
          hover={isHover || labelHover}
          actionIcon={collapsible ? <CollapseIcon /> : undefined}
          onActionIconClick={() => onCollapseChange(element, true)}
        >
          {label || element.getLabel()}
        </NodeLabel>
      )}
    </g>
  );
};
export default observer(DefaultGroupExpanded);
|
package automation.ui.common;
import automation.report.dao.AutoReportDao;
import automation.report.dao.AutoReportImpl;
import automation.utils.ConfUtils;
import org.apache.commons.io.FileUtils;
import org.openqa.selenium.OutputType;
import org.openqa.selenium.TakesScreenshot;
import org.openqa.selenium.WebDriver;
import org.testng.Reporter;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.util.Date;
/**
 * Captures WebDriver screenshots and stores them to the TestNG report
 * directory (and optionally to the report database).
 */
public class ScreenShot {
    // Lazily created on first DB store; shared across all tests.
    private static AutoReportDao autoReportService;

    /**
     * Grab the current page as PNG bytes.
     * On failure the error is printed and a 1024-byte zeroed array is
     * returned, so callers never see null (but may store a blank image).
     */
    private static byte[] takeScreenShot(WebDriver driver) {
        byte[] bytes = new byte[1024];
        try {
            bytes = ((TakesScreenshot) driver).getScreenshotAs(OutputType.BYTES);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return bytes;
    }

    /**
     * Write the image bytes under the HTML report's img folder and log an
     * inline <img> tag so it appears in the report.
     * NOTE(review): the path uses Windows-style "\\" separators, so this
     * breaks on POSIX systems — confirm tests only run on Windows.
     */
    private static String storeImageToFile(byte[] bytes, String picID) {
        String dir = Reporter.getCurrentTestResult().getTestContext().getOutputDirectory() + "\\..\\html\\img\\";
        String filepath = dir + picID + ".jpg";
        try {
            InputStream inputStream = new ByteArrayInputStream(bytes);
            FileUtils.copyInputStreamToFile(inputStream, new File(filepath));
        } catch (IOException | NullPointerException e) {
            System.out.println(e.getMessage());
        }
        Reporter.log("<img class='pimg' src='img/" + picID + ".jpg' width=100 />");
        return filepath;
    }

    /** Persist the image to the report DB, creating the DAO on first use. */
    private static boolean storeImageToDB(WebDriver driver, byte[] bytes, String picID, String createTime) {
        if (autoReportService == null) {
            autoReportService = AutoReportImpl.createInstance(ConfUtils.getConf(System.getProperty("environment")));
        }
        return autoReportService.insertImage(picID, bytes, driver.getCurrentUrl(), createTime);
    }

    /**
     * Public entry point: capture, store to file, and (if configured)
     * store to the report DB. Returns the generated picture id.
     */
    public static String takeScreenShot(WebDriver driver, String createTime) {
        byte[] bytes = takeScreenShot(driver);
        String picID = getPictureId();
        storeImageToFile(bytes, picID);
        if (ConfUtils.useReportDB()) {
            storeImageToDB(driver, bytes, picID, createTime);
        }
        return picID;
    }

    /**
     * Build an id from the current timestamp formatted as
     * yyyyMMddHHmmssSSS, then add one.
     * (Format explanation, translated: "%1$" selects the first argument;
     * a leading 0 pads with zeros instead of spaces; the following number
     * is the total width; the conversion letter renders the value.)
     * NOTE(review): the "+ 1 for thread safety" comment below is dubious —
     * two threads in the same millisecond still collide; confirm.
     */
    public static String getPictureId() {
        Date date = new Date();
        date.setTime(System.currentTimeMillis());
        String str = String.format("%1$tY%1$tm%1$td%1$tH%1$tM%1$tS%1$tL", date);
        return String.valueOf(Long.parseLong(str) + 1);// thread safety so add one
    }
}
|
def match(obj, *args):
    """Check nested-dict membership: walk `obj` through all but the last
    positional argument as keys, then verify the final argument (a mapping
    of expected key/value pairs) against the node reached.

    Returns True when every path key exists and every expected pair matches.
    """
    *keys, expected = args
    node = obj
    for key in keys:
        if not (isinstance(node, dict) and key in node):
            return False
        node = node[key]
    return all(
        field in node and node[field] == want
        for field, want in expected.items()
    )
<reponame>jeckhummer/wf-constructor<gh_stars>0
import {
CLOSE_WO_EDITOR,
OPEN_WO_EDITOR,
OPEN_WO_EDITOR_TAB,
UPDATE_EDITOR_WO, SET_WO_CUSTOM_FIELDS, SET_WO_CUSTOM_FIELDS_LOADING_ANIMATION_VISIBILITY
} from '../../actions/WOEditor';
// Identifiers for the WO editor's tabs.
export const WO_EDITOR_TABS = {
    CUSTOM_FIELDS: 1,
    NOTIFICATIONS: 2
};

// Initial state; also what CLOSE_WO_EDITOR resets to.
const DEFAULT_STATE = {
    open: false,
    activeTab: WO_EDITOR_TABS.CUSTOM_FIELDS,
    WO: {},
    // NOTE(review): initialized to {} even though the name suggests an id — confirm.
    selectedCustomFieldId: {},
    customFieldsLoading: false,
    customFields: null
};
// Reducer for the work-order editor panel: open/close state, active tab,
// the WO draft being edited, and its custom fields.
export const WOEditor = (state = DEFAULT_STATE, action) => {
    if (action.type === OPEN_WO_EDITOR) {
        return {...state, open: true, WO: action.WO};
    }
    if (action.type === CLOSE_WO_EDITOR) {
        // Full reset — discards any in-progress edits.
        return DEFAULT_STATE;
    }
    if (action.type === OPEN_WO_EDITOR_TAB) {
        return {...state, activeTab: action.tab};
    }
    if (action.type === UPDATE_EDITOR_WO) {
        // Shallow-merge the diff into the WO draft.
        return {...state, WO: {...state.WO, ...action.diff}};
    }
    if (action.type === SET_WO_CUSTOM_FIELDS) {
        return {...state, customFields: action.fields};
    }
    if (action.type === SET_WO_CUSTOM_FIELDS_LOADING_ANIMATION_VISIBILITY) {
        return {...state, customFieldsLoading: action.visible};
    }
    return state;
};
(defn reverse-array
  "Returns a seq of the items in coll in reverse order."
  [coll]
  (reverse coll))

(reverse-array [1 2 3 4 5]) ; returns (5 4 3 2 1)
<reponame>alterem/smartCityService
package com.zhcs.service;
import com.zhcs.entity.CallRecordEntity;
import java.util.List;
import java.util.Map;
//*****************************************************************************
/**
* <p>Title:CallRecordService</p>
* <p>Description: 通话记录表</p>
* <p>Copyright: Copyright (c) 2017</p>
* <p>Company: 深圳市智慧城市管家信息科技有限公司 </p>
* @author 刘晓东 - Alter
* @version v1.0 2017年2月23日
*/
//*****************************************************************************
public interface CallRecordService {
    /** Fetch a single call record by primary key, or null if absent. */
    CallRecordEntity queryObject(Long id);

    /** List records matching the filter/paging parameters in {@code map}. */
    List<CallRecordEntity> queryList(Map<String, Object> map);

    /** Count records matching the filter parameters in {@code map}. */
    int queryTotal(Map<String, Object> map);

    /** Insert a new call record. */
    void save(CallRecordEntity callRecord);

    /** Update an existing call record. */
    void update(CallRecordEntity callRecord);

    /** Delete one record by primary key. */
    void delete(Long id);

    /** Delete several records by primary key. */
    void deleteBatch(Long[] ids);
}
|
<reponame>atpsoft/dohutil
require 'dohutil/core_ext/bigdecimal'
module Doh
  # Tests for the BigDecimal core extensions (Integer#to_d and
  # BigDecimal#to_dig) added by dohutil/core_ext/bigdecimal.
  class Test_core_ext_bigdecimal < DohTest::TestGroup
    def test_integer_to_d
      assert_equal(BigDecimal(1), 1.to_d)
    end

    # to_dig formats with 2 decimal places by default, or the given count.
    def test_to_dig_valid
      assert_equal('0.00', BigDecimal('0').to_dig)
      assert_equal('1.00', BigDecimal('1').to_dig)
      assert_equal('1.10', BigDecimal('1.1').to_dig)
      assert_equal('1.11', BigDecimal('1.11134').to_dig)
      assert_equal('1.111', BigDecimal('1.1113456').to_dig(3))
    end

    # Non-finite values format as zero; negative digit counts raise.
    def test_to_dig_errors
      assert_raises(ArgumentError) { BigDecimal('1').to_dig(-1) }
      assert_equal('0.00', BigDecimal('NaN').to_dig)
      assert_equal('0.00', BigDecimal('Infinity').to_dig)
      assert_equal('0.00', BigDecimal('0').to_dig)
      assert_equal('0', BigDecimal('NaN').to_dig(0))
      assert_equal('1', BigDecimal('1').to_dig(0))
      assert_equal('0', BigDecimal('0').to_dig(0))
    end

    # to_dig must not mutate the receiver.
    def test_to_dig_doesnt_modify
      bd = BigDecimal('1.11134')
      assert_equal('1.11134', bd.to_s)
      assert_equal('1.11', bd.to_dig)
      assert_equal('1.11134', bd.to_s)
    end
  end
end
|
<reponame>zjutcv/zjutcv
# Copyright (c) ZJUTCV. All rights reserved.
from .time import print_time
from .file import ZPath as Path
# Public API of this utility package.
__all__ = [
    'print_time', 'Path'
]
|
import git
def get_latest_commit_sha(repo_path: str) -> str:
    """Return the hex SHA of the commit at HEAD of the repo at `repo_path`."""
    repository = git.Repo(repo_path)
    return repository.head.object.hexsha
class BST:
    """Unbalanced binary search tree of comparable values.

    Duplicates and smaller values go to the left subtree; larger values
    go to the right. Nodes are TreeNode instances (defined elsewhere).
    """

    def __init__(self):
        self.root = None

    def insert(self, value):
        """Insert `value` as a new leaf."""
        node = TreeNode(value)
        if not self.root:
            self.root = node
            return
        parent = self.root
        while True:
            if parent.value < value:
                # Descend right; attach when the slot is free.
                if not parent.right:
                    parent.right = node
                    return
                parent = parent.right
            else:
                # Descend left (also handles duplicates).
                if not parent.left:
                    parent.left = node
                    return
                parent = parent.left

    def search(self, value):
        """Return True iff `value` exists in the tree."""
        node = self.root
        while node:
            if node.value == value:
                return True
            node = node.left if value < node.value else node.right
        return False

    def traverse(self):
        """Print all values in ascending (in-order) order, one per line."""
        def _inorder(node):
            if not node:
                return
            _inorder(node.left)
            print(node.value)
            _inorder(node.right)

        _inorder(self.root)
<filename>code401Challenges/src/main/java/code401Challenges/fifoAnimalShelter/Animal.java<gh_stars>0
package code401Challenges.fifoAnimalShelter;
/**
 * Base class for shelter animals. Subclasses are expected to set the
 * bark/meow flags (they default to false here and are never assigned
 * in this class).
 */
public class Animal {
    protected String name;
    protected String gender;
    protected String color;
    protected int ageInYears;
    // Vocalization flags; left at the JVM default (false) by this class.
    protected boolean bark;
    protected boolean meow;

    public Animal(String name, String gender, String color, int ageInYears){
        this.name = name;
        this.gender = gender;
        this.color = color;
        this.ageInYears = ageInYears;
    }

    public String getName() {
        return this.name;
    }

    public String getGender() {
        return this.gender;
    }

    public String getColor() {
        return this.color;
    }

    public int getAgeInYears() {
        return ageInYears;
    }

    public boolean doesMeow() {
        return this.meow;
    }

    public boolean doesBark() {
        return this.bark;
    }
}
|
<gh_stars>1-10
package com.java.study.algorithm.zuo.abasic.basic_class_07;
/**
* 求n!的结果
*/
/**
 * Computes n! (factorial), recursively and iteratively.
 */
public class Code_01_Factorial {

    /**
     * Recursive factorial.
     * Fix: the original returned {@code n} for the base case, making
     * Factorial(0) == 0, but 0! is 1. It also recursed forever for
     * negative input.
     *
     * @param n non-negative input; note the result overflows long for n > 20
     * @return n!
     * @throws IllegalArgumentException if n is negative
     */
    public static long Factorial(int n) {
        if (n < 0) {
            throw new IllegalArgumentException("n must be non-negative");
        }
        if (n <= 1) {
            return 1;
        }
        return n * Factorial(n - 1);
    }

    /**
     * Iterative factorial: O(n) time, O(1) space, no recursion depth.
     * Same base-case fix as {@link #Factorial(int)}.
     *
     * @throws IllegalArgumentException if n is negative
     */
    public static long Factorial2(int n) {
        if (n < 0) {
            throw new IllegalArgumentException("n must be non-negative");
        }
        long result = 1;
        for (int i = 2; i <= n; i++) {
            result = i * result;
        }
        return result;
    }

    public static void main(String[] args) {
        // NOTE: values overflow 'long' for i > 20, so later lines wrap around.
        for (int i = 0; i <= 30; i++) {
            System.out.println(Factorial(i));
            System.out.println(Factorial2(i));
        }
    }
}
import * as assert from "assert";
import { IRecipe, IRecipeItem, RecipeType } from "../recipe";
import { transformRecipe } from "./gw2shinies";
// Verifies that transformRecipe maps raw GW2Shinies API rows (stringly-typed
// item ids/quantities) into normalized IRecipe objects.
describe("remoteparsers/gw2shinies", () => {
  it("can parse recipes", () => {
    const result = example1.map(transformRecipe);
    assert.deepEqual(result, [
      {
        _id: "",
        base_id: "",
        ingredients: [
          { id: 21156, amount: 2 },
          { id: 19700, amount: 5 },
          { id: 19722, amount: 5 },
          { id: 20798, amount: 1 },
        ],
        location: null,
        prerequisites: [],
        results: [
          { id: 21260, amount: 1 },
        ],
        source: "GW2Shinies",
        subtype: "Blueprint",
        timestamp: new Date(0),
        type: "MysticForge",
      },
      {
        _id: "",
        base_id: "",
        ingredients: [
          { id: 23098, amount: 1 },
          { id: 23097, amount: 1 },
          { id: 23096, amount: 1 },
          { id: 20799, amount: 50 },
        ],
        location: null,
        prerequisites: [],
        results: [
          { id: 23095, amount: 1 },
        ],
        source: "GW2Shinies",
        subtype: "Amulet",
        timestamp: new Date(0),
        type: "MysticForge",
      },
      {
        _id: "",
        base_id: "",
        ingredients: [
          { id: 24276, amount: 250 },
          { id: 24277, amount: 1 },
          { id: 20796, amount: 5 },
          { id: 20799, amount: 5 },
        ],
        location: null,
        prerequisites: [],
        results: [
          // "promo" recipes carry a fractional average_yield (6.91 below).
          { id: 24277, amount: 6.91 },
        ],
        source: "GW2Shinies",
        subtype: "CraftingMaterial",
        timestamp: new Date(0),
        type: "MysticForge",
      },
      {
        _id: "",
        base_id: "",
        ingredients: [
          { id: 19976, amount: 100 },
          { id: 12976, amount: 250 },
          { id: 19721, amount: 250 },
          { id: 20852, amount: 1 },
        ],
        location: null,
        prerequisites: [],
        results: [
          { id: 31058, amount: 1 },
        ],
        source: "GW2Shinies",
        subtype: "Weapon",
        timestamp: new Date(0),
        type: "MysticForge",
      },
      {
        _id: "",
        base_id: "",
        ingredients: [
          { id: 19976, amount: 17 },
          { id: 12196, amount: 1 },
          { id: 19663, amount: 6 },
          { id: 20799, amount: 9 },
        ],
        location: null,
        prerequisites: [],
        results: [
          { id: 9731, amount: 1 },
        ],
        source: "GW2Shinies",
        subtype: "Recipe",
        timestamp: new Date(0),
        type: "MysticForge",
      },
    ]);
  });
});

// Fixture: five raw rows in GW2Shinies wire format, one per recipe type
// (blueprint, amulet, promo, weapon, recipe).
const example1 = [
  {
    average_yield: "1",
    recipe_item_1: "21156",
    recipe_item_1_quantity: "2",
    recipe_item_2: "19700",
    recipe_item_2_quantity: "5",
    recipe_item_3: "19722",
    recipe_item_3_quantity: "5",
    recipe_item_4: "20798",
    recipe_item_4_quantity: "1",
    target_recipe: "21260",
    type: "blueprint",
  },
  {
    average_yield: "1",
    recipe_item_1: "23098",
    recipe_item_1_quantity: "1",
    recipe_item_2: "23097",
    recipe_item_2_quantity: "1",
    recipe_item_3: "23096",
    recipe_item_3_quantity: "1",
    recipe_item_4: "20799",
    recipe_item_4_quantity: "50",
    target_recipe: "23095",
    type: "amulet",
  },
  {
    average_yield: "6.91",
    recipe_item_1: "24276",
    recipe_item_1_quantity: "250",
    recipe_item_2: "24277",
    recipe_item_2_quantity: "1",
    recipe_item_3: "20796",
    recipe_item_3_quantity: "5",
    recipe_item_4: "20799",
    recipe_item_4_quantity: "5",
    target_recipe: "24277",
    type: "promo",
  },
  {
    average_yield: "1",
    recipe_item_1: "19976",
    recipe_item_1_quantity: "100",
    recipe_item_2: "12976",
    recipe_item_2_quantity: "250",
    recipe_item_3: "19721",
    recipe_item_3_quantity: "250",
    recipe_item_4: "20852",
    recipe_item_4_quantity: "1",
    target_recipe: "31058",
    type: "weapon",
  },
  {
    average_yield: "1",
    recipe_item_1: "19976",
    recipe_item_1_quantity: "17",
    recipe_item_2: "12196",
    recipe_item_2_quantity: "1",
    recipe_item_3: "19663",
    recipe_item_3_quantity: "6",
    recipe_item_4: "20799",
    recipe_item_4_quantity: "9",
    target_recipe: "9731",
    type: "recipe",
  },
];
|
<reponame>Travis-Richards/game
/** A two dimensional vector.
* Used in various locations to indicate
* either direction or position. Most of
* the functions in this class are not documented
* simply because they're self-documenting.
* */
class Vector {
private double x;
private double y;
public Vector(double x, double y) {
this.x = x;
this.y = y;
}
public double getX() {
return this.x;
}
public double getY() {
return this.y;
}
public boolean isMostlyUp() {
if (y > 0 && y > Math.abs(x)) {
return true;
}
return false;
}
public boolean isMostlyRight() {
if (x > 0 && x > Math.abs(y)) {
return true;
}
return false;
}
public boolean isMostlyDown() {
if (y < 0 && Math.abs(y) > Math.abs(x)) {
return true;
}
return false;
}
public boolean isMostlyLeft() {
if (x < 0 && Math.abs(x) > Math.abs(y)) {
return true;
}
return false;
}
}
|
#!/usr/bin/env bash
set -e
# shellcheck source=cluster-up/cluster/ephemeral-provider-common.sh
source "${KUBEVIRTCI_PATH}/cluster/ephemeral-provider-common.sh"
# Deploy the Cluster Network Addons Operator manifests baked into the node
# image, plus the whereabouts IPAM plugin. The example config CR is skipped
# when KUBVIRT_WITH_CNAO_SKIP_CONFIG is "true".
# NOTE(review): "KUBVIRT_..." is missing an E; the misspelling is used
# consistently across this script, so it is kept for compatibility.
function deploy_cnao() {
    if [ "$KUBEVIRT_WITH_CNAO" == "true" ] || [ "$KUBVIRT_WITH_CNAO_SKIP_CONFIG" == "true" ]; then
        $kubectl create -f /opt/cnao/namespace.yaml
        $kubectl create -f /opt/cnao/network-addons-config.crd.yaml
        $kubectl create -f /opt/cnao/operator.yaml
        if [ "$KUBVIRT_WITH_CNAO_SKIP_CONFIG" != "true" ]; then
            $kubectl create -f /opt/cnao/network-addons-config-example.cr.yaml
        fi

        # Install whereabouts on CNAO lanes
        $kubectl create -f /opt/whereabouts
    fi
}
# Block until the CNAO operator deployment (and, unless config was skipped,
# the networkaddonsconfig CR) reports Available, up to 200s each.
function wait_for_cnao_ready() {
    if [ "$KUBEVIRT_WITH_CNAO" == "true" ] || [ "$KUBVIRT_WITH_CNAO_SKIP_CONFIG" == "true" ]; then
        $kubectl wait deployment -n cluster-network-addons cluster-network-addons-operator --for condition=Available --timeout=200s
        if [ "$KUBVIRT_WITH_CNAO_SKIP_CONFIG" != "true" ]; then
            $kubectl wait networkaddonsconfig cluster --for condition=Available --timeout=200s
        fi
    fi
}
# Create the IstioOperator CR (CNAO-aware variant when CNAO is enabled).
# Istio requires a k8s provider >= 1.20; older 1.1x providers are rejected.
function deploy_istio() {
    if [ "$KUBEVIRT_DEPLOY_ISTIO" == "true" ] && [[ $KUBEVIRT_PROVIDER =~ k8s-1\.1.* ]]; then
        echo "ERROR: Istio is not supported on kubevirtci version < 1.20"
        exit 1
    elif [ "$KUBEVIRT_DEPLOY_ISTIO" == "true" ]; then
        if [ "$KUBEVIRT_WITH_CNAO" == "true" ]; then
            $kubectl create -f /opt/istio/istio-operator-with-cnao.cr.yaml
        else
            $kubectl create -f /opt/istio/istio-operator.cr.yaml
        fi
    fi
}
# Poll the istio-operator CR (20 tries x 5s) until status is HEALTHY;
# exit the script with an error if it never becomes healthy.
function wait_for_istio_ready() {
    if [ "$KUBEVIRT_DEPLOY_ISTIO" == "true" ]; then
        istio_operator_ns=istio-system
        retries=0
        max_retries=20
        while [[ $retries -lt $max_retries ]]; do
            echo "waiting for istio-operator to be healthy"
            sleep 5
            # Fix: use the in-cluster $kubectl wrapper like every other helper
            # here — the original invoked the host's bare `kubectl`, which
            # queries the wrong (or no) cluster.
            health=$($kubectl -n "$istio_operator_ns" get istiooperator istio-operator -o jsonpath="{.status.status}")
            if [[ $health == "HEALTHY" ]]; then
                break
            fi
            retries=$((retries + 1))
        done

        if [[ $retries -eq $max_retries ]]; then
            echo "waiting istio-operator to be healthy failed"
            exit 1
        fi
    fi
}
# Create the Containerized Data Importer operator and its CR from the
# manifests shipped in the node image.
function deploy_cdi() {
    if [ "$KUBEVIRT_DEPLOY_CDI" == "true" ]; then
        $kubectl create -f /opt/cdi-*-operator.yaml
        $kubectl create -f /opt/cdi-*-cr.yaml
    fi
}
# Wait until at least 4 cdi-* pods exist, then until all pods in the cdi
# namespace are Ready (180s timeout).
function wait_for_cdi_ready() {
    if [ "$KUBEVIRT_DEPLOY_CDI" == "true" ]; then
        while [ "$($kubectl get pods --namespace cdi | grep -c 'cdi-')" -lt 4 ]; do
            $kubectl get pods --namespace cdi
            sleep 10
        done
        $kubectl wait --for=condition=Ready pod --timeout=180s --all --namespace cdi
    fi
}
# Bring the ephemeral cluster up: start the provider, fetch kubectl and the
# kubeconfig out of node01, label worker nodes, then deploy and wait for the
# optional add-ons (CNAO, Istio, CDI).
function up() {
    params=$(_add_common_params)
    if echo "$params" | grep -q ERROR; then
        echo -e "$params"
        exit 1
    fi
    eval ${_cli:?} run $params

    # Copy k8s config and kubectl
    ${_cli} scp --prefix ${provider_prefix:?} /usr/bin/kubectl - >${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl
    chmod u+x ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl
    ${_cli} scp --prefix $provider_prefix /etc/kubernetes/admin.conf - >${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig

    # Set server and disable tls check
    export KUBECONFIG=${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubeconfig
    ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl config set-cluster kubernetes --server="https://$(_main_ip):$(_port k8s)"
    ${KUBEVIRTCI_CONFIG_PATH}/$KUBEVIRT_PROVIDER/.kubectl config set-cluster kubernetes --insecure-skip-tls-verify=true

    # Make sure that local config is correct
    prepare_config

    # All subsequent kubectl calls run inside node01 against the admin config.
    kubectl="${_cli} --prefix $provider_prefix ssh node01 -- sudo kubectl --kubeconfig=/etc/kubernetes/admin.conf"

    # For multinode cluster Label all the non master nodes as workers,
    # for one node cluster label master with 'master,worker' roles
    if [ "$KUBEVIRT_NUM_NODES" -gt 1 ]; then
        label="!node-role.kubernetes.io/master"
    else
        label="node-role.kubernetes.io/master"
    fi
    $kubectl label node -l $label node-role.kubernetes.io/worker=''

    deploy_cnao
    deploy_istio
    deploy_cdi

    # Retry the whole readiness barrier until every enabled add-on is up.
    until wait_for_cnao_ready && wait_for_istio_ready && wait_for_cdi_ready; do
        echo "Waiting for cluster components..."
        sleep 5
    done
}
|
def factorial(n):
    """Return n! for a non-negative integer n.

    Raises:
        ValueError: if n is negative. (Previously a negative input
        recursed until RecursionError.)
    """
    if n < 0:
        raise ValueError("factorial() is undefined for negative values")
    if n == 0:
        return 1
    return n * factorial(n - 1)
<filename>app/src/main/java/com/dmitrybrant/response/uploadImagesServerRes/BackImageResponse.java
package com.dmitrybrant.response.uploadImagesServerRes;
/**
 * Placeholder response model for the back-image upload endpoint.
 * No fields are mapped yet.
 */
public class BackImageResponse {
}
|
<gh_stars>0
class Solution:
    """LeetCode 34: find the first and last index of `target` in a sorted list."""

    def search_start(self, nums, start, end):
        """Given that nums[start..end] all equal the target value at nums[end],
        binary-search leftward for the first index holding that value."""
        g_end = end
        while nums[start] != nums[g_end]:
            mid = (start + end) // 2
            if nums[mid] == nums[g_end]:
                # mid is inside the run; the first occurrence is at or left of it.
                end = mid - 1
            else:
                start = mid + 1
        return start

    def search_end(self, nums, start, end):
        """Mirror of search_start: find the last index equal to nums[start]."""
        g_start = start
        while nums[end] != nums[g_start]:
            mid = (start + end) // 2
            if nums[mid] == nums[g_start]:
                start = mid + 1
            else:
                end = mid - 1
        return end

    def searchRange(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]  # [first, last] or [-1, -1] if absent
        Time: O(log n)
        Space: O(1)
        """
        low = 0
        high = len(nums) - 1
        while low <= high:
            mid = (low + high) // 2
            if nums[mid] < target:
                low = mid + 1
            elif nums[mid] > target:
                high = mid - 1
            else:
                # Found one occurrence; expand to the run's boundaries.
                s = self.search_start(nums, low, mid)
                e = self.search_end(nums, mid, high)
                return [s, e]
        return [-1, -1]
if __name__ == '__main__':
    # Inline self-tests: long run, absent target, pair, singleton, run at end.
    assert [1, 16] == Solution().searchRange([1, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8], 8)
    assert [-1, -1] == Solution().searchRange([], 8)
    assert [1, 1] == Solution().searchRange([1, 3], 3)
    assert [0, 0] == Solution().searchRange([1], 1)
    assert [17, 17] == Solution().searchRange([1, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9], 9)
|
<filename>ext/extconf.rb<gh_stars>0
require "mkmf"

# Link against the debug build of librubyfmt unless --release was passed.
link = "debug"
if ARGV[0] == "--release"
  link = "release"
end
$LDFLAGS << " -L./#{link} -lrubyfmt "
# Extension name carries the link mode, e.g. rubyfmt_debug / rubyfmt_release.
create_makefile("rubyfmt_#{link}")
|
#!/bin/sh
# chezmoi install script
# contains code from and inspired by
# https://github.com/client9/shlib
# https://github.com/goreleaser/godownloader
set -e
BINDIR=${BINDIR:-./bin}
TAGARG=latest
LOG_LEVEL=2
EXECARGS=
GITHUB_DOWNLOAD=https://github.com/twpayne/chezmoi/releases/download
tmpdir=$(mktemp -d)
trap 'rm -rf ${tmpdir}' EXIT
# Print a usage message ($1 is the invoked script name) and exit 2.
usage() {
	this="$1"
	cat <<EOF
${this}: download chezmoi and optionally run chezmoi
Usage: ${this} [-b bindir] [-d] [-t tag] [chezmoi-args]
-b sets the installation directory, default is ${BINDIR}.
-d enables debug logging.
-t sets the tag, default is ${TAGARG}.
If chezmoi-args are given, after install chezmoi is executed with chezmoi-args.
EOF
	# Fix: the help text used ${TAG}, which is only assigned later in main(),
	# so the printed default was always empty; ${TAGARG} holds the default.
	exit 2
}
# Resolve platform and release tag, download + checksum-verify the tarball,
# install the binary into BINDIR, and optionally exec chezmoi with the
# remaining arguments.
main() {
	parse_args "$@"

	GOOS=$(get_goos)
	GOARCH=$(get_goarch)
	check_goos_goarch "${GOOS}/${GOARCH}"

	# Resolve e.g. "latest" to a concrete vX.Y.Z tag via the GitHub API.
	TAG="$(real_tag $TAGARG)"
	VERSION="${TAG#v}"

	log_info "found version ${VERSION} for ${TAGARG}/${GOOS}/${GOARCH}"

	# Windows releases ship .exe binaries inside zip archives.
	case "${GOOS}" in
	windows)
		BINSUFFIX=.exe
		FORMAT=zip
		;;
	*)
		BINSUFFIX=
		FORMAT=tar.gz
		;;
	esac

	# download tarball
	NAME="chezmoi_${VERSION}_${GOOS}_${GOARCH}"
	TARBALL="${NAME}.${FORMAT}"
	TARBALL_URL="${GITHUB_DOWNLOAD}/${TAG}/${TARBALL}"
	http_download "${tmpdir}/${TARBALL}" "${TARBALL_URL}"

	# download checksums
	CHECKSUMS="chezmoi_${VERSION}_checksums.txt"
	CHECKSUMS_URL="${GITHUB_DOWNLOAD}/${TAG}/${CHECKSUMS}"
	http_download "${tmpdir}/${CHECKSUMS}" "${CHECKSUMS_URL}"

	# verify checksums
	hash_sha256_verify "${tmpdir}/${TARBALL}" "${tmpdir}/${CHECKSUMS}"

	(cd "${tmpdir}" && untar "${TARBALL}")

	# install binary
	test ! -d "${BINDIR}" && install -d "${BINDIR}"
	BINARY="chezmoi${BINSUFFIX}"
	install "${tmpdir}/${BINARY}" "${BINDIR}/"
	log_info "installed ${BINDIR}/${BINARY}"

	# Hand control to the freshly installed chezmoi if extra args were given.
	if [ -n "${EXECARGS}" ]; then
		# shellcheck disable=SC2086
		exec "${BINDIR}/${BINARY}" $EXECARGS
	fi
}
# Parse -b/-d/-t/-h flags into the globals; everything after the flags is
# collected into EXECARGS and later passed to the installed chezmoi.
parse_args() {
	while getopts "b:dh?t:" arg; do
		case "${arg}" in
		b) BINDIR="${OPTARG}" ;;
		d) LOG_LEVEL=3 ;;
		h | \?) usage "$0" ;;
		t) TAGARG="${OPTARG}" ;;
		*) return 1 ;;
		esac
	done
	shift $((OPTIND - 1))
	EXECARGS="$*"
}
# Print the Go-style OS name for the current host; cygwin/mingw/msys
# environments all normalize to "windows".
get_goos() {
	host_os=$(uname -s | tr '[:upper:]' '[:lower:]')
	case "${host_os}" in
	cygwin_nt* | mingw* | msys_nt*) echo "windows" ;;
	*) echo "${host_os}" ;;
	esac
}
# Print the Go-style architecture name for the current host
# (uname -m output mapped onto GOARCH values).
get_goarch() {
	host_arch=$(uname -m)
	case "${host_arch}" in
	386 | i386 | i686 | x86) echo "i386" ;;
	aarch64) echo "arm64" ;;
	armv*) echo "arm" ;;
	x86_64) echo "amd64" ;;
	*) echo "${host_arch}" ;;
	esac
}
# Succeed iff "$1" (GOOS/GOARCH) is a platform chezmoi publishes binaries
# for; otherwise print a diagnostic to stderr and fail.
check_goos_goarch() {
	case "$1" in
	darwin/amd64 | \
		freebsd/386 | freebsd/amd64 | freebsd/arm | freebsd/arm64 | \
		linux/386 | linux/amd64 | linux/arm | linux/arm64 | linux/ppc64 | linux/ppc64le | \
		openbsd/386 | openbsd/amd64 | openbsd/arm | openbsd/arm64 | \
		windows/386 | windows/amd64)
		return 0
		;;
	*)
		echo "$1: unsupported platform" 1>&2
		return 1
		;;
	esac
}
# Resolve a tag alias (e.g. "latest") to the concrete tag name by asking
# GitHub's releases endpoint for JSON and scraping the "tag_name" field.
real_tag() {
	tag=$1
	log_debug "checking GitHub for tag ${tag}"
	release_url="https://github.com/twpayne/chezmoi/releases/${tag}"
	json=$(http_get "${release_url}" "Accept: application/json")
	if [ -z "${json}" ]; then
		log_err "real_tag error retrieving GitHub release ${tag}"
		return 1
	fi
	# Crude JSON scrape: strip everything up to "tag_name":" and after the
	# closing quote. Avoids a jq dependency.
	real_tag=$(echo "${json}" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
	if [ -z "${real_tag}" ]; then
		log_err "real_tag error determining real tag of GitHub release ${tag}"
		return 1
	fi
	test -z "${real_tag}" && return 1
	log_debug "found tag ${real_tag} for ${tag}"
	echo "${real_tag}"
}
# Fetch URL "$1" (optional header "$2") and print the response body.
http_get() {
	body_file=$(mktemp)
	http_download "${body_file}" "$1" "$2" || return 1
	response=$(cat "${body_file}")
	rm -f "${body_file}"
	echo "${response}"
}
# Download "$2" to file "$1" with curl, following redirects; "$3" is an
# optional request header. Fails unless the final HTTP status is 200.
http_download_curl() {
	local_file=$1
	source_url=$2
	header=$3
	if [ -z "${header}" ]; then
		code=$(curl -w '%{http_code}' -sL -o "${local_file}" "${source_url}")
	else
		code=$(curl -w '%{http_code}' -sL -H "${header}" -o "${local_file}" "${source_url}")
	fi
	if [ "${code}" != "200" ]; then
		log_debug "http_download_curl received HTTP status ${code}"
		return 1
	fi
	return 0
}
# Download "$2" to file "$1" with wget; "$3" is an optional request header.
# Unlike the curl variant, this relies on wget's own exit status rather
# than inspecting the HTTP response code.
http_download_wget() {
	local_file=$1
	source_url=$2
	header=$3
	if [ -z "${header}" ]; then
		wget -q -O "${local_file}" "${source_url}"
	else
		wget -q --header "${header}" -O "${local_file}" "${source_url}"
	fi
}
# Download "$2" to "$1" (optional header "$3") using curl if present,
# falling back to wget; fail loudly when neither tool exists.
http_download() {
	log_debug "http_download $2"
	if is_command curl; then
		http_download_curl "$@"
	elif is_command wget; then
		http_download_wget "$@"
	else
		log_crit "http_download unable to find wget or curl"
		return 1
	fi
}
# Print the SHA-256 hex digest of file "$1", using whichever of
# sha256sum / shasum / sha256 / openssl is available.
hash_sha256() {
	target=$1
	if is_command sha256sum; then
		hash=$(sha256sum "${target}") || return 1
		echo "${hash}" | cut -d ' ' -f 1
	elif is_command shasum; then
		hash=$(shasum -a 256 "${target}" 2>/dev/null) || return 1
		echo "${hash}" | cut -d ' ' -f 1
	elif is_command sha256; then
		hash=$(sha256 -q "${target}" 2>/dev/null) || return 1
		echo "${hash}" | cut -d ' ' -f 1
	elif is_command openssl; then
		hash=$(openssl dgst -sha256 "${target}") || return 1
		# openssl prints "SHA256(<file>)= <hex>"; take the last field.
		# Fix: the original used `cut -d ' ' -f a`, an invalid field spec,
		# so this fallback branch could never produce a digest.
		echo "${hash}" | awk '{print $NF}'
	else
		log_crit "hash_sha256 unable to find command to compute SHA256 hash"
		return 1
	fi
}
# Verify that file "$1" matches its entry in checksum file "$2"
# (sha256sum-style "<hex>  <name>" lines); fail with a logged error
# when the entry is missing or the digests differ.
hash_sha256_verify() {
	target=$1
	checksums=$2
	file_name=${target##*/}
	expected=$(grep "${file_name}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
	if [ -z "${expected}" ]; then
		log_err "hash_sha256_verify unable to find checksum for ${target} in ${checksums}"
		return 1
	fi
	actual=$(hash_sha256 "${target}")
	if [ "${expected}" != "${actual}" ]; then
		log_err "hash_sha256_verify checksum for ${target} did not verify ${expected} vs ${actual}"
		return 1
	fi
}
# Extract archive "$1" into the current directory, dispatching on its
# file extension (.tar.gz/.tgz, .tar, or .zip).
untar() {
	archive=$1
	case "${archive}" in
	*.tar.gz | *.tgz) tar -xzf "${archive}" ;;
	*.tar) tar -xf "${archive}" ;;
	*.zip) unzip "${archive}" ;;
	*)
		log_err "untar unknown archive format for ${archive}"
		return 1
		;;
	esac
}
# Succeed iff "$1" resolves to a runnable command (builtin, function, or
# executable on PATH).
is_command() {
	command -v "$1" >/dev/null
}
# Leveled loggers: write to stderr when LOG_LEVEL is at or above the
# message's level (debug=3, info=2, error=1, critical=0).
log_debug() {
	if [ "${LOG_LEVEL}" -ge 3 ]; then echo debug "$@" 1>&2; fi
}
log_info() {
	if [ "${LOG_LEVEL}" -ge 2 ]; then echo info "$@" 1>&2; fi
}
log_err() {
	if [ "${LOG_LEVEL}" -ge 1 ]; then echo error "$@" 1>&2; fi
}
log_crit() {
	if [ "${LOG_LEVEL}" -ge 0 ]; then echo critical "$@" 1>&2; fi
}
# Entry point: run the installer with all script arguments.
main "$@"
|
<gh_stars>0
// export class Stock {
// ///////////////
// // Variables //
// ///////////////
// private symbol: string
// private name: string
// private market_id: string
// //////////////////
// // Constructors //
// //////////////////
// public constructor(
// symbol: string,
// name: string,
// market_id: string,
// ){
// this.symbol = symbol
// this.name = name
// this.market_id = market_id
// }
// /////////////
// // Getters //
// /////////////
// public getSymbol(): string {
// return this.symbol
// }
// public getName(): string {
// return this.name
// }
// public getMarketId(): string {
// return this.market_id
// }
// /////////////
// // Setters //
// /////////////
// public setSymbol(symbol: string): void {
// this.symbol = symbol
// }
// public setName(name: string): void {
// this.name = name
// }
// public setMarketId(marketId: string): void {
// this.market_id = marketId
// }
// } |
/**
 * Restore a base64url string to standard base64: re-pad with '=' to a
 * multiple of four characters and map the URL-safe characters '-' and '_'
 * back to '+' and '/'.
 */
function base64Unescape(value: string): string {
    const padded = value + '==='.slice((value.length + 3) % 4);
    return padded.replace(/-/g, '+').replace(/_/g, '/');
}
/**
 * Convert standard base64 to base64url (RFC 4648 §5): '+' becomes '-',
 * '/' becomes '_', and '=' padding is stripped.
 */
function base64Escape(value: string): string {
    return value.replace(/[+/=]/g, (ch) => (ch === '+' ? '-' : ch === '/' ? '_' : ''));
}
// Encode a string as unpadded URL-safe base64 (base64url).
// NOTE(review): window.btoa throws on characters outside Latin-1 — callers
// must pre-encode Unicode text; confirm against call sites.
export function base64UrlEncode(value: string): string {
return base64Escape(window.btoa(value));
}
// Decode a base64url string produced by base64UrlEncode back to the
// original string.
export function base64UrlDecode(value: string): string {
return window.atob(base64Unescape(value));
}
|
/*
Navicat MySQL Data Transfer
Source Server : 127.0.0.1
Source Server Version : 50617
Source Host : localhost:3306
Source Database : cloud-vue
Target Server Type : MYSQL
Target Server Version : 50617
File Encoding : 65001
Date: 2017-04-27 16:08:52
*/
SET FOREIGN_KEY_CHECKS=0;
-- ----------------------------
-- Table structure for sys_admin_access
-- ----------------------------
-- Many-to-many link between admin users and permission groups.
-- NOTE(review): no primary key or index is defined on this join table.
DROP TABLE IF EXISTS `sys_admin_access`;
CREATE TABLE `sys_admin_access` (
`user_id` int(11) DEFAULT NULL,
`group_id` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of sys_admin_access
-- ----------------------------
INSERT INTO `sys_admin_access` VALUES ('2', '15');
-- ----------------------------
-- Table structure for sys_admin_group
-- ----------------------------
-- Admin permission groups; `rules` holds a comma-separated list of
-- sys_admin_rule ids granted to the group, `pid` is the parent group id,
-- and `status` is 1 = enabled.
DROP TABLE IF EXISTS `sys_admin_group`;
CREATE TABLE `sys_admin_group` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`title` varchar(100) DEFAULT NULL,
`rules` varchar(4000) DEFAULT NULL,
`pid` int(11) DEFAULT NULL,
`remark` varchar(100) DEFAULT NULL,
`status` tinyint(3) DEFAULT '1',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=27 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of sys_admin_group
-- ----------------------------
INSERT INTO `sys_admin_group` VALUES ('15', '普通会员', '1,2,3,4,5,6,7,8,9,10,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,59,61,62,63,28,29', '0', '最厉害的组别', '1');
INSERT INTO `sys_admin_group` VALUES ('24', 'bbb', '10', '0', 'sss', '1');
INSERT INTO `sys_admin_group` VALUES ('25', 'ccc', '61,62', '24', 'xxxx', '1');
-- ----------------------------
-- Table structure for sys_admin_menu
-- ----------------------------
-- Back-office navigation menu tree: `pid` points at the parent menu row and
-- `rule_id` links each menu entry to its sys_admin_rule permission.
DROP TABLE IF EXISTS `sys_admin_menu`;
CREATE TABLE `sys_admin_menu` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '菜单ID',
`pid` int(11) unsigned DEFAULT '0' COMMENT '上级菜单ID',
`title` varchar(32) DEFAULT '' COMMENT '菜单名称',
`url` varchar(127) DEFAULT '' COMMENT '链接地址',
`icon` varchar(64) DEFAULT '' COMMENT '图标',
`menu_type` tinyint(4) DEFAULT NULL COMMENT '菜单类型',
`sort` tinyint(4) unsigned DEFAULT '0' COMMENT '排序(同级有效)',
`status` tinyint(4) DEFAULT '1' COMMENT '状态',
`rule_id` int(11) DEFAULT NULL COMMENT '权限id',
`module` varchar(50) DEFAULT NULL,
`menu` varchar(50) DEFAULT NULL COMMENT '三级菜单吗',
`rule_name` varchar(50) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=63 DEFAULT CHARSET=utf8 COMMENT='【配置】后台菜单表';
-- ----------------------------
-- Records of sys_admin_menu
-- ----------------------------
INSERT INTO `sys_admin_menu` VALUES ('52', '0', '管理', '', '', '1', '0', '1', '59', 'Administrative', '', '管理菜单');
INSERT INTO `sys_admin_menu` VALUES ('53', '52', '系统配置', '', '', '1', '0', '1', '61', 'Administrative', '', '系统管理二级菜单');
INSERT INTO `sys_admin_menu` VALUES ('54', '53', '菜单管理', '/home/menu/list', '', '1', '0', '1', '21', 'Administrative', 'menu', '菜单列表');
INSERT INTO `sys_admin_menu` VALUES ('55', '53', '系统参数', '/home/config/add', '', '1', '0', '1', '29', 'Administrative', 'systemConfig', '修改系统配置');
INSERT INTO `sys_admin_menu` VALUES ('56', '53', '权限规则', '/home/rule/list', '', '1', '0', '1', '13', 'Administrative', 'rule', '规则列表');
INSERT INTO `sys_admin_menu` VALUES ('57', '52', '组织架构', '', '', '1', '0', '1', '63', 'Administrative', '', '组织架构二级菜单');
INSERT INTO `sys_admin_menu` VALUES ('58', '57', '岗位管理', '/home/position/list', '', '1', '0', '1', '31', 'Administrative', 'position', '岗位列表');
INSERT INTO `sys_admin_menu` VALUES ('59', '57', '部门管理', '/home/structures/list', '', '1', '0', '1', '39', 'Administrative', 'structures', '部门列表');
INSERT INTO `sys_admin_menu` VALUES ('60', '57', '用户组管理', '/home/groups/list', '', '1', '0', '1', '47', 'Administrative', 'groups', '用户组列表');
INSERT INTO `sys_admin_menu` VALUES ('61', '52', '账户管理', '', '', '1', '0', '1', '62', 'Administrative', '', '账户管理二级菜单');
INSERT INTO `sys_admin_menu` VALUES ('62', '61', '账户列表', '/home/users/list', '', '1', '0', '1', '55', 'Administrative', 'users', '成员列表');
-- ----------------------------
-- Table structure for sys_admin_menu_copy
-- ----------------------------
-- Backup copy of sys_admin_menu (same schema minus the `rule_name` column).
DROP TABLE IF EXISTS `sys_admin_menu_copy`;
CREATE TABLE `sys_admin_menu_copy` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '菜单ID',
`pid` int(11) unsigned DEFAULT '0' COMMENT '上级菜单ID',
`title` varchar(32) DEFAULT '' COMMENT '菜单名称',
`url` varchar(127) DEFAULT '' COMMENT '链接地址',
`icon` varchar(64) DEFAULT '' COMMENT '图标',
`menu_type` tinyint(4) DEFAULT NULL COMMENT '菜单类型',
`sort` tinyint(4) unsigned DEFAULT '0' COMMENT '排序(同级有效)',
`status` tinyint(4) DEFAULT '1' COMMENT '状态',
`rule_id` int(11) DEFAULT NULL COMMENT '权限id',
`module` varchar(50) DEFAULT NULL,
`menu` varchar(50) DEFAULT NULL COMMENT '三级菜单吗',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=63 DEFAULT CHARSET=utf8 COMMENT='【配置】后台菜单表';
-- ----------------------------
-- Records of sys_admin_menu_copy
-- ----------------------------
INSERT INTO `sys_admin_menu_copy` VALUES ('52', '0', '管理', '', '', '1', '0', '1', '59', 'Administrative', '');
INSERT INTO `sys_admin_menu_copy` VALUES ('53', '52', '系统配置', '', '', '1', '0', '1', '61', 'Administrative', '');
INSERT INTO `sys_admin_menu_copy` VALUES ('54', '53', '菜单管理', '/home/menu/list', '', '1', '0', '1', '21', 'Administrative', 'menu');
INSERT INTO `sys_admin_menu_copy` VALUES ('55', '53', '系统参数', '/home/config/add', '', '1', '0', '1', '29', 'Administrative', 'systemConfig');
INSERT INTO `sys_admin_menu_copy` VALUES ('56', '53', '权限规则', '/home/rule/list', '', '1', '0', '1', '13', 'Administrative', 'rule');
INSERT INTO `sys_admin_menu_copy` VALUES ('57', '52', '组织架构', '', '', '1', '0', '1', '63', 'Administrative', '');
INSERT INTO `sys_admin_menu_copy` VALUES ('58', '57', '岗位管理', '/home/position/list', '', '1', '0', '1', '31', 'Administrative', 'position');
INSERT INTO `sys_admin_menu_copy` VALUES ('59', '57', '部门管理', '/home/structures/list', '', '1', '0', '1', '39', 'Administrative', 'structures');
INSERT INTO `sys_admin_menu_copy` VALUES ('60', '57', '用户组管理', '/home/groups/list', '', '1', '0', '1', '47', 'Administrative', 'groups');
INSERT INTO `sys_admin_menu_copy` VALUES ('61', '52', '账户管理', '', '', '1', '0', '1', '62', 'Administrative', '');
INSERT INTO `sys_admin_menu_copy` VALUES ('62', '61', '账户列表', '/home/users/list', '', '1', '0', '1', '55', 'Administrative', 'users');
-- ----------------------------
-- Table structure for sys_admin_post
-- ----------------------------
-- Job positions; `create_time` is a unix timestamp and `status` is
-- 1 = enabled, 0 = disabled (per the column comments).
DROP TABLE IF EXISTS `sys_admin_post`;
CREATE TABLE `sys_admin_post` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(200) DEFAULT NULL COMMENT '岗位名称',
`remark` varchar(200) DEFAULT NULL COMMENT '岗位备注',
`create_time` int(11) DEFAULT NULL COMMENT '数据创建时间',
`status` tinyint(5) DEFAULT '1' COMMENT '状态1启用,0禁用',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=33 DEFAULT CHARSET=utf8 COMMENT='岗位表';
-- ----------------------------
-- Records of sys_admin_post
-- ----------------------------
INSERT INTO `sys_admin_post` VALUES ('5', '后端开发工程师', '', '1484706862', '1');
INSERT INTO `sys_admin_post` VALUES ('6', '前端开发工程师', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('7', '设计师', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('11', '文案策划', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('12', '产品助理', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('15', '总经理', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('20', '项目经理', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('25', '职能', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('26', '项目助理', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('27', '测试工程师', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('28', '人事经理', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('29', 'CEO', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('30', '品牌策划', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('31', '前端研发工程师', '', '1484706863', '1');
INSERT INTO `sys_admin_post` VALUES ('32', '后端研发工程师', '', '1484706863', '1');
-- ----------------------------
-- Table structure for sys_admin_rule
-- ----------------------------
-- Permission rules referenced by groups and menus; `level` is 1 = module,
-- 2 = controller, 3 = action (per the column comment) and `pid` is the
-- parent rule id.
DROP TABLE IF EXISTS `sys_admin_rule`;
CREATE TABLE `sys_admin_rule` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`title` varchar(100) DEFAULT '' COMMENT '名称',
`name` varchar(100) DEFAULT '' COMMENT '定义',
`level` tinyint(5) DEFAULT NULL COMMENT '级别。1模块,2控制器,3操作',
`pid` int(11) DEFAULT '0' COMMENT '父id,默认0',
`status` tinyint(3) DEFAULT '1' COMMENT '状态,1启用,0禁用',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=67 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of sys_admin_rule
-- ----------------------------
INSERT INTO `sys_admin_rule` VALUES ('10', '系统基础功能', 'admin', '1', '0', '1');
INSERT INTO `sys_admin_rule` VALUES ('11', '权限规则', 'rules', '2', '10', '1');
INSERT INTO `sys_admin_rule` VALUES ('13', '规则列表', 'index', '3', '11', '1');
INSERT INTO `sys_admin_rule` VALUES ('14', '权限详情', 'read', '3', '11', '1');
INSERT INTO `sys_admin_rule` VALUES ('15', '编辑权限', 'update', '3', '11', '1');
INSERT INTO `sys_admin_rule` VALUES ('16', '删除权限', 'delete', '3', '11', '1');
INSERT INTO `sys_admin_rule` VALUES ('17', '添加权限', 'save', '3', '11', '1');
INSERT INTO `sys_admin_rule` VALUES ('18', '批量删除权限', 'deletes', '3', '11', '1');
INSERT INTO `sys_admin_rule` VALUES ('19', '批量启用/禁用权限', 'enables', '3', '11', '1');
INSERT INTO `sys_admin_rule` VALUES ('20', '菜单管理', 'menus', '2', '10', '1');
INSERT INTO `sys_admin_rule` VALUES ('21', '菜单列表', 'index', '3', '20', '1');
INSERT INTO `sys_admin_rule` VALUES ('22', '添加菜单', 'save', '3', '20', '1');
INSERT INTO `sys_admin_rule` VALUES ('23', '菜单详情', 'read', '3', '20', '1');
INSERT INTO `sys_admin_rule` VALUES ('24', '编辑菜单', 'update', '3', '20', '1');
INSERT INTO `sys_admin_rule` VALUES ('25', '删除菜单', 'delete', '3', '20', '1');
INSERT INTO `sys_admin_rule` VALUES ('26', '批量删除菜单', 'deletes', '3', '20', '1');
INSERT INTO `sys_admin_rule` VALUES ('27', '批量启用/禁用菜单', 'enables', '3', '20', '1');
INSERT INTO `sys_admin_rule` VALUES ('28', '系统管理', 'systemConfigs', '2', '10', '1');
INSERT INTO `sys_admin_rule` VALUES ('29', '修改系统配置', 'save', '3', '28', '1');
INSERT INTO `sys_admin_rule` VALUES ('30', '岗位管理', 'posts', '2', '10', '1');
INSERT INTO `sys_admin_rule` VALUES ('31', '岗位列表', 'index', '3', '30', '1');
INSERT INTO `sys_admin_rule` VALUES ('32', '岗位详情', 'read', '3', '30', '1');
INSERT INTO `sys_admin_rule` VALUES ('33', '编辑岗位', 'update', '3', '30', '1');
INSERT INTO `sys_admin_rule` VALUES ('34', '删除岗位', 'delete', '3', '30', '1');
INSERT INTO `sys_admin_rule` VALUES ('35', '添加岗位', 'save', '3', '30', '1');
INSERT INTO `sys_admin_rule` VALUES ('36', '批量删除岗位', 'deletes', '3', '30', '1');
INSERT INTO `sys_admin_rule` VALUES ('37', '批量启用/禁用岗位', 'enables', '3', '30', '1');
INSERT INTO `sys_admin_rule` VALUES ('38', '部门管理', 'structures', '2', '10', '1');
INSERT INTO `sys_admin_rule` VALUES ('39', '部门列表', 'index', '3', '38', '1');
INSERT INTO `sys_admin_rule` VALUES ('40', '部门详情', 'read', '3', '38', '1');
INSERT INTO `sys_admin_rule` VALUES ('41', '编辑部门', 'update', '3', '38', '1');
INSERT INTO `sys_admin_rule` VALUES ('42', '删除部门', 'delete', '3', '38', '1');
INSERT INTO `sys_admin_rule` VALUES ('43', '添加部门', 'save', '3', '38', '1');
INSERT INTO `sys_admin_rule` VALUES ('44', '批量删除部门', 'deletes', '3', '38', '1');
INSERT INTO `sys_admin_rule` VALUES ('45', '批量启用/禁用部门', 'enables', '3', '38', '1');
INSERT INTO `sys_admin_rule` VALUES ('46', '用户组管理', 'groups', '2', '10', '1');
INSERT INTO `sys_admin_rule` VALUES ('47', '用户组列表', 'index', '3', '46', '1');
INSERT INTO `sys_admin_rule` VALUES ('48', '用户组详情', 'read', '3', '46', '1');
INSERT INTO `sys_admin_rule` VALUES ('49', '编辑用户组', 'update', '3', '46', '1');
INSERT INTO `sys_admin_rule` VALUES ('50', '删除用户组', 'delete', '3', '46', '1');
INSERT INTO `sys_admin_rule` VALUES ('51', '添加用户组', 'save', '3', '46', '1');
INSERT INTO `sys_admin_rule` VALUES ('52', '批量删除用户组', 'deletes', '3', '46', '1');
INSERT INTO `sys_admin_rule` VALUES ('53', '批量启用/禁用用户组', 'enables', '3', '46', '1');
INSERT INTO `sys_admin_rule` VALUES ('54', '成员管理', 'users', '2', '10', '1');
INSERT INTO `sys_admin_rule` VALUES ('55', '成员列表', 'index', '3', '54', '1');
INSERT INTO `sys_admin_rule` VALUES ('56', '成员详情', 'read', '3', '54', '1');
INSERT INTO `sys_admin_rule` VALUES ('57', '删除成员', 'delete', '3', '54', '1');
INSERT INTO `sys_admin_rule` VALUES ('59', '管理菜单', 'Adminstrative', '2', '10', '1');
INSERT INTO `sys_admin_rule` VALUES ('61', '系统管理二级菜单', 'systemConfig', '1', '59', '1');
INSERT INTO `sys_admin_rule` VALUES ('62', '账户管理二级菜单', 'personnel', '3', '59', '1');
INSERT INTO `sys_admin_rule` VALUES ('63', '组织架构二级菜单', 'structures', '3', '59', '1');
INSERT INTO `sys_admin_rule` VALUES ('66', 'bb', 'bbb', '1', '59', '0');
-- ----------------------------
-- Table structure for sys_admin_structure
-- ----------------------------
-- Organization/department tree; `pid` is the parent department id
-- (0 = top level) and `status` is 1 = enabled.
DROP TABLE IF EXISTS `sys_admin_structure`;
CREATE TABLE `sys_admin_structure` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(200) DEFAULT '',
`pid` int(11) DEFAULT '0',
`status` tinyint(3) DEFAULT '1',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=58 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of sys_admin_structure
-- ----------------------------
INSERT INTO `sys_admin_structure` VALUES ('1', 'xxxxx科技有限公司', '0', '1');
INSERT INTO `sys_admin_structure` VALUES ('5', '设计部', '1', '1');
INSERT INTO `sys_admin_structure` VALUES ('6', '职能部', '1', '1');
INSERT INTO `sys_admin_structure` VALUES ('37', '总经办', '1', '1');
INSERT INTO `sys_admin_structure` VALUES ('52', '项目部', '1', '1');
INSERT INTO `sys_admin_structure` VALUES ('53', '测试部', '1', '1');
INSERT INTO `sys_admin_structure` VALUES ('54', '开发部', '1', '1');
INSERT INTO `sys_admin_structure` VALUES ('55', '市场部', '1', '1');
INSERT INTO `sys_admin_structure` VALUES ('56', '研发部', '1', '1');
INSERT INTO `sys_admin_structure` VALUES ('57', '企业微信', '0', '1');
-- ----------------------------
-- Table structure for sys_admin_user
-- ----------------------------
-- Admin user accounts; `structure_id` and `post_id` reference
-- sys_admin_structure and sys_admin_post respectively.
DROP TABLE IF EXISTS `sys_admin_user`;
CREATE TABLE `sys_admin_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键',
`username` varchar(100) DEFAULT NULL COMMENT '管理后台账号',
`password` varchar(100) DEFAULT NULL COMMENT '<PASSWORD>',
`remark` varchar(100) DEFAULT NULL COMMENT '用户备注',
`create_time` int(11) DEFAULT NULL,
`realname` varchar(100) DEFAULT NULL COMMENT '真实姓名',
`structure_id` int(11) DEFAULT NULL COMMENT '部门',
`post_id` int(11) DEFAULT NULL COMMENT '岗位',
`status` tinyint(3) DEFAULT NULL COMMENT '状态,1启用0禁用',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of sys_admin_user
-- ----------------------------
INSERT INTO `sys_admin_user` VALUES ('1', 'admin', 'e<PASSWORD>', '', '1487217060', '超级管理员', '1', '5', '1');
INSERT INTO `sys_admin_user` VALUES ('3', '用户AAAA', 'c78b6663d47cfbdb4d65ea51c104044e', '', '1487217060', '用户A', '5', '20', '1');
-- ----------------------------
-- Table structure for sys_system_config
-- ----------------------------
-- Key/value system configuration; `need_auth` = 1 means the value can only
-- be read after login (per the column comment). Config names are unique.
DROP TABLE IF EXISTS `sys_system_config`;
CREATE TABLE `sys_system_config` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '配置ID',
`name` varchar(50) DEFAULT '',
`value` varchar(100) DEFAULT '' COMMENT '配置值',
`group` tinyint(4) unsigned DEFAULT '0' COMMENT '配置分组',
`need_auth` tinyint(4) DEFAULT '1' COMMENT '1需要登录后才能获取,0不需要登录即可获取',
PRIMARY KEY (`id`),
UNIQUE KEY `参数名` (`name`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=5 DEFAULT CHARSET=utf8 COMMENT='【配置】系统配置表';
-- ----------------------------
-- Records of sys_system_config
-- ----------------------------
INSERT INTO `sys_system_config` VALUES ('1', 'SYSTEM_NAME', 'cloud-vue通用后台登录', '0', '1');
INSERT INTO `sys_system_config` VALUES ('2', 'SYSTEM_LOGO', 'uploads\\20170219\\d9fe7b784e1b1f406234b7b301e627e8.png', '0', '1');
INSERT INTO `sys_system_config` VALUES ('3', 'LOGIN_SESSION_VALID', '1644', '0', '1');
INSERT INTO `sys_system_config` VALUES ('4', 'IDENTIFYING_CODE', '0', '0', '1');
|
<reponame>ChunboLI/NETCTOSS
package com.tarena.crm.dao.impl;
import java.sql.Connection;
import java.sql.Date;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.List;
import com.tarena.crm.entity.Custom;
import com.tarena.crm.entity.CustomInfo;
import com.tarena.crm.entity.Customsource;
import com.tarena.crm.entity.Customstatus;
import com.tarena.crm.entity.Customtype;
import com.tarena.crm.entity.Emp;
import com.tarena.db.DBUtils;
public class CustomInfoDaoImpl {
    /**
     * Load every row of the Custom table and flatten the related status,
     * source, type and owning-employee names into CustomInfo view objects.
     *
     * NOTE(review): the per-row DAO lookups (status/source/type/employee)
     * issue four extra queries per customer (an N+1 pattern); consider a
     * JOIN if the customer list grows large.
     *
     * @return all customers as display-ready CustomInfo objects
     * @throws Exception any SQL/connection failure (logged, then rethrown)
     */
    public List<CustomInfo> findAll() throws Exception {
        List<CustomInfo> cilist = new ArrayList<CustomInfo>();
        Connection conn = null;
        PreparedStatement prep = null;
        ResultSet rs = null;
        try {
            conn = DBUtils.getConnection();
            prep = conn.prepareStatement("select * from Custom");
            rs = prep.executeQuery();
            while (rs.next()) {
                long id = rs.getLong("id");
                String name = rs.getString("name");
                String job = rs.getString("job");
                // gender is stored as an int: 0 = male (男), otherwise female (女)
                String gender = rs.getInt("gender") == 0 ? "男" : "女";
                String qq = rs.getString("qq");
                Customstatus cst = new CustomStatusDaoImpl().findById(rs.getInt("status"));
                String status = cst.getStatus();
                Customsource cso = new CustomSourceDaoImpl().findById(rs.getInt("source"));
                String source = cso.getSource();
                Customtype ct = new CustomTypeDaoImpl().findById(rs.getInt("type"));
                String type = ct.getType();
                String email = rs.getString("email");
                // column name is misspelled in the schema ("mobliePhone"); kept as-is
                String mobliePhone = rs.getString("mobliePhone");
                String company = rs.getString("company");
                String remarks = rs.getString("remarks");
                Emp emp = new EmpDaoImpl().findById(rs.getLong("belongTo"));
                String empName = emp.getName();
                CustomInfo c = new CustomInfo();
                c.setId(id);
                c.setCustomName(name);
                c.setJob(job);
                c.setGender(gender);
                c.setQq(qq);
                c.setStatus(status);
                c.setSource(source);
                c.setType(type);
                c.setEmail(email);
                c.setMobliePhone(mobliePhone);
                c.setCompany(company);
                c.setRemarks(remarks);
                c.setEmpName(empName);
                cilist.add(c);
            }
        } catch (Exception e) {
            e.printStackTrace();
            throw e;
        } finally {
            // Fix: close the ResultSet and PreparedStatement as well; the
            // original only released the Connection and leaked both of them
            // on every call.
            if (rs != null) {
                try { rs.close(); } catch (Exception ignored) { }
            }
            if (prep != null) {
                try { prep.close(); } catch (Exception ignored) { }
            }
            DBUtils.closeConnection(conn);
        }
        return cilist;
    }
}
|
<gh_stars>10-100
#ifndef SENSOR_H
#define SENSOR_H
/* (GCLK_SOURCE / GCLK_DIVIDE) / (PRESCALER + REGISTER COUNTS) = overflow frequency of TCx
 * (48 MHz / GCLK_DIVIDE) / (PRESCALER + 2^16) = 2^16 µs, meaning the counter is
 * currently set up to 1 MHz, which equals count stamps of 1 µs */
// Configure clock generator GCLK4 at 16 MHz (48 MHz / 3), route it to
// TC4/TC5, and start TC4 as a free-running 16-bit counter with prescaler
// DIV16 — i.e. 1 MHz, so one counter tick == 1 µs (see file-top comment).
void initCounter()
{
// divide the 48 MHz source of the GCLK by 3 via generator divider 4
REG_GCLK_GENDIV = GCLK_GENDIV_DIV(3) |
GCLK_GENDIV_ID(4);
while (GCLK->STATUS.bit.SYNCBUSY);
// select the 48 MHz DFLL as the source for clock generator 4 and enable it
REG_GCLK_GENCTRL = GCLK_GENCTRL_IDC |
GCLK_GENCTRL_GENEN |
GCLK_GENCTRL_SRC_DFLL48M |
GCLK_GENCTRL_ID(4);
while (GCLK->STATUS.bit.SYNCBUSY);
// pass GEN_GCLK4 to the counters TC4 and TC5
REG_GCLK_CLKCTRL = GCLK_CLKCTRL_CLKEN |
GCLK_CLKCTRL_GEN_GCLK4 |
GCLK_CLKCTRL_ID_TC4_TC5;
while (GCLK->STATUS.bit.SYNCBUSY);
// 16-bit counter mode; each register write must wait for clock-domain sync
REG_TC4_CTRLA |= TC_CTRLA_MODE_COUNT16;
while (TC4->COUNT16.STATUS.bit.SYNCBUSY);
// prescale by 16 (16 MHz -> 1 MHz) and enable the counter
REG_TC4_CTRLA |= TC_CTRLA_PRESCALER_DIV16|
TC_CTRLA_ENABLE;
while (TC4->COUNT16.STATUS.bit.SYNCBUSY);
}
volatile static uint16_t startT;
volatile static uint16_t stopT;
volatile static uint16_t duration;
volatile static uint16_t duration_prev;
// One decoded lighthouse sweep sample, as produced by falling_IRQ_S1 and
// pushed into sweepFIFO.
typedef struct Sweep{
uint16_t magicNumber; // set to the constant 0xBEEF in falling_IRQ_S1
uint8_t sensorID; // photodiode index; always written as 0 here
uint8_t lighthouse; // base station id (0/1), derived from the sweep gap
uint8_t rotor; // copied from `vertical`: 1 = vertical sweep, 0 = horizontal
uint16_t sweepDuration; // counter ticks from sync start to laser hit
}Sweep;
// Ring buffer of 128 Sweep pointers (manipulated via the FIFO_* macros).
typedef struct _FIFO128sweep{
uint8_t mRead; // index of the slot last read
uint8_t mWrite; // index of the slot last written
Sweep * mBuffer[128];
}FIFO128sweep;
// Ring buffer of 128 raw 16-bit values; not referenced in this header's
// visible code.
typedef struct _FIFO128t{
uint8_t mRead;
uint8_t mWrite;
uint16_t mBuffer[128];
}FIFO128t;
volatile static bool vertical;
volatile static bool sweep_active;
volatile static bool synced;
volatile static bool BaseStation;
volatile static bool lighthouse;
volatile static uint16_t sweep_start;
volatile static uint16_t sweep_duration;
volatile static uint16_t active_sweep_a;
volatile static uint16_t active_sweep_b;
volatile static FIFO128sweep sweepFIFO;
//FIFO operations are implemented as preprocessor macros since the FIFO is used in several IRQs and things need to be a bit faster
// Reset both cursors; existing buffer contents are simply abandoned.
#define FIFO_init(fifo) { fifo.mRead = 0; fifo.mWrite = 0;}
// Non-empty test: unread data exists while the cursors differ.
#define FIFO_available(fifo) ( fifo.mRead != fifo.mWrite )
// Pre-increment the read cursor (wrapping at `size`, which must be a power
// of two) and yield that slot, or 0 when the FIFO is empty.
#define FIFO_read(fifo, size) ( \
(FIFO_available(fifo)) ? \
fifo.mBuffer[(fifo.mRead = ((fifo.mRead + 1) & (size -1)))] : 0 \
)
// Store `data` in the next slot unless that would collide with the read
// cursor (FIFO full); in that case `data` is silently dropped.
#define FIFO_write(fifo, data, size) { \
uint8_t temp = ((fifo.mWrite +1 )& (size -1)); \
if(temp != fifo.mRead) { \
fifo.mBuffer[temp] = data; \
fifo.mWrite = temp; \
} \
}
// Convenience wrappers fixed to the 128-slot buffers above.
#define FIFO128_read(fifo) FIFO_read(fifo, 128)
#define FIFO128_write(fifo, data) FIFO_write(fifo, data, 128)
// IRQ on the rising edge of sensor 1: latch the pulse start timestamp
// from the free-running TC4 counter (1 tick == 1 µs, see initCounter).
void rising_IRQ_S1(void)
{
startT = (uint16_t) (TC4->COUNT16.COUNT.reg);
}
// IRQ on the falling edge of sensor 1: measure the pulse that just ended
// and classify it. Pulses longer than 50 ticks are lighthouse sync flashes
// (their exact width encodes the rotor axis and skip bit); shorter pulses
// are the laser sweep itself, which completes a measurement and enqueues
// a Sweep record.
void falling_IRQ_S1(void)
{
    stopT = (uint16_t) (TC4->COUNT16.COUNT.reg);
    duration = (stopT - startT);
    // sync pulse detected! Get duration and start sweep-counting measurement
    if( duration > 50 )
    {
        // check if the sync pulse signals skip or not
        if((60 < duration && 70 > duration)
                || ( 81 < duration && 90 > duration))
        {
            // non-skip sync for the vertical rotor
            vertical = true;
            synced = true;
            sweep_active = true;
            sweep_start = startT;
        }else if(synced == false){
            // ignore everything else until the first valid sync is seen
        }else if(( 71 < duration && 80 > duration)
                || ( 91 < duration && 100 > duration))
        {
            // non-skip sync for the horizontal rotor
            sweep_start = startT;
            vertical = false;
            sweep_active = true;
        }
    }
    // laser sweep detected! Complete sweep-counting measurement
    else if(true == sweep_active && duration < 50)
    {
        sweep_active = false;
        sweep_duration = startT - sweep_start;
        // alternate hits are attributed to the two base stations; a gap of
        // more than 6000 ticks between them selects lighthouse 1
        if(BaseStation == false){
            active_sweep_a = startT;
            BaseStation = true;
        }else{
            active_sweep_b = startT;
            uint16_t baseStationGap = (active_sweep_b - active_sweep_a);
            lighthouse = (baseStationGap > 6000);
            BaseStation = false;
        }
        Sweep * sweep = static_cast<Sweep*>(malloc(sizeof(Sweep)));
        if(sweep != NULL){
            sweep->lighthouse = lighthouse;
            sweep->rotor = vertical;
            sweep->sensorID = 0;
            sweep->sweepDuration = sweep_duration;
            sweep->magicNumber = 0xBEEF;
            // Fix: only enqueue on successful allocation. The original
            // called FIFO128_write outside this check, pushing a NULL
            // pointer into the FIFO whenever malloc failed. (The unused
            // local `sensorID` was also removed.)
            FIFO128_write(sweepFIFO, sweep);
        }
    }
}
#endif
|
def remove_vowels(string):
    """Return *string* with every vowel removed, regardless of case.

    Fix: the original lowercased each character for the membership test but
    then called ``string.replace`` with the lowercase character, so uppercase
    vowels ('A', 'E', ...) were never removed from the result.
    """
    vowels = set("aeiouAEIOU")
    return "".join(ch for ch in string if ch not in vowels)
def search_string(string, search):
    """Return True when *search* occurs as a substring of *string*.

    The ``in`` operator already yields a bool, so the original's explicit
    if/else branches were redundant.
    """
    return search in string
import java.util.concurrent.locks.ReentrantLock;
public class Logger {
    // Guards System.out so lines from different threads never interleave.
    // Fix: the unused private field `hostHost` was removed and the lock is
    // now final (it is never reassigned).
    private static final ReentrantLock consoleLogLock = new ReentrantLock();

    /**
     * Print one message to stdout while holding the console lock, so the
     * line is emitted atomically with respect to other logMessage calls.
     *
     * @param message the line to print
     */
    public void logMessage(String message) {
        consoleLogLock.lock();
        try {
            System.out.println(message);
        } finally {
            // Always release, even if println throws.
            consoleLogLock.unlock();
        }
    }

    /**
     * Demo entry point: two threads log five messages each, then the main
     * thread joins both before exiting.
     */
    public static void main(String[] args) {
        Logger logger = new Logger();
        // Simulate concurrent logging by multiple threads
        Thread thread1 = new Thread(() -> {
            for (int i = 0; i < 5; i++) {
                logger.logMessage("Thread 1 - Log message " + i);
            }
        });
        Thread thread2 = new Thread(() -> {
            for (int i = 0; i < 5; i++) {
                logger.logMessage("Thread 2 - Log message " + i);
            }
        });
        thread1.start();
        thread2.start();
        try {
            thread1.join();
            thread2.join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
import BaseError from './baseError';
/**
 * Domain error raised when a price falls below the minimum allowed value.
 * Carries error code 530, like the other BaseError subclasses.
 */
class MinPriceError extends BaseError {
    constructor(message) {
        const fallbackMessage = 'The price cant be lower than 0.0001';
        super(message || fallbackMessage, 530);
    }
}

export default MinPriceError;
|
# This script is from https://github.com/WaldJohannaU/3RScan with MIT License (Copyright (c) 2020 Johanna Wald)

# Make sure the data directory exists.
[[ -d "data" ]] || mkdir data

# Download and unpack the example scan archive once; the zip is kept so a
# re-run skips the download and only re-extracts if needed.
if [[ ! -d "data/3RScan" ]]; then
  if [[ ! -f "data/3RScan.v2.zip" ]]; then
    wget "http://campar.in.tum.de/files/3RScan/3RScan.v2.zip" -P data
  fi
  unzip "data/3RScan.v2.zip" -d ./data/3RScan
fi

# Fetch the metadata files individually if they are missing.
if [[ ! -f "data/3RScan/3RScan.json" ]]; then
  wget "http://campar.in.tum.de/files/3RScan/3RScan.json" -P data/3RScan
fi
if [[ ! -f "data/3RScan/objects.json" ]]; then
  wget "http://campar.in.tum.de/files/3DSSG/3DSSG/objects.json" -P data/3RScan
fi
<gh_stars>1-10
// ==UserScript==
// @namespace https://tampermonkey.myso.kr/
// @exclude *
// ==UserLibrary==
// @name GM_xmlhttpRequestHook
// @description GM_xmlhttpRequestHook 스크립트
// @copyright 2021, myso (https://tampermonkey.myso.kr)
// @license Apache-2.0
// @version 1.0.8
// ==/UserScript==
// ==/UserLibrary==
// ==OpenUserJS==
// @author myso
// ==/OpenUserJS==
(function (window) {
    // Install/replace the global tracking callback; calling with no argument
    // keeps the current callback.
    window.GM_xmlhttpRequestHook = (callback) => window.GM_xmlhttpRequestHook.tracking = callback || window.GM_xmlhttpRequestHook.tracking;
    // Default tracker: pass the original value through untouched.
    window.GM_xmlhttpRequestHook.tracking = (data, origin) => origin;
    // Wrap fetch() so every request start is reported to the tracker; the
    // pristine fetch is preserved on window.fetch$ (idempotent across loads).
    window.fetch = ((fetch) => {
        return function(url) {
            window.GM_xmlhttpRequestHook.tracking({ type: 'xhr', target: this.url = url });
            return fetch.apply(this, arguments);
        };
    })(window.fetch$ = window.fetch$ || window.fetch);
    // Patch Response.prototype.text()/json() so fetch response bodies are
    // reported to the tracker as 'xhrload' events.
    window.Response = ((Response) => {
        const Res = Response.prototype, text = Res.text, json = Res.json;
        Res.text = async function() {
            const res = await text.apply(this, arguments), txt = res;
            const detail_keys = ['url', 'status', 'statusText'];
            const detail = detail_keys.reduce((r, o)=>(r[o] = this[o], r), { response: txt, responseText: txt, responseJson: undefined });
            try { detail.responseJson = JSON.parse(res); } catch(e) {}
            return window.GM_xmlhttpRequestHook.tracking({ type: 'xhrload', target: this.url, detail }, res);
        };
        Res.json = async function() {
            const res = await json.apply(this, arguments), txt = JSON.stringify(res);
            const detail_keys = ['url', 'status', 'statusText'];
            const detail = detail_keys.reduce((r, o)=>(r[o] = this[o], r), { response: txt, responseText: txt, responseJson: res, });
            return window.GM_xmlhttpRequestHook.tracking({ type: 'xhrload', target: this.url, detail }, res);
        };
        // Fix: the original IIFE returned undefined here, which clobbered
        // window.Response. Return the (prototype-patched) constructor, the
        // same way the XMLHttpRequest wrapper below returns its value.
        return Response;
    })(window.Response$ = window.Response$ || window.Response)
    // Wrap XMLHttpRequest: record the URL at open(), report request start at
    // send(), and report the parsed response on the 'load' event.
    window.XMLHttpRequest = ((XMLHttpRequest)=> {
        const XHR = XMLHttpRequest.prototype, send = XHR.send, open = XHR.open;
        XHR.open = function(method, url) { this.url = url; return open.apply(this, arguments); }
        XHR.send = function() {
            this.addEventListener('load', function () {
                const detail_keys = ['url', 'response', 'status', 'statusText'];
                const detail = detail_keys.reduce((r, o)=>(r[o] = this[o], r), { responseText: undefined, responseJson: undefined });
                try { detail.responseText = detail.response; } catch(e) {};
                try { detail.responseJson = JSON.parse(detail.response); } catch(e) {}
                window.GM_xmlhttpRequestHook.tracking({ type: 'xhrload', target: this.url, detail });
            });
            window.GM_xmlhttpRequestHook.tracking({ type: 'xhr', target: this.url });
            send.apply(this, arguments);
        };
        return XMLHttpRequest;
    })(window.XMLHttpRequest$ = window.XMLHttpRequest$ || window.XMLHttpRequest);
})(window);
<!DOCTYPE html>
<html lang="en">
<head>
    <!-- Declare the encoding so the page renders consistently; the DOCTYPE
         above keeps browsers out of quirks mode. -->
    <meta charset="utf-8" />
    <title>Greeting Form</title>
</head>
<body>
    <!-- Collects a name and age and POSTs them to greet.php -->
    <form method="post" action="greet.php">
        <label for="name">Name:</label>
        <input type="text" name="name" id="name" />
        <label for="age">Age:</label>
        <input type="number" name="age" id="age" />
        <input type="submit" value="greet" />
    </form>
</body>
</html>
<gh_stars>0
package mezz.jei.plugins.vanilla.crafting;
import javax.annotation.Nullable;
import net.minecraftforge.common.crafting.IShapedRecipe;
import net.minecraftforge.common.util.Size2i;
import net.minecraft.item.crafting.ICraftingRecipe;
import net.minecraft.util.ResourceLocation;
import mezz.jei.api.constants.VanillaTypes;
import mezz.jei.api.ingredients.IIngredients;
import mezz.jei.api.recipe.category.extensions.vanilla.crafting.ICraftingCategoryExtension;
/**
 * Adapts a vanilla {@link ICraftingRecipe} to JEI's crafting recipe
 * category so the recipe can be displayed in the JEI overlay.
 */
public class CraftingCategoryExtension<T extends ICraftingRecipe> implements ICraftingCategoryExtension {
protected final T recipe;
public CraftingCategoryExtension(T recipe) {
this.recipe = recipe;
}
// Feed the wrapped recipe's inputs and result item into JEI's ingredient view.
@Override
public void setIngredients(IIngredients ingredients) {
ingredients.setInputIngredients(recipe.getIngredients());
ingredients.setOutput(VanillaTypes.ITEM, recipe.getResultItem());
}
// The recipe id doubles as the registry name JEI reports for this entry.
@Nullable
@Override
public ResourceLocation getRegistryName() {
return recipe.getId();
}
// Shaped recipes report their crafting-grid footprint; all other recipes
// (e.g. shapeless) return null, meaning "no fixed size".
@Nullable
@Override
public Size2i getSize() {
if (recipe instanceof IShapedRecipe) {
IShapedRecipe<?> shapedRecipe = (IShapedRecipe<?>) this.recipe;
return new Size2i(shapedRecipe.getRecipeWidth(), shapedRecipe.getRecipeHeight());
}
return null;
}
}
|
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://blog.linuxeye.com
#
# Notes: OneinStack for CentOS/RadHat 5+ Debian 6+ and Ubuntu 12+
#
# Project home page:
# https://oneinstack.com
# https://github.com/lj2007331/oneinstack
# Upgrade an installed Nginx in place: rebuild with the running binary's own
# ./configure arguments, then do a zero-downtime binary swap (USR2 to spawn a
# new master, QUIT to retire the old one).
# Globals (read): oneinstack_dir nginx_install_dir openssl_version pcre_version THREAD
# Calls: Download_src, get_char (defined elsewhere in this project).
Upgrade_Nginx() {
  pushd "${oneinstack_dir}/src" > /dev/null
  [ ! -e "$nginx_install_dir/sbin/nginx" ] && echo "${CWARNING}Nginx is not installed on your system! ${CEND}" && exit 1
  OLD_Nginx_version_tmp=$($nginx_install_dir/sbin/nginx -v 2>&1)
  OLD_Nginx_version=${OLD_Nginx_version_tmp##*/}
  echo
  echo "Current Nginx Version: ${CMSG}$OLD_Nginx_version${CEND}"
  while :; do echo
    read -p "Please input upgrade Nginx Version(example: 1.9.15): " NEW_Nginx_version
    if [ "$NEW_Nginx_version" != "$OLD_Nginx_version" ]; then
      [ ! -e "nginx-$NEW_Nginx_version.tar.gz" ] && wget --no-check-certificate -c http://nginx.org/download/nginx-$NEW_Nginx_version.tar.gz > /dev/null 2>&1
      if [ -e "nginx-$NEW_Nginx_version.tar.gz" ]; then
        src_url=https://www.openssl.org/source/openssl-$openssl_version.tar.gz && Download_src
        src_url=http://mirrors.linuxeye.com/oneinstack/src/pcre-$pcre_version.tar.gz && Download_src
        tar xzf openssl-$openssl_version.tar.gz
        tar xzf pcre-$pcre_version.tar.gz
        echo "Download [${CMSG}nginx-$NEW_Nginx_version.tar.gz${CEND}] successfully! "
        break
      else
        echo "${CWARNING}Nginx version does not exist! ${CEND}"
      fi
    else
      echo "${CWARNING}input error! Upgrade Nginx version is the same as the old version${CEND}"
    fi
  done
  if [ -e "nginx-$NEW_Nginx_version.tar.gz" ]; then
    echo "[${CMSG}nginx-$NEW_Nginx_version.tar.gz${CEND}] found"
    echo "Press Ctrl+c to cancel or Press any key to continue..."
    char=$(get_char)
    tar xzf nginx-$NEW_Nginx_version.tar.gz
    pushd nginx-$NEW_Nginx_version
    make clean
    sed -i 's@CFLAGS="$CFLAGS -g"@#CFLAGS="$CFLAGS -g"@' auto/cc/gcc # close debug
    # Recover the running binary's configure arguments. Fix: use mktemp
    # instead of a scratch file literally named "$$" (the shell PID) in the
    # source tree.
    nginx_v_tmp=$(mktemp)
    $nginx_install_dir/sbin/nginx -V &> "$nginx_v_tmp"
    nginx_configure_arguments=$(grep 'configure arguments:' "$nginx_v_tmp" | awk -F: '{print $2}')
    rm -f "$nginx_v_tmp"
    # Word-splitting of the recovered arguments is intentional here.
    ./configure $nginx_configure_arguments
    make -j ${THREAD}
    if [ -f "objs/nginx" ]; then
      /bin/mv $nginx_install_dir/sbin/nginx{,$(date +%m%d)}
      /bin/cp objs/nginx $nginx_install_dir/sbin/nginx
      # Hot-swap: USR2 starts a new master with the new binary, QUIT stops
      # the old master once the new one is up.
      kill -USR2 $(cat /var/run/nginx.pid)
      sleep 1
      kill -QUIT $(cat /var/run/nginx.pid.oldbin)
      popd > /dev/null
      echo "You have ${CMSG}successfully${CEND} upgrade from ${CWARNING}$OLD_Nginx_version${CEND} to ${CWARNING}$NEW_Nginx_version${CEND}"
      rm -rf nginx-$NEW_Nginx_version
    else
      # NOTE(review): on this path the inner pushd is never popped, leaving
      # the directory stack unbalanced — preserved from the original.
      echo "${CFAILURE}Upgrade Nginx failed! ${CEND}"
    fi
  fi
  popd > /dev/null
}
# Upgrade an installed Tengine in place, mirroring Upgrade_Nginx: rebuild with
# the running binary's configure arguments, swap the binary/dso_tool/modules
# aside with a date suffix, then hot-swap masters via USR2/QUIT.
# Globals (read): oneinstack_dir tengine_install_dir openssl_version pcre_version THREAD
Upgrade_Tengine() {
  # Consistency fix: silence pushd like the other Upgrade_* functions do.
  pushd "${oneinstack_dir}/src" > /dev/null
  [ ! -e "$tengine_install_dir/sbin/nginx" ] && echo "${CWARNING}Tengine is not installed on your system! ${CEND}" && exit 1
  OLD_Tengine_version_tmp=$($tengine_install_dir/sbin/nginx -v 2>&1)
  OLD_Tengine_version="$(echo ${OLD_Tengine_version_tmp#*/} | awk '{print $1}')"
  echo
  echo "Current Tengine Version: ${CMSG}$OLD_Tengine_version${CEND}"
  while :; do echo
    read -p "Please input upgrade Tengine Version(example: 2.1.15): " NEW_Tengine_version
    if [ "$NEW_Tengine_version" != "$OLD_Tengine_version" ]; then
      [ ! -e "tengine-$NEW_Tengine_version.tar.gz" ] && wget --no-check-certificate -c http://tengine.taobao.org/download/tengine-$NEW_Tengine_version.tar.gz > /dev/null 2>&1
      if [ -e "tengine-$NEW_Tengine_version.tar.gz" ]; then
        src_url=https://www.openssl.org/source/openssl-$openssl_version.tar.gz && Download_src
        src_url=http://mirrors.linuxeye.com/oneinstack/src/pcre-$pcre_version.tar.gz && Download_src
        tar xzf openssl-$openssl_version.tar.gz
        tar xzf pcre-$pcre_version.tar.gz
        echo "Download [${CMSG}tengine-$NEW_Tengine_version.tar.gz${CEND}] successfully! "
        break
      else
        echo "${CWARNING}Tengine version does not exist! ${CEND}"
      fi
    else
      echo "${CWARNING}input error! Upgrade Tengine version is the same as the old version${CEND}"
    fi
  done
  if [ -e "tengine-$NEW_Tengine_version.tar.gz" ]; then
    echo "[${CMSG}tengine-$NEW_Tengine_version.tar.gz${CEND}] found"
    echo "Press Ctrl+c to cancel or Press any key to continue..."
    char=$(get_char)
    tar xzf tengine-$NEW_Tengine_version.tar.gz
    pushd tengine-$NEW_Tengine_version
    make clean
    sed -i 's@CFLAGS="$CFLAGS -g"@#CFLAGS="$CFLAGS -g"@' auto/cc/gcc # close debug
    # Recover the running binary's configure arguments. Fix: use mktemp
    # instead of a scratch file literally named "$$" in the source tree.
    tengine_v_tmp=$(mktemp)
    $tengine_install_dir/sbin/nginx -V &> "$tengine_v_tmp"
    tengine_configure_arguments=$(grep 'configure arguments:' "$tengine_v_tmp" | awk -F: '{print $2}')
    rm -f "$tengine_v_tmp"
    # Word-splitting of the recovered arguments is intentional here.
    ./configure $tengine_configure_arguments
    make -j ${THREAD}
    if [ -f "objs/nginx" ]; then
      # Keep dated backups of the binary, dso_tool and modules directory.
      /bin/mv $tengine_install_dir/sbin/nginx{,$(date +%m%d)}
      /bin/mv $tengine_install_dir/sbin/dso_tool{,$(date +%m%d)}
      /bin/mv $tengine_install_dir/modules{,$(date +%m%d)}
      /bin/cp objs/nginx $tengine_install_dir/sbin/nginx
      /bin/cp objs/dso_tool $tengine_install_dir/sbin/dso_tool
      chmod +x $tengine_install_dir/sbin/*
      make install
      kill -USR2 $(cat /var/run/nginx.pid)
      sleep 1
      kill -QUIT $(cat /var/run/nginx.pid.oldbin)
      popd > /dev/null
      echo "You have ${CMSG}successfully${CEND} upgrade from ${CWARNING}$OLD_Tengine_version${CEND} to ${CWARNING}$NEW_Tengine_version${CEND}"
      rm -rf tengine-$NEW_Tengine_version
    else
      echo "${CFAILURE}Upgrade Tengine failed! ${CEND}"
    fi
  fi
  popd > /dev/null
}
# Upgrade an installed OpenResty in place. Unlike the Nginx/Tengine paths,
# OpenResty is reconfigured with a fixed flag set rather than the running
# binary's recovered arguments; the swap is still USR2/QUIT based.
# Globals (read): oneinstack_dir openresty_install_dir run_user openssl_version pcre_version THREAD
Upgrade_OpenResty() {
  # Consistency fix: silence pushd like Upgrade_Nginx does.
  pushd "${oneinstack_dir}/src" > /dev/null
  [ ! -e "$openresty_install_dir/nginx/sbin/nginx" ] && echo "${CWARNING}OpenResty is not installed on your system! ${CEND}" && exit 1
  OLD_OpenResty_version_tmp=$($openresty_install_dir/nginx/sbin/nginx -v 2>&1)
  OLD_OpenResty_version="$(echo ${OLD_OpenResty_version_tmp#*/} | awk '{print $1}')"
  echo
  echo "Current OpenResty Version: ${CMSG}$OLD_OpenResty_version${CEND}"
  while :; do echo
    read -p "Please input upgrade OpenResty Version(example: 1.9.7.19): " NEW_OpenResty_version
    if [ "$NEW_OpenResty_version" != "$OLD_OpenResty_version" ]; then
      [ ! -e "openresty-$NEW_OpenResty_version.tar.gz" ] && wget --no-check-certificate -c https://openresty.org/download/openresty-$NEW_OpenResty_version.tar.gz > /dev/null 2>&1
      if [ -e "openresty-$NEW_OpenResty_version.tar.gz" ]; then
        src_url=https://www.openssl.org/source/openssl-$openssl_version.tar.gz && Download_src
        src_url=http://mirrors.linuxeye.com/oneinstack/src/pcre-$pcre_version.tar.gz && Download_src
        tar xzf openssl-$openssl_version.tar.gz
        tar xzf pcre-$pcre_version.tar.gz
        echo "Download [${CMSG}openresty-$NEW_OpenResty_version.tar.gz${CEND}] successfully! "
        break
      else
        echo "${CWARNING}OpenResty version does not exist! ${CEND}"
      fi
    else
      echo "${CWARNING}input error! Upgrade OpenResty version is the same as the old version${CEND}"
    fi
  done
  if [ -e "openresty-$NEW_OpenResty_version.tar.gz" ]; then
    echo "[${CMSG}openresty-$NEW_OpenResty_version.tar.gz${CEND}] found"
    echo "Press Ctrl+c to cancel or Press any key to continue..."
    char=$(get_char)
    tar xzf openresty-$NEW_OpenResty_version.tar.gz
    pushd openresty-$NEW_OpenResty_version
    make clean
    # OpenResty bundles nginx under its own minor version, e.g. 1.9.7.19 -> 1.9.7.
    openresty_version_tmp=${NEW_OpenResty_version%.*}
    sed -i 's@CFLAGS="$CFLAGS -g"@#CFLAGS="$CFLAGS -g"@' bundle/nginx-$openresty_version_tmp/auto/cc/gcc # close debug
    # Fix: the original dumped `nginx -V` into a file named "$$" (the PID)
    # that was never read and never removed; the configure line below is
    # fully hard-coded, so the dump is dropped entirely.
    ./configure --prefix=$openresty_install_dir --user=$run_user --group=$run_user --with-http_stub_status_module --with-http_v2_module --with-http_ssl_module --with-http_gzip_static_module --with-http_realip_module --with-http_flv_module --with-http_mp4_module --with-openssl=../openssl-$openssl_version --with-pcre=../pcre-$pcre_version --with-pcre-jit --with-ld-opt='-ljemalloc'
    make -j ${THREAD}
    if [ -f "build/nginx-$openresty_version_tmp/objs/nginx" ]; then
      /bin/mv $openresty_install_dir/nginx/sbin/nginx{,$(date +%m%d)}
      make install
      kill -USR2 $(cat /var/run/nginx.pid)
      sleep 1
      kill -QUIT $(cat /var/run/nginx.pid.oldbin)
      popd > /dev/null
      echo "You have ${CMSG}successfully${CEND} upgrade from ${CWARNING}$OLD_OpenResty_version${CEND} to ${CWARNING}$NEW_OpenResty_version${CEND}"
      rm -rf openresty-$NEW_OpenResty_version
    else
      echo "${CFAILURE}Upgrade OpenResty failed! ${CEND}"
    fi
  fi
  popd > /dev/null
}
|
<gh_stars>1-10
package model

// FilterFunc reports whether the element currently stored behind resultRef
// should be passed through the filter.
type FilterFunc func(resultRef interface{}) bool

// FilterIterator wraps another Iterator and yields only the elements
// accepted by PassFilter.
type FilterIterator struct {
	Iterator
	PassFilter FilterFunc
}

// Next advances the underlying iterator until PassFilter accepts an element
// (returns true) or the underlying iterator is exhausted (returns false).
func (it *FilterIterator) Next(resultRef interface{}) bool {
	for {
		if !it.Iterator.Next(resultRef) {
			return false
		}
		if it.PassFilter(resultRef) {
			return true
		}
	}
}
|
# coding=utf-8
__author__ = "AstroPrint Product Team <product@astroprint.com>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os
import json
import uuid
from flask import request, jsonify, abort
from flask.ext.login import current_user
from requests import ConnectionError
from octoprint.server import restricted_access, SUCCESS
from octoprint.server.api import api
from octoprint.events import eventManager, Events
from astroprint.cloud import astroprintCloud, AstroPrintCloudNoConnectionException
from astroprint.printfiles import FileDestinations
from astroprint.printfiles.downloadmanager import downloadManager
from astroprint.printer.manager import printerManager
#~~ Cloud Slicer control
@api.route('/astroprint', methods=['DELETE'])
@restricted_access
def cloud_slicer_logout():
    # Sign this box out of the linked AstroPrint cloud account.
    astroprintCloud().signout()
    return jsonify(SUCCESS)
@api.route('/astroprint/private-key', methods=['POST'])
def set_private_key():
    # Sign the box in to an AstroPrint.com account with form-supplied
    # credentials.
    # NOTE(review): unlike the sibling endpoints this one has no
    # @restricted_access decorator -- presumably so an unauthenticated
    # setup flow can log in; confirm this is intentional.
    email = request.values.get('email')
    password = request.values.get('password')
    if email and password:
        try:
            if astroprintCloud().signin(email, password):
                return jsonify(SUCCESS)
        except (AstroPrintCloudNoConnectionException, ConnectionError):
            abort(503, "AstroPrint.com can't be reached")
    else:
        # Missing email and/or password.
        abort(400)
    # Credentials were present but signin did not succeed.
    abort(401)
@api.route('/astroprint/login-key', methods=['GET'])
@restricted_access
def get_login_key():
    # Fetch a login key from the cloud service.
    # 503 when the cloud can't be reached, 401 when no key is returned.
    try:
        key = astroprintCloud().get_login_key()
        if key:
            return jsonify(key)
    except (AstroPrintCloudNoConnectionException, ConnectionError):
        abort(503, "AstroPrint.com can't be reached")
    abort(401)
@api.route('/astroprint/upload-data', methods=['GET'])
@restricted_access
def upload_data():
    # Return cloud upload information for the local file named in ?file=.
    filePath = request.args.get('file')
    if filePath:
        uploadInfo = astroprintCloud().get_upload_info(filePath)
        if uploadInfo:
            if 'error' in uploadInfo:
                # 'no_user' means the box is not linked to a cloud account.
                if uploadInfo['error'] == 'no_user':
                    abort(401)
                else:
                    abort(500)
            else:
                return json.dumps(uploadInfo)
        else:
            abort(500)
    # No ?file= parameter supplied.
    abort(400)
@api.route("/astroprint/print-files", methods=["GET"])
@restricted_access
def designs():
forceSyncCloud = request.args.get('forceSyncCloud')
cloud_files = json.loads(astroprintCloud().print_files(forceSyncCloud))
local_files = list(printerManager().fileManager.getAllFileData())
if cloud_files:
for p in cloud_files:
p['local_filename'] = None
p['last_print'] = None
p['uploaded_on'] = None
for i in range(len(local_files)):
if "cloud_id" in local_files[i] and p['id'] == local_files[i]['cloud_id']:
local_file = local_files[i]
p['local_filename'] = local_file['name']
p['local_only'] = False
p['uploaded_on'] = local_file['date']
if 'prints' in local_file \
and 'last' in local_file['prints'] \
and local_file['prints']['last'] \
and 'date' in local_file['prints']['last']:
p['last_print'] = local_file['prints']['last']['date']
del local_files[i]
break
cloud_files = sorted(cloud_files, key=lambda e: e['local_filename'] is None)
else:
cloud_files = []
if local_files:
for p in local_files:
p['id'] = uuid.uuid4().hex
p['local_filename'] = p['name']
p['local_only'] = True
p['last_print'] = None
p['uploaded_on'] = p['date']
if 'gcodeAnalysis' in p:
p['info'] = p['gcodeAnalysis']
del p['gcodeAnalysis']
else:
p['info'] = None
if 'prints' in p \
and 'last' in p['prints'] \
and p['prints']['last'] \
and 'date' in p['prints']['last']:
p['last_print'] = p['prints']['last']['date']
del p['prints']
else:
local_files = []
files = sorted(local_files + cloud_files, key=lambda e: e['last_print'], reverse=True)
return json.dumps(files)
@api.route("/astroprint/print-files/<string:print_file_id>/download", methods=["GET"])
@restricted_access
def design_download(print_file_id):
# ask chintan
# if request.headers.get("X-Api-Key") != settings().get(["api", "key"]):
if current_user is None or not current_user.is_authenticated or not current_user.publicKey:
abort(401)
em = eventManager()
def progressCb(progress):
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "progress",
"id": print_file_id,
"progress": progress
}
)
def successCb(destFile, fileInfo):
if fileInfo is True:
#This means the files was already on the device
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "success",
"id": print_file_id
}
)
else:
if printerManager().fileManager.saveCloudPrintFile(destFile, fileInfo, FileDestinations.LOCAL):
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "success",
"id": print_file_id,
"filename": printerManager().fileManager._getBasicFilename(destFile),
"info": fileInfo["info"]
}
)
else:
errorCb(destFile, "Couldn't save the file")
def errorCb(destFile, error):
if error == 'cancelled':
em.fire(
Events.CLOUD_DOWNLOAD,
{
"type": "cancelled",
"id": print_file_id
}
)
else:
em.fire(
Events.CLOUD_DOWNLOAD,
{
"type": "error",
"id": print_file_id,
"reason": error
}
)
if destFile and os.path.exists(destFile):
os.remove(destFile)
if astroprintCloud().download_print_file(print_file_id, progressCb, successCb, errorCb):
return jsonify(SUCCESS)
return abort(400)
@api.route("/astroprint/print-files/<string:print_file_id>/download", methods=["DELETE"])
@restricted_access
def cancel_design_download(print_file_id):
if downloadManager().cancelDownload(print_file_id):
return jsonify(SUCCESS)
else:
return abort(404)
@api.route("/astroprint/print-jobs/<string:print_job_id>/add-reason", methods=["PUT"])
@restricted_access
def update_cancel_reason(print_job_id):
if not "application/json" in request.headers["Content-Type"]:
return abort(400)
data = request.json
#get reason
reason = {}
if 'reason' in data:
reason['reason_id'] = data['reason']
if 'other_text' in data:
reason['other_text'] = data['other_text']
if reason:
if not astroprintCloud().updateCancelReason(print_job_id, reason):
return abort(500)
else:
return jsonify(SUCCESS)
else:
return abort(400) |
#!/bin/sh
# Begin /usr/sbin/remove-expired-certs.sh
#
# Version 20120211
# Make sure the date is parsed correctly on all systems
# Convert an OpenSSL notAfter date string ("Mon DD HH:MM:SS YYYY TZ") into
# the global variable "certdate" as YYYYMMDD, suitable for numeric
# comparison against `date +%Y%m%d`.
mydate()
{
  # Re-split the whole string on whitespace: $1=month-name $2=day $4=year.
  set -- $1
  local M="$1"
  local d="$2"
  local y="$4"
  local m
  # Zero-pad single-digit days so the result is always eight digits.
  if [ ${d} -lt 10 ]; then d="0${d}"; fi
  # Map the English month abbreviation to its two-digit number.
  case $M in
    Jan) m="01";;  Feb) m="02";;  Mar) m="03";;
    Apr) m="04";;  May) m="05";;  Jun) m="06";;
    Jul) m="07";;  Aug) m="08";;  Sep) m="09";;
    Oct) m="10";;  Nov) m="11";;  Dec) m="12";;
  esac
  certdate="${y}${m}${d}"
}
OPENSSL=/usr/bin/openssl
DIR=/etc/ssl/certs

# Optional first argument overrides the certificate directory.
if [ $# -gt 0 ]; then
  DIR="$1"
fi

# Fix: in find, -and binds tighter than -o, so the original
#   -type f -name "*.pem" -o -name "*.crt"
# parsed as "(-type f -a -name *.pem) -o -name *.crt" and the -type f test
# was never applied to *.crt matches. Group the name tests explicitly.
certs=$( find "${DIR}" -type f \( -name "*.pem" -o -name "*.crt" \) )
today=$( date +%Y%m%d )

# NOTE: the unquoted $certs expansion (inherited from the original) still
# breaks on paths containing whitespace.
for cert in $certs; do
  notafter=$( $OPENSSL x509 -enddate -in "${cert}" -noout )
  date=$( echo ${notafter} | sed 's/^notAfter=//' )
  # mydate (defined above) sets the global "certdate" to YYYYMMDD.
  mydate "$date"
  if [ "${certdate}" -lt "${today}" ]; then
    echo "${cert} expired on ${certdate}! Removing..."
    rm -f "${cert}"
  fi
done
|
<gh_stars>0
/**
 * Created by 都大爽 on 2017/8/7.
 * Application router: maps every top-level path to a lazily loaded page.
 */
import Vue from 'vue'
import VueRouter from 'vue-router'
Vue.use(VueRouter)
// Lazy-load routes: each page's chunk is only fetched when first navigated to
// (AMD-style require with a resolve callback, the pre-dynamic-import idiom).
const Dashboard = resolve => require(['./pages/dashboard.vue'], resolve)
const Admin = resolve => require(['./pages/admin/admin.vue'], resolve)
const Department = resolve => require(['./pages/department/department.vue'], resolve)
const Texture = resolve => require(['./pages/texture/texture.vue'], resolve)
const Category = resolve => require(['./pages/category/category.vue'], resolve)
const Collection = resolve => require(['./pages/collection/collection.vue'], resolve)
const CollectionWork = resolve => require(['./pages/collection-work/collectionWork.vue'], resolve)
const CarryBorrow = resolve => require(['./pages/carry-borrow/carryBorrow.vue'], resolve)
const Damage = resolve => require(['./pages/damage/damage.vue'], resolve)
const Log = resolve => require(['./pages/log/log.vue'], resolve)
const goOut = resolve => require(['./pages/go-out/goOut.vue'], resolve)
const Knowledge = resolve => require(['./pages/knowledge/knowledge.vue'], resolve)
const Statistics = resolve => require(['./pages/statistics/statistics.vue'], resolve)
export default new VueRouter({
routes: [
{path: '/', component: Dashboard},
{path: '/admin', component: Admin},
{path: '/department', component: Department},
{path: '/texture', component: Texture},
{path: '/category', component: Category},
{path: '/collection', component: Collection},
{path: '/collection-work', component: CollectionWork},
{path: '/carry-borrow', component: CarryBorrow},
{path: '/damage', component: Damage},
{path: '/log', component: Log},
{path: '/go-out', component: goOut},
{path: '/knowledge', component: Knowledge},
{path: '/statistics', component: Statistics},
],
// CSS class applied to the active navigation link.
linkActiveClass: 'active'
})
#!/bin/bash
# Submit an update for go.mod on a throwaway "go-mod" branch.
# Fix: the original had no error handling at all, so a failed `go get` or
# `go mod tidy` would still be committed and pushed. Abort on first failure.
set -euo pipefail
set -x
## Submit an update for go mod
# Recreate the work branch from the current HEAD; deletion fails harmlessly
# the first time (branch doesn't exist yet), so tolerate it.
git branch -D go-mod || true
git branch go-mod
git checkout go-mod
git pull origin master
GOPROXY=direct go get github.com/yadisnel/go-ms/v2@master
go fmt
go mod tidy
git add go.mod go.sum
git commit -m "Update go.mod"
git push origin go-mod
git checkout master
git branch -D go-mod
|
/*
* CPAchecker is a tool for configurable software verification.
* This file is part of CPAchecker.
*
* Copyright (C) 2007-2014 <NAME>
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* CPAchecker web page:
* http://cpachecker.sosy-lab.org
*/
package org.sosy_lab.cpachecker.pcc.strategy.partialcertificate;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Set;
import org.sosy_lab.cpachecker.cpa.arg.ARGState;
/**
 * Template for a pass over an ARG: iteratively traverses the graph from a
 * root state and hands every reached state to {@link #visitARGNode(ARGState)}.
 */
public abstract class AbstractARGPass {

    /** Whether already-discovered children are visited again on rediscovery. */
    private final boolean visitMultipleTimes;

    public AbstractARGPass(final boolean pMultipleVisits) {
        visitMultipleTimes = pMultipleVisits;
    }

    /**
     * Depth-first traversal (explicit stack, pollLast) over the ARG rooted at
     * {@code root}. Expansion below a node stops when
     * {@link #stopPathDiscovery(ARGState)} returns true, but that node itself
     * is still visited.
     */
    public void passARG(ARGState root) {
        Set<ARGState> seen = new HashSet<>();
        Deque<ARGState> toVisit = new ArrayDeque<>();
        ARGState currentNode;
        boolean childKnown;
        toVisit.add(root);
        seen.add(root);
        while (!toVisit.isEmpty()) {
            currentNode = toVisit.pollLast();
            visitARGNode(currentNode);
            if (!stopPathDiscovery(currentNode)) {
                for (ARGState child : currentNode.getChildren()) {
                    childKnown = seen.contains(child);
                    if (!childKnown) {
                        toVisit.addLast(child);
                        seen.add(child);
                    }
                    // In multiple-visit mode, re-visit children reached again
                    // via a different path (they are not re-expanded).
                    if (visitMultipleTimes && childKnown) {
                        visitARGNode(child);
                    }
                }
            }
        }
    }

    /** Callback invoked for every visited ARG state. */
    public abstract void visitARGNode(ARGState node);

    /** Returns true if traversal should not continue below {@code node}. */
    public abstract boolean stopPathDiscovery(ARGState node);
}
|
package com.ulisesbocchio.security.saml;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
* @author <NAME>
*/
@SpringBootApplication
public class SamlServiceProviderApplication {

    /** Entry point: boots the Spring application context. */
    public static void main(String[] args) {
        SpringApplication.run(SamlServiceProviderApplication.class, args);
    }
}
|
#!/bin/sh
# Build lz4 1.9.3 and generate the compress-lz4 wrapper used by the harness.
tar -xf lz4-1.9.3.tar.gz
cd lz4-1.9.3/
make
echo $? > ~/install-exit-status
cd ~
# The heredoc delimiter is deliberately unquoted so \$@ / \$LOG_FILE / \$?
# must be escaped to survive into the wrapper. Fix: the original left $?
# unescaped, so it expanded at generation time and the wrapper contained a
# literal "echo 0 > ~/test-exit-status" instead of recording lz4's exit code.
cat > compress-lz4 <<EOT
#!/bin/sh
./lz4-1.9.3/lz4 \$@ ubuntu-18.04.3-desktop-amd64.iso > \$LOG_FILE 2>&1
echo \$? > ~/test-exit-status
EOT
chmod +x compress-lz4
|
<reponame>smagill/opensphere-desktop
package io.opensphere.arcgis2.mantle;
import io.opensphere.mantle.data.DataGroupInfo;
/**
* Interface to an object that knows how to add {@link DataGroupInfo} to the
* system.
*/
public interface MantleController
{
    /**
     * Adds the server group to the system.
     *
     * @param serverName The user designated name of the server.
     * @param baseUrl The server's base url.
     * @param dataGroup The data group to add; becomes the root group for the
     *            named server.
     */
    void addServerGroup(String serverName, String baseUrl, DataGroupInfo dataGroup);
}
|
<filename>src/app/delsos/model/Shopper.ts
export class Shopper{
firstname: string;
lastname: string;
email: string;
password: string;
age:string;
phone: string;
} |
<filename>polymer-globe/polymer/polymer-all/toolkit-ui/tools/loader/loader.js<gh_stars>1000+
/*
* Copyright 2013 The Toolkitchen Authors. All rights reserved.
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file.
*/
(function() {
  // Public namespace: window.Loader.
  var scope = window.Loader = {};
  var flags = {};
  // Convert url query arguments (?a=b&c) into flags.
  if (!flags.noOpts) {
    location.search.slice(1).split('&').forEach(function(o) {
      o = o.split('=');
      o[0] && (flags[o[0]] = o[1] || true);
    });
  }
  // process global logFlags
  parseLogFlags(flags);

  // Load the modules/sheets declared on window[scopeName] by document.writing
  // <script>/<link> tags resolved against the entry-point script's base path.
  function load(scopeName) {
    // imports
    var scope = window[scopeName];
    var entryPointName = scope.entryPointName;
    var processFlags = scope.processFlags;
    // acquire attributes and base path from entry point
    var entryPoint = findScript(entryPointName);
    var base = entryPoint.basePath;
    // acquire common flags (fix: the original declared this var twice in a row)
    var flags = Loader.flags;
    // convert entry-point script-tag attributes to flags
    for (var i = 0, a; (a = entryPoint.attributes[i]); i++) {
      if (a.name !== 'src') {
        flags[a.name] = a.value || true;
      }
    }
    // parse log flags into global
    parseLogFlags(flags);
    // exports
    scope.basePath = base;
    scope.flags = flags;
    // process flags for dynamic dependencies
    if (processFlags) {
      processFlags.call(scope, flags);
    }
    // post-process imports
    var modules = scope.modules || [];
    var sheets = scope.sheets || [];
    // write script tags for dependencies
    modules.forEach(function(src) {
      document.write('<script src="' + base + src + '"></script>');
    });
    // write link tags for styles
    sheets.forEach(function(src) {
      document.write('<link rel="stylesheet" href="' + base + src + '">');
    });
  }

  // Find the <script> whose src contains fileName and annotate it with the
  // basePath prefix preceding that name.
  function findScript(fileName) {
    var script = document.querySelector('script[src*="' + fileName + '"]');
    var src = script.attributes.src.value;
    script.basePath = src.slice(0, src.indexOf(fileName));
    return script;
  }

  // Split flags.log ("a,b,c") into boolean entries on window.logFlags.
  function parseLogFlags(flags) {
    var logFlags = window.logFlags = window.logFlags || {};
    if (flags.log) {
      flags.log.split(',').forEach(function(f) {
        logFlags[f] = true;
      });
    }
  }

  scope.flags = flags;
  scope.load = load;
})();
|
package com.lbs.bot.model

/** A bot message; `text` may be absent -- presumably for non-text updates (TODO confirm). */
case class Message(messageId: String, text: Option[String] = None)

/** An inbound command: its source, the message, and optional callback data (e.g. from an inline button -- confirm). */
case class Command(source: MessageSource, message: Message, callbackData: Option[String] = None)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.