text stringlengths 1 1.05M |
|---|
Based on the given information, the best machine learning model for predicting the sentiment of a text would be a Long Short-Term Memory (LSTM) recurrent neural network. This type of neural network is particularly effective at classifying sequences of words in a text into a sentiment label (e.g., positive, negative, or neutral), and it has been shown to achieve high accuracy in sentiment analysis tasks. |
package ru.stqa.pft.addressbook.tests;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import ru.stqa.pft.addressbook.model.ContactData;
import ru.stqa.pft.addressbook.model.Contacts;
import java.io.File;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;
/**
 * TestNG tests covering modification of an existing address-book contact.
 * Inherits the {@code app} application manager from {@link TestBase}.
 */
public class ContactModificationTests extends TestBase {
/**
 * Precondition: guarantees at least one contact exists in the database
 * before each test, creating one through the UI when the DB is empty.
 */
@BeforeMethod
public void ensurePredictions() {
Contacts before = app.db().contacts();
if (before.size() == 0) {
app.goTo().addContactPage();
app.contact().create(new ContactData().withFirstname("Oleg").withLastname("Ivanov")
.withMiddlename("Antonovich").withGroupname("test1"), true);
// Verify the UI-reported count grew by exactly one.
assertThat(app.contact().count(), equalTo(before.size() + 1));
}
}
/**
 * Modifies an arbitrary existing contact (same id, new field values and
 * photo) and verifies the DB set equals the old set with that one
 * contact replaced.
 */
@Test
public void testContactModification() {
Contacts before = app.db().contacts();
// Pick any contact; iteration order of the set is not significant here.
ContactData modifiedContact = before.iterator().next();
File photo = new File("src/test/resources/zel.jpg");
// Reuse the original id so the modification targets the same record.
ContactData contact = new ContactData().withId(modifiedContact.getId())
.withFirstname("Arnold").withMiddlename("Swartz").withLastname("S.").withGroupname(null)
.withPhoto(photo);
app.goTo().homePage();
app.contact().modify(contact);
// Modification must not change the total number of contacts.
assertThat(app.contact().count(), equalTo(before.size()));
Contacts after = app.db().contacts();
assertThat(after, equalTo(before.without(modifiedContact).withAdded(contact)));
}
}
|
import React from 'react'
import styles from './spaced-items-container.module.scss'
const SpacedItemsContainer = ({children}) => {
return(
<div className={styles.spacedItemsContainer}>
{children}
</div>
)
}
export default SpacedItemsContainer |
<?php
// Tally how many times each character (byte) appears in $string,
// returning an associative array of character => count in first-seen order.
function getCharacterCount($string) {
    $counts = array();
    $length = strlen($string);
    for ($i = 0; $i < $length; $i++) {
        $ch = $string[$i];
        // Values are always ints (never null), so isset is a safe
        // equivalent of array_key_exists here.
        $counts[$ch] = isset($counts[$ch]) ? $counts[$ch] + 1 : 1;
    }
    return $counts;
}
// Demo: print the per-character counts of a sample string.
$string = "Hello World";
$charCount = getCharacterCount($string);
foreach ($charCount as $char => $count) {
    echo "$char - $count \n";
}
// The expected output below was previously left as bare text, which is a
// PHP parse error; it must be inside a comment.
// Result:
// H - 1
// e - 1
// l - 3
// o - 2
//   - 1   (the space character)
// W - 1
// r - 1
// d - 1
import * as antares from 'common/interfaces/antares';
import { ipcMain } from 'electron';
export default (connections: {[key: string]: antares.Client}) => {
   /**
    * Registers an ipcMain handler on `channel` that forwards `params` to the
    * client selected by `params.uid` and wraps the outcome in the uniform
    * { status: 'success' | 'error', response? } envelope.  The four handlers
    * below previously repeated the identical try/catch; it is written once here.
    *
    * @param channel          ipc channel name
    * @param run              operation to perform against the client
    * @param includeResponse  whether the success envelope carries the result
    */
   const register = (
      channel: string,
      run: (params: any) => Promise<unknown>,
      includeResponse: boolean
   ) => {
      ipcMain.handle(channel, async (event, params) => {
         try {
            const result = await run(params);
            return includeResponse
               ? { status: 'success', response: result }
               : { status: 'success' };
         }
         catch (err) {
            // Errors are stringified so they survive IPC serialization.
            return { status: 'error', response: err.toString() };
         }
      });
   };

   register('get-routine-informations', params => connections[params.uid].getRoutineInformations(params), true);
   register('drop-routine', params => connections[params.uid].dropRoutine(params), false);
   register('alter-routine', params => connections[params.uid].alterRoutine(params), false);
   register('create-routine', params => connections[params.uid].createRoutine(params), false);
};
|
#!/bin/sh

# Validates doc/documentation.xml against its schema, renders it to XHTML
# with Saxon into doc-out/, copies the stylesheets in, then renames doc-out
# to a version-specific directory derived from README-VERSION.txt.
# Modernized to use $( ) command substitution instead of backticks.

fatal()
{
  echo "fatal: $1" 1>&2
  exit 1
}

info()
{
  echo "info: $1" 1>&2
}

CURRENT_DIR=$(pwd) ||
  fatal "could not retrieve current directory"

rm -rf doc-out ||
  fatal "could not remove doc-out"

# Schema-validate before spending time rendering.
xmllint \
  --noout \
  --xinclude \
  --schema ext/structural-0.1.0/structural-01.xsd \
  doc/documentation.xml ||
  fatal "could not validate document"

mkdir doc-out ||
  fatal "could not create output directory"

cd doc-out ||
  fatal "could not switch to output directory"

saxon \
  -xi:on \
  -xsl:../ext/structural-0.1.0/structural-01-standalone-x20.xsl \
  -s:../doc/documentation.xml ||
  fatal "could not generate documentation"

cp ../ext/structural-0.1.0/*.css . || fatal "could not copy CSS"
cp ../doc/*.css . || fatal "could not copy CSS"

cd "${CURRENT_DIR}" ||
  fatal "could not restore directory"

# NOTE(review): assumes the first line of README-VERSION.txt is
# "<name> <version>"; the sed turns each space into "-doc-".
VERSION=$(head -n 1 README-VERSION.txt | sed 's/ /-doc-/g') ||
  fatal "could not retrieve version"

mv doc-out "${VERSION}" ||
  fatal "could not rename output directory"
<reponame>AntonYermilov/progue
import threading
import time
from concurrent import futures
import numpy as np
import grpc
from game.client.model.action import *
from game.server.game import Game
from game.util import serialize_object
from .generated import progue_pb2_grpc, progue_pb2
class ProgueServer(progue_pb2_grpc.ProgueServerServicer):
    """gRPC servicer hosting multiple concurrent game sessions.

    grpc dispatches requests from a thread pool, so every access to the
    shared ``games`` dict is guarded by ``lock``.
    """

    def __init__(self):
        super().__init__()
        self.games = dict()  # game_id -> Game
        self.lock = threading.RLock()

    def get_state(self, request, context):
        """Return the serialized game state visible to the requesting player."""
        with self.lock:
            game = self.games[request.game_id.id]
            state = game.get_state(request.player.id)
            return progue_pb2.StateResponse(state=progue_pb2.State(state=serialize_object(state)))

    def make_turn(self, request, context):
        """Apply one player action (0=move, 1=inventory, 2=quit) to its game."""
        # Bug fix: the original compared ints with ``is`` (object identity),
        # which only works for small interned ints by accident; use ``==``.
        if request.action.action_type == 0:
            action_type = ActionType.MOVE_ACTION
            action_desc = MoveAction(row=request.action.move_action.row,
                                     column=request.action.move_action.col)
        elif request.action.action_type == 1:
            action_type = ActionType.INVENTORY_ACTION
            action_desc = InventoryAction(item_id=request.action.inventory_action.item_id,
                                          action=request.action.inventory_action.action_type)
        elif request.action.action_type == 2:
            self.quit(request.game_id.id, request.player.id)
            return progue_pb2.MakeTurnResponse()
        else:
            print('Error: unknown action type')
            return None
        action = Action(type=action_type, desc=action_desc)
        player_id = request.player.id
        with self.lock:
            # ``get`` (not ``[]``) so the None-check below is reachable when
            # the game has already been removed; the original indexed the
            # dict directly, which would raise KeyError first.
            game = self.games.get(request.game_id.id)
            if game is None:
                return None
            try:
                game.on_make_turn(player_id, action)
            except Exception as e:
                # Best-effort: a failing turn must not kill the server thread.
                print(e)
            return progue_pb2.MakeTurnResponse()

    def quit(self, game_id, player_id):
        """Remove the player from the game; drop the game when it empties."""
        with self.lock:
            game = self.games[game_id]
            if game.player_quit(player_id):
                del self.games[game_id]

    def list_games(self, request, context):
        """Return the ids of all currently hosted games."""
        response = progue_pb2.ListGamesResponse()
        with self.lock:
            for game_id in self.games:
                response.game_ids.append(progue_pb2.GameId(id=game_id))
        return response

    def connect_to_game(self, request, context):
        """Join an existing game; fails when the game id is unknown."""
        game_id = request.game_id.id
        with self.lock:
            if game_id in self.games:
                game = self.games[game_id]
                player_id = game.on_connect()
                return progue_pb2.ConnectToGameResponse(successfully_connected=True,
                                                        player=progue_pb2.Player(id=player_id))
            else:
                return progue_pb2.ConnectToGameResponse(successfully_connected=False)

    def create_game(self, request, context):
        """Create a new game under a random id and connect the caller to it."""
        game_id = 'game ' + str(np.random.randint(1 << 30))
        with self.lock:
            if game_id not in self.games:
                game = Game(singleplayer=request.singleplayer, load=request.load)
                self.games[game_id] = game
                player_id = game.on_connect()
                player = progue_pb2.Player(id=player_id)
                response = progue_pb2.CreateGameResponse(successfully_created=True,
                                                         player=player,
                                                         id=game_id)
                return response
            else:
                # Random-id collision: report failure and let the client retry.
                return progue_pb2.CreateGameResponse(successfully_created=False)
def start_server(port: str):
    """Start the gRPC server on 0.0.0.0:<port> and block until interrupted."""
    # so_reuseport disabled so a second instance cannot silently share the port.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=(('grpc.so_reuseport', 0),))
    progue_pb2_grpc.add_ProgueServerServicer_to_server(ProgueServer(), server)
    # add_insecure_port returns the port actually bound.
    result = server.add_insecure_port(f'0.0.0.0:{port}')
    server.start()
    print(f'Serving on {result}')
    try:
        # Keep the main thread alive; grpc serves from its worker threads.
        while True:
            time.sleep(20000)
    except KeyboardInterrupt:
        print('Keyboard interrupt, shutting server down.')
    finally:
        server.stop(0)
|
<reponame>lananh265/social-network
"use strict";

// Auto-generated CommonJS module (compiled from an ES module) exporting the
// SVG path data for one 8x8 icon glyph.  Edit the source asset, not this file.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.tags = void 0;
var tags = {
  "viewBox": "0 0 8 8",
  "children": [{
    "name": "path",
    "attribs": {
      "d": "M0 0v2l3 3 1.5-1.5.5-.5-2-2-1-1h-2zm3.41 0l3 3-1.19 1.22.78.78 2-2-3-3h-1.59zm-1.91 1c.28 0 .5.22.5.5s-.22.5-.5.5-.5-.22-.5-.5.22-.5.5-.5z",
      "transform": "translate(0 1)"
    }
  }]
};
exports.tags = tags;
# JSON endpoints exposing popularity-change leaderboards, with per-options
# response caching via with_cache (from ApplicationHelper).
class PopularitiesController < ApplicationController
  include ApplicationHelper

  # Default number of entries returned when the client sends no :limit.
  DEFAULT_LIMIT = 50

  # Largest absolute popularity changes (in either direction).
  def largest_popularity_changes
    options = options_from_params
    res = with_cache("largest_popularity_changes", hash_hash(options)) do
      format_popularity_entries Popularity.largest_popularity_changes(options)
    end
    render json: res
  end

  # Largest popularity decreases.
  def largest_popularity_decreases
    options = options_from_params
    res = with_cache("largest_popularity_decreases", hash_hash(options)) do
      format_popularity_entries Popularity.largest_popularity_decreases(options)
    end
    render json: res
  end

  # Largest popularity increases.
  def largest_popularity_increases
    options = options_from_params
    res = with_cache("largest_popularity_increases", hash_hash(options)) do
      # NOTE(review): this calls Popularity.largest_popularity_changes, not
      # largest_popularity_increases — looks like a copy/paste slip unless
      # "changes" already yields increases under some ordering; confirm
      # against the Popularity model before changing.
      format_popularity_entries Popularity.largest_popularity_changes(options)
    end
    render json: res
  end

  private

  # Query options shared by all three endpoints, validated where applicable.
  def options_from_params
    {
      hours_ago: hours_ago_param,
      limit: limit_param,
      percentage: percentage_param,
      min_popularity: min_popularity_param,
      start_index: start_index_param,
    }
  end

  # Non-negative pagination offset; raises BadRequest otherwise.
  def start_index_param
    start_index = params[:start_index].to_i
    if start_index < 0
      raise BadRequest, "please provide a positive integer for start_index"
    end
    start_index
  end

  # Strictly positive look-back window in hours; raises BadRequest otherwise.
  def hours_ago_param
    hours_ago = params[:hours_ago].to_i
    if hours_ago <= 0
      raise BadRequest, "please provide a positive integer for hours_ago"
    end
    hours_ago
  end

  def limit_param
    params.fetch(:limit, DEFAULT_LIMIT).to_i
  end

  # True only when the client explicitly passed percentage=true.
  def percentage_param
    params["percentage"] == "true"
  end

  # Minimum popularity filter; only meaningful (and only read) in
  # percentage mode — returns nil otherwise.
  def min_popularity_param
    return unless percentage_param
    min_popularity = params[:min_popularity].to_i
    if min_popularity < 0
      raise BadRequest, "please provide a positive integer for min_popularity"
    end
    min_popularity
  end

  # Projects raw model rows onto the stable JSON shape of the API.
  def format_popularity_entries(entries)
    entries.map do |entry|
      {
        start_popularity: entry["start_popularity"],
        end_popularity: entry["end_popularity"],
        popularity_difference: entry["popularity_difference"],
        symbol: entry["symbol"],
      }
    end
  end
end
|
<gh_stars>0
#include "mzpch.h"
#include "RendererAPI.h"

namespace Mazel
{
	// Static selection of the active graphics backend, defaulting to OpenGL.
	RendererAPI::API RendererAPI::s_API = RendererAPI::API::OpenGL;
}
#!./test-libs/bats/bin/bats
# ---------------------------------------------------------------------------
# Copyright (c) 2021, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load '../libs/bats-support/load'
load '../libs/bats-assert/load'

# Creates a new Ballerina package with the ALPHA2 distribution, renames the
# directory to include the version, and rewrites Ballerina.toml with the
# test organization and version.
@test "Create package '$PACKAGE_NAME:$VERSION' from ALPHA2." {
run $ALPHA2/bin/bal new $PACKAGE_NAME
assert_output "Created new Ballerina package '$PACKAGE_NAME' at $PACKAGE_NAME."
[ "$status" -eq 0 ]
mv $PACKAGE_NAME "$PACKAGE_NAME-$VERSION"
local current_user=$(whoami);
cd "$PACKAGE_NAME-$VERSION"
# `bal new` stamps the current user as org and 0.1.0 as version; replace both.
sed -i'.original' -e "s/$current_user/$TEST_ORGANIZATION/g" "Ballerina.toml"
sed -i'.original' -e "s/0.1.0/$VERSION/g" "Ballerina.toml"
rm "Ballerina.toml.original"
echo '# Sample github package' > "Package.md"
cd -
}

# Builds the package and verifies that the expected .bala artifact is produced.
@test "Build package '$PACKAGE_NAME:$VERSION' from ALPHA2" {
cd "$PACKAGE_NAME-$VERSION"
run $ALPHA2/bin/bal build
assert_line --partial "target/bala/$TEST_ORGANIZATION-$PACKAGE_NAME-any-$VERSION.bala"
[ "$status" -eq 0 ]
cd -
}
|
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "check.h"
static enum permission eval(const char *keyword, const char *principal, const char *cmd, const char *user, const char *group, const char *command)
{
enum permission_principal pp = 0;
if (!strcmp(user, principal)) {
pp = PERM_USER;
} else if (principal[0] == ':' && !strcmp(group, principal + 1)) {
pp = PERM_GROUP;
}
enum permission_command pc = 0;
if (cmd == NULL) {
pc = PERM_ALL;
} else if (!strcmp(cmd, command)) {
pc = PERM_CMD;
}
enum permission_keyword pk = 0;
if (!strcmp(keyword, "authorize")) {
pk = PERM_PASS;
} else if (!strcmp(keyword, "authenticate")) {
pk = PERM_AUTH;
} else if (!strcmp(keyword, "deny")) {
pk = PERM_DENY;
} else {
fatal(0, "invalid keyword: %s", keyword);
}
if (pp == 0 || pc == 0) {
return UNKNOWN;
}
return pp | pc | pk;
}
/*
 * Determine the permission mask for (user, group, command) by scanning the
 * configuration file at CONFIG_PATH.  Each non-empty, non-comment line has
 * the form "<keyword> <principal> [command]" and is scored by eval(); the
 * numerically highest-scoring rule wins.
 */
enum permission get_permission(const char *user, const char *group, const char *command)
{
	enum permission perm = UNKNOWN;
	FILE *f = fopen(CONFIG_PATH, "r");
	if (f == NULL) {
		fatal(1, CONFIG_PATH);
	}
	ssize_t s = 0;
	char *buf = NULL;
	size_t len = 0;
	while ((s = getline(&buf, &len, f)) > 0) {
		if (*buf == '#') {
			continue;
		}
		if (*buf == '\n') {
			continue;
		}
		/* Strip the trailing newline if present.  The original wrote '\0'
		 * over the last byte unconditionally, silently truncating the final
		 * line of a file that does not end in a newline. */
		if (buf[s - 1] == '\n') {
			buf[s - 1] = '\0';
		}
		char *space = strchr(buf, ' ');
		if (!space) {
			fatal(0, "invalid line in config: %s", buf);
		}
		char *keyword = buf;
		*space = '\0';
		char *principal = space + 1;
		char *cmd = NULL;
		space = strchr(principal, ' ');
		if (space) {
			*space = '\0';
			cmd = space + 1;
		}
		enum permission tmp = eval(keyword, principal, cmd, user, group, command);
		/* only change if a higher precedence is found */
		if (tmp > perm) {
			perm = tmp;
		}
	}
	if (s == -1 && ferror(f)) {
		fatal(1, "reading configuration");
	}
	/* free(NULL) is a no-op, so no guard is needed. */
	free(buf);
	/* f is non-NULL here: fopen failure exited above. */
	fclose(f);
	return perm;
}
|
def smallest_number(arr):
    """Return the smallest element of ``arr``.

    Args:
        arr: a non-empty sequence of mutually comparable values.

    Raises:
        IndexError: if ``arr`` is empty, matching the original
            implementation, which seeded its scan with ``arr[0]``.
    """
    if not arr:
        # Preserve the original failure mode (indexing arr[0] on an
        # empty sequence) rather than min()'s ValueError.
        raise IndexError('list index out of range')
    # The stdlib min() replaces the hand-rolled scan loop.
    return min(arr)
// Returns every pairwise concatenation of an element of arr1 with an element
// of arr2, in arr1-major order — identical to the original nested loops.
function combination(arr1, arr2) {
  return arr1.flatMap((first) => arr2.map((second) => first + second));
}
// Demo: logs ["a1", "a2", "b1", "b2"].
const result = combination(["a", "b"], ["1", "2"]);
console.log(result);
<reponame>Me-Diga/mediga<gh_stars>0
// Import the gulp module and its plugins
var gulp = require('gulp'),
cssnano = require('gulp-cssnano'),
concat = require('gulp-concat'),
uglify = require('gulp-uglify'),
runSequence = require('run-sequence');

// Task that minifies all CSS files and concatenates them into a single
// file, using cssnano and concat
gulp.task('minify-css', function() {
return gulp.src('static/css/**/*.css')
.pipe(concat('main.min.css'))
.pipe(cssnano())
.pipe(gulp.dest('static/css'));
});

// Task that minifies all JS files and concatenates them into a single
// file, using uglify and concat
gulp.task('minify-js', function() {
return gulp.src('static/js/**/*.js')
.pipe(concat('main.min.js'))
.pipe(uglify())
.pipe(gulp.dest('static/js'));
});

// Default task: runs the minify tasks in sequence
gulp.task('default', function(callback) {
runSequence('minify-css',
'minify-js',
callback);
});
|
/* Floyd's cycle-detection ("tortoise and hare"): one pointer advances a
   single node per step while the other advances two; if the list contains
   a loop the two pointers must eventually coincide inside it.
   Returns the meeting node, or NULL when the list is loop-free. */
Node *detectLoop(Node* h)
{
    Node *walker = h;
    Node *runner = h;

    while (walker != NULL && runner != NULL && runner->next != NULL) {
        walker = walker->next;
        runner = runner->next->next;

        /* Pointers met -> a cycle exists; report where they met. */
        if (walker == runner)
            return walker;
    }

    /* The fast pointer reached the end of the list: no loop. */
    return NULL;
}
<filename>swagger/src/main/java/com/strategicgains/restexpress/plugin/swagger/wrapper/OAuthFlows.java
package com.strategicgains.restexpress.plugin.swagger.wrapper;
/**
 * Container for the OAuth 2.0 flow configurations of a security scheme.
 * Each field is {@code null} when the corresponding grant type is not
 * supported by the scheme.
 *
 * @see <a href="https://swagger.io/specification/#oauthFlowsObject">OpenAPI OAuth Flows Object</a>
 */
public class OAuthFlows {
    private OAuthFlow implicit;
    private OAuthFlow password;
    private OAuthFlow clientCredentials;
    private OAuthFlow authorizationCode;

    public OAuthFlow getImplicit() {
        return implicit;
    }

    public void setImplicit(OAuthFlow implicit) {
        this.implicit = implicit;
    }

    public OAuthFlow getPassword() {
        return password;
    }

    public void setPassword(OAuthFlow password) {
        this.password = password;
    }

    public OAuthFlow getClientCredentials() {
        return clientCredentials;
    }

    public void setClientCredentials(OAuthFlow clientCredentials) {
        this.clientCredentials = clientCredentials;
    }

    public OAuthFlow getAuthorizationCode() {
        return authorizationCode;
    }

    public void setAuthorizationCode(OAuthFlow authorizationCode) {
        this.authorizationCode = authorizationCode;
    }
}
/**
 * Looks up a stored mountpoint matching `searchObj`, initializes this
 * instance from the found document via `this.setup`, and resolves with
 * `this`.  Rejects with an Error when no matching document exists.
 *
 * An async function already returns a promise, so the original
 * `new Promise(async (resolve, reject) => ...)` wrapper (the
 * promise-constructor anti-pattern) is removed: `throw` now produces the
 * rejection and `return` the resolution, with identical observable behavior.
 *
 * NOTE(review): relies on being invoked with `this` bound to the mountpoint
 * instance (as a method or via .call) — confirm at the call sites.
 */
async function retrieveAndSetupMountpoint(searchObj) {
  const doc = await StoredMountpoint.findOne(searchObj);
  if (!doc) {
    throw new Error('Mountpoint Not Found');
  }
  this.setup(doc);
  return this;
}
package com.github.hinsteny.commons.warp.http.protocol;
import org.apache.http.HttpResponse;
/**
 * Strategy interface for pre-processing an HTTP response before its body
 * is consumed by the caller.
 *
 * @author Hinsteny
 * @version PreHandleResponse: 2019-08-12 11:44 All rights reserved.$
 */
public interface PreHandleResponse {

    /**
     * Pre-process the HTTP response.
     *
     * @param httpResponse response result
     * @return the result of pre-handling the response
     * @throws Exception if reading or handling the response fails
     */
    String preHandle(HttpResponse httpResponse) throws Exception;
}
|
#!/bin/bash
# Render the ⌫ (erase-to-the-left) symbol as a 256pt PNG with a transparent
# background, using ImageMagick's convert and the macOS Apple Symbols font.
convert -background transparent -pointsize 256 -font /System/Library/Fonts/Apple\ Symbols.ttf label:⌫ test.png
|
# Rotating public-backup fetcher: downloads the latest dump files advertised
# at $BURL/latest.txt and keeps copies in month (M*), quarter (Q*), year (Y*)
# and 14-day (J*) rotation directories under /tmp/pb.
BURL=http://localhost/sites/default/files/public-backups
mkdir -p /tmp/pb
cd /tmp/pb
# Single && chain so any failing step aborts the remainder of the run.
echo Setting variables for rotating backups ... && M=M$(date +%m) && Q=Q$(( ($(date +%-m)-1)/3+1 )) && YYYY=Y$(date +%Y) && J14=J$(( $(date +%-j) % 14 )) && YYYYMMDD=$(date +%Y-%m-%d) && pwd && mkdir -p $M $Q $YYYY $J14 && echo $YYYYMMDD - $YYYY $Q $M $J14 - $BURL >> backup-history.txt && echo $BURL && rm -f $J14/* $YYYY/* $Q/* $M/* && set -x && curl -k -s -o $J14/latest.txt $BURL/latest.txt && LTT=$(< $J14/latest.txt) && curl -k -s -o $J14/$LTT.plain-dump.sql.txt.gz $BURL/$LTT.plain-dump.sql.txt.gz && curl -k -s -o $J14/$LTT.sanitized-dump.sql.txt.gz $BURL/$LTT.sanitized-dump.sql.txt.gz && curl -k -s -o $J14/$LTT.sanitized-restore.sql.txt.gz $BURL/$LTT.sanitized-restore.sql.txt.gz && curl -k -s -o $J14/$LTT.sites-default-files.tar.xz $BURL/$LTT.sites-default-files.tar.xz && cp $J14/* $YYYY/ && cp $J14/* $Q/ && cp $J14/* $M/
|
# This utility script is written by @gromak and is provided "as is".
# The author doesn't guarantee anything about it. It might work.
# Usage: pass any grep arguments; they are applied recursively to the
# listed source trees.
grep --color=auto -r "$@" src test bench core/Pos update/Pos db/Pos lrc/Pos infra/Pos ssc/Pos godtossing/Pos txp/Pos
|
#!/bin/bash
# Copyright 2017 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# End-to-end tests for `kompose convert` output (k8s and OpenShift providers).

# for mac
if type "greadlink" > /dev/null; then
KOMPOSE_ROOT=$(greadlink -f $(dirname "${BASH_SOURCE}")/../../..)
else
KOMPOSE_ROOT=$(readlink -f $(dirname "${BASH_SOURCE}")/../../..)
fi
source $KOMPOSE_ROOT/script/test/cmd/lib.sh

# Get current branch and remote url of git repository
branch=$(git branch | grep \* | cut -d ' ' -f2-)
uri=$(git config --get remote.origin.url)
if [[ $uri != *".git"* ]]; then
uri="${uri}.git"
fi

# Get version
version=`kompose version`

# Warning Template
warning="Buildconfig using $uri::$branch as source."

# Replacing variables with current branch and uri in the expected output template
sed -e "s;%VERSION%;$version;g" -e "s;%URI%;$uri;g" -e "s;%REF%;$branch;g" $KOMPOSE_ROOT/script/test/fixtures/nginx-node-redis/output-os-template.json > /tmp/output-os.json

## TEST V2: compose v2 file converts cleanly for both providers
DIR="v2"
k8s_cmd="kompose -f $KOMPOSE_ROOT/script/test/fixtures/$DIR/docker-compose.yaml convert --stdout -j --with-kompose-annotation=false"
os_cmd="kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/$DIR/docker-compose.yaml convert --stdout -j --with-kompose-annotation=false"
k8s_output="$KOMPOSE_ROOT/script/test/fixtures/$DIR/output-k8s.json"
os_output="$KOMPOSE_ROOT/script/test/fixtures/$DIR/output-os.json"
convert::expect_success "$k8s_cmd" "$k8s_output"
convert::expect_success "$os_cmd" "$os_output"

## TEST V3: compose v3 file converts, but emits a warning
DIR="v3.0"
k8s_cmd="kompose -f $KOMPOSE_ROOT/script/test/fixtures/$DIR/docker-compose.yaml convert --stdout -j --with-kompose-annotation=false"
os_cmd="kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/$DIR/docker-compose.yaml convert --stdout -j --with-kompose-annotation=false"
k8s_output="$KOMPOSE_ROOT/script/test/fixtures/$DIR/output-k8s.json"
os_output="$KOMPOSE_ROOT/script/test/fixtures/$DIR/output-os.json"
convert::expect_success_and_warning "$k8s_cmd" "$k8s_output"
convert::expect_success_and_warning "$os_cmd" "$os_output"

######
# Test the output file behavior of kompose convert
# Default behavior without -o
convert::check_artifacts_generated "kompose -f $KOMPOSE_ROOT/script/test/fixtures/redis-example/docker-compose.yml convert -j" "redis-deployment.json" "redis-service.json" "web-deployment.json" "web-service.json"
# Behavior with -o <filename>
convert::check_artifacts_generated "kompose -f $KOMPOSE_ROOT/script/test/fixtures/redis-example/docker-compose.yml convert -o output_file -j" "output_file"
# Behavior with -o <dirname>
convert::check_artifacts_generated "kompose -f $KOMPOSE_ROOT/script/test/fixtures/redis-example/docker-compose.yml convert -o $TEMP_DIR -j" "$TEMP_DIR/redis-deployment.json" "$TEMP_DIR/redis-service.json" "$TEMP_DIR/web-deployment.json" "$TEMP_DIR/web-service.json"
# Behavior with -o <dirname>/<filename>
convert::check_artifacts_generated "kompose -f $KOMPOSE_ROOT/script/test/fixtures/redis-example/docker-compose.yml convert -o $TEMP_DIR/output_file -j" "$TEMP_DIR/output_file"
# Behavior with -o <non-existent-dirname>/ (directory must be created)
dst=$TEMP_DIR/output_dir/
convert::check_artifacts_generated "kompose -f $KOMPOSE_ROOT/script/test/fixtures/redis-example/docker-compose.yml convert -o $dst -j" "${dst}redis-deployment.json" "${dst}redis-service.json" "${dst}web-deployment.json" "${dst}web-service.json"
# Behavior with -o <non-existent-dirname>/<filename>
convert::check_artifacts_generated "kompose -f $KOMPOSE_ROOT/script/test/fixtures/redis-example/docker-compose.yml convert -o $TEMP_DIR/output_dir2/output_file -j" "$TEMP_DIR/output_dir2/output_file"

#TEST the pvc-request-size command parameter
convert::check_artifacts_generated "kompose -f $KOMPOSE_ROOT/script/test/fixtures/pvc-request-size/docker-compose.yml convert -o $TEMP_DIR/output_dir2/output-k8s.json -j --pvc-request-size=300Mi" "$TEMP_DIR/output_dir2/output-k8s.json"
convert::check_artifacts_generated "kompose --provider=openshift -f $KOMPOSE_ROOT/script/test/fixtures/pvc-request-size/docker-compose.yml convert -o $TEMP_DIR/output_dir2/output-os.json -j --pvc-request-size=300Mi" "$TEMP_DIR/output_dir2/output-os.json"

######
# Test the path of build image
# Test build v2 absolute compose file
convert::check_artifacts_generated "kompose --build local -f $KOMPOSE_ROOT/script/test/fixtures/buildconfig/docker-compose.yml convert -o $TEMP_DIR/output_file" "$TEMP_DIR/output_file"
# Test build v2 relative compose file
relative_path=$(realpath --relative-to="$PWD" "$KOMPOSE_ROOT/script/test/fixtures/buildconfig/docker-compose.yml")
convert::check_artifacts_generated "kompose --build local -f $relative_path convert -o $TEMP_DIR/output_file" "$TEMP_DIR/output_file"
# Test build v3 absolute compose file with context
convert::check_artifacts_generated "kompose --build local -f $KOMPOSE_ROOT/script/test/fixtures/buildconfig/docker-compose-v3.yml convert -o $TEMP_DIR/output_file" "$TEMP_DIR/output_file"
# Test build v3 relative compose file with context
relative_path=$(realpath --relative-to="$PWD" "$KOMPOSE_ROOT/script/test/fixtures/buildconfig/docker-compose-v3.yml")
convert::check_artifacts_generated "kompose --build local -f $relative_path convert -o $TEMP_DIR/output_file" "$TEMP_DIR/output_file"

#####
# Test the build config with push image
# see tests_push_image.sh for local push test
# Should warn when push image disabled
cmd="kompose -f $KOMPOSE_ROOT/script/test/fixtures/buildconfig/docker-compose-build-no-image.yml -o $TEMP_DIR/output_file convert --build=local --push-image-registry=whatever"
convert::expect_warning "$cmd" "Push image registry 'whatever' is specified but push image is disabled, skipping pushing to repository"
|
#!/bin/sh

test_description='test quickfetch from local'

. ./test-lib.sh

# One commit produces 3 objects: commit, tree, and blob.
test_expect_success setup '

	test_tick &&
	echo ichi >file &&
	git add file &&
	git commit -m initial &&

	cnt=$( (
		git count-objects | sed -e "s/ *objects,.*//"
	) ) &&
	test $cnt -eq 3
'

# A clone configured as a plain remote (no alternates) copies all 3 objects.
test_expect_success 'clone without alternate' '

	(
		mkdir cloned &&
		cd cloned &&
		git init-db &&
		git remote add -f origin ..
	) &&
	cnt=$( (
		cd cloned &&
		git count-objects | sed -e "s/ *objects,.*//"
	) ) &&
	test $cnt -eq 3
'

test_expect_success 'further commits in the original' '

	test_tick &&
	echo ni >file &&
	git commit -a -m second &&

	cnt=$( (
		git count-objects | sed -e "s/ *objects,.*//"
	) ) &&
	test $cnt -eq 6
'

# Deliberately corrupt the clone: unpack the new objects, then delete the
# blob so only the commit and tree survive.
test_expect_success 'copy commit and tree but not blob by hand' '

	git rev-list --objects HEAD |
	git pack-objects --stdout |
	(
		cd cloned &&
		git unpack-objects
	) &&

	cnt=$( (
		cd cloned &&
		git count-objects | sed -e "s/ *objects,.*//"
	) ) &&
	test $cnt -eq 6 &&

	blob=$(git rev-parse HEAD:file | sed -e "s|..|&/|") &&
	test -f "cloned/.git/objects/$blob" &&

	rm -f "cloned/.git/objects/$blob" &&

	cnt=$( (
		cd cloned &&
		git count-objects | sed -e "s/ *objects,.*//"
	) ) &&
	test $cnt -eq 5
'

# Fetch must notice the missing blob and restore a complete object set.
test_expect_success 'quickfetch should not leave a corrupted repository' '

	(
		cd cloned &&
		git fetch
	) &&

	cnt=$( (
		cd cloned &&
		git count-objects | sed -e "s/ *objects,.*//"
	) ) &&
	test $cnt -eq 6
'

# With the source repo listed as an alternate, fetch -k -k must not copy
# any objects at all (neither loose nor packed).
test_expect_success 'quickfetch should not copy from alternate' '

	(
		mkdir quickclone &&
		cd quickclone &&
		git init-db &&
		(cd ../.git/objects && pwd) >.git/objects/info/alternates &&
		git remote add origin .. &&
		git fetch -k -k
	) &&
	obj_cnt=$( (
		cd quickclone &&
		git count-objects | sed -e "s/ *objects,.*//"
	) ) &&
	pck_cnt=$( (
		cd quickclone &&
		git count-objects -v | sed -n -e "/packs:/{
				s/packs://
				p
				q
			}"
	) ) &&
	origin_master=$( (
		cd quickclone &&
		git rev-parse origin/master
	) ) &&
	echo "loose objects: $obj_cnt, packfiles: $pck_cnt" &&
	test $obj_cnt -eq 0 &&
	test $pck_cnt -eq 0 &&
	test z$origin_master = z$(git rev-parse master)
'

# Generate 1000 refs pointing at HEAD to stress command-line length limits.
test_expect_success 'quickfetch should handle ~1000 refs (on Windows)' '

	git gc &&
	head=$(git rev-parse HEAD) &&
	branchprefix="$head refs/heads/branch" &&
	for i in 0 1 2 3 4 5 6 7 8 9; do
		for j in 0 1 2 3 4 5 6 7 8 9; do
			for k in 0 1 2 3 4 5 6 7 8 9; do
				echo "$branchprefix$i$j$k" >> .git/packed-refs
			done
		done
	done &&
	(
		cd cloned &&
		git fetch &&
		git fetch
	)

'

test_done
|
<filename>Database_construction/from_kraken2_to_bed.py
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# modules required for handling dataframes
import os
import pandas as pd

# In[2]:
basedir = '/home/yiheng/MinION_data'  # the directory where all the documents of each sequencing run are stored.
# Load the raw kraken2 per-read output (tab-separated, no header row).
k2_output_db = pd.read_csv(os.path.join(basedir, 'genome_reference_mock.standardkraken2_output'), sep='\t', header=None)

# In[3]:
# you can name whatever you like, or just use numbers.
# Columns of standard kraken2 output: C/U flag, read id, assigned taxid,
# read length, and the per-segment "taxid:length" k-mer profile.
headers = ['classification', 'accession', 'taxid', 'length', 'kmer_profile']
k2_output_db.columns = headers

# In[8]:
# Keep only reads kraken2 classified ('C'); unclassified rows carry no taxa.
k2_output_db_classified = k2_output_db[k2_output_db.classification == 'C']

# In[9]:
k2_output_db_classified = k2_output_db_classified.reset_index(drop=True)
def generate_final_bed(input_df, row_index):
    """Convert one read's kraken2 k-mer profile into BED-like intervals.

    The last column of ``input_df`` is a space-separated list of
    "taxid:length" pairs; segment lengths are cumulated into (start, end)
    coordinates along the read.  Returns a DataFrame with columns
    start, end, accession for the retained segments.
    """
    a = input_df.iloc[row_index, -1]  # kmer_profile string
    b = a.split(' ')
    # important filter function: drops empty strings caused by repeated spaces
    b = list(filter(None, b))
    end = [int(c.split(':')[-1]) for c in b]
    taxid_list = [c.split(':')[0] for c in b]
    # Turn per-segment lengths into cumulative end coordinates.
    for x in range(0, len(end)):
        if x == 0:
            end[x] = end[x]
        elif x == len(end) - 1:
            # NOTE(review): the final end is forced to read length - 33;
            # presumably a k-mer edge offset — confirm the constant against
            # the kraken2 k-mer size used for this database.
            end[x] = int(input_df.iloc[row_index, 3]) - 33
        else:
            end[x] = end[x] + end[x - 1]
    start = end.copy()
    for y in range(0, len(start)):
        if y == 0:
            start[y] = 0
        else:
            start[y] = end[y - 1]
    bed_df = pd.DataFrame({'taxid': taxid_list, 'start': start, 'end': end})
    # Keep classified segments, plus unclassified (taxid '0') ones shorter
    # than 100 bp.
    bed_df_trim = bed_df[(bed_df.taxid != '0') | ((bed_df.taxid == '0') & (bed_df.end - bed_df.start < 100))]
    bed_df_trim = bed_df_trim.reset_index(drop=True)
    bed_df_drop_taxid = bed_df_trim.drop(columns='taxid')
    bed_df_drop_taxid['accession'] = input_df.iloc[row_index, 1]
    return bed_df_drop_taxid
# In[11]:
# These following lines need to be sticked together otherwise the bed_df_final will keep accumulating
columns = ['start','end', 'accession']
bed_df_final = pd.DataFrame(columns=columns)
for index in k2_output_db_classified.index:
bed_df_final = bed_df_final.append(generate_final_bed(k2_output_db_classified, index), ignore_index = True)
# In[12]:
bed_df_final = bed_df_final[['accession', 'start', 'end']]
# In[13]:
bed_df_final.to_csv(os.path.join(basedir, 'genome_reference_mock_final.bed'), sep='\t', header=False, index=False)
# In[ ]:
|
#!/bin/bash
source path.sh

# Set to true to additionally score results with sclite for detailed WER.
USE_SCLITE=true

# test text normalization
echo "Start get text normalization test data ..."
python3 get_textnorm_data.py --test-file=data/textnorm_test_cases.txt --output-dir=data/textnorm
echo "Start test text normalization ..."
python3 test_textnorm.py --input-dir=data/textnorm --output-dir=exp/textnorm

# whether use sclite to get more detail information of WER
if [ "$USE_SCLITE" = true ];then
    echo "Start sclite textnorm ..."
    ${MAIN_ROOT}/tools/sctk/bin/sclite -i wsj -r ./exp/textnorm/text.ref.clean trn -h ./exp/textnorm/text.tn trn -e utf-8 -o all
fi
package tree.symbols;
import tree.DefaultTreeNodeSymbol;
/**
 * Parse-tree symbol representing the left-brace token "{".
 */
public class TSBraceLeft extends DefaultTreeNodeSymbol {
// NOTE(review): id and text are mutable public statics; if no caller
// reassigns them they should be declared final — confirm sibling symbol
// classes follow the same pattern before changing.
public static int id = BRACE_LEFT;
public static String text = "{";
public TSBraceLeft() {
super(text, id);
}
}
|
# Bootstrap RubyGems (when compiled in) and opt into did_you_mean
# suggestions when that gem is present.
if defined?(Gem)
  begin
    require 'rubygems'
  rescue LoadError
    # For JRUBY-5333, gracefully fail to load, since stdlib may not be available
    warn 'RubyGems not found; disabling gems' if $VERBOSE
  else
    begin
      gem 'did_you_mean'
      require 'did_you_mean'
    rescue Gem::LoadError, LoadError
      # did_you_mean is optional; continue silently without suggestions.
    end if defined?(DidYouMean)
  end
end
|
#!/usr/bin/env bash
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#
# Restores NuGet packages and compiles the generated C# client into
# bin/IO.Swagger.dll with the Mono mcs compiler.

frameworkVersion=net40

# sdk must match installed framworks under PREFIX/lib/mono/[value]
sdk=4

# langversion refers to C# language features. see man mcs for details.
langversion=${sdk}
nuget_cmd=nuget

# Match against our known SDK possibilities
case "${sdk}" in
    4)
        langversion=4
        ;;
    4.5*)
        langversion=5
        ;;
    4.6*)
        langversion=6
        ;;
    4.7*)
        langversion=7 # ignoring 7.1 for now.
        ;;
    *)
        langversion=6
        ;;
esac

echo "[INFO] Target framework: ${frameworkVersion}"

# Fall back to a locally downloaded nuget.exe run under mono when nuget
# is not on PATH.
if ! type nuget &>/dev/null; then
    echo "[INFO] Download nuget and packages"
    wget -nc https://dist.nuget.org/win-x86-commandline/latest/nuget.exe;
    nuget_cmd="mono nuget.exe"
fi

mozroots --import --sync
${nuget_cmd} restore IO.Swagger.sln -o packages;

echo "[INFO] Copy DLLs to the 'bin' folder"
mkdir -p bin;
# NOTE(review): the paths below mix two NuGet layout styles
# (Package.Version/lib/... vs package/version/lib/...) and net40 vs net45
# lib folders — verify these match what `restore` actually produced.
cp packages/Newtonsoft.Json.10.0.3/lib/net40/Newtonsoft.Json.dll bin/Newtonsoft.Json.dll;
cp packages/microsoft.aspnet.webapi.client/5.2.7/lib/net45/System.Net.Http.Formatting.dll bin/System.Net.Http.Formatting.dll;
cp packages/microsoft.aspnet.webapi.core/5.2.7/lib/net45/System.Web.Http.dll bin/System.Web.Http.dll;
cp packages/System.ComponentModel.Annotations/4.5.0/lib/net461/System.ComponentModel.Annotations.dll bin/System.ComponentModel.Annotations.dll;

echo "[INFO] Run 'mcs' to build bin/IO.Swagger.dll"
mcs -langversion:${langversion} -sdk:${sdk} -r:bin/Newtonsoft.Json.dll,bin/System.Net.Http.Formatting.dll,\
bin/System.Web.Http.dll,\
System.ComponentModel.DataAnnotations.dll,\
System.Runtime.Serialization.dll \
-target:library \
-out:bin/IO.Swagger.dll \
-recurse:'src/IO.Swagger/*.cs' \
-doc:bin/IO.Swagger.xml \
-platform:anycpu

if [ $? -ne 0 ]
then
    echo "[ERROR] Compilation failed with exit code $?"
    exit 1
else
    echo "[INFO] bin/IO.Swagger.dll was created successfully"
fi
|
<reponame>nedphae/contact-center-client<gh_stars>1-10
/** !
 * Consultation reception page.
 */
import React from 'react';
import ChatApp from '../../components/Chat/ChatApp';
// Page component for consultation reception; all behavior lives in ChatApp.
export default function Entertain() {
  return <ChatApp />;
}
|
#!/bin/bash
# Launches projectB, attaching the SkyWalking Java agent when its jar is
# present under AGENT_FILE_PATH.

export AGENT_FILE_PATH=/usr/local/header-scenario/agent

if [ -f "${AGENT_FILE_PATH}/skywalking-agent.jar" ]; then
    # Agent configuration: collector address, service name, and both v1/v2
    # trace-header formats enabled (this is the header-compat test scenario).
    JAVA_OPTS=" -javaagent:${AGENT_FILE_PATH}/skywalking-agent.jar "
    JAVA_OPTS="$JAVA_OPTS -Dskywalking.collector.backend_service=${COLLECTOR_SERVER} "
    JAVA_OPTS="$JAVA_OPTS -Dskywalking.agent.service_name=projectB "
    JAVA_OPTS="$JAVA_OPTS -Dskywalking.agent.active_v2_header=true "
    JAVA_OPTS="$JAVA_OPTS -Dskywalking.agent.active_v1_header=true "
fi

# Downstream service endpoints consumed by the application.
JAVA_OPTS="$JAVA_OPTS -Durl.prefixC=http://${PROJECTC_ADDRESS}/projectC -Durl.prefixD=http://${PROJECTD_ADDRESS}/projectD "

java $JAVA_OPTS -jar /usr/local/header-scenario/projectB.jar
/** Payload for creating a new article. */
export class CreateArticleDto {
  /** Article title. */
  title: string;
  /** Short summary of the article. */
  description: string;
  /** Full article body. */
  body: string;
  /** Optional tags. NOTE(review): the name suggests a list but the type is a
   *  single string — confirm whether this should be `string[]`. */
  taglist?: string;
}
|
<filename>feilong-net-jsoup/src/test/java/com/feilong/net/jsoup/MyTestMain.java
/*
* Copyright (C) 2008 feilong
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.feilong.net.jsoup;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.safety.Whitelist;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MyTestMain{
private static final Logger LOGGER = LoggerFactory.getLogger(MyTestMain.class);
public static void main(String[] args){
// 直接从字符串中输入 HTML 文档
String html = "<html><head><title> 开源中国社区 </title></head>" + "<body><p> 这里是 jsoup 项目的相关文章 </p></body></html>";
Document doc = Jsoup.parse(html);
// LOGGER.debug(doc.getElementsByClass(""));
String unsafe = "<p><a href='http://www.oschina.net/' onclick='stealCookies()'> 开源中国社区 </a></p>";
String safe = Jsoup.clean(unsafe, Whitelist.basic());
LOGGER.debug(safe);
LOGGER.debug(doc.text());
// 输出 :
// <p><a href="http://www.oschina.net/" rel="nofollow"> 开源中国社区 </a></p>
}
} |
/*
* Copyright 2014-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.dbflute.dbmeta.dtomap;
import java.util.List;
import org.dbflute.Entity;
/**
 * The interface of DTO mapper: two-way conversion between entities and DTOs.
 * @param <ENTITY> The type of entity.
 * @param <DTO> The type of DTO.
 * @author jflute
 */
public interface DtoMapper<ENTITY extends Entity, DTO> {

    /**
     * Do mapping from an entity to a DTO with relation data.
     * @param entity The entity as mapping resource. (NullAllowed: if null, returns null)
     * @return The mapped DTO. (NullAllowed: null only when the argument is null)
     */
    DTO mappingToDto(ENTITY entity);

    /**
     * Do mapping from an entity list to a DTO list with relation data. <br>
     * This calls this.mappingToDto() in a loop over the list.
     * @param entityList The list of entity as mapping resource. (NotNull: null elements are inherited)
     * @return The list of mapped DTO. (NotNull)
     */
    List<DTO> mappingToDtoList(List<ENTITY> entityList);

    /**
     * Do mapping from a DTO to an entity with relation data. <br>
     * A setter of an entity is called under the rule of this.needsMapping().
     * @param dto The DTO as mapping resource. (NullAllowed: if null, returns null)
     * @return The mapped entity. (NullAllowed: null only when the argument is null)
     */
    ENTITY mappingToEntity(DTO dto);

    /**
     * Do mapping from a DTO list to an entity list with relation data. <br>
     * This calls this.mappingToEntity() in a loop over the list.
     * @param dtoList The list of DTO as mapping resource. (NotNull: null elements are inherited)
     * @return The list of mapped entity. (NotNull)
     */
    List<ENTITY> mappingToEntityList(List<DTO> dtoList);

    /**
     * Set the option whether base-only mapping or not.
     * @param baseOnlyMapping Does the mapping ignore all references? (true: base-only mapping, false: all relations are valid)
     */
    void setBaseOnlyMapping(boolean baseOnlyMapping);

    /**
     * Set the option whether common column is except or not.
     * @param exceptCommonColumn Does the mapping except common column? (true: no mapping of common column)
     */
    void setExceptCommonColumn(boolean exceptCommonColumn);

    /**
     * Set the option whether reverse reference or not.
     * @param reverseReference Does the mapping contain reverse references? (true: reverse reference, false: one-way reference)
     */
    void setReverseReference(boolean reverseReference);
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.riot.system;
import java.util.Iterator ;
import java.util.Map.Entry ;
import org.apache.jena.graph.Graph ;
import org.apache.jena.graph.Triple ;
import org.apache.jena.shared.PrefixMapping ;
import org.apache.jena.sparql.core.DatasetGraph ;
import org.apache.jena.sparql.core.Quad ;
/* TODO
* Split up StreamRDFLib into factory and operations
* Check start/finish policies.
* org.apache.jena.riot.stream?
*
*/
/** Utilities for sending to StreamRDF.
* Unless otherwise stated, send* operations do not call stream.start()/stream.finish()
* whereas other operations do.
*/
public class StreamOps {
/** Send a dataset to a StreamRDF as prefixes, triples and quads, enclosed in stream.start()/steram.finish() */
public static void datasetToStream(DatasetGraph datasetGraph, StreamRDF stream) {
stream.start() ;
sendDatasetToStream(datasetGraph, stream) ;
stream.finish() ;
}
/** Send the triples of graph and it's prefix mapping to a StreamRDF, enclosed in stream.start()/steram.finish() */
public static void graphToStream(Graph graph, StreamRDF stream) {
stream.start();
sendGraphToStream(graph, stream) ;
stream.finish() ;
}
/** Send a PrefixMap to a stream */
public static void sendPrefixesToStream(PrefixMap prefixMap, StreamRDF stream) {
for ( Entry<String, String> e : prefixMap.getMappingCopyStr().entrySet())
stream.prefix(e.getKey(), e.getValue()) ;
}
public static void sendPrefixesToStream(PrefixMapping prefixMap, StreamRDF stream) {
for ( Entry<String, String> e : prefixMap.getNsPrefixMap().entrySet() )
stream.prefix(e.getKey(), e.getValue()) ;
}
/** Send a dataset graph to a stream with triples for the default graph
* and quads for the named graphs without prefixes
*/
public static void sendTriplesQuadsToStream(DatasetGraph datasetGraph, StreamRDF stream) {
sendDatasetToStream(datasetGraph, stream, null) ;
}
/** Send a dataset to a StreamRDF as prefixes, triples and quads */
public static void sendDatasetToStream(DatasetGraph datasetGraph, StreamRDF stream) {
PrefixMap prefixMap = PrefixMapFactory.create(datasetGraph.getDefaultGraph().getPrefixMapping()) ;
sendDatasetToStream(datasetGraph, stream, prefixMap) ;
}
//
// /** Send a dataset to a StreamRDF as triples and quads, using the explicitly given prefix map */
// public static void datasetToStream(DatasetGraph datasetGraph, StreamRDF stream, PrefixMap prefixMap) {
//
// }
//
/** Send a dataset to a StreamRDF as triples and quads, using the explicitly given prefix map */
public static void sendDatasetToStream(DatasetGraph datasetGraph, StreamRDF stream, PrefixMap prefixMap) {
if ( prefixMap != null )
sendPrefixesToStream(prefixMap, stream) ;
// Default graph
Iterator<Triple> iter1 = datasetGraph.getDefaultGraph().find(null, null, null) ;
StreamOps.sendTriplesToStream(iter1, stream) ;
Iterator<Quad> iter2 = datasetGraph.findNG(null, null, null, null) ;
StreamOps.sendQuadsToStream(iter2, stream) ;
}
/** Send the triples of graph and an explicitly given prefix mapping, to a StreamRDF */
public static void sendGraphToStream(Graph graph, StreamRDF stream) {
PrefixMap prefixMap = PrefixMapFactory.create(graph.getPrefixMapping()) ;
sendGraphToStream(graph, stream, prefixMap) ;
}
/** Send the triples of graph and an explicitly given prefix mapping, to a StreamRDF */
public static void sendGraphToStream(Graph graph, StreamRDF stream, PrefixMap prefixMap) {
if ( prefixMap != null )
sendPrefixesToStream(graph.getPrefixMapping(), stream) ;
Iterator<Triple> iter = graph.find(null, null, null) ;
StreamOps.sendTriplesToStream(iter, stream) ;
}
/** Send the triples of graph to a StreamRDF (no prefix mapping) */
public static void sendTriplesToStream(Graph graph, StreamRDF stream) {
sendGraphToStream(graph, stream, null) ;
}
/** Set triples to a StreamRDF - does not call .start/.finish */
public static void sendTriplesToStream(Iterator<Triple> iter, StreamRDF dest)
{
for ( ; iter.hasNext() ; )
{
Triple t = iter.next() ;
dest.triple(t) ;
}
}
/** Send quads of a dataset (including default graph as quads) to a StreamRDF, without prefixes */
public static void sendQuadsToStream(DatasetGraph datasetGraph, StreamRDF stream) {
Iterator<Quad> iter2 = datasetGraph.find(null, null, null, null) ;
StreamOps.sendQuadsToStream(iter2, stream) ;
}
/** Set quads to a StreamRDF - does not call .start/.finish */
public static void sendQuadsToStream(Iterator<Quad> iter, StreamRDF dest)
{
for ( ; iter.hasNext() ; )
{
Quad q = iter.next() ;
dest.quad(q) ;
}
}
} |
import toml
from pathlib import Path
# Constants
# Root directory that holds the pyproject.toml to inspect.
PROJECT_ROOT = Path("/path/to/project/root")  # Replace with actual project root path
PYPROJECT_TOML_FILENAME = "pyproject.toml"
# Name of the [tool.poetry.scripts] entry point to look for.
ENTRY_POINT = "my_script"
class PyprojectToml:
    """Lightweight attribute bag: exposes each keyword argument as an attribute."""

    def __init__(self, **kwargs):
        # Equivalent to calling setattr() once per keyword argument.
        self.__dict__.update(kwargs)
def check_entrypoint_in_pyproject_toml():
    """Report whether ENTRY_POINT is declared under [tool.poetry.scripts].

    Reads PROJECT_ROOT/PYPROJECT_TOML_FILENAME and prints one human-readable
    message per outcome (found / not found / malformed / missing file).
    Returns None.
    """
    file_path = PROJECT_ROOT / PYPROJECT_TOML_FILENAME
    try:
        with open(file_path, 'r') as file:
            data = toml.load(file)
    except FileNotFoundError:
        print(f"pyproject.toml file not found at {file_path}.")
        return

    # toml.load() returns nested plain dicts, so navigate with .get().
    # The previous hasattr() chain could never succeed on the nested tables
    # (dicts expose keys, not attributes), so every valid file was reported
    # as having an "invalid" structure.
    scripts = data.get('tool', {}).get('poetry', {}).get('scripts')
    if not isinstance(scripts, dict):
        print("Invalid pyproject.toml file structure.")
    elif ENTRY_POINT in scripts:
        print(f"The entry point '{ENTRY_POINT}' exists in the pyproject.toml file.")
    else:
        print(f"The entry point '{ENTRY_POINT}' does not exist in the pyproject.toml file.")


check_entrypoint_in_pyproject_toml()
#!/bin/sh
# CocoaPods-generated "Embed Pods Frameworks" build phase script: copies
# vendored/built frameworks into the app bundle, strips architectures the
# current build does not target, and re-signs the results.
# NOTE(review): uses bash features (arrays, [[, `function`) despite the
# /bin/sh shebang — confirm the build phase actually runs this under bash.
set -e
set -u
set -o pipefail

function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")

# Copies and strips a vendored framework
install_framework()
{
  # Resolve the framework path: built products dir first (full path, then
  # basename), finally the literal argument.
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"

  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  elif [ -L "${binary}" ]; then
    echo "Destination binary is symlinked..."
    dirname="$(dirname "${binary}")"
    binary="${dirname}/$(readlink "${binary}")"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}

# Copies and strips a vendored dSYM
install_dsym() {
  local source="$1"
  if [ -r "$source" ]; then
    # Copy the dSYM into a the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"

    local basename
    basename="$(basename -s .framework.dSYM "$source")"
    binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"

    # Strip invalid architectures so "fat" simulator / device frameworks work on device
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary"
    fi

    if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
      # Move the stripped file into its final destination.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
    fi
  fi
}

# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
  local bcsymbolmap_path="$1"
  local destination="${BUILT_PRODUCTS_DIR}"
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}

# Signs a framework with the provided identity
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"

    # Background the codesign invocation when parallel signing is enabled;
    # the caller waits for all of them at the end of the script.
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}

# Strip invalid architectures
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    STRIP_BINARY_RETVAL=0
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=1
}

if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/YZAPIs/YZAPIs.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/YZAPIs/YZAPIs.framework"
fi
# Wait for any backgrounded parallel codesign jobs before exiting.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
// https://open.kattis.com/problems/spavanac
#include <iostream>
using namespace std;
// Kattis "spavanac": print the time 45 minutes before the given
// 24-hour clock time, wrapping past midnight when necessary.
int main() {
    int hour, minute;
    std::cin >> hour >> minute;
    // Work in minutes-since-midnight; add a full day before subtracting 45
    // so the modulo never sees a negative value.
    int wake = (hour * 60 + minute + 24 * 60 - 45) % (24 * 60);
    std::cout << wake / 60 << " " << wake % 60 << std::endl;
}
|
-- Rename the "address" column of my_table to "location".
ALTER TABLE my_table
RENAME COLUMN address TO location;
def detect_patterns(text):
    """Match *text* against every known pattern and return all matches found."""
    all_matches = []
    for pattern in create_patterns():
        all_matches.extend(run_string_matching(text, pattern))
    return all_matches
def reverse_number(num):
    """Return the integer *num* with its decimal digits reversed.

    Generalized to handle any integer: the sign is preserved
    (reverse_number(-123) == -321) and 0 maps to 0; the previous version
    returned 0 for every negative input because its loop never ran.
    Trailing zeros necessarily disappear (e.g. 100 -> 1).
    """
    sign = -1 if num < 0 else 1
    num = abs(num)
    rev = 0
    while num > 0:
        num, digit = divmod(num, 10)
        rev = rev * 10 + digit
    return sign * rev
# Demo: reverse the digits of a sample number and print the result.
sample = 42
print(reverse_number(sample))
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ed.biodare2.backend.features.tsdata.dataimport;
import ed.biodare2.backend.features.tsdata.tableview.TextDataTableReader;
import ed.biodare2.backend.repo.isa_dom.dataimport.CellCoordinates;
import ed.biodare2.backend.repo.isa_dom.dataimport.CellRole;
import ed.biodare2.backend.repo.isa_dom.dataimport.DataBundle;
import ed.biodare2.backend.repo.isa_dom.dataimport.DataColumnProperties;
import ed.biodare2.backend.repo.isa_dom.dataimport.DataTableImportParameters;
import ed.biodare2.backend.repo.isa_dom.dataimport.DataTrace;
import ed.biodare2.backend.repo.isa_dom.dataimport.ImportFormat;
import ed.biodare2.backend.repo.isa_dom.dataimport.TimeType;
import ed.robust.dom.data.TimeSeries;
import java.net.URISyntaxException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.*;
import org.junit.Ignore;
import static org.mockito.Mockito.*;
/**
*
* @author <NAME> <<EMAIL>>
*/
public class DataTableImporterTest {
    // Tolerance for floating-point assertions.
    double EPS = 1E-6;

    public DataTableImporterTest() {
    }

    // Object under test, recreated before every test method.
    DataTableImporter instance;
    // Mocked table reader used as the data source in unit-level tests.
    TextDataTableReader reader;

    @Before
    public void setUp() {
        instance = new DataTableImporter();
        reader = mock(TextDataTableReader.class);
    }
    /**
     * importTimesRow converts raw time cells (minutes here: 60/120/600 -> 1/2/10
     * hours) and applies the configured time offset; the trailing blank cell
     * does not appear in the result.
     */
    @Test
    public void importTimesRowGivesTransformedRow() throws Exception {
        DataTableImportParameters parameters = new DataTableImportParameters();
        parameters.firstTimeCell = new CellCoordinates(2, 1);
        parameters.timeOffset = 1;
        parameters.timeType = TimeType.TIME_IN_MINUTES;

        List<List<Object>> recs = List.of(
                List.of("A","tom","60","120","600","")
        );
        when(reader.readRecords(1, 1)).thenReturn(recs);

        List<Double> exp = new ArrayList<>(List.of(1.0+1, 2.0+1, 10.0+1));
        List<Double> res = instance.importTimesRow(reader, parameters);
        assertEquals(exp, res);
    }
    /**
     * readTimesRow reads raw doubles starting at the given cell; the trailing
     * empty cell is preserved as a null entry (no unit conversion here).
     */
    @Test
    public void readTimesRowReadsDoublesFromGivenRowAndColumn() throws Exception {
        CellCoordinates firstTime = new CellCoordinates(2, 1);
        List<List<Object>> recs = List.of(
                List.of("A","tom","1","2","10","")
        );
        when(reader.readRecords(1, 1)).thenReturn(recs);

        List<Double> exp = new ArrayList<>(List.of(1.0, 2.0, 10.0));
        exp.add(null);
        List<Double> res = instance.readTimesRow(reader, firstTime);
        assertEquals(exp, res);
    }
    /** valsToDoubles: empty strings become null, numeric strings become doubles. */
    @Test
    public void valsToDoubleConverts() throws Exception {
        List<Object> vals = List.of("","","123","1");
        List<Double> exp = new ArrayList<>();
        exp.add(null); exp.add(null); exp.add(123.0); exp.add(1.0);
        assertEquals(exp, instance.valsToDoubles(vals));
    }
    /**
     * valToDouble: null/blank input -> null; numeric strings (leading space
     * tolerated) and numbers -> double; non-numeric text -> NumberFormatException.
     */
    @Test
    public void testToDoubleConverstion() throws Exception {
        Object in = null;
        Double exp = null;
        Double res;

        // null maps to null
        res = instance.valToDouble(in);
        assertNull(res);

        // blank strings (empty or whitespace-only) also map to null
        in = "";
        res = instance.valToDouble(in);
        assertNull(res);
        in = " \t ";
        res = instance.valToDouble(in);
        assertNull(res);

        // numeric strings are parsed, leading whitespace included
        in = " 123";
        exp = 123.0;
        res = instance.valToDouble(in);
        assertEquals(exp, res, EPS);
        in = "123.5";
        exp = 123.5;
        res = instance.valToDouble(in);
        assertEquals(exp, res, EPS);

        // numeric objects are widened to double
        in = 123L;
        exp = 123.0;
        res = instance.valToDouble(in);
        assertEquals(exp, res, EPS);

        // anything non-numeric is rejected
        in = "ala";
        try {
            instance.valToDouble(in);
            fail("Exception expected");
        } catch (NumberFormatException e) {}
    }
    /**
     * importTraceRow pairs row values with the time points, skipping blank
     * cells, and labels the trace via the supplied labeller function.
     */
    @Test
    public void testImportTraceRow() throws Exception {
        List<Object> record = List.of("A","toc1","10","20","","40");
        List<Double> times = List.of(1.0,2.0,3.0,4.0);
        int curRow = 3;
        int firstCol = 2;
        // Label each trace with the value of its second cell.
        BiFunction<List<Object>, Integer, String> labeller = (List<Object> row, Integer rowIx) -> row.get(1).toString();

        Optional<DataTrace> dataO = instance.importTraceRow(record,times,curRow,firstCol,labeller);
        DataTrace data = dataO.get();

        // The blank cell means time point 3.0 gets no sample.
        TimeSeries expT = new TimeSeries();
        expT.add(1, 10);
        expT.add(2, 20);
        expT.add(4, 40);
        assertEquals(expT, data.trace);
        assertEquals("toc1", data.details.dataLabel);
        assertEquals(CellRole.DATA, data.role);
        assertEquals("C4", data.traceRef);
    }
    /**
     * importTracesRows consumes records from the sequential reader until it is
     * exhausted, producing one DataTrace per row.
     */
    @Test
    public void testImportTracesRows() throws Exception {
        TextDataTableReader.OpennedReader sequentialReader = mock(TextDataTableReader.OpennedReader.class);

        List<Object> record1 = List.of("A","toc1","10","20","","40");
        List<Object> record2 = List.of("B","toc2","10","20","","40");
        when(sequentialReader.readRecord()).thenReturn(Optional.of(record1), Optional.of(record2), Optional.empty());
        when(sequentialReader.skipLines(3)).thenReturn(3);

        List<Double> times = List.of(1.0,2.0,3.0,4.0);
        int firstRow = 3;
        int firstCol = 2;
        BiFunction<List<Object>, Integer, String> labeller = (List<Object> row, Integer rowIx) -> row.get(1).toString();

        List<DataTrace> resp = instance.importTracesRows(sequentialReader, times, firstRow, firstCol, labeller);
        assertEquals(2, resp.size());

        // First trace: the blank cell at time 3.0 is skipped.
        DataTrace data = resp.get(0);
        TimeSeries expT = new TimeSeries();
        expT.add(1, 10);
        expT.add(2, 20);
        expT.add(4, 40);
        assertEquals(expT, data.trace);
        assertEquals("toc1", data.details.dataLabel);
        assertEquals(CellRole.DATA, data.role);
        assertEquals("C4", data.traceRef);

        data = resp.get(1);
        assertEquals("toc2", data.details.dataLabel);
    }
    /**
     * The reader-based overload opens a sequential reader, imports the rows,
     * and closes the reader when done.
     */
    @Test
    public void testImportTracesRowsFromReader() throws Exception {
        TextDataTableReader.OpennedReader sequentialReader = mock(TextDataTableReader.OpennedReader.class);
        when(reader.openReader()).thenReturn(sequentialReader);

        List<Object> record1 = List.of("A","toc1","10","20","","40");
        List<Object> record2 = List.of("B","toc2","10","20","","40");
        when(sequentialReader.readRecord()).thenReturn(Optional.of(record1), Optional.of(record2), Optional.empty());
        when(sequentialReader.skipLines(3)).thenReturn(3);

        List<Double> times = List.of(1.0,2.0,3.0,4.0);
        int firstRow = 3;
        int firstCol = 2;
        BiFunction<List<Object>, Integer, String> labeller = (List<Object> row, Integer rowIx) -> row.get(1).toString();

        List<DataTrace> resp = instance.importTracesRows(reader, times, firstRow, firstCol, labeller);
        assertEquals(2, resp.size());
        verify(sequentialReader).close();
    }
    /**
     * With importLabels disabled, traces take their labels from userLabels;
     * three records come in but only the two with non-null user labels
     * ("L1", "L2") are imported.
     */
    @Test
    public void importTracesRowsImportsFromExplicitLabels() throws Exception {
        TextDataTableReader.OpennedReader sequentialReader = mock(TextDataTableReader.OpennedReader.class);
        when(reader.openReader()).thenReturn(sequentialReader);

        List<Object> record1 = List.of("A","toc1","10","20","","40");
        List<Object> record2 = List.of("B","toc2","10","20","","40");
        List<Object> record3 = List.of("C","toc3","10","20","","40");
        when(sequentialReader.readRecord()).thenReturn(Optional.of(record1), Optional.of(record2), Optional.of(record3), Optional.empty());
        when(sequentialReader.skipLines(3)).thenReturn(3);

        List<Double> times = List.of(1.0,2.0,3.0,4.0);

        DataTableImportParameters parameters = new DataTableImportParameters();
        parameters.importLabels = false;
        parameters.inRows = true;
        parameters.userLabels = Arrays.asList(null,"L1",null,"L2");
        parameters.firstTimeCell = new CellCoordinates(2, 0);
        assertNull(parameters.dataStart);

        List<DataTrace> resp = instance.importTracesRows(reader, times, parameters);
        assertEquals(2, resp.size());
        assertEquals("L1", resp.get(0).details.dataLabel);
        assertEquals("L2", resp.get(1).details.dataLabel);
        verify(sequentialReader).close();
    }
    /** Resolve a test resource (looked up relative to this class) to a filesystem path. */
    public static Path getTestDataFile(String name) throws URISyntaxException {
        Path file = Paths.get(DataTableImporterTest.class.getResource(name).toURI());
        return file;
    }
    /** Baseline import parameters for a CSV file with traces laid out in rows. */
    public static DataTableImportParameters getCSVTableInRowsParameters(String fileName) {
        DataTableImportParameters parameters = new DataTableImportParameters();
        parameters.fileName = fileName;
        parameters.fileId = parameters.fileName;
        parameters.importFormat = ImportFormat.COMA_SEP;
        parameters.inRows = true;
        // Times start in row 0 at column 1; data rows start at column 1.
        parameters.firstTimeCell = new CellCoordinates(1, 0);
        parameters.timeType = TimeType.TIME_IN_HOURS;
        parameters.timeOffset = 1;
        parameters.imgInterval = 0;
        parameters.dataStart = new CellCoordinates(-1,1);
        parameters.importLabels = true;
        parameters.labelsSelection = new CellCoordinates(0, -1);
        return parameters;
    }
    /** End-to-end import of the row-oriented CSV fixture: 64 labelled traces. */
    @Test
    public void importCSVRowDataFromFile() throws Exception {
        Path file = getTestDataFile("data_in_rows.csv");
        DataTableImportParameters parameters = getCSVTableInRowsParameters("data_in_rows.csv");

        DataBundle boundle = instance.importTimeSeries(file, parameters);
        assertNotNull(boundle);

        List<DataTrace> data = boundle.data;
        assertEquals(64,data.size());
        assertEquals("WT LHY",data.get(0).details.dataLabel);
        assertEquals("WT TOC1",data.get(63).details.dataLabel);

        // Times carry the +1 offset configured in the parameters.
        TimeSeries trace = data.get(63).trace;
        assertEquals(1+1, trace.getFirst().getTime(), EPS);
        assertEquals(0.201330533, trace.getFirst().getValue(), EPS);
        assertEquals(1+159, trace.getLast().getTime(), EPS);
        assertEquals(0.553965719, trace.getLast().getValue(), EPS);

        assertEquals(1, data.get(0).traceNr);
        assertEquals(64, data.get(63).traceNr);

        DataTrace dtrace = data.get(0);
        assertEquals("B2", dtrace.traceFullRef);
        assertEquals("B2", dtrace.traceRef);

        dtrace = data.get(63);
        assertEquals("B65", dtrace.traceFullRef);
        assertEquals("B65", dtrace.traceRef);
    }
    /**
     * Traces whose labels are declared as backgrounds are separated from the
     * data traces; trace numbering continues across both groups (data 1-48,
     * backgrounds from 49).
     */
    @Test
    public void importCSVRowDataFromFileRemovingBackgrounds() throws Exception {
        Path file = getTestDataFile("data_in_rows.csv");
        DataTableImportParameters parameters = getCSVTableInRowsParameters("data_in_rows.csv");
        parameters.containsBackgrounds = true;
        parameters.backgroundsLabels = List.of("WT LHY", "WT TOC1");

        DataBundle boundle = instance.importTimeSeries(file, parameters);
        assertNotNull(boundle);

        List<DataTrace> backgrounds = boundle.backgrounds;
        assertEquals(16, backgrounds.size());
        assertEquals("WT LHY",backgrounds.get(0).details.dataLabel);
        assertEquals("WT TOC1",backgrounds.get(15).details.dataLabel);

        List<DataTrace> data = boundle.data;
        assertEquals(64-16,data.size());
        assertEquals("prr79 LHY",data.get(0).details.dataLabel);
        assertEquals("WT PRR5",data.get(47).details.dataLabel);

        DataTrace dtrace = data.get(47);
        TimeSeries trace = dtrace.trace;
        assertEquals(1+1, trace.getFirst().getTime(), EPS);
        assertEquals(0.901916043, trace.getFirst().getValue(), EPS);
        assertEquals(48, dtrace.traceNr);
        assertEquals("B57", dtrace.traceRef);

        dtrace = backgrounds.get(0);
        trace = dtrace.trace;
        assertEquals(1+1, trace.getFirst().getTime(), EPS);
        assertEquals(1.643133821, trace.getFirst().getValue(), EPS);
        assertEquals(49, dtrace.traceNr);
        assertEquals("B2", dtrace.traceRef);
    }
    /** Same row-oriented import as the CSV case, but from an Excel workbook. */
    @Test
    public void importExcelRowDataFromFile() throws Exception {
        Path file = getTestDataFile("data_in_rows.xlsx");
        DataTableImportParameters parameters = getCSVTableInRowsParameters("data_in_rows.csv");
        parameters.importFormat = ImportFormat.EXCEL_TABLE;

        DataBundle boundle = instance.importTimeSeries(file, parameters);
        assertNotNull(boundle);

        List<DataTrace> data = boundle.data;
        assertEquals(64,data.size());
        assertEquals("WT LHY",data.get(0).details.dataLabel);
        assertEquals("WT TOC1",data.get(63).details.dataLabel);

        TimeSeries trace = data.get(63).trace;
        assertEquals(1+1, trace.getFirst().getTime(), EPS);
        assertEquals(0.201330533, trace.getFirst().getValue(), EPS);
        assertEquals(1+159, trace.getLast().getTime(), EPS);
        assertEquals(0.553965719, trace.getLast().getValue(), EPS);

        assertEquals(1, data.get(0).traceNr);
        assertEquals(64, data.get(63).traceNr);

        DataTrace dtrace = data.get(0);
        assertEquals("B2", dtrace.traceFullRef);
        assertEquals("B2", dtrace.traceRef);

        dtrace = data.get(63);
        assertEquals("B65", dtrace.traceFullRef);
        assertEquals("B65", dtrace.traceRef);
    }
    /**
     * With user-supplied labels, only rows with a non-null user label are
     * imported and they carry those labels rather than the file's own.
     */
    @Test
    public void importsLabelledCSVRowDataFromFile() throws Exception {
        Path file = getTestDataFile("data_in_rows.csv");
        DataTableImportParameters parameters = getCSVTableInRowsParameters("data_in_rows.csv");
        parameters.importLabels = false;
        parameters.userLabels = Arrays.asList(null, null, "L1","L2", null, "L3", null, null);

        DataBundle boundle = instance.importTimeSeries(file, parameters);
        assertNotNull(boundle);

        List<DataTrace> data = boundle.data;
        assertEquals(3,data.size());
        assertEquals("L1",data.get(0).details.dataLabel);
        assertEquals("L2",data.get(1).details.dataLabel);
        assertEquals("L3",data.get(2).details.dataLabel);

        TimeSeries trace = data.get(2).trace;
        assertEquals(1+1, trace.getFirst().getTime(), EPS);
        assertEquals(1.426291469, trace.getFirst().getValue(), EPS);
        assertEquals(1+159, trace.getLast().getTime(), EPS);
        assertEquals(1.799394662, trace.getLast().getValue(), EPS);

        assertEquals(1, data.get(0).traceNr);
        assertEquals(3, data.get(2).traceNr);

        DataTrace dtrace = data.get(0);
        assertEquals("B3", dtrace.traceFullRef);
        assertEquals("B3", dtrace.traceRef);

        dtrace = data.get(2);
        assertEquals("B6", dtrace.traceFullRef);
        assertEquals("B6", dtrace.traceRef);
    }
// Same fixture as the CSV row test but read as an Excel workbook: the
// parameters come from the CSV builder with only the format switched
// (both fixtures presumably share the same layout — the expectations match).
@Test
public void importsLabelledExcelRowDataFromFile() throws Exception {
    Path file = getTestDataFile("data_in_rows.xlsx");
    DataTableImportParameters parameters = getCSVTableInRowsParameters("data_in_rows.csv");
    parameters.importFormat = ImportFormat.EXCEL_TABLE;
    parameters.importLabels = false;
    parameters.userLabels = Arrays.asList(null, null, "L1", "L2", null, "L3", null, null);

    DataBundle bundle = instance.importTimeSeries(file, parameters);
    assertNotNull(bundle);

    List<DataTrace> traces = bundle.data;
    assertEquals(3, traces.size());
    assertEquals("L1", traces.get(0).details.dataLabel);
    assertEquals("L2", traces.get(1).details.dataLabel);
    assertEquals("L3", traces.get(2).details.dataLabel);

    TimeSeries series = traces.get(2).trace;
    assertEquals(1 + 1, series.getFirst().getTime(), EPS);
    assertEquals(1.426291469, series.getFirst().getValue(), EPS);
    assertEquals(1 + 159, series.getLast().getTime(), EPS);
    assertEquals(1.799394662, series.getLast().getValue(), EPS);

    assertEquals(1, traces.get(0).traceNr);
    assertEquals(3, traces.get(2).traceNr);

    DataTrace first = traces.get(0);
    assertEquals("B3", first.traceFullRef);
    assertEquals("B3", first.traceRef);
    DataTrace last = traces.get(2);
    assertEquals("B6", last.traceFullRef);
    assertEquals("B6", last.traceRef);
}
/**
 * Builds import parameters for the column-oriented test tables: time in the
 * first column (offset by 1, in hours), one series per subsequent column,
 * labels taken from the first row.
 */
public static DataTableImportParameters getCSVTableInColsParameters(String fileName) {
    DataTableImportParameters params = new DataTableImportParameters();
    params.fileName = fileName;
    params.fileId = fileName;
    params.importFormat = ImportFormat.COMA_SEP;
    params.inRows = false;
    params.timeType = TimeType.TIME_IN_HOURS;
    params.timeOffset = 1;
    params.imgInterval = 0;
    params.firstTimeCell = new CellCoordinates(0, 1);
    params.dataStart = new CellCoordinates(1, -1);
    params.importLabels = true;
    params.labelsSelection = new CellCoordinates(-1, 0);
    return params;
}
// Column-oriented CSV import: 64 series with labels from the first row.
@Test
public void importCSVColDataFromFile() throws Exception {
    Path file = getTestDataFile("data_in_cols.csv");
    DataTableImportParameters parameters = getCSVTableInColsParameters("data_in_cols.csv");

    DataBundle bundle = instance.importTimeSeries(file, parameters);
    assertNotNull(bundle);

    List<DataTrace> traces = bundle.data;
    assertEquals(64, traces.size());
    assertEquals("WT LHY", traces.get(0).details.dataLabel);
    assertEquals("WT TOC1", traces.get(63).details.dataLabel);

    // Last column of the table.
    TimeSeries series = traces.get(63).trace;
    assertEquals(1 + 1, series.getFirst().getTime(), EPS);
    assertEquals(0.201330533, series.getFirst().getValue(), EPS);
    assertEquals(1 + 159, series.getLast().getTime(), EPS);
    assertEquals(0.553965719, series.getLast().getValue(), EPS);

    // First column of the table.
    series = traces.get(0).trace;
    assertEquals(1 + 1, series.getFirst().getTime(), EPS);
    assertEquals(1.643133821, series.getFirst().getValue(), EPS);
    assertEquals(1 + 159, series.getLast().getTime(), EPS);
    assertEquals(0.859250886, series.getLast().getValue(), EPS);

    assertEquals(1, traces.get(0).traceNr);
    assertEquals(64, traces.get(63).traceNr);

    DataTrace first = traces.get(0);
    assertEquals("B2", first.traceFullRef);
    assertEquals("B2", first.traceRef);
    DataTrace last = traces.get(63);
    assertEquals("BM2", last.traceFullRef);
    assertEquals("BM2", last.traceRef);
}
// Column-oriented import from the Excel version of the same fixture; only
// the import format differs from the CSV test above.
@Test
public void importExcelColDataFromFile() throws Exception {
    Path file = getTestDataFile("data_in_cols.xlsx");
    DataTableImportParameters parameters = getCSVTableInColsParameters("data_in_cols.csv");
    parameters.importFormat = ImportFormat.EXCEL_TABLE;

    DataBundle bundle = instance.importTimeSeries(file, parameters);
    assertNotNull(bundle);

    List<DataTrace> traces = bundle.data;
    assertEquals(64, traces.size());
    assertEquals("WT LHY", traces.get(0).details.dataLabel);
    assertEquals("WT TOC1", traces.get(63).details.dataLabel);

    TimeSeries series = traces.get(63).trace;
    assertEquals(1 + 1, series.getFirst().getTime(), EPS);
    assertEquals(0.201330533, series.getFirst().getValue(), EPS);
    assertEquals(1 + 159, series.getLast().getTime(), EPS);
    assertEquals(0.553965719, series.getLast().getValue(), EPS);

    series = traces.get(0).trace;
    assertEquals(1 + 1, series.getFirst().getTime(), EPS);
    assertEquals(1.643133821, series.getFirst().getValue(), EPS);
    assertEquals(1 + 159, series.getLast().getTime(), EPS);
    assertEquals(0.859250886, series.getLast().getValue(), EPS);

    assertEquals(1, traces.get(0).traceNr);
    assertEquals(64, traces.get(63).traceNr);

    DataTrace first = traces.get(0);
    assertEquals("B2", first.traceFullRef);
    assertEquals("B2", first.traceRef);
    DataTrace last = traces.get(63);
    assertEquals("BM2", last.traceFullRef);
    assertEquals("BM2", last.traceRef);
}
@Test
@Ignore("The test file is not committed")
public void importCSVColDataFromMediumLargeFile() throws Exception {
    // NOTE(review): machine-specific absolute path; keep @Ignore'd until the
    // fixture is committed or the location is made configurable.
    Path file = Paths.get("E:\\Temp\\long_5000x1200.csv");
    DataTableImportParameters parameters = getCSVTableInColsParameters("long_5000x1200.csv");

    DataBundle bundle = instance.importTimeSeries(file, parameters);
    assertNotNull(bundle);
    assertEquals(5000, bundle.data.size());
}
@Test
public void markBackgroundsSetTheCellRoleOnMatchingLabels() {
    DataTableImportParameters parameters = new DataTableImportParameters();
    parameters.containsBackgrounds = false;
    parameters.backgroundsLabels = List.of("noise", " Back ");

    // Four DATA traces whose labels exercise exact match, case sensitivity
    // and whitespace trimming of the background labels.
    List<DataTrace> traces = new ArrayList<>();
    for (String label : List.of("toc1", "noise", "Noise", "Back")) {
        DataTrace trace = new DataTrace();
        trace.details = new DataColumnProperties(label);
        trace.role = CellRole.DATA;
        traces.add(trace);
    }

    // With containsBackgrounds disabled nothing is re-classified.
    instance.markBackgrounds(traces, parameters);
    assertEquals(List.of(CellRole.DATA, CellRole.DATA, CellRole.DATA, CellRole.DATA),
            traces.stream().map(t -> t.role).collect(Collectors.toList()));

    // Once enabled, "noise" and "Back" (label " Back " trimmed) become
    // backgrounds; "Noise" stays DATA, so matching is case-sensitive.
    parameters.containsBackgrounds = true;
    instance.markBackgrounds(traces, parameters);
    assertEquals(List.of(CellRole.DATA, CellRole.BACKGROUND, CellRole.DATA, CellRole.BACKGROUND),
            traces.stream().map(t -> t.role).collect(Collectors.toList()));
}
}
|
<reponame>Godlike/Epona<gh_stars>0
/*
* Copyright (C) 2019 by Godlike
* This code is licensed under the MIT license (MIT)
* (http://opensource.org/licenses/MIT)
*/
// Placeholder entry point: the target currently builds an executable that
// does nothing and exits successfully.
int main()
{
    return 0;
}
|
import { createSlice, Dispatch, PayloadAction } from '@reduxjs/toolkit';
/** Severity/category of a notification shown to the user. */
export enum NotificationLevel {
  Info = 'info',
  Error = 'error',
  Update = 'update',
}
// Monotonically increasing id source shared by all notification creators in
// this module; ids are unique per app session, not persisted.
let nextId = 0;

/** Fields common to every notification. */
interface NotificationBase {
  id: number;
}

/** Notification announcing that a new application version is available. */
interface UpdateNotification extends NotificationBase {
  level: NotificationLevel.Update;
  version: string;
}

/** Plain message notification (info or error — never update). */
export interface Notification extends NotificationBase {
  level: Exclude<NotificationLevel, NotificationLevel.Update>;
  message: string;
}

interface NotificationSliceState {
  notifications: (UpdateNotification | Notification)[];
}
const { actions, reducer } = createSlice({
name: 'notification',
initialState: {
notifications: [],
} as NotificationSliceState,
reducers: {
pushNotification(state, action: PayloadAction<Notification>) {
state.notifications.push({ ...action.payload });
},
triggerUpdateNotification(state, action: PayloadAction<string>) {
state.notifications.push({
level: NotificationLevel.Update,
id: nextId,
version: action.payload,
});
nextId += 1;
},
removeNotification(state, action: PayloadAction<number>) {
state.notifications = state.notifications.filter(
(n) => n.id !== action.payload
);
},
},
});
// The slice reducer plus the plain action creators; `pushNotification` is
// exposed through the helper function below instead of being re-exported.
export default reducer;
export const { triggerUpdateNotification, removeNotification } = actions;
/**
 * Display notifications of different types.
 *
 * @example
 * const dispatch = useDispatch();
 * pushNotification(dispatch, { level: NotificationLevel.Info, message: 'Informative message' }, 5);
 *
 * @param dispatch store dispatch function.
 * @param notification notification content; a unique id is assigned here.
 * @param autoRemoveSeconds when set (and non-zero), the notification is
 *   automatically closed after this many seconds.
 * @returns function to close the notification programmatically.
 */
export function pushNotification(
  dispatch: Dispatch,
  notification: Omit<Notification, 'id'>,
  autoRemoveSeconds?: number
) {
  const id = nextId;
  dispatch(actions.pushNotification({ ...notification, id }));
  nextId += 1;
  const close = () => dispatch(removeNotification(id));
  if (autoRemoveSeconds) {
    setTimeout(close, autoRemoveSeconds * 1000);
  }
  return close;
}
|
-- Customer with the highest lifetime sales (ties broken arbitrarily by LIMIT).
SELECT name, total_sales
FROM customers
ORDER BY total_sales DESC
LIMIT 1; |
#!/bin/sh
set -e
# Intentionally invoke a nonexistent command: with `set -e` the script aborts
# here, so the trailing echo is never reached.
thisisnotcommand
echo "done" |
<reponame>anticipasean/girakkafunc
package cyclops.pure.arrow;
import cyclops.function.higherkinded.Higher;
import cyclops.function.combiner.Monoid;
/**
 * Higher-kinded monoid: a {@link SemigroupK} extended with an identity
 * element for its combine operation.
 *
 * @param <W> witness type of the higher-kinded type this monoid operates on
 */
public interface MonoidK<W> extends SemigroupK<W> {

    /** Identity element for the combine operation, for any element type T. */
    <T> Higher<W, T> zero();

    /**
     * Views this higher-kinded monoid as a first-class {@link Monoid} over
     * {@code Higher<W, T>} values, built from {@code zero()} and the
     * {@code apply} combine operation inherited from {@link SemigroupK}.
     */
    default <T> Monoid<Higher<W, T>> asMonoid() {
        return Monoid.of(zero(),
                         (a, b) -> this.apply(a,
                                              b));
    }
}
|
import React, { Component } from 'react'
import Modal from 'components/modal'
class AttestationSuccess extends Component {
render() {
const { open, message, handleToggle } = this.props
return (
<Modal isOpen={open} data-modal="attestationSuccess">
<div className="image-container">
<img
src="images/circular-check-button.svg"
role="presentation"
/>
</div>
<h2 className="success-message">{message}</h2>
<div className="reminder">Don't forget to publish your changes.</div>
<div className="explanation">Publishing to the blockchain lets other users know that you have a verified profile.</div>
<div className="button-container">
<button
data-modal="attestationSuccess"
className="btn btn-clear"
onClick={handleToggle}
>
Continue
</button>
</div>
</Modal>
)
}
}
export default AttestationSuccess
|
<gh_stars>0
// Factory for a cat record with closure-encapsulated state (no `this`).
const Cat = function (name, src, cnt) {
  // Private state captured by the accessor closures below.
  let catName = name;
  let catSrc = src;
  let catCount = cnt;

  return {
    getName: () => catName,
    getSrc: () => catSrc,
    getCount: () => catCount,
    setName: (newName) => { catName = newName },
    setSrc: (newSrc) => { catSrc = newSrc },
    setCount: (newCount) => { catCount = newCount },
    increment: () => { catCount += 1 },
    // Structural equality over all three fields.
    equals: (other) =>
      other.getName() === catName &&
      other.getCount() === catCount &&
      other.getSrc() === catSrc,
  }
}
module.exports = { Cat };
|
def miles_to_km(miles):
    """Convert a distance in miles to kilometers, rounded to 2 decimals.

    Bug fix: the original multiplied by 0.621371, which is the
    kilometers-to-miles factor (so it converted the wrong way).
    1 mile = 1.609344 km.
    """
    return round(miles * 1.609344, 2)


# Example usage: convert 5 miles and print both values.
miles = 5
kilometers = miles_to_km(miles)
print("Distance in miles:", miles)
print("Distance in kilometers:", kilometers, "km")
#!/usr/bin/env bash
# Builds and installs only the REST data-service module with the `quick`
# profile, configuring it as a standalone service on port 8084 serving from
# the `data` path. Extra Maven arguments may be appended on invocation ($@).
mvn clean install -Pquick -Pmulti-jvm-example-rest-data-service -pl :multi-jvm-example-rest-data-service -Dstandalone-path=data -Dstandalone-port=8084 $@
|
# Renames the `usergames` table to `user_games`, matching Rails' snake_case
# convention for multi-word table names. `rename_table` is reversible, so the
# migration can be rolled back.
class ChangeUsergamesToUserGames < ActiveRecord::Migration[5.2]
  def change
    rename_table :usergames, :user_games
  end
end
|
<reponame>concefly/awesome-helper
import { TreeHelper } from 'ah-tree-helper';
import { convertXml2List } from './util';
import { XmlElementNode, XmlNode, XmlTextNode, XmlCDataNode } from './XmlNode';
/** Parses raw XML text into typed node instances, one per parsed element. */
function convertInput2NodeList(input: string) {
  const nodeClassByType = new Map<string, typeof XmlNode>([
    ['text', XmlTextNode],
    ['element', XmlElementNode],
    ['cdata', XmlCDataNode],
  ]);

  return convertXml2List(input).map(element => {
    // Unknown node types fall back to the generic XmlNode.
    const NodeClass = nodeClassByType.get(element.type!) || XmlNode;
    return new NodeClass(element.id, element.parentId, { xmlElement: element });
  });
}
/** Tree-helper wrapper over a parsed XML document. */
export class XmlHelper extends TreeHelper<XmlNode> {
  constructor(readonly raw: string) {
    super(convertInput2NodeList(raw));
    // Give every node a back-reference to this tree.
    this.list.forEach(node => node.setTree(this));
  }

  /** Runs `selector` against every root node and concatenates the matches. */
  query(selector: string) {
    const matches: XmlNode[] = [];
    for (const root of this.findAllRoot()) {
      matches.push(...root.query(selector));
    }
    return matches;
  }
}
|
#!/bin/sh
# CocoaPods-generated "Embed Frameworks" build phase: copies vendored
# frameworks into the app bundle, strips architectures the current build does
# not target, and re-signs them when required.
# NOTE(review): `set -o pipefail`, `function`, ERR traps and `[[` used below
# are bash features despite the /bin/sh shebang — confirm the shell Xcode
# invokes this with before changing.
set -e
set -u
set -o pipefail

function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
BCSYMBOLMAP_DIR="BCSymbolMaps"

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
# $1 — framework path, absolute or relative to BUILT_PRODUCTS_DIR.
install_framework()
{
  # Resolve the source: prefer the full path under BUILT_PRODUCTS_DIR, then
  # its basename there, then the literal path.
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then
    # Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied
    find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do
      echo "Installing $f"
      install_bcsymbolmap "$f" "$destination"
      rm "$f"
    done
    rmdir "${source}/${BCSYMBOLMAP_DIR}"
  fi

  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"

  # A bare dylib pod has no .framework wrapper; fall back to the plain binary.
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  elif [ -L "${binary}" ]; then
    echo "Destination binary is symlinked..."
    dirname="$(dirname "${binary}")"
    binary="${dirname}/$(readlink "${binary}")"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies and strips a vendored dSYM
# $1 — path to the .dSYM bundle; $2 — warn when no architecture matches
# (defaults to true).
install_dsym() {
  local source="$1"
  warn_missing_arch=${2:-true}
  if [ -r "$source" ]; then
    # Copy the dSYM into the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"

    local basename
    basename="$(basename -s .dSYM "$source")"
    binary_name="$(ls "$source/Contents/Resources/DWARF")"
    binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"

    # Strip invalid architectures from the dSYM.
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary" "$warn_missing_arch"
    fi
    if [[ $STRIP_BINARY_RETVAL == 0 ]]; then
      # Move the stripped file into its final destination.
      # Bug fix: the echoed command previously logged
      # "${basename}.framework.dSYM" while the executed rsync used
      # "${basename}.dSYM"; the log line now matches what is actually run.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      mkdir -p "${DWARF_DSYM_FOLDER_PATH}"
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
    fi
  fi
}
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# Strip invalid architectures
# Removes slices of $1 not present in $ARCHS; sets STRIP_BINARY_RETVAL to 1
# (and optionally warns) when the binary shares no architecture with the build.
strip_invalid_archs() {
  binary="$1"
  warn_missing_arch=${2:-true}
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    if [[ "$warn_missing_arch" == "true" ]]; then
      echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    fi
    STRIP_BINARY_RETVAL=1
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=0
}
# Copies the bcsymbolmap files of a vendored framework
# $1 — path to a .bcsymbolmap file. Fix: the echoed command's inner quotes
# were unescaped (unlike every sibling function), so the logged line was
# mangled by word splitting; they are now escaped to match the other echos.
install_bcsymbolmap() {
    local bcsymbolmap_path="$1"
    local destination="${BUILT_PRODUCTS_DIR}"
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
# No-op unless an identity is configured and signing is required and allowed.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      # Background the signing; the script `wait`s for all jobs at the end.
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Embed the pod frameworks for the active build configuration.
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/RSDoneKeyboard/RSDoneKeyboard.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/RSDoneKeyboard/RSDoneKeyboard.framework"
fi
# When code signing ran in the background (parallel mode), wait for all jobs.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
def top_k_elements(arr, k):
    """Return the k largest elements of arr, in ascending order.

    Fixes two defects in the original:
    - k <= 0 now returns [] (previously ``arr[-0:]`` returned the whole list);
    - the input list is no longer sorted in place, so callers keep their order.
    If k exceeds len(arr), all elements are returned (as before).
    """
    if k <= 0:
        return []
    return sorted(arr)[-k:]
<html>
<head>
<title>Countdown Timer</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
</head>
<body>
<h1>Countdown Timer</h1>
<div>
Hours: <input type="text" id="hour">
Minutes: <input type="text" id="minute">
<button id="startTimer">Start Timer</button>
<div id="timer">
<span id="hours">00</span>:<span id="minutes">00</span>:<span id="seconds">00</span>
</div>
</div>
<script>
// Remaining time and the active interval handle.
let h, m, s, interval;

// Write h/m/s into the display, zero-padded to two digits.
function render() {
  $('#hours').text(h.toString().padStart(2, "0"));
  $('#minutes').text(m.toString().padStart(2, "0"));
  $('#seconds').text(s.toString().padStart(2, "0"));
}

// Start (or restart) the countdown from the entered hours/minutes.
// Bug fix: the original *incremented* the time, so the display counted up
// and the 0:0:0 stop condition was never reached.
$('#startTimer').click(function() {
  h = parseInt($('#hour').val()) || 0;   // empty/invalid input counts as 0
  m = parseInt($('#minute').val()) || 0;
  s = 0;
  clearInterval(interval);               // avoid stacking intervals on re-click
  render();
  interval = setInterval(function() {
    // Stop once the countdown reaches zero.
    if (h === 0 && m === 0 && s === 0) {
      clearInterval(interval);
      return;
    }
    // Tick one second down, borrowing from minutes/hours as needed.
    if (s > 0) {
      s -= 1;
    } else {
      s = 59;
      if (m > 0) {
        m -= 1;
      } else {
        m = 59;
        h -= 1;
      }
    }
    render();
  }, 1000);
});
</script>
</body>
</html>
#include <iostream>
#include <string>
#include <windows.h>
// Function to generate the HWID based on the computer's hardware configuration
// NOTE(review): currently a stub — it returns a fixed placeholder string
// rather than real hardware identifiers (CPUID, MAC address, disk serial...).
std::string GenerateHWID() {
    std::string hwid;
    // Implement code to gather hardware information and generate a unique identifier
    // Example: Using CPUID, MAC address, hard drive serial number, etc. to create a unique string
    // For demonstration purposes, let's assume a simple concatenation of CPUID and MAC address
    // Replace this with actual hardware information retrieval and unique identifier generation
    hwid = "CPUID-MACADDRESS";
    return hwid;
}
// Function to verify the HWID against a predefined value.
// Bug fix: the original claimed a constant-time comparison but used
// std::equal, which returns at the first mismatching byte and therefore
// leaks the length of the matching prefix through timing. This version
// XOR-accumulates over every byte so the time depends only on the length.
bool VerifyHWID(const std::string& generatedHWID, const std::string& predefinedHWID) {
    if (generatedHWID.size() != predefinedHWID.size()) {
        return false;  // the length itself is not treated as secret
    }
    unsigned char diff = 0;
    for (std::string::size_type i = 0; i < generatedHWID.size(); ++i) {
        diff |= static_cast<unsigned char>(generatedHWID[i]) ^
                static_cast<unsigned char>(predefinedHWID[i]);
    }
    return diff == 0;
}
// Entry point: compare the locally generated HWID with the expected value
// and report whether access is granted.
int main() {
    const std::string predefinedHWID = "SF Heavy Equipment-1284220259";
    const std::string generatedHWID = GenerateHWID();

    const bool authorized = VerifyHWID(generatedHWID, predefinedHWID);
    if (authorized) {
        std::cout << "HWID verification successful. Access granted." << std::endl;
        // Proceed with authorized actions
    } else {
        std::cout << "HWID verification failed. Access denied." << std::endl;
        // Prevent unauthorized access
    }
    return 0;
}
<gh_stars>0
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
try:
from modules.basic import *
from modules.line import *
except ImportError:
from star_tracker.modules.basic import *
from star_tracker.modules.line import *
class Canvas3D(FigureCanvas):
    """Qt canvas rendering the star catalog, a celestial graticule and the
    camera pose on a unit sphere in 3D."""

    def __init__(self, figure=None):
        super().__init__(figure=figure)
        self.__config()
        # Star catalog data: unit-sphere coordinates plus marker sizes 'v'.
        self.stars = {'x': [], 'y': [], 'z': [], 'v': []}
        # Artist handles per overlay, kept so each can be cleared and redrawn.
        self._graticule_plots = []
        self._stars_plots = []
        self._camera_position = []
        self._graticule_color = "yellow"
        self._camera_linewidth = 1

    def __config(self):
        """One-time axes setup: black background, white labels, unit cube."""
        color = 'white'
        self.axes = self.figure.add_subplot(projection='3d')
        self.axes.set_title("Star Catalog 3D", {'color': color})
        # Red dot marking the origin (observer position).
        self.axes.scatter3D(0, 0, 0, s=1, color="red")
        self.axes.set_facecolor("black")
        self.figure.set_facecolor("black")
        self.axes.grid(False)
        # NOTE(review): `w_xaxis`/`w_yaxis`/`w_zaxis` are deprecated in newer
        # matplotlib (plain `xaxis.pane` there) — confirm the pinned version
        # before migrating.
        self.axes.w_xaxis.pane.fill = False
        self.axes.w_yaxis.pane.fill = False
        self.axes.w_zaxis.pane.fill = False
        self.axes.tick_params(axis='x', colors=color)
        self.axes.tick_params(axis='y', colors=color)
        self.axes.tick_params(axis='z', colors=color)
        self.axes.yaxis.label.set_color(color)
        self.axes.xaxis.label.set_color(color)
        self.axes.zaxis.label.set_color(color)
        self.axes.set_xlim(-1, 1)
        self.axes.set_ylim(-1, 1)
        self.axes.set_zlim(-1, 1)
        self.axes.set_xlabel('X axis')
        self.axes.set_ylabel('Y axis')
        self.axes.set_zlabel('Z axis')

    def showGraticule(self, show=False):
        """Draw the celestial graticule, or clear it when show=False."""
        # Remove previously drawn graticule lines.
        while self._graticule_plots:
            self._graticule_plots[0].pop(0).remove()
            del self._graticule_plots[0]
        if not show:
            return
        # Meridians every 30 deg, traced as one continuous zig-zag polyline
        # (down one meridian, up the next) sampled every 5 deg of declination.
        ar, dec = [], []
        for j in range(0, 360, 60):
            for i in range(-90, 91, 5):
                ar.append(deg2rad(j))
                dec.append(deg2rad(i))
            for i in range(90, -91, -5):
                ar.append(deg2rad(j + 30))
                dec.append(deg2rad(i))
        x, y, z = spherical2catersian(ar, dec)
        self._graticule_plots.append(
            self.axes.plot(x, y, z, color=self._graticule_color, linewidth=0.4))
        # Parallels every 30 deg of declination (excluding the poles).
        for i in range(-60, 90, 30):
            ar, dec = [], []
            for j in range(0, 361, 5):
                ar.append(deg2rad(j))
                dec.append(deg2rad(i))
            x, y, z = spherical2catersian(ar, dec)
            self._graticule_plots.append(
                self.axes.plot(x, y, z, color=self._graticule_color, linewidth=0.4))

    @property
    def stars(self):
        # Dict with keys 'x', 'y', 'z' (positions) and 'v' (marker sizes).
        return self._stars

    @stars.setter
    def stars(self, value):
        self._stars = value

    def show_stars(self, show=False):
        """Scatter-plot the catalog stars, or clear them when show=False."""
        # (Fix: dropped an unused local `value = self.stars` from the original.)
        while self._stars_plots:
            self._stars_plots[0].remove()
            del self._stars_plots[0]
        if not show:
            return
        self._stars_plots.append(self.axes.scatter3D(
            self.stars['x'], self.stars['y'], self.stars['z'],
            s=self.stars['v'], color="white"))

    @property
    def camera(self):
        # Camera pose: expects keys '3D' (frustum corner coordinates) and
        # '3D_pos' (axis unit vectors) — see show_camera() for the layout.
        return self._camera

    @camera.setter
    def camera(self, value):
        self._camera = value

    def show_camera(self, show=False):
        """Draw the camera frustum and axes, or clear them when show=False."""
        camera_3d = self._camera['3D']
        camera_pos = self._camera['3D_pos']
        # Stored handles are either lists returned by axes.plot() (need
        # .pop(0).remove()) or scatter/quiver artists (have .remove() only).
        # Fix: the original used a bare `except:`; only the list-vs-artist
        # AttributeError is expected here, so catch just that.
        while self._camera_position:
            try:
                self._camera_position[0].pop(0).remove()
            except AttributeError:
                self._camera_position[0].remove()
            del self._camera_position[0]
        if not show:
            return
        # Lines from the origin to the four frustum corner points.
        self._camera_position.append(self.axes.plot([0, camera_3d['x'][0]], [0, camera_3d['y'][0]], [0, camera_3d['z'][0]], color='g', linewidth=self._camera_linewidth))
        self._camera_position.append(self.axes.plot([0, camera_3d['x'][1]], [0, camera_3d['y'][1]], [0, camera_3d['z'][1]], color='g', linewidth=self._camera_linewidth))
        self._camera_position.append(self.axes.plot([0, camera_3d['x'][2]], [0, camera_3d['y'][2]], [0, camera_3d['z'][2]], color='g', linewidth=self._camera_linewidth))
        self._camera_position.append(self.axes.plot([0, camera_3d['x'][3]], [0, camera_3d['y'][3]], [0, camera_3d['z'][3]], color='g', linewidth=self._camera_linewidth))
        # Corner markers, one color per corner for orientation.
        self._camera_position.append(self.axes.scatter3D(camera_3d['x'][0], camera_3d['y'][0], camera_3d['z'][0], color='red'))
        self._camera_position.append(self.axes.scatter3D(camera_3d['x'][1], camera_3d['y'][1], camera_3d['z'][1], color='green'))
        self._camera_position.append(self.axes.scatter3D(camera_3d['x'][2], camera_3d['y'][2], camera_3d['z'][2], color='orange'))
        self._camera_position.append(self.axes.scatter3D(camera_3d['x'][3], camera_3d['y'][3], camera_3d['z'][3], color='yellow'))
        # Edges connecting the four corners (the frustum outline).
        self._camera_position.append(self.axes.plot(camera_3d['x'][0:2], camera_3d['y'][0:2], camera_3d['z'][0:2], color='orange', linewidth=self._camera_linewidth))
        self._camera_position.append(self.axes.plot(camera_3d['x'][1:3], camera_3d['y'][1:3], camera_3d['z'][1:3], color='y', linewidth=self._camera_linewidth))
        self._camera_position.append(self.axes.plot([camera_3d['x'][0], camera_3d['x'][3]], [camera_3d['y'][0], camera_3d['y'][3]], [camera_3d['z'][0], camera_3d['z'][3]], color='green', linewidth=self._camera_linewidth))
        self._camera_position.append(self.axes.plot([camera_3d['x'][2], camera_3d['x'][3]], [camera_3d['y'][2], camera_3d['y'][3]], [camera_3d['z'][2], camera_3d['z'][3]], color='r', linewidth=self._camera_linewidth))
        # Camera axis vectors drawn from the origin (z blue, x red, y green).
        z = camera_pos['z']
        x = camera_pos['x']
        y = camera_pos['y']
        self._camera_position.append(self.axes.quiver(0, 0, 0, z[0], z[1], z[2], color="blue"))
        self._camera_position.append(self.axes.quiver(0, 0, 0, x[0], x[1], x[2], color="red"))
        self._camera_position.append(self.axes.quiver(0, 0, 0, y[0], y[1], y[2], color="green"))
import { css, createGlobalStyle } from 'styled-components';
import { makeRgba } from '../../../../packages/helpers';
import './fontiran.css';
import './icons.css';
import './sanitize.css';
// Base global styles: html/body resets, the `.section` card and small text
// utilities. Fix: the `.disabled:after` overlay rule declared `content` and
// `z-index` twice; the duplicates are removed, keeping the declarations that
// won the cascade (`content: ''`, `z-index: 99999`), so rendering is
// unchanged.
const additional = css`
  html {
    font-size: 62%;
    font-family: ${props => props.theme.defaultFont};
    * {
      font-family: inherit;
      outline: none;
    }
    .circle {
      border-radius: 50%;
    }
  }
  body {
    padding: 0 !important;
    margin: 0 !important;
    background-color: ${props => props.theme.colors.gray.bright};
    a {
      text-decoration: none;
    }
  }
  .section {
    border-radius: calc(${props => props.theme.defaultRem} * 0.5);
    background-color: ${props => props.theme.colors.white};
    border: calc(${props => props.theme.defaultRem} * 0.1) solid
      ${props => makeRgba(0.25, props.theme.colors.gray.light)};
    padding: calc(${props => props.theme.defaultRem} * 1.6);
    margin: calc(${props => props.theme.defaultRem} * 1.6) auto;
    &.disabled:after {
      content: '';
      z-index: 99999;
      padding: calc(${props => props.theme.defaultRem} * 1.6);
      background: ${props => makeRgba(0.2, props.theme.colors.white)};
      width: 100%;
      height: 100%;
      display: block;
      position: absolute;
      left: 0;
      top: 0;
    }
  }
  .full-width {
    width: 100%;
  }
  .globalLists {
    list-style-type: none;
    padding-right: 0;
    padding-left: 0;
    a {
      text-decoration: none;
      color: ${props => props.theme.colors.gray.normal};
    }
  }
  .no-select {
    -webkit-touch-callout: none; /* iOS Safari */
    -webkit-user-select: none; /* Safari */
    -khtml-user-select: none; /* Konqueror HTML */
    -moz-user-select: none; /* Old versions of Firefox */
    -ms-user-select: none; /* Internet Explorer/Edge */
    user-select: none; /* Non-prefixed version, currently
                          supported by Chrome, Opera and Firefox */
  }
  .no-effect-button {
    border: none !important;
    display: inherit;
    cursor: pointer;
    background: ${props => props.theme.colors.transparent};
  }
  .p-absolute {
    position: absolute;
  }
  .b-none {
    border: none !important;
  }
  .text-normal {
    font-weight: normal !important;
  }
  .text-bold {
    font-weight: bold !important;
  }
  .text-large {
    font-size: calc(${props => props.theme.defaultRem} * 1.8) !important;
  }
  .text-medium {
    font-size: calc(${props => props.theme.defaultRem} * 1.4) !important;
  }
  .text-small {
    font-size: calc(${props => props.theme.defaultRem} * 1.2) !important;
  }
`;
// Generated spacing utilities: for i in 0..4 emits border-radius (.br-i),
// padding (.p-i/.pt-i/...), margin (.m-i/.mt-i/...), auto-margins and
// negative-margin (.-m-i/...) classes, each i rem.
const borderBox = css`
  ${[...Array(5).keys()].map(
    i =>
      `
      .br-${i} {
        border-radius: ${i / 10}rem !important;
      }
      .p-${i} {
        padding: ${i}rem !important;
      }
      .pt-${i} {
        padding-top: ${i}rem !important;
      }
      .pr-${i} {
        padding-right: ${i}rem !important;
      }
      .pb-${i} {
        padding-bottom: ${i}rem !important;
      }
      .pl-${i} {
        padding-left: ${i}rem !important;
      }
      .m-${i} {
        margin: ${i}rem !important;
      }
      .mt-${i} {
        margin-top: ${i}rem !important;
      }
      .mt-auto {
        margin-top: auto !important;
      }
      .mr-${i} {
        margin-right: ${i}rem !important;
      }
      .mr-auto {
        margin-right: auto !important;
      }
      .mb-${i} {
        margin-bottom: ${i}rem !important;
      }
      .mb-auto {
        margin-bottom: auto !important;
      }
      .ml-${i} {
        margin-left: ${i}rem !important;
      }
      .ml-auto {
        margin-left: auto !important;
      }
      .-m-${i} {
        margin: -${i}rem !important;
      }
      .-mt-${i} {
        margin-top: -${i}rem !important;
      }
      .-mr-${i} {
        margin-right: -${i}rem !important;
      }
      .-mb-${i} {
        margin-bottom: -${i}rem !important;
      }
      .-ml-${i} {
        margin-left: -${i}rem !important;
      }
      `,
  )}
`;
// CSS `display` utility classes.
const display = css`
  .d-block {
    display: block;
  }
  .d-inline {
    display: inline;
  }
  .d-inline-block {
    display: inline-block;
  }
  .d-flex {
    display: flex;
  }
`;
// Direction, text-alignment and float utilities.
// Fix: this was the only style fragment declared as a plain (untagged)
// template string; it is now tagged with `css` like every sibling fragment
// so tooling and styled-components process all fragments consistently.
const textAlign = css`
  .dir-rtl {
    direction: rtl !important;
  }
  .dir-ltr {
    direction: ltr !important;
  }
  .text-center {
    text-align: center !important;
  }
  .text-right {
    text-align: right !important;
  }
  .text-left {
    text-align: left !important;
  }
  .text-justify {
    text-align: justify !important;
  }
  .pull-left {
    float: left;
  }
  .pull-right {
    float: right;
  }
`;
// Flexbox alignment/justification/direction utility classes.
const flex = css`
  .align-start {
    display: flex;
    align-items: flex-start;
  }
  .align-center {
    display: flex;
    align-items: center;
  }
  .align-end {
    display: flex;
    align-items: flex-end;
  }
  .justify-start {
    display: flex;
    justify-content: flex-start;
  }
  .justify-center {
    display: flex;
    justify-content: center;
  }
  .justify-end {
    display: flex;
    justify-content: flex-end;
  }
  .justify-between {
    display: flex;
    justify-content: space-between;
  }
  .flex-row {
    display: flex;
    flex-direction: row;
  }
  .flex-column {
    display: flex;
    flex-direction: column !important;
  }
`;
/* eslint-disable prettier/prettier */
// Generates .text-<color> / .bg-<color> utilities from the theme palette.
// Flat entries (value is a string) get !important; nested shade maps emit
// .text-<color>-<shade> / .bg-<color>-<shade> without !important.
// NOTE(review): the !important asymmetry between the two branches looks
// unintentional — confirm before relying on it.
const colorful = css`
  ${props =>
    Object.keys(props.theme.colors).map(color =>
      typeof props.theme.colors[color] === 'string'
        ? `
  .text-${color}{
    color: ${props.theme.colors[color]} !important
  }
  .bg-${color}{
    background-color: ${props.theme.colors[color]} !important
  }
  `
        : Object.keys(props.theme.colors[color]).map(
            shade => `
  .text-${color}-${shade}{
    color: ${props.theme.colors[color][shade]};
  }
  .bg-${color}-${shade}{
    background-color: ${props.theme.colors[color][shade]};
  }
  `,
          ),
    )}
`;
/* eslint-enable prettier/prettier */
// Static rotation helpers plus an infinite 800ms spin animation
// (shared by .spin and .icon-spinner).
const rotation = css`
  .rotate-45 {
    transform: rotate(45deg);
  }
  .rotate-90 {
    transform: rotate(90deg);
  }
  .rotate-180 {
    transform: rotate(180deg);
  }
  .spin,
  .icon-spinner {
    animation-name: spin;
    animation-duration: 800ms;
    animation-iteration-count: infinite;
    animation-timing-function: linear;
  }
  @keyframes spin {
    from {
      transform: rotate(0deg);
    }
    to {
      transform: rotate(360deg);
    }
  }
`;
/* eslint-disable indent */
// Global style overrides for the iziToast notification library, themed from
// the app's defaultRem/colors. Sizing is derived from defaultRem so toasts
// scale with the base font size.
const toasts = css`
  .iziToast {
    font-family: ${props => props.theme.defaultFont};
    height: calc(${props => props.theme.defaultRem} * 8);
    min-width: calc(${props => props.theme.defaultRem} * 31);
    /* fix: a stray ";" before !important made this declaration invalid and
       silently dropped the !important */
    border-radius: calc(${props => props.theme.defaultRem} * 0.5) !important;
    box-shadow: 0 0 calc(${props =>
      props.theme.defaultRem} * 0.6) 0 rgba(0, 0, 0, 0.16) !important;
    border: solid calc(${props => props.theme.defaultRem} * 0.1) ${props =>
  props.theme.colors.gray['ultra-light']} !important;
    .iziToast-close {
      background-size: calc(${props => props.theme.defaultRem} * 1.4);
    }
    .iziToast-body {
      height: 100% !important;
      padding-right: calc(${props => props.theme.defaultRem} * 5) !important;
    }
    .iziToast-texts {
      height: 100%;
      display: flex !important;
      align-items: center !important;
      margin-top: calc(${props => props.theme.defaultRem} * 0.5) !important;
    }
    .iziToast-progressbar > div {
      height: calc(${props => props.theme.defaultRem} * 0.4);
      background: ${props => makeRgba(0.17, props.theme.colors.black)};
      border-radius: 0 !important;
    }
    .iziToast-icon {
      border-radius: 50% !important;
      margin-top: calc(${props => props.theme.defaultRem} * -1.6) !important;
      width: calc(${props => props.theme.defaultRem} * 3.2) !important;
      height: calc(${props => props.theme.defaultRem} * 3.2) !important;
    }
    .toast-description {
      font-size: calc(${props => props.theme.defaultRem} * 1.2);
      margin-top: calc(${props => props.theme.defaultRem} * 0.8);
    }
  }
  .iziToast-rtl {
    padding: ${props =>
      `calc(${props.theme.defaultRem} * 0.8) 0 calc(${props.theme.defaultRem} * 0.9) calc(${props.theme.defaultRem} * 5.2)`} !important;
  }
  /* Per-severity variants: bright background, solid accent border and a
     matching icon disc (red / orange / blue / green). */
  .iziToast.iziToast-color-red {
    background: ${props => props.theme.colors.red.bright};
    border-right: calc(${props =>
      props.theme.defaultRem} * 0.6) solid ${props =>
  props.theme.colors.red.normal} !important;
    .iziToast-icon {
      background-color: ${props => props.theme.colors.red.normal} !important;
    }
  }
  .iziToast.iziToast-color-orange {
    background: ${props => props.theme.colors.orange.bright};
    border-right: calc(${props =>
      props.theme.defaultRem} * 0.6) solid ${props =>
  props.theme.colors.orange.normal} !important;
    .iziToast-icon {
      background-color: ${props => props.theme.colors.orange.normal} !important;
    }
  }
  .iziToast.iziToast-color-blue {
    background: ${props => props.theme.colors.blue.bright};
    border-right: calc(${props =>
      props.theme.defaultRem} * 0.6) solid ${props =>
  props.theme.colors.blue.normal} !important;
    .iziToast-icon {
      background-color: ${props => props.theme.colors.blue.normal} !important;
    }
  }
  .iziToast.iziToast-color-green {
    background: ${props => props.theme.colors.green.bright};
    border-right: calc(${props =>
      props.theme.defaultRem} * 0.6) solid ${props =>
  props.theme.colors.green.normal} !important;
    .iziToast-icon {
      background-color: ${props => props.theme.colors.green.normal} !important;
    }
  }
`;
/* eslint-enable indent */
// Single global stylesheet assembling every utility fragment above.
// Order matters: later fragments can override earlier ones.
const GlobalStyles = createGlobalStyle`
  ${additional}
  ${borderBox}
  ${display}
  ${textAlign}
  ${flex}
  ${colorful}
  ${rotation}
  ${toasts}
`;
export default GlobalStyles;
|
package io.rapidpro.surveyor.net;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import io.rapidpro.flows.utils.JsonUtils;
import io.rapidpro.flows.utils.Jsonizable;
/**
 * Value object for a RapidPro flow definition.
 *
 * Field names are deliberately snake_case so they line up one-to-one with the
 * JSON keys used by the server API.
 */
public class FlowDefinition implements Jsonizable {

    // Default language code for localized flow content.
    public String base_language;

    public JsonArray action_sets;
    public JsonArray rule_sets;

    // Flow-spec version string reported by the server.
    public String version;
    public String flow_type;

    // UUID of the node where execution of the flow begins.
    public String entry;

    public Metadata metadata;

    /** Descriptive metadata attached to a flow definition. */
    public static class Metadata {
        public int revision;
        public String name;
        public String contact_creation;
        public String uuid;
    }

    /**
     * Serializes this definition back into the JSON shape the server expects.
     *
     * NOTE(review): assumes {@code metadata} is non-null — a definition
     * without metadata would NPE here; confirm the API always supplies it.
     */
    @Override
    public JsonElement toJson() {
        return JsonUtils.object(
                "base_language", base_language,
                "action_sets", action_sets,
                "rule_sets", rule_sets,
                "version", version,
                "flow_type", flow_type,
                "entry", entry,
                "metadata", JsonUtils.object(
                        "uuid", metadata.uuid,
                        "revision", metadata.revision,
                        "name", metadata.name,
                        "contact_creation", metadata.contact_creation)
        );
    }

    /** Returns the JSON form of this definition (handy for logging). */
    public String toString() {
        return toJson().toString();
    }
}
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2497-1
#
# Security announcement date: 2015-02-09 00:00:00 UTC
# Script generation date:     2017-01-01 21:04:14 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
#    - ntp:1:4.2.6.p5+dfsg-3ubuntu2.14.04.2
#
# Last versions recommended by security team:
#    - ntp:1:4.2.6.p5+dfsg-3ubuntu2.14.04.10
#
# CVE List:
#    - CVE-2014-9297
#    - CVE-2014-9298
#
# More details:
#    - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade ntp in place to the patched version (no other packages touched).
sudo apt-get install --only-upgrade ntp=1:4.2.6.p5+dfsg-3ubuntu2.14.04.10 -y
|
// Barrel file: re-export all design tokens from a single entry point.
export * from './border-tokens';
export * from './color-tokens';
export * from './text-tokens';
|
import numpy as np
from scipy.signal import find_peaks
from .speech_feature_extraction import Extractor
class CustomExtractor(Extractor):
    """Extractor variant that estimates formants directly from the signal."""

    def extract_formants(self, audio_signal):
        """Return up to the first three formant-like FFT peak bins as a list.

        ``audio_signal`` is assumed to be a 1-D array of audio samples —
        TODO confirm against callers. This is a crude demonstration: proper
        formant tracking would use LPC analysis; here we simply pick
        prominent peaks of the FFT magnitude spectrum.
        """
        magnitude_spectrum = np.abs(np.fft.fft(audio_signal))
        # Peaks must clear a fixed height and be well separated, matching the
        # original heuristic thresholds.
        peak_bins, _ = find_peaks(magnitude_spectrum, height=1000, distance=1000)
        return peak_bins[:3].tolist()
# beep example
# Plays BEEP sounds through the speaker connected to pin 5.
from machine import Pin, PWM
import utime
BuzzerPin=5
def beep(frekans, time):
    # Drive the buzzer with a 50%-duty PWM (duty=512 of 1024) at `frekans` Hz,
    # hold the tone for `time` seconds, then release the PWM peripheral.
    beeper = PWM(Pin(BuzzerPin,Pin.OUT), freq=frekans, duty=512)
    utime.sleep(time)
    beeper.deinit()
# Three quick descending beeps as a startup check.
beep(1000,0.1)
beep(800,0.1)
beep(600,0.1)
# Dictionary variable: note name -> frequency in Hz (' ' = rest, 0 Hz).
notalar= { 'c' : 262, 'd' : 294, 'e' : 330, 'f': 349, 'g': 392, 'a':440, 'b':494, 'C':523, ' ':0}
print(notalar)
# Play each character of the melody as a 0.1 s note.
melodi = ' cdefgabC'
for nota in melodi:
    beep(notalar[nota],0.1)
/***********************************************************************************************************************
* OpenStudio(R), Copyright (c) 2008-2021, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
*
* (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
*
* (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products
* derived from this software without specific prior written permission from the respective party.
*
* (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works
* may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior
* written permission from Alliance for Sustainable Energy, LLC.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED
* STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***********************************************************************************************************************/
#ifndef MODEL_DESIGNSPECIFICATIONOUTDOORAIR_HPP
#define MODEL_DESIGNSPECIFICATIONOUTDOORAIR_HPP
#include "ModelAPI.hpp"
#include "ResourceObject.hpp"
namespace openstudio {
namespace model {
class Schedule;
namespace detail {
class DesignSpecificationOutdoorAir_Impl;
} // namespace detail
/** DesignSpecificationOutdoorAir is a ModelObject that wraps the OpenStudio IDD
* object 'OS:DesignSpecification:OutdoorAir'. */
class MODEL_API DesignSpecificationOutdoorAir : public ResourceObject
{
 public:
  /** @name Constructors and Destructors */
  //@{

  explicit DesignSpecificationOutdoorAir(const Model& model);

  virtual ~DesignSpecificationOutdoorAir() {}

  //@}

  static IddObjectType iddObjectType();

  // Valid choices for the Outdoor Air Method field.
  static std::vector<std::string> outdoorAirMethodValues();

  /** \deprecated Use outdoorAirMethodValues() instead. */
  static std::vector<std::string> validOutdoorAirMethodValues();

  /** @name Getters */
  //@{

  // Each value getter below is paired with an is...Defaulted() query that
  // reports whether the field is unset and falling back to the IDD default.
  std::string outdoorAirMethod() const;

  bool isOutdoorAirMethodDefaulted() const;

  double outdoorAirFlowperPerson() const;

  bool isOutdoorAirFlowperPersonDefaulted() const;

  double outdoorAirFlowperFloorArea() const;

  bool isOutdoorAirFlowperFloorAreaDefaulted() const;

  double outdoorAirFlowRate() const;

  bool isOutdoorAirFlowRateDefaulted() const;

  double outdoorAirFlowAirChangesperHour() const;

  bool isOutdoorAirFlowAirChangesperHourDefaulted() const;

  /** In EnergyPlus 8.7.0 and above this field maps to the EnergyPlus field named "Outdoor Air Schedule Name" **/
  boost::optional<Schedule> outdoorAirFlowRateFractionSchedule() const;

  //@}
  /** @name Setters */
  //@{

  // Setters return false when the supplied value is rejected; each reset
  // clears the field back to its default.
  bool setOutdoorAirMethod(const std::string& outdoorAirMethod);

  void resetOutdoorAirMethod();

  bool setOutdoorAirFlowperPerson(double outdoorAirFlowperPerson);

  void resetOutdoorAirFlowperPerson();

  bool setOutdoorAirFlowperFloorArea(double outdoorAirFlowperFloorArea);

  void resetOutdoorAirFlowperFloorArea();

  bool setOutdoorAirFlowRate(double outdoorAirFlowRate);

  void resetOutdoorAirFlowRate();

  bool setOutdoorAirFlowAirChangesperHour(double outdoorAirFlowAirChangesperHour);

  void resetOutdoorAirFlowAirChangesperHour();

  /** In EnergyPlus 8.7.0 and above this field maps to the EnergyPlus field named "Outdoor Air Schedule Name" **/
  bool setOutdoorAirFlowRateFractionSchedule(Schedule& schedule);

  void resetOutdoorAirFlowRateFractionSchedule();

  //@}
 protected:
  /// @cond
  typedef detail::DesignSpecificationOutdoorAir_Impl ImplType;

  explicit DesignSpecificationOutdoorAir(std::shared_ptr<detail::DesignSpecificationOutdoorAir_Impl> impl);

  friend class detail::DesignSpecificationOutdoorAir_Impl;
  friend class Model;
  friend class IdfObject;
  friend class openstudio::detail::IdfObject_Impl;
  /// @endcond
 private:
  REGISTER_LOGGER("openstudio.model.DesignSpecificationOutdoorAir");
};
// Convenience aliases following the OpenStudio Optional*/Vector convention.
/** \relates DesignSpecificationOutdoorAir*/
typedef boost::optional<DesignSpecificationOutdoorAir> OptionalDesignSpecificationOutdoorAir;

/** \relates DesignSpecificationOutdoorAir*/
typedef std::vector<DesignSpecificationOutdoorAir> DesignSpecificationOutdoorAirVector;
} // namespace model
} // namespace openstudio
#endif // MODEL_DESIGNSPECIFICATIONOUTDOORAIR_HPP
|
<filename>js/parseHtml.js
// Parse an HTML string into a JSON tree; no error handling for malformed input yet.
function parseHtml(html) {
let stack = [];
let i = 0;
while (i < html.length) {
if (html[i] === '<') {
let j = i;
while (j < html.length && html[j] !== '>') {
j++;
}
const sub = html.substring(i+1, j)
if (sub.startsWith('/')) { // 如果是后部分,</div>
let nodes = [];
while (stack.length > 0) {
const top = stack.pop();
if (typeof top === 'string') {
let item = {type: top, nodes: nodes}
stack.push(item);
break;
} else {
nodes.push(top);
}
}
} else { // 如果是前半部分, <div>
stack.push(sub);
}
i = j+1;
} else {
let j = i;
while (j < html.length && html[j] !== '<') {
j++;
}
const sub = html.substring(i, j)
let item = {type: 'text', text: sub}
i = j;
stack.push(item)
}
}
console.log(stack[0])
}
// Quick manual check: parse a small fragment and log the resulting tree.
let html = '<div><span>21</span><span>2</span></div>'
parseHtml(html)
|
import React, { useState, useEffect, useRef } from 'react';
import cx from 'classnames';
import styles from './index.module.scss';
import { ICommand } from '../../types';
/**
 * Form for creating or editing a {trigger, url} command.
 *
 * Props:
 *  - command:    existing command to edit; when combined with `newCommand`
 *                it pre-fills the URL for a "create for this site" flow.
 *  - newCommand: render in "create" mode (changes copy and submit label).
 *  - onSubmit:   called with the edited command when the form is submitted.
 *  - onCancel:   called when the Cancel link is clicked.
 *  - fullWidth:  stretch the editor to the container width.
 */
export default function CommandEditor({
  command,
  newCommand,
  onSubmit,
  onCancel,
  fullWidth,
}: {
  command?: ICommand;
  newCommand?: boolean;
  onSubmit: (command: ICommand) => void;
  onCancel: () => void;
  fullWidth?: boolean;
}) {
  const input = useRef<HTMLInputElement>(null);
  // Local draft state, seeded from the command being edited (if any).
  const [trigger, setTrigger] = useState(command?.trigger ?? '');
  const [url, setUrl] = useState(command?.url ?? '');

  // Focus the trigger field once on mount.
  useEffect(() => {
    if (input.current) {
      input.current.focus();
    }
  }, []);

  function handleSubmit(e: React.FormEvent) {
    e.preventDefault();
    onSubmit({ trigger, url });
  }

  function handleCancel(e: React.MouseEvent) {
    e.preventDefault();
    onCancel();
  }

  // Both fields must be non-blank before the form can be submitted.
  const isSubmitDisabled = !trigger.trim() || !url.trim();

  return (
    <div
      className={cx(
        styles.wrapper,
        newCommand && styles.new,
        fullWidth && styles.fullWidth
      )}
    >
      {newCommand && !command && <p>Create a new command:</p>}
      {newCommand && command && (
        <p>
          Create a new command for <strong>{command.url}:</strong>
        </p>
      )}
      <form className={styles.form} onSubmit={handleSubmit}>
        <div className={styles.label}>
          <label htmlFor="trigger">Trigger</label>
        </div>
        <input
          ref={input}
          id="trigger"
          className={styles.input}
          value={trigger}
          onChange={e => setTrigger(e.currentTarget.value)}
          placeholder={'"docs new", for example'}
        ></input>
        <div className={styles.label}>
          <label htmlFor="url">URL</label>
        </div>
        <input
          id="url"
          className={styles.input}
          value={url}
          onChange={e => setUrl(e.currentTarget.value)}
          placeholder={'https://docs.google.com/document/u/0/create'}
        ></input>
        <div className={styles.actions}>
          {/* Cancel is hidden in the "create for this site" flow. */}
          {!(newCommand && command) && (
            <a
              href="#cancel"
              className={styles.mutedLink}
              onClick={handleCancel}
            >
              Cancel
            </a>
          )}
          <div className="margin-v-sm" />
          <input
            className={styles.primaryButton}
            type="submit"
            value={newCommand ? 'Add' : 'Update'}
            disabled={isSubmitDisabled}
          />
        </div>
      </form>
    </div>
  );
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package javax.el;
import org.junit.Assert;
import org.junit.Test;
import org.apache.jasper.el.ELContextImpl;
public class TestUtil {

    @Test
    public void testBug56425a() {
        // A bean method with String arguments should be invocable from EL.
        ELProcessor elp = new ELProcessor();
        elp.defineBean("string", "a-b-c-d");
        Assert.assertEquals("a_b_c_d", elp.eval("string.replace(\"-\",\"_\")"));
    }

    @Test
    public void testBug56425b() {
        // Static-style methods resolve on the bean's class, not its value.
        ELProcessor elp = new ELProcessor();
        elp.defineBean("string", "Not used. Any value is fine here");
        Assert.assertEquals("5", elp.eval("string.valueOf(5)"));
    }

    /**
     * Minimal stand-in for the EL 3.0 ELProcessor API built from the EL 2.2
     * primitives available here.
     */
    private static class ELProcessor {
        private final ExpressionFactory factory = ExpressionFactory.newInstance();
        private final ELContext context = new ELContextImpl();

        /** Expose {@code bean} to expressions under the variable {@code name}. */
        public void defineBean(String name, Object bean) {
            ValueExpression wrapped = factory.createValueExpression(bean, bean.getClass());
            context.getVariableMapper().setVariable(name, wrapped);
        }

        /** Evaluate {@code expression} wrapped as {@code ${...}} and return the result. */
        public Object eval(String expression) {
            ValueExpression ve = factory.createValueExpression(context, "${" + expression + "}", Object.class);
            return ve.getValue(context);
        }
    }
}
|
#!/bin/bash
# Run dieharder test -d 11 against generator -g 61 with a fixed seed (-S)
# so the run is reproducible.
dieharder -d 11 -g 61 -S 3927417979
|
#!/bin/bash
# Grid worker-node smoke-test job: reports identity and environment details,
# repoints the LFC/BDII variables at the dev services, and checks which grid
# and MPI software is installed on the node.
sleep 5
echo "I am "
whoami
echo "TMPDIR is $TMPDIR"
# fix: the label misspelled CUSTOMIZATION ("CUSTOMIZAION") while the
# variable itself was spelled correctly.
echo "GLITE_LOCAL_CUSTOMIZATION_DIR IS $GLITE_LOCAL_CUSTOMIZATION_DIR"
echo "i am currently in $PWD"
echo "Software Dir is $VO_SAGRID_SW_DIR"
echo "what's in SAGRID SWDIR ? "
ls -lht $VO_SAGRID_SW_DIR
date
uname -a
hostname -f
date
# Show the data-management variables the site provided...
echo "LFC_HOST is $LFC_HOST"
echo "Top-BDII is $LCG_GFAL_INFOSYS"
echo "LFC_TYPE is $LFC_TYPE"
# ...then override them to point at the dev LFC and top-level BDII.
echo "now setting the variables correctly"
export LFC_HOST=devslngrd002.uct.ac.za
export LCG_GFAL_INFOSYS=srvslngrd001.uct.ac.za:2170
export LFC_TYPE=lfc
echo "new variables:"
echo "LFC_HOST is $LFC_HOST"
echo "Top-BDII is $LCG_GFAL_INFOSYS"
echo "LFC_TYPE is $LFC_TYPE"
echo "trying to interact with the logical file catalog"
# Catalog-copy checks kept disabled for now:
#lcg-cp -v lfn:/grid/sagrid/portal.tar.gz file:/dev/null
#lcg-cp -v lfn:/grid/sagrid/SoftRepo/heasoft-6.11.1src-all.tar.gz file:/dev/null
#echo "which sagrid rpms are there ?"
rpm -qa |grep sagrid
#touch $VO_SAGRID_SW_DIR/main
echo "checking for MPI"
rpm -qa |grep -i mpi
which mpif90
exit 0;
#!/usr/bin/bash
#
# lint_package.sh - functions for checking for packaging errors
#
# Copyright (c) 2015-2018 Pacman Development Team <pacman-dev@archlinux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Source guard: bail out if this library has already been loaded.
[[ -n "$LIBMAKEPKG_LINT_PACKAGE_SH" ]] && return
LIBMAKEPKG_LINT_PACKAGE_SH=1
LIBRARY=${LIBRARY:-'/usr/share/makepkg'}
source "$LIBRARY/util/message.sh"
source "$LIBRARY/util/util.sh"
# Each sourced lint_package/*.sh file registers its check function into
# lint_package_functions; lock the list once everything is loaded.
declare -a lint_package_functions
for lib in "$LIBRARY/lint_package/"*.sh; do
	source "$lib"
done
readonly -a lint_package_functions
# Run every registered packaging lint check against $pkgdir.
# Returns 0 when all checks pass, 1 if any check failed (all checks still run).
lint_package() {
	cd_safe "$pkgdir"

	msg "$(gettext "Checking for packaging issues...")"

	local ret=0
	# fix: quote the array expansion so function names are not subject to
	# word splitting or pathname expansion.
	for func in "${lint_package_functions[@]}"; do
		$func || ret=1
	done

	return $ret
}
|
from twisted.internet.defer import inlineCallbacks
from vumi.dispatchers.endpoint_dispatchers import Dispatcher
from vumi.dispatchers.tests.helpers import DummyDispatcher, DispatcherHelper
from vumi.tests.helpers import (
VumiTestCase, IHelper, PersistenceHelper, MessageHelper, WorkerHelper,
MessageDispatchHelper, success_result_of)
class TestDummyDispatcher(VumiTestCase):
    # These tests exercise only DummyDispatcher's in-memory bookkeeping:
    # the fake publishers record published messages instead of touching AMQP.

    def test_publish_inbound(self):
        """
        DummyDispatcher should have a fake inbound publisher that remembers
        messages.
        """
        dispatcher = DummyDispatcher({
            'transport_names': ['ri_conn'],
            'exposed_names': ['ro_conn'],
        })
        self.assertEqual(dispatcher.exposed_publisher['ro_conn'].msgs, [])
        dispatcher.publish_inbound_message('ro_conn', 'fake inbound')
        self.assertEqual(
            dispatcher.exposed_publisher['ro_conn'].msgs, ['fake inbound'])
        # clear() must empty the recorded-message list.
        dispatcher.exposed_publisher['ro_conn'].clear()
        self.assertEqual(dispatcher.exposed_publisher['ro_conn'].msgs, [])

    def test_publish_outbound(self):
        """
        DummyDispatcher should have a fake outbound publisher that remembers
        messages.
        """
        dispatcher = DummyDispatcher({
            'transport_names': ['ri_conn'],
            'exposed_names': ['ro_conn'],
        })
        self.assertEqual(dispatcher.transport_publisher['ri_conn'].msgs, [])
        dispatcher.publish_outbound_message('ri_conn', 'fake outbound')
        self.assertEqual(
            dispatcher.transport_publisher['ri_conn'].msgs, ['fake outbound'])
        # clear() must empty the recorded-message list.
        dispatcher.transport_publisher['ri_conn'].clear()
        self.assertEqual(dispatcher.transport_publisher['ri_conn'].msgs, [])

    def test_publish_event(self):
        """
        DummyDispatcher should have a fake event publisher that remembers
        messages.
        """
        dispatcher = DummyDispatcher({
            'transport_names': ['ri_conn'],
            'exposed_names': ['ro_conn'],
        })
        self.assertEqual(
            dispatcher.exposed_event_publisher['ro_conn'].msgs, [])
        dispatcher.publish_inbound_event('ro_conn', 'fake event')
        self.assertEqual(
            dispatcher.exposed_event_publisher['ro_conn'].msgs, ['fake event'])
        # clear() must empty the recorded-message list.
        dispatcher.exposed_event_publisher['ro_conn'].clear()
        self.assertEqual(
            dispatcher.exposed_event_publisher['ro_conn'].msgs, [])
class RunningCheckDispatcher(Dispatcher):
    # Dispatcher that records whether its setup/teardown lifecycle hooks have
    # run, so tests can assert on worker start/stop state.
    disp_worker_running = False

    def setup_dispatcher(self):
        self.disp_worker_running = True

    def teardown_dispatcher(self):
        self.disp_worker_running = False
class FakeCleanupCheckHelper(object):
    """Stand-in helper object that records whether cleanup() was invoked."""

    cleaned_up = False

    def cleanup(self):
        # Set the flag on the instance (shadowing the class default) so tests
        # can assert that the call happened.
        self.cleaned_up = True
class TestDispatcherHelper(VumiTestCase):
    # Tests for DispatcherHelper itself: construction defaults, parameter
    # forwarding, lifecycle (setup/cleanup) and worker creation.

    def test_implements_IHelper(self):
        """
        DispatcherHelper instances should provide the IHelper interface.
        """
        self.assertTrue(IHelper.providedBy(DispatcherHelper(None)))

    def test_defaults(self):
        """
        DispatcherHelper instances should have the expected parameter defaults.
        """
        fake_disp_class = object()
        disp_helper = DispatcherHelper(fake_disp_class)
        self.assertEqual(disp_helper.dispatcher_class, fake_disp_class)
        self.assertIsInstance(
            disp_helper.persistence_helper, PersistenceHelper)
        self.assertIsInstance(disp_helper.msg_helper, MessageHelper)
        self.assertIsInstance(disp_helper.worker_helper, WorkerHelper)
        # The dispatch helper must be wired to the same msg/worker helpers.
        dispatch_helper = disp_helper.dispatch_helper
        self.assertIsInstance(dispatch_helper, MessageDispatchHelper)
        self.assertEqual(dispatch_helper.msg_helper, disp_helper.msg_helper)
        self.assertEqual(
            dispatch_helper.worker_helper, disp_helper.worker_helper)
        self.assertEqual(disp_helper.persistence_helper.use_riak, False)

    def test_all_params(self):
        """
        DispatcherHelper should pass use_riak to its PersistenceHelper and all
        other params to its MessageHelper.
        """
        fake_disp_class = object()
        disp_helper = DispatcherHelper(
            fake_disp_class, use_riak=True, transport_addr='Obs station')
        self.assertEqual(disp_helper.persistence_helper.use_riak, True)
        self.assertEqual(disp_helper.msg_helper.transport_addr, 'Obs station')

    def test_setup_sync(self):
        """
        DispatcherHelper.setup() should return ``None``, not a Deferred.
        """
        msg_helper = DispatcherHelper(None)
        self.add_cleanup(msg_helper.cleanup)
        self.assertEqual(msg_helper.setup(), None)

    def test_cleanup(self):
        """
        DispatcherHelper.cleanup() should call .cleanup() on its
        PersistenceHelper and WorkerHelper.
        """
        # Swap in fakes that just record the cleanup() call.
        disp_helper = DispatcherHelper(None)
        disp_helper.persistence_helper = FakeCleanupCheckHelper()
        disp_helper.worker_helper = FakeCleanupCheckHelper()
        self.assertEqual(disp_helper.persistence_helper.cleaned_up, False)
        self.assertEqual(disp_helper.worker_helper.cleaned_up, False)
        success_result_of(disp_helper.cleanup())
        self.assertEqual(disp_helper.persistence_helper.cleaned_up, True)
        self.assertEqual(disp_helper.worker_helper.cleaned_up, True)

    @inlineCallbacks
    def test_get_dispatcher_defaults(self):
        """
        .get_dispatcher() should return a started dispatcher.
        """
        disp_helper = self.add_helper(DispatcherHelper(RunningCheckDispatcher))
        app = yield disp_helper.get_dispatcher({
            'receive_inbound_connectors': [],
            'receive_outbound_connectors': [],
        })
        self.assertIsInstance(app, RunningCheckDispatcher)
        self.assertEqual(app.disp_worker_running, True)

    @inlineCallbacks
    def test_get_dispatcher_no_start(self):
        """
        .get_dispatcher() should return an unstarted dispatcher if passed
        ``start=False``.
        """
        disp_helper = self.add_helper(DispatcherHelper(RunningCheckDispatcher))
        app = yield disp_helper.get_dispatcher({
            'receive_inbound_connectors': [],
            'receive_outbound_connectors': [],
        }, start=False)
        self.assertIsInstance(app, RunningCheckDispatcher)
        self.assertEqual(app.disp_worker_running, False)

    @inlineCallbacks
    def test_get_application_different_class(self):
        """
        .get_dispatcher() should return an instance of the specified worker
        class if one is provided.
        """
        disp_helper = self.add_helper(DispatcherHelper(Dispatcher))
        app = yield disp_helper.get_dispatcher({
            'receive_inbound_connectors': [],
            'receive_outbound_connectors': [],
        }, cls=RunningCheckDispatcher)
        self.assertIsInstance(app, RunningCheckDispatcher)

    def test_get_connector_helper(self):
        """
        .get_connector_helper() should return a DispatcherConnectorHelper
        instance for the provided connector name.
        """
        disp_helper = DispatcherHelper(None)
        dc_helper = disp_helper.get_connector_helper('barconn')
        # The connector helper shares the msg helper and broker, but binds
        # its worker helper to the named connector.
        self.assertEqual(dc_helper.msg_helper, disp_helper.msg_helper)
        self.assertEqual(dc_helper.worker_helper._connector_name, 'barconn')
        self.assertEqual(
            dc_helper.worker_helper.broker, disp_helper.worker_helper.broker)
|
<filename>angular/src/app/heroes.component.ts
import { Component, OnInit } from '@angular/core';
import { Hero } from './hero';
import { HeroService } from './hero.service';
import { Router } from '@angular/router';
@Component({
selector: 'my-heroes',
templateUrl: './heroes.component.html',
styleUrls: ['./heroes.component.css'],
// Providers tell Angular to make a fresh instance of HeroService when it creates AppComponent.
// The AppComponent as well as its child components can use that service to get hero data
// In other words, here we have defined HeroService as a Provider for the AppComponent
// providers: [HeroService]
// Remove HeroService from providers of HeroComponent because it is promoted and now its part of
// providers for AppComponent because this service will be needed in multiple child components of
// AppComponent
providers: []
})
export class HeroesComponent implements OnInit {
  // Heroes loaded from HeroService; undefined until getHeroes() resolves.
  heroes: Hero[];
  // Hero currently highlighted in the list (drives the detail link).
  selectedHero: Hero;

  // Parameter of following constructor simultaneously defines a private property named heroService and
  // maintains it to be a HeroService injection. heroService instance should not be created via
  // `heroService = new HeroService()`
  constructor(
    private router: Router,
    private heroService: HeroService) { }

  /** Load the hero list once the component is initialized. */
  ngOnInit(): void {
    this.getHeroes();
  }

  /** Fetch all heroes from the service and store them on the component. */
  getHeroes(): void {
    // Without Promise:
    // this.heroes = this.heroService.getHeroes();
    // With Promise:
    // Callback sets the component's heroes property to the array of heroes returned by the service
    // NOTE(review): no .catch() here — a failed fetch is silently ignored.
    this.heroService.getHeroes().then((heroes) => this.heroes = heroes)
    // Arrow function expression has shorter syntax than a function expression and it does bind
    // its on this, argument, super or new.target.
    // Format:
    //     (param1, param2,...,paramN) => { statements }
    //     (param1, param2,...,paramN) => expression
    //         Equivalent to (param1, param2,...,paramN) => { return expression; }
  }

  /** Create a hero with the given (trimmed) name and append it to the list. */
  add(name: string): void {
    name = name.trim();
    if (!name) { return; }
    this.heroService.create(name)
      .then(hero => {
        this.heroes.push(hero);
        this.selectedHero = null;
      });
  }

  /** Delete the hero on the server, then drop it from the local list. */
  delete(hero: Hero): void {
    this.heroService
        .delete(hero.id)
        .then(() => {
          this.heroes = this.heroes.filter(h => h !== hero);
          if (this.selectedHero === hero) { this.selectedHero = null; }
        });
  }

  onSelect(hero: Hero): void {
    this.selectedHero = hero;
  }

  /** Navigate to the detail route for the currently selected hero. */
  gotoDetail(): void {
    this.router.navigate(['/detail', this.selectedHero.id])
  }
}
|
/// Asynchronously looks up the indices at which `token` occurs.
/// The work runs on a global queue; `completion` is always invoked on the
/// main queue, with `nil` when no index data is available.
func occurrencesOfToken(_ token: String, completion: @escaping ([Int]?) -> Void) {
    // Simulate an asynchronous operation, e.g. fetching token index data
    // from a remote server.
    DispatchQueue.global().async {
        guard let indexData = searchForToken(token) else {
            // No index data found: report nil back on the main queue.
            DispatchQueue.main.async {
                completion(nil)
            }
            return
        }
        // Decode the indices and deliver them on the main queue.
        let indices = extractIndices(indexData, for: token)
        DispatchQueue.main.async {
            completion(indices)
        }
    }
}
// Example helper functions for illustration purposes.

/// Stub: pretend to look up raw token index data (e.g. from a server).
/// Returns the data when found, otherwise nil.
func searchForToken(_ token: String) -> String? {
    let stubbedData: String? = "SwiftKeyTokenIndexData"
    return stubbedData
}
/// Stub: pretend to decode the positions at which `token` appears inside
/// `tokenIndexData`, returning them as an array of integers.
func extractIndices(_ tokenIndexData: String, for token: String) -> [Int] {
    let stubbedIndices: [Int] = [5, 12, 23]
    return stubbedIndices
}
// Photo API routes: map "VERB /path" to PhotosController actions.
module.exports = {
  'PUT /photos': 'PhotosController.upload',
  'GET /photos': 'PhotosController.get',
  'POST /photos/like': 'PhotosController.like'
};
|
#!/bin/sh
# Production build via Sencha Cmd.
alias sencha='/home/sencha/Cmd/sencha'
# Remove stale outputs so the compiler writes fresh bundles.
[ -e ../application.js ] && rm ../application.js
[ -e ../application.debug.js ] && rm ../application.debug.js
# Concatenate and YUI-compress the app (excluding previous outputs) into ../application.js.
sencha --plain -ti compile -ignore ../application.js,../application.debug.js -debug=false concatenate -yui -out=../application.js
|
package io.cattle.platform.storage.service.impl;
import io.cattle.platform.core.dao.GenericResourceDao;
import io.cattle.platform.core.model.Image;
import io.cattle.platform.object.ObjectManager;
import io.cattle.platform.storage.pool.StoragePoolDriver;
import io.cattle.platform.storage.service.StorageService;
import io.cattle.platform.storage.service.dao.ImageDao;
import java.util.List;
import javax.inject.Inject;
/**
 * Default {@link StorageService} implementation: delegates image population to
 * the configured {@link StoragePoolDriver}s and persists via the generic DAO.
 */
public class StorageServiceImpl implements StorageService {

    @Inject
    ObjectManager objectManager;

    List<StoragePoolDriver> drivers;

    @Inject
    GenericResourceDao genericResourceDao;

    @Inject
    ImageDao imageDao;

    /**
     * Registers the image identified by {@code uuid}, or returns null when no
     * uuid is given.
     */
    @Override
    public Image registerRemoteImage(final String uuid) {
        return uuid == null ? null : populateNewRecord(uuid);
    }

    /** True when at least one driver recognizes {@code uuid}. */
    @Override
    public boolean isValidUUID(String uuid) {
        Image candidate = objectManager.newRecord(Image.class);
        for (StoragePoolDriver poolDriver : drivers) {
            if (poolDriver.populateImage(uuid, candidate)) {
                return true;
            }
        }
        return false;
    }

    /** Builds a new image record and schedules it for creation. */
    protected Image populateNewRecord(String uuid) {
        Image record = objectManager.newRecord(Image.class);
        for (StoragePoolDriver poolDriver : drivers) {
            if (poolDriver.populateImage(uuid, record)) {
                // First driver that recognizes the uuid wins.
                break;
            }
        }
        return genericResourceDao.createAndSchedule(record);
    }

    public List<StoragePoolDriver> getDrivers() {
        return drivers;
    }

    @Inject
    public void setDrivers(List<StoragePoolDriver> drivers) {
        this.drivers = drivers;
    }
}
|
<gh_stars>1-10
package string_handle;
import java.io.BufferedReader;
import java.io.InputStreamReader;
/**
 * BOJ 5586: count occurrences of "JOI" and "IOI" (overlaps allowed) in the
 * input line, then print the two counts on separate lines.
 */
public class Boj5586 {
    public static void main(String[] args) throws Exception{
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        String line = br.readLine();
        br.close();

        // Scan for each pattern with indexOf, advancing by one so overlapping
        // matches (e.g. "IOIOI") are all counted.
        int joi = 0;
        int ioi = 0;
        for (int pos = line.indexOf("JOI"); pos >= 0; pos = line.indexOf("JOI", pos + 1)) {
            joi++;
        }
        for (int pos = line.indexOf("IOI"); pos >= 0; pos = line.indexOf("IOI", pos + 1)) {
            ioi++;
        }

        System.out.println(joi + "\n" + ioi);
    }
}
|
import cv2
# Function to perform image processing technique
def image_processing(frame):
    """Apply the demo processing step: convert a BGR frame to grayscale.

    Returns a single-channel image the same height/width as ``frame``.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return gray
# Main program: stream webcam frames through image_processing() and display
# the result until the user presses 'q'.
if __name__ == "__main__":
    # Capture video from the default webcam (device index 0)
    cap = cv2.VideoCapture(0)
    # Check if the webcam is opened successfully
    if not cap.isOpened():
        print("Error: Unable to open the webcam")
        exit()
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # Check if the frame is captured successfully
        if not ret:
            print("Error: Unable to capture frame")
            break
        # Perform image processing on the frame
        processed_frame = image_processing(frame)
        # Display the processed video in a window
        cv2.imshow('Processed Video', processed_frame)
        # Check for the 'q' key to exit the loop
        # (waitKey(1) also services the GUI event loop each frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release the video capture object and close all windows
    cap.release()
    cv2.destroyAllWindows()
import random
def determine_winner(user_choice, computer_choice):
    """Decide the outcome of one rock-paper-scissors round.

    Args:
        user_choice: the player's move ('rock', 'paper', or 'scissors').
        computer_choice: the computer's move.

    Returns:
        "It's a tie!", "You win!", or "Computer wins!".
    """
    # Each key beats the move stored as its value.
    beats = {"rock": "scissors", "paper": "rock", "scissors": "paper"}
    if user_choice == computer_choice:
        return "It's a tie!"
    if beats.get(user_choice) == computer_choice:
        return "You win!"
    return "Computer wins!"
def main():
    """Run an interactive rock-paper-scissors loop until the user quits.

    Prompts on stdin; 'q' exits immediately, invalid moves re-prompt, and
    after each round the user may decline to play again.
    """
    while True:
        user_input = input("Enter your choice (rock, paper, or scissors), or 'q' to quit: ").lower()
        if user_input == 'q':
            print("Thanks for playing!")
            break
        elif user_input not in ['rock', 'paper', 'scissors']:
            print("Invalid choice. Please enter 'rock', 'paper', or 'scissors'.")
            continue
        # Computer picks uniformly at random from the three legal moves.
        computer_choice = random.choice(['rock', 'paper', 'scissors'])
        print(f"Computer chooses: {computer_choice}")
        result = determine_winner(user_input, computer_choice)
        print(result)
        # Anything other than an exact 'yes' ends the session.
        play_again = input("Do you want to play again? (yes/no): ").lower()
        if play_again != 'yes':
            print("Thanks for playing!")
            break
import { Component, Input } from '@angular/core';
import { SocialLink } from '../model/social-link.model';
@Component({
    selector: 'oc-social-links',
    templateUrl: './oc-social-links.component.html',
    styleUrls: ['./oc-social-links.component.css'],
})
/** Presentational component that renders a list of social links. */
export class OcSocialLinksComponent {
    /** Social-link data passed in by the parent; rendered by the template. */
    @Input() socialLinks: SocialLink[];
}
|
import * as React from "react";
import { SvgIconProps } from "@material-ui/core/SvgIcon";
import { createSvgIcon } from "@material-ui/core";
// PDF-file glyph wrapped as a Material-UI SvgIcon (raw SVG path data).
const AdobePdf = createSvgIcon(
  <path d="M369.9 97.9L286 14C277 5 264.8-.1 252.1-.1H48C21.5 0 0 21.5 0 48v416c0 26.5 21.5 48 48 48h288c26.5 0 48-21.5 48-48V131.9c0-12.7-5.1-25-14.1-34zm-22.6 22.7c2.1 2.1 3.5 4.6 4.2 7.4H256V32.5c2.8.7 5.3 2.1 7.4 4.2l83.9 83.9zM336 480H48c-8.8 0-16-7.2-16-16V48c0-8.8 7.2-16 16-16h176v104c0 13.3 10.7 24 24 24h104v304c0 8.8-7.2 16-16 16zm-22-171.2c-13.5-13.3-55-9.2-73.7-6.7-21.2-12.8-35.2-30.4-45.1-56.6 4.3-18 12-47.2 6.4-64.9-4.4-28.1-39.7-24.7-44.6-6.8-5 18.3-.3 44.4 8.4 77.8-11.9 28.4-29.7 66.9-42.1 88.6-20.8 10.7-54.1 29.3-58.8 52.4-3.5 16.8 22.9 39.4 53.1 6.4 9.1-9.9 19.3-24.8 31.3-45.5 26.7-8.8 56.1-19.8 82-24 21.9 12 47.6 19.9 64.6 19.9 27.7.1 28.9-30.2 18.5-40.6zm-229.2 89c5.9-15.9 28.6-34.4 35.5-40.8-22.1 35.3-35.5 41.5-35.5 40.8zM180 175.5c8.7 0 7.8 37.5 2.1 47.6-5.2-16.3-5-47.6-2.1-47.6zm-28.4 159.3c11.3-19.8 21-43.2 28.8-63.7 9.7 17.7 22.1 31.7 35.1 41.5-24.3 4.7-45.4 15.1-63.9 22.2zm153.4-5.9s-5.8 7-43.5-9.1c41-3 47.7 6.4 43.5 9.1z" />,
  "AdobePdf"
);
// Wrapper that pins the viewBox to the path's native 384x512 coordinate
// system while forwarding all other SvgIcon props.
const AdobePdfLogo = (props: SvgIconProps): JSX.Element => (
  <AdobePdf viewBox="0 0 384 512" {...props} />
);
export default AdobePdfLogo;
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, <NAME>PORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" super-GLUE processors and helpers """
import logging
import os
import re
from ...file_utils import is_tf_available
from .utils import DataProcessor, InputExample, InputFeatures, InputFeatures_w, COPAInputExample, WSCInputExample, WiCInputExample
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
def superglue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=512,
    task=None,
    label_list=None,
    output_mode=None,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
    model_type = 'bert-base-uncased'
):
    """
    Loads a data file into a list of ``InputFeatures``
    Args:
        examples: List of ``InputExamples``, ``COPAInputExamples``, ``WiCInputExamples``, ``WSCInputExamples``
            or ``tf.data.Dataset`` containing the examples.
        tokenizer: Instance of a tokenizer that will tokenize the examples
        max_length: Maximum example length
        task: SuperGLUE task
        label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
        output_mode: String indicating the output mode. Either ``regression`` or ``classification``
        pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
        pad_token: Padding token
        pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
        mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
            and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
            actual values)
    Returns:
        If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
        containing the task-specific features. If the input is a list of ``InputExamples``, will return
        a list of task-specific ``InputFeatures`` which can be fed to the model.
    """
    is_tf_dataset = False
    if is_tf_available() and isinstance(examples, tf.data.Dataset):
        is_tf_dataset = True
    if task is not None:
        # Derive processor / labels / output mode from the task name when not given.
        processor = superglue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info("Using label list %s for task %s" % (label_list, task))
        if output_mode is None:
            output_mode = superglue_output_modes[task]
            logger.info("Using output mode %s for task %s" % (output_mode, task))
    # Map label string -> integer id used by the "classification" output mode.
    label_map = {label: i for i, label in enumerate(label_list)}
    # Featurizer for plain sentence-pair tasks (boolq / rte2 / cb): relies on
    # the tokenizer's own encode_plus for special tokens and truncation.
    def featurize_example_standard(examples = examples, processor = processor, tokenizer = tokenizer,
                                   mask_padding_with_zero = mask_padding_with_zero):
        features = []
        for (ex_index, example) in enumerate(examples):
            len_examples = 0
            if is_tf_dataset:
                example = processor.get_example_from_tensor_dict(example)
                example = processor.tfds_map(example)
                len_examples = tf.data.experimental.cardinality(examples)
            else:
                len_examples = len(examples)
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len_examples))
            inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length)
            input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
                token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
                token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
            assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
            assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
                len(attention_mask), max_length)
            assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
                len(token_type_ids), max_length)
            if output_mode == "classification":
                label = label_map[example.label]
            elif output_mode == "regression":
                label = float(example.label)
            else:
                raise KeyError(output_mode)
            # Log the first few examples for manual inspection.
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s" % (example.guid))
                logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
                logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
                logger.info("label: %s (id = %d)" % (example.label, label))
            features.append(
                InputFeatures(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    token_type_ids=token_type_ids,
                    label=label
                )
            )
        # When the input was a tf.data.Dataset, hand back a dataset of the
        # same feature dicts instead of a Python list.
        if is_tf_available() and is_tf_dataset:
            def gen():
                for ex in features:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )
            return tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                (
                    {
                        "input_ids": tf.TensorShape([None]),
                        "attention_mask": tf.TensorShape([None]),
                        "token_type_ids": tf.TensorShape([None]),
                    },
                    tf.TensorShape([]),
                ),
            )
        return features
    # Featurizer for COPA: each example yields a PAIR of feature objects, one
    # per candidate choice, built from (choice, question, premise).
    def featurize_example_copa(examples = examples, processor = processor, tokenizer = tokenizer,
                               mask_padding_with_zero = mask_padding_with_zero):
        # choice, question, premise
        def _featurize_example(text_a, text_b, text_c, guid, cur_label=None, print_example=False,
                               max_length = max_length, model_type = model_type,
                               mask_padding_with_zero = mask_padding_with_zero):
            '''
            tokenize choice, question and premise. Choice and question have segment ids of 0, premise has segment ids of 1
            :inputs:
            text_a: first or second choice
            test_b: question
            text_c: premise of the question
            guid: example id
            :params:
            max_length
            model_type
            mask_padding_with_zero: if mask padding with 0
            '''
            tokens_a = tokenizer.tokenize(text_a)
            tokens_b = tokenizer.tokenize(text_b)
            tokens_c = tokenizer.tokenize(text_c)
            # RoBERTa inserts doubled separators, hence the larger budget.
            special_tokens_count = 6 if "roberta" in model_type else 4
            _truncate_seq_pair(tokens_a, tokens_c, max_length - special_tokens_count - len(tokens_b))
            tokens = tokens_a + [tokenizer.sep_token] # choice
            if "roberta" in model_type:
                tokens += [tokenizer.sep_token]
            segment_ids = [0] * len(tokens)
            tokens += tokens_b + [tokenizer.sep_token] # question
            segment_ids += [0] * (len(tokens_b) + 1)
            if "roberta" in model_type:
                tokens += [tokenizer.sep_token]
                segment_ids += [0]
            tokens += tokens_c + [tokenizer.sep_token]
            segment_ids += [1] * (len(tokens_c) + 1) # premise
            tokens = [tokenizer.cls_token] + tokens
            segment_ids = [0] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            padding_length = max_length - len(input_ids)
            if pad_on_left:
                input_ids = tokenizer.convert_tokens_to_ids([tokenizer.pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([0] * padding_length) + segment_ids
            else:
                input_ids = input_ids + tokenizer.convert_tokens_to_ids([tokenizer.pad_token] * padding_length)
                input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
                segment_ids = segment_ids + ([0] * padding_length)
            label_id = float(cur_label) if cur_label is not None else None
            assert len(input_ids) == max_length
            assert len(input_mask) == max_length
            assert len(segment_ids) == max_length
            if print_example:
                logging.info("*** Example (COPA) ***")
                logging.info("guid: %s" % (guid))
                logging.info("tokens: %s" % " ".join(
                    [str(x) for x in tokens]))
                logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logging.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                logging.info("label: %s (id = %s)" % (str(cur_label), str(label_id)))
            return InputFeatures(input_ids=input_ids,
                                 attention_mask=input_mask,
                                 token_type_ids=segment_ids,
                                 label=label_id)
        features = []
        for (ex_index, example) in enumerate(examples):
            len_examples = 0
            if is_tf_dataset:
                example = processor.get_example_from_tensor_dict(example)
                example = processor.tfds_map(example)
                len_examples = tf.data.experimental.cardinality(examples)
            else:
                len_examples = len(examples)
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len_examples))
            # NOTE(review): the per-choice binary label is int(label == '0') for
            # choice1 and int(label == '1') for choice2 -- confirm the intended
            # polarity against the training loss before changing anything here.
            features.append([_featurize_example(example.text_a,
                                                example.question,
                                                example.text_pre,
                                                cur_label=int(example.label == '0'),
                                                print_example=True,
                                                guid = example.guid),
                             _featurize_example(example.text_b,
                                                example.question,
                                                example.text_pre,
                                                cur_label=int(example.label == '1'),
                                                print_example=True,
                                                guid = example.guid) ])
        # print('processed features', len(features))
        return features
    # Featurizer for WSC: single sentence plus binary masks marking the two
    # referent spans (span word indices are mapped to BPE token positions).
    def featurize_example_wsc(examples = examples, processor = processor, tokenizer = tokenizer, mask_padding_with_zero = mask_padding_with_zero):
        def _featurize_example(example, max_seq_length, tokenizer = tokenizer,
                               label_map=label_map, model_type=model_type, print_example=False, mask_padding_with_zero = mask_padding_with_zero):
            """Tokenize example for WSC.
            Args:
                tokenizer: either a BertTokenizer or a RobertaTokenizer
                max_seq_length: int. The maximum allowed number of bpe units for the input.
                label_map: dictionary. A map that returns the label_id given the label string.
                model_type: string. Either `bert` or `roberta`. For `roberta` there will be an extra sep token in
                    the middle.
                print_example: bool. If set to True, print the tokenization information for current instance.
            """
            tokens_a = tokenizer.tokenize(example.text)
            token_word_ids = _get_word_ids(tokens_a, model_type)
            # offset=1 accounts for the [CLS] token prepended below.
            span_1_tok_ids = _get_token_ids(token_word_ids, example.span_1[0], offset=1)
            span_2_tok_ids = _get_token_ids(token_word_ids, example.span_2[0], offset=1)
            # NOTE(review): spans are computed before truncation; a span past the
            # truncation point would index into padding -- confirm data fits.
            special_tokens_count = 2
            if len(tokens_a) > max_seq_length - special_tokens_count:
                tokens_a = tokens_a[:max_seq_length - special_tokens_count]
            tokens = tokens_a + [tokenizer.sep_token]
            segment_ids = [0] * len(tokens)
            tokens = [tokenizer.cls_token] + tokens
            segment_ids = [0] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = tokenizer.convert_tokens_to_ids([tokenizer.pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = [0] * padding_length + segment_ids
            else:
                input_ids = input_ids + tokenizer.convert_tokens_to_ids([tokenizer.pad_token] * padding_length)
                input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
                segment_ids = segment_ids + [0] * padding_length
            # Binary masks over token positions for each referent span; shifted
            # by padding_length when padding is on the left.
            span_1_mask = [0] * len(input_ids)
            for k in span_1_tok_ids:
                if pad_on_left:
                    span_1_mask[k+padding_length] = 1
                else:
                    span_1_mask[k] = 1
            span_2_mask = [0] * len(input_ids)
            for k in span_2_tok_ids:
                if pad_on_left:
                    span_2_mask[k+padding_length] = 1
                else:
                    span_2_mask[k] = 1
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(span_1_mask) == max_seq_length
            assert len(span_2_mask) == max_seq_length
            if output_mode == "classification":
                label_id = label_map[example.label]
            elif output_mode == "regression":
                label_id = float(example.label)
            else:
                raise KeyError(output_mode)
            if print_example:
                logging.info("*** Example (%s) ***" % task)
                logging.info("guid: %s" % (example.guid))
                logging.info("tokens: %s" % " ".join(
                    [str(x) for x in tokens]))
                logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logging.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                logging.info("label: %s (id = %s)" % (str(example.label), str(label_id)))
            return InputFeatures_w(input_ids=input_ids,
                                   input_mask=input_mask,
                                   segment_ids=segment_ids,
                                   span_1_mask=span_1_mask,
                                   span_1_text=tokenizer.convert_tokens_to_ids(example.span_1[1]),
                                   span_2_mask=span_2_mask,
                                   span_2_text=tokenizer.convert_tokens_to_ids(example.span_2[1]),
                                   label=label_id)
        features = []
        for (ex_index, example) in enumerate(examples):
            len_examples = 0
            if is_tf_dataset:
                example = processor.get_example_from_tensor_dict(example)
                example = processor.tfds_map(example)
                len_examples = tf.data.experimental.cardinality(examples)
            else:
                len_examples = len(examples)
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len_examples))
            features.append(_featurize_example(example, tokenizer = tokenizer, max_seq_length= max_length,
                                               label_map=label_map, model_type=model_type, print_example=True))
        return features
    # Featurizer for WiC: two sentences, with masks marking the occurrence of
    # the target word in each sentence.
    def featurize_example_wic(examples = examples, processor = processor, tokenizer = tokenizer, mask_padding_with_zero = mask_padding_with_zero):
        def _featurize_example(example, max_seq_length, tokenizer = tokenizer,
                               label_map=label_map, model_type=model_type, print_example=False, mask_padding_with_zero = mask_padding_with_zero):
            """Tokenize example for WiC.
            Args:
                tokenizer: either a BertTokenizer or a RobertaTokenizer
                max_seq_length: int. The maximum allowed number of bpe units for the input.
                label_map: dictionary. A map that returns the label_id given the label string.
                model_type: string. Either `bert` or `roberta`. For `roberta` there will be an extra sep token in
                    the middle.
                print_example: bool. If set to True, print the tokenization information for current instance.
            """
            tokens_a = tokenizer.tokenize(example.sent1)
            # Character offsets -> word index of the target word in sentence 1.
            index1, word1 = _digits_to_index(tokenizer, example.sent1, example.idxs1)
            token_word_ids_a = _get_word_ids(tokens_a, model_type)
            sent_1_tok_ids = _get_token_ids(token_word_ids_a, index1, offset=1)
            tokens_b = tokenizer.tokenize(example.sent2)
            index2, word2 = _digits_to_index(tokenizer, example.sent2, example.idxs2)
            token_word_ids_b = _get_word_ids(tokens_b, model_type)
            sent_2_tok_ids = _get_token_ids(token_word_ids_b, index2, offset=1)
            special_tokens_count = 5 if "roberta" in model_type else 3
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
            tokens = tokens_a + [tokenizer.sep_token] # +1
            if "roberta" in model_type:
                tokens += [tokenizer.sep_token] # (+1)
            segment_ids = [0] * len(tokens)
            # Length of the sentence-1 segment; used to shift span-2 positions.
            sent1_len = len(tokens)
            tokens += tokens_b + [tokenizer.sep_token] # +1
            segment_ids += [1] * (len(tokens_b) + 1)
            if "roberta" in model_type:
                tokens += [tokenizer.sep_token] # (+1)
                segment_ids += [1]
            tokens = [tokenizer.cls_token] + tokens # +1
            segment_ids = [0] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = tokenizer.convert_tokens_to_ids([tokenizer.pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = [0] * padding_length + segment_ids
            else:
                input_ids = input_ids + tokenizer.convert_tokens_to_ids([tokenizer.pad_token] * padding_length)
                input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
                segment_ids = segment_ids + [0] * padding_length
            span_1_mask = [0] * len(input_ids)
            for k in sent_1_tok_ids:
                if pad_on_left:
                    span_1_mask[k+padding_length] = 1
                else:
                    span_1_mask[k] = 1
            span_2_mask = [0] * len(input_ids)
            for k in sent_2_tok_ids:
                if pad_on_left:
                    span_2_mask[k + padding_length + sent1_len] = 1
                else:
                    span_2_mask[k + sent1_len] = 1
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(span_1_mask) == max_seq_length
            assert len(span_2_mask) == max_seq_length
            if output_mode == "classification":
                label_id = label_map[example.label]
            elif output_mode == "regression":
                label_id = float(example.label)
            else:
                raise KeyError(output_mode)
            if print_example:
                logging.info("*** Example (%s) ***" % task)
                logging.info("guid: %s" % (example.guid))
                logging.info("tokens: %s" % " ".join(
                    [str(x) for x in tokens]))
                logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
                logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logging.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                logging.info("label: %s (id = %s)" % (str(example.label), str(label_id)))
            return InputFeatures_w(input_ids=input_ids,
                                   input_mask=input_mask,
                                   segment_ids=segment_ids,
                                   span_1_mask=span_1_mask,
                                   span_1_text=tokenizer.convert_tokens_to_ids(word1),
                                   span_2_mask=span_2_mask,
                                   span_2_text=tokenizer.convert_tokens_to_ids(word2),
                                   label=label_id)
        features = []
        for (ex_index, example) in enumerate(examples):
            len_examples = 0
            if is_tf_dataset:
                example = processor.get_example_from_tensor_dict(example)
                example = processor.tfds_map(example)
                len_examples = tf.data.experimental.cardinality(examples)
            else:
                len_examples = len(examples)
            if ex_index % 10000 == 0:
                logger.info("Writing example %d/%d" % (ex_index, len_examples))
            features.append(_featurize_example(example, tokenizer = tokenizer, max_seq_length=max_length,
                                               label_map=label_map, model_type=model_type, print_example=True))
        return features
    # Dispatch on task name. Note: an unknown task falls through and returns None.
    if task in ['boolq', 'rte2', 'cb']:
        return featurize_example_standard(examples, processor, tokenizer)
    elif task == 'copa':
        return featurize_example_copa(examples, processor, tokenizer) # return a tuple
    elif task == 'wsc':
        return featurize_example_wsc(examples, processor, tokenizer)
    elif task == 'wic':
        return featurize_example_wic(examples, processor, tokenizer)
######### HELPER FUNCS #########
# credit: https://github.com/IBM/superglue-mtl/blob/1eb3e581c0ef3b4c261e0256ec26116d2b657c40/data_utils.py#L720
#### Helper func for COPA #####
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
#### helper func for wic #####
# Given the beginning and the ending digit of the word in a sentenece, return
# the index of the word in the sentence and the corresponding word.
def _digits_to_index(tokenizer, sent, idxs):
start, end = idxs
word = sent[start:end]
words = sent.split(' ')
#words = tokenizer.tokenize(sent)
for i, ind in enumerate(words):
if word in ind:
return i, word
#### Helper funcs for WSC #####
# given the original span of the word in original string,
# these two funcs are constructed so that the span of target word
# in encoded string is returned.
def _get_token_ids(token_word_ids, span_word_id, offset=1):
"""Retrieve token ids based on word ids.
Args:
token_word_ids: the list of word ids for token.
span_word_id: int. the word id in the original string.
offset: int. if the tokenized sequence is prepended with special token, this offset will be set to
the number of special tokens (for example, if [CLS] is added, then offset=1).
For example, the token word ids can be:
['ir', 'an', 'Ġand', 'Ġaf', 'ghan', 'istan', 'Ġspeak', 'Ġthe', 'Ġsame', 'Ġlanguage', 'Ġ.']
And the original sentence is "iran and afghanistan speak the same language ."
Suppose the span_word_id is 2 (afghanistan), then the token id is [3, 4, 5]
"""
results = []
for ix, word_id in enumerate(token_word_ids):
if word_id == span_word_id:
results.append(ix + offset)
elif word_id > span_word_id:
break
return results
def _get_word_ids(tokens, model_type="bert"):
"""Given the BPE split results, mark each token with its original word ids.
Args:
tokens: a list of BPE units
For example, if original sentnece is `iran and afghanistan speak the same language .`, then the roberta
tokens will be:
['ir', 'an', 'Ġand', 'Ġaf', 'ghan', 'istan', 'Ġspeak', 'Ġthe', 'Ġsame', 'Ġlanguage', 'Ġ.']
The word ids will be:
[0, 0, 1, 2, 2, 2, 3, 4, 5, 6, 7]
Note: this method assume the original sentence is split by one space and is already tokenized.
"""
word_ids = []
for tok in tokens:
if len(word_ids) == 0:
word_ids.append(0)
continue
if "roberta" in model_type:
if tok[0] != "Ġ":
word_ids.append(word_ids[-1])
else:
word_ids.append(word_ids[-1] + 1)
else:
if tok[:1] == "##":
word_ids.append(word_ids[-1])
else:
word_ids.append(word_ids[-1] + 1)
return word_ids
####################################
############ SuperGlue #############
####################################
class BoolQProcessor(DataProcessor):
    """Processor for the BoolQ data set (Super-GLUE version)."""
    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["question"].numpy().decode("utf-8"),
            tensor_dict["passage"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )
    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "train.jsonl")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "val.jsonl")), "dev")
    def get_labels(self):
        """See base class."""
        return ['True', 'False']
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            # Assumes each parsed line is [question, passage, idx, ..., label]
            # with the label last -- TODO confirm against _read_json_to_list.
            guid = "%s-%s" % (set_type, line[2])
            text_a = line[0]
            text_b = line[1]
            # BoolQ labels are booleans; str() yields 'True'/'False' to match get_labels().
            label = str(line[-1])
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class CbProcessor(DataProcessor):
    """Processor for the CB data set (Super-GLUE version)."""
    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["premise"].numpy().decode("utf-8"),
            tensor_dict["hypothesis"].numpy().decode("utf-8"),
            tensor_dict["label"].numpy(),
        )
    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "train.jsonl")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "val.jsonl")), "dev")
    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            # Assumes each parsed line is [premise, hypothesis, label, ..., idx]
            # with the idx last -- TODO confirm against _read_json_to_list.
            guid = "%s-%s" % (set_type, line[-1])
            text_a = line[0]
            text_b = line[1]
            label = line[2]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class RteProcessor_superglue(DataProcessor):
    """Processor for the RTE data set (Super-GLUE version). Similar to Glue task rte, but merged with more data"""
    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return InputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["premise"].numpy().decode("utf-8"),
            tensor_dict["hypothesis"].numpy().decode("utf-8"),
            tensor_dict["label"].numpy(),
        )
    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "train.jsonl")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "val.jsonl")), "dev")
    def get_labels(self):
        """See base class."""
        return ["entailment", "not_entailment"]
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            # Same flat layout as CbProcessor: [premise, hypothesis, label, ..., idx].
            guid = "%s-%s" % (set_type, line[-1])
            text_a = line[0]
            text_b = line[1]
            label = line[2]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class CopaProcessor(DataProcessor):
    """Processor for the COPA data set (Super-GLUE version).
    Bert model needs to be modified to take a premise with a question to select if choice 1 or choice 2 is favored.
    """
    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return COPAInputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["premise"].numpy().decode("utf-8"),
            tensor_dict["choice1"].numpy().decode("utf-8"),
            tensor_dict["choice2"].numpy().decode("utf-8"),
            tensor_dict["question"].numpy().decode("utf-8"),
            str(tensor_dict["label"].numpy()),
        )
    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "train.jsonl")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "val.jsonl")), "dev")
    def get_labels(self):
        """See base class."""
        return ["0", "1"]
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            # Assumes [premise, choice1, choice2, question-type, label, ...];
            # guid uses line[-1] -- presumably an idx field at the end, TODO confirm.
            guid = "%s-%s" % (set_type, line[-1])
            text_pre = line[0]
            text_a = line[1]
            text_b = line[2]
            # Rewrite the 'cause'/'effect' marker as a natural-language question.
            question = "What was the cause of this?" if line[3] == 'cause' else "What happened as a result?"
            label = str(line[4])
            examples.append(COPAInputExample(
                guid=guid, text_pre = text_pre,
                text_a=text_a, text_b=text_b,
                question = question, label=label))
        return examples
class WscProcessor(DataProcessor):
    """Processor for the WSC (Winograd Schema Challenge) data set (Super-GLUE version)."""
    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class."""
        return WSCInputExample(
            tensor_dict["target"]["idx"].numpy(),
            tensor_dict["text"].numpy().decode("utf-8"),
            (tensor_dict["target"]["span1_index"].numpy(), tensor_dict["target"]["span1_text"].numpy().decode("utf-8")),
            (tensor_dict["target"]["span2_index"].numpy(), tensor_dict["target"]["span2_text"].numpy().decode("utf-8")),
            str(tensor_dict["label"].numpy()),
        )
    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_dict(os.path.join(data_dir, "train.jsonl")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_dict(os.path.join(data_dir, "val.jsonl")), "dev")
    def get_labels(self):
        """See base class."""
        return ["True", "False"]
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for line in lines: # inputs now are dict
            guid = "%s-%s" % (set_type, line['idx'])
            text = line['text']
            label = str(line['label'])
            # Each span is (word index in the space-split text, span surface text).
            span_1 = (line['target']['span1_index'], line['target']['span1_text'])
            span_2 = (line['target']['span2_index'], line['target']['span2_text'])
            examples.append(WSCInputExample(
                guid=guid, text = text,
                span_1=span_1, span_2=span_2,
                label=label))
        return examples
class WicProcessor(DataProcessor):
    """Processor for the WiC (Word-in-Context) data set (Super-GLUE version)."""
    def get_example_from_tensor_dict(self, tensor_dict):
        """See base class.

        Bug fix: this previously constructed a ``WSCInputExample`` while
        passing WiC-shaped arguments (sentence1/sentence2 plus character
        offsets). It must build a ``WiCInputExample`` so the fields line up
        with ``_create_examples`` and the WiC featurizer
        (sent1/sent2/idxs1/idxs2).
        """
        return WiCInputExample(
            tensor_dict["idx"].numpy(),
            tensor_dict["sentence1"].numpy().decode("utf-8"),
            tensor_dict["sentence2"].numpy().decode("utf-8"),
            (tensor_dict['start1'].numpy(), tensor_dict["end1"].numpy()),
            (tensor_dict['start2'].numpy(), tensor_dict["end2"].numpy()),
            str(tensor_dict["label"].numpy()),
        )
    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "train.jsonl")), "train")
    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_json_to_list(os.path.join(data_dir, "val.jsonl")), "dev")
    def get_labels(self):
        """See base class."""
        return ["True", "False"]
    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for line in lines:
            # Assumes each parsed line is a flat list:
            # [word?, sentence1, sentence2, idx, label, start1, start2, end1, end2]
            # -- TODO confirm against _read_json_to_list ordering.
            guid = "%s-%s" % (set_type, line[3])
            sent1 = line[1]
            sent2 = line[2]
            label = str(line[4])
            # Character offsets (start, end) of the target word in each sentence.
            idxs1 = (line[5], line[7])
            idxs2 = (line[6], line[8])
            examples.append(WiCInputExample(
                guid = guid,
                sent1 = sent1, sent2 = sent2,
                idxs1 = idxs1, idxs2 = idxs2,
                label = label))
        return examples
# Number of target classes for each supported SuperGLUE task key.
superglue_tasks_num_labels = {
    "boolq": 2,
    "cb" : 3,
    "rte2" : 2,
    "copa" : 2,
    "wsc" : 2,
    "wic" : 2,
}
# Maps task key -> DataProcessor subclass that loads its examples.
superglue_processors = {
    "boolq": BoolQProcessor,
    "cb" : CbProcessor,
    "rte2" : RteProcessor_superglue,
    "copa" : CopaProcessor,
    "wsc" : WscProcessor,
    "wic" : WicProcessor,
}
# All supported tasks are treated as classification problems.
superglue_output_modes = {
    "boolq": 'classification',
    "cb" : 'classification',
    "rte2" : 'classification',
    "copa" : 'classification',
    "wsc" : 'classification',
    "wic" : 'classification',
}
|
<gh_stars>0
// Demo promise: the hard-coded `true` selects the resolve branch, so after a
// 5 s delay it resolves with the string 'Do Something'. Flip the condition to
// exercise the rejection path.
const doSomething = () => {
  return new Promise((resolve, reject) => {
    (true)
      ? setTimeout(() => resolve('Do Something'), 5000)
      : reject(new Error('Something was wrong'))
  });
};
//1. without error handling (an unhandled rejection if doSomething rejects)
const doAsync = async () => {
  const waiting = await doSomething();
  console.log(waiting, ' 1');
};
console.log('Before 1');
doAsync();
console.log('After 1');
//2. with error handling via try/catch around the awaited call
const bestFunction = async () => {
  try {
    const waiting = await doSomething();
    console.log(waiting, ' 2');
  } catch (error) {
    console.error(error);
  }
}
console.log('Before 2');
bestFunction();
console.log('After 2');
/*
To run this file with npm, add this script to package.json:
>>> "async": "node src/async/index.js"
and then run it from the console with:
>>> npm run async
*/
'use strict';
require('./logic');
require('../../scss/main.scss');
|
<html>
<head>
  <title>Insert a New Record in a Database Table</title>
</head>
<body>
  <h1>Insert a New Record in a Database Table</h1>
  <!-- POSTs name/age/email to insert_record.php, which performs the insert. -->
  <form action="insert_record.php" method="POST">
    <label for="name">Name:</label>
    <input type="text" name="name" id="name" />
    <label for="age">Age:</label>
    <input type="number" name="age" id="age" />
    <label for="email">Email:</label>
    <input type="email" name="email" id="email" />
    <input type="submit" value="Save" />
  </form>
</body>
</html>
<gh_stars>1-10
from django.http import HttpResponse
from django.template import Context, loader
from django.middleware.csrf import get_token
from models import Post, Category
def index(request):
    """Render the landing page listing every post."""
    context = Context({
        'all_posts': Post.objects.all(),
        'title' : 'All Posts'
    })
    rendered = loader.get_template('index.html').render(context)
    return HttpResponse(rendered)
def add_post(request):
    """Render the post-creation form with all categories and a CSRF token."""
    context = Context({
        'all_categories' : Category.objects.all(),
        'csrf_token' : get_token(request),
        'title' : 'Add Posts'
    })
    rendered = loader.get_template('add-post.html').render(context)
    return HttpResponse(rendered)
def process_data(input_file_path, output_file_path):
    """Convert tab-separated records into a 7-column, BED-like text file.

    For every line of ``input_file_path`` (split on tabs), writes columns
    0-2 unchanged, columns 3 and 4 joined with '==', column 5, and a strand
    flag derived from column 7: 'C' -> '-', anything else -> empty string.

    NOTE(review): column 7 must not be the last field on a line, or its
    trailing newline defeats the == 'C' comparison -- confirm input format.

    :param input_file_path: path of the tab-separated input file
    :param output_file_path: path the transformed lines are written to
    :raises IndexError: if a line has fewer than 8 tab-separated fields
    """
    with open(input_file_path, 'r') as input_file, open(output_file_path, 'w') as output_file:
        for line in input_file:
            data = line.split('\t')
            # 'C' marks the complement (minus) strand.
            strand = '-' if data[7] == 'C' else ''
            string2write = '%s\t%s\t%s\t%s==%s\t%s\t%s\n' % (
                data[0], data[1], data[2], data[3], data[4], data[5], strand)
            output_file.write(string2write)
    # The 'with' statement closes both files on exit; the original's explicit
    # close() calls on already-closed files were redundant no-ops and are removed.
# Example usage
# NOTE(review): runs at import time and requires 'input_data.txt' in the working
# directory; consider guarding with `if __name__ == '__main__':`.
process_data('input_data.txt', 'output_data.txt')
<reponame>bygui86/storage-poc
package com.rabbit.samples.storagepoc.solr.configs;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.experimental.FieldDefaults;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import springfox.documentation.builders.PathSelectors;
import springfox.documentation.spi.DocumentationType;
import springfox.documentation.spring.web.plugins.Docket;
import springfox.documentation.swagger2.annotations.EnableSwagger2;
import static springfox.documentation.builders.RequestHandlerSelectors.basePackage;
/**
 * Springfox Swagger 2 configuration. Exposes a single {@link Docket} that
 * documents every request handler under the package named by the
 * {@code swagger.package.name} property (default: {@code com.rabbit.samples}).
 */
@Slf4j
@FieldDefaults(level = AccessLevel.PRIVATE)
@Getter(value = AccessLevel.PROTECTED)
@Configuration
@EnableSwagger2
public class SwaggerConfig {
	// Base package to scan for documented handlers; overridable via
	// the 'swagger.package.name' application property.
	@Value("${swagger.package.name:com.rabbit.samples}")
	String swaggerPackageName;
	/**
	 * Swagger API documentation setup
	 *
	 * @return {@link Docket}
	 */
	@Bean
	public Docket api() {
		log.debug("Loading SWAGGER configurations: package {}", getSwaggerPackageName());
		return new Docket(DocumentationType.SWAGGER_2)
				.select()
				.apis(
						basePackage(getSwaggerPackageName())
				)
				.paths(
						PathSelectors.any()
				)
				.build();
	}
}
|
package javafx.scene.control;
import javafx.beans.property.Property;
import javafx.beans.property.ReadOnlyDoubleProperty;
import javafx.beans.property.ReadOnlyProperty;
import javafx.scene.Node;
import javafx.stage.Modality;
import javafx.stage.StageStyle;
import javafx.stage.Window;
import java.util.List;
/**
 * Package-private base class abstracting the window implementation that backs
 * a {@link Dialog}. Concrete subclasses supply the actual stage/window and
 * implement the abstract property accessors declared below.
 */
abstract class FXDialog {
    /**************************************************************************
     *
     * Static fields
     *
     **************************************************************************/
    /**************************************************************************
     *
     * Private fields
     *
     **************************************************************************/
    // NOTE(review): declared Object rather than Window -- presumably so that
    // non-Window owners can also be stored; confirm before tightening the type.
    protected Object owner;
    /**************************************************************************
     *
     * Constructors
     *
     **************************************************************************/
    protected FXDialog() {
        // pretty much a no-op, but we expect subclasses to call init(...) once
        // they have initialised their abstract property methods.
    }
    /**************************************************************************
     *
     * Public API
     *
     **************************************************************************/
    /**
     * Determines whether the dialog may be closed abnormally (e.g. via the
     * window's X button). Closing is permitted when the dialog has exactly one
     * button, or when any of its buttons carries cancel semantics
     * ({@code CANCEL_CLOSE} or {@code ButtonData.isCancelButton()}).
     *
     * @param dialog the dialog whose button types are inspected
     * @return {@code true} if the dialog is allowed to close
     */
    public boolean requestPermissionToClose(final Dialog<?> dialog) {
        // We only allow the dialog to be closed abnormally (i.e. via the X button)
        // when there is a cancel button in the dialog, or when there is only
        // one button in the dialog. In all other cases, we disable the ability
        // (as best we can) to close a dialog abnormally.
        boolean denyClose = true;
        // if we are here, the close was abnormal, so we must call close to
        // clean up, if we don't consume the event to cancel closing...
        DialogPane dialogPane = dialog.getDialogPane();
        if (dialogPane != null) {
            List<ButtonType> buttons = dialogPane.getButtonTypes();
            if (buttons.size() == 1) {
                denyClose = false;
            } else {
                // look for cancel button type
                for (ButtonType button : buttons) {
                    if (button == null) continue;
                    ButtonBar.ButtonData type = button.getButtonData();
                    if (type == null) continue;
                    // refer to the comments in close() - we support both CANCEL_CLOSE
                    // and isCancelButton() for allowing a dialog to close in
                    // abnormal circumstances. This allows for consistency with
                    // the ESC key being pressed (which triggers the cancel button
                    // being pressed)
                    if (type == ButtonBar.ButtonData.CANCEL_CLOSE || type.isCancelButton()) {
                        denyClose = false;
                        break;
                    }
                }
            }
        }
        return !denyClose;
    }
    /***************************************************************************
     *
     * Abstract API
     *
     **************************************************************************/
    public abstract void show();
    public abstract void showAndWait();
    // This should only be called from Dialog - it should never be called by
    // subclasses of FXDialog. Implementations should never call up to
    // Dialog.close().
    public abstract void close();
    public abstract void initOwner(Window owner);
    public abstract Window getOwner();
    public abstract void initModality(Modality modality);
    public abstract Modality getModality();
    public abstract ReadOnlyProperty<Boolean> showingProperty();
    public abstract Window getWindow();
    public abstract void sizeToScene();
    // --- x
    public abstract double getX();
    public abstract void setX(double x);
    public abstract ReadOnlyDoubleProperty xProperty();
    // --- y
    public abstract double getY();
    public abstract void setY(double y);
    public abstract ReadOnlyDoubleProperty yProperty();
    // --- resizable
    abstract Property<Boolean> resizableProperty();
    // --- focused
    abstract ReadOnlyProperty<Boolean> focusedProperty();
    // --- title
    abstract Property<String> titleProperty();
    // --- content
    public abstract void setDialogPane(DialogPane node);
    // --- root
    public abstract Node getRoot();
    // --- width
    /**
     * Property representing the width of the dialog.
     */
    abstract ReadOnlyDoubleProperty widthProperty();
    abstract void setWidth(double width);
    // --- height
    /**
     * Property representing the height of the dialog.
     */
    abstract ReadOnlyDoubleProperty heightProperty();
    abstract void setHeight(double height);
    // stage style
    abstract void initStyle(StageStyle style);
    abstract StageStyle getStyle();
    abstract double getSceneHeight();
    /***************************************************************************
     *
     * Implementation
     *
     **************************************************************************/
    /***************************************************************************
     *
     * Support Classes
     *
     **************************************************************************/
    /***************************************************************************
     *
     * Stylesheet Handling
     *
     **************************************************************************/
}
|
def detect_syntax_error(code_snippet):
    """Check a string of Python source for syntax errors.

    :param code_snippet: Python source code as a string
    :return: "No syntax errors found" when the snippet parses, a
        "SyntaxError: ..." message when it does not, or "Error: ..." for any
        other compilation problem (e.g. ValueError for embedded null bytes).
    """
    try:
        # compile() detects syntax errors WITHOUT executing the snippet.
        # The original used exec(), which actually *ran* the (potentially
        # untrusted) code -- a side-effect and security hazard, and wrong for
        # the function's stated purpose of syntax checking.
        compile(code_snippet, '<string>', 'exec')
        return "No syntax errors found"
    except SyntaxError as e:
        return f"SyntaxError: {e}"
    except Exception as e:
        return f"Error: {e}"
import {ELogLevel} from "./ELogLevel";
/**
 * A common interface for logging. Different implementations for browser and node are available.
 *
 * See:
 * 1. ConsoleLogger
 * 2. DefaultLogger
 */
export interface ILogger {
    /**
     * Log an error or an error message. Context can be any JSON object.
     *
     * @param {string | Error} message
     * @param context
     */
    error(message: string|Error, context?: any): void;
    /**
     * Log a warning-level error or message. Context can be any JSON object.
     *
     * @param {string | Error} message
     * @param context
     */
    warn(message: string|Error, context?: any): void;
    /**
     * Log an info-level error or message. Context can be any JSON object.
     *
     * @param {string | Error} message
     * @param context
     */
    info(message: string|Error, context?: any): void;
    /**
     * Log a debug-level error or message. Context can be any JSON object.
     *
     * @param {string | Error} message
     * @param context
     */
    debug(message: string|Error, context?: any): void;
    /**
     * Log an error or a message at the given log level. Context can be any JSON object.
     *
     * @param {ELogLevel} level
     * @param {string | Error} message
     * @param context
     */
    log(level: ELogLevel, message: string|Error, context?: any): void;
}
package io.github.yamporg.ifbhfix;
import com.buuz135.industrial.tile.block.BlackHoleTankBlock;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import net.minecraft.block.Block;
import net.minecraft.item.ItemStack;
import net.minecraft.nbt.NBTTagCompound;
import net.minecraftforge.common.capabilities.ICapabilityProvider;
/**
 * Replacement ItemBlock for the Industrial Foregoing Black Hole Tank that
 * reports this mod as the item's creator and attaches a fluid capability.
 */
public class BlockTankItem extends BlackHoleTankBlock.BlockTankItem {
    public BlockTankItem(Block block) {
        // BlackHoleTankBlock.BlockTankItem is a non-static inner class, so the
        // superclass constructor must be invoked through an enclosing
        // BlackHoleTankBlock instance (qualified super-constructor call).
        ((BlackHoleTankBlock) block).super(block);
    }
    @Nullable
    @Override
    public String getCreatorModId(@Nonnull ItemStack itemStack) {
        return IFBHFixMod.MOD_ID;
    }
    @Nullable
    @Override
    public ICapabilityProvider initCapabilities(ItemStack stack, @Nullable NBTTagCompound nbt) {
        // NOTE(review): FluidHandler is project-local; presumably exposes the
        // stack's stored fluid as a capability -- confirm its contract.
        return new FluidHandler(stack);
    }
}
|
<reponame>shin-kinoshita/dbflute-core
/*
* Copyright 2014-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.dbflute.logic.sql2entity.cmentity;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.Map;
import org.dbflute.helper.StringKeyMap;
import org.dbflute.logic.jdbc.metadata.basic.DfColumnExtractor;
import org.dbflute.logic.jdbc.metadata.info.DfColumnMeta;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DfCustomizeEntityMetaExtractor {
    // ===================================================================================
    //                                                                          Definition
    //                                                                          ==========
    private static final Logger _log = LoggerFactory.getLogger(DfCustomizeEntityMetaExtractor.class);
    public static interface DfForcedJavaNativeProvider {
        String provide(String columnName);
    }
    // ===================================================================================
    //                                                                                Main
    //                                                                                ====
    /**
     * Extract column meta information from the meta data of the result set. <br>
     * The map is keyed by column label (flexible, ordered by select-column order).
     * @param rs The result set of the outside-SQL execution. (NotNull)
     * @param sql The executed SQL, used in exception messages only. (NotNull)
     * @param forcedJavaNativeProvider The provider of forced Java native type per column. (NullAllowed)
     * @return The map of column meta info. (NotNull)
     * @throws SQLException When accessing the meta data fails.
     */
    public Map<String, DfColumnMeta> extractColumnMetaInfoMap(ResultSet rs, String sql, DfForcedJavaNativeProvider forcedJavaNativeProvider)
            throws SQLException {
        final Map<String, DfColumnMeta> columnMetaInfoMap = StringKeyMap.createAsFlexibleOrdered();
        final ResultSetMetaData md = rs.getMetaData();
        for (int i = 1; i <= md.getColumnCount(); i++) {
            final DfColumnMeta columnMeta = new DfColumnMeta();
            String sql2EntityRelatedTableName = null;
            try {
                sql2EntityRelatedTableName = md.getTableName(i);
            } catch (SQLException continued) {
                // because this table name is not required, basically only for classification
                String msg = "ResultSetMetaData.getTableName(" + i + ") threw the exception: " + continued.getMessage();
                _log.info(msg);
            }
            columnMeta.setSql2EntityRelatedTableName(sql2EntityRelatedTableName);
            String columnName = md.getColumnLabel(i);
            final String relatedColumnName = md.getColumnName(i);
            columnMeta.setSql2EntityRelatedColumnName(relatedColumnName);
            // fall back to the physical column name when the label is missing
            if (columnName == null || columnName.trim().length() == 0) {
                columnName = relatedColumnName;
            }
            if (columnName == null || columnName.trim().length() == 0) {
                final String ln = ln();
                String msg = "The columnName is invalid: columnName=" + columnName + ln;
                msg = msg + "ResultSetMetaData returned invalid value." + ln;
                msg = msg + "sql=" + sql;
                throw new IllegalStateException(msg);
            }
            columnMeta.setColumnName(columnName);
            final int columnType = md.getColumnType(i);
            columnMeta.setJdbcDefValue(columnType);
            final String columnTypeName = md.getColumnTypeName(i);
            columnMeta.setDbTypeName(columnTypeName);
            int columnSize = md.getPrecision(i);
            if (!DfColumnExtractor.isColumnSizeValid(columnSize)) {
                columnSize = md.getColumnDisplaySize(i); // e.g. sum(COLUMN)
            }
            columnMeta.setColumnSize(columnSize);
            final int scale = md.getScale(i);
            columnMeta.setDecimalDigits(scale);
            if (forcedJavaNativeProvider != null) {
                final String sql2entityForcedJavaNative = forcedJavaNativeProvider.provide(columnName);
                columnMeta.setSql2EntityForcedJavaNative(sql2entityForcedJavaNative);
            }
            // not use meta data because it might be not accuracy
            // and it is unneeded in outside-SQL first
            // but only used as optional determination for Scala
            // so you can specify not-null mark at select column comment e.g. -- // *Member Name
            // (see DfCustomizeEntityInfo#acceptSelectColumnComment())
            //try {
            //    // basically it is unneeded in outside-SQL and might be not accuracy
            //    // but get it here just in case (use-or-not depends on Sql2Entity handling)
            //    final int nullable = md.isNullable(i);
            //    if (ResultSetMetaData.columnNoNulls == nullable) {
            //        columnMeta.setRequired(true);
            //    }
            //} catch (SQLException continued) {
            //    // because this is added after production so for compatible just in case
            //    String msg = "ResultSetMetaData.isNullable(" + i + ") threw the exception: " + continued.getMessage();
            //    _log.info(msg);
            //}
            // column comment is not set here (no comment on meta data)
            // if select column comment is specified, comment will be set later
            columnMetaInfoMap.put(columnName, columnMeta);
        }
        return columnMetaInfoMap;
    }
    // ===================================================================================
    //                                                                      General Helper
    //                                                                      ==============
    protected String ln() {
        return "\n";
    }
}
|
def encode_message(message, encoding):
    """Encode ``message`` by mapping each character through ``encoding``.

    :param message: the text to encode
    :param encoding: mapping from character to a sequence of patterns; only
        the first pattern (``encoding[char][0]``) is used.
    :return: the concatenation of the first pattern of every character found
        in ``encoding``. Characters without an entry are silently skipped
        (behavior preserved from the original -- callers may rely on it).
    """
    # str.join avoids the quadratic cost of repeated '+=' string concatenation.
    return ''.join(encoding[char][0] for char in message if char in encoding)
<reponame>hueyjj/Skadoosh<gh_stars>1-10
import { combineReducers } from 'redux';
import drawer from "../reducers/drawer";
import auth from "../reducers/auth";
import api from "../reducers/api";
import course from "../reducers/course";
import review from "../reducers/review";
import diagram from "../reducers/diagram";
import settings from "../reducers/settings";
import profile from "../reducers/profile";
// Root reducer: combines every feature reducer into the single Redux state
// tree. Resulting state shape:
// { drawer, auth, api, course, review, diagram, settings, profile }
const rootReducer = combineReducers({
  drawer,
  auth,
  api,
  course,
  review,
  diagram,
  settings,
  profile,
});
export default rootReducer;
|
#!/bin/bash
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Pip installs the relevant dependencies and runs the Haiku tests on CPU
set -e
set -x

virtualenv -p python3 .
source bin/activate
python --version

# Install JAX.
python -m pip install -r requirements-jax.txt
python -c 'import jax; print(jax.__version__)'

# Run setup.py, this installs the python dependencies
python -m pip install .

# Python test dependencies.
python -m pip install -r requirements-test.txt

# CPU count on macos or linux
if [ "$(uname)" == "Darwin" ]; then
  N_JOBS=$(sysctl -n hw.logicalcpu)
else
  N_JOBS=$(grep -c ^processor /proc/cpuinfo)
fi

# Run tests using pytest.
TEST_OPTS=()
# Bug fix: the original used `-eq`, which is an *arithmetic* comparison. In an
# arithmetic context both "${INTEGRATION}" and "false" are evaluated as
# variable names (0 when unset), so the test was true for ANY value of
# INTEGRATION and the integration tests were always skipped. String equality
# (`==`) is what was intended.
if [[ "${INTEGRATION}" == "false" ]]; then
  TEST_OPTS+=("--ignore=haiku/_src/integration/")
fi
python -m pytest -n "${N_JOBS}" haiku "${TEST_OPTS[@]}"

# Test docs still build.
cd docs/
pip install -r requirements.txt
make coverage_check
make doctest
make html
|
#!/bin/bash
. ./librispeech_lib.sh
set -euo pipefail

# Model hyper-parameter overrides; intentionally empty for now.
cat <<EOF > model_params.txt
EOF

# Bug fix: the original omitted the line continuations after --model= and
# --logdir=..., so the last two flags were executed as separate (failing)
# shell commands instead of being passed to the trainer.
# NOTE(review): --model= is left without a value in the original; fill in the
# intended model class before running -- TODO confirm the model name.
bazel_ run //lingvo:trainer \
  --interactive \
  --mode=shell \
  --run_locally=cpu \
  --model= \
  --logdir=logs/ctc/ \
  --model_params_file_override=model_params.txt
|
<filename>open-sphere-plugins/stk-terrain/src/test/java/io/opensphere/stkterrain/transformer/STKTerrainImageProviderTest.java<gh_stars>10-100
package io.opensphere.stkterrain.transformer;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import java.nio.ByteBuffer;
import org.easymock.EasyMock;
import org.easymock.EasyMockSupport;
import org.junit.Test;
import io.opensphere.core.cache.matcher.ZYXKeyPropertyMatcher;
import io.opensphere.core.data.DataRegistry;
import io.opensphere.core.data.util.DataModelCategory;
import io.opensphere.core.data.util.PropertyValueReceiver;
import io.opensphere.core.data.util.QueryTracker;
import io.opensphere.core.data.util.SimpleQuery;
import io.opensphere.core.image.Image;
import io.opensphere.core.model.GeographicBoundingBox;
import io.opensphere.core.model.LatLonAlt;
import io.opensphere.core.model.ZYXImageKey;
import io.opensphere.core.util.collections.New;
import io.opensphere.stkterrain.model.mesh.QuantizedMesh;
import io.opensphere.stkterrain.model.mesh.QuantizedMeshTest;
import io.opensphere.stkterrain.util.Constants;
/**
* Unit test for {@link STKTerrainImageProvider}.
*/
public class STKTerrainImageProviderTest
{
    /**
     * The test image key.
     */
    private static final ZYXImageKey ourImageKey = new ZYXImageKey(1, 1, 1,
            new GeographicBoundingBox(LatLonAlt.createFromDegrees(0, -90), LatLonAlt.createFromDegrees(45, 0)));
    /**
     * The test server url.
     */
    private static final String ourServerUrl = "http://somehost/terrain";
    /**
     * The test tile set name.
     */
    private static final String ourTileSetName = "world";
    /**
     * Unit test for getting the image for a given tile.
     */
    @Test
    public void testGetImage()
    {
        EasyMockSupport support = new EasyMockSupport();
        QuantizedMesh mesh = new QuantizedMesh(ByteBuffer.wrap(QuantizedMeshTest.createMeshByes()));
        DataRegistry dataRegistry = createDataRegistry(support, mesh);
        support.replayAll();
        STKTerrainImageProvider provider = new STKTerrainImageProvider(dataRegistry, ourServerUrl, ourTileSetName);
        Image image = provider.getImage(ourImageKey);
        // the provider should return exactly the mesh delivered by the registry query
        assertEquals(mesh, image);
        support.verifyAll();
    }
    /**
     * Unit test for getting the image for a given tile when the registry query
     * returns no mesh (expects a null image rather than an exception).
     */
    @Test
    public void testGetImageNull()
    {
        EasyMockSupport support = new EasyMockSupport();
        DataRegistry dataRegistry = createDataRegistry(support, null);
        support.replayAll();
        STKTerrainImageProvider provider = new STKTerrainImageProvider(dataRegistry, ourServerUrl, ourTileSetName);
        Image image = provider.getImage(ourImageKey);
        assertNull(image);
        support.verifyAll();
    }
    /**
     * Creates an easy mocked {@link DataRegistry}.
     *
     * @param support Used to create the mock.
     * @param mesh The mesh to return in the query, or null if nothing to
     *            return.
     * @return The mocked {@link DataRegistry}.
     */
    private DataRegistry createDataRegistry(EasyMockSupport support, QuantizedMesh mesh)
    {
        DataRegistry dataRegistry = support.createMock(DataRegistry.class);
        QueryTracker tracker = support.createMock(QueryTracker.class);
        if (mesh == null)
        {
            // the provider is expected to check the tracker's exception on an empty result
            EasyMock.expect(tracker.getException()).andReturn(null);
        }
        EasyMock.expect(dataRegistry.performQuery(EasyMock.isA(SimpleQuery.class))).andAnswer(() -> queryAnswer(mesh, tracker));
        return dataRegistry;
    }
    /**
     * The answer for the mocked query call. Verifies the category, key matcher
     * and property receiver of the query built by the provider, then delivers
     * the mesh (when present) through the receiver.
     *
     * @param mesh The {@link QuantizedMesh} to return in the query, or null to
     *            return null.
     * @param tracker A mocked {@link QueryTracker} to return.
     * @return tracker.
     */
    @SuppressWarnings("unchecked")
    private QueryTracker queryAnswer(QuantizedMesh mesh, QueryTracker tracker)
    {
        SimpleQuery<QuantizedMesh> query = (SimpleQuery<QuantizedMesh>)EasyMock.getCurrentArguments()[0];
        DataModelCategory actual = query.getDataModelCategory();
        DataModelCategory expected = new DataModelCategory(ourServerUrl, QuantizedMesh.class.getName(), ourTileSetName);
        assertEquals(expected, actual);
        ZYXKeyPropertyMatcher matcher = (ZYXKeyPropertyMatcher)query.getParameters().get(0);
        assertEquals(ourImageKey, matcher.getImageKey());
        PropertyValueReceiver<QuantizedMesh> receiver = (PropertyValueReceiver<QuantizedMesh>)query.getPropertyValueReceivers()
                .get(0);
        assertEquals(Constants.QUANTIZED_MESH_PROPERTY_DESCRIPTOR, receiver.getPropertyDescriptor());
        if (mesh != null)
        {
            receiver.receive(New.list(mesh));
        }
        return tracker;
    }
}
|
package pl.project13.scala.words.other
/**
 * Mix-in that prints percentage progress for a long-running task of
 * `TotalItems` steps, printing (and notifying `onProgressedPercent`) only
 * when the integer percentage changes.
 *
 * NOTE(review): `TotalItems` must be non-zero before `notifyProgress` is
 * called, or the integer division below throws ArithmeticException -- and it
 * must be initialised before the trait body runs, since the trait's
 * initializer already calls `printProgress(0, 0, TotalItems)`.
 */
trait ProgressableAdverb {
  def TotalItems: Long
  def InitialPrint = true
  // percentage reported at the previous notification (avoids duplicate prints)
  private var lastPercent = 0L
  // number of items processed so far
  private var progressAt = 0L
  if (InitialPrint) printProgress(0, 0, TotalItems)
  def notifyProgress() {
    notifyProgress(1)
  }
  def notifyProgress(howMuchProgress: Long) {
    progressAt += howMuchProgress
    val currentPercent = progressAt * 100L / TotalItems
    if (currentPercent != lastPercent) {
      printProgress(currentPercent, progressAt, TotalItems)
      lastPercent = currentPercent
      onProgressedPercent(currentPercent)
    }
  }
  def printProgress(currentPercent: Long, progressAt: Long, total: Long) {
    println(currentPercent + "% (" + progressAt + "/" + total + ")")
  }
  /**
   * Extension point - will be called each time progress of 1% more has been made.
   * In theory should be called 100 times, but that is not guaranteed as you may do more progress than 100%!
   */
  def onProgressedPercent(currentPercent: Long) = ()
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.