text stringlengths 1 1.05M |
|---|
#!/bin/bash
# Derive the list of active POR (point of reference) entries
# for any given language, from the OPTD-maintained data file of POR:
# ../opentraveldata/optd_por_public.csv
#
# => optd_por_public_lang.csv
#
##
# Temporary path
TMP_DIR="/tmp/por"
##
# Path of the executable: set it to empty when this is the current directory.
EXEC_PATH=`dirname $0`
# Trick to get the actual full-path: the captured popd runs inside the
# backtick subshell (printing the absolute directory without touching the
# parent's stack); the second popd then restores the parent shell's cwd.
pushd ${EXEC_PATH} > /dev/null
EXEC_FULL_PATH=`popd`
popd > /dev/null
# Expand a leading '~' into the actual home directory.
EXEC_FULL_PATH=`echo ${EXEC_FULL_PATH} | sed -e 's|~|'${HOME}'|'`
#
CURRENT_DIR=`pwd`
if [ ${CURRENT_DIR} -ef ${EXEC_PATH} ]
then
	EXEC_PATH="."
	TMP_DIR="."
fi
# If the Geonames dump file is in the current directory, then the current
# directory is certainly intended to be the temporary directory.
# NOTE(review): GEO_RAW_FILENAME is never set in this script — presumably it
# comes from the caller's environment. When it is unset, '[ -f ]' is a
# one-argument test on the literal string '-f', which is always true, so
# TMP_DIR would then always be reset to '.' — confirm this is intended.
if [ -f ${GEO_RAW_FILENAME} ]
then
	TMP_DIR="."
fi
EXEC_PATH="${EXEC_PATH}/"
TMP_DIR="${TMP_DIR}/"
# Create the temporary directory when it is missing or not writable.
if [ ! -d ${TMP_DIR} -o ! -w ${TMP_DIR} ]
then
	\mkdir -p ${TMP_DIR}
fi
##
# Sanity check: that (executable) script should be located in the
# tools/ sub-directory of the OpenTravelData project Git clone
EXEC_DIR_NAME=`basename ${EXEC_FULL_PATH}`
if [ "${EXEC_DIR_NAME}" != "tools" ]
then
	echo
	echo "[$0:$LINENO] Inconsistency error: this script ($0) should be located in the refdata/tools/ sub-directory of the OpenTravelData project Git clone, but apparently is not. EXEC_FULL_PATH=\"${EXEC_FULL_PATH}\""
	echo
	# NOTE(review): 'exit -1' is non-standard; bash maps it to status 255.
	exit -1
fi
##
# Target language (default; may be overridden by the first argument below)
TARGET_LANG="en"
##
# OpenTravelData directory
OPTD_DIR=`dirname ${EXEC_FULL_PATH}`
OPTD_DIR="${OPTD_DIR}/"
##
# OPTD sub-directories
DATA_DIR=${OPTD_DIR}opentraveldata/
TOOLS_DIR=${OPTD_DIR}tools/
##
# Log level
LOG_LEVEL=3
##
# File of OPTD-maintained POR (points of reference)
OPTD_POR_BASEFILENAME=optd_por_public
OPTD_POR_FILENAME=${OPTD_POR_BASEFILENAME}.csv
OPTD_POR_FILE=${DATA_DIR}${OPTD_POR_FILENAME}
##
# Target (generated files)
OPTD_POR_TGT_FILENAME=${OPTD_POR_BASEFILENAME}_${TARGET_LANG}.csv
OPTD_POR_TGT_FILE=${DATA_DIR}${OPTD_POR_TGT_FILENAME}
##
# Parse command-line options
if [ "$1" = "-h" -o "$1" = "--help" ];
then
	echo
	echo "Usage: $0 [<Target language>]"
	echo " - Target language: '${TARGET_LANG}'"
	echo " + ${OPTD_POR_FILE} contains the OPTD-maintained list of Points of Reference (POR)"
	echo " + ${OPTD_POR_TGT_FILE} contains the list of OPTD-maintained POR for that language"
	echo
	exit -1
fi
##
# Target date
if [ "$1" != "" ];
then
	TARGET_LANG="$1"
	OPTD_POR_TGT_FILENAME=${OPTD_POR_BASEFILENAME}_${TARGET_LANG}.csv
	OPTD_POR_TGT_FILE=${DATA_DIR}${OPTD_POR_TGT_FILENAME}
fi
##
# Cleaning
# NOTE(review): this branch is checked after "$1" has already been treated as
# the target language above; harmless because the script exits here, but the
# target filename computed above is bogus for '--clean'.
if [ "$1" = "--clean" ]
then
	OPTD_POR_TGT_ALL_FILENAME=${OPTD_POR_BASEFILENAME}_??.csv
	OPTD_POR_TGT_ALL_FILE=${DATA_DIR}${OPTD_POR_TGT_ALL_FILENAME}
	\rm -f ${OPTD_POR_TGT_ALL_FILE}
	exit
fi
##
# Extraction of the valid POR entries for the given date.
echo
echo "Extraction Step"
echo "---------------"
echo
EXTRACTER=extract_por_for_a_language.awk
time awk -F'^' -v tgt_lang=${TARGET_LANG} -f ${EXTRACTER} \
	${OPTD_POR_FILE} > ${OPTD_POR_TGT_FILE}
##
# Reporting
# NOTE(review): the 'wc -l' line below is only echoed, not executed — confirm
# whether the line counts were meant to be printed for the user to run.
echo
echo "Reporting Step"
echo "--------------"
echo
echo "wc -l ${OPTD_POR_FILE} ${OPTD_POR_TGT_FILE}"
echo
echo "Hints for next steps:"
echo "---------------------"
echo "# Display the list ordered by PageRank values:"
echo "sort -t';' -k5nr,5 ${OPTD_POR_TGT_FILE} | less"
echo
echo "# Filter only on the airports"
echo "awk -F';' '/^[A-Z]{3};[AC]{1,2}/ {if (\$2 != \"C\") {print \$0}}' ${OPTD_POR_TGT_FILE} | less"
echo
echo "# Combine both rules above"
echo "awk -F';' '/^[A-Z]{3};[AC]{1,2}/ {if (\$2 != \"C\") {print \$0}}' ${OPTD_POR_TGT_FILE} | sort -t';' -k5nr,5 | less"
echo
echo "# Filter only on the airports having no name for that language"
echo "awk -F';' '/^[A-Z]{3};[AC]{1,2}/ {if (\$2 != \"C\" && \$9 == \"\") {print \$0}}' ${OPTD_POR_TGT_FILE} | sort -t';' -k5nr,5 | less"
echo
echo "# Display the number of airports: 1. having a name for that language, having no name for that language, 3. in total"
echo "awk -F';' '/^[A-Z]{3};[AC]{1,2}/ {if (\$2 != \"C\" && \$5 != \"\" && \$9 != \"\") {print \$0}}' ${OPTD_POR_TGT_FILE} | wc -l"
echo "awk -F';' '/^[A-Z]{3};[AC]{1,2}/ {if (\$2 != \"C\" && \$5 != \"\" && \$9 == \"\") {print \$0}}' ${OPTD_POR_TGT_FILE} | wc -l"
echo "awk -F';' '/^[A-Z]{3};[AC]{1,2}/ {if (\$2 != \"C\" && \$5 != \"\") {print \$0}}' ${OPTD_POR_TGT_FILE} | wc -l"
echo
|
<gh_stars>0
import {SetLiveValidatorResult} from "./types"
import {LiveValidator, HookProps, ControlOutputDataProps} from "@common-types"
/**
* @description
* Записывает результат живого валидатора, в объект вывода данных контрола
*
* @param {LiveValidator} validator - Живой валидатор, результат которого будет записыватся
* @param {HookProps} hookProps - Данные для работы валидатора
* @param {ControlOutputDataProps} controlOutputData - Объект в котором хранятся данные вывода контрола
*
* @returns {void}
*/
export const setLiveValidatorResult:SetLiveValidatorResult = (validator, hookProps, controlOutputData) => {
const {errorData = null, modifiedValueToWrite = null} = validator(hookProps),
{shouldLockNotValidWrite, hasError} = errorData
if (shouldLockNotValidWrite) controlOutputData.isWriteInputEnable = false
/**
* Отметить флаг что в контроле была хоть одна ошибка
*/
if (hasError) controlOutputData.hasAnyError = true
/**
* Если в валидаторе модифицировали вводимое значение, и вернули, записать в объект вывода
*/
if (modifiedValueToWrite) controlOutputData.writeToControlValue = modifiedValueToWrite
/**
* Записать настройки вывода ошибки в главный объект вывода
*/
if (errorData) controlOutputData.errorDataForControl = errorData
} |
#!/bin/sh
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.!
set -e # exit immediately on error
set -x # display all commands

# Install the build toolchain on Ubuntu (also used for Debian).
# Ubuntu 14.04 ships an old cmake, so cmake3 is added there.
setup_ubuntu() {
  export DEBIAN_FRONTEND=noninteractive
  apt-get update
  apt-get install -y build-essential cmake git pkg-config python3-pip
  pip3 install --upgrade pip
  export PATH="/usr/local/bin:$PATH"
  # Source VERSION_ID and friends for the release check below.
  . /etc/os-release
  if [ "${VERSION_ID}" = "14.04" ]; then
    apt-get install -y cmake3 python-dev
  fi
}
# Debian uses the same toolchain setup as Ubuntu.
setup_debian() {
  setup_ubuntu
}

# Install the build toolchain on Fedora (dnf-based).
setup_fedora() {
  dnf update -y
  dnf install -y rpm-build gcc-c++ make cmake pkg-config python-pip python-devel
}
# Out-of-source CMake build with tests enabled, then run the test suite
# and produce the source package.
build_generic() {
  mkdir -p build
  cd build
  cmake .. -DSPM_BUILD_TEST=ON
  make -j2
  make CTEST_OUTPUT_ON_FAILURE=1 test
  make package_source
  cd ..
}
# Install the native library system-wide, then build and test the Python
# bindings against it. Assumes build_generic has already populated ./build.
build_python() {
  cd build
  make install
  cd ..
  # Make the freshly-installed shared library discoverable.
  export LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64:$LD_LIBRARY_PATH
  export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:/usr/local/lib64/pkgconfig
  ldconfig -v
  cd python
  python3 setup.py test
  cd ..
}
# Full Ubuntu build plus a coverage build, with results uploaded to
# Coveralls (requires COVERALLS_REPO_TOKEN in the environment).
build_linux_gcc_coverall_ubuntu() {
  setup_debian
  apt-get install -y lcov
  pip3 install cpp-coveralls
  pip3 install 'requests[security]'
  build_generic
  build_python
  # Second configure pass with coverage instrumentation enabled.
  mkdir -p build
  cd build
  cmake .. -DSPM_COVERAGE=ON
  make -j2
  make coverage
  coveralls --exclude-pattern '.*(include|usr|test|third_party|pb|_main).*' --gcov-options '\-lp' --gcov gcov
  cd ..
}
# Distro-specific entry points: set up the toolchain, then build the
# native library and the Python bindings.
build_linux_gcc_ubuntu() {
  setup_ubuntu
  build_generic
  build_python
}

build_linux_gcc_ubuntu_i386() {
  setup_ubuntu
  build_generic
  build_python
}

build_linux_gcc_debian() {
  setup_debian
  build_generic
  build_python
}

build_linux_gcc_fedora() {
  setup_fedora
  build_generic
  build_python
}
# Build with clang instead of gcc (native library only, no Python step).
build_linux_clang_ubuntu() {
  setup_ubuntu
  apt-get install -y clang
  export CXX="clang++" CC="clang"
  build_generic
  rm -fr build
}

# macOS build; the brew steps are intentionally disabled.
build_osx() {
  # brew update
  # brew install protobuf || brew link --overwrite protobuf
  # brew link --overwrite python@2
  build_generic
  # cd build
  # make install
}
# Run this very script inside a container: $1 = docker image, $2 = mode
# (the build_* function to invoke). The repo is bind-mounted at
# /sentencepiece and the script re-invokes itself with "native".
run_docker() {
  docker pull "$1"
  docker run -e COVERALLS_REPO_TOKEN=${COVERALLS_REPO_TOKEN} --rm -ti --name travis-ci -v `pwd`:/sentencepiece -w /sentencepiece -td "$1" /bin/bash
  docker exec travis-ci bash -c "./test.sh native $2"
  docker stop travis-ci
}
## main
# Usage: test.sh <docker_image> <mode>; with "native" the mode function is
# evaluated directly in the current environment.
if [ "$#" -ne 2 ]; then
  echo "sh test.sh <docker_image> <mode>."
  echo "when <docker_image> is native, runs command natively without docker."
  exit
fi

if [ "$1" = "native" ]; then
  eval "$2"
else
  run_docker $1 $2
fi
|
<reponame>EdixonAlberto/instagrapi
import axios from 'axios';
// Thin HTTP wrapper around Instagram's web endpoints.
class Request {
  /**
   * Performs a GET request against the Instagram web API.
   *
   * @param query Either a full https URL or a path fragment that is appended
   *              to the configured base URL.
   * @returns The parsed JSON payload on HTTP 200.
   * @throws Error when the response status is not 200.
   */
  public static async api(query: string): Promise<TInstagramApi | TPostApi> {
    // Treat the query as a complete URL only when it starts with "https".
    const isUrl = query.search(/^(https)/) > -1;
    const url: string = isUrl ? query : `${global.config.urlBase}/${query}`;
    // "?__a=1" asks Instagram's web endpoint for a JSON representation.
    const { status, data } = await axios.get(url + '/?__a=1');
    if (status === 200) return data;
    else {
      console.warn('WARN-REQUEST ->', status, data);
      throw new Error('status request api');
    }
  }
}
export { Request };
|
import sys
from utils.console import parse_arguments, create_progressbar
from utils.output import write_to_console, write_to_file, generate_json, generate_text
from utils.vkontakte import fetch_messages_by_user_id, get_messages_count, create_api_connection
from analyze import generate_analyze_results
if __name__ == '__main__':
    # Parse command-line arguments
    args = parse_arguments()
    # Create VkApiMethod instance to call vk.com API
    vk = create_api_connection(login=args.login, password=args.pwd)
    # Initialize progress bar and fetch messages
    msg_count = get_messages_count(conn=vk, user_id=args.user_id)
    if msg_count == 0:
        print('This dialogue is empty, exiting...')
        sys.exit(0)
    # NOTE(review): msg_count + (msg_count % 200) is not a round-up to a
    # multiple of 200 in general — presumably intended as padding for the
    # 200-message fetch pages; confirm against fetch_messages_by_user_id.
    pbar = create_progressbar(max_val=msg_count + (msg_count % 200))
    messsages = fetch_messages_by_user_id(conn=vk, user_id=args.user_id, pbar=pbar)
    # Generate stats and format results
    analyze_results = generate_analyze_results(messsages)
    # Add user id to output data
    output_data = dict(user_id=args.user_id, **analyze_results)
    # Get results according to given output options: JSON file when a
    # filename was supplied, otherwise plain text on the console.
    if args.json_filename is not None:
        json_ouptut = generate_json(output_data)
        write_to_file(json_ouptut, args.json_filename)
    else:
        console_output = generate_text(output_data)
        write_to_console(console_output)
|
<filename>nmap2md.py
#!/usr/bin/env python
import re
import sys
import magic
import xml.etree.ElementTree as ET
from optparse import OptionParser
import columns_definition
__version__ = "1.1.0"
# Command-line interface: converts an nmap XML report into Markdown tables.
parser = OptionParser(usage="%prog [options] file.xml", version="%prog " + __version__)
parser.add_option("-c", "--columns", default="Port,State,Service,Version", help="define a columns for the table")
parser.add_option(
    "--hs",
    default=4,
    type="int",
    help="address is used as a header, this option defines header number h1 -> h6"
)
parser.add_option(
    "--rc",
    "--row-cells",
    default="[port.number]/[port.protocol],[state],[service.name],[service.product] [service.version]",
    help="define rows which will report certain data. Those rows: [port.number], [port.protocol], [state], "
         "[service.name], [service.product], [service.version] "
)
parser.add_option(
    "--print-empty",
    dest="print_empty",
    action="store_true",
    help="should addresses with no opened ports to be printed"
)
parser.set_defaults(print_empty=False)
(options, args) = parser.parse_args()


def fileCheck():
    # Validate the input file via libmagic type detection; abort with usage
    # help when it is not recognized as XML.
    f = (args[0])
    kind = magic.from_file(f)
    if ('XML'.casefold() in kind.casefold()) == False:
        print("File supplied is not a valid XML file")
        print()
        parser.print_help()
        sys.exit()


try:
    fileCheck()
except IndexError:
    # args[0] missing: no filename was supplied.
    print("No filename supplied as an argument!")
    print()
    parser.print_help()
    sys.exit()
except OSError as err:
    print("Invalid or nonexistant filename supplied as an argument!")
    print()
    parser.print_help()
    sys.exit()

columns = options.columns.split(",")
row_cells = options.rc.split(",")
definitions = columns_definition.Element.build(columns_definition.definition)
result = {}
md = ""

# Each column needs a matching row-cell template.
if len(columns) != len(row_cells):
    print("[Err] Columns and row cells amount should be equal")
    sys.exit()

# Wrong header number, setting to default option
if options.hs < 0 or options.hs > 6:
    options.hs = 4

try:
    tree = ET.parse(args[0])
except IndexError:
    print("[Err] No file could be found")
    print()
    parser.print_help()
    sys.exit()
except ET.ParseError:
    print("[Err] Something went wrong when parsing the XML file - perhaps it's corrupted/invalid? Please check file sanity and try again.")
    print()
    parser.print_help()
    sys.exit()

# Walk every scanned host and expand the row-cell templates (placeholders
# like [port.number]) against the per-port XML elements.
for host in tree.getroot().findall("host"):
    address = host.find("address").attrib["addr"]
    port_info = []
    ports = host.find("ports")
    # NOTE(review): Element truthiness is based on child count, so an empty
    # <ports/> element is skipped here; `ports is not None` may be intended.
    if ports:
        for port in ports.findall("port"):
            cells = []
            for rc in row_cells:
                current_cell = rc
                # Replace each [placeholder] using the first matching
                # column definition.
                for bc in re.findall("(\[[a-z\.*]+\])", rc):
                    for definition in definitions:
                        elem = definition.find(bc[1:-1])
                        if elem:
                            xml_element = port.find(elem.xpathfull())
                            if xml_element is not None:
                                data = elem.data(xml_element)
                                current_cell = current_cell.replace(bc, data)
                                break
                            break
                cells.append(current_cell)
            port_info.append(cells)
    result[address] = port_info

# Start converting data to Markdown
# IP addresses are defined as a header
for address in result:
    if not options.print_empty and len(result[address]) == 0:
        continue
    if options.hs != 0:
        md += "%s %s\n\n" % ('#' * options.hs, address)
    md += "| %s |" % " | ".join(columns)
    md += "\n"
    # Adding +2 for 1 space on left and right sides
    md += "|%s|" % "|".join(map(lambda s: '-' * (len(s) + 2), columns))
    md += "\n"
    for port_info in result[address]:
        md += "| %s |" % " | ".join(port_info)
        md += "\n"
    md += "\n\n"

print()
print()
print(md)
def objective_function(x):
    """Quadratic objective f(x) = x^2 + 6x - 4 (global minimum at x = -3)."""
    return x**2 + 6*x - 4


def find_local_minima(func, start=0.0, delta=0.01):
    """Find a local minimum of ``func`` by simple coordinate descent.

    Starting from ``start``, repeatedly step by ``delta`` in whichever
    direction decreases ``func``, and stop when neither neighbour improves.

    BUG FIX: the original only ever stepped in the +delta direction and
    evaluated the hard-coded ``objective_function`` instead of ``func``, so
    it immediately returned the start point whenever the minimum lay to the
    left (as it does for x^2 + 6x - 4, whose minimum is at x = -3).

    Args:
        func: Unary callable to minimise.
        start: Starting point of the search (default 0.0).
        delta: Step size; also the resolution of the answer (default 0.01).

    Returns:
        An x value within about ``delta`` of a local minimum.
    """
    x = start
    while True:
        if func(x + delta) < func(x):
            x += delta
        elif func(x - delta) < func(x):
            x -= delta
        else:
            return x


print('The local minima is', find_local_minima(objective_function))
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.app.plugin.core.debug.gui.breakpoint;
import java.awt.event.MouseEvent;
import java.util.Set;
import org.junit.Before;
import org.junit.Test;
import generic.Unique;
import ghidra.app.plugin.core.codebrowser.CodeViewerProvider;
import ghidra.app.plugin.core.debug.service.breakpoint.DebuggerLogicalBreakpointServicePlugin;
import ghidra.app.plugin.core.progmgr.ProgramManagerPlugin;
import ghidra.app.services.*;
import ghidra.app.util.viewer.listingpanel.ListingPanel;
import ghidra.program.model.address.Address;
import ghidra.program.model.listing.Program;
import ghidra.program.util.ProgramLocation;
import ghidra.trace.model.breakpoint.TraceBreakpointKind;
import ghidra.util.Msg;
import ghidra.util.task.TaskMonitor;
import help.screenshot.GhidraScreenShotGenerator;
/**
 * Generates documentation screenshots for the Debugger breakpoint-marker
 * plugin: one of the listing with breakpoint markers and its context menu,
 * and one of the "place breakpoint" dialog.
 */
public class DebuggerBreakpointMarkerPluginScreenShots extends GhidraScreenShotGenerator {
    DebuggerLogicalBreakpointService breakpointService;
    DebuggerBreakpointMarkerPlugin breakpointMarkerPlugin;
    ProgramManager programManager;
    CodeViewerProvider listing;
    Program program;

    /** Convenience: wrap an offset in the program's default address space. */
    protected static Address addr(Program program, long offset) {
        return program.getAddressFactory().getDefaultAddressSpace().getAddress(offset);
    }

    @Before
    public void setUpMine() throws Exception {
        // Install the plugins under test and grab the active listing/program.
        breakpointService = addPlugin(tool, DebuggerLogicalBreakpointServicePlugin.class);
        breakpointMarkerPlugin = addPlugin(tool, DebuggerBreakpointMarkerPlugin.class);
        programManager = addPlugin(tool, ProgramManagerPlugin.class);
        listing = waitForComponentProvider(CodeViewerProvider.class);
        program = programManager.getCurrentProgram();
    }

    @Test
    public void testCaptureDebuggerBreakpointMarkerPlugin() throws Throwable {
        ListingPanel panel = listing.getListingPanel();
        // The program must live in the project for breakpoints to be saved.
        tool.getProject()
                .getProjectData()
                .getRootFolder()
                .createFile("WinHelloCPP", program, TaskMonitor.DUMMY);
        Msg.debug(this, "Placing breakpoint");
        breakpointService.placeBreakpointAt(program, addr(program, 0x00401c60), 1,
            Set.of(TraceBreakpointKind.SW_EXECUTE), "");
        Msg.debug(this, "Disabling breakpoint");
        LogicalBreakpoint lb = waitForValue(() -> Unique.assertAtMostOne(
            breakpointService.getBreakpointsAt(program, addr(program, 0x00401c60))));
        lb.disable();
        Msg.debug(this, "Placing another");
        breakpointService.placeBreakpointAt(program, addr(program, 0x00401c63), 1,
            Set.of(TraceBreakpointKind.SW_EXECUTE), "");
        Msg.debug(this, "Saving program");
        program.save("Placed breakpoints", TaskMonitor.DUMMY);
        Msg.debug(this, "Clicking and capturing");
        // Right-click opens the breakpoint context menu for the screenshot.
        DebuggerBreakpointMarkerPluginTest.clickListing(panel, addr(program, 0x00401c66),
            MouseEvent.BUTTON3);
        waitForSwing();
        captureProviderWithScreenShot(listing);
    }

    @Test
    public void testCaptureDebuggerPlaceBreakpointDialog() throws Throwable {
        listing.goTo(program, new ProgramLocation(program, addr(program, 0x00401c63)));
        performAction(breakpointMarkerPlugin.actionSetSoftwareBreakpoint, false);
        DebuggerPlaceBreakpointDialog dialog =
            waitForDialogComponent(DebuggerPlaceBreakpointDialog.class);
        dialog.setName("After setup");
        captureDialog(dialog);
    }
}
|
package main
import (
"fmt"
)
// lowestIndex returns the index of the smallest value in a, considering
// only positions start through len(a)-1.
func lowestIndex(a []int, start int) (lowest int) {
	lowest = start
	for offset, value := range a[start+1:] {
		if value < a[lowest] {
			lowest = start + 1 + offset
		}
	}
	return
}

// selectionSort sorts the pointed-to slice in place using selection sort:
// for each position, swap in the smallest remaining element.
func selectionSort(arr *[]int) {
	data := *arr
	for pos := range data {
		minIdx := lowestIndex(data, pos)
		data[pos], data[minIdx] = data[minIdx], data[pos]
	}
}
// main demonstrates selectionSort on a small fixture slice.
func main() {
	data := []int{3, 2, 8, 4, 9, 1, 1, 7, -1}
	selectionSort(&data)
	fmt.Println(data)
}
|
// API key configuration. The "<KEY>" placeholders must be replaced with
// real credentials before use — do not commit actual keys.
var config = {
  GOOGLE_PLACE_API_KEY: "<KEY>",
  WEATHER_API_KEY: "<KEY>",
};
|
# Evaluate the trained Mask R-CNN model on GPU 0. Trailing KEY VALUE pairs
# override the YAML config (batch size 2, single data-loader worker).
CUDA_VISIBLE_DEVICES=0 python ./tools/test_net.py \
--config-file ms_mask_rcnn_R_50_FPN_3dce_mod.yaml \
--ckpt ./ms_mask_rcnn_R_50_FPN_3dce_mod_resnet/model_final.pth \
TEST.IMS_PER_BATCH 2 DATALOADER.NUM_WORKERS 1
|
<reponame>smagill/opensphere-desktop
package io.opensphere.filterbuilder.state;
import java.util.List;
import javax.xml.parsers.ParserConfigurationException;
import org.junit.Assert;
import org.junit.Test;
import org.w3c.dom.Document;
import com.bitsys.fade.mist.state.v4.QueryEntryType;
import io.opensphere.core.modulestate.ModuleStateController;
import io.opensphere.core.modulestate.StateXML;
import io.opensphere.core.util.XMLUtilities;
import io.opensphere.core.util.collections.New;
/** Test for {@link FilterManagerStateController}. */
/** Test for {@link FilterManagerStateController}. */
public class FilterManagerStateControllerTest
{
    /**
     * Test for
     * {@link FilterManagerStateController#canActivateState(org.w3c.dom.Node)}.
     *
     * @throws ParserConfigurationException Configuration error.
     */
    @Test
    public void testCanActivateState() throws ParserConfigurationException
    {
        Document doc = XMLUtilities.newDocument();
        FilterManagerStateController controller = new FilterManagerStateController(null, null, null, null, null);
        // An empty document has no state element to activate.
        Assert.assertFalse(controller.canActivateState(doc));
        // Adding a state root with a "filters" child makes it activatable.
        doc.appendChild(StateXML.createElement(doc, ModuleStateController.STATE_NAME))
                .appendChild(doc.createElementNS(ModuleStateController.STATE_NAMESPACE, "filters"));
        Assert.assertTrue(controller.canActivateState(doc));
    }

    /**
     * Test for
     * {@link FilterManagerStateController#combineQueryEntries(java.util.List)}.
     */
    @Test
    public void testCombineQueryEntries()
    {
        List<QueryEntryType> entries = New.list();
        entries.add(newQueryEntry("l1", null, "f1"));
        entries.add(newQueryEntry("l1", "a1", null));
        entries.add(newQueryEntry("l1", "a1", "f2"));
        entries.add(newQueryEntry("l1", null, null));
        entries.add(newQueryEntry("l3", "a3", "f3"));
        FilterManagerStateController.combineQueryEntries(entries);
        // Five entries collapse to three combined (area, filter) pairs.
        Assert.assertEquals(3, entries.size());
        Assert.assertEquals(null, entries.get(0).getAreaId());
        Assert.assertEquals("f1", entries.get(0).getFilterId());
        Assert.assertEquals("a1", entries.get(1).getAreaId());
        Assert.assertEquals("f2", entries.get(1).getFilterId());
        Assert.assertEquals("a3", entries.get(2).getAreaId());
        Assert.assertEquals("f3", entries.get(2).getFilterId());
    }

    /**
     * Creates a new query entry.
     *
     * @param layer the layer ID
     * @param area the area ID
     * @param filter the filter ID
     * @return the query entry
     */
    private static QueryEntryType newQueryEntry(String layer, String area, String filter)
    {
        QueryEntryType entry = new QueryEntryType();
        entry.setLayerId(layer);
        entry.setAreaId(area);
        entry.setFilterId(filter);
        return entry;
    }
}
|
<filename>routes/download.js
exports.download = (req, res) => {
try {
console.log("User requested file :", req.query.name);
res.sendFile(req.query.name);
} catch (err) {
console.error(err);
res.render("error", { err: JSON.stringify(err) });
}
};
|
// Action type string constants shared between action creators and reducers.

// Event CRUD / query actions
export const CREATE_EVENT = 'CREATE_EVENT';
export const FETCH_ALL_EVENT = 'FETCH_ALL_EVENT';
export const UPDATE_EVENT = 'UPDATE_EVENT';
export const DELETE_EVENT = 'DELETE_EVENT';
// Authentication actions
export const AUTH = "AUTH";
export const LOGOUT = "LOGOUT";
export const SIGNUP = "SIGNUP";
export const LOGIN = "LOGIN";
export const SEARCH_BY_QUERY = 'SEARCH_BY_QUERY';
// Loading-state actions
export const END_LOADING = 'END_LOADING';
export const START_LOADING = "START_LOADING";
export const GET_EVENT_BY_ID="GET_EVENT_BY_ID"
// Leftover ops note (kept for reference): sudo netstat -plten |grep java
<filename>sdk/src/main/java/com/iovation/launchkey/sdk/error/AuthorizationInProgress.java
package com.iovation.launchkey.sdk.error;
import java.util.Date;
import java.util.Objects;
/**
 * Thrown when a new Authorization Request is rejected because another
 * Authorization Request is already in progress. Carries the identifier of
 * the existing request, whether it originates from the same Service, and
 * when it expires.
 */
public class AuthorizationInProgress extends InvalidRequestException {
    // Identifier of the already-pending Authorization Request.
    private final String authorizationRequestId;
    // Whether the pending request came from the same Service.
    private final boolean fromSameService;
    // When the pending request expires.
    private final Date expires;

    /**
     * @param message The detail message (which is saved for later retrieval
     * by the {@link #getMessage()} method).
     * @param cause the cause (which is saved for later retrieval by the
     * {@link #getCause()} method). (A <tt>null</tt> value is
     * permitted, and indicates that the cause is nonexistent or
     * unknown.)
     * @param errorCode HTTP status code or 0 if no HTTP status code was returned
     * @param authorizationRequestId Identifier of the existing Authorization Request that caused this exception
     * @param fromSameService Is the authorization in progress from the same Service requesting the new
     * Authorization Request
     * @param expires When the Authorization Request identified by authorizationRequestId will expire.
     */
    public AuthorizationInProgress(
            String message, Throwable cause, String errorCode, String authorizationRequestId,
            boolean fromSameService, Date expires) {
        super(message, cause, errorCode);
        this.authorizationRequestId = authorizationRequestId;
        this.fromSameService = fromSameService;
        this.expires = expires;
    }

    /**
     * Get the identifier of the existing Authorization Request that caused this exception
     *
     * @return Identifier of the existing Authorization Request that caused this exception
     */
    public String getAuthorizationRequestId() {
        return authorizationRequestId;
    }

    /**
     * Is the authorization in progress from the same Service requesting the new Authorization Request
     *
     * @return Is the authorization in progress from the same Service requesting the new Authorization Request
     */
    public boolean isFromSameService() {
        return fromSameService;
    }

    /**
     * Get when the Authorization Request identified by authorizationRequestId will expire.
     *
     * @return Expiration of the Authorization Request identified by authorizationRequestId
     */
    public Date getExpires() {
        return expires;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof AuthorizationInProgress)) return false;
        if (!super.equals(o)) return false;
        AuthorizationInProgress that = (AuthorizationInProgress) o;
        return isFromSameService() == that.isFromSameService() &&
                Objects.equals(getAuthorizationRequestId(), that.getAuthorizationRequestId()) &&
                Objects.equals(getExpires(), that.getExpires());
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), getAuthorizationRequestId(), isFromSameService(), getExpires());
    }

    @Override
    public String toString() {
        return "AuthorizationInProgress{" +
                "authorizationRequestId='" + authorizationRequestId + '\'' +
                ", fromSameService=" + fromSameService +
                ", expires=" + expires +
                "} " + super.toString();
    }
}
|
module MechanicalTurk
  # Read-only controller exposing Turk records.
  class TurksController < BaseController
    # Lists all Turk records.
    def index
      @turks = Turk.all
    end

    # Fetches a single Turk by its id parameter.
    # NOTE(review): Turk.get suggests a DataMapper-style model API — confirm.
    def show
      @turk = Turk.get(params[:id])
    end
  end
end
|
#!/bin/bash
# Common functions definitions
# Validate the fileServerType parameter ($1); exits the script with status 1
# when it is not one of the supported values.
# Rewritten with 'case' instead of '[ ... -a ... ]' — the '-a'/'-o' test
# operators are deprecated and ambiguous per POSIX.
function check_fileServerType_param
{
    local fileServerType=$1
    case "$fileServerType" in
        gluster|azurefiles|nfs)
            ;;
        *)
            echo "Invalid fileServerType ($fileServerType) given. Only 'gluster', 'azurefiles' or 'nfs' are allowed. Exiting"
            exit 1
            ;;
    esac
}
# Create the 'mahara' Azure Files share via the az CLI.
# $1 = storage account name, $2 = storage account key, $3 = log file path.
function create_azure_files_mahara_share
{
    local storageAccountName=$1
    local storageAccountKey=$2
    local logFilePath=$3

    az storage share create \
        --name mahara \
        --account-name $storageAccountName \
        --account-key $storageAccountKey \
        --fail-on-exist >> $logFilePath
}
# Write a CIFS credentials file, register the Azure Files 'mahara' share in
# /etc/fstab (idempotently) and mount it at /mahara.
# $1 = storage account name, $2 = storage account key.
function setup_and_mount_azure_files_mahara_share
{
    local storageAccountName=$1
    local storageAccountKey=$2

    cat <<EOF > /etc/mahara_azure_files.credential
username=$storageAccountName
password=$storageAccountKey
EOF
    # Credentials file holds a secret: owner read/write only.
    chmod 600 /etc/mahara_azure_files.credential

    # Only append the fstab entry when it is not already present.
    grep "^//$storageAccountName.file.core.windows.net/mahara\s\s*/mahara\s\s*cifs" /etc/fstab
    if [ $? != "0" ]; then
        echo -e "\n//$storageAccountName.file.core.windows.net/mahara /mahara cifs credentials=/etc/mahara_azure_files.credential,uid=www-data,gid=www-data,nofail,vers=3.0,dir_mode=0770,file_mode=0660,serverino,mfsymlinks" >> /etc/fstab
    fi

    mkdir -p /mahara
    mount /mahara
}
# Functions for making NFS share available
# TODO refactor these functions with the same ones in install_gluster.sh

# Echo (space-separated) the /dev/sdX devices that are not in the blacklist
# and have no "1" partition, i.e. that appear unpartitioned.
# $1 = egrep pattern of devices to exclude, e.g. "/dev/sda|/dev/sdb".
function scan_for_new_disks
{
    local BLACKLIST=${1} # E.g., /dev/sda|/dev/sdb
    # NOTE(review): RET is declared as an array but 'RET+="..."' appends to
    # RET[0] as a string; this works only because ${RET} (element 0) is what
    # gets echoed below.
    declare -a RET
    local DEVS=$(ls -1 /dev/sd*|egrep -v "${BLACKLIST}"|egrep -v "[0-9]$")
    for DEV in ${DEVS};
    do
        # Check each device if there is a "1" partition. If not,
        # "assume" it is not partitioned.
        if [ ! -b ${DEV}1 ];
        then
            RET+="${DEV} "
        fi
    done
    echo "${RET}"
}
# Assemble a RAID0 array from the given disks (installing mdadm if needed).
# $1 = raid device (e.g., /dev/md1), $2 = chunk size KB, $3 = disk count,
# remaining args = member disk devices.
function create_raid0_ubuntu {
    local RAIDDISK=${1}       # E.g., /dev/md1
    local RAIDCHUNKSIZE=${2}  # E.g., 128
    local DISKCOUNT=${3}      # E.g., 4
    shift
    shift
    shift
    local DISKS="$@"

    # Install mdadm only when dpkg reports it missing.
    dpkg -s mdadm
    if [ ${?} -eq 1 ];
    then
        echo "installing mdadm"
        sudo apt-get -y -q install mdadm
    fi
    echo "Creating raid0"
    # Pause udev while the array is created to avoid device-node races.
    udevadm control --stop-exec-queue
    echo "yes" | mdadm --create $RAIDDISK --name=data --level=0 --chunk=$RAIDCHUNKSIZE --raid-devices=$DISKCOUNT $DISKS
    udevadm control --start-exec-queue
    # Persist the array definition so it survives reboots.
    mdadm --detail --verbose --scan > /etc/mdadm/mdadm.conf
}
function do_partition {
    # This function creates one (1) primary partition on the
    # disk device, using all available space
    local DISK=${1}   # E.g., /dev/sdc
    echo "Partitioning disk $DISK"
    # Feed fdisk the interactive keystrokes: new, primary, partition 1,
    # default start/end, write.
    echo -ne "n\np\n1\n\n\nw\n" | fdisk "${DISK}"
    #> /dev/null 2>&1

    #
    # Use the bash-specific $PIPESTATUS to ensure we get the correct exit code
    # from fdisk and not from echo
    if [ ${PIPESTATUS[1]} -ne 0 ];
    then
        echo "An error occurred partitioning ${DISK}" >&2
        echo "I cannot continue" >&2
        exit 2
    fi
}
# Append an ext4 fstab entry for the given filesystem UUID at the given
# mount point, unless the UUID is already listed.
# $1 = filesystem UUID, $2 = mount point (e.g., /mahara).
function add_local_filesystem_to_fstab {
    local UUID=${1}
    local MOUNTPOINT=${2}  # E.g., /mahara

    grep "${UUID}" /etc/fstab >/dev/null 2>&1
    if [ ${?} -eq 0 ];
    then
        echo "Not adding ${UUID} to fstab again (it's already there!)"
    else
        LINE="\nUUID=${UUID} ${MOUNTPOINT} ext4 defaults,noatime 0 0"
        echo -e "${LINE}" >> /etc/fstab
    fi
}
# Find all unpartitioned data disks, combine them into a RAID0 array when
# there is more than one, create an ext4 filesystem on the result, register
# it in fstab and mount it.
# $1 = mount point, $2 = raid device, $3 = raid partition device.
function create_filesystem_with_raid {
    local MOUNTPOINT=${1}     # E.g., /mahara
    local RAIDDISK=${2}       # E.g., /dev/md1
    local RAIDPARTITION=${3}  # E.g., /dev/md1p1

    mkdir -p $MOUNTPOINT

    # OS disks are excluded from the scan.
    local DISKS=$(scan_for_new_disks "/dev/sda|/dev/sdb")
    echo "Disks are ${DISKS}"
    # NOTE(review): 'declare -i DISKCOUNT' is immediately shadowed by the
    # 'local DISKCOUNT=' below, so the integer attribute has no effect.
    declare -i DISKCOUNT
    local DISKCOUNT=$(echo "$DISKS" | wc -w)
    echo "Disk count is $DISKCOUNT"
    if [ $DISKCOUNT = "0" ];
    then
        echo "No new (unpartitioned) disks available... Returning..."
        return
    elif [ $DISKCOUNT -gt 1 ];
    then
        # Multiple disks: stripe them into RAID0 first.
        create_raid0_ubuntu /dev/md1 128 $DISKCOUNT $DISKS
        do_partition ${RAIDDISK}
        local PARTITION="${RAIDPARTITION}"
    else
        # Single disk: partition it directly and read back the new
        # partition's device name from fdisk.
        do_partition ${DISKS}
        local PARTITION=$(fdisk -l ${DISKS}|grep -A 1 Device|tail -n 1|awk '{print $1}')
    fi

    echo "Creating filesystem on ${PARTITION}."
    mkfs -t ext4 ${PARTITION}
    mkdir -p "${MOUNTPOINT}"
    local UUID=$(blkid -u filesystem ${PARTITION}|awk -F "[= ]" '{print $3}'|tr -d "\"")
    add_local_filesystem_to_fstab "${UUID}" "${MOUNTPOINT}"
    echo "Mounting disk ${PARTITION} on ${MOUNTPOINT}"
    mount "${MOUNTPOINT}"
}
# Install the NFS server and export the given mount point to all hosts
# (idempotent: skips when the export already exists).
# $1 = directory to export (e.g., /mahara).
function configure_nfs_server_and_export {
    local MOUNTPOINT=${1}  # E.g., /mahara

    echo "Installing nfs server..."
    apt install -y nfs-kernel-server

    echo "Exporting ${MOUNTPOINT}..."
    grep "^${MOUNTPOINT}" /etc/exports > /dev/null 2>&1
    if [ $? = "0" ]; then
        echo "${MOUNTPOINT} is already exported. Returning..."
    else
        echo -e "\n${MOUNTPOINT} *(rw,sync,no_root_squash)" >> /etc/exports
        systemctl restart nfs-kernel-server.service
    fi
}
#This function will set Mahara's siteurl variable
#to either the user supplied URL or will default
#to the Azure LB public dns
# Resolve Mahara's site FQDN: when the caller left the placeholder URL
# ("www.example.com"), fall back to the Azure load-balancer public FQDN.
# Sets the global variable siteFQDN (only in the placeholder case).
# $1 = user-supplied site URL, $2 = Azure LB public FQDN.
function configure_site_url {
    local SITE_URL=${1}
    local AZ_FQDN=${2}
    [ "${SITE_URL}" != "www.example.com" ] || siteFQDN=${AZ_FQDN}
}
# Install the NFS client, register the remote share in /etc/fstab
# (idempotently) and mount it.
# $1 = NFS server host, $2 = exported directory, $3 = local mount point.
function configure_nfs_client_and_mount {
    local NFS_SERVER=${1}  # E.g., controller-vm-ab12cd
    local NFS_DIR=${2}     # E.g., /mahara
    local MOUNTPOINT=${3}  # E.g., /mahara

    apt install -y nfs-common
    mkdir -p ${MOUNTPOINT}

    grep "^${NFS_SERVER}:${NFS_DIR}" /etc/fstab > /dev/null 2>&1
    if [ $? = "0" ]; then
        echo "${NFS_SERVER}:${NFS_DIR} already in /etc/fstab... skipping to add"
    else
        echo -e "\n${NFS_SERVER}:${NFS_DIR} ${MOUNTPOINT} nfs auto 0 0" >> /etc/fstab
    fi

    mount ${MOUNTPOINT}
}
# Timestamp marker files used to detect when the shared Mahara tree changed:
# one on the shared (gluster/NFS) copy, one on each frontend's local copy.
SERVER_TIMESTAMP_FULLPATH="/mahara/html/mahara/.last_modified_time.mahara_on_azure"
LOCAL_TIMESTAMP_FULLPATH="/var/www/html/mahara/.last_modified_time.mahara_on_azure"
# Create a script to sync /mahara/html/mahara (gluster/NFS) and /var/www/html/mahara (local) and set up a minutely cron job
# Should be called by root and only on a VMSS web frontend VM
function setup_html_local_copy_cron_job {
    if [ "$(whoami)" != "root" ]; then
        echo "${0}: Must be run as root!"
        return 1
    fi

    local SYNC_SCRIPT_FULLPATH="/usr/local/bin/sync_mahara_html_local_copy_if_modified.sh"
    mkdir -p $(dirname ${SYNC_SCRIPT_FULLPATH})
    local SYNC_LOG_FULLPATH="/var/log/mahara-html-sync.log"
    # Generated script: compares the shared and local timestamp markers and
    # rsyncs only when they differ. Note the \$ escapes — those expansions
    # happen when the generated script runs, not here.
    cat <<EOF > ${SYNC_SCRIPT_FULLPATH}
#!/bin/bash
sleep \$((\$RANDOM%30))
if [ -f "$SERVER_TIMESTAMP_FULLPATH" ]; then
SERVER_TIMESTAMP=\$(cat $SERVER_TIMESTAMP_FULLPATH)
if [ -f "$LOCAL_TIMESTAMP_FULLPATH" ]; then
LOCAL_TIMESTAMP=\$(cat $LOCAL_TIMESTAMP_FULLPATH)
else
logger -p local2.notice -t mahara "Local timestamp file ($LOCAL_TIMESTAMP_FULLPATH) does not exist. Probably first time syncing? Continuing to sync."
mkdir -p /var/www/html
fi
if [ "\$SERVER_TIMESTAMP" != "\$LOCAL_TIMESTAMP" ]; then
logger -p local2.notice -t mahara "Server time stamp (\$SERVER_TIMESTAMP) is different from local time stamp (\$LOCAL_TIMESTAMP). Start syncing..."
if [[ \$(find $SYNC_LOG_FULLPATH -type f -size +20M 2> /dev/null) ]]; then
truncate -s 0 $SYNC_LOG_FULLPATH
fi
echo \$(date +%Y%m%d%H%M%S) >> $SYNC_LOG_FULLPATH
rsync -av --delete /mahara/html/mahara /var/www/html >> $SYNC_LOG_FULLPATH
fi
else
logger -p local2.notice -t mahara "Remote timestamp file ($SERVER_TIMESTAMP_FULLPATH) does not exist. Is /mahara mounted? Exiting with error."
exit 1
fi
EOF
    # Root-only executable: it writes under /var/www/html.
    chmod 500 ${SYNC_SCRIPT_FULLPATH}

    # Run the sync script every minute as root.
    local CRON_DESC_FULLPATH="/etc/cron.d/sync-mahara-html-local-copy"
    cat <<EOF > ${CRON_DESC_FULLPATH}
* * * * * root ${SYNC_SCRIPT_FULLPATH}
EOF
    chmod 644 ${CRON_DESC_FULLPATH}
}
LAST_MODIFIED_TIME_UPDATE_SCRIPT_FULLPATH="/usr/local/bin/update_last_modified_time_update.mahara_on_azure.sh"
# Create a script to modify the last modified timestamp file (/mahara/html/mahara/last_modified_time.mahara_on_azure)
# Should be called by root and only on the controller VM.
# The mahara admin should run the generated script everytime the /mahara/html/mahara directory content is updated (e.g., mahara upgrade, config change or plugin install/upgrade)
#######################################
# Generate the script that stamps the server-side timestamp file with the
# current time (YYYYmmddHHMMSS). Root-only; intended for the controller VM.
# Globals: LAST_MODIFIED_TIME_UPDATE_SCRIPT_FULLPATH - path of generated script
#          SERVER_TIMESTAMP_FULLPATH - expanded NOW into the generated script;
#          the \$-escaped date call runs each time that script is executed.
# Returns: 1 when not run as root.
#######################################
function create_last_modified_time_update_script {
  if [ "$(whoami)" != "root" ]; then
    echo "${0}: Must be run as root!"
    return 1
  fi
  # Quote all expansions so paths containing spaces cannot word-split (SC2086).
  mkdir -p "$(dirname "$LAST_MODIFIED_TIME_UPDATE_SCRIPT_FULLPATH")"
  cat <<EOF > "$LAST_MODIFIED_TIME_UPDATE_SCRIPT_FULLPATH"
#!/bin/bash
echo \$(date +%Y%m%d%H%M%S) > $SERVER_TIMESTAMP_FULLPATH
EOF
  chmod +x "$LAST_MODIFIED_TIME_UPDATE_SCRIPT_FULLPATH"
}
# Execute the generated timestamp-update script once (e.g. right after
# creating it, so a fresh server-side timestamp exists immediately).
# The path is quoted so it also works when it contains spaces (SC2086).
function run_once_last_modified_time_update_script {
  "$LAST_MODIFIED_TIME_UPDATE_SCRIPT_FULLPATH"
}
# Long fail2ban config command moved here
# Write the complete fail2ban jail configuration to /etc/fail2ban/jail.conf.
# The payload below is the stock Debian jail.conf with only the [ssh] jail
# enabled; site-specific overrides belong in /etc/fail2ban/jail.local, as the
# file itself advises. NOTE(review): the here-doc delimiter is unquoted, but
# the payload contains no $-expansions, so it is written out verbatim.
function config_fail2ban
{
cat <<EOF > /etc/fail2ban/jail.conf
# Fail2Ban configuration file.
#
# This file was composed for Debian systems from the original one
# provided now under /usr/share/doc/fail2ban/examples/jail.conf
# for additional examples.
#
# Comments: use '#' for comment lines and ';' for inline comments
#
# To avoid merges during upgrades DO NOT MODIFY THIS FILE
# and rather provide your changes in /etc/fail2ban/jail.local
#
# The DEFAULT allows a global definition of the options. They can be overridden
# in each jail afterwards.
[DEFAULT]
# "ignoreip" can be an IP address, a CIDR mask or a DNS host. Fail2ban will not
# ban a host which matches an address in this list. Several addresses can be
# defined using space separator.
ignoreip = 127.0.0.1/8
# "bantime" is the number of seconds that a host is banned.
bantime = 600
# A host is banned if it has generated "maxretry" during the last "findtime"
# seconds.
findtime = 600
maxretry = 3
# "backend" specifies the backend used to get files modification.
# Available options are "pyinotify", "gamin", "polling" and "auto".
# This option can be overridden in each jail as well.
#
# pyinotify: requires pyinotify (a file alteration monitor) to be installed.
# If pyinotify is not installed, Fail2ban will use auto.
# gamin: requires Gamin (a file alteration monitor) to be installed.
# If Gamin is not installed, Fail2ban will use auto.
# polling: uses a polling algorithm which does not require external libraries.
# auto: will try to use the following backends, in order:
# pyinotify, gamin, polling.
backend = auto
# "usedns" specifies if jails should trust hostnames in logs,
# warn when reverse DNS lookups are performed, or ignore all hostnames in logs
#
# yes: if a hostname is encountered, a reverse DNS lookup will be performed.
# warn: if a hostname is encountered, a reverse DNS lookup will be performed,
# but it will be logged as a warning.
# no: if a hostname is encountered, will not be used for banning,
# but it will be logged as info.
usedns = warn
#
# Destination email address used solely for the interpolations in
# jail.{conf,local} configuration files.
destemail = root@localhost
#
# Name of the sender for mta actions
sendername = Fail2Ban
#
# ACTIONS
#
# Default banning action (e.g. iptables, iptables-new,
# iptables-multiport, shorewall, etc) It is used to define
# action_* variables. Can be overridden globally or per
# section within jail.local file
banaction = iptables-multiport
# email action. Since 0.8.1 upstream fail2ban uses sendmail
# MTA for the mailing. Change mta configuration parameter to mail
# if you want to revert to conventional 'mail'.
mta = sendmail
# Default protocol
protocol = tcp
# Specify chain where jumps would need to be added in iptables-* actions
chain = INPUT
#
# Action shortcuts. To be used to define action parameter
# The simplest action to take: ban only
action_ = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
# ban & send an e-mail with whois report to the destemail.
action_mw = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
%(mta)s-whois[name=%(__name__)s, dest="%(destemail)s", protocol="%(protocol)s", chain="%(chain)s", sendername="%(sendername)s"]
# ban & send an e-mail with whois report and relevant log lines
# to the destemail.
action_mwl = %(banaction)s[name=%(__name__)s, port="%(port)s", protocol="%(protocol)s", chain="%(chain)s"]
%(mta)s-whois-lines[name=%(__name__)s, dest="%(destemail)s", logpath=%(logpath)s, chain="%(chain)s", sendername="%(sendername)s"]
# Choose default action. To change, just override value of 'action' with the
# interpolation to the chosen action shortcut (e.g. action_mw, action_mwl, etc) in jail.local
# globally (section [DEFAULT]) or per specific section
action = %(action_)s
#
# JAILS
#
# Next jails corresponds to the standard configuration in Fail2ban 0.6 which
# was shipped in Debian. Enable any defined here jail by including
#
# [SECTION_NAME]
# enabled = true
#
# in /etc/fail2ban/jail.local.
#
# Optionally you may override any other parameter (e.g. banaction,
# action, port, logpath, etc) in that section within jail.local
[ssh]
enabled = true
port = ssh
filter = sshd
logpath = /var/log/auth.log
maxretry = 6
[dropbear]
enabled = false
port = ssh
filter = dropbear
logpath = /var/log/auth.log
maxretry = 6
# Generic filter for pam. Has to be used with action which bans all ports
# such as iptables-allports, shorewall
[pam-generic]
enabled = false
# pam-generic filter can be customized to monitor specific subset of 'tty's
filter = pam-generic
# port actually must be irrelevant but lets leave it all for some possible uses
port = all
banaction = iptables-allports
port = anyport
logpath = /var/log/auth.log
maxretry = 6
[xinetd-fail]
enabled = false
filter = xinetd-fail
port = all
banaction = iptables-multiport-log
logpath = /var/log/daemon.log
maxretry = 2
[ssh-ddos]
enabled = false
port = ssh
filter = sshd-ddos
logpath = /var/log/auth.log
maxretry = 6
# Here we use blackhole routes for not requiring any additional kernel support
# to store large volumes of banned IPs
[ssh-route]
enabled = false
filter = sshd
action = route
logpath = /var/log/sshd.log
maxretry = 6
# Here we use a combination of Netfilter/Iptables and IPsets
# for storing large volumes of banned IPs
#
# IPset comes in two versions. See ipset -V for which one to use
# requires the ipset package and kernel support.
[ssh-iptables-ipset4]
enabled = false
port = ssh
filter = sshd
banaction = iptables-ipset-proto4
logpath = /var/log/sshd.log
maxretry = 6
[ssh-iptables-ipset6]
enabled = false
port = ssh
filter = sshd
banaction = iptables-ipset-proto6
logpath = /var/log/sshd.log
maxretry = 6
#
# HTTP servers
#
[apache]
enabled = false
port = http,https
filter = apache-auth
logpath = /var/log/apache*/*error.log
maxretry = 6
# default action is now multiport, so apache-multiport jail was left
# for compatibility with previous (<0.7.6-2) releases
[apache-multiport]
enabled = false
port = http,https
filter = apache-auth
logpath = /var/log/apache*/*error.log
maxretry = 6
[apache-noscript]
enabled = false
port = http,https
filter = apache-noscript
logpath = /var/log/apache*/*error.log
maxretry = 6
[apache-overflows]
enabled = false
port = http,https
filter = apache-overflows
logpath = /var/log/apache*/*error.log
maxretry = 2
# Ban attackers that try to use PHP's URL-fopen() functionality
# through GET/POST variables. - Experimental, with more than a year
# of usage in production environments.
[php-url-fopen]
enabled = false
port = http,https
filter = php-url-fopen
logpath = /var/www/*/logs/access_log
# A simple PHP-fastcgi jail which works with lighttpd.
# If you run a lighttpd server, then you probably will
# find these kinds of messages in your error_log:
# ALERT – tried to register forbidden variable ‘GLOBALS’
# through GET variables (attacker '1.2.3.4', file '/var/www/default/htdocs/index.php')
[lighttpd-fastcgi]
enabled = false
port = http,https
filter = lighttpd-fastcgi
logpath = /var/log/lighttpd/error.log
# Same as above for mod_auth
# It catches wrong authentifications
[lighttpd-auth]
enabled = false
port = http,https
filter = suhosin
logpath = /var/log/lighttpd/error.log
[nginx-http-auth]
enabled = false
filter = nginx-http-auth
port = http,https
logpath = /var/log/nginx/error.log
# Monitor roundcube server
[roundcube-auth]
enabled = false
filter = roundcube-auth
port = http,https
logpath = /var/log/roundcube/userlogins
[sogo-auth]
enabled = false
filter = sogo-auth
port = http, https
# without proxy this would be:
# port = 20000
logpath = /var/log/sogo/sogo.log
#
# FTP servers
#
[vsftpd]
enabled = false
port = ftp,ftp-data,ftps,ftps-data
filter = vsftpd
logpath = /var/log/vsftpd.log
# or overwrite it in jails.local to be
# logpath = /var/log/auth.log
# if you want to rely on PAM failed login attempts
# vsftpd's failregex should match both of those formats
maxretry = 6
[proftpd]
enabled = false
port = ftp,ftp-data,ftps,ftps-data
filter = proftpd
logpath = /var/log/proftpd/proftpd.log
maxretry = 6
[pure-ftpd]
enabled = false
port = ftp,ftp-data,ftps,ftps-data
filter = pure-ftpd
logpath = /var/log/syslog
maxretry = 6
[wuftpd]
enabled = false
port = ftp,ftp-data,ftps,ftps-data
filter = wuftpd
logpath = /var/log/syslog
maxretry = 6
#
# Mail servers
#
[postfix]
enabled = false
port = smtp,ssmtp,submission
filter = postfix
logpath = /var/log/mail.log
[couriersmtp]
enabled = false
port = smtp,ssmtp,submission
filter = couriersmtp
logpath = /var/log/mail.log
#
# Mail servers authenticators: might be used for smtp,ftp,imap servers, so
# all relevant ports get banned
#
[courierauth]
enabled = false
port = smtp,ssmtp,submission,imap2,imap3,imaps,pop3,pop3s
filter = courierlogin
logpath = /var/log/mail.log
[sasl]
enabled = false
port = smtp,ssmtp,submission,imap2,imap3,imaps,pop3,pop3s
filter = postfix-sasl
# You might consider monitoring /var/log/mail.warn instead if you are
# running postfix since it would provide the same log lines at the
# "warn" level but overall at the smaller filesize.
logpath = /var/log/mail.log
[dovecot]
enabled = false
port = smtp,ssmtp,submission,imap2,imap3,imaps,pop3,pop3s
filter = dovecot
logpath = /var/log/mail.log
# To log wrong MySQL access attempts add to /etc/my.cnf:
# log-error=/var/log/mysqld.log
# log-warning = 2
[mysqld-auth]
enabled = false
filter = mysqld-auth
port = 3306
logpath = /var/log/mysqld.log
# DNS Servers
# These jails block attacks against named (bind9). By default, logging is off
# with bind9 installation. You will need something like this:
#
# logging {
# channel security_file {
# file "/var/log/named/security.log" versions 3 size 30m;
# severity dynamic;
# print-time yes;
# };
# category security {
# security_file;
# };
# };
#
# in your named.conf to provide proper logging
# !!! WARNING !!!
# Since UDP is connection-less protocol, spoofing of IP and imitation
# of illegal actions is way too simple. Thus enabling of this filter
# might provide an easy way for implementing a DoS against a chosen
# victim. See
# http://nion.modprobe.de/blog/archives/690-fail2ban-+-dns-fail.html
# Please DO NOT USE this jail unless you know what you are doing.
#[named-refused-udp]
#
#enabled = false
#port = domain,953
#protocol = udp
#filter = named-refused
#logpath = /var/log/named/security.log
[named-refused-tcp]
enabled = false
port = domain,953
protocol = tcp
filter = named-refused
logpath = /var/log/named/security.log
# Multiple jails, 1 per protocol, are necessary ATM:
# see https://github.com/fail2ban/fail2ban/issues/37
[asterisk-tcp]
enabled = false
filter = asterisk
port = 5060,5061
protocol = tcp
logpath = /var/log/asterisk/messages
[asterisk-udp]
enabled = false
filter = asterisk
port = 5060,5061
protocol = udp
logpath = /var/log/asterisk/messages
# Jail for more extended banning of persistent abusers
# !!! WARNING !!!
# Make sure that your loglevel specified in fail2ban.conf/.local
# is not at DEBUG level -- which might then cause fail2ban to fall into
# an infinite loop constantly feeding itself with non-informative lines
[recidive]
enabled = false
filter = recidive
logpath = /var/log/fail2ban.log
action = iptables-allports[name=recidive]
sendmail-whois-lines[name=recidive, logpath=/var/log/fail2ban.log]
bantime = 604800 ; 1 week
findtime = 86400 ; 1 day
maxretry = 5
EOF
}
|
<reponame>naga-project/webfx
package dev.webfx.kit.mapper.peers.javafxcontrols.base;
import javafx.scene.control.ToggleButton;
/**
 * Peer mixin for {@link ToggleButton} controls: extends the button-base
 * mixin contract with the single extra property that toggle buttons add.
 *
 * @param <N>  the JavaFX node type handled by the peer
 * @param <NB> the concrete peer base type
 * @param <NM> the self-referential mixin type
 *
 * @author <NAME>
 */
public interface ToggleButtonPeerMixin
<N extends ToggleButton, NB extends ToggleButtonPeerBase<N, NB, NM>, NM extends ToggleButtonPeerMixin<N, NB, NM>>
extends ButtonBasePeerMixin<N, NB, NM> {
// Push the control's "selected" property value to the rendering target.
void updateSelected(Boolean selected);
}
|
# Pin the component versions used by the build/runtime environment.
STARDLLS_VERSION=1.3.0
DIPC_VERSION=1.5.0
STARCORE_VERSION=1.3.8
STARLANG_VERSION=1.1.0
PROTOCOL_VERSION=1.2.1
export STARDLLS_VERSION DIPC_VERSION STARCORE_VERSION STARLANG_VERSION PROTOCOL_VERSION
|
#!/bin/bash
# EPOS Command Library 6.6.2.0 installation script
# Copyright (c) maxon motor ag 2014-2020
# Refuse to run without root privileges and show the exact sudo re-invocation.
if [[ $UID != 0 ]]; then
  echo 'Please run this installation script with sudo:'
  # "$0"/"$@" are quoted so paths and arguments containing spaces are echoed
  # verbatim (original used unquoted $0 $* — SC2086).
  echo 'sudo' "$0" "$@"
  exit 1
fi
#######################################
# Report the outcome of the previous command: prints " [OK]" when it
# succeeded and " [FAILED]" otherwise. Must be called immediately after the
# command being checked, since it inspects $?.
#######################################
function check_result {
  local rv=$?   # capture before any other command can clobber $?
  if (( rv == 0 )); then
    printf ' [OK]\n'
  else
    printf ' [FAILED]\n'
  fi
}
#######################################
# Install the EPOS Command Library into /opt/EposCmdLib_6.6.2.0 and wire it
# into the system: architecture-specific libs, /usr/lib symlinks, udev rules
# and a sudoers rule for the invoking user.
# Globals: SUDO_USER (read, for the sudoers rule)
# Outputs: progress lines to stdout (check_result appends [OK]/[FAILED])
#######################################
function install {
  echo '---------------------------------------------------------'
  echo 'EPOS Command Library 6.6.2.0 installation started'
  echo '---------------------------------------------------------'

  # Remove any previous installation.
  printf ' - Remove existing installation'
  rm -rf '/opt/EposCmdLib_6.6.2.0' > /dev/null
  check_result

  # Copy headers and the library matching this machine's architecture.
  printf ' - Install library into directory: /opt/EposCmdLib_6.6.2.0'
  mkdir '/opt/EposCmdLib_6.6.2.0' > /dev/null
  mkdir '/opt/EposCmdLib_6.6.2.0/lib' > /dev/null
  cp -rf ./include '/opt/EposCmdLib_6.6.2.0' > /dev/null
  architecture=$(uname -m)
  case $architecture in
    armv6l)
      cp -rf ./lib/arm/v6 '/opt/EposCmdLib_6.6.2.0/lib' > /dev/null
      ;;
    armv7l)
      cp -rf ./lib/arm/v7 '/opt/EposCmdLib_6.6.2.0/lib' > /dev/null
      ;;
    aarch64)
      cp -rf ./lib/arm/v8 '/opt/EposCmdLib_6.6.2.0/lib' > /dev/null
      ;;
    x86|i386|i486|i586|i686)
      cp -rf ./lib/intel/x86 '/opt/EposCmdLib_6.6.2.0/lib' > /dev/null
      ;;
    x86_64)
      cp -rf ./lib/intel/x86_64 '/opt/EposCmdLib_6.6.2.0/lib' > /dev/null
      ;;
  esac
  check_result

  # Copy examples and misc files.
  printf ' - Install examples into directory: /opt/EposCmdLib_6.6.2.0'
  if [ ! -d "/opt/EposCmdLib_6.6.2.0" ]; then
    mkdir '/opt/EposCmdLib_6.6.2.0' > /dev/null
  fi
  cp -rf ./examples '/opt/EposCmdLib_6.6.2.0' > /dev/null
  cp -rf ./misc '/opt/EposCmdLib_6.6.2.0' > /dev/null
  find "/opt/EposCmdLib_6.6.2.0/examples" -type d -exec chmod 777 {} \;
  find "/opt/EposCmdLib_6.6.2.0/examples" -type f -exec chmod 666 {} \;
  check_result

  # Create symlinks; reuse $architecture computed above (the second
  # uname -m call was redundant).
  printf ' - Library system integration'
  case $architecture in
    armv6l)
      # BUGFIX: this arm was 'armv6)', but uname -m reports 'armv6l' (as the
      # copy step above correctly matches), so the ARMv6 symlinks were never
      # created (e.g. on a Raspberry Pi 1/Zero).
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/v6/libEposCmd.so.6.6.2.0' /usr/lib/libEposCmd.so
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/v6/libftd2xx.so.1.4.8' /usr/lib/libftd2xx.so
      ;;
    armv7l)
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/v7/libEposCmd.so.6.6.2.0' /usr/lib/libEposCmd.so
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/v7/libftd2xx.so.1.4.8' /usr/lib/libftd2xx.so
      ;;
    aarch64)
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/v8/libEposCmd.so.6.6.2.0' /usr/lib/libEposCmd.so
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/v8/libftd2xx.so.1.4.8' /usr/lib/libftd2xx.so
      ;;
    x86|i386|i486|i586|i686)
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/x86/libEposCmd.so.6.6.2.0' /usr/lib/libEposCmd.so
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/x86/libftd2xx.so.1.4.8' /usr/lib/libftd2xx.so
      ;;
    x86_64)
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/x86_64/libEposCmd.so.6.6.2.0' /usr/lib/libEposCmd.so
      ln -sf '/opt/EposCmdLib_6.6.2.0/lib/x86_64/libftd2xx.so.1.4.8' /usr/lib/libftd2xx.so
      ;;
  esac
  check_result

  # Install udev rules so non-root users can access the USB devices.
  printf ' - Configure device access rights'
  cp -f './misc/99-ftdi.rules' /etc/udev/rules.d > /dev/null
  cp -f './misc/99-epos4.rules' /etc/udev/rules.d > /dev/null
  check_result
  udevadm control --reload-rules && udevadm trigger

  # Allow the invoking user to run /bin/ip without a password.
  printf ' - Configure user access rights'
  touch /etc/sudoers.d/mmc_rule
  echo "$SUDO_USER" 'ALL=(ALL) NOPASSWD: /bin/ip' > /etc/sudoers.d/mmc_rule
  chmod 0440 /etc/sudoers.d/mmc_rule
  check_result

  echo '---------------------------------------------------------'
  echo 'EPOS Command Library 6.6.2.0 installed'
  echo '---------------------------------------------------------'
}
#######################################
# Remove the EPOS Command Library installation: sudoers rule, udev rules,
# /usr/lib symlinks, /opt tree, and (interactively) the invoking user's data.
# Globals: SUDO_USER (read, for locating the user data directory)
# Outputs: progress lines to stdout (check_result appends [OK]/[FAILED])
#######################################
function uninstall {
echo '---------------------------------------------------------'
echo 'EPOS Command Library 6.6.2.0 deinstallation started'
echo '---------------------------------------------------------'
#remove access rights (sudoers rule added by install)
printf ' - Reconfigure user access rights'
rm -f /etc/sudoers.d/mmc_rule > /dev/null
check_result
#remove udev rules
printf ' - Reconfigure device access rights'
rm -f /etc/udev/rules.d/99-epos4.rules > /dev/null
rm -f /etc/udev/rules.d/99-ftdi.rules > /dev/null
check_result
service udev restart
#remove symbolic links
printf ' - Remove library system integration'
rm -f /usr/lib/libEposCmd.so > /dev/null
rm -f /usr/lib/libftd2xx.so > /dev/null
check_result
#remove previous installation
printf ' - Remove existing installation'
rm -rf '/opt/EposCmdLib_6.6.2.0' > /dev/null
check_result
#remove user data (interactive: single-key yes/no prompt)
read -p " - Remove user ($SUDO_USER) data: /home/$SUDO_USER/.maxon_motor_ag [Yy/n]? " -n 1 -r
if [[ $REPLY =~ ^[Yy]$ ]]
then
rm -rf "/home/$SUDO_USER/.maxon_motor_ag" > /dev/null
fi
echo
echo '---------------------------------------------------------'
echo 'EPOS Command Library 6.6.2.0 uninstalled'
echo '---------------------------------------------------------'
}
# Entry point: with no arguments run a default install; otherwise handle
# each flag in turn (so "-u -i" performs an uninstall followed by an install).
if [ "$#" -eq 0 ]; then
  install
else
  for arg in "$@"; do
    case "$arg" in
      -u|--uninstall)
        uninstall
        ;;
      -i|--install)
        install
        ;;
      *)
        echo "usage install.sh -i /--install/ [default] -u /--uninstall/"
        ;;
    esac
  done
fi
|
# Enable kubectl command completion for zsh.
source <(kubectl completion zsh)
# Retired helpers kept for reference: hand-rolled kubectx/kubens equivalents
# and earlier kubectl-fzf plugin wiring, superseded by the options below.
# function kubectx {
# if [ -z "$1" ]; then
# kubectl config get-contexts
# else
# kubectl config use-context $1
# fi
# }
# function kubens {
# if [[ -z "$1" ]]; then
# kubectl get ns
# else
# kubectl config set-context --current --namespace=$1
# fi
# }
# source ~/dotfiles/zsh/modules/kubernetes/kubectl_fzf.plugin.zsh
# zplug "plugins/kubectl", from:oh-my-zsh, defer:2
# zplug "bonnefoa/kubectl-fzf", defer:3
# NOTE(review): the three assignments below overwrite each other, so only the
# last one takes effect; the first two look like kept iteration history.
export KUBECTL_FZF_OPTIONS=(-1 --header-lines=2 --layout reverse)
export KUBECTL_FZF_OPTIONS=(-1 --header-lines=2 --layout reverse -e)
export KUBECTL_FZF_OPTIONS=(-1 --header-lines=2 --layout reverse -e --no-hscroll --no-sort --bind space:accept)
|
<reponame>prasadtechnology/vertx<filename>src/main/java/io/vertx/blog/first/MyFirstVerticle.java<gh_stars>0
package io.vertx.blog.first;
import java.util.LinkedHashMap;
import java.util.Map;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.vertx.core.AbstractVerticle;
import io.vertx.core.Future;
import io.vertx.core.http.HttpServerResponse;
import io.vertx.core.json.Json;
import io.vertx.core.json.JsonObject;
import io.vertx.ext.jdbc.JDBCClient;
import io.vertx.ext.sql.ResultSet;
import io.vertx.ext.sql.SQLConnection;
import io.vertx.ext.web.Router;
import io.vertx.ext.web.RoutingContext;
import io.vertx.ext.web.handler.BodyHandler;
import io.vertx.ext.web.handler.StaticHandler;
import io.vertx.rxjava.ext.asyncsql.MySQLClient;
public class MyFirstVerticle extends AbstractVerticle {
private static final Logger logger = LoggerFactory.getLogger(MyFirstVerticle.class);
// Store our product
private Map<String, Whisky> products = new LinkedHashMap();
// Create some product
private void createSomeData() {
Whisky bowmore = new Whisky("Bowmore 15 Years Laimrig", "Scotland, Islay");
products.put("1", bowmore);
Whisky talisker = new Whisky("Talisker 57° North", "Scotland, Island");
products.put("2", talisker);
}
@Override
public void start(Future<Void> fut) {
JsonObject mySQLClientConfig = new JsonObject()
.put("url", "jdbc:mysql://localhost:3306/lrnr_dev_01?autoReconnect=true")
.put("driver_class", "com.mysql.jdbc.Driver")
.put("user", "vikranth").put("password", "<PASSWORD>")
.put("queryTimeout", 10000);
JDBCClient sqlClient = JDBCClient.createShared(vertx, mySQLClientConfig);
createSomeData();
// Create a router object.
Router router = Router.router(vertx);
// Create the HTTP server and pass the "accept" method to the request
// handler.
vertx.createHttpServer().requestHandler(router::accept).listen(
// Retrieve the port from the configuration,
// default to 8080.
config().getInteger("http.port", 8090), result -> {
if (result.succeeded()) {
fut.complete();
} else {
fut.fail(result.cause());
}
});
router.get("/api/whiskies").handler(this::getAll);
router.post("/api/whiskies").handler(this::addOne);
router.delete("/api/whiskies/:id").handler(this::deleteOne);
// Serve static resources from the /assets directory
router.route("/assets/*").handler(StaticHandler.create("assets"));
router.route().handler(BodyHandler.create());
// Bind "/" to our hello message - so we are still compatible.
router.route("/").handler(routingContext -> {
HttpServerResponse response = routingContext.response();
sqlClient.getConnection(res -> {
if (res.succeeded()) {
SQLConnection connection = res.result();
logger.info("got the connection ======>");
connection.query("SELECT * from credentials", result -> {
if (result.succeeded()) {
// Get the result set
ResultSet resultSet = result.result();
logger.info("got the result set ======>");
response.putHeader("content-type", "application/json").end(resultSet.toJson().toString());
} else {
// Failed!
logger.info("failed to execute the query ========>");
}
});
} else {
// Failed to get connection - deal with it
}
});
});
}
private void getAll(RoutingContext routingContext) {
routingContext.response().putHeader("content-type", "application/json; charset=utf-8")
.end(Json.encodePrettily(products.values()));
}
private void addOne(RoutingContext routingContext) {
final Whisky whisky = Json.decodeValue(routingContext.getBodyAsString(), Whisky.class);
products.put(whisky.getId(), whisky);
routingContext.response().setStatusCode(201).putHeader("content-type", "application/json; charset=utf-8")
.end(Json.encodePrettily(whisky));
}
private void deleteOne(RoutingContext routingContext) {
String id = routingContext.request().getParam("id");
if (id == null) {
routingContext.response().setStatusCode(400).end();
} else {
Integer idAsInteger = Integer.valueOf(id);
products.remove(idAsInteger);
}
routingContext.response().setStatusCode(204).end();
}
} |
<gh_stars>1-10
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
import common
from datadog_checks.cassandra_nodetool import CassandraNodetoolCheck
@pytest.mark.integration
def test_integration(aggregator, cassandra_cluster):
    """Run the Cassandra Nodetool check once and verify that both
    replication metrics are reported with the expected values and tags."""
    check = CassandraNodetoolCheck(common.CHECK_NAME, {}, {})
    check.check(common.CONFIG_INSTANCE)

    expected_tags = ['keyspace:test', 'datacenter:datacenter1', 'foo', 'bar']
    aggregator.assert_metric(
        'cassandra.nodetool.status.replication_availability', value=200, tags=expected_tags
    )
    aggregator.assert_metric(
        'cassandra.nodetool.status.replication_factor', value=2, tags=expected_tags
    )
|
#!/bin/sh
# Start the api0 release in the foreground, pointing Mnesia at the persistent
# data directory. The doubled quotes ('"..."') pass the path to the Erlang VM
# as a quoted Erlang string literal.
/deploy/api0/bin/api0 foreground -mnesia dir '"/deploy/data/api0"'
def parse_config(config_file_path: str) -> dict:
    """Parse a ``key = value`` config file into a dict.

    Values are coerced: ``true``/``false`` (any case) become booleans and
    integer literals (including negatives) become ``int``; everything else
    stays a string. Blank lines are skipped.

    Args:
        config_file_path: Path of the config file to read.

    Returns:
        Mapping of config keys to coerced values.

    Raises:
        ValueError: if a non-blank line does not contain ' = '.
    """
    config = {}
    with open(config_file_path, 'r') as file:
        for line in file:
            line = line.strip()
            if not line:
                # Tolerate blank lines (previously crashed on unpacking).
                continue
            # Split only on the FIRST delimiter so values may themselves
            # contain ' = ' (previously such lines raised ValueError).
            key, value = line.split(' = ', 1)
            if value.lower() == 'true':
                value = True
            elif value.lower() == 'false':
                value = False
            elif value.isdigit() or (value.startswith('-') and value[1:].isdigit()):
                # Accept negative integers too (isdigit() alone rejects '-7').
                value = int(value)
            config[key] = value
    return config
<filename>lib/osa/util/constants.rb
# frozen_string_literal: true
# OAuth2 constants for the Outlook Spam Automator's Microsoft Graph login.
module OSA
# Azure AD application (client) id used in the authorization request.
CLIENT_ID = 'befa4a9e-5d16-4a48-9792-4bd1d125abe8'
# Redirect target registered for the app; hosts the post-login landing page.
REDIRECT_URL = 'https://storage.googleapis.com/outlook-spam-automator/login.html'
# Graph permissions requested (mail + mailbox settings, plus offline_access
# so a refresh token is issued).
SCOPE = 'https://graph.microsoft.com/Mail.ReadWrite https://graph.microsoft.com/MailboxSettings.ReadWrite offline_access'
end
|
public class Account {
private int accountNumber;
private double currentBalance;
private ArrayList<Transaction> transactions;
public Account(int accountNumber) {
this.accountNumber = accountNumber;
this.currentBalance = 0;
this.transactions = new ArrayList<>();
}
public int getAccountNumber() {
return accountNumber;
}
public double getCurrentBalance() {
return currentBalance;
}
public void setCurrentBalance(double currentBalance) {
this.currentBalance = currentBalance;
}
public ArrayList<Transaction> getTransactions() {
return transactions;
}
public void addTransaction(Transaction transaction) {
transactions.add(transaction);
}
} |
import pandas as pd
class eQTLAnalyzer:
    """Holds SNP annotations and dosages for one gene, with rows kept in
    genomic-position order, and runs per-SNP eQTL tests against counts."""

    def __init__(self, annot, dosages, gene_name):
        # Sort annotation and dosage rows together by position so both
        # frames stay row-aligned and in coordinate order.
        order = annot['pos'].argsort()
        self.annot = annot.iloc[order, :]
        self.dosages = dosages.iloc[order, :]
        self.gene_name = gene_name

    def run_eQTL(self, count_matrix, covariates, extra_snps=None):
        """Apply the eQTL test to every dosage row and store the resulting
        p-values on ``self.pvalues``."""

        def eQTL_func(dosage_row, covariates, gene_counts):
            # Implement eQTL analysis function here
            # Return p-value for the given dosage row
            pass

        self.pvalues = self.dosages.apply(
            eQTL_func,
            axis=1,
            args=(covariates, count_matrix.loc[self.gene_name, :]),
        )
import React from 'react';
import ReactDOM from 'react-dom';
import { Provider } from 'react-redux';
import configureStore from 'src/redux/configureStore';
import { createAuthListener } from 'src/redux/modules/auth';
import Auth from 'src/Auth';
import ErrorBoundary from 'src/ErrorBoundary';
import { initAnalytics } from 'src/redux/utils/analytics';
import './styles/antd-theme.less';
import '@vgs/elemente/dist/elemente.esm.css';
import './styles/app.scss';
// Build the Redux store and keep auth state synchronized: the listener
// returned by createAuthListener reacts to store changes.
const store = configureStore();
store.subscribe(createAuthListener(store));
// Root element: an error boundary wrapping the redux-provided Auth app.
const Root = (
<ErrorBoundary>
<Provider store={store}>
<Auth />
</Provider>
</ErrorBoundary>
);
// Set up analytics before first render, then mount into #localhoste-app.
initAnalytics();
ReactDOM.render(Root, document.getElementById('localhoste-app'));
<filename>fractions/microprofile/microprofile-metrics/src/main/java/org/wildfly/swarm/microprofile/metrics/deployment/AMetricRegistryFactory.java
/*
* Copyright 2017 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.wildfly.swarm.microprofile.metrics.deployment;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import javax.annotation.PostConstruct;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Default;
import javax.enterprise.inject.Produces;
import javax.enterprise.inject.spi.InjectionPoint;
import javax.inject.Inject;
import javax.naming.InitialContext;
import javax.naming.NamingException;
import org.eclipse.microprofile.metrics.Counter;
import org.eclipse.microprofile.metrics.Gauge;
import org.eclipse.microprofile.metrics.Histogram;
import org.eclipse.microprofile.metrics.Metadata;
import org.eclipse.microprofile.metrics.Meter;
import org.eclipse.microprofile.metrics.MetricRegistry;
import org.eclipse.microprofile.metrics.MetricType;
import org.eclipse.microprofile.metrics.Timer;
import org.eclipse.microprofile.metrics.annotation.Metric;
import org.eclipse.microprofile.metrics.annotation.RegistryType;
import org.wildfly.swarm.microprofile.metrics.api.RegistryFactory;
/**
 * CDI producer for MicroProfile Metrics objects: exposes the application
 * {@link MetricRegistry} and produces Counter/Gauge/Histogram/Meter/Timer
 * instances for injection points, named via the injected MetricName helper.
 *
 * @author hrupp
 */
@ApplicationScoped
public class AMetricRegistryFactory {

    @Inject
    private MetricName metricName;

    // Registry cache keyed by scope; populated lazily in get().
    private ConcurrentMap<MetricRegistry.Type, MetricRegistry> registries;

    @PostConstruct
    void init() {
        registries = new ConcurrentHashMap<>();
    }

    /** Produces the application-scoped metric registry. */
    @Default
    @Produces
    @RegistryType(type = MetricRegistry.Type.APPLICATION)
    public MetricRegistry getApplicationRegistry() {
        return get(MetricRegistry.Type.APPLICATION);
    }

    @Produces
    private <T> Gauge<T> gauge(InjectionPoint ip) {
        // A forwarding Gauge must be returned as the Gauge creation happens when the declaring bean gets instantiated and the corresponding Gauge can be injected before which leads to producing a null value
        return new Gauge<T>() {
            @Override
            @SuppressWarnings("unchecked")
            public T getValue() {
                // TODO: better error report when the gauge doesn't exist
                return ((Gauge<T>) getApplicationRegistry().getGauges().get(metricName.of(ip))).getValue();
            }
        };
    }

    /** Produces (or reuses) the Counter registered for this injection point. */
    @Produces
    public Counter getCounter(InjectionPoint ip) {
        return getApplicationRegistry().counter(getMetadata(ip, MetricType.COUNTER));
    }

    /** Produces (or reuses) the Histogram registered for this injection point. */
    @Produces
    public Histogram getHistogram(InjectionPoint ip) {
        return getApplicationRegistry().histogram(getMetadata(ip, MetricType.HISTOGRAM));
    }

    /** Produces (or reuses) the Meter registered for this injection point. */
    @Produces
    public Meter getMeter(InjectionPoint ip) {
        return getApplicationRegistry().meter(getMetadata(ip, MetricType.METERED));
    }

    /** Produces (or reuses) the Timer registered for this injection point. */
    @Produces
    public Timer getTimer(InjectionPoint ip) {
        return getApplicationRegistry().timer(getMetadata(ip, MetricType.TIMER));
    }

    /**
     * Returns the registry for the given scope, creating it on first use via
     * the {@link RegistryFactory} bound at JNDI name "jboss/swarm/metrics".
     *
     * @throws IllegalStateException if the JNDI lookup fails
     */
    public MetricRegistry get(MetricRegistry.Type type) {
        return registries.computeIfAbsent(type, key -> {
            try {
                InitialContext context = new InitialContext();
                Object o = context.lookup("jboss/swarm/metrics");
                RegistryFactory factory = (RegistryFactory) o;
                return factory.get(type);
            } catch (NamingException e) {
                // BUGFIX: chain the NamingException so the underlying JNDI
                // failure remains diagnosable (it was previously discarded).
                throw new IllegalStateException("RegistryFactory not found", e);
            }
        });
    }

    /**
     * Builds Metadata for the injection point, merging in unit, description,
     * display name and tags from an optional @Metric annotation.
     */
    private Metadata getMetadata(InjectionPoint ip, MetricType type) {
        Metadata metadata = new Metadata(metricName.of(ip), type);
        Metric metric = ip.getAnnotated().getAnnotation(Metric.class);
        if (metric != null) {
            if (!metric.unit().isEmpty()) {
                metadata.setUnit(metric.unit());
            }
            if (!metric.description().isEmpty()) {
                metadata.setDescription(metric.description());
            }
            if (!metric.displayName().isEmpty()) {
                metadata.setDisplayName(metric.displayName());
            }
            if (metric.tags().length > 0) {
                for (String tag : metric.tags()) {
                    metadata.addTags(tag);
                }
            }
        }
        return metadata;
    }
}
|
package com.epam.reportportal.extension.azure;
import com.epam.reportportal.extension.CommonPluginCommand;
import com.epam.reportportal.extension.IntegrationGroupEnum;
import com.epam.reportportal.extension.PluginCommand;
import com.epam.reportportal.extension.ReportPortalExtensionPoint;
import com.epam.reportportal.extension.azure.command.binary.GetFileCommand;
import com.epam.reportportal.extension.azure.command.connection.TestConnectionCommand;
import com.epam.reportportal.extension.azure.entity.model.IntegrationParameters;
import com.epam.reportportal.extension.azure.event.launch.AzureStartLaunchEventListener;
import com.epam.reportportal.extension.azure.event.plugin.AzurePluginEventListener;
import com.epam.reportportal.extension.azure.event.plugin.PluginEventHandlerFactory;
import com.epam.reportportal.extension.azure.info.impl.PluginInfoProviderImpl;
import com.epam.reportportal.extension.azure.rest.client.ApiClient;
import com.epam.reportportal.extension.azure.rest.client.ApiException;
import com.epam.reportportal.extension.azure.rest.client.Configuration;
import com.epam.reportportal.extension.azure.rest.client.api.*;
import com.epam.reportportal.extension.azure.rest.client.auth.HttpBasicAuth;
import com.epam.reportportal.extension.azure.rest.client.model.AttachmentInfo;
import com.epam.reportportal.extension.azure.rest.client.model.AttachmentReference;
import com.epam.reportportal.extension.azure.rest.client.model.workitem.*;
import com.epam.reportportal.extension.azure.utils.MemoizingSupplier;
import com.epam.reportportal.extension.bugtracking.BtsConstants;
import com.epam.reportportal.extension.bugtracking.BtsExtension;
import com.epam.reportportal.extension.bugtracking.InternalTicketAssembler;
import com.epam.reportportal.extension.common.IntegrationTypeProperties;
import com.epam.reportportal.extension.event.PluginEvent;
import com.epam.reportportal.extension.event.StartLaunchEvent;
import com.epam.ta.reportportal.binary.impl.AttachmentDataStoreService;
import com.epam.ta.reportportal.dao.*;
import com.epam.ta.reportportal.entity.attachment.Attachment;
import com.epam.ta.reportportal.entity.integration.Integration;
import com.epam.ta.reportportal.entity.item.TestItem;
import com.epam.ta.reportportal.entity.log.Log;
import com.epam.ta.reportportal.exception.ReportPortalException;
import com.epam.ta.reportportal.filesystem.DataEncoder;
import com.epam.ta.reportportal.ws.model.ErrorType;
import com.epam.ta.reportportal.ws.model.externalsystem.AllowedValue;
import com.epam.ta.reportportal.ws.model.externalsystem.PostFormField;
import com.epam.ta.reportportal.ws.model.externalsystem.PostTicketRQ;
import com.epam.ta.reportportal.ws.model.externalsystem.Ticket;
import com.google.common.base.Suppliers;
import com.google.common.io.ByteStreams;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.tika.config.TikaConfig;
import org.apache.tika.mime.MimeType;
import org.apache.tika.mime.MimeTypeException;
import org.apache.tika.mime.MimeTypes;
import org.jasypt.util.text.BasicTextEncryptor;
import org.pf4j.Extension;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationListener;
import org.springframework.context.event.ApplicationEventMulticaster;
import org.springframework.context.support.AbstractApplicationContext;
import org.springframework.core.io.FileSystemResource;
import org.springframework.jdbc.datasource.init.ResourceDatabasePopulator;
import javax.annotation.PostConstruct;
import javax.sql.DataSource;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.epam.ta.reportportal.ws.model.ErrorType.UNABLE_INTERACT_WITH_INTEGRATION;
import static com.epam.ta.reportportal.ws.model.ErrorType.UNABLE_TO_LOAD_BINARY_DATA;
import static java.util.Optional.ofNullable;
/**
 * Azure DevOps bug-tracking-system (BTS) extension entry point for ReportPortal.
 * Registers plugin/launch event listeners, provisions the plugin DB schema and
 * implements ticket operations against the Azure DevOps REST API.
 *
 * @author <a href="mailto:<EMAIL>"><NAME></a>
 */
@Extension
public class AzureExtension implements ReportPortalExtensionPoint, DisposableBean, BtsExtension {
public static final Logger LOGGER = LoggerFactory.getLogger(AzureExtension.class);
// Identifier of the plugin's binary-data descriptor and of the schema scripts folder.
public static final String BINARY_DATA_PROPERTIES_FILE_ID = "azure-binary-data.properties";
public static final String SCHEMA_SCRIPTS_DIR = "schema";
// Keys of the parameters stored on each project integration.
public static final String URL = "url";
public static final String PROJECT = "project";
public static final String OAUTH_ACCESS_KEY = "oauthAccessKey";
private static final String PLUGIN_ID = "Azure DevOps";
// Azure DevOps REST API version and the $expand value used when listing fields.
private static final String API_VERSION = "6.0";
private static final String EXPAND = "All";
// Classification-node structure types returned by the Azure API.
private static final String AREA = "area";
private static final String ITERATION = "iteration";
// HTML fragments composed into the created work item's description.
private static final String BACK_LINK_HEADER = "<h3><i>Backlink to Report Portal:</i></h3>";
private static final String BACK_LINK_PATTERN = "<a href=\"%s\">Link to defect</a>";
private static final String COMMENTS_HEADER = "<h3><i>Test Item comments:</i></h3>";
private static final String LOGS_HEADER = "<h3><i>Test execution logs:</i></h3>";
private static final String IMAGE_CONTENT = "image";
// Name of the HttpBasicAuth scheme configured on the generated ApiClient.
private static final String AUTH_NAME = "accessToken";
// Maximum classification-node tree depth requested from Azure.
private static final Integer DEPTH = 15;
private final String resourcesDir;
// Command registry, built lazily on first use.
private final Supplier<Map<String, PluginCommand<?>>> pluginCommandMapping = new MemoizingSupplier<>(this::getCommands);
private final Supplier<ApplicationListener<PluginEvent>> pluginLoadedListenerSupplier;
private final Supplier<ApplicationListener<StartLaunchEvent>> startLaunchEventListenerSupplier;
private final MimeTypes mimeRepository;
@Autowired
private ApplicationContext applicationContext;
@Autowired
private DataSource dataSource;
@Autowired
private IntegrationTypeRepository integrationTypeRepository;
@Autowired
private IntegrationRepository integrationRepository;
@Autowired
private LaunchRepository launchRepository;
@Autowired
private TestItemRepository itemRepository;
@Autowired
private AttachmentDataStoreService attachmentDataStoreService;
@Autowired
private DataEncoder dataEncoder;
@Autowired
private BasicTextEncryptor basicTextEncryptor;
@Autowired
private LogRepository logRepository;
private WorkItemsApi workItemsApi;
private WorkItemTypesApi workItemTypesApi;
private FieldsApi fieldsApi;
private WorkItemTypesFieldApi workItemTypesFieldApi;
private ClassificationNodesApi classificationNodesApi;
// Per-request state populated by initFields(Integration) before each BTS call.
// NOTE(review): this instance-level mutable state looks unsafe under concurrent
// requests — confirm the extension is never invoked from parallel threads.
private IntegrationParameters params;
private ApiClient defaultClient;
private String organizationName;
// NOTE(review): ticketAssembler appears unreferenced in this file — confirm it
// is unused before removing.
private Supplier<InternalTicketAssembler> ticketAssembler = Suppliers.memoize(() -> new InternalTicketAssembler(logRepository,
itemRepository,
attachmentDataStoreService,
dataEncoder
));
/**
 * Creates the extension with the init params supplied by the plugin loader.
 * The event listeners are wrapped in memoizing suppliers so that the
 * {@code @Autowired} repositories they capture are resolved lazily, after
 * Spring injection has completed.
 *
 * @param initParams plugin initialization properties (resources directory, ...)
 */
public AzureExtension(Map<String, Object> initParams) {
    resourcesDir = IntegrationTypeProperties.RESOURCES_DIRECTORY.getValue(initParams).map(String::valueOf).orElse("");
    pluginLoadedListenerSupplier = new MemoizingSupplier<>(() -> new AzurePluginEventListener(PLUGIN_ID, new PluginEventHandlerFactory(
            integrationTypeRepository,
            integrationRepository,
            new PluginInfoProviderImpl(resourcesDir, BINARY_DATA_PROPERTIES_FILE_ID)
    )));
    startLaunchEventListenerSupplier = new MemoizingSupplier<>(() -> new AzureStartLaunchEventListener(launchRepository));
    // Removed a stray empty statement (';') that followed this assignment.
    mimeRepository = TikaConfig.getDefaultConfig().getMimeRepository();
}
/**
 * Accessors for the generated Azure REST clients. Each returns the injected
 * test double when one was set, otherwise a client bound to {@code defaultClient}.
 * Fixed: {@code orElse(new ...)} constructed a client eagerly on EVERY call even
 * when the field was non-null; {@code orElseGet} defers construction.
 */
public WorkItemTypesApi getWorkItemTypesApi() {
    return Optional.ofNullable(workItemTypesApi).orElseGet(() -> new WorkItemTypesApi(defaultClient));
}

public FieldsApi getFieldsApi() {
    return Optional.ofNullable(fieldsApi).orElseGet(() -> new FieldsApi(defaultClient));
}

public WorkItemTypesFieldApi getWorkItemTypesFieldApi() {
    return Optional.ofNullable(workItemTypesFieldApi).orElseGet(() -> new WorkItemTypesFieldApi(defaultClient));
}

public ClassificationNodesApi getClassificationNodesApi() {
    return Optional.ofNullable(classificationNodesApi).orElseGet(() -> new ClassificationNodesApi(defaultClient));
}

public WorkItemsApi getWorkItemsApi() {
    return Optional.ofNullable(workItemsApi).orElseGet(() -> new WorkItemsApi(defaultClient));
}
/**
 * Exposes plugin metadata to ReportPortal: the list of command names this
 * plugin is able to execute.
 */
@Override
public Map<String, ?> getPluginParams() {
    Map<String, Object> pluginParams = new HashMap<>();
    List<String> commandNames = new ArrayList<>(pluginCommandMapping.get().keySet());
    pluginParams.put(ALLOWED_COMMANDS, commandNames);
    return pluginParams;
}
/**
 * Common (integration-independent) commands are not provided by this plugin.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public CommonPluginCommand<?> getCommonCommand(String commandName) {
throw new UnsupportedOperationException("Not supported yet");
}
/**
 * Looks an integration command up in the lazily built command registry.
 *
 * @param commandName command identifier, e.g. "getFile" or "testConnection"
 * @return the command, or {@code null} when the name is unknown
 */
@Override
public PluginCommand<?> getIntegrationCommand(String commandName) {
    Map<String, PluginCommand<?>> registry = pluginCommandMapping.get();
    return registry.get(commandName);
}
/** This extension belongs to the bug-tracking-system integration group. */
@Override
public IntegrationGroupEnum getIntegrationGroup() {
return IntegrationGroupEnum.BTS;
}
/**
 * Post-construction hook: registers the plugin's event listeners and runs
 * the SQL schema scripts shipped with the plugin.
 *
 * @throws IOException when the schema scripts directory cannot be listed
 */
@PostConstruct
public void createIntegration() throws IOException {
initListeners();
initSchema();
}
/** Registers both plugin listeners on ReportPortal's shared event multicaster. */
private void initListeners() {
    ApplicationEventMulticaster multicaster = applicationContext.getBean(
            AbstractApplicationContext.APPLICATION_EVENT_MULTICASTER_BEAN_NAME,
            ApplicationEventMulticaster.class
    );
    multicaster.addApplicationListener(pluginLoadedListenerSupplier.get());
    multicaster.addApplicationListener(startLaunchEventListenerSupplier.get());
}
/**
 * Executes every SQL script found under {@code <resourcesDir>/schema} against
 * the application data source, in lexicographic file order.
 *
 * @throws IOException when the scripts directory cannot be listed
 */
private void initSchema() throws IOException {
    try (Stream<Path> scriptPaths = Files.list(Paths.get(resourcesDir, SCHEMA_SCRIPTS_DIR))) {
        FileSystemResource[] scripts = scriptPaths
                .sorted()
                .map(FileSystemResource::new)
                .toArray(FileSystemResource[]::new);
        new ResourceDatabasePopulator(scripts).execute(dataSource);
    }
}
/** Spring shutdown hook: unregisters this plugin's event listeners. */
@Override
public void destroy() {
removeListeners();
}
/** Unregisters both plugin listeners from the shared event multicaster. */
private void removeListeners() {
    ApplicationEventMulticaster multicaster = applicationContext.getBean(
            AbstractApplicationContext.APPLICATION_EVENT_MULTICASTER_BEAN_NAME,
            ApplicationEventMulticaster.class
    );
    multicaster.removeApplicationListener(pluginLoadedListenerSupplier.get());
    multicaster.removeApplicationListener(startLaunchEventListenerSupplier.get());
}
/** Builds the registry of integration commands exposed by this plugin. */
private Map<String, PluginCommand<?>> getCommands() {
    Map<String, PluginCommand<?>> commands = new HashMap<>();
    commands.put("testConnection", new TestConnectionCommand(basicTextEncryptor));
    commands.put("getFile", new GetFileCommand(resourcesDir, BINARY_DATA_PROPERTIES_FILE_ID));
    return commands;
}
/**
 * Legacy BTS hook; the connection check is performed by the "testConnection"
 * plugin command instead, so this implementation always reports failure.
 */
@Override
// Never called method. Connection is tested via the command.
public boolean testConnection(Integration integration) {
return false;
}
/**
 * Loads a single Azure work item by id and converts it to an RP ticket.
 * API errors are logged and mapped to {@link Optional#empty()} rather than
 * propagated, so a missing/unreachable ticket does not fail the caller.
 *
 * @param id work item id (numeric string)
 */
@Override
public Optional<Ticket> getTicket(String id, Integration integration) {
// Resolves credentials, API client and organization name for this call.
initFields(integration);
WorkItemsApi workItemsApi = getWorkItemsApi();
try {
WorkItem workItem = workItemsApi.workItemsGetWorkItem(organizationName,
Integer.valueOf(id),
params.getProjectName(),
API_VERSION,
null,
null,
null
);
return Optional.of(convertWorkItemToTicket(workItem));
} catch (ApiException e) {
LOGGER.error("Unable to load ticket: " + e.getMessage(), e);
return Optional.empty();
}
}
/**
 * Creates an Azure DevOps work item from the post-ticket request.
 * Flow: upload log attachments first (their URLs get embedded into the
 * description), translate the form fields into a JSON-patch document, create
 * the work item, then — if attachments exist — issue a second patch linking
 * them as "AttachedFile" relations (relations require an existing item).
 *
 * @throws ReportPortalException when the Azure API rejects either call
 */
@Override
public Ticket submitTicket(PostTicketRQ ticketRQ, Integration integration) {
initFields(integration);
List<AttachmentInfo> attachmentsURL = new ArrayList<>();
List<JsonPatchOperation> patchOperationList = new ArrayList<>();
// Upload attachments of every back-linked test item; fills attachmentsURL.
ticketRQ.getBackLinks().keySet().forEach(backLinkId -> uploadAttachmentToAzure(ticketRQ, attachmentsURL, backLinkId));
String issueType = null;
List<PostFormField> fields = ticketRQ.getFields();
// Builds the patch document and extracts the synthetic "issuetype" field.
issueType = getPatchOperationsForFields(ticketRQ, patchOperationList, issueType, fields, attachmentsURL);
WorkItemsApi workItemsApi = getWorkItemsApi();
WorkItem workItem = null;
List<JsonPatchOperation> patchOperationsForAttachment = new ArrayList<>();
try {
workItem = workItemsApi.workItemsCreate(organizationName,
patchOperationList,
params.getProjectName(),
issueType,
API_VERSION,
null,
null,
null,
null
);
if (!attachmentsURL.isEmpty()) {
// Second call: relate the previously uploaded files to the new item.
getPatchOperationsForAttachments(patchOperationsForAttachment, attachmentsURL);
workItemsApi.workItemsUpdate(organizationName,
patchOperationsForAttachment,
workItem.getId(),
params.getProjectName(),
API_VERSION,
null,
null,
null,
null
);
}
return convertWorkItemToTicket(workItem);
} catch (ApiException e) {
LOGGER.error("Unable to post issue: " + e.getMessage(), e);
throw new ReportPortalException(ErrorType.UNABLE_INTERACT_WITH_INTEGRATION,
String.format("Unable to post issue. Code: %s, Message: %s log - ", e.getCode(), e.getMessage()),
e
);
}
}
/**
 * Translates RP form fields into JSON-patch "add" operations for work item
 * creation. Special cases: Area/Iteration display names are mapped to node
 * ids, the synthetic "issuetype" field is extracted (not patched), and the
 * description field is merged with test-item details (logs, comments,
 * back link) before being appended last.
 *
 * Fixed: the Area/Iteration lookup used {@code findFirst().get()}, which threw
 * a bare NoSuchElementException when the submitted name matched no defined
 * value; it now fails with a descriptive ReportPortalException.
 *
 * @return the work item type taken from the "issuetype" field (or the passed-in value)
 */
private String getPatchOperationsForFields(PostTicketRQ ticketRQ, List<JsonPatchOperation> patchOperationList, String issueType,
List<PostFormField> fields, List<AttachmentInfo> attachmentsURL) {
    String valueDescription = "";
    String operation = "add";
    for (PostFormField field : fields) {
        String id = replaceSeparators(field.getId());
        String path = "/fields/" + id;
        String value;
        if (field.getId().equals("System_AreaId") || field.getId().equals("System_IterationId")) {
            // Area/Iteration arrive as display names; translate to the node id.
            String searchedValue = field.getValue().get(0);
            value = field.getDefinedValues()
                    .stream()
                    .filter(allowedValue -> allowedValue.getValueName().equals(searchedValue))
                    .findFirst()
                    .orElseThrow(() -> new ReportPortalException(UNABLE_INTERACT_WITH_INTEGRATION,
                            String.format("Value '%s' is not allowed for field '%s'", searchedValue, field.getId())))
                    .getValueId();
        } else if (field.getValue().isEmpty() && !field.getIsRequired()) {
            // Optional field left blank — nothing to patch.
            continue;
        } else {
            value = field.getValue().get(0);
        }
        if ("issuetype".equals(field.getId())) {
            issueType = value;
            continue;
        }
        if ("System.Description".equals(id)) {
            // Deferred: merged with generated test-item details below.
            valueDescription = value;
            continue;
        }
        patchOperationList.add(new JsonPatchOperation(null, operation, path, value));
    }
    valueDescription = valueDescription + getDescriptionFromTestItem(ticketRQ, attachmentsURL);
    String pathDescription = "/fields/System.Description";
    patchOperationList.add(new JsonPatchOperation(null, operation, pathDescription, valueDescription));
    return issueType;
}
/**
 * Appends one JSON-patch "add" operation on {@code /relations/-} per uploaded
 * attachment, relating it to the work item as an "AttachedFile".
 */
private void getPatchOperationsForAttachments(List<JsonPatchOperation> patchOperationsForAttachment,
List<AttachmentInfo> attachmentsURL) {
    for (AttachmentInfo uploaded : attachmentsURL) {
        Map<String, String> attributes = new HashMap<>();
        attributes.put("comment", "");
        Map<String, Object> relation = new HashMap<>();
        relation.put("rel", "AttachedFile");
        relation.put(URL, uploaded.getUrl());
        relation.put("attributes", attributes);
        patchOperationsForAttachment.add(new JsonPatchOperation(null, "add", "/relations/-", relation));
    }
}
/**
 * Builds the RP ticket form for the given Azure work item type: lists the
 * type's fields, drops read-only ones and the "Work Item Type" field, fills
 * allowed values (area/iteration nodes, states, enumerations) and returns
 * them sorted with a synthetic "issuetype" field on top.
 *
 * @throws ReportPortalException when the Azure API call fails
 */
@Override
public List<PostFormField> getTicketFields(String issueType, Integration integration) {
initFields(integration);
String projectName = params.getProjectName();
ClassificationNodesApi nodesApi = getClassificationNodesApi();
// Area/iteration trees are fetched once and reused for every field below.
Map<String, List<WorkItemClassificationNode>> classificationNodes = getClassificationNodes(nodesApi, organizationName, projectName);
List<WorkItemClassificationNode> areaNodes = classificationNodes.get(AREA);
List<WorkItemClassificationNode> iterationNodes = classificationNodes.get(ITERATION);
WorkItemTypesFieldApi issueTypeFieldsApi = getWorkItemTypesFieldApi();
FieldsApi fieldsApi = getFieldsApi();
List<PostFormField> ticketFields = new ArrayList<>();
try {
List<WorkItemTypeFieldWithReferences> issueTypeFields = issueTypeFieldsApi.workItemTypesFieldList(
organizationName,
projectName,
issueType,
API_VERSION,
EXPAND
);
for (WorkItemTypeFieldWithReferences field : issueTypeFields) {
// May be empty: some system fields answer 404 and are skipped.
Optional<WorkItemField> detailedFieldOptional = getFieldDetails(fieldsApi, organizationName, projectName, field);
detailedFieldOptional.filter(detailedField -> !detailedField.isReadOnly() && !detailedField.getName()
.equals("Work Item Type")).ifPresent(f -> {
List<AllowedValue> allowedValues = prepareAllowedValues(field, areaNodes, iterationNodes);
List<String> defaultValue = new ArrayList<>();
if (allowedValues.size() > 0) {
// Preselect the first allowed value when a choice list exists.
defaultValue.add(allowedValues.get(0).getValueName());
}
PostFormField postFormField = new PostFormField(replaceIllegalCharacters(field.getReferenceName()),
field.getName(),
f.getType().toString(),
field.isAlwaysRequired(),
defaultValue,
allowedValues
);
ticketFields.add(postFormField);
});
}
return sortTicketFields(ticketFields, issueType);
} catch (ApiException e) {
LOGGER.error("Unable to load ticket fields: " + e.getMessage(), e);
throw new ReportPortalException(ErrorType.UNABLE_INTERACT_WITH_INTEGRATION,
String.format("Unable to load ticket fields. Code: %s, Message: %s", e.getCode(), e.getMessage()),
e
);
}
}
/**
 * Lists the work item type names available in the integration's project.
 *
 * @throws ReportPortalException when the Azure API call fails
 */
@Override
public List<String> getIssueTypes(Integration integration) {
initFields(integration);
WorkItemTypesApi issueTypesApi = getWorkItemTypesApi();
try {
List<WorkItemType> issueTypes = issueTypesApi.workItemTypesList(organizationName, params.getProjectName(), API_VERSION);
return issueTypes.stream().map(WorkItemType::getName).collect(Collectors.toList());
} catch (ApiException e) {
LOGGER.error("Unable to load issue types: " + e.getMessage(), e);
throw new ReportPortalException(ErrorType.UNABLE_INTERACT_WITH_INTEGRATION,
String.format("Unable to load issue types. Code: %s, Message: %s", e.getCode(), e.getMessage()),
e
);
}
}
/**
 * Prepares instance state for an API call from the given integration:
 * reads the stored parameters, decrypts the personal access token,
 * configures the API client and derives the organization name.
 * Mutates {@code params}, {@code defaultClient} and {@code organizationName}.
 *
 * @throws ReportPortalException when the OAuth access key is missing
 */
private void initFields(Integration integration) {
params = getParams(integration);
// The token is stored encrypted; decrypt before configuring basic auth.
String personalAccessToken = basicTextEncryptor.decrypt(BtsConstants.OAUTH_ACCESS_KEY.getParam(
integration.getParams(),
String.class
).orElseThrow(() -> new ReportPortalException(UNABLE_INTERACT_WITH_INTEGRATION, "OAUTH key cannot be NULL")));
defaultClient = getConfiguredApiClient(personalAccessToken);
organizationName = extractOrganizationNameFromUrl(defaultClient, params.getOrganizationUrl());
}
/**
 * Reads the BTS parameters (organization url, project, access token) stored
 * on the integration.
 * Fixed: a missing parameter previously caused a bare NPE at
 * {@code .toString()}; it now fails with a descriptive error.
 *
 * @throws ReportPortalException when a mandatory parameter is absent
 */
private IntegrationParameters getParams(Integration integration) {
    IntegrationParameters result = new IntegrationParameters();
    Map<String, Object> params = integration.getParams().getParams();
    result.setOrganizationUrl(requiredParam(params, URL));
    result.setProjectName(requiredParam(params, PROJECT));
    result.setPersonalAccessToken(requiredParam(params, OAUTH_ACCESS_KEY));
    return result;
}

// Returns the string value of a mandatory integration parameter, or fails
// with a clear message instead of a NullPointerException.
private String requiredParam(Map<String, Object> params, String name) {
    return ofNullable(params.get(name)).map(Object::toString)
            .orElseThrow(() -> new ReportPortalException(UNABLE_INTERACT_WITH_INTEGRATION,
                    String.format("Integration parameter '%s' is not specified", name)));
}
/**
 * Configures basic auth on the generated API client with the decrypted
 * personal access token.
 * NOTE(review): this mutates the globally shared client returned by
 * Configuration.getDefaultApiClient(), so concurrent integrations would
 * overwrite each other's credentials — confirm this is acceptable.
 */
private ApiClient getConfiguredApiClient(String personalAccessToken) {
ApiClient defaultClient = Configuration.getDefaultApiClient();
HttpBasicAuth basicAuth = (HttpBasicAuth) defaultClient.getAuthentication(AUTH_NAME);
basicAuth.setPassword(personalAccessToken);
return defaultClient;
}
/**
 * Derives the organization name by stripping the client's base path from the
 * configured organization URL (e.g. "https://dev.azure.com/" + "myorg" -> "myorg").
 * Note: String.replace removes every occurrence of the base path, not only a prefix.
 */
private String extractOrganizationNameFromUrl(ApiClient client, String organizationUrl) {
return organizationUrl.replace(client.getBasePath(), "");
}
/**
 * Maps an Azure work item to an RP ticket. The human-facing edit URL is
 * derived from the API URL by dropping the "apis/wit/" segment and the
 * trailing id, then appending "edit/&lt;id&gt;".
 * NOTE(review): assumes "System.State" and "System.Title" are always present
 * in the returned fields map — confirm (a missing key would NPE here).
 */
private Ticket convertWorkItemToTicket(WorkItem workItem) {
Ticket ticket = new Ticket();
String ticketId = workItem.getId().toString();
String ticketUrl =
workItem.getUrl().substring(0, workItem.getUrl().lastIndexOf(ticketId)).replace("apis/wit/", "") + "edit/" + ticketId;
ticket.setId(ticketId);
ticket.setTicketUrl(ticketUrl);
ticket.setStatus(workItem.getFields().get("System.State").toString());
ticket.setSummary(workItem.getFields().get("System.Title").toString());
return ticket;
}
/**
 * Flattens a classification-node tree into a list via pre-order traversal:
 * the node itself first, then all of its descendants recursively.
 */
private List<WorkItemClassificationNode> extractNestedNodes(WorkItemClassificationNode node) {
    List<WorkItemClassificationNode> flattened = new ArrayList<>();
    flattened.add(node);
    if (node.isHasChildren()) {
        node.getChildren().forEach(child -> flattened.addAll(extractNestedNodes(child)));
    }
    return flattened;
}
/**
 * Fetches the project's root classification nodes (up to {@code DEPTH} levels)
 * and returns them flattened, grouped by structure type under the keys
 * {@code AREA} and {@code ITERATION}. Missing types map to empty lists.
 *
 * @throws ReportPortalException when the Azure API call fails
 */
private Map<String, List<WorkItemClassificationNode>> getClassificationNodes(ClassificationNodesApi nodesApi, String organizationName,
String projectName) {
List<WorkItemClassificationNode> areaNodes = new ArrayList<>();
List<WorkItemClassificationNode> iterationNodes = new ArrayList<>();
Map<String, List<WorkItemClassificationNode>> nodesGroupedByType = new HashMap<>();
try {
List<WorkItemClassificationNode> nodes = nodesApi.classificationNodesGetRootNodes(
organizationName,
projectName,
API_VERSION,
DEPTH
);
for (WorkItemClassificationNode node : nodes) {
// Each root node carries its whole subtree; flatten it per type.
if (node.getStructureType().equals(AREA)) {
areaNodes = extractNestedNodes(node);
} else if (node.getStructureType().equals(ITERATION)) {
iterationNodes = extractNestedNodes(node);
}
}
nodesGroupedByType.put(AREA, areaNodes);
nodesGroupedByType.put(ITERATION, iterationNodes);
return nodesGroupedByType;
} catch (ApiException e) {
LOGGER.error("Unable to load classification nodes: " + e.getMessage(), e);
throw new ReportPortalException(ErrorType.UNABLE_INTERACT_WITH_INTEGRATION,
String.format("Unable to load classification nodes. Code: %s, Message: %s", e.getCode(), e.getMessage()),
e
);
}
}
/**
 * Loads the detailed definition of a work item field.
 *
 * @return the field details, or empty when Azure answers 404 (some special
 *         fields do; callers simply skip those)
 * @throws ApiException for any other API error
 */
private Optional<WorkItemField> getFieldDetails(FieldsApi fieldsApi, String organizationName, String projectName,
WorkItemTypeFieldWithReferences field) throws ApiException {
    try {
        WorkItemField details = fieldsApi.fieldsGet(organizationName, field.getReferenceName(), projectName, API_VERSION);
        return Optional.ofNullable(details);
    } catch (ApiException e) {
        if (e.getCode() != 404) {
            throw e;
        }
        // Some special fields return 404 on request; treat them as absent.
        return Optional.empty();
    }
}
/**
 * Builds the choice list for a form field: iteration/area fields get the
 * flattened classification nodes (id + name), "State" gets only its default
 * value, and any other field gets its declared allowed values. Ids are
 * sanitized via replaceIllegalCharacters.
 * NOTE(review): assumes getDefaultValue()/getAllowedValues() are non-null for
 * the respective branches — confirm against the generated client.
 */
private List<AllowedValue> prepareAllowedValues(WorkItemTypeFieldWithReferences field, List<WorkItemClassificationNode> areaNodes,
List<WorkItemClassificationNode> iterationNodes) {
List<AllowedValue> allowed = new ArrayList<>();
switch (field.getName()) {
case "Iteration ID":
for (WorkItemClassificationNode node : iterationNodes) {
allowed.add(new AllowedValue(node.getId().toString(), node.getName()));
}
break;
case "Area ID":
for (WorkItemClassificationNode node : areaNodes) {
allowed.add(new AllowedValue(node.getId().toString(), node.getName()));
}
break;
case "State":
// New work items can only be created in the default state.
String defaultValue = field.getDefaultValue().toString();
allowed.add(new AllowedValue(replaceIllegalCharacters(defaultValue), defaultValue));
break;
default:
for (Object value : field.getAllowedValues()) {
allowed.add(new AllowedValue(replaceIllegalCharacters(value.toString()), value.toString()));
}
break;
}
return allowed;
}
// ID values may not contain spaces or dots, so both are mapped to underscores.
// NOTE: this mapping is lossy — spaces and dots become indistinguishable from
// pre-existing underscores.
private String replaceIllegalCharacters(String id) {
return id.replace(" ", "_").replace(".", "_");
}
// Maps sanitized ids back to Azure reference names by turning every underscore
// into a dot. NOTE(review): this is NOT a true inverse of
// replaceIllegalCharacters — spaces are not restored and underscores that were
// present originally also become dots; confirm field ids contain neither.
private String replaceSeparators(String id) {
return id.replace("_", ".");
}
/**
 * Orders form fields with required ones first, then alphabetically by display
 * name, and prepends a synthetic required "issuetype" field preselected with
 * the given work item type.
 */
private List<PostFormField> sortTicketFields(List<PostFormField> ticketFields, String issueType) {
List<PostFormField> sortedTicketFields = ticketFields.stream()
.sorted(Comparator.comparing(PostFormField::getIsRequired).reversed().thenComparing(PostFormField::getFieldName))
.collect(Collectors.toList());
// Add to the top a custom field representing the work item type
sortedTicketFields.add(
0,
new PostFormField("issuetype", "Issue Type", "issuetype", true, List.of(issueType), new ArrayList<AllowedValue>())
);
return sortedTicketFields;
}
/**
 * Composes the generated HTML part of the issue description (back link,
 * test-item comments, execution logs) for every back-linked test item.
 *
 * @throws ReportPortalException when the request's test item does not exist
 */
private String getDescriptionFromTestItem(PostTicketRQ ticketRQ, List<AttachmentInfo> attachmentsURL) {
StringBuilder descriptionBuilder = new StringBuilder();
TestItem item = itemRepository.findById(ticketRQ.getTestItemId())
.orElseThrow(() -> new ReportPortalException(ErrorType.TEST_ITEM_NOT_FOUND, ticketRQ.getTestItemId()));
ticketRQ.getBackLinks()
.keySet()
.forEach(backLinkId -> updateDescriptionBuilder(descriptionBuilder, ticketRQ, backLinkId, item, attachmentsURL));
return descriptionBuilder.toString();
}
/**
 * Appends one back-linked item's contribution to the description: the RP back
 * link, optionally the item's issue comment, and optionally its logs.
 */
private void updateDescriptionBuilder(StringBuilder descriptionBuilder, PostTicketRQ ticketRQ, Long backLinkId, TestItem item,
List<AttachmentInfo> attachmentsURL) {
    String backLink = ticketRQ.getBackLinks().get(backLinkId);
    boolean hasBackLink = StringUtils.isNotBlank(backLink);
    if (hasBackLink) {
        descriptionBuilder.append(BACK_LINK_HEADER).append(String.format(BACK_LINK_PATTERN, backLink));
    }
    // Add a comment to the issue description, if there is one in the test-item.
    if (ticketRQ.getIsIncludeComments() && hasBackLink) {
        ofNullable(item.getItemResults()).flatMap(result -> ofNullable(result.getIssue())).ifPresent(issue -> {
            if (StringUtils.isNotBlank(issue.getIssueDescription())) {
                descriptionBuilder.append(COMMENTS_HEADER).append(issue.getIssueDescription());
            }
        });
    }
    // Add logs to the issue description, if they are in the test-item.
    addLogsInfoToDescription(descriptionBuilder, backLinkId, ticketRQ, attachmentsURL);
}
/**
 * Appends up to {@code ticketRQ.getNumberOfLogs()} logs of the back-linked
 * test item to the description, honoring the include-logs/screenshots flags.
 * Nothing is added when the item, its launch, or its logs are absent.
 */
private void addLogsInfoToDescription(StringBuilder descriptionBuilder, Long backLinkId, PostTicketRQ ticketRQ,
List<AttachmentInfo> attachmentsURL) {
itemRepository.findById(backLinkId).ifPresent(item -> ofNullable(item.getLaunchId()).ifPresent(launchId -> {
List<Log> logs = logRepository.findAllUnderTestItemByLaunchIdAndTestItemIdsWithLimit(launchId,
Collections.singletonList(item.getItemId()),
ticketRQ.getNumberOfLogs()
);
// Header is emitted once, only when there is something to render.
if (CollectionUtils.isNotEmpty(logs) && (ticketRQ.getIsIncludeLogs() || ticketRQ.getIsIncludeScreenshots())) {
descriptionBuilder.append(LOGS_HEADER);
logs.forEach(log -> updateWithLog(descriptionBuilder,
log,
ticketRQ.getIsIncludeLogs(),
ticketRQ.getIsIncludeScreenshots(),
attachmentsURL
));
}
}));
}
/**
 * Renders a single log entry: its formatted message (when logs are included)
 * and its attachment (when screenshots are included and one exists).
 */
private void updateWithLog(StringBuilder descriptionBuilder, Log log, boolean includeLog, boolean includeScreenshot,
List<AttachmentInfo> attachmentsURL) {
    if (includeLog) {
        descriptionBuilder.append("<div><pre>").append(getFormattedMessage(log)).append("</pre></div>");
    }
    if (includeScreenshot) {
        Attachment attachment = log.getAttachment();
        if (attachment != null) {
            addAttachmentToDescription(descriptionBuilder, attachment, attachmentsURL);
        }
    }
}
/**
 * Appends an {@code <img>} (image content types) or an {@code <a>} link
 * (anything else) for the given attachment, resolved by file id against the
 * list of attachments previously uploaded to Azure.
 * Fixed: when no uploaded entry matched the file id, {@code attachmentInfo}
 * stayed null and the subsequent {@code getUrl()} threw an NPE; such
 * attachments are now skipped with a warning.
 */
private void addAttachmentToDescription(StringBuilder descriptionBuilder, Attachment attachment, List<AttachmentInfo> attachmentsURL) {
    if (StringUtils.isNotBlank(attachment.getContentType()) && StringUtils.isNotBlank(attachment.getFileId())) {
        AttachmentInfo attachmentInfo = null;
        for (AttachmentInfo info : attachmentsURL) {
            if (info.getFileId().equals(attachment.getFileId())) {
                attachmentInfo = info;
                break;
            }
        }
        if (attachmentInfo == null) {
            // Upload may have been skipped or failed earlier; do not NPE here.
            LOGGER.warn("No uploaded attachment found for file id {}, skipping", attachment.getFileId());
            return;
        }
        String url = attachmentInfo.getUrl();
        if (attachmentInfo.getContentType().contains(IMAGE_CONTENT)) {
            descriptionBuilder.append("Attachment:<br>")
                    .append("<img src=\"")
                    .append(url)
                    .append("\" alt=\"")
                    .append(attachmentInfo.getFileName())
                    .append("\">");
        } else {
            descriptionBuilder.append("Attachment - ")
                    .append("<a href=\"")
                    .append(url)
                    .append("\">")
                    .append(attachmentInfo.getFileName())
                    .append("</a>");
        }
    }
}
/**
 * Uploads every attachment of the back-linked test item's recent logs to
 * Azure and records the resulting URLs in {@code attachmentsURL} (consumed
 * later for description rendering and work item relations).
 *
 * @throws ReportPortalException when binary data cannot be loaded or the
 *         upload/mime resolution fails
 */
private void uploadAttachmentToAzure(PostTicketRQ ticketRQ, List<AttachmentInfo> attachmentsURL, Long backLinkId) {
List<Attachment> attachments = new ArrayList<>();
// Collect attachments from the same limited log set used for the description.
itemRepository.findById(backLinkId).ifPresent(item -> ofNullable(item.getLaunchId()).ifPresent(launchId -> {
List<Log> logs = logRepository.findAllUnderTestItemByLaunchIdAndTestItemIdsWithLimit(launchId,
Collections.singletonList(item.getItemId()),
ticketRQ.getNumberOfLogs()
);
logs.forEach(log -> ofNullable(log.getAttachment()).ifPresent(attachment -> attachments.add(attachment)));
}));
for (Attachment attachment : attachments) {
Optional<InputStream> fileOptional = attachmentDataStoreService.load(attachment.getFileId());
if (fileOptional.isPresent()) {
try (InputStream file = fileOptional.get()) {
// File name is the store id plus an extension derived from the content type.
MimeType mimeType = mimeRepository.forName(attachment.getContentType());
byte[] bytes = ByteStreams.toByteArray(file);
AttachmentsApi attachmentsApi = new AttachmentsApi(defaultClient);
String fileName = attachment.getFileId() + mimeType.getExtension();
AttachmentReference attachmentReference = attachmentsApi.attachmentsCreate(organizationName,
bytes,
params.getProjectName(),
API_VERSION,
fileName,
null,
null
);
attachmentsURL.add(new AttachmentInfo(fileName,
attachment.getFileId(),
attachmentReference.getUrl(),
attachment.getContentType()
));
} catch (IOException | ApiException | MimeTypeException e) {
LOGGER.error("Unable to post ticket : " + e.getMessage(), e);
throw new ReportPortalException(UNABLE_INTERACT_WITH_INTEGRATION, "Unable to post ticket: " + e.getMessage(), e);
}
} else {
throw new ReportPortalException(UNABLE_TO_LOAD_BINARY_DATA);
}
}
}
/**
 * Formats one log entry for the HTML description:
 * "Time: MM/dd/yyyy HH:mm:ss, Level: X, <br>Log: message", where the time and
 * level parts are emitted only when present.
 */
private String getFormattedMessage(Log log) {
    StringBuilder formatted = new StringBuilder();
    ofNullable(log.getLogTime())
            .map(time -> time.format(DateTimeFormatter.ofPattern("MM/dd/yyyy HH:mm:ss")))
            .ifPresent(timestamp -> formatted.append("Time: ").append(timestamp).append(", "));
    ofNullable(log.getLogLevel())
            .ifPresent(level -> formatted.append("Level: ").append(level).append(", "));
    return formatted.append("<br>").append("Log: ").append(log.getLogMessage()).toString();
}
}
|
# coding: utf-8
# ## Aim
#
# Analyse the signal drifts on the tuning tape over time: per-channel mean and
# median intensities are extracted from each IMC acquisition and plotted
# against the acquisition timepoint, one column of panels per day.
# In[95]:
from imctools.io import txtparser
import matplotlib.pyplot as plt
import os
import seaborn as sns
import pandas as pd
import numpy as np
# Notebook-export artifact: enables the interactive matplotlib backend.
get_ipython().magic('matplotlib notebook')
# Define the variables:
# The folder should contain the txt files from tunning tapes with 1 folder per day
# In[214]:
folder_name = '/mnt/bbvolume/hart_tma_data/20161025_tunning_tapes/'
out_folder = '/home/vitoz/Data/Analysis/20161026_tuning_tape'
# Load the txt files
# In[12]:
# One parser per acquisition txt file; scans every per-day sub-folder.
txt_parsers = [txtparser.TxtParser(os.path.join(folder_name, subfol, fn))
for subfol in os.listdir(folder_name) for fn in os.listdir(os.path.join(folder_name,subfol))]
# Following files were used:
# In[13]:
for p in txt_parsers:
    print(p.filename)
# In[14]:
imc_acs = [p.get_imc_acquisition() for p in txt_parsers]
# In[220]:
ac = imc_acs[0]
ac.original_file
# In[97]:
# Metadata per acquisition: timepoint is parsed from the 4th '_'-separated
# token of the file name, date from the containing folder name (YYYYMMDD).
# NOTE(review): rstrip('.txt') strips *characters* ('t','x','.'), not the
# suffix — harmless only if the 4th token is purely numeric; confirm.
# NOTE(review): 'original_filename' vs 'original_file' attribute names — the
# script uses both; confirm both exist on the acquisition object.
tuning_meta=(pd.DataFrame({'imc_acquisition': imc_acs})
.assign(timepoint=lambda x:
x['imc_acquisition'].map(lambda ac:
int(ac.original_filename.rstrip('.txt').split('_')[3])),
date=lambda x:
x['imc_acquisition'].map(lambda ac:
os.path.basename(os.path.dirname(ac.original_file))))
)
tuning_meta['date'] = pd.to_datetime(tuning_meta['date'],format="%Y%m%d")
# In[175]:
tuning_meta.index.name = 'img_index'
# In[187]:
# Per-channel summary statistics; one (channel, stat) value per acquisition.
stats =[('mean' ,np.mean),('median', np.median)]
tuning_dat = tuning_meta['imc_acquisition'].map(
lambda x: [stat(v) for i, v in enumerate(x.get_img_stack_cyx()) for name, stat in stats])
#tuning_dat = pd.concat([tuning_meta, tuning_dat], axis=1)
# Column order must match the comprehension above: channel outer, stat inner.
cols = pd.MultiIndex.from_tuples([(m, name)
for m in tuning_meta['imc_acquisition'][0].channel_metals for name, stat in stats],names = ['metal', 'stat'])
# NOTE(review): DataFrame.from_items was removed in pandas 1.0; this script
# requires the pandas version contemporary with it (pre-0.23 style API).
tuning_dat = pd.DataFrame.from_items(tuning_dat.items())
tuning_dat.index = cols
tuning_dat = tuning_dat.T
tuning_dat = tuning_dat.reorder_levels([1, 0], axis=1)
tuning_dat.index.name = 'img_index'
# Reshape to long format: one row per (img_index, metal, stat).
tuning_dat = tuning_dat.stack()
tuning_dat = pd.DataFrame({'value': tuning_dat.stack()})
# In[209]:
tuning_meta['imc_acquisition'].map(lambda x: x.shape)
# In[211]:
sns.set(context='notebook')
sns.set_style('darkgrid')
# Panel grid: metals as rows, days as columns; mean/median distinguished by hue.
g = sns.FacetGrid(data=tuning_dat.join(tuning_meta).reset_index(drop=False),
col='date', row='metal', sharex=True, sharey=False, margin_titles=True,hue='stat',)
g.map(plt.scatter,'timepoint', 'value')
# fix the axes of the plot: share a common (0, max) y-range within each row
for row in g.axes:
    # get the row max
    max_y = np.max([ax.get_ylim()[1] for ax in row])
    for ax in row:
        ax.set_ylim((0, max_y))
# In[215]:
g.savefig(os.path.join(out_folder,'20161026_tuningtapesvstime.pdf'))
# In[204]:
ax = g.axes[0][0]
ax.get_xlim()
# In[ ]:
|
#!/bin/bash
# Deploy the ingress-whitelist OPA policy: clean previous resources, recreate
# the configmap holding the Rego rules in the "opa" namespace and apply the
# namespace manifests.
set -eE
# Fixed typo ("occured"); report the failure on stderr.
trap 'echo "An error occurred, policy was not deployed" >&2' ERR
./cleanup.sh
# Suppress only the success chatter on stdout; keep stderr visible so a failed
# kubectl call still explains itself before the ERR trap fires.
kubectl create configmap ingress-whitelist --from-file=ingress-whitelist.rego -n opa > /dev/null
kubectl apply -f namespaces.yaml > /dev/null
echo "Policy deployed!!!"
#!/bin/bash
# Generic Colorize Functions
# Terminal color escape sequences resolved once via terminfo.
# Modernized: $(...) command substitution instead of legacy backticks.
RED="$(tput setaf 1)"
GREEN="$(tput setaf 2)"
YELLOW="$(tput setaf 3)"
BLUE="$(tput setaf 4)"
MAGENTA="$(tput setaf 5)"
CYAN="$(tput setaf 6)"
WHITE="$(tput setaf 7)"
RESET="$(tput sgr0)"
# colorize COLOR TEXT...
# Print TEXT wrapped in COLOR and $RESET without a trailing newline; when
# USE_COLORS=no, print the plain text instead.
# Fixed: the expansion was unquoted (echo -n ${c}${@}${RESET}), so arguments
# containing globs (e.g. '*') or runs of whitespace were mangled by pathname
# expansion and word splitting. All expansions are now quoted and printf is
# used instead of the non-portable echo -n.
colorize() {
  local color="$1"
  shift
  if [[ "$USE_COLORS" != "no" ]]; then
    printf '%s' "${color}${*}${RESET}"
  else
    printf '%s' "$*"
  fi
}
# Convenience wrappers: each forwards its arguments to colorize() with a fixed
# color from the palette above. POSIX-style definitions (no 'function' keyword).
green()   { colorize "${GREEN}"   "$@"; }
red()     { colorize "${RED}"     "$@"; }
yellow()  { colorize "${YELLOW}"  "$@"; }
blue()    { colorize "${BLUE}"    "$@"; }
magenta() { colorize "${MAGENTA}" "$@"; }
cyan()    { colorize "${CYAN}"    "$@"; }
white()   { colorize "${WHITE}"   "$@"; }
|
#!/bin/bash
# This file contains some utilities to test the elasticsearch scripts with
# the .deb/.rpm packages.
# WARNING: This testing file must be executed as root and can
# dramatically change your system. It should only be executed
# in a throw-away VM like those made by the Vagrantfile at
# the root of the Elasticsearch source code. This should
# cause the script to fail if it is executed any other way:
# Hard safety gate: the marker file only exists inside the project's Vagrant
# VMs, so running these destructive helpers anywhere else aborts immediately.
[ -f /etc/is_vagrant_vm ] || {
>&2 echo "must be run on a vagrant VM"
exit 1
}
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Export some useful paths.
# Publishes the standard package-install locations (home, plugins, config,
# data, logs, pid dir) as environment variables for the test suite. The
# package env file differs between deb (/etc/default) and rpm (/etc/sysconfig).
export_elasticsearch_paths() {
export ESHOME="/usr/share/elasticsearch"
export ESPLUGINS="$ESHOME/plugins"
export ESMODULES="$ESHOME/modules"
export ESCONFIG="/etc/elasticsearch"
export ESSCRIPTS="$ESCONFIG/scripts"
export ESDATA="/var/lib/elasticsearch"
export ESLOG="/var/log/elasticsearch"
export ESPIDDIR="/var/run/elasticsearch"
if is_dpkg; then
export ESENVFILE="/etc/default/elasticsearch"
fi
if is_rpm; then
export ESENVFILE="/etc/sysconfig/elasticsearch"
fi
}
# Install the rpm or deb package.
# -u upgrade rather than install. This only matters for rpm.
# -v the version to upgrade to. Defaults to the version under test.
install_package() {
  local version
  version=$(cat version)
  local rpmCommand='-i'
  # Bug fix: dpkgCommand previously leaked as an unset global, and OPTIND was
  # never reset, so a second call in the same shell parsed no options at all.
  local dpkgCommand=''
  local OPTIND=1 opt
  while getopts ":uv:" opt; do
    case $opt in
      u)
        rpmCommand='-U'
        dpkgCommand='--force-confnew'
        ;;
      v)
        version=$OPTARG
        ;;
      \?)
        echo "Invalid option: -$OPTARG" >&2
        ;;
    esac
  done
  if is_rpm; then
    rpm $rpmCommand elasticsearch-$version.rpm
  elif is_dpkg; then
    dpkg $dpkgCommand -i elasticsearch-$version.deb
  else
    skip "Only rpm or deb supported"
  fi
}
# Checks that all directories & files are correctly installed after a deb or
# rpm install.
verify_package_installation() {
  # The elasticsearch user and group must have been created by the package.
  id elasticsearch
  getent group elasticsearch

  # Layout, ownership and permissions of the installed tree.
  assert_file "$ESHOME" d root root 755
  assert_file "$ESHOME/bin" d root root 755
  assert_file "$ESHOME/bin/elasticsearch" f root root 755
  assert_file "$ESHOME/bin/elasticsearch-plugin" f root root 755
  assert_file "$ESHOME/bin/elasticsearch-translog" f root root 755
  assert_file "$ESHOME/lib" d root root 755
  assert_file "$ESCONFIG" d root elasticsearch 750
  assert_file "$ESCONFIG/elasticsearch.yml" f root elasticsearch 660
  assert_file "$ESCONFIG/jvm.options" f root elasticsearch 660
  assert_file "$ESCONFIG/log4j2.properties" f root elasticsearch 660
  assert_file "$ESSCRIPTS" d root elasticsearch 750
  assert_file "$ESDATA" d elasticsearch elasticsearch 750
  assert_file "$ESLOG" d elasticsearch elasticsearch 750
  assert_file "$ESPLUGINS" d root root 755
  assert_file "$ESMODULES" d root root 755
  assert_file "$ESPIDDIR" d elasticsearch elasticsearch 755
  assert_file "$ESHOME/NOTICE.txt" f root root 644
  assert_file "$ESHOME/README.textile" f root root 644

  if is_dpkg; then
    # Env file
    assert_file "/etc/default/elasticsearch" f root root 660
    # Doc files
    assert_file "/usr/share/doc/elasticsearch" d root root 755
    assert_file "/usr/share/doc/elasticsearch/copyright" f root root 644
  fi
  if is_rpm; then
    # Env file
    assert_file "/etc/sysconfig/elasticsearch" f root root 660
    # License file
    assert_file "/usr/share/elasticsearch/LICENSE.txt" f root root 644
  fi

  if is_systemd; then
    assert_file "/usr/lib/systemd/system/elasticsearch.service" f root root 644
    assert_file "/usr/lib/tmpfiles.d/elasticsearch.conf" f root root 644
    assert_file "/usr/lib/sysctl.d/elasticsearch.conf" f root root 644
    # The sysctl value shipped with the package must already be applied.
    if is_rpm; then
      [[ $(/usr/sbin/sysctl vm.max_map_count) =~ "vm.max_map_count = 262144" ]]
    else
      [[ $(/sbin/sysctl vm.max_map_count) =~ "vm.max_map_count = 262144" ]]
    fi
  fi

  if is_sysvinit; then
    assert_file "/etc/init.d/elasticsearch" f root root 750
  fi

  # The config file must NOT be readable by an arbitrary non-elasticsearch
  # user. `run`, `$status` and `$output` are bats primitives.
  run sudo -E -u vagrant LANG="en_US.UTF-8" cat "$ESCONFIG/elasticsearch.yml"
  [ $status = 1 ]
  [[ "$output" == *"Permission denied"* ]] || {
    echo "Expected permission denied but found $output:"
    false
  }
}
|
#!/bin/sh
# Sync this package's src/ and lib/ trees into <target_dir>/node_modules/coreds.
# Bug fix: the missing-argument path previously exited 0 (success) and wrote
# the error to stdout; it now exits 1 and reports on stderr.
if [ -z "$1" ]; then
  echo "1st arg (target_dir) is required." >&2
  exit 1
fi
rsync -avh --progress \
  --include 'src/***' --include 'lib/***' --exclude '*' \
  . "$1/node_modules/coreds"
|
import argparse

# Command-line front-end mirroring the options of the avaStarter program.
parser = argparse.ArgumentParser()

# Expected avaStarter options: (flag, extra kwargs, help text).
_OPTIONS = (
    ('-nodeList', {}, 'File containing the list of nodes'),
    ('-isController', {}, 'Indicates if the node is the controller'),
    ('-ipAddress', {}, 'IP address on which the program should listen'),
    ('-port', {'type': int}, 'Port on which the program should listen'),
)
for _flag, _kwargs, _help in _OPTIONS:
    parser.add_argument(_flag, help=_help, **_kwargs)

# Parse the command-line arguments.
args = parser.parse_args()

# Echo the parsed values in a human-readable form.
print(f"Node List: {args.nodeList}")
print(f"Is Controller: {args.isController}")
print(f"IP Address: {args.ipAddress}")
print(f"Port: {args.port}")
<filename>src/infra/database/models/batatinha/BatatinhaSchema.js
const { Schema } = require('mongoose');
module.exports = () => {
const batatinhaSchema = new Schema({
batatinha_header: {
type: String,
required: true
},
batatinha_id: {
type: String,
required: true,
},
batatinha_name: {
type: String,
required: true,
},
batatinha_email: {
type: String,
required: true,
}
});
return batatinhaSchema;
};
|
#!/bin/bash
# Bootstrap the dev environment: install the dev env file and start the stack.
# Bug fix: failures were previously ignored — a failed `cp` silently left a
# stale .env in place and docker-compose would still be started.
set -euo pipefail

cp -- .env.dev .env
docker-compose up -d
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import traceback
import streamlit as st
from streamlit import config
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)

# Extract the streamlit package path
_streamlit_dir = os.path.dirname(st.__file__)

# Make it absolute, resolve aliases, and ensure there's a trailing path
# separator
_streamlit_dir = os.path.join(os.path.realpath(_streamlit_dir), "")

# When client.showErrorDetails is False, we show a generic warning in the
# frontend when we encounter an uncaught app exception.
_GENERIC_UNCAUGHT_EXCEPTION_TEXT = (
    "Whoops — something went wrong! An error has been logged."
)


def handle_uncaught_app_exception(e: BaseException) -> None:
    """Handle an exception that originated from a user app.

    By default, we show exceptions directly in the browser. However,
    if the user has disabled client error details, we display a generic
    warning in the frontend instead.
    """
    if config.get_option("client.showErrorDetails"):
        # Details allowed: log the full traceback and render the exception
        # inside the app itself.
        LOGGER.warning(traceback.format_exc())
        st.exception(e)
        # TODO: Clean up the stack trace, so it doesn't include ScriptRunner.
    else:
        # Use LOGGER.error, rather than LOGGER.debug, since we don't
        # show debug logs by default.
        LOGGER.error("Uncaught app exception", exc_info=e)
        st.error(_GENERIC_UNCAUGHT_EXCEPTION_TEXT)


def _is_in_streamlit_package(file):
    """True if the given file is part of the streamlit package."""
    try:
        # commonprefix is a plain string-prefix check; because _streamlit_dir
        # ends with a path separator, equality means `file` lives under it.
        common_prefix = os.path.commonprefix([os.path.realpath(file), _streamlit_dir])
    except ValueError:
        # Raised if paths are on different drives.
        return False
    return common_prefix == _streamlit_dir
def get_nonstreamlit_traceback(extracted_tb):
    """Return the traceback frames that do not come from streamlit itself."""
    frames = []
    for entry in extracted_tb:
        if not _is_in_streamlit_package(entry.filename):
            frames.append(entry)
    return frames
|
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_db.sqlalchemy import enginefacade
from catena.chain_backends.ethereum import ethereum_api as chain_api
from catena.clouds.azure_api import Azure
from catena.clouds.openstack_api import OpenStack
from catena.common.utils import create_and_encrypt_sshkey
from catena.db.sqlalchemy import api as db_api
CONF = cfg.CONF

# Registry of supported cloud backends, keyed by the `type` string stored in
# the cloud DB records.
CLOUDS = {'openstack': OpenStack, 'azure': Azure}


def get_cloud_types():
    """Return the names of the supported cloud backends."""
    return CLOUDS.keys()


def get_clouds():
    """Return every configured cloud, reduced to its public fields."""
    context = db_api.get_context()
    clouds = db_api.get_clouds(context)
    result = [_cleanup_cloud(cloud) for cloud in clouds]
    return result
def _cleanup_cloud(cloud):
# This is a white-list because we have secrets (like ssh keys) in the db
# that shouldn't be exposed
return {
'cloud_config': cloud['cloud_config'],
'id': cloud['id'],
'name': cloud['name'],
'created_at': cloud['created_at'],
'updated_at': cloud['updated_at'],
}
def get_cloud_api_by_model(cloud):
    """Instantiate the backend API wrapper matching a cloud DB model's type."""
    return CLOUDS[cloud.type](cloud)


def get_cloud_api(cloud_id):
    """Load the cloud ``cloud_id`` and return its backend API wrapper."""
    context = db_api.get_context()
    cloud = db_api.get_cloud(context, cloud_id)
    return get_cloud_api_by_model(cloud)


def create_cloud(type, name, authentication, config):
    """Create and persist a new cloud record.

    NOTE(review): ``type`` and ``config`` shadow a builtin / module-level
    name; kept as-is for interface compatibility.
    """
    context = db_api.get_context()
    with enginefacade.writer.using(context):
        cloud = db_api.create_cloud(context, type)
        cloud.set_authentication(authentication)
        cloud.name = name
        cloud.set_cloud_config(config)
        cloud.save(context)
        return cloud
def create_node(blockchain_id, data):
    """Provision a new node for a blockchain.

    Boots the VM via the cloud backend, records it in the DB, then
    provisions the chain software on it through the chain backend.
    ``data`` must provide the 'flavour', 'name' and 'type' keys.
    """
    context = db_api.get_context()
    with enginefacade.writer.using(context):
        blockchain = db_api.get_chain(context, blockchain_id)
        controller_node = db_api.get_controller_node(context, blockchain)
        cloud_api = get_cloud_api_by_model(blockchain.cloud)
        # One ssh keypair per node; only the encrypted private key is stored.
        public_key, encrypted_key = create_and_encrypt_sshkey()
        id, ip = cloud_api.add_node(public_key, data['flavour'], data['name'],
                                    blockchain)
        node = db_api.create_node(context, id, blockchain, ip, encrypted_key,
                                  data['type'], data['name'])
        node_id = chain_api.provision_node(blockchain, node,
                                           blockchain.get_cloud_config()[
                                               'jumpbox_ip'],
                                           controller_node.ip)
        # NOTE(review): assumes provision_node wraps the ethereum node id in
        # double quotes — confirm against the backend's actual output format.
        chain_config = {'eth_node_id': node_id.split('"')[1]}
        node.set_chain_config(chain_config)
        node.save(context)
        return node


def delete_node(blockchain_id, node_id):
    """Delete a non-controller node (VM and DB record) from a blockchain.

    NOTE(review): controller nodes are left untouched — presumably they can
    only go away with the whole chain; confirm the intended behavior.
    """
    context = db_api.get_context()
    with enginefacade.writer.using(context):
        blockchain = db_api.get_chain(context, blockchain_id)
        node = db_api.get_node(context, blockchain, node_id)
        if node.type != 'controller':
            cloud_api = get_cloud_api(blockchain.cloud_id)
            cloud_api.delete_node(blockchain, node.id)
            node.delete(context)


def get_nodes(blockchain_id):
    """List a blockchain's nodes, reduced to their public fields."""
    context = db_api.get_context()
    with enginefacade.reader.using(context):
        blockchain = db_api.get_chain(context, blockchain_id)
        result = [_cleanup_node_data(node) for node in blockchain.nodes]
        return result


def get_node(blockchain_id, node_id):
    """Fetch one node of a blockchain, reduced to its public fields."""
    context = db_api.get_context()
    with enginefacade.reader.using(context):
        blockchain = db_api.get_chain(context, blockchain_id)
        node = db_api.get_node(context, blockchain, node_id)
        return _cleanup_node_data(node)
def _cleanup_node_data(node):
# This is a white-list because we have secrets (like ssh keys) in the db
# that shouldn't be exposed
return {
'chain_config': node['chain_config'],
'ip': node['ip'],
'id': node['id'],
'name': node['name'],
'type': node['type'],
'chain_id': node['chain_id'],
'created_at': node['created_at'],
'updated_at': node['updated_at'],
}
def get_backend_info():
    """Expose the chain backend's (ethereum) info blob."""
    return chain_api.get_backend_info()


def create_chain(cloud_id, name, new_chain_config, new_cloud_config):
    """Create a new ethereum blockchain on the given cloud.

    Creates the chain record, initializes the chain and cloud state, then
    boots and provisions the controller node.
    """
    assert len(name) > 0, "Must specify a name"
    context = db_api.get_context()
    with enginefacade.writer.using(context):
        cloud = db_api.get_cloud(context, cloud_id)
        cloud_api = get_cloud_api_by_model(cloud)
        chain = db_api.create_chain(context, name, "ethereum", cloud,
                                    new_chain_config, new_cloud_config)
        chain_api.initialize_chain(chain)
        cloud_api.initialize_cloud(chain)
        chain.save(context)
        cloud_config = chain.get_cloud_config()
        chain_config = chain.get_chain_config()
        controller_name = chain.name + "_controller"
        # Controller gets its own ssh keypair; only the encrypted private
        # key is persisted.
        public_key, encrypted_key = create_and_encrypt_sshkey()
        id, ip = cloud_api.add_node(public_key,
                                    cloud_config['controller_flavour'],
                                    controller_name, chain)
        node = db_api.create_node(context, id=id, chain=chain, ip=ip,
                                  ssh_key=encrypted_key,
                                  name=chain.name + '_controller',
                                  type='controller')
        node_id = chain_api.provision_controller(chain, node,
                                                 cloud_config['jumpbox_ip'])
        # NOTE(review): assumes provision_controller wraps the ethereum node
        # id in double quotes — confirm against the backend's output format.
        chain_config = {'eth_node_id': node_id.split('"')[1]}
        node.set_chain_config(chain_config)
        node.save(context)
        return chain
def get_chains():
    """List all chains, reduced to their public fields."""
    context = db_api.get_context()
    chains = db_api.get_chains(context)
    result = [_cleanup_chain_data(chain) for chain in chains]
    return result


def get_chain(chain_id):
    """Fetch a single chain, reduced to its public fields."""
    context = db_api.get_context()
    chain = db_api.get_chain(context, chain_id)
    return _cleanup_chain_data(chain)
def _cleanup_chain_data(chain):
# This is a white-list because we have secrets (like ssh keys) in the db
# that shouldn't be exposed
return {
'chain_backend': chain['chain_backend'],
'chain_config': chain['chain_config'],
'id': chain['id'],
'cloud_id': chain['cloud_id'],
'name': chain['name'],
'created_at': chain['created_at'],
'updated_at': chain['updated_at'],
}
def delete_chain(chain_id):
    """Tear down a blockchain: delete every node's VM, then the DB record."""
    context = db_api.get_context()
    with enginefacade.writer.using(context):
        chain = db_api.get_chain(context, chain_id)
        nodes = db_api.get_nodes(context, chain)
        cloud_api = get_cloud_api(chain.cloud_id)
        for node in nodes:
            cloud_api.delete_node(chain, node['id'])
        chain.delete(context)


def get_node_flavours(cloud_id):
    """List the VM flavours offered by the given cloud."""
    return get_cloud_api(cloud_id).get_node_flavours()


def get_networks(cloud_id):
    """List the networks available on the given cloud."""
    return get_cloud_api(cloud_id).get_networks()


def get_instances(cloud_id):
    """List the instances running on the given cloud."""
    return get_cloud_api(cloud_id).get_instances()
|
def find_all_occurrences(document, word):
    """Find every (possibly overlapping) occurrence of ``word`` in ``document``.

    Returns a list of ``(column, matched_text)`` tuples, scanning each line of
    ``document`` left to right. ``matched_text`` always equals ``word``.

    Bug fix: the original appended ``line[index]`` — a single character
    (always ``word[0]``) — instead of the matched word. An empty ``word`` now
    yields ``[]`` instead of one pseudo-match per character.
    """
    if not word:
        return []
    occurrences = []
    for line in document.splitlines():
        index = line.find(word)
        while index != -1:
            occurrences.append((index, line[index:index + len(word)]))
            # Advance by one so overlapping matches are still found.
            index = line.find(word, index + 1)
    return occurrences
<filename>go/arrow/memory/util.go
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memory
import "unsafe"
// roundToPowerOf2 rounds v up to the nearest multiple of round.
// round must be a positive power of two; values already at a multiple are
// returned unchanged. (The name is historical — it rounds to a multiple of a
// power of two, not to a power of two.)
func roundToPowerOf2(v, round int) int {
	forceCarry := round - 1
	truncateMask := ^forceCarry
	return (v + forceCarry) & truncateMask
}

// roundUpToMultipleOf64 rounds v up to the next multiple of 64, the
// allocation granularity used by this package.
func roundUpToMultipleOf64(v int) int {
	return roundToPowerOf2(v, 64)
}

// isMultipleOfPowerOf2 reports whether v is a multiple of d.
// d must be a power of two.
func isMultipleOfPowerOf2(v int, d int) bool {
	return (v & (d - 1)) == 0
}

// addressOf returns the address of the first byte of b.
// b must be non-empty: &b[0] panics on an empty slice.
func addressOf(b []byte) uintptr {
	return uintptr(unsafe.Pointer(&b[0]))
}
|
#!/usr/bin/env python3
import logging
import datetime
from PIL import Image
from pytesseract import image_to_string
from bs4 import BeautifulSoup
from urllib.request import urlopen, Request
from io import BytesIO
import re
# The arrow library is used to handle datetimes
import arrow
# The request library is used to fetch content through HTTP
import requests
from .JP import fetch_production as JP_fetch_production
# please try to write PEP8 compliant code (use a linter). One of PEP8's
# requirement is to limit your line length to 79 characters.
def fetch_production(zone_key='JP-KN', session=None,
                     target_datetime: datetime.datetime = None,
                     logger: logging.Logger = logging.getLogger(__name__)):
    """Add nuclear production on top of the solar data returned by the JP parser.

    Matches the latest solar datapoint with the scraped nuclear reading and
    fails when the two timestamps are more than 30 minutes apart.
    """
    # NOTE(review): `r` is unused — the original `session` is forwarded to the
    # JP parser below; confirm whether `r` was meant to be passed instead.
    r = session or requests.session()
    if target_datetime is not None:
        raise NotImplementedError('This parser can only fetch live data')
    JP_data = JP_fetch_production(zone_key, session, target_datetime, logger)
    nuclear_mw, nuclear_datetime = get_nuclear_production()
    # The latest solar datapoint is the most likely to match the live
    # nuclear reading.
    latest = JP_data[-1]
    diff = abs(nuclear_datetime - latest["datetime"])
    # Bug fix: timedelta.seconds ignores the days component, so a gap of
    # exactly N days plus < 30 minutes previously passed this check;
    # total_seconds() accounts for the whole gap.
    if diff.total_seconds() > 30 * 60:
        raise Exception("Difference between nuclear datetime and JP data is too large")
    latest["production"]["nuclear"] = nuclear_mw
    # The JP feed reports nuclear inside "unknown"; move it out.
    latest["production"]["unknown"] = latest["production"]["unknown"] - nuclear_mw
    return latest
URL = "https://www.kepco.co.jp/energy_supply/energy/nuclear_power/info/monitor/live_unten"
IMAGE_CORE_URL = "https://www.kepco.co.jp/"


def getImageText(imgUrl, lang):
    """Fetch the image at ``imgUrl``, crop it and OCR its text.

    ``lang`` is the tesseract language code (e.g. "eng", "jpn").
    """
    req = Request(imgUrl, headers={'User-Agent': 'Mozilla/5.0'})
    img_bytes = urlopen(req).read()
    img = Image.open(BytesIO(img_bytes))
    width, height = img.size
    # cropping the image, makes it easier to read for tesseract
    img = img.crop((0, (height / 8), 160, height))
    text = image_to_string(img, lang=lang)
    return text
def extractCapacity(tr):
    """Extract a unit's capacity in kW from a table row, or None.

    The capacity for each unit has the class "list03".
    and it uses the chinese symbol for 10k(万), e.g. "82.6万" -> 826000.0.
    If this changes, the method will become inaccurate.
    """
    td = tr.findAll("td", {"class":"list03"})
    if len(td) == 0:
        return None
    raw_text = td[0].getText()
    kw_energy = raw_text.split("万")[0]
    return float(kw_energy) * 10000
def extractOperationPercentage(tr):
    """Extract a unit's operating ratio (1.0 == 100%) from a table row.

    The operation percentage is located on images of type .gif; returns None
    when the row has no image, the image is not a gif, or the OCR output
    cannot be parsed.
    """
    td = tr.findAll("img")
    if len(td) == 0:
        return None
    img = td[0]
    img_url = IMAGE_CORE_URL + img["src"]
    if ".gif" not in img_url:
        return None
    text = getImageText(img_url, "eng")
    # OCR output is e.g. "104%" — sometimes with trailing noise ("104% 4...").
    split = text.split("%")
    # Bug fix: str.split always returns >= 1 element, so the old
    # `len(split) == 0` check was dead code and OCR output without a '%'
    # crashed float(). No '%' means the percentage could not be read.
    if len(split) < 2:
        return None
    try:
        return float(split[0]) / 100
    except ValueError:
        # OCR noise before the '%' sign — treat as unreadable.
        return None
def extractTime(soup):
    """Extract the measurement datetime from the page's time image.

    Time is located in an image. Deciphers the text containing the data and
    assumes there will only be 4 digit groups making up the datetime:
    month, day, hour, minute (local Asia/Tokyo time).
    """
    imgRelative = soup.findAll("img", {"class": "time-data"})[0]["src"]
    imgUrlFull = IMAGE_CORE_URL + imgRelative
    text = getImageText(imgUrlFull, "jpn")
    digits = re.findall(r'\d+', text)
    digits = list(map(lambda x: int(x), digits))
    if len(digits) != 4:
        # something went wrong while extracting time from Japan
        raise Exception("Something went wrong while extracting local time")
    # NOTE(review): replacing month/day on arrow.now() assumes the reading is
    # from the current year — around New Year this can be off by one year.
    nuclear_datetime = arrow.now(tz="Asia/Tokyo").replace(month=digits[0], day=digits[1], hour=digits[2], minute=digits[3]).floor("minute").datetime
    return nuclear_datetime
def get_nuclear_production():
    """Scrape KEPCO's live page and total the nuclear output of all units.

    Returns a ``(production_mw, measurement_datetime)`` tuple.
    Illogically, all the rows have the class "mihama_realtime", so every
    <tr> is scanned and rows lacking capacity/percentage data are skipped.
    """
    req = Request(URL, headers={'User-Agent': 'Mozilla/5.0'})
    html = urlopen(req).read()
    soup = BeautifulSoup(html, 'html.parser')
    nuclear_datetime = extractTime(soup)
    # Fix: dropped the unused `rows = soup.findAll("tr", {"class":
    # "mihama_realtime"})` lookup — every <tr> is inspected below anyway.
    tr_list = soup.findAll("tr")
    total_kw = 0
    for tr in tr_list:
        capacity = extractCapacity(tr)
        operation_percentage = extractOperationPercentage(tr)
        if capacity is None or operation_percentage is None:
            continue
        total_kw = total_kw + capacity * operation_percentage
    nuclear_mw = total_kw / 1000.0  # convert to MW
    return (nuclear_mw, nuclear_datetime)
# Entry point for manual testing only.
if __name__ == '__main__':
    """Main method, never used by the Electricity Map backend, but handy for testing."""
    print('fetch_production() ->')
    print(fetch_production())
|
package ma.ensias.ticket_me.adpater;
import android.content.Context;
import android.content.Intent;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.TextView;
import androidx.annotation.NonNull;
import androidx.recyclerview.widget.RecyclerView;
import java.util.List;
import ma.ensias.ticket_me.R;
import ma.ensias.ticket_me.activities.EventActivity;
import ma.ensias.ticket_me.Database.Event;
/**
 * RecyclerView adapter that renders the list of {@link Event} rows coming
 * from the local database and opens {@link EventActivity} for an event when
 * its explore button is tapped.
 */
public class AdapterDBEvent extends RecyclerView.Adapter<AdapterDBEvent.ViewHolderEvent> {

    private final List<Event> events;
    private final Context context;

    public AdapterDBEvent(List<Event> events, Context context)
    {
        this.events = events;
        this.context = context;
    }

    @NonNull
    @Override
    public AdapterDBEvent.ViewHolderEvent onCreateViewHolder(@NonNull ViewGroup parent, int viewType) {
        // Inflate one row layout per holder; data binding happens in
        // onBindViewHolder.
        LayoutInflater inflater = LayoutInflater.from(parent.getContext());
        View view = inflater.inflate(R.layout.event_item,parent,false);
        return new ViewHolderEvent(view);
    }

    @Override
    public void onBindViewHolder(@NonNull AdapterDBEvent.ViewHolderEvent holder, int position) {
        Event event = this.events.get(position);
        holder.name_event.setText(event.getName());
        // Open the event details screen, passing the event id along.
        holder.button_explore.setOnClickListener(v->{
            Intent intent = new Intent(holder.button_explore.getContext(), EventActivity.class);
            intent.putExtra("id_event",event.getId());
            holder.button_explore.getContext().startActivity(intent);
        });
    }

    @Override
    public int getItemCount() {
        return this.events.size();
    }

    /** Holds the per-row views: the event name label and the explore button. */
    public class ViewHolderEvent extends RecyclerView.ViewHolder {
        private TextView name_event;
        private ImageView button_explore;

        public ViewHolderEvent(final View itemView) {
            super(itemView);
            name_event = itemView.findViewById(R.id.name_of_event);
            button_explore = itemView.findViewById(R.id.button_explore_event);
        }
    }
}
def freq_table(arr):
    """Return a dict mapping each distinct element of ``arr`` to its frequency.

    Single pass, O(n). The previous implementation called ``arr.count`` once
    per distinct element inside the loop, which is accidentally O(n^2).
    """
    freq_dict = {}
    for item in arr:
        freq_dict[item] = freq_dict.get(item, 0) + 1
    return freq_dict
const {describe, it} = require('mocha');
const should = require('should');
const sinon = require('sinon');
const StripeAPIService = require('@tryghost/members-stripe-service');
const StripeWebhookService = require('../../../../lib/services/stripe-webhook');
const ProductRepository = require('../../../../lib/repositories/product');
const MemberRepository = require('../../../../lib/repositories/member');
/**
 * Build a sinon-stubbed instance of `Class` without invoking its
 * constructor: every prototype method becomes a stub.
 */
function mock(Class) {
    return sinon.stub(Object.create(Class.prototype));
}

describe('StripeWebhookService', function () {
    describe('invoice.payment_succeeded webhooks', function () {
        it('Should throw a 404 error when a member is not found for a valid Ghost Members invoice', async function () {
            const stripeWebhookService = new StripeWebhookService({
                stripeAPIService: mock(StripeAPIService),
                productRepository: mock(ProductRepository),
                memberRepository: mock(MemberRepository)
            });

            // The invoice's subscription resolves to a known Ghost product...
            stripeWebhookService._stripeAPIService.getSubscription.resolves({
                customer: 'customer_id',
                plan: {
                    product: 'product_id'
                }
            });

            // ...but no member exists for the Stripe customer.
            stripeWebhookService._memberRepository.get.resolves(null);

            stripeWebhookService._productRepository.get.resolves({
                id: 'product_id'
            });

            try {
                await stripeWebhookService.invoiceEvent({
                    subscription: 'sub_id'
                });
                should.fail();
            } catch (err) {
                should.equal(err.statusCode, 404);
            }
        });
    });
});
|
#! /bin/bash
###########################################
# Uninstall all private and public plugins by delegating to each tree's
# scripts/uninstall-all.sh.
###########################################

# constants
# Quoted expansions throughout; the old `cd \`dirname "$0"\`` broke on paths
# containing spaces.
baseDir=$(cd "$(dirname "$0")"; pwd)

# functions

# main
# Do nothing when sourced; only run when executed directly.
[ -z "${BASH_SOURCE[0]}" -o "${BASH_SOURCE[0]}" = "$0" ] || return

# Bug fix: a failed cd previously let the script run (and uninstall) from
# whatever the current directory happened to be.
cd "$baseDir/.." || exit 1

if [ -d ./private/plugins ]; then
  ./private/plugins/scripts/uninstall-all.sh
fi

if [ -d ./public/plugins ]; then
  ./public/plugins/scripts/uninstall-all.sh
fi
|
#!/bin/bash
# This script provides common script functions for the hacks
# Requires STI_ROOT to be set
set -o errexit
set -o nounset
set -o pipefail

# The root of the build/dist directory
STI_ROOT=$(
  unset CDPATH
  sti_root=$(dirname "${BASH_SOURCE}")/..
  cd "${sti_root}"
  pwd
)

# Output locations; STI_OUTPUT_SUBPATH may be overridden by the caller.
STI_OUTPUT_SUBPATH="${STI_OUTPUT_SUBPATH:-_output/local}"
STI_OUTPUT="${STI_ROOT}/${STI_OUTPUT_SUBPATH}"
STI_OUTPUT_BINPATH="${STI_OUTPUT}/bin"
STI_LOCAL_BINPATH="${STI_ROOT}/_output/local/go/bin"
STI_LOCAL_RELEASEPATH="${STI_ROOT}/_output/local/releases"

readonly STI_GO_PACKAGE=github.com/openshift/source-to-image
readonly STI_GOPATH="${STI_OUTPUT}/go"

# os/arch pairs that release builds cross-compile for.
readonly STI_CROSS_COMPILE_PLATFORMS=(
  linux/amd64
  darwin/amd64
  windows/amd64
)
readonly STI_CROSS_COMPILE_TARGETS=(
  cmd/s2i
)
readonly STI_CROSS_COMPILE_BINARIES=("${STI_CROSS_COMPILE_TARGETS[@]##*/}")

readonly STI_ALL_TARGETS=(
  "${STI_CROSS_COMPILE_TARGETS[@]}"
)

# Legacy `sti` aliases kept alongside the `s2i` binary.
readonly STI_BINARY_SYMLINKS=(
  sti
)
readonly STI_BINARY_COPY=(
  sti
)
# sti::build::binaries_from_targets take a list of build targets and return the
# full go package to be built
sti::build::binaries_from_targets() {
  local tgt
  for tgt in "$@"; do
    printf '%s/%s\n' "${STI_GO_PACKAGE}" "${tgt}"
  done
}
# Asks golang what it thinks the host platform is. The go tool chain does some
# slightly different things when the target platform matches the host platform.
sti::build::host_platform() {
  # Prints e.g. "linux/amd64".
  echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
}
# Build binaries targets specified
#
# Input:
#   $@ - targets and go flags. If no targets are set then all binaries targets
#     are built.
#   STI_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset
#     then just the host architecture is built.
sti::build::build_binaries() {
  # Create a sub-shell so that we don't pollute the outer environment
  (
    # Check for `go` binary and set ${GOPATH}.
    sti::build::setup_env

    # Fetch the version.
    local version_ldflags
    version_ldflags=$(sti::build::ldflags)

    # Use eval to preserve embedded quoted strings.
    local goflags
    eval "goflags=(${STI_GOFLAGS:-})"

    # Split arguments into go flags (leading dash) and build targets.
    local -a targets=()
    local arg
    for arg; do
      if [[ "${arg}" == -* ]]; then
        # Assume arguments starting with a dash are flags to pass to go.
        goflags+=("${arg}")
      else
        targets+=("${arg}")
      fi
    done

    if [[ ${#targets[@]} -eq 0 ]]; then
      targets=("${STI_ALL_TARGETS[@]}")
    fi

    # Default to the host platform when STI_BUILD_PLATFORMS is unset/empty.
    local -a platforms=("${STI_BUILD_PLATFORMS[@]:+${STI_BUILD_PLATFORMS[@]}}")
    if [[ ${#platforms[@]} -eq 0 ]]; then
      platforms=("$(sti::build::host_platform)")
    fi

    local binaries
    binaries=($(sti::build::binaries_from_targets "${targets[@]}"))

    # Cross-compile each requested platform in turn.
    local platform
    for platform in "${platforms[@]}"; do
      sti::build::set_platform_envs "${platform}"
      echo "++ Building go targets for ${platform}:" "${targets[@]}"
      go install "${goflags[@]:+${goflags[@]}}" \
        -ldflags "${version_ldflags}" \
        "${binaries[@]}"
      sti::build::unset_platform_envs "${platform}"
    done
  )
}
# Takes the platform name ($1) and sets the appropriate golang env variables
# for that platform.
sti::build::set_platform_envs() {
  [[ -n ${1-} ]] || {
    echo "!!! Internal error. No platform set in sti::build::set_platform_envs"
    exit 1
  }
  # Bug fix: previously this read the caller's ${platform} variable instead
  # of the validated $1 argument, working only by accident.
  local platform="$1"
  export GOOS="${platform%/*}"
  export GOARCH="${platform##*/}"
}
# Takes the platform name ($1) and resets the appropriate golang env variables
# for that platform.
sti::build::unset_platform_envs() {
  # The platform argument is accepted for symmetry with set_platform_envs,
  # but clearing the overrides does not need it.
  unset GOOS GOARCH
}
# Create the GOPATH tree under $STI_ROOT
sti::build::create_gopath_tree() {
  local go_pkg_dir="${STI_GOPATH}/src/${STI_GO_PACKAGE}"
  local go_pkg_basedir=$(dirname "${go_pkg_dir}")

  mkdir -p "${go_pkg_basedir}"
  # Replace any stale entry with a fresh symlink back into the working tree.
  rm -f "${go_pkg_dir}"
  # TODO: This symlink should be relative.
  ln -s "${STI_ROOT}" "${go_pkg_dir}"
}
# sti::build::setup_env will check that the `go` commands is available in
# ${PATH}. If not running on Travis, it will also check that the Go version is
# good enough for the Kubernetes build.
#
# Input Vars:
#   STI_EXTRA_GOPATH - If set, this is included in created GOPATH
#   STI_NO_GODEPS - If set, we don't add 'Godeps/_workspace' to GOPATH
#
# Output Vars:
#   export GOPATH - A modified GOPATH to our created tree along with extra
#     stuff.
#   export GOBIN - This is actively unset if already set as we want binaries
#     placed in a predictable place.
sti::build::setup_env() {
  sti::build::create_gopath_tree

  if ! command -v go >/dev/null 2>&1; then
    # Bug fix: this previously used `echo <<EOF`, which ignores the heredoc
    # entirely and printed a blank line instead of the error message.
    cat <<EOF >&2
Can't find 'go' in PATH, please fix and retry.
See http://golang.org/doc/install for installation instructions.
EOF
    exit 2
  fi

  GOPATH=${STI_GOPATH}

  # Append STI_EXTRA_GOPATH to the GOPATH if it is defined.
  if [[ -n ${STI_EXTRA_GOPATH:-} ]]; then
    GOPATH="${GOPATH}:${STI_EXTRA_GOPATH}"
  fi

  # Append the tree maintained by `godep` to the GOPATH unless STI_NO_GODEPS
  # is defined.
  if [[ -z ${STI_NO_GODEPS:-} ]]; then
    GOPATH="${GOPATH}:${STI_ROOT}/Godeps/_workspace"
  fi
  export GOPATH

  # Unset GOBIN in case it already exists in the current session.
  unset GOBIN
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
# place in ${STI_OUTPUT_BINDIR}
#
# If STI_RELEASE_ARCHIVE is set to a directory, it will have tar archives of
# each STI_RELEASE_PLATFORMS created
#
# Ideally this wouldn't be necessary and we could just set GOBIN to
# STI_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go
# install' will place binaries that match the host platform directly in $GOBIN
# while placing cross compiled binaries into `platform_arch` subdirs. This
# complicates pretty much everything else we do around packaging and such.
sti::build::place_bins() {
  (
    local host_platform
    host_platform=$(sti::build::host_platform)

    echo "++ Placing binaries"

    if [[ "${STI_RELEASE_ARCHIVE-}" != "" ]]; then
      sti::build::get_version_vars
      mkdir -p "${STI_LOCAL_RELEASEPATH}"
    fi

    # Bug fix: the fallback previously expanded to the literal string
    # "(host_platform)"; it now defaults to the actual host platform value.
    for platform in "${STI_RELEASE_PLATFORMS[@]-${host_platform}}"; do
      # The substitution on platform_src below will replace all slashes with
      # underscores. It'll transform darwin/amd64 -> darwin_amd64.
      local platform_src="/${platform//\//_}"
      if [[ $platform == $host_platform ]]; then
        platform_src=""
      fi

      # Skip this directory if the platform has no binaries.
      local full_binpath_src="${STI_GOPATH}/bin${platform_src}"
      if [[ ! -d "${full_binpath_src}" ]]; then
        continue
      fi

      mkdir -p "${STI_OUTPUT_BINPATH}/${platform}"

      # Create an array of binaries to release. Append .exe variants if the platform is windows.
      local -a binaries=()
      local binary
      for binary in "${STI_RELEASE_BINARIES[@]}"; do
        binaries+=("${binary}")
        if [[ $platform == "windows/amd64" ]]; then
          binaries+=("${binary}.exe")
        fi
      done

      # Copy only the specified release binaries to the shared STI_OUTPUT_BINPATH.
      local -a includes=()
      for binary in "${binaries[@]}"; do
        includes+=("--include=${binary}")
      done
      find "${full_binpath_src}" -maxdepth 1 -type f -exec \
        rsync "${includes[@]}" --exclude="*" -pt {} "${STI_OUTPUT_BINPATH}/${platform}" \;

      # If no release archive was requested, we're done.
      if [[ "${STI_RELEASE_ARCHIVE-}" == "" ]]; then
        continue
      fi

      # Create a temporary bin directory containing only the binaries marked for release.
      local release_binpath=$(mktemp -d sti.release.${STI_RELEASE_ARCHIVE}.XXX)
      find "${full_binpath_src}" -maxdepth 1 -type f -exec \
        rsync "${includes[@]}" --exclude="*" -pt {} "${release_binpath}" \;

      # Create binary copies where specified.
      local suffix=""
      if [[ $platform == "windows/amd64" ]]; then
        suffix=".exe"
      fi
      for linkname in "${STI_BINARY_COPY[@]}"; do
        local src="${release_binpath}/s2i${suffix}"
        if [[ -f "${src}" ]]; then
          cp "${release_binpath}/s2i${suffix}" "${release_binpath}/${linkname}${suffix}"
        fi
      done

      # Create the release archive.
      local platform_segment="${platform//\//-}"
      local archive_name="${STI_RELEASE_ARCHIVE}-${STI_GIT_VERSION}-${STI_GIT_COMMIT}-${platform_segment}.tar.gz"
      echo "++ Creating ${archive_name}"
      tar -czf "${STI_LOCAL_RELEASEPATH}/${archive_name}" -C "${release_binpath}" .
      rm -rf "${release_binpath}"
    done
  )
}
# sti::build::make_binary_symlinks makes symlinks for the sti
# binary in _output/local/go/bin
sti::build::make_binary_symlinks() {
  local host_platform
  host_platform=$(sti::build::host_platform)

  if [[ -f "${STI_LOCAL_BINPATH}/s2i" ]]; then
    for linkname in "${STI_BINARY_SYMLINKS[@]}"; do
      if [[ $host_platform == "windows/amd64" ]]; then
        # Windows has no symlinks; fall back to copying the .exe.
        cp "${STI_LOCAL_BINPATH}/s2i.exe" "${STI_LOCAL_BINPATH}/${linkname}.exe"
      else
        ln -sf "${STI_LOCAL_BINPATH}/s2i" "${STI_LOCAL_BINPATH}/${linkname}"
      fi
    done
  fi
}
# sti::build::detect_local_release_tars verifies there is only one primary and one
# image binaries release tar in STI_LOCAL_RELEASEPATH for the given platform specified by
# argument 1, exiting if more than one of either is found.
#
# If the tars are discovered, their full paths are exported to the following env vars:
#
#   STI_PRIMARY_RELEASE_TAR
sti::build::detect_local_release_tars() {
  local platform="$1"

  local primary
  # Bug fix: the -name pattern (and the cat path below) are now quoted;
  # previously the unquoted glob could be expanded by the shell against the
  # current directory before find ever saw it.
  primary=$(find "${STI_LOCAL_RELEASEPATH}" -maxdepth 1 -type f -name "source-to-image-*-${platform}-*")
  if [[ $(echo "${primary}" | wc -l) -ne 1 ]]; then
    echo "There should be exactly one ${platform} primary tar in $STI_LOCAL_RELEASEPATH"
    exit 2
  fi

  export STI_PRIMARY_RELEASE_TAR="${primary}"
  export STI_RELEASE_COMMIT="$(cat "${STI_LOCAL_RELEASEPATH}/.commit")"
}
# sti::build::get_version_vars loads the standard version variables as ENV
# vars: a pre-baked STI_VERSION_FILE wins; otherwise derive them from git.
sti::build::get_version_vars() {
  if [[ -z ${STI_VERSION_FILE-} ]]; then
    sti::build::sti_version_vars
    return
  fi
  source "${STI_VERSION_FILE}"
}
# sti::build::sti_version_vars looks up the current Git state and derives
# STI_GIT_COMMIT, STI_GIT_TREE_STATE, STI_GIT_VERSION, STI_GIT_MAJOR and
# STI_GIT_MINOR. Any of these already set by the caller is respected as-is.
sti::build::sti_version_vars() {
  local git=(git --work-tree "${STI_ROOT}")

  # Respect a pre-seeded commit; otherwise resolve the abbreviated HEAD commit.
  if [[ -n ${STI_GIT_COMMIT-} ]] || STI_GIT_COMMIT=$("${git[@]}" rev-parse --short "HEAD^{commit}" 2>/dev/null); then
    if [[ -z ${STI_GIT_TREE_STATE-} ]]; then
      # Check if the tree is dirty. default to dirty
      if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
        STI_GIT_TREE_STATE="clean"
      else
        STI_GIT_TREE_STATE="dirty"
      fi
    fi

    # Use git describe to find the version based on annotated tags.
    if [[ -n ${STI_GIT_VERSION-} ]] || STI_GIT_VERSION=$("${git[@]}" describe "${STI_GIT_COMMIT}^{commit}" 2>/dev/null); then
      if [[ "${STI_GIT_TREE_STATE}" == "dirty" ]]; then
        # git describe --dirty only considers changes to existing files, but
        # that is problematic since new untracked .go files affect the build,
        # so use our idea of "dirty" from git status instead.
        STI_GIT_VERSION+="-dirty"
      fi

      # Try to match the "git describe" output to a regex to try to extract
      # the "major" and "minor" versions and whether this is the exact tagged
      # version or whether the tree is between two tagged versions.
      if [[ "${STI_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)([.-].*)?$ ]]; then
        STI_GIT_MAJOR=${BASH_REMATCH[1]}
        STI_GIT_MINOR=${BASH_REMATCH[2]}
        # Any suffix after vMAJOR.MINOR means "past the tag": advertise with "+".
        if [[ -n "${BASH_REMATCH[3]}" ]]; then
          STI_GIT_MINOR+="+"
        fi
      fi
    fi
  fi
}
# Saves the version variables to the file named by $1 so a later build can
# reload them via STI_VERSION_FILE instead of consulting git.
sti::build::save_version_vars() {
  local version_file=${1-}
  if [[ -z ${version_file} ]]; then
    echo "!!! Internal error. No file specified in sti::build::save_version_vars"
    return 1
  fi
  # Emit sourceable KEY='value' lines; unset variables serialize as empty.
  {
    echo "STI_GIT_COMMIT='${STI_GIT_COMMIT-}'"
    echo "STI_GIT_TREE_STATE='${STI_GIT_TREE_STATE-}'"
    echo "STI_GIT_VERSION='${STI_GIT_VERSION-}'"
    echo "STI_GIT_MAJOR='${STI_GIT_MAJOR-}'"
    echo "STI_GIT_MINOR='${STI_GIT_MINOR-}'"
  } >"${version_file}"
}
# sti::build::ldflag emits one -X linker flag for pkg/version.
# golang 1.5+ wants `-X key=val`, but golang 1.4 and earlier REQUIRE `-X key val`.
sti::build::ldflag() {
  local key=${1}
  local val=${2}
  GO_VERSION=($(go version))
  # The original test `grep -E 'go1.5'` matched only go1.5.x, so go1.6 and
  # every later release incorrectly got the legacy space-separated form.
  # Treat only go1.0 - go1.4 as legacy instead.
  if [[ "${GO_VERSION[2]}" =~ ^go1\.[0-4]([.-]|$) ]]; then
    echo "-X ${STI_GO_PACKAGE}/pkg/version.${key} ${val}"
  else
    echo "-X ${STI_GO_PACKAGE}/pkg/version.${key}=${val}"
  fi
}
# sti::build::ldflags calculates the -ldflags argument for building STI
sti::build::ldflags() {
  # A subshell keeps the strict-mode options and version variables from
  # leaking into the caller's environment.
  (
    set -o errexit
    set -o nounset
    set -o pipefail

    cd "${STI_ROOT}"
    sti::build::get_version_vars

    declare -a flags=()
    flags+=($(sti::build::ldflag "majorFromGit" "${STI_GIT_MAJOR}"))
    flags+=($(sti::build::ldflag "minorFromGit" "${STI_GIT_MINOR}"))
    flags+=($(sti::build::ldflag "versionFromGit" "${STI_GIT_VERSION}"))
    flags+=($(sti::build::ldflag "commitFromGit" "${STI_GIT_COMMIT}"))

    # -ldflags takes a single string, so collapse the array into one line.
    echo "${flags[*]-}"
  )
}
|
package lx.calibre.util;
/**
 * Null-tolerant conversion helpers for values of unknown runtime type
 * (e.g. rows read from a generic result set).
 */
public final class ConvertUtils {

    private ConvertUtils() {
        // Static utility class; never instantiated.
    }

    /**
     * Converts {@code obj} to a primitive long.
     * Mirrors {@link #toDouble(Object)}: non-Number inputs fall back to
     * parsing their string form instead of failing with a
     * ClassCastException as the previous cast-only version did.
     *
     * @throws NumberFormatException if the string form is not a valid long
     * @throws NullPointerException  if {@code obj} is null (a primitive long
     *                               cannot represent "absent")
     */
    public static long toLong(Object obj) {
        if (obj instanceof Number) {
            return ((Number) obj).longValue();
        }
        return Long.parseLong(obj.toString());
    }

    /** Returns {@code obj.toString()}, or null for a null input. */
    public static String toString(Object obj) {
        return obj != null ? obj.toString() : null;
    }

    /**
     * Converts {@code obj} to a Double: null stays null, Numbers are widened,
     * anything else is parsed from its string form.
     */
    public static Double toDouble(Object obj) {
        if (obj == null) {
            return null;
        } else if (obj instanceof Number) {
            return ((Number) obj).doubleValue();
        } else {
            return Double.valueOf(obj.toString());
        }
    }
}
|
/*
* MIT License
*
* Copyright (c) 2018 <NAME> (@smallcreep) <<EMAIL>>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.github.smallcreep.cucumber.seeds;
import com.jcabi.jdbc.Outcome;
/**
 * Connection to the DataBase.
 * @since 0.1.1
 */
public interface DataBase {
    /**
     * Check connection to the DataBase.
     * @throws Exception If any error of connection
     */
    void connect() throws Exception;

    /**
     * Execute a read-only SQL query.
     *
     * @param sql Sql query
     * @param outcome The outcome of the operation
     * @param <T> Type of response
     * @return The result
     * @throws Exception If fails
     */
    <T> T result(Sql sql, Outcome<T> outcome) throws Exception;

    /**
     * Execute a mutating SQL statement (update/insert).
     *
     * @param sql Sql query
     * @param outcome The outcome of the operation
     * @param <T> Type of response
     * @return The result
     * @throws Exception If fails
     */
    <T> T update(Sql sql, Outcome<T> outcome) throws Exception;

    /**
     * Get schema by name.
     * @param schema Schema name
     * @return Schema
     * @throws Exception If fails
     */
    Schema schema(String schema) throws Exception;
}
|
<reponame>shanghai-edu/vsphere-mon<gh_stars>1-10
package core
// NewMetricValue builds a MetricValue for endpoint/metric, copying the
// supplied tags so later mutation of the caller's map cannot alias TagsMap.
func NewMetricValue(endpoint, metric string, val interface{}, dataType string, tags map[string]string) *MetricValue {
	mv := MetricValue{
		Endpoint:     endpoint,
		Metric:       metric,
		ValueUntyped: val,
		CounterType:  dataType,
		// Pre-size the copy; ranging over a nil map is a no-op, so a nil
		// tags argument is safe.
		TagsMap: make(map[string]string, len(tags)),
	}
	for k, v := range tags {
		mv.TagsMap[k] = v
	}
	return &mv
}
// GaugeValue builds a MetricValue with the GAUGE counter type.
func GaugeValue(endpoint, metric string, val interface{}, tags map[string]string) *MetricValue {
	return NewMetricValue(endpoint, metric, val, GAUGE, tags)
}
// CounterValue builds a MetricValue with the COUNTER counter type.
// (The original comment said "Gauge type" — copy-paste slip.)
func CounterValue(endpoint, metric string, val interface{}, tags map[string]string) *MetricValue {
	return NewMetricValue(endpoint, metric, val, COUNTER, tags)
}
|
<filename>Include/KAI/Core/Type/ContainerOperations.h
#pragma once
#include <KAI/Core/Config/Base.h>
#include <KAI/Core/Base.h>
#include <KAI/Core/TriColor.h>
KAI_TYPE_BEGIN
// Operations applied to the objects *contained by* a reference, dispatched on
// whether the referenced type is a container. This primary template handles
// containers by forwarding to each element via ForEach.
// NOTE(review): SetMarked(R, M) forwards `SetMarked(M)` (and SetSwitch
// similarly) as the functor argument; a one-argument call cannot resolve to
// this struct's own two/three-argument statics, so it must find functor
// factories declared elsewhere (via ADL) -- confirm before instantiating.
template <typename Reference, bool IsContainer>
struct ContainerOperations
{
	// Functor that assigns a garbage-collection color to each visited object.
	struct ColorSetter
	{
		ObjectColor::Color _c;
		ColorSetter(ObjectColor::Color c) : _c(c) { }
		template <class T>
		void operator()(T &obj)
		{
			obj.SetColor(_c);
		}
	};
	// Mark/unmark every contained object.
	static void SetMarked(Reference R, bool M)
	{
		ForEach(R, SetMarked(M));
	}
	// Set switch S to M on every contained object.
	static void SetSwitch(Reference R, int S, bool M)
	{
		ForEach(R, SetSwitch(S, M));
	}
	// Color every contained object (tri-color GC support).
	static void SetColor(Reference R, ObjectColor::Color C)
	{
		ForEach(R, ColorSetter(C));
	}
	// Remove object Q from the container R.
	static void Erase(Reference R, Object const &Q)
	{
		R.Erase(Q);
	}
	// Apply F to every contained object, returning the (possibly stateful) functor.
	template <class Fun>
	static Fun ForEachContained(Reference R, Fun F)
	{
		return ForEach(R, F);
	}
};
// Specialization for non-container references: every container operation is a
// deliberate no-op, so generic GC/traversal code can call them unconditionally.
template <typename Reference>
struct ContainerOperations<Reference, false>
{
	static void SetMarked(Reference, bool) { }
	static void SetSwitch(Reference, int, bool) { }
	static void SetColor(Reference, ObjectColor::Color) { }
	static void Erase(Reference, Object const &) { }
	// Nothing to visit; hand the functor straight back.
	template <class Fun, class Ref>
	static Fun ForEachContained(Ref, Fun const &F) { return F; }
};
KAI_TYPE_END
|
<reponame>OhFinance/oh-app
import { Box, Grid } from "@material-ui/core";
import { Button, DOCS_URL, Flex, Heading, Subtitle } from "@ohfinance/oh-ui";
import { Web3ProviderButton } from "components/Web3ProviderButton";
import connectors from "config/constants/connectors";
// Wallet-connection screen shown before login: renders one button per
// configured web3 connector and a docs link for users without a wallet.
const Login = () => {
  return (
    <Flex center column grow={1}>
      <Box mb={2}>
        <Heading align="center" gutterBottom>
          Connect Wallet
        </Heading>
        <Subtitle align="center" color="textSecondary">
          Login to Oh! Finance
        </Subtitle>
      </Box>
      {/* One provider button per entry in config/constants/connectors. */}
      <Grid container spacing={2} justify="center">
        {connectors.map((connector, i) => (
          <Grid item key={i} xs={12} md={8}>
            <Web3ProviderButton connector={connector} />
          </Grid>
        ))}
      </Grid>
      <Flex mt={4} column center>
        <Subtitle color="textSecondary" gutterBottom>
          Don't have a crypto wallet yet?
        </Subtitle>
        {/* External link to the product documentation. */}
        <Button color="primary" href={DOCS_URL}>
          Learn More
        </Button>
      </Flex>
    </Flex>
  );
};

export default Login;
|
#!/bin/sh
set -o errexit

# Positional arguments: the git tag to build and the working directory.
TAG_VERSION=$1
BUILD_DIRECTORY=$2
# usage prints a short help text and aborts with a non-zero status.
# Plain `name()` instead of the bashism `function name()` keeps this
# /bin/sh script POSIX-conformant.
usage()
{
  echo "This script builds the dynamically and statically linked version"
  echo "and generates the checksum files of the Athena tag provided."
  echo
  echo "USAGE: $0 <tag> <build-directory>"
  echo
  exit 1
}
# checkout_tag clones GITHUB_REPO into CLONE_DIR and checks out TAG_VERSION.
# Relies on `set -o errexit` at the top of the script to abort on failure.
# Variables are quoted so directories with spaces cannot split the arguments.
checkout_tag()
{
  echo "cloning Github repository $GITHUB_REPO to $CLONE_DIR .."
  git clone "$GITHUB_REPO" "$CLONE_DIR"
  echo "checking out tag $TAG_VERSION .."
  cd "$CLONE_DIR"
  git checkout "$TAG_VERSION"
}
# build_dynamic_linked_version compiles the dynamically linked release in
# CLONE_DIR/build/release and packages it via generate_tarball.
build_dynamic_linked_version()
{
  echo "starting dynamic build .."
  cd "$CLONE_DIR"
  mkdir -p build/release
  cd build/release
  cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS="-fassociative-math" -DCMAKE_CXX_FLAGS="-fassociative-math" -DDO_TESTS=OFF ../..
  make
  cd ../..
  echo "dynamic build done .."
  generate_tarball "$DYNAMIC_RELEASE"
}
# build_static_linked_version cleans any prior objects, compiles the
# statically linked (-DSTATIC=true) release and packages it.
build_static_linked_version()
{
  echo "starting static build .."
  cd "$CLONE_DIR"
  make clean
  mkdir -p build/release
  cd build/release
  cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS="-fassociative-math" -DCMAKE_CXX_FLAGS="-fassociative-math" -DSTATIC=true -DDO_TESTS=OFF ../..
  make
  cd ../..
  echo "static build done .."
  generate_tarball "$STATIC_RELEASE"
}
# generate_tarball packs the built binaries under a $1/ prefix into
# TARGET_DIR/$1.tar.gz and produces checksums for the archive.
# All path expansions are now quoted; the unquoted $TARBALL previously broke
# when BUILD_DIRECTORY contained whitespace.
generate_tarball()
{
  RELEASE_NAME=$1
  if [ ! -d "$TARGET_DIR" ];
  then
    mkdir -p "$TARGET_DIR"
  fi
  TARBALL="$TARGET_DIR/$RELEASE_NAME.tar.gz"
  echo "generating tarball $TARBALL .."
  # --transform prefixes every member with RELEASE_NAME/ inside the archive.
  tar --transform "s,^,$RELEASE_NAME/," -c -f "$TARBALL" -z -C "$CLONE_DIR/build/release/src" \
    solominer \
    wallet \
    Athena \
    services \
    legacy-services
  generate_checksums "$TARBALL"
}
# generate_checksums writes $1.md5 and $1.sha512 next to the given file.
generate_checksums()
{
  FILE_TO_CHECK=$1
  echo "generating md5sum .."
  md5sum "$FILE_TO_CHECK" > "$FILE_TO_CHECK.md5"
  echo "generating sha512sum .."
  sha512sum "$FILE_TO_CHECK" > "$FILE_TO_CHECK.sha512"
}
# cleanup is meant to remove the git clone directory after a build.
function cleanup()
{
if [ -d "$CLONE_DIR" ];
then
echo "removing git clone directory .."
# NOTE(review): this only *prints* the rm command instead of executing it,
# so CLONE_DIR is never actually deleted. It reads like a leftover dry-run
# safeguard -- confirm whether the echo should be dropped to really clean up.
echo "rm -rf $CLONE_DIR"
fi
}
# --- Argument validation ----------------------------------------------------
# Quoted tests: the original unquoted [ -z $VAR ] only worked by accident for
# empty values and broke for values containing whitespace.
if [ -z "$TAG_VERSION" ];
then
  usage
fi
if [ -z "$BUILD_DIRECTORY" ];
then
  echo "No build directory given, will build in $HOME/build .."
  BUILD_DIRECTORY="$HOME/build"
fi
if [ ! -d "$BUILD_DIRECTORY" ];
then
  # Fixed: the original echoed $BUILD_DIRECTOR (typo), an unset variable.
  echo "creating build directory $BUILD_DIRECTORY .."
  mkdir -p "$BUILD_DIRECTORY"
fi

# -- Config
GITHUB_REPO="https://github.com/athena-network/athena.git"
CLONE_DIR="$BUILD_DIRECTORY/athena-buildall"
TARGET_DIR="$BUILD_DIRECTORY/athena-releases"
DYNAMIC_RELEASE="athena-${TAG_VERSION}-linux-CLI"
STATIC_RELEASE="athena-${TAG_VERSION}-linux-staticboost-CLI"

# Clone once, build both flavours, then clean up.
checkout_tag
build_static_linked_version
build_dynamic_linked_version
cleanup
|
import tensorflow as tf

print("Creating tensors...")
# These operations return a tensor.
# tf.sub / tf.mul / tf.div were renamed in TensorFlow 1.0 to
# subtract / multiply / divide, so the old names fail on any modern install.
t1 = tf.add(1, 2)
t2 = tf.subtract(1, 2)
t3 = tf.multiply(1, 2)
# tf.div performed Python-2-style integer division on int inputs (1/2 -> 0);
# floordiv preserves that output, whereas tf.divide would print 0.5.
t4 = tf.math.floordiv(1, 2)

# TF 2.x executes eagerly, so tf.Session no longer exists; read values
# directly from the tensors instead.
for tensor in (t1, t2, t3, t4):
    print(tensor.numpy())
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 14:34:08 2020
@author: CodeAndQuarks
Desc: A simple random maze generator that randomly creates a maze of size
in the range of (3x3-20x20).
There are modules I could have used to randomise the range,
but I wanted to see if I could randomise it myself,
to generalise its use for future projects where
implementation is dependent on user input.
"""
#imports
import numpy as np
import random as rd
#define functions
def randomdraw(array):
    """Uniformly select one integer from `array` and return it as an int."""
    # NB: the original called np.random.choice(array, 1, probs), which binds
    # the probability list to the *replace* parameter, silently discarding it.
    # Pass p= by keyword so the (uniform) distribution is actually applied.
    const = len(array)
    probs = [1.0 / const] * const
    return int(np.random.choice(array, p=probs))
def createpath(xf, yf, maze, row, col):
    """Carve a usable path through the maze via randomized depth-first search.

    Args:
        xf, yf: coordinates of the cell to visit (origin top-left).
        maze: visited-cell grid; 1 marks visited cells and the sentinel border.
        row: horizontal-wall grid; entries are replaced to open a wall.
        col: vertical-wall grid; entries are replaced to open a wall.
    """
    # Candidate neighbours: left, right, up, down.
    neighbourhood = [(xf - 1, yf), (xf + 1, yf), (xf, yf - 1), (xf, yf + 1)]
    maze[yf][xf] = 1
    #each time createpath is called, neighbour will shuffle
    rd.shuffle(neighbourhood)
    for (xt, yt) in neighbourhood:
        # Skip visited cells and the sentinel border (both hold 1). Negative
        # indices wrap to the border row/column, which is also 1.
        if maze[yt][xt]:
            continue
        # Knock down the wall between (xf, yf) and (xt, yt): same column ->
        # open a horizontal wall, same row -> open a vertical wall.
        if xt == xf:
            row[max(yf, yt)][xf] = "+ "
        if yt == yf:
            col[yf][max(xf, xt)] = " "
        #keep iterating
        createpath(xt, yt, maze, row, col)
def GenMaze(width, height):
    """Generate a random text-based maze and return it as one printable string."""
    start_x = rd.randrange(width)
    start_y = rd.randrange(height)
    # Cell grid with a sentinel border of "visited" (1) cells on the edges.
    maze = [[0] * width + [1] for _ in range(height)] + [[1] * (width + 1)]
    # Wall grids: `col` holds the vertical walls, `row` the horizontal ones.
    col = [["| "] * width + ['|'] for _ in range(height)] + [[]]
    row = [["+--"] * width + ['+'] for _ in range(height + 1)]
    # Carve a connected path starting from a random cell.
    createpath(start_x, start_y, maze, row, col)
    # Render: alternate one horizontal-wall line and one vertical-wall line.
    lines = []
    for horizontal, vertical in zip(row, col):
        lines.append(''.join(horizontal))
        lines.append(''.join(vertical))
    return '\n'.join(lines) + '\n'
def createfile(Maze):
    """Write the maze string to RandomMaze.txt in the working directory."""
    print('Opening file....')
    # `with` guarantees the handle is closed even if the write raises;
    # the original leaked the handle on failure.
    with open("RandomMaze.txt", "w") as file:
        print('Writing Maze to text-file....')
        file.write(Maze)
    # Also fixes the "sucessfully" typo in the status message.
    print('Maze successfully re-configured')
def main():
    """Entry point: draw random dimensions, build, print and persist a maze."""
    # Allowed board sizes: 3x3 up to 20x20 cells.
    arr = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
    #randomise the width and the height
    width = randomdraw(arr)
    height = randomdraw(arr)
    Maze = GenMaze(width, height)
    #examine maze
    print(Maze)
    #Create a new randommaze text file.
    createfile(Maze)

#create maze
main()
|
#!/bin/bash
SUBMISSION_URL="http://vcloud.sosy-lab.org/submit.php"
# print_help_and_exit writes the usage text to stdout, then terminates the
# script (bare `exit` propagates the status of the final print, i.e. 0).
function print_help_and_exit {
  printf '%s\n' \
    "Submit files to VerifierCloud." \
    "Parameters:" \
    " --help Print this help" \
    " --analysis Analysis to use" \
    " --file File for verification"
  exit
}
# loop over all input parameters and parse them
while [ $# -gt 0 ]; do
  case "$1" in
    "--help")
      PRINT_HELP=1
      ;;
    "--analysis")
      shift
      ANALYSIS=$1
      ;;
    "--file")
      shift
      FILE=$1
      ;;
    *)
      echo "Unknown Parameter: $1"
      print_help_and_exit
      ;;
  esac
  shift
done

# An explicit --help is honoured before the required-argument check; the
# original tested it last, making the flag dead unless all arguments were set.
if [ "$PRINT_HELP" ]; then
  print_help_and_exit
fi

# Both --analysis and --file are mandatory. `-o` inside [ ] is obsolescent
# and ambiguous; use two tests joined with ||. (The unused `declare -a
# OPTIONS` was dropped.)
if [ -z "$ANALYSIS" ] || [ -z "$FILE" ]; then
  print_help_and_exit
fi

# Upload the file and chosen analysis to the VerifierCloud endpoint.
curl -F "cfile=@$FILE" -F "analysis=$ANALYSIS" "$SUBMISSION_URL"
# vim:sts=2:sw=2:expandtab
|
#!/bin/sh
set -e

# Image coordinates consumed by the shared publish script below.
export REPOSITORY_NAME="panvala/frontend"

# Build and push the frontend container image.
scripts/publish-image.sh
|
// IIFE keeps 'use strict' and any local helpers out of the global scope.
(function(){
'use strict';
/**
 * Registers the `services` Angular module (no module dependencies);
 * individual services attach themselves to it in other files.
 *
 * @author <NAME>
 * @author $Author: fzhang $
 * @version $Revision: 643 $ $Date: 2015-03-31 12:38:41 -0600 (Tue, 31 Mar 2015) $
 */
angular.module('services', []);
}());
|
<gh_stars>0
package auth
import "golang.org/x/oauth2"
// NewDropboxProvider defines the details needed to use Dropbox for OAuth2
// authorization.
//
// https://www.dropbox.com/developers/reference/oauth-guide
func NewDropboxProvider() *AuthProvider {
	return &AuthProvider{
		ID:      Dropbox,
		Key:     "dropbox",
		Enabled: true,
		// Fixed Dropbox OAuth2 endpoints; client ID/secret are presumably
		// filled in elsewhere before use -- confirm against the caller.
		Config: oauth2.Config{
			Endpoint: oauth2.Endpoint{
				AuthURL:  "https://www.dropbox.com/oauth2/authorize",
				TokenURL: "https://api.dropbox.com/oauth2/token",
			},
		},
	}
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// In-memory extension state.
// NOTE(review): in a Manifest V3 service worker this object is re-created on
// every worker restart, so `interval` silently resets to 0 (clicks then add
// nothing) -- confirm whether it should be persisted via chrome.storage.
const state = {
  count: 0,
  interval: 0,
};

// Toolbar-icon click and first-install hooks.
chrome.action.onClicked.addListener(onClicked);
chrome.runtime.onInstalled.addListener(onInstalled);

// Picks a random step size at install time. Math.ceil(5 * Math.random())
// yields 1..5 (0 only in the measure-zero case Math.random() === 0).
function onInstalled() {
  state.interval = Math.ceil(5 * Math.random());
  console.log(`interval = ${state.interval}`);
}

// Logs the current count, then advances it by the chosen interval.
function onClicked() {
  console.log(`count = ${state.count}`);
  state.count += state.interval;
}
|
// +build !pro,!ent
package nomad
// EnterpriseState holds per-server enterprise state; it is empty in the
// open-source build selected by the `!pro,!ent` build tags above.
type EnterpriseState struct{}

// setupEnterprise is a stub for enterprise-only initialization; the OSS
// build has nothing to configure.
func (s *Server) setupEnterprise(config *Config) error {
	return nil
}

// startEnterpriseBackground is a stub for enterprise-only background work.
func (s *Server) startEnterpriseBackground() {}
|
import { combineReducers } from 'redux'
import home from './HomeReducer'
// Generic reducer: dispatches to handlers[action.type] when a handler exists,
// otherwise returns the state unchanged.
const reducer = (handlers, state, action) =>
  handlers[action.type] ? handlers[action.type](state, action) : state

// NOTE(review): `home` is invoked with the helper (home(reducer)), implying
// HomeReducer exports a factory that takes this dispatcher -- confirm.
export default combineReducers({
  home: home(reducer)
})
|
package com.yan.demo.dao;
import com.yan.demo.bean.OnClass;
import com.yan.demo.bean.OnClassExample;
import java.util.List;
import org.apache.ibatis.annotations.Param;
/**
 * MyBatis mapper for OnClass records (generator-style CRUD surface).
 * NOTE(review): the method set matches MyBatis Generator output; prefer
 * regenerating over hand-editing this interface.
 */
public interface OnClassMapper {
    /** Counts rows matching the example criteria. */
    long countByExample(OnClassExample example);

    /** Deletes rows matching the example criteria; returns rows affected. */
    int deleteByExample(OnClassExample example);

    int deleteByPrimaryKey(Integer id);

    /** Inserts all columns of the record. */
    int insert(OnClass record);

    /** Inserts only the non-null columns of the record. */
    int insertSelective(OnClass record);

    List<OnClass> selectByExample(OnClassExample example);

    OnClass selectByPrimaryKey(Integer id);

    /** Updates non-null fields of rows matching the example. */
    int updateByExampleSelective(@Param("record") OnClass record, @Param("example") OnClassExample example);

    /** Updates all fields of rows matching the example. */
    int updateByExample(@Param("record") OnClass record, @Param("example") OnClassExample example);

    int updateByPrimaryKeySelective(OnClass record);

    int updateByPrimaryKey(OnClass record);
}
#!/bin/bash

# Builds a flattened, print-oriented Markdown tree ("mdd") from the book
# sources: collects chapter markdown/notebooks, converts notebooks to md,
# rewrites links/images for PDF output, merges sections per chapter, and zips
# the result. External tools used: jupyter, rsvg-convert, perl, python
# (mdd_utils module), zip.

MD="mdd"
CH="ch.md"

# Start from a clean staging directory.
[ -e $MD ] && rm -rf $MD
mkdir $MD

# Collect files.
cp index.rst $MD/
cp -R img $MD/
for f in chapter*/*; do
	dir=$(dirname "$f")
	# Only markdown sources and notebooks are carried over.
	if [ "${f##*.}" = "md" ] || [ "${f##*.}" = "ipynb" ]; then
		mkdir -p $MD/$dir
		cp $f $MD/$f
	fi
done

# ipynb to md.
for f in $MD/chapter*/*ipynb; do
	base=$(basename $f)
	jupyter nbconvert --to markdown $f --output "${base%%.*}.md"
	rm $f
done

# Post-process every chapter markdown file for print.
for f in $MD/chapter*/*md; do
	dir=$(dirname "$f")
	# Remove inner link.
	sed -i 's/\[\([^]]*\)\]([^\)]*.md)/\1/g' $f
	# Refer pdf instead of svg.
	sed -i s/\\.svg/.pdf/g $f
	# Refer img in the same level.
	sed -i 's/\](..\/img/\](img/g' $f
	# Demote headings one level everywhere except each chapter's index page.
	if [ "$f" != "$dir/index.md" ]; then
		sed -i s/#\ /##\ /g $f
	fi
done

# Convert svg to pdf.
for f in $MD/img/*svg; do
	rsvg-convert -f pdf -z 0.80 -o "${f%%.*}.pdf" $f
	rm $f
done

# Concat sections in each chapter.
for f in $MD/chapter*/index.md; do
	# NOTE(review): assumes mdd_utils.get_sections() prints a whitespace-
	# separated list of section files for the chapter -- confirm in mdd_utils.
	sections=$(python -c 'import mdd_utils; print(mdd_utils.get_sections())' $f)
	dir=$(dirname "$f")
	chapter=$dir/$CH
	cat $f $sections > $chapter
	# Strip Sphinx-only ```eval_rst blocks, which the downstream converter
	# cannot render.
	perl -i -0777 -pe 's/```eval_rst[^`]+```//ge' $chapter
done

# Flatten: one chNN.md per chapter at the top level.
chapters=$(python -c 'import mdd_utils; print(mdd_utils.get_chapters())' $MD/index.rst)
i=1
for chapter in $chapters; do
	# Move matplotlib plots outside.
	mv $MD/$chapter/*_files $MD/
	# Move ch.md to ../ch0x.md
	mv $MD/$chapter/$CH $MD/ch$(printf %02d $i).md
	rm -rf $MD/$chapter
	i=$((i + 1))
done

# Convert matplotlib-generated svg to pdf.
for f in $MD/*_files/*svg; do
	rsvg-convert -f pdf -z 0.80 -o "${f%%.*}.pdf" $f
	rm $f
done

rm $MD/toc.rst

# zip files.
[ -e "$MD.zip" ] && rm "$MD.zip"
zip -r "$MD.zip" $MD
[ -e $MD ] && rm -rf $MD
|
<filename>mosby-utils/src/main/java/net/fangcunjian/mosby/utils/logger/Logger.java
package net.fangcunjian.mosby.utils.logger;
/**
* Logger is a wrapper of {@link android.util.Log}
* But more pretty, simple and powerful
*/
/**
 * Logger is a wrapper of {@link android.util.Log}
 * But more pretty, simple and powerful
 */
public final class Logger {
    // Tag used when the caller does not supply one.
    public static final String DEFAULT_TAG = "Logger";
    // Global debug flag, consumed only when the printer is first built.
    private static boolean debug = false;
    // Lazily created singleton printer.
    private static LoggerPrinter loggerPrinter;

    // NOTE(review): calling setDebug() after the first getDefaultLogger()
    // call has no effect, because the flag is captured at printer creation.
    public static void setDebug(boolean isDebug) {
        debug = isDebug;
    }

    // NOTE(review): this lazy init is not thread-safe; two threads may race
    // and build two printers -- confirm whether that is acceptable here.
    public static LoggerPrinter getDefaultLogger() {
        if ( loggerPrinter == null) {
            loggerPrinter = LoggerFactory.getFactory(DEFAULT_TAG, debug);
        }
        return loggerPrinter;
    }
}
|
#!/bin/bash -e
# exit on error to make sure they get resolved

# get common functionality
[ -z "${lib_dir}" ] && . ../../common.sh

# Each failing check now uses `exit 1` instead of bare `false`: `false` only
# aborted because of -e, which is lost when the script is run as
# `bash script.sh`; an explicit exit fails regardless of invocation.

# nos_template stdout
nos_set_evar engine_template_dir '/tmp'
echo "{{big_deal}}" > /tmp/nos_template.mustache
out=$(nos_template 'nos_template.mustache' '-' '{ "big_deal": "mustache"}')
if [[ "${out}" != "mustache" ]]; then
  echo "TEST ('nos_template stdout') FAILED!"
  exit 1
fi

# nos_template file
nos_template 'nos_template.mustache' '/tmp/nos_template_out' '{ "big_deal": "mustache"}'
if [[ "$(cat /tmp/nos_template_out)" != "mustache" ]]; then
  echo "TEST ('nos_template file') FAILED!"
  exit 1
fi

# nos_template_file
nos_set_evar engine_file_dir '/tmp'
nos_template_file 'nos_template.mustache' '/tmp/mustache.nos_template'
# -a is a deprecated bash test operator; -e is the standard existence check.
if [[ ! -e /tmp/mustache.nos_template ]]; then
  echo "TEST ('nos_template_file') FAILED!"
  exit 1
fi

echo "ALL RENDER TESTS PASSED!"
|
<filename>src/Bounds2D.ts
export default class Bounds2D{
left:number
top:number
right:number
bottom:number
constructor(left:number, top:number, right:number, bottom:number){
this.left = left
this.top = top
this.right = right
this.bottom = bottom
}
get x(){
return this.left
}
get y(){
return this.top
}
get width(){
return this.right - this.left
}
get height(){
return this.bottom - this.top
}
get centerX(){
return this.left + (this.right - this.left) / 2
}
get centerY(){
return this.top + (this.bottom - this.top) / 2
}
get heightToWidthRatio(){
return this.height / this.width
}
get widthToHeightRatio(){
return this.width / this.height
}
get longerSide(){
return this.width > this.height ? this.width : this.height
}
get shorterSide(){
return this.width > this.height ? this.height : this.width
}
combine(other:Bounds2D){
if(other.left < this.left){
this.left = other.left
}
if(other.right > this.right){
this.right = other.right
}
if(other.top < this.top){
this.top = other.top
}
if(other.bottom > this.bottom){
this.bottom = other.bottom
}
}
} |
// RBException.h
// #define _SCL_SECURE_NO_WARNINGS
#ifndef SURFACE_EXCEPTION_H
#define SURFACE_EXCEPTION_H
#pragma once
#include <wx/string.h>
#include <wx/msw/winundef.h>
// Lightweight exception type carrying a wxString message.
// NOTE(review): does not derive from std::exception, so generic
// catch (const std::exception&) handlers will not see it -- confirm intended.
class RBException
{
public:
	// Stores a copy of the message; wxString manages its own storage.
	RBException(const wxString & msg)
		: wxstrMsg( msg)
	{ }

	// Returns a pointer into the internal wxString buffer; only valid while
	// this exception object is alive.
	const wxChar *what() const
	{
		return wxstrMsg.c_str();
	}

private:
	wxString wxstrMsg;
};
#endif
|
<filename>pb5/balloons.py
#!/usr/bin/env python3
import sys
import os
import logging
import argparse
import itertools
from wordpal import puzzicon
_log = logging.getLogger(__name__)
_BLANK = '?'
class WordSearcher(object):
    """Finds dictionary words formable from balloon letters plus one blank."""

    def __init__(self, puzzeme_set):
        # Puzzarian performs the actual pattern searches over the word set.
        self.puzzerarian = puzzicon.Puzzarian(puzzeme_set)

    def find(self, balloons, num_balloons):
        """Yield canonical words of length num_balloons using num_balloons - 1
        balloon letters plus one wildcard (blank) position.

        Args:
            balloons: iterable of single-character balloon letters.
            num_balloons: target word length; must be a positive int.
        """
        assert isinstance(num_balloons, int) and num_balloons > 0, "target length must be a positive integer"
        balloons = [b.upper() for b in balloons]
        for combo in itertools.combinations(balloons, num_balloons - 1):
            _log.debug("examining balloon combo %s", combo)
            # One wildcard slot rounds every combination up to num_balloons
            # letters, so the original `len(combo) == num_balloons` guard was
            # always true and has been removed as dead code.
            letters = list(combo) + [_BLANK]
            for perm in itertools.permutations(letters):
                pattern = ''.join(perm)
                _log.debug("searching dictionary for pattern %s", pattern)
                filters = [puzzicon.Filters.canonical_wildcard(pattern)]
                for match in self.puzzerarian.search(filters):
                    yield match.canonical
|
package com.example.google3;
import androidx.annotation.NonNull;
import androidx.appcompat.app.AppCompatActivity;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;
import com.google.firebase.auth.FirebaseAuth;
import com.google.firebase.database.DataSnapshot;
import com.google.firebase.database.DatabaseError;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import com.google.firebase.database.ValueEventListener;
/**
 * Post-login landing screen: shows the signed-in user's name/email from the
 * Firebase Realtime Database and offers sign-out plus navigation onward.
 */
public class Main2Activity extends AppCompatActivity {

    private Button mButtonSignOut;
    private TextView mTextviewName;
    private TextView mTextviewEmail;
    private FirebaseAuth mAuth;
    private DatabaseReference mDatabase;
    // "avanzar" (= proceed): opens the menuapp activity.
    public Button avanzar;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main2);
        avanzar = (Button) findViewById(R.id.control2);
        avanzar.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Intent p = new Intent(getApplicationContext(), menuapp.class);
                startActivity(p);
            }
        });
        mAuth = FirebaseAuth.getInstance();
        mDatabase = FirebaseDatabase.getInstance().getReference();
        mButtonSignOut=(Button) findViewById(R.id.btnSignout);
        mTextviewEmail = (TextView) findViewById(R.id.textViewEmail);
        mTextviewName = (TextView) findViewById(R.id.textViewName);
        // Sign out, return to the login screen, and finish() so Back cannot
        // re-enter this authenticated screen.
        mButtonSignOut.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                mAuth.signOut();
                startActivity(new Intent(Main2Activity.this,LoginActivity.class));
                finish();
            }
        });
        getUserInfo();
    }

    /**
     * Subscribes to /Users/&lt;uid&gt; and mirrors name/email into the views.
     * NOTE(review): getCurrentUser() can return null (e.g. right after
     * sign-out), which would NPE below, and onCancelled silently swallows
     * database errors -- confirm both are acceptable.
     */
    private void getUserInfo(){
        String id= mAuth.getCurrentUser().getUid();
        mDatabase.child("Users").child(id).addValueEventListener(new ValueEventListener() {
            @Override
            public void onDataChange(@NonNull DataSnapshot dataSnapshot) {
                if(dataSnapshot.exists()){
                    String name = dataSnapshot.child("name").getValue().toString();
                    String email = dataSnapshot.child("email").getValue().toString();
                    mTextviewName.setText(name);
                    mTextviewEmail.setText(email);
                }
            }
            @Override
            public void onCancelled(@NonNull DatabaseError databaseError) {
            }
        });
    }
}
|
<gh_stars>100-1000
/*
* This source file is part of libRocket, the HTML/CSS Interface Middleware
*
* For the latest information, see http://www.librocket.com
*
* Copyright (c) 2008-2010 CodePoint Ltd, Shift Technology Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#ifndef ROCKETCORERENDERINTERFACE_H
#define ROCKETCORERENDERINTERFACE_H
#include "ReferenceCountable.h"
#include "Header.h"
#include "Texture.h"
#include "Vertex.h"
namespace Rocket {
namespace Core {
class Context;
/**
The abstract base class for application-specific rendering implementation. Your application must provide a concrete
implementation of this class and install it through Core::SetRenderInterface() in order for anything to be rendered.
@author <NAME>
*/
class ROCKETCORE_API RenderInterface : public ReferenceCountable
{
public:
	RenderInterface();
	virtual ~RenderInterface();

	/// Called by Rocket when it wants to render geometry that the application does not wish to optimise. Note that
	/// Rocket renders everything as triangles.
	/// @param[in] vertices The geometry's vertex data.
	/// @param[in] num_vertices The number of vertices passed to the function.
	/// @param[in] indices The geometry's index data.
	/// @param[in] num_indices The number of indices passed to the function. This will always be a multiple of three.
	/// @param[in] texture The texture to be applied to the geometry. This may be NULL, in which case the geometry is untextured.
	/// @param[in] translation The translation to apply to the geometry.
	virtual void RenderGeometry(Vertex* vertices, int num_vertices, int* indices, int num_indices, TextureHandle texture, const Vector2f& translation) = 0;

	/// Called by Rocket when it wants to compile geometry it believes will be static for the foreseeable future.
	/// If supported, this should return a pointer to an optimised, application-specific version of the data. If
	/// not, do not override the function or return NULL; the simpler RenderGeometry() will be called instead.
	/// @param[in] vertices The geometry's vertex data.
	/// @param[in] num_vertices The number of vertices passed to the function.
	/// @param[in] indices The geometry's index data.
	/// @param[in] num_indices The number of indices passed to the function. This will always be a multiple of three.
	/// @param[in] texture The texture to be applied to the geometry. This may be NULL, in which case the geometry is untextured.
	/// @return The application-specific compiled geometry. Compiled geometry will be stored and rendered using RenderCompiledGeometry() in future calls, and released with ReleaseCompiledGeometry() when it is no longer needed.
	virtual CompiledGeometryHandle CompileGeometry(Vertex* vertices, int num_vertices, int* indices, int num_indices, TextureHandle texture);
	/// Called by Rocket when it wants to render application-compiled geometry.
	/// @param[in] geometry The application-specific compiled geometry to render.
	/// @param[in] translation The translation to apply to the geometry.
	virtual void RenderCompiledGeometry(CompiledGeometryHandle geometry, const Vector2f& translation);
	/// Called by Rocket when it wants to release application-compiled geometry.
	/// @param[in] geometry The application-specific compiled geometry to release.
	virtual void ReleaseCompiledGeometry(CompiledGeometryHandle geometry);

	/// Called by Rocket when it wants to enable or disable scissoring to clip content.
	/// @param[in] enable True if scissoring is to be enabled, false if it is to be disabled.
	virtual void EnableScissorRegion(bool enable) = 0;
	/// Called by Rocket when it wants to change the scissor region.
	/// @param[in] x The left-most pixel to be rendered. All pixels to the left of this should be clipped.
	/// @param[in] y The top-most pixel to be rendered. All pixels to the top of this should be clipped.
	/// @param[in] width The width of the scissored region. All pixels to the right of (x + width) should be clipped.
	/// @param[in] height The height of the scissored region. All pixels below (y + height) should be clipped.
	virtual void SetScissorRegion(int x, int y, int width, int height) = 0;

	/// Called by Rocket when a texture is required by the library.
	/// @param[out] texture_handle The handle to write the texture handle for the loaded texture to.
	/// @param[out] texture_dimensions The variable to write the dimensions of the loaded texture.
	/// @param[in] source The application-defined image source, joined with the path of the referencing document.
	/// @return True if the load attempt succeeded and the handle and dimensions are valid, false if not.
	virtual bool LoadTexture(TextureHandle& texture_handle, Vector2i& texture_dimensions, const String& source);
	/// Called by Rocket when a texture is required to be built from an internally-generated sequence of pixels.
	/// @param[out] texture_handle The handle to write the texture handle for the generated texture to.
	/// @param[in] source The raw 8-bit texture data. Each pixel is made up of four 8-bit values, indicating red, green, blue and alpha in that order.
	/// @param[in] source_dimensions The dimensions, in pixels, of the source data.
	/// @return True if the texture generation succeeded and the handle is valid, false if not.
	virtual bool GenerateTexture(TextureHandle& texture_handle, const byte* source, const Vector2i& source_dimensions);
	/// Called by Rocket when a loaded texture is no longer required.
	/// @param texture The texture handle to release.
	virtual void ReleaseTexture(TextureHandle texture);

	/// Returns the native horizontal texel offset for the renderer.
	/// @return The renderer's horizontal texel offset. The default implementation returns 0.
	virtual float GetHorizontalTexelOffset();
	/// Returns the native vertical texel offset for the renderer.
	/// @return The renderer's vertical texel offset. The default implementation returns 0.
	virtual float GetVerticalTexelOffset();
	/// Returns the number of pixels per inch.
	/// @returns The number of pixels per inch. The default implementation returns 100.
	virtual float GetPixelsPerInch();
	/// Called when this render interface is released.
	virtual void Release();

	/// Get the context currently being rendered. This is only valid during RenderGeometry,
	/// CompileGeometry, RenderCompiledGeometry, EnableScissorRegion and SetScissorRegion.
	Context* GetContext() const;

protected:
	virtual void OnReferenceDeactivate();

private:
	Context* context;

	friend class Context;
};
}
}
#endif
|
<filename>src/commands/Settings/Welcome & Leave/setleave.js<gh_stars>0
const { Command } = require('klasa');
const { MessageEmbed } = require('discord.js');
module.exports = class extends Command {
constructor(...args) {
super(...args, {
enabled: true,
runIn: ['text'],
aliases: [],
cooldown: 10,
permissionLevel: 6,
description: 'Set the leave message and channel to give a farewell when user leaves',
extendedHelp: 'No extended help available.',
usage: '<msg|channel|disable> [channel:channelname] [message:str] [...]',
usageDelim: ' ',
subcommands: true,
});
}
async channel(msg, [channel]) {
if(!channel) return msg.send(this.generateFailed(`**${msg.author}, Please provide a valid channel**`));
await msg.guild.settings.update('greet.leave.channel', channel, msg.guild).then(() => {
return msg.send(this.generateSuccess(`**${msg.author}, Leave channel is now set to ${channel}**`));
});
}
async msg(msg, [...message]) {
if(!message) return msg.send(this.generateFailed(`**${msg.author}, Please provide a greet message**`));
await msg.guild.settings.update('greet.leave.enabled', true, msg.guild);
await msg.guild.settings.update('greet.leave.message', message.join(' '), msg.guild).then(() => {
return msg.send(this.generateSuccess(`**${msg.author}, Leave Message is now set to \n\n${message.join(' ')}**`));
});
}
async disable(msg) {
if(!msg.guild.settings.greet.leave.enabled) return msg.send(this.generateFailed('**Leave Message is already disabled in this server**'));
await msg.guild.settings.update('greet.leave.enabled', false, msg.guild).then(() => {
return msg.send(this.generateSuccess(`**${msg.author}, Leave Message is now disabled in this server**`));
});
}
generateSuccess(message) {
const embed = new MessageEmbed()
.setColor('#f48f42')
.setDescription(message);
return embed;
}
generateFailed(message) {
const embed = new MessageEmbed()
.setColor('RED')
.setDescription(message);
return embed;
}
}; |
<filename>src/main/java/net/jamsimulator/jams/mips/assembler/InstructionSnapshot.java
/*
* MIT License
*
* Copyright (c) 2021 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package net.jamsimulator.jams.mips.assembler;
import net.jamsimulator.jams.mips.assembler.exception.AssemblerException;
import net.jamsimulator.jams.mips.instruction.Instruction;
import net.jamsimulator.jams.mips.instruction.assembled.AssembledInstruction;
import net.jamsimulator.jams.mips.instruction.pseudo.PseudoInstruction;
import net.jamsimulator.jams.mips.label.LabelReference;
import net.jamsimulator.jams.mips.parameter.ParameterType;
import net.jamsimulator.jams.mips.parameter.parse.ParameterParseResult;
import net.jamsimulator.jams.mips.register.Registers;
import net.jamsimulator.jams.utils.InstructionUtils;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Snapshot of a single assembly-language instruction line. The scan pass
 * ({@link #scan}) decodes the mnemonic and selects the matching
 * {@link Instruction}; the assemble pass ({@link #assemble}) encodes it and
 * writes the resulting machine words into memory at this snapshot's address.
 */
public class InstructionSnapshot {

    // Source line number (used in error messages) and target memory address.
    private final int line, address;
    // raw: the text that is decoded (mnemonic + parameters).
    // original: the untouched source text, registered with the assembler via
    // addOriginalInstruction() during scan().
    private final String raw, original;
    // Suffix tried when a plain label lookup fails (presumably used for
    // file-local label resolution -- see assembleParameters()).
    private final String labelSufix;

    // Populated by scanInstruction(): raw parameter strings and the match.
    private List<String> parameters;
    private Instruction instruction;

    public InstructionSnapshot(int line, int address, String raw, String original, String labelSufix) {
        this.address = address;
        this.line = line;
        this.raw = raw;
        this.original = original;
        this.labelSufix = labelSufix;
    }

    /**
     * Scan pass: registers the original line with the assembler and decodes
     * the instruction.
     *
     * @param assembler the assembler performing the scan
     * @return the number of bytes this instruction will occupy (see
     *         {@link #scanInstruction})
     */
    public int scan(MIPS32Assembler assembler) {
        assembler.addOriginalInstruction(line, address, original);
        return decode(assembler);
    }

    /**
     * Assemble pass: parses the parameters, encodes the instruction and
     * writes each assembled word into memory, 4 bytes apart, starting at
     * this snapshot's address.
     *
     * @throws AssemblerException re-wrapped with the source line number if
     *                            encoding fails
     */
    public void assemble(MIPS32AssemblingFile file) {
        ParameterParseResult[] parameters = assembleParameters(file);
        try {
            Assembler assembler = file.getAssembler();
            AssembledInstruction[] assembledInstructions =
                    instruction.assemble(assembler.getInstructionSet(), address, parameters);
            //Add instructions to memory
            int relativeAddress = address;
            for (AssembledInstruction assembledInstruction : assembledInstructions) {
                assembler.getMemory().setWord(relativeAddress, assembledInstruction.getCode(), false, true, true);
                relativeAddress += 4;
            }
        } catch (AssemblerException ex) {
            // Attach the source line number so the user sees where it failed.
            throw new AssemblerException(line, "Error while assembling instruction.", ex);
        }
    }

    /**
     * Splits {@code raw} into mnemonic and parameter string at the first
     * space or tab (whichever comes first), then matches the mnemonic
     * against the instruction set.
     */
    private int decode(MIPS32Assembler assembler) {
        int mnemonicIndex = raw.indexOf(' ');
        int tabIndex = raw.indexOf("\t");
        // Use the earliest of space/tab as the mnemonic terminator.
        if (mnemonicIndex == -1) mnemonicIndex = tabIndex;
        else if (tabIndex != -1) mnemonicIndex = Math.min(mnemonicIndex, tabIndex);
        String mnemonic;
        String parameters;
        if (mnemonicIndex == -1) {
            // No separator: the whole line is the mnemonic, no parameters.
            mnemonic = raw;
            parameters = "";
        } else {
            mnemonic = raw.substring(0, mnemonicIndex);
            parameters = this.raw.substring(mnemonicIndex + 1).trim();
        }
        var instructions = assembler.getInstructionSet().getInstructionByMnemonic(mnemonic);
        return scanInstruction(assembler.getRegisters(), instructions, parameters, mnemonic);
    }

    /**
     * Selects the best-matching instruction for the parameter string.
     *
     * @return the size in bytes: a pseudo-instruction expands to
     *         {@code getInstructionAmount() << 2} bytes, a real instruction
     *         occupies 4 bytes
     * @throws AssemblerException if no instruction matches
     */
    private int scanInstruction(Registers registers, Set<Instruction> instructions, String rawParameters, String mnemonic) {
        parameters = new LinkedList<>();
        var parametersReference = new AtomicReference<List<String>>();
        instruction = InstructionUtils.getBestInstruction(instructions, parametersReference, registers, rawParameters).orElse(null);
        // The helper reports the split parameter strings through the reference.
        parameters = parametersReference.get();
        if (instruction == null) {
            throw new AssemblerException(line, "Instruction " + mnemonic + " with the given parameters not found.\n" + rawParameters);
        }
        return instruction instanceof PseudoInstruction
                ? ((PseudoInstruction) instruction).getInstructionAmount(parameters) << 2
                : 4;
    }

    /**
     * Parses each raw parameter and resolves label references. A label is
     * looked up by its plain name first, then (if {@code labelSufix} is
     * non-empty) with the suffix appended; failing both raises an error.
     * Every resolved label records a back-reference to this address.
     */
    private ParameterParseResult[] assembleParameters(MIPS32AssemblingFile file) {
        ParameterParseResult[] assembledParameters = new ParameterParseResult[parameters.size()];
        int index = 0;
        ParameterParseResult result;
        for (ParameterType parameter : instruction.getParameters()) {
            result = parameter.parse(parameters.get(index), file.getAssembler().getRegisters());
            //Parse label
            if (result.isHasLabel()) {
                var optional = file.getLabel(result.getLabel());
                if (optional.isEmpty()) {
                    if (!labelSufix.isEmpty()) {
                        // Retry with the local-label suffix appended.
                        optional = file.getLabel(result.getLabel() + labelSufix);
                        if (optional.isEmpty()) {
                            throw new AssemblerException(line, "Label " + result.getLabel() + labelSufix + " not found.");
                        }
                    } else {
                        throw new AssemblerException(line, "Label " + result.getLabel() + " not found.");
                    }
                }
                optional.get().addReference(new LabelReference(address, file.getName(), line));
                result.setLabelValue(optional.get().getAddress());
            }
            assembledParameters[index++] = result;
        }
        return assembledParameters;
    }
}
|
<gh_stars>1-10
// Probe snippet (looks auto-generated, e.g. for a type-checker fixture):
// reads `length` and `name` off the global `toString` function and then
// invokes it; all results are deliberately discarded.
var _;
_ = toString.length;
_ = toString.name;
toString();
|
/*
Command-line app to convert Rimu source to HTML.
*/
package main
import (
"embed"
"fmt"
"io/ioutil"
"os"
"os/user"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/srackham/go-rimu/v11/internal/utils/stringlist"
"github.com/srackham/go-rimu/v11/rimu"
)
const VERSION = "11.3.0"
const STDIN = "-"
// rimurcPath returns the path of the user's $HOME/.rimurc file,
// or "" when the current user cannot be determined.
func rimurcPath() string {
	usr, err := user.Current()
	if err != nil {
		return ""
	}
	return filepath.Join(usr.HomeDir, ".rimurc")
}
// Helpers.
// die prints message (when non-empty) to stderr and terminates the
// process with exit status 1.
func die(message string) {
	if len(message) > 0 {
		fmt.Fprintln(os.Stderr, message)
	}
	os.Exit(1)
}
// fileExists reports whether stat-ing name succeeds. Any stat error
// (not only "not found") is treated as "does not exist".
func fileExists(name string) bool {
	if _, err := os.Stat(name); err != nil {
		return false
	}
	return true
}
//go:embed resources/**
var embeddedFS embed.FS
// readResourceFile returns the contents of the embedded file
// resources/<name>. It panics when the resource is missing, because the
// embedded set is fixed at build time and absence is a programming error.
func readResourceFile(name string) string {
	data, err := embeddedFS.ReadFile("resources/" + name)
	if err != nil {
		panic("embedded file: " + err.Error())
	}
	return string(data)
}
// importLayoutFile aborts with a fatal error: go-rimu only bundles its
// own layouts and does not load external --layout files.
func importLayoutFile(name string) string {
	// External layouts not supported in go-rimu.
	die("missing --layout: " + name)
	return "" // unreachable: die() exits the process
}
// main implements the rimugo command-line interface: parse options, build
// the ordered list of input sources (layout header, ~/.rimurc, --prepend
// values/files, source files, layout footer), render each with Rimu and
// emit the combined HTML to stdout or the output file.
func main() {
	args := stringlist.StringList(os.Args)
	args.Shift() // Skip program name.
	// nextArg pops the next argument or dies with err when none is left.
	nextArg := func(err string) string {
		if len(args) == 0 {
			die(err)
		}
		return args.Shift()
	}
	// nil means "not set on the command line" for these two options.
	var safeMode interface{}
	var htmlReplacement interface{}
	layout := ""
	noRimurc := false
	var prependFiles stringlist.StringList
	pass := false
	// Parse command-line options.
	prepend := ""
	outfile := ""
outer:
	for len(args) > 0 {
		arg := args.Shift()
		switch arg {
		case "--help", "-h":
			fmt.Printf("\n" + readResourceFile("manpage.txt") + "\n")
			os.Exit(0)
		case "--version":
			fmt.Printf(VERSION + "\n")
			os.Exit(0)
		case "--lint", "-l": // Deprecated in Rimu 10.0.0
			break
		case "--output", "-o":
			outfile = nextArg("missing --output file name")
		case "--pass":
			pass = true
		case "--prepend", "-p":
			prepend += nextArg("missing --prepend value") + "\n"
		case "--prepend-file":
			prependFiles.Push(nextArg("missing --prepend-file file name"))
		case "--no-rimurc":
			noRimurc = true
		case "--safe-mode",
			"--safeMode": // Deprecated in Rimu 7.1.0.
			s := nextArg("missing --safe-mode value")
			n, err := strconv.ParseInt(s, 10, strconv.IntSize)
			if err != nil {
				die("illegal --safe-mode option value: " + s)
			}
			safeMode = int(n)
		case "--html-replacement",
			"--htmlReplacement": // Deprecated in Rimu 7.1.0.
			htmlReplacement = nextArg("missing --html-replacement value")
		// Styling macro definitions shortcut options.
		case "--highlightjs",
			"--mathjax",
			"--section-numbers",
			"--theme",
			"--title",
			"--lang",
			"--toc", // Deprecated in Rimu 8.0.0
			"--no-toc",
			"--sidebar-toc", // Deprecated in Rimu 10.0.0
			"--dropdown-toc", // Deprecated in Rimu 10.0.0
			"--custom-toc",
			"--header-ids",
			"--header-links":
			// --lang/--title/--theme take a value; the rest are booleans.
			macroValue := ""
			if strings.Contains("--lang|--title|--theme", arg) {
				macroValue = nextArg("missing " + arg + " value")
			} else {
				macroValue = "true"
			}
			prepend += "{" + arg + "}='" + macroValue + "'\n"
		case "--layout",
			"--styled-name": // Deprecated in Rimu 10.0.0
			layout = nextArg("missing --layout value")
			prepend += "{--header-ids}='true'\n"
		case "--styled", "-s":
			prepend += "{--header-ids}='true'\n"
			prepend += "{--no-toc}='true'\n"
			layout = "sequel"
		default:
			args.Unshift(arg) // argv contains source file names.
			break outer
		}
	}
	// args contains the list of source files.
	files := args
	if len(files) == 0 {
		files.Push(STDIN)
	} else if len(files) == 1 && layout != "" && files[0] != "-" && outfile == "" {
		// Use the source file name with .html extension for the output file.
		// BUG FIX: this must only happen when the user did NOT pass --output.
		// The original condition (outfile != "") silently overwrote an
		// explicit --output value and did nothing when none was given.
		ext := path.Ext(files[0])
		outfile = files[0][:len(files[0])-len(ext)] + ".html"
	}
	const RESOURCE_TAG = "resource:"    // Tag for resource files.
	const PREPEND = "--prepend options" // Tag for --prepend source.
	if layout != "" {
		// Envelope source files with header and footer.
		files.Unshift(RESOURCE_TAG + layout + "-header.rmu")
		files.Push(RESOURCE_TAG + layout + "-footer.rmu")
	}
	// Prepend $HOME/.rimurc file if it exists.
	if !noRimurc && fileExists(rimurcPath()) {
		prependFiles.Unshift(rimurcPath())
	}
	if prepend != "" {
		prependFiles.Push(PREPEND)
	}
	files = append(prependFiles, files...)
	// Convert Rimu source files to HTML.
	output := ""
	errors := 0
	var opts rimu.RenderOptions
	if htmlReplacement != nil {
		opts.HtmlReplacement = htmlReplacement
	}
	for _, infile := range files {
		var source string
		switch {
		case strings.HasPrefix(infile, RESOURCE_TAG):
			infile = infile[len(RESOURCE_TAG):]
			if (stringlist.StringList{"classic", "flex", "plain", "sequel", "v8"}).IndexOf(layout) >= 0 {
				source = readResourceFile(infile)
			} else {
				source = importLayoutFile(infile)
			}
			opts.SafeMode = 0 // Resources are trusted.
		case infile == STDIN:
			bytes, _ := ioutil.ReadAll(os.Stdin)
			source = string(bytes)
			opts.SafeMode = safeMode
		case infile == PREPEND:
			source = prepend
			opts.SafeMode = 0 // --prepend options are trusted.
		default:
			if !fileExists(infile) {
				die("source file does not exist: " + infile)
			}
			bytes, err := ioutil.ReadFile(infile)
			if err != nil {
				die(err.Error())
			}
			source = string(bytes)
			// Prepended and ~/.rimurc files are trusted.
			if prependFiles.IndexOf(infile) > -1 {
				opts.SafeMode = 0
			} else {
				opts.SafeMode = safeMode
			}
		}
		// Skip .html and pass-through inputs.
		if !(strings.HasSuffix(infile, ".html") || (pass && infile == STDIN)) {
			opts.Callback = func(message rimu.CallbackMessage) {
				f := infile
				if infile == STDIN {
					f = "/dev/stdin"
				}
				msg := message.Kind + ": " + f + ": " + message.Text
				// Truncate long messages to keep diagnostics on one line.
				if len(msg) > 120 {
					msg = msg[:117] + "..."
				}
				fmt.Fprintln(os.Stderr, msg)
				if message.Kind == "error" {
					errors++
				}
			}
			source = rimu.Render(source, opts)
		}
		source = strings.TrimSpace(source)
		if source != "" {
			output += source + "\n"
		}
	}
	output = strings.TrimSpace(output)
	if outfile == "" || outfile == "-" {
		fmt.Print(output)
	} else {
		err := ioutil.WriteFile(outfile, []byte(output), 0644)
		if err != nil {
			die(err.Error())
		}
	}
	if errors > 0 {
		os.Exit(1)
	}
	os.Exit(0)
}
|
<reponame>pradeep-gr/mbed-os5-onsemi
/*
* Copyright (c) 2013-2016 Realtek Semiconductor Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MBED_RTL8195A_CLK_H
#define MBED_RTL8195A_CLK_H

/* Platform clock: 200 MHz / 6 * 5 = 166.67 MHz (see comment). */
#define PLATFORM_CLK (200000000UL/6*5) // 166MHz
//#define SYSTEM_CLK PLATFORM_CLK
//#define TIMER_CLK (32*1000)

/*
 * Expands to four static inline helpers for the clock-control register
 * `ctrl`: __<name>_ACTCK_Enable/Disable set/clear BIT_ACTCK_<name>
 * (active-mode clock gate) and __<name>_SLPCK_Enable/Disable set/clear
 * BIT_SLPCK_<name> (sleep-mode clock gate). No comments may appear inside
 * the macro body: every line must end with the '\' continuation.
 */
#define __BUILD_CCTRL_MACRO(name,ctrl) \
static inline void \
__##name##_ACTCK_Enable(void) \
{ \
__RTK_PERI_SETBIT(ctrl, BIT_ACTCK_##name); \
} \
static inline void \
__##name##_SLPCK_Enable(void) \
{ \
__RTK_PERI_SETBIT(ctrl, BIT_SLPCK_##name); \
} \
static inline void \
__##name##_ACTCK_Disable(void) \
{ \
__RTK_PERI_CLRBIT(ctrl, BIT_ACTCK_##name); \
} \
static inline void \
__##name##_SLPCK_Disable(void) \
{ \
__RTK_PERI_CLRBIT(ctrl, BIT_SLPCK_##name); \
} \
//enum clk_idx {
// CLK_ANACK = 0,
// CLK_A33CK = 1,
//};

// Interface to ROM functions
extern __longcall uint32_t HalGetCpuClk(void);
#define __CLK_GetCPUClk HalGetCpuClk

// Interface for HAL functions
/* Thin wrapper over the ROM HalGetCpuClk() -- presumably returns the CPU
 clock frequency; units are not visible here, TODO confirm. */
static inline uint32_t CLK_GetCPUClk(void)
{
return __CLK_GetCPUClk();
}

extern void CLK_BackupCPUClk(void);
extern void CLK_ReFillCPUClk(void);
extern uint32_t CLK_Calculate(uint8_t clksel);
#endif
|
import subprocess
def execute_command(command: str) -> str:
    """Run a shell command and return its output, or an error string.

    NOTE(review): the "exit" branch calls ``sublime.run_command``, but
    ``sublime`` is never imported in this file -- presumably this runs
    inside a Sublime Text plugin host where the module is injected;
    outside that host it raises NameError. TODO confirm.
    """
    if command == "exit":
        sublime.run_command('exit')
        return "Command executed successfully"
    else:
        try:
            # shell=True runs `command` through the shell: if `command` can
            # contain untrusted input this is a shell-injection risk.
            output = subprocess.check_output(command, shell=True, text=True)
            return output
        except subprocess.CalledProcessError as e:
            # Non-zero exit status: report instead of propagating.
            return f"Error executing command: {e}"
<gh_stars>0
package ee.ituk.api.user.dto;
import lombok.Getter;
/**
 * Request payload for setting a new password. Lombok's {@code @Getter}
 * generates {@code getCode()} and {@code getPassword()}.
 */
@Getter
public class NewPasswordDto {
    // One-time code authorising the change -- presumably issued by the
    // password-reset flow; verify against the consuming service.
    private String code;
    // The new password to set.
    private String password;
}
|
# Handles user sign-up: renders the registration form and creates accounts.
class UsersController < ApplicationController
  # GET /users/new -- blank user backing the sign-up form.
  def new
    @user = User.new
  end

  # POST /users -- persist the user, or re-render the form when
  # validation fails.
  def create
    @user = User.new(user_params)
    @user.save ? redirect_to(@user) : render(:new)
  end

  private

  # Strong parameters: only these attributes may be mass-assigned.
  def user_params
    params.require(:user).permit(:username, :email, :password)
  end
end
#!/bin/bash
# FD 3 is opened on /dev/null; message blocks below redirect stderr to it
# ("2>&3") so that the 'set -x' trace is suppressed around user-facing output.
exec 3<> /dev/null
# red prints its argument to stdout in bright red, followed by a newline.
# BUG FIX: the argument is passed as a printf *argument* (%b), never
# interpolated into the format string, so '%' in the message can no longer
# corrupt the output. %b keeps expanding backslash escapes (\n etc.) in the
# message, which callers rely on.
red() {
  printf '\e[91m%b\e[0m\n' "$1"
}
# green prints its argument to stdout in green, followed by a newline.
# Same fix as red(): data goes in as a %b argument, not the format string.
green() {
  printf '\e[32m%b\e[0m\n' "$1"
}
# Abort on the first failing command; trace every command to stderr.
set -e
set -x
# Git remote and branch names used for the publish push.
ORIGIN="origin"
LOCALBRANCH="master"
REMOTEBRANCH="master"
# trim echoes its arguments with leading and trailing whitespace removed.
# BUG FIX: uses printf instead of `echo -n "$var"`, which swallowed results
# that look like echo options (e.g. a trimmed value of "-n" printed nothing).
trim() {
  local var="$*"
  # remove leading whitespace characters
  var="${var#"${var%%[![:space:]]*}"}"
  # remove trailing whitespace characters
  var="${var%"${var##*[![:space:]]}"}"
  printf '%s' "$var"
}
# --dev: move the production package.json aside and activate package_dev.json.
if [ "$1" = "--dev" ]; then
  if [ ! -f package_dev.json ]; then red "package_dev.json does not exist - stop"; exit 1; fi
  if [ ! -f package.json ]; then red "package.json does not exist - stop"; exit 1; fi
  if [ -f package_prod.json ]; then red "package_prod.json does exist - stop"; exit 1; fi
  mv package.json package_prod.json
  if [ ! -f package_prod.json ]; then red "package_prod.json does not exist - stop"; exit 1; fi
  mv package_dev.json package.json
  if [ -f package_dev.json ]; then red "package_dev.json does exist - stop"; exit 1; fi
  if [ ! -f package.json ]; then red "package.json does not exist - stop 2"; exit 1; fi
  { green "package.json -> package_prod.json and package_dev.json -> package.json [done]"; } 2>&3
  exit 0
fi

# --prod: restore the production package.json (inverse of --dev).
if [ "$1" = "--prod" ]; then
  if [ ! -f package_prod.json ]; then red "package_prod.json does not exist - stop"; exit 1; fi
  if [ ! -f package.json ]; then red "package.json does not exist - stop"; exit 1; fi
  if [ -f package_dev.json ]; then red "package_dev.json does exist - stop"; exit 1; fi
  mv package.json package_dev.json
  if [ ! -f package_dev.json ]; then red "package_dev.json does not exist - stop"; exit 1; fi
  mv package_prod.json package.json
  if [ -f package_prod.json ]; then red "package_prod.json does exist - stop"; exit 1; fi
  if [ ! -f package.json ]; then red "package.json does not exist - stop 2"; exit 1; fi
  { green "package.json -> package_dev.json and package_prod.json -> package.json [done]"; } 2>&3
  exit 0
fi

# Refuse to publish while the dev package.json is active.
if [ -f package_prod.json ]; then
  { red "package_prod.json exist, before update run\n /bin/bash update.sh --prod"; } 2>&3
  exit 1
fi

# Run the test suite before anything is pushed ('set -e' aborts on failure).
make t

# Publishing is only allowed from the local master branch.
if [ "$(git rev-parse --abbrev-ref HEAD)" != "$LOCALBRANCH" ]; then
  { red "switch first branch to <$LOCALBRANCH>"; } 2>&3
  exit 1
fi
{ green "\ncurrent branch: $LOCALBRANCH"; } 2>&3

# Uncommitted local changes must be committed first.
DIFF="$(git diff --numstat)"
DIFF="$(trim "$DIFF")"
if [ "$DIFF" != "" ]; then
  { red "\n\n Error: First commit changes ...\n\n"; } 2>&3
  exit 2
fi

# Anything new to publish? Compare the local branch against the remote.
DIFF="$(git diff --numstat "$LOCALBRANCH" "$ORIGIN/$REMOTEBRANCH")"
DIFF="$(trim "$DIFF")"
if [ "$DIFF" != "" ] || [ "$1" = "force" ]; then
  # BUG FIX: with 'set -e' active, the original 'cmd; if [ "$?" != 0 ]'
  # pattern was dead code -- the script exited before the check could run,
  # so the friendly messages and distinct exit codes never fired. Failures
  # are now tested with 'if ! cmd' / 'if cmd' directly.
  if ! git push "$ORIGIN" "$REMOTEBRANCH" --tags; then
    { red "\n\nCan't git push - stop bumping version\n"; } 2>&3
    exit 3
  fi
  npm version patch
  # make umd
  # cat comment.txt dist/spvalidation.js > dist/test.js
  # mv dist/test.js dist/spvalidation.js
  # cat comment.txt dist/spvalidation.min.js > dist/test.js
  # mv dist/test.js dist/spvalidation.min.js
  node update-badge.js
  git add README.md
  # git add dist
  # git add examples.es5.js
  git commit --amend --no-edit
  if git push "$ORIGIN" "$REMOTEBRANCH"; then
    if ! npm publish; then
      { red "\n\nCan't npm publish\n try to run 'npm login'\n"; } 2>&3
      exit 4
    fi
    git push --tags --force
    make h
    #git push origin master --tags
  else
    { red "\n\nCan't git push\n"; } 2>&3
    exit 5
  fi
else
  { red "\n\n Nothing new to publish, \n run 'make uf' if you're sure that there is still something that should be published\n\n"; } 2>&3
fi
|
const db = require('../data')
// Look up the still-open hold (closed is null) for the given hold category
// and FRN. The matching row is locked within the supplied transaction, and
// rows already locked by other transactions are skipped.
async function getExistingHold (holdCategoryId, frn, transaction) {
  return db.hold.findOne({
    transaction,
    lock: true,
    skipLocked: true,
    where: { holdCategoryId, frn, closed: null }
  })
}

module.exports = getExistingHold
|
def update_metadata(metadata: dict, operation_type: str) -> dict:
    """Annotate operation metadata in place with ``:label=`` suffixes.

    For backups, the trailing path segments of ``name`` and ``database``
    are labelled; for restores, the last ``.``-separated part of ``@type``
    and the progress timestamps are copied into new labelled keys.
    The (mutated) input dict is returned for convenience.
    """
    def last_segment(path: str) -> str:
        # Everything after the final '/' (the whole string if none).
        return path.rsplit('/', 1)[-1]

    if operation_type == 'DATABASE_BACKUP':
        metadata['name'] = f"{last_segment(metadata['name'])}:label=BACKUP"
        metadata['database'] = f"{last_segment(metadata['database'])}:label=SOURCE_DATABASE"
    elif operation_type == 'DATABASE_RESTORE':
        metadata['DONE'] = f"{metadata['@type'].rsplit('.', 1)[-1]}:label=DONE"
        metadata['START_TIME'] = f"{metadata['progress']['startTime']}:label=START_TIME"
        metadata['END_TIME'] = f"{metadata['progress']['endTime']}:label=END_TIME"
    return metadata
def longest_palindrome(str):
    """Return the longest palindromic substring of ``str``.

    Expand-around-center technique: O(n^2) time, O(1) extra space.
    BUG FIX: an empty input now returns "" -- the original returned None,
    which broke callers that expect a string (e.g. len() on the result).
    """
    # Base case: the longest palindrome of "" is "".
    if len(str) == 0:
        return ""
    longest = ""
    for i in range(len(str)):
        # Odd-length palindromes centered at index i.
        current_palindrome = get_palindrome(str, i, i)
        if len(current_palindrome) > len(longest):
            longest = current_palindrome
        # Even-length palindromes centered between i and i+1.
        current_palindrome = get_palindrome(str, i, i+1)
        if len(current_palindrome) > len(longest):
            longest = current_palindrome
    return longest

def get_palindrome(str, left, right):
    """Expand outwards from str[left..right] while the span is a palindrome,
    and return the widest palindromic slice found."""
    while left >= 0 and right < len(str) and (str[left] == str[right]):
        left -= 1
        right += 1
    # Loop overshoots by one on each side; slice back inside.
    return str[left + 1: right]
# Demo: the longest palindromic substring here is "abaaba" (indices 0-5).
# The original comment claimed "bbbb", which does not even occur in the
# string (only "bbb" does) and is shorter than "abaaba".
string = "abaabaxfcyybbbfcbaa"
print(longest_palindrome(string))
# Output: abaaba
<filename>__tests__/generate-vue-components.spec.ts<gh_stars>0
import { createComponentDefinition } from '../src/generate-vue-component';
describe('createComponentDefinition', () => {
it('should create a Vue component with the render method using createCommonRender', () => {
const generateComponentDefinition = createComponentDefinition('Components', []);
const output = generateComponentDefinition({
properties: [],
tagName: 'my-component',
methods: [],
events: [],
});
expect(output).toEqual(`
export const MyComponent = /*@__PURE__*/ defineContainer<Components.MyComponent>('my-component');
`
});
it('should create v-model bindings', () => {
const generateComponentDefinition = createComponentDefinition('Components', [{
elements: ['my-component'],
event: 'ionChange',
targetAttr: 'value'
}]);
const output = generateComponentDefinition({
properties: [
{
name: 'value',
internal: false,
mutable: false,
optional: false,
required: false,
type: 'string',
complexType: {
original: '',
resolved: '',
references: {},
},
docs: {
text: '',
tags: [],
},
},
],
tagName: 'my-component',
methods: [],
events: [
{
internal: false,
name: 'ionChange',
method: '',
bubbles: true,
cancelable: true,
composed: false,
docs: {
text: '',
tags: [],
},
complexType: {
original: '',
resolved: '',
references: {},
},
},
],
});
expect(output).toEqual(`
export const MyComponent = /*@__PURE__*/ defineContainer<Components.MyComponent>('my-component', [
'value',
'ionChange'
],
{
"modelProp": "value",
"modelUpdateEvent": "ionChange"
});
`);
});
it('should add router bindings', () => {
const generateComponentDefinition = createComponentDefinition('Components', [], ['my-component']);
const output = generateComponentDefinition({
tagName: 'my-component',
properties: [
{
name: 'value',
internal: false,
mutable: false,
optional: false,
required: false,
type: 'string',
complexType: {
original: '',
resolved: '',
references: {},
},
docs: {
text: '',
tags: [],
},
},
],
});
expect(output).toEqual(`
export const MyComponent = /*@__PURE__*/ defineContainer<Components.MyComponent>('my-component', [
'value'
],
{
"routerLinkComponent": true
});
`);
});
it('should add router and v-model bindings', () => {
const generateComponentDefinition = createComponentDefinition('Components', [{
elements: ['my-component'],
event: 'ionChange',
targetAttr: 'value'
}], ['my-component']);
const output = generateComponentDefinition({
tagName: 'my-component',
properties: [
{
name: 'value',
internal: false,
mutable: false,
optional: false,
required: false,
type: 'string',
complexType: {
original: '',
resolved: '',
references: {},
},
docs: {
text: '',
tags: [],
},
},
],
events: [
{
internal: false,
name: 'ionChange',
method: '',
bubbles: true,
cancelable: true,
composed: false,
docs: {
text: '',
tags: [],
},
complexType: {
original: '',
resolved: '',
references: {},
},
},
],
});
expect(output).toEqual(`
export const MyComponent = /*@__PURE__*/ defineContainer<Components.MyComponent>('my-component', [
'value',
'ionChange'
],
{
"modelProp": "value",
"modelUpdateEvent": "ionChange",
"routerLinkComponent": true
});
`);
});
it('should pass event references to the createCommonRender function', () => {
const generateComponentDefinition = createComponentDefinition('Components');
const output = generateComponentDefinition({
properties: [],
tagName: 'my-component',
methods: [],
events: [
{
internal: false,
name: 'my-event',
method: '',
bubbles: true,
cancelable: true,
composed: false,
docs: {
text: '',
tags: [],
},
complexType: {
original: '',
resolved: '',
references: {},
},
},
],
});
expect(output).toEqual(`
export const MyComponent = /*@__PURE__*/ defineContainer<Components.MyComponent>('my-component', [
'my-event'
]);
`);
});
it('should add a prop with Reference to the original component library prop type', () => {
const generateComponentDefinition = createComponentDefinition('Components');
const output = generateComponentDefinition({
properties: [
{
name: 'myProp',
internal: false,
mutable: false,
optional: false,
required: false,
type: 'string',
complexType: {
original: '',
resolved: '',
references: {},
},
docs: {
text: '',
tags: [],
},
},
],
tagName: 'my-component',
methods: [],
events: [],
});
expect(output).toEqual(`
export const MyComponent = /*@__PURE__*/ defineContainer<Components.MyComponent>('my-component', [
'myProp'
]);
`);
});
});
|
<reponame>nightskylark/DevExtreme<filename>testing/helpers/frameworkMocks.js
"use strict";

// Unit-test mocks for the DevExtreme SPA framework: state sources,
// navigation devices/managers, an in-memory "browser" with history and
// hashchange emulation, a browser adapter, a router and an application.
// Loaded as an AMD module or attached to DevExpress.mocks.framework.
(function(root, factory) {
    /* global jQuery */
    if(typeof define === 'function' && define.amd) {
        define(function(require, exports, module) {
            module.exports = factory(
                require("jquery"),
                require("core/class"),
                require("framework/browser_adapters").DefaultBrowserAdapter,
                require("framework/navigation_devices"),
                require("framework/navigation_manager"),
                require("framework/application").Application
            );
        });
    } else {
        DevExpress.mocks = { framework: {} };
        jQuery.extend(DevExpress.mocks.framework, factory(
            jQuery,
            DevExpress.Class,
            DevExpress.framework.DefaultBrowserAdapter,
            DevExpress.framework.NavigationDevices,
            DevExpress.framework.NavigationManager,
            DevExpress.framework.Application
        ));
    }
}(window, function($, Class, DefaultBrowserAdapter, navigationDevicesModule, navigationManagerModule, Application) {
    var exports = {};

    // Records every saveState/restoreState/removeState call for assertions.
    exports.MockStateSource = Class.inherit({
        ctor: function() {
            this.__saveStateLog = [];
            this.__restoreStateLog = [];
            this.__removeStateLog = [];
        },
        saveState: function(storage) {
            this.__saveStateLog.push(storage);
        },
        restoreState: function(storage) {
            this.__restoreStateLog.push(storage);
        },
        removeState: function(storage) {
            this.__removeStateLog.push(storage);
        }
    });

    // Builds a mixin that logs init/setUri/back calls (name + arguments)
    // into __methodsHistory before delegating to the base implementation.
    var createMethodsHistory = function() {
        var result = {
                __methodsHistory: [ ],
                __clearHistory: function() {
                    this.__methodsHistory.splice(0, this.__methodsHistory.length);
                }
            },
            methods = [ "init", "setUri", "back" ];
        $.each(methods, function(i, methodName) {
            result[methodName] = function() {
                result.__methodsHistory.push({
                    methodName: methodName,
                    args: arguments
                });
                return this.callBase.apply(this, arguments);
            };
        });
        return result;
    };

    // Stack-based navigation device backed by a MockBrowser (call-logging).
    exports.MockStackBasedNavigationDevice = navigationDevicesModule.StackBasedNavigationDevice.inherit($.extend(createMethodsHistory(), {
        ctor: function(options) {
            options = options || {};
            this.__clearHistory();
            options.window = options.window || new exports.MockBrowser();
            options.browserAdapter = options.browserAdapter || new DefaultBrowserAdapter({ window: options.window });
            this.callBase(options);
        }
    }));

    // History-based navigation device backed by a MockBrowser (call-logging).
    exports.MockHistoryBasedNavigationDevice = navigationDevicesModule.HistoryBasedNavigationDevice.inherit($.extend(createMethodsHistory(), {
        ctor: function(options) {
            options = options || {};
            this.__clearHistory();
            options.window = options.window || new exports.MockBrowser();
            options.browserAdapter = options.browserAdapter || new DefaultBrowserAdapter({ window: options.window });
            this.callBase(options);
        }
    }));

    // Creates a fake window.history. The "old browser" variant lacks
    // pushState/replaceState; isAndroid mimics an old-Android quirk where
    // going back from entry 1 resets entry 0.
    var createHistory = function(browser, isOldBrowser, isAndroid) {
        var oldHistory = {
            _navigatedCount: 0,
            _parent: browser,
            _history: [ { uri: "" } ],
            go: function(count) {
                var current = this._current + count,
                    state = this._history[current];
                // Guard against runaway navigation loops in tests.
                if(++this._navigatedCount > 100) {
                    throw Error("Mock browser was navigated more than 100 times.");
                }
                if(!state) {
                    return;
                }
                this._current = current;
                this.state = state;
                this._parent.location.hash = state.uri;
                this._parent.lastHash = this._parent.location.hash;
                $(this._parent).trigger("hashchange", {});
            },
            back: function() {
                if(this._current === 1 && isOldBrowser && isAndroid) {
                    this._history[0] = { uri: "" };
                }
                this.go(-1);
            },
            forward: function() {
                this.go(1);
            },
            length: 1,
            state: undefined,
            _current: 0
        };
        // Modern variant: adds pushState/replaceState on top of the old one.
        var history = $.extend({}, oldHistory, {
            pushState: function(state, title, uri) {
                uri = uri || "";
                this._history.push({ state: state, title: title, uri: uri });
                this.state = state || null;
                this._current++;
                this._parent.lastHash = this._parent.location.hash = uri || "";
                this.length = this._current + 1;
            },
            replaceState: function(state, title, uri) {
                this._history[this._current] = { state: state, title: title, uri: uri };
                this.state = state || null;
                this._parent.lastHash = this._parent.location.hash = uri || "";
            }
        });
        return isOldBrowser ? oldHistory : history;
    };

    // Minimal scriptable browser: location.hash + history + hashchange.
    // doEvents() simulates the async hashchange dispatch of a real browser.
    exports.MockBrowser = Class.inherit({
        ctor: function(options) {
            options = options || {};
            this.top = this;
            this.history = createHistory(this, options.isOldBrowser, options.isAndroid);
            this.location = { hash: options.hash || "" };
            this.lastHash = this.location.hash;
            this._firstDoEvents = true;
        },
        _normalizeHash: function(hash) {
            if(hash[0] !== "#") {
                hash = "#" + hash;
            }
            return hash;
        },
        doEvents: function() {
            this.location.hash = this._normalizeHash(this.location.hash);
            if(this._normalizeHash(this.lastHash) === this.location.hash) {
                this._firstDoEvents = false;
                return;
            }
            this.lastHash = this.location.hash;
            if(this._firstDoEvents) {
                this.history._history.pop();
                this._firstDoEvents = false;
            } else {
                this.history._current++;
            }
            this.history.length = this.history._current + 1;
            this.history._history.push({ uri: this.location.hash });
            $(this).trigger("hashchange", {});
        },
        __raiseEvent: function(eventName) {
            $(this).trigger(eventName);
        }
    });

    // Browser adapter that records every call in __callHistory and resolves
    // all navigation operations immediately.
    exports.MockBrowserAdapter = Class.inherit({
        ctor: function(options) {
            options = options || {};
            this.__callHistory = [];
            this.canWorkInPureBrowser = options.canWorkInPureBrowser === undefined ? true : options.canWorkInPureBrowser;
        },
        replaceState: function(uri) {
            this.__callHistory.push({
                name: "replaceState",
                args: arguments
            });
            this.__hash = uri;
            return $.Deferred().resolve().promise();
        },
        pushState: function(uri) {
            this.__callHistory.push({
                name: "pushState",
                args: arguments
            });
            this.__hash = uri;
            return $.Deferred().resolve().promise();
        },
        createRootPage: function() {
            this.__callHistory.push({
                name: "createRootPage",
                args: arguments
            });
            return $.Deferred().resolve().promise();
        },
        back: function() {
            this.__callHistory.push({
                name: "back",
                args: arguments
            });
            return $.Deferred().resolve().promise();
        },
        getHash: function() {
            this.__callHistory.push({
                name: "getHash",
                args: arguments
            });
            return this.__hash;
        },
        isRootPage: function() {
            return this.__isRootPage;
        },
        popState: $.Callbacks(),
        _window: new exports.MockBrowser(),
        __isRootPage: false,
        __canBack: false,
        __hash: ""
    });

    // Navigation manager that counts navigating/navigated/navigationCanceled
    // callback invocations in __callbacksHistory.
    exports.MockStackBasedNavigationManager = navigationManagerModule.StackBasedNavigationManager.inherit({
        ctor: function(options) {
            options = options || {};
            this.callBase($.extend(options, {
                navigationDevice: options.navigationDevice || new exports.MockStackBasedNavigationDevice()
            }));
            var callbacks = [ "navigating", "navigated", "navigationCanceled" ],
                that = this;
            this.__callbacksHistory = [];
            $.each(callbacks, function(i, callbackName) {
                that.__callbacksHistory[callbackName] = 0;
                that.on(callbackName, function() {
                    that.__callbacksHistory[callbackName]++;
                });
            });
        }
    });

    // Plain pass-through subclass (no extra instrumentation).
    exports.MockHistoryBasedNavigationManager = navigationManagerModule.HistoryBasedNavigationManager.inherit({
    });

    // Router stub: logs parse/format inputs and answers with the configured
    // callback or canned result.
    exports.MockRouter = Class.inherit({
        ctor: function(options) {
            this.__parseLog = [];
            this.__formatLog = [];
            this.__parseResult = options.__parseResult;
            this.__parseCallback = options.__parseCallback;
            this.__formatResult = options.__formatResult;
            this.__formatCallback = options.__formatCallback;
        },
        parse: function(uri) {
            this.__parseLog.push(uri);
            if(this.__parseCallback) {
                return this.__parseCallback(uri);
            }
            return this.__parseResult;
        },
        format: function(routeData) {
            this.__formatLog.push(routeData);
            if(this.__formatCallback) {
                return this.__formatCallback(window.uri);
            }
            return this.__formatResult;
        }
    });

    // Application wired to the mock manager/router; records every view shown
    // in __showViewLog and resolves via _showViewImplMockDeferred if set.
    exports.MockApplication = Application.inherit({
        ctor: function(options) {
            options = options || {};
            this.__showViewLog = [];
            this.callBase($.extend(false, {
                ns: {},
                navigationManager: new exports.MockStackBasedNavigationManager(),
                router: options.router || new exports.MockRouter({
                    __parseResult: options.__routeParseResult,
                    __formatResult: options.__routeFormatResult
                })
            }, options));
        },
        _showViewImpl: function(viewInfo) {
            this.__showViewLog.push(viewInfo);
            var deferred = this._showViewImplMockDeferred || $.Deferred().resolve();
            return deferred.promise();
        }
    });

    return exports;
}));
|
var Share = function(element)
{
    // Wires a CKEditor instance to diff-match-patch so local edits can be
    // captured as patches (collaborative-editing scaffolding; applying the
    // patch to a second editor is sketched in the commented-out lines).
    var editor,
        timer,
        patch,          // was an implicit global; keep it local to this closure
        text = "";
    // Single declaration of dmp (it was previously declared twice).
    var dmp = new diff_match_patch();
    // NOTE(review): references free variables (delta, timestamp, a, b) that
    // are not defined in this scope — calling diff() as written would throw;
    // confirm intended data source before use.
    function diff() {
        delta[timestamp] = dmp.diff_toDelta(dmp.diff_main(a, b));
    }
    // Merge a new delta into the per-draft delta log kept in localStorage.
    // NOTE(review): draftName is a free variable — presumably a global; verify.
    var appendToDeltaFile = function(delta) {
        var oldDeltas = JSON.parse( localStorage.getItem(draftName + "-deltas") );
        var newDeltas = $.extend(oldDeltas, delta);
        localStorage.setItem(draftName + "-deltas", JSON.stringify(newDeltas));
    };
    editor = CKEDITOR.replace(element);
    // Debounce keystrokes: 500ms after the last key, snapshot a patch from
    // the previous text to the current editor contents.
    editor.on( 'key', function( evt ) {
        clearTimeout(timer);
        timer = setTimeout(function() {
            patch = dmp.patch_make(text, evt.editor.getData());
            text = evt.editor.getData();
            // console.log(delta);
            // var ranges = editor2.getSelection().getRanges();
            // editor2.setData( dmp.patch_apply(patch, editor2.getData())[0] );
            // editor2.getSelection().selectRanges(ranges);
        }, 500);
    });
    window.exports = {
        dmp: dmp,
        editor: editor,
    }
}
|
<reponame>jhonfre1994/multi-tenant-spring-boot
package com.tenant.example.exceptions.responses;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.ResponseStatus;
/**
*
* @author jhonfre
*/
@ResponseStatus(HttpStatus.BAD_REQUEST)
public class BadRequestException extends RuntimeException {
/**
 * Use this error in the following cases: an element could not be deleted,
 * the process could not be completed because of some error, or there was an
 * error with the data sent to the API.
 *
 * @param exception message describing the captured error
 */
public BadRequestException(String exception) {
super(exception);
}
}
|
#!/bin/sh
# Install and build the UI and server npm packages, then build the Docker
# image. Abort on the first failing command (and on unset variables) so a
# broken npm step never produces a stale docker image.
set -eu
npm --prefix ui i
npm --prefix server i
npm --prefix server run build
npm --prefix server run install
docker build . -t timer:latest
|
package me.legit.models.decoration;
/**
 * Immutable pairing of a decoration identifier with the quantity owned.
 */
public class DecorationInventoryItem {
// Identifier of the decoration this inventory entry refers to.
private DecorationId decorationId;
// Owned quantity; declared as Number rather than int — presumably to match
// the serializer's numeric type, TODO confirm against callers.
private Number count;
public DecorationInventoryItem(DecorationId decorationId, Number count) {
this.decorationId = decorationId;
this.count = count;
}
/** @return the decoration identifier */
public DecorationId getDecorationId() {
return decorationId;
}
/** @return the owned quantity */
public Number getCount() {
return count;
}
}
|
#pragma once
#include <typed-geometry/functions/basic/limits.hh>
#include <typed-geometry/types/scalars/default.hh>
/**
* Provides random generators:
* - splitmix
* - xorshift
* - pcg
*
* Default rng: tg::rng
*
* Provides detail::uniform01<float / double>(rng) for 0..1 (inclusive)
*/
namespace tg
{
// 32-bit generator built on the splitmix64 state transition.
// Satisfies the UniformRandomBitGenerator shape: result_type, (min)(), (max)(),
// operator(), plus seed/discard/state helpers.
struct splitmix
{
public:
using result_type = u32;
// Output covers the full u32 range.
static constexpr result_type(min)() { return 0; }
static constexpr result_type(max)() { return detail::limits<u32>::max(); }
// Default seed of 1 gives a deterministic, reproducible stream.
constexpr splitmix() : m_seed(1) {}
// Seed from any source accepted by one of the seed() overloads below.
template <class SeedT>
constexpr explicit splitmix(SeedT&& rd)
{
seed(rd);
}
// Seed from another generator: combine two draws into the 64-bit state.
template <class Rng>
constexpr auto seed(Rng& rd) -> decltype(u64(rd()), void())
{
m_seed = u64(rd()) << 31 | u64(rd());
}
// Seed from a fixed value; the xor + conditional add keeps the one seed that
// would map to state 0 from doing so.
constexpr void seed(u64 seed) { m_seed = (seed ^ 0x2b41a160bab708aduLL) + u64(seed == 0x2b41a160bab708aduLL); }
// splitmix64 step: advance by the golden-ratio increment, then mix.
// NOTE(review): the final ">> 31" extracts upper bits of the 64-bit mix as the
// 32-bit result (not the canonical splitmix64 output) — confirm intended.
constexpr result_type operator()()
{
u64 z = (m_seed += u64(0x9E3779B97F4A7C15));
z = (z ^ (z >> 30)) * u64(0xBF58476D1CE4E5B9);
z = (z ^ (z >> 27)) * u64(0x94D049BB133111EB);
return result_type((z ^ (z >> 31)) >> 31);
}
// Advance the stream n steps, discarding the outputs.
constexpr void discard(unsigned long long n)
{
for (unsigned long long i = 0; i < n; ++i)
operator()();
}
// Equal state implies identical future output streams.
constexpr bool operator==(splitmix const& rhs) const { return m_seed == rhs.m_seed; }
constexpr bool operator!=(splitmix const& rhs) const { return m_seed != rhs.m_seed; }
// Raw state accessor (e.g. for serialization).
constexpr u64 state() const { return m_seed; }
private:
u64 m_seed;
};
// 32-bit generator using a 64-bit xorshift state with a multiplicative output
// function (xorshift*-style: the result is derived from the pre-advance state).
struct xorshift
{
public:
using result_type = u32;
// Output covers the full u32 range.
static constexpr result_type(min)() { return 0; }
static constexpr result_type(max)() { return detail::limits<u32>::max(); }
// Fixed non-zero default seed (xorshift must never have state 0).
constexpr xorshift() : m_seed(0xc1f651c67c62c6e0ull) {}
// Seed from any source accepted by one of the seed() overloads below.
template <class SeedT>
constexpr explicit xorshift(SeedT&& rd)
{
seed(rd);
}
// Seed from another generator: combine two draws into the 64-bit state.
template <class Rng>
constexpr auto seed(Rng& rd) -> decltype(u64(rd()), void())
{
m_seed = u64(rd()) << 31 | u64(rd());
}
// Seed from a fixed value; the xor + conditional add keeps the one seed that
// would map to state 0 from doing so.
constexpr void seed(u64 seed) { m_seed = (seed ^ 0x2b41a160bab708aduLL) + u64(seed == 0x2b41a160bab708aduLL); }
// Output = high 32 bits of (old state * constant); state then advances via
// three xor-shifts (11, 31, 18).
constexpr result_type operator()()
{
u64 result = m_seed * 0xd989bcacc137dcd5ull;
m_seed ^= m_seed >> 11;
m_seed ^= m_seed << 31;
m_seed ^= m_seed >> 18;
return u32(result >> 32ull);
}
// Advance the stream n steps, discarding the outputs.
constexpr void discard(unsigned long long n)
{
for (unsigned long long i = 0; i < n; ++i)
operator()();
}
// Equal state implies identical future output streams.
constexpr bool operator==(xorshift const& rhs) const { return m_seed == rhs.m_seed; }
constexpr bool operator!=(xorshift const& rhs) const { return m_seed != rhs.m_seed; }
// Raw state accessor (e.g. for serialization).
constexpr u64 state() const { return m_seed; }
private:
u64 m_seed;
};
// PCG-XSH-RR 64/32 generator: 64-bit LCG state, 32-bit output produced by an
// xorshift of the high bits followed by a state-dependent random rotation.
struct pcg
{
public:
using result_type = u32;
// Output covers the full u32 range.
static constexpr result_type(min)() { return 0; }
static constexpr result_type(max)() { return detail::limits<u32>::max(); }
// Reference default state/increment constants; increment must be odd.
constexpr pcg() : m_state(0x853c49e6748fea9bULL), m_inc(0xda3e39cb94b95bdbULL) {}
// Seed from any source accepted by seed() below.
template <class SeedT>
constexpr explicit pcg(SeedT&& rd)
{
seed(rd);
}
// Standard pcg32 seeding: derive an odd increment from s1, then fold s0 into
// the state with two warm-up steps so state and stream are well mixed.
template <class Rng>
constexpr auto seed(Rng& rd) -> decltype(u64(rd()), void())
{
u64 s0 = u64(rd()) << 31 | u64(rd());
u64 s1 = u64(rd()) << 31 | u64(rd());
m_state = 0;
m_inc = (s1 << 1) | 1;
(void)operator()();
m_state += s0;
(void)operator()();
}
// LCG advance, then XSH-RR output permutation of the *old* state.
constexpr result_type operator()()
{
u64 oldstate = m_state;
m_state = oldstate * 6364136223846793005ULL + m_inc;
u32 xorshifted = u32(((oldstate >> 18u) ^ oldstate) >> 27u);
auto rot = int(oldstate >> 59u);
// (-rot) & 31 handles rot == 0 without invoking a 32-bit shift (UB).
return (xorshifted >> rot) | (xorshifted << ((-rot) & 31));
}
// Advance the stream n steps, discarding the outputs.
constexpr void discard(unsigned long long n)
{
for (unsigned long long i = 0; i < n; ++i)
operator()();
}
// Equal (state, inc) implies identical future output streams.
constexpr bool operator==(pcg const& rhs) const { return m_state == rhs.m_state && m_inc == rhs.m_inc; }
constexpr bool operator!=(pcg const& rhs) const { return m_state != rhs.m_state || m_inc != rhs.m_inc; }
private:
u64 m_state;
u64 m_inc;
};
namespace detail
{
// Primary template intentionally empty: only float and double are supported.
template <class ScalarT>
struct unit_uniform
{
};
template <>
struct unit_uniform<float>
{
// Uniform sample in [0, 1]; inclusive because rng() can equal Rng::max().
template <class Rng>
static float sample(Rng& rng)
{
return float(rng()) / float(Rng::max());
}
};
template <>
struct unit_uniform<double>
{
// Combine two 32-bit draws for extra precision, still in [0, 1].
// x ranges over [0, m*m + m], matching the divisor; for a 32-bit generator
// (m = 2^32 - 1) m*m + m fits in u64 without overflow.
template <class Rng>
static double sample(Rng& rng)
{
auto m = u64(Rng::max());
auto x = rng() * m + rng();
return double(x) / double(m * m + m);
}
};
// Convenience dispatcher: detail::uniform01<float/double>(rng).
template <class ScalarT, class Rng>
ScalarT uniform01(Rng& rng)
{
return unit_uniform<ScalarT>::sample(rng);
}
} // namespace detail
} // namespace tg
|
# models.py
from django.db import models
class Movie(models.Model):
    """Minimal movie record: a short title plus a free-form description."""
    title = models.CharField(max_length=200)
    description = models.TextField()
# views.py
from django.shortcuts import redirect, render

from .models import Movie
def movies_list(request):
    """Render the full movie catalogue."""
    context = {'movies': Movie.objects.all()}
    return render(request, 'movies/list.html', context)
def movie_add(request):
    """Show the add-movie form; on POST, create the movie and return to the list.

    Fix: this view calls ``redirect``, which was never imported (only
    ``render`` was), so every successful POST raised ``NameError``.
    """
    if request.method == 'POST':
        movie = Movie(
            # Direct indexing is kept: a missing field is a client error and
            # surfaces as KeyError -> HTTP 500 (same behavior as before).
            title=request.POST['title'],
            description=request.POST['description']
        )
        movie.save()
        return redirect('movies_list')
    return render(request, 'movies/add.html')
# urls.py
from django.urls import path
from . import views
# Route table for the movies app: the list view and the add-form view.
urlpatterns = [
    path('movies/', views.movies_list, name='movies_list'),
    path('movies/add/', views.movie_add, name='movie_add'),
]
package com.cutout.kit.immersionbar;
/**
* Author: 侯亚东
* Date: 2021-10-21 17:02
* Email: <EMAIL>
* Des: The interface On navigation bar listener.
*/
/**
 * Callback interface for observing navigation-bar visibility changes.
 */
public interface OnNavigationBarListener {
/**
 * Invoked when the navigation bar's visibility changes.
 *
 * @param show true if the navigation bar is now shown, false if hidden
 */
void onNavigationBarChange(boolean show);
}
|
//
// JJTabbarController.h
// xiaoyulvtu
//
// Created by 杨剑 on 2018/10/19.
// Copyright © 2018年 贱贱. All rights reserved.
//
#import <UIKit/UIKit.h>
#import <AudioToolbox/AudioToolbox.h>
//#import "JJRootShebeiController.h"
//#import "JJRootTongzhiController.h"
//#import "JJRootWoController.h"
#import "JJJianceController.h"
#import "JJDitushebeiController.h"
#import "JJTongjiController.h"
#import "JJShebeiliebiaoController.h"
#import "JJWodeController.h"
/// Root tab bar controller hosting the app's main section controllers
/// (monitoring, map, statistics, device list, profile — see imports above).
@interface JJTabbarController : UITabBarController
/// Switches to the device tab ("qiehuan shebei" is pinyin, presumably
/// 切换设备 = "switch device" — confirm with callers).
- (void)qiehuanShebei;
@end
|
<filename>packages/database-provider/src/dao/ranking.ts
import { RankingProvider } from "@cph-scorer/core";
import { Repository } from "typeorm";
import { RankingEntity } from "../entity/ranking";
import { RankingType, Ranking, Player, uuid } from "@cph-scorer/model";
import { PlayerEntity } from "../entity/player";
/**
 * TypeORM-backed implementation of RankingProvider.
 */
export class RankingDao implements RankingProvider {
  constructor(private readonly rankingRepository: Repository<RankingEntity>) {}

  /**
   * Find the ranking row of a given type that contains the given player.
   * Returns null when no matching row exists.
   */
  public async findRanking(
    id: string,
    type: RankingType
  ): Promise<Ranking | null> {
    const res = await this.rankingRepository
      .createQueryBuilder("ranking")
      .leftJoinAndSelect("ranking.players", "players")
      .select([
        "ranking.id",
        "ranking.participation",
        "ranking.point",
        "ranking.goalAverage",
        "ranking.type",
        "players.id",
      ])
      .where("ranking.type = :type AND players.id = :id", { type, id })
      .getOne();
    // getOne() yields undefined in TypeORM 0.2.x but null in 0.3.x; the
    // previous strict `=== undefined` check missed the null case.
    if (!res) return null;
    return res.toModel();
  }

  /**
   * Merge a partial ranking into the stored row identified by (id, type).
   * Throws if the row does not exist (findOneOrFail).
   */
  public async update(id: uuid, ranking: Partial<Ranking>): Promise<void> {
    const r = await this.rankingRepository.findOneOrFail({
      where: { id, type: ranking.type },
    });
    await this.rankingRepository.save(Object.assign(r, ranking));
  }

  /**
   * Full leaderboard for a ranking type, ordered by points desc, then goal
   * average desc, then participation asc.
   */
  public async getRanking(type: RankingType): Promise<Ranking[]> {
    return (
      await this.rankingRepository
        .createQueryBuilder("ranking")
        .leftJoinAndSelect("ranking.players", "players")
        .select([
          "ranking.id",
          "ranking.participation",
          "ranking.point",
          "ranking.goalAverage",
          "players.firstName",
          "players.id",
          "players.lastName",
        ])
        .where("ranking.type = :type", { type })
        .orderBy({
          "ranking.point": "DESC",
          "ranking.goalAverage": "DESC",
          "ranking.participation": "ASC",
        })
        .getMany()
    ).map((x) => x.toModel());
  }

  /**
   * Create a fresh ranking row of the given type attached to one player.
   */
  public async createRanking(
    player: Partial<Player>,
    type: RankingType
  ): Promise<Ranking> {
    const ranking = new RankingEntity();
    const playerEntity = new PlayerEntity();
    playerEntity.fromModel(player);
    ranking.type = type;
    ranking.players = [playerEntity];
    return (await this.rankingRepository.save(ranking)).toModel();
  }
}
|
<gh_stars>0
package state
import "fmt"
// LeaderState is the "leader" role of the state machine — presumably part of
// a leader/follower election pattern (see the commented SwitchTo below).
// It embeds StateBase for shared behavior and records its own StateEnum.
type LeaderState struct {
	StateBase
	State StateEnum
}
// Do performs the leader's action: currently it just prints a marker line.
// Output is identical to the previous Printf + empty Println pair.
func (l *LeaderState) Do() {
	fmt.Println("Leader DO")
	//l.SwitchTo(StateFollower)
}
|
#!/bin/bash
# Package the Python dependencies for the Twitter Lambda layer into python.zip.
# Could be wired into "npm run build", but that would make it complicated to
# read; for now we just run this manually.
set -euo pipefail

cd lib
python3 -m venv twitterVenv
# shellcheck disable=SC1091 -- venv created just above
source twitterVenv/bin/activate
# -y: never prompt (a bare "pip uninstall" hangs non-interactive runs);
# || true: best-effort — requests may not be installed in a fresh venv.
pip uninstall -y requests || true
pip install requests==2.25.1
pip install urllib3==1.26.2
pip install boto3==1.17.4
pip install tweepy
# NOTE(review): hard-codes python3.7 — breaks if the venv is built with
# another minor version; confirm against the Lambda runtime in use.
cd twitterVenv/lib/python3.7
cp -r site-packages python
zip -r python.zip python/
|
# Construct the episode URL by substituting next_episode into url_template.
# Both expansions are quoted: unquoted, a template containing whitespace or
# glob characters would be word-split/globbed before printf saw it.
episode_url=$(printf "${url_template}" "${next_episode}")

# Print debug information
echo "Number of videos: ${num_videos}"
echo "last_episode: ${last_episode}"
echo "Regular expression result: ${regexp_result}"
echo "Next episode: ${next_episode}"
echo "URL: ${episode_url}"

# Check if the next episode is newer than the last downloaded episode
if [[ "${next_episode}" -gt "${last_episode}" ]]; then
    # Initiate the download of the episode
    # Add your download command here, for example:
    # wget "${episode_url}" -O "episode_${next_episode}.mp4"
    echo "Downloading episode ${next_episode} from ${episode_url}"
else
    echo "No new episode available for download"
fi
The proposed algorithm for classifying spam emails using machine learning will involve two steps.
Step 1: Preparing the Data
The first step is to prepare the data by extracting the features from the emails. The features could include the presence of certain words, the presence of certain links, and more sophisticated techniques such as text analysis.
Step 2: Building the Model
The second step involves building a machine learning model to classify the emails based on the features identified in step 1. Popular machine learning algorithms that can be used include Decision Trees, Support Vector Machines (SVMs), K-Nearest Neighbors (KNN), Logistic Regression, and Naïve Bayes.
After the model has been trained, it can be tested with a test dataset to evaluate the accuracy of the model. The model can then be used to classify the emails as either being a spam email or not. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.