text stringlengths 1 1.05M |
|---|
# Validates the user-supplied sequence, runs MARCOIL coiled-coil prediction,
# and prepares plot/numerical output for the results directory.
# NOTE(review): tokens of the form %name.content / %alignment.path are
# placeholders substituted by the surrounding workflow engine before this
# script executes; they are not valid bash on their own.

SEQ_COUNT=$(egrep '^>' ../params/alignment | wc -l)
CHAR_COUNT=$(wc -m < ../params/alignment)
# Reject oversized input. `false` leaves a failing exit status for the
# caller but does not abort the script itself.
if [[ ${CHAR_COUNT} -gt "10000" ]] ; then
    echo "#Input may not contain more than 10000 characters." >> ../results/process.log
    false
fi
# Exactly one plain protein sequence is expected (no MSA / multi-FASTA).
if [[ ${FORMAT} = "1" ]] || [[ ${SEQ_COUNT} -gt "1" ]] ; then
    echo "#Input is a multiple sequence alignment; expecting a single protein sequence." >> ../results/process.log
    false
fi
# Raw sequence without a FASTA header: strip non-letters and whitespace,
# then prepend a ">JOBID" header line.
if [[ ${SEQ_COUNT} = "0" ]] ; then
    sed 's/[^a-z^A-Z]//g' ../params/alignment > ../params/alignment1
    perl -pe 's/\s+//g' ../params/alignment1 > ../params/alignment
    CHAR_COUNT=$(wc -m < ../params/alignment)
    if [[ ${CHAR_COUNT} -gt "10000" ]] ; then
        echo "#Input may not contain more than 10000 characters." >> ../results/process.log
        false
    else
        sed -i "1 i\>${JOBID}" ../params/alignment
    fi
fi
echo "#Query is a protein sequence with ${CHAR_COUNT} residues." >> ../results/process.log
echo "done" >> ../results/process.log
echo "#Executing MARCOIL." >> ../results/process.log
# Switch on correct Matrix
# MTK / MTIDK matrices are staged into ../0/ and select the -C / -C -i flags.
if [[ "mtk" = "%matrix_marcoil.content" ]] ; then
    matrix_copy.sh "${MARCOILMTK}" ../0/R5.MTK
    matrix_copy.sh "${MARCOILMTIDK}" ../0/R5.MTIDK
    PARAMMATRIX="-C"
elif [[ "mtidk" = "%matrix_marcoil.content" ]] ; then
    matrix_copy.sh "${MARCOILMTIDK}" ../0/R5.MTIDK
    matrix_copy.sh "${MARCOILMTK}" ../0/R5.MTK
    PARAMMATRIX="-C -i"
elif [[ "9fam" = "%matrix_marcoil.content" ]] ; then
    PARAMMATRIX=""
fi
# High vs. low transition probabilities for the MARCOIL HMM.
TRANSPROB=${MARCOILINPUT}/R3.transProbHigh
if [[ "%transition_probability.content" = "0" ]] ; then
    TRANSPROB="${MARCOILINPUT}/R3.transProbLow"
fi
cp %alignment.path ../params/alignment.in
marcoil ${PARAMMATRIX} \
    +dssSl \
    -T ${TRANSPROB} \
    -E ${MARCOILINPUT}/R2.emissProb \
    -P ../params/alignment \
    ../params/alignment.in
echo "done" >> ../results/process.log
echo "#Preparting OUTPUT." >> ../results/process.log
# Prepare MARCOIL GNUPLOT
# In PSSM mode the *PSSM outputs are also copied to the generic names the
# downstream tools expect.
if [[ "${PARAMMATRIX}" = "-C -i" ]] || [[ "${PARAMMATRIX}" = "-C" ]] ;then
    prepare_marcoil_gnuplot.pl "$(readlink -f ../params/alignment)" "$(readlink -f ../params/alignment.ProbListPSSM)"
    cp ../params/alignment.ProbListPSSM ../params/alignment.ProbList
    cp ../params/alignment.DomainsPSSM ../params/alignment.Domains
    cp ../params/alignment.CompactProfilePSSM ../params/alignment.ProbPerState
else
    prepare_marcoil_gnuplot.pl "$(readlink -f ../params/alignment)" "$(readlink -f ../params/alignment.ProbList)"
fi
# Numerical output
create_numerical_marcoil.rb "$(readlink -f ../params/)/"
cp ../params/* ../results/
echo "done" >> ../results/process.log
class Pet:
    """A single adoptable pet: a display name plus a species label."""

    def __init__(self, name, pet_type):
        # Stored as .type (not .pet_type) — callers read pet.type.
        self.name, self.type = name, pet_type
class PetAdoptionSystem:
    """In-memory registry of adoptable pets, kept in insertion order."""

    def __init__(self):
        # List of Pet-like objects exposing .name and .type.
        self.pets = []

    def add_pet(self, name, pet_type):
        """Register a new pet and announce the addition."""
        self.pets.append(Pet(name, pet_type))
        print(f"{name} the {pet_type} has been added to the system.")

    def list_all_pets(self):
        """Print every registered pet as 'name - type'."""
        print("Available pets:")
        for pet in self.pets:
            print(f"{pet.name} - {pet.type}")

    def remove_pet(self, name):
        """Remove the first pet with this name, or report that none exists."""
        match = next((pet for pet in self.pets if pet.name == name), None)
        if match is None:
            print(f"No pet named {name} found in the system.")
            return
        self.pets.remove(match)
        print(f"{name} has been adopted and removed from the system.")

    def display_pets_by_type(self, pet_type):
        """Print only the pets whose type matches pet_type."""
        print(f"Pets of type {pet_type}:")
        for pet in self.pets:
            if pet.type != pet_type:
                continue
            print(f"{pet.name} - {pet.type}")
# Example usage
# Smoke demo: builds a small registry, exercises each operation, and prints
# the results. Runs at import time (no __main__ guard).
adoption_system = PetAdoptionSystem()
adoption_system.add_pet("Buddy", "dog")
adoption_system.add_pet("Whiskers", "cat")
adoption_system.add_pet("Polly", "bird")
adoption_system.add_pet("Max", "dog")
adoption_system.list_all_pets()
adoption_system.display_pets_by_type("dog")
adoption_system.remove_pet("Buddy")
adoption_system.list_all_pets()
#!/bin/bash
# Sets up a Terrier IR installation for the TREC collection:
#   1. trec_setup.sh builds the collection mapping / index configuration
#   2. the stock terrier.properties is replaced with our customized copy
#
# Fail fast on any error or unset variable: previously the script printed
# the success message even when a step had failed.
set -euo pipefail

# Step 1: Run trec_setup.sh script to set up collection mapping and index files
./bin/trec_setup.sh corpus/TREC

# Step 2: Replace the default configuration file with the customized configuration file
rm -f etc/terrier.properties
cp etc/terrier.custom etc/terrier.properties

echo "Terrier setup completed successfully."
#!/bin/bash
# Copyright (c) 2017 The ACEseq workflow developers.
# Distributed under the MIT License (license terms are at https://www.github.com/eilslabs/ACEseqWorkflow/LICENSE.txt).

# Runs PSCBS segmentation via R. The fit is written to a temporary file
# first so a partial write never clobbers ${FILENAME_SEGMENTS}; it is moved
# into place only on success.
tmpSegments="${FILENAME_SEGMENTS}_tmp"
# Uppercase so the R script receives TRUE/FALSE.
nocontrol="${isNoControlWorkflow^^}"

# All expansions are quoted: unquoted variables (as before) break on paths
# containing spaces and allow unintended word splitting/globbing.
"${RSCRIPT_BINARY}" --vanilla "${TOOL_PSCBS_SEGMENTATION}" \
    --file_data "${FILE_PSCBS_DATA}" \
    --file_breakpoints "${FILENAME_BREAKPOINTS}" \
    --chrLengthFile "${CHROMOSOME_LENGTH_FILE}" \
    --file_fit "${tmpSegments}" \
    --minwidth "${min_seg_width}" \
    --undo.SD "${undo_SD}" \
    -h "${pscbs_prune_height}" \
    --sv "${SV}" \
    --libloc "${libloc_PSCBS}" \
    --nocontrol "${nocontrol}"

if [[ "$?" != 0 ]]
then
    echo "There was a non-zero exit code while generating fit.txt file;"
    exit 2
fi

mv "${tmpSegments}" "${FILENAME_SEGMENTS}"
|
#!/bin/bash
# Move to the directory containing this script so the relative XML paths
# below resolve regardless of the caller's working directory.
dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Quoted: the checkout path may contain spaces.
cd "$dir"

# Registration run (the old comment said "Atlas construction", but the
# subcommand actually invoked is `registration`).
deformetrica registration 3D model.xml data_set.xml optimization_parameters.xml --output-dir=output
|
package queue
import "testing"
// TestEnqueueAndDequeue verifies FIFO ordering: elements come back out of
// the queue in the order they were enqueued.
func TestEnqueueAndDequeue(t *testing.T) {
	q := &Queue{}
	for _, v := range []string{"a", "b"} {
		q.enqueue(v)
	}
	if got := q.dequeue(); got != "a" {
		t.Error("expected first element removed to be a")
	}
	if got := q.dequeue(); got != "b" {
		t.Error("expected second element removed to be b")
	}
}
|
<reponame>saviorocha/freeCodeCamp-study<gh_stars>0
// Quiz-app React context module (work in progress).
import axios from 'axios'
import React, { useState, useContext, useEffect } from 'react'

// Open Trivia DB category ids keyed by quiz topic.
const table = {
  sports: 21,
  history: 23,
  politics: 24,
}

const API_ENDPOINT = 'https://opentdb.com/api.php?'

// NOTE(review): `table`, `API_ENDPOINT`, `url`, `axios`, `useState` and
// `useEffect` are declared/imported but unused here — presumably
// work-in-progress; confirm before removing.
const url = ''

const AppContext = React.createContext()

// Provider currently exposes only the placeholder string 'hello' as the
// context value.
const AppProvider = ({ children }) => {
  return <AppContext.Provider value='hello'>{children}</AppContext.Provider>
}

// Convenience hook so consumers don't need to import useContext/AppContext
// themselves.
export const useGlobalContext = () => {
  return useContext(AppContext)
}

export { AppContext, AppProvider }
|
package main
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
// version is the CLI version reported by the version subcommand.
const version = "1.0.0"

var (
	// rootCmd is the top-level cobra command; subcommands are attached in init.
	rootCmd = &cobra.Command{
		Use:   "dns-drainctl",
		Short: "Drain by removing/replacing IP/net from DNS records with ease",
		Example: `
Drain IP 172.16.31.10 in project api-project-xxx by removing IP from records
$ dns-drainctl gcloud --project api-project-xxx drain -f drain.json 172.16.31.10/32
Drain IP 172.16.31.10 in project api-project-xxx by replacing IP with 172.16.17.32
$ dns-drainctl gcloud --project api-project-xxx drain 172.16.31.10/32 -f drain.json --replace-by 172.16.17.32
Undrain by using json file written in drain process
$ dns-drainctl gcloud --project api-project-xxx undrain -f drain.json`,
	}
)

// init wires up the subcommands.
// NOTE(review): versionCmd and gcloudCmd are defined in other files of this
// package (not visible in this chunk).
func init() {
	rootCmd.AddCommand(versionCmd)
	rootCmd.AddCommand(gcloudCmd)
}

// main runs the CLI and exits non-zero on any command error.
func main() {
	if err := rootCmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
|
import { BaseMaterial } from "../material";
import { Engine } from "../Engine";
import { Shader } from "../shader";
import { WGSLUnlitVertex } from "../shaderlib";
import { ShaderStage } from "../webgpu";
import { WGSLClusterDebug } from "./wgsl/WGSLClusterDebug";
import { LightManager } from "./LightManager";
/**
 * Material wrapping the "cluster_debug" shader: the shared unlit vertex
 * stage paired with a cluster-debug fragment stage sized by the light
 * manager's tile count and per-cluster light capacity.
 */
export class ClusterDebugMaterial extends BaseMaterial {
  /**
   * @param engine - Engine the shader/material are created against.
   */
  constructor(engine: Engine) {
    super(
      engine,
      Shader.create(
        "cluster_debug",
        new WGSLUnlitVertex(),
        ShaderStage.VERTEX,
        new WGSLClusterDebug(LightManager.TILE_COUNT, LightManager.MAX_LIGHTS_PER_CLUSTER)
      )
    );
  }
}
|
The best way to edit existing code and fix bugs or syntax errors is to first read through the code carefully and make sure you understand what it does. Then make small changes one at a time, re-testing after each change to see whether the bug still exists. If it does, debug the code using techniques such as print statements or additional runtime checks to pinpoint where things go wrong. Finally, test the code thoroughly to confirm that the bug is fixed and that no unintended side effects have been introduced.
#! /bin/bash
# Builds linuxdeploy-plugin-qt in a temporary directory, runs its tests,
# and packages it as an AppImage for $ARCH. Requires $ARCH; honours $CI.

set -e
set -x

if [ "$ARCH" == "" ]; then
    echo 'Error: $ARCH is not set'
    exit 1
fi

# use RAM disk if possible
if [ "$CI" == "" ] && [ -d /dev/shm ]; then
    TEMP_BASE=/dev/shm
else
    TEMP_BASE=/tmp
fi

BUILD_DIR=$(mktemp -d -p "$TEMP_BASE" linuxdeploy-plugin-qt-build-XXXXXX)

# Remove the build dir on any exit (success, failure, or interrupt).
cleanup () {
    if [ -d "$BUILD_DIR" ]; then
        rm -rf "$BUILD_DIR"
    fi
}
trap cleanup EXIT

# store repo root as variable
REPO_ROOT=$(readlink -f $(dirname $(dirname "$0")))
OLD_CWD=$(readlink -f .)

pushd "$BUILD_DIR"

# Cross-compile settings for 32-bit builds.
if [ "$ARCH" == "i386" ]; then
    EXTRA_CMAKE_ARGS=("-DCMAKE_TOOLCHAIN_FILE=$REPO_ROOT/cmake/toolchains/i386-linux-gnu.cmake" "-DUSE_SYSTEM_CIMG=OFF")
fi

cmake "$REPO_ROOT" -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=RelWithDebInfo "${EXTRA_CMAKE_ARGS[@]}" -DBUILD_TESTING=On

make -j$(nproc)

ctest -V --no-tests=error

make install DESTDIR=AppDir

strip_path=$(which strip)
if [ "$ARCH" == "i386" ]; then
    # download i386 strip for i386 AppImage
    # https://github.com/linuxdeploy/linuxdeploy/issues/59
    # Checksums pin the exact .deb contents before extraction.
    wget http://security.ubuntu.com/ubuntu/pool/main/b/binutils/binutils-multiarch_2.24-5ubuntu14.2_i386.deb
    echo "0106f170cebf5800e863a558cad039e4f16a76d3424ae943209c3f6b0cacd511 binutils-multiarch_2.24-5ubuntu14.2_i386.deb" | sha256sum -c
    wget http://security.ubuntu.com/ubuntu/pool/main/b/binutils/binutils-multiarch-dev_2.24-5ubuntu14.2_i386.deb
    echo "ed9ca4fbbf492233228f79fae6b349a2ed2ee3e0927bdc795425fccf5fae648e binutils-multiarch-dev_2.24-5ubuntu14.2_i386.deb" | sha256sum -c
    dpkg -x binutils-multiarch_2.24-5ubuntu14.2_i386.deb out/
    dpkg -x binutils-multiarch-dev_2.24-5ubuntu14.2_i386.deb out/
    rm binutils-multiarch*.deb
    strip_path=$(readlink -f out/usr/bin/strip)
    export LD_LIBRARY_PATH=$(readlink -f out/usr/lib)
fi

# zsync update info embedded into the AppImage for self-updates.
export UPD_INFO="gh-releases-zsync|linuxdeploy|linuxdeploy-plugin-qt|continuous|linuxdeploy-plugin-qt-$ARCH.AppImage"

wget https://github.com/TheAssassin/linuxdeploy/releases/download/continuous/linuxdeploy-"$ARCH".AppImage
chmod +x linuxdeploy*.AppImage

./linuxdeploy-"$ARCH".AppImage --appdir AppDir \
    -d "$REPO_ROOT"/resources/linuxdeploy-plugin-qt.desktop \
    -i "$REPO_ROOT"/resources/linuxdeploy-plugin-qt.svg \
    -e $(which patchelf) \
    -e "$strip_path" \
    --output appimage

mv linuxdeploy-plugin-qt-"$ARCH".AppImage* "$OLD_CWD"/
|
<gh_stars>0
/**
 * Run-length encode a string: each run of equal characters becomes
 * "<count><char>" when the run is longer than one, otherwise just "<char>".
 *
 * @param {String} str
 * @return {String}
 *
 * @example
 * For aabbbc should return 2a3bc
 *
 */
function encodeLine(str) {
  let ret = '';
  let char = '';
  let count = 0;
  // Emit the pending run, if any.
  const flush = () => {
    if (count > 0) ret += count > 1 ? count + char : char;
  };
  // for...of walks Unicode code points, so surrogate pairs (emoji etc.)
  // are treated as single characters — the previous str.split('') split
  // them into broken UTF-16 halves.
  for (const c of str) {
    if (c !== char) {
      flush();
      char = c;
      count = 0;
    }
    count++;
  }
  flush();
  return ret;
}
module.exports = encodeLine;
|
#!/bin/sh
# CocoaPods "Embed Frameworks" build phase: copies built frameworks into the
# app bundle, strips invalid architectures and re-signs as needed.
set -e

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies the framework at $1 into the app's Frameworks folder, resolving
# symlinks, stripping invalid architectures and code-signing the result.
install_framework()
{
  # Resolve the source: built product, its basename, or a literal path.
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  # Locate the framework's main binary (framework bundle or bare dylib).
  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies the dSYM of a vendored framework
# $1 is the dSYM path; silently does nothing if it is unreadable/absent.
install_dsym() {
  local source="$1"
  if [ -r "$source" ]; then
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DWARF_DSYM_FOLDER_PATH}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DWARF_DSYM_FOLDER_PATH}"
  fi
}
# Signs a framework with the provided identity
# $1 is the bundle/binary to sign. Skipped when signing is disabled; runs in
# the background (appended &) when parallel signing is enabled.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identitiy
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Strip invalid architectures
# Removes every architecture slice from $1 that is not listed in $ARCHS,
# so fat simulator/device binaries pass App Store validation.
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current file
  archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
  stripped=""
  for arch in $archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary" || exit 1
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
}
# Embed the VisionLab framework. Debug and Release previously had two
# identical, duplicated branches; a single combined condition is equivalent.
if [[ "$CONFIGURATION" == "Debug" || "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/VisionLab/VisionLab.framework"
fi
# Wait for any backgrounded codesign jobs before the build phase ends.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
<filename>imageeditor/src/main/java/com/createchance/imageeditor/freetype/FreeType.java<gh_stars>10-100
package com.createchance.imageeditor.freetype;
/**
 * FreeType 2 java access class.
 *
 * @author createchance
 * @date 2018-10-12
 */
public class FreeType {
    /**
     * Call this when init just once.
     * Loads the native "freetype" JNI library; throws UnsatisfiedLinkError
     * if the library cannot be found on the library path.
     */
    public static void init() {
        System.loadLibrary("freetype");
    }

    /**
     * Native text loader backed by FreeType.
     * NOTE(review): parameter semantics and the layout of the returned int[]
     * are defined by the native implementation, which is not visible here —
     * confirm against the JNI source before relying on them.
     */
    public static native int[] loadText(String fontPath, int[] textArray, int textSize);
}
|
const express = require('express')
const app = express()
const path = require('path')
const bodyParser = require('body-parser')

// Parses urlencoded bodies (HTML form POSTs)
app.use(bodyParser.urlencoded({ extended: false }))

// Ensure that the file userdb.json is accessible.
// NOTE(review): userdb.json appears to map usernames to PLAINTEXT
// passwords; store salted hashes (e.g. bcrypt) before real use.
const userdb = require(path.resolve(__dirname, 'userdb.json'))

// Serve the login form.
app.get('/', function (req, res) {
  res.sendFile(path.resolve(__dirname, 'login.html'))
})

// Handle a login attempt.
app.post('/', function (req, res) {
  const user = req.body.username
  const pass = req.body.password
  // hasOwnProperty guard: a bare userdb[user] lookup walks the prototype
  // chain, so usernames like "constructor" or "hasOwnProperty" would hit
  // inherited Object members instead of real accounts.
  if (user && pass &&
      Object.prototype.hasOwnProperty.call(userdb, user) &&
      userdb[user] === pass) {
    res.send('Welcome! You are logged in!')
  } else {
    res.status(400).send('Incorrect username or password')
  }
})

// Used to make sure the server works
app.listen(3000, () => {
  console.log('Listening on port 3000!')
})
// registered_task.rs
use task::{
    task_action::TaskAction,
    task_settings::TaskSettings,
    task_trigger::{TaskTrigger, TaskIdleTrigger, TaskLogonTrigger},
    RunLevel, Task,
};

/// Builds a `Task` from the provided pieces and hands it to the scheduler.
///
/// NOTE(review): `task_scheduler` is referenced without a visible `use` or
/// declaration in this chunk — confirm it resolves (crate root / prelude).
/// NOTE(review): `TaskIdleTrigger`, `TaskLogonTrigger` and `RunLevel` are
/// imported but unused here.
pub fn register_new_task(name: &str, action: TaskAction, settings: TaskSettings, triggers: Vec<TaskTrigger>) {
    // Create a new task using the provided parameters
    let new_task = Task {
        name: name.to_string(),
        action,
        settings,
        triggers,
    };
    // Register the new task with the task scheduler
    task_scheduler::register_task(new_task);
}
def abbreviateNumber(number):
    """Format a number with a K/M/B suffix, rounded to two decimals.

    Numbers whose magnitude does not exceed 999 are returned unchanged
    (as a string). The sign is carried by the rounded prefix itself.
    """
    magnitude = abs(number)
    # Largest scale first; the 999/999999/999999999 cut-offs mirror the
    # original branch conditions exactly, so boundary behaviour is unchanged.
    for threshold, divisor, suffix in (
        (999999999, 1000000000, "B"),
        (999999, 1000000, "M"),
        (999, 1000, "K"),
    ):
        if magnitude > threshold:
            # str() of a negative rounded float already carries the minus sign.
            return str(round(number / divisor, 2)) + suffix
    return str(number)  # No abbreviation needed for smaller numbers
<gh_stars>0
import { ForeignKey, Model, Table } from 'sequelize-typescript';
import { Dish } from '../../dishes/models';
import { Filling } from './filling.model';

/**
 * Join table linking dishes to their fillings (many-to-many).
 * Rows carry only the two foreign keys.
 */
@Table({
  tableName: 'dish_fillings',
})
export class DishFilling extends Model {
  // FK -> fillings table
  @ForeignKey(() => Filling)
  filling_id: number;

  // FK -> dishes table
  @ForeignKey(() => Dish)
  dish_id: number;
}
|
#!/usr/bin/env python
# Tests the angles produced by optimization routine
# usage: ./test_angles.py -g get_random_partition_graph -l 6 -r 7
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from networkx.generators.classic import barbell_graph
from itertools import product
import sys
import argparse
import random
import pickle
from operator import itemgetter
import qcommunity.modularity.graphs as gm
from qcommunity.utils.import_graph import generate_graph
from ibmqxbackend.ansatz import IBMQXVarForm
def run_angles(n_nodes,
               B,
               angles,
               C=None,
               backend='IBMQX',
               backend_params={
                   'backend_device': None,
                   'depth': 3
               }):
    """Run the variational ansatz at fixed angles and return the best
    (modularity, bitstring) pair among the sampled result strings.

    NOTE(review): backend_params is a mutable default argument; it is not
    mutated here, but a None default would be safer.
    """
    if backend == 'IBMQX':
        if not isinstance(angles, (np.ndarray, np.generic, list)):
            raise ValueError(
                "Incorrect angles received: {} for backend {}".format(
                    angles, backend))
        var_form = IBMQXVarForm(
            num_qubits=n_nodes, depth=backend_params['depth'])
        resstrs = var_form.run(
            angles, backend_name=backend_params['backend_device'])
    else:
        raise ValueError("Unsupported backend: {}".format(backend))
    # Score every sampled bitstring; keep the best by modularity.
    modularities = [
        (gm.compute_modularity(n_nodes, B, x, C=C) , x) for x in resstrs
    ]
    return max(modularities, key=itemgetter(0))
def test_angles(graph_generator_name,
                left,
                right,
                angles,
                seed=None,
                verbose=0,
                compute_optimal=False,
                backend='IBMQX',
                backend_params={
                    'backend_device': None,
                    'depth': 3
                }):
    # note that compute optimal uses brute force! Not recommended for medium and large problem
    # angles should be a dictionary with fields 'beta' and 'gamma', e.g. {'beta': 2.0541782343349086, 'gamma': 0.34703642333837853}
    # Returns a dict with max/mean modularity over sampled bitstrings, the
    # probability of hitting the optimum (if computed), and the angles used.
    # NOTE(review): backend_params is a mutable default argument (unused
    # mutation-wise here, but fragile).
    rand_seed = seed
    # Generate the graph
    G, _ = generate_graph(graph_generator_name, left, right, seed=seed)
    # Use angles
    # Using NetworkX modularity matrix
    B = nx.modularity_matrix(G).A
    # Compute ideal cost
    if compute_optimal:
        # NOTE(review): `solution_bitstring` is not defined anywhere in this
        # file — this branch raises NameError when compute_optimal=True.
        optimal_modularity = gm.compute_modularity(G, B, solution_bitstring)
        print("Optimal solution energy: ", optimal_modularity)
    else:
        optimal_modularity = None
    if backend == 'IBMQX':
        if not isinstance(angles, (np.ndarray, np.generic, list)):
            raise ValueError(
                "Incorrect angles received: {} for backend {}".format(
                    angles, backend))
        var_form = IBMQXVarForm(
            num_qubits=G.number_of_nodes(), depth=backend_params['depth'])
        resstrs = var_form.run(angles)
    else:
        raise ValueError("Unsupported backend: {}".format(backend))
    if verbose > 1:
        # print distribution
        # NOTE(review): `qubits` is undefined here — this branch raises
        # NameError when verbose > 1; presumably G.number_of_nodes() was meant.
        allstrs = list(product([0, 1], repeat=len(qubits)))
        freq = {}
        for bitstr in allstrs:
            freq[str(list(bitstr))] = 0
        for resstr in resstrs:
            resstr = str(list(resstr))  # for it to be hashable
            if resstr in freq.keys():
                freq[resstr] += 1
            else:
                raise ValueError("received incorrect string: {}".format(resstr))
        for k, v in freq.items():
            print("{} : {}".format(k, v))
    # Raw results
    # NOTE(review): compute_modularity is called with G here but with
    # n_nodes in run_angles — confirm which signature gm expects.
    modularities = [gm.compute_modularity(G, B, x) for x in resstrs]
    mod_max = max(modularities)
    # Probability of getting best modularity
    if compute_optimal:
        mod_pmax = float(np.sum(np.isclose(
            modularities, optimal_modularity))) / float(len(modularities))
    else:
        mod_pmax = None
    mod_mean = np.mean(modularities)
    if verbose:
        print("Best modularity found:", mod_max)
        print("pmax: ", mod_pmax)
        print("mean: ", mod_mean)
    return {
        'max': mod_max,
        'mean': mod_mean,
        'pmax': mod_pmax,
        'optimal': optimal_modularity,
        'x': angles
    }
|
<filename>ex02/latticeview_v2.h<gh_stars>0
#include <iostream>
#include <fstream>
// The following function prints the lattice to file "output.ppm"
// Writes `vlat` (vlx x vly site values, each expected in 0..4) as an ASCII
// PPM (P3) image, scaling each lattice site to a (vwidth/vlx) x (vheight/vly)
// block of pixels.
void Print_lattice (int *vlat, const int &vlx, const int &vly, const int &vwidth, const int &vheight, const char* vfilename="output.ppm")
{
    const int vw= vwidth / vlx;   // pixel columns per lattice site
    const int vh= vheight / vly;  // pixel rows per lattice site
    // Color table indexed by site value.
    int r[5], g[5], b[5];
    r[0]= 255; g[0]= 255; b[0]= 255; //white use 0 in your lattice if you want to color it white
    r[1]= 0; g[1]= 255; b[1]= 0; //green use 1 in your lattice if you want to color it green
    r[2]= 255; g[2]= 0; b[2]= 0; //red
    r[3]= 0; g[3]= 0; b[3]= 0; //black
    r[4]= 0; g[4]= 0; b[4]= 255; //blue
    std::ofstream out (vfilename);
    out << "P3" << std::endl;
    out << vw * vlx << " " << vh * vly << std::endl;
    out << "255" << std::endl;
    // Each lattice row is emitted vh times, each site vw times per row.
    // Rows start at i = vly-1 so lattice row 0 lands at the image bottom.
    // NOTE(review): site values outside 0..4 index past the color arrays
    // (undefined behaviour) — confirm inputs are always in range.
    for (int i=vly-1; i>=0; i--)
        for (int j=0; j<vh; j++)
            for (int k=0; k<vlx; k++)
            {
                for (int l=0; l<vw; l++)
                {
                    out << r[vlat[k+i*vlx]] << " " << g[vlat[k+i*vlx]] << " " << b[vlat[k+i*vlx]] << " ";
                }
            }
    out << std::endl;
    out.close ();
}
|
package session
import (
"github.com/golang/protobuf/ptypes"
"redditclone/internal/pkg/proto"
"redditclone/internal/domain/user"
)
// SessionProto2Session converts a protobuf Session into the domain Session,
// recursively converting the embedded user and data payloads.
// NOTE(review): the local variable `user` shadows the imported user package
// after its own declaration (legal, but easy to trip over when editing).
func SessionProto2Session(sessionProto proto.Session) (s *Session, err error) {
	user, err := user.UserProto2User(*sessionProto.User)
	if err != nil {
		return nil, err
	}
	data, err := DataProto2Data(*sessionProto.Data)
	if err != nil {
		return nil, err
	}
	s = &Session{
		ID:     uint(sessionProto.ID),
		UserID: uint(sessionProto.UserID),
		Token:  sessionProto.Token,
		User:   *user,
		Data:   *data,
	}
	return s, nil
}
// Session2SessionProto converts a domain Session into its protobuf form,
// the inverse of SessionProto2Session.
func Session2SessionProto(session Session) (sessionProto *proto.Session, err error) {
	userProto, err := user.User2UserProto(session.User)
	if err != nil {
		return nil, err
	}
	dataProto, err := Data2DataProto(session.Data)
	if err != nil {
		return nil, err
	}
	sessionProto = &proto.Session{
		ID:     uint64(session.ID),
		UserID: uint64(session.UserID),
		Token:  session.Token,
		User:   userProto,
		Data:   dataProto,
	}
	return sessionProto, nil
}
// DataProto2Data converts protobuf session data into the domain Data.
// A nil ExpirationTokenTime is tolerated and leaves the zero time.Time.
func DataProto2Data(dataProto proto.Data) (data *Data, err error) {
	data = &Data{
		UserID:   uint(dataProto.UserID),
		UserName: dataProto.UserName,
	}
	if dataProto.ExpirationTokenTime != nil {
		data.ExpirationTokenTime, err = ptypes.Timestamp(dataProto.ExpirationTokenTime)
		if err != nil {
			return nil, err
		}
	}
	return data, nil
}
// Data2DataProto converts domain Data into its protobuf form.
// Unlike DataProto2Data there is no nil/zero guard: the zero time.Time is
// converted unconditionally (valid for ptypes.TimestampProto).
func Data2DataProto(data Data) (dataProto *proto.Data, err error) {
	dataProto = &proto.Data{
		UserID:   uint64(data.UserID),
		UserName: data.UserName,
	}
	dataProto.ExpirationTokenTime, err = ptypes.TimestampProto(data.ExpirationTokenTime)
	if err != nil {
		return nil, err
	}
	return dataProto, nil
}
|
<reponame>posva/peeky
import fs from 'fs'
import path from 'path'
import match from 'anymatch'
import { V8Coverage } from 'collect-v8-coverage'
import { SourceMapConsumer } from 'source-map'
import copy from 'fast-copy'
import glob from 'fast-glob'
import shortid from 'shortid'
import type { Context } from '../types'
/** Per-file coverage summary exposed to consumers. */
export interface FileCoverage {
  path: string
  functions: FunctionCoverage[]
  lineRanges: CoverageLineRange[]
  linesCovered: number
  linesTotal: number
}

/** Working shape used while accumulating; functions are keyed by name. */
interface InternalFileCoverage extends Omit<FileCoverage, 'functions' | 'linesCovered'> {
  functions: Map<string, FunctionCoverage>
}

/** Whether a named function was executed at least once. */
export interface FunctionCoverage {
  name: string
  covered: boolean
}

/** Inclusive 1-based line range. */
export interface CoverageLineRange {
  start: number
  end: number
}
/**
 * Converts raw V8 coverage into per-file FileCoverage, filtering to files
 * under the configured coverage root and mapping positions back through
 * source maps when one is referenced.
 */
export async function getCoverage (
  coverage: V8Coverage,
  ctx: Context,
): Promise<FileCoverage[]> {
  // Filter
  // Keep only file:// entries under the coverage root that aren't ignored.
  const filteredCoverage = coverage.filter(item => {
    if (!item.url.startsWith('file://')) return false
    const file = item.url.substring(7)
    return file.startsWith(ctx.options.coverage.root) &&
      !match(ctx.options.coverage.ignored, file, { dot: true })
  })

  const coverageItems: Map<string, InternalFileCoverage> = new Map()

  // Records one (file, function, optional covered range) observation into
  // coverageItems, creating the file/function entries on first sight.
  function addCoverage (fullPath: string, sourceContent: string, functionName: string, coveredRange?: CoverageLineRange) {
    let relativePath = path.relative(ctx.options.coverage.root, fullPath)
    if (relativePath.startsWith('..')) {
      relativePath = fullPath
    }
    if (match(ctx.options.coverage.ignored, relativePath)) return

    if (!functionName) {
      functionName = `anonymous-${shortid()}`
    }

    let fileCoverage: InternalFileCoverage = coverageItems.get(fullPath)
    if (!fileCoverage) {
      fileCoverage = {
        path: fullPath,
        functions: new Map(),
        lineRanges: [],
        linesTotal: getCoverageTotalLines(sourceContent),
      }
      coverageItems.set(fullPath, fileCoverage)
    }

    let functionCoverage = fileCoverage.functions.get(functionName)
    if (!functionCoverage) {
      functionCoverage = {
        name: functionName,
        covered: !!coveredRange,
      }
      fileCoverage.functions.set(functionName, functionCoverage)
    } else {
      // NOTE(review): this overwrites rather than ORs the covered flag, so a
      // later uncovered range can reset an earlier covered one — confirm
      // this is intended (mergeFunctionCoverage ORs instead).
      functionCoverage.covered = !!coveredRange
    }

    if (coveredRange) {
      fileCoverage.lineRanges.push(coveredRange)
    }
  }

  for (const c of filteredCoverage) {
    const file = c.url.substring(7)
    const rawContent = fs.existsSync(file) ? fs.readFileSync(file, 'utf8') : ''

    // Source map
    // Parse an adjacent source map, if the file references one.
    let sourceMapConsumer: SourceMapConsumer
    const sourceMapUrlMatch = rawContent.match(/\/\/# sourceMappingURL=(.*)/)
    if (sourceMapUrlMatch) {
      try {
        const sourceMapFile = path.resolve(path.dirname(file), sourceMapUrlMatch[1])
        if (fs.existsSync(sourceMapFile)) {
          const sourceMapSource = fs.readFileSync(sourceMapFile, 'utf8')
          const sourceMapRaw = JSON.parse(sourceMapSource)
          sourceMapConsumer = await new SourceMapConsumer(sourceMapRaw)
        }
      } catch (e) {
        console.error(`Error while parsing source map for ${file}: ${e.stack ?? e.message}`)
      }
    }

    // Functions
    for (const fn of c.functions) {
      for (const range of fn.ranges) {
        // Offsets -> 1-based line numbers in the generated file.
        const rangeData: CoverageLineRange = {
          start: rawContent.substring(0, range.startOffset).split('\n').length,
          end: rawContent.substring(0, range.endOffset).split('\n').length,
        }
        const covered = range.count > 0

        // Source map correction
        if (sourceMapConsumer) {
          const original = sourceMapConsumer.originalPositionFor({
            line: rangeData.start,
            column: 0,
            bias: SourceMapConsumer.LEAST_UPPER_BOUND,
          })
          if (original.source == null) {
            // console.error(`Couldn't map source for ${file}`, rangeData, rawContent.substring(range.startOffset, range.endOffset))
            continue
          }
          rangeData.start = original.line
          if (covered) {
            const originalEnd = sourceMapConsumer.originalPositionFor({
              line: rangeData.end,
              column: 0,
              bias: SourceMapConsumer.GREATEST_LOWER_BOUND,
            })
            rangeData.end = originalEnd.line + 1
          }
          const fullPath = path.resolve(path.dirname(file), original.source)
          const sourceContent = sourceMapConsumer.sourceContentFor(original.source)
          addCoverage(fullPath, sourceContent, fn.functionName, covered ? rangeData : null)
        } else {
          addCoverage(file, rawContent, fn.functionName, covered ? rangeData : null)
        }
      }
    }
  }

  // Result
  // linesCovered is left 0 here; computeCoveredLines fills it in later.
  return Array.from(coverageItems.values()).map(item => ({
    path: item.path,
    functions: Array.from(item.functions.values()),
    lineRanges: item.lineRanges,
    linesCovered: 0,
    linesTotal: item.linesTotal,
  }))
}
/**
 * Counts the "countable" lines of a source file: comments and
 * import/export-from statements are removed first, then blank lines are
 * skipped. Empty content counts as a single line.
 */
export function getCoverageTotalLines (sourceContent: string): number {
  if (!sourceContent) return 1
  // Remove comments
  const withoutComments = sourceContent.replace(/\/\*[\s\S]*?\*\/|\/\/.*/g, '')
  // Remove import/exports
  const withoutImports = withoutComments.replace(/(import|export)[\s\S]*?from\s*("|')[\s\S]*?("|')/g, '')
  const nonEmpty = withoutImports.split('\n').filter(line => line.trim().length > 0)
  return nonEmpty.length
}
/**
 * Merges multiple FileCoverage entries (e.g. from several runs) by path.
 * The first occurrence of a path is deep-copied; later occurrences fold
 * their functions and line ranges into it.
 */
export function mergeCoverage (coverages: FileCoverage[]): FileCoverage[] {
  const map = new Map<string, FileCoverage>()
  for (const coverage of coverages) {
    const existing = map.get(coverage.path)
    if (existing) {
      existing.functions = mergeFunctionCoverage(existing.functions.concat(coverage.functions))
      existing.lineRanges = mergeCoverageRanges(existing.lineRanges.concat(coverage.lineRanges))
    } else {
      map.set(coverage.path, copy(coverage))
    }
  }
  return Array.from(map.values())
}
/**
 * Deduplicates function coverage by name; a function counts as covered if
 * ANY of its occurrences was covered (logical OR).
 */
export function mergeFunctionCoverage (functions: FunctionCoverage[]): FunctionCoverage[] {
  const map = new Map<string, FunctionCoverage>()
  for (const functionCoverage of functions) {
    const existing = map.get(functionCoverage.name)
    if (existing) {
      existing.covered = existing.covered || functionCoverage.covered
    } else {
      map.set(functionCoverage.name, copy(functionCoverage))
    }
  }
  return Array.from(map.values())
}
/**
 * Merges overlapping line ranges into a minimal, ordered set.
 *
 * The input is sorted first: the single-pass merge below is only correct
 * on ranges ordered by start line, but callers (mergeCoverage) concatenate
 * ranges from several coverage reports, so the input is NOT guaranteed to
 * be ordered — previously unsorted input produced wrong (unmerged) output.
 */
export function mergeCoverageRanges (ranges: CoverageLineRange[]): CoverageLineRange[] {
  const sorted = [...ranges].sort((a, b) => a.start - b.start)
  const result: CoverageLineRange[] = []
  let lastRange: CoverageLineRange | undefined
  for (const range of sorted) {
    if (!lastRange || lastRange.end < range.start) {
      // CoverageLineRange is flat, so a shallow copy is a full copy.
      lastRange = { ...range }
      result.push(lastRange)
    } else {
      lastRange.end = Math.max(lastRange.end, range.end)
    }
  }
  return result
}
/** Total number of lines spanned by the ranges (each range is inclusive). */
export function getCoveredLines (ranges: CoverageLineRange[]): number {
  let total = 0
  for (const { start, end } of ranges) {
    total += end - start + 1
  }
  return total
}
/** Fills in linesCovered for each entry (mutates in place) and returns it. */
export function computeCoveredLines (coverage: FileCoverage[]): FileCoverage[] {
  coverage.forEach((file) => {
    file.linesCovered = getCoveredLines(file.lineRanges)
  })
  return coverage
}
/**
 * Builds zero-coverage entries for every file matching the glob pattern(s),
 * so files never executed still appear in reports with linesCovered = 0.
 */
export async function getEmptyCoverageFromFiles (match: string | string[], baseDir: string, ignore: string | string[]): Promise<FileCoverage[]> {
  const files = await glob(Array.isArray(match) ? match : [match], {
    cwd: baseDir,
    ignore: Array.isArray(ignore) ? ignore : [ignore],
    absolute: true,
  })
  return files.map(file => ({
    path: file,
    functions: [],
    lineRanges: [],
    linesTotal: getCoverageTotalLines(fs.readFileSync(file, 'utf8')),
    linesCovered: 0,
  }))
}
|
use std::collections::HashMap;
// NOTE(review): UsState is never referenced in this file — it looks like a
// leftover from a tutorial example. Kept as-is in case another module uses it.
#[derive(Debug)]
enum UsState {
    Alabama,
}
struct Inventory {
items: HashMap<String, i32>,
}
impl Inventory {
fn new() -> Inventory {
Inventory {
items: HashMap::new(),
}
}
fn add_item(&mut self, item_name: String, quantity: i32) {
let item_stock = self.items.entry(item_name).or_insert(0);
*item_stock += quantity;
}
fn remove_item(&mut self, item_name: &str, quantity: i32) {
if let Some(item_stock) = self.items.get_mut(item_name) {
*item_stock = item_stock.saturating_sub(quantity);
}
}
fn check_stock(&self, item_name: &str) -> i32 {
*self.items.get(item_name).unwrap_or(&0)
}
}
// Demo entry point: exercises add/remove/check on a small inventory and
// prints the resulting stock levels.
fn main() {
    let mut warehouse_inventory = Inventory::new();
    warehouse_inventory.add_item("Apples".to_string(), 50);
    warehouse_inventory.add_item("Bananas".to_string(), 100);
    warehouse_inventory.remove_item("Apples", 20);
    println!("Current stock of Apples: {}", warehouse_inventory.check_stock("Apples"));
    println!("Current stock of Bananas: {}", warehouse_inventory.check_stock("Bananas"));
}
#!/bin/bash
#
# This file is part of the GROMACS molecular simulation package.
#
# Copyright 2019- The GROMACS Authors
# and the project initiators Erik Lindahl, Berk Hess and David van der Spoel.
# Consult the AUTHORS/COPYING files and https://www.gromacs.org for details.
#
# GROMACS is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# GROMACS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with GROMACS; if not, see
# https://www.gnu.org/licenses, or write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# If you want to redistribute modifications to GROMACS, please
# consider that scientific software is very special. Version
# control is crucial - bugs must be traceable. We will be happy to
# consider code for inclusion in the official distribution, but
# derived work must not be called official GROMACS. Details are found
# in the README & COPYING files - if they are missing, get the
# official version at https://www.gromacs.org.
#
# To help us fund GROMACS development, we humbly ask that you cite
# the research papers on the package. Check out https://www.gromacs.org.
# This script runs copyright header checks on modified files and
# reports/applies the necessary changes.
#
# See `copyright.sh -h` for a brief usage, and docs/dev-manual/code-formatting.rst
# for more details.
# Parse command-line arguments
# Print a brief usage summary for this script.
function usage() {
    echo "usage: copyright.sh [-f|--force] [--rev=REV]"
    echo "       [--copyright=<cmode>]"
    echo "       [--warnings=<file>] [<action>]"
    echo "<action>: (check*|diff|update)[-(index|workdir*)] (*=default)"
    echo "<cmode>: off|add|update*|replace|full"
}

# Defaults: check the working tree against origin/master, updating
# copyright years and headers, with no warning file and no force.
action="check-workdir"
declare -a diffargs
baserev="origin/master"
force=
copyright_mode=update
warning_file=
# Parse command-line arguments. Any argument after a diff-* action is
# forwarded verbatim to `git diff` (see diffargs below).
for arg in "$@" ; do
    if [[ "$arg" == "check-index" || "$arg" == "check-workdir" || \
          "$arg" == "diff-index" || "$arg" == "diff-workdir" || \
          "$arg" == "update-index" || "$arg" == "update-workdir" ]]
    then
        action=$arg
    elif [[ "$arg" == "check" || "$arg" == "diff" || "$arg" == "update" ]] ; then
        # Bare action names default to operating on the work dir.
        action=$arg-workdir
    elif [[ "$action" == diff-* ]] ; then
        # Extra arguments for a diff action are passed through to git diff.
        diffargs+=("$arg")
    elif [[ "$arg" == --copyright=* ]] ; then
        copyright_mode=${arg#--copyright=}
    elif [[ "$arg" == "-f" || "$arg" == "--force" ]] ; then
        force=1
    elif [[ "$arg" == --rev=* ]] ; then
        baserev=${arg#--rev=}
    elif [[ "$arg" == --warnings=* ]] ; then
        warning_file=${arg#--warnings=}
    elif [[ "$arg" == "-h" || "$arg" == "--help" ]] ; then
        usage
        exit 0
    else
        echo "Unknown option: $arg"
        echo
        usage
        exit 2
    fi
done
# Switch to the root of the source tree and check the config file
srcdir=`git rev-parse --show-toplevel`
pushd $srcdir >/dev/null
admin_dir=$srcdir/admin

# Actual processing starts: create a temporary directory
tmpdir=`mktemp -d -t gmxuncrust.XXXXXX`

# Produce a list of changed files
# Only include files that have proper filter set in .gitattributes
internal_diff_args=
if [[ $action == *-index ]]
then
    # *-index actions compare the staged (cached) tree, not the work dir.
    internal_diff_args="--cached"
fi
git diff-index $internal_diff_args --diff-filter=ACMR $baserev >$tmpdir/difflist
# Keep only files whose .gitattributes filter marks them for formatting
# and/or copyright processing.
cut -f2 <$tmpdir/difflist | \
    git check-attr --stdin filter | \
    sed -e 's/.*: filter: //' | \
    paste $tmpdir/difflist - | \
    grep -E '(complete_formatting|uncrustify|copyright|includesort)$' >$tmpdir/filtered
cut -f2 <$tmpdir/filtered >$tmpdir/filelist_all
# Subset of files that take copyright-header processing.
grep -E '(complete_formatting|copyright)$' <$tmpdir/filtered | \
    cut -f2 >$tmpdir/filelist_copyright
# Files that additionally have uncommitted changes in the work tree; used
# later to refuse (or warn about) unsafe updates.
git diff-files --name-only | grep -Ff $tmpdir/filelist_all >$tmpdir/localmods

# Extract changed files to a temporary directory
mkdir $tmpdir/org
if [[ $action == *-index ]] ; then
    git checkout-index --prefix=$tmpdir/org/ --stdin <$tmpdir/filelist_all
else
    rsync --files-from=$tmpdir/filelist_all $srcdir $tmpdir/org
fi

# Duplicate the original files to a separate directory, where all changes will
# be made.
cp -r $tmpdir/org $tmpdir/new

# Create output file for what was done (in case no messages get written)
touch $tmpdir/messages

# Run uncrustify on the temporary directory
cd $tmpdir/new

# Update the copyright headers using the requested mode
if [[ $copyright_mode != "off" ]] ; then
    cpscript_args="--update-year"
    # Map the user-facing copyright mode onto copyright.py flags.
    case "$copyright_mode" in
        year)
            ;;
        add)
            cpscript_args+=" --add-missing"
            ;;
        update)
            cpscript_args+=" --add-missing --update-header"
            ;;
        replace)
            cpscript_args+=" --replace-header"
            ;;
        full)
            cpscript_args+=" --add-missing --update-header --replace-header"
            ;;
        *)
            echo "Unknown copyright mode: $copyright_mode"
            exit 2
    esac
    if [[ $action == check-* ]] ; then
        # check-* only reports; it must not rewrite the files.
        cpscript_args+=" --check"
    fi
    # TODO: Probably better to invoke python explicitly through a customizable
    # variable.
    if ! $admin_dir/copyright.py -F $tmpdir/filelist_copyright $cpscript_args >>$tmpdir/messages
    then
        echo "Copyright checking failed!"
        rm -rf $tmpdir
        exit 2
    fi
fi

cd $tmpdir

# If a diff was requested, show it and we are done
if [[ $action == diff-* ]] ; then
    git diff --no-index --no-prefix "${diffargs[@]}" org/ new/
    rm -rf $tmpdir
    exit 0
fi

# Find the changed files
git diff --no-index --name-only --exit-code org/ new/ | \
    sed -e 's#new/##' > $tmpdir/changed
changes=
if [[ -s $tmpdir/changed ]]
then
    changes=1
fi

# Check if changed files have changed outside the index
if grep -Ff $tmpdir/localmods $tmpdir/changed > $tmpdir/conflicts
then
    awk '{print $0 ": has changes in work tree"}' $tmpdir/conflicts \
        >> $tmpdir/messages
    # Refuse to update over uncommitted local changes unless --force.
    if [[ ! $force && $action == update-* ]] ; then
        echo "Modified files found in work tree, skipping update. Use -f to override."
        echo "The following would have been done:"
        sort $tmpdir/messages
        rm -rf $tmpdir
        exit 2
    fi
fi

# Update the index/work tree if requested
if [[ $action == update-index ]] ; then
    # Hash the reformatted blobs directly into the object database and
    # splice them into the index without touching the work tree.
    grep -Ff $tmpdir/changed $tmpdir/filtered > $tmpdir/tohash
    cd $tmpdir/new
    IFS='
'
    for change in `cut -f2 $tmpdir/tohash | \
                   git --git-dir=$srcdir/.git hash-object -w --stdin-paths --no-filters | \
                   paste - $tmpdir/tohash`
    do
        # NOTE: the patterns below contain literal tabs
        sha1=${change%% *}
        rest=${change#* }
        mode=${rest:8:6}
        path=${rest#* }
        path=${path%% *}
        # Contains a literal tab
        echo "$mode $sha1 $path" >> $tmpdir/toindex
    done
    unset IFS
    git --git-dir=$srcdir/.git update-index --index-info < $tmpdir/toindex
elif [[ $action == update-workdir ]] ; then
    rsync --files-from=$tmpdir/changed $tmpdir/new/ $srcdir/
fi

# Get back to the original directory
popd >/dev/null

# Report what was done
# (tee with an empty $warning_file simply echoes to stdout)
sort $tmpdir/messages | tee $warning_file

rm -rf $tmpdir

# Exit status: non-zero when any file needed changes.
exit $changes
|
import React from 'react';
import {Route} from 'react-router';
import App from './App';
import Home from './Home';
import Champions from './Champions';
import About from './About';
import Login from './Login';
import NotFound from './NotFound';
export default function(store) {
return (
<Route component={App}>
<Route path="/" component={Home}/>
<Route path="/champions" component={Champions}/>
<Route path="/about" component={About}/>
<Route path="/login" component={Login}/>
<Route path="*" component={NotFound}/>
</Route>
);
};
|
// GLSL snippet injected into shaders to fill the per-instance marker varying:
// reads this instance's marker flag (alpha channel) from the marker texture.
// NOTE(review): assumes `tMarker`, `aInstance`, `group`, `uGroupCount`,
// `uMarkerTexDim` and `readFromTexture` are declared by the including
// shader — confirm at the call sites.
export const assign_marker_varying = `
vMarker = readFromTexture(tMarker, aInstance * float(uGroupCount) + group, uMarkerTexDim).a;
`;
// Repository: guilhermedias/twu-biblioteca-guilherme
package com.twu.biblioteca;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
 * Unit tests for {@code UserAccount}: credential matching, role checks
 * (customer/librarian) and the formatted details string.
 *
 * Created by fmorais on 8/4/15.
 */
public class UserAccountTest {

    @Test
    public void match_ShouldReturnFalse_WhenReceivesWrongCredentials() throws Exception {
        UserAccount userAccount = new UserAccount("333-4444", "123", UserAccount.Type.CUSTOMER, "John", "<EMAIL>", "12345678");

        assertFalse(userAccount.match("wrongLibraryNumber", "WrongPassword"));
    }

    @Test
    public void match_ShouldReturnTrue_WhenReceivesTheRightCredentials() throws Exception {
        UserAccount userAccount = new UserAccount("333-4444", "123", UserAccount.Type.CUSTOMER, "John", "<EMAIL>", "12345678");

        assertTrue(userAccount.match("333-4444", "123"));
    }

    // Fixed typo in the method names below: "Costumer" -> "Customer".
    @Test
    public void isCustomer_ShouldReturnTrue_WhenReceivesACustomer() throws Exception {
        UserAccount user = new UserAccount("333-4444", "123", UserAccount.Type.CUSTOMER, "John", "<EMAIL>", "12345678");

        assertTrue(user.isCustomer());
    }

    @Test
    public void isCustomer_ShouldReturnFalse_WhenReceivesANonCustomer() throws Exception {
        UserAccount user = new UserAccount("333-4444", "123", UserAccount.Type.LIBRARIAN, "John", "<EMAIL>", "12345678");

        assertFalse(user.isCustomer());
    }

    @Test
    public void isLibrarian_ShouldReturnTrue_WhenReceivesALibrarian() throws Exception {
        UserAccount user = new UserAccount("333-4444", "123", UserAccount.Type.LIBRARIAN, "John", "<EMAIL>", "12345678");

        assertTrue(user.isLibrarian());
    }

    @Test
    public void showDetails_ShouldReturnNameEmailAndPhoneNumber() throws Exception {
        UserAccount john = new UserAccount("333-4444", "123", UserAccount.Type.LIBRARIAN, "John", "<EMAIL>", "12345678");
        assertEquals("John - <EMAIL> - 12345678", john.showDetails());

        UserAccount joao = new UserAccount("333-4444", "123", UserAccount.Type.LIBRARIAN, "Joao", "<EMAIL>", "12345678");
        assertEquals("Joao - <EMAIL> - 12345678", joao.showDetails());
    }

    @Test
    public void isLibrarian_ShouldReturnFalse_WhenReceivesANonLibrarian() throws Exception {
        UserAccount user = new UserAccount("333-4444", "123", UserAccount.Type.CUSTOMER, "John", "<EMAIL>", "12345678");

        assertFalse(user.isLibrarian());
    }
}
|
<?php
// Handle a new-user registration form submission.
if ($_SERVER['REQUEST_METHOD'] === 'POST') {
    // Retrieve form data (default to empty string if a field is missing).
    $username = $_POST['username'] ?? '';
    $email = $_POST['email'] ?? '';
    $password = $_POST['password'] ?? '';

    // Validate form data (e.g., check for empty fields, validate email format)

    // Connect to the database (assuming the database connection is established)

    // SECURITY FIX: the original interpolated raw $_POST values into the SQL
    // string, an SQL-injection vulnerability. Use a prepared statement with
    // bound parameters instead.
    // FIXME(security): the password is still stored in plaintext; callers of
    // this table should migrate to password_hash()/password_verify().
    $stmt = mysqli_prepare($connection, "INSERT INTO users (username, email, password) VALUES (?, ?, ?)");
    if ($stmt) {
        mysqli_stmt_bind_param($stmt, 'sss', $username, $email, $password);
        $result = mysqli_stmt_execute($stmt);
        mysqli_stmt_close($stmt);
    } else {
        $result = false;
    }

    if ($result) {
        echo "User added successfully";
    } else {
        echo "Error adding user: " . mysqli_error($connection);
    }

    // Close the database connection
    mysqli_close($connection);
}
?>
#!/bin/sh
# Entrypoint for the tetrinetx container: generate any passwords not supplied
# via the environment, inject them into game.secure, start the services, then
# supervise them.

# Emit an 8-character throwaway password derived from the current timestamp.
gen_password() {
    date +%s | sha256sum | base64 | head -c 8
}

if [ -z "$OP_PASSWORD" ]; then
    OP_PASSWORD=$(gen_password)
fi
# `date +%s` has 1-second resolution: sleep between generations so each
# password gets a different seed.
sleep 2
if [ -z "$QUERY_PASSWORD" ]; then
    QUERY_PASSWORD=$(gen_password)
fi
sleep 2
if [ -z "$SPEC_PASSWORD" ]; then
    SPEC_PASSWORD=$(gen_password)
fi
sleep 2
if [ -z "$SPEC_OP_PASSWORD" ]; then
    SPEC_OP_PASSWORD=$(gen_password)
fi

# FIX: the original replacement text had a trailing space after the password,
# which ended up embedded in the config values.
sed -i "s|op_password=pass4word|op_password=$OP_PASSWORD|g" /opt/tetrinetx/bin/game.secure
sed -i "s|query_password=pass4word|query_password=$QUERY_PASSWORD|g" /opt/tetrinetx/bin/game.secure
sed -i "s|spec_password=pass4word|spec_password=$SPEC_PASSWORD|g" /opt/tetrinetx/bin/game.secure
sed -i "s|spec_op_password=pass4word|spec_op_password=$SPEC_OP_PASSWORD|g" /opt/tetrinetx/bin/game.secure

/usr/sbin/crond
/opt/tetrinetx/bin/tetrix.linux
/usr/sbin/nginx

# Supervise: every 5 minutes, restart any service that has died.
# FIX: quote the command substitutions — with multiple PIDs an unquoted
# $(pidof ...) makes `[ -z ... ]` fail with "too many arguments".
while true
do
    sleep 300
    if [ -z "$(pidof tetrix.linux)" ]; then
        /opt/tetrinetx/bin/tetrix.linux
    fi
    if [ -z "$(pidof nginx)" ]; then
        /usr/sbin/nginx
    fi
    if [ -z "$(pidof crond)" ]; then
        /usr/sbin/crond
    fi
done
|
import h5py
import numpy
from afqmctools.utils.io import to_qmcpack_complex
def write_qmcpack_sparse(hcore, chol, nelec, nmo, e0=0.0, filename='hamiltonian.h5',
                         real_chol=False, verbose=False, cutoff=1e-16, ortho=None):
    """Write a Hamiltonian to HDF5 in QMCPACK's sparse (factorized) format.

    :param hcore: one-body integrals (nmo x nmo array).
    :param chol: Cholesky-factorized two-body integrals; last axis indexes
        the Cholesky vectors.
    :param nelec: (nalpha, nbeta) electron counts.
    :param nmo: number of molecular orbitals.
    :param e0: constant (nuclear repulsion) energy.
    :param filename: output HDF5 file (overwritten).
    :param real_chol: if True store real-valued data, otherwise store
        complex data as interleaved (real, imag) float64 pairs.
    :param verbose: print sparsity/memory statistics.
    :param cutoff: magnitude below which Cholesky elements are dropped.
    :param ortho: optional orthogonalization matrix stored as Hamiltonian/X.
    """
    with h5py.File(filename, 'w') as fh5:
        fh5['Hamiltonian/Energies'] = numpy.array([e0,0])
        if real_chol:
            fh5['Hamiltonian/hcore'] = hcore
        else:
            # Reinterpret complex hcore as a (..., 2) float64 array
            # (QMCPACK's on-disk complex representation).
            shape = hcore.shape
            hcore = hcore.astype(numpy.complex128).view(numpy.float64)
            hcore = hcore.reshape(shape+(2,))
            fh5['Hamiltonian/hcore'] = hcore
        if ortho is not None:
            fh5['Hamiltonian/X'] = ortho
        # number of cholesky vectors
        nchol_vecs = chol.shape[-1]
        ix, vals = to_sparse(chol, cutoff=cutoff)
        nnz = len(vals)
        # Memory estimate in GB: 8 bytes/real or 16 bytes/complex element.
        mem = (8 if real_chol else 16) * nnz / (1024.0**3)
        if verbose:
            print(" # Total number of non-zero elements in sparse cholesky ERI"
                  " tensor: %d"%nnz)
            nelem = chol.shape[0]*chol.shape[1]
            print(" # Sparsity of ERI Cholesky tensor: "
                  "%f"%(1-float(nnz)/nelem))
            print(" # Total memory required for ERI tensor: %13.8e GB"%(mem))
        fh5['Hamiltonian/Factorized/block_sizes'] = numpy.array([nnz])
        fh5['Hamiltonian/Factorized/index_0'] = numpy.array(ix)
        if real_chol:
            fh5['Hamiltonian/Factorized/vals_0'] = numpy.array(vals)
        else:
            fh5['Hamiltonian/Factorized/vals_0'] = (
                to_qmcpack_complex(numpy.array(vals, dtype=numpy.complex128))
            )
        # Number of integral blocks used for chunked HDF5 storage.
        # Currently hardcoded for simplicity.
        nint_block = 1
        (nalpha, nbeta) = nelec
        unused = 0
        fh5['Hamiltonian/dims'] = numpy.array([unused, nnz, nint_block, nmo,
                                               nalpha, nbeta, unused,
                                               nchol_vecs])
        fh5['Hamiltonian/ComplexIntegrals'] = numpy.array([not int(real_chol)],
                                                          dtype=numpy.int32)
        # TODO: FDM This is deprecated, remove eventually.
        occups = [i for i in range(0, nalpha)]
        occups += [i+nmo for i in range(0, nbeta)]
        fh5['Hamiltonian/occups'] = numpy.array(occups)
def write_sparse_chol_chunk(ixs, vals, chunk, filename='hamiltonian.h5', real_chol=False):
    """Append one chunk of the sparse Cholesky tensor to an existing HDF5 file.

    :param ixs: index array for this chunk.
    :param vals: non-zero values belonging to this chunk.
    :param chunk: integer chunk number used to name the datasets.
    :param filename: HDF5 file to append to (opened in 'a' mode).
    :param real_chol: if True store values as reals, otherwise as
        QMCPACK-format complex pairs.
    """
    with h5py.File(filename, 'a') as fh5:
        fh5['Hamiltonian/Factorized/index_{:d}'.format(chunk)] = numpy.array(ixs)
        if real_chol:
            # BUG FIX: previously wrote numpy.array(chunk) — the chunk *index* —
            # instead of the actual values.
            fh5['Hamiltonian/Factorized/vals_{:d}'.format(chunk)] = numpy.array(vals)
        else:
            fh5['Hamiltonian/Factorized/vals_{:d}'.format(chunk)] = (
                to_qmcpack_complex(numpy.array(vals, dtype=numpy.complex128))
            )
def write_sparse_basic(filename, hcore, e0, nelec, real_chol=False):
    """Write a minimal QMCPACK Hamiltonian file: energies, one-body
    integrals and the (deprecated) trial occupation list — no Cholesky data.

    :param filename: output HDF5 file (overwritten).
    :param hcore: one-body integrals; last axis length defines nmo.
    :param e0: constant (nuclear repulsion) energy.
    :param nelec: (nalpha, nbeta) electron counts.
    :param real_chol: if True store hcore as-is, otherwise as interleaved
        (real, imag) float64 pairs.
    """
    nalpha, nbeta = nelec
    nmo = hcore.shape[-1]
    if not real_chol:
        # QMCPACK's complex on-disk layout: trailing axis of (real, imag).
        original_shape = hcore.shape
        hcore = hcore.astype(numpy.complex128).view(numpy.float64)
        hcore = hcore.reshape(original_shape + (2,))
    # Alpha orbitals 0..nalpha-1, beta orbitals offset by nmo.
    occups = list(range(nalpha)) + [nmo + i for i in range(nbeta)]
    with h5py.File(filename, 'w') as fh5:
        fh5['Hamiltonian/Energies'] = numpy.array([e0, 0])
        fh5['Hamiltonian/hcore'] = hcore
        fh5['Hamiltonian/occups'] = numpy.array(occups)
def write_qmcpack_dense(hcore, chol, nelec, nmo, enuc=0.0,
                        filename='hamiltonian.h5', real_chol=False,
                        verbose=False, ortho=None):
    """Write a Hamiltonian to HDF5 in QMCPACK's dense-factorized format.

    :param hcore: one-body integrals.
    :param chol: dense Cholesky factor; last axis indexes Cholesky vectors.
    :param nelec: (nalpha, nbeta) electron counts.
    :param nmo: number of molecular orbitals.
    :param enuc: constant (nuclear repulsion) energy.
    :param filename: output HDF5 file (overwritten).
    :param real_chol: if True store real parts only, otherwise store
        QMCPACK-format complex data.
    :param verbose: unused here; kept for signature parity with the
        sparse writer.
    :param ortho: optional orthogonalization matrix stored as Hamiltonian/X.
    """
    with h5py.File(filename, 'w') as fh5:
        fh5['Hamiltonian/Energies'] = numpy.array([enuc,0])
        if real_chol:
            fh5['Hamiltonian/hcore'] = numpy.real(hcore)
            fh5['Hamiltonian/DenseFactorized/L'] = numpy.real(chol)
        else:
            fh5['Hamiltonian/hcore'] = to_qmcpack_complex(hcore.astype(numpy.complex128))
            fh5['Hamiltonian/DenseFactorized/L'] = to_qmcpack_complex(chol.astype(numpy.complex128))
        # dims layout matches the sparse writer: only nmo, nalpha, nbeta and
        # the number of Cholesky vectors are meaningful for the dense format.
        fh5['Hamiltonian/dims'] = numpy.array([0, 0, 0, nmo,
                                               nelec[0], nelec[1], 0,
                                               chol.shape[-1]])
        fh5['Hamiltonian/ComplexIntegrals'] = numpy.array([not int(real_chol)],
                                                          dtype=numpy.int32)
        if ortho is not None:
            fh5['Hamiltonian/X'] = ortho
def to_sparse(vals, cutoff=1e-8):
    """Extract the non-negligible entries of a 2D array.

    :param vals: 2D numpy array.
    :param cutoff: entries with ``|value| <= cutoff`` are dropped.
    :return: tuple ``(ix, nonzero)`` where ``ix`` (int32) interleaves row and
        column indices as ``[r0, c0, r1, c1, ...]`` and ``nonzero`` holds the
        matching values in the same order.
    """
    rows, cols = numpy.where(numpy.abs(vals) > cutoff)
    ix = numpy.empty(rows.size + cols.size, dtype=numpy.int32)
    ix[0::2] = rows
    ix[1::2] = cols
    return ix, vals[(rows, cols)]
|
// gh_stars: 1-10
import React from "react";
import { Container, Row, Col } from "react-bootstrap";
import myImg from "../../Assets/avatar.svg";
import Tilt from "react-parallax-tilt";
import {
AiFillGithub,
AiOutlineTwitter,
AiFillInstagram,
AiFillFacebook,
} from "react-icons/ai";
import Particle from "../Particle";
import { FaLinkedinIn } from "react-icons/fa";
/**
 * "About" section of the home page: self-introduction text, an avatar with a
 * parallax tilt effect, and a list of social-media links.
 * Rendered inside a fluid container with the particle background.
 */
function Home2() {
  return (
    <Container fluid className="home-about-section" id="about">
      <Particle />
      <Container>
        <Row>
          {/* Left column: introduction copy */}
          <Col md={8} className="home-about-description">
            <h1 style={{ fontSize: "2.6em" }}>
              LET ME <span className="purple"> INTRODUCE </span> MYSELF
            </h1>
            <p className="home-about-body">
              I'm Programmer that turns coffee into code.😉
              <br />
              <br />I am fluent in classics like
              <i>
                <b className="purple">
                  {" "}
                  C++, Javascript, Python and MERN stack.{" "}
                </b>
              </i>
              <br />
              <br />
              My field of Interest's are building new
              <i>
                <b className="purple">Web Technologies and Products </b> and
                also in areas related to{" "}
                <b className="purple">Digital Marketing.</b>
              </i>
              <br />
              <br />
              Whenever possible, I also apply my passion for developing products
              with
              <i>
                <b className="purple"> Modern Javascript Frameworks</b>
              </i>
              like
              <i>
                <b className="purple"> Node.js and React.js</b>
              </i>
            </p>
          </Col>
          {/* Right column: avatar with tilt-on-hover effect */}
          <Col md={4} className="myAvtar">
            <Tilt>
              <img src={myImg} className="img-fluid" alt="avatar" />
            </Tilt>
          </Col>
        </Row>
        <Row>
          {/* Social links row */}
          <Col md={12} className="home-about-social">
            <h1>FIND ME ON</h1>
            <p>
              Feel free to <span className="purple">connect </span>with me
            </p>
            <ul className="home-about-social-links">
              <li className="social-icons">
                <a
                  href="https://www.linkedin.com/in/sagar-mude"
                  target="_blank"
                  rel="noreferrer"
                  className="icon-colour home-social-icons"
                >
                  <FaLinkedinIn />
                </a>
              </li>
              <li className="social-icons">
                <a
                  href="https://github.com/sagarmude7"
                  target="_blank"
                  rel="noreferrer"
                  className="icon-colour home-social-icons"
                >
                  <AiFillGithub />
                </a>
              </li>
              <li className="social-icons">
                <a
                  href="https://www.facebook.com/sagar.mude.39"
                  target="_blank"
                  rel="noreferrer"
                  className="icon-colour home-social-icons"
                >
                  <AiFillFacebook />
                </a>
              </li>
              <li className="social-icons">
                <a
                  href="https://www.instagram.com/sagarmude7/"
                  target="_blank"
                  rel="noreferrer"
                  className="icon-colour home-social-icons"
                >
                  <AiFillInstagram />
                </a>
              </li>
              <li className="social-icons">
                <a
                  href="https://twitter.com/SAGARMUDE1"
                  target="_blank"
                  rel="noreferrer"
                  className="icon-colour home-social-icons"
                >
                  <AiOutlineTwitter />
                </a>
              </li>
            </ul>
          </Col>
        </Row>
      </Container>
    </Container>
  );
}
export default Home2;
|
#!/usr/bin/env bash
# Entrypoint helper: ensure a system D-Bus daemon and a virtual X server
# (Xvfb on display :99) are running, then exec the wrapped command "$@".

# Start the system D-Bus daemon if one is not already running.
if [ -z "$(pidof -x dbus-daemon)" ]; then
  sudo mkdir -p /var/run/dbus
  # Remove a stale pid file left over from a previous run.
  sudo rm -f /var/run/dbus/pid
  sudo dbus-daemon --system;
fi

# Start a headless X server if one is not already running.
if [ -z "$(pidof -x Xvfb)" ]; then
  export DISPLAY=:99
  # Remove a stale X lock file for display :99.
  sudo rm -f /tmp/.X99-lock
  # -b runs Xvfb in the background; RANDR is needed by some toolkits.
  sudo -b Xvfb $DISPLAY -screen 0 1920x1080x24 -noreset +extension RANDR
fi

# Run the wrapped command (DISPLAY is exported if we started Xvfb above).
"$@"
|
#!/usr/bin/env bash
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

# Submit a Cloud Build for the directory given as $1, choosing between the
# release flow (e2e release test only) and the regular/nightly flow.

# Echo every command being executed
set -x
# Exit the script on any command with non 0 return code
set -e

# Use the top level empty release file as a RELEASE flag.
RELEASE=false
if [[ -f "release" ]]; then
  # FIX: was `$RELEASE=true`, which expands to the command `false=true` and
  # (under `set -e`) aborted the script instead of setting the flag.
  RELEASE=true
fi

DIR=$1

if [[ $RELEASE = true ]]; then
  # Release flow: Only run e2e release test, ignore unit tests in all other
  # directories.
  if [[ $DIR = "e2e" ]]; then
    # FIX: the subcommand is `gcloud builds submit` (plural), matching the
    # regular flow below.
    gcloud builds submit . --config=$DIR/cloudbuild-release.yml \
      --substitutions _RELEASE=$RELEASE
  fi
else
  # Regular flow: Only run changed packages plus e2e regular test.
  # Nightly flow: Run everything.
  if [[ -f "$DIR/run-ci" || $DIR == "e2e" || $NIGHTLY = true ]]; then
    gcloud builds submit . --config=$DIR/cloudbuild.yml \
      --substitutions _NIGHTLY=$NIGHTLY
  fi
fi
|
import json
from naas.client import Client
class EmailNotifications:
    """Static helpers wrapping the email-notification REST endpoints."""

    @staticmethod
    def list(params=None):
        """
        Retrieve the list of email notifications
        :param params: dict
        :return: Response
        """
        query = {} if params is None else params
        rel = Client.rel_for('rels/email-notifications')
        url = Client.routes().route_for(rel).url_for(query)
        return Client.get(url)

    @staticmethod
    def retrieve(_id, params=None):
        """
        Retrieve the instance of a email notification
        :param _id: int
        :param params: dict
        :return: Response
        """
        query = {} if params is None else params
        rel = Client.rel_for('rels/email-notification')
        url = Client.routes().route_for(rel).url_for(args={**query, 'id': _id})
        return Client.get(url)

    @staticmethod
    def update(_id, params=None):
        """
        Update an existing record
        :param _id: int
        :param params: dict
        :return: Response
        """
        query = {} if params is None else params
        payload = json.dumps({"email_notification": query})
        headers = {"Content-Type": "application/json"}
        rel = Client.rel_for('rels/email-notification')
        url = Client.routes().route_for(rel).url_for(args={**query, 'id': _id})
        return Client.put(url, headers=headers, data=payload)

    @staticmethod
    def create(params=None):
        """
        Create a new record
        :param params: dict
        :return: Response
        """
        query = {} if params is None else params
        payload = json.dumps({"email_notification": query})
        headers = {"Content-Type": "application/json"}
        rel = Client.rel_for('rels/email-notifications')
        url = Client.routes().route_for(rel).url_for()
        return Client.post(url, headers=headers, data=payload)

    @staticmethod
    def deliver(_id, params=None):
        """
        Deliver the instance of a email notification
        :param _id: int
        :param params: dict
        :return: Response
        """
        query = {} if params is None else params
        rel = Client.rel_for('rels/email-notification-deliver')
        url = Client.routes().route_for(rel).url_for(args={**query, 'id': _id})
        return Client.post(url)

    @staticmethod
    def preview_html(_id, params=None):
        """
        Preview the instance of a email notification
        :param _id: int
        :param params: dict
        :return: Response
        """
        query = {} if params is None else params
        rel = Client.rel_for('rels/email-notification-preview')
        url = Client.routes().route_for(rel).url_for(args={**query, 'id': _id})
        return Client.get(url)
|
import socket
def resolve_domain(domain):
    """Resolve a hostname to an IPv4 address string.

    :param domain: hostname to look up.
    :return: dotted-quad address string, or None when resolution fails.
    """
    try:
        return socket.gethostbyname(domain)
    except socket.gaierror:
        # Name could not be resolved (e.g. NXDOMAIN).
        return None
require File.expand_path('../../../../spec_helper', __FILE__)
require File.expand_path('../../fixtures/classes', __FILE__)
# Specs for Socket::Option.new: it accepts both integer constants and symbol
# names (full SO_/AF_/SOL_ form or the short form) for the family, level and
# option name, and raises SocketError for names it cannot resolve.
describe "Socket::Option.new" do
  it "should accept integers" do
    so = Socket::Option.new(Socket::AF_INET, Socket::SOL_SOCKET, Socket::SO_KEEPALIVE, [0].pack('i'))
    so.family.should == Socket::AF_INET
    so.level.should == Socket::SOL_SOCKET
    so.optname.should == Socket::SO_KEEPALIVE
  end

  it "should accept symbols" do
    # Full constant names as symbols...
    so = Socket::Option.new(:AF_INET, :SOL_SOCKET, :SO_KEEPALIVE, [0].pack('i'))
    so.family.should == Socket::AF_INET
    so.level.should == Socket::SOL_SOCKET
    so.optname.should == Socket::SO_KEEPALIVE

    # ...and the short forms without the AF_/SOL_/SO_ prefixes.
    so = Socket::Option.new(:INET, :SOCKET, :KEEPALIVE, [0].pack('i'))
    so.family.should == Socket::AF_INET
    so.level.should == Socket::SOL_SOCKET
    so.optname.should == Socket::SO_KEEPALIVE
  end

  it "should raise error on unknown family" do
    lambda { Socket::Option.new(:INET4, :SOCKET, :KEEPALIVE, [0].pack('i')) }.should raise_error(SocketError)
  end

  it "should raise error on unknown level" do
    lambda { Socket::Option.new(:INET, :ROCKET, :KEEPALIVE, [0].pack('i')) }.should raise_error(SocketError)
  end

  it "should raise error on unknown option name" do
    lambda { Socket::Option.new(:INET, :SOCKET, :ALIVE, [0].pack('i')) }.should raise_error(SocketError)
  end
end
|
#!/bin/bash
# Provision a build host for AIStore: write Go environment variables into a
# profile.d script, install/refresh the Go toolchain if needed, then fetch
# and build AIStore.

echo "Add GOPATH and GOBIN"
sudo touch /etc/profile.d/aispaths.sh
sudo sh -c "echo export PATH=$PATH:/usr/local/go/bin > /etc/profile.d/aispaths.sh"
sudo sh -c "echo export GOBIN=$HOME/ais/bin >> /etc/profile.d/aispaths.sh"
sudo sh -c "echo export GOPATH=$HOME/ais/ >> /etc/profile.d/aispaths.sh"
sudo sh -c "echo export AISSRC=$HOME/ais/src/github.com/NVIDIA/aistore/ais >> /etc/profile.d/aispaths.sh"
sudo sh -c "echo export AISTORE_SRC=$HOME/ais/src/github.com/NVIDIA/aistore >> /etc/profile.d/aispaths.sh"
# SECURITY FIX: was 777 — a world-writable, root-owned profile script is an
# easy privilege-escalation vector; profile.d scripts only need to be readable.
sudo chmod 644 /etc/profile.d/aispaths.sh
. /etc/profile.d/aispaths.sh

# Start from a clean GOPATH layout.
rm -rf ~/ais || true
mkdir -p ~/ais/{bin,pkg,src}

GOLANG_VERSION="go1.13"
# Contents of /usr/local/go/VERSION, e.g. "go1.13" (empty if Go not installed).
CURRENT_GOLANG_VERSION=$(cat /usr/local/go/VERSION)
if [[ ${CURRENT_GOLANG_VERSION} != ${GOLANG_VERSION} ]]; then
    echo "Current Golang version does not match with expected, so updating Golang to " ${GOLANG_VERSION}
    sudo rm -rf /usr/local/go
    # FIX: was "/use/bin/go" (typo), so the stale symlink was never removed.
    sudo rm -rf /usr/bin/go
    echo "Downloading Go..."
    curl -LO https://storage.googleapis.com/golang/${GOLANG_VERSION}.linux-amd64.tar.gz
    shasum -a 256 go1.*
    sudo tar -C /usr/local -xvzf ${GOLANG_VERSION}.linux-amd64.tar.gz > /dev/null
    sudo ln -s /usr/local/go/bin/go /usr/bin/go
    rm -rf ${GOLANG_VERSION}.linux-amd64.tar.gz
fi

echo "Getting AIStore source..."
go get -v github.com/NVIDIA/aistore
echo "Cloud provider set to: ${AIS_CLD_PROVIDER}"
cd ${AISTORE_SRC} && make node
|
package com.waflo.cooltimediaplattform.backend.jparepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;
import javax.persistence.EntityManager;
import javax.persistence.criteria.Predicate;
import javax.transaction.Transactional;
import javax.xml.crypto.Data;
import java.lang.reflect.ParameterizedType;
import java.util.List;
import java.util.Optional;
/**
 * Generic JPA-backed repository providing basic CRUD operations for any
 * entity type {@code T} with a {@link Long} primary key.
 */
@Transactional
public class JpaRepository<T> implements IRepository<T,Long>{

    private EntityManager em;//= DataAccessor.getInstance().factory.createEntityManager();
    private final Class<T> persistentClass;

    public JpaRepository(EntityManager em, Class<T> type){
        this.em=em;
        this.persistentClass = type;
    }

    /** Load all persisted instances of the entity type via a criteria query. */
    @Override
    public List<T> findAll() {
        var criteriaQuery=em.getCriteriaBuilder().createQuery(persistentClass);
        criteriaQuery.select(criteriaQuery.from(persistentClass));
        return em.createQuery(criteriaQuery).getResultList();
    }

    /**
     * Find an entity by primary key.
     *
     * @param id primary key value
     * @return the entity, or {@link Optional#empty()} when no row exists
     */
    @Override
    public Optional<T> findById(Long id) {
        // FIX: Optional.of(...) threw NullPointerException when the entity was
        // absent (em.find returns null); ofNullable yields Optional.empty().
        return Optional.ofNullable(em.find(persistentClass, id));
    }

    /** Persist a new entity instance and return it. */
    @Override
    @Transactional
    public T save(T instance) {
        em.persist(instance);
        return instance;
    }

    /** Merge the state of a (possibly detached) entity; returns the managed copy. */
    @Transactional
    @Override
    public T update(T newObj){
        return em.merge(newObj);
    }

    /** Remove an entity, merging it first if it is detached. */
    @Override
    @Transactional
    public void delete(T obj) {
        // FIX: previously passed `em` (the EntityManager itself) to remove()
        // when the object was already managed, which throws
        // IllegalArgumentException at runtime; the entity must be removed.
        em.remove(em.contains(obj) ? obj : em.merge(obj));
    }

    /** Bulk-delete by primary key using a criteria delete query. */
    @Transactional
    public void delete(long id){
        var critBuilder=em.getCriteriaBuilder();
        var deleteCriteria=critBuilder.createCriteriaDelete(persistentClass);
        // NOTE(review): assumes the mapped id attribute is named "Id" —
        // confirm against the entity classes.
        em.createQuery(deleteCriteria.where(critBuilder.equal(deleteCriteria.from(persistentClass).get("Id"), id))).executeUpdate();
    }
}
|
import { Router, NavigationEnd } from '@angular/router';
import { Component, ViewEncapsulation, ViewChild, ElementRef, OnInit, AfterContentInit, ApplicationRef, NgZone } from '@angular/core';
import { Platform, MenuController } from 'ionic-angular';
import { StatusBar, Splashscreen } from 'ionic-native';
@Component({
  templateUrl: 'app.html'
})
export class MyApp {
  /**
   * Root application component: runs platform-ready initialization, forces
   * change detection on window resize, and closes the side menu whenever a
   * navigation completes.
   */
  constructor(
    public platform: Platform,
    public menu: MenuController,
    public application: ApplicationRef,
    public router: Router,
    private zone: NgZone
  ) {
    this.initializeApp();
  }

  initializeApp() {
    this.platform.ready().then(() => {
      // Okay, so the platform is ready and our plugins are available.
      // Here you can do any higher level native things you might need.
      StatusBar.styleDefault();
      Splashscreen.hide();

      // Resize events arrive outside Angular's zone; re-enter it and tick
      // so the view updates.
      this.platform.resize.asObservable().subscribe((event) => {
        this.zone.run(() => {
          this.application.tick();
        });
      });

      // FIX: the previous code used `.map(event => event instanceof
      // NavigationEnd)`, which only transformed every event to a boolean —
      // the subscriber then closed the menu on EVERY router event. Close it
      // only when a navigation actually ends.
      this.router.events.subscribe((event) => {
        if (event instanceof NavigationEnd) {
          this.menu.close();
        }
      });
    });
  }

  closeMenu() {
    // close the menu when clicking a link from the menu
    //this.menu.close();

    // navigate to the new page if it is not the current page
    //this.nav.setRoot(page.component);
  }
}
|
def count_course_frequency(courses):
    """Count case-insensitive occurrences of each course name.

    :param courses: iterable of course-name strings.
    :return: dict mapping lowercased course name -> occurrence count.
    """
    frequency = {}
    for name in courses:
        key = name.lower()
        frequency[key] = frequency.get(key, 0) + 1
    return frequency
const { createLambda } = require('./middlewares')
const { findOrders } = require('./orders-logic')
/**
 * Lambda entry point: fetches orders matching the filters supplied in the
 * request body and returns them wrapped in an `{ orders }` payload.
 */
const handler = createLambda(async ({ body }) => ({
  orders: await findOrders(body.filters),
}));

module.exports = { handler }
|
// Fixture data for a small Star-Wars-style graph: characters reference each
// other by name via `friends`, and reference PlanetsData/SpeciesData entries
// by name via `homeWorld`/`species`. `homeWorld` is optional (see "C").
export const CharactersData: {
  name: string;
  friends: string[];
  homeWorld?: string;
  species: string;
}[] = [
  {
    name: "A",
    friends: ["B", "D"],
    homeWorld: "Planet A",
    species: "Species A",
  },
  {
    name: "B",
    friends: ["A"],
    homeWorld: "Planet A",
    species: "Species B",
  },
  {
    // "C" intentionally has no homeWorld and no friends.
    name: "C",
    friends: [],
    species: "Species B",
  },
  {
    name: "D",
    friends: ["A"],
    homeWorld: "Planet B",
    species: "Species A",
  },
];

// Planets; `neighbors` refers to other planets by name.
export const PlanetsData = [
  {
    name: "Planet A",
    climate: "Tropical",
    neighbors: ["Planet B"],
  },
  {
    name: "Planet B",
    climate: "Polar Chill",
    neighbors: ["Planet A"],
  },
  {
    name: "Planet C",
    climate: "Arid",
    neighbors: [],
  },
];

// Species; `origin` refers to a planet by name.
export const SpeciesData = [
  {
    name: "Species A",
    lifespan: 100,
    origin: "Planet A",
  },
  {
    name: "Species B",
    lifespan: 80,
    origin: "Planet C",
  },
];
|
#!/bin/sh
# Run the CernVM-FS client integration test suite for this platform,
# excluding test cases known not to work here.

export PATH=/usr/local/bin:$PATH

# source the common platform independent functionality and option parsing
script_location=$(cd "$(dirname "$0")"; pwd)
. ${script_location}/common_test.sh

retval=0

# Unit tests are currently disabled on this platform.
#cvmfs_unittests --gtest_shuffle \
# --gtest_death_test_use_fork || retval=1

cd ${SOURCE_DIRECTORY}/test

# everything will be placed in the home folder
echo "running CernVM-FS client test cases..."
CVMFS_TEST_CLASS_NAME=ClientIntegrationTests
# -x lists the excluded test cases; everything else under src/0* runs.
# XUnit output goes next to the main log file.
./run.sh "$CLIENT_TEST_LOGFILE" -o ${CLIENT_TEST_LOGFILE}${XUNIT_OUTPUT_SUFFIX} \
    -x src/004-davinci \
       src/005-asetup \
       src/006-buildkernel \
       src/007-testjobs \
       src/008-default_domain \
       src/016-dnsunreachable \
       src/017-dnstimeout \
       src/024-reload-during-asetup \
       src/039-reloadalarm \
       src/040-aliencache \
       src/045-oasis \
       src/052-roundrobindns \
       src/055-ownership \
       src/056-lowspeedlimit \
       src/057-parallelmakecache \
       src/061-systemdnokill \
    -- \
    src/0*
retval=$?

exit $retval
|
#!/usr/bin/env bash
# Create (or recreate) a TUN device and route 10.10.0.0/24 through it.
# Requires sudo for all `ip` operations.
set -ex

TUNNEL_NAME=${TUNNEL_NAME:-tun2}
IP_ADDRESS=10.9.0.1/24

# FIX: under `set -e` a failing delete (tunnel not present on first run)
# aborted the whole script; tolerate that case explicitly.
sudo ip tuntap del "${TUNNEL_NAME}" mode tun || true
sudo ip tuntap add "${TUNNEL_NAME}" mode tun
sudo ip link set "${TUNNEL_NAME}" up
sudo ip addr add ${IP_ADDRESS} dev "${TUNNEL_NAME}"
echo "Created tunnel ${TUNNEL_NAME} with ip address ${IP_ADDRESS}"

sudo ip route add 10.10.0.0/24 via 10.9.0.1
echo "Created route to 10.10.0.0/24 network via ${TUNNEL_NAME}"
|
// Source repository: mponizil/apify-js
import fs from 'fs';
import fsExtra from 'fs-extra';
import path from 'path';
import { checkParamOrThrow } from 'apify-client/build/utils';
import LruCache from 'apify-shared/lru_cache';
import ListDictionary from 'apify-shared/list_dictionary';
import { ENV_VARS, LOCAL_STORAGE_SUBDIRS } from 'apify-shared/consts';
import { delayPromise, checkParamPrototypeOrThrow } from 'apify-shared/utilities';
import Promise from 'bluebird';
import crypto from 'crypto';
import Request from './request';
import { ensureDirExists, apifyClient, openRemoteStorage, openLocalStorage, ensureTokenOrLocalStorageEnvExists } from './utils';
// Subdirectory of the local storage dir where request queues are emulated.
export const LOCAL_STORAGE_SUBDIR = LOCAL_STORAGE_SUBDIRS.requestQueues;
// Cap on simultaneously opened queues kept in the LRU cache below.
const MAX_OPENED_QUEUES = 1000;
// Cap on locally cached request IDs per process.
const MAX_CACHED_REQUESTS = 1000 * 1000;

// When requesting queue head we always fetch requestsInProgressCount * QUERY_HEAD_BUFFER number of requests.
export const QUERY_HEAD_MIN_LENGTH = 100;
export const QUERY_HEAD_BUFFER = 3;

// If queue was modified (request added/updated/deleted) before more than API_PROCESSED_REQUESTS_DELAY_MILLIS
// then we get head query to be consistent.
export const API_PROCESSED_REQUESTS_DELAY_MILLIS = 10 * 1000;

// How many times we try to get queue head with queueModifiedAt older than API_PROCESSED_REQUESTS_DELAY_MILLIS.
export const MAX_QUERIES_FOR_CONSISTENCY = 6;

// Promisified fs/fs-extra helpers used by the local queue emulation.
const writeFilePromised = Promise.promisify(fs.writeFile);
const readdirPromised = Promise.promisify(fs.readdir);
const readFilePromised = Promise.promisify(fs.readFile);
const renamePromised = Promise.promisify(fs.rename);
const emptyDirPromised = Promise.promisify(fsExtra.emptyDir);

const { requestQueues } = apifyClient;
const queuesCache = new LruCache({ maxLength: MAX_OPENED_QUEUES }); // Open queues are stored here.
/**
 * Validates parameters of *.addRequest(): coerces a plain object into a
 * Request instance, checks the options, and rejects requests that already
 * carry an "id". Returns the normalized { forefront, request } pair.
 * @ignore
 */
const validateAddRequestParams = (request, opts) => {
    checkParamOrThrow(request, 'request', 'Object');
    const normalized = request instanceof Request ? request : new Request(request);
    checkParamOrThrow(opts, 'opts', 'Object');
    const { forefront = false } = opts;
    checkParamOrThrow(forefront, 'opts.forefront', 'Boolean');
    if (normalized.id) throw new Error('Request has already "id" so it cannot be added to the queue!');

    return { forefront, request: normalized };
};
/**
 * Validates params of *.getRequest(): requestId must be a string.
 * Throws via checkParamOrThrow; returns nothing.
 * @ignore
 */
const validateGetRequestParams = (requestId) => {
    checkParamOrThrow(requestId, 'requestId', 'String');
};
/**
 * Validates params of *.markRequestHandled(): request must be an
 * Apify.Request instance that already has a string "id" (i.e. it was
 * previously added to the queue). Throws via the check helpers.
 * @ignore
 */
const validateMarkRequestHandledParams = (request) => {
    checkParamPrototypeOrThrow(request, 'request', Request, 'Apify.Request');
    checkParamOrThrow(request.id, 'request.id', 'String');
};
/**
 * Validates params of *.reclaimRequest(): request must be an Apify.Request
 * with a string "id"; opts.forefront (default false) must be a boolean.
 * Returns the normalized { forefront } options.
 * @ignore
 */
const validateReclaimRequestParams = (request, opts) => {
    checkParamPrototypeOrThrow(request, 'request', Request, 'Apify.Request');
    checkParamOrThrow(request.id, 'request.id', 'String');
    checkParamOrThrow(opts, 'opts', 'Object');
    const { forefront = false } = opts;
    checkParamOrThrow(forefront, 'opts.forefront', 'Boolean');

    return { forefront };
};
/**
 * Helper function that creates ID from uniqueKey for local emulation of request queue.
 * It's also used for local cache of remote request queue.
 *
 * This function may not exactly match how requestId is created server side.
 * So we never pass requestId created by this to server and use it only for local cache.
 *
 * Implementation: SHA-256 of the uniqueKey, base64-encoded with the
 * non-alphanumeric characters (+ / =) stripped, truncated to 15 chars.
 *
 * @ignore
 */
const getRequestId = (uniqueKey) => {
    checkParamOrThrow(uniqueKey, 'uniqueKey', 'String');

    const digest = crypto
        .createHash('sha256')
        .update(uniqueKey)
        .digest('base64')
        .replace(/(\+|\/|=)/g, '');

    // slice() is a no-op for strings shorter than 15 characters.
    return digest.slice(0, 15);
};
/**
* A helper class that is used to report results from various
* [`RequestQueue`](../api/requestqueue) functions as well as
* [`Apify.utils.puppeteer.enqueueLinks()`](../api/puppeteer#puppeteer.enqueueLinks).
*
* @typedef {Object} QueueOperationInfo
* @property {Boolean} wasAlreadyPresent Indicates if request was already present in the queue.
* @property {Boolean} wasAlreadyHandled Indicates if request was already marked as handled.
* @property {String} requestId The ID of the added request
* @property {Request} request The original `Request` object passed to the `RequestQueue` function.
*/
/**
* Represents a queue of URLs to crawl, which is used for deep crawling of websites
* where you start with several URLs and then recursively
* follow links to other pages. The data structure supports both breadth-first and depth-first crawling orders.
*
* Each URL is represented using an instance of the {@link Request} class.
* The queue can only contain unique URLs. More precisely, it can only contain {@link Request} instances
* with distinct `uniqueKey` properties. By default, `uniqueKey` is generated from the URL, but it can also be overridden.
* To add a single URL multiple times to the queue,
* corresponding {@link Request} objects will need to have different `uniqueKey` properties.
*
* Do not instantiate this class directly, use the
* [`Apify.openRequestQueue()`](apify#module_Apify.openRequestQueue) function instead.
*
* `RequestQueue` is used by {@link BasicCrawler}, {@link CheerioCrawler}
* and {@link PuppeteerCrawler} as a source of URLs to crawl.
* Unlike {@link RequestList}, `RequestQueue` supports dynamic adding and removing of requests.
* On the other hand, the queue is not optimized for operations that add or remove a large number of URLs in a batch.
*
* `RequestQueue` stores its data either on local disk or in the Apify Cloud,
* depending on whether the `APIFY_LOCAL_STORAGE_DIR` or `APIFY_TOKEN` environment variable is set.
*
* If the `APIFY_LOCAL_STORAGE_DIR` environment variable is set, the queue data is stored in
* that local directory as follows:
* ```
* {APIFY_LOCAL_STORAGE_DIR}/request_queues/{QUEUE_ID}/{STATE}/{NUMBER}.json
* ```
* Note that `{QUEUE_ID}` is the name or ID of the request queue. The default queue has ID: `default`,
* unless you override it by setting the `APIFY_DEFAULT_REQUEST_QUEUE_ID` environment variable.
* Each request in the queue is stored as a separate JSON file, where `{STATE}` is either `handled` or `pending`,
* and `{NUMBER}` is an integer indicating the position of the request in the queue.
*
* If the `APIFY_TOKEN` environment variable is set but `APIFY_LOCAL_STORAGE_DIR` not, the data is stored in the
* <a href="https://www.apify.com/docs/storage#queue" target="_blank">Apify Request Queue</a>
* cloud storage. Note that you can force usage of the cloud storage also by passing the `forceCloud`
* option to [`Apify.openRequestQueue()`](apify#module_Apify.openRequestQueue) function,
* even if the `APIFY_LOCAL_STORAGE_DIR` variable is set.
*
* **Example usage:**
*
* ```javascript
* // Open the default request queue associated with the actor run
* const queue = await Apify.openRequestQueue();
*
* // Open a named request queue
* const queueWithName = await Apify.openRequestQueue('some-name');
*
* // Enqueue few requests
* await queue.addRequest(new Apify.Request({ url: 'http://example.com/aaa' }));
* await queue.addRequest(new Apify.Request({ url: 'http://example.com/bbb' }));
* await queue.addRequest(new Apify.Request({ url: 'http://example.com/foo/bar' }), { forefront: true });
*
* // Get requests from queue
* const request1 = await queue.fetchNextRequest();
* const request2 = await queue.fetchNextRequest();
* const request3 = await queue.fetchNextRequest();
*
* // Mark a request as handled
* await queue.markRequestHandled(request1);
*
* // If processing of a request fails then reclaim it back to the queue, so that it's crawled again
* await queue.reclaimRequest(request2);
* ```
* @hideconstructor
*/
export class RequestQueue {
    constructor(queueId, queueName) {
        checkParamOrThrow(queueId, 'queueId', 'String');
        checkParamOrThrow(queueName, 'queueName', 'Maybe String');

        this.queueId = queueId;
        this.queueName = queueName;
        // In-memory head of the queue: ordered dictionary of request IDs to fetch next.
        this.queueHeadDict = new ListDictionary();
        this.requestIdsInProgress = {};
        this.inProgressCount = 0;
        // Shared promise so that concurrent callers issue only one getHead() query at a time.
        this.queryQueueHeadPromise = null;

        // Caching requests to avoid duplicate addRequest() calls.
        // Key is computed using getRequestId() and value is { id, isHandled }.
        this.requestsCache = new LruCache({ maxLength: MAX_CACHED_REQUESTS });

        // This contains false if we were not able to get queue head with queueModifiedAt older than
        // at least API_PROCESSED_REQUESTS_DELAY_MILLIS.
        this.isHeadConsistent = true;
    }

    /**
     * Adds a request to the queue.
     *
     * If a request with the same `uniqueKey` property is already present in the queue,
     * it will not be updated. You can find out whether this happened from the resulting
     * {@link QueueOperationInfo} object.
     *
     * @param {Request|Object} request {@link Request} object, or an object to construct a `Request` instance from.
     * @param {Object} [options]
     * @param {Boolean} [options.forefront=false] If `true`, the request will be added to the foremost position in the queue.
     * @return {QueueOperationInfo}
     */
    addRequest(request, options = {}) {
        const { forefront, request: newRequest } = validateAddRequestParams(request, options);
        if (newRequest) {
            request = newRequest;
        }

        const cacheKey = getRequestId(request.uniqueKey);
        const cachedInfo = this.requestsCache.get(cacheKey);

        // Short-circuit without an API call if we already know about this uniqueKey.
        if (cachedInfo) {
            return Promise.resolve({
                wasAlreadyPresent: true,
                // We may assume that if request is in local cache then also the information if the
                // request was already handled is there because just one client should be using one queue.
                wasAlreadyHandled: cachedInfo.isHandled,
                requestId: cachedInfo.id,
                // TODO: Why not set request.id to cachedInfo.id???
                request,
            });
        }

        return requestQueues
            .addRequest({
                request,
                queueId: this.queueId,
                forefront,
            })
            .then((queueOperationInfo) => {
                const { requestId, wasAlreadyHandled } = queueOperationInfo;

                this._cacheRequest(cacheKey, queueOperationInfo);

                // Forefront requests go straight into the in-memory head so that they can be
                // fetched immediately, unless they are being processed or already handled.
                if (forefront && !this.requestIdsInProgress[requestId] && !wasAlreadyHandled) {
                    this.queueHeadDict.add(requestId, requestId, true);
                }

                // TODO: Why not set request.id to cachedInfo.id???
                queueOperationInfo.request = request;

                return queueOperationInfo;
            });
    }

    /**
     * Gets the request from the queue specified by ID.
     *
     * @param {String} requestId Request ID
     * @return {Promise<Request>}
     */
    getRequest(requestId) {
        validateGetRequestParams(requestId);

        // TODO: Could we also use requestsCache here? It would be consistent with addRequest()
        return requestQueues
            .getRequest({
                requestId,
                queueId: this.queueId,
            })
            .then(obj => (obj ? new Request(obj) : obj));
    }

    /**
     * Returns next request in the queue to be processed.
     *
     * @returns {Promise<Request>}
     */
    fetchNextRequest() {
        return this
            ._ensureHeadIsNonEmpty()
            .then(() => {
                const nextId = this.queueHeadDict.removeFirst();

                // We are likely done at this point.
                if (!nextId) return null;

                this._addToInProgress(nextId);

                return this
                    .getRequest(nextId)
                    .then((request) => {
                        // We need to handle this situation because request may not be available
                        // immediately after adding to the queue; return it to the head for a retry.
                        if (!request) {
                            this._removeFromInProgress(nextId);
                            this.queueHeadDict.add(nextId, nextId, false);
                        }

                        return request;
                    });
            });
    }

    /**
     * Marks request handled after successful processing.
     *
     * @param {Request} request
     * @return {Promise<QueueOperationInfo>}
     */
    markRequestHandled(request) {
        validateMarkRequestHandledParams(request);

        // NOTE(review): this throws synchronously from a promise-returning method — confirm
        // callers expect a throw rather than a rejected promise.
        if (!this.requestIdsInProgress[request.id]) {
            throw new Error(`Cannot mark request ${request.id} as handled as it is not in progress!`);
        }

        if (!request.handledAt) request.handledAt = new Date();

        return requestQueues
            .updateRequest({
                request,
                queueId: this.queueId,
            })
            .then((queueOperationInfo) => {
                this._removeFromInProgress(request.id);
                this._cacheRequest(getRequestId(request.uniqueKey), queueOperationInfo);

                queueOperationInfo.request = request;

                return queueOperationInfo;
            });
    }

    /**
     * Reclaims failed request back to the queue,
     * so that it can be processed later again.
     *
     * @param {Request} request
     * @param {Object} [options]
     * @param {Boolean} [options.forefront=false]
     * If `true` then requests get returned to the start of the queue
     * and to the back of the queue otherwise.
     * @return {Promise<QueueOperationInfo>}
     */
    reclaimRequest(request, options = {}) {
        const { forefront } = validateReclaimRequestParams(request, options);

        return requestQueues
            .updateRequest({
                request,
                queueId: this.queueId,
                forefront,
            })
            .then((queueOperationInfo) => {
                this._removeFromInProgress(request.id);
                this._cacheRequest(getRequestId(request.uniqueKey), queueOperationInfo);

                if (forefront) this.queueHeadDict.add(request.id, request.id, true);

                queueOperationInfo.request = request;

                return queueOperationInfo;
            });
    }

    /**
     * Resolves to `true` if the next call to {@link RequestQueue#fetchNextRequest} would return `null`, otherwise it resolves to `false`.
     * Note that even if the queue is empty, there might be some pending requests currently being processed.
     *
     * Due to the nature of distributed storage systems,
     * the function might occasionally return a false negative, but it should never return a false positive!
     *
     * @returns {Promise<Boolean>}
     */
    isEmpty() {
        return this
            ._ensureHeadIsNonEmpty()
            .then(() => this.isHeadConsistent && this.queueHeadDict.length() === 0);
    }

    /**
     * Resolves to `true` if all requests were already handled and there are no more left.
     * Due to the nature of distributed storage systems,
     * the function might occasionally return a false negative, but it will never return a false positive.
     *
     * @returns {Promise<Boolean>}
     */
    isFinished() {
        return this
            ._ensureHeadIsNonEmpty()
            .then(() => this.isHeadConsistent && this.inProgressCount === 0 && this.queueHeadDict.length() === 0);
    }

    /**
     * Caches information about request to avoid unneeded addRequest() calls.
     *
     * @ignore
     */
    _cacheRequest(cacheKey, queueOperationInfo) {
        checkParamOrThrow(cacheKey, 'cacheKey', 'String');
        checkParamOrThrow(queueOperationInfo, 'queueOperationInfo', 'Object');
        checkParamOrThrow(queueOperationInfo.requestId, 'queueOperationInfo.requestId', 'String');
        checkParamOrThrow(queueOperationInfo.wasAlreadyHandled, 'queueOperationInfo.wasAlreadyHandled', 'Boolean');

        this.requestsCache.add(cacheKey, {
            id: queueOperationInfo.requestId,
            isHandled: queueOperationInfo.wasAlreadyHandled,
        });
    }

    /**
     * Marks a request ID as being processed and bumps the in-progress counter.
     * @ignore
     */
    _addToInProgress(requestId) {
        checkParamOrThrow(requestId, 'requestId', 'String');

        // Is already there.
        if (this.requestIdsInProgress[requestId]) return;

        this.requestIdsInProgress[requestId] = requestId;
        this.inProgressCount++;
    }

    /**
     * Removes a request ID from the in-progress set and decrements the counter.
     * @ignore
     */
    _removeFromInProgress(requestId) {
        checkParamOrThrow(requestId, 'requestId', 'String');

        // Is already removed.
        if (!this.requestIdsInProgress[requestId]) return;

        delete this.requestIdsInProgress[requestId];
        this.inProgressCount--;
    }

    /**
     * We always request more items than is in progress to ensure that something
     * falls into head.
     *
     * @ignore
     */
    _ensureHeadIsNonEmpty(checkModifiedAt = false, limit = Math.max(this.inProgressCount * QUERY_HEAD_BUFFER, QUERY_HEAD_MIN_LENGTH), iteration = 0) {
        checkParamOrThrow(checkModifiedAt, 'checkModifiedAt', 'Boolean');
        checkParamOrThrow(limit, 'limit', 'Number');
        checkParamOrThrow(iteration, 'iteration', 'Number');

        // If is nonempty resolve immediately.
        if (this.queueHeadDict.length()) return Promise.resolve();

        if (!this.queryQueueHeadPromise) {
            const queryStartedAt = new Date();

            this.queryQueueHeadPromise = requestQueues
                .getHead({
                    limit,
                    queueId: this.queueId,
                })
                .then(({ items, queueModifiedAt }) => {
                    items.forEach(({ id, uniqueKey }) => {
                        // Requests currently being processed must not re-enter the head.
                        if (!this.requestIdsInProgress[id]) {
                            this.queueHeadDict.add(id, id, false);
                            this._cacheRequest(getRequestId(uniqueKey), { requestId: id, wasAlreadyHandled: false });
                        }
                    });

                    // This is needed so that the next call can request queue head again.
                    this.queryQueueHeadPromise = null;

                    return {
                        limitReached: items.length === limit,
                        prevLimit: limit,
                        queueModifiedAt: new Date(queueModifiedAt),
                        queryStartedAt,
                    };
                });
        }

        return this.queryQueueHeadPromise
            .then(({ queueModifiedAt, limitReached, prevLimit, queryStartedAt }) => {
                this.isHeadConsistent = true;

                // If queue is still empty then it's likely because some of the other calls waiting
                // for this promise already consumed all the returned requests or the limit was too
                // low and contained only requests in progress.
                //
                // If limit was not reached in the call then there are no more requests to be returned.
                const shouldRepeatWithHigherLimit = !this.queueHeadDict.length() && limitReached && prevLimit < REQUEST_QUEUE_HEAD_MAX_LIMIT;

                // If checkModifiedAt=true then we must ensure that queueModifiedAt is older than
                // queryStartedAt for at least API_PROCESSED_REQUESTS_DELAY_MILLIS, i.e. we repeat
                // the query only while the queue was modified too recently for the API results to
                // be consistent. (Fixed: the comparison previously used ">", which repeated exactly
                // when the head WAS already consistent, inverting the documented intent.)
                // NOTE(review): the trailing "&& iteration" skips this check on the very first
                // iteration — confirm that is intentional.
                const shouldRepeatForConsistency = (
                    checkModifiedAt
                    && (queryStartedAt - queueModifiedAt < API_PROCESSED_REQUESTS_DELAY_MILLIS)
                    && iteration
                );

                if (shouldRepeatWithHigherLimit || shouldRepeatForConsistency) {
                    // If we are querying for consistency then we limit the number of queries to MAX_QUERIES_FOR_CONSISTENCY.
                    // If this is reached then we set this.isHeadConsistent=false so that isEmpty() and isFinished()
                    // may return a false negative.
                    if (!shouldRepeatWithHigherLimit && iteration > MAX_QUERIES_FOR_CONSISTENCY) {
                        this.isHeadConsistent = false;
                        return;
                    }

                    const nextLimit = shouldRepeatWithHigherLimit
                        ? prevLimit * 1.5
                        : prevLimit;
                    const delayMillis = shouldRepeatForConsistency
                        ? API_PROCESSED_REQUESTS_DELAY_MILLIS
                        : 0;

                    return delayPromise(delayMillis)
                        .then(() => this._ensureHeadIsNonEmpty(checkModifiedAt, nextLimit, iteration + 1));
                }
            });
    }

    /**
     * Removes the queue either from the Apify Cloud storage or from the local directory,
     * depending on the mode of operation.
     *
     * @return {Promise}
     */
    delete() {
        return requestQueues
            .deleteQueue({
                queueId: this.queueId,
            })
            .then(() => {
                queuesCache.remove(this.queueId);
                if (this.queueName) queuesCache.remove(this.queueName);
            });
    }

    /**
     * Returns the number of handled requests.
     *
     * @return {Promise<number>}
     */
    async handledCount() {
        const queueInfo = await requestQueues.getQueue({ queueId: this.queueId });
        return queueInfo.handledRequestCount;
    }
}
/**
 * Helper function that extracts the queue order number from a request file path,
 * e.g. "/some/dir/2000123456789.json" -> 2000123456789.
 *
 * @ignore
 */
const filePathToQueueOrderNo = (filepath) => {
    const filename = filepath.split(path.sep).pop(); // Strip the directory part.
    const [orderNoStr] = filename.split('.'); // Drop the ".json" extension.
    return parseInt(orderNoStr, 10);
};
/**
* Local directory-based implementation of the `RequestQueue` class.
*
* @ignore
*/
export class RequestQueueLocal {
    constructor(queueId, localStorageDir) {
        checkParamOrThrow(queueId, 'queueId', 'String');
        checkParamOrThrow(localStorageDir, 'localStorageDir', 'String');
        this.queueId = queueId;
        // Requests live under {localStorageDir}/{LOCAL_STORAGE_SUBDIR}/{queueId}/{handled|pending}/{orderNo}.json
        this.localStoragePath = path.resolve(path.join(localStorageDir, LOCAL_STORAGE_SUBDIR, queueId));
        this.localHandledEmulationPath = path.join(this.localStoragePath, 'handled');
        this.localPendingEmulationPath = path.join(this.localStoragePath, 'pending');
        this.queueOrderNoCounter = 0; // Counter used in _getQueueOrderNo to ensure there won't be a collision.
        this.pendingCount = 0;
        this._handledCount = 0;
        this.inProgressCount = 0;
        // Maps request ID -> order number (i.e. filename) so requests can be located on disk.
        this.requestIdToQueueOrderNo = {};
        // Order numbers currently being processed; guards against fetching the same request twice.
        this.queueOrderNoInProgress = {};
        // All public methods await this promise, so the directory scan finishes before any operation.
        this.initializationPromise = this._initialize();
    }
    // Creates the storage directories (and all parents) as necessary, counts existing
    // handled/pending requests and indexes them into requestIdToQueueOrderNo.
    async _initialize() {
        // NOTE: This creates all root dirs as necessary.
        await ensureDirExists(this.localHandledEmulationPath);
        await ensureDirExists(this.localPendingEmulationPath);
        const [handled, pending] = await Promise.all([
            readdirPromised(this.localHandledEmulationPath),
            readdirPromised(this.localPendingEmulationPath),
        ]);
        this.pendingCount = pending.length;
        this._handledCount = handled.length;
        const handledPaths = handled.map(filename => path.join(this.localHandledEmulationPath, filename));
        const pendingPaths = pending.map(filename => path.join(this.localPendingEmulationPath, filename));
        const filePaths = handledPaths.concat(pendingPaths);
        return Promise.mapSeries(filePaths, filepath => this._readFile(filepath));
    }
    // Reads one stored request file and records its ID -> order number mapping.
    async _readFile(filepath) {
        const str = await readFilePromised(filepath);
        const request = JSON.parse(str);
        const queueOrderNo = filePathToQueueOrderNo(filepath);
        this.requestIdToQueueOrderNo[request.id] = queueOrderNo;
    }
    // Builds the on-disk path for a given order number, in either the handled or pending dir.
    _getFilePath(queueOrderNo, isHandled = false) {
        const fileName = `${queueOrderNo}.json`;
        const dir = isHandled
            ? this.localHandledEmulationPath
            : this.localPendingEmulationPath;
        return path.join(dir, fileName);
    }
    // Generates a sortable order number: forefront requests sort before normal ones
    // (prefix 1 vs 2), and within each group ordering is derived from the current time.
    _getQueueOrderNo(forefront = false) {
        const sgn = (forefront ? 1 : 2) * (10 ** 15);
        const base = (10 ** (13)); // Date.now() returns int with 13 numbers.
        // We always add the counter for a case that two pages are inserted at the same millisecond.
        const now = Date.now() + this.queueOrderNoCounter++;
        const queueOrderNo = forefront
            ? sgn + (base - now)
            : sgn + (base + now);
        return queueOrderNo;
    }
    // Loads the request with the given order number, looking first in pending and then in handled.
    _getRequestByQueueOrderNo(queueOrderNo) {
        checkParamOrThrow(queueOrderNo, 'queueOrderNo', 'Number');
        return readFilePromised(this._getFilePath(queueOrderNo, false))
            .catch((err) => {
                // ENOENT in pending just means the request may be in handled; rethrow anything else.
                if (err.code !== 'ENOENT') throw err;
                return readFilePromised(this._getFilePath(queueOrderNo, true));
            })
            .then((str) => {
                if (!str) throw new Error('Request was not found in none of handled and pending directories!');
                const obj = JSON.parse(str);
                return new Request(obj);
            });
    }
    // Adds a request to the local queue; mirrors RequestQueue.addRequest() semantics.
    addRequest(request, opts = {}) {
        const { forefront, request: newRequest } = validateAddRequestParams(request, opts);
        if (newRequest) {
            request = newRequest;
        }
        return this.initializationPromise
            .then(() => {
                const queueOrderNo = this._getQueueOrderNo(forefront);
                // Add ID as server does.
                // TODO: This way of cloning doesn't preserve Dates!
                const requestCopy = JSON.parse(JSON.stringify(request));
                requestCopy.id = getRequestId(request.uniqueKey);
                // If request already exists then don't override it!
                if (this.requestIdToQueueOrderNo[requestCopy.id]) {
                    return this
                        .getRequest(requestCopy.id)
                        .then(existingRequest => ({
                            requestId: existingRequest.id,
                            // NOTE(review): this yields a Date/undefined rather than a Boolean —
                            // confirm callers only test it for truthiness.
                            wasAlreadyHandled: existingRequest && existingRequest.handledAt,
                            wasAlreadyPresent: true,
                            request,
                        }));
                }
                this.requestIdToQueueOrderNo[requestCopy.id] = queueOrderNo;
                if (!requestCopy.handledAt) this.pendingCount++;
                // Already-handled requests go straight into the handled directory.
                const filePath = this._getFilePath(queueOrderNo, !!requestCopy.handledAt);
                return writeFilePromised(filePath, JSON.stringify(requestCopy, null, 4))
                    .then(() => ({
                        requestId: requestCopy.id,
                        wasAlreadyHandled: false,
                        wasAlreadyPresent: false,
                        request,
                    }));
            });
    }
    // Looks a request up by ID via the in-memory order-number index.
    getRequest(requestId) {
        validateGetRequestParams(requestId);
        return this.initializationPromise
            .then(() => {
                const queueOrderNo = this.requestIdToQueueOrderNo[requestId];
                return this._getRequestByQueueOrderNo(queueOrderNo);
            });
    }
    // Returns the next pending request (directory listing order = queue order),
    // skipping requests that are already in progress. Resolves to null when drained.
    async fetchNextRequest() {
        await this.initializationPromise;
        const files = await readdirPromised(this.localPendingEmulationPath);
        let request = null;
        while (!request && files.length) {
            const filename = files.shift();
            const queueOrderNo = filePathToQueueOrderNo(filename);
            if (this.queueOrderNoInProgress[queueOrderNo]) continue; // eslint-disable-line
            this.queueOrderNoInProgress[queueOrderNo] = true;
            this.inProgressCount++;
            // TODO: There must be a better way. This try/catch is here because there is a race condition
            // between this and a call to reclaimRequest() or markRequestHandled() that may move/rename/delete
            // the file between readdirPromised() and this function.
            // I.e. the file gets listed in readdirPromised() but removed from this.queueOrderNoInProgress
            // meanwhile, causing this to fail.
            try {
                request = await this._getRequestByQueueOrderNo(queueOrderNo);
            } catch (err) {
                // Roll back the in-progress bookkeeping; only a missing file (ENOENT) is tolerated.
                delete this.queueOrderNoInProgress[queueOrderNo];
                this.inProgressCount--;
                if (err.code !== 'ENOENT') throw err;
            }
        }
        return request;
    }
    // Moves an in-progress request from the pending to the handled directory.
    markRequestHandled(request) {
        validateMarkRequestHandledParams(request);
        return this.initializationPromise
            .then(() => {
                const queueOrderNo = this.requestIdToQueueOrderNo[request.id];
                const source = this._getFilePath(queueOrderNo, false);
                const dest = this._getFilePath(queueOrderNo, true);
                if (!this.queueOrderNoInProgress[queueOrderNo]) {
                    throw new Error(`Cannot mark request ${request.id} handled request that is not in progress!`);
                }
                if (!request.handledAt) request.handledAt = new Date();
                // NOTE: First write to old file and then rename to new one to do the operation atomically.
                // Situation where two files exists at the same time may cause race condition bugs.
                return writeFilePromised(source, JSON.stringify(request, null, 4))
                    .then(() => renamePromised(source, dest))
                    .then(() => {
                        this.pendingCount--;
                        this._handledCount++;
                        this.inProgressCount--;
                        delete this.queueOrderNoInProgress[queueOrderNo];
                        return {
                            requestId: request.id,
                            wasAlreadyHandled: false,
                            wasAlreadyPresent: true,
                            request,
                        };
                    });
            });
    }
    // Returns an in-progress request to the pending directory under a fresh order number,
    // optionally placing it at the front of the queue.
    reclaimRequest(request, opts = {}) {
        const { forefront } = validateReclaimRequestParams(request, opts);
        return this.initializationPromise
            .then(() => {
                const oldQueueOrderNo = this.requestIdToQueueOrderNo[request.id];
                const newQueueOrderNo = this._getQueueOrderNo(forefront);
                const source = this._getFilePath(oldQueueOrderNo);
                const dest = this._getFilePath(newQueueOrderNo);
                if (!this.queueOrderNoInProgress[oldQueueOrderNo]) {
                    throw new Error(`Cannot reclaim request ${request.id} that is not in progress!`);
                }
                this.requestIdToQueueOrderNo[request.id] = newQueueOrderNo;
                // NOTE: First write to old file and then rename to new one to do the operation atomically.
                // Situation where two files exists at the same time may cause race condition bugs.
                return writeFilePromised(source, JSON.stringify(request, null, 4))
                    .then(() => renamePromised(source, dest))
                    .then(() => {
                        this.inProgressCount--;
                        delete this.queueOrderNoInProgress[oldQueueOrderNo];
                        return {
                            requestId: request.id,
                            wasAlreadyHandled: false,
                            wasAlreadyPresent: true,
                            request,
                        };
                    });
            });
    }
    // True when every pending request is currently being processed (i.e. nothing left to fetch).
    isEmpty() {
        return this.initializationPromise
            .then(() => this.pendingCount === this.inProgressCount);
    }
    // True when there are no pending requests at all.
    isFinished() {
        return this.initializationPromise
            .then(() => this.pendingCount === 0);
    }
    // Removes all queue data from disk and evicts this queue from the cache.
    delete() {
        return emptyDirPromised(this.localStoragePath)
            .then(() => {
                queuesCache.remove(this.queueId);
            });
    }
    // Returns the number of handled requests (counted at init, maintained since).
    async handledCount() {
        await this.initializationPromise;
        return this._handledCount;
    }
}
/**
 * Helper function that first requests the queue by ID and, if no such queue
 * exists, gets or creates it by name.
 *
 * @ignore
 */
const getOrCreateQueue = (queueIdOrName) => {
    return requestQueues
        .getQueue({ queueId: queueIdOrName })
        .then(existingQueue => existingQueue || requestQueues.getOrCreateQueue({ queueName: queueIdOrName }));
};
/**
* Opens a request queue and returns a promise resolving to an instance
* of the {@link RequestQueue} class.
*
* {@link RequestQueue} represents a queue of URLs to crawl, which is stored either on local filesystem or in the cloud.
* The queue is used for deep crawling of websites, where you start with several URLs and then
* recursively follow links to other pages. The data structure supports both breadth-first
* and depth-first crawling orders.
*
* For more details and code examples, see the {@link RequestQueue} class.
*
* @param {string} [queueIdOrName]
* ID or name of the request queue to be opened. If `null` or `undefined`,
* the function returns the default request queue associated with the actor run.
* @param {object} [options]
* @param {boolean} [options.forceCloud=false]
* If set to `true` then the function uses cloud storage usage even if the `APIFY_LOCAL_STORAGE_DIR`
* environment variable is set. This way it is possible to combine local and cloud storage.
* @returns {Promise<RequestQueue>}
* @memberof module:Apify
* @name openRequestQueue
*/
export const openRequestQueue = (queueIdOrName, options = {}) => {
    checkParamOrThrow(queueIdOrName, 'queueIdOrName', 'Maybe String');
    checkParamOrThrow(options, 'options', 'Object');
    ensureTokenOrLocalStorageEnvExists('request queue');

    const { forceCloud = false } = options;
    checkParamOrThrow(forceCloud, 'options.forceCloud', 'Boolean');

    // Local emulation is used only when the local storage dir is configured
    // and the caller did not explicitly force the cloud storage.
    const useLocalEmulation = !!process.env[ENV_VARS.LOCAL_STORAGE_DIR] && !forceCloud;
    if (useLocalEmulation) {
        return openLocalStorage(queueIdOrName, ENV_VARS.DEFAULT_REQUEST_QUEUE_ID, RequestQueueLocal, queuesCache);
    }
    return openRemoteStorage(queueIdOrName, ENV_VARS.DEFAULT_REQUEST_QUEUE_ID, RequestQueue, queuesCache, getOrCreateQueue);
};
|
/**
* Copyright 2015-2021 <NAME> (http://vsilaev.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.tascalate.async.tools.instrumentation;
import java.lang.instrument.ClassFileTransformer;
import java.lang.instrument.Instrumentation;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.javaflow.instrumentation.JavaFlowClassTransformer;
import org.apache.commons.javaflow.spi.InstrumentationUtils;
import net.tascalate.instrument.agent.AbstractLambdaAwareInstrumentationAgent;
/**
 * Java agent that installs the async/await bytecode instrumentation
 * (a {@link JavaFlowClassTransformer}-backed transformer), supporting both
 * static loading via {@code premain} and dynamic attach via {@code agentmain}.
 */
public class AsyncAwaitInstrumentationAgent extends AbstractLambdaAwareInstrumentationAgent {
    // Continuations transformer shared by the class-file transformer and the lambda hook.
    private final ClassFileTransformer continuationsTransformer = new JavaFlowClassTransformer();
    protected AsyncAwaitInstrumentationAgent(String arguments, Instrumentation instrumentation) {
        super(arguments, instrumentation);
    }
    /**
     * JVM hook to statically load the javaagent at startup.
     *
     * After the Java Virtual Machine (JVM) has initialized, the premain method
     * will be called. Then the real application main method will be called.
     *
     * @param args arguments supplied to the agent
     * @param instrumentation {@link Instrumentation} object passed by JVM
     * @throws Exception thrown when agent is unable to start
     */
    public static void premain(String args, Instrumentation instrumentation) throws Exception {
        AsyncAwaitInstrumentationAgent agent = new AsyncAwaitInstrumentationAgent(args, instrumentation);
        agent.attachDefaultLambdaInstrumentationHook();
        agent.install();
    }
    /**
     * JVM hook to dynamically load javaagent at runtime.
     *
     * The agent class may have an agentmain method for use when the agent is
     * started after VM startup. Unlike {@link #premain}, already-loaded classes
     * are retransformed, excluding the agent's own packages and its dependencies.
     *
     * @param args arguments supplied to the agent
     * @param instrumentation {@link Instrumentation} object passed by JVM
     * @throws Exception thrown when agent is unable to start
     */
    public static void agentmain(String args, Instrumentation instrumentation) throws Exception {
        AsyncAwaitInstrumentationAgent agent = new AsyncAwaitInstrumentationAgent(args, instrumentation);
        agent.attachDefaultLambdaInstrumentationHook();
        // Never retransform the agent's own packages, the javaflow SPI packages
        // or the declared dependency packages.
        Set<String> nonRetransformPackages = new HashSet<String>(BASE_OWN_PACKAGES);
        nonRetransformPackages.addAll(
            InstrumentationUtils.packagePrefixesOf(InstrumentationUtils.class)
        );
        nonRetransformPackages.addAll(Dependencies.PACKAGES);
        agent.attach(nonRetransformPackages);
    }
    @Override
    protected Collection<ClassFileTransformer> createTransformers(boolean canRetransform) {
        // Only register the transformer when retransformation is supported;
        // otherwise no transformers are contributed.
        if (canRetransform) {
            ClassFileTransformer transformer = new AsyncAwaitClassFileTransformer(continuationsTransformer, instrumentation);
            return Collections.singleton(transformer);
        } else {
            return Collections.emptySet();
        }
    }
    @Override
    protected String readLambdaClassName(byte[] bytes) {
        return InstrumentationUtils.readClassName(bytes);
    }
    // Registers the lambda-class hook that runs the continuations transformer
    // on dynamically generated lambda classes.
    void attachDefaultLambdaInstrumentationHook() throws Exception {
        attachLambdaInstrumentationHook(createLambdaClassTransformer(continuationsTransformer));
    }
}
|
#!/bin/bash
# profiles = xccdf_org.ssgproject.content_profile_ospp

# Test scenario: applies settings with preauth and account phases enabled but the
# authfail phase disabled. (Presumably exercises a pam_faillock-style rule —
# confirm against the helpers in shared.sh.)
. shared.sh

# Flags passed to insert_or_remove_settings (1 = enable that phase's settings).
preauth_set=1
authfail_set=0
account_set=1

# PAM stacks the settings are applied to.
auth_files[0]="/etc/pam.d/system-auth"
auth_files[1]="/etc/pam.d/password-auth"

# Interval value (seconds) passed to insert_or_remove_settings.
interval="900"

set_default_configuration
insert_or_remove_settings $preauth_set $authfail_set $account_set $interval ${auth_files[*]}
|
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

source $(dirname $0)/config-go.sh

# Go to the top of the tree.
cd "${KUBE_REPO_ROOT}"

# Check for `go` binary and set ${GOPATH}.
kube::setup_go_environment

# Prints (NUL-safely collected) directories containing *_test.go files,
# excluding output/release/vendor-style trees, one per line, sorted.
find_test_dirs() {
  cd src/${KUBE_GO_PACKAGE}
  find . -not \( \
      \( \
        -wholename './output' \
        -o -wholename './_output' \
        -o -wholename './release' \
        -o -wholename './target' \
        -o -wholename '*/third_party/*' \
        -o -wholename '*/Godeps/*' \
      \) -prune \
    \) -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./||' | sort -u
}

# Prefixes each test directory with the repository's Go package path.
find_test_pkgs() {
  find_test_dirs | xargs -n1 printf "${KUBE_GO_PACKAGE}/%s\n"
}

# -covermode=atomic becomes default with -race in Go >=1.3
KUBE_COVER=${KUBE_COVER:--cover -covermode=atomic}
KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 30s}

cd "${KUBE_TARGET}"

usage() {
  cat << EOF
usage: $0 [OPTIONS] [TARGETS]

OPTIONS:
  -i <number>   : number of times to run each test, must be >= 1
EOF
}

# Returns success iff the argument is a non-negative integer.
isnum() {
  [[ "$1" =~ ^[0-9]+$ ]]
}

iterations=1
while getopts "hi:" opt ; do
  case $opt in
    h)
      usage
      exit 0
      ;;
    i)
      iterations="$OPTARG"
      if ! isnum "${iterations}" || [[ "${iterations}" -le 0 ]]; then
        echo "$0": argument to -i must be numeric and greater than 0 >&2
        usage >&2
        exit 1
      fi
      ;;
    ?)
      usage >&2
      exit 1
      ;;
    :)
      echo "Option -$OPTARG <value>" >&2
      usage >&2
      exit 1
      ;;
  esac
done
shift $((OPTIND - 1))

# Use eval to preserve embedded quoted strings.
eval "goflags=(${GOFLAGS:-})"

# Filter out arguments that start with "-" and move them to goflags.
testcases=()
for arg; do
  if [[ "${arg}" == -* ]]; then
    goflags+=("${arg}")
  else
    testcases+=("${arg}")
  fi
done
set -- ${testcases[@]+"${testcases[@]}"}

# Repeat-run mode: run each package ${iterations} times (without coverage),
# reporting a pass count per package and failing if any run failed.
if [[ "${iterations}" -gt 1 ]]; then
  if [[ $# -eq 0 ]]; then
    set -- $(find_test_dirs)
  fi
  echo "Running ${iterations} times"
  fails=0
  for arg; do
    trap 'exit 1' SIGINT
    echo
    pkg=${KUBE_GO_PACKAGE}/${arg}
    echo "${pkg}"
    # keep going, even if there are failures
    pass=0
    count=0
    for i in $(seq 1 ${iterations}); do
      if go test "${goflags[@]:+${goflags[@]}}" \
          -race ${KUBE_TIMEOUT} "${pkg}"; then
        pass=$((pass + 1))
      else
        fails=$((fails + 1))
      fi
      count=$((count + 1))
    done 2>&1
    echo "${pass}" / "${count}" passed
  done
  if [[ ${fails} -gt 0 ]]; then
    exit 1
  else
    exit 0
  fi
fi

# Targeted mode: run the given packages once each with coverage profiles
# written under a timestamped /tmp directory.
if [[ -n "$1" ]]; then
  covdir="/tmp/k8s_coverage/$(date "+%s")"
  echo saving coverage output in "${covdir}"
  for arg; do
    trap 'exit 1' SIGINT
    mkdir -p "${covdir}/${arg}"
    pkg=${KUBE_GO_PACKAGE}/${arg}
    go test "${goflags[@]:+${goflags[@]}}" \
        -race \
        ${KUBE_TIMEOUT} \
        ${KUBE_COVER} -coverprofile="${covdir}/${arg}/coverage.out" \
        "${pkg}"
  done
  exit 0
fi

# Default mode: run every discovered test package once with race and coverage.
find_test_pkgs | xargs go test "${goflags[@]:+${goflags[@]}}" \
    -race \
    ${KUBE_TIMEOUT} \
    ${KUBE_COVER}
|
def caesar_cipher(text, key):
    """Encrypt ``text`` with a Caesar cipher, shifting letters by ``key``.

    ASCII letters are rotated within their own case (a-z or A-Z); every
    other character is copied through unchanged. ``key`` may be any
    integer: negative keys and keys larger than 26 wrap correctly.

    Args:
        text: Plaintext string to encrypt.
        key: Integer shift amount.

    Returns:
        The encrypted string.
    """
    encryption = ""
    for char in text:
        # Rotate within the 26-letter alphabet using modular arithmetic.
        # (The previous version only wrapped past 'z', so shifted uppercase
        # letters fell into punctuation, e.g. 'Z' + 1 -> '['.)
        if 'a' <= char <= 'z':
            encryption += chr(ord('a') + (ord(char) - ord('a') + key) % 26)
        elif 'A' <= char <= 'Z':
            encryption += chr(ord('A') + (ord(char) - ord('A') + key) % 26)
        else:
            # Leave non-alphabetic characters unchanged.
            encryption += char
    return encryption
import torch
def perform_evaluation(model, eval_checkpoint_path, args, eval_dataset, eval_loader, model_save_path, device):
    """Restore model weights from a checkpoint and run evaluation.

    Args:
        model: Model whose parameters are overwritten in place from the checkpoint.
        eval_checkpoint_path: Path to a state-dict checkpoint saved during training.
        args: Parsed arguments; only ``args.eval_num_checkpoint`` is read here,
            to name the result file.
        eval_dataset: Dataset forwarded to ``evaluate``.
        eval_loader: DataLoader forwarded to ``evaluate``.
        model_save_path: Directory in which the ``<n>.result`` file is written.
        device: Device forwarded to ``evaluate``.

    Returns:
        Path of the result file written by ``evaluate``.
    """
    # Load the trained model from the checkpoint path.
    # NOTE(review): torch.load without map_location assumes the checkpoint's
    # save device is available here -- confirm for CPU-only evaluation.
    model.load_state_dict(torch.load(eval_checkpoint_path))
    print('Model loaded from', eval_checkpoint_path)
    # Evaluate the model's performance on the evaluation dataset. `evaluate`
    # is defined elsewhere in this project and writes its output to this path.
    eval_output_path = '{}/{}.result'.format(model_save_path, str(args.eval_num_checkpoint))
    evaluate(eval_dataset, eval_loader, model, device, eval_output_path)
    return eval_output_path
#!/bin/bash
# shellcheck disable=SC2155
# HAOS board hooks for the Khadas VIM3 (Amlogic G12B / S922X).

# Stage boot files into the boot partition and embed the signed u-boot
# (u-boot.gxl) into the SPL image: the first 444 bytes are written one byte
# at a time (stopping before the partition-table area of sector 0), then
# everything from sector 1 onward is copied sector-aligned.
function hassos_pre_image() {
    local BOOT_DATA="$(path_boot_dir)"
    local UBOOT_GXL="${BINARIES_DIR}/u-boot.gxl"
    local SPL_IMG="$(path_spl_img)"

    cp "${BINARIES_DIR}/boot.scr" "${BOOT_DATA}/boot.scr"
    cp "${BINARIES_DIR}/meson-g12b-s922x-khadas-vim3.dtb" "${BOOT_DATA}/meson-g12b-s922x-khadas-vim3.dtb"
    cp "${BOARD_DIR}/boot-env.txt" "${BOOT_DATA}/haos-config.txt"
    cp "${BOARD_DIR}/cmdline.txt" "${BOOT_DATA}/cmdline.txt"

    # SPL
    create_spl_image
    dd if="${UBOOT_GXL}" of="${SPL_IMG}" conv=notrunc bs=1 count=444
    dd if="${UBOOT_GXL}" of="${SPL_IMG}" conv=notrunc bs=512 skip=1 seek=1
}

# Compress the assembled disk image with xz.
function hassos_post_image() {
    convert_disk_image_xz
}
|
<reponame>vaskoz/jruby
require_relative '../stdlib/cmath'
|
<reponame>getbud/bud<filename>bud/bud.go
package bud
import (
"regexp"
"time"
"github.com/getbud/bud/recurrence"
)
// Account represents a bank account.
type Account struct {
	// UUID is a unique identifier for this Account.
	UUID string `json:"uuid"`
	// Name is the name of this Account.
	Name string `json:"name"`
	// Balance is the current balance of this Account.
	// NOTE(review): presumably stored in minor currency units (pence/cents),
	// like the Amount fields below -- confirm before formatting for display.
	Balance int `json:"balance"`
}

// Category represents a grouping of Transactions of a certain kind. A Transaction may belong to
// only one Category, otherwise it would make visualisations far less useful.
type Category struct {
	// UUID is a unique identifier for this Category.
	UUID string `json:"uuid"`
	// Name is the name of this Category.
	Name string `json:"name"`
}

// PlannedTransaction represents an upcoming Transaction that has not yet occurred. It may be a
// one-off payment, or something that is recurring.
type PlannedTransaction struct {
	// UUID is a unique identifier for this PlannedTransaction.
	UUID string `json:"uuid"`
	// AccountUUID denotes the Account that this PlannedTransaction belongs to.
	AccountUUID string `json:"account_uuid"`
	// CategoryUUID denotes the Category that this PlannedTransaction belongs to.
	CategoryUUID string `json:"category_uuid"`
	// Description is a description of this PlannedTransaction. The format should be similar to the
	// Description found on a regular Transaction.
	Description string `json:"description"`
	// Amount is the expected amount that will be applied to the Balance of the linked Account for
	// this PlannedTransaction. It may be either positive or negative.
	Amount int `json:"amount"`
	// Recurrence defines when this PlannedTransaction is expected to be applied. Even if the
	// Transaction is only planned to happen once, this will still be set to define the expected
	// date of the Transaction.
	Recurrence recurrence.Rule `json:"recurrence"`
}

// Transaction represents a Transaction that has happened, i.e. it has been reflected in a
// statement that has been imported.
type Transaction struct {
	// UUID is a unique identifier for this Transaction.
	UUID string `json:"uuid"`
	// AccountUUID denotes the Account that this Transaction belongs to.
	AccountUUID string `json:"account_uuid"`
	// CategoryUUID denotes the Category that this Transaction belongs to.
	CategoryUUID string `json:"category_uuid"`
	// PlannedTransactionUUID is optionally set if this Transaction matched a Transformation that is
	// linked to a PlannedTransaction. This allows Bud to identify transactions that have or have
	// not happened.
	PlannedTransactionUUID string `json:"planned_transaction_uuid,omitempty"`
	// TransformationUUID is optionally set if this Transaction matched a Transformation.
	TransformationUUID string `json:"transformation_uuid,omitempty"`
	// Description is a description of this Transaction. It may have been changed since it was
	// originally imported.
	Description string `json:"description"`
	// Amount specifies the amount to apply to the balance of the Account linked to this
	// Transaction. It may be either positive or negative.
	Amount int `json:"amount"`
	// TransactedAt specifies the date that this Transaction happened on.
	TransactedAt time.Time `json:"transacted_at"`
}

// Transformation represents a description of how a Transaction may be transformed, given a certain
// input, e.g. a Transaction with a matching description may be changed to have a new description.
type Transformation struct {
	// UUID is a unique identifier for this Transformation.
	UUID string `json:"uuid"`
	// Description provides a description for what Transactions this Transformation applies to.
	Description string `json:"description"`
	// Pattern is a regular expression pattern that is applied to the description of a Transaction.
	// If the regular expression matches on a Transaction's description then the Transformation will
	// be applied to it.
	// NOTE(review): *regexp.Regexp has no native JSON marshalling -- confirm a
	// custom (un)marshaller exists elsewhere, otherwise this field round-trips badly.
	Pattern *regexp.Regexp `json:"pattern"`
	// CategoryUUID denotes which Category will be applied to a matching Transaction.
	CategoryUUID string `json:"category_uuid"`
	// PlannedTransactionUUID is an optional PlannedTransaction to link this Transformation to. If
	// a Transaction matches this Transformation then the PlannedTransactionUUID will be given to
	// that matching Transaction, linking the two.
	PlannedTransactionUUID string `json:"planned_transaction_uuid,omitempty"`
	// DescriptionFormat presents a format that may be used for creating a new name for a matching
	// Transaction. The format may include numbered 'variables' captured by this Transformation's
	// pattern via Regular expression capture groups. For example; given the following regular
	// expression: '^SUZUKI FINANCE (\d+)$', the description format could be 'Suzuki Finance: $1'.
	// NOTE(review): unlike every other field this one has no json tag, so it
	// marshals as "DescriptionFormat" -- confirm whether that is intentional.
	DescriptionFormat string
}
|
<filename>cmd/server/authz.go
package main
import (
"crypto/tls"
"strings"
"github.com/pkg/errors"
)
// checkClientSNI returns a tls.ConnectionState validator that accepts a
// connection only when its SNI server name is `domain` itself or a subdomain
// of it.
//
// A bare strings.HasSuffix check is not sufficient: it would accept
// "evil-domain.com" when domain is "domain.com". We therefore require either
// an exact match or a suffix match on "." + domain. A leading dot on the
// configured domain is tolerated (treated the same as the bare domain).
func checkClientSNI(domain string) func(tls.ConnectionState) error {
	// Normalise once, outside the per-connection closure.
	bare := strings.TrimPrefix(domain, ".")
	suffix := "." + bare
	return func(cs tls.ConnectionState) error {
		if cs.ServerName != bare && !strings.HasSuffix(cs.ServerName, suffix) {
			return errors.Errorf("unauthorized domain name: %s", cs.ServerName)
		}
		return nil
	}
}
// checkPeerSAN returns a tls.ConnectionState validator requiring exactly one
// peer certificate that carries `required` among its DNS SANs.
func checkPeerSAN(required string) func(tls.ConnectionState) error {
	return func(cs tls.ConnectionState) error {
		if len(cs.PeerCertificates) != 1 {
			return errors.New("exactly one peer certificate is required")
		}
		// Accept as soon as any SAN matches; otherwise fall through to the error.
		for _, san := range cs.PeerCertificates[0].DNSNames {
			if san == required {
				return nil
			}
		}
		return errors.Errorf("%s must be present in SANs", required)
	}
}
|
// Auto-generated Doxygen navigation index for armnn::profiling::ProfilingGuid.
// Each row is [ display name, target xhtml anchor, child list ]. Do not edit
// by hand -- regenerate with Doxygen instead.
var classarmnn_1_1profiling_1_1_profiling_guid =
[
    [ "ProfilingGuid", "classarmnn_1_1profiling_1_1_profiling_guid.xhtml#ad2ab306c078af3bc68cd7c797fe66172", null ],
    [ "operator uint64_t", "classarmnn_1_1profiling_1_1_profiling_guid.xhtml#a5c63d22a5b2c943dee98c114da727d0f", null ],
    [ "operator!=", "classarmnn_1_1profiling_1_1_profiling_guid.xhtml#a7846f7c842ea6cc563b5b2992cb049b8", null ],
    [ "operator<", "classarmnn_1_1profiling_1_1_profiling_guid.xhtml#a468632a503f8b142365af3edc3c61fd3", null ],
    [ "operator<=", "classarmnn_1_1profiling_1_1_profiling_guid.xhtml#a668c2c01dbbb6ea557d3f1ca257d0c68", null ],
    [ "operator==", "classarmnn_1_1profiling_1_1_profiling_guid.xhtml#a72a432239d8751bf298be8717c684f0f", null ],
    [ "operator>", "classarmnn_1_1profiling_1_1_profiling_guid.xhtml#a0d588c55cd0b79e00d0dfc552ffb9f05", null ],
    [ "operator>=", "classarmnn_1_1profiling_1_1_profiling_guid.xhtml#a5e759aa4b5f950d5d5d2376c3a0529d6", null ],
    [ "m_Guid", "classarmnn_1_1profiling_1_1_profiling_guid.xhtml#ab6b3b4830bf0417aa54e88663048075a", null ]
];
/**
 * Holds a numeric security code. Codes set in (0, 1000) are normalised into
 * the four-digit range by adding 1000; non-positive input falls back to the
 * default code 1000; anything else is stored as given.
 */
public class SecurityCode {
    private int code;

    /** @return the currently stored security code */
    public int getCode() {
        return code;
    }

    /**
     * Store a security code, normalising small values and rejecting
     * non-positive ones (which are replaced by the default 1000).
     *
     * @param code the candidate code
     */
    public void setCode(int code) {
        if (code <= 0) {
            // Invalid input: fall back to the default code.
            this.code = 1000;
            return;
        }
        this.code = (code < 1000) ? code + 1000 : code;
    }
}
#!/bin/bash
# Dispatch helper: run the capacity finder ('c') or the simulator ('s').
# Usage: ./<script> <c|s> [extra args, echoed for logging only]
echo "$1" "$2" "$3"
PY=python3
# Quote "${1-}" so the comparison does not break (or glob) when the argument
# is missing, empty, or contains whitespace.
if [ "${1-}" = 'c' ]; then
    $PY cap_finder.py
elif [ "${1-}" = 's' ]; then
    # $PY sim.py
    $PY sim_wtrace_exp.py
else
    echo "Arg did not match!"
fi
package io.cattle.platform.servicediscovery.deployment.impl;
import io.cattle.platform.core.model.Service;
import io.cattle.platform.lock.definition.AbstractMultiLockDefinition;
import io.cattle.platform.lock.definition.LockDefinition;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
/**
 * Multi-lock over a set of services, so concurrent sidekick deployment work
 * on overlapping service sets is serialised.
 */
public class ServicesSidekickLock extends AbstractMultiLockDefinition {

    public ServicesSidekickLock(List<Service> services) {
        super(getLockDefinitions(services));
    }

    /**
     * Build one {@link ServiceLock} per service, in ascending id order.
     * The consistent ordering means every thread acquires the locks in the
     * same sequence, which prevents deadlock between overlapping lock sets.
     * NOTE(review): sorts the caller-supplied list in place -- confirm
     * callers do not rely on the original ordering.
     */
    protected static LockDefinition[] getLockDefinitions(List<Service> services) {
        LockDefinition[] result = new LockDefinition[services.size()];
        // sort so we don't run into situation when 2 threads try to acquire lock in diff order
        Collections.sort(services, new Comparator<Service>() {
            @Override
            public int compare(final Service s1, final Service s2) {
                return s1.getId().compareTo(s2.getId());
            }
        });
        int i = 0;
        for (Service service : services) {
            result[i++] = new ServiceLock(service);
        }
        return result;
    }
}
|
def num_combinations(elements, length):
    """Count length-``length`` sequences drawn with repetition from ``elements``.

    Equivalent to ``len(elements) ** length`` for positive lengths, replacing
    the O(length) recursion of the original implementation with the closed
    form.

    Args:
        elements: Sized collection of candidate elements.
        length: Desired sequence length.

    Returns:
        The number of possible sequences.
    """
    if length <= 0:
        # Preserved quirk: the original returned 0 for length == 0
        # (mathematically the empty sequence would count as 1). Negative
        # lengths previously recursed forever; now they also yield 0.
        return 0
    return len(elements) ** length
# Example usage -- `elements` and `length` must be defined by the caller
# (e.g. elements = ['a', 'b', 'c']; length = 2 gives 3**2 = 9):
print(num_combinations(elements, length))
# Output: 9
package org.openapitools.client.api
import argonaut._
import argonaut.EncodeJson._
import argonaut.DecodeJson._
import org.http4s.{EntityDecoder, EntityEncoder}
import org.http4s.argonaut._
import org.joda.time.DateTime
import Order._
// Petstore order model plus its JSON/HTTP codecs (OpenAPI-generated).
case class Order (
  id: Option[Long],
  petId: Option[Long],
  quantity: Option[Integer],
  shipDate: Option[DateTime],
  /* Order Status */
  status: Option[Status],
  complete: Option[Boolean])

object Order {
  import DateTimeCodecs._

  // Closed enumeration of order states.
  sealed trait Status
  case object Placed extends Status
  case object Approved extends Status
  case object Delivered extends Status

  object Status {
    // Parse a status string; None for anything unrecognised.
    // NOTE(review): matching is case-sensitive ("Placed", not "placed") --
    // confirm this matches the wire format the server actually emits.
    def toStatus(s: String): Option[Status] = s match {
      case "Placed" => Some(Placed)
      case "Approved" => Some(Approved)
      case "Delivered" => Some(Delivered)
      case _ => None
    }

    // Render a status back to its wire string (total over the sealed trait).
    def fromStatus(x: Status): String = x match {
      case Placed => "Placed"
      case Approved => "Approved"
      case Delivered => "Delivered"
    }
  }

  // Argonaut / http4s codec instances for (de)serialising Order over HTTP.
  implicit val StatusEnumEncoder: EncodeJson[Status] =
    EncodeJson[Status](is => StringEncodeJson(Status.fromStatus(is)))
  implicit val StatusEnumDecoder: DecodeJson[Status] =
    DecodeJson.optionDecoder[Status](n => n.string.flatMap(jStr => Status.toStatus(jStr)), "Status failed to de-serialize")
  implicit val OrderCodecJson: CodecJson[Order] = CodecJson.derive[Order]
  implicit val OrderDecoder: EntityDecoder[Order] = jsonOf[Order]
  implicit val OrderEncoder: EntityEncoder[Order] = jsonEncoderOf[Order]
}
|
<filename>src/commands/settings/MaxMentionsCommand.js
const ConfigCommand = require('../ConfigCommand');
const DisableMaxMentionsCommand = require('./maxmentions/DisableMaxMentionsCommand');
const GetMaxMentionsCommand = require('./maxmentions/GetMaxMentionsCommand');
const SetMaxMentionsCommand = require('./maxmentions/SetMaxMentionsCommand');
/**
 * `maxmentions` settings command: configures how many users may be mentioned
 * in a single message. Dispatches to its get/set/disable sub commands via the
 * ConfigCommand base class.
 */
class MaxMentionsCommand extends ConfigCommand {

    static description = 'Configure how many users a user should be allowed to mention in one message';

    static usage = 'get|set|disable';

    static names = ['maxmentions','maximummentions'];

    // The invoking user must be able to manage the guild.
    static userPerms = ['MANAGE_GUILD'];

    /** Sub commands implementing the get/set/disable verbs. */
    static getSubCommands() {
        return [
            DisableMaxMentionsCommand,
            GetMaxMentionsCommand,
            SetMaxMentionsCommand,
        ];
    }
}

module.exports = MaxMentionsCommand;
|
<filename>src/main/java/com/qk/carina/demo/api/GetCreatedUserMethod.java
package com.qk.carina.demo.api;
import com.qaprosoft.carina.core.foundation.api.AbstractApiMethodV2;
import com.qaprosoft.carina.core.foundation.utils.Configuration;
/**
 * Carina API method fetching a previously created user by id.
 * The null super-constructor arguments mean no request/response templates
 * are used; the endpoint URL placeholders are resolved below.
 */
public class GetCreatedUserMethod extends AbstractApiMethodV2 {

    public GetCreatedUserMethod(String userid) {
        super(null, null);
        // Endpoint host comes from the environment configuration (api_url).
        replaceUrlPlaceholder("base_url", Configuration.getEnvArg("api_url"));
        replaceUrlPlaceholder("userid", userid);
    }
}
|
#!/bin/bash
# -*-mode: ksh; ksh-indent: 2; -*-
# mod_parp regression driver: runs the htt test scripts against a freshly
# started server in three configurations, then scans sources and logs for
# debug leftovers and crashes.
./bootstrap.sh
ERRORS=0
WARNINGS=0

# Round 1: full suite with the modify-body hook enabled.
./ctl.sh start
STDS="main_func.htt loop.htt file.htt big.htt textplain.htt texthtml.htt body.htt anybody.htt mix.htt PARPContentLength.htt modify.htt modify_2.htt modify_3.htt modify_4.htt chunked.htt nbytes.htt"
for E in $STDS; do
  ./htt.sh -se scripts/${E}
  if [ $? -ne 0 ]; then
    ERRORS=`expr $ERRORS + 1`
    echo "FAILED $E"
  fi
done
./ctl.sh stop
sleep 1

# Round 2: reduced suite with the modify-body hook disabled.
./ctl.sh start -D DisableModifyBodyHook
STDS="main_func.htt loop.htt file.htt big.htt textplain.htt texthtml.htt body.htt mix.htt PARPContentLength.htt"
for E in $STDS; do
  ./htt.sh -se scripts/${E}
  if [ $? -ne 0 ]; then
    ERRORS=`expr $ERRORS + 1`
    echo "FAILED $E"
  fi
done
./ctl.sh stop
sleep 1

# Round 3: error-path script with error logging suppressed.
./ctl.sh start -D noerror
sleep 1
./htt.sh -s scripts/error.htt
if [ $? -ne 0 ]; then
  ERRORS=`expr $ERRORS + 1`
  echo "FAILED error.htt"
fi
./ctl.sh stop

# Source hygiene: '$$$' markers and stray fprintf calls are debug leftovers
# (grep exiting 1 means "not found", which is the good case here).
grep \\$\\$\\$ ../httpd_src/modules/parp/mod_parp.c
if [ $? -ne 1 ]; then
  WARNINGS=`expr $WARNINGS + 1`
  echo "WARNING: found pattern '\$\$\$'"
fi
LINES=`grep fprintf ../httpd_src/modules/parp/mod_parp.c | wc -l | awk '{print $1}'`
if [ $LINES != "0" ]; then
  WARNINGS=`expr $WARNINGS + 1`
  echo "WARNING: found pattern 'fprintf'"
fi
# A crashed child process leaves "exit signal" lines in the error log.
if [ `grep -c "exit signal" Server/logs/error_log` -gt 0 ]; then
  WARNINGS=`expr $WARNINGS + 1`
  echo "WARNING: found 'exit signal' message"
fi

if [ $WARNINGS -ne 0 ]; then
  echo "ERROR: got $WARNINGS warnings"
fi
if [ $ERRORS -ne 0 ]; then
  echo "ERROR: end with $ERRORS errors"
  exit 1
fi
# Any core dump anywhere in the tree fails the run.
CFS=`find . -name "*core*"`
if [ "$CFS" != "" ]; then
  echo "ERROR: found core file"
  exit 1
fi
echo "normal end"
exit 0
|
<reponame>mindhivenz/meteor-base<gh_stars>0
import {
observable,
computed,
action,
} from 'mobx'
import { app } from '@mindhive/di'
import { SUPER_USER } from '../roles'
const VIEWER_STATE_PATH = 'viewerState'

// Expects viewer data to be auto published
// MobX store for the signed-in user ("viewer"). Mirrors the server's user
// record when online, and falls back to a cached copy in storage so the UI
// can render (and work offline) before the subscription delivers data.
export default class ViewerStore {

  @observable loading = true
  // Use ref because we don't update the internals, only the reference,
  // and makes user pure JS (avoiding issues with Roles package thinking user.roles is not an array)
  @observable.ref user = null
  // True only once the *server* has confirmed authentication in this run;
  // `isAuthenticated` below may also be satisfied from the offline cache.
  @observable isAuthenticatedLive = false

  constructor() {
    const {
      Meteor,
      Users,
      Tracker,
      storage,
    } = app()
    let firstRun = true
    // Re-runs reactively whenever Meteor.userId() or the user doc changes.
    Tracker.autorun(() => {
      const userId = Meteor.userId()
      if (userId) {
        const user = Users.findOne(userId)
        if (user) {
          this._updateFromServer(user)
        } else if (firstRun) {
          const offlineState = storage.read(VIEWER_STATE_PATH)
          if (offlineState && offlineState.user && offlineState.user._id === userId) {
            // Assume user hasn't changed so we can work offline, and get a head start even if online
            this._updateFromOfflineState(offlineState)
          }
          // OK to leave all other state as-is, because initialised values are correct (we're firstRun)
        } else {
          this._waitingForViewerSubscription()
        }
      } else {
        this._updateFromServer(null)
      }
      firstRun = false
    })
  }

  // Logged in but the user document hasn't arrived yet: show loading.
  @action _waitingForViewerSubscription() {
    this.isAuthenticatedLive = false
    this.loading = true
    this._applyFromServer(null)
  }

  // Authoritative update from the server; also refreshes (or clears) the
  // offline cache in storage.
  @action _updateFromServer(user) {
    const { storage } = app()
    this.isAuthenticatedLive = !! user
    this._applyFromServer(user)
    this.loading = false
    if (user) {
      const offlineState = {}
      this._buildOfflineState(offlineState)
      storage.write(VIEWER_STATE_PATH, offlineState)
    } else {
      storage.write(VIEWER_STATE_PATH, null)
    }
  }

  _applyFromServer(user) {
    this.user = user
  }

  @action _updateFromOfflineState(state) {
    this._applyFromOfflineState(state)
    this.loading = false
  }

  _applyFromOfflineState(state) {
    this.user = state.user
  }

  // Collect the fields worth caching for offline startup.
  _buildOfflineState(state) {
    state.user = this.user
  }

  // True when a user is known, whether confirmed live or from the cache.
  @computed get isAuthenticated() {
    return !! this.user
  }

  hasRole(role) {
    return app().Roles.userIsInRole(this.user, role)
  }

  @computed get isSuperUser() {
    return this.hasRole(SUPER_USER)
  }
}
|
package common
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSliceContains_True(t *testing.T) {
s := []string{"resize", "start", "untag", "delete"}
result := SliceContains("delete", s)
assert.True(t, result)
}
func TestSliceContains_False(t *testing.T) {
s := []string{"resize", "start", "untag", "delete"}
result := SliceContains("foo", s)
assert.False(t, result)
}
|
package de.ids_mannheim.korap.web.filter;
import javax.ws.rs.core.SecurityContext;
import javax.ws.rs.ext.Provider;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import de.ids_mannheim.korap.authentication.http.HttpAuthorizationHandler;
import de.ids_mannheim.korap.constant.TokenType;
import de.ids_mannheim.korap.exceptions.KustvaktException;
import de.ids_mannheim.korap.security.context.KustvaktContext;
import de.ids_mannheim.korap.security.context.TokenContext;
/**
 * Jersey request filter that installs a "demo" user security context on
 * requests arriving without an Authorization header, so anonymous access
 * behaves like the built-in demo account.
 *
 * @author hanl
 * @date 08/02/2016
 */
@Provider
public class DemoFilter implements ContainerRequestFilter, ResourceFilter {

    @Override
    public ContainerRequest filter (ContainerRequest request) {
        String authentication =
                request.getHeaderValue(ContainerRequest.AUTHORIZATION);
        if (authentication == null || authentication.isEmpty()) {
            try {
                // Probe for an existing principal; the demo context is only
                // installed when this probe throws (no context configured).
                request.getUserPrincipal();
            }
            catch (UnsupportedOperationException e) {
                request.setSecurityContext(createContext());
            }
        }
        return request;
    }

    /**
     * Build a security context carrying Basic credentials for the demo user.
     */
    private SecurityContext createContext () {
        TokenContext context = new TokenContext();
        String token = null;
        try {
            token = HttpAuthorizationHandler
                    .createBasicAuthorizationHeaderValue("demo", "demo2015");
        }
        catch (KustvaktException e) {
            // Best-effort: the context is still returned with a null token.
            e.printStackTrace();
        }
        context.setToken(token);
        context.setTokenType(TokenType.BASIC);
        context.setUsername("demo");
        return new KustvaktContext(context);
    }

    @Override
    public ContainerRequestFilter getRequestFilter () {
        return this;
    }

    @Override
    public ContainerResponseFilter getResponseFilter () {
        // No response-side filtering is performed.
        return null;
    }
}
|
# A user-uploaded photo. Owned by a User, may appear in many albums (through
# PhotoInAlbum), and can carry comments. Images are stored via CarrierWave
# (PhotoUploader); listings are paginated with Kaminari.
class Photo < ApplicationRecord
  belongs_to :owner, class_name: 'User', inverse_of: :owned_photos
  has_many :photo_in_albums, dependent: :destroy
  has_many :albums, through: :photo_in_albums
  has_many :comments, dependent: :destroy

  validates :image, presence: true
  validates :description, allow_nil: true, length: {maximum: 1_000}

  mount_uploader :image, PhotoUploader
  # Kaminari page size for photo listings.
  paginates_per 8

  # Best-available URL for the requested +version+ (:thumbnail, :medium or
  # :default), falling back to the next larger rendition when the requested
  # file has not been generated yet.
  def image_url(version = :default)
    return image.thumbnail.url if version == :thumbnail and image.thumbnail.file.exists?
    return image.medium.url if version.in? %i[ medium thumbnail ] and image.medium.file.exists?
    image.url
  end
end
|
#!/bin/sh
#####################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#####################################################################

# set the parent directory as OFBiz Home
OFBIZ_HOME="$( cd -P "$( dirname "$0" )" && pwd )"/

# console log file
OFBIZ_LOG=runtime/logs/console.log

# delete the last log
rm -f $OFBIZ_LOG

# Allows to run from Jenkins. See http://wiki.jenkins-ci.org/display/JENKINS/ProcessTreeKiller. Cons: the calling Jenkins job does not terminate if the log is not enabled, pros: this allows to monitor the log in Jenkins
#BUILD_ID=dontKillMe

# JLR post Gradle comment, not sure this is still true...
echo $OFBIZ_HOME

# start ofbiz
# NOTE(review): relies on `gradlew` being resolvable via PATH after the cd --
# confirm; most setups invoke ./gradlew instead.
(cd "$OFBIZ_HOME" && exec gradlew ofbiz)
|
// Trait for running commands
trait Runnable {
    /// Execute the command.
    fn run(&self);
}

// Struct implementing the Runnable trait: the parameters needed to start
// the application against a swarm/network target.
struct StartCmd {
    url: String,
    waypoint: String,
    swarm_path: String,
    swarm_persona: String,
    is_operator: bool,
    use_upstream_url: bool,
}

impl Runnable for StartCmd {
    /// Start the application.
    /// Currently a stub: it only logs the configured parameters.
    fn run(&self) {
        // Implementation for starting the application
        println!("Starting the application with URL: {}, Waypoint: {}, Swarm Path: {}, Swarm Persona: {}, Operator Status: {}, Use Upstream URL: {}", self.url, self.waypoint, self.swarm_path, self.swarm_persona, self.is_operator, self.use_upstream_url);
    }
}

// Function to process a transaction command and update the blockchain network
// (stub: logs its arguments only).
fn process_transaction_command(url: &str, waypoint: &str, swarm_path: &str, swarm_persona: &str, is_operator: bool, use_upstream_url: bool) {
    // Implementation for processing the transaction command and updating the blockchain network
    println!("Processing transaction command with URL: {}, Waypoint: {}, Swarm Path: {}, Swarm Persona: {}, Operator Status: {}, Use Upstream URL: {}", url, waypoint, swarm_path, swarm_persona, is_operator, use_upstream_url);
}
#!/bin/bash
# Run the beaker acceptance suite (keeping the test VMs around for
# inspection) and render the captured ANSI log as HTML.
BEAKER_debug=on BEAKER_destroy=no bundle exec rake beaker 2>&1 | tee beaker.out
# Feed the log straight into the converter via redirection; the previous
# `cat beaker.out | ...` was a useless use of cat.
./ansi2html.sh < beaker.out > beaker.out.html
|
<reponame>youaxa/ara-poc-open<filename>server/src/main/java/com/decathlon/ara/service/dto/problem/ProblemFilterDTO.java
package com.decathlon.ara.service.dto.problem;
import com.decathlon.ara.domain.enumeration.DefectExistence;
import com.decathlon.ara.domain.enumeration.ProblemStatusFilter;
import com.decathlon.ara.domain.filter.ProblemFilter;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import lombok.experimental.Wither;
/**
 * Input of the problem filtering API.<br>
 * Same as {@link ProblemFilter} but without {@code projectId}, as this field is provided in REST API URL.
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
@Wither
public class ProblemFilterDTO {

    // Each field is an optional filter criterion; a null value means
    // "do not filter on this attribute".
    private String name;

    private ProblemStatusFilter status;

    private Long blamedTeamId;

    private String defectId;

    private DefectExistence defectExistence;

    private Long rootCauseId;
}
|
package com.me.keyword;
import cn.hutool.core.io.file.FileReader;
import lombok.SneakyThrows;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
/**
 * Ad-hoc benchmark: segments every line of Corpus.txt with the IK analyzer
 * and prints the lines containing any "hot word" from mydic.dic, plus the
 * total elapsed time.
 *
 * @author zs
 * @date 2021/10/30
 */
public class HotWord {
    public static void main(String[] args) throws IOException {
        // NOTE(review): hutool's FileReader uses a default charset -- confirm
        // both input files match it (they contain non-ASCII text).
        FileReader fileReader = new FileReader("Corpus.txt");
        List<String> lines = fileReader.readLines();
        FileReader hotWords = new FileReader("mydic.dic");
        List<String> hotWordsList = hotWords.readLines();
        // A Set gives O(1) membership checks per segmented token.
        Set<String> hotWordSet = new HashSet<>(hotWordsList);
        long startTime = System.currentTimeMillis();
        lines.parallelStream().forEach(new Consumer<String>() {
            @SneakyThrows
            @Override
            public void accept(String line) {
                // IKwordUtil.analyze: project helper performing IK segmentation.
                List<String> splits = IKwordUtil.analyze(line);
                for (String ss : splits) {
                    if (hotWordSet.contains(ss)) {
                        System.out.println(line + "包含热词:" + ss);
                    }
                }
            }
        });
        long endTime = System.currentTimeMillis();
        System.out.println(lines.size() + "条数据使用了:" + (endTime - startTime) + "ms");
    }
}
|
import { ApiServiceModule } from './api-service.module';

// Smoke test: the module class can be instantiated directly.
describe('ApiServiceModule', () => {
  let apiServiceModule: ApiServiceModule;

  beforeEach(() => {
    apiServiceModule = new ApiServiceModule();
  });

  it('should create an instance', () => {
    expect(apiServiceModule).toBeTruthy();
  });
});
|
#!/bin/bash
# shellcheck disable=SC1091
# Bitnami Apache container setup: validates the environment, links in
# user-mounted vhosts/certs/app content, and applies port configuration.

set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purpose

# Load libraries
. /opt/bitnami/scripts/libapache.sh

# Load Apache environment
. /opt/bitnami/scripts/apache-env.sh

# Ensure Apache environment variables are valid
apache_validate

# Ensure Apache daemon user exists when running as 'root'
am_i_root && ensure_user_exists "$APACHE_DAEMON_USER" --group "$APACHE_DAEMON_GROUP"

# Copy vhosts files
if ! is_dir_empty "/vhosts"; then
    info "Found mounted virtual hosts in '/vhosts'. Copying them to '${APACHE_BASE_DIR}/conf/vhosts'"
    cp -Lr "/vhosts/." "${APACHE_VHOSTS_DIR}"
fi

# Mount certificate files (legacy location takes precedence over /certs)
if ! is_dir_empty "${APACHE_BASE_DIR}/certs"; then
    warn "The directory '${APACHE_BASE_DIR}/certs' was externally mounted. This is a legacy configuration and will be deprecated soon. Please mount certificate files at '/certs' instead. Find an example at: https://github.com/bitnami/bitnami-docker-apache#using-custom-ssl-certificates"
    warn "Restoring certificates at '${APACHE_BASE_DIR}/certs' to '${APACHE_CONF_DIR}/bitnami/certs'"
    rm -rf "${APACHE_CONF_DIR}/bitnami/certs"
    ln -sf "${APACHE_BASE_DIR}/certs" "${APACHE_CONF_DIR}/bitnami/certs"
elif ! is_dir_empty "/certs"; then
    info "Mounting certificates files from '/certs'"
    rm -rf "${APACHE_CONF_DIR}/bitnami/certs"
    ln -sf "/certs" "${APACHE_CONF_DIR}/bitnami/certs"
fi

# Mount application files
if ! is_dir_empty "/app"; then
    info "Mounting application files from '/app'"
    rm -rf "$APACHE_HTDOCS_DIR"
    ln -sf "/app" "$APACHE_HTDOCS_DIR"
fi

# Restore persisted configuration files (deprecated)
if ! is_dir_empty "/bitnami/apache/conf"; then
    warn "The directory '/bitnami/apache/conf' was externally mounted. This is a legacy configuration and will be deprecated soon. Please mount certificate files at '${APACHE_CONF_DIR}' instead. Find an example at: https://github.com/bitnami/bitnami-docker-apache#full-configuration"
    warn "Restoring configuration at '/bitnami/apache/conf' to '${APACHE_CONF_DIR}'"
    rm -rf "$APACHE_CONF_DIR"
    ln -sf "/bitnami/apache/conf" "$APACHE_CONF_DIR"
fi

# Update ports in configuration
[[ -n "$APACHE_HTTP_PORT_NUMBER" ]] && info "Configuring the HTTP port" && apache_configure_http_port "$APACHE_HTTP_PORT_NUMBER"
[[ -n "$APACHE_HTTPS_PORT_NUMBER" ]] && info "Configuring the HTTPS port" && apache_configure_https_port "$APACHE_HTTPS_PORT_NUMBER"

# Fix logging issue when running as root
! am_i_root || chmod o+w "$(readlink /dev/stdout)" "$(readlink /dev/stderr)"
|
#!/bin/bash
# Build and publish svenruppert/maven-3.6.0-zulu:1.8.192 unless that tag is
# already present on Docker Hub, then remove the local tags.

# Return success (0) iff tag $2 exists for repository $1 on Docker Hub.
function docker_tag_exists() {
    local exists
    # Query all tags for the repo and ask jq whether any name matches $2.
    exists=$(curl -s "https://hub.docker.com/v2/repositories/$1/tags/?page_size=10000" | jq -r "[.results | .[] | .name == \"$2\"] | any")
    # Quote the result: an empty curl/jq response previously made `test`
    # fail with a syntax error instead of returning false.
    [ "$exists" = "true" ]
}

if docker_tag_exists svenruppert/maven-3.6.0-zulu 1.8.192; then
    echo skip building, image already existing - svenruppert/maven-3.6.0-zulu 1.8.192
else
    echo start building the images
    docker build -t svenruppert/maven-3.6.0-zulu .
    docker tag svenruppert/maven-3.6.0-zulu:latest svenruppert/maven-3.6.0-zulu:1.8.192
    docker push svenruppert/maven-3.6.0-zulu:1.8.192
fi
docker image rm svenruppert/maven-3.6.0-zulu:latest
docker image rm svenruppert/maven-3.6.0-zulu:1.8.192
/**
 * Render a Bootstrap card grid for the given clients.
 *
 * @param iterable $clients Objects exposing an `image_path` property.
 * @return string HTML: one card per client plus an (empty) pagination row.
 */
function generateClientGrid($clients) {
    $html = '<div class="row">';
    foreach ($clients as $client) {
        // Escape the path: image_path may originate from user-supplied data,
        // and emitting it raw allowed attribute-injection XSS in the src="".
        $src = htmlspecialchars($client->image_path, ENT_QUOTES, 'UTF-8');
        $html .= '<div class="col-sm-6 col-md-3 col-lg-3">';
        $html .= '<div class="card">';
        $html .= '<img class="card-img" src="' . $src . '" alt="Card image">';
        $html .= '</div>';
        $html .= '</div>';
    }
    $html .= '</div>';
    // Empty centred row reserved for pagination links (e.g. Laravel's paginator).
    $html .= '<div class="row">';
    $html .= '<div class="d-flex justify-content-center">';
    $html .= '</div>';
    $html .= '</div>';
    return $html;
}
// Usage example: each client only needs an `image_path` property.
$clients = [
    (object)['image_path' => 'client1.jpg'],
    (object)['image_path' => 'client2.jpg'],
    (object)['image_path' => 'client3.jpg'],
    // ... more client objects
];
echo generateClientGrid($clients);
/**
 * Reverse a string.
 *
 * Operates on UTF-16 code units (split('')), matching the behaviour of
 * index-based reversal.
 *
 * @param {string} word - The string to reverse.
 * @returns {string} The reversed string.
 */
function reverseWord(word) {
  return word.split('').reverse().join('');
}

const result = reverseWord("word");
console.log(result);
<reponame>prajnakurkal/Sound-Effect-Piano
import java.awt.*;
import javax.swing.*;
import java.awt.event.*;
// Sound-effect "piano": a JLayeredPane with 8 white keys (layer 0) and 5
// black keys (layer 1). Pressing a key plays its mapped WAV via StdAudio.
class LayeredPaneExample extends JFrame implements ActionListener
{
    JLayeredPane pane;
    JButton [] white = new JButton [8];
    JButton [] black = new JButton [5];
    // One sound file per key, index-aligned with the button arrays above.
    String [] whiteSounds = {"drum_roll_rimshot.wav", "hit_with_frying_pan_y.wav", "glass_shatter2.wav", "fart_z.wav",
        "gasp_ohhh.wav", "fanfare_x.wav","honk_x.wav", "scream_x.wav"};
    String [] blackSounds = {"hammer_anvil3.wav", "doorbell_x.wav","quick_fart_x.wav", "gun_44mag_11.wav", "gurgle_x.wav"};

    public LayeredPaneExample()
    {
        super("LayeredPane Example");
        setSize(1600, 800);
        pane = getLayeredPane();
        setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        setResizable(false);
        whiteKeys();
        blackKeys();
    }

    // Lay out the 8 white keys edge to edge (200px wide each) on layer 0.
    public void whiteKeys()
    {
        for(int i = 0; i < 1600; i += 200)
        {
            JButton whiteKey = new JButton();
            whiteKey.setBackground(Color.white);
            whiteKey.setBounds(i, 0, 200, 1600);
            whiteKey.addActionListener(this);
            white[i/200] = whiteKey;
            // NOTE(review): `new Integer` is deprecated; Integer.valueOf(0) preferred.
            pane.add(whiteKey, new Integer(0));
        }
    }

    // Lay out the black keys between white keys on layer 1, skipping the
    // gaps of a piano octave at x=550 and x=1350.
    public void blackKeys()
    {
        for(int i = 150; i < 1400; i += 200)
        {
            JButton blackKey = new JButton();
            blackKey.setBackground(Color.black);
            blackKey.addActionListener(this);
            if(i != 550 && i != 1350)
            {
                blackKey.setBounds(i, 0, 100, 350);
                // i/280 happens to map the five kept positions to indices 0-4.
                // NOTE(review): fragile -- breaks if the key spacing changes.
                black[i/280] = blackKey;
                pane.add(blackKey, new Integer(1));
            }
        }
    }

    // Play whichever sound is mapped to the key that fired the event.
    public void actionPerformed(ActionEvent e)
    {
        for(int i = 0; i < 8; i++)
        {
            if(e.getSource() == white[i])
                StdAudio.play(whiteSounds[i]);
        }
        for(int i = 0; i < 5; i++)
        {
            if(e.getSource() == black[i])
                StdAudio.play(blackSounds[i]);
        }
    }

    public static void main(String[] args)
    {
        LayeredPaneExample panel = new LayeredPaneExample();
        panel.setVisible(true);
    }
}
import { Injectable , OnInit } from '@angular/core';
import { CanActivate, ActivatedRouteSnapshot, RouterStateSnapshot } from '@angular/router';
import { Router } from "@angular/router";
import { SesssionStorageService } from '../service/storage'
@Injectable({providedIn : 'root'})
export class RouteguardService implements CanActivate, OnInit {

  constructor(
    private router: Router,
    private sgo: SesssionStorageService,
  ) { }

  ngOnInit() {
  }

  /**
   * Allow navigation only when login info exists in session storage;
   * otherwise redirect to the login page and block the route.
   */
  canActivate(route: ActivatedRouteSnapshot, state: RouterStateSnapshot): boolean {
    const userInfo = this.sgo.get("loginInfo");
    if (userInfo) {
      return true;
    }
    this.router.navigate(['/passport/login']);
    // Fixed: this branch previously returned true, which let unauthenticated
    // users straight through to the guarded route despite the redirect.
    return false;
  }
}
|
#!/usr/bin/env bash
#
# Generate AOSP compatible vendor data for provided device & buildID
#
set -e # fail on unhandled error
set -u # fail on undefined variable
#set -x # debug
readonly SCRIPTS_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Helper script to download Nexus factory images from web
readonly DOWNLOAD_SCRIPT="$SCRIPTS_ROOT/scripts/download-nexus-image.sh"
# Helper script to extract system & vendor images data
readonly EXTRACT_SCRIPT="$SCRIPTS_ROOT/scripts/extract-factory-images.sh"
# Helper script to generate "proprietary-blobs.txt" file
readonly GEN_BLOBS_LIST_SCRIPT="$SCRIPTS_ROOT/scripts/gen-prop-blobs-list.sh"
# Helper script to de-optimize bytecode prebuilts
readonly REPAIR_SCRIPT="$SCRIPTS_ROOT/scripts/system-img-repair.sh"
# Helper script to generate vendor AOSP includes & makefiles
readonly VGEN_SCRIPT="$SCRIPTS_ROOT/scripts/generate-vendor.sh"
# Change this if you don't want to apply used Java version system-wide
readonly LC_J_HOME="/usr/local/java/jdk1.8.0_71/bin/java"
declare -a sysTools=("mkdir" "readlink" "dirname")
declare -a availDevices=("bullhead" "flounder" "angler")
abort() {
exit $1
}
usage() {
cat <<_EOF
Usage: $(basename $0) [options]
OPTIONS:
-d|--device : Device codename (angler, bullhead, etc.)
-a|--alias : Device alias (e.g. flounder volantis (WiFi) vs volantisg (LTE))
-b|--buildID : BuildID string (e.g. MMB29P)
-o|--output : Path to save generated vendor data
-i|--imgs-tar : Read factory tar from file instead of downloading (optional)
-k|--keep : Keep all factory images extracted & de-optimized data (optional)
_EOF
abort 1
}
# Succeed (exit 0) when "$1" resolves to a command, builtin, function
# or alias in the current environment; all output is suppressed.
command_exists() {
  type "$1" > /dev/null 2>&1
}
# Abort with an error message unless the effective user is root.
run_as_root() {
  if (( EUID != 0 )); then
    echo "[-] Script must run as root"
    abort 1
  fi
}
# Check that system tools exist
for i in "${sysTools[@]}"
do
  if ! command_exists "$i"; then
    echo "[-] '$i' command not found"
    abort 1
  fi
done

# Resolve Java location: prefer the java found in PATH; fall back to the
# hardcoded LC_J_HOME path when no java is on the PATH.
readonly JAVALINK=$(which java)
if [[ "$JAVALINK" == "" ]]; then
  echo "[!] Java binary not found in path, using hardcoded path"
  if [ ! -f "$LC_J_HOME" ]; then
    echo "[-] '$LC_J_HOME' not found in system"
    abort 1
  fi
  export JAVA_HOME="$LC_J_HOME"
  export PATH="$(dirname "$LC_J_HOME"):$PATH"
else
  readonly JAVAPATH=$(readlink -f "$JAVALINK")
  readonly JAVADIR=$(dirname "$JAVAPATH")
  export JAVA_HOME="$JAVAPATH"
  export PATH="$JAVADIR":$PATH
fi

# Check if script run as root
run_as_root

DEVICE=""
BUILDID=""
OUTPUT_DIR=""
INPUT_IMGS_TAR=""
KEEP_DATA=false
HOST_OS=""
DEV_ALIAS=""

# Parse command line arguments.
# BUG FIX: use the numeric -gt test; '>' inside [[ ]] is a lexicographic
# string comparison, not an arithmetic one.
while [[ $# -gt 0 ]]
do
  arg="$1"
  case $arg in
    -o|--output)
      OUTPUT_DIR=$(echo "$2" | sed 's:/*$::')
      shift
      ;;
    -d|--device)
      DEVICE=$(echo "$2" | tr '[:upper:]' '[:lower:]')
      shift
      ;;
    -a|--alias)
      DEV_ALIAS=$(echo "$2" | tr '[:upper:]' '[:lower:]')
      shift
      ;;
    -b|--buildID)
      BUILDID=$(echo "$2" | tr '[:upper:]' '[:lower:]')
      shift
      ;;
    -i|--imgs-tar)
      INPUT_IMGS_TAR="$2"
      shift
      ;;
    -k|--keep)
      KEEP_DATA=true
      ;;
    *)
      echo "[-] Invalid argument '$1'"
      usage
      ;;
  esac
  shift
done

# Validate mandatory arguments.
if [[ "$DEVICE" == "" ]]; then
  echo "[-] device codename cannot be empty"
  usage
fi
if [[ "$BUILDID" == "" ]]; then
  echo "[-] buildID cannot be empty"
  usage
fi
if [[ "$OUTPUT_DIR" == "" || ! -d "$OUTPUT_DIR" ]]; then
  echo "[-] Output directory not found"
  usage
fi
if [[ "$INPUT_IMGS_TAR" != "" && ! -f "$INPUT_IMGS_TAR" ]]; then
  echo "[-] '$INPUT_IMGS_TAR' file not found"
  abort 1
fi

# Adjust hosts tools based on OS
HOST_OS=$(uname)
if [[ "$HOST_OS" != "Linux" && "$HOST_OS" != "Darwin" ]]; then
  # BUG FIX: the message was single-quoted around an unquoted expansion,
  # which breaks word splitting; use one double-quoted string.
  echo "[-] '$HOST_OS' OS is not supported"
  abort 1
fi

# Check if supported device
deviceOK=false
for devNm in "${availDevices[@]}"
do
  if [[ "$devNm" == "$DEVICE" ]]; then
    deviceOK=true
  fi
done
if [ "$deviceOK" = false ]; then
  echo "[-] '$DEVICE' is not supported"
  abort 1
fi

# Prepare output dir structure
OUT_BASE="$OUTPUT_DIR/$DEVICE/$BUILDID"
if [ ! -d "$OUT_BASE" ]; then
  mkdir -p "$OUT_BASE"
fi
FACTORY_IMGS_DATA="$OUT_BASE/factory_imgs_data"
FACTORY_IMGS_R_DATA="$OUT_BASE/factory_imgs_repaired_data"
echo "[*] Setting output base to '$OUT_BASE'"

# Download images if not provided
if [[ "$INPUT_IMGS_TAR" == "" ]]; then
  # Factory image alias for devices with naming incompatibilities with AOSP
  if [[ "$DEVICE" == "flounder" && "$DEV_ALIAS" == "" ]]; then
    echo "[-] Building for flounder requires setting the device alias option - 'volantis' or 'volantisg'"
    abort 1
  fi
  if [[ "$DEV_ALIAS" == "" ]]; then
    DEV_ALIAS="$DEVICE"
  fi
  if ! "$DOWNLOAD_SCRIPT" --device "$DEVICE" --alias "$DEV_ALIAS" \
       --buildID "$BUILDID" --output "$OUT_BASE"; then
    echo "[-] Images download failed"
    abort 1
  fi
  archName="$(find "$OUT_BASE" -iname "*$DEV_ALIAS*$BUILDID*.tgz" | head -1)"
else
  archName="$INPUT_IMGS_TAR"
fi

# Clear old data if present & extract data from factory images
if [ -d "$FACTORY_IMGS_DATA" ]; then
  rm -rf "$FACTORY_IMGS_DATA"/*
else
  mkdir -p "$FACTORY_IMGS_DATA"
fi
if ! "$EXTRACT_SCRIPT" --input "$archName" --output "$FACTORY_IMGS_DATA" \
     --simg2img "$SCRIPTS_ROOT/hostTools/$HOST_OS/simg2img"; then
  echo "[-] Factory images data extract failed"
  abort 1
fi

# Generate unified readonly "proprietary-blobs.txt"
if ! "$GEN_BLOBS_LIST_SCRIPT" --input "$FACTORY_IMGS_DATA/vendor" \
     --output "$SCRIPTS_ROOT/$DEVICE" \
     --sys-list "$SCRIPTS_ROOT/$DEVICE/system-proprietary-blobs.txt"; then
  echo "[-] 'proprietary-blobs.txt' generation failed"
  abort 1
fi

# De-optimize bytecode from system partition
if [ -d "$FACTORY_IMGS_R_DATA" ]; then
  rm -rf "$FACTORY_IMGS_R_DATA"/*
else
  mkdir -p "$FACTORY_IMGS_R_DATA"
fi
if ! "$REPAIR_SCRIPT" --input "$FACTORY_IMGS_DATA/system" \
     --output "$FACTORY_IMGS_R_DATA" \
     --oat2dex "$SCRIPTS_ROOT/hostTools/Java/oat2dex.jar"; then
  echo "[-] System partition de-optimization failed"
  abort 1
fi

# Bytecode under vendor partition doesn't require de-opt for (up to now)
# However, move it to repaired data directory to have a single source for
# next script
mv "$FACTORY_IMGS_DATA/vendor" "$FACTORY_IMGS_R_DATA"

# Copy vendor partition image size as saved from $EXTRACT_SCRIPT script
# $VGEN_SCRIPT will fail over to last known working default if image size
# file not found when parsing data
cp "$FACTORY_IMGS_DATA/vendor_partition_size" "$FACTORY_IMGS_R_DATA"

if ! "$VGEN_SCRIPT" --input "$FACTORY_IMGS_R_DATA" --output "$OUT_BASE" \
     --blobs-list "$SCRIPTS_ROOT/$DEVICE/proprietary-blobs.txt"; then
  echo "[-] Vendor generation failed"
  abort 1
fi

# Clean up intermediate data unless the user asked to keep it.
if [ "$KEEP_DATA" = false ]; then
  rm -rf "$FACTORY_IMGS_DATA"
  rm -rf "$FACTORY_IMGS_R_DATA"
fi

echo "[*] All actions completed successfully"
# BUG FIX: the quotes around $OUT_BASE/vendor previously closed and
# reopened the string, leaving the expansion unquoted.
echo "[*] Import '$OUT_BASE/vendor' to AOSP root"
abort 0
|
<filename>src/Chapter2_1Text/Date.java
package Chapter2_1Text;
public class Date implements Comparable<Date> {
private final int day;
private final int month;
private final int year;
public Date(int d, int m, int y) {
day = d;
month = m;
year = y;
}
public int day() {
return day;
}
public int month() {
return month;
}
public int year() {
return year;
}
public int compareTo(Date that) {
if (this.year > that.year) return +1;
if (this.year < that.year) return -1;
if (this.month > that.month) return +1;
if (this.month < that.month) return -1;
if (this.day > that.day) return +1;
if (this.day < that.day) return -1;
return 0;
}
public String toString() {
return month + "/" + day + "/" + year;
}
}
|
<reponame>wuximing/dsshop
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.getComponentController = exports.getComponentControllerNames = exports.unregisterComponentController = exports.registerComponentController = void 0;
// Registry of globally registered component controllers.
// BUG FIX / hardening: backed by a null-prototype object so that inherited
// Object.prototype keys (e.g. "toString", "constructor") can never be
// mistaken for registered controllers (prototype-pollution hazard with a
// plain {} dictionary keyed by external names).
var LOAD_COMPONENT_CONTROLLERS = Object.create(null);
/**
 * Register a component controller globally.
 * @param name   component name
 * @param plugin component controller class to register
 * @returns void
 */
function registerComponentController(name, plugin) {
    LOAD_COMPONENT_CONTROLLERS[name] = plugin;
}
exports.registerComponentController = registerComponentController;
/**
 * Remove a globally registered component controller.
 * @param name component name
 * @returns void
 */
function unregisterComponentController(name) {
    delete LOAD_COMPONENT_CONTROLLERS[name];
}
exports.unregisterComponentController = unregisterComponentController;
/**
 * Get the names of all registered component controllers.
 * @returns string[] the registered component names
 */
function getComponentControllerNames() {
    return Object.keys(LOAD_COMPONENT_CONTROLLERS);
}
exports.getComponentControllerNames = getComponentControllerNames;
/**
 * Look up a component controller class by name.
 * @param name component name
 * @returns the registered controller class, or undefined when not registered
 */
function getComponentController(name) {
    return LOAD_COMPONENT_CONTROLLERS[name];
}
exports.getComponentController = getComponentController;
//# sourceMappingURL=index.js.map
import React from 'react';
export interface IconChromeProps extends React.SVGAttributes<SVGElement> {
  color?: string;
  size?: string | number;
  className?: string;
  style?: React.CSSProperties;
}

/**
 * Chrome (browser) icon rendered as a 24x24 feather-style SVG.
 *
 * Defaults: color = 'currentColor', size = '1em'. Any extra SVG attributes
 * (including className) are spread onto the <svg> element and override the
 * built-in attributes.
 */
export const IconChrome: React.FC<IconChromeProps> = (
  props: IconChromeProps
): React.ReactElement => {
  // FIX: React.SFC is deprecated (removed from @types/react for React 18),
  // and defaultProps on function components is deprecated — defaults are
  // now supplied via destructuring instead of IconChrome.defaultProps.
  const { color = 'currentColor', size = '1em', style, ...restProps } = props;
  return (
    <svg
      xmlns="http://www.w3.org/2000/svg"
      width={size}
      height={size}
      viewBox="0 0 24 24"
      fill="none"
      stroke={color}
      className="feather feather-chrome"
      strokeWidth="2"
      strokeLinecap="round"
      strokeLinejoin="round"
      style={{ verticalAlign: 'middle', ...style }}
      {...restProps}
    >
      <circle cx="12" cy="12" r="10" />
      <circle cx="12" cy="12" r="4" />
      <line x1="21.17" y1="8" x2="12" y2="8" />
      <line x1="3.95" y1="6.06" x2="8.54" y2="14" />
      <line x1="10.88" y1="21.94" x2="15.46" y2="14" />
    </svg>
  );
};

export default IconChrome;
|
<reponame>lanpinguo/rootfs_build<filename>u-boot/drivers/video/sunxi/disp2/disp/de/lowlevel_sun8iw11/de_lcd_type.h
#ifndef __DE_LCD_TYPE_H__
#define __DE_LCD_TYPE_H__
#include "de_lcd.h"
//
// detail information of registers
//
// TCON global control register (offset 0x000 in __de_lcd_dev_t).
// Like every register type in this header, it is accessible either as the
// raw 32-bit word (dwval) or via the individual bit-fields; field widths
// sum to exactly 32 bits.
typedef union
{
    u32 dwval;
    struct
    {
        u32 io_map_sel : 1 ; // default: 0;
        u32 res0 : 29 ; // default: ;
        u32 tcon_gamma_en : 1 ; // default: 0;
        u32 tcon_en : 1 ; // default: 0;
    } bits;
} tcon_gctl_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 tcon_irq_flag : 16 ; // default: 0;
u32 tcon_irq_en : 16 ; // default: 0;
} bits;
} tcon_gint0_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 tcon1_line_int_num : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 tcon0_line_int_num : 12 ; // default: 0;
u32 res1 : 4 ; // default: ;
} bits;
} tcon_gint1_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 tcon0_frm_test : 2 ; // default: 0;
u32 res0 : 2 ; // default: ;
u32 tcon0_frm_mode_b : 1 ; // default: 0;
u32 tcon0_frm_mode_g : 1 ; // default: 0;
u32 tcon0_frm_mode_r : 1 ; // default: 0;
u32 res1 : 24 ; // default: ;
u32 tcon0_frm_en : 1 ; // default: 0;
} bits;
} tcon0_frm_ctl_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 seed_value : 13 ; // default: 0;
u32 res0 : 19 ; // default: ;
} bits;
} tcon0_frm_seed_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 frm_table_value ; // default: 0;
} bits;
} tcon0_frm_tab_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 src_sel : 3 ; // default: 0;
u32 res0 : 1 ; // default: ;
u32 start_delay : 5 ; // default: 0;
u32 res1 : 11 ; // default: ;
u32 interlace_en : 1 ; // default: 0;
u32 fifo1_rst : 1 ; // default: 0;
u32 test_value : 1 ; // default: 0;
u32 rb_swap : 1 ; // default: 0;
u32 tcon0_if : 2 ; // default: 0;
u32 res2 : 2 ; // default: ;
u32 tcon0_work_mode : 1 ; // default: 0;
u32 res3 : 2 ; // default: ;
u32 tcon0_en : 1 ; // default: 0;
} bits;
} tcon0_ctl_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 tcon0_dclk_div : 7 ; // default: 0;
u32 res0 : 21 ; // default: ;
u32 tcon0_dclk_en : 4 ; // default: 0;
} bits;
} tcon0_dclk_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 y : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 x : 12 ; // default: 0;
u32 res1 : 4 ; // default: ;
} bits;
} tcon0_basic0_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 hbp : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 ht : 13 ; // default: 0;
u32 res1 : 2 ; // default: ;
u32 reservd : 1 ; // default: 0;
} bits;
} tcon0_basic1_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 vbp : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 vt : 13 ; // default: 0;
u32 res1 : 3 ; // default: ;
} bits;
} tcon0_basic2_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 vspw : 10 ; // default: 0;
u32 res0 : 6 ; // default: ;
u32 hspw : 10 ; // default: 0;
u32 res1 : 6 ; // default: ;
} bits;
} tcon0_basic3_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 res0 : 20 ; // default: ;
u32 syuv_fdly : 2 ; // default: 0;
u32 syuv_seq : 2 ; // default: 0;
u32 srgb_seq : 4 ; // default: 0;
u32 hv_mode : 4 ; // default: 0;
} bits;
} tcon0_hv_if_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 trigger_en : 1 ; // default: 0;
u32 trigger_start : 1 ; // default: 0;
u32 trigger_fifo_en : 1 ; // default: 0;
u32 trigger_fifo_bist_en : 1 ; // default: 0;
u32 trigger_sync_mode : 2 ; // default: 0;
u32 res0 : 10 ; // default: ;
u32 flush : 1 ; // default: 0;
u32 auto_ : 1 ; // default: 0;
u32 res1 : 4 ; // default: ;
u32 rd_flag : 1 ; // default: 0;
u32 wr_flag : 1 ; // default: 0;
u32 vsync_cs_sel : 1 ; // default: 0;
u32 ca : 1 ; // default: 0;
u32 da : 1 ; // default: 0;
u32 res2 : 1 ; // default: ;
u32 cpu_mode : 4 ; // default: 0;
} bits;
} tcon0_cpu_if_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 data_wr : 24 ; // default: 0;
u32 res0 : 8 ; // default: ;
} bits;
} tcon0_cpu_wr_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 data_rd0 : 24 ; // default: ;
u32 res0 : 8 ; // default: ;
} bits;
} tcon0_cpu_rd0_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 data_rd1 : 24 ; // default: ;
u32 res0 : 8 ; // default: ;
} bits;
} tcon0_cpu_rd1_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 tcon0_lvds_data_revert : 4 ; //default; 0:revert
u32 tcon0_lvds_clk_revert : 1 ; //default; 0: revert
u32 res0 : 15 ; // default: ;
u32 tcon0_lvds_clk_sel : 1 ; // default: 0;
u32 res1 : 2 ; // default: ;
u32 tcon0_lvds_correct_mode : 1 ; // default: 0;
u32 tcon0_lvds_debug_mode : 1 ; // default: 0;
u32 tcon0_lvds_debug_en : 1 ; // default: 0;
u32 tcon0_lvds_bitwidth : 1 ; // default: 0;
u32 tcon0_lvds_mode : 1 ; // default: 0;
u32 tcon0_lvds_dir : 1 ; // default: 0;
u32 tcon0_lvds_even_odd_dir : 1 ; // default: 0;
u32 tcon0_lvds_link : 1 ; // default: 0;
u32 tcon0_lvds_en : 1 ; // default: 0;
} bits;
} tcon0_lvds_if_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 data_inv : 24 ; // default: 0;
u32 sync_inv : 2 ; // default: 0;
u32 clk_inv : 1 ; // default: 0;
u32 de_inv : 1 ; // default: 0;
u32 dclk_sel : 3 ; // default: 0;
u32 io_output_sel : 1 ; // default: 0;
} bits;
} tcon0_io_pol_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 data_output_tri_en : 24 ; // default: 0xffffff;
u32 io0_output_tri_en : 1 ; // default: 1;
u32 io1_output_tri_en : 1 ; // default: 1;
u32 io2_output_tri_en : 1 ; // default: 1;
u32 io3_output_tri_en : 1 ; // default: 1;
u32 rgb_endian : 1 ; // default: ;
u32 res0 : 3 ; // default: ;
} bits;
} tcon0_io_tri_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 src_sel : 2 ; // default: 0;
u32 res0 : 2 ; // default: ;
u32 start_delay : 5 ; // default: 0;
u32 res1 : 11 ; // default: ;
u32 interlace_en : 1 ; // default: 0;
u32 res2 : 10 ; // default: ;
u32 tcon1_en : 1 ; // default: 0;
} bits;
} tcon1_ctl_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 y : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 x : 12 ; // default: 0;
u32 res1 : 4 ; // default: ;
} bits;
} tcon1_basic0_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 ls_yo : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 ls_xo : 12 ; // default: 0;
u32 res1 : 4 ; // default: ;
} bits;
} tcon1_basic1_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 yo : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 xo : 12 ; // default: 0;
u32 res1 : 4 ; // default: ;
} bits;
} tcon1_basic2_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 hbp : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 ht : 13 ; // default: 0;
u32 res1 : 3 ; // default: ;
} bits;
} tcon1_basic3_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 vbp : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 vt : 13 ; // default: 0;
u32 res1 : 3 ; // default: ;
} bits;
} tcon1_basic4_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 vspw : 10 ; // default: 0;
u32 res0 : 6 ; // default: ;
u32 hspw : 10 ; // default: 0;
u32 res1 : 6 ; // default: ;
} bits;
} tcon1_basic5_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 sync_y : 16 ; // default: 0;
u32 sync_x : 16 ; // default: 0;
} bits;
} tcon1_ps_sync_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 data_inv : 24 ; // default: 0;
u32 io0_inv : 1 ; // default: 0;
u32 io1_inv : 1 ; // default: 0;
u32 io2_inv : 1 ; // default: 0;
u32 io3_inv : 1 ; // default: 0;
u32 res0 : 4 ; // default: ;
} bits;
} tcon1_io_pol_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 data_output_tri_en : 24 ; // default: 0xffffff;
u32 io0_output_tri_en : 1 ; // default: 1;
u32 io1_output_tri_en : 1 ; // default: 1;
u32 io2_output_tri_en : 1 ; // default: 1;
u32 io3_output_tri_en : 1 ; // default: 1;
u32 res0 : 4 ; // default: ;
} bits;
} tcon1_io_tri_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 ecc_fifo_setting : 8 ; // default: ;
u32 ecc_fifo_blank_en : 1 ; // default: ;
u32 res0 : 7 ; // default: ;
u32 ecc_fifo_err_bits : 8 ; // default: ;
u32 res1 : 6 ; // default: ;
u32 ecc_fifo_err_flag : 1 ; // default: ;
u32 ecc_fifo_bist_en : 1 ; // default: ;
} bits;
} tcon_ecc_fifo_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 tcon1_current_line : 12 ; // default: ;
u32 res0 : 1 ; // default: ;
u32 ecc_fifo_bypass : 1 ; // default: 0;
u32 res1 : 2 ; // default: ;
u32 tcon0_current_line : 12 ; // default: ;
u32 tcon1_field_polarity : 1 ; // default: ;
u32 tcon0_field_polarity : 1 ; // default: ;
u32 tcon1_fifo_under_flow : 1 ; // default: ;
u32 tcon0_fifo_under_flow : 1 ; // default: ;
} bits;
} tcon_debug_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 res0 : 31 ; // default: ;
u32 ceu_en : 1 ; // default: 0;
} bits;
} tcon_ceu_ctl_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 value : 13 ; // default: 0;
u32 res0 : 19 ; // default: ;
} bits;
} tcon_ceu_coef_mul_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 value : 19 ; // default: 0;
u32 res0 : 13 ; // default: ;
} bits;
} tcon_ceu_coef_add_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 max : 8 ; // default: 0;
u32 res0 : 8 ; // default: ;
u32 min : 8 ; // default: 0;
u32 res1 : 8 ; // default: ;
} bits;
} tcon_ceu_coef_rang_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 block_size : 12 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 block_space : 12 ; // default: 0;
u32 res1 : 4 ; // default: ;
} bits;
} tcon0_cpu_tri0_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 block_num : 16 ; // default: 0;
u32 block_current_num : 16 ; // default: 0;
} bits;
} tcon0_cpu_tri1_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 trans_start_set : 13 ; // default: 0;
u32 sync_mode : 2 ; // default: 0;
u32 trans_start_mode : 1 ; // default: 0;
u32 start_delay : 16 ; // default: 0x20;
} bits;
} tcon0_cpu_tri2_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 counter_m : 8 ; // default: 0;
u32 counter_n : 16 ; // default: 0;
u32 res0 : 4 ; // default: ;
u32 tri_int_mode : 2 ; // default: 0;
u32 res1 : 2 ; // default: ;
} bits;
} tcon0_cpu_tri3_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 data : 24 ; // default: 0;
u32 a1 : 1 ; // default: 0;
u32 res0 : 3 ; // default: ;
u32 en : 1 ; // default: 0;
u32 res1 : 3 ; // default: ;
} bits;
} tcon0_cpu_tri4_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 data : 24 ; // default: ;
u32 a1 : 1 ; // default: 0;
u32 res0 : 7 ; // default: ;
} bits;
} tcon0_cpu_tri5_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 out_format : 1 ; // default: 0;
u32 res0 : 30 ; // default: ;
u32 cmap_en : 1 ; // default: 0;
} bits;
} tcon_cmap_ctl_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 out0 : 16 ; // default: 0;
u32 out1 : 16 ; // default: 0;
} bits;
} tcon_cmap_odd0_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 out2 : 16 ; // default: 0;
u32 out3 : 16 ; // default: 0;
} bits;
} tcon_cmap_odd1_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 out0 : 16 ; // default: 0;
u32 out1 : 16 ; // default: 0;
} bits;
} tcon_cmap_even0_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 out2 : 16 ; // default: 0;
u32 out3 : 16 ; // default: 0;
} bits;
} tcon_cmap_even1_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 safe_period_mode : 2 ; // default: 0;
u32 res0 : 14 ; // default: ;
u32 safe_period_fifo_num : 13 ; // default: 0;
u32 res1 : 3 ; // default: ;
} bits;
} tcon_safe_period_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 dsi_src : 2 ; // default: 0;
u32 res0 : 6 ; // default: ;
u32 hdmi_src : 2 ; // default: 0;
u32 res1 : 22 ; // default: ;
} bits;
} tcon_mux_ctl_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 pwsmb : 1 ; // default: 0;
u32 pwslv : 1 ; // default: 0;
u32 res0 : 2 ; // default: ;
u32 pd : 2 ; // default: 0;
u32 res1 : 2 ; // default: ;
u32 v : 2 ; // default: 0;
u32 res2 : 2 ; // default: ;
u32 den : 4 ; // default: 0;
u32 denc : 1 ; // default: 0;
u32 c : 2 ; // default: 0;
u32 res3 : 1 ; // default: ;
u32 en_drvd : 4 ; // default: 0;
u32 en_drvc : 1 ; // default: 0;
u32 res4 : 5 ; // default: ;
u32 en_ldo : 1 ; // default: 0;
u32 en_mb : 1 ; // default: 0;
} bits;
} tcon0_lvds_ana_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 res0 : 31 ; // default: ;
u32 tcon1_fill_en : 1 ; // default: 0;
} bits;
} tcon1_fill_ctl_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 fill_begin : 24 ; // default: 0;
u32 res0 : 8 ; // default: ;
} bits;
} tcon1_fill_begin_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 fill_end : 24 ; // default: 0;
u32 res0 : 8 ; // default: ;
} bits;
} tcon1_fill_end_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 fill_value : 24 ; // default: 0;
u32 res0 : 8 ; // default: ;
} bits;
} tcon1_fill_data_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 pixel : 24 ; // default: 0;
u32 res0 : 8 ; // default: ;
} bits;
struct
{
u32 blue : 8 ; // default: 0;
u32 green : 8 ; // default: 0;
u32 red : 8 ; // default: 0;
u32 res0 : 8 ; // default: ;
} bytes;
} tcon_gamma_tlb_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 res0 ; // default: ;
} bits;
} tcon_reservd_reg_t;
//edit by lrx---start
typedef union
{
u32 dwval;
struct
{
u32 tv0_clk_src :1 ; //0:clk from ccu,1:clk from tve0
u32 res3 :3 ;
u32 tv1_clk_src :1 ; //0:clk from ccu,1:clk from tve1
u32 res2 :3 ;
u32 tv0_out :1 ; //0:lcd0 to gpiod,1:tv0 to gpiod
u32 res1 :3 ;
u32 tv1_out :1 ; //0:lcd1 to gpioh,1:tv1 to gpioh
u32 res0 :19 ; // default: ;
} bits;
} tcon_tv_setup_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 de_port0_perh :2 ; //0:lcd0,1:lcd1,2:tv0,3:tv1
u32 res1 :2 ;
u32 de_port1_perh :2 ; //0:lcd0,1:lcd1,2:tv0,3:tv1
u32 res0 :26 ; // default: ;
} bits;
} tcon_de_perh_reg_t;
typedef union
{
u32 dwval;
struct
{
u32 res4 :16 ;
u32 dsi_clk_gate :1 ; //0:disable,1:enable
u32 res3 :3 ;
u32 tv0_clk_gate :1 ; //0:disable,1:enable
u32 res2 :3 ;
u32 tv1_clk_gate :1 ; //0:disable,1:enable
u32 res1 :3 ;
u32 hdmi_src :2 ; //0:disable,1:tv0,2:tv1
u32 res0 :2 ; // default: ;
} bits;
} tcon_clk_gate_reg_t;
//top device define
typedef struct
{
tcon_tv_setup_reg_t tcon_tv_setup; //0x000
tcon_reservd_reg_t tcon_reg_0004; //0x004
tcon_reservd_reg_t tcon_reg_0008; //0x008
tcon_reservd_reg_t tcon_reg_000c; //0x00c
tcon_reservd_reg_t tcon_reg_0010; //0x010
tcon_reservd_reg_t tcon_reg_0014; //0x014
tcon_reservd_reg_t tcon_reg_0018; //0x018
tcon_de_perh_reg_t tcon_de_perh; //0x01c
tcon_clk_gate_reg_t tcon_clk_gate; //0x020
}__de_lcd_top_dev_t;
//edit by lrx---end
//device define
typedef struct
{
tcon_gctl_reg_t tcon_gctl; //0x000
tcon_gint0_reg_t tcon_gint0; //0x004
tcon_gint1_reg_t tcon_gint1; //0x008
tcon_reservd_reg_t tcon_reg00c; //0x00c
tcon0_frm_ctl_reg_t tcon0_frm_ctl; //0x010
tcon0_frm_seed_reg_t tcon0_frm_seed_pr; //0x014
tcon0_frm_seed_reg_t tcon0_frm_seed_pg; //0x018
tcon0_frm_seed_reg_t tcon0_frm_seed_pb; //0x01c
tcon0_frm_seed_reg_t tcon0_frm_seed_lr; //0x020
tcon0_frm_seed_reg_t tcon0_frm_seed_lg; //0x024
tcon0_frm_seed_reg_t tcon0_frm_seed_lb; //0x028
tcon0_frm_tab_reg_t tcon0_frm_tbl_0; //0x02c
tcon0_frm_tab_reg_t tcon0_frm_tbl_1; //0x030
tcon0_frm_tab_reg_t tcon0_frm_tbl_2; //0x034
tcon0_frm_tab_reg_t tcon0_frm_tbl_3; //0x038
tcon_reservd_reg_t tcon_reg03c; //0x03c
tcon0_ctl_reg_t tcon0_ctl; //0x040
tcon0_dclk_reg_t tcon0_dclk; //0x044
tcon0_basic0_reg_t tcon0_basic0; //0x048
tcon0_basic1_reg_t tcon0_basic1; //0x04c
tcon0_basic2_reg_t tcon0_basic2; //0x050
tcon0_basic3_reg_t tcon0_basic3; //0x054
tcon0_hv_if_reg_t tcon0_hv_ctl; //0x058
tcon_reservd_reg_t tcon_reg05c; //0x05c
tcon0_cpu_if_reg_t tcon0_cpu_ctl; //0x060
tcon0_cpu_wr_reg_t tcon0_cpu_wr; //0x064
tcon0_cpu_rd0_reg_t tcon0_cpu_rd; //0x068
tcon0_cpu_rd1_reg_t tcon0_cpu_fet; //0x06c
tcon_reservd_reg_t tcon_reg070[5]; //0x070~0x80
tcon0_lvds_if_reg_t tcon0_lvds_ctl; //0x084
tcon0_io_pol_reg_t tcon0_io_pol; //0x088
tcon0_io_tri_reg_t tcon0_io_tri; //0x08c
tcon1_ctl_reg_t tcon1_ctl; //0x090
tcon1_basic0_reg_t tcon1_basic0; //0x094
tcon1_basic1_reg_t tcon1_basic1; //0x098
tcon1_basic2_reg_t tcon1_basic2; //0x09c
tcon1_basic3_reg_t tcon1_basic3; //0x0a0
tcon1_basic4_reg_t tcon1_basic4; //0x0a4
tcon1_basic5_reg_t tcon1_basic5; //0x0a8
tcon_reservd_reg_t tcon_reg0ac; //0x0ac
tcon1_ps_sync_reg_t tcon1_ps_ctl; //0x0b0
tcon_reservd_reg_t tcon_reg0b4[15]; //0x0b4~0x0ec
tcon1_io_pol_reg_t tcon1_io_pol; //0x0f0
tcon1_io_tri_reg_t tcon1_io_tri; //0x0f4
tcon_ecc_fifo_reg_t tcon_ecfifo_ctl; //0x0f8
tcon_debug_reg_t tcon_debug; //0x0fc
tcon_ceu_ctl_reg_t tcon_ceu_ctl; //0x100
tcon_reservd_reg_t tcon_reg104[3]; //0x104~0x10c
tcon_ceu_coef_mul_reg_t tcon_ceu_coef_rr; //0x110
tcon_ceu_coef_mul_reg_t tcon_ceu_coef_rg; //0x114
tcon_ceu_coef_mul_reg_t tcon_ceu_coef_rb; //0x118
tcon_ceu_coef_add_reg_t tcon_ceu_coef_rc; //0x11c
tcon_ceu_coef_mul_reg_t tcon_ceu_coef_gr; //0x120
tcon_ceu_coef_mul_reg_t tcon_ceu_coef_gg; //0x124
tcon_ceu_coef_mul_reg_t tcon_ceu_coef_gb; //0x128
tcon_ceu_coef_add_reg_t tcon_ceu_coef_gc; //0x12c
tcon_ceu_coef_mul_reg_t tcon_ceu_coef_br; //0x130
tcon_ceu_coef_mul_reg_t tcon_ceu_coef_bg; //0x134
tcon_ceu_coef_mul_reg_t tcon_ceu_coef_bb; //0x138
tcon_ceu_coef_add_reg_t tcon_ceu_coef_bc; //0x13c
tcon_ceu_coef_rang_reg_t tcon_ceu_coef_rv; //0x140
tcon_ceu_coef_rang_reg_t tcon_ceu_coef_gv; //0x144
tcon_ceu_coef_rang_reg_t tcon_ceu_coef_bv; //0x148
tcon_reservd_reg_t tcon_reg14c[5]; //0x14c~0x15c
tcon0_cpu_tri0_reg_t tcon0_cpu_tri0; //0x160
tcon0_cpu_tri1_reg_t tcon0_cpu_tri1; //0x164
tcon0_cpu_tri2_reg_t tcon0_cpu_tri2; //0x168
tcon0_cpu_tri3_reg_t tcon0_cpu_tri3; //0x16c
tcon0_cpu_tri4_reg_t tcon0_cpu_tri4; //0x170
tcon0_cpu_tri5_reg_t tcon0_cpu_tri5; //0x174
tcon_reservd_reg_t tcon_reg178[2]; //0x178~0x17c
tcon_cmap_ctl_reg_t tcon_cmap_ctl; //0x180
tcon_reservd_reg_t tcon_reg184[3]; //0x184~0x18c
tcon_cmap_odd0_reg_t tcon_cmap_odd0; //0x190
tcon_cmap_odd1_reg_t tcon_cmap_odd1; //0x194
tcon_cmap_even0_reg_t tcon_cmap_even0; //0x198
tcon_cmap_even1_reg_t tcon_cmap_even1; //0x19c
tcon_reservd_reg_t tcon_reg1a0[20]; //0x1a0~0x1ec
tcon_safe_period_reg_t tcon_volume_ctl; //0x1f0
tcon_reservd_reg_t tcon_reg1f4[3]; //0x1f4~0x1fc
tcon_mux_ctl_reg_t tcon_mul_ctl; //0x200
tcon_reservd_reg_t tcon_reg204[7]; //0x204~0x21c
tcon0_lvds_ana_reg_t tcon0_lvds_ana[2]; //0x220~0x224
tcon_reservd_reg_t tcon_reg228[54]; //0x228~0x2fc
tcon1_fill_ctl_reg_t tcon_fill_ctl; //0x300
tcon1_fill_begin_reg_t tcon_fill_start0; //0x304
tcon1_fill_end_reg_t tcon_fill_end0; //0x308
tcon1_fill_data_reg_t tcon_fill_data0; //0x30c
tcon1_fill_begin_reg_t tcon_fill_start1; //0x310
tcon1_fill_end_reg_t tcon_fill_end1; //0x314
tcon1_fill_data_reg_t tcon_fill_data1; //0x318
tcon1_fill_begin_reg_t tcon_fill_start2; //0x31c
tcon1_fill_end_reg_t tcon_fill_end2; //0x320
tcon1_fill_data_reg_t tcon_fill_data2; //0x324
tcon_reservd_reg_t tcon_reg328[54]; //0x328~0x3fc
tcon_gamma_tlb_reg_t tcon_gamma_tlb[256]; //0x400
}__de_lcd_dev_t;
//s32 tcon0_cfg_mode_auto(u32 sel, disp_panel_para * panel);
//s32 tcon0_cfg_mode_tri(u32 sel, disp_panel_para * panel);
// Configure the colour-map table for TCON unit `sel` (see tcon_cmap_* regs).
s32 tcon_cmap(u32 sel, u32 mode,unsigned int lcd_cmap_tbl[2][3][4]);
// Load a 256-entry gamma table for TCON unit `sel` (see tcon_gamma_tlb).
s32 tcon_gamma(u32 sel, u32 mode,u32 gamma_tbl[256]);
// Colour-enhancement setup; b/c/s/h presumably brightness, contrast,
// saturation, hue (see tcon_ceu_* coefficient registers) — TODO confirm.
s32 tcon_ceu(u32 sel,u32 mode,s32 b,s32 c,s32 s,s32 h);
// Configure TCON0 frame-rate-modulation (dithering) mode (see tcon0_frm_ctl).
s32 tcon0_frm(u32 sel,u32 mode);
#endif
|
#!/bin/bash
#
# Build the rnaseq-umi-cpp and bwa images, copy the build artifacts
# (the w96/w384 trees and the bwa binary) out of the containers, build the
# profiler image, then remove the intermediate files and the bwa:build image.
#
# Each command is echoed before it runs so the log shows what was executed.

echo "sudo docker build -t rnaseq-umi-cpp -f Dockerfile.build_ARM64 ${PWD}"
sudo docker build -t rnaseq-umi-cpp -f Dockerfile.build_ARM64 "${PWD}"

echo "sudo docker run --rm -v ${PWD}:/local rnaseq-umi-cpp /bin/sh -c 'cp -r source/w* /local/. '"
sudo docker run --rm -v "${PWD}":/local rnaseq-umi-cpp /bin/sh -c "cp -r source/w* /local/. "

echo "sudo docker build -t bwa:build -f Dockerfile.build_bwa ."
sudo docker build -t bwa:build -f Dockerfile.build_bwa .

# BUG FIX: the echoed command previously had mangled quoting and claimed to
# copy /usr/local/bin/bwa while the executed command copies ./bwa; the log
# line now matches what actually runs.
echo "sudo docker run --rm -v ${PWD}:/local bwa:build /bin/sh -c 'cp ./bwa /local/.'"
sudo docker run --rm -v "${PWD}":/local bwa:build /bin/sh -c "cp ./bwa /local/."

echo "sudo docker build -t biodepot/rnaseq-umi-cpp:profiler -f Dockerfile-arm-profiler ${PWD}"
sudo docker build -t biodepot/rnaseq-umi-cpp:profiler -f Dockerfile-arm-profiler "${PWD}"

# BUG FIX: the echoed cleanup previously omitted 'bwa' even though it is
# removed below.
echo "sudo rm -rf w96 w384 bwa"
sudo rm -rf w96 w384 bwa

# Drop the intermediate build image.
sudo docker rmi bwa:build
|
# Termux package recipe for glib.
TERMUX_PKG_HOMEPAGE=https://developer.gnome.org/glib/
TERMUX_PKG_DESCRIPTION="Library providing core building blocks for libraries and applications written in C"
TERMUX_PKG_VERSION=2.56.1
# SHA-256 of the upstream tarball named by TERMUX_PKG_SRCURL below.
TERMUX_PKG_SHA256=40ef3f44f2c651c7a31aedee44259809b6f03d3d20be44545cd7d177221c0b8d
# ${TERMUX_PKG_VERSION:0:4} yields the "2.56" series directory on the mirror.
TERMUX_PKG_SRCURL=https://ftp.gnome.org/pub/gnome/sources/glib/${TERMUX_PKG_VERSION:0:4}/glib-${TERMUX_PKG_VERSION}.tar.xz
# libandroid-support to get langinfo.h in include path.
TERMUX_PKG_DEPENDS="libffi, pcre, libandroid-support"
TERMUX_PKG_RM_AFTER_INSTALL="share/gtk-doc share/locale share/glib-2.0/gettext share/gdb/auto-load share/glib-2.0/codegen share/glib-2.0/gdb bin/gtester-report bin/glib-gettextize bin/gdbus-codegen"
# Needed by pkg-config for glib-2.0:
TERMUX_PKG_DEVPACKAGE_DEPENDS="pcre-dev"
TERMUX_PKG_INCLUDE_IN_DEVPACKAGE="lib/glib-2.0/include"
# --enable-compile-warnings=no to get rid of format strings causing errors.
# --disable-znodelete to avoid DF_1_NODELETE which most Android 5.0 linkers does not support.
TERMUX_PKG_EXTRA_CONFIGURE_ARGS="
--cache-file=termux_configure.cache
--disable-compile-warnings
--disable-gtk-doc
--disable-gtk-doc-html
--disable-libelf
--disable-libmount
--disable-znodelete
--with-pcre=system
"
# Hook run by the Termux build system before ./configure: seeds the
# configure cache with answers that cannot be probed when cross-compiling.
termux_step_pre_configure () {
	# glib checks for __BIONIC__ instead of __ANDROID__:
	CFLAGS="$CFLAGS -D__BIONIC__=1"
	cd $TERMUX_PKG_BUILDDIR
	# https://developer.gnome.org/glib/stable/glib-cross-compiling.html
	echo "glib_cv_long_long_format=ll" >> termux_configure.cache
	echo "glib_cv_stack_grows=no" >> termux_configure.cache
	echo "glib_cv_uscore=no" >> termux_configure.cache
	# Make the cache read-only so configure does not overwrite the seeds.
	chmod a-w termux_configure.cache
}
|
package org.hswebframework.web.crud.events;
import lombok.AllArgsConstructor;
import org.apache.commons.beanutils.BeanUtilsBean;
import org.apache.commons.collections.CollectionUtils;
import org.hswebframework.ezorm.core.param.QueryParam;
import org.hswebframework.ezorm.rdb.events.*;
import org.hswebframework.ezorm.rdb.events.EventListener;
import org.hswebframework.ezorm.rdb.events.EventType;
import org.hswebframework.ezorm.rdb.executor.NullValue;
import org.hswebframework.ezorm.rdb.mapping.*;
import org.hswebframework.ezorm.rdb.mapping.events.MappingContextKeys;
import org.hswebframework.ezorm.rdb.mapping.events.MappingEventTypes;
import org.hswebframework.ezorm.rdb.mapping.events.ReactiveResultHolder;
import org.hswebframework.ezorm.rdb.metadata.RDBColumnMetadata;
import org.hswebframework.ezorm.rdb.metadata.TableOrViewMetadata;
import org.hswebframework.web.api.crud.entity.Entity;
import org.hswebframework.web.bean.FastBeanCopier;
import org.hswebframework.web.crud.annotation.EnableEntityEvent;
import org.hswebframework.web.event.AsyncEvent;
import org.hswebframework.web.event.GenericsPayloadApplicationEvent;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.scheduling.annotation.Async;
import reactor.core.publisher.Mono;
import reactor.function.Function3;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;
import java.util.*;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
/**
 * Translates low-level ORM mapping lifecycle events (select/insert/save/update/delete)
 * into strongly typed entity events (prepare / before / after variants of create,
 * save, modify, delete, query) and publishes them through the Spring
 * {@link ApplicationEventPublisher}. Whether anything is published for a given
 * entity type, event type and phase is decided by {@link EntityEventListenerConfigure}.
 */
@SuppressWarnings("all")
@AllArgsConstructor
public class EntityEventListener implements EventListener {

    // Publishes the translated entity events into the application context.
    private final ApplicationEventPublisher eventPublisher;
    // Gate: which entity types / event types / phases are enabled.
    private final EntityEventListenerConfigure listenerConfigure;

    @Override
    public String getId() {
        return "entity-listener";
    }

    @Override
    public String getName() {
        // Display name (Chinese): "entity change event listener".
        return "实体变更事件监听器";
    }

    /**
     * Entry point: dispatches a mapping event to the matching handler.
     * Bails out early when the context already carries an error, when the
     * mapped type is not an {@link Entity}, or when the listener is disabled
     * for that entity type.
     */
    @Override
    public void onEvent(EventType type, EventContext context) {
        if (context.get(MappingContextKeys.error).isPresent()) {
            return;
        }
        EntityColumnMapping mapping = context.get(MappingContextKeys.columnMapping).orElse(null);
        Class<Entity> entityType;
        // Note: entityType is assigned inside the condition below.
        if (mapping == null ||
            !Entity.class.isAssignableFrom(entityType = (Class) mapping.getEntityType()) ||
            !listenerConfigure.isEnabled(entityType)) {
            return;
        }
        if (type == MappingEventTypes.select_before) {
            handleQueryBefore(mapping, context);
        }
        if (type == MappingEventTypes.insert_before) {
            // "single" marks a one-entity operation; anything else is a batch (List).
            boolean single = context.get(MappingContextKeys.type).map("single"::equals).orElse(false);
            if (single) {
                handleSingleOperation(mapping.getEntityType(),
                    EntityEventType.create,
                    context,
                    EntityPrepareCreateEvent::new,
                    EntityBeforeCreateEvent::new,
                    EntityCreatedEvent::new);
            } else {
                // NOTE(review): the batch insert branch mixes EntityEventType.save and
                // EntityPrepareSaveEvent with create-flavored before/after events,
                // unlike the single branch above — confirm this is intentional.
                handleBatchOperation(mapping.getEntityType(),
                    EntityEventType.save,
                    context,
                    EntityPrepareSaveEvent::new,
                    EntityBeforeCreateEvent::new,
                    EntityCreatedEvent::new);
            }
        }
        if (type == MappingEventTypes.save_before) {
            boolean single = context.get(MappingContextKeys.type).map("single"::equals).orElse(false);
            if (single) {
                handleSingleOperation(mapping.getEntityType(),
                    EntityEventType.save,
                    context,
                    EntityPrepareSaveEvent::new,
                    EntityBeforeSaveEvent::new,
                    EntitySavedEvent::new);
            } else {
                handleBatchOperation(mapping.getEntityType(),
                    EntityEventType.save,
                    context,
                    EntityPrepareSaveEvent::new,
                    EntityBeforeSaveEvent::new,
                    EntitySavedEvent::new);
            }
        }
        if (type == MappingEventTypes.update_before) {
            handleUpdateBefore(context);
        }
        if (type == MappingEventTypes.delete_before) {
            handleDeleteBefore(entityType, context);
        }
    }

    /**
     * Publishes an {@link EntityBeforeQueryEvent} carrying the query parameters
     * and registers the event's async completion on the reactive result holder,
     * so listeners can delay or veto the query.
     */
    protected void handleQueryBefore(EntityColumnMapping mapping, EventContext context) {
        context.get(MappingContextKeys.reactiveResultHolder)
               .ifPresent(holder -> {
                   // NOTE(review): the context key is spelled "queryOaram" — looks like
                   // a typo of "queryParam" in MappingContextKeys; verify upstream.
                   context.get(MappingContextKeys.queryOaram)
                          .ifPresent(queryParam -> {
                              EntityBeforeQueryEvent event = new EntityBeforeQueryEvent<>(queryParam, mapping.getEntityType());
                              eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, event, mapping.getEntityType()));
                              holder
                                  .before(
                                      event.getAsync()
                                  );
                          });
               });
    }

    /**
     * Builds the expected post-update state for each old row: copies the update
     * column values onto a copy of the old entity, explicitly nulling properties
     * whose update value is null / {@code NullValue}, then copies the primary-key
     * value from the old row onto the new one.
     *
     * @return one projected "after" value per element of {@code olds}, or an
     *         empty list when the table has no primary key column.
     */
    protected List<Object> createAfterData(List<Object> olds,
                                           EventContext context) {
        List<Object> newValues = new ArrayList<>(olds.size());
        EntityColumnMapping mapping = context
            .get(MappingContextKeys.columnMapping)
            .orElseThrow(UnsupportedOperationException::new);
        TableOrViewMetadata table = context.get(ContextKeys.table).orElseThrow(UnsupportedOperationException::new);
        RDBColumnMetadata idColumn = table
            .getColumns()
            .stream()
            .filter(RDBColumnMetadata::isPrimaryKey)
            .findFirst()
            .orElse(null);
        if (idColumn == null) {
            // Without a primary key the old and new rows cannot be correlated.
            return Collections.emptyList();
        }
        for (Object old : olds) {
            // Prefer the full entity instance from the context; otherwise rebuild
            // the "after" state from the update column map.
            Object newValue = context
                .get(MappingContextKeys.instance)
                .filter(Entity.class::isInstance)
                .map(Entity.class::cast)
                .orElseGet(() -> {
                    return context
                        .get(MappingContextKeys.updateColumnInstance)
                        .map(map -> {
                            Object data = FastBeanCopier.copy(map, FastBeanCopier.copy(old, mapping.getEntityType()));
                            // Explicitly set properties to null when the update map
                            // says so (FastBeanCopier alone would keep the old value).
                            for (Map.Entry<String, Object> stringObjectEntry : map.entrySet()) {
                                if (stringObjectEntry.getValue() == null || stringObjectEntry.getValue() instanceof NullValue) {
                                    try {
                                        BeanUtilsBean
                                            .getInstance()
                                            .setProperty(data, stringObjectEntry.getKey(), null);
                                    } catch (Throwable ignore) {
                                        // Best-effort: ignore properties that cannot be set.
                                    }
                                }
                            }
                            return data;
                        })
                        .map(Entity.class::cast)
                        .orElse(null);
                });
            if (newValue != null) {
                // Carry the primary-key value over from the old row.
                FastBeanCopier.copy(old, newValue, FastBeanCopier.include(idColumn.getAlias()));
            }
            newValues.add(newValue);
        }
        return newValues;
    }

    /**
     * Builds an update-flavored event from (before, after, type), publishes it,
     * and returns its async completion for the caller to chain on.
     */
    protected Mono<Void> sendUpdateEvent(List<Object> before,
                                         List<Object> after,
                                         Class<Object> type,
                                         Function3<List<Object>, List<Object>, Class<Object>, AsyncEvent> mapper) {
        AsyncEvent event = mapper.apply(before, after, type);
        eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, event, type));
        return event.getAsync();
    }

    /**
     * Builds a delete-flavored event from the rows about to be (or just) deleted,
     * publishes it, and returns its async completion.
     */
    protected Mono<Void> sendDeleteEvent(List<Object> olds,
                                         Class<Object> type,
                                         BiFunction<List<Object>, Class<Object>, AsyncEvent> eventBuilder) {
        AsyncEvent deletedEvent = eventBuilder.apply(olds, type);
        eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, deletedEvent, type));
        return deletedEvent.getAsync();
    }

    /**
     * Update handling. Reactive repositories: the prepare phase queries the rows
     * matched by the update, derives their "after" projection, and caches the
     * (before, after) pair; the before/after phases replay that cached pair as
     * EntityBeforeModifyEvent / EntityModifyEvent. Sync repositories: only the
     * before event is fired, blocking on its async part.
     */
    protected void handleUpdateBefore(DSLUpdate<?, ?> update, EventContext context) {
        Object repo = context.get(MappingContextKeys.repository).orElse(null);
        EntityColumnMapping mapping = context
            .get(MappingContextKeys.columnMapping)
            .orElseThrow(UnsupportedOperationException::new);
        Class entityType = (Class) mapping.getEntityType();
        if (repo instanceof ReactiveRepository) {
            context.get(MappingContextKeys.reactiveResultHolder)
                   .ifPresent(holder -> {
                       // Shared between the prepare/before/after callbacks below.
                       AtomicReference<Tuple2<List<Object>, List<Object>>> updated = new AtomicReference<>();
                       //prepare: fetch affected rows and cache (before, after)
                       if (isEnabled(entityType,
                                     EntityEventType.modify,
                                     EntityEventPhase.prepare,
                                     EntityEventPhase.before,
                                     EntityEventPhase.after)) {
                           holder.before(
                               ((ReactiveRepository<Object, ?>) repo)
                                   .createQuery()
                                   .setParam(update.toQueryParam())
                                   .fetch()
                                   .collectList()
                                   .flatMap((list) -> {
                                       List<Object> after = createAfterData(list, context);
                                       updated.set(Tuples.of(list, after));
                                       return sendUpdateEvent(list,
                                           after,
                                           entityType,
                                           EntityPrepareModifyEvent::new);
                                   })
                                   .then()
                           );
                       }
                       //before: replay the cached pair as a before-modify event
                       if (isEnabled(entityType, EntityEventType.modify, EntityEventPhase.before)) {
                           holder.invoke(Mono.defer(() -> {
                               Tuple2<List<Object>, List<Object>> _tmp = updated.get();
                               if (_tmp != null) {
                                   return sendUpdateEvent(_tmp.getT1(),
                                       _tmp.getT2(),
                                       entityType,
                                       EntityBeforeModifyEvent::new);
                               }
                               return Mono.empty();
                           }));
                       }
                       //after: fire the modified event once, clearing the cache
                       if (isEnabled(entityType, EntityEventType.modify, EntityEventPhase.after)) {
                           holder.after(v -> {
                               return Mono
                                   .defer(() -> {
                                       Tuple2<List<Object>, List<Object>> _tmp = updated.getAndSet(null);
                                       if (_tmp != null) {
                                           return sendUpdateEvent(_tmp.getT1(),
                                               _tmp.getT2(),
                                               entityType,
                                               EntityModifyEvent::new);
                                       }
                                       return Mono.empty();
                                   });
                           });
                       }
                   });
        } else if (repo instanceof SyncRepository) {
            // Synchronous path: fetch affected rows and block on the before event.
            if (isEnabled(entityType, EntityEventType.modify, EntityEventPhase.before)) {
                QueryParam param = update.toQueryParam();
                SyncRepository<Object, ?> syncRepository = ((SyncRepository<Object, ?>) repo);
                List<Object> list = syncRepository.createQuery()
                                                  .setParam(param)
                                                  .fetch();
                sendUpdateEvent(list,
                    createAfterData(list, context),
                    (Class<Object>) mapping.getEntityType(),
                    EntityBeforeModifyEvent::new)
                    .block();
            }
        }
    }

    /** Extracts the DSL update from the context and delegates to the overload above. */
    protected void handleUpdateBefore(EventContext context) {
        context.<DSLUpdate<?, ?>>get(ContextKeys.source())
               .ifPresent(dslUpdate -> {
                   handleUpdateBefore(dslUpdate, context);
               });
    }

    /**
     * Delete handling, mirroring {@link #handleUpdateBefore}: reactive path caches
     * the rows matched by the delete during the before phase, then replays them as
     * EntityDeletedEvent in the after phase; sync path blocks on the before event.
     */
    protected void handleDeleteBefore(Class<Entity> entityType, EventContext context) {
        EntityColumnMapping mapping = context
            .get(MappingContextKeys.columnMapping)
            .orElseThrow(UnsupportedOperationException::new);
        context.<DSLDelete>get(ContextKeys.source())
               .ifPresent(dslUpdate -> {
                   Object repo = context.get(MappingContextKeys.repository).orElse(null);
                   if (repo instanceof ReactiveRepository) {
                       context.get(MappingContextKeys.reactiveResultHolder)
                              .ifPresent(holder -> {
                                  // Rows matched by the delete, cached for the after phase.
                                  AtomicReference<List<Object>> deleted = new AtomicReference<>();
                                  if (isEnabled(entityType, EntityEventType.delete, EntityEventPhase.before, EntityEventPhase.after)) {
                                      holder.before(((ReactiveRepository<Object, ?>) repo)
                                          .createQuery()
                                          .setParam(dslUpdate.toQueryParam())
                                          .fetch()
                                          .collectList()
                                          .filter(CollectionUtils::isNotEmpty)
                                          .flatMap(list -> {
                                              deleted.set(list);
                                              return this
                                                  .sendDeleteEvent(list, (Class) mapping.getEntityType(), EntityBeforeDeleteEvent::new);
                                          })
                                      );
                                  }
                                  if (isEnabled(entityType, EntityEventType.delete, EntityEventPhase.after)) {
                                      holder.after(v -> {
                                          return Mono
                                              .defer(() -> {
                                                  List<Object> _tmp = deleted.getAndSet(null);
                                                  if (CollectionUtils.isNotEmpty(_tmp)) {
                                                      return sendDeleteEvent(_tmp, (Class) mapping.getEntityType(), EntityDeletedEvent::new);
                                                  }
                                                  return Mono.empty();
                                              });
                                      });
                                  }
                              });
                   } else if (repo instanceof SyncRepository) {
                       QueryParam param = dslUpdate.toQueryParam();
                       SyncRepository<Object, ?> syncRepository = ((SyncRepository<Object, ?>) repo);
                       List<Object> list = syncRepository.createQuery()
                                                         .setParam(param)
                                                         .fetch();
                       this.sendDeleteEvent(list, (Class) mapping.getEntityType(), EntityBeforeDeleteEvent::new)
                           .block();
                   }
               });
    }

    /** Intentionally a no-op: update-after handling is covered by the holder callbacks above. */
    protected void handleUpdateAfter(EventContext context) {
    }

    /**
     * Publishes prepare/before/after events for a batch (List) operation.
     * Reactive repositories register each phase on the result holder; otherwise
     * only the after event is published and its async part is blocked on.
     *
     * NOTE(review): parameter naming is shifted — {@code before} builds the
     * prepare event and {@code execute} builds the before event; confirm callers.
     */
    protected void handleBatchOperation(Class clazz,
                                        EntityEventType entityEventType,
                                        EventContext context,
                                        BiFunction<List<?>, Class, AsyncEvent> before,
                                        BiFunction<List<?>, Class, AsyncEvent> execute,
                                        BiFunction<List<?>, Class, AsyncEvent> after) {
        context.get(MappingContextKeys.instance)
               .filter(List.class::isInstance)
               .map(List.class::cast)
               .ifPresent(lst -> {
                   AsyncEvent prepareEvent = before.apply(lst, clazz);
                   AsyncEvent afterEvent = after.apply(lst, clazz);
                   AsyncEvent beforeEvent = execute.apply(lst, clazz);
                   Object repo = context.get(MappingContextKeys.repository).orElse(null);
                   if (repo instanceof ReactiveRepository) {
                       Optional<ReactiveResultHolder> resultHolder = context.get(MappingContextKeys.reactiveResultHolder);
                       if (resultHolder.isPresent()) {
                           ReactiveResultHolder holder = resultHolder.get();
                           if (null != prepareEvent && isEnabled(clazz, entityEventType, EntityEventPhase.prepare)) {
                               eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, prepareEvent, clazz));
                               holder.before(prepareEvent.getAsync());
                           }
                           if (null != beforeEvent && isEnabled(clazz, entityEventType, EntityEventPhase.before)) {
                               holder.invoke(Mono.defer(() -> {
                                   eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, beforeEvent, clazz));
                                   return beforeEvent.getAsync();
                               }));
                           }
                           if (null != afterEvent && isEnabled(clazz, entityEventType, EntityEventPhase.after)) {
                               holder.after(v -> {
                                   eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, afterEvent, clazz));
                                   return afterEvent.getAsync();
                               });
                           }
                           return;
                       }
                   }
                   eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, afterEvent, clazz));
                   // block: support for non-reactive (synchronous) callers
                   afterEvent.getAsync().block();
               });
    }

    /** @return true when any of the given phases is enabled for the entity/event type. */
    boolean isEnabled(Class clazz, EntityEventType entityEventType, EntityEventPhase... phase) {
        for (EntityEventPhase entityEventPhase : phase) {
            if (listenerConfigure.isEnabled(clazz, entityEventType, entityEventPhase)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Single-entity counterpart of {@link #handleBatchOperation}: wraps the one
     * instance in a singleton list and follows the same prepare/before/after flow.
     * The same shifted parameter naming applies (see NOTE on handleBatchOperation).
     */
    protected void handleSingleOperation(Class clazz,
                                         EntityEventType entityEventType,
                                         EventContext context,
                                         BiFunction<List<?>, Class, AsyncEvent> before,
                                         BiFunction<List<?>, Class, AsyncEvent> execute,
                                         BiFunction<List<?>, Class, AsyncEvent> after) {
        context.get(MappingContextKeys.instance)
               .filter(Entity.class::isInstance)
               .map(Entity.class::cast)
               .ifPresent(entity -> {
                   AsyncEvent prepareEvent = before.apply(Collections.singletonList(entity), clazz);
                   AsyncEvent beforeEvent = execute.apply(Collections.singletonList(entity), clazz);
                   AsyncEvent afterEvent = after.apply(Collections.singletonList(entity), clazz);
                   Object repo = context.get(MappingContextKeys.repository).orElse(null);
                   if (repo instanceof ReactiveRepository) {
                       Optional<ReactiveResultHolder> resultHolder = context.get(MappingContextKeys.reactiveResultHolder);
                       if (resultHolder.isPresent()) {
                           ReactiveResultHolder holder = resultHolder.get();
                           if (null != prepareEvent && isEnabled(clazz, entityEventType, EntityEventPhase.prepare)) {
                               eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, prepareEvent, clazz));
                               holder.before(prepareEvent.getAsync());
                           }
                           if (null != beforeEvent && isEnabled(clazz, entityEventType, EntityEventPhase.before)) {
                               holder.invoke(Mono.defer(() -> {
                                   eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, beforeEvent, clazz));
                                   return beforeEvent.getAsync();
                               }));
                           }
                           if (null != afterEvent && isEnabled(clazz, entityEventType, EntityEventPhase.after)) {
                               holder.after(v -> {
                                   eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, afterEvent, clazz));
                                   return afterEvent.getAsync();
                               });
                           }
                           return;
                       }
                   }
                   eventPublisher.publishEvent(new GenericsPayloadApplicationEvent<>(this, afterEvent, clazz));
                   // block: support for non-reactive (synchronous) callers
                   afterEvent.getAsync().block();
               });
    }
}
|
<reponame>dhinojosa/language-matrix
package com.evolutionnext.jdbc;
import java.sql.*;
public class UsingDriverManager {
    /**
     * Demonstrates plain-JDBC access via {@link DriverManager}: connects to the
     * local MySQL "sakila" database and prints customers whose last name starts
     * with "Sa".
     *
     * @param args args[0] = username, args[1] = password
     * @throws SQLException           on any database error
     * @throws ClassNotFoundException if the MySQL driver is not on the classpath
     */
    public static void main(String[] args) throws SQLException,
            ClassNotFoundException {
        if (args.length != 2) {
            System.out.println("Application needs two arguments, e.g. java com" +
                    ".evolutionnext.jdbc.UsingDriverManager <root> <password>");
            System.exit(1);
        }
        String username = args[0];
        String password = args[1];
        Class.forName("com.mysql.cj.jdbc.Driver");
        // try-with-resources closes result set, statement and connection in the
        // correct reverse order even when the query throws — the original code
        // leaked all three on exception and closed the statement before the
        // result set.
        try (Connection connection = DriverManager.getConnection("jdbc:mysql" +
                "://localhost:3306/sakila", username, password);
             PreparedStatement preparedStatement = connection.prepareStatement(
                     "SELECT first_name, last_name from customer where last_name like ? order by first_name")) {
            preparedStatement.setString(1, "Sa%");
            try (ResultSet resultSet = preparedStatement.executeQuery()) {
                while (resultSet.next()) {
                    System.out.printf("first_name = %s, last_name = %s;\n", resultSet.getString(1), resultSet.getString(2));
                }
            }
        }
    }
}
|
#!/usr/bin/env bash
# Export a Docker image to a tar archive on disk.
#
# Required environment:
#   PARAM_IMAGE_FILE - output archive file name
#   PARAM_IMAGE_NAME - image (name[:tag]) to export
# Optional:
#   IMAGE_DIR        - target directory (default: images)
main() {
  # Each entry is KEY,param-name: KEY is the env var to check, the part after
  # the comma is the user-facing parameter name for the error message.
  for i in PARAM_IMAGE_FILE,image-file PARAM_IMAGE_NAME,image-name; do
    KEY=${i%,*}
    VAL=${i#*,}
    if [[ -z "${!KEY}" ]]; then
      echo "param ${VAL} is required!"
      exit 1
    fi
  done
  local image_dir="${IMAGE_DIR:-images}"
  echo "> Ensuring image dir '${image_dir}' exists..."
  mkdir -p "$image_dir"
  local full_image_path="${image_dir}/${PARAM_IMAGE_FILE}"
  echo "> Exporting image '${PARAM_IMAGE_NAME}' as '${full_image_path}'..."
  # Quote the image name so word splitting/globbing cannot mangle it
  # (the original left ${PARAM_IMAGE_NAME} unquoted).
  docker save -o "${full_image_path}" "${PARAM_IMAGE_NAME}"
  echo "> Saved as '${full_image_path}'..."
}
# Run main unless the script is being sourced by a *_test / *_test.sh harness.
if [[ -n $(echo "$0" | sed 's/.*_test$//;s/.*_test\.sh$//') ]]; then
  main "$@"
fi
|
<filename>tests/text_preprocessing_test.py
import unittest
from processing_functions import text_preprocessing
import pandas as pd
class TextPreprocessingTests(unittest.TestCase):
    """Unit tests for the processing_functions.text_preprocessing helpers.

    Uses ``self.assertTrue`` instead of bare ``assert`` statements: bare
    asserts are stripped when Python runs with ``-O`` and produce no useful
    failure message under unittest.
    """

    def test_lowercase(self):
        """lowercase_words should lowercase every character in each row."""
        test_df = pd.Series([
            "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
            "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
            "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
        ])
        result = text_preprocessing.lowercase_words(test_df)
        expected = pd.Series([
            "lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
            "ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
            "duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
        ])
        self.assertTrue((result == expected).all())

    def test_remove_punctuation(self):
        """remove_punctuation should strip commas and periods, keeping words."""
        test_df = pd.Series([
            "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
            "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
            "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
        ])
        result = text_preprocessing.remove_punctuation(test_df)
        expected = pd.Series([
            "Lorem ipsum dolor sit amet consectetur adipiscing elit sed do eiusmod tempor incididunt ut labore et dolore magna aliqua",
            "Ut enim ad minim veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat",
            "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur"
        ])
        self.assertTrue((result == expected).all())

    def test_normalized_spacing(self):
        """normalize_spacing should leave already-normalized text unchanged."""
        test_df = pd.Series([
            "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
            "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
            "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
        ])
        result = text_preprocessing.normalize_spacing(test_df)
        expected = pd.Series([
            "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
            "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
            "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur."
        ])
        self.assertTrue((result == expected).all())

    def test_remove_stopwords(self):
        """remove_stopwords should drop common stopwords but keep content words."""
        test_df = pd.Series([
            "This is a random test sentence and it contains some stopwords.",
            "Here's another random test sentence, it also includes a few stopwords."
        ])
        result = text_preprocessing.remove_stopwords(test_df)
        expected = pd.Series([
            "This random test sentence contains stopwords.",
            "Here's another random test sentence, also includes stopwords."
        ])
        self.assertTrue((result == expected).all())
namespace SalesProject.Domain.Entities
{
    /// <summary>Common base type for all domain entities.</summary>
    public class BaseEntity
    {
        // Base class implementation
    }

    /// <summary>
    /// A sellable product, identified by name and NCM fiscal code, carrying a
    /// combined price and any additional costs.
    /// </summary>
    public class Product : BaseEntity
    {
        public string Name { get; set; }
        public string NcmCode { get; set; }
        public decimal CombinedPrice { get; set; }
        public decimal AdditionalCosts { get; set; }

        /// <summary>Parameterless constructor (required by ORMs/serializers).</summary>
        public Product() { }

        /// <summary>Creates a fully initialized product.</summary>
        public Product(string name, string ncmCode, decimal combinedPrice, decimal additionalCosts)
        {
            Name = name;
            NcmCode = ncmCode;
            CombinedPrice = combinedPrice;
            AdditionalCosts = additionalCosts;
        }

        /// <summary>Total cost: combined price plus additional costs.</summary>
        public decimal CalculateTotalCost() => CombinedPrice + AdditionalCosts;
    }
}
def calculate_balance(transactions):
    """Return the sum of the amount field (third element) of each transaction.

    Each transaction is an iterable of exactly three items; only the last
    (the amount) contributes to the balance. An empty sequence yields 0.
    """
    return sum(amount for _, _, amount in transactions)
package resolvers
import (
"github.com/bradpurchase/grocerytime-backend/internal/pkg/auth"
"github.com/bradpurchase/grocerytime-backend/internal/pkg/meals"
"github.com/graphql-go/graphql"
)
// MealsResolver resolves the meals query.
//
// It authenticates the caller from the Authorization header carried in the
// GraphQL root value, then retrieves that user's meals with the query args.
func MealsResolver(p graphql.ResolveParams) (interface{}, error) {
	header := p.Info.RootValue.(map[string]interface{})["Authorization"]
	user, err := auth.FetchAuthenticatedUser(header.(string))
	if err != nil {
		return nil, err
	}
	// Named userMeals so the local variable no longer shadows the imported
	// "meals" package (the original `meals, err := meals.RetrieveMeals(...)`
	// made the package inaccessible below the assignment).
	userMeals, err := meals.RetrieveMeals(user.ID, p.Args)
	if err != nil {
		return nil, err
	}
	return userMeals, nil
}
|
package ch.raiffeisen.openbank.branch.persistency.model;
import javax.persistence.Column;
import javax.persistence.Embeddable;
/**
* Geographic location of the ATM specified by geographic coordinates or UTM coordinates.
*
* @author <NAME>
*/
@Embeddable
public class GeographicCoordinates {
    /**
     * Latitude measured in decimal degrees.
     */
    @Column(name = "LATITUDE", nullable = false)
    private Double latitude;
    /**
     * "Angular measurement of the distance of a location on the earth east or west of the Greenwich
     * observatory. The longitude is measured in decimal degrees."
     */
    @Column(name = "LONGITUDE", nullable = false)
    private Double longitude;

    /** @return latitude in decimal degrees (never null once persisted; column is NOT NULL) */
    public Double getLatitude() {
        return latitude;
    }

    /** @param latitude latitude in decimal degrees */
    public void setLatitude(Double latitude) {
        this.latitude = latitude;
    }

    /** @return longitude in decimal degrees (never null once persisted; column is NOT NULL) */
    public Double getLongitude() {
        return longitude;
    }

    /** @param longitude longitude in decimal degrees */
    public void setLongitude(Double longitude) {
        this.longitude = longitude;
    }
}
|
'use strict';
var gju = require('geojson-utils');
function isPoly(l) {
  // A layer counts as a polygon when it wraps a GeoJSON feature whose
  // geometry type is Polygon or MultiPolygon.
  var geometry = l.feature && l.feature.geometry;
  var geometryType = geometry && geometry.type;
  return geometryType === 'Polygon' || geometryType === 'MultiPolygon';
}
/**
 * Point-in-polygon lookups over a Leaflet layer group.
 * `bassackwards` toggles legacy [lat, lng] input ordering for array points.
 */
var leafletPip = {
  bassackwards: false,
  /**
   * Return the polygon sub-layers of `layer` containing point `p`.
   * `p` is either a Leaflet LatLng-like object or a [lng, lat] array;
   * when `first` is truthy, stop after the first match.
   */
  pointInLayer: function(p, layer, first) {
    if (typeof p.lat === 'number') {
      p = [p.lng, p.lat];
    } else if (leafletPip.bassackwards) {
      p = p.concat().reverse();
    }
    var matches = [];
    layer.eachLayer(function(l) {
      if (first && matches.length) {
        return;
      }
      var point = { type: 'Point', coordinates: p };
      if (isPoly(l) && gju.pointInPolygon(point, l.toGeoJSON().geometry)) {
        matches.push(l);
      }
    });
    return matches;
  }
};
module.exports = leafletPip;
|
#!/bin/bash
# Builds this Docker image and tags it.
# Useful only for the maintainer of this Docker image.
# Uses the current directory as the build context and tags the result as
# multiproductions/phptest (implicitly :latest).
docker build --tag multiproductions/phptest .
|
<filename>min/services/hashing.py
import hashlib
def gethash(filename):
    """Return the SHA-256 hex digest of the file at ``filename``.

    Reads the file unbuffered in 128 KiB chunks so arbitrarily large files
    can be hashed without loading them fully into memory.
    """
    digest = hashlib.sha256()
    chunk_size = 128 * 1024
    with open(filename, 'rb', buffering=0) as stream:
        for chunk in iter(lambda: stream.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
# Created by pyminifier (https://github.com/liftoff/pyminifier)
|
<filename>enterprise-huajietaojin-web/src/api/qrcode-image-service.js<gh_stars>0
import request from '@/utils/request'
/**
 * Thin wrapper around the shared request client for the QR-code
 * image preview endpoints.
 */
const QrcodeService = {
  // POST the form to render a store QR-code preview image.
  createStorePreview(form) {
    return request({
      url: '/system-proxy/qrcode/images/stores/preview',
      method: 'post',
      data: form
    })
  },
  // POST the form to render a coupon QR-code preview image.
  createCouponPreview(form) {
    return request({
      url: '/system-proxy/qrcode/images/coupons/preview',
      method: 'post',
      data: form
    })
  }
}
export default QrcodeService
|
package com.pangzhao.quartz;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
/**
 * Demonstration bean: Spring's scheduler invokes {@link #print()} once per
 * second and the current worker-thread name is written to stdout.
 */
@Component
public class MyBean {

    /** Fires at every second boundary ("0/1 * * * * ?"). */
    @Scheduled(cron = "0/1 * * * * ?")
    public void print() {
        String threadName = Thread.currentThread().getName();
        System.out.println(threadName + " :spring task run...");
    }
}
|
#!/bin/bash
# Functional parameter passing: prompt for an age and report it in days.
# global variable
USERNAME=$1

# function definitions - start
# calculate age in days; $1 = age in years
funcAgeInDays () {
  echo "Hello $USERNAME, You are $1 Years Old."
  # Bash arithmetic expansion replaces the legacy backtick + external
  # `expr` invocation (same output, no subprocess).
  echo "That makes you approximately $(( $1 * 365 )) days old... "
}
# function definitions - stop

# script - start
clear
echo "Enter Your Age: "
read USERAGE
# calculate the number of days; quoted to prevent word splitting/globbing
funcAgeInDays "$USERAGE"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.