text stringlengths 1 1.05M |
|---|
import React from "react"
import {useSelector} from "react-redux"
import Feature from "src/js/state/Feature"
import Layout from "src/js/state/Layout"
import {ActionButtonProps} from "./action-button"
import useColumns from "./hooks/useColumns"
import useExport from "./hooks/useExport"
import usePackets from "./hooks/usePackets"
import useView from "./hooks/useView"
import ResponsiveActions from "./responsive-actions"
/**
 * Toolbar action set for the current main view.
 * The full action list (packets, export, columns, view) is shown unless the
 * "summary" feature is enabled and the main view is not "search"; in that
 * case only the view switcher remains.
 */
export default function Actions() {
  const mainView = useSelector(Layout.getMainView)
  const showSummary = useSelector(Feature.show("summary"))
  const view = useView()
  const packets = usePackets()
  const exportAction = useExport()
  const columns = useColumns()

  // Same predicate as before, just named for readability.
  const showFullSet = !showSummary || mainView === "search"
  const actions: ActionButtonProps[] = showFullSet
    ? [packets, exportAction, columns, view]
    : [view]

  return <ResponsiveActions actions={actions} mainView={mainView} />
}
|
<reponame>cilavery/jamstack-test
import React from 'react'
import {Link } from 'gatsby'
import {Header, Layout } from '../components'
// Contact page: wraps a page header and a short blurb in the shared Layout.
export default () => (
  <Layout>
    <Header headerText="Contact Me!"/>
    <p>email me. follow me. just don't be me.</p>
  </Layout>
)
(* Import the necessary libraries for the animation *)
(* topfind enables the #require directive in the OCaml toplevel. *)
#use "topfind";;
(* iEase: presumably an easing/interpolation library used by the
   animation — confirm the findlib package is installed. *)
#require "iEase";;
(* Standard OCaml Graphics library for drawing. *)
#require "graphics";;
(* Implement the animation logic using the ex_ease_int.ml file *)
#use "ex_ease_int.ml";;
<filename>INFO/Books Codes/Oracle Database 10g PLSQL/Code/Chapter9/testCallPP.sql
/*
 * testCallPP.sql
 * Chapter 9, Oracle10g PL/SQL Programming
 * by <NAME>, <NAME> and <NAME>
 *
 * This script demonstrates sequential calls to callPP.sql.
 */
-- "@@" runs the named script relative to this script's own directory;
-- callPP.sql is invoked twice back-to-back to demonstrate repeated calls.
@@callPP.sql
@@callPP.sql
|
<gh_stars>0
# Generated by Django 3.0.3 on 2020-05-30 10:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration (Django 3.0.3, 2020-05-30).

    # Must be applied after the previous microimprocessing migration.
    dependencies = [
        ('microimprocessing', '0002_auto_20200530_1241'),
    ]

    operations = [
        # Add a nullable filesystem-path column to ServerDataFileName.
        migrations.AddField(
            model_name='serverdatafilename',
            name='file_path',
            field=models.FilePathField(null=True, verbose_name='File Path on server'),
        ),
        # Redefine the FK to ServerDatasetPath with CASCADE delete.
        # (Auto-generated AlterField; the previous definition differed in
        # some option — presumably on_delete — per Django's state diff.)
        migrations.AlterField(
            model_name='serverdatafilename',
            name='server_dataset_path',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='microimprocessing.ServerDatasetPath'),
        ),
    ]
|
// Builds an M/D/YYYY string from the current UTC date.
// Note: month and day are not zero-padded (e.g. "5/8/2020").
const getCurrentDateString = () => {
  const now = new Date();
  const pieces = [
    now.getUTCMonth() + 1, // getUTCMonth() is 0-based; shift to 1-12
    now.getUTCDate(),
    now.getUTCFullYear(),
  ];
  return pieces.join("/");
};

const currentDate = getCurrentDateString();
console.log(currentDate); // e.g. 5/8/2020
#!/usr/bin/env bash
# Action entry point: converts FX data files between CSV and the platform
# formats (fxt/hcc/hst/hst509) by dispatching to the matching converter
# script, driven by INPUT_* environment variables.

# Will cause a bash script to exit immediately when a command fails.
set -e
# Sets the exit code of a pipeline to that of the rightmost command
# to exit with a non-zero status.
set -o pipefail
# Treat unset variables as an error and exit immediately.
set -u

# Defines default variables.
verbose=0

# Implements on-exit trap function.
# @param $1 integer (optional) Exit status. If not set, use '$?'.
on_exit()
{
  local exit_status=${1:-$?}
  [ "$verbose" -eq 1 ] && echo "[INFO] $0 finishes with exit code $exit_status."
  exit "$exit_status"
}

# Implements on-error trap function.
# @param $1 integer (optional) Exit status. If not set, use '$?'.
on_error()
{
  local exit_status=${1:-$?}
  [ "$verbose" -eq 1 ] && echo "[ERROR] $0 finishes with exit code $exit_status!"
  get_backtrace >&2
  exit "$exit_status"
}

# Displays simple stack trace.
get_backtrace()
{
  # Fix: keep the frame counter local instead of leaking a global "n".
  local n=0
  while caller $((n++)); do :; done
}

# Handle bash errors. Exit on error. Trap exit.
# Trap normal exit signal (exit on all errors).
trap on_exit EXIT
# Trap non-normal exit signals: 1/HUP, 2/INT, 3/QUIT, 15/TERM, ERR.
trap on_error 1 2 3 15 ERR

# Defines variables.
# verbose becomes 1 when INPUT_LOGLEVEL > 0 (the numeric test is inverted
# and its exit status captured), else 0.
verbose="$(
  ! [ "${INPUT_LOGLEVEL:-0}" -gt 0 ]
  echo $?
)"

args=()
# Fix: default every INPUT_* expansion with ":-"; under "set -u" a genuinely
# unset input would otherwise abort the whole script instead of being
# treated as empty.
if [ -n "${INPUT_INPUTFILE:-}" ]; then
  args+=("$(printf -- "-i%s" "$INPUT_INPUTFILE")")
fi

# Parse level of verbosity.
case "${INPUT_LOGLEVEL:-}" in
1)
  args+=("-v")
  ;;
2)
  args+=("-v")
  args+=("-D")
  ;;
3)
  # Level 3 also traces this wrapper script itself.
  set -x
  args+=("-v")
  args+=("-D")
  ;;
"")
  # Pass-through.
  ;;
*)
  echo "[ERROR] Unknown log level: $INPUT_LOGLEVEL!"
  exit 1
  ;;
esac

# Parse the inputs and run the script.
case "${INPUT_INPUTFORMAT:-}" in
csv)
  # CSV input: convert from CSV to the requested platform format.
  if [ -n "${INPUT_OUTPUTFORMAT:-}" ]; then
    args+=("$(printf -- "-f%s" "$INPUT_OUTPUTFORMAT")")
  fi
  if [ -n "${INPUT_PAIR:-}" ]; then
    args+=("$(printf -- "-p%s" "$INPUT_PAIR")")
  fi
  if [ -n "${INPUT_MODELINGMODE:-}" ]; then
    args+=("$(printf -- "-m%s" "$INPUT_MODELINGMODE")")
  fi
  /opt/src/fx-data-convert-from-csv.py "${args[@]}" "$@"
  ;;
fxt | hcc | hst | hst509)
  # Platform-format input: convert back to CSV.
  if [ -n "${INPUT_INPUTFORMAT:-}" ]; then
    args+=("$(printf -- "-f%s" "$INPUT_INPUTFORMAT")")
  fi
  /opt/src/fx-data-convert-to-csv.py "${args[@]}" "$@"
  ;;
"")
  echo "[ERROR] Please specify the input format!"
  exit 1
  ;;
*)
  echo "[ERROR] Unknown input format: $INPUT_INPUTFORMAT!"
  exit 1
  ;;
esac
|
#!/bin/bash -x
# Launches DAIN training with a fixed hyper-parameter set.
NUMEPOCH=2
BATCHSIZE=3
DATASETPATH='/content/DAIN/demo/spit_source_DAIN/128_img_separation/20210625_133002_prep_t_train'
PRETRAINED='42265-Fri-Jun-25-13-31'
LR=0.0005
#cd /content/DAIN
# Fix: the GPU selection must be exported (or written as a one-line prefix
# to the python command); a bare `CUDA_VISIBLE_DEVICES=0` on its own line
# only sets a shell variable that the python child process never sees.
export CUDA_VISIBLE_DEVICES=0
# NOTE(review): the leading "/" makes --pretrained an absolute path at the
# filesystem root — confirm that is intended and not a missing directory.
python train.py \
	--datasetPath "${DATASETPATH}" \
	--pretrained "/${PRETRAINED}" \
	--numEpoch ${NUMEPOCH} \
	--batch_size ${BATCHSIZE} \
	--save_which 1 \
	--lr ${LR} \
	--rectify_lr 0.0005 \
	--flow_lr_coe 0.01 \
	--occ_lr_coe 0.0 \
	--filter_lr_coe 1.0 \
	--ctx_lr_coe 1.0 \
	--alpha 0.0 1.0 \
	--patience 4 \
	--factor 0.2
|
import java.util.HashSet;
import java.util.Set;
/**
 * Minimal in-memory library catalogue keyed by book title.
 * Backed by a HashSet, so duplicate titles collapse into one entry and
 * listing order is unspecified.
 */
public class LibraryManager {
    private Set<String> books;

    /** Starts with an empty catalogue. */
    public LibraryManager() {
        books = new HashSet<>();
    }

    /** Adds the given title to the catalogue (no-op if already present). */
    public void addBook(String title) {
        books.add(title);
    }

    /** Removes the given title from the catalogue (no-op if absent). */
    public void removeBook(String title) {
        books.remove(title);
    }

    /** Prints every title currently in the catalogue, one per line. */
    public void displayBooks() {
        books.forEach(System.out::println);
    }

    /** Prints a notification line for the given title. */
    public void showNotification(String title) {
        System.out.println("Notification: " + title);
    }
}
#!/bin/bash
# script to make geotiffs from ascat images
# make monthly mean power ratio geotiffs
#
# Usage: <script> <region>
# Walks year directories (names starting with 1 or 2), converts every
# multi-day "quev" ascending-pass .sir.gz file for the region to GeoTIFF
# via sir2geotiff, and recompresses the .sir afterwards.
region="$1"
if [ "${region}" == "" ]
then
  echo "Must specify region."
  exit
fi
# Data-product code embedded in the file names to match.
datatype='quev'
for file in `find [12]* -type f -name \*\*${datatype}-a-${region}\*\.sir\.gz`
do
  # avoid picking up the 1-day data files
  # (names carry start/end day-of-year in dash-fields 4 and 5; equal
  # fields mean a single-day product)
  start_day=`basename ${file} .sir.gz | awk -F- '{print $4}'`
  end_day=`basename ${file} .sir.gz | awk -F- '{print $5}'`
  if [ ${end_day} = ${start_day} ]
  then
    continue
  fi
  echo ${file}
  # Decompress in place, convert, then recompress the .sir source.
  gunzip -v -f ${file}
  dirname=`dirname ${file}`
  sirname=`basename ${file} .gz`
  # NOTE(review): tiffname and year are computed but never used below —
  # presumably leftovers; confirm before removing.
  tiffname=`basename ${sirname} .sir`.tif
  year=`echo ${dirname} | awk -F\/ '{print $2}'`
  sir2geotiff ${dirname}/${sirname}
  gzip -v -f ${dirname}/${sirname}
done
|
#!/bin/bash
set -eu
# We need to start the systemd services we explicitely stopped at step _0.sh
# We add the enablement of the systemd services here because if a node gets rebooted
# before the convergence step for whatever reason the migrated services will
# not be enabled and we potentially have a bigger disruption.
# NOTE(review): services_to_migrate, log_debug, manage_systemd_service and
# check_resource_systemd come from the surrounding upgrade tooling, and
# keep_sahara_services_on_upgrade must be set by the caller ("set -u"
# aborts otherwise) — confirm against the including script.
services=$(services_to_migrate)
if [[ ${keep_sahara_services_on_upgrade} =~ [Ff]alse ]] ; then
    # Drop everything from the first "openstack-sahara" entry to the end of
    # the list when sahara services are to stay down.
    services=${services%%openstack-sahara*}
fi
for service in $services; do
    if [[ ${service%%-clone} =~ .*-cleanup ]]; then
        # we don't want to start {netns,ovs}-cleanup
        log_debug "Skipping ${service}"
        continue
    fi
    # Strip the pacemaker "-clone" suffix, start + enable the unit, then
    # wait up to 600s for it to report started.
    manage_systemd_service start "${service%%-clone}"
    manage_systemd_service enable "${service%%-clone}"
    check_resource_systemd "${service%%-clone}" started 600
done
|
package weclaw1;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
@Controller
public class PersonController {
@RequestMapping("/person")
public String greeting(@RequestParam(value="name", required=true) String name,
@RequestParam(value="surname", required=true) String surname,
@RequestParam(value="age", required=true) int age, Model model) {
model.addAttribute("name", name);
model.addAttribute("surname", surname);
model.addAttribute("age", age);
return "person";
}
} |
#!/bin/bash
# Builds the book PDF with Re:VIEW inside the vvakame/review Docker image;
# the current directory is mounted at /book inside the container.

# Fix: quote the expansion and give it a ":-" default. The original
# unquoted `[ ! -z $REVIEW_CONFIG_FILE ]` only worked for an unset
# variable by accident of `test` argument parsing.
[ ! -z "${REVIEW_CONFIG_FILE:-}" ] || REVIEW_CONFIG_FILE=config.yml
# For interactive manual work, pwd is mounted at /book as follows:
# docker run -i -t -v $(pwd):/book vvakame/review:4.1 /bin/bash
docker run -t --rm -v $(pwd):/book vvakame/review:5.1 /bin/bash -ci "cd /book && ./setup.sh && REVIEW_CONFIG_FILE=$REVIEW_CONFIG_FILE npm run pdf"
|
#!/usr/bin/env bash
# Provisioning: refresh APT metadata, upgrade, prune unused packages, and
# install the base developer tool set.
apt update
apt -y upgrade
apt -y autoremove
# Fix: "git" was listed twice in the original package list.
apt install -y git build-essential libpng-dev curl fish lsb-release
|
#pragma once
#ifndef KAI_PRE_PROCESSOR_H
# define KAI_PRE_PROCESSOR_H
#
// Thin aliases over the Boost.Preprocessor token-paste / stringise macros.
# define KAI_PP_CAT BOOST_PP_CAT
# define KAI_PP_STRINGISE BOOST_PP_STRINGIZE
#
// KAI_STATIC_MESSAGE
// usage:
// #pragma KAI_STATIC_MESSAGE("hello world")
// results in:
// File.cpp(123): hello world
// in the compiler output window
# ifdef KAI_COMPILER_MSVC
// MSVC: expands to a `message(...)` pragma payload of the form
// "<file>(<line>): Func: <text>"; the TODO variant prefixes "TODO: ".
# define KAI_STATIC_TODO(T) message(KAI_PP_CAT(KAI_PP_CAT(KAI_PP_CAT(__FILE__, "("), KAI_PP_STRINGISE(__LINE__)), KAI_STATIC_MESSAGE_TRAIL(KAI_PP_CAT("TODO: ", T))))
# define KAI_STATIC_MESSAGE(T) message(KAI_PP_CAT(KAI_PP_CAT(KAI_PP_CAT(__FILE__, "("), KAI_PP_STRINGISE(__LINE__)), KAI_STATIC_MESSAGE_TRAIL(T)))
// Builds the "): Func: " trailer pasted after the line number.
# define KAI_STATIC_MESSAGE_TRAIL(T) KAI_PP_CAT(KAI_PP_CAT(KAI_PP_CAT("): ", "Func"), ": "), T)
# else
// NOTE(review): on non-MSVC compilers this maps to error(T), which makes
// a "static message" fatal — confirm that is the intended fallback.
# define KAI_STATIC_MESSAGE(T) error(T)
# endif
#endif // KAI_PRE_PROCESSOR_H
|
<filename>src/data/samples.h<gh_stars>0
//
// samples.h
// XAA1
//
// Created by <NAME> on 18/12/2015.
// Copyright (c) 2015 <NAME>. All rights reserved.
//
#ifndef XAA1_samples_h
#define XAA1_samples_h
#include <deque>
#include <string>
#include <memory>
#include <iterator>
#include <random>
// A variable-length vector of values (features or outputs).
typedef std::deque<double> Doubles;

// One training/evaluation sample: input vector, expected output vector,
// and an identifier used to correlate results back to the sample.
struct Sample{
    Doubles input;
    Doubles output;
    std::string sample_id;
};
typedef std::deque<Sample> Samples;
typedef std::shared_ptr<Samples> SamplesSP;

class SamplesPermutationIterator;

// Presents a shared sample set in a random order: the index permutation is
// drawn once at construction time.
class SamplesPermutation {
    // Shared PRNG for drawing permutations (defined/seeded in the .cpp).
    static std::minstd_rand0 rand;
    SamplesSP samples;
    std::deque<size_t> permutation;
public:
    SamplesPermutation(SamplesSP const & samples) : samples(samples), permutation(getPermutation(samples->size())){}
    // Returns a random permutation of [0, size); implementation elsewhere.
    static std::deque<size_t> getPermutation(size_t size);
    SamplesPermutationIterator begin();
    SamplesPermutationIterator end();
};

// Forward iterator over a SamplesPermutation.
// NOTE(review): std::iterator is deprecated since C++17 — consider the
// member-typedef form if the toolchain moves past C++14.
class SamplesPermutationIterator : public std::iterator<std::forward_iterator_tag, const Sample>{
    size_t idx = 0;
    std::deque<size_t>& permutation;
    SamplesSP const & samples;
public:
    SamplesPermutationIterator(SamplesSP const & samples, std::deque<size_t> & permutation) : samples(samples), permutation(permutation){};
    SamplesPermutationIterator(const SamplesPermutationIterator& it) = default;
    SamplesPermutationIterator& operator++() {++idx;return *this;}
    SamplesPermutationIterator operator++(int) {SamplesPermutationIterator tmp(*this); ++*this; return tmp;}
    // True once idx has walked past the last permuted index.
    bool isEnd() const {return idx >= permutation.size();}
    void goToEnd(){ idx = permutation.size();}
    void goToBegin(){ idx = 0; }
    // Iterators match when they reference the same permutation object and
    // either index the same position or are both at the end.
    bool operator==(const SamplesPermutationIterator& it) {return (idx == it.idx || (isEnd() && it.isEnd())) && &permutation == &(it.permutation);}
    bool operator!=(const SamplesPermutationIterator& it) {return !operator==(it);}
    // Dereferences to the sample at the current permuted index.
    Sample const& operator*() {return (*samples)[permutation[idx]];}
};

// Model outputs for one sample, keyed by the sample's identifier.
struct Result{
    Doubles outputs;
    std::string sample_id;
};
typedef std::deque<Result> Results;
#endif
|
// @flow
// Demo script exercising basic Flow type annotations.
import { promise } from './utils'

// Plain function returning a string literal.
function a() {
  return '1111'
}
console.log(a())

// Arrow function (logs a fixed message).
const b = () => {
  console.log('我是箭头函数')
}
b()

// Async demo via the shared promise helper.
promise().then((result) => {
  console.log(result)
})

// Class with a Flow-typed field initialised in the constructor.
class Person {
  name: string
  constructor(name: string) {
    this.name = name
  }
  say() {
    console.log(`我是class,name是${this.name}`)
  }
}
const tom = new Person('tom')
tom.say()

// NOTE(review): the call below passes a number where `b: string` is
// declared — presumably an intentional Flow type-error demo; confirm
// before "fixing" it.
function sum(a: number, b: string) {
  return a + b
}
sum(12, 1)
|
#!/bin/bash
# This script is used to create the base OS image for monerobox.
# It requires a clean Armbian_5.69_Rock64_Ubuntu_bionic_default_4.4.167 releases installed
# and a user "rock64" with sudo permission

# Abort unless running as root.
if [ "$EUID" -ne 0 ] ; then
  echo "Please run as root or with sudo"
  exit
fi

# update apt repo
apt update
apt upgrade -y

# setup zero-conf network
echo "monerobox" > /etc/hostname
apt install -y avahi-daemon

# install docker
apt install -y \
    apt-transport-https \
    ca-certificates \
    curl \
    gnupg-agent \
    software-properties-common
# apt install -y docker-ce docker-ce-cli containerd.io
curl -fsSL https://get.docker.com -o get-docker.sh
sudo bash get-docker.sh
# NOTE(review): the header says user "rock64" but this targets "pi", and
# the same usermod is repeated again below — confirm which user is
# intended and drop the duplicate.
sudo usermod -aG docker pi

# install docker-compose
apt install -y python-pip python-setuptools python-dev libltdl7 libffi-dev
pip install docker-compose==1.24.0

# add user rock64 to docker group
usermod -aG docker pi

# reboot
echo "System will reboot it 10 seconds."
sleep 10
reboot
|
package com.chequer.axboot.core.model.extract.metadata;
import com.chequer.axboot.core.utils.ArrayUtils;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AccessLevel;
import lombok.Data;
import lombok.Getter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringJoiner;
import java.util.stream.Collectors;
import static java.util.stream.Collectors.toList;
/**
 * Metadata for one database table used by the code/SQL generator:
 * the column list plus helpers that render key information and
 * MyBatis-style SQL fragments.
 */
@Data
public class Table {
    /** Primary-key arity markers. */
    public static class Key {
        public static final String SINGLE = "SINGLE";
        public static final String COMPOSITE = "COMPOSITE";
    }

    private String tableName;

    // Lombok getter suppressed; the null-safe getter below replaces it.
    @Getter(AccessLevel.NONE)
    private String remarks;

    /** Returns the table remarks, normalised so it is never null. */
    public String getRemarks() {
        return remarks == null ? "" : remarks;
    }

    private List<Column> columns;
    private String className;

    /**
     * Classifies the primary key as SINGLE (exactly one key column) or
     * COMPOSITE; throws when the column list is empty.
     */
    public String keyType() {
        if (ArrayUtils.isNotEmpty(columns)) {
            return columns.stream().filter(Column::isKey).count() == 1 ? Table.Key.SINGLE : Table.Key.COMPOSITE;
        }
        throw new UnsupportedOperationException("required Table's Primary Key");
    }

    /** One {fieldName: ""} map per column, for JSON scaffolding output. */
    @JsonProperty("json")
    public List<Map<String, String>> getJson() {
        return columns.stream()
                .map(column -> {
                    String _column = column.hibernateField().getFieldName();
                    Map<String, String> keyValueMap = new HashMap<>();
                    keyValueMap.put(_column, "");
                    return keyValueMap;
                }
        ).collect(toList());
    }

    /**
     * Java type of the entity id: the single key column's type, or a
     * generated "<ClassName>Id" composite-key class name otherwise.
     */
    public String keyClassName(String className) {
        if (keyType().equals(Table.Key.SINGLE)) {
            return columns.stream().filter(Column::isKey).findAny().get().hibernateField().getJavaType();
        }
        return className + "Id";
    }

    /**
     * How the key class is referenced in code: bare name for a single key,
     * "<ClassName>.<KeyClassName>" for a nested composite-key class.
     */
    public String keyClassRefName(String className) {
        String keyClassName = keyClassName(className);
        if (keyType().equals(Table.Key.SINGLE)) {
            return keyClassName;
        }
        return String.format("%s.%s", className, keyClassName);
    }

    /** Field name of the key, or a comma-joined list for composite keys. */
    public String returnKeyName() {
        if (keyType().equals(Table.Key.SINGLE)) {
            return columns.stream().filter(Column::isKey).findAny().get().hibernateField().getFieldName();
        } else {
            StringJoiner returnKeyName = new StringJoiner(", ");
            columns.stream().filter(Column::isKey).forEach(column -> returnKeyName.add(column.hibernateField().getFieldName()));
            return returnKeyName.toString();
        }
    }

    /** Column names joined for an INSERT column list. */
    public String queryFields() {
        return columns.stream().map(Column::getColumnName).collect(Collectors.joining(",\n        "));
    }

    /** "COLUMN AS fieldName" pairs for a SELECT projection. */
    public String selectQueryFields() {
        StringJoiner stringJoiner = new StringJoiner(",\n        ");
        for (Column column : columns) {
            stringJoiner.add(column.getColumnName() + " AS " + column.hibernateField().getFieldName());
        }
        return stringJoiner.toString();
    }

    /** MyBatis "#{field}" placeholders for an INSERT VALUES list. */
    public String queryValues() {
        StringJoiner stringJoiner = new StringJoiner(",\n        ");
        for (Column column : columns) {
            stringJoiner.add("#{" + column.hibernateField().getFieldName() + "}");
        }
        return stringJoiner.toString();
    }

    /** "COLUMN = #{field}" assignments for an UPDATE SET (non-key columns). */
    public String setColumns() {
        StringJoiner stringJoiner = new StringJoiner(",\n        ");
        for (Column column : columns) {
            if (!column.isKey()) {
                stringJoiner.add(column.getColumnName() + " = #{" + column.hibernateField().getFieldName() + "}");
            }
        }
        return stringJoiner.toString();
    }

    /** "COLUMN = #{field}" conditions over the key columns for a WHERE id. */
    public String idWhere() {
        StringJoiner stringJoiner = new StringJoiner(", ");
        for (Column column : columns) {
            if (column.isKey()) {
                stringJoiner.add(column.getColumnName() + " = #{" + column.hibernateField().getFieldName() + "}");
            }
        }
        return stringJoiner.toString();
    }
}
|
// Circle metrics for a fixed radius of 5 units.
const radius = 5;

// C = 2πr
const circumference = 2*Math.PI*radius;
console.log(`The circumference is ${circumference}`);

// A = πr²
const area = Math.PI * radius**2;
console.log(`The area is ${area}`);

// Output: The circumference is 31.41592653589793
// The area is 78.53981633974483
package io.opensphere.core.data.util;
import io.opensphere.core.data.util.QueryTracker.QueryStatus;
import io.opensphere.core.data.util.QueryTracker.QueryTrackerListener;
/** Adapter for {@link QueryTrackerListener}: all callbacks are no-ops so
 * subclasses only override the events they care about. */
public class QueryTrackerListenerAdapter implements QueryTrackerListener
{
    /** No-op; override to react to progress updates. */
    @Override
    public void fractionCompleteChanged(QueryTracker tracker, float fractionComplete)
    {
    }

    /** No-op; override to react to status transitions. */
    @Override
    public void statusChanged(QueryTracker tracker, QueryStatus status)
    {
    }
}
|
const express = require('express')
const { verifyUserAccessToken } = require('../middlewares')
const router = express.Router()
const { getUserContestSubmissions } = require('../../models/submissions')

// GET /:contestId/users/:userId/submissions
// Requires a valid user access token; responds 200 with the user's contest
// submissions, or 400 with the failure reason.
const listSubmissions = (req, res) => {
  getUserContestSubmissions(req)
    .then((results) =>
      res.status(200).json({
        success: true,
        results,
        error: null,
      })
    )
    .catch((error) =>
      res.status(400).json({
        success: false,
        results: null,
        error,
      })
    )
}

router.get(
  '/:contestId/users/:userId/submissions',
  verifyUserAccessToken,
  listSubmissions
)

module.exports = router
|
/*
SELECT cast(retentiontime*60 as int) as retentiontime, count(retentiontime) Frequency
FROM mspeak
GROUP BY cast(retentiontime*60 as int)
SELECT cast(drifttime*60 as int) as drifttime, count(drifttime) Frequency
FROM mspeak
GROUP BY cast(drifttime*60 as int)
SELECT cast(intensity/10 as bigint)*10 as intensity, count(intensity) Frequency
FROM mspeak
GROUP BY cast(intensity/10 as bigint)
select * from DimRetentionTimeRel
*/
-- Per mass/charge bucket and peptide sequence: min/avg/max masscharge and
-- peak count.
-- NOTE(review): the bucket join compares msp.retentiontime against the
-- DimMassChargeRel ranges while the aggregates use msp.masscharge — this
-- looks like it should join on msp.masscharge; verify.
select
mc.id, s.sequence, (select top 1 label from DimMassChargeRel where id = mc.id) label, min(msp.masscharge) 'min', avg(msp.masscharge) 'avg', max(msp.masscharge) 'max', count(*) 'count'
-- ISNULL((select top 1 id from DimMassChargeRel where msp.masscharge >= minrange and msp.masscharge < maxrange), 1) MassChargeRelID
-- ,*
from mspeak msp
LEFT join DimMassChargeRel mc on msp.retentiontime >= mc.minrange and msp.retentiontime < mc.maxrange
LEFT JOIN mspeaksequence msps on msp.id = msps.mspeakid
LEFT JOIN sequence s on msps.sequenceid = s.id
group by mc.id, s.sequence
order by label, len(s.sequence), s.sequence, mc.id
-- Per retention-time bucket and sequence (times scaled to seconds, * 60).
select
rt.id, s.sequence, (select top 1 label from DimRetentionTimeRel where id = rt.id) label, min(msp.retentiontime) * 60 'min', avg(msp.retentiontime) * 60 'avg', max(msp.retentiontime) * 60 'max', count(*) 'count'
-- ISNULL((select top 1 id from DimMassChargeRel where msp.masscharge >= minrange and msp.masscharge < maxrange), 1) MassChargeRelID
-- ,*
from mspeak msp
LEFT join DimRetentionTimeRel rt on msp.retentiontime * 60 > rt.minrange and msp.retentiontime * 60 <= rt.maxrange
LEFT JOIN mspeaksequence msps on msp.id = msps.mspeakid
LEFT JOIN sequence s on msps.sequenceid = s.id
group by rt.id, s.sequence
order by len(s.sequence), s.sequence, rt.id
-- Per drift-time bucket and sequence (same pattern over DimDriftTimeRel).
select
dt.id, s.sequence, (select top 1 label from DimDriftTimeRel where id = dt.id) label, min(msp.drifttime) * 60 'min', avg(msp.drifttime) * 60 'avg', max(msp.drifttime) * 60 'max', count(*) 'count'
-- ISNULL((select top 1 id from DimMassChargeRel where msp.masscharge >= minrange and msp.masscharge < maxrange), 1) MassChargeRelID
-- ,*
from mspeak msp
LEFT join DimDriftTimeRel dt on msp.drifttime * 60 > dt.minrange and msp.drifttime * 60 <= dt.maxrange
LEFT JOIN mspeaksequence msps on msp.id = msps.mspeakid
LEFT JOIN sequence s on msps.sequenceid = s.id
group by dt.id, s.sequence
order by len(s.sequence), s.sequence, dt.id
-- Drift-time label summary.
-- NOTE(review): here the DimDriftTimeRel ranges are joined against
-- msp.retentiontime * 60 while the aggregates use msp.drifttime —
-- inconsistent with the query above; verify which column is intended.
select
--rt.id, --s.sequence,
dt.label, min(msp.drifttime) 'min', avg(msp.drifttime) 'avg', max(msp.drifttime) 'max', count(*) 'count'
-- ISNULL((select top 1 id from DimMassChargeRel where msp.masscharge >= minrange and msp.masscharge < maxrange), 1) MassChargeRelID
-- ,*
from mspeak msp
LEFT join DimDriftTimeRel dt on msp.retentiontime * 60 > dt.minrange and msp.retentiontime * 60 <= dt.maxrange
LEFT JOIN mspeaksequence msps on msp.id = msps.mspeakid
LEFT JOIN sequence s on msps.sequenceid = s.id
group by dt.label --, s.sequence
--order by mc.id --len(s.sequence), s.sequence
-- Ad-hoc inspection queries.
select * from DimDriftTimeRel
select top 50 drifttime * 60, * from mspeak
<reponame>longwind09/sampling
package org.felix.ml.sampling.filter.single;
import org.felix.ml.sampling.FilterContext;
import org.felix.ml.sampling.SamplePackage;
import org.felix.ml.sampling.ScoreResult;
import org.felix.ml.sampling.exception.ConfigException;
import org.felix.ml.sampling.filter.BaseFilter;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
/**
 * Sampling filter that keeps each candidate id independently with
 * probability {@code rate}.
 *
 * @0
 */
public class RandomFilter extends BaseFilter {
    // Keep probability; must end up in (0, 1]. Configured via init(param).
    private float rate = 0.1f;
    // Unseeded PRNG, so selections differ between runs.
    private Random r = new Random();

    /**
     * Parses the keep-rate from the configuration parameter.
     * Any parse failure or out-of-range value surfaces as ConfigException.
     */
    public void init(String param) throws ConfigException {
        try {
            this.rate = Float.parseFloat(param);
            if (this.rate <= 0 || this.rate > 1)
                throw new ConfigException(String.format("wrong config for %s,param:%s", getClass().getSimpleName(), param));
            super.init(param);
        } catch (Exception e) {
            // Re-wraps everything uniformly (including the range-check
            // exception thrown just above) into one ConfigException.
            throw new ConfigException(String.format("wrong config for %s,param:%s", getClass().getSimpleName(), param));
        }
    }

    /** Returns the subset of candidate ids that survive the random draw. */
    public List<Integer> doFilter(SamplePackage spackage, ScoreResult scoreResult, FilterContext context) {
        List<Integer> ret = new ArrayList<Integer>();
        for (Integer id : spackage.getFilterBefore()) {
            float f = r.nextFloat();
            if (f < rate)
                ret.add(id);
        }
        return ret;
    }
}
|
<gh_stars>0
// pages/webViewList/brandDetail/brandDetail.js
// WeChat mini-program page that hosts a web-view for a brand detail page.
const api = require('../../../config/api.js');

Page({
  /**
   * Initial page data
   */
  data: {
    url:''
  },

  /**
   * Lifecycle hook -- page load.
   * Rebuilds the target web-view URL from the JSON-encoded `url` option
   * passed by the navigator (routeName + query parameter + "&type=app").
   */
  onLoad: function (options) {
    let url=JSON.parse(options.url)
    console.log(url,11111111);
    this.setData({
      url:api.webViewUrl+url.routeName+'?'+url.name+'='+url.id+'&type=app'
    })
  },

  /**
   * Lifecycle hook -- first render complete
   */
  onReady: function () {
  },

  /**
   * Lifecycle hook -- page shown
   */
  onShow: function () {
  },

  /**
   * Lifecycle hook -- page hidden
   */
  onHide: function () {
  },

  /**
   * Lifecycle hook -- page unloaded
   */
  onUnload: function () {
  },

  /**
   * Event handler -- user pull-down refresh
   */
  onPullDownRefresh: function () {
  },

  /**
   * Event handler -- page scrolled to the bottom
   */
  onReachBottom: function () {
  },

  /**
   * User taps the top-right share button; returns the share card config.
   */
  onShareAppMessage: function () {
    return {
      title: '加盟好餐饮,就找餐盟严选!',
    }
  }
})
// EnemySetup.cpp
// <NAME>, 23rd August 1998.
#include <MFStdLib.h>
#include <windows.h>
#include <windowsx.h>
#include <ddlib.h>
#include <commctrl.h>
#include "resource.h"
#include "fmatrix.h"
#include "inline.h"
#include "gi.h"
#include "EdStrings.h"
#include "GEdit.h"
#include "ticklist.h"
//---------------------------------------------------------------
// Dialog state shared between do_enemy_flags_setup() and efs_proc():
// the current flag bitmask and the event-point index being edited.
SLONG enemyf_flags,
      enemyf_to_change;
extern CBYTE *WaypointExtra(EventPoint *ep, CBYTE *msg);
#define STR_LEN 800
//---------------------------------------------------------------
// Fills combo box i with the strings of table s (terminated by a "!"
// entry) and selects index d. Relies on locals the_ctrl/c0/lbitem_str
// existing at the expansion site. (Not expanded in this file's visible
// code — presumably used elsewhere; confirm before removing.)
#define INIT_COMBO_BOX(i,s,d) the_ctrl = GetDlgItem(hWnd,i); \
    c0 = 1; \
    lbitem_str = s[0]; \
    while(*lbitem_str!='!') \
    { \
        SendMessage(the_ctrl,CB_ADDSTRING,0,(LPARAM)lbitem_str); \
        lbitem_str = s[c0++]; \
    } \
    SendMessage(the_ctrl,CB_SETCURSEL,d,0);
// Dialog procedure for the enemy-flags setup dialog: lets the user pick a
// WPT_CREATE_ENEMIES event point from a combo box and toggle its flags in
// a tick-list; results are read back into the enemyf_* globals on close.
BOOL CALLBACK efs_proc(HWND hWnd,UINT message,WPARAM wParam,LPARAM lParam)
{
    SLONG ep;
    SLONG c0 = 0;
    HWND the_ctrl;
    EventPoint *ep_ptr, *ep_base=current_mission->EventPoints;
    CBYTE msg[STR_LEN],str[STR_LEN];
    switch(message)
    {
        case WM_INITDIALOG:
        {
            // fill the list box
            the_ctrl = GetDlgItem(hWnd,IDC_COMBO1);
            ep = current_mission->UsedEPoints;
            c0 = 0;
            // Walk the event-point chain, adding only enemy-creation
            // waypoints; remember the combo index of the one being edited.
            while (ep) {
                ep_ptr = TO_EVENTPOINT(ep_base,ep);
                if (ep_ptr->WaypointType==WPT_CREATE_ENEMIES) {
                    WaypointExtra(ep_ptr,msg);
                    // Entry format: "<index><group letter>: <description>".
                    sprintf(str,"%d%c: %s",ep,'A' + ep_ptr->Group,msg);
                    SendMessage(the_ctrl,CB_ADDSTRING,0,(LPARAM)str);
                    if (ep==enemyf_to_change) SendMessage(the_ctrl,CB_SETCURSEL,c0,0);
                    c0++;
                }
                ep = ep_ptr->Next;
            }
            // Subclass and init the listbox
            ticklist_init(hWnd, IDC_LIST1, wenemy_flag_strings,enemyf_flags);
            return TRUE;
        }
        case WM_COMMAND:
            switch(LOWORD(wParam))
            {
                case IDOK:
                    // OK routes through WM_CLOSE so the read-back below runs.
                    SendMessage(hWnd,WM_CLOSE,0,0);
                    return TRUE;
            }
            break;
        case WM_MEASUREITEM:
            return ticklist_measure(hWnd, wParam, lParam);
        case WM_DRAWITEM:
            return ticklist_draw(hWnd, wParam, lParam);
        case WM_CLOSE:
            // Collect the flag bitmask and the selected combo entry.
            enemyf_flags = ticklist_bitmask(hWnd,IDC_LIST1);
            enemyf_to_change = SendMessage ( GetDlgItem(hWnd,IDC_COMBO1),
                                             CB_GETCURSEL,
                                             0,0
                                           );
            // now translate phoney people indices to real one
            if (enemyf_to_change==-1) {
                enemyf_to_change=0;
            } else {
                // Parse the real event-point index back out of the selected
                // entry's leading decimal digits (set up in WM_INITDIALOG).
                memset(msg,0,STR_LEN);
                SendMessage(GetDlgItem(hWnd,IDC_COMBO1),CB_GETLBTEXT,enemyf_to_change,(long)msg);
                sscanf(msg,"%d",&enemyf_to_change);
            }
            ticklist_close(hWnd, IDC_LIST1);
            EndDialog(hWnd,0);
            return TRUE;
    }
    return FALSE;
}
//---------------------------------------------------------------
// Opens the enemy-flags dialog for an event point: seeds the enemyf_*
// globals from the point's Data[] slots, runs the modal dialog (efs_proc),
// then writes the edited values back.
void do_enemy_flags_setup(EventPoint *the_ep)
{
    // Set the dialog.
    enemyf_to_change = the_ep->Data[0];
    enemyf_flags = the_ep->Data[1];
    // Do the dialog.
    DialogBox (
        GEDIT_hinstance,
        MAKEINTRESOURCE(IDD_ENEMYFLAGS_SETUP),
        GEDIT_view_wnd,
        (DLGPROC)efs_proc
    );
    // Set the data.
    the_ep->Data[0] = enemyf_to_change;
    the_ep->Data[1] = enemyf_flags;
}
//---------------------------------------------------------------
|
#!/bin/bash
# Atomic Red Team T1027 demo: writes base64-encoded data to disk, decodes
# it into a shell script, and executes it.
# Fixes: the banner echoes are now quoted — unquoted, bash treated "#1 ..."
# as a comment and printed only "Atomic Test"; also corrected the
# "Ovfuscated" typo in the technique name.
echo "T1027 - Obfuscated files or Information"
echo "Atomic Test #1 - Decode base64 Data into Script"
# Payload decodes to: echo Hello from the Atomic Red Team
sh -c "echo ZWNobyBIZWxsbyBmcm9tIHRoZSBBdG9taWMgUmVkIFRlYW0= > /tmp/encoded.dat"
cat /tmp/encoded.dat | base64 -d > /tmp/art.sh
chmod +x /tmp/art.sh
/tmp/art.sh
echo "Atomic Test #1 - Decode base64 Data into Script"
|
#!/bin/bash
# Installs the bundled scripts/*.sh into the invoking user's ~/Script and
# symlinks each one into /usr/local/bin so they are on PATH. Run via sudo.

################# Colors ##############################
RED=`tput bold && tput setaf 1`
GREEN=`tput bold && tput setaf 2`
YELLOW=`tput bold && tput setaf 3`
BLUE=`tput bold && tput setaf 4`
NC=`tput sgr0`
# Color print helpers (functions deliberately share the variable names).
function RED(){ echo -e ${RED}${1}${NC} ;}
function GREEN(){ echo -e ${GREEN}${1}${NC} ;}
function YELLOW(){ echo -e ${YELLOW}${1}${NC} ;}
function BLUE(){ echo -e ${BLUE}${1}${NC} ;}
#######################################################

############ Defining constants # ################
# SUDO_USER is the invoking (non-root) user, so files land in their home.
INSTALLATION_FOLDER="/home/${SUDO_USER}/Script"
LINK_FOLDER="/usr/local/bin"

################ Functions #######################
# Aborts unless the script runs with root privileges.
function checkForSudoRights(){
  if [ $UID -ne 0 ]; then
    RED "You must run this script as root!"; exit 1
  fi
}

# Creates the installation folder if it does not exist yet.
# NOTE(review): the existence test uses ~/Script (root's home under sudo)
# while creation uses ${INSTALLATION_FOLDER} (the calling user's home) —
# these can disagree; confirm which is intended.
function generateScriptFolder(){
  if [ -d ~/Script ]; then
    GREEN " Folder [${INSTALLATION_FOLDER}] found."
  else
    YELLOW " Folder [${NC}$INSTALLATION_FOLDER${YELLOW}] does not exists."
    YELLOW " Creating the installation folder ..."
    mkdir ${INSTALLATION_FOLDER}
    GREEN " Folder [${NC}${INSTALLATION_FOLDER}${GREEN}] have been created."
  fi
}

# Copies scripts/*.sh into the installation folder and links each into
# /usr/local/bin unless a file of the same name already exists there.
function copyScriptsAndCreateLinks(){
  for script in scripts/*.sh; do
    cp ${script} ${INSTALLATION_FOLDER}
    # ${script: 8} strips the leading "scripts/" prefix (8 characters).
    script_name=${script: 8}
    # NOTE(review): $i is unset on the first iteration, so the first line
    # prints an empty counter; ((i++)) below then counts from 0.
    BLUE "\t${i} - [${NC}${script_name}${BLUE}\t] copied in: ~/Script"
    if [ -f ${LINK_FOLDER}/${script_name} ]; then
      YELLOW "\tError : [${LINK_FOLDER}/${script_name}] already exist."
      RED "\tNo link created for : ${script_name} ."
    else
      ln -s ${INSTALLATION_FOLDER}/${script_name} ${LINK_FOLDER}/${script_name}
      GREEN "\tLink sucessfully created."
    fi
    ((i++))
  done
}

################### Script ###################
checkForSudoRights
generateScriptFolder
copyScriptsAndCreateLinks
|
/*
Copyright © 2021 Google
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package operations
import (
"context"
"errors"
"fmt"
"regexp"
"strings"
"time"
log "github.com/sirupsen/logrus"
)
const (
	// StatusDone is the terminal status string reported by GCP operations.
	StatusDone = "DONE"
)

var (
	// operationRegex extracts GCP operation names of the form
	// "operation-<id>-<id>" embedded in error text.
	operationRegex = regexp.MustCompile(`operation-[\d\w]+-[\d\w]+`)
)

// Operation is an interface for GCE and GKE Operations.
type Operation interface {
	// IsFinished checks whether the Operation status is complete.
	IsFinished(ctx context.Context) (bool, error)
	// String returns the Operation ID (i.e. the path).
	// Note: this Operation ID is relative to the Service.
	String() string
}

// OperationStatus is a distillation of a GCP Operation status (which vary by API).
type OperationStatus struct {
	Status string
	Error  string
}

// Handler waits for an Operation to reach a terminal state.
type Handler interface {
	Wait(ctx context.Context, op Operation) error
}

// HandlerImpl is a thread-safe implementation of Handler.
type HandlerImpl struct {
	// interval is the polling period; deadline bounds the total wait.
	interval time.Duration
	deadline time.Duration
}

// NewHandler builds a HandlerImpl with the given polling interval and
// overall deadline.
func NewHandler(interval time.Duration, deadline time.Duration) *HandlerImpl {
	return &HandlerImpl{interval: interval, deadline: deadline}
}
// Wait loops over Operation.IsFinished method until the operation is complete.
// It polls every h.interval and gives up with a wrapped context error once
// h.deadline elapses or the caller's context is cancelled; poll errors are
// returned immediately.
func (h HandlerImpl) Wait(ctx context.Context, op Operation) error {
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(h.deadline))
	defer cancel()
	ticker := time.NewTicker(h.interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("context error: %w", ctx.Err())
		case <-ticker.C:
			log.Debugf("Polling for %s", op)
			done, err := op.IsFinished(ctx)
			if err != nil {
				return err
			}
			if done {
				return nil
			}
		}
	}
}
// ObtainID attempts to retrieve an operation name from the error.
// It returns "" when the error text contains no operation name.
func ObtainID(err error) string {
	return operationRegex.FindString(err.Error())
}
// IsFinished runs one poll of an operation's status and interprets it:
// not yet DONE -> (false, nil); DONE with an error string -> (true, error);
// DONE and clean -> (true, nil). A poll failure is returned unchanged.
func IsFinished(ctx context.Context, poll func(ctx context.Context) (OperationStatus, error)) (bool, error) {
	status, err := poll(ctx)
	if err != nil {
		return false, err
	}
	switch {
	case status.Status != StatusDone:
		return false, nil
	case status.Error != "":
		return true, errors.New(status.Error)
	default:
		return true, nil
	}
}
// WaitForOperationInProgress will attempt a retry of the function.
// When f fails because another operation on the same resource is still
// running (detected by the GKE "Operation <id> is currently" error text),
// it waits for that operation via wait and then retries f exactly once.
// Any other failure from f is returned unchanged.
func WaitForOperationInProgress(ctx context.Context, f func(ctx context.Context) error, wait func(ctx context.Context, op string) error) error {
	err := f(ctx)
	if err == nil {
		return nil
	}
	// No operation name in the error text means this is not the
	// operation-in-progress case.
	op := ObtainID(err)
	if op == "" {
		return err
	}
	if !strings.Contains(err.Error(), fmt.Sprintf("Operation %s is currently", op)) {
		// Match format of errors returned by the GKE API.
		return err
	}
	log.Infof("Operation %s is in progress; wait for operation to complete: %v", op, err)
	if err := wait(ctx, op); err != nil {
		return err
	}
	log.Infof("Operation %s is complete; retrying. Retry due to: %v", op, err)
	return f(ctx)
}
|
<reponame>vitalics/playwright<gh_stars>1-10
/**
* Copyright (c) Microsoft Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import type { SpawnOptions } from 'child_process';
import { spawn } from 'child_process';
/**
 * Runs a child process and resolves (never rejects) once it finishes.
 * Captured stdout/stderr are accumulated as strings. On spawn failure the
 * promise resolves with `code: 0` and `error` populated, matching the
 * original contract.
 */
export function spawnAsync(cmd: string, args: string[], options: SpawnOptions = {}): Promise<{stdout: string, stderr: string, code: number | null, error?: Error}> {
  // Spread is equivalent to the previous Object.assign: options may
  // override windowsHide.
  const child = spawn(cmd, args, { windowsHide: true, ...options });
  return new Promise(resolve => {
    let stdout = '';
    let stderr = '';
    child.stdout?.on('data', data => stdout += data);
    child.stderr?.on('data', data => stderr += data);
    child.on('close', code => resolve({ stdout, stderr, code }));
    child.on('error', error => resolve({ stdout, stderr, code: 0, error }));
  });
}
|
import { Component, OnInit, AfterViewInit, ViewChild } from '@angular/core';
import { HttpClientModule } from '@angular/common/http';
import { HttpModule } from '@angular/http';
import { MatSnackBar } from '@angular/material';
import { OverlayContainer } from '@angular/cdk/overlay';
import { MatTableDataSource, MatSort, MatDialog } from '@angular/material';
import { Observable } from 'rxjs/Observable';
import { IssueService } from './issue.service';
import { Issue } from './issue-model';
import { ANIMATE_ON_ROUTE_ENTER } from '@app/core';
import { AngularFirestore } from 'angularfire2/firestore';
import { EditDialogComponent } from './edit-dialog.component';
import { SharedModule } from '../shared/shared.module'
/*
import {MatDialogModule} from '@angular/material/dialog';
import {MatPaginatorModule} from '@angular/material/paginator';
*/
// Issue management screen: creates issues through IssueService and lists
// them in a filterable Material table backed by the Firestore "issues"
// collection.
@Component({
  selector: 'anms-issue',
  templateUrl: './issue.component.html',
  styleUrls: ['./issue.component.sass']
})
export class IssueComponent implements OnInit, AfterViewInit {
  // Columns rendered by the Material table (referenced from the template).
  displayedColumns = ['nome', 'inicio', 'andamento', 'fim', 'valor', 'editar'];
  // Table data source, (re)built from Firestore snapshots in ngAfterViewInit.
  dataSource: MatTableDataSource<any>;
  // Route-enter animation flag consumed by the template.
  animateOnRouteEnter = ANIMATE_ON_ROUTE_ENTER;
  // Stream of issues exposed to the template (assigned in ngOnInit).
  issues: Observable<Issue[]>;
  // Form model fields (presumably two-way bound in the template --
  // renaming them would break the bindings).
  nome: string;
  inicio: string;
  andamento: string;
  fim: string;
  valor: string;
  uid?: string;
  // NOTE(review): `time` is declared but never used in this class --
  // confirm whether the template needs it.
  time: number;
  constructor(
    private issueService: IssueService,
    public snackBar: MatSnackBar,
    private afs: AngularFirestore,
    public dialog: MatDialog//,
    //public sort: MatSort
  ) { }
  ngOnInit() {
    // Subscribe the template to the service's snapshot stream.
    this.issues = this.issueService.getSnapshot();
  }
  // Persists the current form fields as a new issue, clears the form and
  // shows a confirmation snack bar.
  createIssue() {
    this.issueService.create(this.nome, this.inicio, this.andamento, this.fim, this.valor);
    this.nome = '';
    this.inicio = '';
    this.andamento = '';
    this.fim = '';
    this.valor = '';
    this.snackBar.open('Sua Issue foi Cadastrada com Sucesso, nosso <NAME>, vai acompanhar !', 'X', { duration: 2200 });
  }
  ngAfterViewInit() {
    // Rebuild the table data source on every Firestore change.
    this.afs.collection<any>('issues').valueChanges().subscribe(data => {
      this.dataSource = new MatTableDataSource(data);
      //this.dataSource.sort = this.sort;
    })
  }
  // Applies a case-insensitive, trimmed text filter to the table.
  applyFilter(filterValue: string) {
    filterValue = filterValue.trim();
    filterValue = filterValue.toLowerCase();
    this.dataSource.filter = filterValue;
  }
  // Opens the edit dialog for the given table row's data.
  openDialog(data): void {
    const dialogRef = this.dialog.open(EditDialogComponent, {
      width: '600px',
      data: data
    });
  }
}
<reponame>ahmedreza1/mil-59-build-modal-core-component
import React from 'react'
import { Meta, Story } from '@storybook/react'
import { myModal as Modal, Props } from '../src/components/Modal/Modal'
// Storybook metadata for the Modal component.
const meta: Meta = {
  title: 'Layout/Modal',
  component: Modal,
  argTypes: {
    // Log click events in Storybook's "Actions" panel.
    onClick: { action: 'clicked' },
    children: {
      // Default trigger label when a story supplies no children.
      defaultValue: "Open Modal"
    }
  }
}
export default meta;
// Shared template: renders the Modal with whatever args a story provides.
const Template: Story<Props> = (args) => <Modal {...args} />
// Default story: relies solely on the argType defaults above.
export const Default = Template.bind({});
// Secondary story: demonstrates the "Large" variant.
export const Secondary = Template.bind({});
Secondary.args = {
  variant: "Large",
  children: "I am Large"
}
|
def get_distinct_count(arr):
    """Return the number of distinct items in ``arr``.

    Items must be hashable (the same requirement the original dict-based
    implementation had).  Distinctness follows ``==``/``hash`` semantics,
    so e.g. ``1`` and ``1.0`` count as a single item.

    :param arr: iterable of hashable items
    :return: count of unique items
    """
    # A set deduplicates in one O(n) pass; the original built a dict with
    # dummy values just to use its keys as a set.
    return len(set(arr))
<reponame>buidler-labs/hedera-mirror-node<filename>hedera-mirror-importer/src/main/java/com/hedera/mirror/importer/repository/upsert/ScheduleUpsertQueryGenerator.java
package com.hedera.mirror.importer.repository.upsert;
/*-
*
* Hedera Mirror Node
*
* Copyright (C) 2019 - 2022 Hedera Hashgraph, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
import java.util.List;
import java.util.Set;
import javax.inject.Named;
import lombok.Value;
import com.hedera.mirror.common.domain.schedule.Schedule_;
/**
 * Upsert SQL fragment generator for the {@code schedule} table.  The field
 * values configure the generic machinery in
 * {@link AbstractUpsertQueryGenerator}; the two overrides supply the
 * table-specific WHERE clauses for the insert and update statements.
 */
@Named
@Value
public class ScheduleUpsertQueryGenerator extends AbstractUpsertQueryGenerator<Schedule_> {

    private final String finalTableName = "schedule";

    private final String temporaryTableName = getFinalTableName() + "_temp";

    // scheduleId is used for completeness
    private final List<String> conflictIdColumns = List.of(Schedule_.SCHEDULE_ID);

    private final Set<String> nullableColumns = Set.of(Schedule_.EXECUTED_TIMESTAMP);

    private final Set<String> nonUpdatableColumns = Set.of(Schedule_.CONSENSUS_TIMESTAMP,
            Schedule_.CREATOR_ACCOUNT_ID, Schedule_.PAYER_ACCOUNT_ID, Schedule_.SCHEDULE_ID,
            Schedule_.TRANSACTION_BODY);

    /**
     * Insert only temp-table rows whose consensus timestamp is populated.
     */
    @Override
    public String getInsertWhereClause() {
        return " where " + getFullTempTableColumnName(Schedule_.CONSENSUS_TIMESTAMP) + " is not null";
    }

    /**
     * Update the final row matching the temp row's schedule id, and only
     * when the temp row carries an executed timestamp.
     */
    @Override
    public String getUpdateWhereClause() {
        return " where " + getFullFinalTableColumnName(Schedule_.SCHEDULE_ID)
                + " = " + getFullTempTableColumnName(Schedule_.SCHEDULE_ID)
                + " and " + getFullTempTableColumnName(Schedule_.EXECUTED_TIMESTAMP) + " is not null";
    }
}
|
#!/bin/bash
# run tests for phptidy
# Usage: ./test.sh
#
# Runs phptidy over every *.php fixture in this directory and diffs the
# generated *.phptidy.php output against the checked-in expected output.

../phptidy.php suffix

passed=0
failed=0

for f in *.php
do
	# Skip phptidy's own output files; only original fixtures are tested.
	if [[ ${f} == *.phptidy.php ]]
	then
		continue
	fi
	# Quote the filenames so fixtures containing spaces don't break the
	# diff invocation (the original left them unquoted).
	colordiff -u "${f}.expected.phptidy.php" "${f}.phptidy.php" \
		&& passed=$((passed+1)) \
		|| failed=$((failed+1))
done

echo "=> ${passed} tests passed, ${failed} tests failed."
|
<gh_stars>1-10
# Migration that creates the donations table linking users to places,
# with foreign keys on both reference columns.
class CreateDonations < ActiveRecord::Migration[5.0]
  # Reversible definition: create_table and add_foreign_key can both be
  # automatically reverted by Rails.
  def change
    create_table :donations do |t|
      t.integer :user_id
      t.integer :place_id
      t.timestamps
    end
    add_foreign_key :donations, :users
    add_foreign_key :donations, :places
  end
  # NOTE(review): defining an explicit #down alongside #change (with no
  # matching #up) is unconventional -- on rollback the explicit #down is
  # preferred over reverting #change, so confirm this duplication is
  # intentional before editing.
  def down
    remove_foreign_key :donations, :users
    remove_foreign_key :donations, :places
    drop_table :donations
  end
end
import re
def annotate_links_no_site(text, site):
    """Wrap every URL in ``text`` in an HTML anchor, except URLs that
    contain ``site``.

    :param text: text that may contain http(s)/www URLs
    :param site: substring identifying URLs to leave untouched
    :return: the text with each external URL replaced by
        ``<a href="URL">URL</a>``
    """
    pattern = r'(https?://\S+|www\.\S+)'

    def _annotate(match):
        url = match.group(0)
        # URLs on the given site stay as-is.
        if site in url:
            return url
        return f'<a href="{url}">{url}</a>'

    # re.sub replaces each match exactly once, in place.  The original
    # str.replace() approach re-scanned the whole text once per found URL,
    # which double-wrapped duplicated URLs and corrupted URLs that are
    # substrings of other URLs.
    return re.sub(pattern, _annotate, text)
<filename>src/main/scala/gov/nasa/jpl/imce/oti/magicdraw/dynamicScripts/validation/MOFMultiplicityValidation.scala
/*
* Copyright 2016 California Institute of Technology ("Caltech").
* U.S. Government sponsorship acknowledged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* License Terms
*/
package gov.nasa.jpl.imce.oti.magicdraw.dynamicScripts.validation
import java.awt.event.ActionEvent
import com.nomagic.magicdraw.core.{Application, Project}
import com.nomagic.magicdraw.ui.browser.{Node, Tree}
import com.nomagic.magicdraw.uml.symbols.{DiagramPresentationElement,PresentationElement}
import com.nomagic.magicdraw.uml.symbols.shapes.PackageView
import com.nomagic.uml2.ext.magicdraw.classes.mdkernel.{Element, Package}
import com.nomagic.uml2.ext.magicdraw.mdprofiles.Profile
import gov.nasa.jpl.dynamicScripts.DynamicScriptsTypes
import gov.nasa.jpl.dynamicScripts.magicdraw.ui.symbols.internal.SymbolHelper._
import gov.nasa.jpl.dynamicScripts.magicdraw.validation.MagicDrawValidationDataResults
import gov.nasa.jpl.imce.oti.magicdraw.dynamicScripts.actions._
import gov.nasa.jpl.imce.oti.magicdraw.dynamicScripts.utils.OTIHelper
import org.omg.oti.magicdraw.uml.canonicalXMI.MagicDrawIDGenerator
import org.omg.oti.magicdraw.uml.canonicalXMI.helper._
import org.omg.oti.magicdraw.uml.read.MagicDrawUML
import org.omg.oti.uml.read.api.UMLPackage
import org.omg.oti.uml.validation._
import scala.collection.JavaConversions._
import scala.collection.immutable._
import scala.util.{Success,Try}
import scala.{Option,None,Some,StringContext}
import scala.Predef.ArrowAssoc
/**
* Validates all MultiplicityElements in scope of the selected packages per MOF 2.5 well-formedness constraints
*
* @see MOF 2.5, Section 12.4 EMOF Constraints
* [32] The values of MultiplicityElement::lowerValue and upperValue must be
* of kind LiteralInteger and LiteralUnlimitedNatural respectively.
* @see MOF 2.5, Section 14.4 CMOF Constraints
* [14] The values of MultiplicityElement::lowerValue and upperValue must
* be of kind LiteralInteger and LiteralUnlimitedNatural respectively.
*/
object MOFMultiplicityValidation {

  // Browser context-menu entry point: selection rooted at a Profile.
  def doit
  ( p: Project, ev: ActionEvent,
    script: DynamicScriptsTypes.BrowserContextMenuAction,
    tree: Tree, node: Node,
    pkg: Profile, selection: java.util.Collection[Element] )
  : Try[Option[MagicDrawValidationDataResults]]
  = OTIHelper.toTry(
      MagicDrawOTIHelper.getOTIMagicDrawAdapterForProfileCharacteristics(p),
      (oa: MagicDrawOTIProfileAdapter) => {
        val app = Application.getInstance()
        val guiLog = app.getGUILog
        guiLog.clearLog()

        implicit val umlOps = oa.umlOps
        import umlOps._

        // Narrow the browser selection to UML packages before analysis.
        val selectedPackages
        : Set[UMLPackage[MagicDrawUML]]
        = selection.toSet selectByKindOf { case p: Package => umlPackage(p) }

        doit(p, oa, selectedPackages)
      })

  // Browser context-menu entry point: selection rooted at a Package.
  def doit
  ( p: Project, ev: ActionEvent,
    script: DynamicScriptsTypes.BrowserContextMenuAction,
    tree: Tree, node: Node,
    top: Package, selection: java.util.Collection[Element] )
  : Try[Option[MagicDrawValidationDataResults]]
  = OTIHelper.toTry(
      MagicDrawOTIHelper.getOTIMagicDrawAdapterForProfileCharacteristics(p),
      (oa: MagicDrawOTIProfileAdapter) => {
        val app = Application.getInstance()
        val guiLog = app.getGUILog
        guiLog.clearLog()

        implicit val umlOps = oa.umlOps
        import umlOps._

        val selectedPackages
        : Set[UMLPackage[MagicDrawUML]]
        = selection.toSet selectByKindOf { case p: Package => umlPackage(p) }

        doit(p, oa, selectedPackages)
      })

  // Diagram context-menu entry point: selected package views, Profile trigger.
  def doit
  ( p: Project,
    ev: ActionEvent,
    script: DynamicScriptsTypes.DiagramContextMenuAction,
    dpe: DiagramPresentationElement,
    triggerView: PackageView,
    triggerElement: Profile,
    selection: java.util.Collection[PresentationElement] )
  : Try[Option[MagicDrawValidationDataResults]]
  = OTIHelper.toTry(
      MagicDrawOTIHelper.getOTIMagicDrawAdapterForProfileCharacteristics(p),
      (oa: MagicDrawOTIProfileAdapter) => {
        val app = Application.getInstance()
        val guiLog = app.getGUILog
        guiLog.clearLog()

        implicit val umlOps = oa.umlOps
        import umlOps._

        // Map each selected package view to its underlying UML package.
        val selectedPackages
        : Set[UMLPackage[MagicDrawUML]]
        = selection.toSet selectByKindOf { case pv: PackageView => umlPackage(getPackageOfView(pv).get) }

        doit(p, oa, selectedPackages)
      })

  // Diagram context-menu entry point: selected package views, Package trigger.
  def doit
  ( p: Project,
    ev: ActionEvent,
    script: DynamicScriptsTypes.DiagramContextMenuAction,
    dpe: DiagramPresentationElement,
    triggerView: PackageView,
    triggerElement: Package,
    selection: java.util.Collection[PresentationElement] )
  : Try[Option[MagicDrawValidationDataResults]]
  = OTIHelper.toTry(
      MagicDrawOTIHelper.getOTIMagicDrawAdapterForProfileCharacteristics(p),
      (oa: MagicDrawOTIProfileAdapter) => {
        val app = Application.getInstance()
        val guiLog = app.getGUILog
        guiLog.clearLog()

        implicit val umlOps = oa.umlOps
        import umlOps._

        val selectedPackages
        : Set[UMLPackage[MagicDrawUML]]
        = selection.toSet selectByKindOf { case pv: PackageView => umlPackage(getPackageOfView(pv).get) }

        doit(p, oa, selectedPackages)
      })

  // Core analysis: validates every MultiplicityElement in the selected
  // packages and converts the findings into MagicDraw validation results.
  def doit
  ( p: Project,
    oa: MagicDrawOTIProfileAdapter,
    pkgs: Set[UMLPackage[MagicDrawUML]] )
  : Try[Option[MagicDrawValidationDataResults]]
  = OTIHelper.toTry(
      MagicDrawOTIHelper.getOTIMagicDrawInfoForDataCharacteristics(p),
      (ordsa: MagicDrawOTIResolvedDocumentSetAdapterForDataProvider) => {

        implicit val umlOps = oa.umlOps
        import umlOps._

        val otiV = OTIMagicDrawValidation(p)

        implicit val idg = MagicDrawIDGenerator()(ordsa.rds.ds)
        implicit val otiCharacteristicsProvider = oa.otiCharacteristicsProvider

        // Accumulates validation messages per MagicDraw element.
        val elementMessages = scala.collection.mutable.HashMap[
          Element,
          scala.collection.mutable.ArrayBuffer[OTIMagicDrawValidation.MDValidationInfo]]()

        for {
          v <- ConnectableMultiplicityValidationHelper.analyzePackageContents(pkgs)
          // Only report findings that are not already valid.
          if MultiplicityValueValidationStatus.ValidValueStatus != v.status
          mdPoP = umlMagicDrawUMLMultiplicityElement(v.parameter_or_property).getMagicDrawMultiplicityElement
          // Map each finding kind to a MagicDraw validation info (with
          // repair actions where a fix is known).
          vOptInfo <- (v.status, v.value, v.valueRepair) match {
            case (MultiplicityValueValidationStatus.ValidValueStatus, _, _) =>
              Success(None)
            case (MultiplicityValueValidationStatus.RedundantValueStatus, Some(vDelete), _) =>
              // NOTE(review): mdVDelete is computed but not used in this
              // branch -- confirm whether it is needed.
              val mdVDelete = umlMagicDrawUMLElement(vDelete).getMagicDrawElement
              otiV.makeValidationInfo(
                otiV.MD_OTI_ValidationConstraint_RedundantValue,
                Some(s"Delete redundant ${v.role.propertyName} value for ${v.parameter_or_property.qualifiedName.get}"),
                DeleteRedundantValue(v.role) :: Nil)
            case (_, Some(vDelete), Some(vRepair)) =>
              // A repair value is available: offer a lower/upper value
              // replacement action depending on the finding's role.
              // NOTE(review): mdVDelete is computed but not used in this
              // branch -- confirm whether it is needed.
              val mdVDelete = umlMagicDrawUMLElement(vDelete).getMagicDrawElement
              if (MultiplicityElement_lowerValue == v.role)
                otiV.makeValidationInfo(
                  otiV.MD_OTI_ValidationConstraint_InvalidValueAsInteger,
                  Some(s"Replace lower value for ${v.parameter_or_property.qualifiedName.get} with $vRepair"),
                  ReplaceLowerIntegerValue(vDelete.xmiType.head, vRepair) :: Nil)
              else
                otiV.makeValidationInfo(
                  otiV.MD_OTI_ValidationConstraint_InvalidValueAsUnlimitedNatural,
                  Some(s"Replace upper value for ${v.parameter_or_property.qualifiedName.get} with $vRepair"),
                  ReplaceUpperUnlimitedNaturalValue(vDelete.xmiType.head, vRepair) :: Nil)
            case (MultiplicityValueValidationStatus.InvalidValueAsIntegerStatus, _, _) =>
              otiV.makeValidationInfo(
                otiV.MD_OTI_ValidationConstraint_InvalidValueAsInteger,
                v.explanation,
                Nil)
            case (MultiplicityValueValidationStatus.InvalidValueAsStringStatus, _, _) =>
              otiV.makeValidationInfo(
                otiV.MD_OTI_ValidationConstraint_InvalidValueAsString,
                v.explanation,
                Nil)
            case (MultiplicityValueValidationStatus.InvalidValueAsUnlimitedNaturalStatus, _, _) =>
              otiV.makeValidationInfo(
                otiV.MD_OTI_ValidationConstraint_InvalidValueAsUnlimitedNatural,
                v.explanation,
                Nil)
            case (MultiplicityValueValidationStatus.InvalidValueKindStatus, _, _) =>
              otiV.makeValidationInfo(
                otiV.MD_OTI_ValidationConstraint_InvalidValueKind,
                v.explanation,
                Nil)
            case _ =>
              Success(None)
          }
          vInfo <- vOptInfo
          validationInfo = elementMessages.getOrElseUpdate(
            mdPoP, scala.collection.mutable.ArrayBuffer[OTIMagicDrawValidation.MDValidationInfo]())
        } validationInfo += vInfo

        // Freeze the mutable accumulator into the immutable map expected
        // by the validation result builder.
        val elementValidationMessages: Map[Element, Iterable[OTIMagicDrawValidation.MDValidationInfo]] =
          (for {tuple <- elementMessages} yield tuple._1 -> tuple._2.to[Seq]).toMap

        val validation =
          otiV.makeMDIllegalArgumentExceptionValidation(
            "EMOF [32] & CMOF [14] Multiplicity Validation",
            elementValidationMessages)

        otiV.toTryOptionMagicDrawValidationDataResults(p, "MOF MultiplicityElement Validation", validation)
      })
}
public static String getMaxCommonSubstring(String s1, String s2) {
int m = s1.length();
int n = s2.length();
int maxLength = 0;
int endIndex = 0;
int[][] dp = new int[m][n];
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (s1.charAt(i) == s2.charAt(j)) {
if (i == 0 || j == 0) {
dp[i][j] = 1;
} else {
dp[i][j] = 1 + dp[i - 1][j - 1];
}
if (maxLength < dp[i][j]) {
maxLength = dp[i][j];
endIndex = i;
}
}
}
}
return s1.substring(endIndex - maxLength + 1, endIndex + 1);
} |
package io.opensphere.mantle.data.impl.dgset.v1;
import java.util.Objects;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import io.opensphere.mantle.data.ActiveGroupEntry;
/**
* The Class JAXBActiveGroupEntry.
*/
@XmlRootElement(name = "ActiveGroupEntry")
@XmlAccessorType(XmlAccessType.FIELD)
public class JAXBActiveGroupEntry implements ActiveGroupEntry
{
/** The name. */
@XmlAttribute(name = "id")
private String myGroupId;
/** The name. */
@XmlAttribute(name = "name", required = false)
private String myGroupName;
/**
* Default CTOR.
*/
public JAXBActiveGroupEntry()
{
}
/**
* Instantiates a new jAXB active group entry.
*
* @param other the other
*/
public JAXBActiveGroupEntry(ActiveGroupEntry other)
{
myGroupName = other.getName();
myGroupId = other.getId();
}
/**
* Instantiates a new jAXB active group entry.
*
* @param name the name
* @param id the id
*/
public JAXBActiveGroupEntry(String name, String id)
{
myGroupName = name;
myGroupId = id;
}
@Override
public boolean equals(Object obj)
{
if (this == obj)
{
return true;
}
if (obj == null || getClass() != obj.getClass())
{
return false;
}
JAXBActiveGroupEntry other = (JAXBActiveGroupEntry)obj;
return Objects.equals(myGroupId, other.myGroupId);
}
@Override
public String getId()
{
return myGroupId;
}
@Override
public String getName()
{
return myGroupName;
}
@Override
public int hashCode()
{
final int prime = 31;
int result = 1;
result = prime * result + (myGroupId == null ? 0 : myGroupId.hashCode());
return result;
}
/**
* Sets the id.
*
* @param id the new id
*/
public void setId(String id)
{
myGroupId = id;
}
/**
* Sets the name.
*
* @param name the new name
*/
public void setName(String name)
{
myGroupName = name;
}
@Override
public String toString()
{
return myGroupName + " [" + myGroupId + "]";
}
}
|
#!/bin/bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Creates a GCP service account, grants it the roles commonly needed by
# GCP ML pipelines, and stores its key as the 'user-gcp-sa' Kubernetes
# secret in the target cluster/namespace.

if [ "$#" -ne 4 ]; then
  echo "Usage: ./setup_auth.sh service-account-name zone cluster namespace"
  echo " Get these values by visiting https://console.cloud.google.com/ai-platform/pipelines/clusters"
  echo " eg: ./setup_auth.sh kfpdemo us-central1-a cluster-1 default"
  # Fix: exit non-zero so callers detect the usage error (bare 'exit'
  # previously reported success).
  exit 1
fi

PROJECT_ID=$(gcloud config get-value project)
SA_NAME=$1
ZONE=$2
CLUSTER=$3
NAMESPACE=$4

# See: https://github.com/kubeflow/pipelines/blob/master/manifests/gcp_marketplace/guide.md#gcp-service-account-credentials
gcloud container clusters get-credentials "$CLUSTER" --zone "$ZONE" --project "$PROJECT_ID"

# Create service account
gcloud iam service-accounts create "$SA_NAME" \
  --display-name "$SA_NAME" --project "$PROJECT_ID"

# Grant permissions to the service account by binding roles
# roles/editor is needed to launch a CAIP Notebook.
# The others (storage, bigquery, ml, dataflow) are pretty common for GCP ML pipelines
# That said, "admin" is a bit of an overkill; you might want to provide narrower roles for your users
for ROLE in roles/editor roles/storage.admin roles/bigquery.admin roles/ml.admin roles/dataflow.admin roles/pubsub.admin; do
  gcloud projects add-iam-policy-binding "$PROJECT_ID" \
    --member="serviceAccount:$SA_NAME@$PROJECT_ID.iam.gserviceaccount.com" \
    --role="$ROLE"
done

# Create credential for the service account
gcloud iam service-accounts keys create application_default_credentials.json --iam-account "$SA_NAME@$PROJECT_ID.iam.gserviceaccount.com"

# Attempt to create a k8s secret. If already exists, override.
# NOTE(review): newer kubectl versions require '--dry-run=client' instead of
# the deprecated bare '--dry-run' -- confirm the target kubectl version.
kubectl create secret generic user-gcp-sa \
  --from-file=user-gcp-sa.json=application_default_credentials.json \
  -n "$NAMESPACE" --dry-run -o yaml | kubectl apply -f -

# remove private key file
rm application_default_credentials.json
#!/bin/bash

# Harness for building and timing SPLASH-2 benchmarks under various
# compiler-instrumentation (CI) configurations.

# Commit interval (IR instructions).
CI=1000
# Push interval; overridable from the environment.
PI="${PI:-5000}"
# NOTE(review): CI is assigned the same value twice (see two lines up);
# one of the assignments is redundant -- confirm which was intended.
CI=1000
# Number of runs per benchmark; overridable from the environment.
RUNS="${RUNS:-1}"
# Allowed deviation used by the periodic CI builds.
AD=100
CUR_PATH=`pwd`
# Output sub-directory for this experiment's stats.
SUB_DIR="${SUB_DIR:-"sanity_test"}"
DIR=$CUR_PATH/splash2_stats/$SUB_DIR
# Thread counts to sweep; overridable from the environment.
THREADS="${THREADS:-"1 32"}"
LOG_FILE="$DIR/perf_log.txt"
DEBUG_FILE="$DIR/perf_debug-ad$AD.txt"
BUILD_ERROR_FILE="$DIR/perf_test_build_error-ad$AD.txt"
BUILD_DEBUG_FILE="$DIR/perf_test_build_log-ad$AD.txt"
TEMP="$DIR/tmp_file"
OUT_FILE="out"
# Target cycle interval used when looking up tuned parameters.
CYCLE=5000

rm -f $TEMP

# Prints (on stdout) the shell command line that runs benchmark $1 with
# $2 threads, using the original binary when $3 is 0 and the CI ("lc")
# binary otherwise.  The command cds into the benchmark directory; callers
# are expected to cd back afterwards.
run_program() {
    threads=$2
    suffix_conf=$3
    # Pin to CPU 0 and cap each run at 5 minutes.
    prefix="timeout 5m taskset 0x00000001 "
    declare suffix
    if [ $suffix_conf -eq 0 ]; then
        suffix="orig"
    else
        suffix="lc"
    fi
    case "$1" in
    water-nsquared)
        command="cd water-nsquared; $prefix ./water-nsquared-$suffix < input.$threads > $OUT_FILE; sleep 0.5"
        ;;
    water-spatial)
        command="cd water-spatial; $prefix ./water-spatial-$suffix < input.$threads > $OUT_FILE; sleep 0.5"
        ;;
    ocean-cp)
        command="cd ocean/contiguous_partitions; $prefix ./ocean-cp-$suffix -n1026 -p $threads -e1e-07 -r2000 -t28800 > $OUT_FILE"
        ;;
    ocean-ncp)
        command="cd ocean/non_contiguous_partitions; $prefix ./ocean-ncp-$suffix -n258 -p $threads -e1e-07 -r2000 -t28800 > $OUT_FILE"
        ;;
    barnes)
        command="cd barnes; $prefix ./barnes-$suffix < input.$threads > $OUT_FILE"
        ;;
    volrend)
        command="cd volrend; $prefix ./volrend-$suffix $threads inputs/head > $OUT_FILE"
        ;;
    fmm)
        command="cd fmm; $prefix ./fmm-$suffix < inputs/input.65535.$threads > $OUT_FILE"
        ;;
    raytrace)
        command="cd raytrace; $prefix ./raytrace-$suffix -p $threads -m72 inputs/balls4.env > $OUT_FILE"
        ;;
    radiosity)
        command="cd radiosity; $prefix ./radiosity-$suffix -p $threads -batch -largeroom > $OUT_FILE"
        ;;
    radix)
        command="cd radix; $prefix ./radix-$suffix -p$threads -n134217728 -r1024 -m524288 > $OUT_FILE"
        ;;
    fft)
        command="cd fft; $prefix ./fft-$suffix -m24 -p$threads -n1048576 -l4 > $OUT_FILE"
        ;;
    lu-c)
        command="cd lu/contiguous_blocks; $prefix ./lu-c-$suffix -n4096 -p$threads -b16 > $OUT_FILE"
        ;;
    lu-nc)
        command="cd lu/non_contiguous_blocks; $prefix ./lu-nc-$suffix -n2048 -p$threads -b16 > $OUT_FILE"
        ;;
    cholesky)
        command="cd cholesky; $prefix ./cholesky-$suffix -p$threads -B32 -C1024 inputs/tk29.O > $OUT_FILE"
        ;;
    esac
    # NOTE(review): "$th" looks like a typo for "$threads" -- the debug log
    # line will print an empty thread count; confirm.
    echo -e "Command for $1 running $th threads:-\n$command" >> $DEBUG_FILE
    echo $command
}
# Looks up the tuned push interval for benchmark $1, CI setting $2 and
# thread count $3 from a pre-generated tuning file; prints it on stdout.
read_tune_param() {
    case "$2" in
    2) ci_type="opt-tl";;
    4) ci_type="naive-tl";;
    6) ci_type="cd-tl";;
    8) ci_type="legacy-acc";;
    9) ci_type="opt-acc";;
    10) ci_type="legacy-tl";;
    11) ci_type="naive-acc";;
    12) ci_type="opt-int";;
    13) ci_type="naive-int";;
    *)
        # NOTE(review): callers invoke this function through $(...), so this
        # 'exit' only terminates the command-substitution subshell and the
        # error text becomes the captured "interval" -- confirm intended.
        echo "Wrong CI Type $2"
        exit
        ;;
    esac
    if [ $2 -eq 8 ]; then
        # legacy-acc uses a fixed interval instead of the tuning file.
        intv=5000
    else
        tune_file="../${ci_type}-tuning-th$3-${CYCLE}.txt"
        # First tuning-file line mentioning the benchmark wins; field 2 of
        # that line is the interval.
        while read line; do
            present=`echo $line | grep $1 | wc -l`
            if [ $present -eq 1 ]; then
                intv=`echo $line | cut -d' ' -f 2`
                break
            fi
        done < $tune_file
    fi
    echo $intv
}

# Runs benchmark $1 (mode $2) once with $3 threads and prints the runtime
# in microseconds, parsed from the benchmark's own output line.
get_time() {
    echo "Running (for time) $1 with $3 thread(s)" >> $DEBUG_FILE
    command=$(run_program $1 $3 $2)
    eval $command
    time_in_us=`cat $OUT_FILE | grep "$1 runtime: " | cut -d ':' -f 2 | cut -d ' ' -f 2 | tr -d '[:space:]'`
    echo "Duration: $time_in_us us" >> $DEBUG_FILE
    # run_program's command cd'ed into the benchmark dir; go back.
    cd - > /dev/null
    echo $time_in_us
}
# Runs benchmark $1 (mode $2) once per configured thread count and logs the
# runtime of each run.
run_splash2_for_time() {
    for thread in $THREADS
    do
        echo "Running (for time) $1 with $thread thread(s)" | tee -a $DEBUG_FILE
        command=$(run_program $1 $thread $2)
        eval $command
        time_in_us=`cat $OUT_FILE | grep "$1 runtime: " | cut -d ':' -f 2 | cut -d ' ' -f 2 | tr -d '[:space:]'`
        echo "Duration: $time_in_us us" | tee -a $DEBUG_FILE
        # run_program's command cd'ed into the benchmark dir; go back.
        cd - > /dev/null
    done
}

# Like run_splash2_for_time, but additionally logs the average interval
# instruction count and archives the full benchmark output in $TEMP.
run_splash2_for_time_n_avg_ic() {
    for thread in $THREADS
    do
        echo "Running (for time & avg_ic) $1 with $thread thread(s)" | tee -a $DEBUG_FILE
        command=$(run_program $1 $thread $2)
        eval $command;
        time_in_us=`cat $OUT_FILE | grep "$1 runtime: " | cut -d ':' -f 2 | cut -d ' ' -f 2 | tr -d '[:space:]'`
        avg_ic=`cat $OUT_FILE | grep "avg_intv_ic"`
        echo "**************************** Running $1 ******************************" >> $TEMP
        cat $OUT_FILE >> $TEMP
        echo "**************************** Finished Running $1 ******************************" >> $TEMP
        echo "Duration: $time_in_us us" | tee -a $DEBUG_FILE
        echo -e "Average IC:-\n$avg_ic" | tee -a $DEBUG_FILE
        cd - > /dev/null
    done
}

# Like run_splash2_for_time, but also logs the perf sample counters that
# the instrumented binary prints.
run_splash2_for_time_n_avg_perf_stats() {
    for thread in $THREADS
    do
        echo "Running (for time & avg perf stats) $1 with $thread thread(s)" | tee -a $DEBUG_FILE
        command=$(run_program $1 $thread $2)
        eval $command
        time_in_us=`cat $OUT_FILE | grep "$1 runtime: " | cut -d ':' -f 2 | cut -d ' ' -f 2 | tr -d '[:space:]'`
        samples=`cat $OUT_FILE | grep "samples"`
        echo "Duration: $time_in_us us" | tee -a $DEBUG_FILE
        echo -e "Average Perf Stats:-\n$samples" | tee -a $DEBUG_FILE
        cd - > /dev/null
    done
}

# Runs benchmark $1 (mode $2) single-threaded and logs the per-thread
# interval statistics ("PushSeq" lines) from its output.
run_splash2_for_intv_stats() {
    echo "Running (for interval stats) $1 with 1 thread" | tee -a $DEBUG_FILE
    command=$(run_program $1 1 $2)
    eval $command
    samples=`cat $OUT_FILE | grep "PushSeq"`
    echo -e "No. of threads that ran should have as many of the following lines:-\n$samples" | tee -a $DEBUG_FILE
    cd - > /dev/null
}
# Runs benchmark $1 (mode $2) single-threaded, collects the sampled interval
# lengths written by the instrumented binary, and produces a CDF ($1-$3.cdf),
# a ~100-point sampled CDF ($1-$3.s100) and a percentile table ($1-$3.pc).
get_accuracy() {
    echo "Running (for interval stats) $1 with 1 thread" | tee -a $DEBUG_FILE
    # Clear stale per-thread sample files from the previous run.
    rm -f /local_home/nilanjana/temp/interval_stats/interval_stats_thread*.txt
    cdf_name="$1-$3.cdf"
    sample_name="$1-$3.s100"
    pc_name="$1-$3.pc"
    # run command
    command=$(run_program $1 1 $2)
    eval $command
    cd -
    cd /local_home/nilanjana/temp/interval_stats > /dev/null
    # create sampled cdf
    # Column 4 of the sample files holds the interval length; sort it and
    # emit "cumulative-fraction value" pairs.
    cat interval_stats_thread*.txt | grep -ve "PushSeq\|Total" |\
    awk '{print $4}' |\
    sort -n |\
    awk 'BEGIN {OFMT="%f"} {lines[i++]=$0} END {for(l in lines){print l/(i-1)," ",lines[l]}}' |\
    sort -n -k 2 \
    > $cdf_name
    # Down-sample the CDF to roughly 100 points (keep everything if small).
    gawk -v lines="$(cat $cdf_name | wc -l)" 'lines<1000 || NR % int(lines/100) == 1 {print} {line=$0} END {print line}' $cdf_name > $sample_name
    echo "Sampled cdf to $sample_name"
    # Extract the 1/5/10/25/50/75/90/95/99th percentile interval lengths.
    gawk 'BEGIN {split("1 5 10 25 50 75 90 95 99",ptiles," "); p=1}
    !val[p] && $1+0>=(ptiles[p]+0)/100.0 {val[p]=$2; p++}
    END { for(i=1;i<=length(ptiles);i++) { if(ptiles[i]) {print ptiles[i], ": ", val[i]}}}' file="$sample_name" $sample_name > ./$pc_name
    echo "Percentile-wise intervals (in cycles) for $1:"
    cat ./$pc_name
    cd - > /dev/null
}
# Builds the uninstrumented ("orig") binary for benchmark $1.
build_splash2_orig() {
    #run original
    echo "Building original program for $1: " | tee -a $DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.orig $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.orig $1
    #make -f Makefile.orig clean; make -f Makefile.orig
}

# Builds the original binary with PAPI hardware-interrupt instrumentation.
build_splash2_orig_papi() {
    #Build original program with PAPI hardware interrupts
    echo "Building original program for $1 with PAPI hardware interrupts(PI: $PI retired instructions) : " | tee -a $DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.orig $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE EXTRA_FLAGS="-DPAPI -DIC_THRESHOLD=$PI" make -f Makefile.orig $1
    #make -f Makefile.orig $1-clean; EXTRA_FLAGS="-DPAPI -DIC_THRESHOLD=5000" make -f Makefile.orig $1
}

# Builds the original binary on top of the libfiber runtime.
build_splash2_orig_fiber() {
    #Build orig-fiber
    echo "Building orig with fiber program for $1: " | tee -a $DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.orig.libfiber $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.orig.libfiber $1
    #make -f Makefile.orig.fiber $1-clean; make -f Makefile.orig.fiber $1
}

# Builds the naive CI-instrumented binary (no allowed deviation).
build_splash2_ci_naive() {
    #run naive
    echo "Building naive program for $1: " | tee -a $DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.lc $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE ALLOWED_DEVIATION=0 CLOCK_TYPE=1 PUSH_INTV=$PI CMMT_INTV=$CI INST_LEVEL=3 EXTRA_FLAGS="-DAVG_STATS" make -f Makefile.lc $1
    #make -f Makefile.lc $1-clean; ALLOWED_DEVIATION=0 CLOCK_TYPE=1 PUSH_INTV=5000 CMMT_INTV=1000 INST_LEVEL=3 EXTRA_FLAGS="-DAVG_STATS" make -f Makefile.lc $1
}

# Builds the optimized periodic CI binary, using the tuned push interval
# for this benchmark (commit interval = PI/5).
build_splash2_ci_opt() {
    #run periodic
    AD=100
    CI_SETTING=2
    PI=$(read_tune_param $1 $CI_SETTING 1)
    CI=`echo "scale=0; $PI/5" | bc`
    echo "Building periodic opt program for $1 with PI:$PI, CI:$CI: " | tee -a $DEBUG_FILE $BUILD_DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.lc $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE ALLOWED_DEVIATION=$AD CLOCK_TYPE=1 PUSH_INTV=$PI CMMT_INTV=$CI CYCLE_INTV=$CYCLE INST_LEVEL=$CI_SETTING EXTRA_FLAGS="-DAVG_STATS" make -f Makefile.lc $1
    #make -f Makefile.lc $1-clean; ALLOWED_DEVIATION=100 CLOCK_TYPE=1 PUSH_INTV=5000 CMMT_INTV=1000 INST_LEVEL=1 EXTRA_FLAGS="-DAVG_STATS" make -f Makefile.lc $1
}

# Same as build_splash2_ci_opt but with the cycle-based CI setting (12).
build_splash2_ci_opt_cycles() {
    #run periodic
    AD=100
    CI_SETTING=12
    PI=$(read_tune_param $1 $CI_SETTING 1)
    CI=`echo "scale=0; $PI/5" | bc`
    echo "Building periodic opt cycles program for $1 with PI:$PI, CI:$CI: " | tee -a $DEBUG_FILE $BUILD_DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.lc $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE ALLOWED_DEVIATION=$AD CLOCK_TYPE=1 PUSH_INTV=$PI CMMT_INTV=$CI CYCLE_INTV=$CYCLE INST_LEVEL=$CI_SETTING EXTRA_FLAGS="-DAVG_STATS" make -f Makefile.lc $1
    #make -f Makefile.lc $1-clean; ALLOWED_DEVIATION=100 CLOCK_TYPE=1 PUSH_INTV=5000 CMMT_INTV=1000 INST_LEVEL=1 EXTRA_FLAGS="-DAVG_STATS" make -f Makefile.lc $1
}

# Builds the periodic CI binary with hardware perf-counter collection.
build_splash2_ci_opt_perf_cntrs() {
    #Build original program with Periodic CI & perf counting
    echo "Building original program for $1 with Periodic CI (PI: $PI IR instructions): " >> $DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.lc $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE ALLOWED_DEVIATION=$AD CLOCK_TYPE=1 PUSH_INTV=$PI CMMT_INTV=$CI INST_LEVEL=1 EXTRA_FLAGS="-DAVG_STATS -DPERF_CNTR" make -f Makefile.lc $1
    #make -f Makefile.lc $1-clean; ALLOWED_DEVIATION=100 CLOCK_TYPE=1 PUSH_INTV=5000 CMMT_INTV=1000 INST_LEVEL=1 EXTRA_FLAGS="-DAVG_STATS -DPERF_CNTR" make -f Makefile.lc $1
}

# Builds the optimized periodic CI binary with interval sampling enabled
# (used by get_accuracy).
build_splash2_ci_opt_intv_accuracy() {
    #build periodic with interval stats
    AD=100
    CI_SETTING=2
    PI=$(read_tune_param $1 $CI_SETTING 1)
    CI=`echo "scale=0; $PI/5" | bc`
    echo "Building periodic opt program for $1 that prints interval statistics with PI:$PI, CI:$CI: " >> $DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.lc $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE ALLOWED_DEVIATION=$AD CLOCK_TYPE=1 PUSH_INTV=$PI CMMT_INTV=$CI CYCLE_INTV=$CYCLE INST_LEVEL=$CI_SETTING EXTRA_FLAGS="-DAVG_STATS -DINTV_SAMPLING" make -f Makefile.lc $1
    #make -f Makefile.lc $1-clean; ALLOWED_DEVIATION=100 CLOCK_TYPE=1 PUSH_INTV=5000 CMMT_INTV=1000 INST_LEVEL=1 EXTRA_FLAGS="-DAVG_STATS -DINTV_SAMPLING" make -f Makefile.lc $1
}

# Cycle-based (setting 12) variant of the interval-sampling build.
build_splash2_ci_opt_cycles_intv_accuracy() {
    #build periodic with interval stats
    AD=100
    CI_SETTING=12
    PI=$(read_tune_param $1 $CI_SETTING 1)
    CI=`echo "scale=0; $PI/5" | bc`
    echo "Building periodic opt cycles program for $1 that prints interval statistics with PI:$PI, CI:$CI: " >> $DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.lc $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE ALLOWED_DEVIATION=$AD CLOCK_TYPE=1 PUSH_INTV=$PI CMMT_INTV=$CI CYCLE_INTV=$CYCLE INST_LEVEL=$CI_SETTING EXTRA_FLAGS="-DAVG_STATS -DINTV_SAMPLING" make -f Makefile.lc $1
    #make -f Makefile.lc $1-clean; ALLOWED_DEVIATION=100 CLOCK_TYPE=1 PUSH_INTV=5000 CMMT_INTV=1000 INST_LEVEL=1 EXTRA_FLAGS="-DAVG_STATS -DINTV_SAMPLING" make -f Makefile.lc $1
}

# Builds the CI-instrumented binary on top of the libfiber runtime.
build_splash2_ci_opt_fiber() {
    #run fiber-ci
    echo "Building fiber with CI program for $1: " | tee -a $DEBUG_FILE
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE make -f Makefile.lc.libfiber $1-clean
    BUILD_LOG=$BUILD_DEBUG_FILE ERROR_LOG=$BUILD_ERROR_FILE ALLOWED_DEVIATION=$AD CLOCK_TYPE=1 PUSH_INTV=$PI CMMT_INTV=$CI INST_LEVEL=1 make -f Makefile.lc.libfiber $1
    #make -f Makefile.lc.fiber $1-clean; ALLOWED_DEVIATION=100 CLOCK_TYPE=1 PUSH_INTV=5000 CMMT_INTV=1000 INST_LEVEL=1 make -f Makefile.lc.fiber $1
}
# Measure single-threaded runtime of each benchmark in "$@" twice -- once
# built unmodified ("orig"), once with compiler-interrupt opt
# instrumentation ("opt") -- then report the opt/orig slowdown ratio and
# the IR interval statistics gathered from the opt runs.
# Outputs: $DIR/perf_orig.txt, $DIR/perf_opt.txt, $DIR/perf_comp.txt,
# $DIR/ir_intv.txt; progress mirrored to $LOG_FILE.
test_splash2_perf() {
    thread=1
    OUTPUT_FILE_ORIG="$DIR/perf_orig.txt"
    OUTPUT_FILE_OPT="$DIR/perf_opt.txt"
    OUTPUT_COMP="$DIR/perf_comp.txt"
    OUTPUT_INTV="$DIR/ir_intv.txt"
    declare -A res_orig res_opt
    rm -f $OUTPUT_FILE_ORIG $OUTPUT_FILE_OPT $OUTPUT_COMP $OUTPUT_INTV
    echo "Orig" | tee -a $OUTPUT_FILE_ORIG $LOG_FILE
    echo "----------------------------------" | tee -a $LOG_FILE
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        cd $BENCH_DIR
        build_splash2_orig $bench > /dev/null
        command=$(run_program $bench $thread 0)
        eval $command
        # runtime is printed by the benchmark as "<name> runtime: <usec>"
        time_in_us=`cat $OUT_FILE | grep "$bench runtime: " | cut -d ':' -f 2 | cut -d ' ' -f 2 | tr -d '[:space:]'`
        res_orig[$bench]=$time_in_us
        echo -e "$bench\t$time_in_us" | tee -a $OUTPUT_FILE_ORIG $LOG_FILE
        cd - > /dev/null
        cd ../ > /dev/null
    done
    echo "----------------------------------" | tee -a $LOG_FILE
    echo "Opt" | tee -a $OUTPUT_FILE_OPT $LOG_FILE
    # BUGFIX: write the interval-stats header *before* appending the first
    # separator line. The original appended the separator with `tee -a`
    # and then truncated the file with `>`, discarding that separator.
    echo "IR Interval Stats" > $OUTPUT_INTV
    echo "----------------------------------" | tee -a $OUTPUT_INTV $LOG_FILE
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        cd $BENCH_DIR > /dev/null
        build_splash2_ci_opt $bench > /dev/null
        command=$(run_program $bench $thread 1)
        eval $command
        time_in_us=`cat $OUT_FILE | grep "$bench runtime: " | cut -d ':' -f 2 | cut -d ' ' -f 2 | tr -d '[:space:]'`
        avg_ic=`cat $OUT_FILE | grep "avg_intv_ic"`
        res_opt[$bench]=$time_in_us
        echo -e "$bench\t$time_in_us" | tee -a $OUTPUT_FILE_OPT $LOG_FILE
        echo $bench >> $OUTPUT_INTV
        echo "----------------------------------" >> $OUTPUT_INTV
        echo $avg_ic >> $OUTPUT_INTV
        echo "----------------------------------" >> $OUTPUT_INTV
        cd - > /dev/null
        cd ../ > /dev/null
    done
    echo "----------------------------------" | tee -a $LOG_FILE
    echo "Comparing orig & opt:-" | tee -a $LOG_FILE
    echo "----------------------------------" | tee -a $LOG_FILE
    echo -e "Benchmark\tSlowdown" | tee -a $LOG_FILE
    for bench in "$@"
    do
        # slowdown factor = opt runtime / orig runtime (2 decimal places)
        comp=`echo "scale=2;(${res_opt[$bench]}/${res_orig[$bench]})" | bc`
        echo -e "$bench\t$comp" >> $OUTPUT_COMP
        echo "$bench:${comp}x" | tee -a $LOG_FILE
    done
    echo "----------------------------------" | tee -a $LOG_FILE
    cat $OUTPUT_INTV | tee -a $LOG_FILE
}
# log to check output & see if the transformations led to erroneous program flow
# Runs every benchmark once unmodified and once with CI-opt instrumentation,
# capturing full program output to output_orig.txt / output_opt.txt so the
# two can be diffed for functional divergence.
test_splash2_output() {
    OUTPUT_FILE="$DIR/output_orig.txt"
    thread=1
    rm -f $OUTPUT_FILE
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        cd $BENCH_DIR > /dev/null
        echo "Running (for time) $bench with $thread thread(s)" | tee -a $OUTPUT_FILE $LOG_FILE
        build_splash2_orig $bench > /dev/null
        command=$(run_program $bench $thread 0)
        eval $command
        cat $OUT_FILE >> $OUTPUT_FILE
        cd - > /dev/null
        cd ../ > /dev/null
    done
    # second pass: same benchmarks built with CI-opt instrumentation
    OUTPUT_FILE="$DIR/output_opt.txt"
    thread=1
    rm -f $OUTPUT_FILE
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        cd $BENCH_DIR > /dev/null
        echo "Running (for time) $bench with $thread thread(s)" | tee -a $OUTPUT_FILE $LOG_FILE
        build_splash2_ci_opt $bench > /dev/null
        command=$(run_program $bench $thread 1)
        eval $command
        cat $OUT_FILE >> $OUTPUT_FILE
        cd - > /dev/null
        cd ../ > /dev/null
    done
    echo "Run \"diff --suppress-common-lines -yiEw $DIR/output_orig.txt $DIR/output_opt.txt\"" | tee -a $LOG_FILE
}
# Build each requested benchmark unmodified and time a single-threaded run
# (mode 0 = original, no instrumentation).
test_splash2_orig() {
    for bench in "$@"; do
        # SPLASH-2 keeps these five programs under kernels/, the rest under apps/
        if [[ "$bench" == "radix" || "$bench" == "fft" || "$bench" == "lu-c" \
              || "$bench" == "lu-nc" || "$bench" == "cholesky" ]]; then
            BENCH_DIR="kernels"
        else
            BENCH_DIR="apps"
        fi
        cd "$BENCH_DIR"
        build_splash2_orig "$bench"
        run_splash2_for_time "$bench" 0
        cd .. > /dev/null
    done
}
# Build each benchmark with PAPI hardware-counter support and collect
# runtime plus averaged performance-counter statistics (mode 0 = original).
test_splash2_orig_papi() {
    for bench in "$@"; do
        case "$bench" in
            radix|fft|lu-c|lu-nc|cholesky) BENCH_DIR="kernels" ;;
            *)                             BENCH_DIR="apps"    ;;
        esac
        cd "$BENCH_DIR"
        build_splash2_orig_papi "$bench"
        run_splash2_for_time_n_avg_perf_stats "$bench" 0
        cd .. > /dev/null
    done
}
# Build and time each benchmark against the fiber runtime without CI
# instrumentation (mode 0).
test_splash2_orig_fiber() {
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        # NOTE(review): the warning below fires for benchmarks known not to
        # terminate under orig-fiber, but the matching 'continue' is commented
        # out, so they are still run anyway -- confirm whether the skip should
        # be re-enabled.
        case "$bench" in
        "cholesky" | "barnes" | "volrend" | "fmm" | "radiosity")
            echo "Orig-Fiber configuration does not terminate for $bench" | tee -a $DEBUG_FILE
            #continue
            ;;
        esac
        cd $BENCH_DIR
        build_splash2_orig_fiber $bench
        run_splash2_for_time $bench 0
        cd .. > /dev/null
    done
}
# Build each benchmark with naive (unoptimized) CI instrumentation and
# record runtime plus average instruction count (mode 1 = instrumented).
test_splash2_naive() {
    for bench in "$@"; do
        case "$bench" in
            radix|fft|lu-c|lu-nc|cholesky) BENCH_DIR="kernels" ;;
            *)                             BENCH_DIR="apps"    ;;
        esac
        cd "$BENCH_DIR"
        build_splash2_ci_naive "$bench"
        run_splash2_for_time_n_avg_ic "$bench" 1
        cd .. > /dev/null
    done
}
# Build each benchmark with optimized CI instrumentation and record runtime
# plus average instruction count (mode 1 = instrumented).
test_splash2_opt() {
    for bench in "$@"; do
        # the five kernel programs live in a different subdirectory
        if [[ "$bench" == "radix" || "$bench" == "fft" || "$bench" == "lu-c" \
              || "$bench" == "lu-nc" || "$bench" == "cholesky" ]]; then
            BENCH_DIR="kernels"
        else
            BENCH_DIR="apps"
        fi
        cd "$BENCH_DIR"
        build_splash2_ci_opt "$bench"
        run_splash2_for_time_n_avg_ic "$bench" 1
        cd .. > /dev/null
    done
}
# For each benchmark, build and time three variants (orig, CI-opt, CI-opt
# with cycle-based probes) and print the runtime overhead of the two
# instrumented builds relative to the original.
check_perf_opt() {
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        cd $BENCH_DIR
        build_splash2_orig $bench
        orig_time=$(get_time $bench 0 1)
        build_splash2_ci_opt $bench
        opt_time=$(get_time $bench 1 1)
        build_splash2_ci_opt_cycles $bench
        opt_cycles_time=$(get_time $bench 1 1)
        # overhead in percent relative to the uninstrumented build
        slowdown_opt=`echo "scale=2;(($opt_time-$orig_time)*100/$orig_time)" | bc`
        slowdown_opt_cycles=`echo "scale=2;(($opt_cycles_time-$orig_time)*100/$orig_time)" | bc`
        # convert usec to msec for display
        orig_time=`echo "scale=2;($orig_time/1000)" | bc`
        opt_time=`echo "scale=2;($opt_time/1000)" | bc`
        opt_cycles_time=`echo "scale=2;($opt_cycles_time/1000)" | bc`
        echo "Orig run time for $bench: $orig_time ms"
        echo "Opt run time for $bench: $opt_time ms"
        # BUGFIX: append the " ms" unit here like the two messages above
        echo "Opt-Cycles run time for $bench: $opt_cycles_time ms"
        echo "Runtime overhead for opt: $slowdown_opt %"
        echo "Runtime overhead for opt-cycles: $slowdown_opt_cycles %"
        echo ""
        cd .. > /dev/null
    done
}
# Build each benchmark with interval-sampling instrumentation (both the
# standard and the cycle-based variant) and measure interrupt-interval
# accuracy for each.
check_intv_opt() {
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        cd $BENCH_DIR
        build_splash2_ci_opt_intv_accuracy $bench
        get_accuracy $bench 1 "ci"
        build_splash2_ci_opt_cycles_intv_accuracy $bench
        get_accuracy $bench 1 "ci-cycles"
        cd .. > /dev/null
    done
}
# Build every benchmark (currently the CI-opt-cycles variant) and stash the
# per-benchmark make logs in $DIR.
build_all() {
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        cd $BENCH_DIR
        echo "Building $bench"
        # per-benchmark log files, moved to $DIR after the build
        BUILD_DEBUG_FILE="${bench}_make_log"
        BUILD_ERROR_FILE="${bench}_make_err"
        build_splash2_ci_opt_cycles $bench
        #build_splash2_ci_opt $bench
        mv $BUILD_DEBUG_FILE $BUILD_ERROR_FILE $DIR
        cd .. > /dev/null
    done
}
# Run every (already-built) benchmark once in instrumented mode (mode 1),
# timing the run and archiving its output file in $DIR.
run_all() {
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        cd $BENCH_DIR
        command=$(run_program $bench 1 1)
        echo "$command"
        time eval $command
        mv $OUT_FILE $DIR/${bench}_output
        cd - > /dev/null
        cd .. > /dev/null
    done
}
# Build each benchmark with CI-opt instrumentation plus hardware performance
# counters and collect runtime and averaged counter statistics (mode 1).
test_splash2_opt_perf_cntrs() {
    for bench in "$@"; do
        case "$bench" in
            radix|fft|lu-c|lu-nc|cholesky) BENCH_DIR="kernels" ;;
            *)                             BENCH_DIR="apps"    ;;
        esac
        cd "$BENCH_DIR"
        build_splash2_ci_opt_perf_cntrs "$bench"
        run_splash2_for_time_n_avg_perf_stats "$bench" 1
        cd .. > /dev/null
    done
}
# Build each benchmark with interval-sampling instrumentation and collect
# interrupt-interval statistics from an instrumented run (mode 1).
test_splash2_opt_intv_accuracy() {
    for bench in "$@"
    do
        BENCH_DIR=""
        case "$bench" in
        "radix" | "fft" | "lu-c" | "lu-nc" | "cholesky")
            BENCH_DIR="kernels"
            ;;
        *)
            BENCH_DIR="apps"
            ;;
        esac
        cd $BENCH_DIR
        build_splash2_ci_opt_intv_accuracy $bench
        run_splash2_for_intv_stats $bench 1
        cd .. > /dev/null
    done
}
# Build each benchmark against the fiber runtime with CI instrumentation
# and time a run in instrumented mode (mode 1).
test_splash2_opt_fiber() {
    for bench in "$@"; do
        if [[ "$bench" == "radix" || "$bench" == "fft" || "$bench" == "lu-c" \
              || "$bench" == "lu-nc" || "$bench" == "cholesky" ]]; then
            BENCH_DIR="kernels"
        else
            BENCH_DIR="apps"
        fi
        cd "$BENCH_DIR"
        build_splash2_ci_opt_fiber "$bench"
        run_splash2_for_time "$bench" 1
        cd .. > /dev/null
    done
}
# Top-level driver that exercises the build/run/measurement helpers for the
# benchmark list passed in "$@".
sanity_test() {
    build_all $@
    # NOTE(review): this unconditional 'exit' terminates the script right
    # after the build stage, making every call below unreachable dead code.
    # Presumably left in while debugging the builds -- confirm before
    # removing it or the calls below.
    exit
    run_all $@
    check_perf_opt $@
    check_intv_opt $@
    test_splash2_opt $@
    test_splash2_opt_intv_accuracy $@ # for interval stats
    test_splash2_orig $@
    test_splash2_perf $@
    test_splash2_output $@
    test_splash2_naive $@
    test_splash2_orig_papi $@ # for papi
    test_splash2_opt_perf_cntrs $@ # for perf counters
    test_splash2_orig_fiber $@ # for fiber
    # NOTE(review): duplicated invocation of test_splash2_orig_fiber --
    # likely a copy/paste slip; confirm whether two runs are intended.
    test_splash2_orig_fiber $@ # for fiber
    test_splash2_opt_fiber $@ # for fiber
}
# --- script entry point -----------------------------------------------------
# Prepare the results directory, clear stale logs, then run the sanity test
# over either the full benchmark suite (no args) or the benchmarks named on
# the command line.
mkdir -p $DIR
rm -f $DEBUG_FILE $BUILD_ERROR_FILE $BUILD_DEBUG_FILE $LOG_FILE
if [ $# -eq 0 ]; then
    #sanity_test water-nsquared water-spatial ocean-cp ocean-ncp barnes volrend fmm raytrace radiosity radix fft lu-c lu-nc cholesky
    sanity_test water-nsquared water-spatial ocean-cp ocean-ncp raytrace radix fft lu-c lu-nc radiosity barnes volrend fmm cholesky
else
    sanity_test $@
fi
|
<gh_stars>0
import {
py_login,
py_getuser,
py_logout
} from '@/api/user'
// Factory returning a fresh default user-state object. A new object (and a
// new visitcode array) is created on every call so RESET_STATE never shares
// references between resets.
const getDefaultState = () => ({
  token: '',
  name: '',
  avatar: '',
  userrights: '',
  pwdaes: '',
  visitcode: [],
  userid: 0
})
const state = getDefaultState() // module-local Vuex state, seeded with defaults
// Vuex mutation handlers: each one synchronously writes a single slice of
// the user state.
const mutations = {
  // Restore every field to its default value (used on reset/logout).
  RESET_STATE(state) {
    Object.assign(state, getDefaultState())
  },
  SET_TOKEN(state, token) {
    state.token = token
  },
  SET_NAME(state, name) {
    state.name = name
  },
  SET_AVATAR(state, avatar) {
    state.avatar = avatar
  },
  SET_USERRIGHTS(state, userrights) {
    state.userrights = userrights
  },
  SET_USERID(state, userid) {
    state.userid = userid
  }
}
// Vuex actions wrapping the backend user API (py_login / py_getuser /
// py_logout).
const actions = {
  // Authenticate against the backend; resolves with the raw API response.
  // NOTE(review): no token is committed here -- presumably the caller
  // persists credentials itself after a successful login; confirm.
  login({
    commit
  }, userInfo) {
    const {
      username,
      pwdaes,
    } = userInfo
    return new Promise((resolve, reject) => {
      py_login({
        username: username.trim(),
        pwdaes: pwdaes,
      }).then(response => {
        resolve(response)
      }).catch(error => {
        reject(error)
      })
    })
  },
  // Fetch the current user's profile; on success (error === 0) caches id,
  // rights and name in the store, falling back to defaults (userid 0,
  // rights '111111111', empty name) when fields are missing.
  getInfo({
    commit,
    state
  }) {
    return new Promise((resolve, reject) => {
      py_getuser({}).then(response => {
        if (response && response.error === 0) {
          commit('SET_USERID', response.userid || 0)
          commit('SET_USERRIGHTS', response.rights || '111111111')
          commit('SET_NAME', response.username || '')
        }
        resolve(response || {})
      }).catch(error => {
        reject(error)
      })
    })
  },
  // user logout
  // Server-side logout only; local state is not reset here.
  async logout({
    commit
  }) {
    await py_logout({})
  }
}
// Namespaced Vuex module: consumers access it as "user/<action|mutation>".
export default {
  namespaced: true,
  state,
  mutations,
  actions
}
|
#!/usr/bin/env bash
# execute mysql queries
# Runs $1 as SQL against $database using the connection settings loaded from
# mysql.conf ($host/$port/$user/$password). The password is passed via the
# MYSQL_PWD environment variable so it does not appear on the command line.
mysql_query () {
    sql=$1
    MYSQL_PWD=$password mysql -h "$host" -P "$port" -u "$user" -e "$sql" $database
}
# import a csv file into mysql
# The target table name is everything in the file name before ".csv"; the
# file's first line must hold the column list (used for the LOAD DATA column
# mapping and then skipped via "ignore 1 lines"). Accepts plain .csv or
# bzip2-compressed .csv.bz2 files.
importcsv () {
    path="$1"
    filename=$(basename "$path")
    # BUGFIX: quote "$filename"/"$path" everywhere so names containing
    # spaces or glob characters survive word splitting
    table="$(echo "$filename" | sed 's/\.csv.*//')"
    sql="load data local infile '/dev/stdin'
    into table \`$table\`
    fields terminated by ',' ESCAPED BY '' OPTIONALLY ENCLOSED BY '\"'
    lines terminated by '\r\n'
    ignore 1 lines"
    if [[ "$filename" == *.csv.bz2 ]]; then
        # decompress just enough to read the header line
        cols="$(bunzip2 -c "$path" | head -n 1)"
        cat_or_pv "$path" "$filename" | bunzip2 | mysql_query "$sql ($cols);"
    elif [[ "$filename" == *.csv ]]; then
        cols="$(head -n 1 "$path")"
        cat_or_pv "$path" "$filename" | mysql_query "$sql ($cols);"
    else
        echo "error: $filename is not a .csv or .csv.bz2 file, skipping"
    fi
}
# display usage manual if no files are specified
# Exits non-zero so callers in pipelines notice the misuse.
if [ "$#" -eq 0 ]; then
    echo "usage: "
    echo "  mysql_loadcsv table.csv table2.csv.bz2"
    exit 1
fi
# load database config
# mysql.conf (next to this script) must define host, port, user, password
# and database for mysql_query.
scriptdir=$(dirname "$(realpath $0)")
source "$scriptdir/mysql.conf"
# use pv to show progress if available
# cat_or_pv streams file $1 to stdout, labelling the stream with $2; with pv
# installed a progress bar is shown, otherwise a one-line notice is printed
# to stderr and the file is streamed plainly.
if ! [ -x "$(command -v pv)" ]; then
    echo "warning: 'pv' command not found, import progress will not be displayed"
    # BUGFIX: quote "$1" so paths with spaces are not word-split
    cat_or_pv() { echo "importing $2" 1>&2 ; cat "$1"; }
else
    cat_or_pv() { pv "$1" --name "$2"; }
fi
# import csv files passed as arguments
# Validate each argument is a readable file before importing; abort on the
# first bad path.
for path in "$@"; do
    if [[ -f "$path" && -r "$path" ]]; then
        # BUGFIX: quote "$path" -- the readability test above quotes it, but
        # the call did not, breaking file names containing spaces
        importcsv "$path"
    else
        echo "error: '$path' is not a readable file"
        exit 1
    fi
done
|
<gh_stars>1-10
//-----------------------------------------------------------------------------
// lid.c
//
// Project: EPA SWMM5
// Version: 5.1
// Date: 03/20/14 (Build 5.1.001)
// 05/19/14 (Build 5.1.006)
// 09/15/14 (Build 5.1.007)
// 03/19/15 (Build 5.1.008)
// 04/30/15 (Build 5.1.009)
// 08/05/15 (Build 5.1.010)
// 08/01/16 (Build 5.1.011)
// 03/14/17 (Build 5.1.012)
// 05/10/18 (Build 5.1.013)
// Author: <NAME> (US EPA)
//
// This module handles all data processing involving LID (Low Impact
// Development) practices used to treat runoff for individual subcatchments
// within a project. The actual computation of LID performance is made by
// functions within the lidproc.c module. See LidTypes below for the types
// of LIDs that can be modeled.
//
// An LID process is described by the TLidProc data structure and consists of
// size-independent design data for the different vertical layers that make
// up a specific type of LID. The collection of these LID process designs is
// stored in the LidProcs array.
//
// When a member of LidProcs is to be deployed in a particular subcatchment,
// its sizing and treatment data are stored in a TLidUnit data structure.
// The collection of all TLidUnits deployed in a subcatchment is held in a
// TLidGroup list data structure. The LidGroups array contains a TLidGroup
// list for each subcatchment in the project.
//
// During a runoff time step, each subcatchment calls the lid_getRunoff()
// function to compute flux rates and a water balance through each layer
// of each LID unit in the subcatchment. The resulting outflows (runoff,
// drain flow, evaporation and infiltration) are added to those computed
// for the non-LID portion of the subcatchment.
//
// An option exists for the detailed time series of flux rates and storage
// levels for a specific LID unit to be written to a text file named by the
// user for viewing outside of the SWMM program.
//
// Build 5.1.008:
// - More input error reporting added.
// - Rooftop Disconnection added to the types of LIDs.
// - LID drain flows are now tracked separately.
// - LID drain flows can now be routed to separate outlets.
// - Check added to insure LID flows not returned to nonexistent pervious area.
//
// Build 5.1.009:
// - Fixed bug where LID's could return outflow to non-LID area when LIDs
// make up entire subcatchment.
//
// Build 5.1.010:
// - Support for new Modified Green Ampt infiltration model added.
// - Imported variable HasWetLids now properly initialized.
// - Initial state of reporting (lidUnit->rptFile->wasDry) changed to
// prevent duplicate printing of first line of detailed report file.
//
// Build 5.1.011:
// - The top of the storage layer is no longer used as a limit for an
// underdrain offset thus allowing upturned drains to be modeled.
// - Column headings for the detailed LID report file were modified.
//
// Build 5.1.012:
// - Redefined initialization of wasDry for LID reporting.
//
// Build 5.1.013:
// - Support added for LID units treating pervious area runoff.
// - Support added for open/closed head levels and multiplier v. head
// control curve for underdrain flow.
// - Support added for unclogging permeable pavement at fixed intervals.
// - Support added for pollutant removal in underdrain flow.
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE
#include <math.h>
#include "headers.h"
#include "lid.h"
// Suffixes appended to validation error messages to point the user at the
// LID layer or parameter group that failed its checks.
#define ERR_PAVE_LAYER " - check pavement layer parameters"
#define ERR_SOIL_LAYER " - check soil layer parameters"
#define ERR_STOR_LAYER " - check storage layer parameters"
#define ERR_SWALE_SURF " - check swale surface parameters"
#define ERR_GREEN_AMPT " - check subcatchment Green-Ampt parameters"
#define ERR_DRAIN_OFFSET " - drain offset exceeds storage height"
#define ERR_DRAIN_HEADS " - invalid drain open/closed heads" //(5.1.013)
#define ERR_SWALE_WIDTH " - invalid swale width"
//-----------------------------------------------------------------------------
//  Enumerations
//-----------------------------------------------------------------------------
enum LidLayerTypes {
    SURF,                    // surface layer
    SOIL,                    // soil layer
    STOR,                    // storage layer
    PAVE,                    // pavement layer
    DRAINMAT,                // drainage mat layer
    DRAIN,                   // underdrain system
    REMOVALS};               // pollutant removals                        //(5.1.013)

//// Note: DRAINMAT must be placed before DRAIN so the two keywords can
///  be distinguished from one another when parsing a line of input.

// Keyword lists used by findmatch(); entries must stay in the same order
// as the LidLayerTypes enum above (and LidTypeWords must match the lidType
// codes assigned in lid_readProcParams).
char* LidLayerWords[] =
    {"SURFACE", "SOIL", "STORAGE", "PAVEMENT", "DRAINMAT", "DRAIN",
     "REMOVALS", NULL};                                                   //(5.1.013)

char* LidTypeWords[] =
    {"BC",                   //bio-retention cell
     "RG",                   //rain garden
     "GR",                   //green roof
     "IT",                   //infiltration trench
     "PP",                   //porous pavement
     "RB",                   //rain barrel
     "VS",                   //vegetative swale
     "RD",                   //rooftop disconnection
     NULL};
//-----------------------------------------------------------------------------
//  Data Structures
//-----------------------------------------------------------------------------
//  LID List - list of LID units contained in an LID group

//-----------------------------------------------------------------------------
//  Shared Variables
//-----------------------------------------------------------------------------
// Module-wide state: the LID process catalog and the per-subcatchment
// groups of deployed units, sized by lid_create().
static TLidProc*  LidProcs;            // array of LID processes
static int        LidCount;            // number of LID processes
static TLidGroup* LidGroups;           // array of LID process groups
static int        GroupCount;          // number of LID groups (subcatchments)

static double     EvapRate;            // evaporation rate (ft/s)
static double     NativeInfil;         // native soil infil. rate (ft/s)
static double     MaxNativeInfil;      // native soil infil. rate limit (ft/s)

//-----------------------------------------------------------------------------
//  Imported Variables (from SUBCATCH.C)
//-----------------------------------------------------------------------------
// Volumes (ft3) for a subcatchment over a time step
extern double     Vevap;               // evaporation
extern double     Vpevap;              // pervious area evaporation
extern double     Vinfil;              // non-LID infiltration
extern double     VlidInfil;           // infiltration from LID units
extern double     VlidIn;              // impervious area flow to LID units
extern double     VlidOut;             // surface outflow from LID units
extern double     VlidDrain;           // drain outflow from LID units
extern double     VlidReturn;          // LID outflow returned to pervious area
extern char       HasWetLids;          // TRUE if any LIDs are wet
                                       // (from RUNOFF.C)
// (from RUNOFF.C)
//-----------------------------------------------------------------------------
// External Functions (prototyped in lid.h)
//-----------------------------------------------------------------------------
// lid_create called by createObjects in project.c
// lid_delete called by deleteObjects in project.c
// lid_validate called by project_validate
// lid_initState called by project_init
// lid_readProcParams called by parseLine in input.c
// lid_readGroupParams called by parseLine in input.c
// lid_setOldGroupState called by subcatch_setOldState
// lid_setReturnQual called by findLidLoads in surfqual.c
// lid_getReturnQual called by subcatch_getRunon
// lid_getPervArea called by subcatch_getFracPerv
// lid_getFlowToPerv called by subcatch_getRunon
// lid_getSurfaceDepth called by subcatch_getDepth
// lid_getDepthOnPavement called by sweptSurfacesDry in subcatch.c
// lid_getStoredVolume called by subcatch_getStorage
// lid_getRunon called by subcatch_getRunon
// lid_getRunoff called by subcatch_getRunoff
// lid_addDrainRunon called by subcatch_getRunon
// lid_addDrainLoads called by surfqual_getWashoff
// lid_addDrainInflow called by addLidDrainInflows in routing.c
// lid_writeSummary called by inputrpt_writeInput
// lid_writeWaterBalance called by statsrpt_writeReport
// lid_getLidUnitCount called by LID API toolkit in toolkitAPI.c
// lid_getLidUnit called by LID API toolkit in toolkitAPI.c
// lid_getLidProc called by LID API toolkit in toolkitAPI.c
// lid_getLidGroup called by LID API toolkit in toolkitAPI.c
// lid_validateLidProc called by LID API toolkit in toolkitAPI.c
// lid_validateLidGroup called by LID API toolkit in toolkitAPI.c
// lid_updateLidUnit called by LID API toolkit in toolkitAPI.c
// lid_updateAllLidUnit called by LID API toolkit in toolkitAPI.c
// lid_updateLidGroup called by LID API toolkit in toolkitAPI.c
//-----------------------------------------------------------------------------
//  Local Functions
//-----------------------------------------------------------------------------
// Forward declarations for functions private to this module.
static void   freeLidGroup(int j);
static int    readSurfaceData(int j, char* tok[], int ntoks);
static int    readPavementData(int j, char* tok[], int ntoks);
static int    readSoilData(int j, char* tok[], int ntoks);
static int    readStorageData(int j, char* tok[], int ntoks);
static int    readDrainData(int j, char* tok[], int ntoks);
static int    readDrainMatData(int j, char* toks[], int ntoks);
static int    readRemovalsData(int j, char* toks[], int ntoks);           //(5.1.013)

static int    addLidUnit(int j, int k, int n, double x[], char* fname,
              int drainSubcatch, int drainNode);
static int    createLidRptFile(TLidUnit* lidUnit, char* fname);
static void   initLidRptFile(char* title, char* lidID, char* subcatchID,
              TLidUnit* lidUnit);
static void   validateLidProc(int j);
static void   validateLidGroup(int j);
static int    isLidPervious(int k);
static double getImpervAreaRunoff(int j);
static double getPervAreaRunoff(int j);                                   //(5.1.013)
static double getSurfaceDepth(int subcatch);
static void   findNativeInfil(int j, double tStep);

static void   evalLidUnit(int j, TLidUnit* lidUnit, double lidArea,
              double lidInflow, double tStep, double *qRunoff,
              double *qDrain, double *qReturn);
//=============================================================================
void lid_create(int lidCount, int subcatchCount)
//
//  Purpose: creates an array of LID objects.
//  Input:   lidCount = number of LID processes defined in the project
//           subcatchCount = number of subcatchments (one LID group each)
//  Output:  none (sets ErrorCode to ERR_MEMORY on allocation failure)
//
{
    int j;

    //... assign NULL values to LID arrays
    LidProcs = NULL;
    LidGroups = NULL;
    LidCount = lidCount;

    //... create LID groups
    GroupCount = subcatchCount;
    if ( GroupCount > 0 )
    {
        LidGroups = (TLidGroup *) calloc(GroupCount, sizeof(TLidGroup));
        if ( LidGroups == NULL )
        {
            ErrorCode = ERR_MEMORY;
            return;
        }
    }

    //... initialize LID groups (lists are built later by addLidUnit)
    for (j = 0; j < GroupCount; j++) LidGroups[j] = NULL;

    //... create LID objects
    if ( LidCount == 0 ) return;
    LidProcs = (TLidProc *) calloc(LidCount, sizeof(TLidProc));
    if ( LidProcs == NULL )
    {
        ErrorCode = ERR_MEMORY;
        return;
    }

    //... initialize LID objects with neutral defaults; actual values are
    //    filled in by lid_readProcParams from the input file
    for (j = 0; j < LidCount; j++)
    {
        LidProcs[j].lidType = -1;
        LidProcs[j].surface.thickness = 0.0;
        LidProcs[j].surface.voidFrac = 1.0;
        LidProcs[j].surface.roughness = 0.0;
        LidProcs[j].surface.surfSlope = 0.0;
        LidProcs[j].pavement.thickness = 0.0;
        LidProcs[j].soil.thickness = 0.0;
        LidProcs[j].storage.thickness = 0.0;
        LidProcs[j].storage.kSat = 0.0;
        LidProcs[j].drain.coeff = 0.0;
        LidProcs[j].drain.offset = 0.0;
        LidProcs[j].drainMat.thickness = 0.0;
        LidProcs[j].drainMat.roughness = 0.0;
        // per-pollutant drain removal fractions                          //(5.1.013)
        LidProcs[j].drainRmvl = NULL;                                     //
        LidProcs[j].drainRmvl = (double *)                                //
                                calloc(Nobjects[POLLUT], sizeof(double)); //
        if (LidProcs[j].drainRmvl == NULL)                                //
        {                                                                 //
            ErrorCode = ERR_MEMORY;                                       //
            return;                                                       //
        }                                                                 //
    }
}
//=============================================================================
void lid_delete()
//
//  Purpose: deletes all LID objects
//  Input:   none
//  Output:  none
//
{
    int i = 0;

    // release every subcatchment's list of LID units, then the group array
    while ( i < GroupCount ) freeLidGroup(i++);
    FREE(LidGroups);

    // release each LID process's removal array, then the process array   //(5.1.013)
    for (i = 0; i < LidCount; ++i)
    {
        FREE(LidProcs[i].drainRmvl);
    }
    FREE(LidProcs);

    GroupCount = 0;
    LidCount = 0;
}
//=============================================================================
void freeLidGroup(int j)
//
//  Purpose: frees all LID units associated with a subcatchment.
//  Input:   j = group (or subcatchment) index
//  Output:  none
//
{
    TLidGroup  lidGroup = LidGroups[j];
    TLidList*  lidList;
    TLidUnit*  lidUnit;
    TLidList*  nextLidUnit;

    if ( lidGroup == NULL ) return;
    lidList = lidGroup->lidList;
    // walk the singly-linked list, closing each unit's detailed report
    // file (if any) before freeing the unit and its list node
    while (lidList)
    {
        lidUnit = lidList->lidUnit;
        if ( lidUnit->rptFile )
        {
            if ( lidUnit->rptFile->file ) fclose(lidUnit->rptFile->file);
            free(lidUnit->rptFile);
        }
        nextLidUnit = lidList->nextLidUnit;
        free(lidUnit);
        free(lidList);
        lidList = nextLidUnit;
    }
    free(lidGroup);
    LidGroups[j] = NULL;  // mark slot empty so a later free is a no-op
}
//=============================================================================
int lid_readProcParams(char* toks[], int ntoks)
//
//  Purpose: reads LID process information from line of input data file
//  Input:   toks = array of string tokens
//           ntoks = number of tokens
//  Output:  returns error code
//
//  Format for first line that defines a LID process is:
//    LID_ID  LID_Type
//
//  Followed by some combination of lines below depending on LID_Type:
//    LID_ID  SURFACE   <parameters>
//    LID_ID  PAVEMENT  <parameters>
//    LID_ID  SOIL      <parameters>
//    LID_ID  STORAGE   <parameters>
//    LID_ID  DRAIN     <parameters>
//    LID_ID  DRAINMAT  <parameters>
//    LID_ID  REMOVALS  <parameters>                                      //(5.1.013)
//
{
    int j, m;

    // --- check for minimum number of tokens
    if ( ntoks < 2 ) return error_setInpError(ERR_ITEMS, "");

    // --- check that LID exists in database
    j = project_findObject(LID, toks[0]);
    if ( j < 0 ) return error_setInpError(ERR_NAME, toks[0]);

    // --- assign ID if not done yet
    if ( LidProcs[j].ID == NULL )
        LidProcs[j].ID = project_findID(LID, toks[0]);

    // --- check if second token is the type of LID
    //     (the first line for a process only sets its type)
    m = findmatch(toks[1], LidTypeWords);
    if ( m >= 0 )
    {
        LidProcs[j].lidType = m;
        return 0;
    }

    // --- check if second token is name of LID layer
    else m = findmatch(toks[1], LidLayerWords);

    // --- read input parameters for the identified layer
    //     (each case delegates to that layer's reader)
    switch (m)
    {
    case SURF:  return readSurfaceData(j, toks, ntoks);
    case SOIL:  return readSoilData(j, toks, ntoks);
    case STOR:  return readStorageData(j, toks, ntoks);
    case PAVE:  return readPavementData(j, toks, ntoks);
    case DRAIN: return readDrainData(j, toks, ntoks);
    case DRAINMAT: return readDrainMatData(j, toks, ntoks);
    case REMOVALS: return readRemovalsData(j, toks, ntoks);               //(5.1.013)
    }
    return error_setInpError(ERR_KEYWORD, toks[1]);
}
//=============================================================================
int lid_readGroupParams(char* toks[], int ntoks)
//
//  Purpose: reads input data for a LID unit placed in a subcatchment.
//  Input:   toks = array of string tokens
//           ntoks = number of tokens
//  Output:  returns error code
//
//  Format of input data line is:
//    Subcatch_ID  LID_ID  Number  Area  Width  InitSat  FromImp  ToPerv
//    (RptFile  DrainTo  FromPerv)                                        //(5.1.013)
//  where:
//    Subcatch_ID    = name of subcatchment
//    LID_ID         = name of LID process
//    Number     (n) = number of replicate units
//    Area    (x[0]) = area of each unit
//    Width   (x[1]) = outflow width of each unit
//    InitSat (x[2]) = % that LID is initially saturated
//    FromImp (x[3]) = % of impervious runoff sent to LID
//    ToPerv  (x[4]) = 1 if outflow goes to pervious sub-area; 0 if not
//    RptFile        = name of detailed results file (optional)
//    DrainTo        = name of subcatch/node for drain flow (optional)
//    FromPerv (x[5]) = % of pervious runoff sent to LID                  //(5.1.013)
//
{
    int        i, j, k, n;
    double     x[6];                                                      //(5.1.013)
    char*      fname = NULL;
    int        drainSubcatch = -1, drainNode = -1;

    //... check for valid number of input tokens
    if ( ntoks < 8 ) return error_setInpError(ERR_ITEMS, "");

    //... find subcatchment
    j = project_findObject(SUBCATCH, toks[0]);
    if ( j < 0 ) return error_setInpError(ERR_NAME, toks[0]);

    //... find LID process in list of LID processes
    k = project_findObject(LID, toks[1]);
    if ( k < 0 ) return error_setInpError(ERR_NAME, toks[1]);

    //... get number of replicates (0 means the line is a no-op)
    n = atoi(toks[2]);
    if ( n < 0 ) return error_setInpError(ERR_NUMBER, toks[2]);
    if ( n == 0 ) return 0;

    //... convert next 4 tokens to doubles
    for (i = 3; i <= 7; i++)
    {
        if ( ! getDouble(toks[i], &x[i-3]) || x[i-3] < 0.0 )
            return error_setInpError(ERR_NUMBER, toks[i]);
    }

    //... check for valid percentages on tokens 5 & 6 (x[2] & x[3])
    for (i = 2; i <= 3; i++) if ( x[i] > 100.0 )
        return error_setInpError(ERR_NUMBER, toks[i+3]);

    //... read optional report file name ("*" = none)
    if ( ntoks >= 9 && strcmp(toks[8], "*") != 0 ) fname = toks[8];

    //... read optional underdrain outlet; the name may refer to either a
    //    subcatchment or a node, tried in that order
    if ( ntoks >= 10 && strcmp(toks[9], "*") != 0 )
    {
        drainSubcatch = project_findObject(SUBCATCH, toks[9]);
        if ( drainSubcatch < 0 )
        {
            drainNode = project_findObject(NODE, toks[9]);
            if ( drainNode < 0 ) return error_setInpError(ERR_NAME, toks[9]);
        }
    }

    //... read percent of pervious area treated by LID unit               //(5.1.013)
    x[5] = 0.0;                                                           //
    if (ntoks >= 11)                                                      //
    {                                                                     //
        if (!getDouble(toks[10], &x[5]) || x[5] < 0.0 || x[5] > 100.0)    //
            return error_setInpError(ERR_NUMBER, toks[10]);               //
    }                                                                     //

    //... create a new LID unit and add it to the subcatchment's LID group
    return addLidUnit(j, k, n, x, fname, drainSubcatch, drainNode);
}
//=============================================================================
int addLidUnit(int j, int k, int n, double x[], char* fname,
    int drainSubcatch, int drainNode)
//
//  Purpose: adds an LID unit to a subcatchment's LID group.
//  Input:   j = subcatchment index
//           k = LID control index
//           n = number of replicate units
//           x = LID unit's parameters (area, width, %initSat, %fromImp,
//               toPerv flag, %fromPerv)
//           fname = name of detailed performance report file
//           drainSubcatch = index of subcatchment receiving underdrain flow
//           drainNode = index of node receiving underdrain flow
//  Output:  returns an error code
//
{
    TLidUnit*  lidUnit;
    TLidList*  lidList;
    TLidGroup  lidGroup;

    //... create a LID group (pointer to an LidGroup struct)
    //    if one doesn't already exist
    lidGroup = LidGroups[j];
    if ( !lidGroup )
    {
        lidGroup = (struct LidGroup *) malloc(sizeof(struct LidGroup));
        if ( !lidGroup ) return error_setInpError(ERR_MEMORY, "");
        lidGroup->lidList = NULL;
        LidGroups[j] = lidGroup;
    }

    //... create a new LID unit to add to the group
    lidUnit = (TLidUnit *) malloc(sizeof(TLidUnit));
    if ( !lidUnit ) return error_setInpError(ERR_MEMORY, "");
    lidUnit->rptFile = NULL;

    //... add the LID unit to the group (pushed onto head of list)
    lidList = (TLidList *) malloc(sizeof(TLidList));
    if ( !lidList )
    {
        free(lidUnit);
        return error_setInpError(ERR_MEMORY, "");
    }
    lidList->lidUnit = lidUnit;
    lidList->nextLidUnit = lidGroup->lidList;
    lidGroup->lidList = lidList;

    //... assign parameter values to LID unit, converting user units to
    //    internal units via UCF and percentages to fractions
    lidUnit->lidIndex     = k;
    lidUnit->number       = n;
    lidUnit->area         = x[0] / SQR(UCF(LENGTH));
    lidUnit->fullWidth    = x[1] / UCF(LENGTH);
    lidUnit->initSat      = x[2] / 100.0;
    lidUnit->fromImperv   = x[3] / 100.0;
    lidUnit->toPerv       = (x[4] > 0.0);
    lidUnit->fromPerv     = x[5] / 100.0;                                 //(5.1.013)
    lidUnit->drainSubcatch = drainSubcatch;
    lidUnit->drainNode     = drainNode;

    //... open report file if it was supplied
    if ( fname != NULL )
    {
        if ( !createLidRptFile(lidUnit, fname) )
            return error_setInpError(ERR_RPT_FILE, fname);
    }
    return 0;
}
//=============================================================================
int createLidRptFile(TLidUnit* lidUnit, char* fname)
//
//  Purpose: allocates a report-file object for an LID unit and opens the
//           named text file for writing.
//  Input:   lidUnit = LID unit to attach the report file to
//           fname   = name of detailed report file
//  Output:  returns 1 on success, 0 on allocation or open failure
//           (the allocated TLidRptFile stays attached to the unit either
//           way and is released later by freeLidGroup)
//
{
    TLidRptFile* rptFile;

    rptFile = (TLidRptFile *) malloc(sizeof(TLidRptFile));
    if ( rptFile == NULL ) return 0;
    lidUnit->rptFile = rptFile;
    rptFile->file = fopen(fname, "wt");
    if ( rptFile->file == NULL ) return 0;
    return 1;
}
//=============================================================================
int readSurfaceData(int j, char* toks[], int ntoks)
//
//  Purpose: reads surface layer data for a LID process from line of input
//           data file
//  Input:   j = LID process index
//           toks = array of string tokens
//           ntoks = number of tokens
//  Output:  returns error code
//
//  Format of data is:
//    LID_ID  SURFACE  StorageHt  VegVolFrac  Roughness  SurfSlope  SideSlope  DamHt
//
{
    int    i;
    double x[5];

    if ( ntoks < 7 ) return error_setInpError(ERR_ITEMS, "");
    for (i = 2; i < 7; i++)
    {
        if ( ! getDouble(toks[i], &x[i-2]) || x[i-2] < 0.0 )
            return error_setInpError(ERR_NUMBER, toks[i]);
    }

    // vegetative volume fraction must be < 1
    if ( x[1] >= 1.0 ) return error_setInpError(ERR_NUMBER, toks[3]);
    // no surface storage height => ignore any vegetative volume
    if ( x[0] == 0.0 ) x[1] = 0.0;

    LidProcs[j].surface.thickness     = x[0] / UCF(RAINDEPTH);
    // stored as the open (void) fraction, i.e. 1 - vegetated fraction
    LidProcs[j].surface.voidFrac      = 1.0 - x[1];
    LidProcs[j].surface.roughness     = x[2];
    LidProcs[j].surface.surfSlope     = x[3] / 100.0;   // % -> fraction
    LidProcs[j].surface.sideSlope     = x[4];
    return 0;
}
//=============================================================================
int readPavementData(int j, char* toks[], int ntoks)
//
//  Purpose: reads pavement layer data for a LID process from line of input
//           data file
//  Input:   j = LID process index
//           toks = array of string tokens
//           ntoks = number of tokens
//  Output:  returns error code
//
//  Format of data is:
//    LID_ID PAVEMENT  Thickness  VoidRatio  FracImperv  Permeability  ClogFactor
//                     (RegenDays RegenDegree)                            //(5.1.013)
//
{
    int    i;
    double x[7];                                                          //(5.1.013)

    if ( ntoks < 7 ) return error_setInpError(ERR_ITEMS, "");
    for (i = 2; i < 7; i++)
    {
        if ( ! getDouble(toks[i], &x[i-2]) || x[i-2] < 0.0 )
            return error_setInpError(ERR_NUMBER, toks[i]);
    }

    // ... read optional clogging regeneration properties                 //(5.1.013)
    //     (interval in days, degree of restoration in 0..1)              //
    x[5] = 0.0;                                                           //
    if (ntoks > 7)                                                        //
    {                                                                     //
        if (!getDouble(toks[7], &x[5]) || x[5] < 0.0)                     //
            return error_setInpError(ERR_NUMBER, toks[7]);                //
    }                                                                     //
    x[6] = 0.0;                                                           //
    if (ntoks > 8)                                                        //
    {                                                                     //
        if (!getDouble(toks[8], &x[6]) || x[6] < 0.0 || x[6] > 1.0)       //
            return error_setInpError(ERR_NUMBER, toks[8]);                //
    }                                                                     //

    //... convert void ratio to void fraction
    x[1] = x[1]/(x[1] + 1.0);

    LidProcs[j].pavement.thickness    = x[0] / UCF(RAINDEPTH);
    LidProcs[j].pavement.voidFrac     = x[1];
    LidProcs[j].pavement.impervFrac   = x[2];
    LidProcs[j].pavement.kSat         = x[3] / UCF(RAINFALL);
    // clogging factor is scaled by the layer's effective void volume
    if (LidProcs[j].pavement.thickness > 0.0)
    {
        LidProcs[j].pavement.clogFactor = x[4] *
            LidProcs[j].pavement.thickness *
            LidProcs[j].pavement.voidFrac *
            (1.0 - LidProcs[j].pavement.impervFrac);
    }
    else
    {
        LidProcs[j].pavement.clogFactor = 0.0;
    }
    LidProcs[j].pavement.regenDays   = x[5];                              //(5.1.013)
    LidProcs[j].pavement.regenDegree = x[6];                              //
    return 0;
}
//=============================================================================
int readSoilData(int j, char* toks[], int ntoks)
//
// Purpose: reads soil layer data for a LID process from line of input
//          data file
// Input:   j = LID process index
//          toks = array of string tokens
//          ntoks = number of tokens
// Output:  returns error code
//
// Format of data is:
//    LID_ID  SOIL  Thickness  Porosity  FieldCap  WiltPt  Ksat  Kslope  Suction
//
{
    int    k;
    double v[7];

    //... a line must supply at least 9 tokens
    if ( ntoks < 9 ) return error_setInpError(ERR_ITEMS, "");

    //... parse the 7 numeric values found in tokens 2 through 8
    //    (all must be non-negative)
    for (k = 0; k < 7; k++)
    {
        if ( !getDouble(toks[k+2], &v[k]) || v[k] < 0.0 )
            return error_setInpError(ERR_NUMBER, toks[k+2]);
    }

    //... transfer values (with unit conversion) to the LID process
    LidProcs[j].soil.thickness = v[0] / UCF(RAINDEPTH);
    LidProcs[j].soil.porosity  = v[1];
    LidProcs[j].soil.fieldCap  = v[2];
    LidProcs[j].soil.wiltPoint = v[3];
    LidProcs[j].soil.kSat      = v[4] / UCF(RAINFALL);
    LidProcs[j].soil.kSlope    = v[5];
    LidProcs[j].soil.suction   = v[6] / UCF(RAINDEPTH);
    return 0;
}
//=============================================================================
int readStorageData(int j, char* toks[], int ntoks)
//
// Purpose: reads drainage layer data for a LID process from line of input
//          data file
// Input:   j = LID process index
//          toks = array of string tokens
//          ntoks = number of tokens
// Output:  returns error code
//
// Format of data is:
//    LID_ID STORAGE  Thickness  VoidRatio  Ksat  ClogFactor
//
{
    int    k;
    double v[6];

    //... a line must supply at least 6 tokens
    if ( ntoks < 6 ) return error_setInpError(ERR_ITEMS, "");

    //... parse the 4 numeric values found in tokens 2 through 5
    //    (all must be non-negative)
    for (k = 0; k < 4; k++)
    {
        if ( !getDouble(toks[k+2], &v[k]) || v[k] < 0.0 )
            return error_setInpError(ERR_NUMBER, toks[k+2]);
    }

    //... convert void ratio to void fraction (e / (e + 1))
    v[1] = v[1] / (v[1] + 1.0);

    //... transfer values (with unit conversion) to the LID storage layer
    LidProcs[j].storage.thickness = v[0] / UCF(RAINDEPTH);
    LidProcs[j].storage.voidFrac  = v[1];
    LidProcs[j].storage.kSat      = v[2] / UCF(RAINFALL);

    //... clogging factor is scaled by the layer's pore volume per unit
    //    area; a zero-thickness layer cannot clog
    if (LidProcs[j].storage.thickness > 0.0)
    {
        LidProcs[j].storage.clogFactor = v[3] *
            LidProcs[j].storage.thickness *
            LidProcs[j].storage.voidFrac;
    }
    else
    {
        LidProcs[j].storage.clogFactor = 0.0;
    }
    return 0;
}
//=============================================================================
int readDrainData(int j, char* toks[], int ntoks)
//
// Purpose: reads underdrain data for a LID process from line of input
//          data file
// Input:   j = LID process index
//          toks = array of string tokens
//          ntoks = number of tokens
// Output:  returns error code
//
// Format of data is:
//    LID_ID DRAIN coeff expon offset delay hOpen hClose curve             //(5.1.013)
//
// Note: only coeff through delay are required; hOpen, hClose and the
//       control curve name are optional and default to 0 / none.
//
{
    int    i;
    double x[6];                                                           //(5.1.013)
    //... read numerical parameters
    if ( ntoks < 6 ) return error_setInpError(ERR_ITEMS, "");
    //... pre-fill with zeros so any optional tokens not supplied keep
    //    their default value of 0
    for (i = 0; i < 6; i++) x[i] = 0.0;                                    //(5.1.013)
    //... parse up to 6 numeric values (tokens 2-7); a token is only
    //    parsed if it was actually supplied (ntoks > i)
    for (i = 2; i < 8; i++)                                                //
    {
        if ( ( ntoks > i && ! getDouble(toks[i], &x[i-2]) ) || x[i-2] < 0.0 ) //(5.1.013)
            return error_setInpError(ERR_NUMBER, toks[i]);
    }
    //... look up optional flow control curve by name; i = -1 means the
    //    drain has no control curve
    i = -1;                                                                //(5.1.013)
    if ( ntoks >= 9 )                                                      //
    {                                                                      //
        i = project_findObject(CURVE, toks[8]);                            //
        if (i < 0) return error_setInpError(ERR_NAME, toks[8]);            //
    }                                                                      //
    //... save parameters to LID drain layer structure
    LidProcs[j].drain.coeff  = x[0];
    LidProcs[j].drain.expon  = x[1];
    LidProcs[j].drain.offset = x[2] / UCF(RAINDEPTH);
    //... delay converted from hours to seconds
    LidProcs[j].drain.delay  = x[3] * 3600.0;
    LidProcs[j].drain.hOpen  = x[4] / UCF(RAINDEPTH);                      //(5.1.013)
    LidProcs[j].drain.hClose = x[5] / UCF(RAINDEPTH);                      //
    LidProcs[j].drain.qCurve = i;                                          //
    return 0;
}
//=============================================================================
int readDrainMatData(int j, char* toks[], int ntoks)
//
// Purpose: reads drainage mat data for a LID process from line of input
//          data file
// Input:   j = LID process index
//          toks = array of string tokens
//          ntoks = number of tokens
// Output:  returns error code
//
// Format of data is:
//    LID_ID DRAINMAT  thickness  voidRatio  roughness
//
{
    int    i;
    double x[3];

    //... read numerical parameters
    if ( ntoks < 5 ) return error_setInpError(ERR_ITEMS, "");

    //... a drainage mat layer only applies to green roofs; silently
    //    ignore it for any other LID type
    if ( LidProcs[j].lidType != GREEN_ROOF ) return 0;

    //... parse the 3 numeric values in tokens 2-4 (all non-negative)
    for (i = 2; i < 5; i++)
    {
        if ( ! getDouble(toks[i], &x[i-2]) || x[i-2] < 0.0 )
            return error_setInpError(ERR_NUMBER, toks[i]);
    }

    //... save parameters to LID drain layer structure
    //    (stray duplicate semicolon removed from thickness assignment)
    LidProcs[j].drainMat.thickness = x[0] / UCF(RAINDEPTH);
    LidProcs[j].drainMat.voidFrac  = x[1];
    LidProcs[j].drainMat.roughness = x[2];
    return 0;
}
//=============================================================================
//// This function was added to release 5.1.013. //// //(5.1.013)
int readRemovalsData(int j, char* toks[], int ntoks)
//
// Purpose: reads pollutant removal data for a LID process from line of input
//          data file
// Input:   j = LID process index
//          toks = array of string tokens
//          ntoks = number of tokens
// Output:  returns error code
//
// Format of data is:
//    LID_ID REMOVALS pollut1 %removal1 pollut2 %removal2 ...
//
{
    int    tok = 2;
    int    p;
    double pct;

    //... need at least one pollutant name / removal pair after the
    //    first two tokens
    if (ntoks < 4) return error_setInpError(ERR_ITEMS, "");

    //... process name / %removal pairs until tokens are exhausted
    while (tok < ntoks)
    {
        //... look up the pollutant by name
        p = project_findObject(POLLUT, toks[tok]);
        if (p < 0) return error_setInpError(ERR_NAME, toks[tok]);

        //... a removal value must follow the pollutant name
        tok++;
        if (tok == ntoks) return error_setInpError(ERR_ITEMS, "");

        //... parse the % removal (must lie between 0 and 100)
        if (!getDouble(toks[tok], &pct) || pct < 0.0 || pct > 100.0)
            return error_setInpError(ERR_NUMBER, toks[tok]);

        //... store the removal for the LID process as a fraction
        LidProcs[j].drainRmvl[p] = pct / 100.0;
        tok++;
    }
    return 0;
}
//=============================================================================
void lid_writeSummary()
//
// Purpose: writes summary of LID processes used to report file.
// Input:   none
// Output:  none
//
// Writes one table row per LID unit, showing its subcatchment, process
// name, number of replicate units, unit area & width (in user units),
// percent of subcatchment area covered, and percent of impervious and
// pervious runoff treated.
//
{
    int        j, k;
    double     pctArea;
    TLidUnit*  lidUnit;
    TLidList*  lidList;
    TLidGroup  lidGroup;
    //... write the table heading
    fprintf(Frpt.file, "\n");
    fprintf(Frpt.file, "\n");
    fprintf(Frpt.file, "\n *******************");
    fprintf(Frpt.file, "\n LID Control Summary");
    fprintf(Frpt.file, "\n *******************");
    fprintf(Frpt.file,
"\n                                   No. of        Unit        Unit      %% Area    %% Imperv      %% Perv"); //(5.1.013)
    fprintf(Frpt.file,                                                                                        //
"\n  Subcatchment     LID Control      Units        Area       Width     Covered     Treated     Treated");   //
    fprintf(Frpt.file,                                                                                        //
"\n  ---------------------------------------------------------------------------------------------------");  //
    //... write one row for each LID unit in each subcatchment's group
    for (j = 0; j < GroupCount; j++)
    {
        lidGroup = LidGroups[j];
        if ( lidGroup == NULL ) continue;
        lidList = lidGroup->lidList;
        while ( lidList )
        {
            lidUnit = lidList->lidUnit;
            k = lidUnit->lidIndex;
            // percent of subcatchment area occupied by this LID's units
            // (NOTE(review): assumes Subcatch[j].area > 0 -- confirm that
            //  zero-area subcatchments cannot reach this point)
            pctArea = lidUnit->area * lidUnit->number / Subcatch[j].area * 100.0;
            fprintf(Frpt.file, "\n  %-16s %-16s", Subcatch[j].ID, LidProcs[k].ID);
            // areas converted from ft2 / ft back to user units for display
            fprintf(Frpt.file, "%6d  %10.2f  %10.2f  %10.2f  %10.2f  %10.2f",  //(5.1.013)
                lidUnit->number, lidUnit->area * SQR(UCF(LENGTH)),
                lidUnit->fullWidth * UCF(LENGTH), pctArea,
                lidUnit->fromImperv*100.0, lidUnit->fromPerv*100.0);           //(5.1.013)
            lidList = lidList->nextLidUnit;
        }
    }
}
//=============================================================================
void lid_validate()
//
// Purpose: validates LID process and group parameters.
// Input:   none
// Output:  none
//
{
    int i;

    //... first validate each LID process, then each subcatchment's
    //    group of LID units
    for (i = 0; i < LidCount; i++)   validateLidProc(i);
    for (i = 0; i < GroupCount; i++) validateLidGroup(i);
}
//=============================================================================
void validateLidProc(int j)
//
// Purpose: validates LID process parameters.
// Input:   j = LID process index
// Output:  none
//
// Checks that each LID type has its required layers, that each supplied
// layer's parameters are physically consistent, and then derives the
// overland flow coefficients (alpha) for the surface and drain mat
// layers.  Errors are reported via report_writeErrorMsg().
//
{
    int layerMissing = FALSE;
    //... check that LID type was supplied
    if ( LidProcs[j].lidType < 0 )
    {
        report_writeErrorMsg(ERR_LID_TYPE, LidProcs[j].ID);
        return;
    }
    //... check that required layers were defined
    //    (a layer with zero thickness was never read from input)
    switch (LidProcs[j].lidType)
    {
    case BIO_CELL:
    case RAIN_GARDEN:
        if ( LidProcs[j].soil.thickness <= 0.0 ) layerMissing = TRUE;
        break;
    case GREEN_ROOF:
        if ( LidProcs[j].soil.thickness <= 0.0 ) layerMissing = TRUE;
        if ( LidProcs[j].drainMat.thickness <= 0.0) layerMissing = TRUE;
        break;
    case POROUS_PAVEMENT:
        if ( LidProcs[j].pavement.thickness  <= 0.0 ) layerMissing = TRUE;
        break;
    case INFIL_TRENCH:
        if ( LidProcs[j].storage.thickness <= 0.0 ) layerMissing = TRUE;
        break;
    }
    if ( layerMissing )
    {
        report_writeErrorMsg(ERR_LID_LAYER, LidProcs[j].ID);
        return;
    }
    //... check pavement layer parameters
    //    (void & impervious fractions must be valid fractions and the
    //     pavement must be permeable)
    if ( LidProcs[j].lidType == POROUS_PAVEMENT )
    {
        if ( LidProcs[j].pavement.thickness  <= 0.0
        ||   LidProcs[j].pavement.kSat       <= 0.0
        ||   LidProcs[j].pavement.voidFrac   <= 0.0
        ||   LidProcs[j].pavement.voidFrac   >  1.0
        ||   LidProcs[j].pavement.impervFrac >  1.0 )
        {
            strcpy(Msg, LidProcs[j].ID);
            strcat(Msg, ERR_PAVE_LAYER);
            report_writeErrorMsg(ERR_LID_PARAMS, Msg);
        }
    }
    //... check soil layer parameters
    //    (moisture limits must be ordered: wiltPoint < fieldCap < porosity)
    if ( LidProcs[j].soil.thickness > 0.0 )
    {
        if ( LidProcs[j].soil.porosity      <= 0.0
        ||   LidProcs[j].soil.fieldCap      >= LidProcs[j].soil.porosity
        ||   LidProcs[j].soil.wiltPoint     >= LidProcs[j].soil.fieldCap
        ||   LidProcs[j].soil.kSat          <= 0.0
        ||   LidProcs[j].soil.kSlope        <  0.0 )
        {
            strcpy(Msg, LidProcs[j].ID);
            strcat(Msg, ERR_SOIL_LAYER);
            report_writeErrorMsg(ERR_LID_PARAMS, Msg);
        }
    }
    //... check storage layer parameters
    if ( LidProcs[j].storage.thickness > 0.0 )
    {
        if ( LidProcs[j].storage.voidFrac <= 0.0 ||
             LidProcs[j].storage.voidFrac > 1.0 )
        {
            strcpy(Msg, LidProcs[j].ID);
            strcat(Msg, ERR_STOR_LAYER);
            report_writeErrorMsg(ERR_LID_PARAMS, Msg);
        }
    }
    //... if no storage layer adjust void fraction and drain offset
    else
    {
        LidProcs[j].storage.voidFrac = 1.0;
        LidProcs[j].drain.offset = 0.0;
    }
    //... check for invalid drain open/closed heads                        //(5.1.013)
    //    (when a control head is used, the open head must exceed the
    //     closed head)
    if (LidProcs[j].drain.hOpen > 0.0 &&                                   //
        LidProcs[j].drain.hOpen <= LidProcs[j].drain.hClose)               //
    {                                                                      //
        strcpy(Msg, LidProcs[j].ID);                                       //
        strcat(Msg, ERR_DRAIN_HEADS);                                      //
        report_writeErrorMsg(ERR_LID_PARAMS, Msg);                         //
    }                                                                      //
    //... compute the surface layer's overland flow constant (alpha)
    //    (1.49 * sqrt(slope) / roughness -- the US-unit Manning factor)
    if ( LidProcs[j].lidType == VEG_SWALE )
    {
        if ( LidProcs[j].surface.roughness * 
             LidProcs[j].surface.surfSlope <= 0.0 ||
             LidProcs[j].surface.thickness == 0.0
           )
        {
            strcpy(Msg, LidProcs[j].ID);
            strcat(Msg, ERR_SWALE_SURF);
            report_writeErrorMsg(ERR_LID_PARAMS, Msg);
        }
        else LidProcs[j].surface.alpha = 
            1.49 * sqrt(LidProcs[j].surface.surfSlope) /
                LidProcs[j].surface.roughness;
    }
    else
    {
        //... compute surface overland flow coeff.
        //    (alpha = 0 disables surface overland flow routing)
        if ( LidProcs[j].surface.roughness > 0.0 )
            LidProcs[j].surface.alpha = 1.49 / LidProcs[j].surface.roughness *
                                        sqrt(LidProcs[j].surface.surfSlope);
        else LidProcs[j].surface.alpha = 0.0;
    }
    //... compute drainage mat layer's flow coeff.
    //    (uses the surface layer's slope)
    if ( LidProcs[j].drainMat.roughness > 0.0 )
    {
        LidProcs[j].drainMat.alpha = 1.49 / LidProcs[j].drainMat.roughness *
                                    sqrt(LidProcs[j].surface.surfSlope);
    }
    else LidProcs[j].drainMat.alpha = 0.0;
    //... for certain LID types, immediate overflow of excess surface water
    //    occurs if either the surface roughness or slope is zero
    LidProcs[j].surface.canOverflow = TRUE;
    switch (LidProcs[j].lidType)
    {
        case ROOF_DISCON: LidProcs[j].surface.canOverflow = FALSE; break;
        case INFIL_TRENCH:
        case POROUS_PAVEMENT:
        case BIO_CELL:
        case RAIN_GARDEN:
        case GREEN_ROOF:
            if ( LidProcs[j].surface.alpha > 0.0 )
                LidProcs[j].surface.canOverflow = FALSE;
    }
    //... rain barrels have 100% void space and impermeable bottom
    if ( LidProcs[j].lidType == RAIN_BARREL )
    {
        LidProcs[j].storage.voidFrac = 1.0;
        LidProcs[j].storage.kSat = 0.0;
    }
    //... set storage layer parameters of a green roof
    //    (the drainage mat acts as the storage layer; no clogging and
    //     no bottom seepage)
    if ( LidProcs[j].lidType == GREEN_ROOF )
    {
        LidProcs[j].storage.thickness = LidProcs[j].drainMat.thickness;
        LidProcs[j].storage.voidFrac = LidProcs[j].drainMat.voidFrac;
        LidProcs[j].storage.clogFactor = 0.0;
        LidProcs[j].storage.kSat = 0.0;
    }
}
//=============================================================================
void validateLidGroup(int j)
//
// Purpose: validates properties of LID units grouped in a subcatchment.
// Input:   j = subcatchment index 
// Output:  returns 1 if data are valid, 0 if not
//
// Also derives each unit's Green-Ampt infiltration parameters, assigns
// default drain outlets, and sets the subcatchment's total LID area.
//
{
    int        k;
    double     p[3];
    double     totalArea = Subcatch[j].area;
    double     totalLidArea = 0.0;
    double     fromImperv = 0.0;
    double     fromPerv = 0.0;                                             //(5.1.013)
    TLidUnit*  lidUnit;
    TLidList*  lidList;
    TLidGroup  lidGroup;
    lidGroup = LidGroups[j];
    if ( lidGroup == NULL ) return;
    lidList = lidGroup->lidList;
    while ( lidList )
    {
        lidUnit = lidList->lidUnit;
        k = lidUnit->lidIndex;
        //... update contributing fractions
        //    (sums are checked against 1.0 after the loop)
        totalLidArea += (lidUnit->area * lidUnit->number);
        fromImperv += lidUnit->fromImperv;
        fromPerv += lidUnit->fromPerv;                                     //(5.1.013)
        //... assign biocell soil layer infiltration parameters
        //    p[0] = suction head, p[1] = sat. hydraulic conductivity,
        //    p[2] = initial moisture deficit (all in user units)
        lidUnit->soilInfil.Ks = 0.0;
        if ( LidProcs[k].soil.thickness > 0.0 )
        {
            p[0] = LidProcs[k].soil.suction * UCF(RAINDEPTH);
            p[1] = LidProcs[k].soil.kSat * UCF(RAINFALL);
            p[2] = (LidProcs[k].soil.porosity - LidProcs[k].soil.wiltPoint) *
                   (1.0 - lidUnit->initSat);
            if ( grnampt_setParams(&(lidUnit->soilInfil), p) == FALSE )
            {
                strcpy(Msg, LidProcs[k].ID);
                strcat(Msg, ERR_SOIL_LAYER);
                report_writeErrorMsg(ERR_LID_PARAMS, Msg);
            }
        }
        //... assign vegetative swale infiltration parameters
        //    (taken from the subcatchment's own Green-Ampt values when
        //     the project uses a Green-Ampt infiltration model)
        if ( LidProcs[k].lidType == VEG_SWALE )
        {
            if ( InfilModel == GREEN_AMPT || InfilModel == MOD_GREEN_AMPT )
            {
                p[0] = GAInfil[j].S * UCF(RAINDEPTH);
                p[1] = GAInfil[j].Ks * UCF(RAINFALL);
                p[2] = GAInfil[j].IMDmax;
                if ( grnampt_setParams(&(lidUnit->soilInfil), p) == FALSE )
                {
                    strcpy(Msg, LidProcs[k].ID);
                    strcat(Msg, ERR_GREEN_AMPT);
                    report_writeErrorMsg(ERR_LID_PARAMS, Msg);
                }
            }
            //... a swale must have a positive top width
            if ( lidUnit->fullWidth <= 0.0 )
            {
                strcpy(Msg, LidProcs[k].ID);
                strcat(Msg, ERR_SWALE_WIDTH);
                report_writeErrorMsg(ERR_LID_PARAMS, Msg);
            }
        }
        //... LID unit cannot send outflow back to subcatchment's
        //    pervious area if none exists
        if ( Subcatch[j].fracImperv >= 0.999 ) lidUnit->toPerv = 0;
        //... assign drain outlet if not set by user
        //    (defaults to the parent subcatchment's own outlet)
        if ( lidUnit->drainNode == -1 && lidUnit->drainSubcatch == -1 )
        {
            lidUnit->drainNode = Subcatch[j].outNode;
            lidUnit->drainSubcatch = Subcatch[j].outSubcatch;
        }
        lidList = lidList->nextLidUnit;
    }
    //... check contributing area fractions
    //    (a 0.1% tolerance allows for round-off in user input)
    if ( totalLidArea > 1.001 * totalArea )
    {
        report_writeErrorMsg(ERR_LID_AREAS, Subcatch[j].ID);
    }
    if ( fromImperv > 1.001 || fromPerv > 1.001 )                          //(5.1.013)
    {
        report_writeErrorMsg(ERR_LID_CAPTURE_AREA, Subcatch[j].ID);
    }
    //... Make subcatchment LID area equal total area if the two are close
    if ( totalLidArea > 0.999 * totalArea ) totalLidArea = totalArea;
    Subcatch[j].lidArea = totalLidArea;
}
//=============================================================================
void lid_initState()
//
// Purpose: initializes the internal state of each LID in a subcatchment.
// Input:   none
// Output:  none
//
// Sets each LID unit's layer depths / moisture from its initial
// saturation, zeroes its flow and water-balance accumulators, and marks
// whether any LID starts out wet (HasWetLids).
//
{
    int i, j, k;
    TLidUnit*  lidUnit;
    TLidList*  lidList;
    TLidGroup  lidGroup;
    double     initVol;
    // antecedent dry time carried over from the simulation start options
    double     initDryTime = StartDryDays * SECperDAY;
    HasWetLids = FALSE;
    for (j = 0; j < GroupCount; j++)
    {
        //... check if group exists
        lidGroup = LidGroups[j];
        if ( lidGroup == NULL ) continue;
        //... initialize group variables
        lidGroup->pervArea = 0.0;
        lidGroup->flowToPerv = 0.0;
        lidGroup->oldDrainFlow = 0.0;
        lidGroup->newDrainFlow = 0.0;
        //... examine each LID in the group
        lidList = lidGroup->lidList;
        while ( lidList )
        {
            //... initialize depth & moisture content
            lidUnit = lidList->lidUnit;
            k = lidUnit->lidIndex;
            lidUnit->surfaceDepth = 0.0;
            lidUnit->storageDepth = 0.0;
            lidUnit->soilMoisture = 0.0;
            lidUnit->paveDepth = 0.0;
            lidUnit->dryTime = initDryTime;
            lidUnit->volTreated = 0.0;                                     //(5.1.013)
            lidUnit->nextRegenDay = LidProcs[k].pavement.regenDays;        //
            //... initVol accumulates the unit's starting water volume
            //    per unit area (ft) for the water balance
            initVol = 0.0;
            if ( LidProcs[k].soil.thickness > 0.0 )
            {
                //... interpolate initial moisture between wilting point
                //    and porosity using the unit's initial saturation
                lidUnit->soilMoisture = LidProcs[k].soil.wiltPoint +
                    lidUnit->initSat * (LidProcs[k].soil.porosity -
                    LidProcs[k].soil.wiltPoint);
                initVol += lidUnit->soilMoisture * LidProcs[k].soil.thickness;
            }
            if ( LidProcs[k].storage.thickness > 0.0 )
            {
                lidUnit->storageDepth = lidUnit->initSat *
                    LidProcs[k].storage.thickness;
                initVol += lidUnit->storageDepth * LidProcs[k].storage.voidFrac;
            }
            if ( LidProcs[k].drainMat.thickness > 0.0 )
            {
                //... for green roofs the drain mat serves as storage
                lidUnit->storageDepth = lidUnit->initSat *
                    LidProcs[k].drainMat.thickness;
                initVol += lidUnit->storageDepth * LidProcs[k].drainMat.voidFrac;
            }
            if ( lidUnit->initSat > 0.0 ) HasWetLids = TRUE;
            //... initialize water balance totals
            lidproc_initWaterBalance(lidUnit, initVol);
            //... initialize water rate
            lidproc_initWaterRate(lidUnit);
            lidUnit->volTreated = 0.0;
            //... initialize report file for the LID
            if ( lidUnit->rptFile )
            {
                initLidRptFile(Title[0], LidProcs[k].ID, Subcatch[j].ID, lidUnit);
            }
            //... initialize drain flows
            lidUnit->oldDrainFlow = 0.0;
            lidUnit->newDrainFlow = 0.0;
            //... set previous flux rates to 0
            for (i = 0; i < MAX_LAYERS; i++)
            {
                lidUnit->oldFluxRates[i] = 0.0;
            }
            //... initialize infiltration state variables
            if ( lidUnit->soilInfil.Ks > 0.0 )
                grnampt_initState(&(lidUnit->soilInfil));
            //... add contribution to pervious LID area
            if ( isLidPervious(lidUnit->lidIndex) )
                lidGroup->pervArea += (lidUnit->area * lidUnit->number);
            lidList = lidList->nextLidUnit;
        }
    }
}
//=============================================================================
void lid_setOldGroupState(int j)
//
// Purpose: saves the current drain flow rate for the LIDs in a subcatchment.
// Input: j = subcatchment index
// Output: none
//
{
TLidList* lidList;
if ( LidGroups[j] != NULL )
{
LidGroups[j]->oldDrainFlow = LidGroups[j]->newDrainFlow;
LidGroups[j]->newDrainFlow = 0.0;
lidList = LidGroups[j]->lidList;
while (lidList)
{
lidList->lidUnit->oldDrainFlow = lidList->lidUnit->newDrainFlow;
lidList->lidUnit->newDrainFlow = 0.0;
lidList = lidList->nextLidUnit;
}
}
}
//=============================================================================
int isLidPervious(int k)
//
// Purpose: determines if a LID process allows infiltration or not.
// Input:   k = LID process index 
// Output:  returns 1 if process is pervious or 0 if not
//
{
    //... a LID is pervious when it has no storage layer at all, or when
    //    its storage layer has a permeable (kSat > 0) bottom
    if ( LidProcs[k].storage.thickness == 0.0 ) return 1;
    return ( LidProcs[k].storage.kSat > 0.0 );
}
//=============================================================================
double getSurfaceDepth(int j)
//
// Purpose: computes the depth (volume per unit area) of ponded water on the
// surface of all LIDs within a subcatchment.
// Input: j = subcatchment index
// Output: returns volumetric depth of ponded water (ft)
//
{
int k;
double depth = 0.0;
TLidUnit* lidUnit;
TLidList* lidList;
TLidGroup lidGroup;
lidGroup = LidGroups[j];
if ( lidGroup == NULL ) return 0.0;
if ( Subcatch[j].lidArea == 0.0 ) return 0.0;
lidList = lidGroup->lidList;
while ( lidList )
{
lidUnit = lidList->lidUnit;
k = lidUnit->lidIndex;
depth += lidUnit->surfaceDepth * LidProcs[k].surface.voidFrac *
lidUnit->area * lidUnit->number;
lidList = lidList->nextLidUnit;
}
return depth / Subcatch[j].lidArea;
}
//=============================================================================
double lid_getPervArea(int j)
//
// Purpose: retrieves amount of pervious LID area in a subcatchment.
// Input: j = subcatchment index
// Output: returns amount of pervious LID area (ft2)
//
{
if ( LidGroups[j] ) return LidGroups[j]->pervArea;
else return 0.0;
}
//=============================================================================
double lid_getFlowToPerv(int j)
//
// Purpose: retrieves flow returned from LID treatment to pervious area of
//          a subcatchment.
// Input:   j = subcatchment index
// Output:  returns flow returned to pervious area (cfs)
//
{
    //... subcatchments without a LID group return no flow
    if ( LidGroups[j] == NULL ) return 0.0;
    return LidGroups[j]->flowToPerv;
}
//=============================================================================
double lid_getStoredVolume(int j)
//
// Purpose: computes stored volume of water for all LIDs
// grouped within a subcatchment.
// Input: j = subcatchment index
// Output: returns stored volume of water (ft3)
//
{
double total = 0.0;
TLidUnit* lidUnit;
TLidList* lidList;
TLidGroup lidGroup;
lidGroup = LidGroups[j];
if ( lidGroup == NULL || Subcatch[j].lidArea == 0.0 ) return 0.0;
lidList = lidGroup->lidList;
while ( lidList )
{
lidUnit = lidList->lidUnit;
total += lidUnit->waterBalance.finalVol * lidUnit->area * lidUnit->number;
lidList = lidList->nextLidUnit;
}
return total;
}
//=============================================================================
double lid_getDrainFlow(int j, int timePeriod)
//
// Purpose: returns flow from all of a subcatchment's LID drains for
// a designated time period
// Input: j = subcatchment index
// timePeriod = either PREVIOUS or CURRENT
// Output: total drain flow (cfs) from the subcatchment.
{
if ( LidGroups[j] != NULL )
{
if ( timePeriod == PREVIOUS ) return LidGroups[j]->oldDrainFlow;
else return LidGroups[j]->newDrainFlow;
}
return 0.0;
}
//=============================================================================
//// This function was modified for relelase 5.1.013. //// //(5.1.013)
void lid_addDrainLoads(int j, double c[], double tStep)
//
// Purpose: adds pollutant loads routed from drains to system
//          mass balance totals.
// Input:   j = subcatchment index
//          c = array of pollutant washoff concentrations (mass/L)
//          tStep = time step (sec)
// Output:  none.
//
{
    int    isRunoffLoad;     // true if drain becomes external runoff load
    int    p;                // pollutant index
    double r;                // pollutant fractional removal
    double w;                // pollutant mass load (lb or kg)
    TLidUnit*  lidUnit;
    TLidList*  lidList;
    TLidGroup  lidGroup;
    //... check if LID group exists
    lidGroup = LidGroups[j];
    if ( lidGroup != NULL )
    {
        //... examine each LID unit in the group
        lidList = lidGroup->lidList;
        while ( lidList )
        {
            lidUnit = lidList->lidUnit;
            //... see if unit's drain flow becomes external runoff
            //    (it does when it discharges to a node or back onto the
            //     same subcatchment)
            isRunoffLoad = (lidUnit->drainNode >= 0 ||
                            lidUnit->drainSubcatch == j);
            //... for each pollutant not routed back on to subcatchment surface
            if (!lidUnit->toPerv) for (p = 0; p < Nobjects[POLLUT]; p++)
            {
                //... get mass load flowing through the drain
                //    (LperFT3 converts ft3 to liters; mcf converts to
                //     the pollutant's mass units)
                w = lidUnit->newDrainFlow * c[p] * tStep * LperFT3 * Pollut[p].mcf;
                //... get fractional removal for this load
                r = LidProcs[lidUnit->lidIndex].drainRmvl[p];
                //... update system mass balance totals: removed fraction
                //    counts as BMP removal, the rest as runoff load
                massbal_updateLoadingTotals(BMP_REMOVAL_LOAD, p, r*w);
                if (isRunoffLoad)
                    massbal_updateLoadingTotals(RUNOFF_LOAD, p, w*(1.0-r));
            }
            // process next LID unit in the group
            lidList = lidList->nextLidUnit;
        }
    }
}
//=============================================================================
void lid_addDrainRunon(int j)
//
// Purpose: adds drain flows from LIDs in a given subcatchment to the
//          subcatchments that were designated to receive them 
// Input:   j = index of subcatchment contributing underdrain flows
// Output:  none.
//
{
    int i;                   // index of an LID unit's LID process         //(5.1.013)
    int k;                   // index of subcatchment receiving LID drain flow
    int p;                   // pollutant index
    double q;                // drain flow rate (cfs)
    double w;                // mass of polllutant from drain flow         //(5.1.013)
    TLidUnit*  lidUnit;
    TLidList*  lidList;
    TLidGroup  lidGroup;
    //... check if LID group exists
    lidGroup = LidGroups[j];
    if ( lidGroup != NULL )
    {
        //... examine each LID in the group
        lidList = lidGroup->lidList;
        while ( lidList )
        {
            //... see if LID's drain discharges to another subcatchment
            //    (k == j means it drains onto itself and is skipped here)
            lidUnit = lidList->lidUnit;
            i = lidUnit->lidIndex;                                         //(5.1.013)
            k = lidUnit->drainSubcatch;
            if ( k >= 0 && k != j )
            {
                //... distribute drain flow across subcatchment's areas
                //    (uses the previous step's drain flow)
                q = lidUnit->oldDrainFlow;
                subcatch_addRunonFlow(k, q);
                //... add pollutant loads from drain to subcatchment
                //    (newQual[] contains loading rate (mass/sec) at this
                //    point which is converted later on to a concentration)
                for (p = 0; p < Nobjects[POLLUT]; p++)
                {
                    //... mass load uses the source subcatchment's previous
                    //    concentration, reduced by the LID's drain removal
                    w = q * Subcatch[j].oldQual[p] * LperFT3;              //(5.1.013)
                    w = w * (1.0 - LidProcs[i].drainRmvl[p]);              //
                    Subcatch[k].newQual[p] += w;                           //
                }
            }
            lidList = lidList->nextLidUnit;
        }
    }
}
//=============================================================================
void lid_addDrainInflow(int j, double f)
//
// Purpose: adds LID drain flow to conveyance system nodes 
// Input:   j = subcatchment index
//          f = time interval weighting factor
// Output:  none.
//
// Note:    this function updates the total lateral flow (Node[].newLatFlow)
//          and pollutant mass (Node[].newQual[]) inflow seen by nodes that
//          receive drain flow from the LID units in subcatchment j.
{
    int        i,            // LID process index                          //(5.1.013)
               k,            // node index
               p;            // pollutant index
    double     q,            // drain flow (cfs)
               w, w1, w2;    // pollutant mass loads (mass/sec)
    TLidUnit*  lidUnit;
    TLidList*  lidList;
    TLidGroup  lidGroup;
    //... check if LID group exists
    lidGroup = LidGroups[j];
    if ( lidGroup != NULL )
    {
        //... examine each LID in the group
        lidList = lidGroup->lidList;
        while ( lidList )
        {
            //... see if LID's drain discharges to conveyance system node
            lidUnit = lidList->lidUnit;
            i = lidUnit->lidIndex;                                         //(5.1.013)
            k = lidUnit->drainNode;
            if ( k >= 0 )
            {
                //... add drain flow to node's wet weather inflow
                //    (interpolated between old & new flows using f)
                q = (1.0 - f) * lidUnit->oldDrainFlow + f * lidUnit->newDrainFlow;
                Node[k].newLatFlow += q;
                massbal_addInflowFlow(WET_WEATHER_INFLOW, q);
                //... add pollutant load, based on parent subcatchment quality 
                for (p = 0; p < Nobjects[POLLUT]; p++)
                {
                    //... get previous & current drain loads
                    w1 = lidUnit->oldDrainFlow * Subcatch[j].oldQual[p];
                    w2 = lidUnit->newDrainFlow * Subcatch[j].newQual[p];
                    //... add interpolated load to node's wet weather loading,
                    //    reduced by the LID's drain pollutant removal
                    w = (1.0 - f) * w1 + f * w2;
                    w = w * (1.0 - LidProcs[i].drainRmvl[p]);              //(5.1.013)
                    Node[k].newQual[p] += w;
                    massbal_addInflowQual(WET_WEATHER_INFLOW, p, w);
                }
            }
            lidList = lidList->nextLidUnit;
        }
    }
}
//=============================================================================
void lid_getRunoff(int j, double tStep)
//
// Purpose: computes runoff and drain flows from the LIDs in a subcatchment.
// Input:   j     = subcatchment index 
//          tStep = time step (sec)
// Output:  updates following global quantities after LID treatment applied:
//          Vevap, Vpevap, VlidInfil, VlidIn, VlidOut, VlidDrain.
//
{
    TLidGroup  theLidGroup;       // group of LIDs placed in the subcatchment
    TLidList*  lidList;           // list of LID units in the group
    TLidUnit*  lidUnit;           // a member of the list of LID units
    double lidArea;               // area of an LID unit
    double qImperv = 0.0;         // runoff from impervious areas (cfs)
    double qPerv = 0.0;           // runoff from pervious areas (cfs)      //(5.1.013)
    double lidInflow = 0.0;       // inflow to an LID unit (ft/s) 
    double qRunoff = 0.0;         // surface runoff from all LID units (cfs)
    double qDrain = 0.0;          // drain flow from all LID units (cfs)
    double qReturn = 0.0;         // LID outflow returned to pervious area (cfs)
    //... return if there are no LID's
    theLidGroup = LidGroups[j];
    if ( !theLidGroup ) return;
    lidList = theLidGroup->lidList;
    if ( !lidList ) return;
    //... determine if evaporation can occur
    //    (suppressed during rainfall when the dry-only option is set)
    EvapRate = Evap.rate;
    if ( Evap.dryOnly && Subcatch[j].rainfall > 0.0 ) EvapRate = 0.0;
    //... find subcatchment's infiltration rate into native soil
    //    (sets module-level NativeInfil & MaxNativeInfil)
    findNativeInfil(j, tStep);
    //... get impervious and pervious area runoff from non-LID
    //    portion of subcatchment (cfs)
    if ( Subcatch[j].area > Subcatch[j].lidArea )
    {    
        qImperv = getImpervAreaRunoff(j);
        qPerv = getPervAreaRunoff(j);                                      //(5.1.013)
    }
    //... evaluate performance of each LID unit placed in the subcatchment
    while ( lidList )
    {
        //... find area of the LID unit
        lidUnit = lidList->lidUnit;
        lidArea = lidUnit->area * lidUnit->number;
        //... if LID unit has area, evaluate its performance
        if ( lidArea > 0.0 )
        {
            //... find runoff from non-LID area treated by LID area (ft/sec)
            //    (each unit captures its own fraction of the non-LID runoff)
            lidInflow = (qImperv * lidUnit->fromImperv +                   //(5.1.013)
                         qPerv * lidUnit->fromPerv) / lidArea;             //
            //... update total runoff volume treated
            VlidIn += lidInflow * lidArea * tStep;
            //... add rainfall onto LID inflow (ft/s)
            lidInflow = lidInflow + Subcatch[j].rainfall;
            // ... add upstream runon only if LID occupies full subcatchment
            if ( Subcatch[j].area == Subcatch[j].lidArea )
            {
                lidInflow += Subcatch[j].runon;
            }
            //... evaluate the LID unit's performance, updating the LID group's
            //    total surface runoff, drain flow, and flow returned to 
            //    pervious area 
            evalLidUnit(j, lidUnit, lidArea, lidInflow, tStep,
                        &qRunoff, &qDrain, &qReturn);
        }
        lidList = lidList->nextLidUnit;
    }
    //... save the LID group's total drain & return flows
    theLidGroup->newDrainFlow = qDrain;
    theLidGroup->flowToPerv = qReturn;
    //... save the LID group's total surface, drain and return flow volumes
    VlidOut = qRunoff * tStep;
    VlidDrain = qDrain * tStep;
    VlidReturn = qReturn * tStep;
}
//=============================================================================
void findNativeInfil(int j, double tStep)
//
// Purpose: determines a subcatchment's current infiltration rate into
//          its native soil.
// Input:   j = subcatchment index
//          tStep = time step (sec)
// Output:  sets values for module-level variables NativeInfil
//          (and MaxNativeInfil)
//
{
    double nonLidArea;
    //... subcatchment has non-LID pervious area
    //    (use the average infiltration rate already computed for that
    //     area this time step: volume / area / time)
    nonLidArea = Subcatch[j].area - Subcatch[j].lidArea;
    if ( nonLidArea > 0.0 && Subcatch[j].fracImperv < 1.0 )
    {
        NativeInfil = Vinfil / nonLidArea / tStep;
    }
    //... otherwise find infil. rate for the subcatchment's rainfall + runon
    else
    {
        NativeInfil = infil_getInfil(j, InfilModel, tStep,
                                     Subcatch[j].rainfall,
                                     Subcatch[j].runon,
                                     getSurfaceDepth(j));                  //(5.1.008)
    }
    //... see if there is any groundwater-imposed limit on infil.
    //    (BIG effectively means "no limit")
    if ( !IgnoreGwater && Subcatch[j].groundwater )
    {
        MaxNativeInfil = Subcatch[j].groundwater->maxInfilVol / tStep;
    }
    else MaxNativeInfil = BIG;
}
//=============================================================================
double getImpervAreaRunoff(int j)
//
// Purpose: computes runoff from impervious area of a subcatchment that
//          is available for LID treatment.
// Input:   j = subcatchment index
// Output:  returns runoff flow rate (cfs)
//
{
    int    k;
    double flow = 0.0;       // area-weighted runoff rate (ft/sec)
    double nonLidArea;       // non-LID area (ft2)

    // --- sum runoff over impervious sub-areas with & without
    //     depression storage, weighted by their area fractions
    for (k = IMPERV0; k <= IMPERV1; k++)
    {
        flow += Subcatch[j].subArea[k].runoff * Subcatch[j].subArea[k].fArea;
    }

    // --- keep only the fraction not internally routed to pervious area
    if ( Subcatch[j].subArea[IMPERV0].routeTo == TO_PERV &&
         Subcatch[j].fracImperv < 1.0 )
    {
        flow *= Subcatch[j].subArea[IMPERV0].fOutlet;
    }

    // --- convert to a flow rate over the subcatchment's non-LID area
    nonLidArea = Subcatch[j].area - Subcatch[j].lidArea;
    return flow * nonLidArea;
}
//=============================================================================
//// This function was added for release 5.1.013. //// //(5.1.013)
double getPervAreaRunoff(int j)
//
// Purpose: computes runoff from pervious area of a subcatchment that
//          is available for LID treatment.
// Input:   j = subcatchment index
// Output:  returns runoff flow rate (cfs)
//
{
    double flow;             // area-weighted runoff rate (ft/sec)
    double nonLidArea;       // non-LID area (ft2)

    // --- runoff from the pervious sub-area, weighted by its area fraction
    flow = Subcatch[j].subArea[PERV].runoff * Subcatch[j].subArea[PERV].fArea;

    // --- keep only the fraction not internally routed to impervious area
    if (Subcatch[j].subArea[PERV].routeTo == TO_IMPERV &&
        Subcatch[j].fracImperv > 0.0)
    {
        flow *= Subcatch[j].subArea[PERV].fOutlet;
    }

    // --- convert to a flow rate over the subcatchment's non-LID area
    nonLidArea = Subcatch[j].area - Subcatch[j].lidArea;
    return flow * nonLidArea;
}
//=============================================================================
void evalLidUnit(int j, TLidUnit* lidUnit, double lidArea, double lidInflow,
    double tStep, double *qRunoff, double *qDrain, double *qReturn)
//
//  Purpose: evaluates performance of a specific LID unit over current time step.
//  Input:   j         = subcatchment index
//           lidUnit   = ptr. to LID unit being evaluated
//           lidArea   = area of LID unit
//           lidInflow = inflow to LID unit (ft/s)
//           tStep     = time step (sec)
//  Output:  qRunoff   = sum of surface runoff from all LIDs (cfs)
//           qDrain    = sum of drain flows from all LIDs (cfs)
//           qReturn   = sum of LID flows returned to pervious area (cfs)
//
{
    TLidProc* lidProc;       // LID process associated with lidUnit
    double lidRunoff,        // surface runoff from LID unit (cfs)
           lidEvap,          // evaporation rate from LID unit (ft/s)
           lidInfil,         // infiltration rate from LID unit (ft/s)
           lidDrain;         // drain flow rate from LID unit (ft/s & cfs)

    //... identify the LID process of the LID unit being analyzed
    lidProc = &LidProcs[lidUnit->lidIndex];

    //... initialize evap and infil losses
    lidEvap = 0.0;
    lidInfil = 0.0;

    //... find surface runoff from the LID unit (in cfs)
    //    (lidproc_getOutflow returns a flux in ft/s; multiplying by
    //     lidArea converts it to a flow rate in cfs)
    lidRunoff = lidproc_getOutflow(lidUnit, lidProc, lidInflow, EvapRate,
                                   NativeInfil, MaxNativeInfil, tStep,
                                   &lidEvap, &lidInfil, &lidDrain) * lidArea;

    //... convert drain flow to CFS
    lidDrain *= lidArea;

    //... revise flows if LID outflow returned to pervious area
    if ( lidUnit->toPerv && Subcatch[j].area > Subcatch[j].lidArea )
    {
        //... surface runoff is always returned
        *qReturn += lidRunoff;
        lidRunoff = 0.0;

        //... drain flow returned if it has same outlet as subcatchment
        if ( lidUnit->drainNode == Subcatch[j].outNode &&
            lidUnit->drainSubcatch == Subcatch[j].outSubcatch )
        {
            *qReturn += lidDrain;
            lidDrain = 0.0;
        }
    }

    //... update system flow balance if drain flow goes to a
    //    conveyance system node
    if ( lidUnit->drainNode >= 0 )
    {
        massbal_updateRunoffTotals(RUNOFF_DRAINS, lidDrain * tStep);
    }

    //... save new drain outflow
    lidUnit->newDrainFlow = lidDrain;

    //... update moisture losses (ft3)
    //    (flux * time step * area converts ft/s to a volume)
    Vevap += lidEvap * tStep * lidArea;
    VlidInfil += lidInfil * tStep * lidArea;
    if ( isLidPervious(lidUnit->lidIndex) )
    {
        Vpevap += lidEvap * tStep * lidArea;
    }

    //... update time since last rainfall (for Rain Barrel emptying)
    if ( Subcatch[j].rainfall > MIN_RUNOFF ) lidUnit->dryTime = 0.0;
    else lidUnit->dryTime += tStep;

    //... update LID water balance and save results
    lidproc_saveResults(lidUnit, UCF(RAINFALL), UCF(RAINDEPTH));

    //... update LID group totals
    *qRunoff += lidRunoff;
    *qDrain += lidDrain;
}
//=============================================================================
void lid_writeWaterBalance()
//
//  Purpose: writes a LID performance summary table to the project's report file.
//  Input:   none
//  Output:  none
//
{
    int j;
    int k = 0;
    double ucf = UCF(RAINDEPTH);   // conversion factor: ft -> report units
    double inflow;
    double outflow;
    double err;
    TLidUnit* lidUnit;
    TLidList* lidList;
    TLidGroup lidGroup;

    //... check that project has LIDs (k counts non-empty LID groups)
    for ( j = 0; j < GroupCount; j++ )
    {
        if ( LidGroups[j] ) k++;
    }
    if ( k == 0 ) return;

    //... write table header
    fprintf(Frpt.file,
    "\n"
    "\n ***********************"
    "\n LID Performance Summary"
    "\n ***********************\n");
    fprintf(Frpt.file,
"\n --------------------------------------------------------------------------------------------------------------------"
"\n Total Evap Infil Surface Drain Initial Final Continuity"
"\n Inflow Loss Loss Outflow Outflow Storage Storage Error");
    if ( UnitSystem == US ) fprintf(Frpt.file,
"\n Subcatchment LID Control in in in in in in in %%");
    else fprintf(Frpt.file,
"\n Subcatchment LID Control mm mm mm mm mm mm mm %%");
    fprintf(Frpt.file,
"\n --------------------------------------------------------------------------------------------------------------------");

    //... examine each LID unit in each subcatchment
    for ( j = 0; j < GroupCount; j++ )
    {
        lidGroup = LidGroups[j];
        if ( !lidGroup || Subcatch[j].lidArea == 0.0 ) continue;
        lidList = lidGroup->lidList;
        while ( lidList )
        {
            //... write water balance components to report file
            lidUnit = lidList->lidUnit;
            k = lidUnit->lidIndex;
            fprintf(Frpt.file, "\n %-16s %-16s", Subcatch[j].ID,
                LidProcs[k].ID);
            fprintf(Frpt.file, "%10.2f%10.2f%10.2f%10.2f%10.2f%10.2f%10.2f",
                lidUnit->waterBalance.inflow*ucf,
                lidUnit->waterBalance.evap*ucf,
                lidUnit->waterBalance.infil*ucf,
                lidUnit->waterBalance.surfFlow*ucf,
                lidUnit->waterBalance.drainFlow*ucf,
                lidUnit->waterBalance.initVol*ucf,
                lidUnit->waterBalance.finalVol*ucf);

            //... compute flow balance error
            //    err = (in - out) / in, as a fraction of total inflow
            inflow = lidUnit->waterBalance.initVol +
                lidUnit->waterBalance.inflow;
            outflow = lidUnit->waterBalance.finalVol +
                lidUnit->waterBalance.evap +
                lidUnit->waterBalance.infil +
                lidUnit->waterBalance.surfFlow +
                lidUnit->waterBalance.drainFlow;
            if ( inflow > 0.0 ) err = (inflow - outflow) / inflow;
            // NOTE(review): with zero inflow the unit is reported as a 100%
            // continuity error; confirm this sentinel is intentional.
            else err = 1.0;
            fprintf(Frpt.file, " %10.2f", err*100.0);
            lidList = lidList->nextLidUnit;
        }
    }
}
//=============================================================================
void initLidRptFile(char* title, char* lidID, char* subcatchID, TLidUnit* lidUnit)
//
//  Purpose: initializes the report file used for a specific LID unit
//  Input:   title = project's title
//           lidID = LID process name
//           subcatchID = subcatchment ID name
//           lidUnit = ptr. to LID unit
//  Output:  none
//
{
    static int colCount = 14;
    // Column headings are split over two rows (head1/head2) with per-unit
    // rows for US (units1) and SI (units2) systems; columns are tab-separated.
    static char* head1[] = {
        "\n \t", " Elapsed\t",
        " Total\t", " Total\t", " Surface\t", " Pavement\t", " Soil\t",
        " Storage\t", " Surface\t", " Drain\t", " Surface\t", " Pavement\t",
        " Soil\t", " Storage"};
    static char* head2[] = {
        "\n \t", " Time\t",
        " Inflow\t", " Evap\t", " Infil\t", " Perc\t", " Perc\t",
        " Exfil\t", " Runoff\t", " OutFlow\t", " Level\t", " Level\t",
        " Moisture\t", " Level"};
    static char* units1[] = {
        "\nDate Time \t", " Hours\t",
        " in/hr\t", " in/hr\t", " in/hr\t", " in/hr\t", " in/hr\t",
        " in/hr\t", " in/hr\t", " in/hr\t", " inches\t", " inches\t",
        " Content\t", " inches"};
    static char* units2[] = {
        "\nDate Time \t", " Hours\t",
        " mm/hr\t", " mm/hr\t", " mm/hr\t", " mm/hr\t", " mm/hr\t",
        " mm/hr\t", " mm/hr\t", " mm/hr\t", " mm\t", " mm\t",
        " Content\t", " mm"};
    static char line9[] = " ---------";
    int i;
    FILE* f = lidUnit->rptFile->file;

    //... check that file was opened
    if ( f == NULL ) return;

    //... write title lines
    fprintf(f, "SWMM5 LID Report File\n");
    fprintf(f, "\nProject: %s", title);
    fprintf(f, "\nLID Unit: %s in Subcatchment %s\n", lidID, subcatchID);

    //... write column headings
    for ( i = 0; i < colCount; i++) fprintf(f, "%s", head1[i]);
    for ( i = 0; i < colCount; i++) fprintf(f, "%s", head2[i]);
    if ( UnitSystem == US )
    {
        for ( i = 0; i < colCount; i++) fprintf(f, "%s", units1[i]);
    }
    else for ( i = 0; i < colCount; i++) fprintf(f, "%s", units2[i]);
    //... separator row (first column is wider, hence the literal dashes)
    fprintf(f, "\n----------- --------");
    for ( i = 1; i < colCount; i++) fprintf(f, "\t%s", line9);

    //... initialize LID dryness state
    lidUnit->rptFile->wasDry = 1;
    strcpy(lidUnit->rptFile->results, "");
}
int lid_getLidUnitCount(int index)
// Input: index = Index of desired subcatchment
// Output: int = number of lid units for subcatchment
// Return: number of lid units for subcatchment
// Purpose: count number of lid units for subcatchment
{
int unitCount = 0;
TLidList* lidList = NULL;
TLidGroup lidGroup;
lidGroup = LidGroups[index];
if (lidGroup)
{
lidList = lidGroup->lidList;
while (lidList)
{
lidList = lidList->nextLidUnit;
unitCount += 1;
}
}
return unitCount;
}
TLidUnit* lid_getLidUnit(int index, int lidIndex, int* errcode)
//
// Input:   index = Index of desired subcatchment
//          lidIndex = Index of desired lid control (subcatchment allow for multiple lids)
//          errcode = ptr to errcode
// Output:  TLidUnit = TLidUnit ptr
// Return:  TLidUnit ptr (NULL on error; *errcode is set to the failure reason)
// Purpose: Gets lid unit (TLidUnit) ptr
{
    int currLidIndex = 0;
    int unitCount = 1;
    TLidUnit* lidUnit = NULL;
    TLidList* lidList;
    TLidGroup lidGroup;

    lidGroup = LidGroups[index];
    if (!lidGroup)
    {
        *errcode = ERR_API_UNDEFINED_LID;
    }
    else
    {
        lidList = lidGroup->lidList;
        // Patch solution for now
        // Realized the lid units are stored in reverse order of
        // how they are defined in the [LID USAGE]
        // For now, I will just count the number of Lid Units in Lid List
        unitCount = lid_getLidUnitCount(index);
        if (lidIndex > (unitCount - 1))
        {
            // Caller asked for a unit past the end of the list.
            *errcode = ERR_API_LIDUNIT_INDEX;
            return(NULL);
        }
        else
        {
            // update lidIndex due to reverse order
            lidIndex = unitCount - lidIndex - 1;

            // Traverse through lid list to find lid unit
            while ((lidList) && (currLidIndex <= lidIndex))
            {
                lidUnit = lidList->lidUnit;
                currLidIndex += 1;
                lidList = lidList->nextLidUnit;
            }

            // Verify that the lid unit found matches the one specified by the user
            if (!((currLidIndex - 1) == lidIndex))
            {
                *errcode = ERR_API_LIDUNIT_INDEX;
                lidUnit = NULL;
            }
        }
    }
    return lidUnit;
}
TLidProc* lid_getLidProc(int index)
//
// Input:   index = Index of desired lid control
// Output:  ptr = TLidProc ptr
// Return:  TLidProc ptr
// Purpose: Gets lid process (TLidProc) ptr
{
    // Hand back the address of the indexed LID process entry directly.
    return &LidProcs[index];
}
TLidGroup lid_getLidGroup(int index)
//
// Input:   index = index of desired subcatchment
// Output:  result = result data desired
// Return:  TLidGroup ptr
// Purpose: Gets lid group (TLidGroup) ptr
{
    // TLidGroup is itself a pointer type, so the array element can be
    // returned without taking its address.
    return LidGroups[index];
}
void lid_validateLidProc(int index)
//
// Purpose: hook to lid internal function to validate LID process parameters.
//          (Thin public wrapper so callers outside this file can trigger
//           validation of a single LID process.)
// Input:   index = Index of desired LID process
// Output:  none
{
    validateLidProc(index);
}
void lid_validateLidGroup(int index)
//
// Purpose: hook to lid internal function to validate LID group parameters.
//          (Thin public wrapper so callers outside this file can trigger
//           validation of a subcatchment's LID group.)
// Input:   index = Index of desired subcatchment
// Output:  none
{
    validateLidGroup(index);
}
void lid_updateLidUnit(TLidUnit* lidUnit, int subIndex)
//
// Purpose: update a lid unit parameters due to change in lid control parameters
// Input:   lidUnit  = ptr. to the LID unit to refresh
//          subIndex = index of the subcatchment containing the unit
// Output:  none
{
    int lidIndex;

    lidIndex = lidUnit->lidIndex;
    // reset the pavement regeneration schedule from the LID control
    lidUnit->nextRegenDay = LidProcs[lidIndex].pavement.regenDays;
    lid_validateLidGroup(subIndex);
    // re-derive initial moisture/storage state from the unit's initial
    // saturation fraction and the (possibly changed) layer properties
    if (LidProcs[lidIndex].soil.thickness > 0.0)
    {
        lidUnit->soilMoisture = LidProcs[lidIndex].soil.wiltPoint +
            lidUnit->initSat * (LidProcs[lidIndex].soil.porosity -
            LidProcs[lidIndex].soil.wiltPoint);
    }
    if (LidProcs[lidIndex].storage.thickness > 0.0)
    {
        lidUnit->storageDepth = lidUnit->initSat *
            LidProcs[lidIndex].storage.thickness;
    }
    // NOTE(review): if both a storage layer and a drain mat are present,
    // this overwrites the storageDepth computed just above — confirm that
    // the drain-mat value is meant to take precedence.
    if (LidProcs[lidIndex].drainMat.thickness > 0.0)
    {
        lidUnit->storageDepth = lidUnit->initSat *
            LidProcs[lidIndex].drainMat.thickness;
    }
}
void lid_updateLidGroup(int index)
//
// Purpose: refreshes a subcatchment's LID-group state after LID parameters
//          change: recomputes the group's pervious LID area, each non-LID
//          sub-area's runoff coefficient (alpha), and the groundwater-
//          imposed maximum infiltration volume.
// Input:   index = subcatchment index
// Output:  none
{
    int i;
    double area, nonLidArea;
    TLidUnit* lidUnit;
    TLidList* lidList;
    TLidGroup lidGroup;
    TGroundwater *gw;
    TAquifer a;

    //... check if group exists
    lidGroup = LidGroups[index];
    if (lidGroup == NULL) return;
    lidGroup->pervArea = 0.0;

    //... examine each LID in the group, accumulating pervious LID area
    lidList = lidGroup->lidList;
    while (lidList)
    {
        lidUnit = lidList->lidUnit;
        if (isLidPervious(lidUnit->lidIndex))
        {
            lidGroup->pervArea += (lidUnit->area * lidUnit->number);
        }
        lidList = lidList->nextLidUnit;
    }

    // recalculate subcatchment alpha (Manning overland-flow coefficient)
    // for each impervious/pervious sub-area of the non-LID portion
    nonLidArea = Subcatch[index].area;
    nonLidArea -= Subcatch[index].lidArea;
    for (i = IMPERV0; i <= PERV; i++)
    {
        if (i == PERV)
        {
            area = (1.0 - Subcatch[index].fracImperv) * nonLidArea;
        }
        else
        {
            area = Subcatch[index].fracImperv * nonLidArea;
        }
        Subcatch[index].subArea[i].alpha = 0.0;
        // only compute alpha for sub-areas with nonzero area and roughness
        if (area > 0.0 && Subcatch[index].subArea[i].N > 0.0)
        {
            Subcatch[index].subArea[i].alpha = 1.49 * Subcatch[index].width / area *
                sqrt(Subcatch[index].slope) / Subcatch[index].subArea[i].N;
        }
    }

    // update GW max infil vol (available pore space above the water table,
    // expressed per unit of pervious area)
    gw = Subcatch[index].groundwater;
    if (gw)
    {
        a = Aquifer[gw->aquifer];
        gw->maxInfilVol = (gw->surfElev - gw->waterTableElev) *
            (a.porosity - gw->theta) /
            subcatch_getFracPerv(index);
    }
}
void lid_updateAllLidUnit(int lidIndex)
//
// Purpose: update all lid unit parameters due to change in lid control
//          parameters.
// Input:   lidIndex = Index of desired lid control (subcatchment allow for
//          multiple lids)
// Output:  none
{
    int j, k;
    TLidUnit* lidUnit;
    TLidList* lidList;
    TLidGroup lidGroup;

    // scan every subcatchment's LID group for units that reference the
    // changed LID control
    for (j = 0; j < GroupCount; j++)
    {
        //... check if group exists
        lidGroup = LidGroups[j];
        if (lidGroup == NULL) continue;

        //... examine each LID in the group
        lidList = lidGroup->lidList;
        while (lidList)
        {
            lidUnit = lidList->lidUnit;
            k = lidUnit->lidIndex;
            if (k == lidIndex)
            {
                lid_updateLidUnit(lidUnit, j);
            }
            lidList = lidList->nextLidUnit;
        }
    }
}
#!/bin/sh
# Display layout: make the internal panel (eDP-1) the primary output at
# 1920x1080 with no rotation, and explicitly disable every external output.
xrandr --output eDP-1 --primary --mode 1920x1080 --pos 0x0 --rotate normal --output DP-1 --off --output HDMI-1 --off --output DP-2 --off --output HDMI-2 --off
|
package com.readytalk.swt.color;
import lombok.ToString;
/**
 * Represents the HSB (hue, saturation, brightness) color model.
 *
 * <p>Instances are immutable value objects; all components are set once at
 * construction. Lombok's {@code @ToString} generates a readable
 * {@code toString()} listing the three components.
 */
@ToString
public class HSB {

  /** Hue angle in degrees, expected in [0, 360]. */
  public final float hue;
  /** Saturation fraction, expected in [0, 1]. */
  public final float saturation;
  /** Brightness fraction, expected in [0, 1]. */
  public final float brightness;

  /**
   * @param hue a degree of angle between 0 and 360
   * @param saturation a fraction between 0 and 1
   * @param brightness a fraction between 0 and 1
   */
  public HSB(float hue, float saturation, float brightness) {
    this.hue = hue;
    this.saturation = saturation;
    this.brightness = brightness;
  }
}
|
<reponame>vutuandat20385/stemup-tts<gh_stars>0
/**
 * Collects the edited news article from the admin form (CKEditor content,
 * title, tag, description, avatar image, category, related news, featured
 * flag, source), validates the required fields, confirms the title is not a
 * duplicate via /sadmin/check_name_exist_update/, and finally POSTs the
 * article as multipart FormData to /sadmin/update_new/.
 *
 * Side effects: toggles red borders/tooltips on invalid inputs, shows
 * alert() dialogs, and redirects to the news-management page on success.
 *
 * @param {number|string} id - primary key of the news record being edited
 */
function editNew(id) {
    // full HTML body of the article from the CKEditor instance
    var desc = CKEDITOR.instances.editor1.getData();
    var html = jQuery.parseHTML(desc);
    var tmp = document.createElement('div');
    tmp.innerHTML = desc;
    var img = tmp.getElementsByTagName('img');
    // NOTE(review): `img` is an HTMLCollection, so `img.src` is always
    // undefined; `html`, `src`, `check` and `show` are never used below —
    // presumably leftovers from an earlier revision. Confirm and remove.
    var src = img.src;
    var check;
    $("#cbshow").prop("checked") ? check = 1 : check = 0;
    var show = $("input[name=cbShow]").val();
    // gather the remaining form fields
    var name_new = $("input[name=txtnamenews]").val();
    var tag_new = $("input[name=txttagnews]").val();
    var des_new = $("textarea#txtdesnews").val();
    var pos = $('#inp_stt').val();
    var img_new = $("input[name=image_avatar]").val();
    var class_new = $("#sltype").val();
    var related_news = $('#displaynone1').val();
    var featured = $('#cbfeatured').val();
    var source = $('#txtsource').val();
    // per-field validation flags: 1 = valid, 0 = invalid
    var check_desc, check_name, check_ava, check_des;
    name_new = name_new.trim();
    url_name = convert_str(name_new);
    tag_new = tag_new.trim();
    des_new = des_new.trim();
    desc = desc.trim();
    // --- validate title: required, highlight in red when empty
    if (name_new != '') {
        $("#txtnamenews").css("border", "");
        $("#txtnamenews").attr('title', '');
        check_name = 1;
    } else {
        $("#txtnamenews").css('border', '1px solid red');
        $("#txtnamenews").attr('title', 'Không được để trống tên');
        check_name = 0;
    }
    // --- validate short description: required
    if (des_new != '') {
        $("#txtdesnews").css("border", "");
        $("#txtdesnews").attr('title', '');
        check_des = 1;
    } else {
        $("#txtdesnews").css('border', '1px solid red');
        $("#txtdesnews").attr('title', 'Không được để trống tên');
        check_des = 0;
    }
    // --- validate avatar image: only flags presence; the file itself is
    //     appended to the FormData further below when present
    if (img_new != '') {
        $("#image_avatar").css("border", "");
        $("#image_avatar").attr('title', '');
        check_ava = 1;
    } else {
        $("#image_avatar").css('border', '1px solid red');
        $("#image_avatar").attr('title', 'Không được để trống tên');
        check_ava = 0;
    }
    // --- validate article body (CKEditor frame): required
    if (desc != '') {
        $("#cke_editor1").css("border", "");
        $("#cke_editor1").attr('title', '');
        check_desc = 1;
    } else {
        $("#cke_editor1").css('border', '1px solid red');
        $("#cke_editor1").attr('title', 'Không được để trống nội dung');
        check_desc = 0;
    }
    // --- assemble the multipart payload for the update endpoint
    var formData = new FormData();
    formData.append('content', desc);
    formData.append('name', name_new);
    formData.append('url_name', url_name);
    formData.append('des', des_new);
    formData.append('class', class_new);
    formData.append('tag', tag_new);
    if (check_ava == 1) {
        formData.append('avatar_news', $('#image_avatar')[0].files[0], $("#image_avatar")[0].files[0].name);
    }
    formData.append('id', id);
    formData.append('pos', pos);
    formData.append('related_news', related_news);
    formData.append('featured', featured);
    formData.append('source', source);
    // payload for the duplicate-title check
    name = JSON.stringify({
        'name': name_new,
        'id': id,
    })
    // NOTE(review): check_ava is deliberately excluded here, so the avatar
    // is optional on update — confirm that is intended.
    if (check_desc == 1 && check_name == 1 && check_des == 1) {
        // step 1: ask the server whether the (possibly changed) title is free
        $.ajax({
            type: "POST",
            data: name,
            url: site_url + "/sadmin/check_name_exist_update/",
            contentType: "application/json",
            success: function(data) {
                // console.log(data);
                if (data['check'] == 1) {
                    $("#txtnamenews").css('border', '');
                    $("#txtnamenews").attr('title', '');
                    // step 2: title is free — submit the actual update
                    $.ajax({
                        type: "POST",
                        data: formData,
                        url: site_url + "/sadmin/update_new/",
                        // contentType: 'application/json',
                        contentType: false,
                        processData: false,
                        success: function(data) {
                            // console.log(data);
                            if (data['mess'] == 'success') {
                                alert("Sửa bài viết thành công!");
                                window.location.href = site_url + "/sadmin/manage_news";
                            } else {
                                alert("Oops, Đã xảy ra một số lỗi! Bạn vui lòng thử lại!");
                            }
                        },
                        error: function(xhr, ajaxOptions, thrownError) {
                            console.log(xhr);
                            console.log(ajaxOptions);
                            console.log(thrownError);
                        }
                    })
                } else {
                    // duplicate title — flag the field and stop
                    $("#txtnamenews").css('border', '1px solid red');
                    $("#txtnamenews").attr('title', 'Tên bài đã tồn tại.');
                }
            },
            error: function(xhr, ajaxOptions, thrownError) {
                console.log(xhr);
                console.log(ajaxOptions);
                console.log(thrownError);
            }
        })
    }
}
def list_files_and_directories(directory_path):
    """Return the entries of *directory_path*, marking directories.

    Plain files are returned by name; any non-file entry (directories,
    and anything ``is_file()`` rejects, e.g. broken symlinks) gets a
    trailing ``"/"`` appended so the two kinds can be told apart.

    Args:
        directory_path: Path of the directory to list.

    Returns:
        list[str]: One entry per item, in the order the OS reports them
        (unsorted, matching os.listdir semantics).
    """
    import os  # local import kept so the function stays self-contained

    items = []
    # os.scandir stats each entry once, unlike the listdir + isfile pair
    # which re-stats every item; is_file() follows symlinks just like
    # os.path.isfile, so classification is unchanged.
    with os.scandir(directory_path) as entries:
        for entry in entries:
            if entry.is_file():
                items.append(entry.name)
            else:
                items.append(entry.name + "/")
    return items
#!/bin/bash
# by sapphonie - sappho.io
#
# Moves finished SourceTV demo recordings out of Pterodactyl server volumes
# into /var/www/demos/<servernumber>/, discarding files that are not real
# demos, then prunes demos/logs older than 30 days.

# trap ctrl-c properly (SIGINT)
trap exit INT

# check temp file (acts as a crude single-instance lock)
if test -f "/tmp/stvmover"; then
    echo "stv mover already running! aborting. . . . ."
    exit 1
fi

# locate the Pterodactyl volumes root (path differs between daemon versions)
if [ -d "/srv/daemon-data/" ]; then
    srvroot="/srv/daemon-data/"
elif [ -d "/var/lib/pterodactyl/volumes/" ]; then
    srvroot="/var/lib/pterodactyl/volumes/"
else
    echo "no ptero dir, exiting"
    exit 255
fi

# make temp file
touch /tmp/stvmover

# fix permissions
# TODO: WHY??
chmod 775 -R "$srvroot"
chown pterodactyl:pterodactyl "$srvroot"

demosRoot="/var/www/demos/"
mkdir -p -v "$demosRoot"

# only find demos not modified more than 10 minutes ago (-mmin +10) and feed it into this loop
# (the 10-minute age check avoids moving demos that are still being written)
find "$srvroot"/ -iname '*.dem' -mmin +10 -print0 | while read -rd $'\0' file
do
    # does file have the dem header?
    # (the hexdump words decode little-endian to the "HL2DEMO\0" magic)
    if hexdump -n 8 "$file" | grep "4c48 4432 4d45 004f" &> /dev/null ;
    # file is almost certainly a real dem file
    then
        realfilename=$(basename "$file")
        # demo files are named "<servernumber>-...", so the first dash-field
        # selects the destination subfolder
        servernumber=$(echo "$realfilename" | cut -d '-' -f 1)
        # make temp server directories
        mkdir -pv "$demosRoot""$servernumber"
        # MOVE to demo folder
        mv -v "$file" "$demosRoot""$servernumber"/"$realfilename"
    # file does have the dem suffix but contains invalid data
    else
        echo "$file is the wrong format, deleting";
        rm "$file" -v
    fi
done

# cleanup archived demos older than 30 days (43200 minutes)
find "$demosRoot" -iname '*.dem' -mmin +43200 -exec rm {} \;
# cleanup leftover demos older than 30 days in the server volumes
find "$srvroot" -iname '*.dem' -mmin +43200 -exec rm {} \;
# cleanup logs older than 30 days in ptero folders
find "$srvroot" -iname '*.log' -mmin +43200 -exec rm {} \;

# fix permissions. again.
chmod 775 -R /var/www/html/
chown -R www-data:www-data /var/www/html/

# release the single-instance lock
rm /tmp/stvmover
-- Total price of all items that belong to an existing customer
-- (the INNER JOIN drops items whose customer_id has no matching customer).
-- NOTE(review): the join neither filters further nor groups; if a
-- per-customer total was intended, a GROUP BY c.customer_id is missing.
SELECT SUM(item_price) as total_purchase
FROM items i
JOIN customers c
ON i.customer_id = c.customer_id
#!/bin/bash
# Interactively collects metadata (name, author, description, comment) for
# Minetest skin textures via a yad form and writes it to
# u_skins/meta/<skin>.txt. With no argument, iterates over every sprite and
# model that lacks a meta file; with a file argument, (re)edits that one skin.

# sprite textures: player_<N>.png; model textures: character_<N>.png
SPRITES=$(find -regextype sed -regex '.*/player_[0-9]\{1,\}.png' | sort -V)
MODELS=$(find -regextype sed -regex '.*/character_[0-9]\{1,\}.png' | sort -V)

# ask_for_meta <skin file> <preview image> <force>
#   Shows a yad form (with a scaled preview) and writes the answers to the
#   skin's meta file. When <force> is false, skins that already have a meta
#   file are skipped.
function ask_for_meta {
    convert $2 -scale 100x200 /tmp/skins_set_meta
    SNAME=$(basename $1)
    SNAME=${SNAME%.*}
    METAFILE=u_skins/meta/$SNAME.txt
    FORCE=$3
    if $FORCE || ! [ -f $METAFILE ]
    then
        echo $METAFILE
        YADOUT=$(yad --form --image=/tmp/skins_set_meta --field $SNAME:LBL --field=Name --field=Author --field=Description --field=Comment)
        if [ -z "$YADOUT" ]; then exit; fi # canceled
        # yad returns the fields joined by '|'; split them into VALUES
        OIFS="$IFS"
        IFS='|'
        read -a VALUES <<< "$YADOUT"
        IFS="$OIFS"
        NAME=${VALUES[1]}
        AUTHOR=${VALUES[2]}
        DESCRIPTION=${VALUES[3]}
        COMMENT=${VALUES[4]}
        # name and author are mandatory; silently discard the input otherwise
        if [ -n "$NAME" ] && [ -n "$AUTHOR" ]
        then
            echo -n > $METAFILE # clear it
            echo 'name = "'$NAME'",' >> $METAFILE
            echo 'author = "'$AUTHOR'",' >> $METAFILE
            # only write description and comment if they are specified
            if [ -n "$DESCRIPTION" ]
            then
                echo 'description = "'$DESCRIPTION'",' >> $METAFILE
            fi
            if [ -n "$COMMENT" ]
            then
                echo 'comment = "'$COMMENT'",' >> $METAFILE
            fi
            echo "Saved !"
        fi
    fi
}

if [ -z $1 ]
then
    # batch mode: sprites preview as themselves, models use their
    # *_preview.png render
    for i in $SPRITES
    do
        ask_for_meta $i $i false
    done
    for i in $MODELS
    do
        ask_for_meta $i ${i%.*}_preview.png false
    done
else
    # single-skin mode: always re-ask (force=true)
    if [ -f ${1%.*}_preview.png ]
    then
        ask_for_meta $1 ${1%.*}_preview.png true
    else
        ask_for_meta $1 $1 true
    fi
fi

# remove the scratch preview image
rm /tmp/skins_set_meta
// Copyright 2020 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "system-proxy/server_proxy.h"
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <curl/curl.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <utility>
#include <base/bind.h>
#include <base/bind_helpers.h>
#include <base/callback_helpers.h>
#include <base/files/file_util.h>
#include <base/files/scoped_file.h>
#include <base/strings/string_util.h>
#include <base/task/single_thread_task_executor.h>
#include <brillo/dbus/async_event_sequencer.h>
#include <brillo/message_loops/base_message_loop.h>
#include <chromeos/patchpanel/socket.h>
#include <chromeos/patchpanel/socket_forwarder.h>
#include "bindings/worker_common.pb.h"
#include "system-proxy/protobuf_util.h"
#include "system-proxy/proxy_connect_job.h"
namespace system_proxy {
namespace {
constexpr char kUsername[] = "proxy:user";
constexpr char kUsernameEncoded[] = "proxy%3Auser";
constexpr char kPassword[] = "<PASSWORD>";
constexpr char kPasswordEncoded[] = "<PASSWORD>";
constexpr int kTestPort = 3128;
constexpr char kFakeProxyAddress[] = "http://127.0.0.1";
} // namespace
using ::testing::_;
using ::testing::Return;
// Test double for ServerProxy that lets tests substitute the worker's
// stdin/stdout pipe file descriptors (see RedirectStdPipes in the fixture).
class MockServerProxy : public ServerProxy {
 public:
  explicit MockServerProxy(base::OnceClosure quit_closure)
      : ServerProxy(std::move(quit_closure)) {}
  MockServerProxy(const MockServerProxy&) = delete;
  MockServerProxy& operator=(const MockServerProxy&) = delete;
  ~MockServerProxy() override = default;

  MOCK_METHOD(int, GetStdinPipe, (), (override));
  MOCK_METHOD(int, GetStdoutPipe, (), (override));
};
// Test double for ProxyConnectJob whose Start() can be stubbed so tests can
// enqueue jobs without opening real connections. The constructor forwards to
// ProxyConnectJob with CURLAUTH_ANY as the allowed auth schemes.
class MockProxyConnectJob : public ProxyConnectJob {
 public:
  MockProxyConnectJob(std::unique_ptr<patchpanel::Socket> socket,
                      const std::string& credentials,
                      ResolveProxyCallback resolve_proxy_callback,
                      AuthenticationRequiredCallback auth_required_callback,
                      OnConnectionSetupFinishedCallback setup_finished_callback)
      : ProxyConnectJob(std::move(socket),
                        credentials,
                        CURLAUTH_ANY,
                        std::move(resolve_proxy_callback),
                        std::move(auth_required_callback),
                        std::move(setup_finished_callback)) {}
  MockProxyConnectJob(const MockProxyConnectJob&) = delete;
  MockProxyConnectJob& operator=(const MockProxyConnectJob&) = delete;
  ~MockProxyConnectJob() override = default;

  MOCK_METHOD(bool, Start, (), (override));
};
// Fixture that owns a MockServerProxy plus a message loop, and can wire the
// proxy's worker stdin/stdout to local pipes so tests drive it with protobuf
// messages.
class ServerProxyTest : public ::testing::Test {
 public:
  ServerProxyTest() {
    server_proxy_ =
        std::make_unique<MockServerProxy>(brillo_loop_.QuitClosure());
  }
  ServerProxyTest(const ServerProxyTest&) = delete;
  ServerProxyTest& operator=(const ServerProxyTest&) = delete;
  ~ServerProxyTest() override {}

 protected:
  // Redirects the standard streams of the worker so that the tests can write
  // data in the worker's stdin input and read data from the worker's stdout
  // output.
  void RedirectStdPipes() {
    int fds[2];
    CHECK(base::CreateLocalNonBlockingPipe(fds));
    stdin_read_fd_.reset(fds[0]);
    stdin_write_fd_.reset(fds[1]);
    CHECK(base::CreateLocalNonBlockingPipe(fds));
    stdout_read_fd_.reset(fds[0]);
    stdout_write_fd_.reset(fds[1]);

    ON_CALL(*server_proxy_, GetStdinPipe())
        .WillByDefault(Return(stdin_read_fd_.get()));
    // Don't redirect all the calls to |stdout_write_fd_| or the test result
    // will not be printed in the console. Instead, when wanting to read the
    // standard output, set the expectation to once return |stdout_write_fd_|.
    ON_CALL(*server_proxy_, GetStdoutPipe())
        .WillByDefault(Return(STDOUT_FILENO));
    server_proxy_->Init();
  }
  // SystemProxyAdaptor instance that creates fake worker processes.
  std::unique_ptr<MockServerProxy> server_proxy_;

  base::SingleThreadTaskExecutor task_executor_{base::MessagePumpType::IO};
  brillo::BaseMessageLoop brillo_loop_{task_executor_.task_runner()};
  // Pipe ends standing in for the worker's stdin (tests write configs into
  // |stdin_write_fd_|) and stdout (tests read requests from
  // |stdout_read_fd_|).
  base::ScopedFD stdin_read_fd_, stdin_write_fd_, stdout_read_fd_,
      stdout_write_fd_;
};
// Verifies that credentials sent over the worker's stdin are stored as the
// URL-encoded "user:password" pair and that the policy-set auth scheme list
// is translated to the expected libcurl bitmask.
// NOTE(review): "<PASSWORD>" below is a dataset anonymization placeholder
// (the original file contains a real literal); this line will not compile
// as-is — restore the original constant.
TEST_F(ServerProxyTest, FetchCredentials) {
  worker::Credentials credentials;
  credentials.set_username(kUsername);
  credentials.set_password(<PASSWORD>);
  credentials.add_policy_credentials_auth_schemes("basic");
  credentials.add_policy_credentials_auth_schemes("digest");
  worker::WorkerConfigs configs;
  *configs.mutable_credentials() = credentials;
  RedirectStdPipes();
  EXPECT_TRUE(WriteProtobuf(stdin_write_fd_.get(), configs));
  brillo_loop_.RunOnce(false);

  std::string expected_credentials =
      base::JoinString({kUsernameEncoded, kPasswordEncoded}, ":");
  EXPECT_EQ(server_proxy_->system_credentials_, expected_credentials);
  EXPECT_EQ(server_proxy_->system_credentials_auth_schemes_,
            CURLAUTH_BASIC | CURLAUTH_DIGEST | CURLAUTH_NEGOTIATE);
}
// Verifies that a SocketAddress config received over the worker's stdin is
// stored as the proxy's listening address and port.
TEST_F(ServerProxyTest, FetchListeningAddress) {
  worker::SocketAddress address;
  address.set_addr(INADDR_ANY);
  address.set_port(kTestPort);
  worker::WorkerConfigs configs;
  *configs.mutable_listening_address() = address;
  // Redirect the worker stdin and stdout pipes.
  RedirectStdPipes();
  // Send the config to the worker's stdin input.
  EXPECT_TRUE(WriteProtobuf(stdin_write_fd_.get(), configs));
  brillo_loop_.RunOnce(false);

  EXPECT_EQ(server_proxy_->listening_addr_, INADDR_ANY);
  EXPECT_EQ(server_proxy_->listening_port_, kTestPort);
}
// Tests that ServerProxy handles the basic flow of a connect request:
// - server accepts a connection a creates a job for it until the connection is
// finished;
// - the connect request from the client socket is read and parsed;
// - proxy resolution request is correctly handled by the job and ServerProxy;
// - client is sent an HTTP error code in case of failure;
// - the failed connection job is removed from the queue.
TEST_F(ServerProxyTest, HandleConnectRequest) {
  server_proxy_->listening_addr_ = htonl(INADDR_LOOPBACK);
  server_proxy_->listening_port_ = kTestPort;
  // Redirect the worker stdin and stdout pipes.
  RedirectStdPipes();
  server_proxy_->CreateListeningSocket();

  CHECK_NE(-1, server_proxy_->listening_fd_->fd());
  brillo_loop_.RunOnce(false);

  // Connect a fake client to the proxy's listening socket.
  struct sockaddr_in ipv4addr;
  ipv4addr.sin_family = AF_INET;
  ipv4addr.sin_port = htons(kTestPort);
  ipv4addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

  auto client_socket =
      std::make_unique<patchpanel::Socket>(AF_INET, SOCK_STREAM);
  EXPECT_TRUE(client_socket->Connect((const struct sockaddr*)&ipv4addr,
                                     sizeof(ipv4addr)));
  brillo_loop_.RunOnce(false);

  EXPECT_EQ(1, server_proxy_->pending_connect_jobs_.size());
  const std::string_view http_req =
      "CONNECT www.example.server.com:443 HTTP/1.1\r\n\r\n";
  client_socket->SendTo(http_req.data(), http_req.size());

  EXPECT_CALL(*server_proxy_, GetStdoutPipe())
      .WillOnce(Return(stdout_write_fd_.get()));
  brillo_loop_.RunOnce(false);
  worker::WorkerRequest request;
  // Read the request from the worker's stdout output.
  ASSERT_TRUE(ReadProtobuf(stdout_read_fd_.get(), &request));
  ASSERT_TRUE(request.has_proxy_resolution_request());
  // CONNECT targets are reported to the resolver with an https:// scheme.
  EXPECT_EQ("https://www.example.server.com:443",
            request.proxy_resolution_request().target_url());

  EXPECT_EQ(1, server_proxy_->pending_proxy_resolution_requests_.size());

  // Write reply with a fake proxy to the worker's standard input.
  worker::ProxyResolutionReply reply;
  reply.set_target_url(request.proxy_resolution_request().target_url());
  reply.add_proxy_servers(kFakeProxyAddress);
  worker::WorkerConfigs configs;
  *configs.mutable_proxy_resolution_reply() = reply;
  ASSERT_TRUE(WriteProtobuf(stdin_write_fd_.get(), configs));
  brillo_loop_.RunOnce(false);

  // Verify that the correct HTTP error code is sent to the client. Because
  // curl_perform will fail, this will be reported as an internal server error.
  const std::string expected_http_reply =
      "HTTP/1.1 500 Internal Server Error - Origin: local proxy\r\n\r\n";
  std::vector<char> buf(expected_http_reply.size());
  ASSERT_TRUE(base::ReadFromFD(client_socket->fd(), buf.data(), buf.size()));
  buf.push_back('\0');
  const std::string actual_http_reply(buf.data());
  EXPECT_EQ(expected_http_reply, actual_http_reply);
  EXPECT_EQ(0, server_proxy_->pending_connect_jobs_.size());
}
// Tests the |OnConnectionSetupFinished| callback is handled correctly in case
// of success or error.
TEST_F(ServerProxyTest, HandlePendingJobs) {
  // success_count + failure_count == connection_count, so every enqueued job
  // is resolved exactly once below.
  int connection_count = 100;
  int success_count = 51;
  int failure_count = 49;
  // Create |connection_count| connections.
  for (int i = 0; i < connection_count; ++i) {
    auto client_socket =
        std::make_unique<patchpanel::Socket>(AF_INET, SOCK_STREAM);
    // The resolve/auth callbacks are no-ops; only the setup-finished callback
    // (routed to the real ServerProxy handler) matters for this test.
    auto mock_connect_job = std::make_unique<MockProxyConnectJob>(
        std::move(client_socket), "" /* credentials */,
        base::BindOnce([](const std::string& target_url,
                          OnProxyResolvedCallback callback) {}),
        base::BindRepeating([](const std::string& proxy_url,
                               const std::string& realm,
                               const std::string& scheme,
                               const std::string& bad_cached_credentials,
                               OnAuthAcquiredCallback callback) {}),
        base::BindOnce(&ServerProxy::OnConnectionSetupFinished,
                       base::Unretained(server_proxy_.get())));
    server_proxy_->pending_connect_jobs_[mock_connect_job.get()] =
        std::move(mock_connect_job);
  }
  // Resolve |failure_count| pending connections with error (null forwarder).
  for (int i = 0; i < failure_count; ++i) {
    auto job_iter = server_proxy_->pending_connect_jobs_.begin();
    std::move(job_iter->second->setup_finished_callback_)
        .Run(nullptr, job_iter->first);
  }
  // Expect failed requests have been cleared from the pending list and no
  // forwarder.
  EXPECT_EQ(success_count, server_proxy_->pending_connect_jobs_.size());
  EXPECT_EQ(0, server_proxy_->forwarders_.size());

  // Resolve |success_count| successful connections.
  for (int i = 0; i < success_count; ++i) {
    auto fwd = std::make_unique<patchpanel::SocketForwarder>(
        "" /* thread name */,
        std::make_unique<patchpanel::Socket>(AF_INET, SOCK_STREAM),
        std::make_unique<patchpanel::Socket>(AF_INET, SOCK_STREAM));
    fwd->Start();
    auto job_iter = server_proxy_->pending_connect_jobs_.begin();
    std::move(job_iter->second->setup_finished_callback_)
        .Run(std::move(fwd), job_iter->first);
  }

  // Expect the successful requests to have been cleared and |success_count|
  // active forwarders.
  EXPECT_EQ(0, server_proxy_->pending_connect_jobs_.size());
  EXPECT_EQ(success_count, server_proxy_->forwarders_.size());
}
// Test to ensure proxy resolution requests are correctly handled if the
// associated job is canceled before resolution.
TEST_F(ServerProxyTest, HandleCanceledJobWhilePendingProxyResolution) {
  // Uses port 3129 (not kTestPort) so it cannot collide with the listener
  // created by HandleConnectRequest.
  server_proxy_->listening_addr_ = htonl(INADDR_LOOPBACK);
  server_proxy_->listening_port_ = 3129;
  // Redirect the worker stdin and stdout pipes.
  RedirectStdPipes();
  server_proxy_->CreateListeningSocket();

  CHECK_NE(-1, server_proxy_->listening_fd_->fd());
  brillo_loop_.RunOnce(false);

  struct sockaddr_in ipv4addr;
  ipv4addr.sin_family = AF_INET;
  ipv4addr.sin_port = htons(3129);
  ipv4addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

  auto client_socket =
      std::make_unique<patchpanel::Socket>(AF_INET, SOCK_STREAM);
  EXPECT_TRUE(client_socket->Connect((const struct sockaddr*)&ipv4addr,
                                     sizeof(ipv4addr)));
  brillo_loop_.RunOnce(false);

  EXPECT_EQ(1, server_proxy_->pending_connect_jobs_.size());
  const std::string_view http_req =
      "CONNECT www.example.server.com:443 HTTP/1.1\r\n\r\n";
  client_socket->SendTo(http_req.data(), http_req.size());

  EXPECT_CALL(*server_proxy_, GetStdoutPipe())
      .WillOnce(Return(stdout_write_fd_.get()));

  brillo_loop_.RunOnce(false);
  EXPECT_EQ(1, server_proxy_->pending_connect_jobs_.size());

  // Cancel the job before the resolver answers; the later OnProxyResolved
  // call must clear the orphaned resolution request without crashing.
  server_proxy_->pending_connect_jobs_.clear();

  EXPECT_EQ(1, server_proxy_->pending_proxy_resolution_requests_.size());
  server_proxy_->OnProxyResolved("https://www.example.server.com:443", {});

  EXPECT_EQ(0, server_proxy_->pending_proxy_resolution_requests_.size());
}
// This test verifies that the athentication request is forwarded to the parent
// process and that the pending authentication requests are resolved when the
// parent sends the credentials associated with the protection space included in
// the request.
TEST_F(ServerProxyTest, HandlePendingAuthRequests) {
  RedirectStdPipes();
  worker::ProtectionSpace protection_space;
  protection_space.set_origin(kFakeProxyAddress);
  protection_space.set_scheme("Basic");
  protection_space.set_realm("Proxy test realm");
  std::string actual_credentials = "";

  EXPECT_CALL(*server_proxy_, GetStdoutPipe())
      .WillOnce(Return(stdout_write_fd_.get()));
  server_proxy_->AuthenticationRequired(
      protection_space.origin(), protection_space.scheme(),
      protection_space.realm(), /* bad_cached_credentials = */ "",
      base::Bind(
          [](std::string* actual_credentials, const std::string& credentials) {
            *actual_credentials = credentials;
          },
          &actual_credentials));

  // The request must be keyed by the serialized protection space.
  EXPECT_EQ(1, server_proxy_->pending_auth_required_requests_.size());
  EXPECT_EQ(protection_space.SerializeAsString(),
            server_proxy_->pending_auth_required_requests_.begin()->first);

  brillo_loop_.RunOnce(false);

  worker::WorkerRequest request;
  // Read the request from the worker's stdout output.
  ASSERT_TRUE(ReadProtobuf(stdout_read_fd_.get(), &request));
  ASSERT_TRUE(request.has_auth_required_request());
  ASSERT_TRUE(request.auth_required_request().has_protection_space());
  EXPECT_EQ(
      request.auth_required_request().protection_space().SerializeAsString(),
      protection_space.SerializeAsString());

  // Write reply with a fake credentials to the worker's standard input.
  // NOTE(review): "<PASSWORD>" is a dataset anonymization placeholder — the
  // expectation below implies the original literal was "test_pwd".
  worker::Credentials credentials;
  *credentials.mutable_protection_space() = protection_space;
  credentials.set_username("test_user");
  credentials.set_password("<PASSWORD>");
  worker::WorkerConfigs configs;
  *configs.mutable_credentials() = credentials;

  ASSERT_TRUE(WriteProtobuf(stdin_write_fd_.get(), configs));
  brillo_loop_.RunOnce(false);

  EXPECT_EQ(0, server_proxy_->pending_auth_required_requests_.size());
  EXPECT_EQ("test_user:test_pwd", actual_credentials);
}
// This test verifies that pending authentication requests are resolved when
// the parent returns empty credentials for the protection space.
TEST_F(ServerProxyTest, HandlePendingAuthRequestsNoCredentials) {
  RedirectStdPipes();
  worker::ProtectionSpace protection_space;
  protection_space.set_origin(kFakeProxyAddress);
  protection_space.set_scheme("Basic");
  protection_space.set_realm("Proxy test realm");
  std::string actual_credentials = "";
  EXPECT_CALL(*server_proxy_, GetStdoutPipe())
      .WillOnce(Return(stdout_write_fd_.get()));
  // Request credentials; the callback records whatever the proxy resolves.
  server_proxy_->AuthenticationRequired(
      protection_space.origin(), protection_space.scheme(),
      protection_space.realm(), /* bad_cached_credentials = */ "",
      base::Bind(
          [](std::string* actual_credentials, const std::string& credentials) {
            *actual_credentials = credentials;
          },
          &actual_credentials));
  EXPECT_EQ(1, server_proxy_->pending_auth_required_requests_.size());
  EXPECT_EQ(protection_space.SerializeAsString(),
            server_proxy_->pending_auth_required_requests_.begin()->first);
  brillo_loop_.RunOnce(false);
  worker::WorkerRequest request;
  // Read the request from the worker's stdout output.
  ASSERT_TRUE(ReadProtobuf(stdout_read_fd_.get(), &request));
  ASSERT_TRUE(request.has_auth_required_request());
  ASSERT_TRUE(request.auth_required_request().has_protection_space());
  EXPECT_EQ(
      request.auth_required_request().protection_space().SerializeAsString(),
      protection_space.SerializeAsString());
  // Write a reply to the worker's standard input that carries the protection
  // space but deliberately no username/password, i.e. "no credentials".
  worker::Credentials credentials;
  *credentials.mutable_protection_space() = protection_space;
  worker::WorkerConfigs configs;
  *configs.mutable_credentials() = credentials;
  ASSERT_TRUE(WriteProtobuf(stdin_write_fd_.get(), configs));
  brillo_loop_.RunOnce(false);
  // The pending request is resolved with an empty credentials string.
  EXPECT_EQ(0, server_proxy_->pending_auth_required_requests_.size());
  EXPECT_EQ("", actual_credentials);
}
// This test verifies that the authentication request is resolved with cached
// credentials, without a round-trip to the parent process.
TEST_F(ServerProxyTest, HandlePendingAuthRequestsCachedCredentials) {
  RedirectStdPipes();
  worker::ProtectionSpace protection_space;
  protection_space.set_origin(kFakeProxyAddress);
  protection_space.set_scheme("Basic");
  protection_space.set_realm("Proxy test realm");
  std::string actual_credentials = "";
  // Pre-populate the cache. Fixed: this value must be "test_user:test_pwd"
  // to match the assertion at the end of the test.
  server_proxy_->auth_cache_[protection_space.SerializeAsString()] =
      "test_user:test_pwd";
  server_proxy_->AuthenticationRequired(
      protection_space.origin(), protection_space.scheme(),
      protection_space.realm(), /* bad_cached_credentials = */ "",
      base::Bind(
          [](std::string* actual_credentials, const std::string& credentials) {
            *actual_credentials = credentials;
          },
          &actual_credentials));
  brillo_loop_.RunOnce(false);
  // No pending request: the cached entry satisfies the request directly.
  EXPECT_EQ(0, server_proxy_->pending_auth_required_requests_.size());
  EXPECT_EQ("test_user:test_pwd", actual_credentials);
}
// This test verifies that the stored credentials are removed when receiving a
// |ClearUserCredentials| request.
TEST_F(ServerProxyTest, ClearUserCredentials) {
  worker::ProtectionSpace protection_space;
  protection_space.set_origin(kFakeProxyAddress);
  protection_space.set_scheme("Basic");
  protection_space.set_realm("Proxy test realm");
  // Add an entry in the cache. The value itself is irrelevant here: the test
  // only asserts that the cache ends up empty.
  server_proxy_->auth_cache_[protection_space.SerializeAsString()] =
      "<PASSWORD>:<PASSWORD>";
  worker::ClearUserCredentials clear_user_credentials;
  worker::WorkerConfigs configs;
  *configs.mutable_clear_user_credentials() = clear_user_credentials;
  // Redirect the worker stdin and stdout pipes.
  RedirectStdPipes();
  // Send the config to the worker's stdin input.
  EXPECT_TRUE(WriteProtobuf(stdin_write_fd_.get(), configs));
  brillo_loop_.RunOnce(false);
  // Expect that the credentials were cleared.
  EXPECT_EQ(0, server_proxy_->auth_cache_.size());
}
// Verifies that even if there are credentials in the cache for the remote
// web-proxy, the ServerProxy sends a request to the parent web-proxy if the
// credentials are flagged as bad.
TEST_F(ServerProxyTest, AuthRequestsBadCachedCredentials) {
  // Fixed: constants were misspelled ("Credetials"); also the password set
  // below must be "test_pwd" so the resolved value equals kCredentials.
  constexpr char kBadCachedCredentials[] = "bad_user:bad_pwd";
  constexpr char kCredentials[] = "test_user:test_pwd";
  RedirectStdPipes();
  EXPECT_CALL(*server_proxy_, GetStdoutPipe())
      .WillOnce(Return(stdout_write_fd_.get()));
  // Add credentials to the cache for the proxy.
  worker::ProtectionSpace protection_space;
  protection_space.set_origin(kFakeProxyAddress);
  protection_space.set_scheme("Basic");
  protection_space.set_realm("Proxy test realm");
  server_proxy_->auth_cache_[protection_space.SerializeAsString()] =
      kBadCachedCredentials;
  // Request credentials for the proxy, flagging the cached entry as bad.
  std::string actual_credentials = "";
  server_proxy_->AuthenticationRequired(
      protection_space.origin(), protection_space.scheme(),
      protection_space.realm(), kBadCachedCredentials,
      base::Bind(
          [](std::string* actual_credentials, const std::string& credentials) {
            *actual_credentials = credentials;
          },
          &actual_credentials));
  // Expect that the credentials are not served from the cache.
  EXPECT_EQ(1, server_proxy_->pending_auth_required_requests_.size());
  EXPECT_EQ(protection_space.SerializeAsString(),
            server_proxy_->pending_auth_required_requests_.begin()->first);
  brillo_loop_.RunOnce(false);
  worker::WorkerRequest request;
  // Read the request from the worker's stdout output.
  ASSERT_TRUE(ReadProtobuf(stdout_read_fd_.get(), &request));
  ASSERT_TRUE(request.has_auth_required_request());
  ASSERT_TRUE(request.auth_required_request().has_protection_space());
  EXPECT_EQ(
      request.auth_required_request().protection_space().SerializeAsString(),
      protection_space.SerializeAsString());
  // Write a reply with fresh credentials to the worker's standard input.
  worker::Credentials credentials;
  *credentials.mutable_protection_space() = protection_space;
  credentials.set_username("test_user");
  credentials.set_password("test_pwd");
  worker::WorkerConfigs configs;
  *configs.mutable_credentials() = credentials;
  ASSERT_TRUE(WriteProtobuf(stdin_write_fd_.get(), configs));
  brillo_loop_.RunOnce(false);
  EXPECT_EQ(0, server_proxy_->pending_auth_required_requests_.size());
  EXPECT_EQ(kCredentials, actual_credentials);
}
} // namespace system_proxy
|
// Reports whether `sub` occurs anywhere inside `str`.
function containsSubstring(str, sub) {
  const position = str.indexOf(sub);
  return position >= 0;
}
#include <iostream>
// Iterative binary search over an ascending sorted array.
//
// Parameters:
//   arr     - pointer to the first element of a sorted int array
//   n       - number of elements in arr (n <= 0 yields "not found")
//   element - value to search for
// Returns the index of `element` in arr, or -1 if it is not present.
int binarySearch(int arr[], int n, int element)
{
    int low = 0, high = n - 1;
    while (low <= high)
    {
        // low + (high - low) / 2 avoids the signed overflow that
        // (low + high) / 2 can produce for very large indices.
        int mid = low + (high - low) / 2;
        if (arr[mid] == element)
            return mid;
        else if (arr[mid] > element)
            high = mid - 1;  // target, if present, is in the lower half
        else
            low = mid + 1;   // target, if present, is in the upper half
    }
    return -1;  // not found
}
// Demo driver: search a fixed sorted array for the value 8 and report
// whether (and where) it was found.
int main()
{
    const int values[] = {2, 4, 6, 8, 10};
    const int count = sizeof(values) / sizeof(values[0]);
    const int target = 8;
    const int index = binarySearch(const_cast<int*>(values), count, target);
    if (index == -1)
        std::cout << "Element is not present in the array." << std::endl;
    else
        std::cout << "Element is present at index " << index << std::endl;
    return 0;
}
<filename>container_runtimes/docker/sdk/docker_test.go
package docker
import (
"context"
"os"
"strings"
"testing"
"time"
dockerTypes "github.com/docker/docker/api/types"
)
// TestDocker builds the fixture image, verifies the resulting tag, and — when
// registry credentials are provided via DOCKER_USERNAME/DOCKER_PASSWORD —
// pushes the image and checks the returned reference.
func TestDocker(t *testing.T) {
	ctx := context.Background()
	cli, err := CreateClient(ctx)
	if err != nil {
		t.Fatal(err)
	}
	workdir := "../fixture"
	name := "fx-test-docker-image"
	if err := cli.BuildImage(ctx, workdir, name); err != nil {
		t.Fatal(err)
	}
	// wait a while for image to be tagged successfully after build
	time.Sleep(2 * time.Second)
	var imgInfo dockerTypes.ImageInspect
	if err := cli.InspectImage(ctx, name, &imgInfo); err != nil {
		t.Fatal(err)
	}
	// Look for a repo tag whose repository part equals the image name.
	// Fixed: the loop variable was named "t", shadowing *testing.T.
	found := false
	for _, tag := range imgInfo.RepoTags {
		slice := strings.Split(tag, ":")
		if slice[0] == name {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("should have built image with tag %s", name)
	}
	username := os.Getenv("DOCKER_USERNAME")
	password := os.Getenv("DOCKER_PASSWORD")
	if username == "" || password == "" {
		// Typo fix in the skip message: "enviroment" -> "environment".
		t.Skip("Skip push image test since DOCKER_USERNAME and DOCKER_PASSWORD not set in environment variable")
	}
	img, err := cli.PushImage(ctx, name)
	if err != nil {
		t.Fatal(err)
	}
	expect := username + "/" + name
	if img != expect {
		t.Fatalf("should get %s but got %s", expect, img)
	}
}
|
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/512+512+512-N-VB/7-model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/512+512+512-N-VB/7-1024+0+512-shuffled-N-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_remove_all_but_nouns_first_two_thirds_sixth --eval_function last_sixth_eval |
<filename>src/main/generated/com/globalcollect/gateway/sdk/java/gc/merchant/riskassessments/RiskassessmentsClient.java
package com.globalcollect.gateway.sdk.java.gc.merchant.riskassessments;
import com.globalcollect.gateway.sdk.java.CallContext;
import com.globalcollect.gateway.sdk.java.GcApiException;
import com.globalcollect.gateway.sdk.java.GcAuthorizationException;
import com.globalcollect.gateway.sdk.java.GcIdempotenceException;
import com.globalcollect.gateway.sdk.java.GcReferenceException;
import com.globalcollect.gateway.sdk.java.GcValidationException;
import com.globalcollect.gateway.sdk.java.GlobalCollectException;
import com.globalcollect.gateway.sdk.java.gc.riskassessments.RiskAssessmentBankAccount;
import com.globalcollect.gateway.sdk.java.gc.riskassessments.RiskAssessmentCard;
import com.globalcollect.gateway.sdk.java.gc.riskassessments.RiskAssessmentResponse;
/**
 * Riskassessments client. Thread-safe.
 *
 * Provides the risk-assessment calls for cards and bank accounts under the
 * /{merchantId}/riskassessments resource.
 */
public interface RiskassessmentsClient {
	/**
	 * Resource /{merchantId}/riskassessments/cards
	 * Risk-assess card
	 *
	 * @param body the {@link RiskAssessmentCard} request payload
	 * @return the resulting {@link RiskAssessmentResponse}
	 * @throws GcValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
	 * @throws GcAuthorizationException if the request was not allowed (HTTP status code 403)
	 * @throws GcReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
	 *            or there was a conflict (HTTP status code 404, 409 or 410)
	 * @throws GlobalCollectException if something went wrong at the GlobalCollect platform,
	 *            the GlobalCollect platform was unable to process a message from a downstream partner/acquirer,
	 *            or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
	 * @throws GcApiException if the GlobalCollect platform returned any other error
	 */
	RiskAssessmentResponse cards(RiskAssessmentCard body);

	/**
	 * Resource /{merchantId}/riskassessments/cards
	 * Risk-assess card
	 *
	 * @param body the {@link RiskAssessmentCard} request payload
	 * @param context the {@link CallContext} to use for this call
	 * @return the resulting {@link RiskAssessmentResponse}
	 * @throws GcValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
	 * @throws GcAuthorizationException if the request was not allowed (HTTP status code 403)
	 * @throws GcIdempotenceException if an idempotent request caused a conflict (HTTP status code 409)
	 * @throws GcReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
	 *            or there was a conflict (HTTP status code 404, 409 or 410)
	 * @throws GlobalCollectException if something went wrong at the GlobalCollect platform,
	 *            the GlobalCollect platform was unable to process a message from a downstream partner/acquirer,
	 *            or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
	 * @throws GcApiException if the GlobalCollect platform returned any other error
	 */
	RiskAssessmentResponse cards(RiskAssessmentCard body, CallContext context);

	/**
	 * Resource /{merchantId}/riskassessments/bankaccounts
	 * Risk-assess bank account
	 *
	 * @param body the {@link RiskAssessmentBankAccount} request payload
	 * @return the resulting {@link RiskAssessmentResponse}
	 * @throws GcValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
	 * @throws GcAuthorizationException if the request was not allowed (HTTP status code 403)
	 * @throws GcReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
	 *            or there was a conflict (HTTP status code 404, 409 or 410)
	 * @throws GlobalCollectException if something went wrong at the GlobalCollect platform,
	 *            the GlobalCollect platform was unable to process a message from a downstream partner/acquirer,
	 *            or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
	 * @throws GcApiException if the GlobalCollect platform returned any other error
	 */
	RiskAssessmentResponse bankaccounts(RiskAssessmentBankAccount body);

	/**
	 * Resource /{merchantId}/riskassessments/bankaccounts
	 * Risk-assess bank account
	 *
	 * @param body the {@link RiskAssessmentBankAccount} request payload
	 * @param context the {@link CallContext} to use for this call
	 * @return the resulting {@link RiskAssessmentResponse}
	 * @throws GcValidationException if the request was not correct and couldn't be processed (HTTP status code 400)
	 * @throws GcAuthorizationException if the request was not allowed (HTTP status code 403)
	 * @throws GcIdempotenceException if an idempotent request caused a conflict (HTTP status code 409)
	 * @throws GcReferenceException if an object was attempted to be referenced that doesn't exist or has been removed,
	 *            or there was a conflict (HTTP status code 404, 409 or 410)
	 * @throws GlobalCollectException if something went wrong at the GlobalCollect platform,
	 *            the GlobalCollect platform was unable to process a message from a downstream partner/acquirer,
	 *            or the service that you're trying to reach is temporary unavailable (HTTP status code 500, 502 or 503)
	 * @throws GcApiException if the GlobalCollect platform returned any other error
	 */
	RiskAssessmentResponse bankaccounts(RiskAssessmentBankAccount body, CallContext context);
}
|
-- phpMyAdmin SQL Dump
-- version 4.8.2
-- https://www.phpmyadmin.net/
--
-- Host: 127.0.0.1
-- Generation Time: Jan 04, 2019 at 10:23 AM
-- Server version: 10.1.34-MariaDB
-- PHP Version: 7.2.7
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET AUTOCOMMIT = 0;
START TRANSACTION;
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
--
-- Database: `qlsv`
--
-- --------------------------------------------------------
--
-- Table structure for table `lop` (classes)
--
CREATE TABLE `lop` (
  `LopID` int(11) NOT NULL,
  `MaLop` varchar(20) NOT NULL,
  `TenLop` varchar(100) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
--
-- Dumping data for table `lop`
--
INSERT INTO `lop` (`LopID`, `MaLop`, `TenLop`) VALUES
(3, 'THTD55', 'Tin học trắc địa K55'),
(4, 'THKT62', 'Tin học Kinh tếK62'),
(6, 'CNPM65', 'Công nghệ phần mềm K65'),
(7, 'HTTT64', 'Hệ thống thông tin K64');
-- --------------------------------------------------------
--
-- Table structure for table `sinhvien` (students)
--
-- NOTE(review): `LopID` appears to reference `lop`.`LopID`, but only a plain
-- index is declared below and MyISAM does not enforce foreign keys, so
-- referential integrity is not guaranteed by the database.
--
CREATE TABLE `sinhvien` (
  `SinhVienID` int(11) NOT NULL,
  `MaSinhVien` varchar(20) NOT NULL,
  `TenSinhVien` varchar(100) NOT NULL,
  `LopID` int(11) NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
--
-- Dumping data for table `sinhvien`
--
INSERT INTO `sinhvien` (`SinhVienID`, `MaSinhVien`, `TenSinhVien`, `LopID`) VALUES
(1, '0987654321', '<NAME>', 3),
(4, 'SV2', '<NAME>', 7);
-- --------------------------------------------------------
--
-- Table structure for table `taikhoan` (accounts)
--
-- NOTE(review): `MatKhau` stores passwords in plain text (see the seed row
-- below) — hash passwords before using this schema in production.
--
CREATE TABLE `taikhoan` (
  `ID` int(11) NOT NULL,
  `TenDangNhap` varchar(20) NOT NULL,
  `MatKhau` varchar(20) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
--
-- Dumping data for table `taikhoan`
--
INSERT INTO `taikhoan` (`ID`, `TenDangNhap`, `MatKhau`) VALUES
(1, 'admin', '12345');
--
-- Indexes for dumped tables
--
--
-- Indexes for table `lop`
--
ALTER TABLE `lop`
  ADD PRIMARY KEY (`LopID`);
--
-- Indexes for table `sinhvien`
--
ALTER TABLE `sinhvien`
  ADD PRIMARY KEY (`SinhVienID`),
  ADD KEY `LopID` (`LopID`);
--
-- Indexes for table `taikhoan`
--
ALTER TABLE `taikhoan`
  ADD PRIMARY KEY (`ID`);
--
-- AUTO_INCREMENT for dumped tables
--
--
-- AUTO_INCREMENT for table `lop`
--
ALTER TABLE `lop`
  MODIFY `LopID` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=8;
--
-- AUTO_INCREMENT for table `sinhvien`
--
ALTER TABLE `sinhvien`
  MODIFY `SinhVienID` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=5;
--
-- AUTO_INCREMENT for table `taikhoan`
--
ALTER TABLE `taikhoan`
  MODIFY `ID` int(11) NOT NULL AUTO_INCREMENT, AUTO_INCREMENT=2;
COMMIT;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
#!/bin/sh
# Copyright 2005-2019 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
#
set -x
. ./include.sh
cd ${data_dir}/bufr
#Define a common label for all the tmp files
label="bufr_dump_decode_filter_test"
#Create log file
fLog=${label}".log"
rm -f $fLog
touch $fLog
#Define filter rules file
fRules=${label}.filter
#-----------------------------------------------------------
# NOTE: not all of our BUFR files pass this test. bufr_filter is limited
# in what it can do compared to Python or Fortran!
# The following do not work:
#   ias1_240.bufr             -- too large, parser out of memory
#   tropical_cyclone.bufr     -- multi message
#   syno_multi.bufr           -- multi message
#-----------------------------------------------------------
files=`cat ${data_dir}/bufr/bufr_data_files.txt`
exclude="ias1_240.bufr syno_multi.bufr tropical_cyclone.bufr aeolus_wmo_26.bufr"
# For each data file (minus the exclusions): generate a decode filter with
# "bufr_dump -Dfilter", then run codes_bufr_filter with it over the same file.
for f in $files
do
  # Skip files known to be unsupported by bufr_filter (see NOTE above).
  process_bufr=1
  for ex in $exclude; do
    if [ "$f" = "$ex" ]; then process_bufr=0; break; fi
  done
  if [ $process_bufr = 1 ]; then
    echo "Test: bufr_dump -Dfilter " >> $fLog
    echo "file: $f" >> $fLog
    ${tools_dir}/bufr_dump -Dfilter $f > $fRules
    ${tools_dir}/codes_bufr_filter $fRules $f >/dev/null
    rm -f $fRules
  fi
done
# Clean up the log (and the rules file, in case the last iteration was skipped).
rm -f $fLog $fRules
|
<filename>src/background.js<gh_stars>1-10
// Number of pending items (games to move + waiting challenges) seen on the
// last poll; used to decide whether to show a desktop notification.
var count = 0;
// Polls the chess.com notices endpoint for the configured user, updates the
// toolbar badge/title, and (optionally) shows a notification when the number
// of pending items grows. Reschedules itself via setTimeout.
function update() {
  browser.storage.local.get({
    username: '',
    showNotifications: false,
    checkingInterval: 60
  })
  .then((options) => {
    // Always reschedule first, clamped to a minimum of 10 seconds.
    setTimeout(update, 1000 * Math.max(10, options.checkingInterval));
    if (!options.username) {
      return;
    }
    fetch('https://api.chess.com/int/player/' + options.username + '/notices', {
      cache: 'reload'
    })
    .then((response) => {
      if (response.ok) {
        return response.json();
      }
      // On an error response, surface the API's message via the button title.
      response.json().then((error) => {
        browser.browserAction.setTitle({
          title: error.message
        });
      });
      throw new Error(response.statusText);
    })
    .then((notifications) => {
      // Successful poll: clear any previous error title and re-enable.
      browser.browserAction.setTitle({
        title: ''
      });
      browser.browserAction.enable();
      let newCount = notifications.games_to_move + notifications.challenge_waiting;
      browser.browserAction.setBadgeText({
        text: newCount > 0 ? newCount.toString() : ''
      });
      // NOTE(review): `count` is only ever raised below, never lowered, so
      // after the pending total decreases, notifications stay suppressed
      // until it exceeds the previous maximum — confirm this is intended.
      if (count >= newCount) {
        return;
      }
      count = newCount;
      if (count <= 0) {
        return;
      }
      if (options.showNotifications) {
        browser.notifications.create({
          type: 'basic',
          iconUrl: browser.extension.getURL('icons/128/pawn_color.png'),
          title: 'Your Turn!',
          message: 1 === count ? "It's your turn to move." : "It's your turn to move, " + count + ' games are waiting.'
        });
      }
    })
    .catch((error) => {
      // Network or HTTP failure: show the reason and disable the button.
      browser.browserAction.setTitle({
        title: error.message
      });
      browser.browserAction.disable();
    });
  });
}
// Kick off the first poll immediately; update() reschedules itself.
update();
// Toolbar button click: jump to the next ready game for configured users,
// otherwise open the options page so a username can be set.
const handleToolbarClick = () => {
  browser.storage.local.get({
    username: ''
  }).then((options) => {
    if (options.username) {
      browser.tabs.create({
        'url': 'https://www.chess.com/goto_ready_game',
        'active': true
      });
    } else {
      browser.runtime.openOptionsPage();
    }
  });
};
browser.browserAction.onClicked.addListener(handleToolbarClick);
|
#!/bin/bash
# Provision VS Code, its runtime dependencies, and the project's extensions
# inside the image; then warm up the editor once under a virtual X server.
set -x
set -e
echo force-unsafe-io > /etc/dpkg/dpkg.cfg.d/02apt-speedup
apt-get update
# vscode
wget -O /tmp/vscode.deb 'https://go.microsoft.com/fwlink/?LinkID=760868'
dpkg -i /tmp/vscode.deb || true
apt-get install -y --no-install-recommends --fix-broken
# Fixed: the package list previously ended with "make \" — a trailing line
# continuation that only worked because the following comment line happened
# to start a new word with "#". The backslash is removed so the command ends
# cleanly at "make".
apt-get install -y --no-install-recommends \
    libx11-xcb1 \
    libxtst6 \
    libasound2 \
    x11-apps \
    xvfb \
    xauth \
    python-pip \
    python-wheel \
    python-setuptools \
    make
# plugins
pip install virtualenv
sudo -HEu project code --force --install-extension bmewburn.vscode-intelephense-client
sudo -HEu project code --force --install-extension k--kato.intellij-idea-keybindings
sudo -HEu project code --force --install-extension nadim-vscode.symfony-code-snippets
sudo -HEu project code --force --install-extension ikappas.composer
# Start VS Code headlessly and wait (up to ~60s) for the log line that marks
# a completed startup before killing it.
sudo -HEu project xvfb-run code --verbose 2>&1 | tee /tmp/vscode &
timeout=60
while [ $timeout -gt 0 ]
do
    if grep -Fq 'update#setState checking for updates' </tmp/vscode
    then
        break
    else
        sleep 1
        # Post-decrement returns the pre-decrement value (>= 1 inside the
        # loop), so this arithmetic command never trips `set -e`.
        ((timeout--))
    fi
done
killall code || true
# Overlay build-time files and hand the home directory to uid/gid 1000.
cp -frv /build/files/* / || true
chown -R 1000:1000 /home/project
source /usr/local/build_scripts/cleanup_apt.sh
|
package com.eiah.util;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
/**
* LocalDateTime 时间转换工具类
* created by eiah on 2017-07-02
*/
public class DataTimeUtil {
// 默认时间格式
private static final DateTimeFormatter DEFAULT_DATETIME_FORMATTER = TimeFormat.LONG_DATE_PATTERN_LINE.formatter;
// 无参数的构造函数,防止被实例化
private DataTimeUtil() {
};
/**
* String 转化为 LocalDateTime
* @param timeStr 被转化的字符串
* @return LocalDateTime
*/
public static LocalDateTime parseTime(String timeStr) {
return LocalDateTime.parse(timeStr, DEFAULT_DATETIME_FORMATTER);
}
/**
* String 转化为 LocalDateTime
* @param timeStr 被转化的字符串
* @param timeFormat 转化的时间格式
* @return LocalDateTime
*/
public static LocalDateTime parseTime(String timeStr, TimeFormat timeFormat) {
return LocalDateTime.parse(timeStr, timeFormat.formatter);
}
/**
* LocalDateTime 转化为String
* @param time LocalDateTime
* @return String
*/
public static String parseTime(LocalDateTime time) {
return DEFAULT_DATETIME_FORMATTER.format(time);
}
/**
* LocalDateTime 时间转 String
* @param time LocalDateTime
* @param format 时间格式
* @return String
*/
public static String parseTime(LocalDateTime time, TimeFormat format) {
return format.formatter.format(time);
}
/**
* 获取当前时间
* @return String
*/
public static String getCurrentDateTime() {
return DEFAULT_DATETIME_FORMATTER.format(LocalDateTime.now());
}
/**
* 获取当前时间
* @param timeFormat 时间格式
* @return
*/
public static String getCurrentDateTime(TimeFormat timeFormat) {
return timeFormat.formatter.format(LocalDateTime.now());
}
/**
* 内部枚举类
*
* created by eiah on 2017-07-02
*/
public enum TimeFormat {
// 短时间格式 年月日
/**
* 时间格式:yyyy-MM-dd
*/
SHORT_DATE_PATTERN_LINE("yyyy-MM-dd"),
/**
* 时间格式:yyyy/MM/dd
*/
SHORT_DATE_PATTERN_SLASH("yyyy/MM/dd"),
/**
* 时间格式:yyyy\\MM\\dd
*/
SHORT_DATE_PATTERN_DOUBLE_SLASH("yyyy\\MM\\dd"),
/**
* 时间格式:yyyyMMdd
*/
SHORT_DATE_PATTERN_NONE("yyyyMMdd"),
// 长时间格式 年月日时分秒
/**
* 时间格式:yyyy-MM-dd HH:mm:ss
*/
LONG_DATE_PATTERN_LINE("yyyy-MM-dd HH:mm:ss"),
/**
* 时间格式:yyyy/MM/dd HH:mm:ss
*/
LONG_DATE_PATTERN_SLASH("yyyy/MM/dd HH:mm:ss"),
/**
* 时间格式:yyyy\\MM\\dd HH:mm:ss
*/
LONG_DATE_PATTERN_DOUBLE_SLASH("yyyy\\MM\\dd HH:mm:ss"),
/**
* 时间格式:yyyyMMdd HH:mm:ss
*/
LONG_DATE_PATTERN_NONE("yyyyMMdd HH:mm:ss"),
// 长时间格式 年月日时分秒 带毫秒
LONG_DATE_PATTERN_WITH_MILSEC_LINE("yyyy-MM-dd HH:mm:ss.SSS"),
LONG_DATE_PATTERN_WITH_MILSEC_SLASH("yyyy/MM/dd HH:mm:ss.SSS"),
LONG_DATE_PATTERN_WITH_MILSEC_DOUBLE_SLASH("yyyy\\MM\\dd HH:mm:ss.SSS"),
LONG_DATE_PATTERN_WITH_MILSEC_NONE("yyyyMMdd HH:mm:ss.SSS"),
LONG_DATE_PATTERN_WITH_MILSEC_NULL("yyyyMMddHHmmssSSS");
private transient DateTimeFormatter formatter;
TimeFormat(String pattern) {
formatter = DateTimeFormatter.ofPattern(pattern);
}
}
/**
* 测试
*
* @param args
*/
public static void main(String[] args) {
// 获取当前时间
System.out.println(DataTimeUtil.parseTime(DataTimeUtil.getCurrentDateTime()));
System.out.println(DataTimeUtil.getCurrentDateTime(TimeFormat.LONG_DATE_PATTERN_WITH_MILSEC_NULL));
}
}
|
#!/usr/bin/env node
/*
* @copyright Copyright (c) Sematext Group, Inc. - All Rights Reserved
*
* @licence SPM for Docker is free-to-use, proprietary software.
* THIS IS PROPRIETARY SOURCE CODE OF Sematext Group, Inc. (Sematext)
* This source code may not be copied, reverse engineered, or altered for any purpose.
* This source code is to be used exclusively by users and customers of Sematext.
* Please see the full license (found in LICENSE in this distribution) for details on its license and the licenses of its dependencies.
*/
var fs = require('fs')
var AdmZip = require('adm-zip')
var zip = new AdmZip()
var config = require('spm-agent').Config
var util = require('util')
var ls = require('ls')
var os = require('os')
var path = require('path')
// Accumulators for the diagnostics dump; filled below and written into the
// zip archive by createZipFile().
var dockerInfo = {}
var systemInfo = {}
// Callback for docker.info(): stores the daemon info (when available) and
// always proceeds to build the archive.
function printDockerInfo (err, info) {
  if (!err) {
    dockerInfo.dockerInfo = info
  }
  createZipFile()
}
try {
  // Gather host details and docker-socket metadata, then query the daemon.
  dockerInfo.socketDetails = fs.statSync('/var/run/docker.sock')
  systemInfo = {
    operatingSystem: os.type() + ', ' + os.platform() + ', ' + os.release() + ', ' + os.arch(),
    processVersions: process.versions,
    processEnvironment: process.env,
    dockerInfo: dockerInfo
  }
  var Docker = require('dockerode')
  var docker = new Docker()
  docker.info(printDockerInfo)
} catch (ex) {
  // Any failure (e.g. no docker socket) is itself diagnostic information:
  // record the error and still produce the archive.
  dockerInfo = ex
  console.log(ex)
  createZipFile()
}
// Bundles the agent log files plus a config/system dump into
// sematext-diagnose.zip in the OS temp directory and prints instructions
// for retrieving and submitting it.
function createZipFile () {
  var cfgDumpFileName = path.join(os.tmpdir(), 'spm-cfg-dump.txt')
  var logfiles = ls(config.logger.dir + '/*')
  console.log('Adding file ' + cfgDumpFileName)
  logfiles.forEach(function (f) {
    console.log('Adding file ' + f.file)
    zip.addLocalFile(f.full)
  })
  console.log(util.inspect(config).toString() + '\nSystem-Info:\n' + util.inspect(systemInfo, {depth: 10}))
  fs.writeFileSync(cfgDumpFileName, util.inspect(config).toString() + '\nSystem-Info:\n' + util.inspect(systemInfo))
  zip.addLocalFile(cfgDumpFileName)
  var archFileName = path.join(os.tmpdir(), 'sematext-diagnose.zip')
  zip.writeZip(archFileName)
  console.log('Sematext diagnostics info is in ' + archFileName)
  console.log('Run "docker cp sematext-agent:' + archFileName + ' ." to copy the file to the docker host')
  console.log('Please e-mail the file to <EMAIL>')
  // Best-effort removal of the temporary config dump; errors are ignored.
  fs.unlink(cfgDumpFileName, function () {})
}
|
#!/bin/bash
# Sync this checkout with its upstream, then run main.py in "off" mode.
git pull
python main.py off
#!/usr/bin/env bash
# script-template.sh https://gist.github.com/m-radzikowski/53e0b39e9a59a1518990e76c2bff8038 by Maciej Radzikowski
# MIT License https://gist.github.com/m-radzikowski/d925ac457478db14c2146deadd0020cd
# https://betterdev.blog/minimal-safe-bash-script-template/
# Fail fast: -E propagates the ERR trap into functions, -e exits on error,
# -u errors on unset variables, pipefail fails a pipeline on any stage.
set -Eeuo pipefail
trap cleanup SIGINT SIGTERM ERR EXIT
# shellcheck disable=SC2034
script_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd -P)
usage() {
  # Print the help text (heredoc is emitted verbatim) and exit 0.
  cat <<EOF
Usage: $(basename "${BASH_SOURCE[0]}") [-h] [-v]
This script deploys a FCOS VM to a vSphere instance using the provided Ignition configuration.
Available options:
-h, --help Print this help and exit
-v, --verbose Print script debug info
-b, --bu-file Path to the bu config to use for provisioning
-d, --download-dir Path where CoreOS (images and files) should be stored locally
-e, --debug Enable extra debugging of the VM via Serial Connection logging
-g, --host-signing-key Path to the SSH Host Signing Key
-i, --host-signing-pw Password for the SSH Host Signing Key
-l, --library vSphere Library name to store template in, defaults to 'fcos'
-n, --name Name of the VM to create
-o, --deploy Whether to deploy the VM (requires GOVC_URL, GOVC_USERNAME, GOVC_PASSWORD to be set)
-p, --prefix Prefix for the VM names for easier identification in vSphere, defaults to 'fcos'
-s, --stream CoreOS stream, defaults to 'stable'
-t, --tls-certs Path to the Certificate Authority from where to copy the '$name.cert.pem' and '$name.key.pem' files
-u, --user-signing-key Path to the SSH User Signing Key
EOF
  exit
}
cleanup() {
  # Runs on every exit path (trapped on SIGINT/SIGTERM/ERR/EXIT): removes the
  # secrets and temporary files that were staged next to the bu config.
  trap - SIGINT SIGTERM ERR EXIT
  if [[ -n "${buInc}" ]]; then
    # Remove the common config fragments copied into the includes dir.
    if [[ -n "${commonConfig}" ]]; then
      for tmp in "${commonConfig}"/*; do
        tmpName=$(realpath --canonicalize-missing "${buInc}/$(basename "${tmp}")")
        message=$(printf "Removing temporary common config from '%s'\n" "${tmpName}")
        [[ $verbose == 1 ]] && msg "${message}"
        rm -rf "${tmpName}"
      done
    fi
    # Remove the generated SSH host keys (and any signed certificates).
    for tmp in "${buInc}/ssh/ssh_host_"*; do
      tmpName=$(realpath --canonicalize-missing "${buInc}/ssh/$(basename "${tmp}")")
      message=$(printf "Removing temporary SSH host key from '%s'\n" "${tmpName}")
      [[ $verbose == 1 ]] && msg "${message}"
      rm -f "${tmpName}"
    done
    # Remove the copied user signing certificate, if one was provided.
    if [[ -n "${userSigningKey-}" ]] && [[ -f "${buInc}/ssh/${userSigningKey}" ]]; then
      message=$(printf "Removing temporary SSH user signing certificate from '%s'\n" "${buInc}/ssh/${userSigningKey}")
      [[ $verbose == 1 ]] && msg "${message}"
      rm -f "${buInc}/ssh/${userSigningKey}"
    fi
    # Remove the copied TLS material (per-app certs plus static CA files).
    if [[ -n "${name}" ]]; then
      for tmp in "${buInc}/certs/app"*; do
        tmpName=$(realpath --canonicalize-missing "${buInc}/certs/$(basename "${tmp}")")
        message=$(printf "Removing temporary TLS certificate from '%s'\n" "${tmpName}")
        [[ $verbose == 1 ]] && msg "${message}"
        rm -f "${tmpName}"
      done
      staticCerts=("${buInc}/certs/ca.cert.pem" "${buInc}/certs/ca-chain.cert.pem" "${buInc}/certs/ia.cert.pem")
      for tmp in "${staticCerts[@]}"; do
        if [[ -f "${tmp}" ]]; then
          tmpName=$(realpath --canonicalize-missing "${buInc}/certs/$(basename "${tmp}")")
          message=$(printf "Removing temporary TLS certificate from '%s'\n" "${tmpName}")
          [[ $verbose == 1 ]] && msg "${message}"
          rm -f "${tmpName}"
        fi
      done
    fi
  fi
  # Remove the transpiled (secret-bearing) Ignition file itself.
  if [[ -n "${ign_config_file}" ]]; then
    message=$(printf "Removing Ignition file from '%s'\n" "${ign_config_file}")
    [[ $verbose == 1 ]] && msg "${message}"
    rm -f "${ign_config_file}"
  fi
}
setup_colors() {
  # Enable ANSI colors only when stderr is a TTY, NO_COLOR is unset and the
  # terminal is not "dumb"; otherwise define the color variables as empty.
  if [[ -t 2 ]] && [[ -z "${NO_COLOR-}" ]] && [[ "${TERM-}" != "dumb" ]]; then
    NOFORMAT='\033[0m' RED='\033[0;31m' GREEN='\033[0;32m' ORANGE='\033[0;33m' BLUE='\033[0;34m' PURPLE='\033[0;35m' CYAN='\033[0;36m' YELLOW='\033[1;33m'
  else
    # shellcheck disable=SC2034
    NOFORMAT='' RED='' GREEN='' ORANGE='' BLUE='' PURPLE='' CYAN='' YELLOW=''
  fi
}
msg() {
  # Write a line to stderr, interpreting backslash escapes (color codes).
  echo -e "${1-}" >&2
}
die() {
  # Report a fatal error via msg and terminate with the given status.
  local message=$1
  local status=${2-1} # default exit status 1
  msg "$message"
  exit "$status"
}
parse_params() {
  # Parse CLI options into globals; validate required parameters afterwards.
  # default values of variables set from params
  bu=''
  deploy=0
  download=''
  debug=0
  hostSigningKey=''
  hostSigningPw="${SIMPLE_CA_SSH_PASSWORD-}"
  library='fcos'
  name=''
  prefix='fcos'
  stream='stable'
  tlsCerts=''
  userSigningKey=''
  verbose=0
  while :; do
    case "${1-}" in
    -h | --help) usage ;;
    -v | --verbose)
      set -x
      verbose=1
      ;;
    --no-color) NO_COLOR=1 ;;
    -b | --bu-file)
      bu="${2-}"
      shift
      ;;
    -d | --download-dir)
      download="${2-}"
      shift
      ;;
    -e | --debug)
      debug=1
      ;;
    -g | --host-signing-key)
      hostSigningKey="${2-}"
      shift
      ;;
    -i | --host-signing-pw)
      hostSigningPw="${2-}"
      shift
      ;;
    -l | --library)
      library="${2-}"
      shift
      ;;
    -n | --name)
      name="${2-}"
      shift
      ;;
    -o | --deploy) deploy=1 ;;
    # Fix: usage() documents '-p, --prefix', but the option was never parsed
    # and fell through to the unknown-option error.
    -p | --prefix)
      prefix="${2-}"
      shift
      ;;
    -s | --stream)
      stream="${2-}"
      shift
      ;;
    -t | --tls-certs)
      tlsCerts="${2-}"
      shift
      ;;
    -u | --user-signing-key)
      userSigningKey="${2-}"
      shift
      ;;
    -?*) die "Unknown option: $1" ;;
    *) break ;;
    esac
    shift
  done
  # check required params and arguments
  [[ -z "${download-}" ]] && die "Missing required parameter: download-dir"
  [[ -z "${name-}" ]] && die "Missing required parameter: name"
  [[ -z "${bu-}" ]] && die "Missing required parameter: bu-file"
  # Fix: the original chained the three -z tests with '&&', which only died
  # when ALL of the variables were missing; any single missing credential
  # must abort a deploy.
  [[ $deploy == 1 ]] && { [[ -z "${GOVC_URL-}" ]] || [[ -z "${GOVC_USERNAME-}" ]] || [[ -z "${GOVC_PASSWORD-}" ]]; } && die "Missing required environment variables: GOVC_URL, GOVC_USERNAME, or GOVC_PASSWORD"
  [[ -z "${tlsCerts-}" ]] && die "Missing required parameter: tls-certs"
  return 0
}
# Entry point: parse CLI arguments, then configure colored output.
parse_params "$@"
setup_colors
# script logic here
# Normalise all user-supplied paths into absolute ones.
download=$(realpath --canonicalize-missing "${download}")
bu=$(realpath --canonicalize-missing "${bu}")
buDir=$(dirname "${bu}")
vmConfig=$(realpath --canonicalize-missing "${buDir}/resources.json")
buInc=$(realpath --canonicalize-missing "${buDir}/includes")
commonConfig=$(realpath --canonicalize-missing "${buDir}/../common")
signing_key=$(realpath --canonicalize-missing "${download}/fedora.asc")
# Optional SSH signing keys must point at real files when supplied.
if [[ -n "${hostSigningKey-}" ]]; then
  hostSigningKey=$(realpath --canonicalize-missing "${hostSigningKey}")
  [[ ! -f "${hostSigningKey-}" ]] && die "Parameter 'host-signing-key' does not point to an existing SSH key file"
fi
if [[ -n "${userSigningKey-}" ]]; then
  userSigningKey=$(realpath --canonicalize-missing "${userSigningKey}")
  [[ ! -f "${userSigningKey-}" ]] && die "Parameter 'user-signing-key' does not point to an existing SSH key file"
fi
tlsCerts=$(realpath --canonicalize-missing "${tlsCerts}")
stream_json=$(realpath --canonicalize-missing "${download}/${stream}.json")
ova_version=''
ign_config=''
ign_config_file=''
[[ ! -d "${tlsCerts-}" ]] && die "Parameter 'tls-certs' does not point to an existing location"
# Generate fresh SSH host keys for the new machine.
msg "Creating SSH Host Keys"
ssh-keygen -t ecdsa -N "" -f "${buInc}/ssh/ssh_host_ecdsa_key" -C "${name},${name}.local"
ssh-keygen -t ed25519 -N "" -f "${buInc}/ssh/ssh_host_ed25519_key" -C "${name},${name}.local"
ssh-keygen -t rsa -b 4096 -N "" -f "${buInc}/ssh/ssh_host_rsa_key" -C "${name},${name}.local"
# Optionally sign the host keys so SSH clients can trust them via the CA.
if [[ -n "${hostSigningKey-}" ]]; then
  msg "Creating signed SSH certificates"
  ssh-keygen -s "${hostSigningKey}" \
    -t rsa-sha2-512 \
    -P "${hostSigningPw}" \
    -I "${name} host key" \
    -n "${name},${name}.local" \
    -V -5m:+3650d \
    -h \
    "${buInc}/ssh/ssh_host_ecdsa_key" \
    "${buInc}/ssh/ssh_host_ed25519_key" \
    "${buInc}/ssh/ssh_host_rsa_key"
fi
if [[ -n "${userSigningKey-}" ]]; then
  message=$(printf "Temporarily copying SSH user signing certificate from '%s' to '%s'\n" "${userSigningKey}" "${buInc}/ssh")
  msg "${message}"
  cp -f "${userSigningKey}" "${buInc}/ssh"
fi
# Stage shared config fragments and TLS material next to the bu file so
# butane can embed them; cleanup() removes them again on exit.
message=$(printf "Temporarily copying common config from '%s' to '%s'\n" "${commonConfig}" "${buInc}")
msg "${message}"
cp -fr "${commonConfig}/." "${buInc}"
message=$(printf "Temporarily copying certificates from '%s' to '%s'\n" "${tlsCerts}" "${buInc}")
msg "${message}"
cp -f "${tlsCerts}/certs/ca-chain.cert.pem" "${buInc}/certs"
cp -f "${tlsCerts}/certs/ca.cert.pem" "${buInc}/certs"
cp -f "${tlsCerts}/certs/ia.cert.pem" "${buInc}/certs"
cp -f "${tlsCerts}/certs/${name}.cert.pem" "${buInc}/certs/app.cert.pem"
cp -f "${tlsCerts}/certs/${name}.cert-chain.pem" "${buInc}/certs/app.cert-chain.pem"
cp -f "${tlsCerts}/private/${name}.key.pem" "${buInc}/certs/app.key.pem"
# Transpile the Butane config to Ignition and gzip+base64 it for guestinfo.
message=$(printf "Converting bu file '%s' to ign config\n" "${bu}")
msg "${message}"
ign_config=$(butane --strict --files-dir="${buInc}" "${bu}" | gzip | base64 -w0)
ign_config_file=$(realpath --canonicalize-missing "${buDir}/${name}.ign.gzip.b64")
echo "${ign_config}" > "${ign_config_file}"
if [[ $deploy == 1 ]]; then
  # Init download directory if it doesn't exist
  if [[ ! -d "${download}" ]]; then
    message=$(printf "Creating CoreOS Downloads Folder at '%s'\n" "${download}")
    msg "${message}"
    mkdir -p "${download}"
  fi
  # Download the signing key for verification purposes
  if [[ ! -f "${signing_key}" ]]; then
    message=$(printf "Downloading the Fedora signing key to '%s'" "${signing_key}")
    msg "${message}"
    curl -sS "https://getfedora.org/static/fedora.gpg" -o "${signing_key}"
  fi
  # Make the signing key useful for verification purposes
  if [[ ! -f "${signing_key}.gpg" ]]; then
    gpg --dearmor "${signing_key}"
  fi
  # Download the CoreOS VM description for the particular stream
  message=$(printf "Downloading stream json to '%s'\n" "${stream_json}")
  msg "${message}"
  curl -sS "https://builds.coreos.fedoraproject.org/streams/${stream}.json" -o "${stream_json}"
  # Extract the vmware/ova artifact metadata from the stream description.
  ova_version=$(jq --raw-output '.architectures.x86_64.artifacts.vmware.release' "${stream_json}")
  ova_url_location=$(jq --raw-output '.architectures.x86_64.artifacts.vmware.formats.ova.disk.location' "${stream_json}")
  ova_url_signature=$(jq --raw-output '.architectures.x86_64.artifacts.vmware.formats.ova.disk.signature' "${stream_json}")
  ova_sha256=$(jq --raw-output '.architectures.x86_64.artifacts.vmware.formats.ova.disk.sha256' "${stream_json}")
  ova_file_path=$(realpath --canonicalize-missing "${download}/coreos-${stream}-${ova_version}.ova")
  ova_file_signature=$(realpath --canonicalize-missing "${download}/coreos-${stream}-${ova_version}.sig")
  ova_name="coreos-${stream}-${ova_version}"
  message=$(printf "Latest CoreOS Version for stream '%s' is '%s'\n" "${stream}" "${ova_version}")
  msg "${message}"
  # Download the latest available ova file for a particular stream
  if [[ ! -f "${ova_file_path}" ]]; then
    message=$(printf "Downloading CoreOS Version for stream '%s' with version '%s'\n" "${stream}" "${ova_version}")
    msg "${message}"
    curl -sS "${ova_url_location}" -o "${ova_file_path}"
    curl -sS "${ova_url_signature}" -o "${ova_file_signature}"
  fi
  # Verify the GPG signature of the downloaded image.
  message=$(printf "Verifying signature for '%s'\n" "${ova_file_path}")
  msg "${message}"
  gpg --no-default-keyring --keyring "${signing_key}.gpg" --verify "${ova_file_signature}" "${ova_file_path}"
message=$(printf "Verifying checksum for '%s'\n" "${ova_file_path}")
msg "${message}"
message=$(printf "%s %s" "${ova_sha256}" "${ova_file_path}" | sha256sum --check)
msg "${message}"
  msg "\nIgnition configuration transpiled and CoreOS Template downloaded; will now deploy to vCenter\n\n"
  # govc refuses to talk to vCenter without a trusted certificate; fall back
  # to a previously saved one under ~/.govmomi/certificates when possible.
  set +e
  if ! govc about.cert;
  then
    msg "${RED}No valid certificate for govc found, will attempt to use 'GOVC_TLS_CA_CERTS'.\n${NOFORMAT}"
    if [[ -z ${GOVC_TLS_CA_CERTS-} ]]; then
      message=$(printf "The environment variable 'GOVC_TLS_CA_CERTS' is not set.\n")
      msg "${message}"
      if [[ ! -f "${HOME}/.govmomi/certificates/${GOVC_URL}.pem" ]]; then
        message=$(printf "%sNo matching certificate found at '%s/.govmomi/certificates/%s.pem'.\n%s" "${RED}" "${HOME}" "${GOVC_URL}" "${NOFORMAT}")
        msg "${message}"
        message=$(printf "%sPlease download the certificate using the following command and verify it:\n%s" "${RED}" "${NOFORMAT}")
        msg "${message}"
        message=$(printf "%s\tmkdir -p '%s/.govmomi/certificates/' && govc about.cert -k -show | tee '%s/.govmomi/certificates/%s.pem'\n%s" "${RED}" "${HOME}" "${HOME}" "${GOVC_URL}" "${NOFORMAT}")
        msg "${message}"
        exit 1
      fi
      message=$(printf "Found certificate at '%s/.govmomi/certificates/%s.pem', exporting it as required.\n" "${HOME}" "${GOVC_URL}")
      msg "${message}"
      export GOVC_TLS_CA_CERTS="${HOME}/.govmomi/certificates/${GOVC_URL}.pem"
    fi
  fi
  set -e
  # Ensure the content library exists and contains the current ova template.
  if [[ $(govc library.ls | grep -c "${library}") -eq 0 ]]; then
    message=$(printf "The library '%s' does not exist in vCenter, creating it now\n" "${library}")
    msg "${message}"
    govc library.create "${library}"
  fi
  if [[ $(govc library.ls "/${library}/*" | grep -c "${ova_name}") -eq 0 ]]; then
    message=$(printf "Uploading ova '%s' as '%s' to vCenter library '%s'\n" "${ova_file_path}" "${ova_name}" "${library}")
    msg "${message}"
    govc library.import -n "${ova_name}" "${library}" "${ova_file_path}"
  fi
  # Deploy the template and inject the gzip+base64 Ignition config.
  message=$(printf "Deploying ova '%s' as '%s'\n" "${ova_name}" "${prefix}-${name}")
  msg "${message}"
  govc library.deploy "${library}/${ova_name}" "${prefix}-${name}"
  govc vm.change -vm "${prefix}-${name}" -e "guestinfo.ignition.config.data.encoding=gzip+base64"
  govc vm.change -vm "${prefix}-${name}" -f "guestinfo.ignition.config.data=${ign_config_file}"
  # Apply optional sizing from resources.json (CPU, RAM, root/docker/data disks).
  if [[ -f "${vmConfig}" ]]; then
    vmResources=$(cat "${vmConfig}")
    msg "Resource config found; updating VM"
    msg "Updating CPU Cores"
    govc vm.change -vm "${prefix}-${name}" -c="$(echo "${vmResources}" | jq '.cpu_cores')"
    msg "Updating RAM"
    govc vm.change -vm "${prefix}-${name}" -m="$(echo "${vmResources}" | jq '.ram')"
    msg "Updating resizing root disk"
    govc vm.disk.change -vm "${prefix}-${name}" \
      -disk.filePath="[datastore] ${prefix}-${name}/${prefix}-${name}.vmdk" \
      -size="$(echo "${vmResources}" | jq -r '.disks.root')"
    # Create the auxiliary disks only if they do not exist yet (reuse on redeploy).
    if govc datastore.ls "docker/${prefix}-${name}-docker.vmdk"; then
      msg "Docker disk exists, continuing"
    else
      govc datastore.mkdir -p docker
      govc datastore.disk.create -size "$(echo "${vmResources}" | jq -r '.disks.docker')" \
        "docker/${prefix}-${name}-docker.vmdk"
    fi
    if govc datastore.ls "data/${prefix}-${name}-data.vmdk"; then
      msg "Data disk exists, continuing"
    else
      govc datastore.mkdir -p data
      govc datastore.disk.create -size "$(echo "${vmResources}" | jq -r '.disks.data')" \
        "data/${prefix}-${name}-data.vmdk"
    fi
    # See https://github.com/vmware/govmomi/blob/master/govc/USAGE.md#vmdiskattach
    msg "Attaching docker disk"
    govc vm.disk.attach -vm "${prefix}-${name}" -disk="docker/${prefix}-${name}-docker.vmdk" \
      -link=false -mode=independent_persistent -sharing=sharingNone
    msg "Attaching app disk"
    govc vm.disk.attach -vm "${prefix}-${name}" -disk="data/${prefix}-${name}-data.vmdk" \
      -link=false -mode=independent_persistent -sharing=sharingNone
  fi
  govc vm.info -e "${prefix}-${name}"
  if [[ $debug == 1 ]]; then
    # In case of problems: the two lines below attach serial connection and create a debug log for the start up (including provisioning)
    message=$(printf "Enabling VM debugging, check log file in vSphere Datastore at '%s'" "${prefix}-${name}/${prefix}-${name}.log")
    msg "${message}"
    govc device.serial.add -vm "${prefix}-${name}"
    govc device.serial.connect -vm "${prefix}-${name}" "[datastore] ${prefix}-${name}/${prefix}-${name}.log"
  fi
  message=$(printf "Powering VM '%s' on\n" "${prefix}-${name}")
  msg "${message}"
  govc vm.power -on "${prefix}-${name}"
  msg "${YELLOW}For security reasons the 'guestinfo.ignition.config.data' parameter should be removed once startup completes:${NOFORMAT}"
  message=$(printf "govc vm.change -vm '%s-%s' -e 'guestinfo.ignition.config.data='" "${prefix}" "${name}")
  msg "${message}"
else
  # Dry run: just pretty-print the decoded Ignition JSON next to the bu file.
  echo "${ign_config}" | base64 -d | gzip -d | jq >"$(realpath --canonicalize-missing "${buDir}/${name}.ign.json")"
fi
|
# Detect the host operating system family from the kernel name.
kernel_name="$(uname -s)"
case "${kernel_name}" in
  Linux*)  machine=Linux ;;
  Darwin*) machine=Mac ;;
  CYGWIN*) machine=Cygwin ;;
  MINGW*)  machine=MinGw ;;
  *)       machine="UNKNOWN:${kernel_name}" ;;
esac
echo ${machine}
|
<gh_stars>10-100
# Factory for Visitor records: a visitor attached to a visit, with random
# FFaker names and a fixed, deterministic date of birth for stable specs.
FactoryBot.define do
  factory :visitor do
    visit
    first_name do
      FFaker::Name.first_name
    end
    last_name do
      FFaker::Name.last_name
    end
    date_of_birth do '1980-01-10' end
    # Position within the visit's visitor list (appended at the end).
    sort_index do |v|
      v.visit.visitors.count
    end
    trait :banned do
      banned { true }
    end
    trait :not_on_list do
      not_on_list { true }
    end
  end
end
|
import { applyChanges, Doc } from 'automerge';
import { getClock, later, recentChanges, union } from 'automerge-clocks';
import { Map, fromJS } from 'immutable';
import { Message } from './types';
/**
 * An Automerge network protocol that drives two documents in different
 * places towards consensus by exchanging clocks and change sets.
 */
export class Peer {
  _theirClock: Map<string, number>;
  _sendMsg: (msg: Message) => void;

  constructor(sendMsg: (msg: Message) => void) {
    this._theirClock = Map();
    this._sendMsg = sendMsg;
  }

  public applyMessage<T>(msg: Message, doc: Doc<T>): Doc<T> | undefined {
    // The clock may have been serialized on the wire; normalise it back
    // into an Immutable Map before comparing.
    const remoteClock = fromJS(msg.clock);

    // 1. Apply any changes the remote peer sent along.
    const updatedDoc = msg.changes ? applyChanges(doc, msg.changes) : doc;

    // 2. Send back anything we have that the remote peer is missing.
    const outgoingChanges = recentChanges(doc, remoteClock);
    if (outgoingChanges.length > 0) {
      this.sendMsg({
        clock: getClock(updatedDoc),
        changes: outgoingChanges,
      });
    }

    // 3. If the remote peer is still ahead of us, advertise our clock so
    // they respond with the changes we lack (their step 2 above).
    const localClock = getClock(updatedDoc);
    if (later(remoteClock, localClock)) {
      this.sendMsg({
        clock: localClock,
      });
    }

    // Only report a new document when remote changes were actually applied.
    return msg.changes ? updatedDoc : undefined;
  }

  public notify<T>(doc: Doc<T>) {
    const pendingChanges = recentChanges(doc, this._theirClock);

    // 1. Share any changes we believe the remote peer is missing.
    if (pendingChanges.length > 0) {
      this.sendMsg({
        clock: getClock(doc),
        changes: pendingChanges,
      });
      return;
    }

    // 2. Nothing to send: just advertise our current clock. If our copy of
    // "theirClock" was stale, they will correct us (applyMessage step 3).
    this.sendMsg({
      clock: getClock(doc),
    });
  }

  private sendMsg(msg: Message) {
    // Optimistically merge what we are about to send into our copy of the
    // remote peer's clock.
    this._theirClock = union(this._theirClock, msg.clock);
    this._sendMsg(msg);
  }
}
|
package software.amazon.jsii.tests.calculator;
/**
 * jsii proxy exercising asynchronous virtual method dispatch between the
 * JVM and the JavaScript runtime; jsiiAsyncCall/jsiiCall bridge each call.
 */
@software.amazon.jsii.Jsii(module = software.amazon.jsii.tests.calculator.$Module.class, fqn = "jsii-calc.AsyncVirtualMethods")
public class AsyncVirtualMethods extends software.amazon.jsii.JsiiObject {
    /** Reference/deserialization constructor used by the jsii engine. */
    protected AsyncVirtualMethods(final software.amazon.jsii.JsiiObject.InitializationMode mode) {
        super(mode);
    }
    /** Creates the backing JavaScript object through the jsii engine. */
    public AsyncVirtualMethods() {
        super(software.amazon.jsii.JsiiObject.InitializationMode.Jsii);
        software.amazon.jsii.JsiiEngine.getInstance().createNewObject(this);
    }
    /** Asynchronously invokes the JS "callMe" method. */
    public java.lang.Number callMe() {
        return this.jsiiAsyncCall("callMe", java.lang.Number.class);
    }
    /** Asynchronously invokes the overridable JS "overrideMe" method. */
    public java.lang.Number overrideMe(final java.lang.Number mult) {
        return this.jsiiAsyncCall("overrideMe", java.lang.Number.class, java.util.stream.Stream.of(java.util.Objects.requireNonNull(mult, "mult is required")).toArray());
    }
    /** Asynchronously invokes the overridable JS "overrideMeToo" method. */
    public java.lang.Number overrideMeToo() {
        return this.jsiiAsyncCall("overrideMeToo", java.lang.Number.class);
    }
    /**
     * Just calls "overrideMeToo"
     */
    public java.lang.Number callMe2() {
        return this.jsiiAsyncCall("callMe2", java.lang.Number.class);
    }
    /**
     * This method calls the "callMe" async method indirectly, which will then
     * invoke a virtual method. This is a "double promise" situation, which
     * means that callbacks are not going to be available immediate, but only
     * after an "immediates" cycle.
     */
    public java.lang.Number callMeDoublePromise() {
        return this.jsiiAsyncCall("callMeDoublePromise", java.lang.Number.class);
    }
    /** Synchronous call; not subject to virtual override. */
    public java.lang.Number dontOverrideMe() {
        return this.jsiiCall("dontOverrideMe", java.lang.Number.class);
    }
}
|
package com.github.picadoh.imc.compiler;
import com.github.picadoh.imc.report.CompilationErrorReport;
import javax.tools.DiagnosticCollector;
import javax.tools.JavaCompiler;
import javax.tools.JavaFileObject;
import javax.tools.ToolProvider;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Locale;
/**
 * Thin wrapper around the JDK's in-process Java compiler that compiles
 * sources fully in memory and attaches a diagnostics report on failure.
 */
class CompilerTool {
    // Raw options passed straight through to javac (e.g. -classpath).
    private final List<String> options;

    public CompilerTool(List<String> options) {
        this.options = options;
    }

    /**
     * Compiles the given in-memory sources and returns the result; on a
     * failed compilation the result carries a CompilationErrorReport.
     */
    public CompilerResult compile(List<SourceCode> sources) {
        JavaCompiler compiler = getSystemJavaCompiler();
        DiagnosticCollector<JavaFileObject> diagnosticCollector = getDiagnosticCollector();
        InMemoryFileManager classManager = getClassManager(compiler);
        JavaCompiler.CompilationTask task = compiler.getTask(null,
                classManager, diagnosticCollector, options, null, sources);
        // NOTE(review): the result object is obtained BEFORE task.call() runs
        // the compilation; this is only correct if InMemoryFileManager returns
        // a live view of its compiled-class store — verify against that class.
        CompilerResult compilerResult = classManager.getCompilerResult();
        if (!task.call()) {
            return compilerResult.withCompilationErrorReport(getCompilationErrorReport(diagnosticCollector));
        }
        return compilerResult;
    }

    // The methods below are seams so tests can stub out the JDK tooling.
    JavaCompiler getSystemJavaCompiler() {
        return ToolProvider.getSystemJavaCompiler();
    }
    CompilationErrorReport getCompilationErrorReport(DiagnosticCollector<JavaFileObject> collector) {
        return new CompilationErrorReport(options, collector.getDiagnostics());
    }
    DiagnosticCollector<JavaFileObject> getDiagnosticCollector() {
        return new DiagnosticCollector<>();
    }
    InMemoryFileManager getClassManager(JavaCompiler compiler) {
        return new InMemoryFileManager(compiler.getStandardFileManager(null,
                Locale.getDefault(), Charset.defaultCharset()));
    }
}
|
# Run apps-maintain on shell startup, but only when the command is installed
# and the shell is interactive (never in scripts or cron).
if command -v apps-maintain &>/dev/null; then
  case $- in
    *i*) apps-maintain ;;
    *) : ;;
  esac
fi
|
import paramiko
class ClusterMonitor:
    """Collects basic cluster health: capacity, usage, and per-node SSH reachability."""

    def __init__(self):
        self.available_capacity = 0  # last capacity reading (units unspecified)
        self.used_percentage = 0     # last usage reading, in percent
        self.node_status = {}        # node name -> human-readable status string

    def monitor_cluster(self, address, username, password, nodes):
        """Refresh all metrics and return (capacity, used %, node status dict).

        NOTE(review): `address` is passed through but check_ssh_connection
        connects to each `node` directly — confirm the parameter is intended.
        """
        # Available Capacity
        self.available_capacity = self.get_available_capacity()
        # Used Percentage
        self.used_percentage = self.get_used_percentage()
        # SSH Connection to Nodes
        for node in nodes:
            self.node_status[node] = self.check_ssh_connection(address, username, password, node)
        return self.available_capacity, self.used_percentage, self.node_status

    def get_available_capacity(self):
        # Implement logic to retrieve available capacity from SCOM or other monitoring system
        return 1000  # Placeholder value

    def get_used_percentage(self):
        # Implement logic to retrieve used percentage from SCOM or other monitoring system
        return 60  # Placeholder value

    def check_ssh_connection(self, address, username, password, node):
        """Probe `node` over SSH and return a status string (never raises)."""
        try:
            client = paramiko.SSHClient()
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            client.connect(node, username=username, password=password)
            client.close()
            return "Connected"
        except paramiko.AuthenticationException:
            return "Authentication Failed"
        except paramiko.SSHException:
            return "SSH Connection Failed"
        except Exception as e:
            # Any other failure (DNS, timeout, ...) is reported verbatim.
            return f"Error: {str(e)}"
# Example usage
# NOTE(review): placeholder credentials/hosts; unreachable nodes surface as
# "Error: ..." strings via check_ssh_connection's broad except clause.
monitor = ClusterMonitor()
address = "cluster_address"
username = "username"
password = "password"
nodes = ["node1", "node2", "node3"]
available_capacity, used_percentage, node_status = monitor.monitor_cluster(address, username, password, nodes)
print(f"Available Capacity: {available_capacity} units")
print(f"Used Percentage: {used_percentage}%")
print("Node Status:")
for node, status in node_status.items():
    print(f"{node}: {status}")
<gh_stars>0
/*
* Copyright 2014-2020 The Ideal Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style
* license that can be found in the LICENSE file or at
* https://developers.google.com/open-source/licenses/bsd
*/
package ideal.development.elements;
import ideal.library.elements.*;
/**
 * A kind in the ideal type system; immutable and identified by reference.
 * (Lower-case naming follows this project's convention.)
 */
public interface kind extends deeply_immutable_data, reference_equality, stringable {
  /** The simple name identifying this kind. */
  simple_name name();
  /** The flavor profile applied when none is specified explicitly. */
  flavor_profile default_profile();
  /** Whether types of this kind act as namespaces. */
  boolean is_namespace();
}
|
package main
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
// TestFetchPubkeys verifies that FetchPubKeys parses keys from a successful
// response and returns an error (with no keys) on a 404.
func TestFetchPubkeys(t *testing.T) {
	okServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprintln(w, `ssh-rsa dsfj435224jl24j3/34j2l4j23l4j/234ljk243lj24234/sdfs comment1`)
		fmt.Fprintln(w, `ssh-rsa usfj4356754jl24j3/34j2l4j23l4j/234ljk243l4235we/rwe comment2`)
	}))
	defer okServer.Close()

	missingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotFound)
	}))
	defer missingServer.Close()

	// A reachable URL should yield both published keys and no error.
	keys, err := FetchPubKeys(okServer.URL+"/{{ .UserName }}", "user1")
	if err != nil {
		t.Errorf("Should not return an error (%s).", err.Error())
	}
	if len(keys) != 2 {
		t.Error("Should have two pubkeys.")
	}

	// A 404 response must surface as an error carrying no keys.
	keys, err = FetchPubKeys(missingServer.URL+"/{{ .UserName }}", "user1")
	if err == nil {
		t.Error("Should return an error.")
	}
	if len(keys) != 0 {
		t.Error("Should not return pubkeys on errors.")
	}
}
|
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
import matplotlib.pyplot as plt
import numpy as np
# define the model
# Small CNN for binary classification of 150x150 RGB images: two conv/pool
# stages, then two dropout-regularised dense layers and a sigmoid output.
model = Sequential()
model.add(Conv2D(30, (5, 5), input_shape=(150, 150 ,3), activation='relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Conv2D(15, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size = (2, 2)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# train the model
# NOTE(review): x_train/y_train/x_val/y_val are not defined in this file —
# they must come from the surrounding context or this raises NameError.
history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=25)
-- Return the oldest employee: ascending birth_date puts the earliest first.
SELECT *
FROM employees
ORDER BY birth_date ASC
LIMIT 1;
load 'helpers'

# The adapter must advertise every operation key Meshery expects.
@test "adapter should return expected supported operations" {
  run grpcurl --plaintext $MESHERY_ADAPTER_ADDR:10002 meshes.MeshService.SupportedOperations
  [ "$status" -eq 0 ]
  # Each expected key must appear in the returned operations list.
  [[ $(echo $output | jq '.ops[] | select( .key == "consul_182_demo" )' | jq -j .key ) = "consul_182_demo" ]]
  [[ $(echo $output | jq '.ops[] | select( .key == "consul_191_demo" )' | jq -j .key ) = "consul_191_demo" ]]
  [[ $(echo $output | jq '.ops[] | select( .key == "bookinfo" )' | jq -j .key ) = "bookinfo" ]]
  [[ $(echo $output | jq '.ops[] | select( .key == "httpbin" )' | jq -j .key ) = "httpbin" ]]
  [[ $(echo $output | jq '.ops[] | select( .key == "imagehub" )' | jq -j .key ) = "imagehub" ]]
  [[ $(echo $output | jq '.ops[] | select( .key == "custom" )' | jq -j .key ) = "custom" ]]
}

# The adapter must identify itself as the Consul mesh.
@test "adapter should return expected mesh name" {
  run bash -c "grpcurl --plaintext $MESHERY_ADAPTER_ADDR:10002 meshes.MeshService.MeshName | jq -j .name"
  [ "$status" -eq 0 ]
  [ "$output" = "CONSUL" ]
}
|
# frozen_string_literal: true
require 'rails_helper'
# Component spec for the RequestsFeed: empty state vs. a feed with replies.
RSpec.describe RequestsFeed::RequestsFeed, type: :component do
  subject { render_inline(described_class.new(**params)) }
  let(:contributor) { create(:contributor) }
  let(:params) { { contributor: contributor } }
  # Without any replies the feed renders its empty-state message (German copy).
  it { should have_css('.RequestsFeed') }
  it { should have_text('hat bisher auf keine Recherche geantwortet') }
  context 'given a contributor with replies' do
    let!(:reply) { create(:message, sender: contributor, request: create(:request, title: 'Lorem Ipsum')) }
    # Each reply links to its request, anchored at this contributor's entry.
    it { should have_link('Lorem Ipsum', href: "/requests/#{reply.request.id}#contributor-#{contributor.id}") }
  end
end
|
#!/bin/sh
# the script must be started by root or with sudo permissions
# see http://www.oracle.com/technetwork/java/javase/downloads/java-archive-javase8-2177648.html
# for jdk downloads
# specify the jdk
JDK='jdk1.8.0_77'
JDKFILE='jdk-8u77-linux-x64.tar.gz'
JDKPATH="http://download.oracle.com/otn-pub/java/jdk/8u77-b03/$JDKFILE"
# oracle request header
# The cookie accepts Oracle's license so the archive download is permitted.
ORACLELIC="Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie"
# script path
PROG=$0
REALPATH=$(readlink -f "$PROG")
BASEDIR=$(dirname $REALPATH)
# create dir
mkdir -p /usr/java
# install jdk
cd /tmp && wget --no-cookies --no-check-certificate --header "$ORACLELIC" "$JDKPATH" -O /tmp/$JDKFILE && mkdir -p /usr/java && tar zxf /tmp/$JDKFILE -C /usr/java && ln -s /usr/java/$JDK /usr/java/jdk1.8.0
# install jce - please check the license agreement!
cd /tmp && wget --no-cookies --no-check-certificate --header "$ORACLELIC" "http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip" -O /tmp/jce_policy-8.zip && unzip -o /tmp/jce_policy-8.zip && \cp -f /tmp/UnlimitedJCEPolicyJDK8/* /usr/java/jdk1.8.0/jre/lib/security && rm -rf /tmp/UnlimitedJCEPolicyJDK8
# add jdk to path and set JAVA_HOME
cp $BASEDIR/java.sh /etc/profile.d
# NOTE(review): 'source' is a bashism and ~/.bashrc is not read by /bin/sh;
# under a strict POSIX shell this line likely fails or is a no-op — confirm.
source ~/.bashrc
# validate the installation
java -version
|
<filename>moda.js<gh_stars>0
// Sample data set whose statistical mode (most frequent value) we compute.
const lista = [1, 2, 3, 1, 2, 3, 4, 2, 2, 2, 1];

// Tally how many times each value occurs. forEach (not map) is used because
// we only want the side effect of building the tally, not a mapped array.
const listaCount = {};
lista.forEach(function (elemento) {
  listaCount[elemento] = (listaCount[elemento] || 0) + 1;
});

// Sort the [value, count] pairs by ascending count.
const listaArray = Object.entries(listaCount).sort(
  function (elementoA, elementoB) {
    return elementoA[1] - elementoB[1];
  }
);

// The mode is the last (most frequent) entry: a [value, count] pair.
const moda = listaArray[listaArray.length - 1];
|
from component.components.components import Components
from component.components.camera_component import CameraComponent
from component.components.button_component import ButtonComponent
from component.components.nfc_component import NfcComponent
from component.components.vibration_component import VibrationComponent
from component.components.display_component import DisplayComponent
__all__ = ['CameraComponent', 'ButtonComponent', 'NfcComponent', 'VibrationComponent', 'DisplayComponent']
|
################################################################################
### Head: Main
##
main_usage () {
  # Print the help text for this command and its sub-commands.
  # NOTE(review): the heredoc embeds '<?php include_once(...) ?>' tags, so this
  # file appears to be a PHP-preprocessed template; run raw, those tags are
  # printed verbatim.
  ##local cmd_name="$0"
  local cmd_name="$THE_CMD_FILE_NAME"
  cat << EOF
Usage:
$ $cmd_name [action]
Example:
## help
$ $cmd_name
$ $cmd_name help
## version
$ $cmd_name version
## self_update
$ $cmd_name self_update
## self_actions
$ $cmd_name self_actions
## fzf
<?php include_once(dirname(__DIR__) . '/Model/fzf/Usage.sh'); ?>
## theme
<?php include_once(dirname(__DIR__) . '/Model/Usage.sh'); ?>
Debug:
$ export DEBUG_GRUBRC_THEME_CTRL=true
EOF
}
# Show the default action (usage) and exit when no arguments were given.
main_check_args_size () {
  if [ $1 -le 0 ]; then
    shift
    main_run_default_sub_cmd "$@"
    exit 1
  fi
}
# Default action: print the usage text.
main_run_default_sub_cmd () {
  main_usage "$@"
}
# Dispatch the first argument to its handler function (resolved via
# sub_cmd_find_function_name); fall back to the default action when the
# handler does not exist.
main_run_sub_cmd () {
  local sub_cmd="$1"
  shift
  local function_name="$(sub_cmd_find_function_name "$sub_cmd")"
  ## if ! command -v $function_name > /dev/null; then
  if ! type -p $function_name > /dev/null; then
    util_debug_echo "[Debug] sub_cmd_function_not_exist: sub_cmd=$sub_cmd; function_name=$function_name"
    echo
    main_run_default_sub_cmd "$@"
    return 1
  fi
  "$function_name" "$@" ## run sub cmd function
}
## Start
main_check_args_size $# "$@"
main_run_sub_cmd "$@"
##
### Tail: Main
################################################################################
|
package main
import "fmt"
import "os"
// point is a simple 2-D coordinate used to demonstrate struct formatting verbs.
type point struct {
	x, y int
}
// Go by Example: String Formatting
// https://gobyexample.com/string-formatting
func main() {
	// Flags, width, precision and explicit argument indexes
	fmt.Printf("\n旗标、宽度、精度、索引\n----\n")
	// fmt.Printf("|%0+- #[1]*.[2]*[3]d|%0+- #[1]*.[2]*[4]d|\n", 8, 4, 32, 64)
	fmt.Printf("|%v|\n", 8)
	// Column-aligned layout
	fmt.Printf("\n格式化排版\n----\n")
	fmt.Printf("|%6d|%9d|\n", 12, 345)
	fmt.Printf("|%6.2f|%9.2f|\n", 1.2, 3.45)
	fmt.Printf("|%-6.2f|%-9.2f|\n", 1.2, 3.450999)
	// Floating-point precision
	fmt.Printf("\n浮点型精度\n----\n")
	fmt.Printf("|%f|%8.4f|%8.f|%.4f|%.f|\n", 3.2, 3.2, 3.2, 3.2, 3.2)
	fmt.Printf("|%.3f|%.3g|\n", 12.345678, 12.345678)
	fmt.Printf("|%.2f|\n", 12.345678+12.345678i)
	// String precision
	fmt.Printf("\n字符串精度\n----\n")
	s := "你好世界!"
	fmt.Printf("|%29s|%8.2s|%8.s|%.2s|%.s|\n", s, s, s, s, s)
	fmt.Printf("|%29x|%8.2x|%8.x|%.2x|%.x|\n", s, s, s, s, s)
	// Quoted strings
	fmt.Printf("\n带引号字符串\n----\n")
	s1 := "Hello 世界!" // CanBackquote
	s2 := "Hello\n世界!" // !CanBackquote
	fmt.Printf("%q\n", s1)  // double quotes
	fmt.Printf("%#q\n", s1) // backquoting succeeds
	fmt.Printf("%#q\n", s2) // backquoting fails (string contains \n)
	fmt.Printf("%+q\n", s2) // ASCII-only output
	// Unicode code points
	fmt.Printf("\nUnicode 码点\n----\n")
	fmt.Printf("%U, %#U\n", '好', '好')
	fmt.Printf("%U, %#U\n", '\n', '\n')
	// An interface value prints the value it wraps
	fmt.Printf("\n接口类型将输出其内部包含的值\n----\n")
	var i interface{} = struct {
		name string
		age int
	}{"AAA", 20}
	fmt.Printf("%v\n", i)  // field values only
	fmt.Printf("%+v\n", i) // include field names
	fmt.Printf("%#v\n", i) // Go-syntax representation
	// A struct value prints its contained fields
	fmt.Printf("\n结构类型将输出其内部包含的值\n----\n")
	p := point{1, 2}
	fmt.Printf("%v\n", p)
	fmt.Printf("%+v\n", p)
	fmt.Printf("%#v\n", p)
	// Print the dynamic type
	fmt.Printf("\n输出类型\n----\n")
	fmt.Printf("%T\n", i)
	fmt.Fprintf(os.Stderr, "an %s\n", "errorss")
}
<filename>subtitledl/functions/checks.py
import os
import urllib.request
import urllib.error
# File extensions recognised as movie/video containers (lower-case, with
# the leading dot). checkExtension() validates against this list.
EXTENSIONS = [
    ".avi",
    ".mp4",
    ".mkv",
    ".mpg",
    ".wmv",
    ".mov",
    ".3gp",
    ".vob",
    ".rm",
    ".flv",
    ".3g2",
    ".mpeg"
]
def checkConnection():
    """Raise an Exception when the Internet appears unreachable.

    Probes http://google.com/ with a 1-second timeout; any URL/HTTP error
    is reported as a connection failure.
    """
    try:
        urllib.request.urlopen("http://google.com/", timeout=1)
    # BUGFIX: the original clause was `except URLError or HTTPError:`,
    # which Python evaluates to `except URLError:` only. Use an explicit
    # tuple (HTTPError also subclasses URLError, so behavior is preserved).
    except (urllib.error.URLError, urllib.error.HTTPError):
        raise Exception(
            "ConnectionError: Cannot connect to Internet. Check Connection.")
def checkExtension(extension):
    """Raise an Exception when ``extension`` is not a known movie extension."""
    if extension in EXTENSIONS:
        return
    raise Exception(
        "ExtensionError: Given file is not a valid movie file.")
def checkExists(filepath):
    """Raise an Exception when a subtitle file already exists at ``filepath``."""
    already_there = os.path.exists(filepath)
    if already_there:
        raise Exception("FileExistsError: Subtitle file already exists.")
|
package string_handle;
import java.io.BufferedReader;
import java.io.InputStreamReader;
/**
 * Baekjoon Online Judge problem 2774: "Beautiful Number".
 *
 * For each test case, the "beauty" of a number is the count of distinct
 * decimal digits appearing in it.
 *
 * @author minchoba
 * @see https://www.acmicpc.net/problem/2774/
 */
public class Boj2774 {
	private static final String END_LINE = "\n";

	public static void main(String[] args) throws Exception{
		// Read input through a buffered reader for speed.
		BufferedReader br = new BufferedReader(new InputStreamReader(System.in));

		int T = Integer.parseInt(br.readLine());
		StringBuilder sb = new StringBuilder();

		while(T-- > 0){
			boolean[] beauty = new boolean[10]; // presence flags for digits 0-9
			String num = br.readLine();

			for(char tmp : num.toCharArray()){ // walk each digit character
				beauty[Character.getNumericValue(tmp)] = true; // mark its digit as seen
			}

			sb.append(res(beauty)).append(END_LINE); // buffer this case's answer
		}
		System.out.println(sb.toString()); // emit all answers at once
	}

	/**
	 * Counts the distinct digits that were seen.
	 *
	 * @param arr per-digit presence flags
	 * @return the "beauty" — number of true entries
	 */
	private static int res(boolean[] arr){
		int cnt = 0;
		for(int i = 0; i < arr.length; i++){ // each true slot is one distinct digit
			if(arr[i]){
				cnt++;
			}
		}
		return cnt; // distinct-digit count
	}
}
|
// Modal form page for creating an AWS RDS DB subnet group within a VPC.
// `moduleParams` supplies the cloud location and VPC id of the caller.
Scalr.regPage('Scalr.ui.tools.aws.rds.instances.createSubnetGroup', function (loadParams, moduleParams) {
    // Governance policy for EC2 VPC settings; empty when no policy applies.
    var vpcPolicy = Scalr.getGovernance('ec2', 'aws.vpc');

    var form = Scalr.utils.Window({
        xtype: 'form',
        title: 'Create subnet group',
        // Under governance a fixed field width leaves room for the policy
        // icon; otherwise fields stretch to the full container width.
        fieldDefaults: Ext.isEmpty(vpcPolicy) ? {
            anchor: '100%'
        } : {
            width: 530
        },
        scalrOptions: {
            modalWindow: true
        },
        width: 600,
        defaults: {
            labelWidth: 120
        },
        bodyCls: 'x-container-fieldset x-fieldset-no-bottom-padding',
        items: [{
            xtype: 'textfield',
            name: 'dbSubnetGroupName',
            fieldLabel: 'Name',
            allowBlank: false
        },{
            xtype: 'textfield',
            name: 'dbSubnetGroupDescription',
            fieldLabel: 'Description',
            allowBlank: false
        },{
            xtype: 'vpcsubnetfield',
            name: 'subnets',
            fieldLabel: 'Subnets',
            flex: 1,
            allowBlank: false,
            iconAlign: 'right',
            iconPosition: 'outer',
            // Submit the selected subnets as a JSON-encoded value
            // (empty selection is normalised to '' before encoding).
            getSubmitValue: function () {
                var me = this;
                var value = me.getValue();
                if (Ext.isEmpty(value)) {
                    value = '';
                }
                return Ext.encode(value);
            },
            listeners: {
                afterrender: function (me) {
                    var cloudLocation = moduleParams.cloudLocation;
                    var vpcId = moduleParams.vpcId;
                    // Scope the subnet store to this page's location/VPC.
                    me.getStore().getProxy().params = {
                        cloudLocation: cloudLocation,
                        vpcId: vpcId,
                        extended: 1
                    };
                    var addNewPlugin = me.getPlugin('comboaddnew');
                    //addNewPlugin.enable();
                    // Query string the "add new subnet" plugin posts to.
                    addNewPlugin.postUrl = '?' + Ext.Object.toQueryString({
                        cloudLocation: cloudLocation,
                        vpcId: vpcId
                    });
                    // Show the governance icon only when a policy applies.
                    me.getPlugin('fieldicons').
                        toggleIcon('governance', !Ext.isEmpty(vpcPolicy));
                }
            }
        }],
        dockedItems: [{
            xtype: 'container',
            dock: 'bottom',
            cls: 'x-docked-buttons',
            layout: {
                type: 'hbox',
                pack: 'center'
            },
            items: [{
                xtype: 'button',
                text: 'Create',
                // Validate, POST the form, then broadcast the new group so
                // the opener can refresh, and close this modal.
                handler: function() {
                    if (form.getForm().isValid()) {
                        Scalr.Request({
                            processBox: {
                                type: 'save'
                            },
                            form: form.getForm(),
                            params: {
                                cloudLocation: moduleParams.cloudLocation
                            },
                            scope: this,
                            url: '/tools/aws/rds/instances/xCreateSubnetGroup',
                            success: function (response) {
                                var subnetGroup = response['subnetGroup'];

                                if (subnetGroup) {
                                    Scalr.event.fireEvent(
                                        'update',
                                        '/tools/aws/rds/instances/createSubnetGroup',
                                        subnetGroup
                                    );
                                }

                                form.close();
                            }
                        });
                    }
                }
            }, {
                xtype: 'button',
                text: 'Cancel',
                handler: function() {
                    form.close();
                }
            }]
        }]
    });

    return form;
});
|
#include <systems/BladeShooterSystem.h>
#include <components/BladeComponent.h>
#include <vector>
#include <components/PhysicsComponent.h>
#include <components/BladeShooterComponent.h>
#include <game-objects/GameObjectTag.h>
#include <components/Texcoords.h>
#include <components/SplitDirectionComponent.h>
#include <components/TimeStepComponent.h>
// Pimpl for BladeShooterSystem: periodically spawns blade projectiles from
// every entity carrying a BladeShooterComponent + PhysicsComponent.
class BladeShooterSystem::BladeShooterSystemImpl{
public:
    BladeShooterSystemImpl() {}
    ~BladeShooterSystemImpl(){}

    // Wall clock shared by all shooters to pace their firing intervals.
    sf::Clock m_clock;

    // Advance all shooters: fire immediately on the first update, then fire
    // again whenever the configured delay has elapsed. While time is slowed
    // down, progress is accumulated in scaled simulation steps instead of
    // wall-clock time.
    void update(anax::World& world, std::vector<anax::Entity>& entities, b2World& box2dWorld){
        auto allEntities = world.getEntities();
        float timeStep = 0;
        bool isTimeSlowedDown = false;
        // Find the entity that carries the global TimeStepComponent.
        for(anax::Entity entity : allEntities){
            if(entity.isValid() && entity.hasComponent<TimeStepComponent>()){
                auto& timeStepComp = entity.getComponent<TimeStepComponent>();
                timeStep = timeStepComp.timeStep;
                isTimeSlowedDown = timeStepComp.isTimeSlowedDown;
                break;
            }
        }
        for(anax::Entity bladeShooterEntity : entities){
            if(!bladeShooterEntity.isValid()){
                continue;
            }
            sf::Time currentTime = m_clock.getElapsedTime();
            auto& bladeShooterComp = bladeShooterEntity.getComponent<BladeShooterComponent>();
            auto bladeShooterState = bladeShooterComp.bladeShooterState;
            if(bladeShooterState == BladeShooterState::NOT_STARTED){
                // First tick for this shooter: start the cycle with one shot.
                bladeShooterComp.lastTimeBladeShot = m_clock.getElapsedTime();
                bladeShooterComp.bladeShooterState = BladeShooterState::SHOOTING;
                createBlade(bladeShooterEntity, box2dWorld);
            }else if(isTimeSlowedDown){
                // Slowed time: advance a virtual elapsed time by scaled steps
                // until the (scaled) firing delay is reached.
                sf::Time elaspedTime = (bladeShooterComp.lastTimeBladeShot + bladeShooterComp.slowedDownAccumulation);
                float diff = (elaspedTime - bladeShooterComp.lastTimeBladeShot).asMilliseconds() * 20;
                if(diff >= bladeShooterComp.delayBetweenBladeShots.asMilliseconds()){
                    createBlade(bladeShooterEntity, box2dWorld);
                    bladeShooterComp.lastTimeBladeShot = elaspedTime;
                    bladeShooterComp.slowedDownAccumulation = sf::seconds(0);
                }else{
                    bladeShooterComp.slowedDownAccumulation += sf::seconds(timeStep);
                }
            }else if((currentTime - bladeShooterComp.lastTimeBladeShot).asMilliseconds() >= bladeShooterComp.delayBetweenBladeShots.asMilliseconds()){
                createBlade(bladeShooterEntity, box2dWorld);
                bladeShooterComp.lastTimeBladeShot = currentTime;
            }
        }
    }

    // Spawn a blade entity at the shooter's position — unless this shooter's
    // previously fired blade is still alive (one blade in flight at a time).
    void createBlade(anax::Entity& entity, b2World& box2dWorld){
        auto& bladeShooterComp = entity.getComponent<BladeShooterComponent>();
        auto& physicsComp = entity.getComponent<PhysicsComponent>();
        if(bladeShooterComp.previousBlades.size() > 0){
            if(bladeShooterComp.previousBlades[0].isValid() && bladeShooterComp.previousBlades[0].isActivated()){
                return;
            }else{
                bladeShooterComp.previousBlades.clear();
            }
        }
        b2Body* body = physicsComp.physicsBody;
        b2Vec2 startingPosition = body->GetPosition();
        auto& world = entity.getWorld();
        auto bladeEntity = world.createEntity();
        auto& bladeComp = bladeEntity.addComponent<BladeComponent>();
        auto& bladePhysicsComp = bladeEntity.addComponent<PhysicsComponent>();
        auto& texCoordsComp = bladeEntity.addComponent<Texcoords>();
        auto& splitDirectionComp = bladeEntity.addComponent<SplitDirectionComponent>();
        splitDirectionComp.splitDirection = Direction::NONE;
        bladeComp.bladeLinearVelocity = bladeShooterComp.bladeLinerVelocty;
        bladePhysicsComp.physicsBody = createBladeBody(startingPosition, bladeShooterComp.bladeSize, box2dWorld);
        bladeEntity.activate();
        bladeShooterComp.previousBlades.push_back(bladeEntity);
    }

    // Build the Box2D body for a blade: a dynamic, gravity-free box fixture.
    b2Body* createBladeBody(b2Vec2 startingPosition, b2Vec2 shapeSize, b2World& box2dWorld){
        b2BodyDef bd;
        bd.type = b2_dynamicBody;
        bd.position = startingPosition;
        b2PolygonShape shape;
        shape.SetAsBox(shapeSize.x, shapeSize.y);
        b2FixtureDef fd;
        fd.shape = &shape;
        fd.density = 1.0f;
        fd.filter.categoryBits = GameObjectTag::BLADE | GameObjectTag::DEATH_BRINGER_OBJECT;
        // BUGFIX: the original mask was `~BLADE_SHOOTER | ~NINJA_SENSE`.
        // For two distinct bit flags that expression is all-ones (De Morgan:
        // ~A | ~B == ~(A & B)), i.e. "collide with everything", making the
        // filter a no-op. The evident intent is to collide with everything
        // EXCEPT blade shooters and ninja-sense sensors:
        fd.filter.maskBits = ~(GameObjectTag::BLADE_SHOOTER | GameObjectTag::NINJA_SENSE);
        b2Body* bladeBody = box2dWorld.CreateBody(&bd);
        bladeBody->SetGravityScale(0.0f);
        bladeBody->CreateFixture(&fd);
        return bladeBody;
    }
};
// Construct the system: match entities that have both a physics body and a
// blade-shooter component, and allocate the pimpl.
BladeShooterSystem::BladeShooterSystem() : Base(anax::ComponentFilter().requires<PhysicsComponent,BladeShooterComponent>()), m_impl(new BladeShooterSystemImpl()) {
}

BladeShooterSystem::~BladeShooterSystem() {
}

// Per-frame entry point: forward the matched entities to the implementation.
void BladeShooterSystem::update(b2World& box2dWorld){
    auto entities = getEntities();
    m_impl->update( getWorld(), entities, box2dWorld);
}
|
<filename>src/router/modules/dashboard.js
/*
* @Description:
* @Version: 1.0
* @Autor: CHENWJ
* @Date: 2021-05-07 11:35:04
* @LastEditors: CHENWJ
* @LastEditTime: 2021-05-07 14:38:49
*/
import About from '../../views/About.vue'
// Route definition for the About page; `auth: true` marks it as requiring
// an authenticated user.
export default {
  path: '/about',
  name: 'about',
  meta: {
    auth: true
  },
  component: About
};
|
#!/bin/sh
# Build the project once per supported Maven profile (default profile first).
# Abort on the first failing build so a broken profile is not silently
# skipped by the remaining invocations.
set -e

mvn clean install
for profile in dev no-cache no-cache-dev \
        without-bootstrap without-bootstrap-dev \
        no-fonts no-fonts-dev no-cache-no-fonts \
        without-jsf without-jsf-dev; do
    mvn clean install -P"$profile"
done
#!/bin/sh
# Recreate and start a development container for this project, then open a
# shell inside it.
# Usage: ./run.sh [container_name] <base_port>
REPO=nothingdocker
IMAGE_NAME=`basename $PWD`
DOCKER_NAME=${1:-dev}
BASE_PORT=$2

# BUGFIX: with BASE_PORT unset, every published port collapsed onto its raw
# offset (e.g. host port 22, clashing with sshd). Require the argument.
if [ -z "$BASE_PORT" ]; then
    echo "usage: $0 [container_name] base_port" >&2
    exit 1
fi

# Remove any previous container with the same name.
docker rm -f $DOCKER_NAME

# Deprecated bash-only $[...] arithmetic replaced with POSIX $((...)).
docker run -d --restart=always --privileged \
    --name $DOCKER_NAME \
    -v /sys/fs/cgroup:/sys/fs/cgroup:ro \
    -v /data/share:/data/share \
    -v /data/share/npm-global:/root/.npm-global \
    -v /data/src/tms:/data/src \
    -v /data/platform/$DOCKER_NAME/config:/data/config \
    -v /data/platform/$DOCKER_NAME/config/nginx:/etc/nginx \
    -v /data/platform/$DOCKER_NAME/config/supervisor:/etc/supervisord.d \
    -v /data/platform/$DOCKER_NAME/mongo/db:/var/lib/mongo \
    -v /data/platform/$DOCKER_NAME/mongo/logs:/var/log/mongodb \
    -p $((BASE_PORT+22)):22 \
    -p $((BASE_PORT+90)):8080 \
    -p $((BASE_PORT+379)):2379 \
    -p $((BASE_PORT+554)):8554 \
    -p $((BASE_PORT+555)):8555 \
    -p $((BASE_PORT+556)):8556 \
    -p $((BASE_PORT+672)):5672 \
    -p $((BASE_PORT+673)):15672 \
    -p $((BASE_PORT+717)):27017 \
    $REPO/$IMAGE_NAME

# Drop into a shell inside the freshly started container.
docker exec -it $DOCKER_NAME bash
class Valid:
    """Marker object for the VALID equivalence class."""

    def __init__(self):
        # Classification label; fixed for every instance of this class.
        self.equivalence_class = "VALID"

    def __str__(self):
        return "{}".format(self.equivalence_class)
import { Dispatch, SetStateAction } from 'react';
import * as firebase from 'firebase/app';
import 'firebase/auth';
import 'firebase/database';
import 'firebase/storage';
// Firebase settings are read from environment variables injected at build
// time (Create React App only exposes REACT_APP_*-prefixed variables).
const {
  REACT_APP_FIREBASE_API_KEY,
  REACT_APP_FIREBASE_APP_ID,
  REACT_APP_FIREBASE_AUTH_DOMAIN,
  REACT_APP_FIREBASE_DATABASE_URL,
  REACT_APP_FIREBASE_MESSAGING_SENDER_ID,
  REACT_APP_FIREBASE_PROJECT_ID,
  REACT_APP_FIREBASE_STORAGE_BUCKET
} = process.env;

const config = {
  apiKey: REACT_APP_FIREBASE_API_KEY,
  authDomain: REACT_APP_FIREBASE_AUTH_DOMAIN,
  databaseURL: REACT_APP_FIREBASE_DATABASE_URL,
  projectId: REACT_APP_FIREBASE_PROJECT_ID,
  storageBucket: REACT_APP_FIREBASE_STORAGE_BUCKET,
  messagingSenderId: REACT_APP_FIREBASE_MESSAGING_SENDER_ID,
  appId: REACT_APP_FIREBASE_APP_ID
};

// Initialize the default Firebase app once at module load.
firebase.initializeApp(config);
// GitHub OAuth provider used for the redirect-based sign-in flow.
export const provider = new firebase.auth.GithubAuthProvider();
export const signIn = () => firebase.auth().signInWithRedirect(provider);

// Cached uid of the signed-in user; assigned by checkLoggedInStatus below
// and cleared by signOut.
let uid = '';

// NOTE(review): the redirect result is never inspected, so "signed in" is
// logged on every page load — confirm whether `result.user` should be used
// (e.g. to seed `uid`).
firebase.auth().getRedirectResult().then(function (result) {
  console.info('User successfully signed in');
}).catch(function (error) {
  console.error('Login error', error);
});
// Sign the current user out of Firebase Auth and reset the cached uid.
// Resolves once the sign-out attempt has completed (errors are logged,
// not re-thrown), mirroring the promise chain it replaces.
export const signOut = async () => {
  try {
    await firebase.auth().signOut();
    console.info('User successfully signed out');
    uid = '';
  } catch (error) {
    console.error('Logout error', error);
  }
};
// Subscribe to Firebase auth-state changes and push the login status and
// uid into React state via the supplied setters. Also (re)binds the
// module-level locationsRef/itemsRef to the signed-in user's subtree.
export const checkLoggedInStatus = (setIsLoggedIn: Dispatch<SetStateAction<boolean | undefined>>, setUserId: Dispatch<SetStateAction<string | null>>) => {
  firebase.auth().onAuthStateChanged(function (user) {
    if (user && user.uid) {
      uid = user.uid;
      setUserId(uid);
      // Rebind the exported refs now that the uid is known.
      locationsRef = databaseRef.child(uid).child("locations");
      itemsRef = databaseRef.child(uid).child("items");
      setIsLoggedIn(true);
    } else {
      setUserId(null);
      setIsLoggedIn(false);
    }
  });
};
export const storage = firebase.storage();
export const db = firebase.database();
export const databaseRef = firebase.database().ref();

// NOTE(review): these are evaluated once at module load while `uid` is
// still '', so both start out as `false`. checkLoggedInStatus reassigns
// them once auth resolves — consumers must tolerate the falsy initial
// value. Confirm whether lazy getters would be a better fit.
export let locationsRef = uid !== '' && databaseRef.child(uid).child("locations");
export let itemsRef = uid !== '' && databaseRef.child(uid).child("items");

// One-time read of a single item belonging to the signed-in user.
export const getItemDataOnce = (itemId: string) => {
  return db.ref(`/${uid}/items/${itemId}`).once('value').then(function (snapshot) {
    const item = snapshot.val();
    return item;
  });
}
// Log the download URLs of every file stored under "images".
// BUGFIX: the previous version pushed URLs inside fire-and-forget
// getDownloadURL() callbacks and logged in a .then chained on listAll()
// only — the log ran before any URL promise settled, so `fileUrls` was
// (almost always) empty. Promise.all resolves every URL first.
export const logUserStorageFileUrls = () => {
  storage
    .ref("images")
    .listAll()
    .then((res) =>
      Promise.all((res.items ?? []).map((itemRef) => itemRef.getDownloadURL()))
    )
    .then((fileUrls) => {
      console.warn({ fileUrls });
    });
}
|
<reponame>joelgtsantos/BestBuyApp
// Appears to be auto-generated action-sequence glue (see the embedded
// warning): each key binds a widget event to its form-controller callback.
// Only comments were added here; the generated keys are untouched.
define({
    /*
    This is an auto generated file and any modifications to it may result in corruption of the action sequence.
    */
    /** onClickBtnBack defined for HeaderJoel **/
    AS_UWI_g705f89ccccf4d8ca6bf0b005c9b2501: function AS_UWI_g705f89ccccf4d8ca6bf0b005c9b2501(eventobject) {
        var self = this;
        this.onBtnBackClick();
    },
    /** onClickBtnSearch defined for HeaderJoel **/
    AS_UWI_a1289c20276d4b4ab5e22e65a85101ff: function AS_UWI_a1289c20276d4b4ab5e22e65a85101ff(eventobject) {
        var self = this;
        this.onClickBtnSearch();
    },
    /** onRowClick defined for sgmCategories **/
    AS_Segment_e216b94e64b140a0b33d3495915c6005: function AS_Segment_e216b94e64b140a0b33d3495915c6005(eventobject, sectionNumber, rowNumber) {
        var self = this;
        this.onCategoryClick(rowNumber);
    },
    /** onTouchStart defined for imgReturn **/
    AS_Image_f95c257016484171b47c03c2f035d9a0: function AS_Image_f95c257016484171b47c03c2f035d9a0(eventobject, x, y) {
        var self = this;
        this.onClickBtnClose();
    },
    /** onTouchStart defined for btnSearch **/
    AS_Button_a9914bca47c64c99990bb086d18ffb1d: function AS_Button_a9914bca47c64c99990bb086d18ffb1d(eventobject, x, y) {
        var self = this;
        this.onClickBtnSearchP();
    },
    /** postShow defined for frmMain **/
    AS_Form_dda301d161704c0594d8f54dc594af84: function AS_Form_dda301d161704c0594d8f54dc594af84(eventobject) {
        var self = this;
        this.moveSegmentAnimation();
    }
});
from LPV import ErrorType
from LPV import LPV_Exception
from LPV import (LPV_Lexer,
Token,
TokenType)
from LPV import LPV_Parser, Node, NodeVisitor
class ERROR(ErrorType):
    """Error categories for the calculator, extending the framework's set
    (e.g. SYNTAX is inherited from ErrorType and used by the parser)."""
    # Category for arithmetic failures.
    MATH = "MathError"
class T_type:
    """Token types shared by the calculator's lexer and parser."""
    # Numeric literal (int or float).
    LITERAL = TokenType('LITERAL')
    # Arithmetic operators.
    PLUS, MINUS, MULT, DIV = (
        TokenType('PLUS'),
        TokenType('MINUS'),
        TokenType('MULT'),
        TokenType('DIV')
    )
    # Grouping parentheses.
    LPAREN = TokenType('LPAREN')
    RPAREN = TokenType('RPAREN')
class Lexer(LPV_Lexer):
    """Tokenizer for the calculator: numbers, + - * /, and parentheses."""

    def __init__(self):
        # Character-class predicates used by the rules below.
        self.DIGIT = lambda c: c.isdigit()
        self.WS = lambda c: c.isspace()
        # (predicate-or-charset, handler) pairs, consulted in order.
        self.rules = (
            (self.WS, self.lex_whitespace),
            (self.DIGIT, self.lex_num),
            (["+", "-", "*", "/"], self.lex_op),
            (["(", ")"], self.lex_paren)
        )
        super().__init__()

    def lex_whitespace(self):
        # Whitespace produces no token; just consume it.
        self.skip_while(self.WS)
        return None

    def lex_num(self):
        # Integer part, then optional fractional part -> int or float literal.
        self.enter_while(self.DIGIT)
        if self.char == ".":
            self.enter()
            self.enter_while(self.DIGIT)
            return Token(T_type.LITERAL, float(self.clear()))
        return Token(T_type.LITERAL, int(self.clear()))

    def lex_op(self):
        # Single-character arithmetic operator tokens.
        self.enter()
        if self.chars == "+":
            return Token(T_type.PLUS, self.clear())
        if self.chars == "-":
            return Token(T_type.MINUS, self.clear())
        if self.chars == "*":
            return Token(T_type.MULT, self.clear())
        if self.chars == "/":
            return Token(T_type.DIV, self.clear())

    def lex_paren(self):
        # Left or right parenthesis token.
        if self.char == "(":
            self.enter()
            return Token(T_type.LPAREN, self.clear())
        self.enter()
        return Token(T_type.RPAREN, self.clear())
class Parser(LPV_Parser):
    """Recursive-descent parser with conventional precedence:

    expr   : term ((PLUS | MINUS) term)*
    term   : factor ((MULT | DIV) factor)*
    factor : LITERAL | (PLUS | MINUS) expr | LPAREN expr RPAREN
    """

    def __init__(self):
        # Entry point used by the framework: parse a full expression.
        self.start = self.parse_expr, ()
        super().__init__()

    def parse_factor(self):
        # Literal, unary +/- (applied to a full expr), or parenthesised expr.
        lc = self.get_lc()
        if self.token == T_type.LITERAL:
            return LiteralValue(
                self.eat().value,
                *lc
            )
        elif self.check_type((T_type.PLUS, T_type.MINUS)):
            return UnaryOp(
                self.eat(),
                self.parse_expr(),
                *lc
            )
        elif self.token == T_type.LPAREN:
            self.eat()
            node = self.parse_expr()
            self.eat(T_type.RPAREN)
            return node
        else:
            # Anything else is a syntax error; report EOF explicitly.
            msg = "Unexpected "
            if self.is_eof():
                msg += "EOF"
            else:
                msg += f"'{self.token.value}'"
            self.throw_error(
                ERROR.SYNTAX,
                msg+", expected valid expression"
            )

    def parse_term(self):
        # factor ((* | /) factor)* — binds tighter than +/-.
        lc = self.get_lc()
        node = self.parse_factor()
        while self.check_type((T_type.MULT,T_type.DIV)):
            node = BinOp(
                node,
                self.eat(),
                self.parse_factor(),
                *lc
            )
        return node

    def parse_expr(self):
        # term ((+ | -) term)* — lowest precedence level.
        lc = self.get_lc()
        node = self.parse_term()
        while self.check_type((T_type.PLUS,T_type.MINUS)):
            node = BinOp(
                node,
                self.eat(),
                self.parse_term(),
                *lc
            )
        return node
class BinOp(Node):
    """AST node for a binary operation: ``left <op> right``."""
    def __init__(self, left:Node, op:Token, right:Node, line: int, col: int):
        self.left, self.op, self.right = left, op, right
        super().__init__(line, col=col)
class LiteralValue(Node):
    """AST leaf node holding a numeric literal (int or float)."""
    def __init__(self, value, line: int, col: int):
        self.value = value
        super().__init__(line, col=col)
class UnaryOp(Node):
    """AST node for unary +/-: ``left`` is the operator token,
    ``right`` the operand subtree."""
    def __init__(self, left:Token, right:Node, line: int, col: int):
        self.left, self.right = left, right
        super().__init__(line, col=col)
class Interpreter(NodeVisitor):
    """Tree-walking evaluator for the calculator AST."""

    def visit_BinOp(self, node: BinOp):
        # Evaluate both operands, then apply the operator.
        left, right = self.visits(node.left, node.right)
        if node.op == T_type.PLUS:
            return left+right
        if node.op == T_type.MINUS:
            return left-right
        if node.op == T_type.MULT:
            return left*right
        if node.op == T_type.DIV:
            # True division (int / int yields float); division by zero
            # raises ZeroDivisionError here.
            return left/right

    def visit_LiteralValue(self, node: LiteralValue):
        return node.value

    def visit_UnaryOp(self, node: UnaryOp):
        # Apply unary plus/minus to the evaluated operand.
        right = self.visit(node.right)
        if node.left == T_type.PLUS:
            return +right
        if node.left == T_type.MINUS:
            return -right
# Shared singleton pipeline: lexer -> parser -> tree-walking interpreter.
calc_lexer, calc_parser, calc = Lexer(), Parser(), Interpreter()


def do_nothing(_):
    # Crash handler that discards its argument (silences error reporting).
    pass


def calculate(math: str, ignore_error_crash:bool=True):
    """Lex, parse and evaluate ``math``; returns the numeric result,
    or None when ``math`` is empty/whitespace.

    NOTE(review): with ``ignore_error_crash=True`` the crash handler is a
    no-op; with False, errors are printed — confirm that this matches the
    semantics the parameter name suggests.
    """
    handler = print if ignore_error_crash is False else do_nothing
    if math and not math.isspace():
        return calc.run(
            calc_parser.parse(
                calc_lexer.lex(
                    math, crash_handler=handler
                ), handler
            ), math, handler
        )
if __name__ == "__main__":
    # Minimal REPL: read a line, evaluate it, print the result.
    while True:
        try:
            code = input(">>> ")
            if code and not code.isspace():
                t_tree = calc_lexer.lex(code)
                n_tree = calc_parser.parse(t_tree)
                print(calc.run(n_tree, code))
        except LPV_Exception as e:
            # Language-level errors are reported; the REPL keeps running.
            print(e)
        except (KeyboardInterrupt, EOFError):
            # Ctrl-C / Ctrl-D terminates the loop cleanly.
            break
|
import React, { useState, useEffect } from 'react'
import { firebase } from '../../instances'
import AuthContext from '../AuthContext'
// Context provider that tracks Firebase auth state and exposes
// sign-in/sign-up/sign-out helpers to descendants via AuthContext.
const AuthProvider = (props) => {
  const [currentUser, setCurrentUser] = useState(null)
  const [isLoggedIn, setIsLoggedIn] = useState(false)
  const [isLoading, setIsLoading] = useState(true)
  const [uid, setUid] = useState('')

  useEffect(() => {
    // Subscribe once on mount; the returned unsubscribe runs on unmount.
    const unsubscribe = firebase.auth().onAuthStateChanged(async (user) => {
      setCurrentUser(user)
      if (user) {
        setIsLoggedIn(true)
        setUid(user.uid)
      } else {
        setIsLoggedIn(false)
      }
      // The first auth event ends the initial loading phase.
      setIsLoading(false)
    })
    return unsubscribe
  }, [])

  // Sign an existing user in with email/password. Resolves with a plain
  // {email, uid, name} object; rejects with the Firebase error on failure.
  const signIn = async (email, password) => {
    try {
      const userCred = await firebase.auth().signInWithEmailAndPassword(email, password)
      localStorage.setItem('uid', userCred.user.uid)
      return Promise.resolve({
        email: userCred.user.email,
        uid: userCred.user.uid,
        name: userCred.user.displayName,
      })
    } catch (err) {
      return Promise.reject(err)
    }
  }

  // Create a new email/password account (same result shape as signIn).
  const signUp = async (email, password) => {
    try {
      const userCred = await firebase.auth().createUserWithEmailAndPassword(email, password)
      localStorage.setItem('uid', userCred.user.uid)
      return Promise.resolve({
        email: userCred.user.email,
        uid: userCred.user.uid,
        name: userCred.user.displayName,
      })
    } catch (err) {
      return Promise.reject(err)
    }
  }

  // Sign out. NOTE(review): the 'uid' key written to localStorage by
  // signIn/signUp is not removed here — confirm whether it should be.
  const signOut = async () => {
    try {
      await firebase.auth().signOut()
      return Promise.resolve(true)
    } catch (err) {
      return Promise.reject(err)
    }
  }

  // Value object handed to consumers of AuthContext.
  const obj = {
    currentUser,
    isLoggedIn,
    isLoading,
    uid,
    signIn,
    signOut,
    signUp,
  }

  const { children } = props
  return <AuthContext.Provider value={obj}>{children}</AuthContext.Provider>
}

export default AuthProvider
|
#!/usr/bin/env bash
# Run the test/lint suite: pytest (with coverage), mypy, black, isort.
set -e
set -x

# Prints "major.minor" of the active interpreter (used for the black gate).
export VERSION_SCRIPT="import sys; print('%s.%s' % sys.version_info[0:2])"
export PYTHON_VERSION=$(python -c "$VERSION_SCRIPT")

export PYTHONPATH=./docs/src
# BUGFIX: quote "$@" so extra pytest arguments containing spaces survive
# word splitting (unquoted ${@} splits them).
pytest --cov=fastapi --cov=tests --cov=docs/src --cov-report=term-missing "$@"
mypy fastapi --disallow-untyped-defs
if [ "${PYTHON_VERSION}" = '3.7' ]; then
    echo "Skipping 'black' on 3.7. See issue https://github.com/ambv/black/issues/494"
else
    black fastapi tests --check
fi
isort --multi-line=3 --trailing-comma --force-grid-wrap=0 --combine-as --line-width 88 --recursive --check-only fastapi tests
|
set -e

# Preconditions check: wg and uci must exist and there must be no
# uncommitted UCI changes.
# BUGFIX: with `set -e`, the old `which foo; if [ $? != 0 ]` pattern
# aborted the script the moment `which` failed, so the error messages
# below were unreachable. Test the command status inside `if` instead.
if ! which wg > /dev/null; then
    echo "wg not found"
    exit 1
fi

if ! which uci > /dev/null; then
    echo "uci not found"
    exit 1
fi

# Report the installed WireGuard tool version (first line only).
wg -v | head -1

# Quote the command substitution so the comparison is a single word.
if [ "$(uci changes | wc -l)" != 0 ]; then
    echo "Have uncommited changes"
    exit 1
fi

exit 0
|
#!/bin/bash
# original at https://github.com/confluentinc/cp-docker-images/blob/5.3.1-post/examples/kafka-cluster-ssl/secrets/create-certs.sh
# Generates a self-signed CA, a signed keystore/truststore per broker, and a
# separate client key pair — everything protected with password "ktktkt".

set -o nounset \
    -o errexit \
    -o verbose \
    -o xtrace

# Generate CA key
openssl req -new -x509 -keyout snakeoil-ca-1.key -out snakeoil-ca-1.crt -days 365 -subj '/CN=localhost/OU=TEST/O=KT' -passin pass:ktktkt -passout pass:ktktkt

for i in broker1
do
	echo $i
	# Broker private key + self-signed cert inside a JKS keystore.
	keytool -genkey -noprompt \
				 -alias $i \
				 -dname "CN=localhost, OU=TEST, O=KT" \
				 -keystore kafka.$i.keystore.jks \
				 -keyalg RSA \
				 -storepass ktktkt \
				 -keypass ktktkt

	# Create CSR, sign the key and import back into keystore
	keytool -keystore kafka.$i.keystore.jks -alias $i -certreq -file $i.csr -storepass ktktkt -keypass ktktkt

	openssl x509 -req -CA snakeoil-ca-1.crt -CAkey snakeoil-ca-1.key -in $i.csr -out $i-ca1-signed.crt -days 9999 -CAcreateserial -passin pass:ktktkt

	keytool -keystore kafka.$i.keystore.jks -alias CARoot -import -file snakeoil-ca-1.crt -storepass ktktkt -keypass ktktkt

	keytool -keystore kafka.$i.keystore.jks -alias $i -import -file $i-ca1-signed.crt -storepass ktktkt -keypass ktktkt

	# Create truststore and import the CA cert.
	keytool -keystore kafka.$i.truststore.jks -alias CARoot -import -file snakeoil-ca-1.crt -storepass ktktkt -keypass ktktkt

	# Credential files holding the shared password.
	echo "ktktkt" > ${i}_sslkey_creds
	echo "ktktkt" > ${i}_keystore_creds
	echo "ktktkt" > ${i}_truststore_creds
done

# generate public/private key pair for kt
openssl genrsa -out kt-test.key 2048
openssl req -new -key kt-test.key -out kt-test.csr -subj '/CN=localhost/OU=TEST/O=KT'
openssl x509 -req -days 9999 -in kt-test.csr -CA snakeoil-ca-1.crt -CAkey snakeoil-ca-1.key -CAcreateserial -out kt-test.crt
|
#!/bin/sh
# c 2018,2019,2021
# Build and install a GCC cross compiler (gcc + libgcc) for a target triple.
#   $1 = gcc source tarball    $2/$4 = install prefix    $3 = target triple
#   $5 = fix mode: "fixpic" (strip PIC flags from libgcc) or "nofix"
#   $6 = optional CFLAGS_FOR_TARGET
# BUGFIX: the shebang read "#/bin/sh" (missing '!'), making it a plain
# comment — the kernel could not use it to select an interpreter.
set -e
FILE=$1
TARGET=$3
PREFIX=$2/$4
FIX=$5
OPTS=$6
echo export PATH="$PREFIX/bin:$PATH"
export PATH="$PREFIX/bin:$PATH"
echo tar xf $FILE
tar xf $FILE
echo mkdir build-gcc
mkdir build-gcc
echo cd build-gcc
cd build-gcc
echo ../gcc-*/configure --target=$TARGET --disable-nls --enable-languages=c,c++ --without-headers --prefix=$PREFIX
../gcc-*/configure --target=$TARGET --disable-nls --enable-languages=c,c++ --without-headers --prefix=$PREFIX
echo make all-gcc
make all-gcc
if [ "x$FIX" = xfixpic ]
then
  # First libgcc build is allowed to fail; we then disable PIC and retry.
  if [ "x$OPTS" = x ]
  then
    echo aux-gcc.sh: warning: fixpic with no opts
    echo "make all-target-libgcc || true"
    make all-target-libgcc || true
  else
    echo "make all-target-libgcc CFLAGS_FOR_TARGET=\"$OPTS\" || true"
    make all-target-libgcc CFLAGS_FOR_TARGET="$OPTS" || true
  fi
  # cc1: error: code model kernel does not support PIC mode
  echo sed -i 's/PICFLAG/DISABLED_PICFLAG/g' $TARGET/libgcc/Makefile
  sed -i 's/PICFLAG/DISABLED_PICFLAG/g' $TARGET/libgcc/Makefile
elif [ "x$FIX" = xnofix ]
then
  true
else
  echo error: unknown fix string: $FIX
  exit 11
fi
if [ "x$OPTS" = x ]
then
  echo make all-target-libgcc
  make all-target-libgcc
else
  echo make all-target-libgcc CFLAGS_FOR_TARGET="$OPTS"
  make all-target-libgcc CFLAGS_FOR_TARGET="$OPTS"
fi
echo make install-gcc
make install-gcc
echo make install-target-libgcc
make install-target-libgcc
echo ok
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.