text
stringlengths
1
1.05M
/**
 * iniciar.js
 * ----------
 * Entry point: wires together the server and its dependencies
 * (database, router and request handlers) and starts listening.
 *
 * Section # 1
 * Computer Networks I (CI-4835)
 * Universidad Simon Bolivar, 2016.
 */

// Dependencies
var servidor = require("./servidor");
var rutas = require("./router");
var manejadorSolicitudes = require("./manejador");
var baseDatos = require("./basedatos");
var moment = require('moment');

// Route -> handler table. Same mappings as before, declared as a single
// object literal instead of one assignment per route.
var manejador = {
  "/": manejadorSolicitudes.login,
  "/login": manejadorSolicitudes.login,
  "/subirLogin": manejadorSolicitudes.subirLogin,
  "/registro": manejadorSolicitudes.registro,
  "/subirRegistro": manejadorSolicitudes.subirRegistro,
  "/transferir": manejadorSolicitudes.transferir,
  "/subirTransferir": manejadorSolicitudes.subirTransferir,
  "/listaUsuarios": manejadorSolicitudes.listaUsuarios,
  "/balance": manejadorSolicitudes.balance,
  "/subirBalance": manejadorSolicitudes.subirBalance,
  "/favicon.ico": manejadorSolicitudes.favicon,
  "/logo.png": manejadorSolicitudes.logo
};

// Bring the database up before accepting requests.
baseDatos.iniciar();

// Log the startup timestamp in Spanish locale, then start the server.
moment.locale('es');
var fecha = moment().format('MMMM D YYYY, h:mm:ss a');
console.log("Servidor iniciado: " + fecha);

servidor.iniciar(rutas.rutas, manejador);
#!/bin/bash
# Execute: sudo bash ./deletetopo.sh
# ----------------------- DELETE TOPOLOGY STEP --------------------------------
# Remove both bridges (br1, br2) from switch ovs1, one sdntool call per bridge.
for bridge in br1 br2; do
    sudo ./sdntool bridge --delete --switchname ovs1 --bridgename "$bridge"
done
// The Vue build version to load with the `import` command
// (runtime-only or standalone) has been set in webpack.base.conf with an alias.
import Vue from 'vue'
import App from './App'
import router from './router'
import MintUI from 'mint-ui'
import 'mint-ui/lib/style.css'
import axios from 'axios'
import wx from 'weixin-js-sdk'
import API from './components/untils/API.js'

Vue.use(MintUI);
Vue.config.productionTip = false
// Send cookies with every axios request (session-based auth).
axios.defaults.withCredentials = true

/* eslint-disable no-new */
new Vue({
  el: '#app',
  router,
  template: '<App/>',
  components: { App },
  data () {
    return {
    }
  },
  methods: {
    // Current page URL with the hash fragment (and any trailing '?') removed,
    // as required for WeChat JS-SDK signature verification.
    url: function () {
      let url = location.href.substring(0, location.href.length - location.hash.length)
      return url.endsWith('?') ? url.substring(0, url.length - 1) : url
    },
    // Fetches the JS-SDK signature config from the backend and initializes
    // the WeChat SDK for this page.
    configOfWX: function() {
      axios.get('/api/v1/jsconfig.json?shortUrl=philosophy&url=' + this.url() + this.$router.currentRoute.path, {
      })
      .then(function (response) {
        let json = response.data;
        console.log(json)
        wx.config({
          debug: true, // NOTE(review): leave on only while testing signatures
          appId: json.appId,
          timestamp: json.timestamp,
          nonceStr: json.nonceStr,
          signature: json.signature,
          jsApiList: ['chooseImage']
        });
        // After wx.config succeeds, JS-SDK APIs must be called inside wx.ready.
        wx.ready(function() {
        });
        // FIX: the SDK exposes wx.error for config-failure callbacks;
        // the previous wx.success(...) is not a JS-SDK function and threw
        // a TypeError that was swallowed by the .catch below.
        wx.error(function (res) {
          console.log('haha' + JSON.stringify(res))
        });
      })
      .catch(function (error){
        console.log(JSON.stringify(error));
      })
    },
  },
  beforeCreate () {
  },
  created () {
    this.configOfWX();
  },
})
package by.kam32ar.server.model;

import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import by.kam32ar.server.helper.AdvancedConnection;
import by.kam32ar.server.helper.DriverDelegate;
import by.kam32ar.server.helper.NamedParameterStatement;
import by.kam32ar.server.logic.Message;

import com.mysql.jdbc.Driver;
import com.mysql.jdbc.Statement;

/**
 * Database abstraction class: persists and retrieves chat {@link Message}s
 * through two named-parameter statements loaded from configuration.
 */
public class DatabaseDataManager implements DataManager {

    public DatabaseDataManager(Properties properties) throws Exception {
        initDatabase(properties);
    }

    // Prepared once in initDatabase; shared by all calls, hence the
    // synchronization on the public methods below.
    private NamedParameterStatement queryInsertMessage;
    private NamedParameterStatement querySelectMessage;

    /**
     * Loads the JDBC driver (optionally from an external jar), opens the
     * connection and prepares the configured statements.
     */
    private void initDatabase(Properties properties) throws Exception {
        // Load driver
        String driver = properties.getProperty("database.driver");
        if (driver != null) {
            String driverFile = properties.getProperty("database.driverFile");
            if (driverFile != null) {
                // Driver class lives in an external jar: load it with its own
                // classloader and register through a delegate so DriverManager
                // accepts a driver from a foreign classloader.
                // NOTE: the classloader is intentionally NOT closed — the
                // driver stays in use for the connection's lifetime.
                URL url = new URL("jar:file:"
                        + new File(driverFile).getAbsolutePath() + "!/");
                URLClassLoader cl = new URLClassLoader(new URL[] { url });
                // getDeclaredConstructor().newInstance() replaces the
                // deprecated Class.newInstance() (which swallowed checked
                // constructor exceptions).
                Driver d = (Driver) Class.forName(driver, true, cl)
                        .getDeclaredConstructor().newInstance();
                DriverManager.registerDriver(new DriverDelegate(d));
            } else {
                Class.forName(driver);
            }
        }

        // Connect database
        String url = properties.getProperty("database.url");
        String user = properties.getProperty("database.user");
        String password = properties.getProperty("database.password");
        AdvancedConnection connection = new AdvancedConnection(url, user,
                password);

        // Load statements from configuration
        String query;
        query = properties.getProperty("database.insertMessage");
        if (query != null) {
            queryInsertMessage = new NamedParameterStatement(connection, query);
        }
        query = properties.getProperty("database.selectMessages");
        if (query != null) {
            querySelectMessage = new NamedParameterStatement(connection, query);
        }
    }

    /**
     * Inserts a message and returns its generated key, or 0 when no insert
     * statement is configured or no key was generated.
     */
    @Override
    public synchronized int insertMessage(Message message) throws Exception {
        if (queryInsertMessage == null) {
            return 0;
        }
        queryInsertMessage.prepare(Statement.RETURN_GENERATED_KEYS);
        queryInsertMessage = assignBattleVariables(queryInsertMessage, message);
        queryInsertMessage.executeUpdate();
        ResultSet result = queryInsertMessage.getGeneratedKeys();
        if (result != null) {
            // FIX: close the generated-keys ResultSet (previously leaked).
            try {
                if (result.next()) {
                    return result.getInt(1);
                }
            } finally {
                result.close();
            }
        }
        return 0;
    }

    /**
     * Returns all messages of the given room.
     * FIX: now synchronized — it shares querySelectMessage across callers,
     * matching insertMessage's locking — and closes its ResultSet.
     */
    @Override
    public synchronized List<Message> selectMessages(String room)
            throws Exception {
        List<Message> messages = new ArrayList<Message>();
        querySelectMessage.prepare();
        querySelectMessage.setString("room", room);
        ResultSet resultSet = querySelectMessage.executeQuery();
        try {
            while (resultSet.next()) {
                Message message = new Message();
                message.setId(resultSet.getInt("id"));
                message.setRoom(resultSet.getString("room"));
                message.setTime(resultSet.getInt("time"));
                message.setMessage(resultSet.getString("msg"));
                message.setNick(resultSet.getString("name"));
                messages.add(message);
            }
        } finally {
            resultSet.close();
        }
        return messages;
    }

    /**
     * Binds a message's fields to the statement's named parameters.
     * (Name kept for compatibility; "Battle" looks like a copy-paste
     * leftover — TODO confirm and rename in a dedicated change.)
     */
    private NamedParameterStatement assignBattleVariables(
            NamedParameterStatement statement, Message message)
            throws SQLException {
        statement.setInt("time", message.getTime());
        statement.setString("name", message.getNick());
        statement.setString("msg", message.getMessage());
        statement.setString("room", message.getRoom());
        return statement;
    }
}
use std::fs;
use std::io;
use std::io::BufRead;
use std::path::Path;

/// Run mode parsed from the config file.
#[derive(Debug, PartialEq)]
pub enum Mode {
    Normal,
    Debug,
}

/// Parsed configuration.
pub struct Config {
    pub mode: Mode,
}

/// Parses a mode string ("Normal" / "Debug"); surrounding whitespace is ignored.
fn parse_mode(mode_str: &str) -> Result<Mode, String> {
    match mode_str.trim() {
        "Normal" => Ok(Mode::Normal),
        "Debug" => Ok(Mode::Debug),
        _ => Err(format!("Invalid mode value: {}", mode_str)),
    }
}

/// Reads `file_path` and extracts the `mode = <value>` entry.
///
/// FIX: previously any line merely *starting* with "mode" (e.g. a key named
/// "model") was validated as a mode line and rejected with
/// "Invalid file format". Such lines are now skipped; only a key exactly
/// equal to "mode" is parsed. A malformed mode line (e.g. bare "mode") still
/// yields "Invalid file format". If "mode" appears several times, the last
/// occurrence wins (unchanged behavior).
fn parse_config(file_path: &str) -> Result<Config, String> {
    let path = Path::new(file_path);
    if !path.exists() {
        return Err("File does not exist".to_string());
    }

    let file = match fs::File::open(&path) {
        Ok(file) => file,
        Err(_) => return Err("Failed to open file".to_string()),
    };
    let reader = io::BufReader::new(file);

    let mut mode_value = String::new();
    for line in reader.lines() {
        let line = match line {
            Ok(line) => line,
            Err(_) => return Err("Failed to read line".to_string()),
        };
        if line.starts_with("mode") {
            let parts: Vec<&str> = line.split('=').map(|s| s.trim()).collect();
            // A different key that shares the "mode" prefix — not ours; skip
            // instead of erroring out.
            if parts[0] != "mode" {
                continue;
            }
            if parts.len() != 2 {
                return Err("Invalid file format".to_string());
            }
            mode_value = parts[1].to_string();
        }
    }

    if mode_value.is_empty() {
        return Err("Mode value not found in the file".to_string());
    }

    let mode = parse_mode(&mode_value)?;
    Ok(Config { mode })
}

fn main() {
    match parse_config("config.txt") {
        Ok(config) => println!("Parsed config: {:?}", config),
        Err(err) => println!("Error: {}", err),
    }
}
package com.codernauti.sweetie.geogift;

import android.content.Context;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v7.app.AppCompatActivity;
import android.support.v7.widget.Toolbar;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.FrameLayout;
import android.widget.ImageView;
import android.widget.TextView;

import com.bumptech.glide.Glide;
import com.bumptech.glide.load.engine.DiskCacheStrategy;
import com.codernauti.sweetie.R;

import java.util.Random;

/**
 * Fragment that renders an opened ("done") geogift in one of three visual
 * forms, chosen by the gift's type: a post-it note (message), a polaroid
 * (photo + message) or a heart picture. All three widget groups are inflated
 * hidden and only the one matching the gift type is shown.
 */
public class GeogiftDoneFragment extends Fragment implements GeogiftDoneContract.View{

    private static final String TAG = "GeogiftDoneFragment";
    // Gift type selectors used by drawGeogift (must match GeogiftVM.getType()
    // values — TODO confirm against GeogiftVM).
    private static final int MESSAGE_SELECTION = 0;
    private static final int PHOTO_SELECTION = 1;
    private static final int HEART_SELECTION = 2;

    private Toolbar mToolBar;
    // polaroid container
    private FrameLayout mPolaroidFrame;
    private ImageView mImageThumb;
    private TextView mMessagePolaroidText;
    private ImageView mPinPolaroid;
    // postit
    private FrameLayout mPostitFrame;
    private TextView mMessagePostitText;
    private ImageView mPinPostit;
    // heart
    private ImageView mHeartPic;

    private Context mContext;
    // NOTE(review): titleGeogift is never assigned in this class, so the
    // toolbar title set in onCreateView is null until updateGeogift runs.
    private String titleGeogift;
    // Used to give the post-it/polaroid a small random tilt.
    Random random;
    int degree;
    // NOTE(review): geoItem is never read or written here — looks vestigial.
    private GeogiftVM geoItem = null;
    private GeogiftDoneContract.Presenter mPresenter;

    /** Factory method: creates the fragment with the given arguments bundle. */
    public static GeogiftDoneFragment newInstance(Bundle bundle) {
        GeogiftDoneFragment newGeogiftDoneFragment = new GeogiftDoneFragment();
        newGeogiftDoneFragment.setArguments(bundle);
        return newGeogiftDoneFragment;
    }

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        mContext = getContext();
    }

    @Override
    public void setPresenter(GeogiftDoneContract.Presenter presenter) {
        mPresenter = presenter;
    }

    /**
     * Inflates the layout, installs the toolbar as the activity's action bar
     * and hides every gift widget; drawGeogift later reveals the relevant set.
     */
    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container,
                             Bundle savedInstanceState) {
        final ViewGroup root = (ViewGroup) inflater.inflate(R.layout.geogift_done_fragment, container, false);

        // initialize toolbar
        mToolBar = (Toolbar) root.findViewById(R.id.geogift_done_toolbar);
        AppCompatActivity parentActivity = (AppCompatActivity) getActivity();
        parentActivity.setSupportActionBar(mToolBar);
        parentActivity.getSupportActionBar().setDisplayHomeAsUpEnabled(true);
        parentActivity.getSupportActionBar().setTitle(titleGeogift);

        // polaroid
        mPolaroidFrame = (FrameLayout) root.findViewById(R.id.geogift_done_polaroid_container);
        mPolaroidFrame.setVisibility(View.GONE);
        mImageThumb = (ImageView) root.findViewById(R.id.geogift_done_image_thumb);
        mImageThumb.setVisibility(View.GONE);
        mMessagePolaroidText = (TextView) root.findViewById(R.id.geogift_done_polaroid_text);
        mMessagePolaroidText.setVisibility(View.GONE);
        mPinPolaroid = (ImageView) root.findViewById(R.id.geogift_done_pin_polaroid);
        mPinPolaroid.setVisibility(View.GONE);

        // postit
        mPostitFrame = (FrameLayout) root.findViewById(R.id.geogift_done_postit_container);
        mPostitFrame.setVisibility(View.GONE);
        mMessagePostitText = (TextView) root.findViewById(R.id.geogift_done_postit_text);
        mMessagePostitText.setVisibility(View.GONE);
        mPinPostit = (ImageView) root.findViewById(R.id.geogift_done_pin_postit);
        mPinPostit.setVisibility(View.GONE);

        // heart
        mHeartPic = (ImageView) root.findViewById(R.id.geogift_done_heart_picture);
        mHeartPic.setVisibility(View.GONE);

        return root;
    }

    /**
     * Reveals and populates the widget set matching the gift type.
     * The post-it/polaroid gets a random rotation of -3..+2 degrees
     * (nextInt(6) - 3) for a hand-placed look.
     */
    public void drawGeogift(GeogiftVM geoitem, int type){
        switch ( type ){
            case MESSAGE_SELECTION:
                mPostitFrame.setVisibility(View.VISIBLE);
                random = new Random();
                degree = random.nextInt(6) -3;
                mPostitFrame.setRotation(degree);
                mMessagePostitText.setVisibility(View.VISIBLE);
                mPinPostit.setVisibility(View.VISIBLE);
                mMessagePostitText.setText(geoitem.getMessage());
                //mMessagePostitText.setText(String.valueOf(degree));
                break;
            case PHOTO_SELECTION:
                mPolaroidFrame.setVisibility(View.VISIBLE);
                random = new Random();
                degree = random.nextInt(6) -3;
                mPolaroidFrame.setRotation(degree);
                mImageThumb.setVisibility(View.VISIBLE);
                mMessagePolaroidText.setVisibility(View.VISIBLE);
                mPinPolaroid.setVisibility(View.VISIBLE);
                // Load the photo from its storage URI, caching both the
                // original and resized versions.
                Glide.with(this).load(geoitem.getUriStorage())
                        .diskCacheStrategy(DiskCacheStrategy.ALL)
                        .into(mImageThumb);
                mMessagePolaroidText.setText(geoitem.getMessage());
                break;
            case HEART_SELECTION:
                mHeartPic.setVisibility(View.VISIBLE);
                break;
        }
    }

    /** Contract callback: refreshes the toolbar title and re-renders the gift. */
    @Override
    public void updateGeogift(GeogiftVM geoitem) {
        Log.d(TAG, "updateGeogift");
        mToolBar.setTitle(geoitem.getTitle());
        drawGeogift(geoitem, geoitem.getType());
        // NOTE(review): both branches below are intentionally(?) empty —
        // triggered/untriggered handling was never implemented. TODO confirm.
        if(geoitem.getIsTriggered()){
        }
        else{
        }
    }

    @Override
    public void onResume() {
        super.onResume();
    }

    @Override
    public void onPause() {
        super.onPause();
    }
}
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

/**
 * Minimal leveled logger that prints timestamped messages to stdout.
 * Messages below the configured level are suppressed; the level can be
 * changed at runtime via {@link #setLogLevel(LogLevel)}.
 */
public class Logger {

    // FIX: DateTimeFormatter is immutable and thread-safe. The static
    // SimpleDateFormat it replaces was shared by all Logger instances and is
    // NOT thread-safe — concurrent log() calls could corrupt timestamps.
    private static final DateTimeFormatter DATE_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");

    private LogLevel logLevel;

    /** @param logLevel minimum level that will actually be printed */
    public Logger(LogLevel logLevel) {
        this.logLevel = logLevel;
    }

    public void setLogLevel(LogLevel logLevel) {
        this.logLevel = logLevel;
    }

    public void debug(String message) {
        log(LogLevel.DEBUG, message);
    }

    public void info(String message) {
        log(LogLevel.INFO, message);
    }

    public void warn(String message) {
        log(LogLevel.WARN, message);
    }

    public void error(String message) {
        log(LogLevel.ERROR, message);
    }

    // Filtering relies on enum declaration order: DEBUG < INFO < WARN < ERROR.
    private void log(LogLevel level, String message) {
        if (level.ordinal() >= logLevel.ordinal()) {
            String formattedMessage = String.format("[%s] %s %s", level,
                    DATE_FORMAT.format(LocalDateTime.now()), message);
            System.out.println(formattedMessage);
        }
    }

    public enum LogLevel {
        DEBUG, INFO, WARN, ERROR
    }
}
// Copyright 2020 Self Group Ltd. All Rights Reserved.
import { v4 as uuidv4 } from 'uuid'
import { QRCode, ErrorCorrectLevel, QRNumber, QRAlphaNum, QR8BitByte, QRKanji } from 'qrcode-generator-ts/js'

import IdentityService from './identity-service'
import Jwt from './jwt'
import Messaging from './messaging'
import Fact from './fact'
import * as message from './msgproto/message_generated'
import * as mtype from './msgproto/types_generated'
import FactResponse from './fact-response'
import MessagingService from './messaging-service'
import Crypto from './crypto'

import { logging, Logger } from './logging'

import * as flatbuffers from 'flatbuffers'

// NOTE(review): this type alias is never used in this file.
type MessageProcessor = (n: number) => any

const logger = logging.getLogger('core.self-sdk')

/**
 * A service to manage fact requests
 */
export default class FactsService {
  // NOTE(review): declared but the code below uses the 'self_intermediary'
  // literal directly; keep the two in sync.
  DEFAULT_INTERMEDIARY = 'self_intermediary'

  jwt: Jwt
  ms: Messaging
  is: IdentityService
  env: string
  messagingService: MessagingService
  crypto: Crypto

  /**
   * The constructor for FactsService
   * @param jwt the Jwt
   * @param ms the Messaging object
   * @param is the IdentityService
   * @param ec the Crypto service used to encrypt outgoing envelopes
   * @param env the environment on what you want to run your app.
   */
  constructor(jwt: Jwt, ms: MessagingService, is: IdentityService, ec: Crypto, env: string) {
    this.jwt = jwt
    this.ms = ms.ms
    this.messagingService = ms
    this.is = is
    this.env = env
    this.crypto = ec
  }

  /**
   * Send a fact request to a specific user
   * @param selfid user identifier to send the fact request.
   * @param facts an array with the facts you're requesting.
   * @param opts optional parameters like conversation id or the expiration time
   * @returns a FactResponse; when opts.async is true, a synthetic response
   *          with status '200' is returned immediately without waiting.
   */
  async request(
    selfid: string,
    facts: Fact[],
    opts?: { cid?: string; exp?: number; async?: boolean }
  ): Promise<FactResponse> {
    let options = opts ? opts : {}
    let as = options.async ? options.async : false

    // Check if the current app still has credits
    if (this.jwt.checkPaidActions) {
      let app = await this.is.app(this.jwt.appID)
      if (app.paid_actions == false) {
        throw new Error(
          'Your credits have expired, please log in to the developer portal and top up your account.'
        )
      }
    }

    // Permission check is skipped for async (fire-and-forget) requests.
    if (as == false) {
      let permited = await this.messagingService.isPermited(selfid)
      if (!permited) {
        throw new Error("You're not permitting connections from " + selfid)
      }
    }

    let id = uuidv4()

    // Get user's device
    let devices = await this.is.devices(selfid)

    let j = this.buildRequest(selfid, facts, opts)
    let ciphertext = this.jwt.toSignedJson(j)

    // One encrypted envelope per registered device of the recipient.
    var msgs = []
    for (var i = 0; i < devices.length; i++) {
      var msg = await this.buildEnvelope(id, selfid, devices[i], ciphertext)
      msgs.push(msg)
    }

    if (as) {
      logger.debug('sending ' + id)
      this.ms.send(j.cid, { data: msgs, waitForResponse: false })
      let res = new FactResponse()
      res.status = '200'
      return res
    }

    logger.debug(`requesting ${id}`)
    let res = await this.ms.request(j.cid, id, msgs)
    if ('errorMessage' in res) {
      throw new Error(res.errorMessage)
    }

    return res
  }

  /**
   * Encrypts the signed payload for one recipient device and wraps it in a
   * flatbuffers messaging envelope (sender/recipient in "<selfid>:<device>"
   * form, MSG type, zeroed metadata timestamps).
   */
  async buildEnvelope(
    id: string,
    selfid: string,
    device: string,
    plaintext: string
  ): Promise<Uint8Array> {
    let ciphertext = await this.crypto.encrypt(plaintext, [{
      id: selfid,
      device: device,
    }])

    let builder = new flatbuffers.Builder(1024)

    let rid = builder.createString(id)
    let snd = builder.createString(`${this.jwt.appID}:${this.jwt.deviceID}`)
    let rcp = builder.createString(`${selfid}:${device}`)
    let ctx = message.SelfMessaging.Message.createCiphertextVector(
      builder,
      Buffer.from(ciphertext)
    )

    message.SelfMessaging.Message.startMessage(builder)
    message.SelfMessaging.Message.addId(builder, rid)
    message.SelfMessaging.Message.addMsgtype(builder, mtype.SelfMessaging.MsgType.MSG)
    message.SelfMessaging.Message.addSender(builder, snd)
    message.SelfMessaging.Message.addRecipient(builder, rcp)
    message.SelfMessaging.Message.addCiphertext(builder, ctx)
    message.SelfMessaging.Message.addMetadata(builder,
      message.SelfMessaging.Metadata.createMetadata(
        builder,
        flatbuffers.createLong(0, 0),
        flatbuffers.createLong(0, 0)
      )
    )

    let msg = message.SelfMessaging.Message.endMessage(builder)
    builder.finish(msg)

    return builder.asUint8Array()
  }

  /**
   * Sends a request via an intermediary
   * @param selfid user identifier to send the fact request.
   * @param facts an array with the facts you're requesting.
   * @param opts optional parameters like conversation id or the expiration time
   * or the selfid of the intermediary you want to use (defaulting to self_intermediary)
   */
  async requestViaIntermediary(
    selfid: string,
    facts: Fact[],
    opts?: { cid?: string; exp?: number; intermediary?: string }
  ): Promise<FactResponse> {
    let options = opts ? opts : {}

    // Check if the current app still has credits
    if (this.jwt.checkPaidActions) {
      let app = await this.is.app(this.jwt.appID)
      if (app.paid_actions == false) {
        throw new Error(
          'Your credits have expired, please log in to the developer portal and top up your account.'
        )
      }
    }

    let permited = await this.messagingService.isPermited(selfid)
    if (!permited) {
      throw new Error("You're not permitting connections from " + selfid)
    }

    let id = uuidv4()

    // Get intermediary's device
    let intermediary = options.intermediary ? options.intermediary : 'self_intermediary'
    let devices = await this.is.devices(intermediary)

    let j = this.buildRequest(selfid, facts, opts)
    let ciphertext = this.jwt.toSignedJson(j)

    // Envelope — note: envelopes are addressed to the intermediary's devices,
    // while the signed request inside still targets `selfid`.
    var msgs = []
    for (var i = 0; i < devices.length; i++) {
      var msg = await this.buildEnvelope(id, intermediary, devices[i], ciphertext)
      msgs.push(msg)
    }

    logger.debug(`requesting ${j.cid}`)
    let res = await this.ms.request(j.cid, id, msgs)
    if ('errorMessage' in res) {
      throw new Error(res.errorMessage)
    }

    return res
  }

  /**
   * Subscribes to fact responses `identities.facts.query.resp` and calls
   * the given callback.
   * @param callback procedure to be called when a new facts response is received.
   */
  subscribe(callback: (n: any) => any) {
    this.ms.subscribe('identities.facts.query.resp', callback)
  }

  /**
   * Generates a QR code your users can scan from their app to share facts with your app.
   * @param facts an array with the facts you're requesting.
   * @param opts allows you specify optional parameters like the conversation id <cid>, the selfid or the expiration time.
   * @returns a PNG image as a Buffer (decoded from the QR data URL).
   */
  generateQR(facts: Fact[], opts?: { selfid?: string; cid?: string; exp?: number }): Buffer {
    let options = opts ? opts : {}
    // '-' means "any user" when no specific selfid is given.
    let selfid = options.selfid ? options.selfid : '-'
    let body = this.jwt.toSignedJson(this.buildRequest(selfid, facts, options))

    let qr = new QRCode()
    qr.setTypeNumber(20)
    qr.setErrorCorrectLevel(ErrorCorrectLevel.L)
    qr.addData(body)
    qr.make()

    // toDataURL yields "data:image/...;base64,<payload>"; keep the payload.
    let data = qr.toDataURL(5).split(',')
    let buf = Buffer.from(data[1], 'base64')

    return buf
  }

  /**
   * Generates a deep link url so you can request facts with a simple link.
   * @param callback the url you want your users to be sent back after authentication.
   * @param facts an array with the facts you're requesting.
   * @param opts optional parameters like selfid or conversation id
   */
  generateDeepLink(
    callback: string,
    facts: Fact[],
    opts?: { selfid?: string; cid?: string }
  ): string {
    let options = opts ? opts : {}
    let selfid = options.selfid ? options.selfid : '-'
    let body = this.jwt.toSignedJson(this.buildRequest(selfid, facts, options))
    let encodedBody = this.jwt.encode(body)

    // The app package suffix depends on the environment ('' = production).
    if (this.env === '') {
      return `https://joinself.page.link/?link=${callback}%3Fqr=${encodedBody}&apn=com.joinself.app`
    } else if (this.env === 'development') {
      return `https://joinself.page.link/?link=${callback}%3Fqr=${encodedBody}&apn=com.joinself.app.dev`
    }
    return `https://joinself.page.link/?link=${callback}%3Fqr=${encodedBody}&apn=com.joinself.app.${this.env}`
  }

  /**
   * builds an authentication request
   * @param selfid identifier for the user you want to authenticate
   * @param facts an array with the facts you're requesting.
   * @param opts optional parameters like conversation id or the expiration time
   * @returns the unsigned JWT payload for an `identities.facts.query.req`.
   */
  private buildRequest(selfid: string, facts: Fact[], opts?: { cid?: string; exp?: number }): any {
    let options = opts ? opts : {}
    let cid = options.cid ? options.cid : uuidv4()
    let expTimeout = options.exp ? options.exp : 300000

    // Fail fast on malformed fact specifications.
    for (var i = 0; i < facts.length; i++) {
      if (!Fact.isValid(facts[i])) {
        throw new TypeError('invalid facts')
      }
    }

    // Calculate expirations
    // NOTE(review): exp = now() + expTimeout * 60 with a 300000 default —
    // the units (ms vs s vs min) are not evident from this file; confirm
    // against Jwt.now() before changing.
    let iat = new Date(Math.floor(this.jwt.now()))
    let exp = new Date(Math.floor(this.jwt.now() + expTimeout * 60))

    // Ciphertext
    return {
      typ: 'identities.facts.query.req',
      iss: this.jwt.appID,
      sub: selfid,
      aud: selfid,
      iat: iat.toISOString(),
      exp: exp.toISOString(),
      cid: cid,
      jti: uuidv4(),
      facts: facts
    }
  }
}
#!/bin/bash

# -------------------------------------------------------------------------- #
# Copyright 2002-2019, OpenNebula Project, OpenNebula Systems                #
#                                                                            #
# Licensed under the Apache License, Version 2.0 (the "License"); you may    #
# not use this file except in compliance with the License. You may obtain    #
# a copy of the License at                                                   #
#                                                                            #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
#                                                                            #
# Unless required by applicable law or agreed to in writing, software        #
# distributed under the License is distributed on an "AS IS" BASIS,          #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   #
# See the License for the specific language governing permissions and        #
# limitations under the License.                                             #
#--------------------------------------------------------------------------- #

##############################################################################
# WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!
#
# This script needs to be modified to enable fencing of the host. By default it
# will fail, as the first line is 'exit 1'. You will need to remove it.
#
# In order to perform the fencing, you will probably need to install a fencing
# utility. They are typically found in: fence-agents-all (CentOS) and fence-
# agents (Ubuntu). They come with many utilities: fence_ilo, fence_ipmilan,
# fence_apc, etc...
#
# To call the fencing utility, you will need to pass some parameters, which are
# typically the iLO IP of the host, etc. We recommend you enter this information
# in the host's template, and pick it up using the xpath example below. AS AN
# EXAMPLE (only an example) the script below expects that you have defined a
# parameter called FENCE_IP in the Host's template, and it will rely on that to
# call the fencing mechanism. You should customize this to your needs. It is
# perfectly OK to discard the code below and use a different mechanism, like
# storing the information required to perform the fencing in a separate CMDB,
# etc. However, you will probably need to get the host's NAME, which should be
# done as shown below.
#
# WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!
#############################################################################

# @param $1 the host information in base64
# @return 0 on success. Make sure this script does not return 0 if it fails.

# To enable remove this line
# (the doubled double-quotes below concatenate empty strings around the text,
# so the printed message is unaffected)
echo ""Fence host not configured, please edit ft/fence_host.sh"" && exit 1

#-------------------------------------------------------------------------------
# Get host parameters with XPATH
#-------------------------------------------------------------------------------

# Locate xpath.rb: system-wide install vs self-contained ONE_LOCATION install.
if [ -z "$ONE_LOCATION" ]; then
    XPATH=/var/lib/one/remotes/datastore/xpath.rb
else
    XPATH=$ONE_LOCATION/var/remotes/datastore/xpath.rb
fi

if [ ! -x "$XPATH" ]; then
    echo "XPATH not found: $XPATH"
    exit 1
fi

# -b: the host information is passed base64-encoded as $1.
XPATH="${XPATH} -b $1"

unset i j XPATH_ELEMENTS

# xpath.rb emits one NUL-terminated value per requested path, in order.
while IFS= read -r -d '' element; do
    XPATH_ELEMENTS[i++]="$element"
done < <($XPATH    /HOST/ID \
                   /HOST/NAME \
                   /HOST/TEMPLATE/FENCE_IP )

# Same order as the xpath query above.
HOST_ID="${XPATH_ELEMENTS[j++]}"
NAME="${XPATH_ELEMENTS[j++]}"
FENCE_IP="${XPATH_ELEMENTS[j++]}"

if [ -z "$FENCE_IP" ]; then
    echo "Fence ip not found"
    exit 1
fi

#-------------------------------------------------------------------------------
# Fence
#-------------------------------------------------------------------------------

# Example:
# fence_ilo -a $FENCE_IP -l <username> -p <password>
import INamedVO from '../../../interfaces/INamedVO';

/**
 * Value object for a mail category.
 * Registered with the API layer under the "mail_category" type id.
 */
export default class MailCategoryVO implements INamedVO {

    // API type discriminator shared by every MailCategoryVO.
    public static API_TYPE_ID: string = "mail_category";

    // Database identifier.
    public id: number;
    // Per-instance copy of the type discriminator.
    public _type: string = MailCategoryVO.API_TYPE_ID;
    // Category name (required by INamedVO).
    public name: string;
}
package com.prismacloud.service.impl;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.stream.JsonReader;
import com.prismacloud.config.PrismaCloudConfiguration;
import com.prismacloud.model.JsonApiModelFailureCriteria;
import com.prismacloud.util.JSONUtils;
import com.prismacloud.service.PrismaCloudService;
import com.prismacloud.model.IacTemplateParameters;
import com.prismacloud.model.JsonApiModelAsyncScanRequest;
import com.prismacloud.model.JsonApiModelAsyncScanRequestData;
import com.prismacloud.model.JsonApiModelAsyncScanRequestDataAttributes;
import com.prismacloud.model.JsonApiModelScanTrigger;
import com.prismacloud.model.JsonApiModelScanTriggerData;
import com.prismacloud.model.JsonApiModelScanTriggerDataAttributes;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import org.apache.commons.lang3.StringUtils;
import org.apache.http.ParseException;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.FileEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * @author Sacumen (www.sacumen.com)
 * PrismaCloudServiceImpl is the implementation of all services defined by
 * {@link PrismaCloudService}: token generation and IaC scan orchestration
 * (create scan, upload file to S3, trigger, poll status, fetch results).
 */
public class PrismaCloudServiceImpl implements PrismaCloudService {

    Logger logger = LoggerFactory.getLogger(PrismaCloudServiceImpl.class);

    /**
     * Returns a valid API token obtained with the configured access/secret key.
     *
     * @param prismaCloudConfiguration connection and credential settings
     * @return the JWT token, or an empty string if authentication failed
     * @throws IOException on network/IO failure
     */
    @Override
    public String getAccessToken(PrismaCloudConfiguration prismaCloudConfiguration) throws IOException {
        logger.info("Entered into PrismaCloudServiceImpl.getAccessToken");
        return generateToken(prismaCloudConfiguration);
    }

    /**
     * Scans the given zip file and returns the scan result JSON as a string.
     *
     * @param prismaCloudConfiguration connection and scan settings
     * @param filePath                 path of the zip file to scan
     * @return scan result JSON, an error body, or an empty string
     * @throws IOException          on network/IO failure
     * @throws InterruptedException if interrupted while polling
     */
    @Override
    public String getScanDetails(PrismaCloudConfiguration prismaCloudConfiguration, String filePath)
            throws IOException, InterruptedException {
        logger.info("Entered into PrismaCloudServiceImpl.getScanDetails");
        return getScanResult(prismaCloudConfiguration, filePath);
    }

    /**
     * Performs the login call and extracts the API token from the response.
     *
     * @param prismaCloudConfiguration connection and credential settings
     * @return the token, or an empty string when the auth call is not HTTP 200
     * @throws ParseException if the response body cannot be parsed
     * @throws IOException    on network/IO failure
     */
    private String generateToken(PrismaCloudConfiguration prismaCloudConfiguration)
            throws ParseException, IOException {
        logger.debug("Entered into PrismaCloudServiceImpl.generateToken");
        try (CloseableHttpClient client = HttpClients.createDefault();
             CloseableHttpResponse authResponse = getJwtToken(client, prismaCloudConfiguration)) {
            int statusCode = authResponse.getStatusLine().getStatusCode();
            if (statusCode == 200) {
                String responseBody = EntityUtils.toString(authResponse.getEntity());
                JsonReader reader = JSONUtils.parseJSONWitReader(responseBody);
                JsonParser jsonParser = new JsonParser();
                JsonObject responseJsonObject = jsonParser.parse(reader).getAsJsonObject();
                return responseJsonObject.get("token").getAsString();
            }
        }
        return StringUtils.EMPTY;
    }

    /**
     * Issues the login request used for API token generation.
     *
     * @param client                   the HTTP client to execute the request with
     * @param prismaCloudConfiguration provides auth URL and credentials
     * @return the raw HTTP response (caller is responsible for closing it)
     * @throws IOException on network/IO failure
     */
    private CloseableHttpResponse getJwtToken(CloseableHttpClient client,
            PrismaCloudConfiguration prismaCloudConfiguration) throws IOException {
        logger.debug("Entered into PrismaCloudServiceImpl.getJwtToken");
        JsonObject requestBody = new JsonObject();
        requestBody.addProperty("username", prismaCloudConfiguration.getAccessKey());
        requestBody.addProperty("password", prismaCloudConfiguration.getSecretKey());
        StringEntity entity = new StringEntity(requestBody.toString());
        HttpPost httpPost = new HttpPost(prismaCloudConfiguration.getAuthUrl());
        httpPost.setHeader("Accept", "application/json");
        httpPost.setHeader("Content-Type", "application/json");
        // fix: the previous code set an "x-redlock-auth" header with a null
        // value on the token request itself; no auth token exists yet, so the
        // header must not be sent at all.
        httpPost.setEntity(entity);
        return client.execute(httpPost);
    }

    /**
     * Runs the full scan workflow: create scan, upload zip to the returned S3
     * URL, trigger the scan, poll the job status until it leaves "processing",
     * then fetch and return the results.
     *
     * @param prismaCloudConfiguration connection and scan settings
     * @param filePath                 path of the zip file to scan
     * @return result JSON (with an added "processingStatus" property), the
     *         error body of the failing step, or an empty string
     * @throws IOException          on network/IO failure
     * @throws InterruptedException if interrupted while polling
     */
    public String getScanResult(PrismaCloudConfiguration prismaCloudConfiguration, String filePath)
            throws IOException, InterruptedException {
        String responseBody = "";
        String authToken = generateToken(prismaCloudConfiguration);
        String processingStatus = "processing";
        try (CloseableHttpClient client = HttpClients.createDefault();
             CloseableHttpResponse scanUrlResponse =
                     getPrismaCloudScanDetails(client, prismaCloudConfiguration, authToken)) {
            if (scanUrlResponse.getStatusLine().getStatusCode() == 200
                    || scanUrlResponse.getStatusLine().getStatusCode() == 201) {
                logger.info("Got the getPrismaCloudScanDetails");
                responseBody = EntityUtils.toString(scanUrlResponse.getEntity());
                JsonReader reader = JSONUtils.parseJSONWitReader(responseBody);
                JsonParser jsonParser = new JsonParser();
                JsonObject jsonObject = jsonParser.parse(reader).getAsJsonObject();
                // The create-scan response carries the scan id and a pre-signed
                // S3 upload URL under data.links.url.
                JsonElement firstelement = jsonParser.parse(jsonObject.get("data").toString());
                JsonElement secondelement = firstelement.getAsJsonObject().get("links");
                String scanID = firstelement.getAsJsonObject().get("id").getAsString();
                String s3LocationURL = secondelement.getAsJsonObject().get("url").toString();
                // substring(1, len-1) strips the surrounding JSON quotes.
                logger.info("********s3LocationURL******* "
                        + s3LocationURL.substring(1, s3LocationURL.length() - 1));
                try (CloseableHttpResponse uploadFileResponse = uploadFileToS3(client,
                        s3LocationURL.substring(1, s3LocationURL.length() - 1), filePath)) {
                    if (uploadFileResponse.getStatusLine().getStatusCode() == 200
                            || uploadFileResponse.getStatusLine().getStatusCode() == 201) {
                        logger.info("Uploaded the file to S3 bucket successfully");
                        try (CloseableHttpResponse triggerScanResponse =
                                     triggerScan(client, scanID, authToken, prismaCloudConfiguration)) {
                            if (triggerScanResponse.getStatusLine().getStatusCode() == 200) {
                                logger.info("Triggered the scan with scanID " + scanID);
                                // Poll every 5 seconds until the job leaves the
                                // "processing" state.
                                while ("processing".equals(processingStatus)) {
                                    try (CloseableHttpResponse jobStatusResponse = getScanJobStatus(
                                            client, scanID, authToken, prismaCloudConfiguration)) {
                                        if (jobStatusResponse.getStatusLine().getStatusCode() == 200) {
                                            Object obj = new JsonParser().parse(EntityUtils.toString(
                                                    jobStatusResponse.getEntity(), StandardCharsets.UTF_8));
                                            JsonObject statusObject = (JsonObject) obj;
                                            if (statusObject.has("data")) {
                                                JsonObject attributes = statusObject.get("data")
                                                        .getAsJsonObject().get("attributes").getAsJsonObject();
                                                processingStatus = attributes.get("status").getAsString();
                                                if (!processingStatus.equals("processing")) {
                                                    break;
                                                }
                                            }
                                            Thread.sleep(5000);
                                        } else {
                                            logger.info("Get job status failed");
                                            return EntityUtils.toString(jobStatusResponse.getEntity(),
                                                    StandardCharsets.UTF_8);
                                        }
                                    }
                                }
                                try (CloseableHttpResponse scanResultResponse = getScanResult(
                                        client, scanID, authToken, prismaCloudConfiguration)) {
                                    if (scanResultResponse.getStatusLine().getStatusCode() == 200) {
                                        logger.info("Getting Scan result========for scanID========" + scanID);
                                        String result = EntityUtils.toString(scanResultResponse.getEntity());
                                        Object obj = new JsonParser().parse(result);
                                        JsonObject jsonObjectParent = (JsonObject) obj;
                                        // Surface the final job status alongside the results.
                                        jsonObjectParent.addProperty("processingStatus", processingStatus);
                                        return jsonObjectParent.toString();
                                    }
                                }
                            } else {
                                logger.info("Trigger Scan Failed");
                                return EntityUtils.toString(triggerScanResponse.getEntity(),
                                        StandardCharsets.UTF_8);
                            }
                        }
                    } else {
                        logger.info("File upload failed");
                        return EntityUtils.toString(uploadFileResponse.getEntity(), StandardCharsets.UTF_8);
                    }
                }
            } else {
                logger.info("Problem while calling scan details");
                return EntityUtils.toString(scanUrlResponse.getEntity(), StandardCharsets.UTF_8);
            }
        }
        return StringUtils.EMPTY;
    }

    /**
     * Creates a new scan on the Prisma Cloud API (POST /scans).
     *
     * @param client                   the HTTP client to execute the request with
     * @param prismaCloudConfiguration provides the scan URL and request fields
     * @param authToken                the API token from {@link #generateToken}
     * @return the raw HTTP response (caller is responsible for closing it)
     * @throws IOException on network/IO failure
     */
    private CloseableHttpResponse getPrismaCloudScanDetails(CloseableHttpClient client,
            PrismaCloudConfiguration prismaCloudConfiguration, String authToken) throws IOException {
        // fix: the log previously reported "getScanResult".
        logger.info("Entered into PrismaCloudServiceImpl.getPrismaCloudScanDetails");
        StringEntity entity = getRequest(prismaCloudConfiguration);
        HttpPost httpPost = new HttpPost(prismaCloudConfiguration.getScanUrl());
        httpPost.setHeader("Accept", "application/vnd.api+json");
        httpPost.setHeader("Content-Type", "application/vnd.api+json");
        httpPost.setHeader("x-redlock-auth", authToken);
        httpPost.setEntity(entity);
        return client.execute(httpPost);
    }

    /**
     * Uploads the file to the given pre-signed S3 URL.
     *
     * @param client        the HTTP client to execute the request with
     * @param s3LocationURL pre-signed S3 PUT URL
     * @param filePath      path of the file to upload
     * @return the raw HTTP response (caller is responsible for closing it)
     * @throws IOException on network/IO failure
     */
    private CloseableHttpResponse uploadFileToS3(CloseableHttpClient client, String s3LocationURL,
            String filePath) throws IOException {
        logger.info("Entered into PrismaCloudServiceImpl.uploadFileToS3");
        HttpPut httpPut = new HttpPut(s3LocationURL);
        File fileToUpload = new File(filePath);
        fileToUpload.setReadable(true);
        httpPut.setEntity(new FileEntity(fileToUpload, ContentType.APPLICATION_OCTET_STREAM));
        return client.execute(httpPut);
    }

    /**
     * Starts the scan process for the uploaded file (POST /scans/{scanId}).
     *
     * @param client                   the HTTP client to execute the request with
     * @param scanId                   id returned when the scan was created
     * @param authToken                the API token
     * @param prismaCloudConfiguration provides template type/version settings
     * @return the raw HTTP response (caller is responsible for closing it)
     * @throws IOException on network/IO failure
     */
    private CloseableHttpResponse triggerScan(CloseableHttpClient client, String scanId, String authToken,
            PrismaCloudConfiguration prismaCloudConfiguration) throws IOException {
        logger.info("Entered into PrismaCloudServiceImpl.triggerScan");
        JsonApiModelScanTrigger jsonApiModelScanTrigger = new JsonApiModelScanTrigger();
        JsonApiModelScanTriggerData jsonApiModelScanTriggerData = new JsonApiModelScanTriggerData();
        jsonApiModelScanTriggerData.setId(UUID.fromString(scanId));
        JsonApiModelScanTriggerDataAttributes jsonApiModelScanTriggerDataAttributes =
                new JsonApiModelScanTriggerDataAttributes();
        // Only tf, cft and k8s are recognized template types (case-insensitive);
        // anything else is sent as an empty type.
        if (prismaCloudConfiguration.getTemplateType().equalsIgnoreCase("tf")) {
            jsonApiModelScanTriggerDataAttributes.setTemplateType("tf");
        } else if (prismaCloudConfiguration.getTemplateType().equalsIgnoreCase("cft")) {
            jsonApiModelScanTriggerDataAttributes.setTemplateType("cft");
        } else if (prismaCloudConfiguration.getTemplateType().equalsIgnoreCase("k8s")) {
            jsonApiModelScanTriggerDataAttributes.setTemplateType("k8s");
        } else {
            jsonApiModelScanTriggerDataAttributes.setTemplateType("");
        }
        // fix: setTemplateVersion was previously called twice with the same value.
        jsonApiModelScanTriggerDataAttributes.setTemplateVersion(prismaCloudConfiguration.getTemplateVersion());
        IacTemplateParameters iacTemplateParameters = new IacTemplateParameters();
        jsonApiModelScanTriggerDataAttributes.setTemplateParameters(iacTemplateParameters);
        jsonApiModelScanTriggerData.setAttributes(jsonApiModelScanTriggerDataAttributes);
        jsonApiModelScanTrigger.setData(jsonApiModelScanTriggerData);
        ObjectMapper mapper = new ObjectMapper();
        StringEntity entity = new StringEntity(mapper.writeValueAsString(jsonApiModelScanTrigger));
        HttpPost httpPost = new HttpPost(prismaCloudConfiguration.getScanUrl().concat("/").concat(scanId));
        httpPost.setHeader("Accept", "application/vnd.api+json");
        httpPost.setHeader("Content-Type", "application/vnd.api+json");
        httpPost.setHeader("x-redlock-auth", authToken);
        httpPost.setEntity(entity);
        return client.execute(httpPost);
    }

    /**
     * Gets the scan job status (GET /scans/{scanId}/status).
     *
     * @param client                   the HTTP client to execute the request with
     * @param scanId                   id of the scan to poll
     * @param authToken                the API token
     * @param prismaCloudConfiguration provides the scan URL
     * @return the raw HTTP response (caller is responsible for closing it)
     * @throws IOException on network/IO failure
     */
    private CloseableHttpResponse getScanJobStatus(CloseableHttpClient client, String scanId, String authToken,
            PrismaCloudConfiguration prismaCloudConfiguration) throws IOException {
        logger.info("Entered into PrismaCloudServiceImpl.getScanJobStatus");
        // fix: removed a StringEntity request body that was built but never
        // attached — HttpGet carries no entity.
        HttpGet httpGet = new HttpGet(
                prismaCloudConfiguration.getScanUrl().concat("/").concat(scanId).concat("/status"));
        httpGet.setHeader("Accept", "application/vnd.api+json");
        httpGet.setHeader("Content-Type", "application/vnd.api+json");
        httpGet.setHeader("x-redlock-auth", authToken);
        return client.execute(httpGet);
    }

    /**
     * Gets the scan result of the uploaded file (GET /scans/{scanId}/results).
     *
     * @param client                   the HTTP client to execute the request with
     * @param scanId                   id of the finished scan
     * @param authToken                the API token
     * @param prismaCloudConfiguration provides the scan URL
     * @return the raw HTTP response (caller is responsible for closing it)
     * @throws IOException on network/IO failure
     */
    private CloseableHttpResponse getScanResult(CloseableHttpClient client, String scanId, String authToken,
            PrismaCloudConfiguration prismaCloudConfiguration) throws IOException {
        logger.info("Entered into PrismaCloudServiceImpl.getScanResult");
        // fix: removed a StringEntity request body that was built but never
        // attached — HttpGet carries no entity.
        HttpGet httpGet = new HttpGet(
                prismaCloudConfiguration.getScanUrl().concat("/").concat(scanId).concat("/results"));
        httpGet.setHeader("Accept", "application/vnd.api+json");
        httpGet.setHeader("Content-Type", "application/vnd.api+json");
        httpGet.setHeader("x-redlock-auth", authToken);
        return client.execute(httpGet);
    }

    /**
     * Builds the JSON:API request body for scan creation.
     *
     * @param prismaCloudConfiguration source of tags, asset info, build info
     *                                 and failure criteria
     * @return the serialized request entity
     * @throws UnsupportedEncodingException if the body cannot be encoded
     * @throws JsonProcessingException      if serialization fails
     */
    private StringEntity getRequest(PrismaCloudConfiguration prismaCloudConfiguration)
            throws UnsupportedEncodingException, JsonProcessingException {
        // Form json request
        JsonApiModelAsyncScanRequest jsonApiModelAsyncScanRequest = new JsonApiModelAsyncScanRequest();
        JsonApiModelAsyncScanRequestData jsonApiModelAsyncScanRequestData =
                new JsonApiModelAsyncScanRequestData();
        JsonApiModelAsyncScanRequestDataAttributes jsonApiModelAsyncScanRequestDataAttributes =
                new JsonApiModelAsyncScanRequestDataAttributes();

        // Setting tags ("key:value" pairs separated by commas).
        // fix: a tag without ':' previously threw ArrayIndexOutOfBoundsException
        // (the old "tag[1] == null" check could never fire); such tags now map
        // to an empty value, and a null/blank tags string adds nothing.
        Map<String, String> iacScanTagsMap = new HashMap<>();
        String tags = prismaCloudConfiguration.getTags();
        if (tags != null) {
            for (String rawTag : tags.split(",")) {
                String[] tag = rawTag.split(":", 2);
                if (tag.length > 0 && !tag[0].trim().isEmpty()) {
                    String value = (tag.length > 1) ? tag[1].trim() : "";
                    iacScanTagsMap.put(tag[0].trim(), value);
                }
            }
        }
        jsonApiModelAsyncScanRequestDataAttributes.setTags(iacScanTagsMap);

        // setting scan type
        jsonApiModelAsyncScanRequestData.setType("async-scan");

        // Setting asset attributes
        jsonApiModelAsyncScanRequestDataAttributes.setAssetName(prismaCloudConfiguration.getAssetName());
        jsonApiModelAsyncScanRequestDataAttributes.setAssetType(prismaCloudConfiguration.getAssetType());

        // Setting scanAttributes
        Map<String, String> jsonApiModelScanAttributes = new HashMap<>();
        jsonApiModelScanAttributes.put("buildNumber", prismaCloudConfiguration.getBuildNumber());
        jsonApiModelScanAttributes.put("projectName", prismaCloudConfiguration.getJobName());
        jsonApiModelAsyncScanRequestDataAttributes.setScanAttributes(jsonApiModelScanAttributes);

        // Failure criteria (severity thresholds and combining operator).
        JsonApiModelFailureCriteria jsonApiModelFailureCriteria = new JsonApiModelFailureCriteria();
        jsonApiModelFailureCriteria.setHigh(prismaCloudConfiguration.getHigh());
        jsonApiModelFailureCriteria.setMedium(prismaCloudConfiguration.getMedium());
        jsonApiModelFailureCriteria.setLow(prismaCloudConfiguration.getLow());
        jsonApiModelFailureCriteria.setOperator(prismaCloudConfiguration.getOperator());
        jsonApiModelAsyncScanRequestDataAttributes.setFailureCriteria(jsonApiModelFailureCriteria);

        jsonApiModelAsyncScanRequestData.setAttributes(jsonApiModelAsyncScanRequestDataAttributes);
        jsonApiModelAsyncScanRequest.setData(jsonApiModelAsyncScanRequestData);
        ObjectMapper mapper = new ObjectMapper();
        return new StringEntity(mapper.writeValueAsString(jsonApiModelAsyncScanRequest));
    }
}
-- Lists every trigger visible in information_schema:
--   trigger_name  : schema-qualified trigger name ("schema.name")
--   trigger_event : timing, DML event and target table joined with spaces,
--                   e.g. "BEFORE INSERT  OF  my_table"
--   action_statement : the trigger body
SELECT CONCAT(trigger_schema,'.',trigger_name) AS trigger_name,
       CONCAT_WS(' ',action_timing,event_manipulation, ' OF ',event_object_table) AS trigger_event,
       action_statement
FROM information_schema.triggers;
# # Cookbook Name:: optuitive-gocd # Recipe:: server # # Copyright (c) 2017 The Authors, All Rights Reserved. include_recipe 'gocd::server'
#!/usr/bin/env bash
###############################################################################
# SLURM Configurations
# Job name, 3-hour wall clock, 1 task / 1 CPU, 2 GB per CPU, and stdout/stderr
# log paths keyed on the SLURM job id (%A).
#SBATCH -J Pendulum-v0/TRPO_aggregate
#SBATCH -t 03:00:00
#SBATCH -n 1
#SBATCH -c 1
#SBATCH --mem-per-cpu=2000
#SBATCH -o /work/scratch/dt11kypo/logs/benchmark/Pendulum-v0/TRPO/%A.out
#SBATCH -e /work/scratch/dt11kypo/logs/benchmark/Pendulum-v0/TRPO/%A.err
###############################################################################
# Your PROGRAM call starts here
echo "Starting Job $SLURM_JOB_ID, Index $SLURM_ARRAY_TASK_ID"

# Program specific arguments
# Forwards every argument given to this script (${@:1} is "$@") to the
# aggregation script, pinning --seed 0 for deterministic aggregation.
# NOTE(review): flattening the arguments into a string and eval-ing it loses
# quoting on arguments that contain spaces — confirm callers never pass such
# arguments.
CMD="python3 /work/home/dt11kypo/mushroom-rl-benchmark/mushroom_rl_benchmark/experiment/slurm/aggregate_results.py \
        ${@:1}\
        --seed 0"

# Echo the fully expanded command for the SLURM log, then run it.
echo "$CMD"
eval $CMD
#!/bin/bash
# Runs astyle with parameters which should be checked in a pre-commit hook
#
# Flag summary (comments cannot sit between the backslash-continued lines):
#   --style=otbs                 One True Brace Style variant
#   --indent=spaces=4            4-space indentation
#   --convert-tabs               replace tabs with spaces
#   --keep-one-line-statements   leave single-line statements unsplit
#   --pad-header                 space between keyword (if/for/while) and '('
#   "$@"                         files/extra options from the caller
astyle \
    --style=otbs \
    --indent=spaces=4 \
    --convert-tabs \
    --keep-one-line-statements \
    --pad-header \
    "$@"
#include "cpu_usage.h"

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <sys/stat.h>

extern int exitFlag;       /* set to 1 when capture should stop */
extern FILE * ofsCPU;      /* output stream for CPU usage samples */

/* Snapshot of cumulative jiffy counters read from /proc/stat. */
typedef struct {
    unsigned int cpu_total;
    unsigned int cpu_usage;
} CPU_USAGE;

void TerminateCapture(int status);

/*
 * Reads the aggregate "cpu" line of /proc/stat and returns the CPU
 * utilization since the previous call as a fraction in [0,1].
 * The first call (pCU zero-initialized) only primes the counters and
 * returns -1.0. Exits the process if /proc/stat cannot be read.
 */
double GetCPUInfo(CPU_USAGE * pCU)
{
    static char buf[32];
    unsigned int cpu_user, cpu_nice, cpu_kernel, cpu_idle, cpu_iowait, cpu_irq, cpu_softirq;
    unsigned int cpu_usage, cpu_total;

    FILE * ifs = fopen("/proc/stat", "r");
    if (ifs == NULL) {
        printf("Error: cannot open /proc/stat\n");
        exit(EXIT_FAILURE);   /* fix: was exit(0), which reported success on failure */
    }
    /* fix: bound %s to the buffer size and check the conversion count. */
    if (fscanf(ifs, "%31s %u %u %u %u %u %u %u", buf,
               &cpu_user, &cpu_nice, &cpu_kernel,
               &cpu_idle, &cpu_iowait, &cpu_irq, &cpu_softirq) != 8) {
        fclose(ifs);
        printf("Error: unexpected /proc/stat format\n");
        exit(EXIT_FAILURE);
    }
    fclose(ifs);

    cpu_total = cpu_user + cpu_nice + cpu_kernel + cpu_idle + cpu_iowait + cpu_irq + cpu_softirq;
    cpu_usage = cpu_total - cpu_idle;

    if (pCU->cpu_total == 0) {
        /* First sample: store baseline counters, no ratio available yet. */
        pCU->cpu_total = cpu_total;
        pCU->cpu_usage = cpu_usage;
        return -1.0;
    } else {
        double u = (double)(cpu_usage - pCU->cpu_usage) / (cpu_total - pCU->cpu_total);
        pCU->cpu_total = cpu_total;
        pCU->cpu_usage = cpu_usage;
        return u;
    }
}

/*
 * Thread entry point: samples CPU usage every 250 ms and appends
 * "<timestamp> <usage>" lines to ofsCPU. Every 4th sample (~1 s) it checks
 * the stop conditions (exitFlag set by the capture code, or the presence of
 * the /sdcard/mpp/dc_stop_flag file created by the user) and calls
 * TerminateCapture() with the corresponding finish status.
 */
void * CaptureCPUUsage(void * arg)
{
    CPU_USAGE cu;
    double u;
    cu.cpu_total = 0;
    cu.cpu_usage = 0;
    GetCPUInfo(&cu);         /* prime the counters */
    usleep(500000);

    struct timeb ts;
    unsigned int clk = 0;
    while (1) {
        u = GetCPUInfo(&cu);
        ftime(&ts);
        fprintf(ofsCPU, "%.3lf %.2lf\n", ts.time + ts.millitm / (double) 1000.0f, u);
        if (++clk % 4 == 0) {
            int finishStatus = 0;
            if (exitFlag == 1)
                finishStatus = FINISH_BY_PCAP;
            else {
                FILE * ifs = fopen("/sdcard/mpp/dc_stop_flag", "r");
                if (ifs != NULL) {
                    fclose(ifs);
                    finishStatus = FINISH_BY_USER;
                }
            }
            if (finishStatus != 0) {
                exitFlag = 1;
                /* TODO: we are waiting for the packet capture routine to stop.
                 * But it might not be (e.g., waiting infinitely for a packet).
                 * If that guess is correct, terminate it in tcpdump.c Line 1384;
                 * otherwise the program may never end when no packets arrive.
                 * Find a smarter way to terminate it more elegantly — at least,
                 * force pcap_close to flush.
                 * (fix: this comment block was previously broken — one
                 * continuation line had lost its comment marker.) */
                usleep(300000);
                TerminateCapture(finishStatus);
            }
        }
        usleep(250000);
    }

    /* Unreachable: the loop above only exits via TerminateCapture(). Kept so
     * the function still has a return for compilers that require one. */
    printf("Thread CaptureCPUUsage() exit.\n");
    return 0;
}
const db = require('./mainDB')

/**
 * Data-access helpers for the `specialty_pizza` table.
 *
 * All queries now use pg-promise parameter placeholders ($1 / ${name})
 * instead of raw template-literal interpolation, which was vulnerable to
 * SQL injection. Each method returns the promise from the underlying
 * db call (db.none / db.one / db.any).
 */
const Specialty_pizza = {
  /** Insert a new specialty pizza row. */
  add: (description, price) =>
    db.none(
      'INSERT INTO specialty_pizza (description, price) VALUES ($1, $2)',
      [description, price]
    ),

  /** Fetch every specialty pizza. */
  getAll: () => db.any('SELECT * FROM specialty_pizza'),

  /** Fetch one pizza by id; rejects if not exactly one row matches. */
  getById: specialty_pizza_id =>
    db.one('SELECT * FROM specialty_pizza WHERE id = $1', [specialty_pizza_id]),

  /** Unconditionally overwrite both columns of the given row. */
  api_update: (id, description, price) =>
    db.none(
      'UPDATE specialty_pizza SET description = $2, price = $3 WHERE id = $1',
      [id, description, price]
    ),

  /**
   * Update only the columns the caller supplied; an empty string means
   * "leave that column unchanged". Statements run inside one transaction.
   */
  update: (id, description, price) => {
    let sql = 'BEGIN TRANSACTION;'
    if (description !== '')
      sql += ' UPDATE specialty_pizza SET description = ${description} WHERE id = ${id};'
    if (price !== '')
      sql += ' UPDATE specialty_pizza SET price = ${price} WHERE id = ${id};'
    sql += ' COMMIT;'
    // pg-promise substitutes ${name} placeholders client-side, so the
    // multi-statement string stays parameterized.
    return db.none(sql, { id, description, price })
  },

  /** Delete the row with the given id. */
  delete: id => db.none('DELETE FROM specialty_pizza WHERE id = $1', [id]),

  /**
   * Fetch the price of one pizza.
   * fix: the original referenced an undefined variable
   * (`specialty_pizza_id`) instead of the `pizza_id` parameter, so every
   * call threw a ReferenceError.
   */
  getPrice: pizza_id =>
    db.one('SELECT price FROM specialty_pizza WHERE id = $1', [pizza_id])
}

module.exports = { Specialty_pizza }
const express = require('express');
const sqlite3 = require('sqlite3').verbose();
const cors = require('cors');

const db = new sqlite3.Database('eat.db');
const app = express();

// CORS: only the local dev frontend may call the API, with cookies enabled.
app.use(cors({
  origin: ['http://localhost:8080'],
  methods: ['GET', 'POST'],
  credentials: true // enable set cookie
}));

// support for POST messages (JSON and form-encoded bodies)
app.use(express.json());
app.use(express.urlencoded({ extended: true }));

// List all foods, alphabetically.
app.get('/api/food/all', (req, res) => {
  db.all('SELECT * FROM food ORDER BY name', (err, rows) => {
    // fix: errors were previously ignored, sending `undefined` rows.
    if (err) return res.status(500).send(err);
    res.send(rows);
  });
});

// List all menus with their foods, food types and total price (as `y`).
app.get('/api/menu/all', (req, res) => {
  let sql = `SELECT menu.date,
        GROUP_CONCAT(food.type) AS types,
        GROUP_CONCAT(food.name) AS foods,
        ROUND(SUM(food.price), 2) AS y
      FROM menu
      INNER JOIN meal ON menu.id = meal.menu_id
      INNER JOIN food ON meal.food_id = food.id
      GROUP BY menu.id
  `;
  db.all(sql, (err, rows) => {
    // fix: the error branch previously fell through and sent a second
    // response ("Cannot set headers after they are sent"); return added.
    if (err) return res.send(err);
    res.send(rows);
  });
});

// Create a menu for a date and attach the selected foods to it.
app.post('/api/menu/submit', (req, res) => {
  let sql = 'INSERT INTO menu (date) VALUES (?)';
  let stmt = db.prepare(sql);
  stmt.run(req.body.date, (err) => {
    if (err) return res.status(500).send(err);
    let mealSql = 'INSERT INTO meal (menu_id, food_id) VALUES(?, ?)';
    let mealStmt = db.prepare(mealSql);
    // stmt.lastID holds the id of the menu row just inserted.
    for (let i = 0; i < req.body.foods.length; i++) {
      mealStmt.run(stmt.lastID, req.body.foods[i].foodId);
    }
    res.send('OK');
  });
});

// Register a new food item.
app.post('/api/food/register', (req, res) => {
  let sql = 'INSERT INTO food (name, type, price) VALUES (?, ?, ?)';
  let stmt = db.prepare(sql);
  stmt.run(req.body.name, req.body.type, req.body.price, (err) => {
    if (err) return res.status(500).send(err);
    res.send('OK');
  });
});

// Static frontend assets.
app.use('/', express.static('public'));

app.listen(8083);
<filename>src/CancerLocator.java import org.apache.commons.math3.linear.*; import org.apache.commons.math3.stat.descriptive.moment.Mean; import org.apache.commons.math3.util.CombinatoricsUtils; import org.apache.commons.math3.util.FastMath; import java.io.*; import java.text.DecimalFormat; import java.util.*; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.stream.Collectors; import java.util.stream.DoubleStream; import java.util.stream.Stream; public class CancerLocator { public static void main(String[] args) throws IOException, InterruptedException { //String currentDir = System.getProperty("user.dir"); // command line args String configFile = args[0]; // config file // read the config file Properties prop = new Properties(); InputStream config; config = new FileInputStream(configFile); prop.load(config); // training samples String trainFile = prop.getProperty("trainFile"); // testing samples, methylation value String testMethyFile = prop.getProperty("testMethyFile"); // testing samples, reads depth String testDepthFile = prop.getProperty("testDepthFile"); // sample type to tissue type mapping file String typeMappingFile = prop.getProperty("typeMappingFile"); // prediction results String resultFile = prop.getProperty("resultFile"); PrintWriter results = new PrintWriter(resultFile, "UTF-8"); results.println(join(new String[] {"Sample ID", "Log-likelihood ratio", "Predicted tumor burden", "Predicted sample class"}, "\t")); // methylation range cutoff used in feature filtering double rangeCut = Double.parseDouble(prop.getProperty("methylationRangeCutoff")); // likelihood ratio cutoff used in prediction double ratioCut = Double.parseDouble(prop.getProperty("logLikelihoodRatioCutoff")); // theta step //double thetaStep = Double.parseDouble(prop.getProperty("thetaStep")); double thetaStep = 0.01; // #threads used int nThreads = Integer.parseInt(prop.getProperty("nThreads")); int nBetas = 201; // num of beta values 
used in Simpson integration System.out.println("Run configration:"); for (String key : prop.stringPropertyNames()) { String value = prop.getProperty(key); System.out.println(key + ": " + value); } System.out.println(); // sample type to prediction class mapping HashMap<String, String> type2Class = new HashMap<>(); String normalType="normal"; // the type name of normal samples BufferedReader br = new BufferedReader(new FileReader(typeMappingFile)); String line; while((line=br.readLine())!=null) { String[] fields = line.trim().split("\t"); type2Class.put(fields[0],fields[1]); //normal samples may have alternative label in training file if (fields[1].equals("normal")) normalType=fields[0]; } // the theta values int nThetas= (int) (1/thetaStep + 1); List<Double> thetaList = DoubleStream.iterate(0, n -> n + thetaStep).limit(nThetas) .boxed().collect(Collectors.toList()); if (thetaList.get(nThetas-1) >= 0.9999) { //theta must be less than 1 thetaList.remove(nThetas-1); nThetas--; } RealVector thetas = new ArrayRealVector(thetaList.toArray(new Double[nThetas])); // read the training data int featureNum = 0; Map<String, ArrayList<double[]>> trainBeta = new HashMap<>(); br = new BufferedReader(new FileReader(trainFile)); while ((line = br.readLine()) != null) { String[] fields = line.trim().split("\t"); if (featureNum == 0) featureNum = fields.length - 1; String type = fields[0]; // type of the sample double[] betaValue = str2double(Arrays.copyOfRange(fields, 1, fields.length)); ArrayList<double[]> betaValues = trainBeta.get(type); if (betaValues == null) { betaValues = new ArrayList<>(); trainBeta.put(type, betaValues); } betaValues.add(betaValue); } br.close(); // build methylation models List<String> sampleTypes = new ArrayList<>(trainBeta.keySet()); List<String> diseaseTypes = new ArrayList<>(sampleTypes); diseaseTypes.remove(normalType); Map<String, RealMatrix> rawValues = new HashMap<>(); Map<String, MethyModel> models = new HashMap<>(); for (String type : 
sampleTypes) { ArrayList<double[]> data = trainBeta.get(type); //beta-values int sampleSize = data.size(); RealMatrix dataMatrix = new BlockRealMatrix(sampleSize, featureNum); for (int i = 0; i < sampleSize; i++) { dataMatrix.setRow(i, data.get(i)); } models.put(type, new MethyModel(dataMatrix)); rawValues.put(type, dataMatrix); } // filter features boolean[] selectedFeatures = new boolean[featureNum]; Arrays.fill(selectedFeatures, Boolean.TRUE); // within each type for (String type : sampleTypes) { RealVector alpha = models.get(type).getAlpha(); RealVector beta = models.get(type).getBeta(); for (int i = 0; i < featureNum; i++) { if (!(alpha.getEntry(i) > 0 && beta.getEntry(i) > 0)) { selectedFeatures[i] = false; } } } // across types Map<Integer, Double> featureRanges = new HashMap<>(); for (int i = 0; i < featureNum; i++) { if (selectedFeatures[i]) { double ncr = 0; // normal cancer range double ctr = 0; // cancer type range double[] means = new double[sampleTypes.size()]; for (int typeInd = 0; typeInd < sampleTypes.size(); typeInd++) { String type = sampleTypes.get(typeInd); double[] values = rawValues.get(type).getColumn(i); means[typeInd] = calMean(values); } for (int typeInd1 = 0; typeInd1 < sampleTypes.size(); typeInd1++) { String type1 = sampleTypes.get(typeInd1); for (int typeInd2 = typeInd1 + 1; typeInd2 < sampleTypes.size(); typeInd2++) { String type2 = sampleTypes.get(typeInd2); double methyDiff = Math.abs(means[typeInd1]-means[typeInd2]); if (type1.equals(normalType) | type2.equals(normalType)) { if (methyDiff > ncr) { ncr = methyDiff; } } else { if (methyDiff > ctr) { ctr = methyDiff; } } } } double featureRange = Math.max(ncr, ctr); featureRanges.put(i, featureRange); } } int nSelectedFeatures = 0; for (int i = 0; i < featureNum; i++) { if (selectedFeatures[i]) { if (featureRanges.get(i) < rangeCut) { selectedFeatures[i] = false; } else { nSelectedFeatures++; } } } // build the mixture models // only selected features considered 
System.out.println("Calculating the mixture models..."); Map<String, HashMap<Integer, MixModel>> mixModels = new HashMap<>(); boolean[] goodMixModels = new boolean[nSelectedFeatures]; Arrays.fill(goodMixModels, Boolean.TRUE); MethyModel ctrModel = models.get(normalType); for (String type : diseaseTypes) { MethyModel diseaseModel = models.get(type); HashMap<Integer, MixModel> typeMixModels = new HashMap<>(); for (int copyNum : new int[]{2}) { // no CNV considered // thetas at DNA level RealVector thetasDNA = calThetasDNA(thetas, copyNum); MixModel mix = new MixModel(diseaseModel.selectFeature(selectedFeatures), ctrModel.selectFeature(selectedFeatures), thetasDNA, nBetas, nThreads); typeMixModels.put(copyNum, mix); for (int featureIdx = 0; featureIdx < nSelectedFeatures; featureIdx++) { RealMatrix mixDens = mix.getMixDens()[featureIdx]; for (int beta = 0; beta < mixDens.getRowDimension(); beta++) { double dens = mixDens.getEntry(beta, 0); if (Double.isNaN(dens) || Double.isInfinite(dens)) { goodMixModels[featureIdx] = false; } } } } System.out.println(type+" model calculated"); mixModels.put(type, typeMixModels); } System.out.println(); // update mixture models for (String type : diseaseTypes) { MixModel model = mixModels.get(type).get(2).selectFeature(goodMixModels); mixModels.get(type).put(2, model); } // update good features int j = 0; for (int i = 0; i < featureNum; i++) { if (selectedFeatures[i]) { if (!goodMixModels[j]) {// don't use this feature selectedFeatures[i] = false; nSelectedFeatures--; } j++; } } System.out.println(nSelectedFeatures + " features used in inference"); System.out.println(); // load the testing data // read the depth file first Map<String, MethySample> testSet = new HashMap<>(); br = new BufferedReader(new FileReader(testDepthFile)); while ((line = br.readLine()) != null) { String[] fields = line.trim().split("\t"); String sampleID = fields[0]; String[] temp = Arrays.copyOfRange(fields, 1, fields.length); int[] depth = str2int(temp); 
MethySample sample = new MethySample(sampleID, fields[0], 0, -1, true); sample.setDepth(depth); testSet.put(sampleID, sample); } br.close(); // then read the file with methylated CpG numbers br = new BufferedReader(new FileReader(testMethyFile)); List<String> testSamples = new ArrayList<>(); while ((line = br.readLine()) != null) { String[] fields = line.trim().split("\t"); String sampleID = fields[0]; testSamples.add(sampleID); String[] temp = Arrays.copyOfRange(fields, 1, fields.length); int[] methyDepth = str2int(temp); int[] depth = testSet.get(sampleID).getDepth(); double[] methy = new double[methyDepth.length]; for (int i = 0; i < methy.length; i++) { if (depth[i] > 0) { methy[i] = ((double) methyDepth[i]) / depth[i]; } else { methy[i] = Double.NaN; } } testSet.get(sampleID).setMethy(methy); testSet.get(sampleID).selfSelectFeature(selectedFeatures); } br.close(); System.out.println("Making predictions..."); // make predictions on the test set Map<String, MethySample> predictions = new HashMap<>(); //multithreading ExecutorService executor = Executors.newFixedThreadPool(nThreads); for (String sampleId : testSet.keySet()) { MethySample testSample = testSet.get(sampleId); Runnable worker = new Predictor(testSample, mixModels, predictions, diseaseTypes, thetas); executor.execute(worker); } executor.shutdown(); while (!executor.isTerminated()) { Thread.sleep(10000); } // write the results // in the same order of samples for (String sampleId : testSamples) { MethySample predSample = predictions.get(sampleId); double predTheta; try { predTheta = predSample.getTheta(); } catch (Exception e) { predTheta = -1; } double densRatio = predSample.getDensRatio()/nSelectedFeatures; //normalized by feature number String predType = predSample.getType(); if (densRatio==0) predType = normalType; // no matter what cutoff used String predClass = densRatio<ratioCut?type2Class.get(normalType):type2Class.get(predType); DecimalFormat thetaFormat = new DecimalFormat("#.###"); // for 
predicted theta String thetaOutput = thetaFormat.format(predTheta); String output = join(new String[] {sampleId, Double.toString(densRatio), thetaOutput, predClass} ,"\t"); results.println(output); } results.close(); System.out.println("All jobs done!"); } private static double calMean(double[] values) { List<Double> nonNa = new ArrayList<>(); for (int i = 0; i<values.length; i++) { if (!Double.isNaN(values[i])) { nonNa.add(values[i]); } } int nonNaCount = nonNa.size(); double[] nonNaValues = Stream.of(nonNa.toArray(new Double[nonNaCount])). mapToDouble(Double::doubleValue).toArray(); Mean mean = new Mean(); return mean.evaluate(nonNaValues,0,nonNaValues.length); } private static MethySample samplePred(MethySample subTestSample, Map<String, HashMap<Integer, MixModel>> subMixModels, List<String> diseaseTypes, RealVector thetas) { String sampleId = subTestSample.getId(); int nTheta = subMixModels.get(diseaseTypes.get(0)).get(2).getThetaNum(); // prediction w/o CNA considered RealMatrix dens = new BlockRealMatrix(diseaseTypes.size(), nTheta); for (int i = 0; i < diseaseTypes.size(); i++) { String type = diseaseTypes.get(i); HashMap<Integer, MixModel> typeSubMixModels = subMixModels.get(type); RealMatrix thetaDists = calSampleDens(subTestSample, typeSubMixModels.get(2)); dens.setRowVector(i, calSumLogDens(thetaDists)); } CancerPrediction pred = new CancerPrediction(dens, thetas, false); Integer[] typeRanks = pred.getTypeRanks(); String typePred = diseaseTypes.get(typeRanks[0]); MethySample predSample = new MethySample(typePred, pred.getBestTheta()); predSample.setId(sampleId); predSample.setDensRatio(pred.getBestRatio()); return predSample; } public static class Predictor implements Runnable { MethySample subTestSample; Map<String, HashMap<Integer, MixModel>> subMixModels; Map<String, MethySample> predictions; List<String> diseaseTypes; RealVector thetas; Predictor(MethySample subTestSample, Map<String, HashMap<Integer, MixModel>> subMixModels, Map<String, MethySample> 
predictions, List<String> diseaseTypes, RealVector thetas) { this.subTestSample = subTestSample; this.subMixModels = subMixModels; this.predictions = predictions; this.diseaseTypes = diseaseTypes; this.thetas = thetas; } @Override public void run() { String sampleId = subTestSample.getId(); MethySample predSample = samplePred(subTestSample, subMixModels, diseaseTypes, thetas); predictions.put(sampleId, predSample); } } // calculate DNA level thetas public static RealVector calThetasDNA(RealVector thetas, int copyNum) { RealVector thetasDNA = thetas.mapMultiply((double) copyNum); if (copyNum != 0) { RealVector ctrRatios = thetas.mapMultiply(-1); ctrRatios.mapAddToSelf(1); ctrRatios.mapMultiplyToSelf(2); RealVector temp = thetasDNA.add(ctrRatios); thetasDNA = thetasDNA.ebeDivide(temp); } return thetasDNA; } private static RealMatrix calSampleDens(MethySample sample, MixModel model) { double[] betaValues = sample.getMethy(); int[] depths = sample.getDepth(); RealMatrix[] mixDens = model.getMixDens(); int nFeature = sample.getFeatureNum(); int nTheta = model.getThetaNum(); int nBetas = mixDens[0].getRowDimension(); RealVector betas = new ArrayRealVector(nBetas); for (int i = 0; i < nBetas; i++) betas.setEntry(i,i/(nBetas-1.0)); RealMatrix dens = new BlockRealMatrix(nFeature, nTheta); for (int i = 0; i < nFeature; i++) for (int j = 0; j < nTheta; j++) { RealVector betaDens = mixDens[i].getColumnVector(j); int methyCounts = (int) Math.round(depths[i]*betaValues[i]); dens.setEntry(i, j, calLogCompoundDens(betas, betaDens, depths[i],methyCounts)); } return dens; } private static double calLogCompoundDens(RealVector betas, RealVector betaDens, int n, int k) { double logComb = CombinatoricsUtils.binomialCoefficientLog(n,k); int nBetas = betas.getDimension(); RealVector dens = new ArrayRealVector(nBetas); for (int i = 0; i < nBetas; i++) { dens.setEntry(i, betaDens.getEntry(i) * FastMath.pow(betas.getEntry(i), k) * FastMath.pow(1 - betas.getEntry(i), n - k)); } double prob = 
integSimpson(betas,dens); double logProb=(prob==0)?-1000:FastMath.log(prob); // avoid -Inf return logComb+logProb; } private static RealVector calSumLogDens(RealMatrix thetaDists) { int nFeature = thetaDists.getRowDimension(); int nTheta = thetaDists.getColumnDimension(); RealVector sumLogDens = new ArrayRealVector(nTheta); for (int i = 0; i < nTheta; i++) { double sum = 0; int nonNaFeature = 0; for (int j = 0; j < nFeature; j++) { Double logDen = thetaDists.getEntry(j, i); if (!Double.isNaN(logDen)) { nonNaFeature++; sum += logDen; } } if (nonNaFeature != 0) { sumLogDens.setEntry(i, sum/nonNaFeature); } else { //no feature available sumLogDens.setEntry(i, Double.NaN); } } return sumLogDens; } public static double integSimpson(RealVector points, RealVector dens) { double s; int n = points.getDimension() - 1; // # of intervals double h = points.getEntry(1) - points.getEntry(0); // the length of an // interval if (n == 2) { s = dens.getEntry(0) + 4 * dens.getEntry(1) + dens.getEntry(2); } else { s = dens.getEntry(0) + dens.getEntry(n); for (int i = 1; i < n; i += 2) { s += 2 * dens.getEntry(i); } for (int i = 2; i < n - 1; i += 2) { s += 4 * dens.getEntry(i); } } s = s * h / 3; return s; } public static int[] str2int(String[] strings) { int[] intArray = new int[strings.length]; int i = 0; for (String str : strings) { str = str.trim(); if (str.equals("NA")) { intArray[i] = 0; // just for the depth info } else { intArray[i] = Integer.parseInt(str.trim()); } i++; } return intArray; } public static double[] str2double(String[] strings) { double[] doubleArray = new double[strings.length]; int i = 0; for (String str : strings) { str = str.trim(); if (str.equals("NA")) { doubleArray[i] = Double.NaN; } else { doubleArray[i] = Double.parseDouble(str); } i++; } return doubleArray; } public static String join(String[] strings, String delim) { StringBuilder sb = new StringBuilder(); String loopDelim = ""; for (String s : strings) { sb.append(loopDelim); sb.append(s); loopDelim = 
delim; } return sb.toString(); } }
#!/usr/bin/env bash

# Checks whether the working tree is "ready to deploy":
#   1. no uncommitted/unstaged changes, and
#   2. no local commits that have not been pushed to the remote.
# Exits non-zero (with a friendly banner) if either check fails.

# This file was generated from an m4 template.
# Generation date-time (ISO 8601): 2018-06-22T10:10+00:00
# Git repository URL: https://github.com/oleks/git-ready-to-deploy
# Commit ID: 394c2b040558a890d33d996c402d6594d1e5dfc6

# Copyright (c) 2016-2018 Oleks <oleks@oleks.info>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

# Fail fast: abort on any error, unset variable, or pipeline failure.
set -euo pipefail

# Exit code used for every failed readiness check.
failcode=1

# Check 1: any modified/untracked files in the current directory?
# `--porcelain` gives one stable machine-readable line per dirty path,
# so a non-zero line count means the tree is dirty.
nlines=$(git status . --porcelain | wc -l)
if [ ${nlines} -ne 0 ] ; then
  cat <<EOF
#####################################################
# Nice! You have changes. Now commit or stash them. #
#####################################################
EOF
  git status .
  exit ${failcode}
fi

# Check 2: refresh remote-tracking refs, then look for unpushed commits.
echo "Let me check your remote.."
git remote update

# `git cherry` prefixes commits missing upstream with "+"; the
# `|| true` keeps the pipeline alive when grep matches nothing
# (grep exits 1 on no match, which `pipefail` would otherwise trap).
nlines=$(git cherry | (grep "^+" || true) | wc -l)
if [ ${nlines} -ne 0 ] ; then
  cat <<EOF
#####################################
# Nice! You have commits. Now push. #
#####################################
EOF
  git status .
  exit ${failcode}
fi

# Both checks passed: the tree is clean and fully pushed.
echo "Looks like you are up-to-date."
import { HobbyDto } from './hobby.dto';

/**
 * Data transfer object the server sends when the client requests the
 * full list of hobbies.
 */
export class HobbyListDto {
  /** All hobbies returned by the server. */
  hobbies: HobbyDto[];
}
/**
 * Show or hide the injected recorder dialog in the active tab and
 * persist the choice so it survives page reloads.
 *
 * Extracted from the two (previously duplicated) click handlers.
 *
 * @param {boolean} visible - true to show the recorder UI, false to hide it.
 */
function setRecorderUi(visible) {
  // Injected code strings must match the dialog markup in the content page.
  chrome.tabs.executeScript({
    code: visible ? '$("#recorderDialog").show()' : '$("#recorderDialog").hide()',
  });
  chrome.storage.local.set({ recorderUi: visible }, function () {
    console.log('Settings saved');
  });
}

// Wire up the options-page widgets once the DOM is ready.
$(function () {
  $("#tabs").tabs();
  $("#radioset").buttonset();
  $("#controlgroup").controlgroup();

  $("#recorder-ui-on").click(function () {
    setRecorderUi(true);
  });

  $("#recorder-ui-off").click(function () {
    setRecorderUi(false);
  });
});

// Restore the persisted setting into the radio buttons on page load.
window.onload = function () {
  chrome.storage.local.get('recorderUi', function (data) {
    // Use .prop(), not .attr(): 'checked' is a live DOM property, and
    // setting only the attribute does not reliably update widget state
    // (jQuery docs: attributes vs. properties).
    if (data.recorderUi) {
      $('#recorder-ui-on').prop('checked', true);
    } else {
      $('#recorder-ui-off').prop('checked', true);
    }
    // Repaint the jQuery UI buttons to reflect the programmatic change.
    $('input').button('refresh');
  });
};
#!/usr/bin/env bash

# Verifies the Terraform module: downloads the pinned Terraform release,
# checks formatting, generates a test project from the example, and
# (when GCP credentials are available) plans/applies/destroys it.

# Copyright 2019 Jetstack Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail
set -o xtrace

REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." >/dev/null 2>&1 && pwd )"

# Make temporary directory to use for testing and enter it
VERIFY_DIR="${REPO_ROOT}/verify"
mkdir -p "$VERIFY_DIR"
pushd "$VERIFY_DIR"

# Determine OS type and architecture to get the correct Terraform binary.
# Terraform supports more platforms than are listed here, but only those used by
# the developers are included. We're open to PRs to add more.
if [[ "$OSTYPE" == "linux-gnu" ]]; then
  TERRAFORM_OS="linux"
elif [[ "$OSTYPE" == "darwin"* ]]; then
  TERRAFORM_OS="darwin"
else
  echo "OS type not supported"
  exit 1
fi

ARCH=$(uname -m)
if [[ "$ARCH" == "x86_64" ]]; then
  TERRAFORM_ARCH="amd64"
elif [[ "$ARCH" == "i386" ]]; then
  TERRAFORM_ARCH="386"
else
  echo "Architecture not supported"
  exit 1
fi

# Checks the Terraform version used by the module, download the Terraform binary
# for that version.
if grep "required_version.*0.12.*" "${REPO_ROOT}/main.tf"; then
  TERRAFORM_VERSION="0.12.24"
else
  echo "Terraform version is not supported or could not be found."
  exit 1
fi

TERRAFORM_ZIP="terraform_${TERRAFORM_VERSION}_${TERRAFORM_OS}_${TERRAFORM_ARCH}.zip"
TERRAFORM_URL="https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/${TERRAFORM_ZIP}"

# If the zip is already present don't download it again.
# -f makes curl fail on HTTP errors (otherwise an error page would be
# saved as the zip and unzip would fail confusingly); -L follows redirects.
if [ ! -f "${TERRAFORM_ZIP}" ]; then
  curl -fSL "${TERRAFORM_URL}" -o "${TERRAFORM_ZIP}"
fi

unzip -o "${TERRAFORM_ZIP}"
chmod +x terraform
TERRAFORM="${VERIFY_DIR}/terraform"
"$TERRAFORM" version

# Capture the output of terraform fmt so that we can trigger the script to
# fail if formatting changes were made. terraform fmt does not consider
# applying formatting changes to be failure, however we want the files to be
# correctly formatted in version control.
FMT=$("${TERRAFORM}" fmt "$REPO_ROOT")
if [ "$FMT" != "" ]; then
  echo "$FMT"
  exit 1
fi

# Copy files from the example project to use as a test project for the module.
# Only copy these files if they don't already exist so they can be edited during
# iterative testing.
if [ ! -f main.tf ]; then
  cp "${REPO_ROOT}/example/main.tf" main.tf
  # Remove the requirement for a GCS backend so we can init and validate locally
  perl -i -0pe 's/(\s*)backend "gcs" \{\n?\s*\n?\s*\}/\1# GCS bucket not used for testing/gms' main.tf
  # Use the local version of the module, not the Terraform Registry version, and remove the version specification
  perl -i -0pe 's/(\s*)source*\s*= "jetstack\/gke-cluster\/google"\n\s*version = ".*?"/\1source = "..\/"/gms' main.tf
fi
if [ ! -f variables.tf ]; then
  cp "${REPO_ROOT}/example/variables.tf" variables.tf
fi
if [ ! -f terraform.tfvars ]; then
  cp "${REPO_ROOT}/example/terraform.tfvars.example" terraform.tfvars
fi

# Initialise and validate the generated test project
"$TERRAFORM" init
"$TERRAFORM" validate

# TODO: Set up a GCP project and service account to run the following section
# in automated testing.

# If SKIP_DESTROY is true then exit without destroying, this can be used to
# conduct more manual testing and experiments.
SKIP_DESTROY="${SKIP_DESTROY:-false}"

# To make Terraform plan and apply the generated test project the following
# environment variables are required:
# GOOGLE_APPLICATION_CREDENTIALS is the path of a key.json for a service account
# GCP_PROJECT_ID is the ID of a GCP project to use
#
# BOTH variables are required below, so the gate uses && (the previous ||
# entered this branch with only one variable set and then crashed on
# `nounset` at the first unset expansion).
if [ -n "${GCP_PROJECT_ID+x}" ] && [ -n "${GOOGLE_APPLICATION_CREDENTIALS+x}" ]; then
  echo "$GCP_PROJECT_ID"
  echo "$GOOGLE_APPLICATION_CREDENTIALS"
  sed -i.bak "s|my-project|$GCP_PROJECT_ID|g" terraform.tfvars
  "$TERRAFORM" plan
  "$TERRAFORM" apply -auto-approve
  if [ "$SKIP_DESTROY" != "true" ]; then
    "$TERRAFORM" destroy -auto-approve
  fi
else
  echo "Skipping terraform plan and apply as GCP_PROJECT_ID and GOOGLE_APPLICATION_CREDENTIALS not set."
fi

popd > /dev/null

# If SKIP_DESTROY is true then don't delete the project directory so that
# terraform destroy can still be run later for clean up.
if [ "$SKIP_DESTROY" != "true" ]; then
  rm -rf "$VERIFY_DIR"
fi
#!/bin/bash

# Starts the IIP-Ecosphere platform lifecycle handler in "waiting" mode,
# forwarding all script arguments to the JVM entry point.

# Directory where the platform writes its PID files.
PID_DIR=/run

# "$@" forwards the caller's arguments verbatim; the original unquoted $*
# re-split any argument containing whitespace. PID_DIR is quoted for the
# same reason. The \$ keeps the inner-class name literal for Java.
java -cp "ecsJars/*:common/*" \
  -Dio.netty.tryReflectionSetAccessible=true \
  -Diip.pid.dir="$PID_DIR" \
  "de.iip_ecosphere.platform.support.LifecycleHandler\$WaitingStarter" "$@"
<reponame>muehleisen/OpenStudio /*********************************************************************************************************************** * OpenStudio(R), Copyright (c) 2008-2021, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the * following conditions are met: * * (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following * disclaimer. * * (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided with the distribution. * * (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission from the respective party. * * (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works * may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior * written permission from Alliance for Sustainable Energy, LLC. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED * STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***********************************************************************************************************************/ #include "CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit.hpp" #include "CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl.hpp" #include "Curve.hpp" #include "Curve_Impl.hpp" #include "CurveQuadratic.hpp" #include "CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData.hpp" #include "CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData_Impl.hpp" #include "ModelObjectList.hpp" #include "ModelObjectList_Impl.hpp" #include "ZoneHVACComponent.hpp" #include "HVACComponent.hpp" #include "ZoneHVACWaterToAirHeatPump.hpp" #include "ZoneHVACWaterToAirHeatPump_Impl.hpp" #include "AirLoopHVACUnitarySystem.hpp" #include "AirLoopHVACUnitarySystem_Impl.hpp" #include "Model.hpp" #include "Model_Impl.hpp" #include "Node.hpp" #include "Node_Impl.hpp" #include <utilities/idd/OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFit_FieldEnums.hxx> #include <utilities/idd/IddEnums.hxx> #include "../utilities/units/Unit.hpp" #include "../utilities/core/Assert.hpp" #include "../utilities/idf/WorkspaceExtensibleGroup.hpp" namespace openstudio { namespace model { namespace detail { CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl(const IdfObject& 
idfObject, Model_Impl* model, bool keepHandle) : WaterToAirComponent_Impl(idfObject, model, keepHandle) { OS_ASSERT(idfObject.iddObject().type() == CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::iddObjectType()); } CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl( const openstudio::detail::WorkspaceObject_Impl& other, Model_Impl* model, bool keepHandle) : WaterToAirComponent_Impl(other, model, keepHandle) { OS_ASSERT(other.iddObject().type() == CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::iddObjectType()); } CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl( const CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl& other, Model_Impl* model, bool keepHandle) : WaterToAirComponent_Impl(other, model, keepHandle) {} const std::vector<std::string>& CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::outputVariableNames() const { static const std::vector<std::string> result{"Heating Coil Electricity Rate", "Heating Coil Heating Rate", "Heating Coil Sensible Heating Rate", "Heating Coil Source Side Heat Transfer Rate", "Heating Coil Part Load Ratio", "Heating Coil Runtime Fraction", "Heating Coil Air Mass Flow Rate", "Heating Coil Air Inlet Temperature", "Heating Coil Air Inlet Humidity Ratio", "Heating Coil Air Outlet Temperature", "Heating Coil Air Outlet Humidity Ratio", "Heating Coil Source Side Mass Flow Rate", "Heating Coil Source Side Inlet Temperature", "Heating Coil Source Side Outlet Temperature", "Heating Coil Upper Speed Level", "Heating Coil Neighboring Speed Levels Ratio", "Heating Coil Recoverable Heat Transfer Rate", "Heating Coil Electricity Energy", "Heating Coil Heating Energy", "Heating Coil Source Side Heat Transfer Energy"}; return result; } IddObjectType CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::iddObjectType() const { return 
CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::iddObjectType(); } unsigned CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::airInletPort() const { return OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::IndoorAirInletNodeName; } unsigned CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::airOutletPort() const { return OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::IndoorAirOutletNodeName; } unsigned CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::waterInletPort() const { return OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::WatertoRefrigerantHXWaterInletNodeName; } unsigned CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::waterOutletPort() const { return OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::WatertoRefrigerantHXWaterOutletNodeName; } int CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::nominalSpeedLevel() const { boost::optional<int> value = getInt(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::NominalSpeedLevel, true); OS_ASSERT(value); return value.get(); } boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::ratedHeatingCapacityAtSelectedNominalSpeedLevel() const { return getDouble(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedHeatingCapacityAtSelectedNominalSpeedLevel, true); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::isRatedHeatingCapacityAtSelectedNominalSpeedLevelAutosized() const { bool result = false; boost::optional<std::string> value = getString(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedHeatingCapacityAtSelectedNominalSpeedLevel, true); if (value) { result = openstudio::istringEqual(value.get(), "autosize"); } return result; } boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::ratedAirFlowRateAtSelectedNominalSpeedLevel() const { return 
getDouble(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedAirFlowRateAtSelectedNominalSpeedLevel, true); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::isRatedAirFlowRateAtSelectedNominalSpeedLevelAutosized() const { bool result = false; boost::optional<std::string> value = getString(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedAirFlowRateAtSelectedNominalSpeedLevel, true); if (value) { result = openstudio::istringEqual(value.get(), "autosize"); } return result; } boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::ratedWaterFlowRateAtSelectedNominalSpeedLevel() const { return getDouble(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedWaterFlowRateAtSelectedNominalSpeedLevel, true); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::isRatedWaterFlowRateAtSelectedNominalSpeedLevelAutosized() const { bool result = false; boost::optional<std::string> value = getString(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedWaterFlowRateAtSelectedNominalSpeedLevel, true); if (value) { result = openstudio::istringEqual(value.get(), "autosize"); } return result; } Curve CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::energyPartLoadFractionCurve() const { boost::optional<Curve> value = optionalEnergyPartLoadFractionCurve(); if (!value) { LOG_AND_THROW(briefDescription() << " does not have an Energy Part Load Fraction Curve attached."); } return value.get(); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::setNominalSpeedLevel(int nominalSpeedLevel) { bool result = setInt(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::NominalSpeedLevel, nominalSpeedLevel); OS_ASSERT(result); return result; } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::setRatedHeatingCapacityAtSelectedNominalSpeedLevel( boost::optional<double> 
ratedHeatingCapacityAtSelectedNominalSpeedLevel) { bool result(false); if (ratedHeatingCapacityAtSelectedNominalSpeedLevel) { result = setDouble(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedHeatingCapacityAtSelectedNominalSpeedLevel, ratedHeatingCapacityAtSelectedNominalSpeedLevel.get()); } OS_ASSERT(result); return result; } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::autosizeRatedHeatingCapacityAtSelectedNominalSpeedLevel() { bool result = setString(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedHeatingCapacityAtSelectedNominalSpeedLevel, "autosize"); OS_ASSERT(result); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::setRatedAirFlowRateAtSelectedNominalSpeedLevel( boost::optional<double> ratedAirFlowRateAtSelectedNominalSpeedLevel) { bool result(false); if (ratedAirFlowRateAtSelectedNominalSpeedLevel) { result = setDouble(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedAirFlowRateAtSelectedNominalSpeedLevel, ratedAirFlowRateAtSelectedNominalSpeedLevel.get()); } OS_ASSERT(result); return result; } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::autosizeRatedAirFlowRateAtSelectedNominalSpeedLevel() { bool result = setString(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedAirFlowRateAtSelectedNominalSpeedLevel, "autosize"); OS_ASSERT(result); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::setRatedWaterFlowRateAtSelectedNominalSpeedLevel( boost::optional<double> ratedWaterFlowRateAtSelectedNominalSpeedLevel) { bool result(false); if (ratedWaterFlowRateAtSelectedNominalSpeedLevel) { result = setDouble(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedWaterFlowRateAtSelectedNominalSpeedLevel, ratedWaterFlowRateAtSelectedNominalSpeedLevel.get()); } OS_ASSERT(result); return result; } void 
CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::autosizeRatedWaterFlowRateAtSelectedNominalSpeedLevel() { bool result = setString(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::RatedWaterFlowRateAtSelectedNominalSpeedLevel, "autosize"); OS_ASSERT(result); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::setEnergyPartLoadFractionCurve(const Curve& curve) { bool result = setPointer(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::EnergyPartLoadFractionCurveName, curve.handle()); return result; } boost::optional<Curve> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::optionalEnergyPartLoadFractionCurve() const { return getObject<ModelObject>().getModelObjectTarget<Curve>( OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::EnergyPartLoadFractionCurveName); } ModelObject CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::clone(Model model) const { auto newCoil = WaterToAirComponent_Impl::clone(model).cast<CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit>(); if (auto speedDataList = this->speedDataList()) { auto speedDataListClone = speedDataList->clone(model).cast<ModelObjectList>(); newCoil.getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->setSpeedDataList(speedDataListClone); } return newCoil; } std::vector<ModelObject> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::children() const { std::vector<ModelObject> children; if (auto const _stageDataList = speedDataList()) { for (const auto& mo : _stageDataList->modelObjects()) { children.push_back(mo); } } children.push_back(energyPartLoadFractionCurve()); return children; } std::vector<openstudio::IdfObject> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::remove() { if (isRemovable()) { if (auto _stageDataList = speedDataList()) { _stageDataList->remove(); } return WaterToAirComponent_Impl::remove(); } return std::vector<IdfObject>(); } 
boost::optional<HVACComponent> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::containingHVACComponent() const { // AirLoopHVACUnitarySystem auto const airLoopHVACUnitarySystems = this->model().getConcreteModelObjects<AirLoopHVACUnitarySystem>(); for (const auto& airLoopHVACUnitarySystem : airLoopHVACUnitarySystems) { if (boost::optional<HVACComponent> heatingCoil = airLoopHVACUnitarySystem.heatingCoil()) { if (heatingCoil->handle() == this->handle()) { return airLoopHVACUnitarySystem; } } } return boost::none; } boost::optional<ZoneHVACComponent> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::containingZoneHVACComponent() const { // ZoneHVACWaterToAirHeatPump auto const zoneHVACWaterToAirHeatPumps = this->model().getConcreteModelObjects<ZoneHVACWaterToAirHeatPump>(); for (const auto& zoneHVACWaterToAirHeatPump : zoneHVACWaterToAirHeatPumps) { if (boost::optional<HVACComponent> coil = zoneHVACWaterToAirHeatPump.heatingCoil()) { if (coil->handle() == this->handle()) { return zoneHVACWaterToAirHeatPump; } } } return boost::none; } boost::optional<ModelObjectList> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::speedDataList() const { return getObject<ModelObject>().getModelObjectTarget<ModelObjectList>( OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::SpeedDataList); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::addSpeed( const CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData& speed) { auto modelObjectList = speedDataList(); if (modelObjectList) { modelObjectList->addModelObject(speed); } return false; } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::removeSpeed( const CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData& speed) { auto modelObjectList = speedDataList(); if (modelObjectList) { modelObjectList->removeModelObject(speed); } } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::removeAllSpeeds() { auto modelObjectList = 
speedDataList(); if (modelObjectList) { auto const modelObjects = modelObjectList->modelObjects(); for (const auto& elem : modelObjects) { auto const modelObject = elem.optionalCast<CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData>(); if (modelObject) { modelObjectList->removeModelObject(elem); } } } } std::vector<CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::speeds() const { std::vector<CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData> result; auto const modelObjectList = speedDataList(); if (modelObjectList) { auto const modelObjects = modelObjectList->modelObjects(); for (const auto& elem : modelObjects) { auto const modelObject = elem.optionalCast<CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData>(); if (modelObject) { result.push_back(modelObject.get()); } } } return result; } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::setSpeedDataList(const boost::optional<ModelObjectList>& modelObjectList) { bool result(false); if (modelObjectList) { result = setPointer(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::SpeedDataList, modelObjectList.get().handle()); } else { resetSpeedDataList(); result = true; } return result; } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::resetSpeedDataList() { bool result = setString(OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFitFields::SpeedDataList, ""); OS_ASSERT(result); } boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::autosizedRatedHeatingCapacityAtSelectedNominalSpeedLevel() const { return getAutosizedValue("Design Size Nominal Heating Capacity", "W"); } boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::autosizedRatedAirFlowRateAtSelectedNominalSpeedLevel() const { return getAutosizedValue("Design Size Rated Air Flow Rate", "m3/s"); } boost::optional<double> 
CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::autosizedRatedWaterFlowRateAtSelectedNominalSpeedLevel() const { return getAutosizedValue("Design Size Rated Water Flow Rate", "m3/s"); } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::autosize() { autosizeRatedHeatingCapacityAtSelectedNominalSpeedLevel(); autosizeRatedAirFlowRateAtSelectedNominalSpeedLevel(); autosizeRatedWaterFlowRateAtSelectedNominalSpeedLevel(); } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl::applySizingValues() { boost::optional<double> val; val = autosizedRatedHeatingCapacityAtSelectedNominalSpeedLevel(); if (val) { setRatedHeatingCapacityAtSelectedNominalSpeedLevel(val.get()); } val = autosizedRatedAirFlowRateAtSelectedNominalSpeedLevel(); if (val) { setRatedAirFlowRateAtSelectedNominalSpeedLevel(val.get()); } val = autosizedRatedWaterFlowRateAtSelectedNominalSpeedLevel(); if (val) { setRatedWaterFlowRateAtSelectedNominalSpeedLevel(val.get()); } } } // namespace detail CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit(const Model& model) : WaterToAirComponent(CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::iddObjectType(), model) { OS_ASSERT(getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()); bool ok = true; setNominalSpeedLevel(1); autosizeRatedHeatingCapacityAtSelectedNominalSpeedLevel(); autosizeRatedAirFlowRateAtSelectedNominalSpeedLevel(); autosizeRatedWaterFlowRateAtSelectedNominalSpeedLevel(); auto partLoadFraction = CurveQuadratic(model); partLoadFraction.setCoefficient1Constant(0.85); partLoadFraction.setCoefficient2x(0.15); partLoadFraction.setCoefficient3xPOW2(0.0); partLoadFraction.setMinimumValueofx(0.0); partLoadFraction.setMaximumValueofx(1.0); ok = setEnergyPartLoadFractionCurve(partLoadFraction); OS_ASSERT(ok); auto speedDataList = ModelObjectList(model); speedDataList.setName(this->name().get() + " Speed Data List"); ok = 
getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->setSpeedDataList(speedDataList); OS_ASSERT(ok); } CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit(const Model& model, const Curve& partLoadFraction) : WaterToAirComponent(CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::iddObjectType(), model) { OS_ASSERT(getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()); bool ok = true; setNominalSpeedLevel(1); autosizeRatedHeatingCapacityAtSelectedNominalSpeedLevel(); autosizeRatedAirFlowRateAtSelectedNominalSpeedLevel(); autosizeRatedWaterFlowRateAtSelectedNominalSpeedLevel(); ok = setEnergyPartLoadFractionCurve(partLoadFraction); OS_ASSERT(ok); auto speedDataList = ModelObjectList(model); speedDataList.setName(this->name().get() + " Speed Data List"); ok = getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->setSpeedDataList(speedDataList); OS_ASSERT(ok); } IddObjectType CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::iddObjectType() { return IddObjectType(IddObjectType::OS_Coil_Heating_WaterToAirHeatPump_VariableSpeedEquationFit); } int CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::nominalSpeedLevel() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->nominalSpeedLevel(); } boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::ratedHeatingCapacityAtSelectedNominalSpeedLevel() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->ratedHeatingCapacityAtSelectedNominalSpeedLevel(); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::isRatedHeatingCapacityAtSelectedNominalSpeedLevelAutosized() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>() ->isRatedHeatingCapacityAtSelectedNominalSpeedLevelAutosized(); } boost::optional<double> 
CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::ratedAirFlowRateAtSelectedNominalSpeedLevel() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->ratedAirFlowRateAtSelectedNominalSpeedLevel(); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::isRatedAirFlowRateAtSelectedNominalSpeedLevelAutosized() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->isRatedAirFlowRateAtSelectedNominalSpeedLevelAutosized(); } boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::ratedWaterFlowRateAtSelectedNominalSpeedLevel() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->ratedWaterFlowRateAtSelectedNominalSpeedLevel(); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::isRatedWaterFlowRateAtSelectedNominalSpeedLevelAutosized() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->isRatedWaterFlowRateAtSelectedNominalSpeedLevelAutosized(); } Curve CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::energyPartLoadFractionCurve() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->energyPartLoadFractionCurve(); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::setNominalSpeedLevel(int nominalSpeedLevel) { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->setNominalSpeedLevel(nominalSpeedLevel); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::setRatedHeatingCapacityAtSelectedNominalSpeedLevel( double ratedHeatingCapacityAtSelectedNominalSpeedLevel) { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->setRatedHeatingCapacityAtSelectedNominalSpeedLevel( ratedHeatingCapacityAtSelectedNominalSpeedLevel); } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::autosizeRatedHeatingCapacityAtSelectedNominalSpeedLevel() { 
getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->autosizeRatedHeatingCapacityAtSelectedNominalSpeedLevel(); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::setRatedAirFlowRateAtSelectedNominalSpeedLevel( double ratedAirFlowRateAtSelectedNominalSpeedLevel) { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->setRatedAirFlowRateAtSelectedNominalSpeedLevel( ratedAirFlowRateAtSelectedNominalSpeedLevel); } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::autosizeRatedAirFlowRateAtSelectedNominalSpeedLevel() { getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->autosizeRatedAirFlowRateAtSelectedNominalSpeedLevel(); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::setRatedWaterFlowRateAtSelectedNominalSpeedLevel( double ratedWaterFlowRateAtSelectedNominalSpeedLevel) { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->setRatedWaterFlowRateAtSelectedNominalSpeedLevel( ratedWaterFlowRateAtSelectedNominalSpeedLevel); } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::autosizeRatedWaterFlowRateAtSelectedNominalSpeedLevel() { getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->autosizeRatedWaterFlowRateAtSelectedNominalSpeedLevel(); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::setEnergyPartLoadFractionCurve(const Curve& curve) { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->setEnergyPartLoadFractionCurve(curve); } std::vector<CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::speeds() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->speeds(); } bool CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::addSpeed(const CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData& speed) { return 
getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->addSpeed(speed); } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::removeSpeed(const CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFitSpeedData& speed) { getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->removeSpeed(speed); } void CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::removeAllSpeeds() { getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->removeAllSpeeds(); } /// @cond CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit( std::shared_ptr<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl> impl) : WaterToAirComponent(std::move(impl)) {} /// @endcond boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::autosizedRatedHeatingCapacityAtSelectedNominalSpeedLevel() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->autosizedRatedHeatingCapacityAtSelectedNominalSpeedLevel(); } boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::autosizedRatedAirFlowRateAtSelectedNominalSpeedLevel() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->autosizedRatedAirFlowRateAtSelectedNominalSpeedLevel(); } boost::optional<double> CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit::autosizedRatedWaterFlowRateAtSelectedNominalSpeedLevel() const { return getImpl<detail::CoilHeatingWaterToAirHeatPumpVariableSpeedEquationFit_Impl>()->autosizedRatedWaterFlowRateAtSelectedNominalSpeedLevel(); } } // namespace model } // namespace openstudio
<gh_stars>0 package com.bcopstein.negocio.factories; import com.bcopstein.negocio.entidades.ItemVenda; public interface IRestricoes { boolean temRestricao(ItemVenda[] itens); }
package interfaces

import (
	"fmt"

	"github.com/abesto/easyssh/target"
)

// Discoverer resolves a single user-supplied input string into the list of
// concrete targets to operate on. Implementations are configurable via
// SetArgs and printable via fmt.Stringer.
type Discoverer interface {
	HasSetArgs
	fmt.Stringer
	Discover(input string) []target.Target
}

// HasSetArgs is implemented by components that accept a generic argument
// list for configuration before use.
type HasSetArgs interface {
	SetArgs(args []interface{})
}

// TargetFilter transforms a list of targets into a (possibly reduced or
// reordered) list of targets.
type TargetFilter interface {
	HasSetArgs
	fmt.Stringer
	Filter(targets []target.Target) []target.Target
}

// Executor runs the given command against the given targets.
// It returns nothing; error reporting is implementation-specific
// (not visible from this interface).
type Executor interface {
	HasSetArgs
	fmt.Stringer
	Exec(targets []target.Target, command []string)
}
# frozen_string_literal: true

require "fileutils"
require "spec_helper"

# CLI integration specs: verify how the server binary locates and loads the
# host application — either implicitly via Rails' config/environment.rb or
# explicitly via the -r/--require option — and that it fails fast with a
# clear message when nothing can be loaded.
describe "CLI require app", :cli do
  context "when no application provided" do
    it "prints error and exit" do
      run_cli do |cli|
        expect(cli).to have_output_line("Couldn't find an application to load")
        expect(cli).to have_stopped
        expect(cli).to have_exit_status(1)
      end
    end
  end

  context "when config/environment.rb is present" do
    # Copy a dummy Rails environment file into the CLI's working directory so
    # the implicit Rails-detection path is exercised; cleaned up afterwards.
    before do
      FileUtils.mkdir(File.join(PROJECT_ROOT, "bin/config"))
      FileUtils.cp(
        File.join(PROJECT_ROOT, "spec/dummies/rails.rb"),
        File.join(PROJECT_ROOT, "bin/config/environment.rb")
      )
    end

    after do
      FileUtils.rm_rf(File.join(PROJECT_ROOT, "bin/config"))
    end

    it "loads Rails application" do
      run_cli do |cli|
        expect(cli).to have_output_line("Serving Rails application from ./config/environment.rb")
      end
    end
  end

  context "when require option is present" do
    it "loads application when file exists" do
      run_cli("-r ../spec/dummies/app.rb") do |cli|
        expect(cli).to have_output_line("Serving application from ../spec/dummies/app.rb")
        expect(cli).to have_output_line("Hello from app, server!")
      end
    end

    # NOTE(review): the description reads "prints application" but the
    # expectations check an error path — likely meant "prints error".
    # Left unchanged to keep reported example names stable.
    it "prints application when file couldn't be loaded" do
      run_cli("-r ../spec/dummies/fake.rb") do |cli|
        expect(cli).to have_output_line("cannot load such file")
        expect(cli).to have_stopped
        expect(cli).to have_exit_status(1)
      end
    end
  end
end
#!/bin/bash
# Stop local services. Only Apache is stopped unconditionally; the
# RabbitMQ and supervisord shutdown commands are kept below, commented
# out, for optional/manual use.
service apache2 stop
# rabbitmqctl stop
# supervisorctl -c /etc/supervisord/supervisord.conf shutdown
<filename>blueprints/auth-migration/files/config/migrations/__filename__.js export async function up(knex) { await knex.schema.createTableIfNotExists('<%= table %>', function (<%= tableVar %>) { <%= tableVar %>.increments(); <%= tableVar %>.timestamps(); }); // Add the appropriate columns to the <%= table %> table knex.schema.table('<%= table %>', (<%= tableVar %>) => { <% if (confirmable) { %> // Confirmable <%= tableVar %>.uuid('email_confirmation_token_id'); <%= tableVar %>.text('unconfirmed_email'); <%= tableVar %>.boolean('email_confirmed'); <%= tableVar %>.timestamp('email_confirmed_at'); <% } %> <% if (invitable) { %> // Invitable <%= tableVar %>.uuid('invitation_id'); <%= tableVar %>.timestamp('invitation_used_at'); <% } %> <% if (lockable) { %> // Lockable <%= tableVar %>.boolean('locked_out'); <%= tableVar %>.timestamp('last_authentication_attempt'); <%= tableVar %>.integer('failed_authentication_attempts').unsigned(); <% } %> <% if (oauthable) { %> // Oauthable // Uncomment whichever services you'll support, and add any additional // columns for data that you'll import from the OAuth provider's profile // <%= tableVar %>.text('facebook_id'); // <%= tableVar %>.text('twitter_id'); // <%= tableVar %>.text('github_id'); <% } %> <% if (passwordable) { %> // Passwordable // Change the column names here to match the usernameField and // secretField / hashedSecretField you provided to the Passwordable mixin, // if you did. <%= tableVar %>.text('email'); <%= tableVar %>.text('hashed_password'); <% } %> <% if (trackable) { %> // Trackable <%= tableVar %>.timestamp('last_login_at'); <%= tableVar %>.text('last_ip'); <%= tableVar %>.integer('login_count').unsigned(); <% } %> }); <% if (confirmable || invitable || resetable || sessionable) { %> // Some facets also require additional tables/models ... 
<% } %> <% if (confirmable) { %> // Confirmable knex.schema.createTableIfNotExists('email_confirmation_tokens', function (emailConfirmationTokens) { emailConfirmationTokens.uuid('id').primary(); emailConfirmationTokens.timestamps(); emailConfirmationTokens.text('email'); }); <% } %> <% if (invitable) { %> // Invitable knex.schema.createTableIfNotExists('invitations', function (invitations) { invitations.uuid('id').primary(); invitations.timestamps(); invitations.boolean('used'); invitations.timestamp('used_at'); invitations.text('user_id'); invitations.text('user_type'); invitations.text('token'); invitations.timestamp('expires_at'); }); <% } %> <% if (resetable) { %> // Resetable knex.schema.createTableIfNotExists('password_reset_tokens', function (passwordResetTokens) { passwordResetTokens.uuid('id').primary(); passwordResetTokens.timestamps(); passwordResetTokens.text('user_id'); passwordResetTokens.text('user_type'); passwordResetTokens.text('token'); passwordResetTokens.timestamp('expires_at'); }); <% } %> <% if (sessionable) { %> // Sessionable knex.schema.createTableIfNotExists('sessions', function (sessions) { sessions.uuid('id').primary(); sessions.timestamps(); sessions.text('user_id'); sessions.text('user_type'); sessions.text('token'); sessions.timestamp('expires_at'); }); <% } %> } export async function down(knex) { // We don't drop the <%= table %> table since we can't know for sure // whether or not it pre-existed this migration knex.schema.table('<%= table %>', (<%= tableVar %>) => { <% if (confirmable) { %> // Confirmable <%= tableVar %>.dropColumn('email_confirmation_token_id'); <%= tableVar %>.dropColumn('unconfirmed_email'); <%= tableVar %>.dropColumn('email_confirmed'); <%= tableVar %>.dropColumn('email_confirmed_at'); <% } %> <% if (invitable) { %> // Invitable <%= tableVar %>.dropColumn('invitation_id'); <%= tableVar %>.dropColumn('invitation_used_at'); <% } %> <% if (lockable) { %> // Lockable <%= tableVar %>.dropColumn('locked_out'); <%= 
tableVar %>.dropColumn('last_authentication_attempt'); <%= tableVar %>.dropColumn('failed_authentication_attempts'); <% } %> <% if (oauthable) { %> // Oauthable // <%= tableVar %>.dropColumn('facebook_id'); // <%= tableVar %>.dropColumn('twitter_id'); // <%= tableVar %>.dropColumn('github_id'); <% } %> <% if (passwordable) { %> // Passwordable // Change the column names here to match the usernameField and // secretField / hashedSecretField you provided to the Passwordable mixin, // if you did. <%= tableVar %>.dropColumn('email'); <%= tableVar %>.dropColumn('hashed_password'); <% } %> <% if (trackable) { %> // Trackable <%= tableVar %>.dropColumn('last_login_at'); <%= tableVar %>.dropColumn('last_ip'); <%= tableVar %>.dropColumn('login_count'); <% } %> }); <% if (confirmable) { %> // Confirmable knex.schema.dropTable('email_confirmation_tokens'); <% } %> <% if (invitable) { %> // Invitable knex.schema.dropTable('invitations'); <% } %> <% if (resetable) { %> // Resetable knex.schema.dropTable('password_reset_tokens'); <% } %> <% if (sessionable) { %> // Sessionable knex.schema.dropTable('sessions'); <% } %> }
/** Immutable record describing one registered package. */
class Package {
  constructor(public name: string, public version: string, public stars: number) {}
}

/**
 * In-memory package registry backed by an insertion-ordered list.
 * Duplicate names are allowed; removal deletes every matching entry,
 * search returns the first matching entry.
 */
class PackageRegistry {
  private packages: Package[] = [];

  /** Registers a new package entry at the end of the list. */
  addPackage(name: string, version: string, stars: number) {
    const entry = new Package(name, version, stars);
    this.packages = [...this.packages, entry];
  }

  /** Removes every entry whose name matches exactly. */
  removePackage(name: string) {
    const remaining: Package[] = [];
    for (const entry of this.packages) {
      if (entry.name !== name) {
        remaining.push(entry);
      }
    }
    this.packages = remaining;
  }

  /**
   * Returns a formatted description of the first entry with the given name,
   * or the literal string "Package not found" when absent.
   */
  searchPackage(name: string) {
    for (const entry of this.packages) {
      if (entry.name === name) {
        return `Name: ${entry.name}, Version: ${entry.version}, Stars: ${entry.stars}`;
      }
    }
    return "Package not found";
  }

  /** Logs every registered package, one line each, in insertion order. */
  displayAllPackages() {
    for (const entry of this.packages) {
      console.log(`Name: ${entry.name}, Version: ${entry.version}, Stars: ${entry.stars}`);
    }
  }
}

// Example usage
const registry = new PackageRegistry();
registry.addPackage("example-package", "1.0.0", 100);
registry.addPackage("another-package", "2.0.0", 200);
registry.displayAllPackages();
console.log(registry.searchPackage("example-package"));
registry.removePackage("example-package");
registry.displayAllPackages();
from django import template
import mistune

register = template.Library()

# A single module-level converter is reused across calls: the original code
# constructed a fresh mistune.Markdown() on every filter invocation, which
# rebuilds the renderer/parser state each time for identical output.
_markdown_converter = mistune.Markdown()


@register.filter
def markdown(value):
    """Django template filter: render a Markdown string to HTML.

    Args:
        value: the Markdown source text (as passed by the template engine).

    Returns:
        The HTML produced by mistune for ``value``.
    """
    return _markdown_converter(value)
/* * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Unlicense OR CC0-1.0 * * I2C C++ unit tests * * This example code is in the Public Domain (or CC0 licensed, at your option.) * * Unless required by applicable law or agreed to in writing, this * software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. */ #define CATCH_CONFIG_MAIN #include <stdio.h> #include "unity.h" #include "freertos/portmacro.h" #include "driver/i2c.h" #include "i2c_cxx.hpp" #include "system_cxx.hpp" #include "test_fixtures.hpp" #include "catch.hpp" extern "C" { #include "Mocki2c.h" } // TODO: IDF-2693, function definition just to satisfy linker, mock esp_common instead const char *esp_err_to_name(esp_err_t code) { return "host_test error"; } using namespace std; using namespace idf; TEST_CASE("I2CNumber") { CMockFixture fix; CHECK(I2CNumber::I2C0().get_num() == 0); } TEST_CASE("I2CAddr") { CMockFixture fix; CHECK_THROWS_AS(I2CAddress(-1), I2CException&); I2CAddress(0); I2CAddress(127); CHECK_THROWS_AS(I2CAddress(128), I2CException&); I2CAddress addr(47); CHECK(addr.get_addr() == 47); } TEST_CASE("I2CMaster parameter configuration fails") { CMockFixture fix; i2c_param_config_ExpectAnyArgsAndReturn(ESP_FAIL); CHECK_THROWS_AS(I2CMaster(I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), Frequency(400000)), I2CException&); } TEST_CASE("I2CMaster driver install failure") { CMockFixture fix; i2c_param_config_ExpectAnyArgsAndReturn(ESP_OK); i2c_driver_install_ExpectAnyArgsAndReturn(ESP_FAIL); CHECK_THROWS_AS(I2CMaster(I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), Frequency(400000)), I2CException&); } TEST_CASE("I2CMaster success") { CMockFixture fix; I2CMasterFix master_fix; I2CMaster(I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), Frequency(400000)); } TEST_CASE("I2CWrite empty data throws") { CMockFixture fix; std::vector<uint8_t> empty; CHECK_THROWS_AS(I2CWrite writer(empty), I2CException&); } 
TEST_CASE("I2CRead zero length throws") { CMockFixture fix; std::vector<uint8_t> empty; CHECK_THROWS_AS(I2CRead reader(0), I2CException&); } TEST_CASE("I2CWrite do_transfer fails at link creation") { CMockFixture fix; i2c_cmd_link_create_ExpectAndReturn(nullptr); i2c_cmd_link_delete_Ignore(); I2CWrite writer({47}); CHECK_THROWS_AS(writer.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)), I2CException&); } TEST_CASE("I2CWrite do_transfer fails at start") { CMockFixture fix; i2c_cmd_handle_t dummy_handle = reinterpret_cast<i2c_cmd_handle_t>(0xbeef); i2c_cmd_link_create_IgnoreAndReturn(&dummy_handle); i2c_master_start_ExpectAnyArgsAndReturn(ESP_FAIL); i2c_cmd_link_delete_Ignore(); I2CWrite writer({47}); CHECK_THROWS_AS(writer.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)), I2CException&); } TEST_CASE("I2CWrite do_transfer fails at write byte") { CMockFixture fix; i2c_cmd_handle_t dummy_handle = reinterpret_cast<i2c_cmd_handle_t>(0xbeef); i2c_cmd_link_create_IgnoreAndReturn(&dummy_handle); i2c_master_start_IgnoreAndReturn(ESP_OK); i2c_master_write_byte_ExpectAnyArgsAndReturn(ESP_FAIL); i2c_cmd_link_delete_Ignore(); I2CWrite writer({47}); CHECK_THROWS_AS(writer.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)), I2CException&); } TEST_CASE("I2CWrite do_transfer fails at write") { CMockFixture fix; i2c_cmd_handle_t dummy_handle = reinterpret_cast<i2c_cmd_handle_t>(0xbeef); i2c_cmd_link_create_IgnoreAndReturn(&dummy_handle); i2c_master_start_IgnoreAndReturn(ESP_OK); i2c_master_write_byte_IgnoreAndReturn(ESP_OK); i2c_master_write_ExpectAnyArgsAndReturn(ESP_FAIL); i2c_cmd_link_delete_Ignore(); I2CWrite writer({47}); CHECK_THROWS_AS(writer.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)), I2CException&); } TEST_CASE("I2CWrite do_transfer fails at stop") { CMockFixture fix; i2c_cmd_handle_t dummy_handle = reinterpret_cast<i2c_cmd_handle_t>(0xbeef); i2c_cmd_link_create_IgnoreAndReturn(&dummy_handle); i2c_master_start_IgnoreAndReturn(ESP_OK); 
i2c_master_write_byte_IgnoreAndReturn(ESP_OK); i2c_master_write_IgnoreAndReturn(ESP_OK); i2c_master_stop_ExpectAnyArgsAndReturn(ESP_FAIL); i2c_cmd_link_delete_Ignore(); I2CWrite writer({47}); CHECK_THROWS_AS(writer.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)), I2CException&); } TEST_CASE("I2CWrite do_transfer execution times out") { CMockFixture fix; i2c_cmd_handle_t dummy_handle = reinterpret_cast<i2c_cmd_handle_t>(0xbeef); i2c_cmd_link_create_IgnoreAndReturn(&dummy_handle); i2c_master_start_IgnoreAndReturn(ESP_OK); i2c_master_write_byte_IgnoreAndReturn(ESP_OK); i2c_master_write_IgnoreAndReturn(ESP_OK); i2c_master_stop_IgnoreAndReturn(ESP_OK); i2c_master_cmd_begin_ExpectAnyArgsAndReturn(ESP_ERR_TIMEOUT); i2c_cmd_link_delete_Ignore(); I2CWrite writer({47}); CHECK_THROWS_AS(writer.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)), I2CTransferException&); } TEST_CASE("I2CWrite calls driver correctly") { CMockFixture fix; I2CCmdLinkFix cmd_fix(0x47, I2C_MASTER_WRITE); uint8_t expected_write [] = {0xAB, 0xBA}; const size_t WRITE_SIZE = sizeof(expected_write); const size_t EXPECTED_DATA_LEN = WRITE_SIZE; // note that this behavior is not entirely correct, in th real driver, only i2c_master_cmd_begin() // will actually write the data but for the tests it is enough for now i2c_master_write_ExpectWithArrayAndReturn(&cmd_fix.dummy_handle, expected_write, WRITE_SIZE, EXPECTED_DATA_LEN, true, ESP_OK); i2c_master_stop_ExpectAndReturn(&cmd_fix.dummy_handle, ESP_OK); i2c_master_cmd_begin_ExpectAndReturn(0, &cmd_fix.dummy_handle, 1000 / portTICK_PERIOD_MS, ESP_OK); std::vector<uint8_t> WRITE_BYTES = {0xAB, 0xBA}; I2CWrite write(WRITE_BYTES); write.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)); } TEST_CASE("I2CRead do_transfer fails at read") { CMockFixture fix; i2c_cmd_handle_t dummy_handle = reinterpret_cast<i2c_cmd_handle_t>(0xbeef); i2c_cmd_link_create_ExpectAndReturn(&dummy_handle); i2c_master_start_ExpectAnyArgsAndReturn(ESP_OK); 
i2c_master_write_byte_ExpectAnyArgsAndReturn(ESP_OK); i2c_master_read_ExpectAnyArgsAndReturn(ESP_FAIL); i2c_cmd_link_delete_Ignore(); I2CRead reader(2); CHECK_THROWS_AS(reader.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)), I2CException&); } TEST_CASE("I2CRead calls driver correctly") { CMockFixture fix; I2CCmdLinkFix cmd_fix(0x47, I2C_MASTER_READ); uint8_t READ_DATA [] = {0xAB, 0xBA}; const size_t READ_SIZE = sizeof(READ_DATA); i2c_master_read_ExpectAndReturn(&cmd_fix.dummy_handle, nullptr, READ_SIZE, i2c_ack_type_t::I2C_MASTER_LAST_NACK, ESP_OK); i2c_master_read_IgnoreArg_data(); // note that this behavior is not entirely correct, in th real driver, only i2c_master_cmd_begin() // will actually read the data but for the tests it is enough for now i2c_master_read_ReturnArrayThruPtr_data(READ_DATA, READ_SIZE); i2c_master_stop_ExpectAndReturn(&cmd_fix.dummy_handle, ESP_OK); i2c_master_cmd_begin_ExpectAndReturn(0, &cmd_fix.dummy_handle, 1000 / portTICK_PERIOD_MS, ESP_OK); I2CRead reader(READ_SIZE); std::vector<uint8_t> result = reader.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)); CHECK(result[0] == 0xAB); CHECK(result[1] == 0xBA); } TEST_CASE("I2CComposed try to read size 0 throws") { CMockFixture fix; I2CComposed composed_transfer; CHECK_THROWS_AS(composed_transfer.add_read(0), I2CException&); } TEST_CASE("I2CComposed try to write empy vector throws") { CMockFixture fix; I2CComposed composed_transfer; CHECK_THROWS_AS(composed_transfer.add_write({}), I2CException&); } TEST_CASE("I2CComposed calls driver correctly") { CMockFixture fix; I2CCmdLinkFix cmd_fix(0x47, I2C_MASTER_WRITE); uint8_t expected_write [] = {0x47, 0x48, 0x49}; const size_t WRITE_SIZE = sizeof(expected_write); const size_t EXPECTED_DATA_LEN = WRITE_SIZE; uint8_t READ_DATA [] = {0xAB, 0xBA}; const size_t READ_SIZE = sizeof(READ_DATA); // the write-read transaction with repeated start: i2c_master_write_ExpectWithArrayAndReturn(&cmd_fix.dummy_handle, expected_write, WRITE_SIZE, EXPECTED_DATA_LEN, 
true, ESP_OK); i2c_master_start_ExpectAndReturn(&cmd_fix.dummy_handle, ESP_OK); i2c_master_write_byte_ExpectAndReturn(&cmd_fix.dummy_handle, 0x47 << 1 | I2C_MASTER_READ, true, ESP_OK); i2c_master_read_ExpectAndReturn(&cmd_fix.dummy_handle, nullptr, 2, i2c_ack_type_t::I2C_MASTER_LAST_NACK, ESP_OK); i2c_master_read_IgnoreArg_data(); // note that this behavior is not entirely correct, in th real driver, only i2c_master_cmd_begin() // will actually read the data but for the tests it is enough for now i2c_master_read_ReturnArrayThruPtr_data(READ_DATA, READ_SIZE); i2c_master_stop_ExpectAndReturn(&cmd_fix.dummy_handle, ESP_OK); i2c_master_cmd_begin_ExpectAndReturn(0, &cmd_fix.dummy_handle, 1000 / portTICK_PERIOD_MS, ESP_OK); I2CComposed composed_transfer; composed_transfer.add_write({0x47, 0x48, 0x49}); composed_transfer.add_read(READ_SIZE); vector<vector<uint8_t> > read_result = composed_transfer.do_transfer(I2CNumber::I2C0(), I2CAddress(0x47)); TEST_ASSERT_EQUAL(1, read_result.size()); TEST_ASSERT_EQUAL(READ_SIZE, read_result[0].size()); for (int i = 0; i < READ_SIZE; i++) { TEST_ASSERT_EQUAL(READ_DATA[i], read_result[0][i]); } } TEST_CASE("I2CWrite transfer calls driver correctly") { CMockFixture fix; I2CMasterFix master_fix; I2CCmdLinkFix cmd_fix(0x47, I2C_MASTER_WRITE); uint8_t expected_write [] = {0xAB, 0xBA}; const size_t WRITE_SIZE = sizeof(expected_write); const size_t EXPECTED_DATA_LEN = WRITE_SIZE; // note that this behavior is not entirely correct, in th real driver, only i2c_master_cmd_begin() // will actually write the data but for the tests it is enough for now i2c_master_write_ExpectWithArrayAndReturn(&cmd_fix.dummy_handle, expected_write, WRITE_SIZE, EXPECTED_DATA_LEN, true, ESP_OK); i2c_master_stop_ExpectAndReturn(&cmd_fix.dummy_handle, ESP_OK); i2c_master_cmd_begin_ExpectAndReturn(0, &cmd_fix.dummy_handle, 1000 / portTICK_PERIOD_MS, ESP_OK); I2CMaster master(I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), Frequency(400000)); std::vector<uint8_t> 
WRITE_BYTES = {0xAB, 0xBA}; auto writer = make_shared<I2CWrite>(WRITE_BYTES); master.transfer(I2CAddress(0x47), writer); } TEST_CASE("I2CMaster synchronous write") { CMockFixture fix; I2CMasterFix master_fix; I2CCmdLinkFix cmd_fix(0x47, I2C_MASTER_WRITE); uint8_t expected_write [] = {0xAB, 0xBA}; const size_t WRITE_SIZE = sizeof(expected_write); const size_t EXPECTED_DATA_LEN = WRITE_SIZE; // note that this behavior is not entirely correct, in th real driver, only i2c_master_cmd_begin() // will actually write the data but for the tests it is enough for now i2c_master_write_ExpectWithArrayAndReturn(&cmd_fix.dummy_handle, expected_write, WRITE_SIZE, EXPECTED_DATA_LEN, true, ESP_OK); i2c_master_stop_ExpectAndReturn(&cmd_fix.dummy_handle, ESP_OK); i2c_master_cmd_begin_ExpectAndReturn(0, &cmd_fix.dummy_handle, 1000 / portTICK_PERIOD_MS, ESP_OK); I2CMaster master(I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), Frequency(400000)); std::vector<uint8_t> WRITE_BYTES = {0xAB, 0xBA}; master.sync_write(I2CAddress(0x47), WRITE_BYTES); } TEST_CASE("I2CMaster synchronous read") { CMockFixture fix; I2CMasterFix master_fix; I2CCmdLinkFix cmd_fix(0x47, I2C_MASTER_READ); uint8_t READ_DATA [] = {0xAB, 0xBA}; const size_t READ_SIZE = sizeof(READ_DATA); i2c_master_read_ExpectAndReturn(&cmd_fix.dummy_handle, nullptr, READ_SIZE, i2c_ack_type_t::I2C_MASTER_LAST_NACK, ESP_OK); i2c_master_read_IgnoreArg_data(); // note that this behavior is not entirely correct, in th real driver, only i2c_master_cmd_begin() // will actually read the data but for the tests it is enough for now i2c_master_read_ReturnArrayThruPtr_data(READ_DATA, READ_SIZE); i2c_master_stop_ExpectAndReturn(&cmd_fix.dummy_handle, ESP_OK); i2c_master_cmd_begin_ExpectAndReturn(0, &cmd_fix.dummy_handle, 1000 / portTICK_PERIOD_MS, ESP_OK); I2CMaster master(I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), Frequency(400000)); std::vector<uint8_t> result = master.sync_read(I2CAddress(0x47), READ_SIZE); REQUIRE(result.size() == READ_SIZE); 
CHECK(result[0] == 0xAB); CHECK(result[1] == 0xBA); } TEST_CASE("I2CMaster syncronous transfer (read and write)") { CMockFixture fix; I2CMasterFix master_fix; I2CCmdLinkFix cmd_fix(0x47, I2C_MASTER_WRITE); i2c_cmd_handle_t dummy_handle = reinterpret_cast<i2c_cmd_handle_t>(0xbeef); uint8_t expected_write [] = {0x47, 0x48, 0x49}; const size_t WRITE_SIZE = sizeof(expected_write); const size_t EXPECTED_DATA_LEN = WRITE_SIZE; uint8_t READ_DATA [] = {0xAB, 0xBA}; const size_t READ_SIZE = sizeof(READ_DATA); // the write-read transaction with repeated start: i2c_master_write_ExpectWithArrayAndReturn(&cmd_fix.dummy_handle, expected_write, WRITE_SIZE, EXPECTED_DATA_LEN, true, ESP_OK); i2c_master_start_ExpectAndReturn(&cmd_fix.dummy_handle, ESP_OK); i2c_master_write_byte_ExpectAndReturn(&cmd_fix.dummy_handle, 0x47 << 1 | I2C_MASTER_READ, true, ESP_OK); i2c_master_read_ExpectAndReturn(&cmd_fix.dummy_handle, nullptr, 2, i2c_ack_type_t::I2C_MASTER_LAST_NACK, ESP_OK); i2c_master_read_IgnoreArg_data(); // note that this behavior is not entirely correct, in th real driver, only i2c_master_cmd_begin() // will actually read the data but for the tests it is enough for now i2c_master_read_ReturnArrayThruPtr_data(READ_DATA, READ_SIZE); i2c_master_stop_ExpectAndReturn(&cmd_fix.dummy_handle, ESP_OK); i2c_master_cmd_begin_ExpectAndReturn(0, &cmd_fix.dummy_handle, 1000 / portTICK_PERIOD_MS, ESP_OK); I2CMaster master(I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), Frequency(400000)); vector<uint8_t> read_result = master.sync_transfer(I2CAddress(0x47), {0x47, 0x48, 0x49}, READ_SIZE); CHECK(read_result.size() == READ_SIZE); for (int i = 0; i < READ_SIZE; i++) { CHECK(read_result[i] == READ_DATA[i]); } } #if SOC_I2C_SUPPORT_SLAVE TEST_CASE("I2CSlave parameter configuration fails") { CMockFixture fix; i2c_param_config_ExpectAnyArgsAndReturn(ESP_FAIL); CHECK_THROWS_AS(I2CSlave(I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), I2CAddress(0x47), 64, 64), I2CException&); } TEST_CASE("I2CSlave driver 
installation fails") { CMockFixture fix; i2c_param_config_IgnoreAndReturn(ESP_OK); i2c_driver_install_IgnoreAndReturn(ESP_FAIL); CHECK_THROWS_AS(I2CSlave (I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), I2CAddress(0x47), 64, 64), I2CException&); } TEST_CASE("I2CSlave calls driver functions correctly") { CMockFixture fix; I2CSlaveFix slave_fix(CreateAnd::SUCCEED); I2CSlave slave(I2CNumber::I2C0(), SCL_GPIO(1), SDA_GPIO(2), I2CAddress(0x47), 64, 64); } TEST_CASE("I2CSlave write fails") { CMockFixture fix; I2CSlaveFix slave_fix(CreateAnd::IGNORE); const uint8_t WRITE_BUFFER[] = {0xAB, 0xCD}; const size_t WRITE_BUFFER_LEN = sizeof(WRITE_BUFFER); i2c_slave_write_buffer_ExpectAnyArgsAndReturn(-1); I2CSlave slave(I2CNumber::I2C0(), SCL_GPIO(3), SDA_GPIO(4), I2CAddress(0x47), 64, 64); CHECK(slave.write_raw(WRITE_BUFFER, WRITE_BUFFER_LEN, chrono::milliseconds(0)) == -1); } TEST_CASE("I2CSlave write calls driver functions correctly") { CMockFixture fix; I2CSlaveFix slave_fix(CreateAnd::IGNORE); const uint8_t WRITE_BUFFER[] = {0xAB, 0xCD}; const size_t WRITE_BUFFER_LEN = sizeof(WRITE_BUFFER); i2c_slave_write_buffer_ExpectWithArrayAndReturn(0, WRITE_BUFFER, WRITE_BUFFER_LEN, WRITE_BUFFER_LEN, 500 / portTICK_PERIOD_MS, WRITE_BUFFER_LEN); I2CSlave slave(I2CNumber::I2C0(), SCL_GPIO(3), SDA_GPIO(4), I2CAddress(0x47), 64, 64); CHECK(slave.write_raw(WRITE_BUFFER, WRITE_BUFFER_LEN, chrono::milliseconds(500)) == WRITE_BUFFER_LEN); } TEST_CASE("I2CSlave read fails") { CMockFixture fix; I2CSlaveFix slave_fix(CreateAnd::IGNORE); const size_t READ_BUFFER_LEN = 2; uint8_t read_buffer[READ_BUFFER_LEN]; i2c_slave_read_buffer_ExpectAnyArgsAndReturn(-1); I2CSlave slave(I2CNumber::I2C0(), SCL_GPIO(3), SDA_GPIO(4), I2CAddress(0x47), 64, 64); CHECK(slave.read_raw(read_buffer, READ_BUFFER_LEN, chrono::milliseconds(0)) == -1); } TEST_CASE("I2CSlave read calls driver functions correctly") { CMockFixture fix; I2CSlaveFix slave_fix(CreateAnd::IGNORE); uint8_t WRITE_BUFFER[] = {0xAB, 0xCD}; const size_t 
BUFFER_LEN = sizeof(WRITE_BUFFER); uint8_t read_buffer[BUFFER_LEN]; i2c_slave_read_buffer_ExpectAndReturn(0, read_buffer, BUFFER_LEN, 500 / portTICK_PERIOD_MS, BUFFER_LEN); i2c_slave_read_buffer_ReturnArrayThruPtr_data(WRITE_BUFFER, BUFFER_LEN); I2CSlave slave(I2CNumber::I2C0(), SCL_GPIO(3), SDA_GPIO(4), I2CAddress(0x47), 64, 64); CHECK(slave.read_raw(read_buffer, BUFFER_LEN, chrono::milliseconds(500)) == BUFFER_LEN); for (size_t i = 0; i < BUFFER_LEN; i++) { CHECK(read_buffer[i] == WRITE_BUFFER[i]); } } #endif // SOC_I2C_SUPPORT_SLAVE
# Interactive zsh configuration: prompt, colors, autoloaded functions,
# history behavior, shell options and keybindings.

# Show the hostname in the prompt only when this is a remote (SSH) session.
if [[ -n $SSH_CONNECTION ]]; then
  export PS1='%m:%3~$(git_info_for_prompt)%# '
else
  export PS1='%3~$(git_info_for_prompt)%# '
fi

# BSD/macOS `ls` colors.
export LSCOLORS="exfxcxdxbxegedabagacad"
export CLICOLOR=true

# Autoload every function file shipped under $ZSH/zsh/functions
# (the (:t) glob modifier strips the directory part, leaving function names).
fpath=($ZSH/zsh/functions $fpath)
autoload -U $ZSH/zsh/functions/*(:t)

# Keep 10k history entries, shared and timestamped across sessions.
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000

setopt NO_BG_NICE # don't nice background tasks
setopt NO_HUP
setopt NO_LIST_BEEP
setopt LOCAL_OPTIONS # allow functions to have local options
setopt LOCAL_TRAPS # allow functions to have local traps
setopt HIST_VERIFY
setopt SHARE_HISTORY # share history between sessions ???
setopt EXTENDED_HISTORY # add timestamps to history
setopt PROMPT_SUBST
setopt CORRECT
setopt COMPLETE_IN_WORD
setopt IGNORE_EOF
setopt APPEND_HISTORY # adds history
setopt INC_APPEND_HISTORY SHARE_HISTORY # adds history incrementally and share it across sessions
setopt HIST_IGNORE_ALL_DUPS # don't record dupes in history
setopt HIST_REDUCE_BLANKS

# don't expand aliases _before_ completion has finished
#   like: git comm-[tab]
unsetopt complete_aliases

zle -N newtab

# Word- and line-wise navigation (terminal-specific escape sequences).
bindkey '^[^[[D' backward-word
bindkey '^[^[[C' forward-word
bindkey '^[[5D' beginning-of-line
bindkey '^[[5C' end-of-line

bindkey '^[[3~' delete-char
bindkey '^[^N' newtab
bindkey '^?' backward-delete-char
bindkey '^B' backward-word
bindkey '^F' forward-word
# ^left
bindkey ';5D' backward-word
# ^right
bindkey ';5C' forward-word
bindkey '^a' beginning-of-line
bindkey '^e' end-of-line
bindkey '^r' history-incremental-search-backward

# C-x C-e to edit
autoload -U edit-command-line
zle -N edit-command-line
bindkey '\C-x\C-e' edit-command-line
#!/usr/bin/env bats

# This file is used to test the installation and removal
# of a Debian package.

# WARNING: This testing file must be executed as root and can
# dramatically change your system. It removes the 'elasticsearch'
# user/group and also many directories. Do not execute this file
# unless you know exactly what you are doing.

# The test case can be executed with the Bash Automated
# Testing System tool available at https://github.com/sstephenson/bats
# Thanks to Sam Stephenson!

# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# Load test utilities
load packaging_test_utils

# Cleans everything for the 1st execution
# (runs before every test; skips the whole file on non-dpkg systems)
setup() {
    skip_not_dpkg
}

##################################
# Install DEB package
##################################
@test "[DEB] dpkg command is available" {
    clean_before_test
    dpkg --version
}

@test "[DEB] package is available" {
    # Exactly one built .deb artifact must be present in the working directory.
    count=$(find . -type f -name 'elastic*.deb' | wc -l)
    [ "$count" -eq 1 ]
}

@test "[DEB] package is not installed" {
    run dpkg -s 'elasticsearch'
    [ "$status" -eq 1 ]
}

@test "[DEB] install package" {
    dpkg -i elasticsearch*.deb
}

@test "[DEB] package is installed" {
    dpkg -s 'elasticsearch'
}

##################################
# Check that the package is correctly installed
##################################
@test "[DEB] verify package installation" {
    verify_package_installation
}

##################################
# Check that Elasticsearch is working
##################################
@test "[DEB] test elasticsearch" {
    start_elasticsearch_service
    run_elasticsearch_tests
}

##################################
# Uninstall DEB package
##################################
@test "[DEB] remove package" {
    dpkg -r 'elasticsearch'
}

@test "[DEB] package has been removed" {
    # After `dpkg -r` (remove, not purge) the package record still exists with
    # status "deinstall ok config-files", so `dpkg -s` succeeds.
    run dpkg -s 'elasticsearch'
    [ "$status" -eq 0 ]
    echo "$output" | grep -i "status" | grep -i "deinstall ok"
}

@test "[DEB] verify package removal" {
    # The removal must stop the service
    # NOTE(review): plain `ps` only lists processes attached to the current
    # terminal; confirm this reliably detects a running Elasticsearch, or
    # whether `ps aux` was intended.
    count=$(ps | grep Elasticsearch | wc -l)
    [ "$count" -eq 0 ]

    # The removal must disable the service
    # see prerm file
    if is_systemd; then
        # Debian systemd distros usually returns exit code 3
        run systemctl status elasticsearch.service
        [ "$status" -eq 3 ]

        run systemctl is-enabled elasticsearch.service
        [ "$status" -eq 1 ]
    fi

    # Those directories are deleted when removing the package
    # see postrm file
    assert_file_not_exist "/var/log/elasticsearch"
    assert_file_not_exist "/usr/share/elasticsearch/plugins"
    assert_file_not_exist "/var/run/elasticsearch"

    # The configuration files are still here
    assert_file_exist "/etc/elasticsearch"
    assert_file_exist "/etc/elasticsearch/elasticsearch.yml"
    assert_file_exist "/etc/elasticsearch/logging.yml"

    # The env file is still here
    assert_file_exist "/etc/default/elasticsearch"

    # The service files are still here
    assert_file_exist "/etc/init.d/elasticsearch"
    assert_file_exist "/usr/lib/systemd/system/elasticsearch.service"
}

@test "[DEB] purge package" {
    dpkg --purge 'elasticsearch'
}

@test "[DEB] verify package purge" {
    # all remaining files are deleted by the purge
    assert_file_not_exist "/etc/elasticsearch"
    assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml"
    assert_file_not_exist "/etc/elasticsearch/logging.yml"

    assert_file_not_exist "/etc/default/elasticsearch"

    assert_file_not_exist "/etc/init.d/elasticsearch"
    assert_file_not_exist "/usr/lib/systemd/system/elasticsearch.service"

    assert_file_not_exist "/usr/share/elasticsearch"

    assert_file_not_exist "/usr/share/doc/elasticsearch"
    assert_file_not_exist "/usr/share/doc/elasticsearch/copyright"
}

@test "[DEB] package has been completly removed" {
    run dpkg -s 'elasticsearch'
    [ "$status" -eq 1 ]
}
/**
 * Archive a tanker via an AJAX POST and, on success, clear its table row.
 *
 * @param {string|number} tanker - Identifier of the tanker to archive; also
 *   used to locate its DOM row (element id "row" + tanker).
 */
function archive(tanker) {
    $.ajax({
        type: 'POST',
        url: base_url + '/tankers/archive',
        data: {
            // Laravel CSRF token read from the hidden form field.
            _token: $("[name='_token']").val(),
            tanker_id: tanker
        },
        success: function(response) {
            // Strict comparison; the endpoint returns the literal string 'success'.
            if (response === 'success') {
                // Empty string rather than null: assigning null to innerHTML is
                // coerced inconsistently across browsers.
                document.getElementById("row" + tanker).innerHTML = '';
            }
        },
        error: function(jqXHR, textStatus, errorThrown) {
            // Surface failures instead of silently ignoring them.
            console.error('Failed to archive tanker ' + tanker + ': ' + textStatus, errorThrown);
        }
    });
}
package com.benmu.framework.activity;

import android.content.Context;
import android.content.Intent;
import android.net.http.SslError;
import android.os.Build;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.text.TextUtils;
import android.util.Log;
import android.view.View;
import android.webkit.ConsoleMessage;
import android.webkit.JavascriptInterface;
import android.webkit.SslErrorHandler;
import android.webkit.WebChromeClient;
import android.webkit.WebSettings;
import android.webkit.WebView;
import android.webkit.WebViewClient;
import android.widget.ProgressBar;
import android.widget.RelativeLayout;

import com.benmu.framework.R;
import com.benmu.framework.adapter.router.RouterTracker;
import com.benmu.framework.constant.Constant;
import com.benmu.framework.manager.impl.ModalManager;
import com.benmu.framework.model.ShareInfoBean;
import com.benmu.framework.model.WebViewParamBean;
import com.benmu.framework.utils.BaseCommonUtil;

/**
 * In-app browser activity: loads the URL passed via {@link Constant#WEBVIEW_PARAMS}
 * in a WebView, shows a loading dialog while the page loads, optionally adds a
 * share icon, and exposes a small JS bridge ("bmnative") to the page.
 * <p>
 * Created by Carry on 2017/8/25.
 */
public class GlobalWebViewActivity extends AbstractWeexActivity {
    // Overlay shown when a page fails to load (see showRefreshView()).
    private View rl_refresh;
    private ProgressBar mProgressBar;
    private WebView mWeb;
    // Last URL that failed to load; recorded in onReceivedError for a potential retry.
    private String mFailUrl;
    public static String WEBVIEW_URL = "WEBVIEW_URL";
    private WebViewParamBean mWebViewParams;
    private RelativeLayout mContainer;
    // Explicit title; while null, the page's own <title> is used (see onReceivedTitle).
    private String mTitle;

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_webview);
        init();
    }

    @Override
    protected void onResume() {
        super.onResume();
    }

    // Reads launch parameters, configures the WebView and starts loading the URL.
    private void init() {
        Intent data = getIntent();
        mWebViewParams = (WebViewParamBean) data.getSerializableExtra(Constant.WEBVIEW_PARAMS);
        String mUrl = mWebViewParams.getUrl();
        ShareInfoBean shareInfo = mWebViewParams.getShareInfo();
        if (shareInfo != null) {
            // Show the share icon only when share info was supplied.
            getNavigationBar().setRightIcon(R.drawable.icon_share);
        }
        rl_refresh = findViewById(R.id.rl_refresh);
        mProgressBar = (ProgressBar) findViewById(R.id.pb_progress);
        mWeb = (WebView) findViewById(R.id.webView);
        mContainer = (RelativeLayout) findViewById(R.id.rl_container);
        WebSettings settings = mWeb.getSettings();
        settings.setCacheMode(WebSettings.LOAD_NO_CACHE);
        settings.setUseWideViewPort(true);
        settings.setLoadWithOverviewMode(true);
        settings.setJavaScriptEnabled(true);
        // Expose the JSMethod bridge to page JavaScript as window.bmnative.
        mWeb.addJavascriptInterface(new JSMethod(this), "bmnative");
        settings.setDomStorageEnabled(true);
        if (Build.VERSION.SDK_INT >= 21) {
            // Allow http sub-resources on https pages (blocked by default since Lollipop).
            settings.setMixedContentMode(WebSettings.MIXED_CONTENT_ALWAYS_ALLOW);
        }
        mWeb.setWebViewClient(new MyWebViewClient(this));
        mWeb.setWebChromeClient(new MyWebChromeClient());
        if (!TextUtils.isEmpty(mUrl)) {
            mWeb.loadUrl(mUrl);
        }
        // Loading dialog dismissed in MyWebViewClient.onPageFinished.
        ModalManager.BmLoading.showLoading(this, "", true);
    }

    // Static client + explicit activity reference (avoids an implicit outer-class reference).
    private static class MyWebViewClient extends WebViewClient {
        GlobalWebViewActivity activity;

        public MyWebViewClient(GlobalWebViewActivity activity) {
            Log.d("SVProgressHUD", "MyWebViewClient hasCode -> " + activity.hashCode());
            this.activity = activity;
        }

        @Override
        public void onReceivedSslError(WebView view, SslErrorHandler handler, SslError error) {
            // NOTE(review): proceeding on every SSL error disables certificate
            // validation entirely (man-in-the-middle risk); confirm this is
            // intentional and restricted to trusted content.
            handler.proceed();
        }

        @Override
        public boolean shouldOverrideUrlLoading(WebView view, String url) {
            return super.shouldOverrideUrlLoading(view, url);
        }

        @Override
        public void onPageFinished(WebView view, String url) {
            // Page done (success or failure): hide the loading dialog.
            ModalManager.BmLoading.dismissLoading(activity);
            super.onPageFinished(view, url);
        }

        @Override
        public void onReceivedError(WebView view, int errorCode, String description, String
                failingUrl) {
            super.onReceivedError(view, errorCode, description, failingUrl);
            //L.i("web failingUrl == " + failingUrl);
            activity.mFailUrl = failingUrl;
            activity.showRefreshView();
        }
    }

    private class MyWebChromeClient extends WebChromeClient {
        @Override
        public void onProgressChanged(WebView view, int newProgress) {
            super.onProgressChanged(view, newProgress);
        }

        @Override
        public void onReceivedTitle(WebView view, String title) {
            super.onReceivedTitle(view, title);
            // Use the page title only when no explicit title was set.
            if (!TextUtils.isEmpty(title) && mTitle == null) {
                getNavigationBar().setTitle(title);
            }
        }

        @Override
        public boolean onConsoleMessage(ConsoleMessage consoleMessage) {
            Log.e("onConsoleMessage", "onConsoleMessage>>>>>" + consoleMessage.message());
            return super.onConsoleMessage(consoleMessage);
        }
    }

    private void showRefreshView() {
        showWebCloseView();
    }

    @Override
    public void onBackPressed() {
        // Navigate back inside the WebView history first; only leave the
        // activity (clearing cookies) when there is nothing to go back to.
        if (mWeb.canGoBack()) {
            mWeb.goBack();
        } else {
            BaseCommonUtil.clearAllCookies(this);
            super.onBackPressed();
        }
    }

    // NOTE(review): intentionally empty? No close/refresh UI is ever shown.
    private void showWebCloseView() {
    }

    // JavaScript bridge exposed to the page as window.bmnative.
    public class JSMethod {
        private Context mContext;

        public JSMethod(Context mContext) {
            this.mContext = mContext;
        }

        @JavascriptInterface
        public void closePage() {
            // Close the current page
            RouterTracker.popActivity();
        }
    }
}
#!/usr/bin/env bash
set -e

# Restore ownership of everything under /data to whoever owns /data itself;
# the container runs as root, so output files would otherwise be root-owned.
finish() {
    user_id=$(stat -c '%u:%g' /data)
    chown -R "${user_id}" /data
}
trap finish EXIT

# Record the exact command line in a fresh log, then run marginPolish under
# /usr/bin/time to capture peak memory (DEBUG_MAX_MEM) and wall-clock runtime
# (DEBUG_RUNTIME), teeing all output into the same log.
echo "/usr/bin/time -f '\\\\nDEBUG_MAX_MEM:%M\\\\nDEBUG_RUNTIME:%E\\\\n' /opt/marginPhase/build/marginPolish $@\n" > /data/marginPolish.log
eval "/usr/bin/time -f '\\nDEBUG_MAX_MEM:%M\\nDEBUG_RUNTIME:%E\\n' /opt/marginPhase/build/marginPolish $@" 2>&1 | tee -a /data/marginPolish.log
/**
 * Copyright 2017 iovation, Inc.
 * <p>
 * Licensed under the MIT License.
 * You may not use this file except in compliance with the License.
 * A copy of the License is located in the "LICENSE.txt" file accompanying
 * this file. This file is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.iovation.launchkey.sdk.transport.domain;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.Objects;

/**
 * Transport payload for the Directory v3 "devices" POST call: a device
 * identifier plus an optional TTL (omitted from JSON when null).
 */
public class DirectoryV3DevicesPostRequest {
    public final String identifier;
    private final Integer ttl;

    /**
     * @param identifier Identifier for the device being added.
     * @param ttl Optional time-to-live value; may be null.
     */
    public DirectoryV3DevicesPostRequest(String identifier, Integer ttl) {
        this.identifier = identifier;
        this.ttl = ttl;
    }

    @JsonProperty("identifier")
    public String getIdentifier() {
        return identifier;
    }

    /** Serialized only when non-null, via {@link JsonInclude.Include#NON_EMPTY}. */
    @JsonProperty("ttl")
    @JsonInclude(JsonInclude.Include.NON_EMPTY)
    public Integer getTTL() {
        return ttl;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof DirectoryV3DevicesPostRequest)) {
            return false;
        }
        final DirectoryV3DevicesPostRequest other = (DirectoryV3DevicesPostRequest) o;
        return Objects.equals(identifier, other.identifier)
                && Objects.equals(ttl, other.ttl);
    }

    @Override
    public int hashCode() {
        return Objects.hash(identifier, ttl);
    }

    @Override
    public String toString() {
        return String.format("DirectoryV3DevicesPostRequest{identifier='%s', ttl=%s}",
                identifier, ttl);
    }
}
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D


def build_model():
    """Build and compile a small CNN for 28x28 grayscale digit images.

    Returns:
        A compiled (but untrained) keras ``Sequential`` model with a
        10-way softmax output.
    """
    model = Sequential()
    # Convolutional feature extractor.
    model.add(Conv2D(64, (5, 5), input_shape=(28, 28, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # Fully connected classifier head.
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(units=10, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model


def classify_digit(image_path, model=None):
    """Load a 28x28 grayscale image and return the predicted digit (0-9).

    Args:
        image_path: Path to the image file (the original script had a
            ``<image_url>`` placeholder here, which was a syntax error).
        model: Optional pre-built/pre-trained model; a fresh untrained
            model is built when omitted.

    Note:
        The original script called ``model.fit(img, epochs=1)`` with no
        labels, which raises at runtime with this loss function; training
        requires labelled data and has been removed here.
    """
    # Read image in the format of a NumPy array, reshape for the network
    # (batch, height, width, channels) and normalize to [0, 1].
    img = np.array(Image.open(image_path))
    img = img.reshape(1, 28, 28, 1).astype('float32') / 255

    if model is None:
        model = build_model()

    # Make predictions and pick the most probable class.
    result = model.predict(img)
    return int(np.argmax(result))


if __name__ == '__main__':
    import sys

    prediction = classify_digit(sys.argv[1])
    print("The input image is the number: ", prediction)
function [A1,A2,A3,A4] = splitMatrix(Mtrx)
% splitMatrix  Extract four 2x2x2 sub-blocks from the input array.
%
% Blocks are taken from rows 1:2/3:4 and pages 1:2/3:4, but the second
% (column) dimension is fixed at 1:2 for every block.
%
% NOTE(review): columns 3:4 of a 4x4x4 input are never returned - confirm
% whether some blocks were meant to use (:,3:4,:) instead of (:,1:2,:).

% Split matrix
A1 = Mtrx(1:2,1:2,1:2);
A2 = Mtrx(1:2,1:2,3:4);
A3 = Mtrx(3:4,1:2,1:2);
A4 = Mtrx(3:4,1:2,3:4);
end
import React from 'react' import styled from 'styled-components' const A = styled.a` color: white; &&:visited { color: white; } ` const StyledFooter = styled.footer` position: absolute; text-align: center; bottom: 20px; width: 100vw; ` const Footer = () => { return ( <StyledFooter> <p> a big cool hackathon project by <A href="https://github.com/h4ckh0use">h4ckh0use</A> © 2020 </p> </StyledFooter> ) } export default Footer
package com.kp.twitterclient.models;

import com.activeandroid.Model;
import com.activeandroid.annotation.Column;
import com.activeandroid.annotation.Table;

import java.util.List;

/**
 * ActiveAndroid persistence model mirroring a Twitter {@code User} in the
 * local "USERS" table. {@code REMOTE_ID} is unique: re-saving a user with the
 * same remote id replaces the stored row.
 */
@Table(name = "USERS")
public class DbUser extends Model {
    // Server-side user record id; uniqueness conflicts replace the row.
    @Column(name = "REMOTE_ID", unique = true, onUniqueConflict = Column.ConflictAction.REPLACE)
    public long remoteId;

    @Column(name = "USER_ID")
    public long userId;

    @Column(name = "USER_NAME")
    public String userName;

    @Column(name = "TWITTER_HANDLE")
    public String twitterHandle;

    // Remote URL and locally-cached copy of the avatar image.
    @Column(name = "PROFILE_IMAGE_URL")
    public String profileImageUrl;

    @Column(name = "PROFILE_IMAGE_URL_DISK")
    public String profileImageUrlDisk;

    // Remote URL and locally-cached copy of the profile banner image.
    @Column(name = "BANNER_IMAGE_URL")
    public String bannerImageUrl;

    @Column(name = "BANNER_IMAGE_URL_DISK")
    public String bannerImageUrlDisk;

    @Column(name = "TAG_LINE")
    public String tagLine;

    @Column(name = "NUM_FOLLOWERS")
    public int numFollowers;

    @Column(name = "NUM_FOLLOWING")
    public int numFollowing;

    @Column(name = "NUM_TWEETS")
    public int numTweets;

    /** Required no-arg constructor for ActiveAndroid. */
    public DbUser() {
        super();
    }

    /** Field-by-field constructor. */
    public DbUser(long remoteId, long userId, String userName, String twitterHandle,
                  String profileImageUrl, String profileImageUrlDisk,
                  String bannerImageUrl, String bannerImageUrlDisk,
                  String tagLine, int numFollowers, int numFollowing, int numTweets) {
        this.remoteId = remoteId;
        this.userId = userId;
        this.userName = userName;
        this.twitterHandle = twitterHandle;
        this.profileImageUrl = profileImageUrl;
        this.profileImageUrlDisk = profileImageUrlDisk;
        this.bannerImageUrl = bannerImageUrl;
        this.bannerImageUrlDisk = bannerImageUrlDisk;
        this.tagLine = tagLine;
        this.numFollowers = numFollowers;
        this.numFollowing = numFollowing;
        this.numTweets = numTweets;
    }

    /** Convenience constructor copying every field from an in-memory {@code User}. */
    public DbUser(long remoteId, User user) {
        this(remoteId, user.getUserId(), user.getUserName(), user.getTwitterHandle(),
                user.getProfileImageUrl(), user.getProfileImageUrlDisk(),
                user.getBannerImageUrl(), user.getBannerImageUrlDisk(),
                user.getTagLine(), user.getNumFollowers(), user.getNumFollowing(),
                user.getNumTweets());
    }

    // Return all the tweets belonging to this user from Tweets table
    // (ActiveAndroid reverse lookup on DbTweet's "USER" column).
    public List<DbTweet> getTweetsInDb() {
        return getMany(DbTweet.class, "USER");
    }
}
import React, { Fragment } from 'react' import Header from '../Header/Header' import Container from './Container' import SideNav from '../SideNav/SideNav' import Main from './Main' import styled from '@emotion/styled/macro' import bg from '../../assets/bg-page.png' const DefaultLayoutContainer = styled('nav')` background-color: #ecf3f1; background-image: url(${bg}); min-height: 100vh; ` const DefaultLayout = ({ children }) => ( <DefaultLayoutContainer> <Header /> <Container> <SideNav /> <Main>{children}</Main> </Container> </DefaultLayoutContainer> ) export default DefaultLayout
package org.baade.eel.core.server;

import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;

/**
 * A socket (TCP) server. Extends {@link IServer} with the ability to supply
 * the Netty {@link ChannelInitializer} used to set up the pipeline of each
 * accepted {@link SocketChannel}.
 *
 * @author <a href="http://eel.baade.org">Baade Eel Project</a>
 * 2017/3/23.
 */
public interface ISocketServer extends IServer {

    // Installs the pipeline initializer applied to every new client connection.
    public void setChannelInitializer(ChannelInitializer<SocketChannel> channelInitializer);

}
-- Books published within the last five years, relative to the current date
-- (T-SQL: DATEADD / GETDATE).
SELECT *
FROM Books
WHERE date_published > DATEADD(year, -5, getdate());
def replace_letter(string, old_letter, new_letter):
    """Return a copy of ``string`` with every occurrence of ``old_letter``
    replaced by ``new_letter`` (delegates to ``str.replace``)."""
    result = string.replace(old_letter, new_letter)
    return result


# Demo: replace every 'l' with 'z'.
text = "Hello World"
updated = replace_letter(text, 'l', 'z')
print(updated)
from typing import List, Dict, Union
from colour import Color


def _css_rgb(name: str) -> str:
    """Format a named colour as a CSS ``rgb(r, g, b)`` string with 0-255 channels."""
    r, g, b = Color(name).get_rgb()
    return f"rgb({int(r * 255)}, {int(g * 255)}, {int(b * 255)})"


def add_color(tweets: List[Dict[str, Union[str, float]]]) -> None:
    """Annotate each tweet dict in place with a ``"color"`` key.

    Red for a negative ``sentiment_score``, green for positive, and a fixed
    grey for exactly zero.
    """
    # Compute the three CSS strings once instead of rebuilding Color objects
    # (and re-formatting the identical string) for every tweet.
    negative_color = _css_rgb("red")
    positive_color = _css_rgb("green")
    neutral_color = "rgb(128, 128, 128)"

    for tweet in tweets:
        score = tweet["sentiment_score"]
        if score < 0:
            tweet["color"] = negative_color
        elif score > 0:
            tweet["color"] = positive_color
        else:
            tweet["color"] = neutral_color
#!/bin/sh wd="$(dirname "$0")" : "${crt:=orchestrator.crt}" : "${ca_crt:=server.crt}" : "${key:=orchestrator.key}" : "${gen_crt_sh:=$wd/gen-certs.sh}" : "${oc2_url:=https://127.0.0.1:1512/oc2}" : "${request_id:=$(uuid || date +%s)}" if which jq >/dev/null;then _jq() { jq . "$@"; } else _jq() { cat "$@"; } fi test -e "$1" || { echo "Usage: ${0##*/} <json_file>" >&2 exit 1 } r() { (set -x; "$@"); } set -e test -e "$crt" || r "$gen_crt_sh" echo "--> COMMAND" _jq "$1" resp_file="$(mktemp resp-${request_id}-XXXXXX.json)" hdr_file="${resp_file%.json}.hdr" echo "<-- RESPONSE in $resp_file" r curl -D "$hdr_file" -o "$resp_file" \ --cacert "$ca_crt" --cert "$crt" --key "$key" \ -H "Content-Type: application/openc2-cmd+json;version=1.0" \ -H "X-Request-ID: $request_id" \ --data-binary "@$1" \ "$oc2_url" cat "$hdr_file" _jq "$resp_file"
/*
 * Copyright 2021 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.floody.auth;

import static org.apache.commons.lang3.ObjectUtils.isEmpty;

import com.google.auth.http.HttpCredentialsAdapter;
import com.google.auth.oauth2.GoogleCredentials;
import com.google.common.collect.ImmutableSet;
import java.io.IOException;
import java.util.Collection;

/**
 * Concrete implementation of {@link CredentialService} to build {@link GoogleCredentials} objects
 * for the default service account
 */
public final class SystemAccountCredentialService implements CredentialService {

  /** Builds Application Default Credentials scoped to a single OAuth scope. */
  @Override
  public final GoogleCredentials getCredentialForScope(String scope) throws IOException {
    return getCredentialForScopes(ImmutableSet.of(scope));
  }

  /**
   * Builds Application Default Credentials for the given scopes, falling back to the minimal
   * "email" scope when no scopes are provided.
   */
  @Override
  public final GoogleCredentials getCredentialForScopes(Collection<String> scopes)
      throws IOException {
    return GoogleCredentials.getApplicationDefault()
        .createScoped(isEmpty(scopes) ? ImmutableSet.of("email") : scopes);
  }

  /** Wraps the single-scope credential in an HTTP request initializer. */
  @Override
  public HttpCredentialsAdapter getClientInitializerForScope(String scope) throws IOException {
    return getClientInitializerForScope(ImmutableSet.of(scope));
  }

  /** Wraps the multi-scope credentials in an HTTP request initializer. */
  @Override
  public HttpCredentialsAdapter getClientInitializerForScope(Collection<String> scopes)
      throws IOException {
    return new HttpCredentialsAdapter(getCredentialForScopes(scopes));
  }
}
// NOTE(review): this helper looks experimental/generated; the mutations below
// do not behave the way the names suggest - flagged inline.
function processComponent(Component) {
  // Re-creates the given element with the same type, props and children.
  const ModifiedComponent = () => {
    return (
      <Component.type {...Component.props}>
        {Component.props.children}
      </Component.type>
    );
  };

  // NOTE(review): read from a freshly created element and never used
  // afterwards - presumably dead code.
  const ButtonRenderer = ModifiedComponent().props.children.props.children;

  // Wraps the given children in a styled <div>; `buttonProps` is accepted
  // but never used.
  const ModifiedButtonRenderer = (ButtonChildren, buttonProps) => (
    <div className="modified-button">{ButtonChildren}</div>
  );

  // NOTE(review): this assignment mutates the props of a throwaway element
  // created by this single call; it has no effect on elements rendered later
  // from ModifiedComponent. Confirm the intended behavior.
  ModifiedComponent().props.children.props.children = ModifiedButtonRenderer;

  return ModifiedComponent;
}
require 'middleman-core/sitemap/resource'
require 'middleman-core/contracts'

module Middleman
  module Sitemap
    module Extensions
      # Manages the list of proxy configurations and manipulates the sitemap
      # to include new resources based on those configurations
      class Redirects < Extension
        def initialize(app, config={}, &block)
          super

          # Expose `redirect` inside config.rb so site authors can declare redirects.
          @app.add_to_config_context(:redirect, &method(:create_redirect))

          # path => options hash ({to:, template:}) for each declared redirect.
          @redirects = {}
        end

        # Setup a redirect from a path to a target
        # @param [String] path
        # @param [Hash] opts The :to value gives a target path
        Contract String, ({ to: Or[String, IsA['Middleman::Sitemap::Resource']] }), Proc => Any
        def create_redirect(path, opts={}, &block)
          # An optional block becomes the template used to render the redirect page.
          opts[:template] = block if block_given?

          @redirects[path] = opts

          # Force the sitemap to pick up the newly added redirect resource.
          @app.sitemap.rebuild_resource_list!(:added_redirect)
        end

        # Update the main sitemap resource list
        # @return Array<Middleman::Sitemap::Resource>
        Contract ResourceList => ResourceList
        def manipulate_resource_list(resources)
          resources + @redirects.map do |path, opts|
            r = RedirectResource.new(
              @app.sitemap,
              path,
              opts[:to]
            )
            r.output = opts[:template] if opts[:template]
            r
          end
        end
      end

      # Synthetic resource that renders an HTML page redirecting to its target.
      class RedirectResource < ::Middleman::Sitemap::Resource
        # Optional proc used to render the redirect page body; when nil a
        # default meta-refresh HTML page is emitted (see #render).
        Contract None => Maybe[Proc]
        attr_accessor :output

        def initialize(store, path, target)
          @request_path = target

          super(store, path)
        end

        Contract None => Bool
        def template?
          true
        end

        Contract Args[Any] => String
        def render(*)
          # Resolve the target (path or resource) to an absolute URL.
          url = ::Middleman::Util.url_for(@store.app, @request_path,
                                          relative: false,
                                          find_resource: true)

          if output
            output.call(path, url)
          else
            <<-END
              <html>
                <head>
                  <meta http-equiv=refresh content="0; url=#{url}" />
                  <meta name="robots" content="noindex,follow" />
                  <meta http-equiv="cache-control" content="no-cache" />
                </head>
                <body>
                </body>
              </html>
            END
          end
        end

        Contract None => Bool
        def ignored?
          false
        end
      end
    end
  end
end
#!/usr/bin/env bash
set -eEuo pipefail

cd `dirname $BASH_SOURCE`/..

pushd .. > /dev/null
pwd
rm -rf instana-serverless*.tgz
rm -rf instana-aws-lambda*.tgz

# Specify one of:
# - BUILD_LAMBDAS_WITH=local
# - BUILD_LAMBDAS_WITH=npm
# - BUILD_LAMBDAS_WITH=layer
# See ../README.md for details.

# Note: ${BUILD_LAMBDAS_WITH-xyz} -> default expansion with default xyz, to avoid bash error "unbound variable"
# when set -u is active.

echo Building all local tar.gz files.

if [[ -z "${BUILD_LAMBDAS_WITH-}" ]]; then
  echo "Environment variable BUILD_LAMBDAS_WITH has not been provided, assuming \"npm\" (build with latest npm package)."
fi

# We will want to install/uninstall local npm package tar files, which means we need to build them first. Note: Even for
# options BUILD_LAMBDAS_WITH=layer or BUILD_LAMBDAS_WITH=npm, we need to build the packages, because we for all three
# packages we will call "npm uninstall -S $package-name" and if the package.json points to the tar file it needs
# to exist so npm can uninstall it and all its transitive dependencies.
echo "Building local tar.gz for @instana/serverless."
cd ../serverless
npm --loglevel=warn pack
mv instana-serverless-*.tgz ../aws-lambda/instana-serverless.tgz

echo "Building local tar.gz for @instana/aws-lambda."
cd ../aws-lambda
npm --loglevel=warn pack
mv instana-aws-lambda-1*.tgz instana-aws-lambda.tgz

echo "Building local tar.gz for instana-aws-lambda-auto-wrap."
cd ../aws-lambda-auto-wrap
npm --loglevel=warn pack
mv instana-aws-lambda-auto-wrap*.tgz ../aws-lambda/instana-aws-lambda-auto-wrap.tgz

cd ../aws-lambda

if [[ "${BUILD_LAMBDAS_WITH-npm}" == "npm" ]]; then
  echo "Building Lambda zip file(s) with the latest npm packages."
elif [[ "${BUILD_LAMBDAS_WITH-}" == "local" ]]; then
  echo "Building Lambda zip file(s) with the local tar.gz files."
# Guarded expansion here too: with set -u an unguarded $BUILD_LAMBDAS_WITH
# would abort the script if the variable were ever evaluated while unset.
elif [[ "${BUILD_LAMBDAS_WITH-}" == "layer" ]]; then
  echo "Building Lambda zip file(s) without @instana/aws-lambda, assuming the AWS Lambda layer \"instana-nodejs\" is (or will be) configured."
else
  echo "Unknown option for BUILD_LAMBDAS_WITH: ${BUILD_LAMBDAS_WITH-}"
  echo Aborting.
  exit 1
fi

popd > /dev/null

if [[ -z "${1-}" ]]; then
  echo Creating *all* zip files.
  echo
  for lambda_directory in */ ; do
    if [[ -d "$lambda_directory" && ! -L "$lambda_directory" && -e "$lambda_directory/bin/create-zip.sh" ]]; then
      echo "next directory: $lambda_directory"
      $lambda_directory/bin/create-zip.sh
    else
      echo "skipping directory: $lambda_directory"
    fi
  done
else
  echo "Creating *only* $1.zip"
  lambda_directory=$1
  if [[ -d "$lambda_directory" && ! -L "$lambda_directory" && -e "$lambda_directory/bin/create-zip.sh" ]]; then
    $lambda_directory/bin/create-zip.sh
  else
    echo "Cannot create zip file for $lambda_directory, either the directory does not exist or is a symlink or it has no bin/create-zip.sh script."
  fi
fi
import React from "react";
import { Typography, makeStyles } from "@material-ui/core";

const useStyles = makeStyles((theme) => ({
  // Pinned to the bottom of the (relatively positioned) chat container.
  root: {
    position: "absolute",
    bottom: theme.spacing(2),
    left: 0,
    right: 0
    // zIndex: 10000
  },
  // Pill-shaped, semi-transparent button centered horizontally; fully
  // opaque on hover.
  button: {
    backgroundColor: theme.palette.primary.light,
    borderRadius: theme.spacing(3),
    width: 150,
    margin: "auto",
    display: "flex",
    alignItems: "center",
    justifyContent: "center",
    color: theme.palette.getContrastText(theme.palette.primary.light),
    padding: theme.spacing(1, 0),
    opacity: 0.8,
    cursor: "pointer",
    "&:hover": {
      // backgroundColor: theme.palette.primary.main,
      opacity: 1
    }
  }
}));

/**
 * Floating "new messages" pill shown over a chat.
 *
 * Props:
 * - onClickUnread: click handler (e.g. scroll to the first unread message).
 * - numMessages: unread count; when null a generic label is shown.
 *   NOTE(review): a count of 0 is falsy and also falls back to the generic
 *   "New messages" label - confirm the component is never rendered with 0.
 */
function ChatUnreadMessages({ onClickUnread, numMessages = null }) {
  const classes = useStyles();

  // Recompute the label only when the count changes.
  const label = React.useMemo(() => {
    if (numMessages) {
      return numMessages + " new message" + (numMessages > 1 ? "s" : "");
    }
    return "New messages";
  }, [numMessages]);

  return (
    <div className={classes.root}>
      <div className={classes.button} onClick={onClickUnread}>
        <Typography align="center" variant="caption">
          {label}
        </Typography>
      </div>
    </div>
  );
}

export default ChatUnreadMessages;
package com.yueyue.glidedemo.module.chapter_5;

import android.content.Context;
import android.graphics.drawable.GradientDrawable;
import android.os.Build;
import android.support.v4.widget.SwipeRefreshLayout;
import android.widget.Button;
import android.widget.ImageView;

import com.bumptech.glide.Glide;
import com.bumptech.glide.request.RequestOptions;
import com.yueyue.glidedemo.R;
import com.yueyue.glidedemo.base.App;
import com.yueyue.glidedemo.base.BaseFragment;
import com.yueyue.glidedemo.utils.ConvertUtil;
import com.yueyue.glidedemo.utils.ToastUtil;

import butterknife.BindView;
import butterknife.OnClick;
import jp.wasabeef.glide.transformations.BlurTransformation;
import jp.wasabeef.glide.transformations.GrayscaleTransformation;

/**
 * author : yueyue on 2018/4/21 20:53
 * desc : "The most complete analysis of Android image loading frameworks (5):
 *        Glide's powerful image transformations" - CSDN blog
 * <p>https://blog.csdn.net/guolin_blog/article/details/71524668</p>
 */
public class Chapter5_Fragment extends BaseFragment {
    private static final String TAG = Chapter5_Fragment.class.getSimpleName();

    @BindView(R.id.swipe_refresh)
    SwipeRefreshLayout mSwipeRefresh;
    @BindView(R.id.iv_image)
    ImageView mIvImage;
    @BindView(R.id.btn_my_circle_crop)
    Button mBtnMyCircleCrop;
    @BindView(R.id.btn_blur_transformations)
    Button mBtnBlurTransformations;
    @BindView(R.id.btn_grayscale_transformations)
    Button mBtnGrayscaleTransformations;
    @BindView(R.id.btn_combination_transformations)
    Button mBtnCombinationTransformations;

    // Applies the custom MyCircleCrop transformation.
    @OnClick(R.id.btn_my_circle_crop)
    void circleCrop() {
        changeSwipeRefreshState(true);
        // Fragment may be detached; fall back to the application context.
        Context context = getContext() == null ? App.getContext() : getContext();
        RequestOptions options = new RequestOptions()
                .placeholder(R.drawable.placeholder)
                .error(R.drawable.error)
                .override(ConvertUtil.dp2px(200))
                .transform(new MyCircleCrop());
        Glide.with(context).load(R.drawable.ying_default).apply(options).into(mIvImage);
        ToastUtil.showShort(TAG + "自定义的MyCircleCrop变换成功");
        changeSwipeRefreshState(false);
    }

    // Applies a blur transformation from the glide-transformations library.
    @OnClick(R.id.btn_blur_transformations)
    void blurTransformation() {
        changeSwipeRefreshState(true);
        Context context = getContext() == null ? App.getContext() : getContext();
        // Blur
        RequestOptions options = RequestOptions
                .bitmapTransform(new BlurTransformation(10))
                .placeholder(R.drawable.placeholder)
                .error(R.drawable.error);
        Glide.with(context)
                .load(R.drawable.ying_default)
                .apply(options)
                .into(mIvImage);
        ToastUtil.showShort(TAG + "模糊化成功");
        changeSwipeRefreshState(false);
    }

    // Applies a grayscale transformation.
    @OnClick(R.id.btn_grayscale_transformations)
    void grayscaleTransformation() {
        changeSwipeRefreshState(true);
        Context context = getContext() == null ? App.getContext() : getContext();
        // Grayscale
        RequestOptions options = RequestOptions
                .bitmapTransform(new GrayscaleTransformation())
                .placeholder(R.drawable.placeholder)
                .error(R.drawable.error);
        Glide.with(context)
                .load(R.drawable.ying_default)
                .apply(options)
                .into(mIvImage);
        ToastUtil.showShort(TAG + "黑白化成功");
        changeSwipeRefreshState(false);
    }

    // Applies grayscale + blur combined.
    @OnClick(R.id.btn_combination_transformations)
    void combinationTransformation() {
        changeSwipeRefreshState(true);
        Context context = getContext() == null ? App.getContext() : getContext();
        // Use .bitmapTransform for a single transformation, .transforms for combinations.
        // Grayscale combined with blur
        RequestOptions options = new RequestOptions()
                .transforms(new GrayscaleTransformation(),new BlurTransformation(10))
                .placeholder(R.drawable.placeholder)
                .error(R.drawable.error);
        Glide.with(context)
                .load(R.drawable.ying_default)
                .apply(options)
                .into(mIvImage);
        ToastUtil.showShort(TAG + "模糊黑白化成功");
        changeSwipeRefreshState(false);
    }

    // Toggles the pull-to-refresh spinner.
    private void changeSwipeRefreshState(boolean swipeRefresh) {
        mSwipeRefresh.setRefreshing(swipeRefresh);
    }

    @Override
    protected int initLayoutId() {
        return R.layout.fragment_chapter5;
    }

    @Override
    protected void initViews() {
        initButtonBg();
    }

    // Gives every demo button the same rounded grey background, using the
    // pre-API-16 setBackgroundDrawable where required.
    private void initButtonBg() {
        GradientDrawable gd = new GradientDrawable();
        gd.setCornerRadius(ConvertUtil.dp2px(2));
        gd.setColor(getResources().getColor(R.color.color_d6d7d7));
        if (Build.VERSION.SDK_INT <= Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1) {
            mBtnMyCircleCrop.setBackgroundDrawable(gd);
            mBtnBlurTransformations.setBackgroundDrawable(gd);
            mBtnGrayscaleTransformations.setBackgroundDrawable(gd);
            mBtnCombinationTransformations.setBackgroundDrawable(gd);
        } else {
            mBtnMyCircleCrop.setBackground(gd);
            mBtnBlurTransformations.setBackground(gd);
            mBtnGrayscaleTransformations.setBackground(gd);
            mBtnCombinationTransformations.setBackground(gd);
        }
    }

    @Override
    protected void initData() {
        // Show the custom circle crop by default.
        circleCrop();
    }

    // Factory used by the hosting activity.
    public static Chapter5_Fragment launch() {
        return new Chapter5_Fragment();
    }
}
#!/bin/bash
# Firewall bootstrap: snapshots and resets iptables, whitelists fixed and
# temporary admin IPs, and sets up NAT port forwarding. The echoed status
# strings are user-facing output and are left as-is.

IPTABLES=/sbin/iptables
IPTABLES_SAVE=/sbin/iptables-save

# Snapshot current rules so they can be restored if this script misbehaves.
$IPTABLES_SAVE > /usr/local/etc/iptables.last

echo '配置默策略ACCEPT'
$IPTABLES -P INPUT ACCEPT
$IPTABLES -F INPUT

echo '清空nat表'
$IPTABLES -t nat -F
$IPTABLES -t nat -X
$IPTABLES -t nat -Z
$IPTABLES -t mangle -F
$IPTABLES -t mangle -X
$IPTABLES -t mangle -Z

echo '删除admin链'
$IPTABLES -F admin
$IPTABLES -X admin

echo '清空filter表'
$IPTABLES -t filter -F
$IPTABLES -t filter -X
$IPTABLES -t filter -Z

echo '允许本机访问'
$IPTABLES -A INPUT -s 127.0.0.1 -j ACCEPT
$IPTABLES -A INPUT -s 127.0.0.1 -d 127.0.0.1 -j ACCEPT
$IPTABLES -A OUTPUT -j ACCEPT

echo '允许指定IP或者IP段访问,固定开放的IP'
$IPTABLES -A INPUT --src 192.168.0.0/16 -j ACCEPT
$IPTABLES -A INPUT --src 121.40.25.37 -j ACCEPT #svm
$IPTABLES -A INPUT --src 121.40.27.88 -j ACCEPT # official website

# Temporarily whitelisted admin source IPs (comma-separated multi-address form).
ALLOW_IPS=121.237.61.142,121.237.61.152

echo '创建新的链 admin,临时开放的IP'
$IPTABLES -N admin
$IPTABLES -A admin --src $ALLOW_IPS -j ACCEPT
$IPTABLES -A admin -j DROP
# NOTE(review): the original comment says "redis" but port 13306 looks like a
# remapped MySQL port - confirm which service is actually behind it.
$IPTABLES -A INPUT -m tcp -p tcp -m multiport --dports 22,13306 -j admin # allow 22 and redis
$IPTABLES -A INPUT -m tcp -p tcp -j admin

echo '配置nat表 DNAT与SNAT'
$IPTABLES -I INPUT -p tcp --dport 8080 -j ACCEPT # CI
# Forward external ports 1122/1222 to SSH on the internal hosts.
$IPTABLES -t nat -I PREROUTING -p tcp --dport 1122 -j DNAT --to 192.168.1.1:22
$IPTABLES -t nat -I PREROUTING -p tcp --dport 1222 -j DNAT --to 192.168.1.2:22
$IPTABLES -t nat -I POSTROUTING -s 192.168.0.0/16 -j SNAT --to-source 192.168.0.1
# $IPTABLES -I INPUT -m state --state NEW -m tcp -p tcp --dport 8000 -m mark --mark 1 -j ACCEPT
# $IPTABLES -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 8000
# $IPTABLES -t mangle -A PREROUTING -p tcp --dport 80 -j MARK --set-mark 1
$IPTABLES -t nat -A PREROUTING -p tcp --dport 1000 -j REDIRECT --to-port 8000

echo '配置filter表'
# Allow established connections; drop all UDP and any new inbound TCP (SYN).
$IPTABLES -I INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
$IPTABLES -A INPUT -p udp -j DROP
$IPTABLES -A INPUT -p tcp --syn -j DROP

/etc/init.d/iptables save
import { parse as pathParse } from "path";

import { RestAnnotation } from "./ast";

/**
 * Collects `[header Name: {var}]` declarations from the annotation text.
 *
 * @param text - the annotation text after the method and path fragments.
 * @returns a map from lower-cased header name to the bound variable name.
 */
function scanHeaders(text: string) {
    // Header name: https://tools.ietf.org/html/rfc2616#section-4.2
    const headerRegex = /\[header (?<header>[^()<>@,;:\\"/[\]?={}\s\t]+): \{(?<name>\w+)\}\]/gu;
    const headers = new Map<string, string>();
    let match: RegExpExecArray | null;

    // The /g regex is created fresh on every call, so its lastIndex state is private here.
    while ((match = headerRegex.exec(text)) !== null) {
        if (match.groups?.header && match.groups.name) {
            headers.set(match.groups.header.toLowerCase(), match.groups.name);
        }
    }

    return headers;
}

/**
 * Finds a `[body {var}]` declaration.
 *
 * @returns the bound variable name, or null when absent.
 */
function scanBody(text: string) {
    const match = /\[body \{(?<name>\w+)\}\]/u.exec(text);

    if (match?.groups?.name) {
        return match.groups.name;
    }

    return null;
}

/**
 * Lists every `{var}` placeholder in the text, in order of appearance.
 */
function scanVariables(text: string) {
    const variableRegex = /\{(?<name>\w+)\}/gu;
    const variables: string[] = [];
    let match: RegExpExecArray | null;

    while ((match = variableRegex.exec(text)) !== null) {
        if (match.groups?.name) {
            variables.push(match.groups.name);
        }
    }

    return variables;
}

/**
 * Parses a REST annotation of the form
 * `METHOD /path/{v}?{q}&{r} [header X: {h}] [body {b}]`.
 *
 * @throws Error when the method is unsupported, the path is not absolute,
 *         or the querystring is not a `{v}&{w}…` chain.
 */
export function parseRestAnnotation(text: string): RestAnnotation {
    const fragments = text.split(" ");
    const method = fragments[0].toUpperCase();

    if (!["GET", "POST", "PUT", "DELETE", "PATCH"].includes(method)) {
        throw new Error(`Unsupported method '${method}'`);
    }

    // NOTE(review): pathParse comes from Node's "path" module and is
    // platform-specific; annotations are expected to use forward slashes.
    const parsedPath = pathParse(fragments[1]);

    if (parsedPath.root !== "/") {
        throw new Error(`Invalid path`);
    }

    if (parsedPath.dir === "/") {
        // Avoid producing a double slash when re-joining dir and base below.
        parsedPath.dir = "";
    }

    let queryVariables: string[] = [];

    if (parsedPath.base.includes("?")) {
        // Split off the querystring; re-join with '?' in case the query itself contains one.
        const [base, ...queryArray] = parsedPath.base.split("?");
        parsedPath.base = base;
        const query = queryArray.join("?");

        if (!/^\{\w+\}(?:&\{\w+\})*$/u.test(query)) {
            throw new Error(`Invalid querystring on path`);
        }

        queryVariables = scanVariables(query);
    }

    const path = `${parsedPath.dir}/${parsedPath.base}`;
    const pathVariables = scanVariables(path);

    // Everything after the path fragment may carry header and body declarations.
    const remaining = fragments.slice(2).join(" ");
    const headers = scanHeaders(remaining);
    const bodyVariable = scanBody(remaining);

    return new RestAnnotation(method, path, pathVariables, queryVariables, headers, bodyVariable);
}
package io.github.marcelbraghetto.dailydeviations.features.collection.ui; import android.annotation.TargetApi; import android.content.Intent; import android.database.Cursor; import android.net.Uri; import android.os.Build; import android.os.Bundle; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import android.support.v4.app.LoaderManager; import android.support.v4.content.CursorLoader; import android.support.v4.content.Loader; import android.support.v7.widget.StaggeredGridLayoutManager; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import javax.inject.Inject; import io.github.marcelbraghetto.dailydeviations.R; import io.github.marcelbraghetto.dailydeviations.databinding.CollectionFragmentBinding; import io.github.marcelbraghetto.dailydeviations.features.application.MainApp; import io.github.marcelbraghetto.dailydeviations.features.collection.logic.CollectionArguments; import io.github.marcelbraghetto.dailydeviations.features.collection.logic.CollectionDisplayMode; import io.github.marcelbraghetto.dailydeviations.features.collection.logic.CollectionViewModel; import io.github.marcelbraghetto.dailydeviations.framework.artworks.models.Artwork; import io.github.marcelbraghetto.dailydeviations.framework.foundation.ui.BaseFragment; /** * Created by <NAME> on 24/02/16. * * Fragment representing the 'collection' of artworks rendered in a grid formation. 
 */
public class CollectionFragment extends BaseFragment {
    // Single cursor-loader id used for the artwork collection.
    private static final int LOADER_ID = 0;

    private CollectionFragmentBinding mViews;   // data-binding handle for the fragment layout
    private CollectionAdapter mAdapter;         // grid adapter backed by a Cursor
    @Inject CollectionViewModel mViewModel;     // injected by Dagger in the constructor

    /** Creates a new instance with the given arguments bundled in. */
    @NonNull
    public static CollectionFragment newInstance(@NonNull CollectionArguments extras) {
        CollectionFragment fragment = new CollectionFragment();
        fragment.setArguments(extras.toBundle());
        return fragment;
    }

    public CollectionFragment() {
        // Field injection of mViewModel.
        MainApp.getDagger().inject(this);
    }

    @Override
    public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
        super.onViewCreated(view, savedInstanceState);
        mAdapter = new CollectionAdapter(getActivity());
        mAdapter.setGridItemDelegate(mAdapterDelegate);
        mViews.setViewModel(mViewModel);
        // From here on the view model drives the UI through the mDelegate callbacks.
        mViewModel.begin(getArguments(), mDelegate);
    }

    @Override
    public void onStart() {
        super.onStart();
        mViewModel.screenStarted();
    }

    @Override
    public void onStop() {
        super.onStop();
        mViewModel.screenStopped();
    }

    @Override
    public void onDestroyView() {
        // Detach the adapter and destroy the loader so the Cursor is released with the view.
        mViews.collectionRecyclerView.setAdapter(null);
        mAdapter.swapCursor(null);
        getLoaderManager().destroyLoader(LOADER_ID);
        super.onDestroyView();
    }

    @Override
    public void onDestroy() {
        mViewModel.destroy();
        super.onDestroy();
    }

    @Nullable
    @Override
    public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        mViews = CollectionFragmentBinding.inflate(inflater, container, false);
        return mViews.getRoot();
    }

    /** Callbacks through which the view model manipulates this fragment's UI. */
    private final CollectionViewModel.Actions mDelegate = new CollectionViewModel.Actions() {
        @Override
        public void setDataSource(@NonNull final Uri dataSourceUri) {
            mViews.collectionRecyclerView.setLayoutManager(new StaggeredGridLayoutManager(getResources().getInteger(R.integer.collection_num_columns), StaggeredGridLayoutManager.VERTICAL));
            mViews.collectionRecyclerView.setAdapter(mAdapter);

            getLoaderManager().initLoader(LOADER_ID, null, new LoaderManager.LoaderCallbacks<Cursor>() {
                @Override
                public Loader<Cursor> onCreateLoader(int id, Bundle args) {
                    return new CursorLoader(getActivity(), dataSourceUri, null, null, null, null);
                }

                @Override
                public void onLoadFinished(Loader<Cursor> loader, Cursor data) {
                    mAdapter.swapCursor(data);
                }

                @Override
                public void onLoaderReset(Loader<Cursor> loader) {
                    mAdapter.swapCursor(null);
                }
            });
        }

        @Override
        public void setCollectionMode(@NonNull CollectionDisplayMode collectionMode) {
            // Apply a fade in effect and tell the adapter to changes its display mode.
            mViews.collectionSwipeRefreshLayout.clearAnimation();
            mViews.collectionSwipeRefreshLayout.setAlpha(0);
            mViews.collectionSwipeRefreshLayout.animate().alpha(1f).setStartDelay(300).setDuration(600).start();
            mAdapter.setCollectionMode(collectionMode);
        }

        @Override
        public void startActivity(@NonNull Intent intent) {
            getActivity().startActivity(intent);
        }

        @TargetApi(Build.VERSION_CODES.LOLLIPOP)
        @Override
        public void startActivityWithSceneTransition(@NonNull Intent intent, @NonNull Bundle sceneTransitionBundle) {
            getActivity().startActivity(intent, sceneTransitionBundle);
        }

        @Override
        public void showSnackbar(@NonNull String message) {
            CollectionFragment.this.showSnackbar(message, mViews.collectionSwipeRefreshLayout);
        }
    };

    /** Callbacks from the individual grid items. */
    private final CollectionAdapter.GridItemDelegate mAdapterDelegate = new CollectionAdapter.GridItemDelegate() {
        @Override
        public void onArtworkSelected(@NonNull Artwork artwork, @Nullable Bundle sceneTransitionBundle) {
            mViewModel.artworkSelected(artwork, sceneTransitionBundle);
        }

        @Override
        public void onToggleFavourite(@NonNull Artwork artwork, boolean isFavourite) {
            mViewModel.favouriteButtonSelected(artwork, isFavourite);
        }

        @Override
        public void onCollectionChanged(int numItems) {
            mViewModel.collectionDataSourceChanged(numItems);
        }
    };
}
<reponame>bookmansoft/gamegold-wechat-server let facade = require('gamecloud') let { ReturnCode, NotifyType } = facade.const let rp = require('request-promise'); let qr = require('qr-image'); /** * 游戏的控制器 * Updated on 2018-11-19. */ class wallet extends facade.Control { /** * 创建钱包 * @param {*} user * @param {*} paramGold 其中的成员 items 是传递给区块链全节点的参数数组 */ async Create(user, paramGold) { try { console.log("wallet.Create参数串:"); let paramArray = paramGold.items; if (typeof (paramArray) == "string") { paramArray = JSON.parse(paramArray); } console.log(paramArray); let ret = await this.core.service.RemoteNode.conn(user.cid).execute('wallet.create', paramArray); console.log(ret); //return { code: ReturnCode.Success, list: ret }; return { code: ret.code, list: ret.result }; } catch (error) { console.log(error); return { code: -1, msg: "wallet.Create方法出错" }; } } /** * 配置URL路由,用户可以直接经由页面访问获取签名数据集 */ get router() { return [ ['/qrcode/:qrcode', 'qrCode'], ]; } /** * 生成并返回二维码图像 * @param {*} params */ qrCode(params) { if (!!params.qrcode) { let img = qr.image(params.qrcode, { size: 10 }); params.res.writeHead(200, { 'Content-Type': 'image/png' }); img.pipe(params.res); } else { rp({ uri: decodeURIComponent(this.core.fileMap.DataConst.user.icon), headers: { 'User-Agent': 'Request-Promise', } }).pipe(params.res); } } /** * 列表钱包 * @param {*} user * @param {*} paramGold 其中的成员 items 是传递给区块链全节点的参数数组 */ async List(user, paramGold) { try { console.log("wallet.List参数串:"); let paramArray = paramGold.items; if (typeof (paramArray) == "string") { paramArray = JSON.parse(paramArray); } console.log(paramArray); let ret = await this.core.service.RemoteNode.conn(user.cid).execute('wallet.list', paramArray); //console.log(ret); //return { code: ReturnCode.Success, list: ret }; return { code: ret.code, list: ret.result }; } catch (error) { console.log(error); return { code: -1, msg: "wallet.List方法出错" }; } } /** * 查询钱包概要 * @param {*} user * @param {*} paramGold 其中的成员 items 是传递给区块链全节点的参数数组 */ async 
Info(user, paramGold) { try { console.log("wallet.Info参数串:"); console.log(JSON.stringify(paramGold.userinfo)); let paramArray = paramGold.items; if (typeof (paramArray) == "string") { paramArray = JSON.parse(paramArray); } console.log(paramArray); let ret = await this.core.service.RemoteNode.conn(user.cid).execute('wallet.info', paramArray); //console.log(ret); //return { code: ReturnCode.Success, list: ret }; return { code: ret.code, list: ret.result }; } catch (error) { console.log(error); return { code: -1, msg: "wallet.Info方法出错" }; } } /** * 转储钱包信息 * @param {*} user * @param {*} paramGold 其中的成员 items 是传递给区块链全节点的参数数组 */ async Dump(user, paramGold) { try { console.log("wallet.Dump参数串:"); let paramArray = paramGold.items; if (typeof (paramArray) == "string") { paramArray = JSON.parse(paramArray); } console.log(paramArray); let ret = await this.core.service.RemoteNode.conn(user.cid).execute('wallet.dump', paramArray); console.log(ret); //return { code: ReturnCode.Success, list: ret }; return { code: ret.code, list: ret.result }; } catch (error) { console.log(error); return { code: -1, msg: "wallet.Dump方法出错" }; } } /** * 导入钱包备份 * @param {*} user * @param {*} paramGold 其中的成员 items 是传递给区块链全节点的参数数组 */ async ImportWallet(user, paramGold) { try { console.log("wallet.ImportWallet参数串:"); let paramArray = paramGold.items; if (typeof (paramArray) == "string") { paramArray = JSON.parse(paramArray); } console.log(paramArray); let ret = await this.core.service.RemoteNode.conn(user.cid).execute('wallet.import', paramArray); console.log(ret); //return { code: ReturnCode.Success, list: ret }; return { code: ret.code, list: ret.result }; } catch (error) { console.log(error); return { code: -1, msg: "wallet.ImportWallet方法出错" }; } } /** * 备份钱包 * @param {*} user * @param {*} paramGold 其中的成员 items 是传递给区块链全节点的参数数组 */ async Backup(user, paramGold) { try { console.log("wallet.Backup参数串:"); let paramArray = paramGold.items; if (typeof (paramArray) == "string") { paramArray = 
JSON.parse(paramArray); } console.log(paramArray); let ret = await this.core.service.RemoteNode.conn(user.cid).execute('wallet.backup', paramArray); console.log(ret); //return { code: ReturnCode.Success, list: ret }; return { code: ret.code, list: ret.result }; } catch (error) { console.log(error); return { code: -1, msg: "wallet.Backup方法出错" }; } } /** * 获取钱包助记词 * @param {*} user * @param {*} paramGold 其中的成员 items 是传递给区块链全节点的参数数组 */ async KeyMaster(user, paramGold) { try { console.log("key.master参数串:"); let paramArray = paramGold.items; if (typeof (paramArray) == "string") { paramArray = JSON.parse(paramArray); } console.log(paramArray); let ret = await this.core.service.RemoteNode.conn(user.cid).execute('key.master.admin', paramArray); console.log(ret); //return { code: ReturnCode.Success, list: ret }; return { code: ret.code, data: ret.result }; } catch (error) { console.log(error); return { code: -1, msg: "key.master.admin 方法出错" }; } } } exports = module.exports = wallet;
def remove_duplicates(lst):
    """Return a new list with duplicates removed, keeping first-seen order."""
    seen = set()
    unique = []
    for item in lst:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique


result = remove_duplicates([1, 2, 2, 4, 5])
print(result)
class Node:
    """A single element of a singly linked list."""

    def __init__(self, value):
        self.value = value  # payload carried by this node
        self.next = None    # following node, or None at the tail


class LinkedList:
    """Minimal singly linked list supporting append, prepend and display."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def append(self, value):
        """Add a node holding *value* at the tail (O(n) tail walk)."""
        tail = Node(value)
        if self.head is None:
            self.head = tail
            return
        cursor = self.head
        while cursor.next is not None:
            cursor = cursor.next
        cursor.next = tail

    def prepend(self, value):
        """Insert a node holding *value* before the current head (O(1))."""
        node = Node(value)
        node.next = self.head
        self.head = node

    def display(self):
        """Return the node values as a plain Python list, head first."""
        values = []
        cursor = self.head
        while cursor is not None:
            values.append(cursor.value)
            cursor = cursor.next
        return values


# Example usage
ll = LinkedList()
ll.append(1)
ll.append(2)
ll.prepend(0)
print(ll.display())  # Output: [0, 1, 2]
/*************************************************************************** * Copyright (C) 2014 by <NAME> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * As a special exception, if other files instantiate templates or use * * macros or inline functions from this file, or you compile this file * * and link it with other works to produce a work based on this file, * * this file does not by itself cause the resulting work to be covered * * by the GNU General Public License. However the source code for this * * file must still be made available in accordance with the GNU General * * Public License. This exception does not invalidate any other reasons * * why a work based on this file might be covered by the GNU General * * Public License. 
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program; if not, see <http://www.gnu.org/licenses/>  *
 ***************************************************************************/

#ifndef EVENT_TYPES_STM32F4DISCOVERY_H
#define EVENT_TYPES_STM32F4DISCOVERY_H

#ifdef _BOARD_STM32F429ZI_STM32F4DISCOVERY

/**
 * Event type codes for the STM32F4-Discovery backend.
 *
 * NOTE(review): TouchDown..ButtonA reuse the values 1..4 already taken by the
 * implicitly-numbered Window* enumerators above them; presumably the two
 * groups never appear in the same event stream — confirm before relying on
 * numeric uniqueness of these codes.
 */
class EventType
{
public:
    enum E
    {
        // These are a must on all backends -- begin
        Default=0,              // This actually means 'no event'
        WindowPartialRedraw,    // At least one drawable has requested redraw
        WindowForeground,       // Window manager moved this window to foreground
        WindowBackground,       // Window manager moved this window to background
        WindowQuit,             // Window manager requested the window to close
        // These are a must on all backends -- end
        TouchDown=1,
        TouchUp=2,
        TouchMove=3,
        ButtonA=4 //The blue button
    };
private:
    EventType();  // static-only holder; instances are not allowed
};

#endif //_BOARD_STM32F429ZI_STM32F4DISCOVERY

#endif //EVENT_TYPES_STM32F4DISCOVERY_H
#!/bin/bash
# NoobTaco Simple Post-install script
#
# Installs gaming and productivity software via apt, snap and flatpak.
# The whole script is one '&&' chain: any failing step stops the run, so
# every interactive command must be told to assume "yes".

# Install Wine
sudo dpkg --add-architecture i386 &&
wget -nc https://dl.winehq.org/wine-builds/winehq.key &&
sudo apt-key add winehq.key &&
sudo apt-add-repository -y 'deb https://dl.winehq.org/wine-builds/ubuntu/ eoan main' &&

# Add Lutris repository
# FIX: pass -y so the unattended chain does not stall on the "Press ENTER" prompt.
sudo add-apt-repository -y ppa:lutris-team/lutris &&
sudo apt update &&

echo "**************************" &&
echo Installing APT Software &&
echo "**************************" &&
# FIX: pass -y so apt does not abort the chain waiting for confirmation.
sudo apt install -y htop steam lutris &&

echo "**************************" &&
echo Installing SNAP Software &&
echo "**************************" &&
# Use code insider untill system sync is rolled into mainline
sudo snap install code-insiders --classic &&
sudo snap install yakyak &&

echo "**************************" &&
echo Installing FLATPAK Software &&
echo "**************************" &&
flatpak install -y flathub org.gimp.GIMP &&
flatpak install -y flathub org.inkscape.Inkscape &&
flatpak install -y flathub com.axosoft.GitKraken &&
flatpak install -y flathub org.kde.krita &&
flatpak install -y flathub org.kde.kdenlive &&
flatpak install -y flathub org.audacityteam.Audacity
#! /usr/bin/env bash

# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Tears down the Istio/bookinfo demo: removes the k8s resources, waits for the
# GCP load balancers the addon manager created to disappear, deletes leftover
# forwarding rules / target pools / health checks / firewall rules, then runs
# terraform destroy.

set -x

ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"

# shellcheck source=scripts/istio.env
source "$ROOT/scripts/istio.env"
# NOTE(review): ISTIO_DIR is commented out here but used below; presumably
# istio.env exports it — verify, otherwise the kubectl delete -f paths are empty.
#ISTIO_DIR="$ROOT/istio-${ISTIO_VERSION}"

kubectl delete ns vm --ignore-not-found=true
kubectl delete ns bookinfo --ignore-not-found=true

kubectl delete -n default \
  -f "$ISTIO_DIR/samples/bookinfo/platform/kube/bookinfo.yaml"
kubectl delete -n default \
  -f "$ISTIO_DIR/samples/bookinfo/networking/bookinfo-gateway.yaml"
kubectl delete -n default \
  -f "$ISTIO_DIR/samples/bookinfo/networking/destination-rule-all-mtls.yaml"
kubectl delete -n default \
  -f "$ISTIO_DIR/samples/bookinfo/networking/virtual-service-reviews-v3.yaml"
kubectl delete -n default \
  -f "$ISTIO_DIR/samples/bookinfo/networking/virtual-service-ratings-mysql-vm.yaml"
kubectl delete -n default \
  -f "$ISTIO_DIR/samples/bookinfo/platform/kube/bookinfo-ratings-v2-mysql-vm.yaml"

# Wait until all LBs have been cleaned up by the addon manager
echo "Deleting Istio ILBs"
for ISTIO_LB_NAME in istio-ingressgateway istio-pilot-ilb mixer-ilb; do
  until [[ "$(kubectl get svc -n istio-system ${ISTIO_LB_NAME} -o=jsonpath="{.metadata.name}" --ignore-not-found=true)" == "" ]]; do
    echo "Waiting for istio-system ${ISTIO_LB_NAME} to be removed..."
    sleep 2
  done
done

# If kube-system/dns-lib svc is present, delete it
kubectl delete svc -n kube-system dns-ilb --ignore-not-found=true

# Wait until its gone
until [[ "$(kubectl get svc -n kube-system dns-ilb -o=jsonpath="{.metadata.name}" --ignore-not-found=true)" == "" ]]; do
  echo "Waiting for kube-system dns-ilb to be removed..."
  sleep 5
done

# Loop until the ILBs are fully gone
until [[ "$(gcloud --project="${ISTIO_PROJECT}" compute forwarding-rules list --format="value(name)" --filter "(description ~ istio-system.*ilb OR description:kube-system/dns-ilb) AND network ~ /istio-network$")" == "" ]]; do
  # Find all internal (ILB) forwarding rules in the network: istio-network
  FWDING_RULE_NAMES="$(gcloud --project="${ISTIO_PROJECT}" compute forwarding-rules list --format="value(name)" --filter "(description ~ istio-system.*ilb OR description:kube-system/dns-ilb) AND network ~ /istio-network$")"
  # Iterate and delete the forwarding rule by name and its corresponding backend-service by the same name
  for FWD_RULE in ${FWDING_RULE_NAMES}; do
    gcloud --project="${ISTIO_PROJECT}" compute forwarding-rules delete "${FWD_RULE}" --region="${REGION}" || true
    gcloud --project="${ISTIO_PROJECT}" compute backend-services delete "${FWD_RULE}" --region="${REGION}" || true
  done
  sleep 2
done

# Loop until the target-pools and health checks are fully gone
until [[ "$(gcloud --project="${ISTIO_PROJECT}" compute target-pools list --format="value(name)" --filter="(instances ~ gke-${ISTIO_CLUSTER})")" == "" && "$(gcloud --project="${ISTIO_PROJECT}" compute target-pools list --format="value(healthChecks)" --filter="(instances ~ gke-${ISTIO_CLUSTER})" | sed 's/.*\/\(k8s\-.*$\)/\1/g')" == "" ]]; do
  # Find all target pools with this cluster as the target by name
  TARGET_POOLS="$(gcloud --project="${ISTIO_PROJECT}" compute target-pools list --format="value(name)" --filter="(instances ~ gke-${ISTIO_CLUSTER})")"
  # Find all health checks with this cluster's nodes as the instances
  HEALTH_CHECKS="$(gcloud --project="${ISTIO_PROJECT}" compute target-pools list --format="value(healthChecks)" --filter="(instances ~ gke-${ISTIO_CLUSTER})" | sed 's/.*\/\(k8s\-.*$\)/\1/g')"
  # Delete the external (RLB) forwarding rules by name and the target pool by the same name
  for TARGET_POOL in ${TARGET_POOLS}; do
    gcloud --project="${ISTIO_PROJECT}" compute forwarding-rules delete "${TARGET_POOL}" --region="${REGION}" || true
    gcloud --project="${ISTIO_PROJECT}" compute target-pools delete "${TARGET_POOL}" --region="${REGION}" || true
  done
  # Delete the leftover health check by name
  for HEALTH_CHECK in ${HEALTH_CHECKS}; do
    gcloud --project="${ISTIO_PROJECT}" compute health-checks delete "${HEALTH_CHECK}" || true
  done
  sleep 2
done

# Delete all the firewall rules that aren't named like our cluster name which
# correspond to our health checks and load balancers that are dynamically created.
# This is because GKE manages those named with the cluster name get cleaned
# up with a terraform destroy.
until [[ "$(gcloud --project="${ISTIO_PROJECT}" compute firewall-rules list --format "value(name)" --filter "targetTags.list():gke-${ISTIO_CLUSTER} AND NOT name ~ gke-${ISTIO_CLUSTER}")" == "" ]]; do
  FW_RULES="$(gcloud --project="${ISTIO_PROJECT}" compute firewall-rules list --format "value(name)" --filter "targetTags.list():gke-${ISTIO_CLUSTER} AND NOT name ~ gke-${ISTIO_CLUSTER}")"
  for FW_RULE in ${FW_RULES}; do
    gcloud --project="${ISTIO_PROJECT}" compute firewall-rules delete "${FW_RULE}" || true
  done
  sleep 2
done

# Tear down all of the infrastructure created by Terraform
(cd "$ROOT/terraform"; terraform init; terraform destroy -input=false -auto-approve\
 -var "istio_project=${ISTIO_PROJECT}" \
 -var "gce_project=${GCE_PROJECT}" \
 -var "zone=${ZONE}" \
 -var "region=${REGION}" \
 -var "gce_network=${GCE_NETWORK}" \
 -var "gce_subnet=${GCE_SUBNET}" \
 -var "gce_subnet_cidr=${GCE_SUBNET_CIDR}" \
 -var "istio_network=${ISTIO_NETWORK}" \
 -var "istio_subnet_cidr=${ISTIO_SUBNET_CIDR}" \
 -var
"istio_subnet_cluster_cidr=${ISTIO_SUBNET_CLUSTER_CIDR}" \
 -var "istio_subnet_services_cidr=${ISTIO_SUBNET_SERVICES_CIDR}" \
 -var "gce_vm=${GCE_VM}")

# Clean up the downloaded Istio components
if [[ -d "$ROOT/istio-$ISTIO_VERSION" ]]; then
  # FIX: delete the directory that was actually tested above. The original
  # removed the relative path "istio-$ISTIO_VERSION", which only worked when
  # the script happened to be run from $ROOT.
  rm -rf "$ROOT/istio-$ISTIO_VERSION"
fi
<gh_stars>10-100
import { Disposable } from 'atom'
import { BusySignalService } from 'atom-ide'
import { AutoLanguageClient } from 'atom-languageclient'
import { LanguageServerProcess } from 'atom-languageclient/build/lib//server-manager.js'
import DatatipAdapter from 'atom-languageclient/build/lib/adapters/datatip-adapter'
import { InitializeParams } from 'atom-languageclient/build/lib/languageclient'
import { ChildProcess, spawn } from 'child_process'
import { EventEmitter } from 'events'
import { join } from 'path'
import { injectable } from 'tsyringe'
import * as pkg from '../package.json'
import { GoPlus } from '../typings/go-plus'
import {
    AtomPluginSettings,
    getPluginSettingValue,
    getProcessArgs,
} from './atom-config'
import {
    findOrInstallGoLangserver,
    promptToInstallGoPlusIfNeeded,
    promptToUpdateWithGoPlus,
    promptToUpgradeManually,
    shouldUpgrade,
} from './go-env'

/** Options forwarded to go-langserver in the LSP initialize request. */
interface InitializationOptions {
    formatTool?: string
    funcSnippetEnabled?: boolean
    gocodeCompletionEnabled?: boolean
    goimportsLocalPrefix?: string
    maxParallelism?: number
    useBinaryPkgCache?: boolean
}

// Internal one-shot events used to await the Go environment / busy-signal service.
const GO_READY_EVENT = Symbol('ide-go-env-ready')
const BUSY_SIGNAL_READY_EVENT = Symbol('ide-go-busy-signal-ready')

/** Language client for go-langserver. */
@injectable()
export class GoLanguageClient extends AutoLanguageClient {
    public readonly config: AtomPluginSettings = pkg.packageSettings
    private goConfig?: GoPlus.GoConfig
    private goGet?: GoPlus.GoGet

    public constructor(
        private readonly emitter: EventEmitter,
        datatip: DatatipAdapter
    ) {
        super()
        this.datatip = datatip
    }

    /** Loads a BusySignalService. */
    public consumeBusySignal(service: BusySignalService): Disposable {
        const disposable = super.consumeBusySignal(service)
        this.emitter.emit(BUSY_SIGNAL_READY_EVENT)
        return disposable
    }

    /** Loads a GoPlus.GoConfig. */
    public consumeGoConfig(goConfig: GoPlus.GoConfig): void {
        this.goConfig = goConfig
        // The Go environment counts as ready only once BOTH GoConfig and GoGet arrived.
        if (this.goGet) {
            this.emitter.emit(GO_READY_EVENT)
        }
    }

    /** Loads a GoPlus.GoGet. */
    public consumeGoGet(goGet: GoPlus.GoGet): void {
        this.goGet = goGet
        if (this.goConfig) {
            this.emitter.emit(GO_READY_EVENT)
        }
    }

    /** Activated for these scopes. */
    public getGrammarScopes(): string[] {
        return pkg.enhancedScopes
    }

    /** Initialize Params to use for the lsp. */
    public getInitializeParams(
        projectPath: string,
        process: LanguageServerProcess
    ): InitializeParams {
        const params = super.getInitializeParams(projectPath, process)
        const initializationOptions: InitializationOptions = {
            formatTool: getPluginSettingValue('formatTool'),
            funcSnippetEnabled: !!getPluginSettingValue('funcSnippetEnabled'),
            gocodeCompletionEnabled: !!getPluginSettingValue(
                'completionEnabled'
            ),
            goimportsLocalPrefix: getPluginSettingValue('goimportsLocalPrefix'),
            useBinaryPkgCache: !!getPluginSettingValue('useBinaryPkgCache'),
        }
        params.initializationOptions = initializationOptions
        return params
    }

    /** The programming language name. */
    public getLanguageName(): string {
        return 'Go'
    }

    /** They key to use for configurations. */
    public getRootConfigurationKey(): string {
        return pkg.name
    }

    /** The name of the lsp. */
    public getServerName(): string {
        return 'go-langserver'
    }

    /**
     * Map the configuration object. Used to restart the server when the
     * configuration changes.
     */
    public mapConfigurationObject(): void {
        this.restartAllServers().catch((e: Error) => {
            throw e
        })
    }

    /** Starts the lsp process. */
    public async startServerProcess(): Promise<ChildProcess> {
        const childProcess = spawn(await this.serverPath(), getProcessArgs(), {
            cwd: join(__dirname, '..'),
            env: process.env,
        })
        childProcess.on('exit', this.onExit.bind(this))
        return childProcess
    }

    /** Resolves once consumeBusySignal has been called. */
    private async busySignalReady(): Promise<void> {
        return new Promise(
            (resolve: () => void): void => {
                if (this.busySignalService) {
                    resolve()
                    return
                }
                // NOTE(review): this listener is never removed; harmless if the
                // event fires at most once per activation — confirm.
                this.emitter.on(BUSY_SIGNAL_READY_EVENT, resolve)
            }
        )
    }

    /** Resolves once both GoConfig and GoGet have been consumed. */
    private async goReady(): Promise<void> {
        return new Promise(
            (resolve: () => void): void => {
                if (this.goConfig && this.goGet) {
                    resolve()
                    return
                }
                this.emitter.on(GO_READY_EVENT, resolve)
            }
        )
    }

    /** Surfaces an unexpected server exit to the user (code 0 is silent). */
    private onExit(code: number, _signal: string): void {
        if (code) {
            atom.notifications.addError(
                `${this.getLanguageName()} language server stopped unexpectedly.`,
                {
                    description: `Exit code: ${code}\n${this.processStdErr}`,
                    dismissable: true,
                }
            )
        }
    }

    /**
     * Locates the go-langserver binary: a user-configured path wins,
     * otherwise it is found or installed through the Go environment.
     */
    private async serverPath(): Promise<string> {
        await this.busySignalReady()
        if (!this.busySignalService) {
            throw new Error('Busy Signal Service not loaded.')
        }
        const busy = this.busySignalService.reportBusy(
            `Looking for ${this.getServerName()}`
        )

        const customPath = getPluginSettingValue('customServerPath')
        if (customPath !== this.config.customServerPath.default) {
            if (shouldUpgrade(customPath)) {
                promptToUpgradeManually()
            }
            busy.dispose()
            return customPath
        }
        promptToInstallGoPlusIfNeeded(
            pkg.name,
            this.getServerName(),
            busy
        ).catch((e: Error) => {
            throw e
        })

        busy.setTitle(`Waiting for Go env`)
        await this.goReady()
        if (!this.goConfig || !this.goGet) {
            throw new Error('Failed to load Go environment.')
        }
        // NOTE(review): `busy` is not disposed on the throwing paths below;
        // presumably the helpers dispose it — verify.
        const serverPath = await findOrInstallGoLangserver(
            pkg.name,
            this.getServerName(),
            this.goConfig,
            this.goGet,
            busy
        )
        if (serverPath === false) {
            throw new Error('Failed to locate language server.')
        }
        if (shouldUpgrade(serverPath)) {
            await promptToUpdateWithGoPlus(
                pkg.name,
                this.getServerName(),
                this.goGet
            )
        }
        return serverPath
    }
}
<filename>1.0.0a/src/jadalib/libsess-media.c
#include "jada.h"
#include "jada_internal.h"
#include "jada_messages.h"
#include "jada_events.h"

/*
 * Allocates and initialises a new call descriptor bound to the given
 * connection. Returns the call handle, or NULL on any failure.
 */
DllExport JADA_Call __stdcall JADA_CallNew(char *szFile, int iLine, JADA_Connection connSelf)
{
    int iRet;
    JADA_CallSelf *callPtr;
    JADA_ConnSelf *connPtr = (JADA_ConnSelf *) connSelf;

    JADA_ReturnValIfInvalidConnection(connPtr, NULL);

    /* szFile/iLine are forwarded so the allocator can attribute the memory to the caller. */
    callPtr = JADA_Calloc(szFile, iLine, 1, sizeof(JADA_CallSelf));
    CORE_ReturnValIfFail(callPtr != NULL, NULL, ;, "Errore nell'allocazione del descrittore di sessione");

    iRet = JADA_SuppGetRequestId(connPtr->suppSelf, &callPtr->iRequestId);
    CORE_ReturnValIfFail(iRet == 0, NULL, ;, "Errore nella lettura del support");
    iRet = JADA_SuppGetSessionId(connPtr->suppSelf, &callPtr->iSessionId);
    CORE_ReturnValIfFail(iRet == 0, NULL, ;, "Errore nella lettura del support");

    callPtr->iStatus = JADA_CALLSTATUS_AVAILABLE;
    callPtr->iReset = FALSE;
    callPtr->connSelf = connSelf;
    time(&callPtr->timLastActivity);
    callPtr->iMagic = JADA_MAGIC_CALL;
    connPtr->callSelf = (JADA_Call) callPtr;

    /* Incoming audio/video queues plus the outgoing video queue. */
    callPtr->lstInAudioFrames = JADA_LstNew(__FILE__, __LINE__, sizeof(JADA_Frame), NULL);
    CORE_ReturnValIfFail(callPtr->lstInAudioFrames != NULL, NULL, ;, "Errore nella creazione della lista di frames");
    callPtr->lstInVideoFrames = JADA_LstNew(__FILE__, __LINE__, sizeof(JADA_Frame), NULL);
    CORE_ReturnValIfFail(callPtr->lstInVideoFrames != NULL, NULL, ;, "Errore nella creazione della lista di frames (IN)");
    callPtr->lstOutVideoFrames = JADA_LstNew(__FILE__, __LINE__, sizeof(JADA_Message), NULL);
    CORE_ReturnValIfFail(callPtr->lstOutVideoFrames != NULL, NULL, ;, "Errore nella creazione della lista di frames (OUT)");

    /* Video frame delay defaults to 2; may be overridden by call/delay in the .ini file. */
    callPtr->iVideoFrameDelay = 2;
    iRet = JADA_IniGetInt("call", "delay", &callPtr->iVideoFrameDelay);
    CORE_ReturnValIfFail(iRet == 0, NULL, ;, "Errore nella lettura del parametro call/delay dal file .ini");

    return((JADA_Call) callPtr);
}

/*
 * Closes the call if still connected, drains and destroys all frame queues,
 * and frees the descriptor.
 */
DllExport int __stdcall JADA_CallDestroy(char *szFile, int iLine, JADA_Call callSelf)
{
    int iRet, iRetries = 100;
    JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf;
    JADA_Frame frmSelf;
    CORE_Currency curC;

    JADA_ReturnValIfInvalidCall(callPtr, -1);

    if (JADA_CallIsConnected(callSelf) == TRUE) {
        iRet = JADA_CallClose(callSelf);
        CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore nella chiusura della richiesta");
        /* FIXME(review): this loop never breaks early once the call reaches
         * CLOSED; it always decrements iRetries down to -1, so the check
         * below (iRetries > 0) appears to fail unconditionally. It likely
         * needs a break when iStatus == JADA_CALLSTATUS_CLOSED — verify. */
        while (iRetries-- > 0) {
            if (callPtr->iStatus != JADA_CALLSTATUS_CLOSED) {
                JADA_Sleep(100);
            }
        }
        CORE_ReturnValIfFail(iRetries > 0, -1, ;, "Timeout nella risposta alla chiusura della richiesta");
    }

    /* Drain and destroy each queue; return codes of the per-frame destroys are not checked. */
    iRet = JADA_LstGetFirst(callPtr->lstInAudioFrames, &frmSelf, sizeof(frmSelf), &curC);
    while (iRet == 0) {
        iRet = JADA_FrmDestroy(__FILE__, __LINE__, frmSelf);
        iRet = JADA_LstGetNext(callPtr->lstInAudioFrames, &frmSelf, sizeof(frmSelf), &curC);
    }
    iRet = JADA_LstDestroy(__FILE__, __LINE__, callPtr->lstInAudioFrames);

    iRet = JADA_LstGetFirst(callPtr->lstInVideoFrames, &frmSelf, sizeof(frmSelf), &curC);
    while (iRet == 0) {
        iRet = JADA_FrmDestroy(__FILE__, __LINE__, frmSelf);
        iRet = JADA_LstGetNext(callPtr->lstInVideoFrames, &frmSelf, sizeof(frmSelf), &curC);
    }
    iRet = JADA_LstDestroy(__FILE__, __LINE__, callPtr->lstInVideoFrames);

    iRet = JADA_LstGetFirst(callPtr->lstOutVideoFrames, &frmSelf, sizeof(frmSelf), &curC);
    while (iRet == 0) {
        iRet = JADA_FrmDestroy(__FILE__, __LINE__, frmSelf);
        iRet = JADA_LstGetNext(callPtr->lstOutVideoFrames, &frmSelf, sizeof(frmSelf), &curC);
    }
    iRet = JADA_LstDestroy(__FILE__, __LINE__, callPtr->lstOutVideoFrames);

    JADA_Free(__FILE__, __LINE__, callPtr, sizeof(*callPtr));

    return(0);
}

/* Returns the call's request id through piId. */
DllExport int __stdcall JADA_CallGetRequestId(JADA_Call callSelf, JADA_ReqId *piId)
{
    JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf;

    JADA_ReturnValIfInvalidCall(callPtr, -1);

    *piId = callPtr->iRequestId;

    return(0);
}

/* Returns the call's session id through piId. */
DllExport int __stdcall JADA_CallGetSessionId(JADA_Call callSelf, JADA_SessId *piId)
{
    JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf;

    JADA_ReturnValIfInvalidCall(callPtr, -1);

    *piId = callPtr->iSessionId;

    return(0);
}

DllExport int
__stdcall JADA_CallGetConnection(JADA_Call callSelf, JADA_Connection *connSelf) { JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_ReturnValIfInvalidCall(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); *connSelf = (JADA_Connection) &connPtr; return(0); } DllExport int __stdcall JADA_CallIsConnected(JADA_Call callSelf) { int iIsConnected; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidCall(callPtr, -1); switch (callPtr->iStatus) { case JADA_CALLSTATUS_CONNECTED: iIsConnected = TRUE; break; default: iIsConnected = FALSE; break; } return(iIsConnected); } DllExport int __stdcall JADA_CallOpen(JADA_Call callSelf, JADA_ReqId iRequestId) { int iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_MsgOpenCall msgPayload; FixString szParams; JADA_ReturnValIfInvalidSupport(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); callPtr->iRequestId = iRequestId; iRet = JADA_SuppGetRequestId(connPtr->suppSelf, &callPtr->iRequestId); CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore nella lettura del support"); iRet = JADA_SuppGetSessionId(connPtr->suppSelf, &callPtr->iSessionId); CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore nella lettura del support"); iRet = JADA_ProcExists(JADA_PROC_CALLMANAGER); if (iRet == 0) { iRet = JADA_ProcKill(JADA_PROC_CALLMANAGER); } sprintf(szParams, "-audio 1 -video 1 -id %d", connPtr->szServerName, connPtr->iId); iRet = JADA_ProcExecute(JADA_PROC_CALLMANAGER, szParams); CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore generico"); JADA_LogMessage("Ho lanciato il comando %s %s\n", JADA_PROC_CALLMANAGER, szParams); memset(&msgPayload, 0, sizeof(msgPayload)); msgPayload.iRequestId = callPtr->iRequestId; callPtr->iAudioMuteStatus = FALSE; callPtr->iVideoMuteStatus = FALSE; iRet = JADA_SendCallMessage(callSelf, JADA_MSG_OPEN_CALL, &msgPayload, 
sizeof(msgPayload)); CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore nell'invio del messaggio OPEN_CALL"); return(0); } DllExport int __stdcall JADA_CallReset(JADA_Call callSelf, int iReset) { JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); callPtr->iReset = iReset; return(0); } DllExport int __stdcall JADA_CallIsReset(JADA_Call callSelf) { JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); return(callPtr->iReset); } DllExport int __stdcall JADA_CallMuteAudio(JADA_Call callSelf, int iMuteStatus) { JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); if (iMuteStatus != 0) { callPtr->iAudioMuteStatus = TRUE; } else { callPtr->iAudioMuteStatus = FALSE; } return(0); } DllExport int __stdcall JADA_CallMuteVideo(JADA_Call callSelf, int iMuteStatus) { JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_ReturnValIfInvalidSupport(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); if (iMuteStatus != 0) { callPtr->iVideoMuteStatus = TRUE; } else { callPtr->iVideoMuteStatus = FALSE; } return(0); } DllExport int __stdcall JADA_CallIsAudioMuted(JADA_Call callSelf) { JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); return(callPtr->iAudioMuteStatus); } DllExport int __stdcall JADA_CallIsVideoMuted(JADA_Call callSelf) { JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); return(callPtr->iVideoMuteStatus); } DllExport int __stdcall JADA_CallClose(JADA_Call callSelf) { int iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_MsgCloseCall msgPayload; JADA_ReturnValIfInvalidSupport(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); memset(&msgPayload, 0, sizeof(msgPayload)); 
msgPayload.iRequestId = callPtr->iRequestId; msgPayload.timCloseTime = time(0); iRet = JADA_SendCallMessage(callSelf, JADA_MSG_CLOSE_CALL, &msgPayload, sizeof(msgPayload)); CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore nell'invio del messaggio CLOSE_CALL"); iRet = JADA_HandleCallClosed(connPtr, NULL); CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore nella lettura del payload"); return(0); } DllExport int __stdcall JADA_CallAbort(JADA_Call callSelf, int iRetries) { int iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_Message msgOut; JADA_MsgAbortCall msgPayload; JADA_ReturnValIfInvalidSupport(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); memset(&msgPayload, 0, sizeof(msgPayload)); msgPayload.iRequestId = callPtr->iRequestId; msgPayload.iRetries = iRetries; msgPayload.timAbortTime = time(0); if (iRetries >= 3) { callPtr->iAudioMuteStatus = TRUE; callPtr->iVideoMuteStatus = TRUE; iRet = JADA_SendCallMessage(callSelf, JADA_MSG_ABORT_CALL, &msgPayload, sizeof(msgPayload)); CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore nell'invio del messaggio ABORT_CALL"); } msgOut = JADA_MsgSesNew(__FILE__, __LINE__, JADA_GetWhoAmI(), callPtr->iSessionId, JADA_MSG_ABORT_CALL, sizeof(msgPayload)); CORE_ReturnValIfFail(msgOut != NULL, -1, ;, "Errore generico"); iRet = JADA_MsgSetPayload(msgOut, &msgPayload, sizeof(msgPayload)); CORE_ReturnValIfFail(iRet == 0, -1, JADA_MsgDestroy(__FILE__, __LINE__, msgOut);, "errore nella lettura del payload"); iRet = JADA_HandleCallAborted(connPtr, msgOut); CORE_ReturnValIfFail(iRet == 0, -1, JADA_MsgDestroy(__FILE__, __LINE__, msgOut);, "errore nella lettura del payload"); iRet = JADA_MsgDestroy(__FILE__, __LINE__, msgOut); CORE_ReturnValIfFail(iRet == 0, -1, JADA_MsgDestroy(__FILE__, __LINE__, msgOut);, "errore nella lettura del payload"); return(0); } DllExport int __stdcall JADA_CallGetNextAudioFrame(JADA_Call callSelf, int iMilliSecs, JADA_Frame 
*frmSelf) { int iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_ReturnValIfInvalidSupport(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); iRet = JADA_LstWait(callPtr->lstInAudioFrames, frmSelf, sizeof(*frmSelf), iMilliSecs); if (iRet == 0) { iRet = 0; } return(iRet); } DllExport int __stdcall JADA_CallGetNextVideoFrame(JADA_Call callSelf, int iMilliSecs, JADA_Frame *frmSelf) { int iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_ReturnValIfInvalidSupport(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); iRet = JADA_LstWait(callPtr->lstInVideoFrames, frmSelf, sizeof(*frmSelf), iMilliSecs); if (iRet == 0) { iRet = 0; } return(iRet); } DllExport int __stdcall JADA_CallSendAudioFrame(JADA_Call callSelf, void *szFrameData, int iLength) { int iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_Message msgOut = NULL; JADA_MsgId iMsgId; JADA_ReturnValIfInvalidSupport(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); if (callPtr->iAudioMuteStatus == FALSE) { if (connPtr->sokConn.iSocket > 0) { JADA_SessIdToMsgId(&callPtr->iSessionId, &iMsgId); msgOut = JADA_MsgNew(__FILE__, __LINE__, JADA_MSGTYPE_AUDIOFRAME, iMsgId, iLength); CORE_ReturnValIfFail(msgOut != NULL, -1, ;, "errore nella creazione dell'audio frame message"); iRet = JADA_MsgSetPayload(msgOut, szFrameData, iLength); CORE_ReturnValIfFail(iRet == 0, -1, JADA_MsgDestroy(__FILE__, __LINE__, msgOut);, "errore nella valorizzazione dell'audio frame message"); #ifdef TEST_AUDIO iRet = JADA_HandleAudioFrame(callPtr, msgOut); CORE_ReturnValIfFail(iRet == 0, -1, JADA_MsgDestroy(__FILE__, __LINE__, msgOut);, "errore nella valorizzazione dell'audio frame message"); return(0); #endif iRet = JADA_TcpEnqueueMessage(&connPtr->sokConn, msgOut, 
JADA_MSG_QUEUE_CALL); CORE_ReturnValIfFail(iRet == 0, -1, JADA_MsgDestroy(__FILE__, __LINE__, msgOut);, "Errore nell'invio dell'audio frame message"); } } return(0); } DllExport int __stdcall JADA_CallSendVideoFrame(JADA_Call callSelf, void *szFrameData, int iLength) { int iRet, iCount; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_Message msgOut; JADA_MsgId iMsgId; // JADA_Frame frmSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); if (callPtr->iVideoMuteStatus == FALSE) { if (connPtr->sokConn.iSocket > 0) { JADA_SessIdToMsgId(&callPtr->iSessionId, &iMsgId); msgOut = JADA_MsgNew(__FILE__, __LINE__, JADA_MSGTYPE_VIDEOFRAME, iMsgId, iLength); CORE_ReturnValIfFail(msgOut != NULL, -1, ;, "errore nella creazione del video frame message"); iRet = JADA_MsgSetPayload(msgOut, szFrameData, iLength); CORE_ReturnValIfFail(iRet == 0, -1, JADA_MsgDestroy(__FILE__, __LINE__, msgOut);, "errore nella valorizzazione del video frame message"); iRet = JADA_LstAdd(callPtr->lstOutVideoFrames, &msgOut, sizeof(msgOut), NULL); CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore nell'inserimento in coda del frame video (OUT)"); msgOut = NULL; iCount = JADA_LstGetCount(callPtr->lstOutVideoFrames) - callPtr->iVideoFrameDelay; while (iCount-- > 0) { iRet = JADA_LstWait(callPtr->lstOutVideoFrames, &msgOut, sizeof(msgOut), 0); CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore nella lettura dalla coda del frame video (OUT)"); } if (msgOut != NULL) { // Se ho troppi messaggi call, salto il video per privilegiare l'audio iCount = JADA_LstGetCount(connPtr->sokConn.lstOutCallMsgs); if (iCount < 10) { iRet = JADA_TcpEnqueueMessage(&connPtr->sokConn, msgOut, JADA_MSG_QUEUE_CALL); CORE_ReturnValIfFail(iRet == 0, -1, JADA_MsgDestroy(__FILE__, __LINE__, msgOut);, "Errore nell'invio del video frame message"); /* Genera l'evento locale, per dare localmente il corretto feedback sul 
ritardo frmSelf = JADA_FrmNew(__FILE__, __LINE__, JADA_FRAMETYPE_VIDEO); CORE_ReturnValIfFail(frmSelf != NULL, -1, ;, "errore nella creazione del frame video"); iRet = JADA_FrmSetData(frmSelf, szFrameData, iLength); CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore nella scrittura dei dati di frame"); iRet = JADA_LstAdd(callPtr->lstInVideoFrames, &frmSelf, sizeof(frmSelf), NULL); CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore nell'inserimento in lista del frame video"); */ } } } } return(0); } DllExport int __stdcall JADA_CallEnqueueAudioFrame(JADA_Call callSelf, JADA_Frame *frmSelf) { int iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); iRet = JADA_LstAdd(callPtr->lstInAudioFrames, frmSelf, sizeof(*frmSelf), NULL); CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore nell'inserimento in lista del frame audio"); return(0); } DllExport int __stdcall JADA_CallEnqueueVideoFrame(JADA_Call callSelf, JADA_Frame *frmSelf) { int iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); iRet = JADA_LstAdd(callPtr->lstInVideoFrames, frmSelf, sizeof(*frmSelf), NULL); CORE_ReturnValIfFail(iRet == 0, -1, ;, "errore nell'inserimento in lista del frame audio"); return(0); } DllExport int __stdcall JADA_CallSetVideoFrameDelay(JADA_Call callSelf, int iDelay) { int iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); callPtr->iVideoFrameDelay = iDelay; iRet = JADA_IniSetInt("call", "delay", callPtr->iVideoFrameDelay); CORE_LogIfFail(iRet == 0, "Errore nella scrittura del parametro call/delay nel file .ini"); return(0); } DllExport int __stdcall JADA_CallGetVideoFrameDelay(JADA_Call callSelf, int *piDelay) { JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ReturnValIfInvalidSupport(callPtr, -1); *piDelay = callPtr->iVideoFrameDelay; return(0); } int JADA_SendCallMessage(JADA_Call callSelf, int iOpcode, void *pPayload, int iSize) { int 
iRet; JADA_CallSelf *callPtr = (JADA_CallSelf *) callSelf; JADA_ConnSelf *connPtr; JADA_Message msgOut; JADA_ReturnValIfInvalidSupport(callPtr, -1); connPtr = (JADA_ConnSelf *) callPtr->connSelf; JADA_ReturnValIfInvalidConnection(connPtr, -1); /* Crea il messaggio in output */ msgOut = JADA_MsgSesNew(__FILE__, __LINE__, JADA_GetWhoAmI(), callPtr->iSessionId, iOpcode, iSize); CORE_ReturnValIfFail(msgOut != NULL, -1, ;, "Errore generico"); iRet = JADA_MsgSesSetPayload(msgOut, pPayload, iSize); CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore generico"); /* Invia il messagio in output */ CORE_ReturnValIfFail(connPtr->sokConn.iSocket > 0, -1, ;, "Errore generico"); iRet = JADA_TcpEnqueueMessage(&connPtr->sokConn, msgOut, JADA_MSG_QUEUE_SESSION); CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore generico"); /* Cancella il messaggio creato */ //iRet = JADA_MsgDestroy(__FILE__, __LINE__, msgOut); //CORE_ReturnValIfFail(iRet == 0, -1, ;, "Errore generico"); return(0); }
<filename>migrations/versions/cf4d4d621167_.py
"""Drop user/project/organisation links from the questions table.

Removes the user_id, project_id and organisation_id foreign keys and
columns from `questions`; downgrade() restores them (nullable, with
ON DELETE CASCADE).

Revision ID: cf4d4d621167
Revises: <PASSWORD>
Create Date: 2021-03-15 11:23:22.889100

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = 'cf4d4d621167'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Constraints must be dropped before the columns they reference.
    op.drop_constraint('questions_user_id_fkey', 'questions', type_='foreignkey')
    op.drop_constraint('questions_project_id_fkey', 'questions', type_='foreignkey')
    op.drop_constraint('questions_organisation_id_fkey', 'questions', type_='foreignkey')
    op.drop_column('questions', 'project_id')
    op.drop_column('questions', 'organisation_id')
    op.drop_column('questions', 'user_id')
    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Recreate the columns first, then re-attach the foreign keys.
    op.add_column('questions', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('questions', sa.Column('organisation_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('questions', sa.Column('project_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.create_foreign_key('questions_organisation_id_fkey', 'questions', 'organisations', ['organisation_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key('questions_project_id_fkey', 'questions', 'project', ['project_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key('questions_user_id_fkey', 'questions', 'users', ['user_id'], ['id'], ondelete='CASCADE')
    # ### end Alembic commands ###
#! /bin/dash export FLASK_ENV=development export FLASK_APP=memelinks.py flask run
#!/bin/bash if [ "$createdump" = "yes" ]; then echo "Creating dump to /data/share/ilias.tar.gz" /data/resources/base/createiliasdump.sh --target /data/share/ilias.tar.gz fi if [ "$restorefromdump" = "yes" ]; then echo "Restoring from /data/share/ilias.tar.gz" /data/resources/base/restoreilias.sh --src /data/share/ilias.tar.gz fi apache2ctl -D $apachestartmode
package com.shop.controller.cart; import com.shop.been.AjaxResult; import com.shop.model.cart.CartItem; import com.shop.model.cart.CartMerchant; import com.shop.model.merchant.Item; import com.shop.model.merchant.Merchant; import com.shop.service.cart.CartService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.ui.Model; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.ResponseBody; import java.util.List; /** * <p>Description:购物车控制器</p> * * @Author 姚洪斌 * @Date 2017/8/22 9:31 */ @RequestMapping(value = "cart") @Controller public class CartController { @Autowired private CartService cartService; /** * 前往购物车页面 * @return */ @RequestMapping(value = "cartUI") public String cartUI(Model model, Integer userId) { // 查询出购物车内的商品 CartMerchant cartMerchants = cartService.selectCart(userId); System.out.println(cartMerchants.getMerchants().size()); model.addAttribute("cartMerchants", cartMerchants); return "/WEB-INF/jsp/cart/cart"; } @RequestMapping(value = "saveCart") @ResponseBody public AjaxResult saveCart(CartMerchant cartMerchant, CartItem cartItem) { return cartService.saveCart(cartMerchant, cartItem); } }
/**
 * @license
 * Copyright 2017 The FOAM Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

foam.CLASS({
  package: 'foam.net.node',
  name: 'DirTreeHandler',
  extends: 'foam.net.node.PathnamePrefixHandler',
  flags: ['node'],

  documentation: `HTTP(S) server handler for an entire directory.
      All files in the directory will be served according to their relative
      path name.

      E.g.,
      /foo/bar
      /foo/bar/baz
      /foo/bar/baz/alpha.html
      /foo/bar/quz/beta.js
      /foo/bar/quz/charlie.xyz

      Suppose dir=/foo/bar and pathnamePrefix=/frobinator

      This exposes URLs (relative to the server's root):
      /frobinator/baz/alpha.html as an html document
      /frobinator/quz/beta.js as a client-runnable script
      /frobinator/quz/charlie.xyz as a document resource`,

  imports: [ 'log', 'info' ],

  properties: [
    {
      class: 'String',
      name: 'dir',
      documentation: 'Directory under which to serve files.',
      preSet: function(old, nu) {
        return this.path.resolve(process.cwd(), nu);
      },
      factory: function() { return process.cwd(); }
    },
    {
      name: 'mimeTypes',
      factory: function() {
        return {
          '.js': 'text/javascript',
          '.css': 'text/css',
          '.html': 'text/html',
          __default: 'application/octet-stream'
        };
      }
    },
    {
      name: 'path',
      transient: true,
      factory: function() { return require('path'); }
    },
    {
      name: 'fs',
      transient: true,
      factory: function() { return require('fs'); }
    }
  ],

  methods: [
    function handle(req, res) {
      // Try to serve a static file.
      if ( ! this.dir ) return false;

      // Check the URL for the prefix.
      var target = req.url.pathname;
      if ( target.indexOf(this.pathnamePrefix) !== 0 ) {
        this.send404(req, res);
        this.reportWarnMsg(req, `PathnamePrefix Route/Handler mismatch:
                                    URL pathname: ${req.url.pathname}
                                    Handler prefix: ${this.pathnamePrefix}`);
        return true;
      }
      target = target.substring(this.pathnamePrefix.length);

      // Check and strip the prefix off the URL.
      if ( target.indexOf('?') >= 0 )
        target = target.substring(0, target.indexOf('?'));
      if ( target.indexOf('#') >= 0 )
        target = target.substring(0, target.indexOf('#'));

      this.log('Matched prefix, target file: ' + target);

      // Strip a leading slash, if any.  (BUGFIX: comment typo "String".)
      if ( target[0] === '/' ) target = target.substring(1);
      target = this.path.resolve(this.dir, target);
      this.log('Target resolved to: ' + target);
      var rel = this.path.relative(this.dir, target);
      // BUGFIX: the original logged `target` here, making the two preceding
      // log lines redundant and hiding the actual relative path.
      this.log('Relative path: ' + rel);

      // The relative path can't start with .. or it's outside the dir.
      if ( rel.startsWith('..') ) {
        this.send404(req, res);
        this.reportWarnMsg(
            req, 'Attempt to read static file outside directory: ' + target);
        return true;
      }

      // Now we have a legal filename within our subdirectory.
      // We try to stream the file to the other end.
      if ( ! this.fs.existsSync(target) ) {
        this.send404(req, res);
        this.reportWarnMsg(req, 'File not found: ' + target);
        return true;
      }

      var stats = this.fs.statSync(target);
      if ( stats.isDirectory() ) {
        this.send404(req, res);
        this.reportWarnMsg(req, 'Attempt to read directory: ' + target);
        return true;
      }

      var ext = this.path.extname(target);
      var mimetype = this.mimeTypes[ext] || this.mimeTypes.__default;
      if ( mimetype === this.mimeTypes.__default ) {
        this.info('Unknown MIME type: ' + ext);
      }
      res.setStatusCode(200);
      res.setHeader('Content-type', mimetype);

      // Stream file.
      res.pipeFrom(this.fs.createReadStream(target));

      this.info('200 OK ' + req.url.pathname + ' => ' + target);
      return true;
    }
  ]
});
#!/bin/sh
# Seed root's crontab from the CRONTAB environment variable, then run cron
# in the foreground (suitable as a container entrypoint).
#
# BUGFIX: the original used `[ "x$CRONTAB" == "x" ]`; `==` is a bashism and
# not valid for /bin/sh's POSIX `[`.  Use -z for the empty/unset test.
if [ -z "$CRONTAB" ]; then
	echo "Set environment variable CRONTAB"
	echo "Example: 0 3 * * * echo hi"
	# Pause before exiting so the error is visible in container logs
	# instead of triggering an instant restart loop.
	sleep 10
	exit 1
fi

echo "$CRONTAB" >> /etc/crontabs/root
cat /etc/crontabs/root

# Replace the shell with crond so it receives signals directly.
exec crond -f
from typing import List


def filterBrandStoreLinks(links: List[str]) -> List[str]:
    """Extract the URL part of ``"<label>: <url>"`` entries, dropping blanks.

    Each entry is expected to look like ``"brandStoreLink: https://..."``.
    Entries whose URL part is empty are omitted.

    Improvements over the original ``split(": ")[1]`` version:
      * entries without a ``": "`` separator are skipped instead of raising
        ``IndexError``;
      * ``partition`` keeps everything after the *first* separator, so a URL
        that itself contains ``": "`` is no longer truncated.

    :param links: raw ``"label: url"`` strings
    :return: the non-empty URL parts, in input order
    """
    filtered_links: List[str] = []
    for link in links:
        # partition() never raises; url is "" when the separator is absent.
        _, sep, url = link.partition(": ")
        if sep and url:
            filtered_links.append(url)
    return filtered_links


# Test the function with the example input
input_links = ["brandStoreLink: https://example.com", "brandStoreLink: ", "brandStoreLink: https://store.com", "brandStoreLink: "]
output_links = filterBrandStoreLinks(input_links)
print(output_links)  # Output: ["https://example.com", "https://store.com"]
import React, {FC} from 'react'; import A from '@renderer/components/A'; import DetailPanel from '@renderer/components/DetailPanel'; import {Loader} from '@renderer/components/FormElements'; import {VALIDATOR_CONFIGS} from '@renderer/constants'; import {useNetworkConfigFetcher} from '@renderer/hooks'; import {ValidatorConfig} from '@renderer/types'; import './ValidatorOverview.scss'; const ValidatorOverview: FC = () => { const {data: validatorConfig, loading} = useNetworkConfigFetcher<ValidatorConfig>(VALIDATOR_CONFIGS); return ( <div className="ValidatorOverview"> {loading || !validatorConfig ? ( <Loader /> ) : ( <DetailPanel items={[ { key: 'Account Number', value: validatorConfig.account_number, }, { key: 'IP Address', value: validatorConfig.ip_address, }, { key: 'Network ID', value: validatorConfig.node_identifier, }, { key: 'Port', value: validatorConfig.port || '-', }, { key: 'Protocol', value: validatorConfig.protocol, }, { key: 'Version', value: validatorConfig.version, }, { key: 'Tx Fee', value: validatorConfig.default_transaction_fee, }, { key: 'Daily Rate', value: validatorConfig.daily_confirmation_rate || '-', }, { key: 'Root Account File', value: ( <A className="ValidatorOverview__link" href={validatorConfig.root_account_file}> {validatorConfig.root_account_file} </A> ), }, { key: 'Root Account File Hash', value: validatorConfig.root_account_file_hash, }, { key: 'Seed Block Identifier', value: validatorConfig.seed_block_identifier || '-', }, { key: 'Node Type', value: validatorConfig.node_type, }, ]} title="Validator Information" /> )} </div> ); }; export default ValidatorOverview;
#!/bin/sh # Prepare to build the FTDI module for LT4 21.4 on the NVIDIA Jetson TK1 if [ $(id -u) != 0 ]; then echo "This script requires root permissions" echo "$ sudo "$0"" exit fi # Get the kernel source for LT4 21.4 cd /usr/src/ wget http://developer.download.nvidia.com/embedded/L4T/r23_Release_v1.0/source/kernel_src.tbz2 # Decompress tar -xvf kernel_src.tbz2 cd kernel # Get the kernel configuration file zcat /proc/config.gz > .config
// Minimal single-page-app server: serve static assets from this directory
// and fall back to index.html for every other route (client-side routing).
var express = require('express');
var path = require('path');

// BUGFIX: express is a factory function, not a constructor; the original
// `new express()` only worked by accident.
var app = express();

// Serve js/css/images and other assets straight from the app directory.
app.use(express.static(__dirname));

// Catch-all: any unmatched request gets the SPA entry point.
app.use('*', function(req, res) {
  res.sendFile(path.resolve(__dirname, 'index.html'));
});

// Log only once the server is actually accepting connections
// (the original logged unconditionally, before listen completed).
app.listen(8080, function() {
  console.log('app is running at port 8080');
});
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# hadoop-env.sh -- Hadoop-specific environment variables.
#
# This file is the master environment file for all Hadoop projects: every
# Hadoop command reads it, so YARN, HDFS and MapReduce options may be set
# here instead of in the per-project xxx-env.sh files.
#
# Precedence rules:
#   {yarn-env.sh|hdfs-env.sh}  >  hadoop-env.sh  >  hard-coded defaults
#   {YARN_xyz|HDFS_xyz}        >  HADOOP_xyz     >  hard-coded defaults
#
# Most settings below are written as substitutions (not appends) so users
# can still override them on the command line, e.g.:
#   JAVA_HOME=/usr/java/testing hdfs dfs -ls

###
# Generic settings
###

# The java implementation to use. By default this variable is REQUIRED on
# every platform except OS X.
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64

# Where Hadoop's configuration (i.e. this file) lives. An existing
# environment value wins; otherwise fall back to /etc/hadoop.
# NOTE: setting this here (rather than in /etc/profile.d) can interact
# strangely with options such as --config.
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}

# Heap sizing (Java -Xmx/-Xms) is left to the JVM's autoscaling based on
# machine memory. To pin it, set HADOOP_HEAPSIZE_MAX / HADOOP_HEAPSIZE_MIN;
# daemons prefer any Xmx/Xms present in their respective _OPTS variable.

# Extra JVM options for all Hadoop commands can be supplied through
# HADOOP_OPTS (e.g. "-Djava.net.preferIPv4Stack=true"), and client-only
# additions through HADOOP_CLIENT_OPTS. Per-daemon options go in the
# matching _OPTS variables (HDFS_NAMENODE_OPTS, HDFS_DATANODE_OPTS,
# YARN_RESOURCEMANAGER_OPTS, ...), which are appended to HADOOP_OPTS and
# may therefore override it.

# Parts of the shell code behave differently per operating system. Keep
# any value already provided by the environment, otherwise detect it.
export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}

###
# Daemon users
###

# To prevent accidents, shell commands can be (superficially) locked so
# that only certain users may execute certain subcommands. The format is
# (command)_(subcommand)_USER.
#
# NOTE(review): running every daemon as root is unusual outside of
# single-node/demo setups -- confirm this is intentional for this cluster.
export HDFS_NAMENODE_USER="root"
export HDFS_DATANODE_USER="root"
export HDFS_SECONDARYNAMENODE_USER="root"
export YARN_RESOURCEMANAGER_USER="root"
export YARN_NODEMANAGER_USER="root"
#!/bin/bash
#
# Configures the nanopub server via environment variables, and then starts it.
#
# Environment variables can be set before this script is run like this:
#
# $ export NPS_PUBLIC_URL=http://np.inn.ac/
# $ export NPS_ADMIN="John Doe <john@example.com>"
# $ export NPS_COLLECT_NANOPUBS_ENABLED=true
#

# Abort on the first failing command: we must never start Tomcat from the
# wrong directory or with a half-written configuration.
set -e

# Go to parent directory of this script file (resolving symlinks); with
# set -e, a failed cd aborts the script instead of being silently ignored.
cd "$( dirname "${BASH_SOURCE[0]}" )" && cd -P ..

# Load environment variables into the local config file; a failure here
# now stops the startup instead of launching Tomcat with stale config.
scripts/set-localconf-from-env.sh

# Run Tomcat web application server (replaces nothing: catalina.sh blocks).
catalina.sh run
package processor

import (
	"fmt"
	"io/ioutil"
	"os"
	"testing"

	"github.com/Jeffail/benthos/v3/lib/log"
	"github.com/Jeffail/benthos/v3/lib/message"
	"github.com/Jeffail/benthos/v3/lib/metrics"
	"github.com/Jeffail/benthos/v3/lib/types"
)

// TestJSONSchemaExternalSchemaCheck verifies the jsonschema processor against
// a schema loaded from an external file (file:// URI): a conforming message
// passes untouched, a violating message carries the failure flag metadata.
func TestJSONSchemaExternalSchemaCheck(t *testing.T) {
	schema := `{
		"$id": "https://example.com/person.schema.json",
		"$schema": "http://json-schema.org/draft-07/schema#",
		"title": "Person",
		"type": "object",
		"properties": {
			"firstName": {
				"type": "string",
				"description": "The person's first name."
			},
			"lastName": {
				"type": "string",
				"description": "The person's last name."
			},
			"age": {
				"description": "Age in years which must be equal to or greater than zero.",
				"type": "integer",
				"minimum": 0
			}
		}
	}`

	tmpSchemaFile, err := ioutil.TempFile("", "benthos_jsonschema_test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(tmpSchemaFile.Name())

	// write schema definition to tmpfile
	if _, err := tmpSchemaFile.Write([]byte(schema)); err != nil {
		t.Fatal(err)
	}

	testLog := log.New(os.Stdout, log.Config{LogLevel: "NONE"})
	testMet := metrics.DudType{}

	type fields struct {
		schemaPath string
	}
	tests := []struct {
		name   string
		fields fields
		arg    [][]byte
		output string
		err    string
	}{
		{
			name: "schema match",
			fields: fields{
				schemaPath: fmt.Sprintf("file://%s", tmpSchemaFile.Name()),
			},
			arg: [][]byte{
				[]byte(`{"firstName":"John","lastName":"Doe","age":21}`),
			},
			output: `{"firstName":"John","lastName":"Doe","age":21}`,
		},
		{
			name: "schema no match",
			fields: fields{
				schemaPath: fmt.Sprintf("file://%s", tmpSchemaFile.Name()),
			},
			arg: [][]byte{
				[]byte(`{"firstName":"John","lastName":"Doe","age":-20}`),
			},
			output: `{"firstName":"John","lastName":"Doe","age":-20}`,
			err:    `age must be greater than or equal to 0`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			conf := NewConfig()
			conf.Type = "jsonschema"
			conf.JSONSchema.SchemaPath = tt.fields.schemaPath

			c, err := NewJSONSchema(conf, nil, testLog, testMet)
			if err != nil {
				t.Error(err)
				return
			}
			msgs, _ := c.ProcessMessage(message.New(tt.arg))
			if len(msgs) != 1 {
				t.Fatalf("Test '%v' did not succeed", tt.name)
			}
			// Message content must pass through unchanged in both cases.
			if exp, act := tt.output, string(message.GetAllBytes(msgs[0])[0]); exp != act {
				t.Errorf("Wrong result '%v': %v != %v", tt.name, act, exp)
			}
			// Validation failures surface as the fail-flag metadata key.
			msgs[0].Iter(func(i int, part types.Part) error {
				act := part.Metadata().Get(FailFlagKey)
				if len(act) > 0 && act != tt.err {
					t.Errorf("Wrong error message '%v': %v != %v", tt.name, act, tt.err)
				}
				return nil
			})
		})
	}
}

// TestJSONSchemaInlineSchemaCheck mirrors the external-schema test but supplies
// the schema inline through the processor config.
func TestJSONSchemaInlineSchemaCheck(t *testing.T) {
	schemaDef := `{
		"$id": "https://example.com/person.schema.json",
		"$schema": "http://json-schema.org/draft-07/schema#",
		"title": "Person",
		"type": "object",
		"properties": {
			"firstName": {
				"type": "string",
				"description": "The person's first name."
			},
			"lastName": {
				"type": "string",
				"description": "The person's last name."
			},
			"age": {
				"description": "Age in years which must be equal to or greater than zero.",
				"type": "integer",
				"minimum": 0
			}
		}
	}`

	testLog := log.New(os.Stdout, log.Config{LogLevel: "NONE"})
	testMet := metrics.DudType{}

	type fields struct {
		schema string
		part   int
	}
	tests := []struct {
		name   string
		fields fields
		arg    [][]byte
		output string
		err    string
	}{
		{
			name: "schema match",
			fields: fields{
				schema: schemaDef,
				part:   0,
			},
			arg: [][]byte{
				[]byte(`{"firstName":"John","lastName":"Doe","age":21}`),
			},
			output: `{"firstName":"John","lastName":"Doe","age":21}`,
		},
		{
			name: "schema no match",
			fields: fields{
				schema: schemaDef,
				part:   0,
			},
			arg: [][]byte{
				[]byte(`{"firstName":"John","lastName":"Doe","age":-20}`),
			},
			output: `{"firstName":"John","lastName":"Doe","age":-20}`,
			err:    `age must be greater than or equal to 0`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			conf := NewConfig()
			conf.Type = "jsonschema"
			conf.JSONSchema.Schema = tt.fields.schema
			conf.JSONSchema.Parts = []int{0}

			c, err := NewJSONSchema(conf, nil, testLog, testMet)
			if err != nil {
				t.Error(err)
				return
			}
			msgs, _ := c.ProcessMessage(message.New(tt.arg))
			if len(msgs) != 1 {
				t.Fatalf("Test '%v' did not succeed", tt.name)
			}
			if exp, act := tt.output, string(message.GetAllBytes(msgs[0])[0]); exp != act {
				t.Errorf("Wrong result '%v': %v != %v", tt.name, act, exp)
			}
			msgs[0].Iter(func(i int, part types.Part) error {
				act := part.Metadata().Get(FailFlagKey)
				if len(act) > 0 && act != tt.err {
					t.Errorf("Wrong error message '%v': %v != %v", tt.name, act, tt.err)
				}
				return nil
			})
		})
	}
}

// TestJSONSchemaPathNotExist asserts that constructing the processor with a
// missing schema file fails eagerly.
func TestJSONSchemaPathNotExist(t *testing.T) {
	testLog := log.New(os.Stdout, log.Config{LogLevel: "NONE"})
	testMet := metrics.DudType{}

	conf := NewConfig()
	conf.Type = "jsonschema"
	// Plain string literal: fmt.Sprintf with no formatting directives was a
	// no-op (staticcheck S1039).
	conf.JSONSchema.SchemaPath = "file://path_does_not_exist"

	_, err := NewJSONSchema(conf, nil, testLog, testMet)
	if err == nil {
		t.Error("expected error from loading non-existent schema file")
	}
}

// TestJSONSchemaInvalidSchema asserts that a syntactically loaded but invalid
// schema definition ("type": "any" is not a draft-07 type) is rejected at
// construction time.
func TestJSONSchemaInvalidSchema(t *testing.T) {
	schema := `{
		"$schema": "http://json-schema.org/draft-07/schema#",
		"type": "any"
	}`

	tmpSchemaFile, err := ioutil.TempFile("", "benthos_jsonschema_invalid_schema_test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(tmpSchemaFile.Name())

	// write schema definition to tmpfile
	if _, err := tmpSchemaFile.Write([]byte(schema)); err != nil {
		t.Fatal(err)
	}

	testLog := log.New(os.Stdout, log.Config{LogLevel: "NONE"})
	testMet := metrics.DudType{}

	conf := NewConfig()
	conf.Type = "jsonschema"
	conf.JSONSchema.SchemaPath = fmt.Sprintf("file://%s", tmpSchemaFile.Name())

	_, err = NewJSONSchema(conf, nil, testLog, testMet)
	if err == nil {
		t.Error("expected error from loading bad schema")
	}
}
#     Copyright 2016-present CERN – European Organization for Nuclear Research
#
#     Licensed under the Apache License, Version 2.0 (the "License");
#     you may not use this file except in compliance with the License.
#     You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#     Unless required by applicable law or agreed to in writing, software
#     distributed under the License is distributed on an "AS IS" BASIS,
#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#     See the License for the specific language governing permissions and
#     limitations under the License.

from datetime import datetime
from itertools import count
from typing import Sequence, Type, List, Union, Dict, Any

import numpy as np
import pandas as pd
from joblib import Parallel, delayed

from qf_lib.backtesting.alpha_model.alpha_model import AlphaModel
from qf_lib.backtesting.alpha_model.exposure_enum import Exposure
from qf_lib.backtesting.data_handler.data_handler import DataHandler
from qf_lib.backtesting.fast_alpha_model_tester.backtest_summary import BacktestSummary, BacktestSummaryElement
from qf_lib.backtesting.fast_alpha_model_tester.fast_data_handler import FastDataHandler
from qf_lib.backtesting.portfolio.trade import Trade
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.exceptions.future_contracts_exceptions import NoValidTickerException
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.common.utils.dateutils.timer import SettableTimer
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
from qf_lib.common.utils.miscellaneous.to_list_conversion import convert_to_list
from qf_lib.common.utils.numberutils.is_finite_number import is_finite_number
from qf_lib.containers.dataframe.cast_dataframe import cast_dataframe
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.containers.dataframe.simple_returns_dataframe import SimpleReturnsDataFrame
from qf_lib.containers.dimension_names import TICKERS
from qf_lib.containers.futures.future_tickers.future_ticker import FutureTicker
from qf_lib.containers.futures.futures_chain import FuturesChain
from qf_lib.containers.qf_data_array import QFDataArray
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries
from qf_lib.data_providers.helpers import cast_data_array_to_proper_type, tickers_dict_to_data_array
from qf_lib.portfolio_construction.portfolio_models.portfolio import Portfolio


class FastAlphaModelTesterConfig:
    """
    Bundle describing one alpha-model parameter set to test.

    Parameters
    ----------
    model_type: type[AlphaModel]
        type of the alpha model that needs to be tested
    kwargs:
        all arguments that should be passed to the init function of the alpha model
        (may contain data_handler but it will be overwritten, when initializing the model)
    modeled_params: Union[str, Sequence[str]]
        strings, representing the init parameters of AlphaModel that are being verified in the test.
        The kwargs dict needs to contain them.
    """

    def __init__(self, model_type: Type[AlphaModel], kwargs: Dict[str, Any],
                 modeled_params: Union[str, Sequence[str]]):
        self.model_type = model_type  # type: Type[AlphaModel]
        self.kwargs = kwargs
        self.model_parameters_names, _ = convert_to_list(modeled_params, str)

        # Every modeled parameter must have a concrete value in kwargs.
        assert set(param for param in self.model_parameters_names if param in self.kwargs.keys()) == set(
            self.model_parameters_names), "The modeled_params need to be passed in the kwargs"

    def generate_model(self, data_handler: DataHandler):
        # NOTE(review): this passes data_provider=data_handler in addition to
        # **self.kwargs -- if kwargs already contains "data_provider" this
        # raises a duplicate-keyword TypeError. Confirm callers never set it.
        return self.model_type(**self.kwargs, data_provider=data_handler)

    def model_parameters(self):
        # Values of the modeled parameters, in declaration order.
        return tuple(self.kwargs[param] for param in self.model_parameters_names)


class FastAlphaModelTester:
    """
    ModelTester in which portfolio construction is simulated by always following the suggested Exposures
    from AlphaModels. All Tickers are traded with same weights (weights are constant across time).
    """

    def __init__(self, alpha_model_configs: Sequence[FastAlphaModelTesterConfig], tickers: Sequence[Ticker],
                 start_date: datetime, end_date: datetime, data_handler: FastDataHandler,
                 timer: SettableTimer, n_jobs: int = 1):
        self.logger = qf_logger.getChild(self.__class__.__name__)

        self._start_date = start_date
        self._end_date = end_date
        self._alpha_model_configs = alpha_model_configs

        # All parameter sets must describe the same model class so results
        # are comparable within one BacktestSummary.
        assert len(set(config.model_type for config in alpha_model_configs)) == 1, \
            "All passed FastAlphaModelTesterConfig should have the same alpha model type"
        self._model_type = alpha_model_configs[0].model_type

        self._data_handler = data_handler
        self._timer = timer
        # Resolve FutureTickers to specific contracts, dropping invalid ones.
        self._tickers = self._get_valid_tickers(tickers)
        self._n_jobs = n_jobs

        if type(self._data_handler) is not FastDataHandler:
            self.logger.warning("You are using a deprecated type of DataHandler. In FastAlphaModelsTester "
                                "use of FastDataHandler is suggested.")

    def test_alpha_models(self) -> BacktestSummary:
        # Pipeline: load prices once, generate exposures per parameter set
        # (possibly in parallel), then build one summary element per
        # (parameter set, ticker subset) combination.
        print("{} parameters sets to be tested".format(len(self._alpha_model_configs)))

        prices_data_array = self._get_data_for_backtest()
        exposure_values_df_list = self._generate_exposures_for_all_params_sets()
        backtest_summary_elem_list = self._calculate_backtest_summary_elements(exposure_values_df_list,
                                                                               prices_data_array)
        backtest_summary = BacktestSummary(
            self._tickers, self._model_type, backtest_summary_elem_list, self._start_date, self._end_date)
        return backtest_summary

    def _get_valid_tickers(self, original_ticker: Sequence[Ticker]) -> List[Ticker]:
        # Maps FutureTickers onto their current specific contract; tickers
        # with no valid contract are logged and skipped (not re-raised).
        tickers = []
        for ticker in original_ticker:
            try:
                if isinstance(ticker, FutureTicker):
                    ticker.initialize_data_provider(self._timer, self._data_handler)
                    ticker = ticker.get_current_specific_ticker()
                tickers.append(ticker)
            except NoValidTickerException:
                self.logger.warning("No valid ticker for {}".format(ticker.name))
        return tickers

    def _get_data_for_backtest(self) -> QFDataArray:
        """
        Creates a QFDataArray containing OHLCV values for all tickers passed to Fast Alpha Models Tester.
        """
        print("\nLoading all price values of tickers:")
        # Move the settable timer to the end of the test period so the data
        # handler serves the full history in one call.
        self._timer.set_current_time(self._end_date)
        tickers_dict = {}
        for ticker in self._tickers:
            if isinstance(ticker, FutureTicker):
                # Futures need a chained (rolled) price series.
                fc = FuturesChain(ticker, self._data_handler)
                tickers_dict[ticker] = fc.get_price(PriceField.ohlcv(), self._start_date, self._end_date,
                                                    Frequency.DAILY)
            else:
                tickers_dict[ticker] = self._data_handler.get_price(ticker, PriceField.ohlcv(), self._start_date,
                                                                    self._end_date)

        prices_data_array = tickers_dict_to_data_array(tickers_dict, self._tickers, PriceField.ohlcv())
        return prices_data_array

    def _generate_exposures_for_all_params_sets(self) -> List[QFDataFrame]:
        # One exposure DataFrame (dates x tickers) per parameter set; joblib
        # may pickle self, hence the re-initialization inside the worker.
        print("\nGenerating exposures:")
        exposure_values_df_list = Parallel(n_jobs=self._n_jobs)(delayed(self._generate_exposure_values)
                                                                (config, self._data_handler, self._tickers)
                                                                for config in self._alpha_model_configs)
        print("\nFinished generation of exposures.")
        return exposure_values_df_list

    def _calculate_backtest_summary_elements(self, exposure_values_df_list: List[QFDataFrame],
                                             prices_data_array: QFDataArray) -> List[BacktestSummaryElement]:
        open_prices_df = self._get_open_prices(prices_data_array)
        open_to_open_returns_df = open_prices_df.to_simple_returns()

        print("\nGenerating backtest summaries:")
        # Each ticker is backtested alone; additionally, when there is more
        # than one ticker, the whole basket is backtested together.
        tickers_for_backtests = [[ticker] for ticker in self._tickers]
        if len(self._tickers) > 1:
            tickers_for_backtests += [self._tickers]

        all_params = [(config, exposure_values_df, tickers)
                      for config, exposure_values_df in zip(self._alpha_model_configs, exposure_values_df_list)
                      for tickers in tickers_for_backtests]

        backtest_summary_elem_list = Parallel(n_jobs=self._n_jobs)(
            delayed(self._calculate_backtest_summary)(tickers, config, prices_data_array,
                                                      open_to_open_returns_df[tickers],
                                                      exposure_values_df[tickers])
            for config, exposure_values_df, tickers in all_params
        )
        return backtest_summary_elem_list

    def _get_open_prices(self, prices_data_array: QFDataArray) -> PricesDataFrame:
        """ Returns PricesDataFrame consisting of only Open prices. """
        open_prices_df = cast_data_array_to_proper_type(prices_data_array.loc[:, :, PriceField.Open],
                                                        use_prices_types=True).dropna(how="all")
        return open_prices_df

    def _calculate_backtest_summary(self, tickers: Union[Ticker, Sequence[Ticker]],
                                    config: FastAlphaModelTesterConfig,
                                    prices_data_array: QFDataArray,
                                    open_to_open_returns_df: QFDataFrame,
                                    exposure_values_df: QFDataFrame) -> BacktestSummaryElement:
        # Builds one summary element: portfolio return series + list of trades
        # for the given ticker subset and parameter set.
        tickers, _ = convert_to_list(tickers, Ticker)
        portfolio_rets_tms = self._calculate_portfolio_returns_tms(tickers, open_to_open_returns_df,
                                                                   exposure_values_df)
        trades = self._calculate_trades(prices_data_array, exposure_values_df)

        return BacktestSummaryElement(config.model_parameters(), config.model_parameters_names,
                                      portfolio_rets_tms, trades, tickers)

    def _calculate_portfolio_returns_tms(self, tickers, open_to_open_returns_df: QFDataFrame,
                                         exposure_values_df: QFDataFrame) \
            -> SimpleReturnsSeries:
        """
        SimpleReturnsSeries of the portfolio - for each date equal to the portfolio performance over the last
        open-to-open period, ex. value indexed as 2010-02-15 would refer to the portfolio value change between
        open at 14th and open at 15th, and would be based on the signal from 2010-02-13;

        the first index of the series is the Day 3 of the backtest, as the first signal calculation occurs
        after Day 1 (see ORDER OF ACTIONS below)
        the last index of the series is test_end_date and the portfolio exposure is being set to zero
        on the opening of the test_end_date

        ORDER OF ACTIONS:

        -- Day 1 --
        signal is generated, based on the historic data INCLUDING prices from Day 1
        suggested exposure for Day 2 is calculated

        -- Day 2 --
        a trade is entered, held or exited (or nothing happens) regarding the suggested exposure
        this action is performed on the opening of the day

        -- Day 3 --
        at the opening the open-to-open return is calculated
        now it is possible to estimate current portfolio value
        the simple return of the portfolio (Day 3 to Day 2) is saved and indexed with Day 3 date
        """
        open_to_open_returns_df = open_to_open_returns_df.dropna(how="all")
        # Shift by 2 implements the Day1-signal / Day2-trade / Day3-return
        # timeline documented above; the first two rows have no signal.
        shifted_signals_df = exposure_values_df.shift(2, axis=0)
        shifted_signals_df = shifted_signals_df.iloc[2:]

        daily_returns_of_strategies_df = shifted_signals_df * open_to_open_returns_df
        daily_returns_of_strategies_df = daily_returns_of_strategies_df.dropna(axis=0, how='all')

        daily_returns_of_strategies_df = cast_dataframe(
            daily_returns_of_strategies_df, SimpleReturnsDataFrame)  # type: SimpleReturnsDataFrame

        weights = Portfolio.one_over_n_weights(tickers)
        # for strategies based on more than one ticker (ex. VolLongShort) use the line below:
        # weights = QFSeries(np.ones(daily_returns_of_strategies_df.num_of_columns))

        portfolio_rets_tms, _ = Portfolio.constant_weights(daily_returns_of_strategies_df, weights)

        return portfolio_rets_tms

    def _calculate_trades(self, prices_array: QFDataArray, exposures_df: QFDataFrame) -> List[Trade]:
        trade_data_list = []
        # Shift by 1: the exposure decided on day D is acted upon at the next
        # day's open, matching the return timeline above.
        shifted_signals_df = exposures_df.shift(1, axis=0)

        # NOTE(review): DataFrame.iteritems() was removed in pandas 2.0
        # (use .items()) -- confirm the pinned pandas version.
        for ticker, exposures_tms in shifted_signals_df.iteritems():
            trade_data_partial_list = self.generate_trades_for_ticker(prices_array, exposures_tms, ticker)
            trade_data_list.extend(trade_data_partial_list)

        return trade_data_list

    def generate_trades_for_ticker(self, prices_array: QFDataArray, exposures_tms: pd.Series, ticker: Ticker) \
            -> List[Trade]:
        open_prices_tms = cast_data_array_to_proper_type(prices_array.loc[:, ticker, PriceField.Open],
                                                         use_prices_types=True)

        # historical data cropped to the time frame of the backtest (from start date till end date)
        historical_data = pd.concat((exposures_tms, open_prices_tms), axis=1).loc[self._start_date:]

        # State machine over (exposure, open price) rows: a trade starts when
        # exposure leaves 0 and closes on any exposure change.
        prev_exposure = 0.0
        trades_list = []

        trade_start_date = None
        trade_exposure = None
        trade_start_price = None

        # If the first exposure is nan - skip it
        first_exposure = historical_data.iloc[0, 0]
        if np.isnan(first_exposure):
            historical_data = historical_data.iloc[1:]

        for curr_date, row in historical_data.iterrows():
            curr_exposure, curr_price = row.values

            # skipping the nan Open prices
            if np.isnan(curr_price):
                self.logger.warning("Open price is None, cannot create trade on {} for {}".format(
                    curr_date, str(ticker)))
                continue

            out_of_the_market = trade_exposure is None
            if out_of_the_market:
                assert prev_exposure == 0.0

                if curr_exposure != 0.0:
                    # Entering the market: remember entry date/price/direction.
                    trade_start_date = curr_date
                    trade_exposure = curr_exposure
                    trade_start_price = curr_price
            else:
                assert prev_exposure != 0.0

                exposure_change = int(curr_exposure - prev_exposure)
                should_close_position = exposure_change != 0.0

                if should_close_position:
                    # PnL is the signed open-to-open price change over the
                    # life of the trade; commissions are not modeled here.
                    trades_list.append(Trade(
                        start_time=trade_start_date,
                        end_time=curr_date,
                        ticker=ticker,
                        pnl=(curr_price / trade_start_price - 1) * trade_exposure,
                        commission=0.0,
                        direction=int(trade_exposure)
                    ))
                    trade_start_date = None
                    trade_exposure = None
                    trade_start_price = None

                    # A direct flip (e.g. long -> short) opens the opposite
                    # trade on the same bar.
                    going_into_opposite_direction = curr_exposure != 0.0
                    if going_into_opposite_direction:
                        trade_start_date = curr_date
                        trade_exposure = curr_exposure
                        trade_start_price = curr_price

            prev_exposure = curr_exposure

        return trades_list

    def _generate_exposure_values(self, config: FastAlphaModelTesterConfig, data_handler: FastDataHandler,
                                  tickers: Sequence[Ticker]):
        """
        For the given Alpha model and its parameters, generates the dataframe containing all exposure values, that
        will be returned by the model through signals.
        """

        model = config.generate_model(data_handler)

        # Start flat (exposure 0.0) for every ticker.
        current_exposures_values = QFSeries(index=pd.Index(tickers, name=TICKERS))
        current_exposures_values[:] = 0.0

        backtest_dates = pd.date_range(self._start_date, self._end_date, freq="B")

        exposure_values_df = QFDataFrame(
            index=backtest_dates,
            columns=pd.Index(tickers, name=TICKERS)
        )

        for ticker in tickers:
            if isinstance(ticker, FutureTicker):
                # Even if the tickers were already initialized, the data handler and timer
                # information is lost during the pickling process (joblib workers).
                ticker.initialize_data_provider(self._timer, data_handler)

        for i, curr_datetime in enumerate(backtest_dates):
            new_exposures = QFSeries(index=tickers)
            self._timer.set_current_time(curr_datetime)

            for j, ticker, curr_exp_value in zip(count(), tickers, current_exposures_values):
                curr_exp = Exposure(curr_exp_value) if is_finite_number(curr_exp_value) else None
                try:
                    new_exp = model.calculate_exposure(ticker, curr_exp)
                except NoValidTickerException:
                    new_exp = None
                new_exposures.iloc[j] = new_exp.value if new_exp is not None else None

            # assuming that we always follow the new_exposures from strategy, disregarding confidence levels
            # and expected moves, looking only at the suggested exposure
            current_exposures_values = new_exposures
            exposure_values_df.iloc[i, :] = current_exposures_values.iloc[:]

        # Drop tickers for which no exposure could ever be computed.
        exposure_values_df = exposure_values_df.dropna(axis=1, how="all")
        return exposure_values_df
#!/bin/bash VOLUME_HOME="/var/lib/mysql" sed -ri -e "s/^upload_max_filesize.*/upload_max_filesize = ${PHP_UPLOAD_MAX_FILESIZE}/" \ -e "s/^post_max_size.*/post_max_size = ${PHP_POST_MAX_SIZE}/" /etc/php5/apache2/php.ini if [[ ! -d $VOLUME_HOME/mysql ]]; then echo "=> An empty or uninitialized MySQL volume is detected in $VOLUME_HOME" echo "=> Installing MySQL ..." mysql_install_db > /dev/null 2>&1 echo "=> Done!" /create_mysql_admin_user.sh else echo "=> Using an existing volume of MySQL" fi exec supervisord -n
#!/usr/bin/env bash ################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ # lint-python.sh # This script will prepare a virtual environment for many kinds of checks, such as tox check, flake8 check. # # You can refer to the README.MD in ${flink-python} to learn how easy to run the script. # # Download some software, such as miniconda.sh function download() { local DOWNLOAD_STATUS= if hash "wget" 2>/dev/null; then # because of the difference of all versions of wget, so we turn of the option --show-progress wget "$1" -O "$2" -q DOWNLOAD_STATUS="$?" else curl "$1" -o "$2" --progress-bar DOWNLOAD_STATUS="$?" 
fi if [ $DOWNLOAD_STATUS -ne 0 ]; then echo "Dowload failed.You can try again" exit $DOWNLOAD_STATUS fi } # Printing infos both in log and console function print_function() { local STAGE_LENGTH=48 local left_edge_len= local right_edge_len= local str case "$1" in "STAGE") left_edge_len=$(((STAGE_LENGTH-${#2})/2)) right_edge_len=$((STAGE_LENGTH-${#2}-left_edge_len)) str="$(seq -s "=" $left_edge_len | tr -d "[:digit:]")""$2""$(seq -s "=" $right_edge_len | tr -d "[:digit:]")" ;; "STEP") str="$2" ;; *) str="seq -s "=" $STAGE_LENGTH | tr -d "[:digit:]"" ;; esac echo $str | tee -a $LOG_FILE } function regexp_match() { if echo $1 | grep -e $2 &>/dev/null; then echo true else echo false fi } # decide whether a array contains a specified element. function contains_element() { arr=($1) if echo "${arr[@]}" | grep -w "$2" &>/dev/null; then echo true else echo false fi } # Checkpoint the stage:step for convenient to re-exec the script with # skipping those success steps. # The format is "${Stage}:${Step}". e.g. Install:4 function checkpoint_stage() { if [ ! -d `dirname $STAGE_FILE` ]; then mkdir -p `dirname $STAGE_FILE` fi echo "$1:$2">"$STAGE_FILE" } # Restore the stage:step function restore_stage() { if [ -f "$STAGE_FILE" ]; then local lines=$(awk '{print NR}' $STAGE_FILE) if [ $lines -eq 1 ]; then local first_field=$(cat $STAGE_FILE | cut -d ":" -f 1) local second_field=$(cat $STAGE_FILE | cut -d ":" -f 2) check_valid_stage $first_field $second_field if [ $? -eq 0 ]; then STAGE=$first_field STEP=$second_field return fi fi fi STAGE="install" STEP=0 } # Decide whether the stage:step is valid. function check_valid_stage() { case $1 in "install") if [ $2 -le $STAGE_INSTALL_STEPS ] && [ $2 -ge 0 ]; then return 0 fi ;; *) ;; esac return 1 } function parse_component_args() { local REAL_COMPONENTS=() for component in ${INSTALLATION_COMPONENTS[@]}; do # because all other components depends on conda, the install of conda is # required component. 
if [[ "$component" == "basic" ]] || [[ "$component" == "miniconda" ]]; then continue fi if [[ "$component" == "all" ]]; then component="environment" fi if [[ `contains_element "${SUPPORTED_INSTALLATION_COMPONENTS[*]}" "${component}"` = true ]]; then REAL_COMPONENTS+=(${component}) else echo "unknown install component ${component}, currently we only support installing basic,py_env,tox,flake8,sphinx,all." exit 1 fi done if [[ `contains_element "${REAL_COMPONENTS[*]}" "environment"` = false ]]; then SUPPORTED_INSTALLATION_COMPONENTS=(${REAL_COMPONENTS[@]}) fi } # For convenient to index something binded to OS. # Now, the script only make a distinction between 'Mac' and 'Non-Mac'. function get_os_index() { if [ $1 == "Darwin" ]; then return 0 else return 1 fi } # Considering the file size of miniconda.sh, # "wget" is better than "curl" in the weak network environment. function install_wget() { if [ $1 == "Darwin" ]; then hash "brew" 2>/dev/null if [ $? -ne 0 ]; then $((/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)") 2>&1 >/dev/null) if [ $? -ne 0 ]; then echo "Failed to install brew" exit 1 fi fi hash "wget" 2>/dev/null if [ $? -ne 0 ]; then brew install wget 2>&1 >/dev/null if [ $? -ne 0 ]; then echo "Failed to install wget" exit 1 fi fi fi } # The script choose miniconda as our package management tool. # The script use miniconda to create all kinds of python versions and # some pakcages including checks such as tox and flake8. function install_miniconda() { OS_TO_CONDA_URL=("https://repo.continuum.io/miniconda/Miniconda3-4.7.10-MacOSX-x86_64.sh" \ "https://repo.continuum.io/miniconda/Miniconda3-4.7.10-Linux-x86_64.sh") print_function "STEP" "download miniconda..." if [ ! -f "$CONDA_INSTALL" ]; then download ${OS_TO_CONDA_URL[$1]} $CONDA_INSTALL_SH chmod +x $CONDA_INSTALL_SH if [ $? 
-ne 0 ]; then echo "Please manually chmod +x $CONDA_INSTALL_SH" exit 1 fi if [ -d "$CURRENT_DIR/.conda" ]; then rm -rf "$CURRENT_DIR/.conda" if [ $? -ne 0 ]; then echo "Please manually rm -rf $CURRENT_DIR/.conda directory.\ Then retry to exec the script." exit 1 fi fi fi print_function "STEP" "download miniconda... [SUCCESS]" print_function "STEP" "installing conda..." if [ ! -d "$CURRENT_DIR/.conda" ]; then $CONDA_INSTALL_SH -b -p $CURRENT_DIR/.conda 2>&1 >/dev/null if [ $? -ne 0 ]; then echo "install miniconda failed" exit $CONDA_INSTALL_STATUS fi fi print_function "STEP" "install conda ... [SUCCESS]" } # Install some kinds of py env. function install_py_env() { py_env=("3.5" "3.6" "3.7") for ((i=0;i<${#py_env[@]};i++)) do if [ -d "$CURRENT_DIR/.conda/envs/${py_env[i]}" ]; then rm -rf "$CURRENT_DIR/.conda/envs/${py_env[i]}" if [ $? -ne 0 ]; then echo "rm -rf $CURRENT_DIR/.conda/envs/${py_env[i]} failed, please \ rm -rf $CURRENT_DIR/.conda/envs/${py_env[i]} manually.\ Then retry to exec the script." exit 1 fi fi print_function "STEP" "installing python${py_env[i]}..." $CONDA_PATH create --name ${py_env[i]} -y -q python=${py_env[i]} 2>&1 >/dev/null if [ $? -ne 0 ]; then echo "conda install ${py_env[i]} failed.\ You can retry to exec the script." exit 1 fi print_function "STEP" "install python${py_env[i]}... [SUCCESS]" done } # Install tox. # In some situations,you need to run the script with "sudo". e.g. sudo ./lint-python.sh function install_tox() { if [ -f "$TOX_PATH" ]; then $PIP_PATH uninstall tox -y -q 2>&1 >/dev/null if [ $? -ne 0 ]; then echo "pip uninstall tox failed \ please try to exec the script again.\ if failed many times, you can try to exec in the form of sudo ./lint-python.sh -f" exit 1 fi fi # tox 3.14.0 depends on both 0.19 and 0.23 of importlib_metadata at the same time and # conda will try to install both these two versions and it will cause problems occasionally. # Using pip as the package manager could avoid this problem. 
$PIP_PATH install -q virtualenv==16.0.0 tox==3.14.0 2>&1 >/dev/null if [ $? -ne 0 ]; then echo "pip install tox failed \ please try to exec the script again.\ if failed many times, you can try to exec in the form of sudo ./lint-python.sh -f" exit 1 fi } # Install flake8. # In some situations,you need to run the script with "sudo". e.g. sudo ./lint-python.sh function install_flake8() { if [ -f "$FLAKE8_PATH" ]; then $CONDA_PATH remove -p $CONDA_HOME flake8 -y -q 2>&1 >/dev/null if [ $? -ne 0 ]; then echo "conda remove flake8 failed \ please try to exec the script again.\ if failed many times, you can try to exec in the form of sudo ./lint-python.sh -f" exit 1 fi fi $CONDA_PATH install -p $CONDA_HOME -c anaconda flake8 -y -q 2>&1 >/dev/null if [ $? -ne 0 ]; then echo "conda install flake8 failed \ please try to exec the script again.\ if failed many times, you can try to exec in the form of sudo ./lint-python.sh -f" exit 1 fi } # Install sphinx. # In some situations,you need to run the script with "sudo". e.g. sudo ./lint-python.sh function install_sphinx() { if [ -f "$SPHINX_PATH" ]; then $CONDA_PATH remove -p $CONDA_HOME sphinx -y -q 2>&1 >/dev/null if [ $? -ne 0 ]; then echo "conda remove sphinx failed \ please try to exec the script again.\ if failed many times, you can try to exec in the form of sudo ./lint-python.sh -f" exit 1 fi fi $CONDA_PATH install -p $CONDA_HOME -c anaconda sphinx -y -q 2>&1 >/dev/null if [ $? -ne 0 ]; then echo "conda install sphinx failed \ please try to exec the script again.\ if failed many times, you can try to exec in the form of sudo ./lint-python.sh -f" exit 1 fi } function need_install_component() { if [[ `contains_element "${SUPPORTED_INSTALLATION_COMPONENTS[*]}" "$1"` = true ]]; then echo true else echo false fi } # In this function, the script will prepare all kinds of python environments and checks. 
function install_environment() { print_function "STAGE" "installing environment" local sys_os=`uname -s` #get the index of the SUPPORT_OS array for convinient to intall tool. get_os_index $sys_os local os_index=$? # step-1 install wget # the file size of the miniconda.sh is too big to use "wget" tool to download instead # of the "curl" in the weak network environment. print_function "STEP" "installing wget..." if [ $STEP -lt 1 ]; then install_wget ${SUPPORT_OS[$os_index]} STEP=1 checkpoint_stage $STAGE $STEP fi print_function "STEP" "install wget... [SUCCESS]" # step-2 install miniconda print_function "STEP" "installing miniconda..." if [ $STEP -lt 2 ]; then create_dir $CURRENT_DIR/download install_miniconda $os_index STEP=2 checkpoint_stage $STAGE $STEP fi print_function "STEP" "install miniconda... [SUCCESS]" # step-3 install python environment whcih includes # 3.5 3.6 3.7 if [ $STEP -lt 3 ] && [ `need_install_component "py_env"` = true ]; then print_function "STEP" "installing python environment..." install_py_env STEP=3 checkpoint_stage $STAGE $STEP print_function "STEP" "install python environment... [SUCCESS]" fi # step-4 install tox if [ $STEP -lt 4 ] && [ `need_install_component "tox"` = true ]; then print_function "STEP" "installing tox..." install_tox STEP=4 checkpoint_stage $STAGE $STEP print_function "STEP" "install tox... [SUCCESS]" fi # step-5 install flake8 if [ $STEP -lt 5 ] && [ `need_install_component "flake8"` = true ]; then print_function "STEP" "installing flake8..." install_flake8 STEP=5 checkpoint_stage $STAGE $STEP print_function "STEP" "install flake8... [SUCCESS]" fi # step-6 install sphinx if [ $STEP -lt 6 ] && [ `need_install_component "sphinx"` = true ]; then print_function "STEP" "installing sphinx..." install_sphinx STEP=6 checkpoint_stage $STAGE $STEP print_function "STEP" "install sphinx... [SUCCESS]" fi print_function "STAGE" "install environment... [SUCCESS]" } # create dir if needed function create_dir() { if [ ! 
-d $1 ]; then mkdir -p $1 if [ $? -ne 0 ]; then echo "mkdir -p $1 failed. you can mkdir manually or exec the script with \ the command: sudo ./lint-python.sh" exit 1 fi fi } # Set created py-env in $PATH for tox's creating virtual env function activate () { if [ ! -d $CURRENT_DIR/.conda/envs ]; then echo "For some unkown reasons,missing the directory $CURRENT_DIR/.conda/envs,\ you should exec the script with the option: -f" exit 1 fi for py_dir in $CURRENT_DIR/.conda/envs/* do PATH=$py_dir/bin:$PATH done export PATH 2>/dev/null if [ $? -ne 0 ]; then echo "For some unkown reasons, the py package is not complete,\ you should exec the script with the option: -f" exit 1 fi } # Reset the $PATH function deactivate() { # reset old environment variables # ! [ -z ${VAR+_} ] returns true if VAR is declared at all if ! [ -z "${_OLD_PATH+_}" ] ; then PATH="$_OLD_PATH" export PATH unset _OLD_PATH fi } # Collect checks function collect_checks() { if [ ! -z "$EXCLUDE_CHECKS" ] && [ ! -z "$INCLUDE_CHECKS" ]; then echo "You can't use option -s and -e simultaneously." exit 1 fi if [ ! -z "$EXCLUDE_CHECKS" ]; then for (( i = 0; i < ${#EXCLUDE_CHECKS[@]}; i++)); do if [[ `contains_element "${SUPPORT_CHECKS[*]}" "${EXCLUDE_CHECKS[i]}_check"` = true ]]; then SUPPORT_CHECKS=("${SUPPORT_CHECKS[@]/${EXCLUDE_CHECKS[i]}_check}") else echo "the check ${EXCLUDE_CHECKS[i]} is invalid." exit 1 fi done fi if [ ! -z "$INCLUDE_CHECKS" ]; then REAL_SUPPORT_CHECKS=() for (( i = 0; i < ${#INCLUDE_CHECKS[@]}; i++)); do if [[ `contains_element "${SUPPORT_CHECKS[*]}" "${INCLUDE_CHECKS[i]}_check"` = true ]]; then REAL_SUPPORT_CHECKS+=("${INCLUDE_CHECKS[i]}_check") else echo "the check ${INCLUDE_CHECKS[i]} is invalid." 
exit 1 fi done SUPPORT_CHECKS=(${REAL_SUPPORT_CHECKS[@]}) fi } # If the check stage is needed function include_stage() { if [[ `contains_element "${SUPPORT_CHECKS[*]}" "$1"` = true ]]; then return 0 else return 1 fi } # get all supported checks functions function get_all_supported_checks() { _OLD_IFS=$IFS IFS=$'\n' SUPPORT_CHECKS=() for fun in $(declare -F); do if [[ `regexp_match "$fun" "_check$"` = true ]]; then SUPPORT_CHECKS+=("${fun:11}") fi done IFS=$_OLD_IFS } # get all supported install components functions function get_all_supported_install_components() { _OLD_IFS=$IFS IFS=$'\n' for fun in $(declare -F); do if [[ `regexp_match "${fun:11}" "^install_"` = true ]]; then SUPPORTED_INSTALLATION_COMPONENTS+=("${fun:19}") fi done IFS=$_OLD_IFS # we don't need to expose "install_wget" to user. local DELETE_COMPONENTS=("wget") local REAL_COMPONENTS=() for component in ${SUPPORTED_INSTALLATION_COMPONENTS[@]}; do if [[ `contains_element "${DELETE_COMPONENTS[*]}" "${component}"` = false ]]; then REAL_COMPONENTS+=("${component}") fi done SUPPORTED_INSTALLATION_COMPONENTS=(${REAL_COMPONENTS[@]}) } # exec all selected check stages function check_stage() { print_function "STAGE" "checks starting" for fun in ${SUPPORT_CHECKS[@]}; do $fun done echo "All the checks are finished, the detailed information can be found in: $LOG_FILE" } ###############################################################All Checks Definitions############################################################### ######################### # This part defines all check functions such as tox_check and flake8_check # We make a rule that all check functions are suffixed with _ check. e.g. 
tox_check, flake8_chek ######################### # Tox check function tox_check() { print_function "STAGE" "tox checks" # Set created py-env in $PATH for tox's creating virtual env activate $TOX_PATH -c $FLINK_PYTHON_DIR/tox.ini --recreate 2>&1 | tee -a $LOG_FILE TOX_RESULT=$((grep -c "congratulations :)" "$LOG_FILE") 2>&1) if [ $TOX_RESULT -eq '0' ]; then print_function "STAGE" "tox checks... [FAILED]" else print_function "STAGE" "tox checks... [SUCCESS]" fi # Reset the $PATH deactivate # If check failed, stop the running script. if [ $TOX_RESULT -eq '0' ]; then exit 1 fi } # Flake8 check function flake8_check() { local PYTHON_SOURCE="$(find . \( -path ./dev -o -path ./.tox \) -prune -o -type f -name "*.py" -print )" print_function "STAGE" "flake8 checks" if [ ! -f "$FLAKE8_PATH" ]; then echo "For some unkown reasons, the flake8 package is not complete,\ you should exec the script with the parameter: -f" fi if [[ ! "$PYTHON_SOURCE" ]]; then echo "No python files found! Something is wrong exiting." exit 1; fi # the return value of a pipeline is the status of the last command to exit # with a non-zero status or zero if no command exited with a non-zero status set -o pipefail ($FLAKE8_PATH --config=tox.ini $PYTHON_SOURCE) 2>&1 | tee -a $LOG_FILE PYCODESTYLE_STATUS=$? if [ $PYCODESTYLE_STATUS -ne 0 ]; then print_function "STAGE" "flake8 checks... [FAILED]" # Stop the running script. exit 1; else print_function "STAGE" "flake8 checks... [SUCCESS]" fi } # Sphinx check function sphinx_check() { export SPHINXBUILD=$SPHINX_PATH # cd to $FLINK_PYTHON_DIR pushd "$FLINK_PYTHON_DIR"/docs &> /dev/null make clean # the return value of a pipeline is the status of the last command to exit # with a non-zero status or zero if no command exited with a non-zero status set -o pipefail (SPHINXOPTS="-a -W" make html) 2>&1 | tee -a $LOG_FILE SPHINXBUILD_STATUS=$? if [ $SPHINXBUILD_STATUS -ne 0 ]; then print_function "STAGE" "sphinx checks... [FAILED]" # Stop the running script. 
exit 1; else print_function "STAGE" "sphinx checks... [SUCCESS]" fi } ###############################################################All Checks Definitions############################################################### # CURRENT_DIR is "flink/flink-python/dev/" CURRENT_DIR="$(cd "$( dirname "$0" )" && pwd)" # FLINK_PYTHON_DIR is "flink/flink-python" FLINK_PYTHON_DIR=$(dirname "$CURRENT_DIR") pushd "$FLINK_PYTHON_DIR" &> /dev/null # conda home path CONDA_HOME=$CURRENT_DIR/.conda # conda path CONDA_PATH=$CONDA_HOME/bin/conda # pip path PIP_PATH=$CONDA_HOME/bin/pip # tox path TOX_PATH=$CONDA_HOME/bin/tox # flake8 path FLAKE8_PATH=$CONDA_HOME/bin/flake8 # sphinx path SPHINX_PATH=$CONDA_HOME/bin/sphinx-build _OLD_PATH="$PATH" SUPPORT_OS=("Darwin" "Linux") # the file stores the success step in installing progress. STAGE_FILE=$CURRENT_DIR/.stage.txt # the dir includes all kinds of py env installed. VIRTUAL_ENV=$CONDA_HOME/envs LOG_DIR=$CURRENT_DIR/log if [ "$FLINK_IDENT_STRING" == "" ]; then FLINK_IDENT_STRING="$USER" fi if [ "$HOSTNAME" == "" ]; then HOSTNAME="$HOST" fi # the log file stores the checking result. LOG_FILE=$LOG_DIR/flink-$FLINK_IDENT_STRING-python-$HOSTNAME.log create_dir $LOG_DIR # clean LOG_FILE content echo >$LOG_FILE # miniconda script CONDA_INSTALL_SH=$CURRENT_DIR/download/miniconda.sh # stage "install" includes the num of steps. STAGE_INSTALL_STEPS=6 # whether force to restart the script. 
FORCE_START=0 SUPPORT_CHECKS=() # search all supported check functions and put them into SUPPORT_CHECKS array get_all_supported_checks EXCLUDE_CHECKS="" INCLUDE_CHECKS="" SUPPORTED_INSTALLATION_COMPONENTS=() # search all supported install functions and put them into SUPPORTED_INSTALLATION_COMPONENTS array get_all_supported_install_components INSTALLATION_COMPONENTS=() # parse_opts USAGE=" usage: $0 [options] -h print this help message and exit -f force to exec from the progress of installing environment -s [basic,py_env,tox,flake8,sphinx,all] install environment with specified components which split by comma(,) note: This option is used to install environment components and will skip all subsequent checks, so do not use this option with -e,-i simultaneously. -e [tox,flake8,sphinx] exclude checks which split by comma(,) -i [tox,flake8,sphinx] include checks which split by comma(,) -l list all checks supported. Examples: ./lint-python -s basic => install environment with basic components. ./lint-python -s py_env => install environment with python env(3.5,3.6,3.7). ./lint-python -s all => install environment with all components such as python env,tox,flake8,sphinx etc. ./lint-python -s tox,flake8 => install environment with tox,flake8. ./lint-python -s tox -f => reinstall environment with tox. ./lint-python -e tox,flake8 => exclude checks tox,flake8. ./lint-python -i flake8 => include checks flake8. ./lint-python => exec all checks. ./lint-python -f => reinstall environment with all components and exec all checks. ./lint-python -l => list all checks supported. " while getopts "hfs:i:e:l" arg; do case "$arg" in h) printf "%s\\n" "$USAGE" exit 2 ;; f) FORCE_START=1 ;; s) INSTALLATION_COMPONENTS=($(echo $OPTARG | tr ',' ' ' )) ;; e) EXCLUDE_CHECKS=($(echo $OPTARG | tr ',' ' ' )) ;; i) INCLUDE_CHECKS=($(echo $OPTARG | tr ',' ' ' )) ;; l) printf "current supported checks includes:\n" for fun in ${SUPPORT_CHECKS[@]}; do echo ${fun%%_check*} done exit 2 ;; ?) 
printf "ERROR: did not recognize option '%s', please try -h\\n" "$1" exit 1 ;; esac done # decides whether to skip check stage skip_checks=0 if [ ! -z "$INSTALLATION_COMPONENTS" ]; then parse_component_args skip_checks=1 fi # collect checks according to the options collect_checks # If exec the script with the param: -f, all progress will be re-run if [ $FORCE_START -eq 1 ]; then STAGE="install" STEP=0 checkpoint_stage $STAGE $STEP else restore_stage fi # install environment install_environment # exec all selected checks if [ $skip_checks -eq 0 ]; then check_stage fi
import { randomizedTest } from '../src/randomizedTest'; describe('randomizedTest', () => { randomizedTest('Without seed, no async', randomizer => { if(! randomizer) throw new Error(); }); randomizedTest('Without seed, async', (randomizer, done) => { if(! randomizer) throw new Error(); done(); }); });
public class ItemCooldownManager { private bool IsCooldown; private float cooldownDuration; private float cooldownInitTime; private Item ObservedItem; // Update is called once per frame public void Update() { if (IsCooldown && Time.time - cooldownInitTime >= cooldownDuration) { IsCooldown = false; // Reset cooldown notification InventoryManager.Notifications.inCooldown.Hide(); // Hide the cooldown notification } else if (IsCooldown && ObservedItem != null) { InventoryManager.Notifications.inCooldown.Show(ObservedItem.Name, (cooldownDuration - (Time.time - cooldownInitTime)).ToString("f2")); } } //Can we use the item public bool CanUse() { return !IsCooldown && ObservedItem != null; } }
<reponame>Zwooosh/sebastiaanscheers.nl import { useState, useRef, useCallback } from 'react' import { useSafeLayoutEffect } from './useSafeLayoutEffect' export interface IUseImageProps { src?: string srcSet?: string sizes?: string onLoad?(event: Event): void onError?(error: string | Event): void ignoreFallback?: boolean crossOrigin?: string } type Status = 'loading' | 'failed' | 'pending' | 'loaded' export function useImage(props: IUseImageProps) { const { src, srcSet, onLoad, onError, crossOrigin, sizes, ignoreFallback, } = props const [status, setStatus] = useState<Status>(() => { return src ? 'loading' : 'pending' }) const imageRef = useRef<HTMLImageElement | null>() const load = useCallback(() => { if (!src) return flush() const img = new Image() img.src = src if (crossOrigin) { img.crossOrigin = crossOrigin } if (srcSet) { img.srcset = srcSet } if (sizes) { img.sizes = sizes } img.onload = (event) => { flush() setStatus('loaded') onLoad?.(event) } img.onerror = (error) => { flush() setStatus('failed') onError?.(error) } imageRef.current = img }, [src, crossOrigin, srcSet, sizes, onLoad, onError]) const flush = () => { if (imageRef.current) { imageRef.current.onload = null imageRef.current.onerror = null imageRef.current = null } } useSafeLayoutEffect(() => { if (ignoreFallback) return if (status === 'loading') { load() } return () => { flush() } }, [status, load, ignoreFallback]) return ignoreFallback ? 'loaded' : status } export type UseImageReturn = ReturnType<typeof useImage>
<filename>examples/object-entrys/config-overrides.js const paths = require('react-scripts/config/paths'); const rewireEntry = require('react-app-rewire-entry'); paths.appAdminJs = paths.appSrc + '/admin.js'; const { rewireWebpackEntryConfig, rewireDevServerkEntryConfig, } = rewireEntry({ index: paths.appIndexJs, admin: paths.appAdminJs, }); module.exports = { webpack: (config, env) => { config = rewireWebpackEntryConfig(config, env); return config; }, devServer: (configFunction) => { return (proxy, allowedHost) => { let config = configFunction(proxy, allowedHost); config = rewireDevServerkEntryConfig(config); return config; }; }, };
#!/bin/sh BASEDIR=$(dirname "$0") java -Xmx1024M -jar "$BASEDIR/../lib/vome.jar" --config "$BASEDIR/../etc/databases.xml"
#!/bin/bash # essentials sudo apt-get install --yes \ build-essential \ cmake \ curl \ direnv \ fish \ golang \ htop \ hub \ jq \ mosh \ neofetch \ neovim \ nodejs \ python-dev \ python-pip \ python3-dev \ python3-pip \ tmux \ wget \ xz-utils # neovim sudo update-alternatives --install /usr/bin/vi vi /usr/bin/nvim 60 sudo update-alternatives --install /usr/bin/vim vim /usr/bin/nvim 60 sudo update-alternatives --install /usr/bin/editor editor /usr/bin/nvim 60 # yarn curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add - echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list sudo apt-get update sudo apt-get install --yes yarn mkdir -p \ ~/.bin \ ~/go/bin \ ~/.config/yarn/global yarn config set prefix ~/.config/yarn/global yarn global add \ eslint \ hs \ javascript-language-server \ prettier \ wscat # omf curl -L https://get.oh-my.fish | fish # utils go get -u github.com/golang/lint/golint go get -u github.com/alecthomas/gometalinter go get -u github.com/sourcegraph/go-langserver pip3 install neovim yamllint python-language-server pip install neovim yamllint python-language-server curl https://sh.rustup.rs -sSf | sh rustup component add rls-preview rust-analysis rust-src rustfmt-preview cargo install exa
#!/bin/bash exit 1; # This is a shell script, but it's recommended that you run the commands one by # one by copying and pasting into the shell. # Caution: some of the graph creation steps use quite a bit of memory, so you # should run this on a machine that has sufficient memory. . cmd.sh # Data prep #local/swbd_p1_data_prep.sh /mnt/matylda2/data/SWITCHBOARD_1R2 local/swbd_p1_data_prep.sh /export/corpora3/LDC/LDC97S62 local/swbd_p1_prepare_dict.sh utils/prepare_lang.sh data/local/dict "<UNK>" data/local/lang data/lang local/swbd_p1_train_lms.sh local/swbd_p1_format_data.sh # Data preparation and formatting for eval2000 (note: the "text" file # is not very much preprocessed; for actual WER reporting we'll use # sclite. #local/eval2000_data_prep.sh /mnt/matylda2/data/HUB5_2000/ /mnt/matylda2/data/HUB5_2000/2000_hub5_eng_eval_tr local/eval2000_data_prep.sh /export/corpora2/LDC/LDC2002S09/hub5e_00 /export/corpora2/LDC/LDC2002T43 . cmd.sh # mfccdir should be some place with a largish disk where you # want to store MFCC features. mfccdir=mfcc steps/make_mfcc.sh --nj 20 --cmd "$train_cmd" data/train exp/make_mfcc/train $mfccdir || exit 1; # Don't do "|| exit 1" because actually some speakers don't have data, # we'll get rid of them later. Ignore this error. steps/compute_cmvn_stats.sh data/train exp/make_mfcc/train $mfccdir # after this, the next command will remove the small number of utterances # that couldn't be extracted for some reason (e.g. too short; no such file). utils/fix_data_dir.sh data/train || exit 1; steps/make_mfcc.sh --cmd "$train_cmd" --nj 10 data/eval2000 exp/make_mfcc/eval2000 $mfccdir || exit 1; steps/compute_cmvn_stats.sh data/eval2000 exp/make_mfcc/eval2000 $mfccdir || exit 1; utils/fix_data_dir.sh data/eval2000 # remove segments that had problems, e.g. too short. # Use the first 4k sentences as dev set. Note: when we trained the LM, we used # the 1st 10k sentences as dev set, so the 1st 4k won't have been used in the # LM training data. 
However, they will be in the lexicon, plus speakers # may overlap, so it's still not quite equivalent to a test set. utils/subset_data_dir.sh --first data/train 4000 data/train_dev # 5.3 hours. n=$[`cat data/train/segments | wc -l` - 4000] utils/subset_data_dir.sh --last data/train $n data/train_nodev # Now-- there are 264k utterances, and we want to start the monophone training # on relatively short utterances (easier to align), but not only the very shortest # ones (mostly uh-huh). So take the 100k shortest ones, and then take 10k random # utterances from those. utils/subset_data_dir.sh --shortest data/train_nodev 100000 data/train_100kshort utils/subset_data_dir.sh data/train_100kshort 10000 data/train_10k local/remove_dup_utts.sh 100 data/train_10k data/train_10k_nodup # Take the first 30k utterances (about 1/8th of the data) utils/subset_data_dir.sh --first data/train_nodev 30000 data/train_30k local/remove_dup_utts.sh 200 data/train_30k data/train_30k_nodup local/remove_dup_utts.sh 300 data/train_nodev data/train_nodup # Take the first 100k utterances (just under half the data); we'll use # this for later stages of training. utils/subset_data_dir.sh --first data/train_nodev 100000 data/train_100k local/remove_dup_utts.sh 200 data/train_100k data/train_100k_nodup # The next commands are not necessary for the scripts to run, but increase # efficiency of data access by putting the mfcc's of the subset # in a contiguous place in a file. ( . path.sh; # make sure mfccdir is defined as above.. cp data/train_10k_nodup/feats.scp{,.bak} copy-feats scp:data/train_10k_nodup/feats.scp ark,scp:$mfccdir/kaldi_swbd_10k_nodup.ark,$mfccdir/kaldi_swbd_10k_nodup.scp \ && cp $mfccdir/kaldi_swbd_10k_nodup.scp data/train_10k_nodup/feats.scp ) ( . path.sh; # make sure mfccdir is defined as above.. 
cp data/train_30k_nodup/feats.scp{,.bak} copy-feats scp:data/train_30k_nodup/feats.scp ark,scp:$mfccdir/kaldi_swbd_30k_nodup.ark,$mfccdir/kaldi_swbd_30k_nodup.scp \ && cp $mfccdir/kaldi_swbd_30k_nodup.scp data/train_30k_nodup/feats.scp ) steps/train_mono.sh --nj 10 --cmd "$train_cmd" \ data/train_10k_nodup data/lang exp/mono0a || exit 1; steps/align_si.sh --nj 30 --cmd "$train_cmd" \ data/train_30k_nodup data/lang exp/mono0a exp/mono0a_ali || exit 1; steps/train_deltas.sh --cmd "$train_cmd" \ 2500 20000 data/train_30k_nodup data/lang exp/mono0a_ali exp/tri1 || exit 1; utils/mkgraph.sh data/lang_test exp/tri1 exp/tri1/graph steps/decode.sh --nj 30 --cmd "$decode_cmd" --config conf/decode.config \ exp/tri1/graph data/eval2000 exp/tri1/decode_eval2000 #MAP-adapted decoding example. #steps/decode_with_map.sh --nj 30 --cmd "$decode_cmd" --config conf/decode.config \ # exp/tri1/graph data/eval2000 exp/tri1/decode_eval2000_map steps/align_si.sh --nj 30 --cmd "$train_cmd" \ data/train_30k_nodup data/lang exp/tri1 exp/tri1_ali || exit 1; steps/train_deltas.sh --cmd "$train_cmd" \ 2500 20000 data/train_30k_nodup data/lang exp/tri1_ali exp/tri2 || exit 1; ( utils/mkgraph.sh data/lang_test exp/tri2 exp/tri2/graph || exit 1; steps/decode.sh --nj 30 --cmd "$decode_cmd" --config conf/decode.config \ exp/tri2/graph data/eval2000 exp/tri2/decode_eval2000 || exit 1; )& steps/align_si.sh --nj 30 --cmd "$train_cmd" \ data/train_30k_nodup data/lang exp/tri2 exp/tri2_ali || exit 1; # Train tri3a, which is LDA+MLLT, on 30k_nodup data. 
steps/train_lda_mllt.sh --cmd "$train_cmd" \ --splice-opts "--left-context=3 --right-context=3" \ 2500 20000 data/train_30k_nodup data/lang exp/tri2_ali exp/tri3a || exit 1; ( utils/mkgraph.sh data/lang_test exp/tri3a exp/tri3a/graph || exit 1; steps/decode.sh --nj 30 --cmd "$decode_cmd" --config conf/decode.config \ exp/tri3a/graph data/eval2000 exp/tri3a/decode_eval2000 || exit 1; )& # From now, we start building a more serious system (with SAT), and we'll # do the alignment with fMLLR. steps/align_fmllr.sh --nj 30 --cmd "$train_cmd" \ data/train_100k_nodup data/lang exp/tri3a exp/tri3a_ali_100k_nodup || exit 1; steps/train_sat.sh --cmd "$train_cmd" \ 2500 20000 data/train_100k_nodup data/lang exp/tri3a_ali_100k_nodup exp/tri4a || exit 1; ( utils/mkgraph.sh data/lang_test exp/tri4a exp/tri4a/graph steps/decode_fmllr.sh --nj 30 --cmd "$decode_cmd" --config conf/decode.config \ exp/tri4a/graph data/eval2000 exp/tri4a/decode_eval2000 steps/decode_fmllr.sh --nj 30 --cmd "$decode_cmd" --config conf/decode.config \ exp/tri4a/graph data/train_dev exp/tri4a/decode_train_dev )& steps/align_fmllr.sh --nj 30 --cmd "$train_cmd" \ data/train_100k_nodup data/lang exp/tri4a exp/tri4a_ali_100k_nodup local/run_sgmm.sh #local/run_sgmm2.sh # Building a larger SAT system. steps/train_sat.sh --cmd "$train_cmd" \ 3500 100000 data/train_100k_nodup data/lang exp/tri4a_ali_100k_nodup exp/tri5a || exit 1; ( utils/mkgraph.sh data/lang_test exp/tri5a exp/tri5a/graph || exit 1; steps/decode_fmllr.sh --cmd "$decode_cmd" --config conf/decode.config \ --nj 30 exp/tri5a/graph data/eval2000 exp/tri5a/decode_eval2000 || exit 1; ) # MMI starting from system in tri5a. Use the same data (100k_nodup). # Later we'll use all of it. 
# fMLLR alignments from tri5a, used as the numerator for MMI training.
steps/align_fmllr.sh --nj 40 --cmd "$train_cmd" \
  data/train_100k_nodup data/lang exp/tri5a exp/tri5a_ali_100k_nodup || exit 1;

# Denominator lattices for discriminative training.
steps/make_denlats.sh --nj 40 --cmd "$decode_cmd" --transform-dir exp/tri5a_ali_100k_nodup \
  --config conf/decode.config \
  --sub-split 50 data/train_100k_nodup data/lang exp/tri5a exp/tri5a_denlats_100k_nodup || exit 1;

# Boosted MMI (boost factor 0.1) on top of tri5a.
steps/train_mmi.sh --cmd "$decode_cmd" --boost 0.1 \
  data/train_100k_nodup data/lang exp/tri5a_{ali,denlats}_100k_nodup exp/tri5a_mmi_b0.1 || exit 1;

# Decode with the MMI model, reusing the fMLLR transforms estimated by the
# tri5a decode; backgrounded.
steps/decode.sh --nj 30 --cmd "$decode_cmd" --config conf/decode.config \
  --transform-dir exp/tri5a/decode_eval2000 \
  exp/tri5a/graph data/eval2000 exp/tri5a_mmi_b0.1/decode_eval2000 &

# Diagonal UBM (700 Gaussians) needed for fMMI feature-space training.
steps/train_diag_ubm.sh --silence-weight 0.5 --nj 40 --cmd "$train_cmd" \
  700 data/train_100k_nodup data/lang exp/tri5a_ali_100k_nodup exp/tri5a_dubm

# fMMI+MMI training.
steps/train_mmi_fmmi.sh --learning-rate 0.005 \
  --boost 0.1 --cmd "$train_cmd" \
  data/train_100k_nodup data/lang exp/tri5a_ali_100k_nodup exp/tri5a_dubm exp/tri5a_denlats_100k_nodup \
  exp/tri5a_fmmi_b0.1 || exit 1;

# Decode several training iterations to pick the best one (all in background).
for iter in 4 5 6 7 8; do
  steps/decode_fmmi.sh --nj 30 --cmd "$decode_cmd" --iter $iter \
    --config conf/decode.config --transform-dir exp/tri5a/decode_eval2000 \
    exp/tri5a/graph data/eval2000 exp/tri5a_fmmi_b0.1/decode_eval2000_it$iter &
done

# Recipe with indirect differential [doesn't make difference here]
steps/train_mmi_fmmi_indirect.sh \
  --boost 0.1 --cmd "$train_cmd" \
  data/train_100k_nodup data/lang exp/tri5a_ali_100k_nodup exp/tri5a_dubm exp/tri5a_denlats_100k_nodup \
  exp/tri5a_fmmi_b0.1_indirect || exit 1;
for iter in 4 5 6 7 8; do
  steps/decode_fmmi.sh --nj 30 --cmd "$decode_cmd" --iter $iter \
    --config conf/decode.config --transform-dir exp/tri5a/decode_eval2000 \
    exp/tri5a/graph data/eval2000 exp/tri5a_fmmi_b0.1_indirect/decode_eval2000_it$iter &
done

# Note: we haven't yet run with all the data.
# getting results (see RESULTS file)
# Scan every decode directory and report the best WER per directory.
# First form: sclite-style scoring output (the "Sum" row of score_*/*.sys);
# second form: plain wer_* files. Errors from non-matching layouts are muted.
for x in exp/*/decode*; do [ -d $x ] && grep Sum $x/score_*/*.sys | utils/best_wer.sh; done 2>/dev/null
for x in exp/*/decode*; do [ -d $x ] && grep WER $x/wer_* | utils/best_wer.sh; done 2>/dev/null
<reponame>rafalcieslak/RGK /*#include "brdf.hpp" #include "glm.hpp" #include <glm/gtx/vector_angle.hpp> #include <glm/gtx/string_cast.hpp> #include "out.hpp" #include "utils.hpp" #include "LTC/ltc.hpp" #include "random_utils.hpp" std::tuple<glm::vec3, Spectrum> BRDF::GetRay(glm::vec3 normal, glm::vec3 inc, Spectrum diffuse, Spectrum specular, glm::vec2 sample, bool) const{ glm::vec3 v = RandomUtils::Sample2DToHemisphereCosineDirected(sample, normal); qassert_true(glm::dot(normal, v) >= -0.001f); Spectrum r = Apply(diffuse, specular, normal, inc, v); r *= glm::pi<float>(); return std::make_tuple(v,r); } float BRDFDiffuseCosine::PdfDiff() const{ return 1.0f/glm::pi<float>(); } float BRDFDiffuseCosine::PdfSpec(glm::vec3, glm::vec3, glm::vec3, bool) const{ return 0.0f; } */ /* float BRDFPhongEnergy::PdfDiff() const{ return 1.0f/glm::pi<float>(); } float BRDFPhongEnergy::PdfSpec(glm::vec3 N, glm::vec3 Vi, glm::vec3 Vr, bool) const{ // Ideal specular reflection direction glm::vec3 Vs = 2.0f * glm::dot(Vi, N) * N - Vi; float c = glm::max(0.0f, glm::dot(Vr,Vs)); c = glm::pow(c, exponent); c /= glm::dot(Vi, N); float norm = (exponent + 2) / (2.0f * glm::pi<float>()); return norm * c; } BRDFCookTorr::BRDFCookTorr(float phong_exp, float ior){ // Converting specular exponent to roughness using Brian Karis' formula: roughness = glm::pow(2.0f / (2.0f + phong_exp), 0.25f); // Fresnel approximation for perpendicular reflection float q = (1.0f - ior)/(1.0f + ior); F0 = q*q; } float BRDFCookTorr::PdfDiff() const{ return 0.0f; return 1.0f/glm::pi<float>(); } float BRDFCookTorr::PdfSpec(glm::vec3 N, glm::vec3 Vi, glm::vec3 Vr, bool debug) const{ (void)debug; glm::vec3 Vh = glm::normalize(Vi + Vr); //if(glm::length(Vi + Vr) < 0.001f) ...? 
float th_i = 0.5f*glm::pi<float>() - glm::angle(Vi, N); float th_r = 0.5f*glm::pi<float>() - glm::angle(Vr, N); float th_h = 0.5f*glm::pi<float>() - glm::angle(Vh, N); float beta = 0.5f*glm::angle(Vi, Vr); // Schlich approximation for Fresnel function float cb = 1.0f - glm::dot(Vi, Vh); float F = F0 + (1.0f - F0) * cb*cb*cb*cb*cb; // Beckman dist //float ce = glm::cos(th_h); //float te = glm::tan(th_h); float m2 = roughness * roughness; float NdotH = glm::dot(N, Vh); float r1 = 1.0f / ( 4.0f * m2 * NdotH * NdotH * NdotH * NdotH); float r2 = (NdotH * NdotH - 1.0) / (m2 * NdotH * NdotH); float D = r1 * glm::exp(r2); // Shadow and masking factor float Gc = 2.0f * glm::cos(th_h) / glm::cos(beta); float G1 = Gc * glm::cos(th_i); float G2 = Gc * glm::cos(th_r); float G = glm::max(1.0f, glm::max(G1,G2)); float NdotV = glm::dot(N,Vi); if(NdotV < 0.001f) return 0.0f; float VdotH = glm::dot(Vi,Vh); float NdotL = NdotV; float NH2 = 2.0f * NdotH; float g1 = (NH2 * NdotV) / VdotH; float g2 = (NH2 * NdotL) / VdotH; G = glm::min(1.0f, glm::min(g1, g2)); float c = F * D * G / (NdotV * NdotL * 3.14); return c / glm::pi<float>(); } */ /* // ======================================================================== BRDFLTCBeckmann::BRDFLTCBeckmann(float phong_exp){ // Converting specular exponent to roughness using <NAME>' formula: roughness = glm::pow(2.0f / (2.0f + phong_exp), 0.5f); out::cout(4) << "Created new BRDF LTC Beckmann with roughness = " << roughness << std::endl; } float BRDFLTCBeckmann::PdfDiff() const{ return 1.0f/glm::pi<float>(); } float BRDFLTCBeckmann::PdfSpec(glm::vec3 N, glm::vec3 Vi, glm::vec3 Vr, bool debug) const{ return LTC::GetPDF(LTC::Beckmann, N, Vi, Vr, roughness, debug); } std::tuple<glm::vec3, Spectrum> BRDFLTCBeckmann::GetRay(glm::vec3 normal, glm::vec3 inc, Spectrum diffuse, Spectrum specular, glm::vec2 sample, bool debug) const{ assert(glm::dot(normal, inc) > 0.0f); float diffuse_power = diffuse.r + diffuse.g + diffuse.b; // Integral over diffuse 
spectrum... float specular_power = specular.r + specular.g + specular.b; // Integral over specular spectrum... float diffuse_probability = diffuse_power / (diffuse_power + specular_power); if(RandomUtils::DecideAndRescale(sample.x, diffuse_probability)){ // Diffuse ray auto res = BRDF::GetRay(normal, inc, diffuse, specular, sample); std::get<1>(res) = diffuse; return res; }else{ glm::vec3 v = RandomUtils::Sample2DToHemisphereCosineZ(sample); qassert_false(std::isnan(v.x)); v = LTC::GetRandom(LTC::Beckmann, normal, inc, roughness, v, debug); return std::make_tuple(v,specular); } } // ======================================================================== BRDFLTCGGX::BRDFLTCGGX(float phong_exp){ // Converting specular exponent to roughness using <NAME>' formula: roughness = glm::pow(2.0f / (2.0f + phong_exp), 0.5f); out::cout(4) << "Created new BRDF LTC GGX with roughness = " << roughness << std::endl; } float BRDFLTCGGX::PdfDiff() const{ return 1.0f/glm::pi<float>(); } float BRDFLTCGGX::PdfSpec(glm::vec3 N, glm::vec3 Vi, glm::vec3 Vr, bool debug) const{ return LTC::GetPDF(LTC::GGX, N, Vi, Vr, roughness, debug); } std::tuple<glm::vec3, Spectrum> BRDFLTCGGX::GetRay(glm::vec3 normal, glm::vec3 inc, Spectrum diffuse, Spectrum specular, glm::vec2 sample, bool debug) const{ qassert_directed(normal, inc); float diffuse_power = diffuse.r + diffuse.g + diffuse.b; // Integral over diffuse spectrum... float specular_power = specular.r + specular.g + specular.b; // Integral over specular spectrum... float diffuse_probability = diffuse_power / (diffuse_power + specular_power + 0.0001f); if(RandomUtils::DecideAndRescale(sample.x, diffuse_probability)){ // Diffuse ray auto res = BRDF::GetRay(normal, inc, diffuse, specular, sample); std::get<1>(res) = diffuse; return res; }else{ glm::vec3 v = RandomUtils::Sample2DToHemisphereCosineZ(sample); qassert_false(std::isnan(v.x)); v = LTC::GetRandom(LTC::GGX, normal, inc, roughness, v, debug); return std::make_tuple(v,specular); } } */
// https://codejam.withgoogle.com/2018/challenges/0000000000000130/dashboard/0000000000000524
#include <iomanip>
#include <iostream>

using namespace std;

// For each test case: given destination d and n horses at position k with
// speed s, the latest arrival time over all horses bounds us; our maximum
// constant speed is d divided by that time.
int main() {
    int cases;
    cin >> cases;
    for (int tc = 1; tc <= cases; ++tc) {
        int dest, horses;
        cin >> dest >> horses;
        double slowest = -1;  // -1 marks "no horse seen yet"
        while (horses-- > 0) {
            int pos, speed;
            cin >> pos >> speed;
            const double travel = double(dest - pos) / double(speed);
            if (slowest < 0 || travel > slowest) {
                slowest = travel;
            }
        }
        cout << "Case #" << tc << ": " << setprecision(10) << dest / slowest << endl;
    }
}
def c2f(c):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    return c * 9 / 5 + 32


celsius = [21, 12, -4]
# Convert every reading in one pass.
fahrenheit = [c2f(degrees) for degrees in celsius]
print(fahrenheit)
# Output: [69.8, 53.6, 24.8]
#include <iostream>

using namespace std;

// Writes the operator character followed by the sum of a and b to stdout
// (e.g. print_sum(2, 3, '+') prints "+5").
void print_sum(int a, int b, char operator_) {
    const int total = a + b;
    cout << operator_ << total;
}

int main() {
    print_sum(2, 3, '+');
    return 0;
}
<gh_stars>0 package algorithms; public class FibonacciVariations { public static long calculateResult(int n) { if (n <= 1) { return n; } int previousLastNumber = 0; int lastNumber = 1; int tempNumber = 0; for (int i = 2; i <= n; i++) { tempNumber = lastNumber; lastNumber = lastNumber + previousLastNumber; previousLastNumber = tempNumber; } return lastNumber; } public static int calculateLastDigit(int n) { if (n <= 1) { return n; } int previousLastDigit = 0; int lastDigit = 1; int tempDigit = 0; for (int i = 2; i <= n; i++) { tempDigit = lastDigit; lastDigit = (lastDigit + previousLastDigit) % 10; previousLastDigit = tempDigit; } return lastDigit; } }
import plotly.offline as py
import plotly.graph_objs as go
import pandas as pd

# Load the exported daily quotes.
data = pd.read_csv('AAPL_data.csv')

# Single scatter trace: closing price against date.
price_trace = go.Scatter(
    x=data['date'],
    y=data['close'],
    name='Apple',
)

# Chart title plus axis captions.
chart_layout = go.Layout(
    title='Apple Stock Price Trends',
    xaxis=dict(title='Past 3 Years'),
    yaxis=dict(title='Closing Price ($)'),
)

# Assemble the figure and render it to a standalone HTML file.
fig = go.Figure(data=[price_trace], layout=chart_layout)
py.plot(fig, filename='apple_chart.html')
const MODERN_ACTIVITY = 15;
const HALF_LIFE_PERIOD = 5730;
// ln(2) rounded to three decimals, as used by the original formula.
const DECAY_CONSTANT = 0.693;

/**
 * Estimate the age (in years) of an organic sample from its measured
 * carbon-14 activity using the radiocarbon dating formula
 *   age = T_half * ln(A_modern / A_sample) / ln(2)
 *
 * Fixed: the original guard `parseFloat(x) !== NaN` was always true because
 * NaN never compares equal to itself; the validity check is now expressed
 * so NaN inputs fail it directly. The magic number 15 in the range check is
 * replaced by the existing MODERN_ACTIVITY constant. Behavior is unchanged
 * for every input.
 *
 * @param {string} sampleActivity - measured activity as a decimal string.
 * @returns {number|false} the age in years rounded up, or false when the
 *   input is not a string or does not parse to a value in (0, MODERN_ACTIVITY).
 */
function dateSample(sampleActivity) {
  if (typeof sampleActivity !== 'string') return false;

  const activity = Number.parseFloat(sampleActivity);
  // NaN fails both comparisons, so unparseable strings yield false here.
  if (!(activity > 0 && activity < MODERN_ACTIVITY)) return false;

  return Math.ceil(
    (HALF_LIFE_PERIOD * Math.log(MODERN_ACTIVITY / activity)) / DECAY_CONSTANT
  );
}

// Guarded so the file also loads cleanly as an ES module (where `module`
// is undefined); under CommonJS the export is identical to before.
if (typeof module !== 'undefined') {
  module.exports = dateSample;
}
# |source| this file
#
# Utilities for working with EC2 instances
#

cloud_DefaultZone() {
  echo "us-east-1b"
}

cloud_RestartPreemptedInstances() {
  : # Not implemented
}

# AWS region is zone with the last character removed
__cloud_GetRegion() {
  declare zone="$1"
  # AWS region is zone with the last character removed
  declare region="${zone:0:$((${#zone} - 1))}"
  echo "$region"
}

# Note: sshPrivateKey should be globally defined whenever this function is called.
__cloud_SshPrivateKeyCheck() {
  # shellcheck disable=SC2154
  if [[ -z $sshPrivateKey ]]; then
    echo Error: sshPrivateKey not defined
    exit 1
  fi
  if [[ ! -r $sshPrivateKey ]]; then
    echo "Error: file is not readable: $sshPrivateKey"
    exit 1
  fi
}

#
# __cloud_FindInstances
#
# Find instances with name matching the specified pattern.
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
#   "name:public IP:private IP:zone"
#
# filter   - The instances to filter on
#
# examples:
#   $ __cloud_FindInstances "exact-machine-name"
#   $ __cloud_FindInstances "all-machines-with-a-common-machine-prefix*"
#
__cloud_FindInstances() {
  declare filter="$1"
  instances=()

  # All regions that instances may have been provisioned in.
  declare -a regions=("us-east-1" "us-east-2" "us-west-1" "us-west-2" "sa-east-1" "ap-northeast-2" \
    "ap-northeast-1" "ap-southeast-2" "ap-southeast-1" "ap-south-1" "eu-west-1" "eu-west-2" "eu-central-1" "ca-central-1")

  for region in "${regions[@]}"
  do
    # Fixed: `zone` is now declared with the other read targets instead of
    # leaking as a global.
    declare name publicIp privateIp zone
    while read -r name publicIp privateIp zone; do
      printf "%-30s | publicIp=%-16s privateIp=%s zone=%s\n" "$name" "$publicIp" "$privateIp" "$zone"
      instances+=("$name:$publicIp:$privateIp:$zone")
    done < <(aws ec2 describe-instances \
               --region "$region" \
               --filters \
                 "Name=tag:name,Values=$filter" \
                 "Name=instance-state-name,Values=pending,running" \
               --query "Reservations[].Instances[].[InstanceId,PublicIpAddress,PrivateIpAddress,Placement.AvailabilityZone]" \
               --output text \
            )
  done
}

#
# cloud_FindInstances [namePrefix]
#
# Find instances with names matching the specified prefix
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
#   "name:public IP:private IP"
#
# namePrefix - The instance name prefix to look for
#
# examples:
#   $ cloud_FindInstances all-machines-with-a-common-machine-prefix
#
cloud_FindInstances() {
  declare namePrefix="$1"
  __cloud_FindInstances "$namePrefix*"
}

#
# cloud_FindInstance [name]
#
# Find an instance with a name matching the exact pattern.
#
# For each matching instance, an entry in the `instances` array will be added with the
# following information about the instance:
#   "name:public IP:private IP"
#
# name - The instance name to look for
#
# examples:
#   $ cloud_FindInstance exact-machine-name
#
cloud_FindInstance() {
  declare name="$1"
  __cloud_FindInstances "$name"
}

#
# cloud_Initialize [networkName]
#
# Perform one-time initialization that may be required for the given testnet.
#
# networkName   - unique name of this testnet
#
# This function will be called before |cloud_CreateInstances|
cloud_Initialize() {
  declare networkName="$1"
  declare zone="$2"
  declare region=
  region=$(__cloud_GetRegion "$zone")

  __cloud_SshPrivateKeyCheck
  # Recreate the key pair from scratch so it always matches sshPrivateKey.
  aws ec2 delete-key-pair --region "$region" --key-name "$networkName"
  aws ec2 import-key-pair --region "$region" --key-name "$networkName" \
    --public-key-material file://"${sshPrivateKey}".pub

  # Recreate the security group; deletion may fail if it never existed,
  # which is fine (|| true).
  declare rules
  rules=$(cat "$(dirname "${BASH_SOURCE[0]}")"/ec2-security-group-config.json)
  aws ec2 delete-security-group --region "$region" --group-name "$networkName" || true
  aws ec2 create-security-group --region "$region" --group-name "$networkName" --description "Created automatically by $0"
  aws ec2 authorize-security-group-ingress --output table --region "$region" --group-name "$networkName" --cli-input-json "$rules"
}

#
# cloud_CreateInstances [networkName] [namePrefix] [numNodes] [imageName]
#                       [machineType] [bootDiskSize] [startupScript] [address]
#
# Creates one or more identical instances.
#
# networkName   - unique name of this testnet
# namePrefix    - unique string to prefix all the instance names with
# numNodes      - number of instances to create
# imageName     - Disk image for the instances
# machineType   - EC2 machine type
# bootDiskSize  - Optional size of the boot disk in GB
# startupScript - Optional startup script to execute when the instance boots
# address       - Optional name of the static IP address to attach to the
#                 instance.  Requires that |numNodes| = 1 and that addressName
#                 has been provisioned in the region that is hosting |zone|
#
# Tip: use cloud_FindInstances to locate the instances once this function
#      returns
cloud_CreateInstances() {
  declare networkName="$1"
  declare namePrefix="$2"
  declare numNodes="$3"
  declare enableGpu="$4"
  declare machineType="$5"
  declare zone="$6"
  declare optionalBootDiskSize="$7"
  declare optionalStartupScript="$8"
  declare optionalAddress="$9"

  declare region=
  region=$(__cloud_GetRegion "$zone")

  if $enableGpu; then
    #
    # Custom Ubuntu 18.04 LTS image with CUDA 9.2 and CUDA 10.0 installed
    #
    # Unfortunately these AMIs are not public.  When this becomes an issue, use
    # the stock Ubuntu 18.04 image and programmatically install CUDA after the
    # instance boots
    #
    case $region in
    us-east-1)
      imageName="ami-0a8bd6fb204473f78"
      ;;
    us-west-1)
      imageName="ami-07011f0795513c59d"
      ;;
    us-west-2)
      imageName="ami-0a11ef42b62b82b68"
      ;;
    *)
      usage "Unsupported region: $region"
      ;;
    esac
  else
    # Select an upstream Ubuntu 18.04 AMI from https://cloud-images.ubuntu.com/locator/ec2/
    case $region in
    us-east-1)
      imageName="ami-0fba9b33b5304d8b4"
      ;;
    us-east-2)
      imageName="ami-0e04554247365d806"
      ;;
    us-west-1)
      imageName="ami-07390b6ff5934a238"
      ;;
    us-west-2)
      imageName="ami-03804ed633fe58109"
      ;;
    sa-east-1)
      imageName="ami-0f1678b6f63a0f923"
      ;;
    ap-northeast-2)
      imageName="ami-0695e34e31339c3ff"
      ;;
    ap-northeast-1)
      imageName="ami-003371bfa26192744"
      ;;
    ap-southeast-2)
      imageName="ami-0401c9e2f645b5557"
      ;;
    ap-southeast-1)
      imageName="ami-08050c889a630f1bd"
      ;;
    ap-south-1)
      imageName="ami-04184c12996409633"
      ;;
    eu-central-1)
      imageName="ami-054e21e355db24124"
      ;;
    eu-west-1)
      imageName="ami-0727f3c2d4b0226d5"
      ;;
    eu-west-2)
      imageName="ami-068f09e337d7da0c4"
      ;;
    ca-central-1)
      imageName="ami-06ed08059bdc08fc9"
      ;;
    *)
      usage "Unsupported region: $region"
      ;;
    esac
  fi

  declare -a args
  args=(
    --key-name "$networkName"
    --count "$numNodes"
    --region "$region"
    --placement "AvailabilityZone=$zone"
    --security-groups "$networkName"
    --image-id "$imageName"
    --instance-type "$machineType"
    --tag-specifications "ResourceType=instance,Tags=[{Key=name,Value=$namePrefix}]"
  )
  if [[ -n $optionalBootDiskSize ]]; then
    args+=(
      --block-device-mapping "[{\"DeviceName\": \"/dev/sda1\", \"Ebs\": { \"VolumeSize\": $optionalBootDiskSize }}]"
    )
  fi
  if [[ -n $optionalStartupScript ]]; then
    args+=(
      --user-data "file://$optionalStartupScript"
    )
  fi

  if [[ -n $optionalAddress ]]; then
    [[ $numNodes = 1 ]] || {
      echo "Error: address may not be supplied when provisioning multiple nodes: $optionalAddress"
      exit 1
    }
  fi

  (
    set -x
    aws ec2 run-instances --output table "${args[@]}"
  )

  if [[ -n $optionalAddress ]]; then
    cloud_FindInstance "$namePrefix"
    if [[ ${#instances[@]} -ne 1 ]]; then
      echo "Failed to find newly created instance: $namePrefix"
    fi

    declare instanceId
    IFS=: read -r instanceId publicIp privateIp zone < <(echo "${instances[0]}")
    (
      set -x
      # It would be better to poll that the instance has moved to the 'running'
      # state instead of blindly sleeping for 30 seconds...
      sleep 30
      declare region=
      region=$(__cloud_GetRegion "$zone")
      aws ec2 associate-address \
        --instance-id "$instanceId" \
        --region "$region" \
        --allocation-id "$optionalAddress"
    )
  fi
}

#
# cloud_DeleteInstances
#
# Deletes all the instances listed in the `instances` array
#
cloud_DeleteInstances() {
  # Fixed: test the array length, not the string length of element 0
  # (${#instances[0]} only worked by accident and errors under `set -u`
  # when the array is empty).
  if [[ ${#instances[@]} -eq 0 ]]; then
    echo No instances to delete
    return
  fi

  # Terminate the instances
  for instance in "${instances[@]}"; do
    declare name="${instance/:*/}"
    declare zone="${instance/*:/}"
    declare region=
    region=$(__cloud_GetRegion "$zone")

    (
      set -x
      aws ec2 terminate-instances --output table --region "$region" --instance-ids "$name"
    )
  done

  # Wait until the instances are terminated
  for instance in "${instances[@]}"; do
    declare name="${instance/:*/}"
    declare zone="${instance/*:/}"
    declare region=
    region=$(__cloud_GetRegion "$zone")

    while true; do
      declare instanceState
      instanceState=$(\
        aws ec2 describe-instances \
          --region "$region" \
          --instance-ids "$name" \
          --query "Reservations[].Instances[].State.Name" \
          --output text \
      )
      echo "$name: $instanceState"
      if [[ $instanceState = terminated ]]; then
        break;
      fi
      sleep 2
    done
  done
}

#
# cloud_WaitForInstanceReady [instanceName] [instanceIp] [instanceZone] [timeout]
#
# Return once the newly created VM instance is responding.  This function is cloud-provider specific.
#
cloud_WaitForInstanceReady() {
  declare instanceName="$1"
  declare instanceIp="$2"
#  declare instanceZone="$3" # unused
  declare timeout="$4"

  # `tr - _` keeps the pipeline producing output so `timeout` can kill it.
  timeout "${timeout}"s bash -c "set -o pipefail; until ping -c 3 $instanceIp | tr - _; do echo .; done"
}

#
# cloud_FetchFile [instanceName] [publicIp] [remoteFile] [localFile]
#
# Fetch a file from the given instance.  This function uses a cloud-specific
# mechanism to fetch the file
#
cloud_FetchFile() {
  # shellcheck disable=SC2034 # instanceName is unused
  declare instanceName="$1"
  declare publicIp="$2"
  declare remoteFile="$3"
  declare localFile="$4"

  __cloud_SshPrivateKeyCheck
  (
    set -x
    scp \
      -o "StrictHostKeyChecking=no" \
      -o "UserKnownHostsFile=/dev/null" \
      -o "User=solana" \
      -o "IdentityFile=$sshPrivateKey" \
      -o "LogLevel=ERROR" \
      -F /dev/null \
      "solana@$publicIp:$remoteFile" "$localFile"
  )
}

#
# cloud_CreateAndAttachPersistentDisk
#
# Not yet implemented for this cloud provider
cloud_CreateAndAttachPersistentDisk() {
  echo "ERROR: cloud_CreateAndAttachPersistentDisk is not yet implemented for ec2"
  exit 1
}

#
# cloud_StatusAll
#
# Not yet implemented for this cloud provider
cloud_StatusAll() {
  echo "ERROR: cloud_StatusAll is not yet implemented for ec2"
}
class Difference:
    """Absolute difference between two numbers."""

    def __init__(self, a, b):
        # Operands; order does not matter for the result.
        self.a = a
        self.b = b

    def calculate(self):
        """Return ``|a - b|``.

        Fixed: the original returned ``a + b`` when the operands were
        equal, which is wrong for a difference — equal operands must
        yield 0, which ``abs(a - b)`` already does.  Behavior for
        unequal operands is unchanged.
        """
        return abs(self.a - self.b)
#!/bin/sh
# Regenerate every result figure from the benchmark output files via genplot.py.
# Axis spec format appears to be <column>~<scale>~<label>; -l places the legend,
# -t sets the plot title — TODO confirm against genplot.py --help.

rm -rf figures/*
# Fixed: abort when the figures directory cannot be entered instead of
# silently running every plot in the wrong directory (shellcheck SC2164).
cd figures || exit 1

# L2 datasets: improvement in distance computations vs. recall.
../../genplot.py -i ../ResultsL2/cophir/res_K\=10.dat -o cophir_L2_dist -x 0~norm~Recall -y 1~log~ImprDistComp -l "none" -t "CoPhIR"
../../genplot.py -i ../ResultsL2/sift_texmex_base1m/res_K\=10.dat -o sift_L2_dist -x 0~norm~Recall -y 1~log~ImprDistComp -l "none" -t "SIFT"
../../genplot.py -i ../ResultsL2/unif64/res_K\=10.dat -o unif64_L2_dist -x 0~norm~Recall -y 1~log~ImprDistComp -l "none" -t "Unif64"

# L2 datasets: efficiency improvement vs. recall.
../../genplot.py -i ../ResultsL2/cophir/res_K\=10.dat -o cophir_L2_eff -x 1~norm~Recall -y 1~log~ImprEfficiency -l "2~(1,-.2)" -t ""
../../genplot.py -i ../ResultsL2/sift_texmex_base1m/res_K\=10.dat -o sift_L2_eff -x 1~norm~Recall -y 1~log~ImprEfficiency -l "2~(1,-.2)" -t ""
../../genplot.py -i ../ResultsL2/unif64/res_K\=10.dat -o unif64_L2_eff -x 1~norm~Recall -y 1~log~ImprEfficiency -l "2~(1,-.2)" -t ""

# KL-divergence datasets: distance computations vs. recall.
../../genplot.py -i ../ResultsKL/final16/res_K\=10.dat -o final16_KL_dist -x 0~norm~Recall -y 1~log~ImprDistComp -l "none" -t "Final16"
../../genplot.py -i ../ResultsKL/final64/res_K\=10.dat -o final64_KL_dist -x 0~norm~Recall -y 1~log~ImprDistComp -l "none" -t "Final64"
../../genplot.py -i ../ResultsKL/final256/res_K\=10.dat -o final256_KL_dist -x 0~norm~Recall -y 1~log~ImprDistComp -l "none" -t "Final256"

# KL-divergence datasets: efficiency vs. recall.
../../genplot.py -i ../ResultsKL/final16/res_K\=10.dat -o final16_KL_eff -x 1~norm~Recall -y 1~log~ImprEfficiency -l "2~(0.96,-.2)" -t ""
../../genplot.py -i ../ResultsKL/final64/res_K\=10.dat -o final64_KL_eff -x 1~norm~Recall -y 1~log~ImprEfficiency -l "2~(0.96,-.2)" -t ""
../../genplot.py -i ../ResultsKL/final256/res_K\=10.dat -o final256_KL_eff -x 1~norm~Recall -y 1~log~ImprEfficiency -l "2~(0.96,-.2)" -t ""

# Cosine-similarity (Wikipedia) dataset.
../../genplot.py -i ../ResultsCosineSimil/wikipedia_3200000/res_K\=10.dat -o wiki_all_eff -x 1~norm~Recall -y 1~log~ImprEfficiency -l "2~(0.95,-.2)" -t ""
../../genplot.py -i ../ResultsCosineSimil/wikipedia_3200000/res_K\=10.dat -o wiki_all_dist -x 0~norm~Recall -y 1~log~ImprDistComp -l "none" -t "Wikipedia"

# Scalability: query time vs. collection size at fixed recall.
../../genplot.py -i ../ResultsCosineSimil/sw_fixed_recall.dat -o wiki_sw -y 1~norm~QueryTime -x 0~log~NumData -l "none" -t "Wikipedia"
../../genplot.py -i ../ResultsCosineSimil/all_meth_fixed_recall.dat -o wiki_all -y 1~norm~QueryTime -x 1~norm~NumData -l "2~(0.95,-.2)" -t ""
# Open a mongo shell against the MongoDB service.
# Replace <service_port> with the port the service actually publishes.
# NOTE(review): 192.168.99.100 looks like the conventional minikube VM
# address — confirm it matches your environment before running.
mongo --host 192.168.99.100 --port <service_port>