text
stringlengths 1
1.05M
|
|---|
<reponame>nagalun/guimachi
#include "varints.hpp"
#include <string>
#include <stdexcept>
#include <algorithm>
// Decodes an LEB128-style unsigned varint from `data`.
// Each byte contributes its low 7 bits; the high bit marks continuation.
// On return, decodedBytes holds how many input bytes were consumed.
// Throws std::length_error once more than maxBytes bytes would be read.
u64 decodeUnsignedVarint(const u8 * const data, sz_t &decodedBytes, sz_t maxBytes) {
	u64 result = 0;
	sz_t index = 0;
	sz_t shift = 0;
	bool more = true;
	while (more) {
		if (maxBytes == 0) {
			throw std::length_error("Varint too big!");
		}
		--maxBytes;
		const u8 byte = data[index++];
		result |= static_cast<u64>(byte & 0x7F) << shift;
		shift += 7;
		more = (byte & 0x80) != 0;
	}
	decodedBytes = index;
	return result;
}
// Encodes `value` into `buffer` as an unsigned varint (7 bits per byte,
// high bit set on every byte except the last). Returns the number of
// bytes written; a zero value still produces one byte.
// The caller must ensure the buffer can hold unsignedVarintSize(value) bytes.
sz_t encodeUnsignedVarint(u8 * const buffer, u64 value) {
	sz_t written = 0;
	bool more = true;
	while (more) {
		u8 chunk = static_cast<u8>(value & 0x7F);
		value >>= 7;
		more = value != 0;
		if (more) {
			chunk |= 0x80; // continuation flag
		}
		buffer[written++] = chunk;
	}
	return written;
}
// Returns how many bytes encodeUnsignedVarint would emit for `value`
// (one byte per started 7-bit group; zero encodes in one byte).
sz_t unsignedVarintSize(u64 value) {
	sz_t bytes = 1;
	value >>= 7;
	while (value != 0) {
		++bytes;
		value >>= 7;
	}
	return bytes;
}
// Reads a varint length prefix followed by that many raw bytes and returns
// them as a std::string. On return, decodedBytes is the total number of
// input bytes consumed (length prefix + payload).
// NOTE(review): relies on decodeUnsignedVarint's default maxBytes argument
// (presumably declared in varints.hpp) and does not bound-check `size`
// against the source buffer — callers must guarantee it is large enough.
std::string getVarintString(const u8 * data, sz_t &decodedBytes) {
	sz_t lb;
	u64 size = decodeUnsignedVarint(data, lb);
	decodedBytes = lb + size;
	return std::string(reinterpret_cast<const char *>(data + lb), size);
}
// Writes `str` into `data` as a varint length prefix followed by the raw
// bytes. Returns the total number of bytes written (prefix + payload).
// The destination must have room for at least varintStringSize(str) bytes.
sz_t setVarintString(u8 * data, std::string const& str) {
	sz_t lb = encodeUnsignedVarint(data, str.size());
	return std::copy(str.begin(), str.end(), data + lb) - data;
}
// Number of bytes needed to store `str` as a varint-length-prefixed string.
sz_t varintStringSize(std::string const& str) {
	const sz_t payload = str.size();
	return unsignedVarintSize(payload) + payload;
}
|
#!/bin/bash
# Wrapper that runs an arbitrary program under Extrae CUDA+MPI tracing.
source @sub_PREFIXDIR@/etc/extrae.sh
export EXTRAE_CONFIG_FILE=./extrae.xml
export LD_PRELOAD=${EXTRAE_HOME}/lib/libcudampitrace.so
# Fortran variant of the tracing library:
#export LD_PRELOAD=${EXTRAE_HOME}/lib/libcudampitracef.so
export PYTHONPATH=@sub_PREFIXDIR@/libexec:$PYTHONPATH
## Run the desired program
# BUGFIX: "$@" preserves each argument's quoting/word boundaries;
# the previous bare $* re-split arguments containing spaces.
"$@"
|
# Manages SmokeTest resources: standard CRUD actions plus launching job
# runs. Responds to HTML, JSON and XML.
class SmokeTestsController < ApplicationController
  # NOTE: before_filter is the pre-Rails-5 spelling of before_action;
  # kept for compatibility with the app's Rails version.
  before_filter :authorize, :except => [:index, :show]
  layout "default", :only => :index

  # Associations eagerly loaded (and serialized) with each smoke test.
  SMOKE_TESTS_OBJ_INCLUDES = [
    :package_builders,
    :config_modules
  ]

  # OpenStack services for which `new` pre-builds a package builder and a
  # config module. Replaces 32 near-identical hand-written lines.
  SERVICES = [:nova, :glance, :keystone, :swift, :cinder, :neutron,
              :ceilometer, :heat]

  # GET /smoke_tests
  # GET /smoke_tests.json
  # GET /smoke_tests.xml
  def index
    @smoke_tests = SmokeTest.find(:all, :include => SMOKE_TESTS_OBJ_INCLUDES, :order => [:project, :id])
    if params[:table_only]
      # AJAX refresh path: only re-render the table partial.
      render :partial => "table"
    else
      respond_to do |format|
        format.html # index.html.erb
        format.json { render :json => @smoke_tests } # formatted via as_json
        format.xml  { render :xml => @smoke_tests, :include => SMOKE_TESTS_OBJ_INCLUDES }
      end
    end
  end

  # GET /smoke_tests/1
  # GET /smoke_tests/1.json
  # GET /smoke_tests/1.xml
  def show
    @smoke_test = SmokeTest.find(params[:id], :include => SMOKE_TESTS_OBJ_INCLUDES)
    respond_to do |format|
      format.html # show.html.erb
      format.json { render :json => @smoke_test } # formatted via as_json
      format.xml  { render :xml => @smoke_test, :include => SMOKE_TESTS_OBJ_INCLUDES }
    end
  end

  # GET /smoke_tests/new
  # GET /smoke_tests/new.xml
  # Pre-builds one package builder and one config module per service so the
  # form can render nested fields; merge_trunk defaults to false on each.
  def new
    @smoke_test = SmokeTest.new
    # build_<svc>_package_builder returns the freshly built association.
    SERVICES.each do |svc|
      @smoke_test.send("build_#{svc}_package_builder").merge_trunk = false
    end
    SERVICES.each do |svc|
      @smoke_test.send("build_#{svc}_config_module").merge_trunk = false
    end
    respond_to do |format|
      format.html # new.html.erb
      format.json { render :json => @smoke_test }
      format.xml  { render :xml => @smoke_test }
    end
  end

  # GET /smoke_tests/1/edit
  def edit
    @smoke_test = SmokeTest.find(params[:id])
  end

  # POST /smoke_tests
  # POST /smoke_tests.xml
  def create
    @smoke_test = SmokeTest.new(params[:smoke_test])
    respond_to do |format|
      if @smoke_test.save
        format.html { redirect_to(@smoke_test, :notice => 'Smoke test was successfully created.') }
        format.json { render :json => @smoke_test, :status => :created, :location => @smoke_test }
        format.xml  { render :xml => @smoke_test, :status => :created, :location => @smoke_test }
      else
        format.html { render :action => "new" }
        format.json { render :json => @smoke_test.errors, :status => :unprocessable_entity }
        format.xml  { render :xml => @smoke_test.errors, :status => :unprocessable_entity }
      end
    end
  end

  # PUT /smoke_tests/1
  # PUT /smoke_tests/1.xml
  def update
    @smoke_test = SmokeTest.find(params[:id])
    # Checkbox groups submit nothing when fully unchecked, so explicitly
    # clear both collections when neither key is present in the params.
    if !params[:smoke_test][:test_suite_ids] && !params[:smoke_test][:config_templates]
      @smoke_test.config_templates.clear
      @smoke_test.test_suites.clear
    end
    respond_to do |format|
      if @smoke_test.update_attributes(params[:smoke_test])
        @smoke_test.test_suites.clear if @smoke_test.test_suites.size == 0
        format.html { redirect_to(@smoke_test, :notice => 'Smoke test was successfully updated.') }
        format.json { render :json => @smoke_test, :status => :ok }
        format.xml  { render :xml => @smoke_test, :status => :ok }
      else
        format.html { render :action => "edit" }
        format.json { render :json => @smoke_test.errors, :status => :unprocessable_entity }
        format.xml  { render :xml => @smoke_test.errors, :status => :unprocessable_entity }
      end
    end
  end

  # DELETE /smoke_tests/1
  # DELETE /smoke_tests/1.xml
  def destroy
    @smoke_test = SmokeTest.find(params[:id])
    @smoke_test.destroy
    respond_to do |format|
      format.html { redirect_to(smoke_tests_url) }
      format.json { head :ok }
      format.xml  { head :ok }
    end
  end

  # POST /smoke_tests/1/run_jobs
  # Creates a JobGroup for the smoke test; the created record is not
  # rendered — all formats simply acknowledge with 200 OK.
  def run_jobs
    @smoke_test = SmokeTest.find(params[:id])
    JobGroup.create(
      :smoke_test => @smoke_test
    )
    respond_to do |format|
      format.html { head :ok }
      format.json { head :ok }
      format.xml  { head :ok }
    end
  end
end
|
<reponame>akiyoshitomita/terraform-provider-velocloud<filename>velocloud/vcoclient/model_enterprise.go
package vcoclient
import (
"time"
)
// Enterprise mirrors the VeloCloud Orchestrator "enterprise" API object —
// a customer/tenant record with identity, contact and address details.
// All fields are omitted from JSON when empty (omitempty).
type Enterprise struct {
	Id                    int32     `json:"id,omitempty"`
	Created               time.Time `json:"created,omitempty"`
	NetworkId             int32     `json:"networkId,omitempty"`
	GatewayPoolId         int32     `json:"gatewayPoolId,omitempty"`
	// *Tinyint pointers distinguish "flag unset" (nil) from an explicit value.
	AlertsEnabled         *Tinyint  `json:"alertsEnabled,omitempty"`
	OperatorAlertsEnabled *Tinyint  `json:"operatorAlertsEnabled,omitempty"`
	EndpointPkiMode       string    `json:"endpointPkiMode,omitempty"`
	Name                  string    `json:"name,omitempty"`
	Domain                string    `json:"domain,omitempty"`
	Prefix                string    `json:"prefix,omitempty"`
	LogicalId             string    `json:"logicalId,omitempty"`
	AccountNumber         string    `json:"accountNumber,omitempty"`
	Description           string    `json:"description,omitempty"`
	ContactName           string    `json:"contactName,omitempty"`
	ContactPhone          string    `json:"contactPhone,omitempty"`
	ContactMobile         string    `json:"contactMobile,omitempty"`
	ContactEmail          string    `json:"contactEmail,omitempty"`
	StreetAddress         string    `json:"streetAddress,omitempty"`
	StreetAddress2        string    `json:"streetAddress2,omitempty"`
	City                  string    `json:"city,omitempty"`
	State                 string    `json:"state,omitempty"`
	PostalCode            string    `json:"postalCode,omitempty"`
	Country               string    `json:"country,omitempty"`
	Lat                   float64   `json:"lat,omitempty"`
	Lon                   float64   `json:"lon,omitempty"`
	Timezone              string    `json:"timezone,omitempty"`
	Locale                string    `json:"locale,omitempty"`
	Modified              time.Time `json:"modified,omitempty"`
	BastionState          string    `json:"bastionState,omitempty"`
}
|
<gh_stars>0
// Validates that every field in `ids` (jQuery selectors) is non-empty.
// `mensajes` is a parallel array of error messages. Returns true when all
// fields have a value, false otherwise.
function valida_vacios(ids, mensajes)
{
    var error = true;
    $.each(ids, function (i, val) {
        // BUGFIX: .remove(".invalid") filters the matched container by the
        // selector (a no-op here) instead of removing its child <em> error
        // elements, so stale messages accumulated. Remove the children.
        $(val).parent(".input").parent().children(".invalid").remove();
        var contenido = $(val).val();
        if (contenido == '') {
            $(val).parent(".input").addClass('state-error');
            $(val).focus();
            $(val).parent(".input").parent().append('<em class="invalid">' + mensajes[i] + '</em>');
            error = false;
        }
    });
    return error;
}
// Validates that no field in `ids` contains a space character.
// NOTE(review): this function is redefined later in the file; the later
// definition wins at runtime. Both versions are behaviourally equivalent —
// consider deleting one.
function valida_espacios(ids, mensajes)
{
    var error = true;
    $.each(ids, function (i, val) {
        // BUGFIX: clear stale error elements via children(); the previous
        // .remove(".invalid") on the container itself removed nothing.
        $(val).parent(".input").parent().children(".invalid").remove();
        var contenido = $(val).val();
        if (contenido.indexOf(" ") !== -1) {
            $(val).parent(".input").addClass('state-error');
            $(val).focus();
            $(val).parent(".input").parent().append('<em class="invalid">' + mensajes[i] + '</em>');
            error = false;
        }
    });
    return error;
}
// Validates a minimal email shape for each field: <local>@<domain>.<tld>
// with every part non-empty. Returns true when all fields validate.
function valida_correo(ids, mensajes)
{
    var error = true;
    $.each(ids, function (i, val) {
        var contenido = $(val).val();
        var arroba = contenido.indexOf('@');
        var valido = false;
        if (arroba !== -1) {
            // BUGFIX: the original used substring(0, arroba - 1), dropping
            // the last character of the local part, so one-character local
            // parts ("a@x.com") were wrongly rejected. The same off-by-one
            // affected the domain name before the first dot.
            var local = contenido.substring(0, arroba);
            var dominio = contenido.substring(arroba + 1);
            var punto = dominio.indexOf('.');
            if (local !== '' && dominio !== '' && punto !== -1) {
                var nombre = dominio.substring(0, punto);
                var tld = dominio.substring(punto + 1);
                valido = (nombre !== '' && tld !== '');
            }
        }
        if (valido) {
            // BUGFIX: remove child error elements; .remove(".invalid") on
            // the container was a no-op.
            $(val).parent(".input").parent().children(".invalid").remove();
        } else {
            error = false;
            $(val).parent(".input").addClass('state-error');
            $(val).focus();
            $(val).parent(".input").parent().append('<em class="invalid">' + mensajes[i] + '</em>');
        }
    });
    return error;
}
// Validates that the two password fields (ids[0], ids[1]) hold the same
// value; flags the confirmation field on mismatch.
function valida_psswrds(ids, mensaje)
{
    // BUGFIX: psswrd1/psswrd2 were implicit globals; declare them locally.
    var psswrd1 = $(ids[0]).val();
    var psswrd2 = $(ids[1]).val();
    var error = true;
    if (psswrd1 != psswrd2) {
        $(ids[1]).parent(".input").addClass('state-error');
        $(ids[1]).focus();
        $(ids[1]).parent(".input").parent().append('<em class="invalid">' + mensaje + '</em>');
        error = false;
    } else {
        // BUGFIX: remove child error elements; .remove(".invalid") on the
        // container was a no-op.
        $(ids[1]).parent(".input").parent().children(".invalid").remove();
    }
    return error;
}
// Validates that each field contains only decimal digits (an empty field
// passes, matching the original per-character scan).
function valida_entero(ids, mensajes)
{
    var error = true;
    $.each(ids, function (i, val) {
        var contenido = $(val).val();
        // Equivalent to the original character loop. The original's
        // `parseInt(digit) % 1 != 0` branch was dead code — a parsed single
        // digit is always an integer.
        // BUGFIX: validity is now computed per field; previously, once one
        // field failed, the sticky `error` flag caused every later field to
        // be flagged even when valid.
        var valido = /^[0-9]*$/.test(contenido);
        if (valido) {
            // BUGFIX: remove child error elements (container .remove(sel)
            // was a no-op).
            $(val).parent(".input").parent().children(".invalid").remove();
        } else {
            error = false;
            $(val).parent(".input").addClass('state-error');
            $(val).focus();
            $(val).parent(".input").parent().append('<em class="invalid">' + mensajes[i] + '</em>');
        }
    });
    return error;
}
// Validates that each field's length is within [min[i], max[i]].
function valida_tamano(ids, min, max, mensajes)
{
    var error = true;
    $.each(ids, function (i, val) {
        var tamano = $(val).val().length;
        if (tamano < min[i] || tamano > max[i]) {
            $(val).parent(".input").addClass('state-error');
            $(val).focus();
            $(val).parent(".input").parent().append('<em class="invalid">' + mensajes[i] + '</em>');
            error = false;
        } else {
            // BUGFIX: remove child error elements; .remove(".invalid") on
            // the container was a no-op, so old messages accumulated.
            $(val).parent(".input").parent().children(".invalid").remove();
        }
    });
    return error;
}
// Validates each field as a date in strict "YYYY-MM-DD" form. Does not
// check calendar validity (e.g. month <= 12), matching the original.
function valida_fecha(ids, mensajes)
{
    var error = true;
    // Strict digit pattern. This also fixes latent holes in the original
    // $.isNumeric-based checks, which accepted signed/exponent forms such
    // as "1e33" for the year component.
    var formato = /^\d{4}-\d{2}-\d{2}$/;
    $.each(ids, function (i, val) {
        // BUGFIX: validity is computed per field; the original's sticky
        // `error` flag marked every field after the first failure.
        var valido = formato.test($(val).val());
        if (valido) {
            // BUGFIX: remove child error elements (container .remove(sel)
            // was a no-op).
            $(val).parent(".input").parent().children(".invalid").remove();
        } else {
            error = false;
            $(val).parent(".input").addClass('state-error');
            $(val).focus();
            $(val).parent(".input").parent().append('<em class="invalid">' + mensajes[i] + '</em>');
        }
    });
    return error;
}
// Validates that no field contains a space character.
// NOTE(review): this is a duplicate definition — valida_espacios also
// appears earlier in this file; being later, this one wins at runtime.
// Both are behaviourally equivalent; consider removing one.
function valida_espacios(ids, mensajes)
{
    var error = true;
    $.each(ids, function (i, val) {
        // BUGFIX: per-field validity (the sticky `error` flag previously
        // marked all later fields once one failed).
        var valido = $(val).val().indexOf(" ") === -1;
        if (valido) {
            // BUGFIX: remove child error elements; .remove(".invalid") on
            // the container was a no-op.
            $(val).parent(".input").parent().children(".invalid").remove();
        } else {
            error = false;
            $(val).parent(".input").addClass('state-error');
            $(val).focus();
            $(val).parent(".input").parent().append('<em class="invalid">' + mensajes[i] + '</em>');
        }
    });
    return error;
}
// Validates that each field consists only of digits and '.' characters.
// (Like the original, it does not limit the number of decimal points —
// "1.2.3" passes; tighten if a single point is required.)
function valida_decimales(ids, mensajes)
{
    // BUGFIX: removed leftover debug statement alert("algo").
    var error = true;
    $.each(ids, function (i, val) {
        // Equivalent to the original per-character scan (digit or dot);
        // the `parseInt(digit) % 1 != 0` branch was dead code.
        // BUGFIX: per-field validity instead of the sticky `error` flag.
        var valido = /^[0-9.]*$/.test($(val).val());
        if (valido) {
            // BUGFIX: remove child error elements; .remove(".invalid") on
            // the container was a no-op.
            $(val).parent(".input").parent().children(".invalid").remove();
        } else {
            error = false;
            $(val).parent(".input").addClass('state-error');
            $(val).focus();
            $(val).parent(".input").parent().append('<em class="invalid">' + mensajes[i] + '</em>');
        }
    });
    return error;
}
|
// Linked-list node carrying a payload plus previous/next references.
class Node {
    constructor(data, prev, next) {
        Object.assign(this, { data: data, prev: prev, next: next });
    }
}
// FIFO queue over Node links; `first` is the dequeue end, `last` the
// enqueue end.
class Queue {
    constructor() {
        this.first = null;
        this.last = null;
    }
    enqueue(data) {
        // BUGFIX: pass all three Node constructor args explicitly (prev is
        // the old tail, next is null) instead of leaving `next` undefined.
        const node = new Node(data, this.last, null);
        if (this.first === null) {
            this.first = node;
        }
        if (this.last) {
            this.last.next = node;
        }
        this.last = node;
    }
    // Removes and returns the front element's data; undefined when empty.
    dequeue() {
        if (this.first === null) {
            return;
        }
        const node = this.first;
        this.first = this.first.next;
        if (node === this.last) {
            this.last = null;
        }
        // BUGFIX: Node stores its payload in `data`; the old `node.value`
        // always returned undefined.
        return node.data;
    }
}
// Queue backed by doubly linked Nodes.
class DoubleLinkedQueue {
    constructor() {
        this.first = null;
        this.last = null;
    }
    // BUGFIX: new nodes were never linked into the list (neither last.next
    // nor node.prev were set), so traversal from `first` only ever saw the
    // first node. Link both directions here.
    enqueue(data) {
        const node = new Node(data, this.last, null);
        if (this.first === null) {
            this.first = node;
        }
        if (this.last !== null) {
            this.last.next = node;
        }
        this.last = node;
    }
}
// Logs the queue object, then each node's data from front to back.
// Returns null for an empty queue, undefined otherwise.
function display(q) {
    console.log(q);
    if (q.first === null) return null;
    // BUGFIX: walk the `next` links; Node has no per-node `last` field, so
    // the old `currNode = currNode.last` stopped after the first node.
    let currNode = q.first;
    while (currNode) {
        console.log(currNode.data);
        currNode = currNode.next;
    }
    return;
}
// Returns the front element's data without removing it, or null when the
// queue is empty.
function peek(q) {
    const head = q.first;
    return head === null ? null : head.data;
}
// True when the queue has no front node.
function isEmpty(q) {
    return !q.first;
}
|
<reponame>wpisen/trace<gh_stars>1-10
package com.wpisen.trace.agent.common.logger.log4j;
import org.apache.log4j.Level;
import com.wpisen.trace.agent.common.logger.Logger;
import com.wpisen.trace.agent.common.logger.support.FailsafeLogger;
public class Log4jLogger implements Logger {
private static final String FQCN = FailsafeLogger.class.getName();
private final org.apache.log4j.Logger logger;
public Log4jLogger(org.apache.log4j.Logger logger) {
this.logger = logger;
}
public void trace(String msg) {
logger.log(FQCN, Level.TRACE, msg, null);
}
public void trace(Throwable e) {
logger.log(FQCN, Level.TRACE, e == null ? null : e.getMessage(), e);
}
public void trace(String msg, Throwable e) {
logger.log(FQCN, Level.TRACE, msg, e);
}
public void debug(String msg) {
logger.log(FQCN, Level.DEBUG, msg, null);
}
public void debug(Throwable e) {
logger.log(FQCN, Level.DEBUG, e == null ? null : e.getMessage(), e);
}
public void debug(String msg, Throwable e) {
logger.log(FQCN, Level.DEBUG, msg, e);
}
public void info(String msg) {
logger.log(FQCN, Level.INFO, msg, null);
}
public void info(Throwable e) {
logger.log(FQCN, Level.INFO, e == null ? null : e.getMessage(), e);
}
public void info(String msg, Throwable e) {
logger.log(FQCN, Level.INFO, msg, e);
}
public void warn(String msg) {
logger.log(FQCN, Level.WARN, msg, null);
}
public void warn(Throwable e) {
logger.log(FQCN, Level.WARN, e == null ? null : e.getMessage(), e);
}
public void warn(String msg, Throwable e) {
logger.log(FQCN, Level.WARN, msg, e);
}
public void error(String msg) {
logger.log(FQCN, Level.ERROR, msg, null);
}
public void error(Throwable e) {
logger.log(FQCN, Level.ERROR, e == null ? null : e.getMessage(), e);
}
public void error(String msg, Throwable e) {
logger.log(FQCN, Level.ERROR, msg, e);
}
public boolean isTraceEnabled() {
return logger.isTraceEnabled();
}
public boolean isDebugEnabled() {
return logger.isDebugEnabled();
}
public boolean isInfoEnabled() {
return logger.isInfoEnabled();
}
public boolean isWarnEnabled() {
return logger.isEnabledFor(Level.WARN);
}
public boolean isErrorEnabled() {
return logger.isEnabledFor(Level.ERROR);
}
}
|
<gh_stars>0
# Write your solution here
# Reads a string and prints its characters in reverse order, one per line.
# Renamed the variable: the original shadowed the built-in `str`, and the
# manual index arithmetic is replaced by the reversed() idiom.
text = input("Please type in a string: ")
for ch in reversed(text):
    print(ch)
|
// Vue CLI configuration: injects build-time values into process.env via the
// webpack DefinePlugin and declares the single "index" page entry.
var { getAppConfig, frontType } = require("./utils/utils");
module.exports = {
  chainWebpack: (config) => {
    // Tap into DefinePlugin's arguments to extend the defined constants.
    config.plugin("define").tap((args) => {
      args[0]["process.env"] = {
        // NOTE(review): this spreads args[0] itself — whose values include
        // the old "process.env" entry as a whole — into the new object;
        // presumably `...args[0]["process.env"]` was intended. Confirm
        // before changing, as builds currently rely on this shape.
        ...args[0],
        VUE_APP_PORT: JSON.stringify(getAppConfig().port),
        FRONT_TYPE: JSON.stringify(frontType)
      };
      return args;
    });
  },
  pages: {
    index: {
      entry: "packages/website/src/main.js",
      // Template source
      template: "packages/website/index.html",
      // Output file inside dist/
      filename: "index.html",
      // When the title option is used, the template's title tag must be
      // <title><%= htmlWebpackPlugin.options.title %></title>
      title: "Sandshrew-NPM同步工具",
      // Chunks included in this page; by default this covers the extracted
      // common chunk and the vendor chunk.
      chunks: ["chunk-vendors", "chunk-common", "index"],
    },
  },
};
|
#include "mpiP.h"
/*
* COLLECTIVE
*/
/* Fortran binding: forwards to the C function and stores the return
 * code in *ierror. */
FC_FUNC( mpi_barrier , MPI_BARRIER )(int *comm, int *ierror)
{
  *ierror=MPI_Barrier( *comm );
}

/* Serial stub: with a single process there is no one to synchronize
 * with, so the barrier completes immediately. */
int MPI_Barrier(MPI_Comm comm )
{
  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Bcast. */
FC_FUNC( mpi_bcast , MPI_BCAST )(void *buffer, int *count, int *datatype,
                                 int *root, int *comm, int *ierror )
{
  *ierror=MPI_Bcast(buffer, *count, *datatype, *root, *comm);
}

/* Serial stub: the single process already holds the data, so a broadcast
 * is a no-op. Only root 0 is valid (MPI_ROOT is tolerated for
 * inter-communicator-style calls); any other root aborts. */
int MPI_Bcast(void* buffer, int count, MPI_Datatype datatype,
              int root, MPI_Comm comm )
{
  if (root==MPI_ROOT)
    return(MPI_SUCCESS);

  if (root!=0)
    {
      fprintf(stderr,"MPI_Bcast: bad root = %d\n",root);
      abort();
    }

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding: mpi_c_in_place translates Fortran's MPI_IN_PLACE
 * sentinel into the C sentinel before forwarding. */
FC_FUNC( mpi_gather , MPI_GATHER )
(void *sendbuf, int *sendcount, int *sendtype,
 void *recvbuf, int *recvcount, int *recvtype,
 int *root, int *comm, int *ierror)
{
  *ierror=MPI_Gather( mpi_c_in_place(sendbuf), *sendcount, *sendtype,
                      recvbuf, *recvcount, *recvtype,
                      *root, *comm);
}

/* Serial stub: gathering from one process is a single typed copy from
 * sendbuf to recvbuf. MPI_IN_PLACE means the data is already in recvbuf.
 * Only root 0 is valid; any other root aborts. */
int MPI_Gather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
               void* recvbuf, int recvcount, MPI_Datatype recvtype,
               int root, MPI_Comm comm)
{
  if (sendbuf==MPI_IN_PLACE)
    return(MPI_SUCCESS);
  if (root==MPI_ROOT)
    return(MPI_SUCCESS);
  if (root!=0)
    {
      fprintf(stderr,"MPI_Gather: bad root = %d\n",root);
      abort();
    }

  copy_data2(sendbuf, sendcount, sendtype,
             recvbuf, recvcount, recvtype);
  // memcpy(recvbuf,sendbuf,sendcount*sendtype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Gatherv (with MPI_IN_PLACE translation). */
FC_FUNC( mpi_gatherv , MPI_GATHERV )
( void *sendbuf, int *sendcount, int *sendtype,
  void *recvbuf, int *recvcounts, int *displs,
  int *recvtype, int *root, int *comm, int *ierror)
{
  *ierror=MPI_Gatherv( mpi_c_in_place(sendbuf), *sendcount, *sendtype,
                       recvbuf, recvcounts, displs,
                       *recvtype, *root, *comm);
}

/* Serial stub: only rank 0 exists, so only recvcounts[0]/displs[0] apply.
 * The displacement is converted from elements to bytes via the receive
 * type's extent before the typed copy. */
int MPI_Gatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
                void* recvbuf, int *recvcounts, int *displs,
                MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int offset;
  MPI_Aint rt_extent;

  if (sendbuf==MPI_IN_PLACE)
    return(MPI_SUCCESS);
  if (root==MPI_ROOT)
    return(MPI_SUCCESS);
  if (root!=0)
    {
      fprintf(stderr,"MPI_Gatherv: bad root = %d\n",root);
      abort();
    }

  MPI_Type_extent(recvtype, &rt_extent);
  offset=displs[0]*rt_extent;

  copy_data2(sendbuf, sendcount, sendtype,
             (char*)recvbuf+offset, recvcounts[0], recvtype);
  // memcpy( (char *)recvbuf+offset, sendbuf, recvcounts[0] * recvtype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Allgather (with MPI_IN_PLACE translation). */
FC_FUNC( mpi_allgather , MPI_ALLGATHER )
( void *sendbuf, int *sendcount, int *sendtype,
  void *recvbuf, int *recvcount, int *recvtype,
  int *comm, int *ierror)
{
  *ierror=MPI_Allgather( mpi_c_in_place(sendbuf), *sendcount, *sendtype,
                         recvbuf, *recvcount, *recvtype,
                         *comm );
}

/* Serial stub: allgather over one process is a single typed copy;
 * MPI_IN_PLACE means the result is already in recvbuf. */
int MPI_Allgather(void* sendbuf, int sendcount, MPI_Datatype sendtype,
                  void* recvbuf, int recvcount, MPI_Datatype recvtype,
                  MPI_Comm comm)
{
  if (sendbuf==MPI_IN_PLACE)
    return(MPI_SUCCESS);

  copy_data2(sendbuf, sendcount, sendtype,
             recvbuf, recvcount, recvtype);
  // memcpy(recvbuf,sendbuf,sendcount * sendtype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Allgatherv (with MPI_IN_PLACE translation). */
FC_FUNC( mpi_allgatherv , MPI_ALLGATHERV )
( void *sendbuf, int *sendcount, int *sendtype,
  void *recvbuf, int *recvcounts, int *displs,
  int *recvtype, int *comm, int *ierror)
{
  *ierror=MPI_Allgatherv( mpi_c_in_place(sendbuf), *sendcount, *sendtype,
                          recvbuf, recvcounts, displs,
                          *recvtype, *comm );
}

/* Serial stub: only rank 0's slot matters — copy sendbuf to
 * recvbuf + displs[0] (displacement scaled by the receive type extent). */
int MPI_Allgatherv(void* sendbuf, int sendcount, MPI_Datatype sendtype,
                   void* recvbuf, int *recvcounts, int *displs,
                   MPI_Datatype recvtype, MPI_Comm comm)
{
  int offset;
  MPI_Aint rt_extent;

  if (sendbuf==MPI_IN_PLACE)
    return(MPI_SUCCESS);

  MPI_Type_extent(recvtype, &rt_extent);
  offset=displs[0]*rt_extent;

  copy_data2(sendbuf, sendcount, sendtype,
             (char*)recvbuf+offset, recvcounts[0], recvtype);
  // memcpy( (char *)recvbuf+offset, sendbuf, recvcounts[0] * recvtype);

  return(MPI_SUCCESS);
}
/*********/
/* MPI_Scatter
* Scattering to one proc involves only one copy operation, so copy
* data from source to dest pointer
*/
/* Fortran binding: MPI_IN_PLACE translation applies to the receive buffer
 * for scatter operations. */
FC_FUNC( mpi_scatter, MPI_SCATTER )
( void *sendbuf, int *sendcount, int *sendtype,
  void *recvbuf, int *recvcount, int *recvtype,
  int *root, int *comm, int *ierror)
{
  *ierror = MPI_Scatter(sendbuf, *sendcount, *sendtype,
                        mpi_c_in_place(recvbuf), *recvcount, *recvtype,
                        *root, *comm);
}

/* Serial stub: scattering to one process is a single typed copy.
 * MPI_IN_PLACE (on recvbuf here) means the root keeps its own segment.
 * Only root 0 is valid; any other root aborts. */
int MPI_Scatter(void * sendbuf, int sendcount, MPI_Datatype sendtype,
                void * recvbuf, int recvcount, MPI_Datatype recvtype,
                int root, MPI_Comm comm)
{
  if (recvbuf==MPI_IN_PLACE)
    return(MPI_SUCCESS);
  if (root==MPI_ROOT)
    return(MPI_SUCCESS);
  if (root!=0)
    {
      fprintf(stderr,"MPI_Scatter: bad root = %d\n",root);
      abort();
    }

  copy_data2(sendbuf, sendcount, sendtype,
             recvbuf, recvcount, recvtype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Scatterv (MPI_IN_PLACE translation on recvbuf). */
FC_FUNC( mpi_scatterv , MPI_SCATTERV )
( void *sendbuf, int *sendcounts, int *displs,
  int *sendtype, void *recvbuf, int *recvcount,
  int *recvtype, int *root, int *comm, int *ierror)
{
  *ierror=MPI_Scatterv(sendbuf, sendcounts, displs,
                       *sendtype, mpi_c_in_place(recvbuf), *recvcount,
                       *recvtype, *root, *comm);
}

/* Serial stub: copy sendbuf + displs[0] (scaled by the send type extent)
 * into recvbuf; only rank 0's counts/displacement are used. */
int MPI_Scatterv(void* sendbuf, int *sendcounts, int *displs,
                 MPI_Datatype sendtype, void* recvbuf, int recvcount,
                 MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  int offset;
  MPI_Aint st_extent;

  if (recvbuf==MPI_IN_PLACE)
    return(MPI_SUCCESS);
  if (root==MPI_ROOT)
    return(MPI_SUCCESS);
  if (root!=0)
    {
      fprintf(stderr,"MPI_Scatterv: bad root = %d\n",root);
      abort();
    }

  MPI_Type_extent(sendtype, &st_extent);
  offset=displs[0]*st_extent;

  copy_data2((char*)sendbuf+offset, sendcounts[0], sendtype,
             recvbuf, recvcount, recvtype);
  // memcpy(recvbuf,(char *)sendbuf+offset,sendcounts[0] * sendtype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Reduce. */
FC_FUNC( mpi_reduce , MPI_REDUCE )
( void *sendbuf, void *recvbuf, int *count,
  int *datatype, int *op, int *root, int *comm,
  int *ierror)
{
  *ierror=MPI_Reduce(sendbuf, recvbuf, *count,
                     *datatype, *op, *root, *comm);
}

/* Serial stub: a reduction over one process is a typed copy of sendbuf
 * into recvbuf; the op is irrelevant. Only root 0 is valid.
 * Consistency fix: handle MPI_IN_PLACE ("result already in recvbuf")
 * the same way MPI_Allreduce and MPI_Gather in this file do, instead of
 * passing the sentinel pointer into copy_data2. */
int MPI_Reduce(void* sendbuf, void* recvbuf, int count,
               MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
{
  if (sendbuf==MPI_IN_PLACE)
    return(MPI_SUCCESS);
  if (root!=0)
    {
      fprintf(stderr,"MPI_Reduce: bad root = %d\n",root);
      abort();
    }

  copy_data2(sendbuf, count, datatype, recvbuf, count, datatype);
  // memcpy(recvbuf,sendbuf,count * datatype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Allreduce. */
FC_FUNC( mpi_allreduce , MPI_ALLREDUCE )
( void *sendbuf, void *recvbuf, int *count,
  int *datatype, int *op, int *comm, int *ierror)
{
  *ierror=MPI_Allreduce(sendbuf, recvbuf, *count,
                        *datatype, *op, *comm);
}

/* Serial stub: reduction over one process is a typed copy; MPI_IN_PLACE
 * means the operand (and thus the result) is already in recvbuf. */
int MPI_Allreduce(void* sendbuf, void* recvbuf, int count,
                  MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  if (sendbuf==MPI_IN_PLACE)
    return(MPI_SUCCESS);

  copy_data2(sendbuf, count, datatype, recvbuf, count, datatype);
  // memcpy(recvbuf,sendbuf,count * datatype);

  return(MPI_SUCCESS);
}
/*********/
/* MPI_Reduce_scatter
* Performs reduction of n*sum(recvcounts) and distributes to all members
* in a group. We do this to only one proc, so recvcounts[0] is only used.
*/
/* Fortran binding for MPI_Reduce_scatter. */
FC_FUNC(mpi_reduce_scatter, MPI_REDUCE_SCATTER)
(void * sendbuf, void * recvbuf, int *recvcounts,
 int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, *datatype, *op, *comm);
}

/* Serial stub: reduce+scatter over one process collapses to copying the
 * first recvcounts[0] elements into recvbuf.
 * Fixes: the function is declared int but had no return statement, so the
 * Fortran wrapper stored an indeterminate value into *ierr (undefined
 * behaviour). Also handle MPI_IN_PLACE like the other reductions here. */
int MPI_Reduce_scatter(void* sendbuf, void* recvbuf, int *recvcounts,
                       MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  if (sendbuf==MPI_IN_PLACE)
    return(MPI_SUCCESS);

  copy_data2(sendbuf, recvcounts[0], datatype, recvbuf, recvcounts[0], datatype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Scan. */
FC_FUNC( mpi_scan , MPI_SCAN)
( void *sendbuf, void *recvbuf, int *count,
  int *datatype, int *op, int *comm,
  int *ierror)
{
  *ierror=MPI_Scan( sendbuf, recvbuf, *count,
                    *datatype, *op, *comm);
}

/* Serial stub: a prefix scan over a single process is just its own
 * contribution — a typed copy of sendbuf to recvbuf. */
int MPI_Scan(void* sendbuf, void* recvbuf, int count,
             MPI_Datatype datatype, MPI_Op op, MPI_Comm comm )
{
  copy_data2(sendbuf, count, datatype, recvbuf, count, datatype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Alltoall. */
FC_FUNC( mpi_alltoall , MPI_ALLTOALL )
( void *sendbuf, int *sendcount, int *sendtype,
  void *recvbuf, int *recvcount, int *recvtype,
  int *comm, int *ierror )
{
  *ierror=MPI_Alltoall(sendbuf, *sendcount, *sendtype,
                       recvbuf, *recvcount, *recvtype,
                       *comm);
}

/* Serial stub: all-to-all between one process and itself is one typed
 * copy from sendbuf to recvbuf. */
int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
                 void *recvbuf, int recvcount, MPI_Datatype recvtype,
                 MPI_Comm comm)
{
  copy_data2(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype);
  // memcpy(recvbuf,sendbuf,sendcount * sendtype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Alltoallv. */
FC_FUNC( mpi_alltoallv , MPI_ALLTOALLV )
( void *sendbuf, int *sendcounts, int *sdispls, int *sendtype,
  void *recvbuf, int *recvcounts, int *rdispls, int *recvtype,
  int *comm, int *ierror )
{
  *ierror=MPI_Alltoallv(sendbuf, sendcounts, sdispls, *sendtype,
                        recvbuf, recvcounts, rdispls, *recvtype,
                        *comm);
}

/* Serial stub: only rank 0's segment exists. Displacements are given in
 * elements, so they are scaled by each type's extent before the copy. */
int MPI_Alltoallv(void *sendbuf, int *sendcounts,
                  int *sdispls, MPI_Datatype sendtype,
                  void *recvbuf, int *recvcounts,
                  int *rdispls, MPI_Datatype recvtype,
                  MPI_Comm comm)
{
  int send_offset;
  int recv_offset;
  MPI_Aint st_extent;
  MPI_Aint rt_extent;

  MPI_Type_extent(sendtype, &st_extent);
  MPI_Type_extent(recvtype, &rt_extent);

  send_offset=sdispls[0]*st_extent;
  recv_offset=rdispls[0]*rt_extent;

  copy_data2((char*)sendbuf+send_offset, sendcounts[0], sendtype,
             (char*)recvbuf+recv_offset, recvcounts[0], recvtype);
  // memcpy( (char *)recvbuf+recv_offset, (char *)sendbuf+send_offset,
  //         sendcounts[0] * sendtype);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran binding for MPI_Alltoallw. */
FC_FUNC( mpi_alltoallw , MPI_ALLTOALLW )
( void *sendbuf, int *sendcounts, int *sdispls, int *sendtypes,
  void *recvbuf, int *recvcounts, int *rdispls, int *recvtypes,
  int *comm, int *ierror )
{
  *ierror=MPI_Alltoallw(sendbuf, sendcounts, sdispls, sendtypes,
                        recvbuf, recvcounts, rdispls, recvtypes,
                        *comm);
}

/* Serial stub: copies rank 0's segment. Unlike Alltoallv, MPI_Alltoallw
 * displacements are specified in BYTES, so sdispls[0]/rdispls[0] are added
 * to the char* buffers directly without scaling by a type extent. */
int MPI_Alltoallw(void *sendbuf, int *sendcounts,
                  int *sdispls, MPI_Datatype *sendtypes,
                  void *recvbuf, int *recvcounts,
                  int *rdispls, MPI_Datatype *recvtypes,
                  MPI_Comm comm)
{
  copy_data2((char*)sendbuf+sdispls[0], sendcounts[0], sendtypes[0],
             (char*)recvbuf+rdispls[0], recvcounts[0], recvtypes[0]);

  return(MPI_SUCCESS);
}
/*********/
/* Fortran-to-C handle conversion for reduction ops: in this serial
 * implementation op handles are plain integers, so both conversions are
 * the identity. */
MPI_Op MPI_Op_f2c(MPI_Fint op)
{
  return(op);
}

/*********/

MPI_Fint MPI_Op_c2f(MPI_Op op)
{
  return(op);
}
|
<filename>app/src/main/java/com/example/veterineruygulamas/RestApi/ApiServ.java
package com.example.veterineruygulamas.RestApi;
import com.example.veterineruygulamas.Pojos.AsiPojo;
import com.example.veterineruygulamas.Pojos.DuyuruPojo;
import com.example.veterineruygulamas.Pojos.PetPojos;
import com.example.veterineruygulamas.Pojos.SignInPojos;
import com.example.veterineruygulamas.Pojos.SignUpPojo;
import com.example.veterineruygulamas.Pojos.SoruPojo;
import java.util.List;
import retrofit2.Call;
import retrofit2.Retrofit;
import retrofit2.converter.gson.GsonConverterFactory;
/**
 * Thin facade over the Retrofit-generated {@link ApiService}: builds the
 * Retrofit client once in the constructor and exposes one method per
 * backend endpoint.
 */
public class ApiServ {

    Retrofit retrofit;
    ApiService apis;

    public ApiServ() {
        // Fix: removed a stray empty statement (";;") after build().
        this.retrofit = new Retrofit.Builder()
                .baseUrl(APIUrl.BASE_URL)
                .addConverterFactory(GsonConverterFactory.create())
                .build();
        this.apis = this.retrofit.create(ApiService.class);
    }

    /**
     * Registers a new account.
     * NOTE(review): delegates to ApiService.at(...) — confirm that is really
     * the sign-up endpoint; the name looks accidental.
     */
    public Call<SignUpPojo> signUp(String username, String emailadress, String password) {
        return apis.at(username, emailadress, password);
    }

    /** Authenticates with email and password. */
    public Call<SignInPojos> signIn(String email, String password) {
        return apis.signIn(email, password);
    }

    /** Fetches the pet list for the given id. */
    public Call<List<PetPojos>> getPets(String id) {
        return apis.getPets(id);
    }

    /** Posts a new question with its author. */
    public Call<List<SoruPojo>> postSoru(String questiontext, String creator) {
        return apis.postSoru(questiontext, creator);
    }

    /** Fetches questions for the given id. */
    public Call<List<SoruPojo>> getSoru(String id) {
        return apis.getSoru(id);
    }

    /** Deletes the question with the given id. */
    public Call<List<SoruPojo>> deleteSoru(String id) {
        return apis.deleteSoru(id);
    }

    /** Fetches "asi" (vaccination) records for the given id. */
    public Call<List<AsiPojo>> getAsi(String id) {
        return apis.getAsi(id);
    }

    /** Fetches all announcements. */
    public Call<List<DuyuruPojo>> getDuyuru() {
        return apis.getDuyuru();
    }

    /** Fetches "asi" records for a specific pet id. */
    public Call<List<AsiPojo>> getPetAsi(String id) {
        return apis.getPetAsi(id);
    }
}
|
<filename>src/main/java/resource/utils/JsonUtil.java
package resource.utils;
import com.fasterxml.jackson.databind.ObjectMapper;
import resource.AccountTransfer;
/**
 * JSON (de)serialization helpers for {@code AccountTransfer} payloads,
 * backed by one shared Jackson {@link ObjectMapper} instance.
 */
public class JsonUtil {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    /** Serializes the transfer to its JSON string representation. */
    public static String toJson(AccountTransfer transfer) throws Exception {
        return MAPPER.writeValueAsString(transfer);
    }

    /** Parses JSON text back into an {@code AccountTransfer}. */
    public static AccountTransfer fromJson(String json) throws Exception {
        return MAPPER.readValue(json, AccountTransfer.class);
    }
}
|
<filename>tests/test_base_resnet.py
import os
import torch
import torchvision.transforms as transforms
import pytest
import pytorch_lightning as pl
import shutil
from pose_est_nets.utils.wrappers import predict_plot_test_epoch
from pose_est_nets.utils.io import set_or_open_folder, load_object
from typing import Optional
from pose_est_nets.models.base_resnet import BaseFeatureExtractor
import torchvision
_TORCH_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
_BATCH_SIZE = 12
_HEIGHTS = [
128,
256,
384,
] # standard numbers, not going to bigger images due to memory
_WIDTHS = [120, 246, 380] # similar but not square
# print(list(zip(_HEIGHTS, _WIDTHS)))
resnet_versions = [18, 34, 50, 101, 152]
def test_backbone():
    """Check the last retained layer's type when truncating at last_resnet_layer_to_get=-3.

    For each supported ResNet version, builds a truncated feature extractor
    and asserts the final kept residual unit: ``BasicBlock`` for
    ResNet-18/34, ``Bottleneck`` for ResNet-50/101/152.
    """
    for ind, resnet_v in enumerate(resnet_versions):
        model = BaseFeatureExtractor(
            resnet_version=resnet_v, last_resnet_layer_to_get=-3
        ).to(_TORCH_DEVICE)
        if resnet_v <= 34:  # smaller nets use BasicBlock as the residual unit
            assert (
                type(list(model.backbone.children())[-3][-1])
                == torchvision.models.resnet.BasicBlock
            )
        else:  # deeper nets use the Bottleneck residual unit
            assert (
                type(list(model.backbone.children())[-3][-1])
                == torchvision.models.resnet.Bottleneck
            )
        # remove model/data from gpu; then cache can be cleared
        del model
        torch.cuda.empty_cache()  # remove tensors from gpu
def test_representation_shapes_truncated_resnet():
    """loop over different resnet versions and make sure that the
    resulting representation shapes make sense."""
    # Expected feature-map shapes when truncating before average pooling;
    # spatial size then depends on the input image size, channel count on
    # the ResNet family (512 for 18/34, 2048 for 50/101/152).
    # Rows are ordered like `resnet_versions` = [18, 34, 50, 101, 152].
    repres_shape_list_truncated_before_avg_pool_small_image = [
        torch.Size([_BATCH_SIZE, 512, 4, 4]),
        torch.Size([_BATCH_SIZE, 512, 4, 4]),
        torch.Size([_BATCH_SIZE, 2048, 4, 4]),
        torch.Size([_BATCH_SIZE, 2048, 4, 4]),
        torch.Size([_BATCH_SIZE, 2048, 4, 4]),
    ]
    repres_shape_list_truncated_before_avg_pool_medium_image = [
        torch.Size([_BATCH_SIZE, 512, 8, 8]),
        torch.Size([_BATCH_SIZE, 512, 8, 8]),
        torch.Size([_BATCH_SIZE, 2048, 8, 8]),
        torch.Size([_BATCH_SIZE, 2048, 8, 8]),
        torch.Size([_BATCH_SIZE, 2048, 8, 8]),
    ]
    repres_shape_list_truncated_before_avg_pool_big_image = [
        torch.Size([_BATCH_SIZE, 512, 12, 12]),
        torch.Size([_BATCH_SIZE, 512, 12, 12]),
        torch.Size([_BATCH_SIZE, 2048, 12, 12]),
        torch.Size([_BATCH_SIZE, 2048, 12, 12]),
        torch.Size([_BATCH_SIZE, 2048, 12, 12]),
    ]
    # Index 0/1/2 corresponds to the small/medium/big entries of _HEIGHTS/_WIDTHS.
    shape_list_pre_pool = [
        repres_shape_list_truncated_before_avg_pool_small_image,
        repres_shape_list_truncated_before_avg_pool_medium_image,
        repres_shape_list_truncated_before_avg_pool_big_image,
    ]
    for ind_image in range(len(_HEIGHTS)):
        for ind, resnet_v in enumerate(resnet_versions):
            if _TORCH_DEVICE == "cuda":
                torch.cuda.empty_cache()
            fake_image_batch = torch.rand(
                size=(_BATCH_SIZE, 3, _HEIGHTS[ind_image], _WIDTHS[ind_image]),
                device=_TORCH_DEVICE,
            )
            model = BaseFeatureExtractor(
                resnet_version=resnet_v, last_resnet_layer_to_get=-3
            ).to(_TORCH_DEVICE)
            representations = model(fake_image_batch)
            assert representations.shape == shape_list_pre_pool[ind_image][ind]
            # remove model/data from gpu; then cache can be cleared
            del model
            del fake_image_batch
            del representations
            torch.cuda.empty_cache()  # remove tensors from gpu
def test_resnet_versions():
    """Construction fails for unknown resnet versions and succeeds for known ones."""
    # there is no resnet-11, so the constructor must reject it
    with pytest.raises(TypeError):
        BaseFeatureExtractor(resnet_version=11)
    # a supported version builds without raising
    model = BaseFeatureExtractor(resnet_version=18, last_resnet_layer_to_get=-3)
    # drop the model so the GPU cache can actually be freed
    del model
    torch.cuda.empty_cache()
def test_representation_shapes_full_resnet():
    """Feature shapes when keeping everything but the resnet's final FC layer.

    The global average pool collapses the spatial dimensions to 1x1, so the
    expected shape is independent of the input image size.
    """
    expected_shapes = [
        torch.Size([_BATCH_SIZE, 512, 1, 1]),
        torch.Size([_BATCH_SIZE, 512, 1, 1]),
        torch.Size([_BATCH_SIZE, 2048, 1, 1]),
        torch.Size([_BATCH_SIZE, 2048, 1, 1]),
        torch.Size([_BATCH_SIZE, 2048, 1, 1]),
    ]
    for height, width in zip(_HEIGHTS, _WIDTHS):
        for resnet_v, expected in zip(resnet_versions, expected_shapes):
            if _TORCH_DEVICE == "cuda":
                torch.cuda.empty_cache()
            batch = torch.rand(
                size=(_BATCH_SIZE, 3, height, width),
                device=_TORCH_DEVICE,
            )
            extractor = BaseFeatureExtractor(
                resnet_version=resnet_v, last_resnet_layer_to_get=-2
            ).to(_TORCH_DEVICE)
            features = extractor(batch)
            assert features.shape == expected
            # release references so the GPU cache can be cleared afterwards
            del extractor
            del batch
            del features
    torch.cuda.empty_cache()  # remove tensors from gpu
|
<filename>setup.py
import setuptools
import os

# Read README.md (located next to this file) for the PyPI long description.
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setuptools.setup(
    name="pyapetnet",
    # Version is derived from git tags via setuptools_scm; the fallback is
    # used when no SCM metadata exists (fixed typo: 'unkown' -> 'unknown').
    use_scm_version={'fallback_version': 'unknown'},
    setup_requires=['setuptools_scm', 'setuptools_scm_git_archive'],
    author="<NAME>",
    author_email="<EMAIL>",
    description="a CNN for anatomy-guided deconvolution and denoising of PET images",
    long_description=long_description,
    license='MIT',
    long_description_content_type="text/markdown",
    url="https://github.com/gschramm/pyapetnet",
    packages=setuptools.find_packages(exclude=["demo_data", "figures", "pyapetnet_2d", "scripts_bow", "wip"]),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6, <3.9',
    install_requires=['tensorflow>=2.2',
                      'nibabel>=3.0',
                      'matplotlib>=3.1',
                      'pydicom>=2.0',
                      'pymirc>=0.22'],
    # console-script entry points for prediction and model listing CLIs
    entry_points={'console_scripts': ['pyapetnet_predict_from_nifti=pyapetnet.command_line_tools:predict_from_nifti',
                                      'pyapetnet_predict_from_dicom=pyapetnet.command_line_tools:predict_from_dicom',
                                      'pyapetnet_list_models=pyapetnet.command_line_tools:list_models', ], },
    include_package_data=True,
)
|
// Names of the supported algorithms (index 0 drives the demo below).
let algorithms = ["bubble", "insertion", "selection"];

// Strategy pattern: maps an algorithm name to an in-place ascending sort.
// Every strategy mutates `arr` and returns it.
const strategypattern = {
  // Bubble sort: repeatedly swap adjacent out-of-order pairs.
  bubble: arr => {
    // Fixed: start at arr.length - 1 so arr[j + 1] never indexes past the
    // end (the old bound read undefined and relied on `x > undefined` being
    // false to stay correct).
    for (let i = arr.length - 1; i >= 0; i--) {
      for (let j = 0; j < i; j++) {
        if (arr[j] > arr[j + 1]) {
          let temp = arr[j];
          arr[j] = arr[j + 1];
          arr[j + 1] = temp;
        }
      }
    }
    return arr;
  },
  // Insertion sort: grow a sorted prefix, sliding each element into place.
  insertion: arr => {
    let temp;
    for (let i = 1; i < arr.length; i++) {
      let j = i;
      temp = arr[i];
      // Fixed: test `j > 0` BEFORE indexing arr[j - 1]; the old order read
      // arr[-1] (undefined) and only worked by accident of loose comparison.
      while (j > 0 && arr[j - 1] >= temp) {
        arr[j] = arr[j - 1];
        j--;
      }
      arr[j] = temp;
    }
    return arr;
  },
  // Selection sort: move the minimum of the unsorted suffix to the front.
  selection: arr => {
    let minIdx;
    let temp;
    for (let i = 0; i < arr.length; i++) {
      minIdx = i;
      for (let j = i + 1; j < arr.length; j++) {
        if (arr[j] < arr[minIdx]) {
          minIdx = j;
        }
      }
      temp = arr[minIdx];
      arr[minIdx] = arr[i];
      arr[i] = temp;
    }
    return arr;
  }
};

// Dispatch to the selected strategy; unknown names throw a TypeError
// (strategypattern[algorithm] is undefined and not callable).
function selectSortingAlgorithm(algorithm, arr) {
  return strategypattern[algorithm](arr);
}

let result = selectSortingAlgorithm(algorithms[0], [7, 8, 5, 6, 4, 9]);
console.log(result);
|
package com.yin.springboot.mybatis.server.service;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.List;
import com.yin.springboot.mybatis.domain.PmsProductCategory;
import com.yin.springboot.mybatis.mapper.PmsProductCategoryMapper;
import com.yin.springboot.mybatis.server.PmsProductCategoryService;
/**
 * Service layer for {@code PmsProductCategory} persistence. Every method is a
 * thin one-to-one delegation to the MyBatis {@link PmsProductCategoryMapper};
 * no additional business logic is applied here.
 */
@Service
public class PmsProductCategoryServiceImpl implements PmsProductCategoryService{

    // MyBatis mapper injected by Spring (JSR-250 @Resource)
    @Resource
    private PmsProductCategoryMapper pmsProductCategoryMapper;

    /** Deletes the row with the given primary key; returns affected row count. */
    @Override
    public int deleteByPrimaryKey(Long id) {
        return pmsProductCategoryMapper.deleteByPrimaryKey(id);
    }

    /** Inserts a full record; returns affected row count. */
    @Override
    public int insert(PmsProductCategory record) {
        return pmsProductCategoryMapper.insert(record);
    }

    /** Inserts the record, or updates it if it already exists. */
    @Override
    public int insertOrUpdate(PmsProductCategory record) {
        return pmsProductCategoryMapper.insertOrUpdate(record);
    }

    /** Insert-or-update variant that only writes non-null fields. */
    @Override
    public int insertOrUpdateSelective(PmsProductCategory record) {
        return pmsProductCategoryMapper.insertOrUpdateSelective(record);
    }

    /** Inserts only the non-null fields of the record. */
    @Override
    public int insertSelective(PmsProductCategory record) {
        return pmsProductCategoryMapper.insertSelective(record);
    }

    /** Loads a record by primary key (null semantics defined by the mapper). */
    @Override
    public PmsProductCategory selectByPrimaryKey(Long id) {
        return pmsProductCategoryMapper.selectByPrimaryKey(id);
    }

    /** Updates only the non-null fields of the record identified by its key. */
    @Override
    public int updateByPrimaryKeySelective(PmsProductCategory record) {
        return pmsProductCategoryMapper.updateByPrimaryKeySelective(record);
    }

    /** Overwrites the full record identified by its primary key. */
    @Override
    public int updateByPrimaryKey(PmsProductCategory record) {
        return pmsProductCategoryMapper.updateByPrimaryKey(record);
    }

    /** Batch-updates all records in the list via a single mapper call. */
    @Override
    public int updateBatch(List<PmsProductCategory> list) {
        return pmsProductCategoryMapper.updateBatch(list);
    }

    /** Batch-inserts all records in the list via a single mapper call. */
    @Override
    public int batchInsert(List<PmsProductCategory> list) {
        return pmsProductCategoryMapper.batchInsert(list);
    }
}
|
import React from 'react';
import {
useFonts,
Inter_100Thin,
Inter_300Light,
Inter_400Regular,
Inter_500Medium,
Inter_700Bold,
Inter_900Black,
} from '@expo-google-fonts/inter';
import { StackActions, useNavigation } from '@react-navigation/native';
import * as Molecules from '@components/molecules';
import {
useUpcomingLaunches,
useRecentLaunches,
useArticles,
} from '@hooks/index';
export default function Splash() {
const { dispatch } = useNavigation();
const [isFontsLoaded] = useFonts({
Inter_100Thin,
Inter_300Light,
Inter_400Regular,
Inter_500Medium,
Inter_700Bold,
Inter_900Black,
});
const { isLoading: isLoadingLaunches } = useUpcomingLaunches();
const { isLoading: isLoadingPastLaunches } = useRecentLaunches();
const { isLoading: isLoadingArticles } = useArticles();
const isLoading =
isLoadingLaunches || isLoadingPastLaunches || isLoadingArticles;
React.useEffect(() => {
if (isFontsLoaded && !isLoading) {
setTimeout(() => {
dispatch(StackActions.replace('HomeTabs'));
}, 2500);
}
}, [dispatch, isFontsLoaded, isLoading]);
return <Molecules.Loading />;
}
|
<reponame>yaoice/meliodas<filename>pkg/ipam/backend/neutron/mix_route_store.go<gh_stars>1-10
// Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package neutron
import (
"fmt"
"github.com/yaoice/meliodas/pkg/ipam/backend/allocator"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers"
"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
"github.com/vishvananda/netlink"
"io/ioutil"
"log"
"net"
"os"
"path/filepath"
"strings"
"github.com/yaoice/meliodas/pkg/ipam/backend"
)
// MixRouteStore implements IPAM in VETH route mode: multiple IPs are carried
// as extra fixed IPs on a single elastic network interface (the host's
// Neutron port). (Translated from the original Chinese comment.)
type MixRouteStore struct {
    *FileLock                                // serializes access to dataDir across processes
    NetworkClient *gophercloud.ServiceClient // Neutron API client
    hostPort      *ports.Port                // host's Neutron port; fixed IPs are added/removed here
    hostPortAddr  string                     // primary IP address of the host port
    subnet        *subnets.Subnet            // subnet that the host port's fixed IP belongs to
    Router        *routers.Router            // virtual router owning the subnet's gateway IP
    network       *networks.Network          // Neutron network configured for this pool
    dataDir       string                     // directory of "ip -> container id" record files
}

// Store implements the Store interface
var _ backend.Store = &MixRouteStore{}
// NewMixRouteStore builds a MixRouteStore from the IPAM configuration: it
// connects to Neutron, resolves the host's port and subnet from the first
// address on the configured host interface, locates the virtual router that
// owns the subnet gateway, and prepares the on-disk allocation directory
// (plus its file lock).
func NewMixRouteStore(ipamConfig *allocator.IPAMConfig) (backend.Store, error) {
    var router *routers.Router
    networkClient, err := ConnectStore(ipamConfig.OpenStackConf, backend.SERVICE_TYPE_NETWORK)
    if err != nil {
        return nil, err
    }
    if len(ipamConfig.NeutronConf.Networks) == 0 {
        return nil, fmt.Errorf("neutron networks is none")
    }
    // the host interface's first address identifies the host's Neutron port
    iface, err := netlink.LinkByName(ipamConfig.NeutronConf.HostInterface)
    if err != nil {
        return nil, fmt.Errorf("failed to lookup %q: %v", ipamConfig.NeutronConf.HostInterface, err)
    }
    hostAddrs, err := netlink.AddrList(iface, netlink.FAMILY_ALL)
    if err != nil || len(hostAddrs) == 0 {
        return nil, fmt.Errorf("failed to get host IP addresses for %q: %v", iface, err)
    }
    log.Printf("host address: %s", hostAddrs[0].IP.String())
    network, err := networks.Get(networkClient, ipamConfig.NeutronConf.Networks[0]).Extract()
    if err != nil {
        return nil, fmt.Errorf("failed to get neutron network object: %v", err)
    }
    if len(network.Subnets) == 0 {
        return nil, fmt.Errorf("neutron network %s subnets is none", ipamConfig.NeutronConf.Networks[0])
    }
    // find the Neutron port whose fixed IP equals the host address
    hostPorts, err := ports.List(networkClient, ports.ListOpts{
        // To list all networks
        // NetworkID: network.ID,
        FixedIPs: []ports.FixedIPOpts{
            {
                IPAddress: hostAddrs[0].IP.String(),
            },
        },
    }).AllPages()
    if err != nil {
        return nil, fmt.Errorf("get host neutron port err: %v", err)
    }
    hostPortsSlice, err := ports.ExtractPorts(hostPorts)
    if err != nil {
        return nil, err
    }
    if len(hostPortsSlice) == 0 {
        return nil, fmt.Errorf("failed to get host neutron port")
    }
    if len(hostPortsSlice[0].FixedIPs) == 0 {
        return nil, fmt.Errorf("failed to get host neutron port fixed ip")
    }
    subnet, err := subnets.Get(networkClient, hostPortsSlice[0].FixedIPs[0].SubnetID).Extract()
    if err != nil {
        return nil, fmt.Errorf("failed to get host neutron subnet %s: %v", hostPortsSlice[0].FixedIPs[0].SubnetID, err)
    }
    // find vrouter
    router, err = FindRouter(networkClient, subnet.GatewayIP)
    if err != nil {
        return nil, err
    }
    if router == nil {
        return nil, fmt.Errorf("can't find neutron vrouter with gateway ip %s", subnet.GatewayIP)
    }
    // allocation records live under the configured data directory
    dir := filepath.Join(ipamConfig.NeutronConf.DataDir)
    if err := os.MkdirAll(dir, 0755); err != nil {
        return nil, err
    }
    lk, err := NewFileLock(dir)
    if err != nil {
        return nil, err
    }
    // write values in Store object
    store := &MixRouteStore{
        FileLock:      lk,
        NetworkClient: networkClient,
        network:       network,
        hostPort:      &hostPortsSlice[0],
        hostPortAddr:  hostAddrs[0].IP.String(),
        subnet:        subnet,
        Router:        router,
        dataDir:       ipamConfig.NeutronConf.DataDir,
    }
    return store, nil
}
// Reserve allocates one more fixed IP on the host's Neutron port for the
// container identified by `id`, records the "ip -> container id" mapping on
// disk, and returns the allocated address (with subnet mask) plus the subnet
// gateway. Fails when the port already carries the per-interface IP limit.
func (s *MixRouteStore) Reserve(id string) (*net.IPNet, net.IP, error) {
    // re-read the port so the fixed-IP list is current
    port, err := ports.Get(s.NetworkClient, s.hostPort.ID).Extract()
    if err != nil {
        log.Printf("get host neutron port %s err: %v", s.hostPort.ID, err)
        return nil, nil, err
    }
    overMax, err := s.IsOverMax(port)
    if err != nil {
        return nil, nil, err
    }
    if *overMax {
        return nil, nil, fmt.Errorf("over single network interface max supported ips:%d", backend.MAX_IPS)
    }
    ipNet, gw, err := s.allocatePort(port)
    if err != nil {
        return ipNet, gw, err
    }
    // write ip-containerID into file
    if err := s.writeIP(id, ipNet.IP.String()); err != nil {
        return nil, nil, err
    }
    return ipNet, gw, nil
}
// N.B. This function eats errors to be tolerant and
// release as much as possible
//
// ReleaseByID finds the on-disk record whose file content equals the given
// container id, detaches the corresponding IP from the host port, and then
// removes the record file. Missing records are treated as already released.
func (s *MixRouteStore) ReleaseByID(id string) error {
    var (
        containerIP     string
        containerIPPath string
    )
    // scan dataDir for the file containing this container id; the non-nil
    // error returned below is only a sentinel to stop the walk early
    filepath.Walk(s.dataDir, func(path string, info os.FileInfo, err error) error {
        if err != nil || info.IsDir() {
            return nil
        }
        data, err := ioutil.ReadFile(path)
        if err != nil {
            return nil
        }
        if strings.TrimSpace(string(data)) == strings.TrimSpace(id) {
            // record files are named after the IP they map
            containerIP = info.Name()
            containerIPPath = path
            return fmt.Errorf("found container ip")
        }
        return nil
    })
    if containerIP == "" {
        log.Printf("container %s ip not found", id)
        return nil
    }
    if err := s.removePort(containerIP); err != nil {
        return err
    }
    if exists(containerIPPath) {
        if err := os.Remove(containerIPPath); err != nil {
            return err
        }
    }
    return nil
}
// findIndex returns a pointer to the position of `target` inside the given
// slice, or nil when it is absent. Two slice types are supported: a port
// fixed-IP list (matched by IP address) and a router route list (matched by
// destination CIDR; a bare IP target is normalised to a /32 first).
func (s *MixRouteStore) findIndex(targetSlice interface{}, target string) *int {
    switch targetSlice.(type) {
    case []ports.IP:
        for index, ip := range targetSlice.([]ports.IP) {
            if ip.IPAddress == target {
                return &index
            }
        }
    case []routers.Route:
        destCidr := target
        if !strings.Contains(target, "/") {
            destCidr = target + "/32"
        }
        for index, route := range targetSlice.([]routers.Route) {
            if route.DestinationCIDR == destCidr {
                return &index
            }
        }
    }
    return nil
}
// IsOverMax reports whether the given port has already reached the maximum
// number of fixed IPs supported on a single network interface.
func (s *MixRouteStore) IsOverMax(port *ports.Port) (*bool, error) {
    reachedLimit := len(port.FixedIPs) >= backend.MAX_IPS
    return backend.GetBoolPointer(reachedLimit), nil
}
// writeIP persists the "ip -> container id" mapping as a file named after
// the IP whose content is the container id. O_EXCL makes the create fail if
// a record for this IP already exists, preventing double allocation.
func (s *MixRouteStore) writeIP(id string, ip string) error {
    fname := GetEscapedPath(s.dataDir, ip)
    f, err := os.OpenFile(fname, os.O_RDWR|os.O_EXCL|os.O_CREATE, 0644)
    if err != nil {
        // Simplified: the old code special-cased os.IsExist(err) only to
        // return err in exactly the same way as this branch.
        return err
    }
    if _, err = f.WriteString(strings.TrimSpace(id)); err != nil {
        f.Close()
        os.Remove(f.Name())
        return err
    }
    if err = f.Close(); err != nil {
        os.Remove(f.Name())
        return err
    }
    return nil
}
// removePort detaches containerIP from the host port's fixed-IP list and
// pushes the shortened list back to Neutron. Returns an error if the IP is
// not currently attached or if the Neutron update fails.
func (s *MixRouteStore) removePort(containerIP string) error {
    if index := s.findIndex(s.hostPort.FixedIPs, containerIP); index != nil {
        // delete element *index from the slice, then persist via the API
        s.hostPort.FixedIPs = append(s.hostPort.FixedIPs[:*index], s.hostPort.FixedIPs[*index+1:]...)
        _, err := ports.Update(s.NetworkClient, s.hostPort.ID, ports.UpdateOpts{
            FixedIPs: s.hostPort.FixedIPs,
        }).Extract()
        if err != nil {
            log.Printf("delete host neutron port %s err: %v", s.hostPort.ID, err)
            return err
        }
        log.Printf("updated neutron port %s: %v", s.hostPort.ID, s.hostPort.FixedIPs)
        return nil
    }
    return fmt.Errorf("container ip %s was not found in fixedIPs %v", containerIP, s.hostPort.FixedIPs)
}
// allocatePort asks Neutron to attach one more fixed IP (auto-assigned from
// the store's subnet) to the host port, diffs the fixed-IP lists before and
// after the update to discover which address was granted, and returns that
// address (with the subnet mask) together with the subnet gateway.
func (s *MixRouteStore) allocatePort(p *ports.Port) (*net.IPNet, net.IP, error) {
    oldFixedIPsSlice := p.FixedIPs
    // appending an entry with only SubnetID lets Neutron pick the address
    p.FixedIPs = append(p.FixedIPs, ports.IP{
        SubnetID: s.subnet.ID,
    })
    port, err := ports.Update(s.NetworkClient, s.hostPort.ID, ports.UpdateOpts{
        FixedIPs: p.FixedIPs,
    }).Extract()
    if err != nil {
        log.Printf("add host neutron port %s err: %v", s.hostPort.ID, err)
        return nil, nil, err
    }
    log.Printf("updated neutron port %s: %v", s.hostPort.ID, port.FixedIPs)
    newFixedIPsSlice := port.FixedIPs
    // `difference` (defined elsewhere in this package) yields the entry that
    // is present in the new list but not the old one
    newIP := difference(oldFixedIPsSlice, newFixedIPsSlice)
    if newIP == nil {
        return nil, nil, fmt.Errorf("port doesn't have new fixed ip")
    }
    log.Printf("new neutron port fixed ip: %s", newIP.IPAddress)
    gw := net.ParseIP(s.subnet.GatewayIP)
    currentIP := net.ParseIP(newIP.IPAddress)
    _, ipnet, err := net.ParseCIDR(s.subnet.CIDR)
    if err != nil {
        log.Printf("parse neutron subnet %s err: %v", s.subnet.ID, err.Error())
        return nil, nil, err
    }
    return &net.IPNet{IP: currentIP, Mask: ipnet.Mask}, gw, nil
}
|
def removeSpecifiedIndex(list, index):
    """Return a copy of ``list`` without the element at position ``index``.

    An out-of-range or negative ``index`` matches no position, so a plain
    copy is returned (same behaviour as the original implementation).
    Note: the parameter name ``list`` shadows the built-in; it is kept
    unchanged for backward compatibility with keyword callers.
    """
    # enumerate() replaces the unidiomatic range(len(...)) indexing pattern
    return [value for i, value in enumerate(list) if i != index]
# Demo: drop the element at position 2 ('Banana').
# NOTE(review): `list` and `index` shadow Python built-ins at module scope.
list = ['Apple', 'Orange', 'Banana', 'Mango']
index = 2
print(removeSpecifiedIndex(list, index))
|
# Initialise the Terraform working directory (providers, modules, backend).
terraform init
# Create/update the infrastructure without an interactive approval prompt.
terraform apply -auto-approve
|
import React from 'react';
import {makeStyles} from "@material-ui/core/styles";
import CssBaseline from "@material-ui/core/CssBaseline";
import {Switch, Route} from "react-router-dom";
import {hot} from 'react-hot-loader'
import Navigation from 'components/Navigation';
import HomePage from 'components/Home';
import PlayersPage from "components/Players";
import BookmarksPage from 'components/Bookmarks';
import './app.scss';
// Style hook: exposes the AppBar spacer mixin so page content starts
// below the fixed navigation bar.
const useStyle = makeStyles((theme) => ({
    appBarSpacer: theme.mixins.toolbar
}));
const App: React.FC = () => {
const classes = useStyle();
return (
<div>
<CssBaseline/>
<Navigation/>
<main className={'content-application'}>
<div className={classes.appBarSpacer} />
<Switch>
<Route
exact
path={'/'}
component={HomePage}
key={'home'}
/>
<Route
exact
path={'/bookmarks'}
component={BookmarksPage}
key={'bookmark'}
/>
<Route
exact
path={'/players'}
component={PlayersPage}
key={'player'}
/>
</Switch>
</main>
</div>
);
};
export default hot(module)(App);
|
/*
* Copyright The Stargate Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package io.stargate.metrics.jersey.tags;
import io.micrometer.core.instrument.Tag;
import io.micrometer.core.instrument.Tags;
import io.micrometer.jersey2.server.JerseyTagsProvider;
import io.stargate.core.metrics.api.Metrics;
import java.util.Collection;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.glassfish.jersey.server.ExtendedUriInfo;
import org.glassfish.jersey.server.monitoring.RequestEvent;
/**
* A simple tag provider that overwrites the module tag to <code>other</code> if URI matches one of
* the patterns.
*/
/**
 * A simple tag provider that overwrites the module tag to <code>other</code>
 * if the request URI matches one of the configured patterns; otherwise it
 * contributes no tags at all.
 */
public class NonApiModuleTagsProvider implements JerseyTagsProvider {

  /** Suffix appended to the module name for non-API requests. */
  public static final String NON_API_MODULE_EXTENSION = "other";

  private final Tags tags;
  private final Collection<Pattern> uriPatterns;

  public NonApiModuleTagsProvider(Metrics metrics, String module, Collection<String> uriRegexes) {
    this.tags = metrics.tagsForModule(module + "-" + NON_API_MODULE_EXTENSION);
    this.uriPatterns = uriRegexes.stream().map(Pattern::compile).collect(Collectors.toList());
  }

  /** {@inheritDoc} */
  @Override
  public Iterable<Tag> httpRequestTags(RequestEvent event) {
    return tagsInternal(event);
  }

  /** {@inheritDoc} */
  @Override
  public Iterable<Tag> httpLongRequestTags(RequestEvent event) {
    return tagsInternal(event);
  }

  // Overridden module tags for non-API URIs, empty tags otherwise.
  private Iterable<Tag> tagsInternal(RequestEvent event) {
    return isNonApiRequest(event.getUriInfo()) ? tags : Tags.empty();
  }

  // The request counts as non-API when any configured pattern matches.
  private boolean isNonApiRequest(ExtendedUriInfo uriInfo) {
    if (uriPatterns.isEmpty()) {
      return false;
    }
    String path = uriInfo.getAbsolutePath().getPath();
    for (Pattern pattern : uriPatterns) {
      if (pattern.matcher(path).matches()) {
        return true;
      }
    }
    return false;
  }
}
|
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# $FreeBSD$
#
# Copyright 2012 Spectra Logic. All rights reserved.
# Use is subject to license terms.
#
# ATF wrappers for the ZFS "cache" (L2ARC) device test cases. Each test case
# consists of a head (metadata), a body (sources the shared config, checks
# the available disk count, runs setup.ksh then the ksh93 test case) and a
# cleanup (sources the same config and runs cleanup.ksh).

atf_test_case cache_001_pos cleanup
cache_001_pos_head()
{
    atf_set "descr" "Creating a pool with a cache device succeeds."
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_001_pos_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 1
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_001_pos.ksh || atf_fail "Testcase failed"
}
cache_001_pos_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_002_pos cleanup
cache_002_pos_head()
{
    atf_set "descr" "Adding a cache device to normal pool works."
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_002_pos_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 1
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_002_pos.ksh || atf_fail "Testcase failed"
}
cache_002_pos_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_003_pos cleanup
cache_003_pos_head()
{
    atf_set "descr" "Adding an extra cache device works."
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_003_pos_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 2
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_003_pos.ksh || atf_fail "Testcase failed"
}
cache_003_pos_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_004_neg cleanup
cache_004_neg_head()
{
    atf_set "descr" "Attaching a cache device fails."
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_004_neg_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 2
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_004_neg.ksh || atf_fail "Testcase failed"
}
cache_004_neg_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_005_neg cleanup
cache_005_neg_head()
{
    atf_set "descr" "Replacing a cache device fails."
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_005_neg_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 2
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_005_neg.ksh || atf_fail "Testcase failed"
}
cache_005_neg_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_006_pos cleanup
cache_006_pos_head()
{
    atf_set "descr" "Exporting and importing pool with cache devices passes."
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_006_pos_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 2
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_006_pos.ksh || atf_fail "Testcase failed"
}
cache_006_pos_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_007_neg cleanup
cache_007_neg_head()
{
    atf_set "descr" "A mirror/raidz/raidz2 cache is not supported."
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_007_neg_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 2
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_007_neg.ksh || atf_fail "Testcase failed"
}
cache_007_neg_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_008_neg cleanup
cache_008_neg_head()
{
    atf_set "descr" "A raidz/raidz2 cache can not be added to existed pool."
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_008_neg_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 2
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_008_neg.ksh || atf_fail "Testcase failed"
}
cache_008_neg_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_009_pos cleanup
cache_009_pos_head()
{
    atf_set "descr" "Offline and online a cache device succeed."
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_009_pos_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 2
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_009_pos.ksh || atf_fail "Testcase failed"
}
cache_009_pos_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_010_neg cleanup
cache_010_neg_head()
{
    atf_set "descr" "Cache device can only be disk or slice."
    atf_set "require.progs" zfs zpool
    atf_set "timeout" 1200
}
cache_010_neg_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 1
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_010_neg.ksh || atf_fail "Testcase failed"
}
cache_010_neg_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

atf_test_case cache_011_pos cleanup
cache_011_pos_head()
{
    atf_set "descr" "Remove cache device from pool with spare device should succeed"
    atf_set "require.progs" zpool
    atf_set "timeout" 1200
}
cache_011_pos_body()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    verify_disk_count "$DISKS" 2
    ksh93 $(atf_get_srcdir)/setup.ksh || atf_fail "Setup failed"
    ksh93 $(atf_get_srcdir)/cache_011_pos.ksh || atf_fail "Testcase failed"
}
cache_011_pos_cleanup()
{
    . $(atf_get_srcdir)/../../include/default.cfg
    . $(atf_get_srcdir)/cache.kshlib
    . $(atf_get_srcdir)/cache.cfg

    ksh93 $(atf_get_srcdir)/cleanup.ksh || atf_fail "Cleanup failed"
}

# Register every test case defined above with the ATF runtime.
atf_init_test_cases()
{
    atf_add_test_case cache_001_pos
    atf_add_test_case cache_002_pos
    atf_add_test_case cache_003_pos
    atf_add_test_case cache_004_neg
    atf_add_test_case cache_005_neg
    atf_add_test_case cache_006_pos
    atf_add_test_case cache_007_neg
    atf_add_test_case cache_008_neg
    atf_add_test_case cache_009_pos
    atf_add_test_case cache_010_neg
    atf_add_test_case cache_011_pos
}
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package bus.management;
/**
*
* @author
*/
public class UserControlPanel extends javax.swing.JFrame {
/**
* Creates new form UserControlPanel
*/
String luser;
public UserControlPanel() {
}
public UserControlPanel(String loginUser)
{
initComponents();
this.luser=loginUser;
String getValue=jLabel1.getText();
jLabel1.setText(getValue+ " :"+loginUser);
}
/**
* This method is called from within the constructor to initialize the form.
* WARNING: Do NOT modify this code. The content of this method is always
* regenerated by the Form Editor.
*/
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
jLabel1 = new javax.swing.JLabel();
jDesktopPane1 = new javax.swing.JDesktopPane();
jMenuBar1 = new javax.swing.JMenuBar();
jMenu1 = new javax.swing.JMenu();
jMenu2 = new javax.swing.JMenu();
setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE);
jLabel1.setFont(new java.awt.Font("Tahoma", 0, 18)); // NOI18N
jLabel1.setText("Welcome");
javax.swing.GroupLayout jDesktopPane1Layout = new javax.swing.GroupLayout(jDesktopPane1);
jDesktopPane1.setLayout(jDesktopPane1Layout);
jDesktopPane1Layout.setHorizontalGroup(
jDesktopPane1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGap(0, 0, Short.MAX_VALUE)
);
jDesktopPane1Layout.setVerticalGroup(
jDesktopPane1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGap(0, 402, Short.MAX_VALUE)
);
jMenu1.setText("Add Booking");
jMenu1.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
jMenu1MouseClicked(evt);
}
});
jMenu1.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
jMenu1ActionPerformed(evt);
}
});
jMenuBar1.add(jMenu1);
jMenu2.setText("Exit");
jMenu2.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
jMenu2MouseClicked(evt);
}
});
jMenuBar1.add(jMenu2);
setJMenuBar(jMenuBar1);
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(getContentPane());
getContentPane().setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, layout.createSequentialGroup()
.addContainerGap(446, Short.MAX_VALUE)
.addComponent(jLabel1, javax.swing.GroupLayout.PREFERRED_SIZE, 184, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(292, 292, 292))
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addComponent(jDesktopPane1))
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addComponent(jLabel1)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addComponent(jDesktopPane1)
.addContainerGap())
);
pack();
}// </editor-fold>//GEN-END:initComponents
private void jMenu1ActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_jMenu1ActionPerformed
// TODO add your handling code here:
AddBooking ab=new AddBooking(luser,"user");
jDesktopPane1.add(ab);
ab.show();
}//GEN-LAST:event_jMenu1ActionPerformed
private void jMenu1MouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_jMenu1MouseClicked
// TODO add your handling code here:
System.out.print("Menu");
AddBooking ab=new AddBooking(luser,"user");
jDesktopPane1.add(ab);
ab.show();
}//GEN-LAST:event_jMenu1MouseClicked
private void jMenu2MouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_jMenu2MouseClicked
// TODO add your handling code here:
dispose();
MainScreen ms=new MainScreen();
ms.setLocationRelativeTo(null);
ms.setVisible(true);
}//GEN-LAST:event_jMenu2MouseClicked
/**
 * Application entry point: applies the Nimbus look and feel when available,
 * then shows a {@code UserControlPanel} on the Swing event-dispatch thread.
 *
 * @param args the command line arguments (unused)
 */
public static void main(String args[]) {
    /* Set the Nimbus look and feel */
    //<editor-fold defaultstate="collapsed" desc=" Look and feel setting code (optional) ">
    /* If Nimbus (introduced in Java SE 6) is not available, stay with the default look and feel.
     * For details see http://download.oracle.com/javase/tutorial/uiswing/lookandfeel/plaf.html
     */
    try {
        for (javax.swing.UIManager.LookAndFeelInfo info : javax.swing.UIManager.getInstalledLookAndFeels()) {
            if ("Nimbus".equals(info.getName())) {
                javax.swing.UIManager.setLookAndFeel(info.getClassName());
                break;
            }
        }
    } catch (ClassNotFoundException | InstantiationException | IllegalAccessException
            | javax.swing.UnsupportedLookAndFeelException ex) {
        // Multi-catch replaces the four identical generated handlers; behavior is unchanged.
        java.util.logging.Logger.getLogger(UserControlPanel.class.getName()).log(java.util.logging.Level.SEVERE, null, ex);
    }
    //</editor-fold>
    /* Create and display the form */
    java.awt.EventQueue.invokeLater(new Runnable() {
        public void run() {
            new UserControlPanel().setVisible(true);
        }
    });
}
// Swing components created by the NetBeans GUI builder; the block below is
// regenerated by the builder and must not be edited by hand.
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JDesktopPane jDesktopPane1;
private javax.swing.JLabel jLabel1;
private javax.swing.JMenu jMenu1;
private javax.swing.JMenu jMenu2;
private javax.swing.JMenuBar jMenuBar1;
// End of variables declaration//GEN-END:variables
}
|
<gh_stars>0
import { Injectable } from '@angular/core';
import { User, UserPreferences } from '@app/domain';
import { of } from 'rxjs';
import { UserManagementDetailService } from './user-management-detail.service';
@Injectable({ providedIn: 'root' })
export class StubUserManagementDetailService implements Partial<UserManagementDetailService> {
  // Test stub: each method resolves immediately with a permissive default so
  // components under test can render without a real backend.
  getUser = () => of(new User({}));
  fetchUserPreferences = () => of(new UserPreferences({}));
  canUpdateProfile = () => of(true);
  canUpdateAuthorities = () => of(true);
  canUpdateRole = () => of(true);
  canSendActivationToken = () => of(true);
  canDeleteAccount = () => of(true);
}
// Angular provider that substitutes the stub for the real service in TestBed setups.
export const stubUserManagementDetailServiceProvider = {
  provide: UserManagementDetailService,
  useClass: StubUserManagementDetailService,
};
|
#!/bin/bash
# Push every locally built jdickey/hanami-1.2.0-base image (all tags) to
# Docker Hub, after a 5-second countdown that lets the operator abort.
# Fail fast on errors/unset variables so a failed push stops the loop.
set -eu

echo "Will push all LOCALLY EXISTING jdickey/hanami-1.2.0-base images to Docker Hub in 5 seconds"
echo "Hit Control-C *NOW* if this is not what you want!"
for i in 5 4 3 2 1; do
  echo -n "$i"; echo -n '... '
  sleep 1
done
# `--format` emits "ID<TAB>repo:tag"; `cut -f 2` keeps the repo:tag column.
# $(...) replaces the legacy backtick form; "$i" is quoted against word splitting.
for i in $(docker image ls jdickey/hanami-1.2.0-base --format='{{.ID}}\t{{.Repository}}:{{.Tag}}' | sort | cut -f 2); do
  echo "Pushing $i"
  docker push "$i"
  echo "Done pushing $i"; echo
done
|
'use strict'
const webpack = require('webpack')
const merge = require('webpack-merge')
const FriendlyErrorsPlugin = require('friendly-errors-webpack-plugin')
const config = require('mpvue-packager/lib/config')
const utils = require('mpvue-packager/lib/utils')
const baseWebpackConfig = require('./webpack.base.conf')
// Build config for mpvue: extends the shared base config with extracted CSS
// (source maps per project config) and quieter webpack error reporting.
module.exports = merge(baseWebpackConfig, {
  module: {
    // Style loaders with CSS extracted into separate files rather than inlined.
    rules: utils.styleLoaders({
      sourceMap: config.cssSourceMap,
      extract: true
    })
  },
  plugins: [
    // Do not emit assets when a compilation error occurred.
    new webpack.NoEmitOnErrorsPlugin(),
    // Friendlier, condensed console output for build errors.
    new FriendlyErrorsPlugin()
  ]
})
|
<reponame>robinwyss/openkit-java
/**
* Copyright 2018-2020 Dynatrace LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dynatrace.openkit.core.util;
import org.junit.Test;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
import static org.junit.Assert.assertThat;
/**
 * Unit tests for {@code PercentEncoder}: RFC-3986 unreserved vs. reserved
 * characters, multi-byte UTF-8 input, caller-supplied additional reserved
 * characters, and the null result for an unknown charset name.
 */
public class PercentEncoderTest {
    /**
     * All unreserved characters based on RFC-3986
     */
    private static final String UNRESERVED_CHARACTERS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._~";

    /** Unreserved characters must pass through the encoder unchanged. */
    @Test
    public void rfc3986UnreservedCharactersAreNotEncoded() {
        // when
        String obtained = PercentEncoder.encode(UNRESERVED_CHARACTERS, "UTF-8");
        // then
        assertThat(obtained, is(equalTo(UNRESERVED_CHARACTERS)));
    }

    /** Reserved ASCII punctuation must be percent-encoded. */
    @Test
    public void reservedCharactersArePercentEncoded() {
        // when
        String obtained = PercentEncoder.encode("+()/\\&%$#@!`?<>[]{}", "UTF-8");
        // then
        String expected = "%2B%28%29%2F%5C%26%25%24%23%40%21%60%3F%3C%3E%5B%5D%7B%7D"; // precomputed using Python
        assertThat(obtained, is(equalTo(expected)));
    }

    /** Mixed input: only the reserved characters are encoded. */
    @Test
    public void mixingReservedAndUnreservedCharactersWorks() {
        // when
        String obtained = PercentEncoder.encode("a+bc()~/\\&0_", "UTF-8");
        // then
        String expected = "a%2Bbc%28%29~%2F%5C%260_"; // precomputed using Python
        assertThat(obtained, is(equalTo(expected)));
    }

    /** Non-ASCII characters are UTF-8 encoded byte-by-byte before percent-encoding. */
    @Test
    public void charactersOutsideOfAsciiRangeAreEncodedFirst() {
        // when
        String obtained = PercentEncoder.encode("aösÖ€dÁF", "UTF-8");
        // then
        String expected = "a%C3%B6s%C3%96%E2%82%ACd%C3%81F";
        assertThat(obtained, is(equalTo(expected)));
    }

    /** Callers can mark extra characters as reserved; here '0' and '_' get encoded too. */
    @Test
    public void itIsPossibleToMarkAdditionalCharactersAsReserved() {
        // when
        String additionalReservedCharacters = "€0_";
        String obtained = PercentEncoder.encode("0123456789-._~", "UTF-8", additionalReservedCharacters.toCharArray());
        // then
        String expected = "%30123456789-.%5F~";
        assertThat(obtained, is(equalTo(expected)));
    }

    /** An unknown charset name yields null rather than an exception. */
    @Test
    public void nullIsReturnedIfEncodingIsNecessaryButIsNotKnown() {
        // when
        String obtained = PercentEncoder.encode("a€b", "this-is-really-no-valid-encoding");
        // then
        assertThat(obtained, is(nullValue()));
    }
}
|
package org.purevalue.arbitrage.traderoom.exchange
import java.time.Instant
import java.util.UUID
import akka.actor.typed.scaladsl.{AbstractBehavior, ActorContext, Behaviors}
import akka.actor.typed.{ActorRef, Behavior}
import org.purevalue.arbitrage.Main.actorSystem
import org.purevalue.arbitrage.traderoom._
import org.purevalue.arbitrage.traderoom.exchange.LiquidityManager._
import org.purevalue.arbitrage.{Config, ExchangeConfig}
import org.slf4j.LoggerFactory
import scala.concurrent.ExecutionContextExecutor
/*
- The LiquidityManager is responsible for providing the Assets which are requested by Traders.
There is one manager per exchange, which:
- manages liquidity storing assets (like BTC, USDT) (unused altcoin liquidity goes back to these)
- provides/creates liquidity of specific assets demanded by (unfulfilled) liquidity requests, which is believed to be used soon by next upcoming trades
[Concept]
- Every single valid OrderRequest (no matter if enough balance is available or not) will result in a Liquidity-Request,
which may be granted or not, based on the available (yet unreserved) asset balance
- If that Liquidity-Request is covered by the current balance of the corresponding wallet,
then it is granted and this amount in the Wallet is marked as locked (for a limited duration until clearance) => [LiquidityLock]
- Else, if that Liquidity-Request is not covered by the current balance, then
- it is denied and a LiquidityDemand is noticed by the ExchangeLiquidityManager,
which (may) result in a liquidity providing trade in favor of the requested asset balance, given that enough Reserve-Liquidity is available.
- Every completed trade (no matter if succeeded or canceled) will result in a clearance of it's previously acquired Liquidity-Locks,
- Clearance of a Liquidity-Lock means removal of that lock (the underlying coins should be gone into a transaction in between anyway):
- In case, that the maximum lifetime of a liquidity-lock is reached, it will be cleared automatically by housekeeping
[Concept Liquidity-Demand]
- The Liquidity-Demand can only be fulfilled, when enough amount of one of the configured Reserve-Assets is available
- Furthermore it shall only be fulfilled by a Reserve-Asset, which is not involved in any order of the connected OrderRequestBundle, where the demand comes from!
- Furthermore it can only be fulfilled, if the current exchange-rate on the local exchange is good enough,
which means, it must be close to the Reference-Ticker exchange-rate or better than that (getting more coins out of the same amount of reserve-asset)
[Concept Reserve-Liquidity-Management]
- Unused (not locked or demanded) liquidity of a non-Reserve-Asset will be automatically converted to a Reserve-Asset.
Which reserve-asset it will be, is determined by:
- [non-loss-asset-filter] Filtering acceptable exchange-rates based on ticker on that exchange compared to a [ReferenceTicker]-value
- [fill-up] Try to reach minimum configured balance of each reserve-assets in their order of preference
- [play safe] Remaining value goes to first (highest prio) reserve-asset (having a acceptable exchange-rate)
*/
/** Companion object: behavior factory plus the actor's message protocol. */
object LiquidityManager {
  /** Creates the LiquidityManager behavior (one instance per exchange). */
  def apply(config: Config,
            exchangeConfig: ExchangeConfig):
  Behavior[Command] =
    Behaviors.setup(context => new LiquidityManager(context, config, exchangeConfig))

  /** Marker trait for all messages this actor accepts. */
  sealed trait Command extends Exchange.Message

  /**
   * UniqueDemand
   * A unique demand is characterized by an asset name and a trade-pattern.
   * The trade pattern is a type identifier for incoming orderbundles typically containing: Trader-name + trading strategy name (optional)
   * The point is, that demand for a coin on an exchange with the same trade-pattern are condensed to the last one,
   * while demands for the same coin with different trade-pattern are added up.
   */
  case class UniqueDemand(tradePattern: String, // UK (different trader or different trading strategy shall be represented by a different tradePattern)
                          asset: Asset, // UK
                          amount: Double,
                          dontUseTheseReserveAssets: Set[Asset], // not used any more; replaced by exchange rate rating
                          lastRequested: Instant) {
    // Unique key: one demand entry per (trade pattern, asset) combination.
    def uk: String = tradePattern + asset.officialSymbol
  }

  /** Query message: reply with the current demand/lock state. */
  case class GetState(replyTo: ActorRef[State]) extends Command
  /** Snapshot of the actor's internal maps (keyed by UniqueDemand.uk resp. lock id). */
  case class State(liquidityDemand: Map[String, UniqueDemand],
                   liquidityLocks: Map[UUID, LiquidityLock])

  /** Request to lock the given coin amounts; replied with Some(lock) when granted, None when refused. */
  case class LiquidityLockRequest(id: UUID,
                                  createTime: Instant,
                                  exchange: String,
                                  tradePattern: String,
                                  coins: Seq[CryptoValue],
                                  isForLiquidityTx: Boolean,
                                  dontUseTheseReserveAssets: Set[Asset],
                                  wallet: Option[Wallet], // is filled in by exchange actor, which forwards this message
                                  replyTo: ActorRef[Option[LiquidityLock]]) extends Command {
    // Copy-with-wallet helper used by the forwarding exchange actor.
    def withWallet(wallet: Wallet): LiquidityLockRequest = LiquidityLockRequest(
      id, createTime, exchange, tradePattern, coins, isForLiquidityTx, dontUseTheseReserveAssets, Some(wallet), replyTo
    )
  }

  /** A granted reservation of coins; removed on clearance or by housekeeping after max lifetime. */
  case class LiquidityLock(exchange: String,
                           liquidityRequestId: UUID,
                           coins: Seq[CryptoValue],
                           createTime: Instant)

  /** Notification that the coins of the given lock are no longer needed. */
  case class LiquidityLockClearance(liquidityRequestId: UUID) extends Command

  class OrderBookTooFlatException(val tradePair: TradePair, val side: TradeSide) extends Exception
}
/**
 * Per-exchange liquidity manager actor: tracks liquidity demand and grants/clears
 * liquidity locks against the (unlocked) wallet balance. See the concept notes above.
 */
class LiquidityManager(context: ActorContext[Command],
                       config: Config,
                       exchangeConfig: ExchangeConfig
                      ) extends AbstractBehavior[Command](context) {

  import LiquidityManager._

  private val log = LoggerFactory.getLogger(getClass)

  // Internal aggregate form of a lock request's demand; fiat money is never demanded.
  case class LiquidityDemand(exchange: String,
                             tradePattern: String,
                             coins: Seq[CryptoValue],
                             dontUseTheseReserveAssets: Set[Asset]) {
    if (coins.exists(_.asset.isFiat)) throw new IllegalArgumentException("Seriously, you demand for Fiat Money?")
  }
  private object LiquidityDemand {
    def apply(r: LiquidityLockRequest): LiquidityDemand =
      LiquidityDemand(r.exchange, r.tradePattern, r.coins, r.dontUseTheseReserveAssets)
  }

  private implicit val executionContext: ExecutionContextExecutor = actorSystem.executionContext

  // Map(uk:"trade-pattern + asset", UniqueDemand))
  private var liquidityDemand: Map[String, UniqueDemand] = Map()
  private var liquidityLocks: Map[UUID, LiquidityLock] = Map()

  // Record or refresh a single-asset demand; keyed by UniqueDemand.uk, so the newest entry wins.
  def noticeUniqueDemand(d: UniqueDemand): Unit = {
    if (log.isTraceEnabled) log.trace(s"noticed $d")
    liquidityDemand = liquidityDemand + (d.uk -> d)
  }

  // Expand a multi-coin demand into per-asset unique demands stamped with 'now'.
  def noticeDemand(d: LiquidityDemand): Unit = {
    d.coins
      .map(c => UniqueDemand(d.tradePattern, c.asset, c.amount, d.dontUseTheseReserveAssets, Instant.now))
      .foreach(noticeUniqueDemand)
  }

  // Remove a lock by request id (no-op when absent).
  def clearLock(id: UUID): Unit = {
    liquidityLocks = liquidityLocks - id
    if (log.isTraceEnabled) log.trace(s"Liquidity lock with ID $id cleared")
  }

  def addLock(l: LiquidityLock): Unit = {
    liquidityLocks = liquidityLocks + (l.liquidityRequestId -> l)
    if (log.isTraceEnabled) log.trace(s"Liquidity locked: $l")
  }

  // Drop demand entries older than the configured active time.
  def clearObsoleteDemands(): Unit = {
    val limit = Instant.now.minus(config.liquidityManager.liquidityDemandActiveTime)
    liquidityDemand = liquidityDemand.filter(_._2.lastRequested.isAfter(limit))
  }

  // Drop locks older than the configured max lifetime (safety net against leaked locks).
  def clearObsoleteLocks(): Unit = {
    val limit: Instant = Instant.now.minus(config.liquidityManager.liquidityLockMaxLifetime)
    liquidityLocks = liquidityLocks.filter(_._2.createTime.isAfter(limit))
  }

  // Available-minus-locked balance per asset, excluding fiat and do-not-touch assets; never negative.
  def determineUnlockedBalance(wallet: Wallet): Map[Asset, Double] = {
    val lockedLiquidity: Map[Asset, Double] = liquidityLocks
      .values
      .flatMap(_.coins) // all locked values
      .groupBy(_.asset) // group by asset
      .map(e => (e._1, e._2.map(_.amount).sum)) // sum up values of same asset
    wallet.balance
      .filterNot(_._1.isFiat)
      .filterNot(e => exchangeConfig.doNotTouchTheseAssets.contains(e._1))
      .map(e => (e._1, Math.max(0.0, e._2.amountAvailable - lockedLiquidity.getOrElse(e._1, 0.0))))
  }

  // just cleanup
  def houseKeeping(): Unit = {
    clearObsoleteLocks()
    clearObsoleteDemands()
  }

  // Accept, if free (not locked) coins are available.
  def lockLiquidity(r: LiquidityLockRequest): Option[LiquidityLock] = {
    if (r.coins.exists(e => exchangeConfig.doNotTouchTheseAssets.contains(e.asset))) throw new IllegalArgumentException
    if (r.coins.exists(_.asset.isFiat)) throw new IllegalArgumentException
    val unlockedBalances = determineUnlockedBalance(r.wallet.get)
    val sumCoinsPerAsset = r.coins // coins should contain already only values of different assets, but we need to be 100% sure, that we do not work with multiple requests for the same coin
      .groupBy(_.asset)
      .map(x => CryptoValue(x._1, x._2.map(_.amount).sum))
    if (sumCoinsPerAsset.forall(c => unlockedBalances.getOrElse(c.asset, 0.0) >= c.amount)) {
      val lock = LiquidityLock(r.exchange, r.id, r.coins, Instant.now)
      addLock(lock)
      Some(lock)
    } else {
      log.debug(s"refused liquidity-lock request on ${r.exchange} for ${r.coins.mkString(" ,")} (don't use: ${r.dontUseTheseReserveAssets})")
      None
    }
  }

  // Sanity checks: request must target this exchange and must not touch excluded assets.
  def checkValidity(r: LiquidityLockRequest): Unit = {
    if (r.exchange != exchangeConfig.name) throw new IllegalArgumentException
    if (r.coins.exists(c => exchangeConfig.doNotTouchTheseAssets.contains(c.asset))) throw new IllegalArgumentException("liquidity request for a DO-NOT-TOUCH asset")
  }

  // Full request pipeline: housekeeping, validation, demand notice (for trading requests), lock attempt.
  def liquidityLockRequest(r: LiquidityLockRequest): Option[LiquidityLock] = {
    houseKeeping()
    checkValidity(r)
    if (!r.isForLiquidityTx) {
      noticeDemand(LiquidityDemand(r)) // notice/refresh the demand, when 'someone' wants to lock liquidity for trading
    }
    lockLiquidity(r)
  }

  override def onMessage(message: Command): Behavior[Command] = {
    message match {
      // @formatter:off
      case r: LiquidityLockRequest    => r.replyTo ! liquidityLockRequest(r); this
      case LiquidityLockClearance(id) => clearLock(id); this
      case GetState(replyTo)          => houseKeeping(); replyTo ! State(liquidityDemand, liquidityLocks); this
      // @formatter:on
    }
  }
}
// TODO statistics: min/max/average time a liquidity providing order needs to be filled
// TODO statistics: LiquidityRequest successful ones / unsuccessful ones
// TODO statistics: min/max/average time a convert back to reserve-liquidity tx needs to be filled
// TODO statistics: total number and final balance of all done liquidity providing and back-converting transactions
|
#!/bin/bash
# NOTE: changed from /bin/sh — this script relies on bash-only features
# (the `function` keyword, the ERR trap, and bash arrays below), which are
# not guaranteed to work under a strict POSIX sh.
set -e
set -u
set -o pipefail

function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
# Copies and strips a vendored framework
# $1: framework path (absolute, or relative to BUILT_PRODUCTS_DIR).
# Resolves the source, rsyncs it into FRAMEWORKS_FOLDER_PATH, strips invalid
# architectures, re-signs if required, and (Xcode < 7) embeds Swift runtime dylibs.
install_framework()
{
  # Resolve the framework source: prefer the full path under BUILT_PRODUCTS_DIR,
  # then its basename there, then the literal argument.
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"

  # Fall back to a bare binary, and resolve a symlinked destination binary.
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  elif [ -L "${binary}" ]; then
    echo "Destination binary is symlinked..."
    dirname="$(dirname "${binary}")"
    binary="${dirname}/$(readlink "${binary}")"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
warn_missing_arch=${2:-true}
if [ -r "$source" ]; then
# Copy the dSYM into the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .dSYM "$source")"
binary_name="$(ls "$source/Contents/Resources/DWARF")"
binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
strip_invalid_archs "$binary" "$warn_missing_arch"
fi
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
fi
fi
}
# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
local bcsymbolmap_path="$1"
local destination="${BUILT_PRODUCTS_DIR}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
warn_missing_arch=${2:-true}
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
if [[ "$warn_missing_arch" == "true" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
fi
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
# Dispatches one artifact path to the matching install_* helper by extension.
# $1: path to a .framework, .dSYM or .bcsymbolmap artifact.
install_artifact() {
  artifact="$1"
  base="$(basename "$artifact")"
  case $base in
  *.framework)
    install_framework "$artifact"
    ;;
  *.dSYM)
    # Suppress arch warnings since XCFrameworks will include many dSYM files
    install_dsym "$artifact" "false"
    ;;
  *.bcsymbolmap)
    install_bcsymbolmap "$artifact"
    ;;
  *)
    # Fixed: the inner quotes were unescaped, so the message printed without
    # any quoting around the artifact path.
    echo "error: Unrecognized artifact \"$artifact\""
    ;;
  esac
}
# Reads one artifact path per line from the given list file and installs each.
# $1: path to the artifact list file.
copy_artifacts() {
  file_list="$1"
  # NOTE(review): $file_list is unquoted in the redirection; paths with spaces
  # would break — confirm whether Xcode ever produces such paths here.
  while read artifact; do
    install_artifact "$artifact"
  done <$file_list
}
# Main entry: copy every artifact listed in the per-configuration artifact list,
# then embed the PlistManager framework for both configurations, and finally
# wait for any backgrounded codesign jobs.
ARTIFACT_LIST_FILE="${BUILT_PRODUCTS_DIR}/cocoapods-artifacts-${CONFIGURATION}.txt"
if [ -r "${ARTIFACT_LIST_FILE}" ]; then
  copy_artifacts "${ARTIFACT_LIST_FILE}"
fi

if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/PlistManager/PlistManager.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/PlistManager/PlistManager.framework"
fi
# Wait for background codesign invocations when parallel signing is enabled.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
#include "mainwindow.h"
#include <accounts/registeraccountwindow.h>
#include <accounts/loginwindow.h>
#include <accounts/user.h>
#include <KillCovid-19/killcovid-19window.h>
#include "Utils/Utils.h"
#include <iostream>
/**
* \file mainwindow.h
* \brief Implementation of the mainWindow.
* \author <NAME>
*/
/**
 * Constructor: builds both pages (login request and game selection), stacks
 * them in a QStackedLayout and starts on the login page (index 0).
 */
mainWindow::mainWindow(QWidget *parent) : QWidget(parent){
    QSize windowSize = QSize(560, 600 );
    setFixedSize(windowSize);
    buildRequestLoginLayout();
    buildDisplayGamesLayout();
    activeLayout = new QStackedLayout();
    activeLayout->addWidget(requestLoginPage);  // index 0: login / register page
    activeLayout->addWidget(displayGamesPage);  // index 1: game selection page
    setLayout(activeLayout);
    QObject::connect(this, SIGNAL(swapLayout(int)), activeLayout, SLOT(setCurrentIndex(int)));
    emit swapLayout(0);
}
// ================================================================================================================================
// LAYOUTS
// ================================================================================================================================
/**
 * Builds the initial page: title, login/register/guest buttons and credits,
 * and wires the three buttons to their respective slots.
 */
void mainWindow::buildRequestLoginLayout(){
    requestLoginLayout = new QVBoxLayout();
    requestLoginLayout->addItem(new QSpacerItem(1, 32));
    titleLabel = new QLabel("Qaming Framework");
    titleLabel->setAlignment(Qt::AlignCenter);
    QFont titleFont( "Arial", 32, QFont::Bold);
    titleLabel->setFont(titleFont);
    requestLoginLayout->addWidget(titleLabel);
    requestLoginLayout->addItem(new QSpacerItem(1, 48));
    loginButton = new QPushButton("Log in to an existing account");
    requestLoginLayout->addWidget(loginButton);
    registerButton = new QPushButton("New user? Register a new account!");
    requestLoginLayout->addWidget(registerButton);
    playAsGuestButton = new QPushButton("Play as guest");
    requestLoginLayout->addWidget(playAsGuestButton);
    requestLoginLayout->addItem(new QSpacerItem(1, 64));
    creditsLabel = new QLabel("A Gaming platform written in Qt by <NAME> and <NAME>");
    creditsLabel->setAlignment(Qt::AlignCenter);
    requestLoginLayout->addWidget(creditsLabel);
    requestLoginPage = new QWidget();
    requestLoginPage->setLayout(requestLoginLayout);
    // Button wiring: register / login open their forms; guest logs in directly.
    QObject::connect(registerButton, SIGNAL(clicked(bool)), this, SLOT(openRegisterAccountForm()));
    QObject::connect(loginButton, SIGNAL(clicked(bool)), this, SLOT(openLoginForm()));
    QObject::connect(playAsGuestButton, SIGNAL(clicked(bool)), this, SLOT(loginAsGuest()));
}
void mainWindow::buildDisplayGamesLayout(){
displayGamesLayout = new QVBoxLayout();
displayGamesLayout->addItem(new QSpacerItem(1, 32));
titleLabel = new QLabel("Qaming Framework");
titleLabel->setAlignment(Qt::AlignCenter);
QFont titleFont( "Arial", 32, QFont::Bold);
titleLabel->setFont(titleFont);
displayGamesLayout->addWidget(titleLabel);
userDisplayLayout = new QHBoxLayout();
userDisplayLayout->addStretch();
userProfilePicture = new QLabel();
userProfilePicture->setAlignment(Qt::AlignCenter);
userDisplayLayout->addWidget(userProfilePicture);
userDisplayLayout->addStretch();
displayGamesLayout->addItem(userDisplayLayout);
usernameLabel = new QLabel();
usernameLabel->setAlignment(Qt::AlignCenter);
QFont usernameFont( "Arial", 18, QFont::Bold);
usernameLabel->setFont(usernameFont);
displayGamesLayout->addWidget(usernameLabel);
displayGamesLayout->addItem(new QSpacerItem(1, 32));
killCovidGameButton = new QPushButton("Play Kill Covid-19!");
displayGamesLayout->addWidget(killCovidGameButton);
reversiGameButton = new QPushButton("Play Reversi!");
displayGamesLayout->addWidget(reversiGameButton);
requestLoginLayout->addItem(new QSpacerItem(1, 48));
logoutButton = new QPushButton("Log out");
displayGamesLayout->addWidget(logoutButton);
displayGamesLayout->addItem(new QSpacerItem(1, 16));
gameHistoryWidget = new GameHistoryWidget();
displayGamesLayout->addWidget(gameHistoryWidget);
displayGamesLayout->addItem(new QSpacerItem(1, 16));
creditsLabel = new QLabel("A Gaming platform written in Qt by <NAME> and <NAME>");
creditsLabel->setAlignment(Qt::AlignCenter);
displayGamesLayout->addWidget(creditsLabel);
displayGamesPage = new QWidget();
displayGamesPage->setLayout(displayGamesLayout);
QObject::connect(this, SIGNAL(updateUsernameLabel(const QString&)), usernameLabel, SLOT(setText(const QString&)));
QObject::connect(this, SIGNAL(updateUserProfilePicture(const QPixmap&)), userProfilePicture, SLOT(setPixmap(const QPixmap&)));
QObject::connect(logoutButton, SIGNAL(clicked(bool)), this, SLOT(executeLogout()));
QObject::connect(killCovidGameButton, SIGNAL(clicked(bool)), this, SLOT(StartKillCovidGame()));
QObject::connect(reversiGameButton, SIGNAL(clicked(bool)), this, SLOT(StartReversiGame()));
}
// ================================================================================================================================
// SLOTS
// ================================================================================================================================
// Logs in with a default User loaded from JSON (presumably the "Guest"
// profile — confirm against User::fromJSON) and switches to the games page.
void mainWindow::loginAsGuest(){
    User* user = new User();
    user->fromJSON();
    updateLayoutWithUserInfo(user);
}
void mainWindow::openRegisterAccountForm(){
registerAccountWindow *window = new registerAccountWindow();
QObject::connect(window, SIGNAL(userApproved(User*)), this, SLOT(updateLayoutWithUserInfo(User*)));
window->show();
}
void mainWindow::openLoginForm(){
loginWindow *window = new loginWindow();
QObject::connect(window, SIGNAL(userApproved(User*)), this, SLOT(updateLayoutWithUserInfo(User*)));
window->show();
}
/**
 * Activates the given user: fills the game history, updates name/avatar,
 * switches to the games page and shows a greeting dialog.
 *
 * Fix: the birthday check compared the date-of-birth day with itself
 * (`dob.day() == dob.day()`, always true), so every login in the user's birth
 * month showed "Happy Birthday". It now compares against the current date.
 */
void mainWindow::updateLayoutWithUserInfo(User* user){
    if(user){
        activeUser=user;
        gameHistoryWidget->fill(activeUser);
        QString username = user->getUsername();
        emit updateUsernameLabel(username);
        QPixmap pixmap("../" + user->getProfilePicturePath());
        pixmap = pixmap.scaled(96, 96,Qt::KeepAspectRatio);
        userProfilePicture->setPixmap(pixmap); // direct set; the signal below keeps any other listeners in sync
        emit updateUserProfilePicture(pixmap);
        emit swapLayout(1);
        messageBox = new QMessageBox();
        messageBox->setWindowTitle("Greetings");
        if(user->getDateOfBirth().month() == QDate::currentDate().month() && user->getDateOfBirth().day() == QDate::currentDate().day()){
            messageBox->setText("Happy Birthday, " + user->getFirstName() + " " + user->getLastName() + "!");
        }else if(user->getUsername() == "Guest"){
            messageBox->setText("You are logged in as Guest!");
        }else{
            messageBox->setText("Welcome back, " + user->getFirstName() + " " + user->getLastName() + "!");
        }
        messageBox->exec();
    }
}
// Launches the Kill Covid-19 game window, hides the main window, and
// reconnects to onKillCovid19Finish() so the main window reappears on close.
void mainWindow::StartKillCovidGame(){
    KillCovidGameWindow= new KillCovid_19Window(activeUser);
    KillCovidGameWindow->setWindowTitle("Kill Covid-19");
    this->hide();
    connect(KillCovidGameWindow, SIGNAL(windowClosed()), this, SLOT(onKillCovid19Finish()));
    KillCovidGameWindow->show();
}
// Launches the Reversi game window, hides the main window, and reconnects to
// onReversiFinish() so the main window reappears on close.
void mainWindow::StartReversiGame(){
    ReversiGameWindow = new ReversiWindow(activeUser);
    ReversiGameWindow->setWindowTitle("Reversi");
    this->hide();
    connect(ReversiGameWindow, SIGNAL(windowClosed()), this, SLOT(onReversiFinish()));
    ReversiGameWindow->show();
}
// Returns to the login page. NOTE(review): activeUser is not reset here —
// confirm whether stale user state after logout is intentional.
void mainWindow::executeLogout(){
    emit swapLayout(0);
}
/**
 * Called when the Kill Covid-19 window closes: refreshes the game history,
 * tears down the game window and shows the main window again.
 *
 * Fix: the pointer is reset to nullptr after delete so it is never left
 * dangling across repeated play/close cycles.
 */
void mainWindow::onKillCovid19Finish(){
    gameHistoryWidget->fill(activeUser);
    if(KillCovidGameWindow!=nullptr){
        KillCovidGameWindow->hide();
        delete KillCovidGameWindow;
        KillCovidGameWindow = nullptr;
    }
    this->show();
}
/**
 * Called when the Reversi window closes: refreshes the game history, tears
 * down the game window and shows the main window again.
 *
 * Fix: the pointer is reset to nullptr after delete so it is never left
 * dangling across repeated play/close cycles.
 */
void mainWindow::onReversiFinish(){
    gameHistoryWidget->fill(activeUser);
    if(ReversiGameWindow!=nullptr){
        ReversiGameWindow->hide();
        delete ReversiGameWindow;
        ReversiGameWindow = nullptr;
    }
    this->show();
}
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.resourcemanager_v3.services.folders import pagers
from google.cloud.resourcemanager_v3.types import folders
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import FoldersTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import FoldersGrpcTransport
from .transports.grpc_asyncio import FoldersGrpcAsyncIOTransport
class FoldersClientMeta(type):
    """Metaclass for the Folders client.

    Provides class-level helpers for building and retrieving support
    objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Registry of transport label -> transport class; insertion order
    # determines the default (the first entry wins when no label is given).
    _transport_registry = OrderedDict()  # type: Dict[str, Type[FoldersTransport]]
    _transport_registry["grpc"] = FoldersGrpcTransport
    _transport_registry["grpc_asyncio"] = FoldersGrpcAsyncIOTransport

    def get_transport_class(cls, label: str = None,) -> Type[FoldersTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport.  If none is
                provided, the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        registry = cls._transport_registry
        # A truthy label selects that exact registry entry (KeyError if absent).
        if label:
            return registry[label]
        # No label: fall back to the first registered transport.
        return next(iter(registry.values()))
class FoldersClient(metaclass=FoldersClientMeta):
"""Manages Cloud Platform folder resources.
Folders can be used to organize the resources under an
organization and to control the policies applied to groups of
resources.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "cloudresourcemanager.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FoldersClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
FoldersClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> FoldersTransport:
        """Returns the transport used by the client instance.

        Returns:
            FoldersTransport: The transport used by the client
                instance.
        """
        # Set during __init__; always present on a constructed client.
        return self._transport
@staticmethod
def folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_folder_path(path: str) -> Dict[str, str]:
"""Parses a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, FoldersTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the folders client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, FoldersTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a dict or None in place of a
        # ClientOptions instance.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        # NOTE(review): distutils is deprecated (PEP 632); strtobool raises
        # ValueError on unrecognized env values — confirm desired behavior
        # before swapping in a replacement.
        use_client_cert = bool(
            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
        )
        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            # An explicitly supplied client_cert_source wins over the
            # ambient default certificate.
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()
                else:
                    client_cert_source_func = None
        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            # An explicit endpoint always takes precedence over the env var.
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                # "auto": use mTLS only when a client certificate is present.
                if is_mtls:
                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
                else:
                    api_endpoint = self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, FoldersTransport):
            # transport is a FoldersTransport instance.
            # A pre-built transport already carries credentials and scopes,
            # so rejecting duplicates here avoids silently ignoring them.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            # Build the named (or default) transport class with the resolved
            # endpoint and certificate source.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
def get_folder(
self,
request: Union[folders.GetFolderRequest, dict] = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> folders.Folder:
r"""Retrieves a folder identified by the supplied resource name.
Valid folder resource names have the format
``folders/{folder_id}`` (for example, ``folders/1234``). The
caller must have ``resourcemanager.folders.get`` permission on
the identified folder.
Args:
request (Union[google.cloud.resourcemanager_v3.types.GetFolderRequest, dict]):
The request object. The GetFolder request message.
name (str):
Required. The resource name of the folder to retrieve.
Must be of the form ``folders/{folder_id}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcemanager_v3.types.Folder:
A folder in an organization's
resource hierarchy, used to organize
that organization's resources.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a folders.GetFolderRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, folders.GetFolderRequest):
request = folders.GetFolderRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_folder]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_folders(
self,
request: Union[folders.ListFoldersRequest, dict] = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListFoldersPager:
r"""Lists the folders that are direct descendants of supplied parent
resource. ``list()`` provides a strongly consistent view of the
folders underneath the specified parent resource. ``list()``
returns folders sorted based upon the (ascending) lexical
ordering of their display_name. The caller must have
``resourcemanager.folders.list`` permission on the identified
parent.
Args:
request (Union[google.cloud.resourcemanager_v3.types.ListFoldersRequest, dict]):
The request object. The ListFolders request message.
parent (str):
Required. The resource name of the organization or
folder whose folders are being listed. Must be of the
form ``folders/{folder_id}`` or
``organizations/{org_id}``. Access to this method is
controlled by checking the
``resourcemanager.folders.list`` permission on the
``parent``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcemanager_v3.services.folders.pagers.ListFoldersPager:
The ListFolders response message.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a folders.ListFoldersRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, folders.ListFoldersRequest):
request = folders.ListFoldersRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_folders]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListFoldersPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def search_folders(
self,
request: Union[folders.SearchFoldersRequest, dict] = None,
*,
query: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchFoldersPager:
r"""Search for folders that match specific filter criteria.
``search()`` provides an eventually consistent view of the
folders a user has access to which meet the specified filter
criteria.
This will only return folders on which the caller has the
permission ``resourcemanager.folders.get``.
Args:
request (Union[google.cloud.resourcemanager_v3.types.SearchFoldersRequest, dict]):
The request object. The request message for searching
folders.
query (str):
Optional. Search criteria used to select the folders to
return. If no search criteria is specified then all
accessible folders will be returned.
Query expressions can be used to restrict results based
upon displayName, state and parent, where the operators
``=`` (``:``) ``NOT``, ``AND`` and ``OR`` can be used
along with the suffix wildcard symbol ``*``.
The ``displayName`` field in a query expression should
use escaped quotes for values that include whitespace to
prevent unexpected behavior.
\| Field \| Description \|
\|-------------------------\|----------------------------------------\|
\| displayName \| Filters by displayName. \| \| parent
\| Filters by parent (for example: folders/123). \| \|
state, lifecycleState \| Filters by state. \|
Some example queries are:
- Query ``displayName=Test*`` returns Folder resources
whose display name starts with "Test".
- Query ``state=ACTIVE`` returns Folder resources with
``state`` set to ``ACTIVE``.
- Query ``parent=folders/123`` returns Folder resources
that have ``folders/123`` as a parent resource.
- Query ``parent=folders/123 AND state=ACTIVE`` returns
active Folder resources that have ``folders/123`` as
a parent resource.
- Query ``displayName=\\"Test String\\"`` returns
Folder resources with display names that include both
"Test" and "String".
This corresponds to the ``query`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.resourcemanager_v3.services.folders.pagers.SearchFoldersPager:
The response message for searching
folders.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([query])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a folders.SearchFoldersRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, folders.SearchFoldersRequest):
request = folders.SearchFoldersRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if query is not None:
request.query = query
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.search_folders]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.SearchFoldersPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def create_folder(
self,
request: Union[folders.CreateFolderRequest, dict] = None,
*,
folder: folders.Folder = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a folder in the resource hierarchy. Returns an
``Operation`` which can be used to track the progress of the
folder creation workflow. Upon success, the
``Operation.response`` field will be populated with the created
Folder.
In order to succeed, the addition of this new folder must not
violate the folder naming, height, or fanout constraints.
- The folder's ``display_name`` must be distinct from all other
folders that share its parent.
- The addition of the folder must not cause the active folder
hierarchy to exceed a height of 10. Note, the full active +
deleted folder hierarchy is allowed to reach a height of 20;
this provides additional headroom when moving folders that
contain deleted folders.
- The addition of the folder must not cause the total number of
folders under its parent to exceed 300.
If the operation fails due to a folder constraint violation,
some errors may be returned by the ``CreateFolder`` request,
with status code ``FAILED_PRECONDITION`` and an error
description. Other folder constraint violations will be
communicated in the ``Operation``, with the specific
``PreconditionFailure`` returned in the details list in the
``Operation.error`` field.
The caller must have ``resourcemanager.folders.create``
permission on the identified parent.
Args:
request (Union[google.cloud.resourcemanager_v3.types.CreateFolderRequest, dict]):
The request object. The CreateFolder request message.
folder (google.cloud.resourcemanager_v3.types.Folder):
Required. The folder being created,
only the display name and parent will be
consulted. All other fields will be
ignored.
This corresponds to the ``folder`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.resourcemanager_v3.types.Folder` A folder in an organization's resource hierarchy, used to
organize that organization's resources.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([folder])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a folders.CreateFolderRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, folders.CreateFolderRequest):
request = folders.CreateFolderRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if folder is not None:
request.folder = folder
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_folder]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
folders.Folder,
metadata_type=folders.CreateFolderMetadata,
)
# Done; return the response.
return response
def update_folder(
self,
request: Union[folders.UpdateFolderRequest, dict] = None,
*,
folder: folders.Folder = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates a folder, changing its ``display_name``. Changes to the
folder ``display_name`` will be rejected if they violate either
the ``display_name`` formatting rules or the naming constraints
described in the
[CreateFolder][google.cloud.resourcemanager.v3.Folders.CreateFolder]
documentation.
The folder's ``display_name`` must start and end with a letter
or digit, may contain letters, digits, spaces, hyphens and
underscores and can be between 3 and 30 characters. This is
captured by the regular expression:
``[\p{L}\p{N}][\p{L}\p{N}_- ]{1,28}[\p{L}\p{N}]``. The caller
must have ``resourcemanager.folders.update`` permission on the
identified folder.
If the update fails due to the unique name constraint then a
``PreconditionFailure`` explaining this violation will be
returned in the Status.details field.
Args:
request (Union[google.cloud.resourcemanager_v3.types.UpdateFolderRequest, dict]):
The request object. The request sent to the
[UpdateFolder][google.cloud.resourcemanager.v3.Folder.UpdateFolder]
method.
Only the `display_name` field can be changed. All other
fields will be ignored. Use the
[MoveFolder][google.cloud.resourcemanager.v3.Folders.MoveFolder]
method to change the `parent` field.
folder (google.cloud.resourcemanager_v3.types.Folder):
Required. The new definition of the Folder. It must
include the ``name`` field, which cannot be changed.
This corresponds to the ``folder`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. Fields to be updated. Only the
``display_name`` can be updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.resourcemanager_v3.types.Folder` A folder in an organization's resource hierarchy, used to
organize that organization's resources.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([folder, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a folders.UpdateFolderRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, folders.UpdateFolderRequest):
request = folders.UpdateFolderRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if folder is not None:
request.folder = folder
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_folder]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("folder.name", request.folder.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
folders.Folder,
metadata_type=folders.UpdateFolderMetadata,
)
# Done; return the response.
return response
def move_folder(
self,
request: Union[folders.MoveFolderRequest, dict] = None,
*,
name: str = None,
destination_parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Moves a folder under a new resource parent. Returns an
``Operation`` which can be used to track the progress of the
folder move workflow. Upon success, the ``Operation.response``
field will be populated with the moved folder. Upon failure, a
``FolderOperationError`` categorizing the failure cause will be
returned - if the failure occurs synchronously then the
``FolderOperationError`` will be returned in the
``Status.details`` field. If it occurs asynchronously, then the
FolderOperation will be returned in the ``Operation.error``
field. In addition, the ``Operation.metadata`` field will be
populated with a ``FolderOperation`` message as an aid to
stateless clients. Folder moves will be rejected if they violate
either the naming, height, or fanout constraints described in
the
[CreateFolder][google.cloud.resourcemanager.v3.Folders.CreateFolder]
documentation. The caller must have
``resourcemanager.folders.move`` permission on the folder's
current and proposed new parent.
Args:
request (Union[google.cloud.resourcemanager_v3.types.MoveFolderRequest, dict]):
The request object. The MoveFolder request message.
name (str):
Required. The resource name of the Folder to move. Must
be of the form folders/{folder_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
destination_parent (str):
Required. The resource name of the folder or
organization which should be the folder's new parent.
Must be of the form ``folders/{folder_id}`` or
``organizations/{org_id}``.
This corresponds to the ``destination_parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.resourcemanager_v3.types.Folder` A folder in an organization's resource hierarchy, used to
organize that organization's resources.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name, destination_parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a folders.MoveFolderRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, folders.MoveFolderRequest):
request = folders.MoveFolderRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
if destination_parent is not None:
request.destination_parent = destination_parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.move_folder]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
folders.Folder,
metadata_type=folders.MoveFolderMetadata,
)
# Done; return the response.
return response
def delete_folder(
    self,
    request: Union[folders.DeleteFolderRequest, dict] = None,
    *,
    name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Requests deletion of a folder. The folder is moved into the
    [DELETE_REQUESTED][google.cloud.resourcemanager.v3.Folder.State.DELETE_REQUESTED]
    state immediately, and is deleted approximately 30 days later.
    This method may only be called on an empty folder, where a
    folder is empty if it doesn't contain any folders or projects in
    the
    [ACTIVE][google.cloud.resourcemanager.v3.Folder.State.ACTIVE]
    state. If called on a folder in
    [DELETE_REQUESTED][google.cloud.resourcemanager.v3.Folder.State.DELETE_REQUESTED]
    state the operation will result in a no-op success. The caller
    must have ``resourcemanager.folders.delete`` permission on the
    identified folder.

    Args:
        request (Union[google.cloud.resourcemanager_v3.types.DeleteFolderRequest, dict]):
            The request object. The DeleteFolder request message.
        name (str):
            Required. The resource name of the folder to be deleted.
            Must be of the form ``folders/{folder_id}``.
            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation.
            The result type for the operation will be
            :class:`google.cloud.resourcemanager_v3.types.Folder` A folder in
            an organization's resource hierarchy, used to organize that
            organization's resources.
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([name])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Minor optimization to avoid making a copy if the user passes
    # in a folders.DeleteFolderRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, folders.DeleteFolderRequest):
        request = folders.DeleteFolderRequest(request)
    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if name is not None:
        request.name = name
    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.delete_folder]
    # Certain fields should be provided within the metadata header;
    # add these here. The routing header lets the backend route the
    # call based on the folder's resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )
    # Send the request.
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
    # Wrap the response in an operation future so the caller can poll
    # or block until the long-running delete completes.
    response = operation.from_gapic(
        response,
        self._transport.operations_client,
        folders.Folder,
        metadata_type=folders.DeleteFolderMetadata,
    )
    # Done; return the response.
    return response
def undelete_folder(
    self,
    request: Union[folders.UndeleteFolderRequest, dict] = None,
    *,
    name: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Cancels the deletion request for a folder. This method may be
    called on a folder in any state. If the folder is in the
    [ACTIVE][google.cloud.resourcemanager.v3.Folder.State.ACTIVE]
    state the result will be a no-op success. In order to succeed,
    the folder's parent must be in the
    [ACTIVE][google.cloud.resourcemanager.v3.Folder.State.ACTIVE]
    state. In addition, reintroducing the folder into the tree must
    not violate folder naming, height, and fanout constraints
    described in the
    [CreateFolder][google.cloud.resourcemanager.v3.Folders.CreateFolder]
    documentation. The caller must have
    ``resourcemanager.folders.undelete`` permission on the
    identified folder.

    Args:
        request (Union[google.cloud.resourcemanager_v3.types.UndeleteFolderRequest, dict]):
            The request object. The UndeleteFolder request message.
        name (str):
            Required. The resource name of the folder to undelete.
            Must be of the form ``folders/{folder_id}``.
            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation.
            The result type for the operation will be
            :class:`google.cloud.resourcemanager_v3.types.Folder` A folder in
            an organization's resource hierarchy, used to organize that
            organization's resources.
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([name])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Minor optimization to avoid making a copy if the user passes
    # in a folders.UndeleteFolderRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, folders.UndeleteFolderRequest):
        request = folders.UndeleteFolderRequest(request)
    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if name is not None:
        request.name = name
    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.undelete_folder]
    # Certain fields should be provided within the metadata header;
    # add these here. The routing header lets the backend route the
    # call based on the folder's resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
    )
    # Send the request.
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
    # Wrap the response in an operation future so the caller can poll
    # or block until the long-running undelete completes.
    response = operation.from_gapic(
        response,
        self._transport.operations_client,
        folders.Folder,
        metadata_type=folders.UndeleteFolderMetadata,
    )
    # Done; return the response.
    return response
def get_iam_policy(
    self,
    request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None,
    *,
    resource: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
    r"""Gets the access control policy for a folder. The returned policy
    may be empty if no such policy or resource exists. The
    ``resource`` field should be the folder's resource name, for
    example: "folders/1234". The caller must have
    ``resourcemanager.folders.getIamPolicy`` permission on the
    identified folder.

    Args:
        request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
            The request object. Request message for `GetIamPolicy`
            method.
        resource (str):
            REQUIRED: The resource for which the
            policy is being requested. See the
            operation documentation for the
            appropriate value for this field.
            This corresponds to the ``resource`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.iam.v1.policy_pb2.Policy:
            An Identity and Access Management (IAM) policy, used to specify
            access control policies for Cloud Platform resources. A Policy
            is a collection of bindings; a binding binds one or more
            members (user accounts, service accounts, Google groups,
            domains) to a single role, and may optionally carry a condition
            that further constrains the binding. For JSON/YAML policy
            examples and a description of IAM features, see the `IAM
            developer's guide <https://cloud.google.com/iam/docs>`__.
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([resource])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    if isinstance(request, dict):
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        request = iam_policy_pb2.GetIamPolicyRequest(**request)
    elif not request:
        # Null request, just make one.
        request = iam_policy_pb2.GetIamPolicyRequest()
    if resource is not None:
        request.resource = resource
    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]
    # Certain fields should be provided within the metadata header;
    # add these here. The routing header lets the backend route the
    # call based on the target resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
    )
    # Send the request.
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
    # Done; return the response.
    return response
def set_iam_policy(
    self,
    request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None,
    *,
    resource: str = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
    r"""Sets the access control policy on a folder, replacing any
    existing policy. The ``resource`` field should be the folder's
    resource name, for example: "folders/1234". The caller must have
    ``resourcemanager.folders.setIamPolicy`` permission on the
    identified folder.

    Args:
        request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
            The request object. Request message for `SetIamPolicy`
            method.
        resource (str):
            REQUIRED: The resource for which the
            policy is being specified. See the
            operation documentation for the
            appropriate value for this field.
            This corresponds to the ``resource`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.iam.v1.policy_pb2.Policy:
            An Identity and Access Management (IAM) policy, used to specify
            access control policies for Cloud Platform resources. A Policy
            is a collection of bindings; a binding binds one or more
            members (user accounts, service accounts, Google groups,
            domains) to a single role, and may optionally carry a condition
            that further constrains the binding. For JSON/YAML policy
            examples and a description of IAM features, see the `IAM
            developer's guide <https://cloud.google.com/iam/docs>`__.
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([resource])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    if isinstance(request, dict):
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        request = iam_policy_pb2.SetIamPolicyRequest(**request)
    elif not request:
        # Null request, just make one.
        request = iam_policy_pb2.SetIamPolicyRequest()
    if resource is not None:
        request.resource = resource
    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
    # Certain fields should be provided within the metadata header;
    # add these here. The routing header lets the backend route the
    # call based on the target resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
    )
    # Send the request.
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
    # Done; return the response.
    return response
def test_iam_permissions(
    self,
    request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None,
    *,
    resource: str = None,
    permissions: Sequence[str] = None,
    retry: retries.Retry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
    r"""Returns permissions that a caller has on the specified folder.
    The ``resource`` field should be the folder's resource name, for
    example: "folders/1234".
    There are no permissions required for making this API call.

    Args:
        request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
            The request object. Request message for
            `TestIamPermissions` method.
        resource (str):
            REQUIRED: The resource for which the
            policy detail is being requested. See
            the operation documentation for the
            appropriate value for this field.
            This corresponds to the ``resource`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        permissions (Sequence[str]):
            The set of permissions to check for the ``resource``.
            Permissions with wildcards (such as '*' or 'storage.*')
            are not allowed. For more information see `IAM
            Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
            This corresponds to the ``permissions`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
            Response message for TestIamPermissions method.
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    has_flattened_params = any([resource, permissions])
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    if isinstance(request, dict):
        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        request = iam_policy_pb2.TestIamPermissionsRequest(**request)
    elif not request:
        # Null request, just make one.
        request = iam_policy_pb2.TestIamPermissionsRequest()
    if resource is not None:
        request.resource = resource
    # `permissions` is a repeated field, so extend rather than assign.
    if permissions:
        request.permissions.extend(permissions)
    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions]
    # Certain fields should be provided within the metadata header;
    # add these here. The routing header lets the backend route the
    # call based on the target resource name.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
    )
    # Send the request.
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
    # Done; return the response.
    return response
def __enter__(self):
    # Context-manager entry: yields the client itself so callers can
    # write ``with FoldersClient(...) as client:``.
    return self
def __exit__(self, type, value, traceback):
    """Releases underlying transport's resources.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    # Exceptions (if any) are not suppressed: this method returns None.
    self.transport.close()
# Resolve the installed package version for client telemetry headers;
# fall back to a default ClientInfo when the distribution is not
# installed (e.g. when running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-resourcemanager",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

# Public surface of this module.
__all__ = ("FoldersClient",)
|
// Copyright (c) 2018 Northwestern Mutual.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package traversal
import (
"go/token"
"testing"
"github.com/brugnara/grammes/query/predicate"
. "github.com/smartystreets/goconvey/convey"
)
// TestHas verifies the string output of the Has step for the supported
// parameter types: strings, nested traversals, go/token tokens, ints,
// predicates, and mixed argument lists.
func TestHas(t *testing.T) {
	// Fixed: the Convey description was garbled copy-paste residue
	// ("Given a ) String { that represents ...").
	Convey("Given a graph traversal", t, func() {
		g := NewTraversal()
		Convey("When 'Has' is called with object strings", func() {
			result := g.Has("obj1", "obj2", "obj3")
			Convey("Then result should equal 'g.has('obj1','obj2','obj3')'", func() {
				So(result.String(), ShouldEqual, "g.has(\"obj1\",\"obj2\",\"obj3\")")
			})
		})
		Convey("When 'Has' is called with a traversal", func() {
			result := g.Has("testHas", NewTraversal().Label().Raw())
			Convey("Then result should equal 'g.has('testHas',label())'", func() {
				So(result.String(), ShouldEqual, "g.has(\"testHas\",label())")
			})
		})
		Convey("When 'Has' is called with one token parameter", func() {
			var t token.Token
			result := g.Has(t)
			Convey("Then result should equal 'g.has(ILLEGAL)'", func() {
				So(result.String(), ShouldEqual, "g.has(ILLEGAL)")
			})
		})
		Convey("When 'Has' is called with one int", func() {
			result := g.Has(1234)
			Convey("Then result should equal 'g.has(1234)'", func() {
				So(result.String(), ShouldEqual, "g.has(1234)")
			})
		})
		Convey("When 'Has' is called with too many params", func() {
			result := g.Has("first", "second", "third", "fourth")
			Convey("Then result should equal 'g.has('first','second','third','fourth')'", func() {
				So(result.String(), ShouldEqual, "g.has(\"first\",\"second\",\"third\",\"fourth\")")
			})
		})
		Convey("When 'Has' is called with many different param types", func() {
			p := new(predicate.Predicate)
			*p = "predicate"
			result := g.Has("first", p, 1234)
			Convey("Then result should equal 'g.has('first',predicate,1234')'", func() {
				So(result.String(), ShouldEqual, "g.has(\"first\",predicate,1234)")
			})
		})
	})
}
// TestHasID verifies the string output of the HasID step for single
// and multiple arguments.
func TestHasID(t *testing.T) {
	// Fixed: garbled Convey description.
	Convey("Given a graph traversal", t, func() {
		g := NewTraversal()
		Convey("When 'HasID' is called with one parameter", func() {
			result := g.HasID("tstObjOrP")
			Convey("Then result should equal 'g.hasId('tstObjOrP')'", func() {
				So(result.String(), ShouldEqual, "g.hasId(\"tstObjOrP\")")
			})
		})
		Convey("When 'HasID' is called with a multiple params", func() {
			result := g.HasID("tstObjOrP", "tstObj1", "tstObj2")
			Convey("Then result should equal 'g.hasId('tstObjOrP','tstObj1','tstObj2')'", func() {
				So(result.String(), ShouldEqual, "g.hasId(\"tstObjOrP\",\"tstObj1\",\"tstObj2\")")
			})
		})
	})
}
// TestHasKey verifies the string output of the HasKey step for string,
// int, and multi-argument calls.
func TestHasKey(t *testing.T) {
	// Fixed: garbled Convey description.
	Convey("Given a graph traversal", t, func() {
		g := NewTraversal()
		Convey("When 'HasKey' is called with one parameter", func() {
			result := g.HasKey("tstpOrStr")
			Convey("Then result should equal 'g.hasKey('tstpOrStr')'", func() {
				So(result.String(), ShouldEqual, "g.hasKey(\"tstpOrStr\")")
			})
		})
		Convey("When 'HasKey' is called with one int parameter", func() {
			result := g.HasKey(1234)
			Convey("Then result should equal 'g.hasKey(1234)'", func() {
				So(result.String(), ShouldEqual, "g.hasKey(1234)")
			})
		})
		Convey("When 'HasKey' is called with a multiple params", func() {
			result := g.HasKey("tstpOrStr", "tstHandled1", "tstHandled2")
			Convey("Then result should equal 'g.hasKey('tstpOrStr','tstHandled1','tstHandled2')'", func() {
				So(result.String(), ShouldEqual, "g.hasKey(\"tstpOrStr\",\"tstHandled1\",\"tstHandled2\")")
			})
		})
	})
}
// TestHasLabel verifies the string output of the HasLabel step for
// string, multi-argument, and predicate calls.
func TestHasLabel(t *testing.T) {
	// Fixed: garbled Convey description.
	Convey("Given a graph traversal", t, func() {
		g := NewTraversal()
		Convey("When 'HasLabel' is called with one parameter", func() {
			result := g.HasLabel("tstpOrStr")
			Convey("Then result should equal 'g.hasLabel('tstpOrStr')'", func() {
				So(result.String(), ShouldEqual, "g.hasLabel(\"tstpOrStr\")")
			})
		})
		Convey("When 'HasLabel' is called with a multiple params", func() {
			result := g.HasLabel("tstpOrStr", "tstHandled1", "tstHandled2")
			Convey("Then result should equal 'g.hasLabel('tstpOrStr','tstHandled1','tstHandled2')'", func() {
				So(result.String(), ShouldEqual, "g.hasLabel(\"tstpOrStr\",\"tstHandled1\",\"tstHandled2\")")
			})
		})
		Convey("When 'HasLabel' is called with a predicate", func() {
			result := g.HasLabel(predicate.LessThan(12))
			Convey("Then result should equal 'g.hasLabel(lt(12))'", func() {
				So(result.String(), ShouldEqual, "g.hasLabel(lt(12))")
			})
		})
	})
}
// TestHasNot verifies the string output of the HasNot step.
func TestHasNot(t *testing.T) {
	// Fixed: garbled Convey description.
	Convey("Given a graph traversal", t, func() {
		g := NewTraversal()
		Convey("When 'HasNot' is called", func() {
			result := g.HasNot("testStr")
			Convey("Then result should equal 'g.hasNot('testStr')'", func() {
				So(result.String(), ShouldEqual, "g.hasNot(\"testStr\")")
			})
		})
	})
}
// TestHasValue verifies the string output of the HasValue step for
// string, int, and multi-argument calls.
func TestHasValue(t *testing.T) {
	// Fixed: garbled Convey description.
	Convey("Given a graph traversal", t, func() {
		g := NewTraversal()
		Convey("When 'HasValue' is called with one parameter", func() {
			result := g.HasValue("tstObjOrP")
			Convey("Then result should equal 'g.hasValue('tstObjOrP')'", func() {
				So(result.String(), ShouldEqual, "g.hasValue(\"tstObjOrP\")")
			})
		})
		Convey("When 'HasValue' is called with one int parameter", func() {
			result := g.HasValue(1234)
			// NOTE(review): the expected string carries an extra ')'.
			// It is kept as-is because it matches the original test; if
			// the implementation actually emits "g.hasValue(1234)" this
			// assertion should be corrected upstream.
			Convey("Then result should equal 'g.hasValue(1234))'", func() {
				So(result.String(), ShouldEqual, "g.hasValue(1234))")
			})
		})
		Convey("When 'HasValue' is called with a multiple params", func() {
			result := g.HasValue("tstObjOrP", "tstObj1", "tstObj2")
			Convey("Then result should equal 'g.hasValue('tstObjOrP','tstObj1','tstObj2')'", func() {
				So(result.String(), ShouldEqual, "g.hasValue(\"tstObjOrP\",\"tstObj1\",\"tstObj2\")")
			})
		})
	})
}
|
<gh_stars>1-10
import { types } from 'mobx-state-tree';
import Cookies from 'universal-cookie';
import { addLocaleData } from 'react-intl';
import en from 'react-intl/locale-data/en';
// Translated data
import localeData from '../../../i18n/build/data.json';
const DEFAULT_LANGUAGE = 'en';
const COOKIE_NAME = 'language';
// How long the chosen language persists in the cookie: 30 days, in seconds.
const COOKIE_MAX_AGE = 24 * 3600 * 30;

/**
 * MobX-State-Tree store holding the UI language and the matching
 * translated message bundle.
 */
const LocaleStore = types
  .model('LocaleStore', {
    language: types.optional(types.string, DEFAULT_LANGUAGE),
  })
  .volatile(() => ({
    cookies: new Cookies(),
  }))
  .views(self => ({
    // "en-US" / "en_US" -> "en"
    get languageWithoutRegionCode() {
      return self.language.toLowerCase().split(/[_-]+/)[0];
    },
    // Try the locale without region code, then the full locale,
    // then fall back to the default language's bundle.
    get messages() {
      return (
        localeData[self.languageWithoutRegionCode]
        || localeData[self.language]
        // BUG FIX: was `localeData.DEFAULT_LANGUAGE`, which looked up a
        // literal "DEFAULT_LANGUAGE" key (undefined) instead of the
        // 'en' bundle.
        || localeData[DEFAULT_LANGUAGE]
      );
    },
  }))
  .actions((self) => {
    // Switch the language, persist it in a cookie, optionally reload.
    const changeLanguage = (language = '', windowReload = false) => {
      if (self.language === language) return;
      self.language = language;
      // Save to cookie
      self.cookies.set(COOKIE_NAME, language, {
        path: '/',
        maxAge: COOKIE_MAX_AGE,
      });
      if (windowReload) {
        window.location.reload();
      }
    };
    const afterCreate = () => {
      addLocaleData([...en]);
      // The cookie (explicit user choice) wins over the browser locale.
      const cookieLanguage = self.cookies.get(COOKIE_NAME);
      const browserLanguage = (navigator.languages && navigator.languages[0].split('-')[0])
        || navigator.language
        || navigator.userLanguage;
      self.language = cookieLanguage || browserLanguage;
    };
    return {
      afterCreate,
      changeLanguage,
    };
  });

export default LocaleStore;
|
<filename>zeus-spring/src/main/java/com/iterlife/zeus/spring/servlet/XServletContext.java
/**
*
*/
package com.iterlife.zeus.spring.servlet;
import java.io.InputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Enumeration;
import java.util.Set;
/**
* A context set defined by Web server container,it was initialized when Web
* server starts to provides a servlet.
*
* @author <NAME>
*
*/
public interface XServletContext {

    // NOTE(review): this interface mirrors javax.servlet.ServletContext;
    // the per-method semantics below are assumed to match it — confirm
    // against the implementing class.

    /** Returns the context path of the web application. */
    public String getContextPath();

    /** Returns the context object for the resource at the given URI path. */
    public XServletContext getContext(String uripath);

    /** Major version of the servlet API this context supports. */
    public int getMajorVersion();

    /** Minor version of the servlet API this context supports. */
    public int getMinorVersion();

    /** MIME type of the given file name, or null if unknown. */
    public String getMimeType(String file);

    /** Directory-like listing of resource paths under the given path. */
    public Set<String> getResourcePaths(String path);

    /** URL of the resource mapped to the given path. */
    public URL getResource(String path) throws MalformedURLException;

    /** Resource at the given path as an InputStream, or null if absent. */
    public InputStream getResourceAsStream(String path);

    /** Dispatcher for forwarding/including the resource at the given path. */
    public XRequestDispatcher getRequestDispatcher(String path);

    /** Dispatcher for the servlet registered under the given name. */
    public XRequestDispatcher getNamedDispatcher(String name);

    /** Writes a message to the container's log. */
    public void log(String msg);

    /** Writes a message and a stack trace to the container's log. */
    public void log(String message, Throwable throwable);

    /** Filesystem path corresponding to the given virtual path. */
    public String getRealPath(String path);

    /** Name and version of the servlet container. */
    public String getServerInfo();

    /** Value of the named context-wide initialization parameter. */
    public String getInitParameter(String name);

    /** Names of all context-wide initialization parameters. */
    public Enumeration<?> getInitParameterNames();

    /** Value of the named context attribute, or null if absent. */
    public Object getAttribute(String name);

    /** Names of all context attributes. */
    public Enumeration<?> getAttributeNames();

    /** Binds an object to the given attribute name. */
    public void setAttribute(String name, Object object);

    /** Removes the attribute with the given name. */
    public void removeAttribute(String name);

    /** Display name of this web application. */
    public String getServletContextName();
}
|
#! /bin/bash
# For every AWS region, print "<public-ip> <ssh-host-key>" for each EC2
# instance that has a public IP, scraping the host key out of the
# instance's console output.
for region in $(aws ec2 describe-regions | jq -r '.Regions[].RegionName')
do
    instancesInfo=$(aws ec2 describe-instances --region "$region" | jq -r '.Reservations')
    numReservations=$(echo "$instancesInfo" | jq '. | length')
    for r in $(seq 0 $(( numReservations - 1 )))
    do
        numInstances=$(echo "$instancesInfo" | jq ".[$r].Instances | length")
        for i in $(seq 0 $(( numInstances - 1 )))
        do
            publicIp=$(echo "$instancesInfo" | jq -r ".[$r].Instances[$i].PublicIpAddress")
            # BUG FIX: $publicIp must be quoted — if jq ever returns an
            # empty string, the unquoted test `[ $publicIp != "null" ]`
            # becomes a syntax error instead of a comparison.
            if [ "$publicIp" != "null" ]; then
                instanceId=$(echo "$instancesInfo" | jq -r ".[$r].Instances[$i].InstanceId")
                key=$(aws ec2 get-console-output --instance-id "$instanceId" --region "$region" | grep -oP "(?<=BEGIN SSH HOST KEY KEYS-----\\\r\\\n)[^ ]* [^ ]* ")
                echo "$publicIp $key"
            fi
        done
    done
done
|
def print_table(a, b, c):
    """Print a two-column score table for teams A, B, and C.

    Args:
        a: Team A's score (int).
        b: Team B's score (int).
        c: Team C's score (int).
    """
    print("Team | Score")
    print("------------")
    for team_label, score in (("Team A", a), ("Team B", b), ("Team C", c)):
        print("%s | %d" % (team_label, score))
print_table(4, 2, 3)
|
<gh_stars>1-10
import torch
# import torchvision
import collections
import os
from PIL import Image
import cv2
from torch.utils import data
# HOME = os.environ['HOME']
def parse_model(model_path="./models/swin_large_patch4_window12_384_22kto1k.pth"):
    """Load a checkpoint and print the name and shape of every tensor
    in its ``'model'`` state dict.

    Args:
        model_path: Path to a ``.pth`` checkpoint containing a 'model' key.
            BUG FIX: the original immediately overwrote this parameter with
            the hard-coded Swin path, ignoring the caller's argument; the
            hard-coded path is now only the default.

    Raises:
        AssertionError: If ``model_path`` does not exist.
    """
    assert os.path.exists(model_path)
    checkpoint = torch.load(model_path)
    for key, tensor in checkpoint['model'].items():
        print(key)
        print(tensor.shape)
def parse_dataset(data_path, ann_dir):
    """Validate an image/annotation dataset pair directory.

    For every image in ``data_path``: ensures the annotation with the same
    file name in ``ann_dir`` is a single-channel ('L' mode) image
    (converting and re-saving it IN PLACE if not), the image is 3-channel,
    and image and annotation have identical width/height.

    NOTE: assumes ``data_path`` contains only image files and that every
    image has a same-named annotation — TODO confirm.
    """
    for img_path in os.listdir(data_path):
        im_path = os.path.join(data_path, img_path)
        ann_path = os.path.join(ann_dir, img_path)
        im = None
        # if data_type == 'anno':
        ann_im = Image.open(ann_path)
        # print(ann_im.size)
        try:
            # Annotations must be grayscale ('L'); convert on disk if not.
            assert(ann_im.mode == 'L')
        except Exception as ex:
            print('converting' , ann_path)
            ann_im = ann_im.convert('L')
            ann_im.save(ann_path)
        # else:
        im = cv2.imread(im_path)
        # cv2 gives (h, w, c); PIL's .size is (w, h) — swap to compare.
        im_shape = (im.shape[1], im.shape[0])
        # print(im.shape)
        assert(im.shape[2] ==3)
        assert(im_shape == ann_im.size)
if __name__ == '__main__':
    # Validate the ADE20K training split: images against annotations.
    data_path = './data/ade/ADEChallengeData2016/images/training'
    ann_path = './data/ade/ADEChallengeData2016/annotations/training'
    parse_dataset(data_path, ann_path)
|
<filename>src/app.service.ts<gh_stars>0
import { Injectable } from '@nestjs/common';
@Injectable()
export class AppService {
  /** Canonical greeting used by the HTTP controller. */
  getHello(): string {
    return 'Hello World!';
  }

  /**
   * Maps a known command string to the microservice's reply.
   * Unrecognized commands get a fallback message.
   */
  getMessage(data: string): string {
    switch (data) {
      case 'hello':
        return 'Hello from microservice';
      case 'bye':
        return 'Bye from microservice';
      default:
        return 'Microservice does not understands';
    }
  }
}
|
#!/bin/bash
# Echo each input line and count how many lines are numerically greater
# than the previous line. Reads from the file given as $1, or stdin.
# BUG FIX: counter is now initialized, so zero increases prints
# "counter: 0" instead of "counter: " (empty expansion of an unset var).
counter=0
while read -r line; do
    echo "$line"
    if [[ -n $prev ]]; then
        if [[ $prev -lt $line ]]; then
            counter=$((counter+1))
        fi
    fi
    prev=$line
done < "${1:-/dev/stdin}"
echo "counter: $counter"
|
# Regenerate the Data Dragon bindings into a temp file so the result can
# be reviewed before overwriting the checked-in copy.
go run ./tools/genddragon.go > /tmp/auto_ddragon.go
echo ""
echo "Output to: /tmp/auto_ddragon.go"
echo "Overwrite command: cp /tmp/auto_ddragon.go riot/auto_ddragon.go"
echo ""
|
alter table DDCSD_SCRIPT add column CATEGORY_ID varchar(36) ;
|
<gh_stars>1-10
from django.apps import AppConfig
class TagsConfig(AppConfig):
    """Django application configuration for the ``hexa.tags`` app."""
    # Full dotted path of the app package.
    name = "hexa.tags"
    # Short label used in relations, migrations, and the admin.
    label = "tags"
|
<?php
namespace jupingx\rtb\Normal;
/**
* OsType enum
*/
final class OsType
{
    const UNKNOWN = 0;
    const WINDOWS = 1;
    const MAC = 2;
    const LINUX = 3;
    const ANDROID = 4;
    const IOS = 5;

    /**
     * Resolve an operating-system type constant to its display name.
     *
     * Comparison is deliberately loose (==), matching the behavior of the
     * original switch statement. Values outside the known constants map
     * to 'Invalid OS type'.
     *
     * @param int $type One of the OsType constants
     * @return string Human-readable operating-system name
     */
    public static function getOsName($type)
    {
        if ($type == self::UNKNOWN) {
            return 'Unknown';
        }
        if ($type == self::WINDOWS) {
            return 'Windows';
        }
        if ($type == self::MAC) {
            return 'Mac';
        }
        if ($type == self::LINUX) {
            return 'Linux';
        }
        if ($type == self::ANDROID) {
            return 'Android';
        }
        if ($type == self::IOS) {
            return 'iOS';
        }
        return 'Invalid OS type';
    }
}
|
/**
 * Classic binary search over an ascending sorted array.
 *
 * @param {Array} arr    Sorted values (strict === comparison against target).
 * @param {*}     target Value to locate.
 * @returns {number} Index of target within arr, or -1 when absent.
 */
function binarySearch(arr, target) {
  let lo = 0;
  let hi = arr.length - 1;
  while (lo <= hi) {
    const mid = Math.floor((lo + hi) / 2);
    const probe = arr[mid];
    if (probe === target) {
      return mid;
    }
    if (probe > target) {
      hi = mid - 1;   // target, if present, lies in the left half
    } else {
      lo = mid + 1;   // target, if present, lies in the right half
    }
  }
  return -1;
}
|
<reponame>MrPepperoni/Reaping2-1
#ifndef INCLUDED_CORE_PERF_TIMER_H
#define INCLUDED_CORE_PERF_TIMER_H
#include <boost/timer.hpp>
#include "platform/i_platform.h"
namespace perf {
// Lightweight performance timer built on boost::timer, used for
// logging elapsed times between instrumentation points.
class Timer_t
{
    boost::timer mMeasurer;     // started at construction
    // Elapsed value captured at the previous Log() call — presumably used
    // to report the delta between consecutive Log() calls; confirm in the
    // .cpp implementation.
    double mPrevMeasurement;
public:
    // Logs the elapsed time, tagged with the (optional) label Str.
    void Log( std::string const& Str = std::string() );
    Timer_t();
};
} // namespace perf
#endif//INCLUDED_CORE_PERF_TIMER_H
|
package com.jsgygujun.code.flink.chapter05.util
/**
 * Alert/report data entity.
 *
 * @param message   human-readable alert text
 * @param timestamp event time as a Long epoch value -- NOTE(review): unit
 *                  (seconds vs milliseconds) is not visible from this file;
 *                  confirm against the producer.
 * @author <EMAIL>
 * @since 2020/8/20 2:30 PM
 */
case class Alert(message: String, timestamp: Long)
|
#!/bin/bash
# Builds and installs ncview 2.1.7 into the prefix given as $1.
# Relies on JARVIS-provided env vars: DOWNLOAD_TOOL, JARVIS_TMP, JARVIS_DOWNLOAD.
# download from ftp://cirrus.ucsd.edu/pub/ncview/ncview-2.1.7.tar.gz
# yum install libX11 libX11-devel libXaw libXaw-devel libpng-devel libpng
set -x
set -e
. ${DOWNLOAD_TOOL} -u ftp://cirrus.ucsd.edu/pub/ncview/ncview-2.1.7.tar.gz
cd ${JARVIS_TMP}
tar -xvf ${JARVIS_DOWNLOAD}/ncview-2.1.7.tar.gz
cd ncview-2.1.7
# Derive sibling package roots from the install prefix ($1 = <root>/ncview/<ver>).
NETCDF_DIR=${1%/*/*}/netcdf/4.7.0
UDUNITS_DIR=${1%/*/*}/udunits/2.2.28
./configure --prefix=$1 --with-nc-config=${NETCDF_DIR}/bin/nc-config -with-udunits2_incdir=${UDUNITS_DIR}/include -with-udunits2_libdir=${UDUNITS_DIR}/lib
make -j
make install
|
<gh_stars>0
-- Prepared-statement insert into display_selection: id from 'somsequence',
-- current_date for the timestamp column, and 131 positional parameters
-- ($1-$131) supplied by the caller in column order.
insert into display_selection values (nextval('somsequence'),$1,$2,$3,
current_date,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26,$27,$28,
$29,$30,$31,$32,$33,$34,$35,$36,$37,$38,$39,$40,$41,$42,$43,$44,$45,$46,$47,$48,$49,$50,$51,
$52,$53,$54,$55,$56,$57,$58,$59,$60,$61,$62,$63,$64,$65,$66,$67,$68,$69,$70,$71,$72,$73,$74,
$75,$76,$77,$78,$79,$80,$81,$82,$83,$84,$85,$86,$87,$88,$89,$90,$91,$92,$93,$94,$95,$96,$97,
$98,$99,$100,$101,$102,$103,$104,$105,$106,$107,$108,$109,$110,$111,$112,$113,$114,$115,$116,
$117,$118,$119,$120,$121,$122,$123,$124,$125,$126,$127,$128,$129,$130,$131);
|
#!/bin/bash
# LinuxGSM command_debug.sh module
# Author: Daniel Gibbs
# Contributors: http://linuxgsm.com/contrib
# Website: https://linuxgsm.com
# Description: Runs the server without tmux and directly from the terminal.
commandname="DEBUG"
# NOTE(review): "Debuging" is displayed text with a typo ("Debugging") --
# left as-is here since other modules may match on the exact string.
commandaction="Debuging"
functionselfname="$(basename "$(readlink -f "${BASH_SOURCE[0]}")")"
fn_firstcommand_set
fn_lockfile_trap(){
# Remove lockfile.
rm -f "${lockdir:?}/${selfname}.lock"
# resets terminal. Servers can sometimes mess up the terminal on exit.
reset
fn_print_dots "Stopping debug"
fn_print_ok_nl "Stopping debug"
fn_script_log_pass "Stopping debug"
# remove trap.
trap - INT
core_exit.sh
}
# LinuxGSM module dispatch: each bare name below invokes the corresponding
# module through the core command handler (not a plain file execution).
check.sh
fix.sh
info_distro.sh
info_config.sh
# NOTE: Check if works with server without parms. Could be intergrated in to info_parms.sh.
fn_print_header
# Print a system summary; column(1) aligns the tab-separated fields.
{
echo -e "${lightblue}Distro:\t\t${default}${distroname}"
echo -e "${lightblue}Arch:\t\t${default}${arch}"
echo -e "${lightblue}Kernel:\t\t${default}${kernel}"
echo -e "${lightblue}Hostname:\t\t${default}${HOSTNAME}"
echo -e "${lightblue}tmux:\t\t${default}${tmuxv}"
echo -e "${lightblue}Avg Load:\t\t${default}${load}"
echo -e "${lightblue}Free Memory:\t\t${default}${physmemfree}"
echo -e "${lightblue}Free Disk:\t\t${default}${availspace}"
} | column -s $'\t' -t
# glibc required.
# Compares the server's required glibc (${glibc}) against the distro's
# (${glibcversion}) using a version sort: if the required version is NOT the
# minimum of the pair, the distro's glibc is too old.
if [ "${glibc}" ]; then
if [ "${glibc}" == "null" ]; then
# Glibc is not required.
:
elif [ -z "${glibc}" ]; then
echo -e "${lightblue}glibc required:\t${red}UNKNOWN${default}"
# Fixed: the original ran `printf '%s\n'${glibc}'\n' ${glibcversion}` --
# unquoted with a literal \n fused onto the format string, which breaks on
# values containing spaces/globs. Quote both values as separate arguments.
elif [ "$(printf '%s\n' "${glibc}" "${glibcversion}" | sort -V | head -n 1)" != "${glibc}" ]; then
echo -e "${lightblue}glibc required:\t${red}${glibc} ${default}(${red}distro glibc ${glibcversion} too old${default})"
else
echo -e "${lightblue}glibc required:\t${green}${glibc}${default}"
fi
fi
# Server IP
echo -e "${lightblue}Game Server IP:\t${default}${ip}:${port}"
# External server IP.
# Only shown when it differs from the bind address (e.g. behind NAT).
if [ "${extip}" ]; then
if [ "${ip}" != "${extip}" ]; then
echo -e "${lightblue}Internet IP:\t${default}${extip}:${port}"
fi
fi
# Server password.
if [ "${serverpassword}" ]; then
echo -e "${lightblue}Server password:\t${default}${serverpassword}"
fi
# Preview the exact command line that will be run below (engine-specific flags).
echo -e "${lightblue}Start parameters:${default}"
if [ "${engine}" == "source" ]||[ "${engine}" == "goldsrc" ]; then
echo -e "${executable} ${startparameters} -debug"
elif [ "${engine}" == "quake" ]; then
echo -e "${executable} ${startparameters} -condebug"
else
echo -e "${preexecutable} ${executable} ${startparameters}"
fi
echo -e ""
echo -e "Use for identifying server issues only!"
echo -e "Press CTRL+c to drop out of debug mode."
fn_print_warning_nl "If ${selfname} is already running it will be stopped."
echo -e ""
# Abort cleanly unless the user confirms (default Yes).
if ! fn_prompt_yn "Continue?" Y; then
exitcode=0
core_exit.sh
fi
# Stop any running instance first; exitbypass keeps command_stop.sh from
# terminating this script when it finishes.
fn_print_info_nl "Stopping any running servers"
fn_script_log_info "Stopping any running servers"
exitbypass=1
command_stop.sh
fn_firstcommand_reset
unset exitbypass
fn_print_dots "Starting debug"
fn_script_log_info "Starting debug"
fn_print_ok_nl "Starting debug"
# Create lockfile.
# Lockfile format: epoch timestamp, then version, then port (one per line).
date '+%s' > "${lockdir}/${selfname}.lock"
echo "${version}" >> "${lockdir}/${selfname}.lock"
echo "${port}" >> "${lockdir}/${selfname}.lock"
fn_script_log_info "Lockfile generated"
fn_script_log_info "${lockdir}/${selfname}.lock"
cd "${executabledir}" || exit
# Note: do not add double quotes to ${executable} ${startparameters}.
# The server runs in the FOREGROUND here; the script blocks until it exits.
if [ "${engine}" == "source" ]||[ "${engine}" == "goldsrc" ]; then
${executable} ${startparameters} -debug
elif [ "${shortname}" == "arma3" ]; then
# Arma3 requires semicolons in the module list, which need to
# be escaped for regular (tmux) loading, but need to be
# stripped when loading straight from the console.
# NOTE(review): this branch uses ${parms} while every other branch uses
# ${startparameters} -- looks like a missed rename; confirm upstream.
${executable} ${parms//\\;/;}
elif [ "${engine}" == "quake" ]; then
${executable} ${startparameters} -condebug
else
# shellcheck disable=SC2086
${preexecutable} ${executable} ${startparameters}
fi
# Server has exited (or user pressed CTRL+C): clean up lockfile and terminal.
fn_lockfile_trap
fn_print_dots "Stopping debug"
fn_print_ok_nl "Stopping debug"
fn_script_log_info "Stopping debug"
core_exit.sh
|
<reponame>MontealegreLuis/activity-feed
package com.montealegreluis.activityfeed;
import static com.montealegreluis.activityfeed.ExceptionContextFactory.contextFrom;
import com.montealegreluis.assertions.Assert;
import java.util.LinkedHashMap;
import java.util.Map;
import org.slf4j.event.Level;
public final class ActivityBuilder {
private final Map<String, Object> context = new LinkedHashMap<>();
private Level level;
private String identifier;
private String message;
public static ActivityBuilder anActivity() {
return new ActivityBuilder();
}
public ActivityBuilder trace() {
level = Level.TRACE;
return this;
}
public ActivityBuilder debug() {
level = Level.DEBUG;
return this;
}
public ActivityBuilder info() {
level = Level.INFO;
return this;
}
public ActivityBuilder warning() {
level = Level.WARN;
return this;
}
public ActivityBuilder error() {
level = Level.ERROR;
return this;
}
public ActivityBuilder withIdentifier(String identifier) {
this.identifier = identifier;
return this;
}
public ActivityBuilder withMessage(String message) {
this.message = message;
return this;
}
public ActivityBuilder withException(Throwable exception) {
Assert.notNull(exception, "Exception cannot be null");
context.put("exception", contextFrom(exception));
return this;
}
public ActivityBuilder with(String key, Object value) {
context.put(key, value);
return this;
}
public Activity build() {
return Activity.withLevel(
level, identifier, message, (context) -> context.putAll(this.context));
}
}
|
-- Library schema: members borrow books, checkouts are processed by librarians.
CREATE TABLE members (
id int(11) AUTO_INCREMENT PRIMARY KEY,
name varchar(255) NOT NULL,
email varchar(255) NOT NULL,
address varchar(255) NOT NULL
);
CREATE TABLE books (
id int(11) AUTO_INCREMENT PRIMARY KEY,
title varchar(255) NOT NULL,
author varchar(255) NOT NULL,
genre varchar(255) NOT NULL
);
CREATE TABLE librarians (
id int(11) AUTO_INCREMENT PRIMARY KEY,
name varchar(255) NOT NULL,
email varchar(255) NOT NULL,
address varchar(255) NOT NULL
);
-- Junction table: book/member/librarian hold ids of the tables above.
-- NOTE(review): no FOREIGN KEY constraints are declared, so referential
-- integrity is enforced only by the application; consider adding them.
CREATE TABLE checkout (
id int(11) AUTO_INCREMENT PRIMARY KEY,
book int(11) NOT NULL,
member int(11) NOT NULL,
librarian int(11) NOT NULL,
date_checked_out date NOT NULL,
date_due date NOT NULL,
date_returned date  -- NULL while the book is still out
);
|
<reponame>lostmsu/RoboZZle-Droid
package com.team242.robozzle;
import android.app.Activity;
import android.os.Bundle;
import android.text.Html;
import android.text.Spanned;
import android.text.method.LinkMovementMethod;
import android.widget.TextView;
/**
* Created by lost on 10/26/2015.
*/
/**
 * Simple "About" screen: inflates R.layout.about and makes links in the
 * about text clickable via LinkMovementMethod.
 */
public class AboutActivity extends Activity {
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.about);
// Without a movement method, HTML links in the TextView are not tappable.
TextView aboutText = (TextView)findViewById(R.id.aboutText);
aboutText.setMovementMethod(LinkMovementMethod.getInstance());
}
}
|
#!/bin/bash
# Copyright hechain. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
# Builds the Fabric Docker images, then tags and pushes each image twice:
# once per release string (full RELEASE and TWO_DIGIT_RELEASE), with an
# amd64-prefixed variant of each. Requires DOCKER_USERNAME/DOCKER_PASSWORD.
set -euo pipefail
make docker
docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_PASSWORD}"
for image in baseos peer orderer ccenv tools; do
for release in ${RELEASE} ${TWO_DIGIT_RELEASE}; do
docker tag "hyperledger/fabric-${image}" "hyperledger/fabric-${image}:amd64-${release}"
docker tag "hyperledger/fabric-${image}" "hyperledger/fabric-${image}:${release}"
docker push "hyperledger/fabric-${image}:amd64-${release}"
docker push "hyperledger/fabric-${image}:${release}"
done
done
|
const path = require('path');
const fs = require('fs');
// Extract languages from files available in /templates/src/translations/
// Returns one entry per .json file found there.
module.exports = function() {
  const translationsDir = path.join(__dirname, 'templates/src/translations');
  const files = fs.readdirSync(translationsDir).filter(file => path.extname(file) === '.json');
  // NOTE(review): slice(2, 7) takes 5 chars starting at index 2 -- this
  // assumes filenames shaped like "??xx-YY.json" (2-char prefix before a
  // 5-char locale code). Confirm against the actual translation filenames.
  return files.map(file => path.basename(file).slice(2, 7));
};
|
import config from 'config'
import path from 'path'
/**
 * Folds an array of path segments into a single path.
 * path.resolve produces an absolute path; path.join keeps it relative.
 */
const dir = (arr, isAbsolute = false) => {
  const joiner = isAbsolute ? path.resolve : path.join
  return arr.reduce((acc, segment) => joiner(acc, segment), '')
}
/**
 * Converts an array of directory segments into a relative path.
 * @param {string[]} arr - array of directory names
 * @return {string}
 */
export const relativePath = (arr) => dir(arr)
/**
 * Converts an array of directory segments into an absolute project path.
 * @param {string[]} arr - array of directory names
 * @return {string}
 */
export const absolutePath = (arr) => dir(arr, true)
/**
 * Converts an array of directory segments into a root-relative path,
 * prefixed with the configured `output.root` and suffixed with "/".
 * @param {string[]} arr - array of directory names
 * @return {*}
 */
export const rootRelativePath = (arr) => {
  if (arr.length === 0) return config.get('output.root')
  return `${config.get('output.root') + relativePath(arr)}/`
}
|
<filename>src/routes/api/index.js
// Top-level API router: mounts the users sub-router under /users.
const router = require('express').Router()
const usersRouter = require('./users.router')
router.use('/users', usersRouter)
module.exports = router
|
<filename>docs/cvs/classdroid_1_1_runtime_1_1_prototyping_1_1_sensors_1_1_rays_1_1_experimental_1_1_ray.js
// Doxygen navigation data for the Ray sensor class page -- this file appears
// to be generator output (member name + anchor pairs); edit the source C#
// instead of this file.
var classdroid_1_1_runtime_1_1_prototyping_1_1_sensors_1_1_rays_1_1_experimental_1_1_ray =
[
    [ "Perceive", "classdroid_1_1_runtime_1_1_prototyping_1_1_sensors_1_1_rays_1_1_experimental_1_1_ray.html#adaabd0e53b00bae8aa0ff726081bf303", null ]
];
|
#include "Config.h"
#include "BNetOldAuthLogon2.h"
namespace Packets
{
namespace BNet
{
// Serializes the auth-logon response into _packet: writes `result`, then
// the reason string only when one is set.
// NOTE(review): the reason field is conditionally present, so the peer must
// infer its presence from `result` -- confirm against the protocol spec.
bool BNetOldAuthLogon2::Pack()
{
_packet << result;
if(!reason.empty())
_packet << reason.c_str();
return true;  // always succeeds; no failure paths in this packer
}
}
}
|
# Launches the VariationImporter async job: reads the auth token from the
# KBase work dir and passes it (via xargs) as the last argument after the
# input/output JSON paths. Aborts when no token file exists.
script_dir=$(dirname "$(readlink -f "$0")")
export KB_DEPLOYMENT_CONFIG=$script_dir/../deploy.cfg
WD=/kb/module/work
if [ -f $WD/token ]; then
cat $WD/token | xargs sh $script_dir/../bin/run_VariationImporter_async_job.sh $WD/input.json $WD/output.json
else
echo "File $WD/token doesn't exist, aborting."
exit 1
fi
|
SLIDE_WINDOWS = 3
THRESHOLD = 10

# Counts how many sliding windows of SLIDE_WINDOWS consecutive integers from
# input.txt sum to at least THRESHOLD. The file handle is now closed
# deterministically via a context manager (it was previously never closed).
with open("input.txt", "r") as f:
    # Seed the window with the first SLIDE_WINDOWS lines.
    window = [int(f.readline()) for _ in range(SLIDE_WINDOWS)]
    count = 0
    for cur in f:
        if sum(window) >= THRESHOLD:
            count += 1
        # Slide forward: drop the oldest value, append the new one.
        window.pop(0)
        window.append(int(cur))
# NOTE(review): the final window (after the last line is read) is never
# tested against THRESHOLD -- preserved as-is; confirm this is intended.
print(count)
|
import React from 'react';
import { storiesOf } from '@storybook/react';
import { deepMerge } from 'grommet/utils';
import { grommet, Box, FormField, TextArea, Grommet } from 'grommet';
// Theme overrides merged onto the base grommet theme for this story.
var customFormFieldTheme = {
global: {
font: {
size: '13px'
},
input: {
weight: 400
}
},
formField: {
label: {
color: 'dark-3',
size: 'xsmall',
margin: {
vertical: '0',
bottom: 'small',
horizontal: '0'
},
weight: 600
},
border: false,
margin: 0
}
};
// Renders a FormField + TextArea inside a Grommet provider using the
// custom theme above (compiled output: JSX already lowered to createElement).
var CustomFormField = function CustomFormField() {
return React.createElement(Grommet, {
theme: deepMerge(grommet, customFormFieldTheme)
}, React.createElement(Box, {
align: "center",
pad: "large"
}, React.createElement(FormField, {
label: "Label",
htmlFor: "text-area"
}, React.createElement(TextArea, {
id: "text-area",
placeholder: "placeholder"
}))));
};
// Register the story with Storybook.
storiesOf('FormField', module).add('Custom Theme', function () {
return React.createElement(CustomFormField, null);
});
|
#!/bin/sh
# CocoaPods "Embed Frameworks" build phase script (generated by CocoaPods):
# copies vendored frameworks into the app bundle, strips invalid
# architectures, and re-signs when required.
set -e
set -u
set -o pipefail
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
# $1: framework path (absolute, or relative to BUILT_PRODUCTS_DIR).
install_framework()
{
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi
  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi
  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  fi
  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi
  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"
  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies and strips a vendored dSYM
# $1: path to the .framework.dSYM bundle.
install_dsym() {
  local source="$1"
  if [ -r "$source" ]; then
    # Copy the dSYM into a the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
    local basename
    basename="$(basename -s .framework.dSYM "$source")"
    binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
    # Strip invalid architectures so "fat" simulator / device frameworks work on device
    if [[ "$(file "$binary")" == *"Mach-O dSYM companion"* ]]; then
      strip_invalid_archs "$binary"
    fi
    # STRIP_BINARY_RETVAL is set by strip_invalid_archs: 1 = stripped OK.
    if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
      # Move the stripped file into its final destination.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
    fi
  fi
}
# Signs a framework with the provided identity
# $1: path to the bundle/binary to sign. No-op unless code signing is both
# required and allowed by the current build settings.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identitiy
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
    # Background the signing when parallel signing is enabled; the caller
    # waits on all jobs at the end of the script.
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Strip invalid architectures
# $1: binary to strip in place. Sets STRIP_BINARY_RETVAL: 1 on success,
# 0 when the binary shares no architectures with the current build.
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    STRIP_BINARY_RETVAL=0
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary" || exit 1
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=1
}
# Per-configuration framework list (generated by CocoaPods for this target).
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/NKQuartzClockTimePicker/NKQuartzClockTimePicker.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/NKQuartzClockTimePicker/NKQuartzClockTimePicker.framework"
fi
# Wait for any backgrounded parallel codesign jobs to finish.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
<filename>toArray.js
var identity = require('./identity')
var each = require('./each')
module.exports = function toArray(obj, fn) {
if (!fn) { fn = identity }
var result = []
each(obj, function(val, key) {
result.push(fn(val, key))
})
return result
}
|
import * as ss from "steelseries"
/**
* Rose gauge construction parameters
*
* @public
*/
export interface RoseParams {
  /**
   * Gauge size (px) - default: smaller dimension between `canvas.width` and `canvas.height`
   */
  size?: number;
  /**
   * Gauge upper title string - default: `""`
   */
  titleString?: string;
  /**
   * Gauge lower string title - default: `""`
   * @remarks
   * if {@link RoseParams.useOdometer|useOdometer} = `true`, it works as odometer title)
   */
  unitString?: string;
  /**
   * Cardinal points labels - default: `["N", "E", "S", "W"]`
   * @remarks
   * Must be 4 or 8 elements long
   */
  pointSymbols?: string[];
  /**
   * Gauge frame design - default: `steelseries.FrameDesign.METAL`
   * @remarks
   * ignored if {@link RoseParams.frameVisible|frameVisible} = `false`
   */
  frameDesign?: ss.FrameDesign;
  /**
   * Draw gauge frame? - default: `true`
   */
  frameVisible?: boolean;
  /**
   * Gauge background color - default: `steelseries.BackgroundColor.DARK_GRAY`
   * @remarks
   * ignored if {@link RoseParams.backgroundVisible|backgroundVisible} = `false`
   */
  backgroundColor?: ss.BackgroundColor;
  /**
   * Draw gauge background? - default: `true`
   */
  backgroundVisible?: boolean;
  /**
   * Gauge foreground type - default: `steelseries.ForegroundType.TYPE1`
   * @remarks ignored if {@link RoseParams.foregroundVisible|foregroundVisible} = `false`
   */
  foregroundType?: ss.ForegroundType;
  /**
   * Draw gauge foreground? - default: `true`
   */
  foregroundVisible?: boolean;
  /**
   * Draw odometer? - default: `false`
   */
  useOdometer?: boolean;
  /**
   * Odometer construction params
   * @remarks ignored if {@link RoseParams.useOdometer|useOdometer} = `false`
   */
  odometerParams?: ss.OdometerParams;
}
|
<reponame>cmosier270/suitesparse-java<filename>cholmod/src/main/java/ssjava/cholmod/SupernodalStrategy.java
package ssjava.cholmod;
/**
* Supernodal strategy
*/
/**
 * Supernodal strategy
 *
 * NOTE(review): ordinals appear to mirror CHOLMOD's Common-&gt;supernodal
 * settings (0 = always simplicial, 1 = automatic choice, 2 = always
 * supernodal) -- confirm against the CHOLMOD user guide before relying on
 * the ordinal values.
 */
public enum SupernodalStrategy
{
CHOLMOD_SIMPLICIAL,
CHOLMOD_AUTO,
CHOLMOD_SUPERNODAL
}
|
// Babel-compiled bundle: three CommonJS modules concatenated (browser shim,
// browsersync plugin, hot-reload helper). Edit the ES module sources, not
// this output.
'use strict';
Object.defineProperty(exports, '__esModule', {
value: true
});
exports.browser = browser;
var _system_js_hot_reloadJs = require('./system_js_hot_reload.js');
// Browser-side entry: kicks off the SystemJS hot-reload hook.
function browser(window, bs) {
(0, _system_js_hot_reloadJs.HotReload)();
}
'use strict';
Object.defineProperty(exports, '__esModule', {
value: true
});
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { 'default': obj }; }
var _fs = require('fs');
var _fs2 = _interopRequireDefault(_fs);
// Pre-built browser payload injected into each connected client.
var browser = _fs2['default'].readFileSync(__dirname + '/../dist/browser.js', 'utf-8');
// BrowserSync plugin: relays "file:changed" events in the hot-jspm namespace
// to connected clients as "jspm:reload" socket messages.
var plugin = function plugin(opts, bs) {
/* noop */
var logger = bs.getLogger('jspm-hot');
var clients = bs.io.of(bs.options.getIn(['socket', 'namespace']));
logger.info('starting!');
bs.events.on('file:changed', function (event) {
if (event.namespace !== 'hot-jspm') {
return;
}
clients.emit('jspm:reload', { path: event.path, type: 'change' });
});
};
var hooks = {
'server:middleware': function serverMiddleware() {
return function (req, res, next) {
console.log('middleware', req.originalUrl);
next();
};
},
'client:js': ';function(window, bs){\n  var browser = ' + browser.toString() + ' browser(window, bs)\n  }(window, window.___browserSync___);'
};
var name = 'hot-jspm';
exports.hooks = hooks;
exports.plugin = plugin;
module.exports['plugin:name'] = name;
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.HotReload = HotReload;
// Placeholder hot-reload hook (currently just logs).
function HotReload() {
console.log("HOT RELOAD");
}
|
#pragma once
// This file was created automatically, do not modify the contents of this file.
PRAGMA_DISABLE_DEPRECATION_WARNINGS
#include "CoreShell.h"
#include "ManageObject.h"
#include "TypeConvertor.h"
#include "Runtime/Engine/Classes/Components/PoseableMeshComponent.h"
#include "ManagePoseableMeshComponent.generated.h"
// Source file C:\Program Files\Epic Games\UE_4.22\Engine\Source\Runtime\Engine\Classes\Components\PoseableMeshComponent.h:17
// Generated managed-code bridge: overrides UPoseableMeshComponent's virtual
// surface so calls can be forwarded to C#, with matching `_Supper__*`
// passthroughs that invoke the native base implementation.
// NOTE(review): "_Supper__" looks like a generator typo for "_Super__"; it is
// consistent throughout the generated code, so leave it to the generator.
UCLASS()
class UNREALDOTNETRUNTIME_API UManagePoseableMeshComponent : public UPoseableMeshComponent, public IManageObject
{
GENERATED_BODY()
public:
bool bIsManageAttach = false;
bool AddWrapperIfNotAttach() override;
void SetManageType(const FDotnetTypeName& ManageType) override;
UPROPERTY(EditAnywhere, BlueprintReadOnly, Category = "C#")
FDotnetTypeName ManageClassName;
virtual void PrestreamTextures(float Seconds, bool bPrioritizeCharacterTextures, int32 CinematicTextureGroups) override;
virtual void SetTextureForceResidentFlag(bool bForceMiplevelsToBeResident) override;
virtual void OnComponentCollisionSettingsChanged() override;
virtual void PutAllRigidBodiesToSleep() override;
virtual void SetAllMassScale(float InMassScale) override;
virtual void SetAllUseCCD(bool InUseCCD) override;
virtual void SetAngularDamping(float InDamping) override;
virtual void SetEnableGravity(bool bGravityEnabled) override;
virtual void SetLinearDamping(float InDamping) override;
virtual void SetNotifyRigidBodyCollision(bool bNewNotifyRigidBodyCollision) override;
virtual void SetSimulatePhysics(bool bSimulate) override;
virtual void UnWeldChildren() override;
virtual void UnWeldFromParent() override;
virtual void UpdatePhysicsToRBChannels() override;
virtual void WakeAllRigidBodies() override;
virtual void DetachFromParent(bool bMaintainWorldPosition, bool bCallModify) override;
virtual void OnAttachmentChanged() override;
virtual void OnHiddenInGameChanged() override;
virtual void OnVisibilityChanged() override;
virtual void PropagateLightingScenarioChange() override;
virtual void UpdateBounds() override;
virtual void UpdatePhysicsVolume(bool bTriggerNotifiers) override;
virtual void Activate(bool bReset) override;
virtual void BeginPlay() override;
virtual void CreateRenderState_Concurrent() override;
virtual void Deactivate() override;
virtual void DestroyComponent(bool bPromoteChildren) override;
virtual void DestroyRenderState_Concurrent() override;
virtual void InitializeComponent() override;
virtual void InvalidateLightingCacheDetailed(bool bInvalidateBuildEnqueuedLighting, bool bTranslationOnly) override;
virtual void OnActorEnableCollisionChanged() override;
virtual void OnComponentCreated() override;
virtual void OnComponentDestroyed(bool bDestroyingHierarchy) override;
virtual void OnCreatePhysicsState() override;
virtual void OnDestroyPhysicsState() override;
virtual void OnRegister() override;
virtual void OnRep_IsActive() override;
virtual void OnUnregister() override;
virtual void RegisterComponentTickFunctions(bool bRegister) override;
virtual void SendRenderDynamicData_Concurrent() override;
virtual void SendRenderTransform_Concurrent() override;
virtual void SetActive(bool bNewActive, bool bReset) override;
virtual void SetAutoActivate(bool bNewAutoActivate) override;
virtual void SetComponentTickEnabled(bool bEnabled) override;
virtual void SetComponentTickEnabledAsync(bool bEnabled) override;
virtual void ToggleActive() override;
virtual void UninitializeComponent() override;
virtual void BeginDestroy() override;
virtual void FinishDestroy() override;
virtual void MarkAsEditorOnlySubobject() override;
virtual void PostCDOContruct() override;
virtual void PostEditImport() override;
virtual void PostInitProperties() override;
virtual void PostLoad() override;
virtual void PostNetReceive() override;
virtual void PostRepNotifies() override;
virtual void PostSaveRoot(bool bCleanupIsRequired) override;
virtual void PreDestroyFromReplication() override;
virtual void PreNetReceive() override;
virtual void ShutdownAfterError() override;
virtual void CreateCluster() override;
virtual void OnClusterMarkedAsPendingKill() override;
// Base-class passthroughs callable from the managed side.
void _Supper__PrestreamTextures(float Seconds, bool bPrioritizeCharacterTextures, int32 CinematicTextureGroups);
void _Supper__SetTextureForceResidentFlag(bool bForceMiplevelsToBeResident);
void _Supper__OnComponentCollisionSettingsChanged();
void _Supper__PutAllRigidBodiesToSleep();
void _Supper__SetAllMassScale(float InMassScale);
void _Supper__SetAllUseCCD(bool InUseCCD);
void _Supper__SetAngularDamping(float InDamping);
void _Supper__SetEnableGravity(bool bGravityEnabled);
void _Supper__SetLinearDamping(float InDamping);
void _Supper__SetNotifyRigidBodyCollision(bool bNewNotifyRigidBodyCollision);
void _Supper__SetSimulatePhysics(bool bSimulate);
void _Supper__UnWeldChildren();
void _Supper__UnWeldFromParent();
void _Supper__UpdatePhysicsToRBChannels();
void _Supper__WakeAllRigidBodies();
void _Supper__DetachFromParent(bool bMaintainWorldPosition, bool bCallModify);
void _Supper__OnAttachmentChanged();
void _Supper__OnHiddenInGameChanged();
void _Supper__OnVisibilityChanged();
void _Supper__PropagateLightingScenarioChange();
void _Supper__UpdateBounds();
void _Supper__UpdatePhysicsVolume(bool bTriggerNotifiers);
void _Supper__Activate(bool bReset);
void _Supper__BeginPlay();
void _Supper__CreateRenderState_Concurrent();
void _Supper__Deactivate();
void _Supper__DestroyComponent(bool bPromoteChildren);
void _Supper__DestroyRenderState_Concurrent();
void _Supper__InitializeComponent();
void _Supper__InvalidateLightingCacheDetailed(bool bInvalidateBuildEnqueuedLighting, bool bTranslationOnly);
void _Supper__OnActorEnableCollisionChanged();
void _Supper__OnComponentCreated();
void _Supper__OnComponentDestroyed(bool bDestroyingHierarchy);
void _Supper__OnCreatePhysicsState();
void _Supper__OnDestroyPhysicsState();
void _Supper__OnRegister();
void _Supper__OnRep_IsActive();
void _Supper__OnUnregister();
void _Supper__RegisterComponentTickFunctions(bool bRegister);
void _Supper__SendRenderDynamicData_Concurrent();
void _Supper__SendRenderTransform_Concurrent();
void _Supper__SetActive(bool bNewActive, bool bReset);
void _Supper__SetAutoActivate(bool bNewAutoActivate);
void _Supper__SetComponentTickEnabled(bool bEnabled);
void _Supper__SetComponentTickEnabledAsync(bool bEnabled);
void _Supper__ToggleActive();
void _Supper__UninitializeComponent();
void _Supper__BeginDestroy();
void _Supper__FinishDestroy();
void _Supper__MarkAsEditorOnlySubobject();
void _Supper__PostCDOContruct();
void _Supper__PostEditImport();
void _Supper__PostInitProperties();
void _Supper__PostLoad();
void _Supper__PostNetReceive();
void _Supper__PostRepNotifies();
void _Supper__PostSaveRoot(bool bCleanupIsRequired);
void _Supper__PreDestroyFromReplication();
void _Supper__PreNetReceive();
void _Supper__ShutdownAfterError();
void _Supper__CreateCluster();
void _Supper__OnClusterMarkedAsPendingKill();
};
PRAGMA_ENABLE_DEPRECATION_WARNINGS
|
//
// wrapper.cpp
// RoutemakingGUI
//
// Created by <NAME> on 7/27/16.
// Copyright © 2016 <NAME>. All rights reserved.
//
#include <stdio.h>
#include <PathfinderController.h>
#include <pathfinding/LocalPathfinder.h>
#include <pathfinding/Astar.h>
// Builds a minimal Boat record from raw AIS fields, marking position,
// heading and speed-over-ground as valid.
// sog_knots: speed over ground in knots (per the parameter name).
Boat SimpleAISBoat(double lat, double lon, int trueHeading, float sog_knots,
                   std::chrono::steady_clock::time_point timeReceived){
    Boat simpleBoat = Boat();
    simpleBoat.m_latitude = lat;
    simpleBoat.m_longitude = lon;
    simpleBoat.m_trueHeading = trueHeading;
    simpleBoat.m_sog = sog_knots;
    simpleBoat.m_timeReceived = timeReceived;
    // Flag every populated field as valid so consumers trust the values.
    simpleBoat.m_positionValid = true;
    simpleBoat.m_trueHeadingValid = true;
    simpleBoat.m_sogValid = true;
    return simpleBoat;
}
// Per-timestep tile state exported across the C ABI.
struct DynamicTile {
    Type type;
    int obstacle_risk;
};
// Grid cell: static type/coords plus a caller-owned array of 100 DynamicTiles
// (one per timestep -- see the copy loop in GetPath).
struct ATile {
    Type type;
    int x;
    int y;
    DynamicTile* tiles;
};
// Fixed 62x29 grid matching LocalPathfinder's matrix_ dimensions.
typedef ATile Matrix[62][29];
// C-ABI entry point: runs the local pathfinder and returns a 62x29 grid of
// ATile, each carrying 100 DynamicTile timesteps.
// boats: flat array of numBoats records of 4 ints each (x, y, heading, sog).
// Ownership of the returned arr (and every arr[i] / arr[x][y].tiles buffer)
// passes to the caller, which must free them.
extern "C" ATile** GetPath(int startX, int startY, int targetX, int targetY, int* boats, int numBoats) {
    LocalPathfinder pathfinder = LocalPathfinder(49.2984988, -123.276173, 10, 49.45584988, -122.750173);
    // Decode the flat boat records into Boat objects positioned on the grid.
    std::list<Boat> theBoats;
    for (int i = 0; i < numBoats * 4; i += 4) {
        std::shared_ptr<Tile> boatTile = pathfinder.matrix_[boats[i]][boats[i + 1]];
        Boat boat = SimpleAISBoat(boatTile->lat_, boatTile->lng_, boats[i + 2], boats[i + 3], std::chrono::steady_clock::now());
        theBoats.push_back(boat);
    }
    pathfinder.SetStart(startX, startY, 0);
    pathfinder.SetTarget(targetX, targetY);
    pathfinder.SetBoats(theBoats, theBoats);
    pathfinder.Run();
    pathfinder.VisualizePath();
    // Allocate only the row/column structure here. The original code also
    // pre-allocated arr[i][j].tiles = new DynamicTile[100] for every cell and
    // then overwrote that pointer in the copy loop below, leaking all 62*29
    // buffers; the per-cell buffer is now allocated exactly once.
    ATile** arr = new ATile*[62];
    for(int i = 0; i < 62; ++i) {
        arr[i] = new ATile[29];
    }
    for (int y = 0; y < 29; y++) { // pathfinder.y_len
        for (int x = 0; x < 62; x++) { // pathfinder.x_len
            std::shared_ptr<Tile> pTile = pathfinder.matrix_[x][y];
            // Copy the per-timestep dynamic state; this buffer is returned to
            // the caller via the ATile.
            DynamicTile *dTiles = new DynamicTile[100];
            for (int i = 0; i < 100; i++) {
                dTiles[i].obstacle_risk = pTile->t_[i].obstacle_risk_;
                dTiles[i].type = pTile->t_[i].type_;
            }
            ATile tile = {pTile->type_, pTile->x_, pTile->y_, dTiles};
            arr[x][y] = tile;
        }
    }
    return arr;
}
|
/** @module url */ /** for typedoc */
import {extend, bindFunctions, IInjectable, removeFrom} from "../common/common";
import {isFunction, isString, isDefined, isArray} from "../common/predicates";
import {UrlMatcher} from "./urlMatcher";
import {services, $InjectorLike, LocationServices} from "../common/coreservices";
import {UrlMatcherFactory} from "./urlMatcherFactory";
import {StateParams} from "../params/stateParams";
import {RawParams} from "../params/interface";
/** @hidden Module-scoped handle to the LocationServices singleton, captured at load time */
let $location = services.location;
/**
 * @hidden
 * Returns a static string that is a prefix of every string the RegExp can match.
 * Used to pre-filter rules without running the full RegExp.
 */
function regExpPrefix(re: RegExp) {
  // From the start anchor, capture the leading run of literal characters:
  // either escaped non-alphanumerics (\.) or plain chars that are not regex
  // metacharacters. Stops at the first construct that could vary.
  let prefix = /^\^((?:\\[^a-zA-Z0-9]|[^\\\[\]\^$*+?.()|{}]+)*)/.exec(re.source);
  // Unescape (\x -> x); no static prefix when the pattern is not ^-anchored.
  return (prefix != null) ? prefix[1].replace(/\\(.)/g, "$1") : '';
}
/**
 * @hidden
 * Interpolates matched values into a String.replace()-style pattern.
 * `$1`..`$99` are replaced by the corresponding capture group; `$$` by the
 * whole match (group 0).
 */
function interpolate(pattern: string, match: RegExpExecArray) {
  // The `g` flag is required so that EVERY placeholder in the pattern is
  // substituted; without it (the previous behavior) a redirect pattern such
  // as "/a/$1/$2" would keep a literal "$2" in the result.
  return pattern.replace(/\$(\$|\d{1,2})/g, function (m, what) {
    return match[what === '$' ? 0 : Number(what)];
  });
}
/**
 * @hidden
 * Invokes `handler` (injectably) when `match` is truthy.
 * Returns false when there was no match, the handler's result when it
 * returned one, or true when the handler returned undefined.
 */
function handleIfMatch($injector: $InjectorLike, $stateParams: RawParams, handler: IInjectable, match: RawParams) {
  if (!match) return false;
  const locals = { $match: match, $stateParams: $stateParams };
  const result = $injector.invoke(handler, handler, locals);
  return isDefined(result) ? result : true;
}
/**
 * @hidden
 * Prepends the application's base href to `url` according to the routing mode.
 * html5 mode drops the trailing slash of the base; absolute (hashbang) mode
 * drops the leading slash instead.
 */
function appendBasePath(url: string, isHtml5: boolean, absolute: boolean): string {
  const baseHref = services.locationConfig.baseHref();
  if (baseHref === '/') return url;
  if (isHtml5) return baseHref.slice(0, -1) + url;
  return absolute ? baseHref.slice(1) + url : url;
}
// TODO: Optimize groups of rules with non-empty prefix into some sort of decision tree
/**
 * @hidden
 * Runs each registered rule against the current location, in order; the first
 * rule that handles the url wins. A string result is treated as a redirect.
 * The `otherwise` handler runs only when no rule matched.
 */
function update(rules: Function[], otherwiseFn: Function, evt?: any) {
  // Respect preventDefault() called on the location-change event.
  if (evt && evt.defaultPrevented) return;

  // Returns true when `rule` handled the url (redirecting if it returned a string).
  function check(rule: Function) {
    let handled = rule(services.$injector, $location);
    if (!handled) return false;
    if (isString(handled)) {
      // A string result is a redirect target: replace the current url.
      $location.setUrl(handled, true);
    }
    return true;
  }
  let n = rules.length;
  for (let i = 0; i < n; i++) {
    if (check(rules[i])) return;
  }
  // always check otherwise last to allow dynamic updates to the set of rules
  if (otherwiseFn) check(otherwiseFn);
}
/**
 * Manages rules for client-side URL
 *
 * This class manages the router rules for what to do when the URL changes.
 */
export class UrlRouterProvider {
  /** @hidden Registered url rules, checked in registration order */
  rules: Function[] = [];
  /** @hidden Handler invoked when no registered rule matches the url */
  otherwiseFn: ($injector: $InjectorLike, $location: LocationServices) => string;
  /** @hidden When true, the initial url sync is deferred until [[UrlRouter.listen]] is called */
  interceptDeferred = false;
  /** @hidden */
  private $urlMatcherFactory: UrlMatcherFactory;
  /** @hidden */
  private $stateParams: StateParams;

  constructor($urlMatcherFactory: UrlMatcherFactory, $stateParams: StateParams) {
    this.$urlMatcherFactory = $urlMatcherFactory;
    this.$stateParams = $stateParams;
  }

  /**
   * Registers a url handler function.
   *
   * Registers a low level url handler (a `rule`). A rule detects specific URL patterns and returns
   * a redirect, or performs some action.
   *
   * If a rule returns a string, the URL is replaced with the string, and all rules are fired again.
   *
   * @example
   * ```js
   * app.config(function ($urlRouterProvider) {
   *   // Here's an example of how you might allow case insensitive urls
   *   $urlRouterProvider.rule(function ($injector, $location) {
   *     var path = $location.path(), normalized = path.toLowerCase();
   *     if (path !== normalized) return normalized;
   *   });
   * });
   * ```
   *
   * @param rule
   *   Handler function that takes `$injector` and `$location` services as arguments.
   *   You can use them to detect a url and return a different url as a string.
   *
   * @return [[$urlRouterProvider]] (`this`)
   */
  rule(rule: ($injector: $InjectorLike, $location: LocationServices) => string): UrlRouterProvider {
    if (!isFunction(rule)) throw new Error("'rule' must be a function");
    this.rules.push(rule);
    return this;
  }

  /**
   * Remove a rule previously registered
   *
   * @param rule the matcher rule that was previously registered using [[rule]]
   * @return true if the rule was found (and removed)
   */
  removeRule(rule): boolean {
    return this.rules.length !== removeFrom(this.rules, rule).length;
  }

  /**
   * Defines the path or behavior to use when no url can be matched.
   *
   * @example
   * ```js
   * app.config(function ($urlRouterProvider) {
   *   // redirect unmatched urls to /index
   *   $urlRouterProvider.otherwise('/index');
   *
   *   // or compute the target with a function rule
   *   $urlRouterProvider.otherwise(function ($injector, $location) {
   *     return '/a/valid/url';
   *   });
   * });
   * ```
   *
   * @param rule
   *   The url path you want to redirect to or a function rule that returns the url path or performs a `$state.go()`.
   *   The function version is passed two params: `$injector` and `$location` services, and should return a url string.
   *
   * @return {object} `$urlRouterProvider` - `$urlRouterProvider` instance
   */
  otherwise(rule: string | (($injector: $InjectorLike, $location: LocationServices) => string)): UrlRouterProvider {
    if (!isFunction(rule) && !isString(rule)) throw new Error("'rule' must be a string or function");
    // Wrap a constant string so the update loop always deals with functions.
    this.otherwiseFn = isString(rule) ? () => rule : rule;
    return this;
  }

  /**
   * Registers a handler for a given url matching.
   *
   * If the handler is a string, it is treated as a redirect and interpolated
   * according to the syntax of `what` (String.replace()-style for a RegExp,
   * [[UrlMatcher]] pattern otherwise).
   *
   * If the handler is a function, it is injectable and invoked when `$location`
   * matches; the match object may be injected as `$match`. The handler can return:
   *
   * - **falsy** to indicate the rule didn't match after all; `$urlRouter` keeps
   *   trying other rules.
   * - **string** which is treated as a redirect and passed to `$location.url()`
   * - **void** or any **truthy** value to tell `$urlRouter` the url was handled.
   *
   * @param what A pattern string to match, compiled as a [[UrlMatcher]], or a RegExp/UrlMatcher.
   * @param handler The path (or function that returns a path) that you want to redirect your user to.
   * @param ruleCallback [optional] A callback that receives the `rule` registered with [[UrlMatcher.rule]]
   *
   * Note: the handler may also invoke arbitrary code, such as `$state.go()`
   */
  when(what: (RegExp|UrlMatcher|string), handler: string|IInjectable, ruleCallback = function(rule) {}) {
    let {$urlMatcherFactory, $stateParams} = this;
    let redirect, handlerIsString = isString(handler);
    // @todo Queue this
    if (isString(what)) what = $urlMatcherFactory.compile(<string> what);
    if (!handlerIsString && !isFunction(handler) && !isArray(handler))
      throw new Error("invalid 'handler' in when()");

    // One rule-building strategy per supported `what` kind.
    let strategies = {
      matcher: function (_what, _handler) {
        if (handlerIsString) {
          // A string handler is itself a matcher pattern; redirect by formatting it.
          redirect = $urlMatcherFactory.compile(_handler);
          _handler = ['$match', redirect.format.bind(redirect)];
        }
        return extend(function () {
          return handleIfMatch(services.$injector, $stateParams, _handler, _what.exec($location.path(), $location.search(), $location.hash()));
        }, {
          prefix: isString(_what.prefix) ? _what.prefix : ''
        });
      },
      regex: function (_what, _handler) {
        if (_what.global || _what.sticky) throw new Error("when() RegExp must not be global or sticky");
        if (handlerIsString) {
          // A string handler is a replace()-style pattern interpolated from the match.
          redirect = _handler;
          _handler = ['$match', ($match) => interpolate(redirect, $match)];
        }
        return extend(function () {
          return handleIfMatch(services.$injector, $stateParams, _handler, _what.exec($location.path()));
        }, {
          prefix: regExpPrefix(_what)
        });
      }
    };

    let check = {
      matcher: $urlMatcherFactory.isMatcher(what),
      regex: what instanceof RegExp
    };
    // `let` keeps the loop variable block-scoped (previously `var`).
    for (let n in check) {
      if (check[n]) {
        let rule = strategies[n](what, handler);
        ruleCallback(rule);
        return this.rule(rule);
      }
    }
    throw new Error("invalid 'what' in when()");
  }

  /**
   * Disables monitoring of the URL.
   *
   * Call this method before UI-Router has bootstrapped; it stops UI-Router from
   * performing the initial url sync. This is useful to perform asynchronous
   * initialization before the router starts; once complete, call [[listen]] to
   * start watching and synchronizing the URL.
   *
   * @example
   * ```js
   * app.config(function ($urlRouterProvider) {
   *   $urlRouterProvider.deferIntercept();
   * })
   * app.run(function (MyService, $urlRouter, $http) {
   *   $http.get("/stuff").then(function(resp) {
   *     MyService.doStuff(resp.data);
   *     $urlRouter.listen();
   *     $urlRouter.sync();
   *   });
   * });
   * ```
   *
   * @param defer Indicates whether to defer location change interception. Passing
   *        no parameter is equivalent to `true`.
   */
  deferIntercept(defer?: boolean) {
    if (defer === undefined) defer = true;
    this.interceptDeferred = defer;
  }
}
export class UrlRouter {
  /** @hidden The url captured by the last `update(true)` call */
  private location: string;
  /** @hidden The deregistration function for the url-change listener */
  private listener: Function;
  /** @hidden */
  private urlRouterProvider: UrlRouterProvider;

  /** @hidden */
  constructor(urlRouterProvider: UrlRouterProvider) {
    this.urlRouterProvider = urlRouterProvider;
    bindFunctions(UrlRouter.prototype, this, this);
  }

  /**
   * Checks the current URL for a matching rule
   *
   * Triggers the same update that happens when the address bar url changes
   * (aka `$locationChangeSuccess`). Useful when you `preventDefault()` the
   * `$locationChangeSuccess` event, run custom logic (route protection, auth,
   * config, redirection, etc) and then proceed by calling `$urlRouter.sync()`.
   *
   * @example
   * ```js
   * angular.module('app', ['ui.router'])
   *   .run(function($rootScope, $urlRouter) {
   *     $rootScope.$on('$locationChangeSuccess', function(evt) {
   *       evt.preventDefault();
   *       var meetsRequirement = ...
   *       if (meetsRequirement) $urlRouter.sync();
   *     });
   *   });
   * ```
   */
  sync() {
    const provider = this.urlRouterProvider;
    update(provider.rules, provider.otherwiseFn);
  }

  /**
   * Starts listening for URL changes
   *
   * Call this sometime after calling [[deferIntercept]] to start monitoring the url.
   * Idempotent: the listener is registered only once.
   */
  listen(): Function {
    if (!this.listener) {
      this.listener = $location.onChange(evt =>
          update(this.urlRouterProvider.rules, this.urlRouterProvider.otherwiseFn, evt));
    }
    return this.listener;
  }

  /**
   * Internal API.
   * `update(true)` records the current url; `update()` restores the recorded
   * url (replacing history) when the browser url has drifted from it.
   */
  update(read?: boolean) {
    if (read) {
      this.location = $location.path();
      return;
    }
    const current = $location.path();
    if (current === this.location) return;
    $location.setUrl(this.location, true);
  }

  /**
   * Internal API.
   *
   * Pushes a new location to the browser history.
   *
   * @param urlMatcher used to format the target url from `params`
   * @param params parameter values for the matcher
   * @param options `{ replace }` replaces the current history entry instead of pushing
   */
  push(urlMatcher: UrlMatcher, params: StateParams, options: { replace?: (string|boolean) }) {
    let replace = options && !!options.replace;
    $location.setUrl(urlMatcher.format(params || {}), replace);
  }

  /**
   * Builds and returns a URL with interpolated parameters
   *
   * @example
   * ```js
   * $bob = $urlRouter.href(new UrlMatcher("/about/:person"), { person: "bob" });
   * // $bob == "/about/bob";
   * ```
   *
   * @param urlMatcher The [[UrlMatcher]] object which is used as the template of the URL to generate.
   * @param params An object of parameter values to fill the matcher's required parameters.
   * @param options Options object; `absolute: true` generates an absolute url,
   *        e.g. "http://www.example.com/fullurl".
   *
   * @returns Returns the fully compiled URL, or `null` if `params` fail validation against `urlMatcher`
   */
  href(urlMatcher: UrlMatcher, params: any, options: { absolute: boolean }): string {
    if (!urlMatcher.validates(params)) return null;

    options = options || { absolute: false };
    const cfg = services.locationConfig;
    const html5 = cfg.html5Mode();

    let url = urlMatcher.format(params);
    if (!html5 && url !== null) {
      // Hashbang mode: prefix with "#" plus the configured hash prefix.
      url = "#" + cfg.hashPrefix() + url;
    }
    url = appendBasePath(url, html5, options.absolute);

    if (!options.absolute || !url) return url;

    const slash = (!html5 && url) ? '/' : '';
    let port: any = cfg.port();
    port = (port === 80 || port === 443) ? '' : ':' + port;
    return [cfg.protocol(), '://', cfg.host(), port, slash, url].join('');
  }
}
|
#
# Copyright (c) 2016, Nimbix, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Nimbix, Inc.
#
from collections import OrderedDict
from mimetypes import MimeTypes
import simplejson as json
import os
import base64
class AppDef():
    '''
    Builder for a JARVICE application definition (AppDef) JSON document.
    See method documentation below.
    '''

    def __init__(self):
        # Per-instance document. Previously this was a class attribute, so
        # every AppDef instance shared (and mutated) the same OrderedDict.
        self.data = OrderedDict()

    def new(self, name, desc, author, licensed=True, machines=['*'],
            classifications=['Uncategorized'],
            vaults=['FILE', 'BLOCK', 'OBJECT'], image=None, templatefile=None):
        '''
        Creates a new AppDef initialized with meta data
        Required parameters:
            name(string): the short name of the application
            desc(string): the description of what the application does
            author(string): the author or vendor name for this application
        Optional parameters:
            licensed(bool): True if the application contains license
                            (if applicable);
                            default: True
            machines(list): list of machine types or wildcards
                            the application can run on;
                            default: *
            classifications(list): list of classifications/categories the
                                   application belongs under;
                                   default: Uncategorized
            vaults(list): list of vault types supported;
                          default: FILE, BLOCK, and OBJECT
                          also valid: NONE (for no vault) and
                          BLOCK_ARRAY (for distributed FS)
            image(string): graphic file with icon to embed (max 64kb)
                           default: no graphic
                           must be either png or jpg
            templatefile(string): if set, load an existing JSON file as
                                  a baseline, rather than starting with
                                  a blank object
        '''
        assert(type(machines) == list and type(classifications) == list
               and type(vaults) == list)
        if templatefile:
            self.load(templatefile)
        else:
            self.data = OrderedDict()
        if 'commands' not in self.data:
            self.data['commands'] = OrderedDict()
        self.data['name'] = name
        self.data['description'] = desc
        # Fixed: the author field was previously assigned the description.
        self.data['author'] = author
        self.data['licensed'] = licensed
        self.data['machines'] = machines
        self.data['classifications'] = classifications
        self.data['vault-types'] = vaults
        if image:
            self.image(image)
        elif 'image' not in self.data:
            # Empty placeholder image entry when no graphic is supplied.
            self.data['image'] = OrderedDict()
            self.data['image']['type'] = 'image/png'
            self.data['image']['data'] = ''

    def load(self, filename):
        '''
        Creates an AppDef from an existing JSON file
        Required parameters:
            filename(string): file name to load
        '''
        with open(filename, 'r') as f:
            # object_pairs_hook preserves key order from the source file.
            self.data = json.load(f, object_pairs_hook=OrderedDict)

    def image(self, filename):
        '''
        Encodes application image into AppDef
        Required parameters:
            filename(string): image file name - must be png or jpg and <=64K
        '''
        m = MimeTypes()
        mime, other = m.guess_type(filename)
        assert(mime == 'image/png' or mime == 'image/jpeg')
        assert(os.stat(filename).st_size <= 65536)
        self.data['image'] = OrderedDict()
        self.data['image']['type'] = mime
        # Read in binary mode (images are not text) and base64-encode.
        # base64.encodestring was removed in Python 3.9; encodebytes is the
        # supported equivalent. Decode to str so the document is JSON-safe.
        with open(filename, 'rb') as f:
            self.data['image']['data'] = base64.encodebytes(
                f.read()).decode('ascii')

    def cmd(self, id, desc, path, interactive=True, name=None, args=[]):
        '''
        Adds a command with some optional constant positional arguments.
        A command maps an executable (and arguments) to an API endpoint with
        optional parameters.
        Required parameters:
            id(string): command ID (should not have spaces)
            desc(string): the description of what the command does
            path(string): executable path for the command
                          (currently recorded by callers only; not stored
                          in the parameter block)
        Optional parameters:
            interactive(bool): True if users should be able to reach the
                               runtime environment by public IP while running;
                               default: True
            name(string): user-facing short name of the command
                          default: same as id
            args(list): positional arguments for command
                        default: none
        Notes:
            - for security, set interactive=False for any command which does
              not require user interaction while running; stdout and stderr
              will still be available to user via API and web interface
            - positional arguments should be separated as if using exec()
              e.g.:
                ls -l /data
              should be added like this:
                cmd(<id>, <desc>, <path>, args=['-l', '/data'])
        '''
        self.data['commands'][id] = OrderedDict()
        self.data['commands'][id]['description'] = desc
        self.data['commands'][id]['interactive'] = interactive
        self.data['commands'][id]['name'] = name if name else id
        params = OrderedDict()
        # Each positional arg becomes a required CONST parameter named
        # __arg0, __arg1, ... so ordering is preserved.
        index = 0
        if args:
            for i in args:
                arg = OrderedDict()
                argname = '__arg%d' % index
                index = index + 1
                arg['name'] = argname
                arg['description'] = argname
                arg['type'] = 'CONST'
                arg['value'] = i
                arg['positional'] = True
                arg['required'] = True
                params[argname] = arg
        self.data['commands'][id]['parameters'] = params

    def param(self, cmd, flag, paramdef):
        '''
        Adds a parameter to a command.
        Required parameters:
            cmd(string): command name to add to (from cmd())
            flag(string): flag name of parameter
            paramdef(string or dict): parameter definition (JSON or dict)
        For parameter dictionary specification please see:
        https://www.nimbix.net/jarvice-application-deployment-guide/
        '''
        if type(paramdef) == str:
            paramdef = json.loads(paramdef)
        self.data['commands'][cmd]['parameters'][flag] = paramdef

    def dump(self, f=None):
        '''
        Dumps an appdef object as JSON either to a stream or a string.
        Optional parameters:
            f(stream): stream to dump AppDef to;
                       default: return as string instead
        Returns:
            string without pretty-printing if f=None,
            or AppDef dumped if f is set
        '''
        if f is None:
            return json.dumps(self.data)
        else:
            json.dump(self.data, f, indent=4)
|
import gql from 'graphql-tag';
/**
 * Fetches an instance with its command logs and log entries.
 * Selected fields must stay in sync with
 * GetInstanceLogsQueryInstanceFieldInterface; `formattedTimestamp` is
 * declared there and is now selected here as well.
 */
export const getInstanceLogsQueryGql = gql`
    query($id: String!) {
        instance(id: $id) {
            id
            name
            commandLogs {
                id
                description
                createdAt
                completedAt
                failedAt
                entries {
                    id
                    timestamp
                    formattedTimestamp
                    message
                }
            }
        }
    }
`;
/**
 * Shape of the `instance` field returned by the instance-logs query.
 * Field list must be kept in sync with the selection set of
 * getInstanceLogsQueryGql.
 */
export interface GetInstanceLogsQueryInstanceFieldInterface {
    id: string;
    name: string;
    /** Command logs attached to the instance, each with its log entries. */
    commandLogs: {
        id: string;
        description: string;
        createdAt: string;
        completedAt: string;
        failedAt: string;
        entries: {
            id: string;
            timestamp: string;
            formattedTimestamp: string;
            message: string;
        }[];
    }[];
}

/** Top-level result shape for the instance-logs query. */
export interface GetInstanceLogsQueryInterface {
    instance: GetInstanceLogsQueryInstanceFieldInterface;
}
|
<reponame>Crisreyda/AudioVisualCrowdCounting
import os
import numpy as np
import torch
import argparse
import time
from config import cfg
# args = argparse.ArgumentParser()
# args.add_argument('--net_name', type=str, default='', help='name of net')
# args.add_argument('--resume', type=int, default=0, help='whether to resume model')
# args.add_argument('--resume_path', type=str, default='')
# args.add_argument('--settings', type=str, default='')
#
# args.add_argument('--is_noise', type=int, default=0)
# args.add_argument('--brightness', type=float, default=1.0)
# args.add_argument('--noise_sigma', type=float, default=25)
# args.add_argument('--longest_side', type=int, default=1024)
# args.add_argument('--black_area_ratio', type=float, default=0)
# args.add_argument('--is_random', type=int, default=0)
#
# opt = args.parse_args()
#
# cfg.NET = opt.net_name
# cfg.RESUME = (opt.resume == 1)
# cfg.RESUME_PATH = os.path.join('../trained_models/exp', opt.resume_path + '/latest_state.pth')
# cfg.SETTINGS = opt.settings
#
# now = time.strftime("%m-%d_%H-%M", time.localtime())
# cfg.EXP_NAME = cfg.SETTINGS + '_' + cfg.DATASET + '_' + cfg.NET + '_' + str(cfg.LR)
#------------prepare environment------------
# Seed every RNG source for reproducibility when cfg.SEED is set.
seed = cfg.SEED
if seed is not None:
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

# Pin the process to a single GPU when exactly one is configured.
gpus = cfg.GPU_ID
if len(gpus)==1:
    torch.cuda.set_device(gpus[0])

# cudnn autotuner: fastest conv algorithms for fixed-size inputs.
torch.backends.cudnn.benchmark = True
#------------prepare data loader------------
# Select the dataset-specific loader and settings module.
# NOTE: these comparisons previously used `is`, which tests object identity,
# not string equality; whether two equal string literals are the same object
# is an implementation detail, so branches could silently fail to match.
# `==` is the correct operator (Python >= 3.8 warns about the old form).
data_mode = cfg.DATASET
if data_mode == 'SHHA':
    from datasets.SHHA.loading_data import loading_data
    from datasets.SHHA.setting import cfg_data
elif data_mode == 'SHHB':
    from datasets.SHHB.loading_data import loading_data
    from datasets.SHHB.setting import cfg_data
elif data_mode == 'QNRF':
    from datasets.QNRF.loading_data import loading_data
    from datasets.QNRF.setting import cfg_data
elif data_mode == 'UCF50':
    from datasets.UCF50.loading_data import loading_data
    from datasets.UCF50.setting import cfg_data
elif data_mode == 'WE':
    from datasets.WE.loading_data import loading_data
    from datasets.WE.setting import cfg_data
elif data_mode == 'GCC':
    from datasets.GCC.loading_data import loading_data
    from datasets.GCC.setting import cfg_data
elif data_mode == 'Mall':
    from datasets.Mall.loading_data import loading_data
    from datasets.Mall.setting import cfg_data
elif data_mode == 'UCSD':
    from datasets.UCSD.loading_data import loading_data
    from datasets.UCSD.setting import cfg_data
elif data_mode == 'AC':  # Qingzhong
    from datasets.AC.loading_data import loading_data
    from datasets.AC.setting import cfg_data

# cfg_data.IS_NOISE = (opt.is_noise == 1)
# cfg_data.BRIGHTNESS = opt.brightness
# cfg_data.NOISE_SIGMA = opt.noise_sigma
# cfg_data.LONGEST_SIDE = opt.longest_side
# cfg_data.BLACK_AREA_RATIO = opt.black_area_ratio
# cfg_data.IS_RANDOM = (opt.is_random == 1)
print(cfg, cfg_data)
#------------Prepare Trainer------------
# Pick the trainer implementation matching the network's loss/output layout.
net = cfg.NET

# Networks with a single loss and a single output share the default trainer.
_SINGLE_LOSS_NETS = (
    'MCNN', 'AlexNet', 'VGG', 'VGG_DECODER', 'Res50', 'Res101', 'CSRNet', 'Res101_SFCN',
    'CSRNet_IN', 'CSRNet_Audio', 'CANNet', 'CANNet_Audio', 'CSRNet_Audio_Concat', 'CANNet_Audio_Concat',
    'CSRNet_Audio_Guided', 'CANNet_Audio_Guided',
)

if net in _SINGLE_LOSS_NETS:
    from trainer import Trainer
elif net in ('SANet', 'SANet_Audio'):
    from trainer_for_M2TCC import Trainer  # double losses but single output
elif net in ('CMTL',):
    from trainer_for_CMTL import Trainer  # double losses and double outputs
elif net in ('PCCNet',):
    from trainer_for_M3T3OCC import Trainer

#------------Start Training------------
pwd = os.path.split(os.path.realpath(__file__))[0]
cc_trainer = Trainer(loading_data, cfg_data, pwd)
cc_trainer.forward()
|
#include<iostream>
#include<fstream>
#include<ctime>
#include<cstring>
#include<sstream>
using namespace std;
// Converts any stream-insertable value to its string representation.
template <typename T>
std::string toString(T value){
    std::ostringstream out;
    out << value;
    return out.str();
}
void path_to_neighbor(double** arr,double** map, int const* coor, int const* bound,bool** visited,string** pathway);
int main(){
ifstream input("navigation_in.txt");
int round, bound[2],init[2],dest[2];
bool **is_used;
double** path,**map;
double inp;
string name,**way,in;
input>>round;
for(int r=0;r<round;r++ ){
input>>name>>bound[0]>>bound[1];
input>>init[0]>>init[1]>>dest[0]>>dest[1];
if((init[0]||init[1]||dest[0]||dest[1])<1||((init[0]||dest[0])>bound[0])||((init[1]||dest[1])>bound[1])){
cout<<"INVALID_INPUT(invalid_starting/ending_position):"<<name<<endl;
}
cout<<name<<","<<bound[0]<<","<<bound[1]<<endl;
cout<<"("<<init[0]<<","<<init[1]<<"),("<<dest[0]<<","<<dest[1]<<")\n";
map= new double *[bound[1]];
for(int i=0;i<bound[1];i++)
map[i]= new double[bound[0]];
getline(input,in);
getline(input,in);
getline(input,in);
int y=1;
for(int i = 0;i<in.length();i++){
if(in[i]==' ')
y++;
}
if(y<bound[0]*bound[1]||y>bound[0]*bound[1]){
cout<<"INVALID_INPUT(invalid_grid size):"<<name<<endl;
continue;
}
int k=0,l=0;
cout<<in<<endl;
istringstream ss(in);
while(ss>>inp){
map[k][l]=inp;
l++;
if(l==bound[1]){
l=0;
k++;
}
if(k==bound[0]) break;
}
for(int i =0 ; i<bound[1];i++){
cout<<"|";
for(int j=0; j<bound[0];j++){
cout<<map[i][j]<<"|";
}
cout<<endl;
}
for(int q=0;q<2;q++){
init[q]-=1;
dest[q]-=1;
}
is_used= new bool *[bound[1]];
for(int i =0; i<bound[1];i++)
is_used[i]=new bool [bound[0]];
path= new double *[bound[1]];
for(int i =0; i<bound[1];i++)
path[i]=new double [bound[0]];
for(int i =0 ; i<bound[1];i++)
for(int j=0; j<bound[0];j++){
is_used[i][j]=false;
path[i][j]=bound[0]*bound[1]*2.0;
}
path[init[0]][init[1]]=map[init[0]][init[1]];
way =new string *[bound[1]];
for(int i=0;i<bound[1];i++)
way[i] =new string [bound[0]];
way[init[0]][init[1]]="("+toString(init[0]+1)+","+toString(init[1]+1)+")";
path_to_neighbor(path,map,init , bound ,is_used,way);
cout<<way[dest[0]][dest[1]]<<endl;
cout<<"Shortest_Time:"<<path[dest[0]][dest[1]]<<endl;
cout<<"Time_for_computation:"<<endl;
cout<<endl;
}
}
// Recursive Dijkstra step: relaxes the 4-connected neighbors of `coor`
// (diagonals are explicitly skipped), marks `coor` visited, then recurses
// into the cheapest unvisited cell.
//   arr     — current best cost to reach each cell
//   map     — per-cell traversal cost
//   coor    — current cell (x, y)
//   bound   — grid dimensions
//   visited — cells already finalized
//   pathway — printable path string accumulated per cell
// NOTE(review): recursion depth grows with the number of cells — may
// overflow the stack on large grids; an explicit loop/priority queue would
// be safer. Also, `temp` stays uninitialized when every cell is visited or
// all remaining costs exceed `min` — confirm termination relies on the
// bounds check at the top of the next call.
void path_to_neighbor(double** arr,double** map, int const* coor, int const* bound,bool** visited,string** pathway){
    string temp_coord;
    int p=0,temp[2];
    // Ignore out-of-range coordinates (also terminates stray recursion).
    if(coor[0]>=0&&coor[1]>=0&&coor[0]<bound[0]&&coor[1]<bound[1]){
        if(visited[coor[0]][coor[1]]==false){
            // Relax the 3x3 neighborhood, skipping the diagonal cells.
            for(int i=coor[0]-1;i<=coor[0]+1;i++){
                if(i<bound[0]&&i>=0){
                    for(int j=coor[1]-1;j<=coor[1]+1;j++){
                        if(j<bound[1]&&j>=0){
                            // Skip diagonals: both axes differing from coor.
                            if((i<coor[0]||i>coor[0])&&(j>coor[1]||j<coor[1])) continue;
                            if((arr[coor[0]][coor[1]]+map[i][j])<arr[i][j]){
                                // Found a cheaper route; extend the path string.
                                pathway[i][j] =pathway[coor[0]][coor[1]]+",("+toString(i+1)+","+toString(j+1)+")";
                                arr[i][j]= arr[coor[0]][coor[1]]+map[i][j];
                                //  cout<<"("<<i<<","<<j<<") ="<<arr[i][j]<<" = "<<arr[coor[0]][coor[1]]<<" + "<<map[i][j]<<" coor = "<<coor[0]<<" "<<coor[1]<<endl;
                            }
                        }
                    }
                }
            }
            visited[coor[0]][coor[1]]=true;
            // Select the cheapest unvisited cell as the next frontier node.
            double min =100000.0;
            for(int i=0;i<bound[1];i++){
                for(int j=0;j<bound[0];j++){
                    if(arr[i][j]<min&&visited[i][j]==false){
                        min = arr[i][j];
                        temp[0]=i;
                        temp[1]=j;
                    }
                }
            }
            path_to_neighbor(arr,map,temp,bound,visited,pathway);
        }
    }
}
|
<gh_stars>0
# -*- coding: utf-8 -*-
miquire :core, 'environment', 'user', 'message', 'userlist', 'configloader', 'userconfig', 'service_keeper'
miquire :lib, "mikutwitter", 'reserver', 'delayer', 'instance_storage'
Thread.abort_on_exception = true
=begin rdoc
Twitter APIとmikutterプラグインのインターフェイス
=end
class Service
include ConfigLoader
include InstanceStorage
extend Enumerable
# MikuTwitter のインスタンス
attr_reader :twitter
class << self
# Reloads Service instances from the saved account list and recomputes the
# primary account (falling back to the first instance).
def services_refresh
  SaveData.accounts.keys.each do |account|
    Service[account]
  end
  # NOTE: previously written with `and`/`or`, whose precedence is lower than
  # assignment — `@primary = X or Y` parses as `(@primary = X) or Y`, so the
  # `instances.first` fallback was dead code and @primary could become nil.
  @primary = (UserConfig[:primary_account] && Service[UserConfig[:primary_account]]) || instances.first
end
# Returns the Set of existing Service objects — i.e. every "self"
# account that has posting rights.
alias services instances
# Iterates over every Service instance (equivalent to Service.instances.each).
def each(*args, &block)
  instances.each(*args, &block)
end
# Returns the currently active Service.
# By default, any action should be performed against the Service this
# returns.
# ==== Return
# the active Service, or nil when none exists
def primary
  return @primary if @primary
  return nil if services.empty?
  set_primary(services.first)
  @primary
end
alias primary_service primary
# Returns the currently active Service.
# Unlike Service.primary, raises when no service is registered at all.
# ==== Exceptions
# Service::NotExistError :: no (selected) Service exists
# ==== Return
# the active Service
def primary!
  result = primary
  raise Service::NotExistError, 'Services does not exists.' if result.nil?
  result
end
# Makes _service_ the active (primary) Service, firing the
# :primary_service_changed plugin event. No-op when it is already primary.
# Returns self.
def set_primary(service)
  type_strict service => Service
  # The old guard compared a snapshot taken immediately before assignment
  # against @primary, which could never differ; only the "already primary"
  # check below ever had any effect.
  return self if @primary == service
  @primary = service
  Plugin.call(:primary_service_changed, service)
  notice "current active service: #{service.name}"
  self
end
# Authenticates a new service (account) from an OAuth token/secret pair.
# Verifies the credentials against Twitter, persists the account data,
# fires the :service_registered plugin event, and resolves the returned
# Deferred with the new Service.
def add_service(token, secret)
  type_strict token => String, secret => String
  twitter = MikuTwitter.new
  twitter.consumer_key = Environment::TWITTER_CONSUMER_KEY
  twitter.consumer_secret = Environment::TWITTER_CONSUMER_SECRET
  twitter.a_token = token
  twitter.a_secret = secret
  (twitter/:account/:verify_credentials).user.next { |user|
    id = "twitter#{user.id}".to_sym
    accounts = Service::SaveData.accounts
    # melt yields a mutable copy when the saved hash is frozen.
    if accounts.is_a? Hash
      accounts = accounts.melt
    else
      accounts = {} end
    # NOTE(review): `accounts` is built above but never used afterwards —
    # looks vestigial; confirm before removing.
    Service::SaveData.account_register id, {
      provider: :twitter,
      slug: id,
      token: token,
      secret: secret,
      user: {
        id: user[:id],
        idname: user[:idname],
        name: user[:name],
        profile_image_url: user[:profile_image_url] } }
    service = Service[id]
    Plugin.call(:service_registered, service)
    service } end
# Keep a handle to the inherited destroy before overriding it below.
alias __destroy_e3de__ destroy

# Removes a Service: deletes its saved account data, drops the instance
# from storage, and fires the :service_destroyed plugin event.
def destroy(service)
  type_strict service => Service
  Service::SaveData.account_destroy service.name
  __destroy_e3de__("twitter#{service.user_obj.id}".to_sym)
  Plugin.call(:service_destroyed, service) end

# Alias-style wrapper for destroy.
def remove_service(service)
  destroy(service) end
end
# Plugins receive an existing instance whenever one is needed, so there is
# normally no reason to construct a Service directly.
# Builds the MikuTwitter client from the saved account credentials, then
# registers the message/user retrievers and resolves this account's User.
def initialize(name)
  super
  account = Service::SaveData.account_data name
  @twitter = MikuTwitter.new
  @twitter.consumer_key = Environment::TWITTER_CONSUMER_KEY
  @twitter.consumer_secret = Environment::TWITTER_CONSUMER_SECRET
  @twitter.a_token = account[:token]
  @twitter.a_secret = account[:secret]
  Message.add_data_retriever(MessageServiceRetriever.new(self, :status_show))
  User.add_data_retriever(UserServiceRetriever.new(self, :user_show))
  user_initialize
end
# Re-sets the access token and access secret, persisting them to the saved
# account data and updating the live MikuTwitter client. Returns self.
def set_token_secret(token, secret)
  Service::SaveData.account_modify name, {token: token, secret: secret}
  @twitter.a_token, @twitter.a_secret = token, secret
  self
end
# Returns the User object for this account (resolved at initialization).
def user_obj
  @user_obj end

# Returns this account's screen name (idname).
def user
  @user_obj[:idname] end
alias :idname :user

# Like #user, but never queries the service, so it cannot block.
# NOTE(review): @user_idname is never assigned in this file, so this looks
# like it always returns nil — verify against the rest of the codebase.
def user_by_cache
  @user_idname end

# Returns self.
def service
  self end
# Issues the query _kind_ to the service and blocks until the response
# arrives. Returns the response, or nil on failure (the underlying call
# raises the apifail event path via trap).
# Must not be called on the main thread (no_mainthread).
# 0.1: this method is obsolete.
def scan(kind=:friends_timeline, args={})
  no_mainthread
  wait = Queue.new
  __send__(kind, args).next{ |res|
    wait.push res
  }.terminate.trap{ |e|
    wait.push nil }
  # Block until either the success or failure branch pushes a value.
  wait.pop end
# Like #scan, but queries on another thread so it does not block.
# When the response arrives, the given block is invoked — guaranteed to run
# on the main thread. Returns a Deferred.
# 0.1: this method is obsolete.
def call_api(api, args = {}, &proc)
  # Parenthesize the block-pass argument: the bare `next &proc` form is
  # ambiguous to the parser and emits a warning on modern Rubies.
  __send__(api, args).next(&proc) end
# Connects to the Streaming API by forwarding to the MikuTwitter client
# (defaults to the user stream).
def streaming(method = :userstream, *args, &proc)
  twitter.__send__(method, *args, &proc) end
#
# POST-related helpers
#
# Defines a posting method named _method_ that forwards to the MikuTwitter
# method _twitter_method_, reporting progress through an optional callback
# (:start, :try, :success, :err, :fail, :exit). The return value is a
# Deferred, so the callback is optional. The optional block _wrap_ lets a
# definition adapt its arguments and post-process the result before/after
# the API call is made.
def self.define_postal(method, twitter_method = method, &wrap)
  # Core caller: invokes the API and threads the callback notifications
  # around the Deferred chain.
  function = lambda{ |api, options, &callback|
    if(callback)
      callback.call(:start, options)
      callback.call(:try, options)
      api.call(options).next{ |res|
        callback.call(:success, res)
        res
      }.trap{ |exception|
        callback.call(:err, exception)
        callback.call(:fail, exception)
        callback.call(:exit, nil)
        # Keep the failure propagating down the Deferred chain.
        Deferred.fail(exception)
      }.next{ |val|
        callback.call(:exit, nil)
        val }
    else
      api.call(options) end }
  if block_given?
    # Wrapped form: `wrap` receives a one-shot caller plus the service and args.
    define_method(method){ |*args, &callback|
      wrap.call(lambda{ |options|
        function.call(twitter.method(twitter_method), options, &callback) }, self, *args) }
  else
    define_method(method){ |options, &callback| function.call(twitter.method(twitter_method), options, &callback) } end
end
# update: posts a status; fires :posted and :update plugin events on success.
define_postal(:update){ |parent, service, options|
  parent.call(options).next{ |message|
    notice 'event fire :posted and :update by statuses/update'
    Plugin.call(:posted, service, [message])
    Plugin.call(:update, service, [message])
    message } }

# retweet: same event flow as update.
define_postal(:retweet){ |parent, service, options|
  parent.call(options).next{ |message|
    notice 'event fire :posted and :update by statuses/retweet'
    Plugin.call(:posted, service, [message])
    Plugin.call(:update, service, [message])
    message } }

# Plain pass-through postal endpoints (no extra event handling).
define_postal :search_create
define_postal :search_destroy
define_postal :follow
define_postal :unfollow
define_postal :add_list_member
define_postal :delete_list_member
define_postal :add_list
define_postal :delete_list
define_postal :update_list
define_postal :send_direct_message
define_postal :destroy_direct_message

# destroy: deletes a status, tags it with rule :destroy and fires :destroyed.
define_postal(:destroy){ |parent, service, options|
  parent.call(options).next{ |message|
    message[:rule] = :destroy
    Plugin.call(:destroyed, [message])
    message } }

alias post update

# favorite: fires before/success/fail plugin events around a fav; when
# fav is false, delegates to unfavorite instead.
define_postal(:favorite) { |parent, service, message, fav = true|
  if fav
    Plugin.call(:before_favorite, service, service.user_obj, message)
    parent.call(message).next{ |message|
      Plugin.call(:favorite, service, service.user_obj, message)
      message
    }.trap{ |e|
      Plugin.call(:fail_favorite, service, service.user_obj, message)
      Deferred.fail(e) } else
    service.unfavorite(message).next{ |message|
      Plugin.call(:unfavorite, service, service.user_obj, message)
      message } end }

define_postal :unfavorite
def inspect
"#<Service #{idname}>" end
def method_missing(method_name, *args)
result = twitter.__send__(method_name, *args)
(class << self; self end).__send__(:define_method, method_name, &twitter.method(method_name))
result end
private
def user_initialize
if defined? Service::SaveData.account_data(name.to_sym)[:user]
@user_obj = User.new_ifnecessary(Service::SaveData.account_data(name.to_sym)[:user])
(twitter/:account/:verify_credentials).user.next(&method(:user_data_received)).trap(&method(:user_data_failed))
else
res = twitter.query!('account/verify_credentials', cache: true)
if "200" == res.code
user_data_received(MikuTwitter::ApiCallSupport::Request::Parser.user(JSON.parse(res.body).symbolize))
else
user_data_failed_crash!(res) end end end
# :enddoc:
def user_data_received(user)
@user_obj = user
Service.account_modify name, {
user: {
id: @user_obj[:id],
idname: @user_obj[:idname],
name: @user_obj[:name],
profile_image_url: @user_obj[:profile_image_url] } } end
def user_data_failed(e)
if e.is_a? MikuTwitter::Error
if not UserConfig[:verify_credentials]
user_data_failed_crash!(e.httpresponse) end end end
def user_data_failed_crash!(res)
if '400' == res.code
chi_fatal_alert "起動に必要なデータをTwitterが返してくれませんでした。規制されてるんじゃないですかね。\n" +
"ニコ動とか見て、規制が解除されるまで適当に時間を潰してください。ヽ('ω')ノ三ヽ('ω')ノもうしわけねぇもうしわけねぇ\n" +
"\n\n--\n\n" +
"#{res.code} #{res.body}"
else
chi_fatal_alert "起動に必要なデータをTwitterが返してくれませんでした。電車が止まってるから会社行けないみたいなかんじで起動できません。ヽ('ω')ノ三ヽ('ω')ノもうしわけねぇもうしわけねぇ\n"+
"Twitterサーバの情況を調べる→ https://dev.twitter.com/status\n"+
"Twitterサーバの情況を調べたくない→ http://www.nicovideo.jp/vocaloid\n\n--\n\n" +
"#{res.code} #{res.body}" end end
class ServiceRetriever
include Retriever::DataSource
def initialize(post, api)
@post = post
@api = api
end
def findbyid(id)
if id.is_a? Enumerable
id.map(&method(:findbyid))
else
@post.scan(@api, :id => id) end end
def time
1.0/0 end
end
class MessageServiceRetriever < ServiceRetriever
end
class UserServiceRetriever < ServiceRetriever
include Retriever::DataSource
def findbyid(id)
if id.is_a? Enumerable
id.each_slice(100).map{|id_list|
@post.scan(:user_lookup, id: id_list.join(','.freeze)) || [] }.flatten
else
@post.scan(@api, :id => id) end end end
class Error < RuntimeError; end
class NotExistError < Error; end
services_refresh
end
Post = Service
|
<filename>packages/coinstac-ui/app/render/components/data-discovery/data-discovery.styles.js
import { makeStyles } from '@material-ui/core/styles';
// Material-UI style hook for the data-discovery screen.
export default makeStyles(theme => ({
  // Button that triggers a search, spaced below the form inputs.
  searchButton: {
    marginTop: theme.spacing(2),
  },
  // Vertical spacing between successive search-result cards.
  resultItem: {
    marginBottom: theme.spacing(2),
  },
  // Informational banner shown before the first search is run.
  firstSearchInfo: {
    padding: theme.spacing(2),
    background: theme.palette.info.main,
    color: theme.palette.info.contrastText,
  },
}));
|
#!/bin/bash
# Format Dart sources with `flutter format`.
#
# Usage:
#   ./format.sh <file>   format a single file under lib/
#   ./format.sh          format everything under lib/ and test/
set -e

if [ -n "$1" ]; then
  MY_FILE=$1
  # Fail fast (via set -e) with stat's error if the file does not exist.
  stat "./lib/$MY_FILE" > /dev/null 2>&1
  # Quote the expansion so filenames containing spaces survive word splitting.
  flutter format "./lib/$MY_FILE"
else
  flutter format lib
  flutter format test
fi
|
#!/bin/bash
# conda-build script for rainbow: compile and install the binary and helper
# Perl scripts into the environment's bin directory.
# set -e ensures a failed `make` aborts the build instead of silently
# installing stale or missing artifacts.
set -e

mkdir -p "$PREFIX/bin"
make
cp rainbow "$PREFIX/bin"
cp select_all_rbcontig.pl "$PREFIX/bin"
cp select_best_rbcontig.pl "$PREFIX/bin"
cp select_sec_rbcontig.pl "$PREFIX/bin"
cp select_best_rbcontig_plus_read1.pl "$PREFIX/bin"
|
class Employee:
    """Simple record of an employee's name, age and salary."""

    def __init__(self, name, age, salary):
        """Store the given attributes on the instance."""
        self.name, self.age, self.salary = name, age, salary
class EmployeeDB:
    """In-memory collection of employees with lookup by name."""

    def __init__(self):
        """Start with an empty backing list."""
        self.db = []

    def add_employee(self, employee):
        """Append *employee* to the collection."""
        self.db.append(employee)

    def get_employee(self, name):
        """Return the first employee whose name matches, or ``None``."""
        return next((emp for emp in self.db if emp.name == name), None)
|
<filename>test_code/mnist_dpsgd_test_keras_vectorized.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import pandas as pd
import tables
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
from tensorflow_privacy.privacy.optimizers.dp_optimizer_keras import DPKerasSGDOptimizer
# We import the modified optimizers
from dp_optimizer_keras_vectorized import VectorizedDPKerasSGDOptimizer
# Command-line flags controlling the training run.
flags.DEFINE_boolean('dpsgd', True, 'If True, train with DP-SGD. If False, train with vanilla SGD.')
# Selects the noise mechanism used by the vectorized DP optimizer.
flags.DEFINE_integer('dpsgd_type',1, 'types: 0-gaussian, 1-laplace, 2-cactus')
flags.DEFINE_float('learning_rate', 0.15, 'Learning rate for training')
flags.DEFINE_float('noise_multiplier', 2,'Ratio of the standard deviation to the clipping norm')
flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
flags.DEFINE_integer('batch_size', 250, 'Batch size')
flags.DEFINE_integer('epochs', 15, 'Number of epochs')
flags.DEFINE_integer('microbatches', 250, 'Number of microbatches (must evenly divide batch_size)')
flags.DEFINE_string('model_dir', None, 'Model directory')
flags.DEFINE_boolean('logging', False, 'If True, records will be saved in files.')
FLAGS = flags.FLAGS
# Size of the MNIST training set; used by the privacy accountant below.
NUM_TRAIN_EXAMPLES = 60000
def compute_epsilon(steps):
  """Computes epsilon value for given hyperparameters."""
  # Zero noise means no differential-privacy guarantee at all.
  if FLAGS.noise_multiplier == 0.0:
    return float('inf')
  # Candidate Renyi orders the accountant optimizes over.
  orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
  # Poisson subsampling probability per step.
  sampling_probability = FLAGS.batch_size / NUM_TRAIN_EXAMPLES
  rdp = compute_rdp(q=sampling_probability,
                    noise_multiplier=FLAGS.noise_multiplier,
                    steps=steps,
                    orders=orders)
  # Delta is set to 1e-5 because MNIST has 60000 training points.
  return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
def load_mnist():
  """Loads MNIST and preprocesses to combine training and validation data."""
  train, test = tf.keras.datasets.mnist.load_data()
  train_data, train_labels = train
  test_data, test_labels = test
  # Scale pixel values into [0, 1].
  train_data = np.array(train_data, dtype=np.float32) / 255
  test_data = np.array(test_data, dtype=np.float32) / 255
  # Add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
  train_data = train_data.reshape((train_data.shape[0], 28, 28, 1))
  test_data = test_data.reshape((test_data.shape[0], 28, 28, 1))
  train_labels = np.array(train_labels, dtype=np.int32)
  test_labels = np.array(test_labels, dtype=np.int32)
  # One-hot encode the 10 digit classes.
  train_labels = tf.keras.utils.to_categorical(train_labels, num_classes=10)
  test_labels = tf.keras.utils.to_categorical(test_labels, num_classes=10)
  # Sanity-check the normalization.
  assert train_data.min() == 0.
  assert train_data.max() == 1.
  assert test_data.min() == 0.
  assert test_data.max() == 1.
  return train_data, train_labels, test_data, test_labels
def main(unused_argv):
  """Trains an MNIST classifier with (vectorized) DP-SGD or vanilla SGD.

  Saves the Keras training history to CSV and, for DP runs, reports the
  privacy budget (epsilon at delta=1e-5) spent by the training run.
  """
  # Noise variance implied by the clipping norm and noise multiplier.
  var=(FLAGS.noise_multiplier*FLAGS.l2_norm_clip)**2
  if FLAGS.logging:
    # Initialize the norm file
    filename = ('norm_%d_v%1.2f_gradient.npy' %(FLAGS.dpsgd_type, var))
    np.save(filename, np.empty(0))
  logging.set_verbosity(logging.INFO)
  if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:
    raise ValueError('Number of microbatches should divide evenly batch_size')

  # Load training and test data.
  train_data, train_labels, test_data, test_labels = load_mnist()

  # Define a sequential Keras model
  model = tf.keras.Sequential([
      tf.keras.layers.Conv2D(16, 8,
                             strides=2,
                             padding='same',
                             activation='relu',
                             input_shape=(28, 28, 1)),
      tf.keras.layers.MaxPool2D(2, 1),
      tf.keras.layers.Conv2D(32, 4,
                             strides=2,
                             padding='valid',
                             activation='relu'),
      tf.keras.layers.MaxPool2D(2, 1),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(32, activation='relu'),
      tf.keras.layers.Dense(10)
  ])
  if FLAGS.dpsgd:
    optimizer = VectorizedDPKerasSGDOptimizer(
        l2_norm_clip=FLAGS.l2_norm_clip,
        noise_multiplier=FLAGS.noise_multiplier,
        num_microbatches=FLAGS.microbatches,
        learning_rate=FLAGS.learning_rate,
        dpsgd_type=FLAGS.dpsgd_type,
        logging=FLAGS.logging)
    # Compute vector of per-example loss rather than its mean over a minibatch.
    loss = tf.keras.losses.CategoricalCrossentropy(
        from_logits=True, reduction=tf.losses.Reduction.NONE)
  else:
    optimizer = tf.keras.optimizers.SGD(learning_rate=FLAGS.learning_rate)
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

  # Information Session:
  print('\n *** Information: dpsgd_type=%d, stddev=%f, epochs=%d *** \n' %(FLAGS.dpsgd_type, FLAGS.noise_multiplier*FLAGS.l2_norm_clip, FLAGS.epochs))

  # Compile model with Keras.  (Fixed "Complie" typo in the log message.)
  print('\n *** Compile model with Keras. *** \n')
  model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

  # Train model with Keras
  print('\n *** Train model with Keras. *** \n')
  history = model.fit(train_data, train_labels,
                      epochs=FLAGS.epochs,
                      validation_data=(test_data, test_labels),
                      batch_size=FLAGS.batch_size)

  # Compute the privacy budget expended.
  # NOTE(review): assumes a `test_data/` directory already exists — confirm.
  if FLAGS.dpsgd:
    hist_df = pd.DataFrame(history.history)
    hist_csv_file = ('test_data/log_e%d_%d_v%1.2f.csv' %(FLAGS.epochs, FLAGS.dpsgd_type,(FLAGS.noise_multiplier*FLAGS.l2_norm_clip)**2))
    with open(hist_csv_file, mode='w') as f:
      hist_df.to_csv(f)
    eps = compute_epsilon(FLAGS.epochs * 60000 // FLAGS.batch_size)
    print('For delta=1e-5, the current epsilon is: %.2f' % eps)
  else:
    hist_df = pd.DataFrame(history.history)
    hist_csv_file = ('test_data/logB_e%d_np.csv' %(FLAGS.epochs))
    with open(hist_csv_file, mode='w') as f:
      hist_df.to_csv(f)
    print('Trained with vanilla non-private SGD optimizer')
# Entry point: let absl parse the flags, then run training.
if __name__ == '__main__':
  app.run(main)
|
const express = require('express')
const _ = require('lodash')
const Boom = require('boom')
const jwt = require('jsonwebtoken')
const yaml = require('js-yaml')
const logger = require('hw-logger')
const log = logger.log
const JsonValidator = require('../../services/json-validator')
const Store = require('../../services/store')
const yamlType = 'application/x-yaml'
module.exports = (config, store) => {
const router = express.Router()
router.use((req, res, next) => {
if (!res.renderData) {
res.renderData = data => res.format({
json: () => {
res.json(data)
},
[yamlType]: () => res.send(yaml.safeDump(data)),
})
}
next()
})
router.use('/auth', require('./auth')(config, store))
router.use('/quizzes', require('./quizzes')(config, store))
router.use('/answers', require('./answers')(config, store))
router.use('/sessions', require('./sessions')(config, store))
router.use(() => {
throw Boom.notFound()
})
router.use((err, req, res, next) => {
if (err instanceof JsonValidator.errors.JsonValidatorError) {
Boom.wrap(err, 400)
} else if (err instanceof Store.errors.NotFoundError) {
Boom.wrap(err, 404)
} else if (err instanceof jwt.JsonWebTokenError) {
Boom.wrap(err, 401)
}
logger.enabledLevels.debug && log.debug('err :', err)
const statusCode = _.get(err, 'output.statusCode', 500)
const data = {
code: _.get(err, 'output.payload.error', err.code),
message: _.get(err, 'output.payload.message', err.toString),
}
res.status(statusCode).json(data)
})
return router
}
|
#!/bin/sh
# Download minishift, install the KVM docker-machine driver, and start a
# local OpenShift cluster with a few convenience addons enabled.
#set -e

# Get minishift
MINISHIFT_VER=1.34.1
MINISHIFT_NAME="minishift-${MINISHIFT_VER}-linux-amd64"
wget https://github.com/minishift/minishift/releases/download/v${MINISHIFT_VER}/${MINISHIFT_NAME}.tgz
tar -xvzf ${MINISHIFT_NAME}.tgz
cd ${MINISHIFT_NAME}
# Get KVM driver
# https://docs.okd.io/latest/minishift/getting-started/setting-up-virtualization-environment.html#kvm-driver-fedora
curl -L https://github.com/dhiltgen/docker-machine-kvm/releases/download/v0.10.0/docker-machine-driver-kvm-centos7 -o docker-machine-driver-kvm
chmod +x docker-machine-driver-kvm
# So we keep to our directory but minishift still finds docker-machine-driver
# NOTE(review): appending "." to PATH is fragile if the cwd changes later;
# consider using an absolute path instead.
PATH=$PATH:.
# Start minishift
./minishift addons enable registry-route
./minishift addons enable admin-user
./minishift start
# Raise vm.max_map_count inside the VM (needed e.g. by Elasticsearch).
./minishift ssh -- sudo sysctl -w vm.max_map_count=262144
|
#!/bin/bash
# Tear down the OVS/docker demo: detach every container's OVS ports,
# kill all containers, and clear the bridge's OpenFlow rules.
# Intentionally best-effort: no `set -e`, so a failed step does not
# prevent the remaining cleanup from running.
BRIDGE=br0

# Quote the Go template so the shell never mangles the braces.
CONTAINER_NAMES=$(sudo docker ps -a --format '{{.Names}}')
for NAME in $CONTAINER_NAMES; do
  sudo ovs-docker del-ports "$BRIDGE" "$NAME"
done

# `docker kill` errors out when given no ids, so guard the empty case.
CONTAINER_IDS=$(sudo docker ps -a -q)
if [ -n "$CONTAINER_IDS" ]; then
  sudo docker kill $CONTAINER_IDS
fi

sudo ovs-ofctl -OOpenflow13 del-flows "$BRIDGE"
|
#!/usr/bin/env bash
# Uninstall every installed VS Code extension, then list what remains.
# `set +e` keeps going even if an individual uninstall fails.
set +e
for ext in $(code --list-extensions); do
  code --uninstall-extension "$ext"
done
code --list-extensions --show-versions
|
import KEYCODE from '../constants/_KEYCODE.es6';
import { PopupMenu } from './_Menu.es6';
/**
* @abstract
*/
class MenuItem {
  /**
   * @param {HTMLElement} domNode - the focusable element for this item.
   * @param {Object} menuObj - the owning menu (menubar or popup).
   */
  constructor(domNode, menuObj) {
    this.menu = menuObj;
    this.domNode = domNode;
    // Set in init() when this item owns a nested <ul>.
    this.popupMenu = false;
    this.isMenubarItem = false;
    this.hasFocus = false;
    this.hasHover = false;
  }
  // True for a single printable, non-whitespace character (used for
  // first-character navigation).
  _isPrintableCharacter(str) {
    return str.length === 1 && str.match(/\S/);
  }
  // Builds the child popup menu (if any) and wires up event listeners.
  init() {
    const popupMenu = this.domNode.parentElement.querySelector('ul');
    if (popupMenu) {
      this.domNode.setAttribute('aria-haspopup', 'true');
      this.popupMenu = new PopupMenu(popupMenu, this);
      this.popupMenu.init();
    }
    this.domNode.addEventListener('keydown', this.handleKeydown.bind(this));
    this.domNode.addEventListener('focus', this.handleFocus.bind(this));
    this.domNode.addEventListener('blur', this.handleBlur.bind(this));
    this.domNode.addEventListener('mouseover', this.handleMouseover.bind(this));
    this.domNode.addEventListener('mouseout', this.handleMouseout.bind(this));
  }
  // Reflects the open/closed state of the child popup via aria-expanded.
  setExpanded(value) {
    if (value) {
      this.domNode.setAttribute('aria-expanded', 'true');
    } else {
      this.domNode.setAttribute('aria-expanded', 'false');
    }
  }
  getBoundaries() {
    return this.domNode.getBoundingClientRect();
  }
  getIsMenubarItem() {
    return this.isMenubarItem;
  }
  getHover() {
    return this.hasHover;
  }
  // Focus/blur propagate to the owning menu's focus bookkeeping.
  handleFocus() {
    this.menu.setFocus(true);
  }
  handleBlur() {
    this.menu.setFocus(false);
  }
  // Hovering opens the child popup immediately.
  handleMouseover() {
    this.hasHover = true;
    if (this.popupMenu) {
      this.popupMenu.setHover(true);
      this.popupMenu.open();
    }
  }
  // Leaving schedules a delayed close so users can traverse the gap
  // between the item and its popup without it snapping shut.
  handleMouseout() {
    this.hasHover = false;
    if (this.popupMenu) {
      this.popupMenu.setHover(false);
      setTimeout(this.popupMenu.close.bind(this.popupMenu, false), 300);
    }
  }
}
/**
 * Top-level item of a horizontal menubar. Implements the ARIA menubar
 * keyboard interaction pattern in handleKeydown.
 */
export class MenubarItem extends MenuItem {
  constructor(domNode, menuObj) {
    super(domNode, menuObj);
    this.isMenubarItem = true;
  }
  init() {
    super.init();
    // Items start out of the tab order; the menubar manages roving tabindex.
    this.domNode.tabIndex = -1;
  }
  setTabIndex(value) {
    this.domNode.tabIndex = value;
  }
  focusOnSelf() {
    this.domNode.focus();
  }
  focusOnPreviousSibling() {
    this.menu.setFocusToPreviousItem(this);
  }
  focusOnNextSibling() {
    this.menu.setFocusToNextItem(this);
  }
  // ARIA menubar keyboard handling: arrows move between items, Space/Enter/
  // Down open the popup, printable characters jump by first letter.
  handleKeydown(event) {
    const { key } = event;
    let flag = false;
    switch (event.keyCode) {
      case KEYCODE.SPACE:
      case KEYCODE.RETURN:
      case KEYCODE.DOWN:
        if (this.popupMenu) {
          this.popupMenu.open();
          this.popupMenu.setFocusToFirstItem();
          flag = true;
        }
        break;
      case KEYCODE.LEFT:
        this.menu.setFocusToPreviousItem(this);
        flag = true;
        break;
      case KEYCODE.RIGHT:
        this.menu.setFocusToNextItem(this);
        flag = true;
        break;
      case KEYCODE.UP:
        if (this.popupMenu) {
          this.popupMenu.open();
          this.popupMenu.setFocusToLastItem();
          flag = true;
        }
        break;
      case KEYCODE.HOME:
      case KEYCODE.PAGEUP:
        this.menu.setFocusToFirstItem();
        flag = true;
        break;
      case KEYCODE.END:
      case KEYCODE.PAGEDOWN:
        this.menu.setFocusToLastItem();
        flag = true;
        break;
      case KEYCODE.TAB:
        // Tab leaves the menubar: close the popup but let focus move on
        // (flag stays false so the default action is not prevented).
        if (this.popupMenu) {
          this.popupMenu.close(true);
        }
        break;
      case KEYCODE.ESC:
        if (this.popupMenu) {
          this.popupMenu.close(true);
        }
        break;
      default:
        if (this._isPrintableCharacter(key)) {
          this.menu.setFocusByFirstCharacter(this, key);
          flag = true;
        }
        break;
    }
    // Swallow the event only when we actually handled the key.
    if (flag) {
      event.stopPropagation();
      event.preventDefault();
    }
  }
}
/**
 * Item inside a popup (sub)menu. Implements the ARIA menu keyboard
 * interaction pattern; activating a leaf item simulates a click so the
 * normal click handling (and link navigation) runs.
 */
export class SubMenuItem extends MenuItem {
  init() {
    super.init();
    this.domNode.addEventListener('click', this.handleClick.bind(this));
  }
  handleKeydown(event) {
    const { currentTarget, key } = event;
    let flag = false;
    let clickEvent;
    switch (event.keyCode) {
      case KEYCODE.SPACE:
      case KEYCODE.RETURN:
        if (this.popupMenu) {
          this.popupMenu.open();
          this.popupMenu.setFocusToFirstItem();
        } else {
          // Create simulated mouse event to mimic the behavior of ATs
          // and let the event handler handleClick do the housekeeping.
          try {
            clickEvent = new MouseEvent('click', {
              view: window,
              bubbles: true,
              cancelable: true,
            });
          } catch (err) {
            if (document.createEvent) {
              // DOM Level 3 for IE 9+
              clickEvent = document.createEvent('MouseEvents');
              clickEvent.initEvent('click', true, true);
            }
          }
          currentTarget.dispatchEvent(clickEvent);
        }
        flag = true;
        break;
      case KEYCODE.UP:
        this.menu.setFocusToPreviousItem(this);
        flag = true;
        break;
      case KEYCODE.DOWN:
        this.menu.setFocusToNextItem(this);
        flag = true;
        break;
      case KEYCODE.LEFT:
        // Left closes this submenu and moves to the previous menubar item.
        this.menu.setFocusToController('previous', true);
        this.menu.close(true);
        flag = true;
        break;
      case KEYCODE.RIGHT:
        // Right either descends into a nested popup or moves to the next
        // menubar item.
        if (this.popupMenu) {
          this.popupMenu.open();
          this.popupMenu.setFocusToFirstItem();
        } else {
          this.menu.setFocusToController('next', true);
          this.menu.close(true);
        }
        flag = true;
        break;
      case KEYCODE.HOME:
      case KEYCODE.PAGEUP:
        this.menu.setFocusToFirstItem();
        flag = true;
        break;
      case KEYCODE.END:
      case KEYCODE.PAGEDOWN:
        this.menu.setFocusToLastItem();
        flag = true;
        break;
      case KEYCODE.ESC:
        this.menu.setFocusToController();
        this.menu.close(true);
        flag = true;
        break;
      case KEYCODE.TAB:
        // Return focus to the controller but do not block tabbing away.
        this.menu.setFocusToController();
        break;
      default:
        if (this._isPrintableCharacter(key)) {
          this.menu.setFocusByFirstCharacter(this, key);
          flag = true;
        }
        break;
    }
    if (flag) {
      event.stopPropagation();
      event.preventDefault();
    }
  }
  // Activating an item dismisses the whole popup chain.
  handleClick() {
    this.menu.setFocusToController();
    this.menu.close(true);
  }
  handleBlur() {
    super.handleBlur();
    setTimeout(this.menu.close.bind(this.menu, false), 300);
  }
  handleMouseover() {
    this.menu.setHover(true);
    this.menu.open();
    super.handleMouseover();
  }
  handleMouseout() {
    super.handleMouseout();
    this.menu.setHover(false);
    setTimeout(this.menu.close.bind(this.menu, false), 300);
  }
}
|
package com.java.simple;
/**
 * Minimal demo program that prints a greeting to standard output.
 */
public class HelloWorld {

    /** Entry point: writes "HELLO WORLD" followed by a newline. */
    public static void main(String[] args) {
        final String greeting = "HELLO WORLD";
        System.out.println(greeting);
    }
}
|
<reponame>MartinNeupauer/mongo<filename>src/third_party/wiredtiger/test/suite/test_alter01.py
#!/usr/bin/env python
#
# Public Domain 2014-2017 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import wiredtiger, wttest
from wtscenario import make_scenarios
# test_alter01.py
# Smoke-test the session alter operations.
class test_alter01(wttest.WiredTigerTestCase):
    # Smoke-test session.alter() across data-source types, hint settings,
    # cache residency settings, and with/without a connection reopen.
    name = "alter01"
    entries = 100
    # Settings for access_pattern_hint
    types = [
        ('file', dict(uri='file:', use_cg=False, use_index=False)),
        ('lsm', dict(uri='lsm:', use_cg=False, use_index=False)),
        ('table-cg', dict(uri='table:', use_cg=True, use_index=False)),
        ('table-index', dict(uri='table:', use_cg=False, use_index=True)),
        ('table-simple', dict(uri='table:', use_cg=False, use_index=False)),
    ]
    hints = [
        ('default', dict(acreate='')),
        ('none', dict(acreate='none')),
        ('random', dict(acreate='random')),
        ('sequential', dict(acreate='sequential')),
    ]
    access_alter=('', 'none', 'random', 'sequential')
    # Settings for cache_resident
    resid = [
        ('default', dict(ccreate='')),
        ('false', dict(ccreate='false')),
        ('true', dict(ccreate='true')),
    ]
    reopen = [
        ('no-reopen', dict(reopen=False)),
        ('reopen', dict(reopen=True)),
    ]
    cache_alter=('', 'false', 'true')
    scenarios = make_scenarios(types, hints, resid, reopen)
    # Assert that some file:/lsm: metadata entry for this table contains
    # the given configuration substring (no-op for the empty string).
    def verify_metadata(self, metastr):
        if metastr == '':
            return
        cursor = self.session.open_cursor('metadata:', None, None)
        #
        # Walk through all the metadata looking for the entries that are
        # the file URIs for components of the table.
        #
        found = False
        while True:
            ret = cursor.next()
            if ret != 0:
                break
            key = cursor.get_key()
            check_meta = ((key.find("lsm:") != -1 or key.find("file:") != -1) \
                and key.find(self.name) != -1)
            if check_meta:
                value = cursor[key]
                found = True
                self.assertTrue(value.find(metastr) != -1)
        cursor.close()
        self.assertTrue(found == True)
    # Alter: Change the access pattern hint after creation
    def test_alter01_access(self):
        uri = self.uri + self.name
        create_params = 'key_format=i,value_format=i,'
        complex_params = ''
        #
        # If we're not explicitly setting the parameter, then don't
        # modify create_params to test using the default.
        #
        if self.acreate != '':
            access_param = 'access_pattern_hint=%s' % self.acreate
            create_params += '%s,' % access_param
            complex_params += '%s,' % access_param
        else:
            # NOTE: This is hard-coding the default value. If the default
            # changes then this will fail and need to be fixed.
            access_param = 'access_pattern_hint=none'
        if self.ccreate != '':
            cache_param = 'cache_resident=%s' % self.ccreate
            create_params += '%s,' % cache_param
            complex_params += '%s,' % cache_param
        else:
            # NOTE: This is hard-coding the default value. If the default
            # changes then this will fail and need to be fixed.
            cache_param = 'cache_resident=false'
        cgparam = ''
        if self.use_cg or self.use_index:
            cgparam = 'columns=(k,v),'
        if self.use_cg:
            cgparam += 'colgroups=(g0),'
        self.session.create(uri, create_params + cgparam)
        # Add in column group or index settings.
        if self.use_cg:
            cgparam = 'columns=(v),'
            suburi = 'colgroup:' + self.name + ':g0'
            self.session.create(suburi, complex_params + cgparam)
        if self.use_index:
            suburi = 'index:' + self.name + ':i0'
            self.session.create(suburi, complex_params + cgparam)
        # Put some data in table.
        c = self.session.open_cursor(uri, None)
        for k in range(self.entries):
            c[k+1] = 1
        c.close()
        # Verify the string in the metadata
        self.verify_metadata(access_param)
        self.verify_metadata(cache_param)
        # Run through all combinations of the alter commands
        # for all allowed settings. This tests having only one or
        # the other set as well as having both set. It will also
        # cover trying to change the setting to its current value.
        for a in self.access_alter:
            alter_param = ''
            access_str = ''
            if a != '':
                access_str = 'access_pattern_hint=%s' % a
            for c in self.cache_alter:
                alter_param = '%s' % access_str
                cache_str = ''
                if c != '':
                    cache_str = 'cache_resident=%s' % c
                    alter_param += ',%s' % cache_str
                if alter_param != '':
                    self.session.alter(uri, alter_param)
                    if self.reopen:
                        self.reopen_conn()
                    special = self.use_cg or self.use_index
                    if not special:
                        self.verify_metadata(access_str)
                        self.verify_metadata(cache_str)
                    else:
                        # Complex tables: the setting lives on the colgroup
                        # or index component, so alter that URI too.
                        self.session.alter(suburi, alter_param)
                        self.verify_metadata(access_str)
                        self.verify_metadata(cache_str)
# Run the test suite when executed directly.
if __name__ == '__main__':
    wttest.run()
|
def is_leap_year(y: int) -> bool:
    """Return True for Gregorian leap years.

    Divisible by 400 is always a leap year; otherwise divisible by 4
    but not by 100.
    """
    if y % 400 == 0:
        return True
    return y % 4 == 0 and y % 100 != 0
def get_last_date(y: int, m: int) -> int:
    """Return the number of days in month *m* of year *y*."""
    if m in (1, 3, 5, 7, 8, 10, 12):
        return 31
    if m == 2:
        # Gregorian leap-year rule, inlined.
        leap = (y % 4 == 0 and y % 100 != 0) or y % 400 == 0
        return 29 if leap else 28
    return 30
def is_date_overflow(year:int, month:int, date:int)->bool:
    """Return True when *date* exceeds the last valid day of the given month."""
    return date > get_last_date(year, month)
# Read "YYYY-MM-DD N" from stdin and print the date N days later
# (inclusive of the start day, hence the -1 below).
date_str, delta_days = input().split()
year, month, date = map(int, date_str.split('-'))
date += int(delta_days) -1
# Normalize by rolling excess days into the following months/years.
while is_date_overflow(year, month, date):
    date -= get_last_date(year, month)
    month += 1
    if month > 12:
        month -= 12
        year += 1
# Zero-pad month and day to two digits.
print(f'{year}-{month:02}-{date:02}')
|
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.model.volatility.smile.fitting.sabr;
import java.util.Arrays;
import org.apache.commons.lang.ObjectUtils;
import com.opengamma.analytics.financial.model.interestrate.curve.ForwardCurve;
import com.opengamma.analytics.financial.model.option.definition.SmileDeltaParameters;
import com.opengamma.analytics.math.curve.InterpolatedDoublesCurve;
import com.opengamma.analytics.math.interpolation.CombinedInterpolatorExtrapolator;
import com.opengamma.util.ArgumentChecker;
/**
*
*/
/**
 * Market data bundle holding an FX volatility smile surface, built either from
 * delta-quoted market data (ATM / risk-reversal / strangle per delta pillar)
 * or supplied directly as per-expiry strike and volatility grids.
 */
public class ForexSmileDeltaSurfaceDataBundle extends SmileSurfaceDataBundle {
  /** Forward rates, one per expiry. */
  private final double[] _forwards;
  /** Expiry times. */
  private final double[] _expiries;
  /** Strikes per expiry (rows may have different lengths). */
  private final double[][] _strikes;
  /** Implied volatilities per expiry, aligned with {@link #_strikes}. */
  private final double[][] _vols;
  /** Forward curve (interpolated from the forwards, or supplied directly). */
  private final ForwardCurve _forwardCurve;
  private final int _nExpiries;
  /** Whether the quotes refer to call options. */
  private final boolean _isCallData;

  /**
   * Builds the surface from delta-quoted data, interpolating the supplied
   * forwards into a forward curve.
   *
   * @param forwards forward rates, one per expiry, not null
   * @param expiries expiry times, not null
   * @param deltas delta pillars, not null, not empty
   * @param atms at-the-money volatilities per expiry, not null
   * @param riskReversals risk-reversal quotes, indexed [delta][expiry], not null
   * @param strangle strangle quotes, indexed [delta][expiry], not null
   * @param isCallData true if the quotes refer to calls
   * @param interpolator interpolator for the forward curve, not null
   */
  public ForexSmileDeltaSurfaceDataBundle(final double[] forwards, final double[] expiries, final double[] deltas, final double[] atms, final double[][] riskReversals,
      final double[][] strangle, final boolean isCallData, final CombinedInterpolatorExtrapolator interpolator) {
    ArgumentChecker.notNull(deltas, "delta");
    ArgumentChecker.notNull(forwards, "forwards");
    ArgumentChecker.notNull(expiries, "expiries");
    ArgumentChecker.notNull(atms, "at-the-money");
    ArgumentChecker.notNull(riskReversals, "risk reversal");
    ArgumentChecker.notNull(strangle, "strangle");
    _nExpiries = expiries.length;
    ArgumentChecker.isTrue(_nExpiries == forwards.length, "forwards wrong length; have {}, need {}", forwards.length, _nExpiries);
    ArgumentChecker.isTrue(_nExpiries == atms.length, "atms wrong length; have {}, need {}", atms.length, _nExpiries);
    final int n = deltas.length;
    ArgumentChecker.isTrue(n > 0, "need at least one delta");
    ArgumentChecker.isTrue(n == riskReversals.length, "wrong number of rr sets; have {}, need {}", riskReversals.length, n);
    ArgumentChecker.isTrue(n == strangle.length, "wrong number of strangle sets; have {}, need {}", strangle.length, n);
    for (int i = 0; i < n; i++) {
      ArgumentChecker.isTrue(_nExpiries == riskReversals[i].length, "wrong number of rr; have {}, need {}", riskReversals[i].length, _nExpiries);
      ArgumentChecker.isTrue(_nExpiries == strangle[i].length, "wrong number of strangles; have {}, need {}", strangle[i].length, _nExpiries);
    }
    _forwards = forwards;
    _expiries = expiries;
    _forwardCurve = new ForwardCurve(InterpolatedDoublesCurve.from(_expiries, _forwards, interpolator));
    _strikes = new double[_nExpiries][];
    _vols = new double[_nExpiries][];
    for (int i = 0; i < _nExpiries; i++) {
      // Transpose the [delta][expiry] quotes into per-expiry slices.
      final double[] rr = new double[n];
      final double[] s = new double[n];
      for (int j = 0; j < n; j++) {
        rr[j] = riskReversals[j][i];
        s[j] = strangle[j][i];
      }
      final SmileDeltaParameters cal = new SmileDeltaParameters(_expiries[i], atms[i], deltas, rr, s);
      _strikes[i] = cal.getStrike(_forwards[i]);
      _vols[i] = cal.getVolatility();
    }
    _isCallData = isCallData;
    checkVolatilities(expiries, _vols);
  }

  /**
   * Builds the surface from delta-quoted data and an externally supplied
   * forward curve.
   */
  public ForexSmileDeltaSurfaceDataBundle(final ForwardCurve forwardCurve, final double[] expiries, final double[] deltas, final double[] atms, final double[][] riskReversals,
      final double[][] strangle, final boolean isCallData) {
    ArgumentChecker.notNull(deltas, "delta");
    ArgumentChecker.notNull(forwardCurve, "forward curve");
    ArgumentChecker.notNull(expiries, "expiries");
    ArgumentChecker.notNull(atms, "atms");
    ArgumentChecker.notNull(riskReversals, "risk reversals");
    ArgumentChecker.notNull(strangle, "strangle");
    _nExpiries = expiries.length;
    ArgumentChecker.isTrue(_nExpiries == atms.length, "atms wrong length; have {}, need {}", atms.length, _nExpiries);
    final int n = deltas.length;
    ArgumentChecker.isTrue(n > 0, "need at least one delta");
    ArgumentChecker.isTrue(n == riskReversals.length, "wrong number of rr sets; have {}, need {}", riskReversals.length, n);
    ArgumentChecker.isTrue(n == strangle.length, "wrong number of strangle sets; have {}, need {}", strangle.length, n);
    for (int i = 0; i < n; i++) {
      ArgumentChecker.isTrue(_nExpiries == riskReversals[i].length, "wrong number of rr; have {}, need {}", riskReversals[i].length, _nExpiries);
      ArgumentChecker.isTrue(_nExpiries == strangle[i].length, "wrong number of strangles; have {}, need {}", strangle[i].length, _nExpiries);
    }
    _forwards = new double[_nExpiries];
    _expiries = expiries;
    _forwardCurve = forwardCurve;
    _strikes = new double[_nExpiries][];
    _vols = new double[_nExpiries][];
    for (int i = 0; i < _nExpiries; i++) {
      _forwards[i] = forwardCurve.getForward(_expiries[i]);
      final double[] rr = new double[n];
      final double[] s = new double[n];
      for (int j = 0; j < n; j++) {
        rr[j] = riskReversals[j][i];
        s[j] = strangle[j][i];
      }
      final SmileDeltaParameters cal = new SmileDeltaParameters(_expiries[i], atms[i], deltas, rr, s);
      _strikes[i] = cal.getStrike(_forwards[i]);
      _vols[i] = cal.getVolatility();
    }
    _isCallData = isCallData;
    checkVolatilities(expiries, _vols);
  }

  /**
   * Builds the surface directly from strike/volatility grids.
   * NOTE(review): unlike the other constructors this one does not call
   * checkVolatilities — confirm whether that is intentional.
   */
  public ForexSmileDeltaSurfaceDataBundle(final ForwardCurve forwardCurve, final double[] expiries, final double[][] strikes, final double[][] vols, final boolean isCallData) {
    ArgumentChecker.notNull(forwardCurve, "forward curve");
    ArgumentChecker.notNull(expiries, "expiries");
    ArgumentChecker.notNull(strikes, "strikes");
    ArgumentChecker.notNull(vols, "vols");
    _nExpiries = expiries.length;
    ArgumentChecker.isTrue(_nExpiries == strikes.length, "strikes wrong length; have {}, need {}", strikes.length, _nExpiries);
    ArgumentChecker.isTrue(_nExpiries == vols.length, "implied vols wrong length; have {}, need {}", vols.length, _nExpiries);
    for (int i = 0; i < _nExpiries; i++) {
      ArgumentChecker.isTrue(strikes[i].length == vols[i].length, "wrong number of volatilities; have {}, need {}", strikes[i].length, vols[i].length);
    }
    _forwardCurve = forwardCurve;
    _expiries = expiries;
    _strikes = strikes;
    _vols = vols;
    _forwards = new double[_nExpiries];
    for (int i = 0; i < _nExpiries; i++) {
      _forwards[i] = forwardCurve.getForward(expiries[i]);
    }
    _isCallData = isCallData;
  }

  @Override
  public int getNumExpiries() {
    return _nExpiries;
  }

  @Override
  public double[] getExpiries() {
    return _expiries;
  }

  @Override
  public double[][] getStrikes() {
    return _strikes;
  }

  @Override
  public double[][] getVolatilities() {
    return _vols;
  }

  @Override
  public double[] getForwards() {
    return _forwards;
  }

  @Override
  public ForwardCurve getForwardCurve() {
    return _forwardCurve;
  }

  /**
   * Returns a copy of this bundle with a single volatility point bumped by
   * {@code amount}. The receiver is not modified.
   */
  @Override
  public SmileSurfaceDataBundle withBumpedPoint(final int expiryIndex, final int strikeIndex, final double amount) {
    ArgumentChecker.isTrue(ArgumentChecker.isInRangeExcludingHigh(0, _nExpiries, expiryIndex), "Invalid index for expiry; {}", expiryIndex);
    final double[][] strikes = getStrikes();
    ArgumentChecker.isTrue(ArgumentChecker.isInRangeExcludingHigh(0, strikes[expiryIndex].length, strikeIndex), "Invalid index for strike; {}", strikeIndex);
    final double[][] vols = new double[_nExpiries][];
    for (int i = 0; i < _nExpiries; i++) {
      // Bug fix: copy each row at its own length. The previous code sized and
      // copied every row using the bumped expiry's strike count, which threw
      // ArrayIndexOutOfBoundsException (or silently truncated data) whenever
      // expiries quote different numbers of strikes.
      vols[i] = _vols[i].clone();
    }
    vols[expiryIndex][strikeIndex] += amount;
    return new ForexSmileDeltaSurfaceDataBundle(getForwardCurve(), getExpiries(), getStrikes(), vols, _isCallData);
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + _forwardCurve.hashCode();
    result = prime * result + Arrays.deepHashCode(_vols);
    result = prime * result + Arrays.deepHashCode(_strikes);
    result = prime * result + Arrays.hashCode(_expiries);
    return result;
  }

  @Override
  public boolean equals(final Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    final ForexSmileDeltaSurfaceDataBundle other = (ForexSmileDeltaSurfaceDataBundle) obj;
    // NOTE(review): _forwards (derived from the curve) and _isCallData are
    // deliberately excluded from equals/hashCode; confirm callers do not
    // rely on call/put distinction for equality.
    if (!ObjectUtils.equals(_forwardCurve, other._forwardCurve)) {
      return false;
    }
    if (!Arrays.equals(_expiries, other._expiries)) {
      return false;
    }
    for (int i = 0; i < _nExpiries; i++) {
      if (!Arrays.equals(_strikes[i], other._strikes[i])) {
        return false;
      }
      if (!Arrays.equals(_vols[i], other._vols[i])) {
        return false;
      }
    }
    return true;
  }
}
|
def run(input_list):
    """Return the elements of *input_list* with duplicates removed,
    preserving first-seen order.
    """
    # dict keys are unique and preserve insertion order, which gives the
    # same result as the manual seen-set loop.
    return list(dict.fromkeys(input_list))
|
<gh_stars>1-10
package dockerhub
const (
	// APIURL is the dockerhub base URL.
	APIURL = "https://hub.docker.com"
	// APIVersion is the dockerhub API version segment.
	APIVersion = "v2"
	// APILogin is the authentication endpoint path.
	APILogin = "users/login"
	// APIRepositories is the dockerhub repositories API path.
	APIRepositories = "repositories"
	// Category is the category label attached to dockerhub documents.
	Category = "dockerhub-data"
	// Dockerhub is the data source name.
	Dockerhub string = "dockerhub"
)
|
#!/usr/bin/env bash
# Runs the rename tool at rename/main.go from the repository root.
# NOTE(review): assumes it is invoked from the directory containing rename/ — confirm.
go run rename/main.go
|
<filename>webpack.config.js
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
const path = require('path');
// Extracts compiled CSS into standalone files instead of inlining it in the JS bundle.
const MiniCssExtractPlugin = require('mini-css-extract-plugin')
module.exports = {
// Single entry point; everything reachable from src/index.js is bundled.
entry: './src/index.js',
output: {
// Emit the bundle under dist/js/.
path: path.resolve(__dirname, 'dist'),
filename: 'js/bundle.js'
},
module: {
rules: [{
// Pipeline for .scss files: sass-loader -> css-loader -> extract to file.
// (Loaders run bottom-up.)
test: /\.scss$/,
use: [
MiniCssExtractPlugin.loader,
{
loader: 'css-loader'
},
{
loader: 'sass-loader',
options: {
// Generate source maps for the compiled Sass.
sourceMap: true,
// options...
}
}
]
}]
},
plugins: [
// Name extracted stylesheets after their entry chunk, under dist/css/.
new MiniCssExtractPlugin({
filename: 'css/[name].bundle.css'
}),
]
};
|
<reponame>bowlofstew/blockchain-samples<filename>contracts/industry/LogisticsSplit.0.6/Common/common.go<gh_stars>100-1000
package Common;
import (
"encoding/json"
)
// BLSTATEKEY is the ledger key under which the B/L contract state is stored.
const BLSTATEKEY string = "BLSTATEKEY"
// CONTSTATEKEY is the ledger key under which the container contract state is stored.
const CONTSTATEKEY string = "CONTSTATEKEY"
// BLSTATE is the suffix appended when persisting per-B/L state records.
const BLSTATE string = "_STATE"
// const CONTHIST string = "_HIST"
// MYVERSION is the contract schema version recorded in state structs.
const MYVERSION string = "2.0.0"
// BLContractState is the persisted state of the Bill of Lading contract,
// recording the chaincode names it collaborates with.
type BLContractState struct {
Version string `json:"version"`
ContainerCC string `json:"containercc"`
ComplianceCC string `json:"compliancecc"`
}
// ContContractState is the persisted state of the container contract.
type ContContractState struct {
Version string `json:"version"`
ComplianceCC string `json:"compliancecc"`
}
// Geolocation is a latitude/longitude pair reported by a container sensor.
type Geolocation struct {
Latitude float64 `json:"latitude,omitempty"`
Longitude float64 `json:"longitude,omitempty"`
}
// Airquality holds gas-concentration readings from a container's air sensor.
type Airquality struct {
Oxygen float64 `json:"oxygen,omitempty"`
Carbondioxide float64 `json:"carbondioxide,omitempty"`
Ethylene float64 `json:"ethylene,omitempty"`
}
// NotifyRange is optional. It stands for the 'acceptable range', say 1 degree of lat and long
// at which the container should be, before it is considered 'arrived' at the 'Notified Party' location.
// If not sent in, some default could be assumed (say 1 degree).
type NotifyRange struct {
LatRange float64 `json:"latrange,omitempty"`
LongRange float64 `json:"longrange,omitempty"`
}
// This is a logistics contract, written in the context of shipping. It tracks the progress of a Bill of Lading
// and associated containers, and raises alerts in case of violations in expected conditions.
// Assumption 1. Bill of Lading is sacrosanct - Freight forwarders may issue intermediary freight bills, but
// the original B/L is the document we track end to end. Similarly a 'Corrected B/L' scenario is not considered.
// Assumption 2. A Bill of Lading can have multiple containers attached to it. We are, for simplicity, assuming that
// the same transit rules in terms of allowed ranges in temperature, humidity etc. apply across the B/L - i.e.
// applies to all containers attached to a Bill of Lading.
// Assumption 3. A shipment may switch from one container to another in transit, for various reasons. We are assuming,
// for simplicity, that the same containers are used for end to end transit.

// BillOfLadingRegistration is the initial registration of the Bill of Lading.
// It sets out the allowed sensor ranges for the B/L and the notification details.
type BillOfLadingRegistration struct {
BLNo string `json:"blno"`
ContainerNos string `json:"containernos"` // Comma separated container numbers - keep json simple
Hazmat bool `json:"hazmat,omitempty"` // shipment hazardous ?
MinTemperature float64 `json:"mintemperature,omitempty"` //split range to min and max: Jeff's input
MaxTemperature float64 `json:"maxtemperature,omitempty"`
MinHumidity float64 `json:"minhumidity,omitempty"` //split range to min and max: Jeff's input
MaxHumidity float64 `json:"maxhumidity,omitempty"`
MinLight float64 `json:"minlight,omitempty"` //split range to min and max: Jeff's input
MaxLight float64 `json:"maxlight,omitempty"`
MinAcceleration float64 `json:"minacceleration,omitempty"` //split range to min and max: Jeff's input
MaxAcceleration float64 `json:"maxacceleration,omitempty"`
//NotifyLocations *[]Geolocation `json:"notifylocations,omitempty"` // No implementation right now
//NotifyRange *NotifyRange `json:"notifyrange,omitempty"` // To be integrated when shipping part gets sorted out
TransitComplete bool `json:"transitcomplete,omitempty"`
Timestamp string `json:"timestamp,omitempty"`
}
// ContainerLogistics holds a single logistics/sensor reading at the container level.
type ContainerLogistics struct {
ContainerNo string `json:"containerno"`
BLNo string `json:"blno,omitempty"`
Location Geolocation `json:"location,omitempty"` // current asset location
Carrier string `json:"carrier,omitempty"` // the name of the carrier
Timestamp string `json:"timestamp"`
Temperature float64 `json:"temperature,omitempty"` // celcius
Humidity float64 `json:"humidity,omitempty"` // percent
Light float64 `json:"light,omitempty"` // lumen
Acceleration float64 `json:"acceleration,omitempty"`
DoorClosed bool `json:"doorclosed,omitempty"`
AirQuality Airquality `json:"airquality,omitempty"`
Extra json.RawMessage `json:"extra,omitempty"` // opaque passthrough payload, kept unparsed
AlertRecord string `json:"alerts,omitempty"`
TransitComplete bool `json:"transitcomplete,omitempty"`
}
// ComplianceState is the compliance record for a Bill of Lading, aggregating
// per-asset alert text and an overall compliance flag.
type ComplianceState struct {
BLNo string `json:"blno"`
Type string `json:"type"` // Default: DEFTYPE
Compliance bool `json:"compliance"`
AssetAlerts map[string]string `json:"assetalerts"` // keyed per asset; values are alert text
Active bool `json:"active,omitempty"`
Timestamp string `json:"timestamp"`
}
// Variation classifies a sensor reading relative to its allowed range.
type Variation string

// Allowed Variation values.
// Fix: in the original const block only Normal carried the Variation type;
// Above and Below were untyped string constants (default type string), so when
// stored in an interface{} or inspected via reflection they were string, not
// Variation. All three are now explicitly typed for consistency.
const (
	Normal Variation = "normal"
	Above  Variation = "above"
	Below  Variation = "below"
)
// Alerts describes common alerts reported by a sensor. Example Tetis.
// http://www.starcomsystems.com/download/Tetis_ENG.pdf
// Each field records whether the corresponding reading was normal, above or
// below its allowed range; DoorAlert flags an unexpected door-open event.
type Alerts struct {
TempAlert Variation `json:"tempalert,omitempty"`
HumAlert Variation `json:"humalert,omitempty"`
LightAlert Variation `json:"lightalert,omitempty"`
AccAlert Variation `json:"accalert,omitempty"`
DoorAlert bool `json:"dooralert,omitempty"`
}
// This is a logistics contract, written in the context of shipping. It tracks the progress of a Bill of Lading
// and associated containers, and raises alerts in case of violations in expected conditions
// Assumption 1. Bill of Lading is sacrosanct - Freight forwarders may issue intermediary freight bills, but
// the original B/L is the document we trackend to end. Similarly a 'Corrected B/L' scenario is not considered
// Assumption 2. A Bill of Lading can have multiple containers attached to it. We are, for simplicity, assuming that
// the same transit rules in terms of allowed ranges in temperature, humidity etc. apply across the B/L - i.e.
// applies to all containers attached to a Bill of Lading
// Assumption 3. A shipment may switch from one container to another in transit, for various reasons. We are assuming,
// for simplicity, that the same containers are used for end to end transit.
|
#!/bin/bash
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Removes database files made obsolete by datadir-format upgrades.
# Usage: cleanup <datadir>
if [ -d "$1" ]; then
cd "$1"
else
echo "Usage: $0 <datadir>" >&2
echo "Removes obsolete GleecBTC database files" >&2
exit 1
fi
# Detect the datadir format generation by the marker files present.
# Later checks overwrite earlier ones, so the newest matching layout wins.
LEVEL=0
if [ -f wallet.dat -a -f addr.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=1; fi
if [ -f wallet.dat -a -f peers.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=2; fi
if [ -f wallet.dat -a -f peers.dat -a -f coins/CURRENT -a -f blktree/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=3; fi
if [ -f wallet.dat -a -f peers.dat -a -f chainstate/CURRENT -a -f blocks/index/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=4; fi
case $LEVEL in
0)
echo "Error: no GleecBTC datadir detected."
exit 1
;;
1)
# Pre-0.7 layout has nothing obsolete to clean.
echo "Detected old GleecBTC datadir (before 0.7)."
echo "Nothing to do."
exit 0
;;
2)
echo "Detected GleecBTC 0.7 datadir."
;;
3)
echo "Detected GleecBTC pre-0.8 datadir."
;;
4)
echo "Detected GleecBTC 0.8 datadir."
;;
esac
# Build the deletion lists cumulatively: newer layouts obsolete more artifacts.
FILES=""
DIRS=""
if [ $LEVEL -ge 3 ]; then FILES=$(echo $FILES blk????.dat blkindex.dat); fi
if [ $LEVEL -ge 2 ]; then FILES=$(echo $FILES addr.dat); fi
if [ $LEVEL -ge 4 ]; then DIRS=$(echo $DIRS coins blktree); fi
for FILE in $FILES; do
if [ -f $FILE ]; then
echo "Deleting: $FILE"
rm -f $FILE
fi
done
for DIR in $DIRS; do
if [ -d $DIR ]; then
echo "Deleting: $DIR/"
rm -rf $DIR
fi
done
echo "Done."
|
//
// Created by Zhukov on 2020/7/23.
//
#ifndef MODBUSMASTER_CHECKSUM_H
#define MODBUSMASTER_CHECKSUM_H

// Fix: uint8_t / uint16_t require <stdint.h>; without it this header did not
// compile standalone (it only worked if every includer pulled stdint.h first).
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

// Compute an 8-bit LRC checksum over `length` bytes of `data`.
// NOTE(review): presumably the Modbus ASCII LRC given the project name — confirm
// against the implementation.
uint8_t checksum_lrc(uint8_t* data, uint16_t length);

// Compute a 16-bit CRC over `length` bytes of `data`.
// NOTE(review): presumably the Modbus RTU CRC-16 — confirm against the implementation.
uint16_t checksum_crc(uint8_t* data, uint16_t length);

#ifdef __cplusplus
}
#endif

#endif //MODBUSMASTER_CHECKSUM_H
|
<reponame>slipkinem/webpack-ts
import * as ext from 'vue-property-decorator'
import axios from 'axios'
// extending default vue instance with some more stuff
// Extends the default Vue base class so both the constructor (Vue.http) and
// instances (this.$http) expose the shared axios client.
export class Vue extends ext.Vue {
// Static accessor: lets class-level code reach axios via Vue.http.
static get http () {
return axios
}
// Instance accessor: lets components reach axios via this.$http.
get $http () {
return axios
}
}
// Re-export the vue-property-decorator decorators so consumers import
// everything from this module instead of the library directly.
export {
Inject,
Provide,
Model,
Prop,
Component,
Watch,
Emit
} from 'vue-property-decorator'
// declare module 'vue/types/vue' {
// interface VueConstructor {
// options: {
// name: string | undefined
// }
// }
// }
|
'use strict';
// Tiny relay server: takes a natural-language query (?q=), has wit.ai extract
// the message body, then fires an IFTTT Maker webhook that posts it to Slack.
let axios = require('axios');
let express = require('express');
let app = express();
// Both tokens must be present in the environment; requests fail otherwise.
let WIT_AI_TOKEN = process.env.WIT_AI_TOKEN;
let IFTTT_MAKER_KEY = process.env.IFTTT_MAKER_KEY;
app.get('/', function (req, res) {
// Now that we have the query,
// lets ask wit to parse it and return some helpful information
axios({
url: 'https://api.wit.ai/message',
method: 'GET',
params: {
v: '20160202',
q: req.query.q,
},
headers: {
'Authorization': `Bearer ${WIT_AI_TOKEN}`,
},
})
.then(function(response) {
// let's find the message we want to send
// NOTE(review): assumes wit always returns outcomes[0].entities.message_body[0];
// a miss throws here and is reported by the catch below.
let messageBody = response.data.outcomes[0].entities.message_body[0].value;
console.log(`Announcing to slack: ${messageBody}`);
return messageBody;
})
.then(function(message) {
// lets tell IFTTT to post the message
return axios.get(`https://maker.ifttt.com/trigger/incoming_slack_message/with/key/${IFTTT_MAKER_KEY}`, {
params: {
'value1': 'Matt wants to say:',
'value2': message,
},
});
})
.then(function(response) {
console.log(response.data);
// the request was successful!
res.json({
title: 'Successfully Posted to Slack',
text: 'You\'ve just hooked up Googiri, wit.ai, IFTTT, and Slack!',
});
})
.catch(function(err) {
// Any failure in the chain (wit parse, IFTTT call) lands here.
console.error(err);
res.send('OH NOES');
});
});
let server = app.listen(8000, function () {
console.log(`listening on localhost:${server.address().port}`);
});
|
<reponame>tdm1223/Algorithm<gh_stars>1-10
// TwoSum
// 2021.10.21
// Easy
class Solution
{
public:
    // Returns the indices i < j of the two entries summing to target, or an
    // empty vector if no such pair exists. Single pass with a value->index
    // hash map: O(n) time, O(n) extra space.
    vector<int> twoSum(vector<int>& nums, int target)
    {
        unordered_map<int, int> seen; // value -> index of an earlier element
        for (int idx = 0; idx < static_cast<int>(nums.size()); ++idx)
        {
            // If the complement was seen before, the earlier index comes first.
            auto hit = seen.find(target - nums[idx]);
            if (hit != seen.end())
            {
                return { hit->second, idx };
            }
            seen[nums[idx]] = idx;
        }
        return {};
    }
};
|
<gh_stars>0
class TechDeals::Scraper
  # Scrapes each deal on the page at +url+ (a site-relative path) and returns
  # an array of TechDeals::Items populated with name, url, full/sale price and
  # promo text.
  def self.scrape_items(url)
    # Fix: Kernel#open stopped opening URLs in Ruby 3.0; URI.open (open-uri)
    # is the supported way to fetch an HTTP resource as an IO.
    require 'open-uri'
    doc = Nokogiri::HTML(URI.open("https://www.dealsource.tech#{url}"))
    items = []
    doc.css('div.summary-item').each do |item|
      deal = TechDeals::Items.new
      deal.name = item.css('div.summary-title a').text
      deal.url = item.css('div.summary-title a').attribute('href').value
      # NOTE(review): assumes the excerpt reads like "Full $X Sale $Y", with the
      # prices at word positions 1 and 3 — confirm against the live page markup.
      deal.fullPrice = item.css('div.summary-excerpt p').text.split(" ")[1]
      deal.salePrice = item.css('div.summary-excerpt p').text.split(" ")[3]
      hold = item.css('div.summary-excerpt p strong').text.split(" ")
      hold.shift # drop the leading label word before the promo text
      deal.promo = hold.join(" ").gsub(/[()]/, "")
      items << deal
    end
    items
  end

  # Scrapes the category buttons from the deals landing page and returns an
  # array of TechDeals::Categories with name and url.
  def self.scrape_categories
    # Fix: same Kernel#open -> URI.open change as scrape_items (Ruby 3.0+).
    require 'open-uri'
    doc = Nokogiri::HTML(URI.open('https://www.dealsource.tech/deals-1'))
    categories = []
    doc.css('a.sqs-block-button-element--medium').each do |category|
      cats = TechDeals::Categories.new
      cats.name = category.text
      cats.url = category.attribute('href').value.to_s
      categories << cats
    end
    categories
  end
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.