text stringlengths 1 1.05M |
|---|
const config = require('config');
const validate = require('../middleware/validate');
const _ = require('lodash');
const Joi = require('joi');
const jwt = require('jsonwebtoken');
const PasswordComplexity = require('joi-password-complexity');
const bcrypt = require('bcrypt');
const express = require('express');
const mongoose = require('mongoose');
const { User } = require('../models/user');
const authRouter = express.Router();
authRouter.use(express.json());
// POST / — authenticate with email + password and return a signed JWT.
// Both failure paths answer 400 with the same generic message so a caller
// cannot probe which of the two credentials was wrong.
authRouter.post('/', validate(validateUser), async (req, res) => {
    const account = await User.findOne({ email: req.body.email });
    if (!account) return res.status(400).send('Invalid email or password.');

    const passwordMatches = await bcrypt.compare(req.body.password, account.password);
    if (!passwordMatches) return res.status(400).send('Invalid email or password.');

    res.send(account.generateAuthToken());
});
// Validates the login request body.
// Fixes: the original's email .min(0) was a no-op (Joi rejects empty strings
// anyway) and — more importantly — the password schema was never marked
// .required(), so a request without a password passed validation and later
// crashed bcrypt.compare with undefined.
function validateUser(user) {
    const schema = Joi.object({
        email: Joi.string().min(5).max(255).required().email(),
        // Password must satisfy all four complexity classes within 8–25 chars.
        password: new PasswordComplexity({
            min: 8,
            max: 25,
            lowerCase: 1,
            upperCase: 1,
            numeric: 1,
            symbol: 1,
            requirementCount: 4
        }).required()
    });
    return schema.validate(user);
}
module.exports = authRouter; |
class Vector3D:
    """A simple 3-component Cartesian vector."""

    def __init__(self, x, y, z):
        """Store the three components."""
        self.x = x
        self.y = y
        self.z = z

    def magnitude(self):
        """Return the Euclidean length sqrt(x^2 + y^2 + z^2)."""
        squared = self.x * self.x + self.y * self.y + self.z * self.z
        return squared ** 0.5
def printTable(array):
    """Print a 2-D list as left-justified, space-separated columns.

    BUG FIX: the original computed ``max(len(...))`` over each *row*
    (``for col in array`` iterates rows), which only produced correct
    widths for square/symmetric data. Transposing with ``zip(*array)``
    measures true per-column widths.
    """
    if not array:
        return
    # zip(*array) yields the columns; one max width per column.
    col_width = [max(len(str(x)) for x in col) for col in zip(*array)]
    for row in array:
        print(" ".join(str(x).ljust(col_width[i]) for i, x in enumerate(row)))
# Demo: a 3x3 grid prints as three aligned columns.
sample = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
printTable(sample)
# Expected output:
# 1 2 3
# 4 5 6
# 7 8 9
#!/usr/bin/env sh
# Build, push and deploy the airflow image.
# BUG FIX: without `set -e` a failed build still pushed the stale image and
# redeployed the stack; now the script aborts on the first failure.
set -e

docker build -t lelkaklel/docker-airflow:latest --no-cache .
docker push lelkaklel/docker-airflow:latest
docker stack deploy -c docker-compose.yml data_works_cloud
|
package com.infamous.zod.media.streaming.controller;
import javax.ws.rs.core.Response;
/**
 * Contract for serving stored media over HTTP.
 */
public interface MediaStreamingController {

    /**
     * Serves the media file identified by {@code fileId}.
     *
     * @param fileId identifier of the stored media file
     * @param range  raw HTTP {@code Range} header value; semantics of a null/
     *               absent range are decided by the implementation
     * @return a JAX-RS {@link Response} carrying the requested bytes
     */
    Response view(String fileId, String range);
}
|
import User from "../model/IUser";
import axios from "axios";
import IProject from "../model/IProject";
import IQuerySorting from "../model/IQuerySorting";
import Constants from '../constants';
/**
 * Fetches all projects (with time spent) for the given user, applying the
 * optional filter/sort settings. Rethrows any transport error after logging.
 *
 * @param myUser       user whose projects are requested; falls back to id 0
 *                     ("no user filter") when absent or invalid
 * @param querySorting optional filter string + sort direction
 * @returns the project list from the backend
 */
export default async function getAllProjectsWithTimeSpent(
  myUser: User | undefined, querySorting: IQuerySorting | undefined): Promise<IProject[]> {
  try {
    const endPoint = `${Constants.BASE_URL}/projects/user`;
    const userId = myUser !== undefined && myUser.id > 0 ? myUser.id : 0;
    // Default sort field when no filter string was supplied.
    const filter = querySorting !== undefined && querySorting.filterString !== undefined && querySorting.filterString !== ''
      ? querySorting.filterString : 'id';
    // BUG FIX: the original decided the sort direction by inspecting
    // filterString, so an explicitly supplied isAscSorting was silently
    // ignored whenever the filter string was empty. Use the flag itself.
    const isAscSorting = querySorting !== undefined && querySorting.isAscSorting !== undefined
      ? querySorting.isAscSorting.toString() : 'true';
    const params = new URLSearchParams([
      ['userId', userId.toString()],
      ['filterString', filter],
      ['isAscSorting', isAscSorting]
    ]);
    const response = await axios.get<IProject[]>(endPoint, { params });
    // Returning directly resolves the async function's promise.
    return response.data;
  } catch (error) {
    console.error("Error occurred when getting project from GetAllProjectsWithTimeSpent");
    throw error;
  }
}
/**
 * Fetches all projects whose name matches {@code projectName}.
 * Rethrows any transport error after logging.
 */
export async function getAllProjectsWithName(projectName: string): Promise<IProject[]> {
  try {
    const endPoint = `${Constants.BASE_URL}/projects/name`;
    const params = new URLSearchParams([
      ['projectName', projectName]
    ]);
    const response = await axios.get<IProject[]>(endPoint, { params });
    // Returning directly resolves the async function's promise.
    return response.data;
  } catch (error) {
    console.error("Error occurred when getting project from getAllProjectsWithName");
    throw error;
  }
}
|
#!/bin/bash
# Usage: ./run.sh <terraform-subcommand> <region>
# Runs terraform with a per-region variable and a per-region state file.
set -eu

if [ "$#" -lt 2 ]; then
    echo "usage: $0 <terraform-subcommand> <region>" >&2
    exit 1
fi

# Quote positional parameters so subcommands/regions survive word splitting.
terraform "$1" -var "region=$2" -state "state/$2.tfstate"
|
import { Action, ActionType } from '../actions/app'
import { initialState, State } from '../states/app'
// Alert severity levels the UI knows how to render.
type AlertType = 'success' | 'danger'
// Builds the alert payload stored in state.alert.
const createAlert = (type: AlertType, message: string) => ({ type, message })
/**
 * App-level UI reducer: chart visibility, loading flag, current alert and
 * the most recently fetched data sets.
 */
export function appReducer(state: State = initialState, action: Action): State {
  switch (action.type) {
    // Any API request hides the chart and shows the loading indicator.
    case ActionType.API_DDOS_DETECTION_REQUEST:
    case ActionType.API_FILTER_REQUEST:
    case ActionType.API_FLOW_RATE_REQUEST:
    case ActionType.API_PACKET_INTERVAL_REQUEST:
    case ActionType.API_STATISTIC_REQUEST:
      return { ...state, isChartOpen: false, isLoading: true }
    // Success payloads are applied only while a request is pending
    // (isLoading) — late responses after a cancel are dropped.
    case ActionType.API_DDOS_DETECTION_SUCCESS:
      return state.isLoading
        ? { ...state, isChartOpen: true, isLoading: false, ddosData: action.payload.data }
        : state
    case ActionType.API_FILTER_SUCCESS: {
      // FIX: braces scope this const to the case; the original leaked a
      // lexical declaration across case labels (no-case-declarations).
      const alertType = action.payload.data.result ? 'success' : 'danger'
      return {
        ...state,
        isChartOpen: false,
        isLoading: false,
        alert: createAlert(alertType, action.payload.data.message),
      }
    }
    case ActionType.API_FLOW_RATE_SUCCESS:
      return state.isLoading
        ? { ...state, isChartOpen: true, isLoading: false, flowData: action.payload.data }
        : state
    case ActionType.API_PACKET_INTERVAL_SUCCESS:
      return state.isLoading
        ? { ...state, isChartOpen: true, isLoading: false, intervalData: action.payload.data }
        : state
    case ActionType.API_STATISTIC_SUCCESS:
      return state.isLoading
        ? { ...state, isChartOpen: true, isLoading: false, statisticData: action.payload.data }
        : state
    // Any failure closes the chart and surfaces the error as a danger alert.
    case ActionType.API_DDOS_DETECTION_FAILURE:
    case ActionType.API_FILTER_FAILURE:
    case ActionType.API_FLOW_RATE_FAILURE:
    case ActionType.API_PACKET_INTERVAL_FAILURE:
    case ActionType.API_STATISTIC_FAILURE:
      return {
        ...state,
        isChartOpen: false,
        isLoading: false,
        alert: createAlert('danger', action.payload.error),
      }
    case ActionType.APP_CHANGE_MAIN_NAV:
      return { ...state, isChartOpen: false, isLoading: false, alert: undefined }
    case ActionType.APP_CLOSE_ALERT:
      return { ...state, alert: undefined }
    case ActionType.APP_CLOSE_LOADING_MODAL:
      return { ...state, isLoading: false }
    default:
      return state
  }
}
|
<gh_stars>1-10
/* THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. */
export { default as Belgium } from './Belgium';
export { default as Brazil } from './Brazil';
export { default as Bulgaria } from './Bulgaria';
export { default as Canada } from './Canada';
export { default as China } from './China';
export { default as Czech } from './Czech';
export { default as Denmark } from './Denmark';
export { default as EnGlobal } from './EnGlobal';
export { default as Estonia } from './Estonia';
export { default as Eu } from './Eu';
export { default as Finland } from './Finland';
export { default as France } from './France';
export { default as Germany } from './Germany';
export { default as Hungary } from './Hungary';
export { default as Italy } from './Italy';
export { default as Japan } from './Japan';
export { default as Korea } from './Korea';
export { default as Latvia } from './Latvia';
export { default as Lithuania } from './Lithuania';
export { default as Netherlands } from './Netherlands';
export { default as Norway } from './Norway';
export { default as Poland } from './Poland';
export { default as Romania } from './Romania';
export { default as Russia } from './Russia';
export { default as Slovenia } from './Slovenia';
export { default as Spain } from './Spain';
export { default as Sweden } from './Sweden';
export { default as Turkey } from './Turkey';
export { default as Uk } from './Uk';
export { default as Usa } from './Usa'; |
# Earlier variant (kept for reference): converts tabs to spaces and hard-cuts
# each line at 56 columns, without trimming trailing blanks.
#awk 'BEGIN{f="header.ssa"}{if ($0 ~ /^func /) {f=substr($0,6,match($0,/\(/)-6)}}!/^#/{gsub(/\t/, " "); print substr($0,1,56)>>f}' ../ssa.ssa
# Split ../ssa.ssa into one output file per function: lines before the first
# "func " go to header.ssa; each "func NAME(" line redirects output to NAME.
# Comment lines (#...) are skipped; kept lines are cut at 56 columns and
# trailing spaces are stripped.
awk 'BEGIN{f="header.ssa"}{if ($0 ~ /^func /) {f=substr($0,6,match($0,/\(/)-6)}}!/^#/{S=substr($0,1,56); sub(/[ ]+$/,"",S); print S>>f}' ../ssa.ssa
# Comparison helper (kept for reference): diff each per-function split file
# against a second run in ssa2/.
#for ff in `ls -1 ssa2 | grep -v 'header\|init'`; do for ii in `ls -1 ssa/main.${ff}.*.ssa`; do echo kompare ${ii} ssa2/${ff} \&\> /dev/null; done; done
|
<filename>homeassistant/components/sensor/geo_rss_events.py
"""
Generic GeoRSS events service.
Retrieves current events (typically incidents or alerts) in GeoRSS format, and
shows information on events filtered by distance to the HA instance's location
and grouped by category.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.geo_rss_events/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_UNIT_OF_MEASUREMENT, CONF_NAME,
CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_URL)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['georss_client==0.5']

_LOGGER = logging.getLogger(__name__)

# Keys used for the per-event state attributes.
ATTR_CATEGORY = 'category'
ATTR_DISTANCE = 'distance'
ATTR_TITLE = 'title'

# Platform-specific configuration option.
CONF_CATEGORIES = 'categories'

DEFAULT_ICON = 'mdi:alert'
DEFAULT_NAME = "Event Service"
DEFAULT_RADIUS_IN_KM = 20.0
DEFAULT_UNIT_OF_MEASUREMENT = 'Events'

DOMAIN = 'geo_rss_events'

# Feed is polled every 5 minutes.
SCAN_INTERVAL = timedelta(minutes=5)

# url is the only mandatory option; coordinates default to the HA instance's
# location in setup_platform().
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_URL): cv.string,
    vol.Optional(CONF_LATITUDE): cv.latitude,
    vol.Optional(CONF_LONGITUDE): cv.longitude,
    vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_CATEGORIES, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_UNIT_OF_MEASUREMENT,
                 default=DEFAULT_UNIT_OF_MEASUREMENT): cv.string,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the GeoRSS component.

    Creates one sensor per configured category, or a single catch-all
    sensor (category=None) when no categories were configured.
    """
    latitude = config.get(CONF_LATITUDE, hass.config.latitude)
    longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
    url = config.get(CONF_URL)
    radius_in_km = config.get(CONF_RADIUS)
    name = config.get(CONF_NAME)
    categories = config.get(CONF_CATEGORIES)
    unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
    _LOGGER.debug("latitude=%s, longitude=%s, url=%s, radius=%s",
                  latitude, longitude, url, radius_in_km)
    # The original duplicated the constructor call for the no-category and
    # per-category branches; `categories or [None]` folds both into one loop.
    devices = [
        GeoRssServiceSensor((latitude, longitude), url, radius_in_km,
                            category, name, unit_of_measurement)
        for category in (categories or [None])
    ]
    add_entities(devices, True)
class GeoRssServiceSensor(Entity):
    """Representation of a Sensor counting GeoRSS feed events."""

    def __init__(self, coordinates, url, radius, category, service_name,
                 unit_of_measurement):
        """Initialize the sensor."""
        # Category filter; None means "match any category".
        self._category = category
        self._service_name = service_name
        # Number of matching feed entries; None until the first update.
        self._state = None
        # Mapping of entry title -> distance string, populated by update().
        self._state_attributes = None
        self._unit_of_measurement = unit_of_measurement
        # Imported lazily so the georss_client requirement is only loaded
        # once the platform is actually set up.
        from georss_client.generic_feed import GenericFeed
        self._feed = GenericFeed(coordinates, url, filter_radius=radius,
                                 filter_categories=None if not category
                                 else [category])

    @property
    def name(self):
        """Return the name of the sensor."""
        # e.g. "Event Service Any" or "Event Service <category>".
        return '{} {}'.format(self._service_name,
                              'Any' if self._category is None
                              else self._category)

    @property
    def state(self):
        """Return the state of the sensor (number of matching events)."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Return the default icon to use in the frontend."""
        return DEFAULT_ICON

    @property
    def device_state_attributes(self):
        """Return the state attributes (title -> distance)."""
        return self._state_attributes

    def update(self):
        """Update this sensor from the GeoRSS service."""
        import georss_client
        status, feed_entries = self._feed.update()
        if status == georss_client.UPDATE_OK:
            _LOGGER.debug("Adding events to sensor %s: %s", self.entity_id,
                          feed_entries)
            self._state = len(feed_entries)
            # And now compute the attributes from the filtered events.
            matrix = {}
            for entry in feed_entries:
                matrix[entry.title] = '{:.0f}km'.format(
                    entry.distance_to_home)
            self._state_attributes = matrix
        elif status == georss_client.UPDATE_OK_NO_DATA:
            _LOGGER.debug("Update successful, but no data received from %s",
                          self._feed)
            # Don't change the state or state attributes.
        else:
            _LOGGER.warning("Update not successful, no data received from %s",
                            self._feed)
            # If no events were found due to an error then just set state to
            # zero.
            self._state = 0
            self._state_attributes = {}
|
package edu.unitn.pbam.androidproject.utilities;
import android.app.Application;
import android.content.Context;
import edu.unitn.pbam.androidproject.model.dao.BookDao;
import edu.unitn.pbam.androidproject.model.dao.CategoryDao;
import edu.unitn.pbam.androidproject.model.dao.DListDao;
import edu.unitn.pbam.androidproject.model.dao.MovieDao;
import edu.unitn.pbam.androidproject.model.dao.db.BookDaoDb;
import edu.unitn.pbam.androidproject.model.dao.db.CategoryDaoDb;
import edu.unitn.pbam.androidproject.model.dao.db.DListDaoDb;
import edu.unitn.pbam.androidproject.model.dao.db.MovieDaoDb;
/**
 * Application entry point that wires up the DB-backed DAO singletons and
 * exposes the application context statically.
 */
public class App extends Application {

    private static Context context;

    // Globally shared DAO instances, initialised once in onCreate().
    public static MovieDao mDao;
    public static BookDao bDao;
    public static CategoryDao cDao;
    public static DListDao dlDao;

    @Override
    public void onCreate() {
        super.onCreate();
        context = getApplicationContext();
        // Database-backed implementations of each DAO interface.
        mDao = new MovieDaoDb();
        bDao = new BookDaoDb();
        cDao = new CategoryDaoDb();
        dlDao = new DListDaoDb();
    }

    /** @return the process-wide application context captured in onCreate(). */
    public static Context getAppContext() {
        return context;
    }
}
|
<reponame>piscis/piscis-npm-dummy-pkg
console.log('Hello NPM'); |
# Usage: ./export.sh <input.svg> "<id1 id2 ...>"
# Exports each listed SVG object id (prefixed "id_") at several square PNG
# sizes into ../<size>/<id>.png.
SIZES="16 24 32 48"
for A in $2; do
    for B in $SIZES; do
        # FIX: quote the input path and derived arguments so filenames with
        # spaces survive word splitting. $2 stays unquoted intentionally —
        # it is a whitespace-separated list of ids.
        inkscape "$1" --export-id="id_$A" --export-png="../$B/$A.png" -w "$B" -h "$B"
    done
done
<gh_stars>1000+
"""
categories: Types,bytearray
description: Array slice assignment with unsupported RHS
cause: Unknown
workaround: Unknown
"""
# CPython grows the bytearray when the RHS is longer than the target slice
# (result: bytearray(b'\x01\x02\x00\x00\x00')); the port under test does not
# accept a plain list RHS here — this snippet documents that divergence.
b = bytearray(4)
b[0:1] = [1, 2]
print(b)
|
<reponame>lbovolini/ordena-comprime-enad
/* !TODO
* - Inserir arvore no arquivo comprimido
* - Descomprimir
* - Dividir em arquivos
* - Comentar
*/
#include <stdio.h>
#include <stdlib.h>
#define BIT_MAIS_BAIXO 1  /* mask selecting the lowest-order bit */

/* A Huffman code for one character: bit array, the character, bit count. */
typedef struct code {
    int *code;      /* bits (0/1) along the path from root to leaf */
    char caracter;
    int tam;        /* number of bits stored in 'code' */
} bin_code;

/* Node used both as a frequency-list element and as a Huffman tree node. */
typedef struct lista {
    char caracter;
    int cont;              /* occurrence count / subtree weight */
    struct lista *prox,    /* next node in the linked list */
                 *esq,     /* left child in the tree */
                 *dir;     /* right child in the tree */
} st_lista;

/* Singly linked list head plus element count. */
typedef struct descritor_lista {
    int n;
    st_lista *prim;
} descritor_lista;

/* Globals filled while emitting codes (see imprime_codigos). */
int CODE_MAX_SIZE = 0;
bin_code *codigos_bin;
int codigos_index = 0;
/* Allocates and initialises an empty list descriptor; exits on OOM. */
descritor_lista *cria_descritor_lista(void)
{
    descritor_lista *d = (descritor_lista*)malloc(sizeof(descritor_lista));
    if (d == NULL)
    {
        printf("Falta de memoria!\n");
        exit(1);
    }
    d->n = 0;
    d->prim = NULL;
    return d;
}
/* Allocates a list/tree node for 'caracter' with count 1 and no links;
   exits on allocation failure. */
st_lista *cria_lista(char caracter)
{
    st_lista *node = (st_lista*)malloc(sizeof(st_lista));
    if (node == NULL)
    {
        printf("Falta de memoria!\n");
        exit(EXIT_FAILURE);
    }
    node->caracter = caracter;
    node->cont = 1;
    node->prox = NULL;
    node->esq = NULL;
    node->dir = NULL;
    return node;
}
/* Linear search for 'caracter' in the frequency list; returns the node
   holding it, or NULL when absent (or when the list is empty). */
st_lista *busca_caracter(descritor_lista *descritor, char caracter)
{
    st_lista *node;
    for (node = descritor->prim; node != NULL; node = node->prox)
    {
        if (node->caracter == caracter)
            return node;
    }
    return NULL;
}
/* Inserts 'caracter' into the frequency list: appends a new node with
   count 1 when the character is unseen, otherwise bumps the existing
   node's counter. Returns NULL on every path (void* return kept for
   interface compatibility). */
void *insere_lista(descritor_lista *descritor, char caracter)
{
    st_lista *existe = NULL;
    if(!descritor) return NULL;
    /* Look the character up first. */
    existe = busca_caracter(descritor, caracter);
    if(!existe) {
        st_lista *lista = NULL,
                 *percorrer = NULL;
        lista = cria_lista(caracter);
        /* Empty list: new node becomes the head. */
        if(descritor->prim == NULL) {
            descritor->prim = lista;
        }
        else {
            /* Otherwise walk to the tail and append. */
            percorrer = descritor->prim;
            while(percorrer->prox)
                percorrer = percorrer->prox;
            percorrer->prox = lista;
        }
        descritor->n++;
    }
    else {
        existe->cont++;
    }
    return NULL;
}
/* Pops and returns the head node (lowest position); NULL when the list or
   the descriptor is missing/empty. The returned node is unlinked. */
st_lista *remove_lista(descritor_lista *descritor)
{
    st_lista *head;
    if (descritor == NULL || descritor->prim == NULL)
        return NULL;
    head = descritor->prim;
    descritor->prim = head->prox;
    head->prox = NULL;
    return head;
}
/* Sorts the frequency list ascending by 'cont' with insertion sort on the
   linked nodes (relinks, no copying).
   https://en.wikipedia.org/wiki/Insertion_sort */
st_lista *ordena_lista(descritor_lista *descritor)
{
    st_lista *pList = descritor->prim;
    // zero or one element in list: already sorted
    if(pList == NULL || pList->prox == NULL)
        return pList;
    // head is the first element of resulting sorted list
    st_lista * head = NULL;
    while(pList != NULL)
    {
        // detach the next unsorted node before relinking it
        st_lista * atual = pList;
        pList = pList->prox;
        if(head == NULL || atual->cont < head->cont) {
            // insert into the head of the sorted list
            // or as the first element into an empty sorted list
            atual->prox = head;
            head = atual;
        }
        else {
            // insert current element into proper position in non-empty sorted list
            st_lista *p = head;
            while(p != NULL)
            {
                if(p->prox == NULL || // last element of the sorted list
                   atual->cont < p->prox->cont) // middle of the list
                {
                    // insert into middle of the sorted list or as the last element
                    atual->prox = p->prox;
                    p->prox = atual;
                    break; // done
                }
                p = p->prox;
            }
        }
    }
    // publish the sorted chain back through the descriptor
    descritor->prim = head;
    return head;
}
/* Dumps every (character, count) pair in list order to stdout. */
void imprime_lista(descritor_lista *descritor)
{
    st_lista *node;
    if (descritor == NULL || descritor->prim == NULL) return;
    for (node = descritor->prim; node != NULL; node = node->prox)
    {
        printf("%c ", node->caracter);
        printf("%d\n", node->cont);
    }
}
/* Inserts node 'lista' into the descriptor's list keeping ascending order
   by 'cont'. Always returns NULL (void* kept for interface parity). */
void *insere_ordenado_lista(descritor_lista *descritor, st_lista *lista)
{
    st_lista *percorrer = NULL;
    if(!descritor) return NULL;
    /* Empty list, or the new node belongs before the current head. */
    if(!descritor->prim || descritor->prim->cont >= lista->cont) {
        lista->prox = descritor->prim;
        descritor->prim = lista;
        return NULL;
    }
    /* Walk to the last node whose count is still below the new node's. */
    percorrer = descritor->prim;
    while(percorrer->prox != NULL &&
          percorrer->prox->cont < lista->cont)
    {
        percorrer = percorrer->prox;
    }
    lista->prox = percorrer->prox;
    percorrer->prox = lista;
    return NULL;
}
/* Builds the Huffman tree in place: repeatedly removes the two lowest-count
   nodes, joins them under a new internal node ('\0' marker) whose count is
   their sum, and re-inserts it in order. Terminates with the tree root as
   the single remaining list element (descritor->prim). Returns NULL. */
void *gera_arvore(descritor_lista *descritor)
{
    st_lista *esq = NULL,
             *dir = NULL,
             *novo = NULL;
    if(!descritor || !descritor->prim) return NULL;
    while(descritor->prim->prox != NULL) {
        esq = remove_lista(descritor);
        dir = remove_lista(descritor);
        if(!esq || !dir) {
            printf("Erro gera_arvore\n");
            exit(EXIT_FAILURE);
        }
        /* Internal node: no character of its own, weight = sum of children. */
        novo = cria_lista('\0');
        novo->cont = esq->cont + dir->cont;
        novo->esq = esq;
        novo->dir = dir;
        insere_ordenado_lista(descritor, novo);
    }
    return NULL;
}
/* In-order traversal print of the tree: "<char> - <count>" per node. */
void imprime_arvore(st_lista *raiz)
{
    if (raiz == NULL)
        return;
    imprime_arvore(raiz->esq);
    printf("%c - %d\n", raiz->caracter, raiz->cont);
    imprime_arvore(raiz->dir);
}
/* Prints the first n ints with no separator, followed by a newline. */
void imprime_vetor(int arr[], int n)
{
    int i;
    for (i = 0; i < n; ++i)
    {
        printf("%d", arr[i]);
    }
    printf("\n");
}
/* Non-zero when the node has neither child, i.e. it is a leaf. */
int folha(st_lista *raiz)
{
    return (raiz->esq == NULL) && (raiz->dir == NULL);
}
/* Copies 'size' ints from src into dest (ranges must not overlap). */
void arrCpy(int src[], int dest[], int size)
{
    int i;
    for (i = 0; i < size; ++i)
        dest[i] = src[i];
}
/* Allocates an uninitialised array of n ints (caller frees). */
int *aloca_vetor(int n)
{
    return (int*) malloc(sizeof(int) * n);
}
/* Allocates an uninitialised array of n bin_code entries (caller frees). */
bin_code *aloca_bin(int n)
{
    return (bin_code*) malloc(sizeof(bin_code) * n);
}
// Prints huffman codes from the root of Huffman Tree. It uses arr[] to
// store codes. Side effect: each finished code is also recorded into the
// global codigos_bin table (character, copied bit array, length) for the
// later encoding pass.
void imprime_codigos(st_lista *raiz, int arr[], int top)
{
    // Assign 0 to left edge and recur
    if(raiz->esq) {
        arr[top] = 0;
        imprime_codigos(raiz->esq, arr, top + 1);
    }
    // Assign 1 to right edge and recur
    if(raiz->dir) {
        arr[top] = 1;
        imprime_codigos(raiz->dir, arr, top + 1);
    }
    // If this is a leaf node, then it contains one of the input
    // characters, print the character and its code from arr[]
    if (folha(raiz)) {
        printf("%d: ", raiz->caracter);
        imprime_vetor(arr, top);
        // Persist the code into the global table for get_code_index().
        codigos_bin[codigos_index].caracter = raiz->caracter;
        codigos_bin[codigos_index].code = aloca_vetor(top);
        codigos_bin[codigos_index].tam = top;
        arrCpy(arr, codigos_bin[codigos_index].code, top);
        codigos_index++;
    }
}
/* Returns the index of 'caracter' in the global code table, or -1 when the
   character has no recorded code. */
int get_code_index(char caracter)
{
    int i;
    for (i = 0; i < CODE_MAX_SIZE; ++i)
    {
        if (codigos_bin[i].caracter == caracter)
            return i;
    }
    return -1;
}
/* Main: reads the input file, builds per-character frequencies, generates
   the Huffman tree and bit codes, then writes the packed bit stream to
   saida.bin.
   Fixes over the original:
   - both scan loops ran with i <= lSize and read buffer[lSize], one byte
     past the malloc'd allocation;
   - the final partial word shifted 'total' instead of 'saida', so the
     trailing bits were written unshifted (and 'total' was corrupted);
   - fread's size_t result is now compared against a like-typed value. */
int main()
{
    int i, j;
    long lSize;
    char *buffer;
    size_t result;
    FILE *arquivo = NULL,
         *arquivo_saida = NULL;
    descritor_lista *descritor = NULL;

    arquivo = fopen("microdados_enade_2014_CCSI.csv", "r");
    if(!arquivo) {
        fputs("File error", stderr);
        exit(EXIT_FAILURE);
    }

    /* Obtain the file size. */
    fseek(arquivo, 0, SEEK_END);
    lSize = ftell(arquivo);
    rewind(arquivo);

    /* Allocate a buffer holding the whole file. */
    buffer = (char*) malloc(sizeof(char) * lSize);
    if(buffer == NULL) {
        fputs("Memory error", stderr);
        exit(EXIT_FAILURE);
    }

    /* Copy the file into the buffer. */
    result = fread(buffer, 1, lSize, arquivo);
    if(result != (size_t)lSize) {
        fputs("Reading error", stderr);
        exit(EXIT_FAILURE);
    }
    fclose(arquivo);

    /* Count character frequencies (i < lSize: stay inside the buffer). */
    descritor = cria_descritor_lista();
    for(i = 0; i < lSize; i++)
    {
        insere_lista(descritor, buffer[i]);
    }
    ordena_lista(descritor);
    imprime_lista(descritor);

    CODE_MAX_SIZE = descritor->n;
    codigos_bin = aloca_bin(CODE_MAX_SIZE);
    gera_arvore(descritor);

    int arr[CODE_MAX_SIZE];
    imprime_codigos(descritor->prim, arr, 0);

    /* Pack the per-character bit codes into 32-bit words and write them. */
    arquivo_saida = fopen("saida.bin", "w+b");
    unsigned int saida = 0, mascara, aux;
    unsigned int numBits;
    int cont, total = 0;
    for(i = 0; i < lSize; i++)
    {
        int index = get_code_index(buffer[i]);
        for(j = 0; j < codigos_bin[index].tam; j++)
        {
            char entrada = codigos_bin[index].code[j];
            numBits = 1;
            /* Shift one bit of 'entrada' into the accumulator. */
            for(; numBits >= 1 && total < 32; total++) {
                saida = saida << 1;
                for(cont = 1, mascara = BIT_MAIS_BAIXO; cont < numBits; cont++)
                    mascara = mascara << 1;
                aux = entrada & mascara;
                for(cont = 1; cont < numBits; cont++)
                    aux = aux >> 1;
                saida = saida | aux;
                numBits--;
            }
            if(total == 32) {
                /* A full 32-bit word is ready: flush it. */
                fwrite(&saida, sizeof(unsigned int), 1, arquivo_saida);
                if(ferror(arquivo_saida)) {
                    perror(__func__);
                    exit(EXIT_FAILURE);
                }
                saida = 0;
                total = 0;
            }
        }
    }
    if(total != 0) {
        /* Flush the final partial word, left-aligned in its 32-bit slot.
           BUG FIX: the original shifted 'total' instead of 'saida'. */
        int desl = 32 - total;
        saida = saida << desl;
        fwrite(&saida, sizeof(unsigned int), 1, arquivo_saida);
        if(ferror(arquivo_saida)) {
            perror(__func__);
            exit(EXIT_FAILURE);
        }
    }
    /* Inspect with: hexdump -C saida.bin
       Tree serialisation reference:
       http://stackoverflow.com/questions/759707/efficient-way-of-storing-huffman-tree */
    fclose(arquivo_saida);
    free(buffer);
    return EXIT_SUCCESS;
}
<gh_stars>0
class Config:
    # Discord bot token: create one at https://discord.com/developers/
    # -> New Application -> Bot -> Token, then replace this placeholder.
    TOKEN = "xxxxx"
<gh_stars>0
import {gl} from '../../globals';
/**
 * Base class for GPU-renderable geometry. Owns index / position / normal
 * WebGL buffers and tracks which of them have been created so the bind*()
 * helpers can report availability to the caller.
 */
abstract class Drawable {
  count: number = 0;

  bufIdx: WebGLBuffer;
  bufPos: WebGLBuffer;
  bufNor: WebGLBuffer;

  // True once the corresponding generate*() has created the buffer.
  idxBound: boolean = false;
  posBound: boolean = false;
  norBound: boolean = false;

  /** Fills the buffers with geometry; implemented by each concrete drawable. */
  abstract create(): void;

  /** Releases all WebGL buffers owned by this drawable. */
  destroy() {
    gl.deleteBuffer(this.bufIdx);
    gl.deleteBuffer(this.bufPos);
    gl.deleteBuffer(this.bufNor);
  }

  /** @deprecated Misspelled alias kept for existing callers — use destroy(). */
  destory() {
    this.destroy();
  }

  generateIdx() {
    this.idxBound = true;
    this.bufIdx = gl.createBuffer();
  }

  generatePos() {
    this.posBound = true;
    this.bufPos = gl.createBuffer();
  }

  generateNor() {
    this.norBound = true;
    this.bufNor = gl.createBuffer();
  }

  /** Binds the index buffer if it exists; returns whether it was bound. */
  bindIdx(): boolean {
    if (this.idxBound) {
      gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.bufIdx);
    }
    return this.idxBound;
  }

  /** Binds the position buffer if it exists; returns whether it was bound. */
  bindPos(): boolean {
    if (this.posBound) {
      gl.bindBuffer(gl.ARRAY_BUFFER, this.bufPos);
    }
    return this.posBound;
  }

  /** Binds the normal buffer if it exists; returns whether it was bound. */
  bindNor(): boolean {
    if (this.norBound) {
      gl.bindBuffer(gl.ARRAY_BUFFER, this.bufNor);
    }
    return this.norBound;
  }

  elemCount(): number {
    return this.count;
  }

  /** Primitive topology used when drawing; triangles by default. */
  drawMode(): GLenum {
    return gl.TRIANGLES;
  }
}

export default Drawable;
|
/*=============================================================================
Copyright (c) 2001-2010 <NAME>
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#include <boost/spirit/include/qi_eps.hpp>
#include <boost/spirit/include/qi_lazy.hpp>
#include <boost/spirit/include/qi_not_predicate.hpp>
#include <boost/phoenix/core.hpp>
#include <iostream>
#include "test.hpp"
int
main()
{
    using spirit_test::test;
    using boost::spirit::eps;

    {   // eps matches the empty string and consumes no input
        BOOST_TEST((test("", eps)));
        BOOST_TEST((test("xxx", eps, false)));
        BOOST_TEST((!test("", !eps))); // not predicate
    }

    { // test non-lazy semantic predicate
        BOOST_TEST((test("", eps(true))));
        BOOST_TEST((!test("", eps(false))));
        BOOST_TEST((test("", !eps(false)))); // not predicate
    }

    { // test lazy semantic predicate (condition evaluated at parse time)
        using boost::phoenix::val;
        BOOST_TEST((test("", eps(val(true)))));
        BOOST_TEST((!test("", eps(val(false)))));
        BOOST_TEST((test("", !eps(val(false))))); // not predicate
    }

    return boost::report_errors();
}
|
def Fibonacci(a):
    """Return the a-th Fibonacci number, 1-based:
    Fibonacci(1) == 0, Fibonacci(2) == 1, Fibonacci(3) == 1, ...

    Prints an error and returns None for non-positive input. The original
    only rejected a < 0, so Fibonacci(0) fell through to the recursive
    branch and crashed adding the None returned by negative arguments.
    The naive double recursion is replaced by an O(a) iteration.
    """
    if a <= 0:
        print("Incorrect input")
        return None
    prev, cur = 0, 1
    for _ in range(a - 1):
        prev, cur = cur, prev + cur
    return prev
# Driver: read the desired series length, then print that many terms.
limit = int(input('Enter the limit of fibonacci series:'))
for term_index in range(1, limit + 1):
    print(Fibonacci(term_index))
<filename>molicode-common/src/main/java/com/shareyi/molicode/common/chain/DefaultHandlerChain.java
package com.shareyi.molicode.common.chain;
import com.shareyi.molicode.common.chain.handler.Handler;
import java.util.List;
/**
 * Sequential {@link HandlerChain}: invokes each handler in list order,
 * advancing an internal cursor; each call to {@link #handle} dispatches to
 * the next handler (handlers continue the chain by calling back into it).
 * Not thread-safe: the cursor is unsynchronised mutable state.
 */
public class DefaultHandlerChain<T> implements HandlerChain<T> {

    /** Index of the next handler to invoke. */
    private int index = 0;
    private final List<? extends Handler<T>> handlers;

    public DefaultHandlerChain(List<? extends Handler<T>> handlers) {
        this.handlers = handlers;
    }

    /**
     * Passes {@code t} to the next handler, if any remain; otherwise a no-op.
     *
     * @throws Exception propagated from the invoked handler
     */
    public void handle(T t) throws Exception {
        Handler<T> next = this.getNext();
        if (next != null) {
            next.handle(t, this);
        }
    }

    /** Returns the next handler and advances the cursor, or null when exhausted. */
    protected Handler<T> getNext() {
        // FIX: the raw (Handler) cast in the original was unnecessary and
        // produced an unchecked warning — get() on List<? extends Handler<T>>
        // already yields a Handler<T>.
        return this.handlers != null && this.index < this.handlers.size()
                ? this.handlers.get(this.index++)
                : null;
    }
}
|
# coding=UTF-8
class InterviewPanel(object):
    """GUI helpers for the interview-notification tool."""

    @staticmethod
    def HomePanel():
        """Open a file-picker dialog for selecting the CSV config file.

        NOTE(review): easygui's selected path is discarded here — presumably
        the caller re-opens the dialog or reads it elsewhere; confirm the
        intended behavior.
        :return: None (the dialog result is not returned)
        """
        import easygui
        easygui.fileopenbox('打开配置文件csv')
class InterviewDocker(object):
account_sid = {
# SMTP邮箱
'username': '',
# SMTP验证码
'sid': '',
}
def __init__(self,
# SMTP账号信息,填写范式如上
account_sid: dict,
# 你的公司名/组织名,部门名,职位,姓名
org='《海大校友》杂志社',
dep='技术部',
position='部长',
I_am='张亦先',
# 通知【通过面试】或【被录用的】面试者下一步工作的截止时间,既该参数不会在向【被淘汰的】面试者发送的邮件中启用
ddl: str = '10月10日(周六)晚18:00',
# 公司/组织 XX数据库名称简写,用于向【被淘汰的】面试者发送的邮件正文编写,以及smtp邮箱设置
zone='「海南大学」人才库',
# 官方邮箱
official_mailbox='<EMAIL>',
# 公司/组织 网站域名
website_domain='yao.qinse.top',
# 加群分享链接,此项不可为空!
official_group_link='',
# 落款
SIGN='海大校友办公室|Alkaid 团队',
# 邮件主题
header='《海大校友》杂志社面试通知',
**kwargs):
self.err = kwargs.get('err')
# -----------------------------------------
# 邮件内容设置
# -----------------------------------------
# 你的公司名/组织名,部门名,职位,姓名 以及 正文称呼
self.org, self.dep, self.position, self.I_am = org, dep, position, I_am
self.I_AM = self.org + self.dep + self.position + self.I_am
# 邮件大标题 |《海大校友》杂志社 面试通知
self.TITLE = self.org + '面试通知'
# 通知【通过面试】或【被录用的】面试者下一步工作的截止时间,既该参数不会在向【被淘汰的】面试者发送的邮件中启用
self.ddl = ddl
# 公司/组织 XX数据库名称简写,用于向【被淘汰的】面试者发送的邮件正文编写,以及smtp邮箱设置
self.zone = zone
# 官方邮箱,
self.official_mailbox: str = official_mailbox
# 公司/组织 网站域名
self.website_domain = website_domain
# 加群分享链接
self.official_group_link: str = official_group_link
# 落款
self.SIGN = SIGN
# -----------------------------------------
# 邮件发送设置
# -----------------------------------------
# 邮件主题
self.header = header
# SMTP SETTING
self.account_sid = {
f'{self.zone}': {
'username': account_sid['username'],
'sid': account_sid['sid'],
},
}
def send_email(self, text_body: str, to_=None):
"""
:param text_body: -> str文本
:param to_: -> 接收者
:return:
"""
from smtplib import SMTP_SSL
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
from smtplib import SMTPDataError
if isinstance(to_, str):
to_ = [to_, ]
#####################################################
sender_name = self.account_sid[f'{self.zone}']['username']
sender_sid = self.account_sid[f'{self.zone}']['sid']
# 连接服务器
server = SMTP_SSL(host='smtp.qq.com')
qq_email = 'smtp.qq.com'
port = 465
#####################################################
# 邮件的正文内容
mail_content = "{}".format(text_body)
# 构建多媒体实例
msg = MIMEMultipart()
msg['Subject'] = Header(self.header)
msg['From'] = sender_name
msg.attach(MIMEText(mail_content, 'html', 'utf-8'))
#####################################################
errorList = [] # 发送失败的object
try:
# 链接服务器
server.connect(host=qq_email, port=port)
# 登陆服务器
server.login(user=sender_name, password=<PASSWORD>)
for receiver in to_:
try:
# 发送邮件:success
server.sendmail(sender_name, receiver, msg.as_string())
print('>>> success to {}'.format(receiver))
except SMTPDataError:
# 发送邮件:retry
errorList.append(receiver)
# server.sendmail(sender_name, sender_name, msg.as_string()) # 备用信道
print('error list append: {}'.format(receiver))
continue
while errorList.__len__() > 0:
to_ = errorList.pop()
try:
server.sendmail(sender_name, to_, msg.as_string())
except SMTPDataError:
print('panic object !!! {}'.format(to_))
finally:
server.quit()
def text_temple(self, to_name: str, temple: str, ):
"""
:param to_name:
:param temple: success,loser,winner
:return:
"""
# 称呼
TO_TAG = f'{to_name}同学,您好!',
# 致谢
TO_END_1 = '收到请回复,谢谢,幸苦了!',
TO_END_2 = '再次感谢您的信任与参与。',
# 正文模版1:录用
BODY_SUCCESS = [
TO_TAG,
f'我是{self.I_AM}。很高兴通知您,您已通过{self.dep}的综合能力测试,我们对您的整体表现非常满意。',
f'现通知您于{self.ddl}前加入{self.dep}工作群,后续通知将于群内发布,期待您的到来。',
TO_END_1
]
# 正文模版2:淘汰
BODY_LOSER = [
TO_TAG,
f'感谢您关注{self.org}校园招新,我们已经收到您提交的考核项目。',
f'很遗憾,结合您的技能熟练度、项目经历等进行综合评估,您与{self.dep}目前的需求仍有差距。',
f'您在考核过程中的应试态度给我们留下了深刻印象,我们会将您的信息保留在{self.zone}中,以便未来有合适的机会再与您联系。',
TO_END_2,
]
# 正文模版3:通过X轮面试
BODY_WINNER = [
TO_TAG,
f'我是{self.I_AM}。很高兴通知您,您已通过{self.dep}的首轮面试,我们对您的整体表现非常满意。',
f'现通知您于{self.ddl}前加入{self.dep}终轮考核群,相关挑战将于群内发布,期待您的到来。',
TO_END_1
]
# v2rayc spider
BODY_V2RAYC = [
TO_TAG,
self.err,
TO_END_1
]
# 模版返回
if temple == 'v2rayc_spider':
top_img = 'https://images.pexels.com/photos/3876430/pexels-photo-3876430.jpeg?auto=compress&cs=tinysrgb&h=750&w=1260'
return self.__txt2html__(top_img, BODY_V2RAYC, temple)
if temple == 'success':
top_img = 'https://images.pexels.com/photos/3876430/pexels-photo-3876430.jpeg?auto=compress&cs=tinysrgb&h=750&w=1260'
return self.__txt2html__(top_img, BODY_SUCCESS, temple)
elif temple == 'loser':
top_img = 'https://images.pexels.com/photos/3651615/pexels-photo-3651615.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500'
return self.__txt2html__(top_img, BODY_LOSER, temple)
elif temple == 'winner':
top_img = 'https://images.pexels.com/photos/3747154/pexels-photo-3747154.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260'
return self.__txt2html__(top_img, BODY_WINNER, temple)
else:
return None
    def __txt2html__(self, img_link, body, temple):
        """
        Render a complete notification e-mail as one HTML document.

        :param img_link: URL of the banner image placed at the top of the mail.
        :param body: iterable of paragraph strings; tuple items contribute only
            their first element.
        :param temple: template key — 'success'/'winner' link the footer to the
            official group, everything else links to the website domain.
        :return: the assembled HTML string.
        """
        def generate_imgAtlas():
            # Banner <img> tag filled with the caller-supplied image URL.
            tag = '<img class="gnd-corner-image gnd-corner-image-center gnd-corner-image-top" style="border: 0; display: block; height: auto; width: 100%; max-width: 900px;" src="{}" width="600" border="0px">'
            return tag.format(img_link)

        def generate_text_title():
            # <h1> headline; self.TITLE is presumably set elsewhere on the
            # instance — TODO confirm it is always defined before rendering.
            tag = '<h1 style="margin-top: 0; margin-bottom: 0; font-style: normal; font-weight: normal; color: #b59859; font-size: 26px; line-height: 34px; font-family: source sans pro,apple sd gothic neo,pt sans,trebuchet ms,sans-serif; text-align: left;"><span class="font-source-sans-pro"><strong><span style="color: #262626;">{}</span></strong></span></h1> '
            return tag.format(self.TITLE)

        def generate_text_body():
            # One <p> per body entry; tuples contribute only their first item.
            tag = '<p style="margin-top: 20px; margin-bottom: 0; font-family: roboto,tahoma,sans-serif;"><span class="font-roboto"><span style="color: #1b4665;">{}</span></span></p>'
            docker = []
            for text in body:
                if isinstance(text, tuple):
                    docker.append(tag.format(text[0]))
                    continue
                docker.append(tag.format(text))
            return ''.join(docker)

        def generate_text_sign():
            # Signature paragraph from self.SIGN (instance attribute).
            tag = '<p style="margin-top: 20px; margin-bottom: 0; font-family: roboto,tahoma,sans-serif;"><span class="font-roboto"><br><span style="color: #1b4665;">{}</span></span></p>'
            return tag.format(self.SIGN)

        def generate_contact_details():
            # Footer link target depends on the template kind.
            if temple == 'success' or temple == 'winner':
                href = self.official_group_link
            else:
                href = self.website_domain
            tag = f"""<span class="font-roboto" style="font-size: 12px; color: #808080;">访问网站:<a href="{href}"
            rel="noopener" target="_blank">{self.website_domain}</a>
            <br>联系我们:<a href="mailto:{self.official_mailbox}?to=qinse.top%40foxmail.com&biz_type=&
            crm_mtn_tracelog_template=2001649455&crm_mtn_tracelog_task_id=e47d2ad7-c107-4106-b332-4ed631556abe&
            crm_mtn_tracelog_from_sys=service_wolf-web&crm_mtn_tracelog_log_id=23765736451&
            from=teambition%40service.alibaba.com" rel="noopener" target="_blank">{self.official_mailbox}</a></span></p>
            """
            return tag

        # Final document: banner, title, body paragraphs, signature, footer.
        HTML_TEXT_BODY = f"""
        <div id="contentDiv" onmouseover="getTop().stopPropagation(event);" onclick="getTop().preSwapLink(event, 'html', 'ZC2813-K3KnnHZmbHcpPiJ90Yb0fa9');" style="position:relative;font-size:14px;height:auto;padding:15px 15px 10px 15px;z-index:1;zoom:1;line-height:1.7;" class="body"> <div id="qm_con_body"><div id="mailContentContainer" class="qmbox qm_con_body_content qqmail_webmail_only" style="">
        <p> </p>
        <table class="wrapper" style="border-collapse: collapse; table-layout: fixed; min-width: 320px; width: 100%; background-color: #f0eee7;" cellspacing="0" cellpadding="0">
        <tbody>
        <tr>
        <td>
        <div>
        <div class="preheader" style=" margin: 0 auto; max-width: 560px; min-width: 280px; ">
        <div style="border-collapse: collapse; display: table; width: 100%;">
        <div class="snippet" style=" display: table-cell; float: left; font-size: 12px; line-height: 19px; max-width: 280px; min-width: 140px; padding: 10px 0 5px 0; color: #b3b3b3; font-family: PT Sans,Trebuchet MS,sans-serif; ">
        </div>
        <div class="webversion" style=" display: table-cell; float: left; font-size: 12px; line-height: 19px; max-width: 280px; min-width: 139px; padding: 10px 0 5px 0; text-align: right; color: #b3b3b3; font-family: PT Sans,Trebuchet MS,sans-serif; ">
        </div>
        </div>
        </div>
        </div>
        <div>
        <div class="layout one-col fixed-width stack" style=" margin: 0 auto; max-width: 600px; min-width: 320px; overflow-wrap: break-word; word-wrap: break-word; word-break: break-word; ">
        <div class="layout__inner" style="border-collapse: collapse; display: table; width: 100%; background-color: #ffffff;">
        <div class="column" style="text-align: left; color: #61606c; font-size: 16px; line-height: 24px; font-family: PT Serif,Georgia,serif;">
        <div style="font-size: 12px; font-style: normal; font-weight: normal; line-height: 19px;" align="center">
        {generate_imgAtlas()}
        </div>
        <div style="margin-left: 20px; margin-right: 20px; margin-top: 20px;">
        <div style="mso-line-height-rule: exactly; line-height: 20px; font-size: 1px;">
        </div>
        </div>
        <div style="margin-left: 20px; margin-right: 20px;">
        <div style="mso-line-height-rule: exactly; mso-text-raise: 11px; vertical-align: middle; padding: 30px;">
        {generate_text_title()}
        <p style="margin-top: 20px; margin-bottom: 0;"> </p>
        {generate_text_body()}
        <p style="margin-top: 20px; margin-bottom: 0; font-family: roboto,tahoma,sans-serif;"> </p>
        {generate_text_sign()}
        </div>
        </div>
        </div>
        </div>
        </div>
        </div>
        <div>
        <div class="layout one-col fixed-width stack" style=" margin: 0 auto; max-width: 600px; min-width: 320px; overflow-wrap: break-word; word-wrap: break-word; word-break: break-word; ">
        <div class="layout__inner" style="border-collapse: collapse; display: table; width: 100%; background-color: #ffffff;">
        <div class="column" style="text-align: left; color: #61606c; font-size: 16px; line-height: 24px; font-family: PT Serif,Georgia,serif;">
        <div style="margin-left: 20px; margin-right: 20px;">
        <div style="mso-line-height-rule: exactly; mso-text-raise: 11px; vertical-align: middle; padding: 30px;">
        <p style="margin-top: 20px; margin-bottom: 0; font-family: roboto,tahoma,sans-serif;">
        {generate_contact_details()}
        </div>
        </div>
        </div>
        </div>
        </div>
        <div style="mso-line-height-rule: exactly; line-height: 30px; font-size: 30px;">
        </div>
        </div> </td>
        </tr>
        </tbody>
        </table>
        </div>"""
        return HTML_TEXT_BODY
def do_senderEne(self, name_list: dict, func: str):
"""
:param name_list:
:param func: loser、winner、success
:return:
"""
for info in name_list.items():
self.send_email(
text_body=self.text_temple(to_name=info[0], temple=func),
to_=info[-1]
)
# Module-level buffer holding the most recently formatted error line.
err_warning = ''


def prepare(err: str, func_name: str):
    """Format and store a timestamped error line in ``err_warning``.

    :param err: the error text to record.
    :param func_name: name of the function that produced the error.
    """
    global err_warning
    from datetime import datetime
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    err_warning = f'>>> {stamp}||{func_name}||{err}'
def run():
    """Broadcast mail: build the sender and push one mail per recipient."""
    # SMTP account used to authenticate against the mail server.
    smtp_account = {
        # SMTP mailbox
        'username': '<EMAIL>',
        # SMTP authorization code
        'sid': 'jppbcewcqrdgicec',
    }
    # Recipient display name -> address book.
    recipients = {
        'GGboy': '<EMAIL>'
    }
    mailer = InterviewDocker(smtp_account, err=err_warning)
    # v2rayc spider notification
    mailer.do_senderEne(recipients, func='v2rayc_spider')
    # Interview-rejection mail
    # mailer.do_senderEne(recipients, func='loser', )
    # Interview-passed mail (round X)
    # mailer.do_senderEne(recipients, func='winner')
    # Offer mail
    # mailer.do_senderEne(recipients, func='success')
# Entry point: run the broadcast only when executed as a script.
if __name__ == '__main__':
    run()
|
<reponame>openspending/subsidystories.eu
'use strict';
var _ = require('lodash');
var Vuex = require('vuex');
var subsidyStories = require('../../services/subsidy-stories');
module.exports = {
template: require('./template.html'),
data: function() {
return {
isLoaded: false,
isAvailable: false,
visualizations: subsidyStories.availableVisualizations
};
},
computed: Vuex.mapState([
'period',
'countryCode'
]),
watch: {
period: function() {
this.refresh();
}
},
methods: _.extend({
getVisualizationUrl: function(visualization) {
return subsidyStories.getVisualizationUrl(visualization,
this.countryCode, this.period);
},
refresh: function() {
var that = this;
that.$data.isLoaded = false;
that.$data.isAvailable = false;
that.getTotalSubsidies().then(function(value) {
that.$data.isAvailable = value > 0;
that.$data.isLoaded = true;
});
}
}, Vuex.mapActions([
'getTotalSubsidies'
])),
created: function() {
this.refresh();
}
};
|
const code = require('./codewriter')
const {writeArithmetic, writePushPop, writeLabel,writeGoto, writeIf, writeFunction, writeReturn , writeCall} = code
// VM command categories (Nand2Tetris chapters 7-8 classification).
const C_ARITHMETIC = 'C_ARITHMETIC';
const C_PUSH = 'C_PUSH';
const C_POP = 'C_POP';
const C_LABEL = 'C_LABEL';
const C_GOTO = 'C_GOTO';
const C_IF = 'C_IF';
const C_FUNCTION = 'C_FUNCTION';
const C_RETURN = 'C_RETURN';
const C_CALL = 'C_CALL';

// Patterns used to classify a cleaned-up source line (checked in order).
const arithmeticCommandRegex = /^(add|sub|neg|eq|gt|lt|and|or|not)/i;
const pushCommandRegex = /^push.*/i;
const popCommandRegex = /^pop.*/i;
const labelCommandRegex = /^label .*/i;
const gotoCommandRegex = /^goto .*/i;
const ifCommandRegex = /^if-goto .*/i;
const functionCommandRegex = /^function.*/i;
const returnCommandRegex = /^return.*/i;
const callCommandRegex = /^call.*/i;
// Matches a line consisting of (what is left of) a '//' comment.
const commentRegex = /\/\/.*/i;
/**
 * Translate an array of VM command lines into Hack assembly text.
 * @param {string[]} commands raw source lines (may contain comments/blanks)
 * @param {string} fileName used to namespace static segment symbols
 * @returns {string} concatenated assembly output
 */
function parser(commands, fileName) {
  // Strip comments and surrounding whitespace, then drop empty lines.
  const cleaned = commands
    .map(line => line.replace(/\/\/.*/ig, '').trim())
    .filter(line => !!line && !commentRegex.test(line));

  let output = '';
  for (const command of cleaned) {
    const cType = commandType(command);
    switch (cType) {
      case C_ARITHMETIC:
        output += writeArithmetic(command);
        break;
      case C_PUSH:
      case C_POP:
        output += writePushPop(command, cType, arg1(command), arg2(command), fileName);
        break;
      case C_LABEL:
        output += writeLabel(command);
        break;
      case C_GOTO:
        output += writeGoto(command);
        break;
      case C_IF:
        output += writeIf(command);
        break;
      case C_FUNCTION:
        output += writeFunction(command);
        break;
      case C_RETURN:
        output += writeReturn(command);
        break;
      case C_CALL:
        output += writeCall(command);
        break;
      default:
        // Unclassified lines contribute nothing to the output.
        break;
    }
  }
  return output;
}
/**
 * Whether there are commands left to consume.
 * Fix: coerce the result to a strict boolean — the original expression
 * `commands && commands.length > 0` returned the falsy `commands` value
 * itself (null/undefined) instead of `false`.
 * @param {string[]|null|undefined} commands
 * @returns {boolean}
 */
function hasMoreCommands(commands) {
  return !!commands && commands.length > 0;
}
/**
 * Pop and return the next command, or '' when the queue is exhausted.
 * (Inlines the emptiness check; observable behavior is unchanged.)
 * @param {string[]|null|undefined} commands mutated in place via shift()
 * @returns {string}
 */
function advance(commands) {
  if (!commands || commands.length === 0) {
    return '';
  }
  return commands.shift();
}
/**
 * Classify a single VM line into one of the C_* categories.
 * Patterns are tried in the same order as the original if/else chain,
 * so first match wins. Unrecognized lines get 'ignoreTypeCommand'.
 * @param {string} command
 * @returns {string}
 */
function commandType(command) {
  const cleaned = command.trim().replace(commentRegex, '');
  const classifiers = [
    [arithmeticCommandRegex, C_ARITHMETIC],
    [pushCommandRegex, C_PUSH],
    [popCommandRegex, C_POP],
    [labelCommandRegex, C_LABEL],
    [gotoCommandRegex, C_GOTO],
    [ifCommandRegex, C_IF],
    [functionCommandRegex, C_FUNCTION],
    [returnCommandRegex, C_RETURN],
    [callCommandRegex, C_CALL],
  ];
  for (const [pattern, type] of classifiers) {
    if (pattern.test(cleaned)) {
      return type;
    }
  }
  return 'ignoreTypeCommand';
}
/**
 * Second whitespace-separated token of a command,
 * e.g. 'constant' in 'push constant 7'.
 * @param {string} command
 * @returns {string|undefined}
 */
function arg1(command) {
  const tokens = command.split(' ').map(t => t.trim()).filter(Boolean);
  return tokens[1];
}
/**
 * Third whitespace-separated token of a command,
 * e.g. '7' in 'push constant 7'.
 * @param {string} command
 * @returns {string|undefined}
 */
function arg2(command) {
  const tokens = command.split(' ').map(t => t.trim()).filter(Boolean);
  return tokens[2];
}
module.exports = parser |
<gh_stars>1-10
import { fireEvent, render, screen } from '@testing-library/react';
import { signIn, signOut, useSession } from 'next-auth/react';
import { SignInButton } from '.';
// Replace next-auth's React helpers with jest auto-mocks so each test can
// control the session state (jest.mock is hoisted above the imports).
jest.mock('next-auth/react');

describe('SignInButton component', () => {
  it('renders correctly when user is not authenticated', () => {
    const useSessionMocked = jest.mocked(useSession);

    // Unauthenticated: useSession yields no session data.
    useSessionMocked.mockReturnValueOnce({ data: null, status: 'loading' });

    render(<SignInButton />);

    expect(screen.getByText('Sign in with GitHub')).toBeInTheDocument();
  });

  it('renders correctly when user is authenticated', () => {
    const useSessionMocked = jest.mocked(useSession);

    // Authenticated: provide a minimal session object.
    useSessionMocked.mockReturnValueOnce({
      data: {
        user: {
          name: '<NAME>',
          email: '<EMAIL>',
        },
        expires: 'fake-expires',
      },
      status: 'authenticated',
    });

    render(<SignInButton />);

    // The button should display the signed-in user's name.
    expect(screen.getByText('<NAME>')).toBeInTheDocument();
  });

  it('redirects user to sign in when not authenticated', () => {
    const useSessionMocked = jest.mocked(useSession);
    const signInMocked = jest.mocked(signIn);

    useSessionMocked.mockReturnValueOnce({ data: null, status: 'loading' });

    render(<SignInButton />);

    // Clicking the call-to-action should trigger next-auth's signIn().
    const signInButton = screen.getByText('Sign in with GitHub');
    fireEvent.click(signInButton);

    expect(signInMocked).toHaveBeenCalled();
  });

  it('redirects user to sign out when authenticated', () => {
    const useSessionMocked = jest.mocked(useSession);
    const signOutMocked = jest.mocked(signOut);

    useSessionMocked.mockReturnValueOnce({
      data: {
        user: {
          name: '<NAME>',
          email: '<EMAIL>',
        },
        expires: 'fake-expires',
      },
      status: 'authenticated',
    });

    render(<SignInButton />);

    // The close icon acts as the sign-out control.
    const signOutButton = screen.getByTitle('closeIcon');
    fireEvent.click(signOutButton);

    expect(signOutMocked).toHaveBeenCalled();
  });
});
|
#!/usr/bin/zsh
# Cron entry point: refresh crantastic task views in production.

# Make sure we have a somewhat reasonable PATH
PATH="/usr/local/bin:/bin:/usr/bin:$PATH"

cd /u/apps/crantastic/current

# Load RVM (when installed) and select the project's ruby/gemset.
[[ -s $HOME/.rvm/scripts/rvm ]] && source $HOME/.rvm/scripts/rvm
rvm use 1.9.3-p547@crantastic

# Run the rake task against the production environment.
RAILS_ENV=production bundle exec rake crantastic:update_taskviews
|
#!/bin/bash
# This script downlaods and builds the Mac, iOS and tvOS libcurl libraries with Bitcode enabled
# Credits:
#
# Felix Schwarz, IOSPIRIT GmbH, @@felix_schwarz.
# https://gist.github.com/c61c0f7d9ab60f53ebb0.git
# Bochun Bai
# https://github.com/sinofool/build-libcurl-ios
# Jason Cox, @jasonacox
# https://github.com/jasonacox/Build-OpenSSL-cURL
# Preston Jennings
# https://github.com/prestonj/Build-OpenSSL-cURL
set -e

# Formatting (ANSI escape sequences for log output).
default="\033[39m"
# Fix: was misspelled 'wihte', which left ${white} (used by 'normal' and
# 'dim' below) permanently empty. No other line referenced the misspelling.
white="\033[97m"
green="\033[32m"
red="\033[91m"
yellow="\033[33m"
bold="\033[0m${green}\033[1m"
subbold="\033[0m${green}"
archbold="\033[0m${yellow}\033[1m"
normal="${white}\033[0m"
dim="\033[0m${white}\033[2m"
alert="\033[0m${red}\033[1m"
alertdim="\033[0m${red}\033[2m"

# set trap to help debug any build errors
trap 'echo -e "${alert}** ERROR with Build - Check /tmp/curl*.log${alertdim}"; tail -3 /tmp/curl*.log' INT TERM EXIT

# Build configuration defaults (overridable via command-line flags).
CURL_VERSION="curl-7.50.1"
IOS_SDK_VERSION=""
IOS_MIN_SDK_VERSION="7.1"
TVOS_SDK_VERSION=""
TVOS_MIN_SDK_VERSION="9.0"
IPHONEOS_DEPLOYMENT_TARGET="6.0"
nohttp2="0"
# Print CLI usage and exit 127 (clears the error trap first so the EXIT
# handler does not fire on this deliberate exit).
usage ()
{
	echo
	echo -e "${bold}Usage:${normal}"
	echo
	echo -e "  ${subbold}$0${normal} [-v ${dim}<curl version>${normal}] [-s ${dim}<iOS SDK version>${normal}] [-t ${dim}<tvOS SDK version>${normal}] [-i ${dim}<iPhone target version>${normal}] [-b] [-x] [-n] [-h]"
	echo
	echo "         -v   version of curl (default $CURL_VERSION)"
	echo "         -s   iOS SDK version (default $IOS_MIN_SDK_VERSION)"
	echo "         -t   tvOS SDK version (default $TVOS_MIN_SDK_VERSION)"
	echo "         -i   iPhone target version (default $IPHONEOS_DEPLOYMENT_TARGET)"
	echo "         -b   compile without bitcode"
	echo "         -n   compile with nghttp2"
	echo "         -x   disable color output"
	echo "         -h   show usage"
	echo
	trap - INT TERM EXIT
	exit 127
}
# Parse command-line flags; see usage() for their meaning.
while getopts "v:s:t:i:nbxh\?" o; do
	case "${o}" in
		v)
			# override curl version to build
			CURL_VERSION="curl-${OPTARG}"
			;;
		s)
			IOS_SDK_VERSION="${OPTARG}"
			;;
		t)
			TVOS_SDK_VERSION="${OPTARG}"
			;;
		i)
			IPHONEOS_DEPLOYMENT_TARGET="${OPTARG}"
			;;
		n)
			# enable nghttp2 (HTTP/2) support
			nohttp2="1"
			;;
		b)
			NOBITCODE="yes"
			;;
		x)
			# strip all ANSI color codes from output
			bold=""
			subbold=""
			normal=""
			dim=""
			alert=""
			alertdim=""
			archbold=""
			;;
		*)
			usage
			;;
	esac
done
shift $((OPTIND-1))
# Paths to the companion libressl build and the Xcode toolchain root.
OPENSSL="${PWD}/../libressl"
DEVELOPER=`xcode-select -print-path`

# HTTP2 support
# NOTE(review): despite the name, nohttp2="1" means HTTP/2 is *enabled*
# (it is set by the -n "compile with nghttp2" flag) — confirm before renaming.
if [ $nohttp2 == "1" ]; then
	# nghttp2 will be in ../nghttp2/{Platform}/{arch}
	NGHTTP2="${PWD}/../nghttp2"
fi

if [ $nohttp2 == "1" ]; then
	echo "Building with HTTP2 Support (nghttp2)"
else
	echo "Building without HTTP2 Support (nghttp2)"
	NGHTTP2CFG=""
	NGHTTP2LIB=""
fi
# Build a static libcurl for macOS. $1 = architecture (e.g. x86_64).
# Output install tree: /tmp/${CURL_VERSION}-${ARCH}; log: same path + .log.
buildMac()
{
	ARCH=$1

	HOST="x86_64-apple-darwin"

	echo -e "${subbold}Building ${CURL_VERSION} for ${archbold}${ARCH}${dim}"

	if [ $nohttp2 == "1" ]; then
		NGHTTP2CFG="--with-nghttp2=${NGHTTP2}/Mac/${ARCH}"
		NGHTTP2LIB="-L${NGHTTP2}/Mac/${ARCH}/lib"
	fi

	# Fix: BUILD_TOOLS was referenced here before anything set it (only
	# buildIOS/buildTVOS export it, and they run *after* buildMac), so CC
	# silently resolved to /usr/bin/clang. Point it at the Xcode toolchain
	# the same way the other build functions do.
	# (Also removed the unused TARGET variable, a leftover from the
	# OpenSSL build script this one was derived from.)
	export BUILD_TOOLS="${DEVELOPER}"
	export CC="${BUILD_TOOLS}/usr/bin/clang"
	export CFLAGS="-arch ${ARCH} -pipe -Os -gdwarf-2 -fembed-bitcode"
	export LDFLAGS="-arch ${ARCH} -L${OPENSSL}/Mac/lib ${NGHTTP2LIB}"

	pushd . > /dev/null
	cd "${CURL_VERSION}"
	./configure -prefix="/tmp/${CURL_VERSION}-${ARCH}" --disable-shared --enable-static -with-random=/dev/urandom --with-ssl=${OPENSSL}/Mac ${NGHTTP2CFG} --host=${HOST} &> "/tmp/${CURL_VERSION}-${ARCH}.log"

	make -j8 >> "/tmp/${CURL_VERSION}-${ARCH}.log" 2>&1
	make install >> "/tmp/${CURL_VERSION}-${ARCH}.log" 2>&1
	# Save curl binary for Mac Version
	cp "/tmp/${CURL_VERSION}-${ARCH}/bin/curl" "/tmp/curl"
	make clean >> "/tmp/${CURL_VERSION}-${ARCH}.log" 2>&1
	popd > /dev/null
}
# Cross-compile a static libcurl for iOS. $1 = arch, $2 = "bitcode"/"nobitcode".
buildIOS()
{
	ARCH=$1
	BITCODE=$2

	pushd . > /dev/null
	cd "${CURL_VERSION}"

	# Simulator archs build against the iPhoneSimulator SDK.
	if [[ "${ARCH}" == "i386" || "${ARCH}" == "x86_64" ]]; then
		PLATFORM="iPhoneSimulator"
	else
		PLATFORM="iPhoneOS"
	fi

	if [[ "${BITCODE}" == "nobitcode" ]]; then
		CC_BITCODE_FLAG=""
	else
		CC_BITCODE_FLAG="-fembed-bitcode"
	fi

	if [ $nohttp2 == "1" ]; then
		NGHTTP2CFG="--with-nghttp2=${NGHTTP2}/iOS/${ARCH}"
		NGHTTP2LIB="-L${NGHTTP2}/iOS/${ARCH}/lib"
	fi

	# NOTE(review): 'export $PLATFORM' exports an (empty) variable *named*
	# after the platform string (e.g. iPhoneOS); it looks like it was meant
	# to be 'export PLATFORM' — confirm before changing.
	export $PLATFORM
	export CROSS_TOP="${DEVELOPER}/Platforms/${PLATFORM}.platform/Developer"
	export CROSS_SDK="${PLATFORM}${IOS_SDK_VERSION}.sdk"
	export BUILD_TOOLS="${DEVELOPER}"
	export CC="${BUILD_TOOLS}/usr/bin/gcc"
	export CFLAGS="-arch ${ARCH} -pipe -Os -gdwarf-2 -isysroot ${CROSS_TOP}/SDKs/${CROSS_SDK} -miphoneos-version-min=${IOS_MIN_SDK_VERSION} ${CC_BITCODE_FLAG}"
	export LDFLAGS="-arch ${ARCH} -isysroot ${CROSS_TOP}/SDKs/${CROSS_SDK} -L${OPENSSL}/iOS/lib ${NGHTTP2LIB}"

	echo -e "${subbold}Building ${CURL_VERSION} for ${PLATFORM} ${IOS_SDK_VERSION} ${archbold}${ARCH}${dim} ${BITCODE}"

	# arm targets need the generic arm-apple-darwin host triple.
	if [[ "${ARCH}" == *"arm64"* || "${ARCH}" == "arm64e" ]]; then
		./configure -prefix="/tmp/${CURL_VERSION}-iOS-${ARCH}-${BITCODE}" --disable-shared --enable-static -with-random=/dev/urandom --with-ssl=${OPENSSL}/iOS ${NGHTTP2CFG} --host="arm-apple-darwin" &> "/tmp/${CURL_VERSION}-iOS-${ARCH}-${BITCODE}.log"
	else
		./configure -prefix="/tmp/${CURL_VERSION}-iOS-${ARCH}-${BITCODE}" --disable-shared --enable-static -with-random=/dev/urandom --with-ssl=${OPENSSL}/iOS ${NGHTTP2CFG} --host="${ARCH}-apple-darwin" &> "/tmp/${CURL_VERSION}-iOS-${ARCH}-${BITCODE}.log"
	fi

	make -j8 >> "/tmp/${CURL_VERSION}-iOS-${ARCH}-${BITCODE}.log" 2>&1
	make install >> "/tmp/${CURL_VERSION}-iOS-${ARCH}-${BITCODE}.log" 2>&1
	make clean >> "/tmp/${CURL_VERSION}-iOS-${ARCH}-${BITCODE}.log" 2>&1
	popd > /dev/null
}
# Cross-compile a static libcurl for tvOS (bitcode always on). $1 = arch.
buildTVOS()
{
	ARCH=$1

	pushd . > /dev/null
	cd "${CURL_VERSION}"

	if [[ "${ARCH}" == "i386" || "${ARCH}" == "x86_64" ]]; then
		PLATFORM="AppleTVSimulator"
	else
		PLATFORM="AppleTVOS"
	fi

	if [ $nohttp2 == "1" ]; then
		NGHTTP2CFG="--with-nghttp2=${NGHTTP2}/tvOS/${ARCH}"
		NGHTTP2LIB="-L${NGHTTP2}/tvOS/${ARCH}/lib"
	fi

	# NOTE(review): same 'export $PLATFORM' oddity as buildIOS — confirm.
	export $PLATFORM
	export CROSS_TOP="${DEVELOPER}/Platforms/${PLATFORM}.platform/Developer"
	export CROSS_SDK="${PLATFORM}${TVOS_SDK_VERSION}.sdk"
	export BUILD_TOOLS="${DEVELOPER}"
	export CC="${BUILD_TOOLS}/usr/bin/gcc"
	export CFLAGS="-arch ${ARCH} -pipe -Os -gdwarf-2 -isysroot ${CROSS_TOP}/SDKs/${CROSS_SDK} -mtvos-version-min=${TVOS_MIN_SDK_VERSION} -fembed-bitcode"
	export LDFLAGS="-arch ${ARCH} -isysroot ${CROSS_TOP}/SDKs/${CROSS_SDK} -L${OPENSSL}/tvOS/lib ${NGHTTP2LIB}"
	# export PKG_CONFIG_PATH

	echo -e "${subbold}Building ${CURL_VERSION} for ${PLATFORM} ${TVOS_SDK_VERSION} ${archbold}${ARCH}${dim}"

	./configure -prefix="/tmp/${CURL_VERSION}-tvOS-${ARCH}" --host="arm-apple-darwin" --disable-shared -with-random=/dev/urandom --disable-ntlm-wb --with-ssl="${OPENSSL}/tvOS" ${NGHTTP2CFG} &> "/tmp/${CURL_VERSION}-tvOS-${ARCH}.log"

	# Patch to not use fork() since it's not available on tvOS
	LANG=C sed -i -- 's/define HAVE_FORK 1/define HAVE_FORK 0/' "./lib/curl_config.h"
	LANG=C sed -i -- 's/HAVE_FORK"]=" 1"/HAVE_FORK\"]=" 0"/' "config.status"

	make -j8 >> "/tmp/${CURL_VERSION}-tvOS-${ARCH}.log" 2>&1
	make install >> "/tmp/${CURL_VERSION}-tvOS-${ARCH}.log" 2>&1
	make clean >> "/tmp/${CURL_VERSION}-tvOS-${ARCH}.log" 2>&1
	popd > /dev/null
}
echo -e "${bold}Cleaning up${dim}"
rm -rf include/curl/* lib/*

mkdir -p lib
mkdir -p include/curl/

# Fix: these globs were quoted ("/tmp/${CURL_VERSION}-*"), which suppresses
# pathname expansion, so stale build trees and logs were never removed.
rm -rf /tmp/${CURL_VERSION}-*
rm -rf /tmp/${CURL_VERSION}-*.log

rm -rf "${CURL_VERSION}"

# Download the release tarball only when it is not already cached locally.
if [ ! -e ${CURL_VERSION}.tar.gz ]; then
	echo "Downloading ${CURL_VERSION}.tar.gz"
	curl -LO https://curl.haxx.se/download/${CURL_VERSION}.tar.gz
else
	echo "Using ${CURL_VERSION}.tar.gz"
fi

echo "Unpacking curl"
tar xfz "${CURL_VERSION}.tar.gz"
# Build each platform, then lipo the per-arch static libs into fat archives.
echo -e "${bold}Building Mac libraries${dim}"
buildMac "x86_64"

# Public headers are identical across platforms; take them from the Mac build.
echo "  Copying headers"
cp /tmp/${CURL_VERSION}-x86_64/include/curl/* include/curl/

lipo \
	"/tmp/${CURL_VERSION}-x86_64/lib/libcurl.a" \
	-create -output lib/libcurl_Mac.a

echo -e "${bold}Building iOS libraries (bitcode)${dim}"
buildIOS "armv7" "bitcode"
buildIOS "armv7s" "bitcode"
buildIOS "arm64" "bitcode"
buildIOS "arm64e" "bitcode"
buildIOS "x86_64" "bitcode"
buildIOS "i386" "bitcode"

lipo \
	"/tmp/${CURL_VERSION}-iOS-armv7-bitcode/lib/libcurl.a" \
	"/tmp/${CURL_VERSION}-iOS-armv7s-bitcode/lib/libcurl.a" \
	"/tmp/${CURL_VERSION}-iOS-i386-bitcode/lib/libcurl.a" \
	"/tmp/${CURL_VERSION}-iOS-arm64-bitcode/lib/libcurl.a" \
	"/tmp/${CURL_VERSION}-iOS-arm64e-bitcode/lib/libcurl.a" \
	"/tmp/${CURL_VERSION}-iOS-x86_64-bitcode/lib/libcurl.a" \
	-create -output lib/libcurl_iOS.a

# Optional second iOS pass without bitcode (enabled with -b).
if [[ "${NOBITCODE}" == "yes" ]]; then
	echo -e "${bold}Building iOS libraries (nobitcode)${dim}"
	buildIOS "armv7" "nobitcode"
	buildIOS "armv7s" "nobitcode"
	buildIOS "arm64" "nobitcode"
	buildIOS "arm64e" "nobitcode"
	buildIOS "x86_64" "nobitcode"
	buildIOS "i386" "nobitcode"

	lipo \
		"/tmp/${CURL_VERSION}-iOS-armv7-nobitcode/lib/libcurl.a" \
		"/tmp/${CURL_VERSION}-iOS-armv7s-nobitcode/lib/libcurl.a" \
		"/tmp/${CURL_VERSION}-iOS-i386-nobitcode/lib/libcurl.a" \
		"/tmp/${CURL_VERSION}-iOS-arm64-nobitcode/lib/libcurl.a" \
		"/tmp/${CURL_VERSION}-iOS-arm64e-nobitcode/lib/libcurl.a" \
		"/tmp/${CURL_VERSION}-iOS-x86_64-nobitcode/lib/libcurl.a" \
		-create -output lib/libcurl_iOS_nobitcode.a
fi

echo -e "${bold}Building tvOS libraries${dim}"
buildTVOS "arm64"
buildTVOS "x86_64"

lipo \
	"/tmp/${CURL_VERSION}-tvOS-arm64/lib/libcurl.a" \
	"/tmp/${CURL_VERSION}-tvOS-x86_64/lib/libcurl.a" \
	-create -output lib/libcurl_tvOS.a

echo -e "${bold}Cleaning up${dim}"
rm -rf /tmp/${CURL_VERSION}-*
rm -rf ${CURL_VERSION}

echo "Checking libraries"
xcrun -sdk iphoneos lipo -info lib/*.a

#reset trap
trap - INT TERM EXIT

echo -e "${normal}Done"
|
def find_path(grid, start, end):
    """Breadth-first search for a shortest path on a grid.

    Fixes over the previous version:
    1. ``parent`` was never initialized, raising NameError on the first
       neighbor expansion / path reconstruction.
    2. The in-bounds check validated the *current* cell instead of the
       neighbor, allowing out-of-bounds (negative/overflow) indexing.
    3. ``list.pop(0)`` (O(n)) replaced with ``collections.deque``.

    :param grid: 2-D list; cells equal to 0 are walkable, anything else is
        blocked (assumed rectangular — TODO confirm with callers).
    :param start: (row, col) starting cell.
    :param end: (row, col) destination cell.
    :return: list of (row, col) cells from start to end inclusive, or None
        when an endpoint is out of bounds or no path exists.
    """
    from collections import deque

    rows = len(grid)
    cols = len(grid[0]) if rows else 0

    def _in_bounds(cell):
        # True when the cell lies inside the grid rectangle.
        r, c = cell
        return 0 <= r < rows and 0 <= c < cols

    # check initial positions
    if not _in_bounds(start) or not _in_bounds(end):
        return None

    parent = {}
    visited = [[False] * cols for _ in range(rows)]
    visited[start[0]][start[1]] = True

    queue = deque([start])
    next_moves = [(-1, 0), (0, -1), (1, 0), (0, 1)]

    while queue:
        cell = queue.popleft()

        # check if this is the destination cell
        if cell[0] == end[0] and cell[1] == end[1]:
            # Walk the parent links back to the start, then reverse.
            path = []
            while cell != start:
                path.append(cell)
                cell = parent[cell]
            path.append(start)
            return path[::-1]

        # expand all walkable, unvisited neighbors
        for move in next_moves:
            row = cell[0] + move[0]
            col = cell[1] + move[1]
            if _in_bounds((row, col)) and not visited[row][col] and grid[row][col] == 0:
                visited[row][col] = True
                queue.append((row, col))
                parent[(row, col)] = cell

    return None
#!/usr/bin/env bash
# Copyright 2020 Authors of Arktos.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# sanity check for OpenStack provider
if [ "${CLOUD_PROVIDER}" == "openstack" ]; then
    if [ "${CLOUD_CONFIG}" == "" ]; then
       echo "Missing CLOUD_CONFIG env for OpenStack provider!"
       exit 1
    fi
    if [ ! -f "${CLOUD_CONFIG}" ]; then
       echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
       exit 1
    fi
fi

# set feature gates if enable Pod priority and preemption
if [ "${ENABLE_POD_PRIORITY_PREEMPTION}" == true ]; then
    FEATURE_GATES="${FEATURE_GATES},PodPriority=true"
fi

# warn if users are running with swap allowed
if [ "${FAIL_SWAP_ON}" == "false" ]; then
    echo "WARNING : The kubelet is configured to not fail even if swap is enabled; production deployments should disable swap."
fi

# non-root is tolerated, but some functionality may fail later
if [ "$(id -u)" != "0" ]; then
    echo "WARNING : This script MAY be run as root for docker socket / iptables functionality; if failures occur, retry as root." 2>&1
fi

# Stop right away if the build fails
set -e

# Do due diligence to ensure containerd service and socket in a working state
# Containerd service should be part of docker.io installation or apt-get install containerd for Ubuntu OS
if ! sudo systemctl is-active --quiet containerd; then
  echo "Containerd is required for Arktos"
  exit 1
fi

if [[ ! -e "${CONTAINERD_SOCK_PATH}" ]]; then
  echo "Containerd socket file check failed. Please check containerd socket file path"
  exit 1
fi
# local function to download runtime deployment file
# $1 = destination file name under ${VIRTLET_DEPLOYMENT_FILES_DIR}
# $2 = source path relative to ${VIRTLET_DEPLOYMENT_FILES_SRC}
# Downloads only when OVERWRITE_DEPLOYMENT_FILES=true or the file is missing.
copyRuntimeDeploymentFile() {
  if [[ $# != 2 ]]; then
    echo "Invalid args in copyRuntimeDeploymentFile"
    exit 1
  fi
  fileName=$1
  pathInSrc=$2
  if [[ (${OVERWRITE_DEPLOYMENT_FILES} == "true") || (! -f ${VIRTLET_DEPLOYMENT_FILES_DIR}/${fileName}) ]]; then
    echo "Getting runtime deployment file " ${fileName}
    # NOTE(review): --no-check-certificate disables TLS verification for
    # this download — confirm that is intentional for the source host.
    wget --no-check-certificate -O ${VIRTLET_DEPLOYMENT_FILES_DIR}/${fileName} ${VIRTLET_DEPLOYMENT_FILES_SRC}/${pathInSrc}
  fi
}
# Get runtime deployment files (apparmor profiles + virtlet daemonset/yaml).
copyRuntimeDeploymentFile "libvirt-qemu" "apparmor/libvirt-qemu"
copyRuntimeDeploymentFile "libvirtd" "apparmor/libvirtd"
copyRuntimeDeploymentFile "virtlet" "apparmor/virtlet"
copyRuntimeDeploymentFile "vms" "apparmor/vms"
copyRuntimeDeploymentFile "vmruntime.yaml" "data/virtlet-ds.yaml"
copyRuntimeDeploymentFile "images.yaml" "images.yaml"

if [ "${APPARMOR_ENABLED}" == "true" ]; then
  echo "Config test env under apparmor enabled host"
  # Start AppArmor service before we have scripts to configure it properly
  if ! sudo systemctl is-active --quiet apparmor; then
    echo "Starting Apparmor service"
    sudo systemctl start apparmor
  fi

  # install runtime apparmor profiles and reload apparmor
  echo "Installing arktos runtime apparmor profiles"
  cp ${VIRTLET_DEPLOYMENT_FILES_DIR}/libvirt-qemu /etc/apparmor.d/abstractions/
  sudo install -m 0644 ${VIRTLET_DEPLOYMENT_FILES_DIR}/libvirtd ${VIRTLET_DEPLOYMENT_FILES_DIR}/virtlet ${VIRTLET_DEPLOYMENT_FILES_DIR}/vms -t /etc/apparmor.d/
  sudo apparmor_parser -r /etc/apparmor.d/libvirtd
  sudo apparmor_parser -r /etc/apparmor.d/virtlet
  sudo apparmor_parser -r /etc/apparmor.d/vms
  echo "Completed"

  # Rewrite placeholder annotation keys/values in the runtime daemonset yaml.
  echo "Setting annotations for the runtime daemonset"
  sed -i 's+apparmorlibvirtname+container.apparmor.security.beta.kubernetes.io/libvirt+g' ${VIRTLET_DEPLOYMENT_FILES_DIR}/vmruntime.yaml
  sed -i 's+apparmorlibvirtvalue+localhost/libvirtd+g' ${VIRTLET_DEPLOYMENT_FILES_DIR}/vmruntime.yaml
  sed -i 's+apparmorvmsname+container.apparmor.security.beta.kubernetes.io/vms+g' ${VIRTLET_DEPLOYMENT_FILES_DIR}/vmruntime.yaml
  sed -i 's+apparmorvmsvalue+localhost/vms+g' ${VIRTLET_DEPLOYMENT_FILES_DIR}/vmruntime.yaml
  sed -i 's+apparmorvirtletname+container.apparmor.security.beta.kubernetes.io/virtlet+g' ${VIRTLET_DEPLOYMENT_FILES_DIR}/vmruntime.yaml
  sed -i 's+apparmorvirtletvalue+localhost/virtlet+g' ${VIRTLET_DEPLOYMENT_FILES_DIR}/vmruntime.yaml
  echo "Completed"
else
  echo "Stopping Apparmor service"
  sudo systemctl stop apparmor
fi
# Resolve the symbolic network template name onto its bundled JSON file.
ARKTOS_NETWORK_TEMPLATE=${ARKTOS_NETWORK_TEMPLATE:-}
DEFAULT_FLAT_NETWORK_TEMPLATE=${KUBE_ROOT}/hack/runtime/default_flat_network.json
DEFAULT_MIZAR_NETWORK_TEMPLATE=${KUBE_ROOT}/hack/runtime/default_mizar_network.json
if [ "${ARKTOS_NETWORK_TEMPLATE}" == "flat" ]; then
  ARKTOS_NETWORK_TEMPLATE=${DEFAULT_FLAT_NETWORK_TEMPLATE}
fi

if [ "${ARKTOS_NETWORK_TEMPLATE}" == "mizar" ]; then
  ARKTOS_NETWORK_TEMPLATE=${DEFAULT_MIZAR_NETWORK_TEMPLATE}
fi

# Fall back to no template when the resolved path does not exist.
# (Fix: corrected "newtork" -> "network" in the user-facing warning.)
if [ -n "${ARKTOS_NETWORK_TEMPLATE}" ] && [ ! -f "${ARKTOS_NETWORK_TEMPLATE}" ]; then
  printf "\033[1;33m\nWarning: could not find network template file ${ARKTOS_NETWORK_TEMPLATE}. Setting ARKTOS_NETWORK_TEMPLATE to empty.\n\033[0m"
  ARKTOS_NETWORK_TEMPLATE=""
fi
source "${KUBE_ROOT}/hack/lib/util.sh"
function kube::common::detect_binary {
  # Map `uname` output onto the os/arch directory names used by the build
  # tree, then point GO_OUT at the matching local binary directory.
  case "$(uname -s)" in
    Darwin)
      host_os=darwin
      ;;
    Linux)
      host_os=linux
      ;;
    *)
      echo "Unsupported host OS.  Must be Linux or Mac OS X." >&2
      exit 1
      ;;
  esac

  # Equivalent architectures are folded into a single pattern list;
  # first-match order is unchanged from the original chain.
  case "$(uname -m)" in
    x86_64* | i?86_64* | amd64*)
      host_arch=amd64
      ;;
    aarch64* | arm64*)
      host_arch=arm64
      ;;
    arm*)
      host_arch=arm
      ;;
    i?86*)
      host_arch=x86
      ;;
    s390x*)
      host_arch=s390x
      ;;
    ppc64le*)
      host_arch=ppc64le
      ;;
    *)
      echo "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." >&2
      exit 1
      ;;
  esac
  GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}"
}
# This function guesses where the existing cached binary build is for the `-O`
# flag
function kube::common::guess_built_binary_path {
  local hyperkube_path
  # kube::util::find-binary is provided by hack/lib/util.sh (sourced above).
  hyperkube_path=$(kube::util::find-binary "hyperkube")
  if [[ -z "${hyperkube_path}" ]]; then
    return
  fi
  # Print only the containing directory, without a trailing newline.
  echo -n "$(dirname "${hyperkube_path}")"
}
# Initialize service-account token settings and create the signing key
# (2048-bit RSA) on first run.
function kube::common::set_service_accounts {
  SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-true}
  SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-/tmp/kube-serviceaccount.key}
  # Generate ServiceAccount key if needed
  if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
    mkdir -p "$(dirname "${SERVICE_ACCOUNT_KEY}")"
    openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
  fi
}
# Create (or reuse) the CA hierarchy plus serving/client certificates for
# the control-plane components. Relies on kube::util::create_* helpers
# from hack/lib/util.sh.
function kube::common::generate_certs {
    # Create CA signers
    # no need to regenerate CA in every run - optimize
    if [ "${REGENERATE_CA:-}" = true ] || ! [ -e "${CERT_DIR}/server-ca.key" ] || ! [ -e "${CERT_DIR}/server-ca.crt" ] ||
      ! [ -e "${CERT_DIR}/client-ca.key" ] || ! [ -e "${CERT_DIR}/client-ca.crt" ] ||
      ! [ -e "${CERT_DIR}/server-ca-config.json" ] || ! [ -e "${CERT_DIR}/client-ca-config.json" ]; then
      if [[ "${ENABLE_SINGLE_CA_SIGNER:-}" = true ]]; then
        # Single signer: one CA serves both server-auth and client-auth.
        kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"client auth","server auth"'
        sudo cp "${CERT_DIR}/server-ca.key" "${CERT_DIR}/client-ca.key"
        sudo cp "${CERT_DIR}/server-ca.crt" "${CERT_DIR}/client-ca.crt"
        sudo cp "${CERT_DIR}/server-ca-config.json" "${CERT_DIR}/client-ca-config.json"
      else
        kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"'
        kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'
      fi
    else
      echo "Skip generating CA as CA files existed and REGENERATE_CA != true. To regenerate CA files, export REGENERATE_CA=true"
    fi

    # Create Certs
    if [[ "${REUSE_CERTS}" != true ]]; then
      # Create auth proxy client ca
      kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'
      # serving cert for kube-apiserver
      kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" "${API_HOST_IP}" "${API_HOST}" "${FIRST_SERVICE_CLUSTER_IP}" "${API_HOST_IP_EXTERNAL}" "${APISERVERS_EXTRA:-}" "${PUBLIC_IP:-}"
    fi

    # Create client certs signed with client-ca, given id, given CN and a number of groups
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' workload-controller system:workload-controller-manager
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-apiserver system:kube-apiserver

    if [[ "${IS_SCALE_OUT}" == "true" ]]; then
      if [[ "${IS_RESOURCE_PARTITION}" != "true" ]]; then
        # Generate client certkey for TP components accessing RP api servers
        kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' resource-provider-scheduler system:kube-scheduler
      fi
    fi

    # Create matching certificates for kube-aggregator
    kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "${API_HOST}" "${API_HOST_IP}"
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy

    # TODO remove masters and add rolebinding
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator
}
function kube::common::start_apiserver() {
# Launch one kube-apiserver instance in the background.
#   $1 - zero-based apiserver index; added to API_SECURE_PORT/API_PORT and
#        suffixed onto log and kubeconfig file names so several apiservers
#        can run side by side.
# Relies on environment exported by the calling local-up-cluster script
# (CERT_DIR, API_HOST*, LOG_DIR, GO_OUT, KUBECTL, IS_SCALE_OUT, ...).
# Side effects: generates certs/kubeconfigs, writes logs under LOG_DIR,
# appends the new PID to APISERVER_PID_ARRAY, and blocks until /healthz
# responds (or exits 1 on timeout).
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
#Increment ports to enable running muliple kube-apiserver simultaneously
secureport="$(($1 + ${API_SECURE_PORT}))"
insecureport="$(($1 + ${API_PORT}))"
# Increment logs to enable each kube-apiserver have own log files
apiserverlog="kube-apiserver$1.log"
apiserverauditlog="kube-apiserver-audit$1.log"
# Create apiservern.config for kube-apiserver partition
configsuffix="$(($1 + 1))"
configfilepath="${PARTITION_CONFIG_DIR}apiserver.config"
${CONTROLPLANE_SUDO} rm -f $configfilepath
${CONTROLPLANE_SUDO} cp hack/apiserver.config $configfilepath
echo "Copied the apiserver partition config file $configfilepath..."
# Build the ",Plugin" suffix appended to ENABLE_ADMISSION_PLUGINS below.
# NOTE(review): each branch overwrites (not appends to) security_admission,
# so only the last matching option takes effect — confirm this precedence
# is intentional.
security_admission=""
if [[ -n "${DENY_SECURITY_CONTEXT_ADMISSION}" ]]; then
security_admission=",SecurityContextDeny"
fi
if [[ -n "${PSP_ADMISSION}" ]]; then
security_admission=",PodSecurityPolicy"
fi
if [[ -n "${NODE_ADMISSION}" ]]; then
security_admission=",NodeRestriction"
fi
if [ "${ENABLE_POD_PRIORITY_PREEMPTION}" == true ]; then
security_admission=",Priority"
if [[ -n "${RUNTIME_CONFIG}" ]]; then
RUNTIME_CONFIG+=","
fi
RUNTIME_CONFIG+="scheduling.k8s.io/v1alpha1=true"
fi
# Append security_admission plugin
ENABLE_ADMISSION_PLUGINS="${ENABLE_ADMISSION_PLUGINS}${security_admission}"
authorizer_arg=""
if [[ -n "${AUTHORIZATION_MODE}" ]]; then
authorizer_arg="--authorization-mode=${AUTHORIZATION_MODE}"
fi
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged=${ALLOW_PRIVILEGED}"
fi
runtime_config=""
if [[ -n "${RUNTIME_CONFIG}" ]]; then
runtime_config="--runtime-config=${RUNTIME_CONFIG}"
fi
# Let the API server pick a default address when API_HOST_IP
# is set to 127.0.0.1
advertise_address=""
if [[ "${API_HOST_IP}" != "127.0.0.1" ]]; then
advertise_address="--advertise-address=${API_HOST_IP}"
fi
# An explicit ADVERTISE_ADDRESS overrides the API_HOST_IP-derived value.
if [[ "${ADVERTISE_ADDRESS}" != "" ]] ; then
advertise_address="--advertise-address=${ADVERTISE_ADDRESS}"
fi
node_port_range=""
if [[ "${NODE_PORT_RANGE}" != "" ]] ; then
node_port_range="--service-node-port-range=${NODE_PORT_RANGE}"
fi
service_group_id=""
if [[ "${APISERVER_SERVICEGROUPID}" != "" ]]; then
service_group_id="--service-group-id=${APISERVER_SERVICEGROUPID}"
fi
kube::common::generate_certs
cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}"
if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
cloud_config_arg="--cloud-provider=external"
fi
# Write a minimal audit policy file (log everything at Metadata level).
# NOTE(review): "$i" is not set in this function (the instance index is "$1"),
# so the path likely resolves to plain /tmp/kube-audit-policy-file — confirm
# whether a per-instance suffix was intended.
if [[ -n "${AUDIT_POLICY_FILE}" ]]; then
cat <<EOF > /tmp/kube-audit-policy-file$i
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
EOF
AUDIT_POLICY_FILE="/tmp/kube-audit-policy-file$i"
fi
APISERVER_LOG=${LOG_DIR}/$apiserverlog
# Start the apiserver in the background, logging to APISERVER_LOG.
# NOTE(review): the quoted "${authorizer_arg}"/"${priv_arg}"/etc. pass an
# empty-string positional argument when unset — presumably tolerated by the
# hyperkube flag parser; verify if flag parsing errors appear.
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" kube-apiserver "${authorizer_arg}" "${priv_arg}" ${runtime_config} \
${cloud_config_arg} \
"${advertise_address}" \
"${node_port_range}" \
--v="${LOG_LEVEL}" \
--vmodule="${LOG_SPEC}" \
--audit-policy-file="${AUDIT_POLICY_FILE}" \
--audit-log-path="${LOG_DIR}/$apiserverauditlog" \
--cert-dir="${CERT_DIR}" \
--client-ca-file="${CERT_DIR}/client-ca.crt" \
--kubelet-client-certificate="${CERT_DIR}/client-kube-apiserver.crt" \
--kubelet-client-key="${CERT_DIR}/client-kube-apiserver.key" \
--service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
--service-account-lookup="${SERVICE_ACCOUNT_LOOKUP}" \
--enable-admission-plugins="${ENABLE_ADMISSION_PLUGINS}" \
--disable-admission-plugins="${DISABLE_ADMISSION_PLUGINS}" \
--admission-control-config-file="${ADMISSION_CONTROL_CONFIG_FILE}" \
--bind-address="${API_BIND_ADDR}" \
--secure-port=$secureport \
--tls-cert-file="${CERT_DIR}/serving-kube-apiserver.crt" \
--tls-private-key-file="${CERT_DIR}/serving-kube-apiserver.key" \
--insecure-bind-address="${API_HOST_IP}" \
--insecure-port=$insecureport \
--storage-backend="${STORAGE_BACKEND}" \
--storage-media-type="${STORAGE_MEDIA_TYPE}" \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
--service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
--feature-gates="${FEATURE_GATES}" \
--external-hostname="${EXTERNAL_HOSTNAME}" \
--requestheader-username-headers=X-Remote-User \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-client-ca-file="${CERT_DIR}/request-header-ca.crt" \
--requestheader-allowed-names=system:auth-proxy \
--proxy-client-cert-file="${CERT_DIR}/client-auth-proxy.crt" \
--proxy-client-key-file="${CERT_DIR}/client-auth-proxy.key" \
${service_group_id} \
--partition-config="${configfilepath}" \
--profiling=true \
--contention-profiling=true \
--cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
APISERVER_PID=$!
APISERVER_PID_ARRAY+=($APISERVER_PID)
# Wait for kube-apiserver to come up before launching the rest of the components.
echo "Waiting for apiserver to come up"
kube::util::wait_for_url "https://${API_HOST_IP}:$secureport/healthz" "apiserver: " 1 "${WAIT_FOR_URL_API_SERVER}" "${MAX_TIME_FOR_URL_API_SERVER}" \
|| { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; }
#if [[ "${REUSE_CERTS}" != true ]]; then
# REUSE_CERTS is a feature introduced for API server data partition. It is not a must have for arktos-up and not supported in arktos scale out local setup.
# Keep the code here for later reinstate of api server data partition.
# Create kubeconfigs for all components, using client certs
# TODO: Each api server has it own configuration files. However, since clients, such as controller, scheduler and etc do not support mutilple apiservers,admin.kubeconfig is kept for compability.
ADMIN_CONFIG_API_HOST=${PUBLIC_IP:-${API_HOST}}
${CONTROLPLANE_SUDO} chown "${USER}" "${CERT_DIR}/client-admin.key" # make readable for kubectl
if [[ "${IS_SCALE_OUT}" == "true" ]]; then
# in scale out poc, use insecured mode in local dev test
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${ADMIN_CONFIG_API_HOST}" "${API_PORT}" admin "" "http"
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${ADMIN_CONFIG_API_HOST}" "${API_PORT}" scheduler "" "http"
# workload controller is not used for now
# kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${SCALE_OUT_PROXY_IP}" "${SCALE_OUT_PROXY_PORT}" workload-controller "" "http"
# controller kubeconfig points to local api server
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${ADMIN_CONFIG_API_HOST}" "${API_PORT}" controller "" "http"
# generate kubeconfig for K8s components in TP to access api servers in RP
if [[ "${IS_RESOURCE_PARTITION}" != "true" ]]; then
serverCount=${#RESOURCE_SERVERS[@]}
# One scheduler + one controller kubeconfig per resource-partition server,
# written with a numeric suffix (resource-provider-*N.kubeconfig).
for (( pos=0; pos<${serverCount}; pos++ ));
do
# generate kubeconfig for scheduler in TP to access api servers in RP
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${RESOURCE_SERVERS[${pos}]}" "${API_PORT}" resource-provider-scheduler "" "http"
${CONTROLPLANE_SUDO} mv "${CERT_DIR}/resource-provider-scheduler.kubeconfig" "${CERT_DIR}/resource-provider-scheduler${pos}.kubeconfig"
${CONTROLPLANE_SUDO} chown "$(whoami)" "${CERT_DIR}/resource-provider-scheduler${pos}.kubeconfig"
# generate kubeconfig for controllers in TP to access api servers in RP
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${RESOURCE_SERVERS[${pos}]}" "${API_PORT}" resource-provider-controller "" "http"
${CONTROLPLANE_SUDO} mv "${CERT_DIR}/resource-provider-controller.kubeconfig" "${CERT_DIR}/resource-provider-controller${pos}.kubeconfig"
${CONTROLPLANE_SUDO} chown "$(whoami)" "${CERT_DIR}/resource-provider-controller${pos}.kubeconfig"
done
fi
# generate kubelet/kubeproxy certs at TP as we use same cert for the entire cluster
kube::common::generate_kubelet_certs
kube::common::generate_kubeproxy_certs
else
# Single-cluster mode: secure (https) kubeconfigs against this instance's port.
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${ADMIN_CONFIG_API_HOST}" "$secureport" admin
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "$secureport" controller
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "$secureport" scheduler
# kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "$secureport" workload-controller
fi
# Move the admin kubeconfig for each apiserver
${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin$1.kubeconfig"
#${CONTROLPLANE_SUDO} cp "${CERT_DIR}/workload-controller.kubeconfig" "${CERT_DIR}/workload-controller$1.kubeconfig"
if [[ -z "${AUTH_ARGS}" ]]; then
AUTH_ARGS="--client-key=${CERT_DIR}/client-admin.key --client-certificate=${CERT_DIR}/client-admin.crt"
fi
# Grant apiserver permission to speak to the kubelet
# TODO kubelet can talk to mutilple apiservers. However, it needs to implement after code changes
#${KUBECTL} --kubeconfig "${CERT_DIR}/admin$1.kubeconfig" create clusterrolebinding kube-apiserver-kubelet-admin --clusterrole=system:kubelet-api-admin --user=kube-apiserver
bindings=$(${KUBECTL} --kubeconfig "${CERT_DIR}/admin.kubeconfig" get clusterrolebinding)
if [[ $bindings == *"kube-apiserver-kubelet-admin"* ]] ; then
echo "The cluster role binding kube-apiserver-kubelet-admin does exist"
else
${KUBECTL} --kubeconfig "${CERT_DIR}/admin.kubeconfig" create clusterrolebinding kube-apiserver-kubelet-admin --clusterrole=system:kubelet-api-admin --user=system:kube-apiserver
fi
${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin$1.kubeconfig" "${CERT_DIR}/admin-kube-aggregator$1.kubeconfig"
${CONTROLPLANE_SUDO} chown "$(whoami)" "${CERT_DIR}/admin-kube-aggregator$1.kubeconfig"
# Point the aggregator kubeconfig at the aggregated API endpoint (fixed port 31090).
${KUBECTL} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-kube-aggregator$1.kubeconfig" --server="https://${API_HOST_IP}:31090"
echo "use 'kubectl --kubeconfig=${CERT_DIR}/admin-kube-aggregator$1.kubeconfig' to use the aggregated API server"
# Copy workload controller manager config to run path
${CONTROLPLANE_SUDO} cp "cmd/workload-controller-manager/config/controllerconfig.json" "${CERT_DIR}/controllerconfig.json"
${CONTROLPLANE_SUDO} chown "$(whoami)" "${CERT_DIR}/controllerconfig.json"
#fi
}
function kube::common::test_apiserver_off {
  # Fail fast when something already listens on the apiserver ports — e.g. a
  # previous local-up-cluster.sh run whose etcd was killed mid-way.
  if [[ "${API_PORT}" -gt "0" ]]; then
    if curl --silent -g "${API_HOST}:${API_PORT}" ; then
      echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_PORT}"
      exit 1
    fi
    echo "API SERVER insecure port is free, proceeding..."
  fi
  # Secure port is always checked (-k: the local server uses a self-signed cert).
  if curl --silent -k -g "${API_HOST}:${API_SECURE_PORT}" ; then
    echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_SECURE_PORT}"
    exit 1
  fi
  echo "API SERVER secure port is free, proceeding..."
}
function kube::common::start_workload_controller_manager {
# Start the workload-controller-manager in the background.
# With more than one argument, all arguments replace the default kubeconfig
# path; otherwise CERT_DIR/workload-controller.kubeconfig is used.
# Sets WORKLOAD_CTLRMGR_PID; logs to LOG_DIR/workload-controller-manager.log.
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
controller_config_arg=("--controllerconfig=${WORKLOAD_CONTROLLER_CONFIG_PATH}")
kubeconfigfilepaths="${CERT_DIR}/workload-controller.kubeconfig"
if [[ $# -gt 1 ]] ; then
kubeconfigfilepaths=$@
fi
echo "The kubeconfig has been set ${kubeconfigfilepaths}"
WORKLOAD_CONTROLLER_LOG=${LOG_DIR}/workload-controller-manager.log
${CONTROLPLANE_SUDO} "${GO_OUT}/workload-controller-manager" \
--v="${LOG_LEVEL}" \
--kubeconfig "${kubeconfigfilepaths}" \
"${controller_config_arg[@]}" >"${WORKLOAD_CONTROLLER_LOG}" 2>&1 &
WORKLOAD_CTLRMGR_PID=$!
}
function kube::common::start_controller_manager {
# Start kube-controller-manager in the background.
# With more than one argument, all arguments replace the default kubeconfig
# path. Picks one of three launch variants:
#   - scale-out resource partition: fixed short controller list, no
#     service-account credentials;
#   - scale-out tenant partition: all controllers except node-related ones,
#     plus --resource-providers kubeconfigs for each RP server;
#   - single cluster: all controllers.
# Sets CTLRMGR_PID; logs to LOG_DIR/kube-controller-manager.log.
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
kubeconfigfilepaths="${CERT_DIR}/controller.kubeconfig"
if [[ $# -gt 1 ]] ; then
kubeconfigfilepaths=$@
fi
node_cidr_args=()
if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
node_cidr_args=("--allocate-node-cidrs=true" "--cluster-cidr=10.1.0.0/16")
fi
cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
cloud_config_arg=("--cloud-provider=external")
cloud_config_arg+=("--external-cloud-volume-plugin=${CLOUD_PROVIDER}")
cloud_config_arg+=("--cloud-config=${CLOUD_CONFIG}")
fi
CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log
if [[ "${IS_SCALE_OUT}" == "true" ]]; then
# scale out resource partition
if [ "${IS_RESOURCE_PARTITION}" == "true" ]; then
# RP runs only node-facing controllers.
KUBE_CONTROLLERS="daemonset,nodelifecycle,ttl,serviceaccount,serviceaccount-token"
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" kube-controller-manager \
--v="${LOG_LEVEL}" \
--allocate-node-cidrs="${KUBE_CONTROLLER_MANAGER_ALLOCATE_NODE_CIDR}" \
--cluster-cidr="${KUBE_CONTROLLER_MANAGER_CLUSTER_CIDR}" \
--vmodule="${LOG_SPEC}" \
--service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
--root-ca-file="${ROOT_CA_FILE}" \
--cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \
--cluster-signing-key-file="${CLUSTER_SIGNING_KEY_FILE}" \
--enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
${node_cidr_args[@]+"${node_cidr_args[@]}"} \
--pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
--feature-gates="${FEATURE_GATES}" \
"${cloud_config_arg[@]}" \
--kubeconfig "${kubeconfigfilepaths}" \
${KCM_TENANT_SERVER_KUBECONFIG_FLAG} \
--controllers="${KUBE_CONTROLLERS}" \
--leader-elect=false \
--cert-dir="${CERT_DIR}" \
--default-network-template-path="${ARKTOS_NETWORK_TEMPLATE}" >"${CTLRMGR_LOG}" 2>&1 &
else
# TP runs everything except node-related controllers (those live in the RP).
KUBE_CONTROLLERS="*,-daemonset,-nodelifecycle,-nodeipam,-ttl"
# Build a comma-separated --resource-providers flag from the per-RP
# kubeconfigs generated in start_apiserver; the trailing comma is trimmed.
RESOURCE_PROVIDER_KUBECONFIG_FLAGS="--resource-providers="
serverCount=${#RESOURCE_SERVERS[@]}
for (( pos=0; pos<${serverCount}; pos++ ));
do
RESOURCE_PROVIDER_KUBECONFIG_FLAGS="${RESOURCE_PROVIDER_KUBECONFIG_FLAGS}${CERT_DIR}/resource-provider-controller${pos}.kubeconfig,"
done
RESOURCE_PROVIDER_KUBECONFIG_FLAGS=${RESOURCE_PROVIDER_KUBECONFIG_FLAGS::-1}
echo RESOURCE_PROVIDER_KUBECONFIG_FLAGS for new controller commandline --resource-providers
echo ${RESOURCE_PROVIDER_KUBECONFIG_FLAGS}
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" kube-controller-manager \
--v="${LOG_LEVEL}" \
--allocate-node-cidrs="${KUBE_CONTROLLER_MANAGER_ALLOCATE_NODE_CIDR}" \
--cluster-cidr="${KUBE_CONTROLLER_MANAGER_CLUSTER_CIDR}" \
--vmodule="${LOG_SPEC}" \
--service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
--root-ca-file="${ROOT_CA_FILE}" \
--cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \
--cluster-signing-key-file="${CLUSTER_SIGNING_KEY_FILE}" \
--enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
${node_cidr_args[@]+"${node_cidr_args[@]}"} \
--pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
--feature-gates="${FEATURE_GATES}" \
"${cloud_config_arg[@]}" \
--kubeconfig "${kubeconfigfilepaths}" \
${RESOURCE_PROVIDER_KUBECONFIG_FLAGS} \
--use-service-account-credentials \
--controllers="${KUBE_CONTROLLERS}" \
--leader-elect=false \
--cert-dir="${CERT_DIR}" \
--default-network-template-path="${ARKTOS_NETWORK_TEMPLATE}" >"${CTLRMGR_LOG}" 2>&1 &
fi
else
# single cluster
KUBE_CONTROLLERS="*"
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" kube-controller-manager \
--v="${LOG_LEVEL}" \
--allocate-node-cidrs="${KUBE_CONTROLLER_MANAGER_ALLOCATE_NODE_CIDR}" \
--cluster-cidr="${KUBE_CONTROLLER_MANAGER_CLUSTER_CIDR}" \
--vmodule="${LOG_SPEC}" \
--service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
--root-ca-file="${ROOT_CA_FILE}" \
--cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \
--cluster-signing-key-file="${CLUSTER_SIGNING_KEY_FILE}" \
--enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
${node_cidr_args[@]+"${node_cidr_args[@]}"} \
--pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
--feature-gates="${FEATURE_GATES}" \
"${cloud_config_arg[@]}" \
--kubeconfig "${kubeconfigfilepaths}" \
--use-service-account-credentials \
--controllers="${KUBE_CONTROLLERS}" \
--leader-elect=false \
--cert-dir="${CERT_DIR}" \
--default-network-template-path="${ARKTOS_NETWORK_TEMPLATE}" >"${CTLRMGR_LOG}" 2>&1 &
fi
CTLRMGR_PID=$!
}
function kube::common::start_kubescheduler {
# Start kube-scheduler in the background.
# With more than one argument, all arguments replace the default kubeconfig.
# In scale-out mode, additionally passes --resource-providers with one
# kubeconfig per resource-partition server (trailing comma trimmed).
# Sets SCHEDULER_PID; logs to LOG_DIR/kube-scheduler.log.
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
kubeconfigfilepaths="${CERT_DIR}/scheduler.kubeconfig"
if [[ $# -gt 1 ]] ; then
kubeconfigfilepaths=$@
fi
SCHEDULER_LOG=${LOG_DIR}/kube-scheduler.log
if [[ "${IS_SCALE_OUT}" == "true" ]]; then
RESOURCE_PROVIDER_KUBECONFIG_FLAGS="--resource-providers="
serverCount=${#RESOURCE_SERVERS[@]}
for (( pos=0; pos<${serverCount}; pos++ ));
do
RESOURCE_PROVIDER_KUBECONFIG_FLAGS="${RESOURCE_PROVIDER_KUBECONFIG_FLAGS}${CERT_DIR}/resource-provider-scheduler${pos}.kubeconfig,"
done
RESOURCE_PROVIDER_KUBECONFIG_FLAGS=${RESOURCE_PROVIDER_KUBECONFIG_FLAGS::-1}
echo RESOURCE_PROVIDER_KUBECONFIG_FLAGS for new scheduler commandline --resource-providers
echo ${RESOURCE_PROVIDER_KUBECONFIG_FLAGS}
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" kube-scheduler \
--v="${LOG_LEVEL}" \
--leader-elect=false \
--kubeconfig "${kubeconfigfilepaths}" \
${RESOURCE_PROVIDER_KUBECONFIG_FLAGS} \
--feature-gates="${FEATURE_GATES}" >"${SCHEDULER_LOG}" 2>&1 &
else
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" kube-scheduler \
--v="${LOG_LEVEL}" \
--leader-elect=false \
--kubeconfig "${kubeconfigfilepaths}" \
--feature-gates="${FEATURE_GATES}" >"${SCHEDULER_LOG}" 2>&1 &
fi
SCHEDULER_PID=$!
}
function kube::common::start_kubelet {
# Start the kubelet (under sudo) in the background.
# Assembles the flag list from cloud-provider, DNS, CNI, auth and runtime
# settings; in scale-out RP mode also generates one tenant-server kubeconfig
# per TENANT_SERVERS entry and passes them via --tenant-server-kubeconfig.
# Sets KUBELET_PID and verifies the process survived startup.
KUBELET_LOG=${LOG_DIR}/kubelet.log
mkdir -p "${POD_MANIFEST_PATH}" &>/dev/null || sudo mkdir -p "${POD_MANIFEST_PATH}"
cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
cloud_config_arg=("--cloud-provider=external")
cloud_config_arg+=("--provider-id=$(hostname)")
fi
mkdir -p "/var/lib/kubelet" &>/dev/null || sudo mkdir -p "/var/lib/kubelet"
# Enable dns
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
dns_args=("--cluster-dns=${LOCAL_DNS_IP}" "--cluster-domain=${DNS_DOMAIN}")
else
dns_args=("--cluster-dns=${DNS_SERVER_IP}" "--cluster-domain=${DNS_DOMAIN}")
fi
else
# To start a private DNS server set ENABLE_CLUSTER_DNS and
# DNS_SERVER_IP/DOMAIN. This will at least provide a working
# DNS server for real world hostnames.
dns_args=("--cluster-dns=8.8.8.8")
fi
net_plugin_args=()
if [[ -n "${NET_PLUGIN}" ]]; then
net_plugin_args=("--network-plugin=${NET_PLUGIN}")
fi
# Webhook authn/authz default to enabled; set the env vars to "false" to opt out.
auth_args=()
if [[ "${KUBELET_AUTHORIZATION_WEBHOOK:-}" != "false" ]]; then
auth_args+=("--authorization-mode=Webhook")
fi
if [[ "${KUBELET_AUTHENTICATION_WEBHOOK:-}" != "false" ]]; then
auth_args+=("--authentication-token-webhook")
fi
if [[ -n "${CLIENT_CA_FILE:-}" ]]; then
auth_args+=("--client-ca-file=${CLIENT_CA_FILE}")
else
auth_args+=("--client-ca-file=${CERT_DIR}/client-ca.crt")
fi
cni_conf_dir_args=()
if [[ -n "${CNI_CONF_DIR}" ]]; then
cni_conf_dir_args=("--cni-conf-dir=${CNI_CONF_DIR}")
fi
cni_bin_dir_args=()
if [[ -n "${CNI_BIN_DIR}" ]]; then
cni_bin_dir_args=("--cni-bin-dir=${CNI_BIN_DIR}")
fi
container_runtime_endpoint_args=()
if [[ -n "${CONTAINER_RUNTIME_ENDPOINT}" ]]; then
container_runtime_endpoint_args=("--container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}")
fi
image_service_endpoint_args=()
if [[ -n "${IMAGE_SERVICE_ENDPOINT}" ]]; then
image_service_endpoint_args=("--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}")
fi
# Comma-joined list of tenant-server kubeconfigs (trailing comma trimmed below).
KUBELET_FLAGS="--tenant-server-kubeconfig="
if [[ "${IS_SCALE_OUT}" == "true" ]] && [ "${IS_RESOURCE_PARTITION}" == "true" ]; then
serverCount=${#TENANT_SERVERS[@]}
kubeconfig_filename="tenant-server-kubelet"
for (( pos=0; pos<${serverCount}; pos++ ));
do
# here generate kubeconfig for remote API server. Only work in non secure mode for now
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "" "${TENANT_SERVERS[${pos}]}" "${API_PORT}" tenant-server-kubelet "" "http"
${CONTROLPLANE_SUDO} mv "${CERT_DIR}/${kubeconfig_filename}.kubeconfig" "${CERT_DIR}/${kubeconfig_filename}${pos}.kubeconfig"
${CONTROLPLANE_SUDO} chown "$(whoami)" "${CERT_DIR}/${kubeconfig_filename}${pos}.kubeconfig"
KUBELET_FLAGS="${KUBELET_FLAGS}${CERT_DIR}/${kubeconfig_filename}${pos}.kubeconfig,"
done
KUBELET_FLAGS=${KUBELET_FLAGS::-1}
fi
# shellcheck disable=SC2206
all_kubelet_flags=(
"--v=${LOG_LEVEL}"
"--vmodule=${LOG_SPEC}"
"--chaos-chance=${CHAOS_CHANCE}"
"--container-runtime=${CONTAINER_RUNTIME}"
"--hostname-override=${HOSTNAME_OVERRIDE}"
"${cloud_config_arg[@]}"
"--address=0.0.0.0"
--kubeconfig "${CERT_DIR}"/kubelet.kubeconfig
"--feature-gates=${FEATURE_GATES}"
"--cpu-cfs-quota=${CPU_CFS_QUOTA}"
"--enable-controller-attach-detach=${ENABLE_CONTROLLER_ATTACH_DETACH}"
"--cgroups-per-qos=${CGROUPS_PER_QOS}"
"--cgroup-driver=${CGROUP_DRIVER}"
"--cgroup-root=${CGROUP_ROOT}"
"--eviction-hard=${EVICTION_HARD}"
"--eviction-soft=${EVICTION_SOFT}"
"--eviction-pressure-transition-period=${EVICTION_PRESSURE_TRANSITION_PERIOD}"
"--pod-manifest-path=${POD_MANIFEST_PATH}"
"--fail-swap-on=${FAIL_SWAP_ON}"
${auth_args[@]+"${auth_args[@]}"}
${dns_args[@]+"${dns_args[@]}"}
${cni_conf_dir_args[@]+"${cni_conf_dir_args[@]}"}
${cni_bin_dir_args[@]+"${cni_bin_dir_args[@]}"}
${net_plugin_args[@]+"${net_plugin_args[@]}"}
${container_runtime_endpoint_args[@]+"${container_runtime_endpoint_args[@]}"}
${image_service_endpoint_args[@]+"${image_service_endpoint_args[@]}"}
"--runtime-request-timeout=${RUNTIME_REQUEST_TIMEOUT}"
"--port=${KUBELET_PORT}"
${KUBELET_FLAGS}
)
kube::common::generate_kubelet_certs
# shellcheck disable=SC2024
sudo -E "${GO_OUT}/hyperkube" kubelet "${all_kubelet_flags[@]}" >"${KUBELET_LOG}" 2>&1 &
KUBELET_PID=$!
# Quick check that kubelet is running.
if [ -n "${KUBELET_PID}" ] && ps -p ${KUBELET_PID} > /dev/null; then
echo "kubelet ( ${KUBELET_PID} ) is running."
else
cat "${KUBELET_LOG}" ; exit 1
fi
}
function kube::common::start_kubeproxy {
# Start kube-proxy (under sudo) in the background.
# Writes a KubeProxyConfiguration to /tmp/kube-proxy.yaml (appending any
# FEATURE_GATES as YAML), then points --master at http in scale-out mode
# or https otherwise. Sets PROXY_PID; logs to LOG_DIR/kube-proxy.log.
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
kubeconfigfilepaths="${CERT_DIR}/kube-proxy.kubeconfig"
if [[ $# -gt 1 ]] ; then
kubeconfigfilepaths=$@
fi
PROXY_LOG=${LOG_DIR}/kube-proxy.log
cat <<EOF > /tmp/kube-proxy.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clientConnection:
kubeconfig: ${kubeconfigfilepaths}
hostnameOverride: ${HOSTNAME_OVERRIDE}
mode: ${KUBE_PROXY_MODE}
EOF
# The whole if-block's stdout is appended to the config file (see the
# redirection on the closing 'fi').
if [[ -n ${FEATURE_GATES} ]]; then
echo "featureGates:"
# Convert from foo=true,bar=false to
# foo: true
# bar: false
for gate in $(echo "${FEATURE_GATES}" | tr ',' ' '); do
echo "${gate}" | ${SED} -e 's/\(.*\)=\(.*\)/ \1: \2/'
done
fi >>/tmp/kube-proxy.yaml
kube::common::generate_kubeproxy_certs
local port=${API_SECURE_PORT}
local protocol="https"
if [[ "${IS_SCALE_OUT}" == "true" ]]; then
port=${API_PORT}
protocol="http"
fi
# shellcheck disable=SC2024
sudo "${GO_OUT}/hyperkube" kube-proxy \
--v="${LOG_LEVEL}" \
--config=/tmp/kube-proxy.yaml \
--master="${protocol}://${API_HOST}:${port}" >"${PROXY_LOG}" 2>&1 &
PROXY_PID=$!
}
function kube::common::generate_kubelet_certs {
  # Create the kubelet client cert and kubeconfig, unless certs are reused.
  if [[ "${REUSE_CERTS}" == true ]]; then
    return
  fi
  CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet "system:node:${HOSTNAME_OVERRIDE}" system:nodes
  if [[ "${IS_SCALE_OUT}" == "true" ]]; then
    # Scale-out local setup talks to the insecure http endpoint.
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_PORT}" kubelet "" "http"
  else
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
  fi
}
function kube::common::generate_kubeproxy_certs {
  # Create the kube-proxy client cert and kubeconfig, unless certs are reused.
  if [[ "${REUSE_CERTS}" == true ]]; then
    return
  fi
  CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes
  if [[ "${IS_SCALE_OUT}" == "true" ]]; then
    # Scale-out local setup talks to the insecure http endpoint.
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_PORT}" kube-proxy "" "http"
  else
    kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-proxy
  fi
}
|
<filename>src/globus-client-java/src/main/java/org/globus/jsonUtil/EnumDeserializer.java
/**
* Copyright 2014 University of Chicago
* All rights reserved.
* Created Aug 21, 2014 by pruyne
*/
package org.globus.jsonUtil;
import java.io.IOException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import org.codehaus.jackson.JsonParser;
import org.codehaus.jackson.JsonProcessingException;
import org.codehaus.jackson.map.BeanProperty;
import org.codehaus.jackson.map.ContextualDeserializer;
import org.codehaus.jackson.map.DeserializationConfig;
import org.codehaus.jackson.map.DeserializationContext;
import org.codehaus.jackson.map.JsonDeserializer;
import org.codehaus.jackson.map.JsonMappingException;
/**
* @author pruyne
*
*/
public class EnumDeserializer extends JsonDeserializer<Enum<?>>
    implements ContextualDeserializer<Enum<?>>
{
    // Enum class this deserializer instance targets; bound in createContextual.
    private Class<? extends Enum<?>> wrappedType;

    /**
     * Returns a deserializer bound to the enum type of the given property.
     * Fix: return a fresh instance instead of mutating {@code this}. The same
     * EnumDeserializer object may be registered/cached for properties of
     * different enum types, so overwriting {@code wrappedType} in place could
     * leave one property's deserializer pointing at another property's type.
     */
    @SuppressWarnings("unchecked")
    @Override
    public JsonDeserializer<Enum<?>> createContextual(DeserializationConfig config,
                                                      BeanProperty property)
        throws JsonMappingException
    {
        EnumDeserializer contextual = new EnumDeserializer();
        contextual.wrappedType = (Class<? extends Enum<?>>) property.getType().getRawClass();
        return contextual;
    }

    /**
     * Deserializes by invoking, via reflection, any public static method on the
     * enum type that takes a single String and returns the enum type. This covers
     * the implicit {@code valueOf(String)} as well as custom factory methods.
     *
     * @return the first non-null enum value produced by such a factory, or
     *         {@code null} when no factory accepts the textual value.
     */
    @Override
    public Enum<?> deserialize(JsonParser jp, DeserializationContext ctxt) throws IOException,
        JsonProcessingException
    {
        String enumVal = jp.getText();
        for (Method method : wrappedType.getMethods()) {
            try {
                if (method.getReturnType().equals(wrappedType)
                    && Modifier.isStatic(method.getModifiers())) {
                    Class<?>[] paramTypes = method.getParameterTypes();
                    if (paramTypes.length == 1 && paramTypes[0].equals(String.class)) {
                        Object value = method.invoke(null, enumVal);
                        if (value != null) {
                            return (Enum<?>) value;
                        }
                    }
                }
            } catch (Exception e) {
                // Factory rejected the value (e.g. valueOf threw); try the next candidate.
            }
        }
        return null;
    }
}
|
package kbasesearchengine.search;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import kbasesearchengine.common.GUID;
public class AccessInfo {

    // NOTE(review): field semantics inferred from names/keys only — confirm
    // against the code that writes these documents.
    public GUID pguid;
    public GUID prefix;
    public Integer version;
    public Set<Integer> lastIn;
    public Set<Integer> groups;

    @Override
    public String toString() {
        return "AccessInfo [pguid=" + pguid + ", prefix=" + prefix + ", version="
                + version + ", lastIn=" + lastIn + ", groups=" + groups + "]";
    }

    /**
     * Builds an AccessInfo from a generic map (e.g. a parsed index document).
     * Expects keys "pguid", "prefix" (GUID strings), "version" (Integer), and
     * "lastin"/"groups" (integer lists, copied into insertion-ordered sets).
     */
    @SuppressWarnings("unchecked")
    public static AccessInfo fromMap(Map<String, Object> data) {
        AccessInfo ret = new AccessInfo();
        ret.pguid = new GUID((String)data.get("pguid"));
        ret.prefix = new GUID((String)data.get("prefix"));
        ret.version = (Integer)data.get("version");
        ret.lastIn = new LinkedHashSet<Integer>((List<Integer>)data.get("lastin"));
        ret.groups = new LinkedHashSet<Integer>((List<Integer>)data.get("groups"));
        return ret;
    }

    /**
     * Serializes this object to a map using the same keys fromMap() reads.
     * Uses a plain LinkedHashMap rather than the previous double-brace
     * initialization: the anonymous-subclass idiom pins a reference to the
     * enclosing AccessInfo in the returned map, creates a needless extra
     * class, and required suppressing the "serial" warning.
     */
    public Map<String, Object> toMap() {
        Map<String, Object> ret = new LinkedHashMap<String, Object>();
        ret.put("pguid", pguid.toString());
        ret.put("prefix", prefix.toString());
        ret.put("version", version);
        ret.put("lastin", lastIn);
        ret.put("groups", groups);
        return ret;
    }
}
|
#!/bin/sh
# Restart the Apache httpd service; requires sudo privileges.
sudo service httpd restart
|
#! /bin/bash
PRGNAME="libdrm"
### libdrm (A library to support Direct Rendering)
# The library implements an interface to the kernel DRM (Direct Rendering)
# services — the direct rendering manager on operating systems that support
# the ioctl interface. Used to provide hardware-accelerated 3D rendering.
# Required: no
# Recommended: xorg-libraries (for the intel kms api support required by mesa)
# Optional: cairo (for the tests)
# cmake (may be used for dependency discovery without pkgconfig files)
# docbook-xml
# docbook-xsl
# libxslt (for building the man pages)
# libatomic-ops
# valgrind
# cunit (for the amdgpu tests) http://cunit.sourceforge.net/
ROOT="/root/src/lfs"
source "${ROOT}/check_environment.sh" || exit 1
source "${ROOT}/unpack_source_archive.sh" "${PRGNAME}" || exit 1
source "${ROOT}/xorg_config.sh" || exit 1
TMP_DIR="${BUILD_DIR}/package-${PRGNAME}-${VERSION}"
mkdir -pv "${TMP_DIR}"
mkdir build
cd build || exit 1
# enable Udev support instead of mknod
# -Dudev=true
#
# NOTE(review): meson is invoked from ./build with no source-directory
# argument; the usual form is `meson .. --prefix=...` — confirm this works
# with the meson version in use.
# shellcheck disable=SC2086
meson \
-Dudev=true \
--prefix=${XORG_PREFIX} || exit 1
ninja || exit 1
# ninja test
DESTDIR="${TMP_DIR}" ninja install
source "${ROOT}/stripping.sh" || exit 1
source "${ROOT}/update-info-db.sh" || exit 1
/bin/cp -vpR "${TMP_DIR}"/* /
cat << EOF > "/var/log/packages/${PRGNAME}-${VERSION}"
# Package: ${PRGNAME} (A library to support Direct Rendering)
#
# This library implements an interface to the kernel's DRM services. It is used
# to support hardware accelerated 3-D rendering and libdrm provides a user
# space library for accessing the DRM, direct rendering manager, on operating
# systems that support the ioctl interface. libdrm is a low-level library,
# typically used by graphics drivers such as the Mesa DRI drivers, the X
# drivers, libva and similar projects.
#
# Home page: https://dri.freedesktop.org/wiki/DRM/
# Download: https://dri.freedesktop.org/${PRGNAME}/${PRGNAME}-${VERSION}.tar.xz
#
EOF
source "${ROOT}/write_to_var_log_packages.sh" \
"${TMP_DIR}" "${PRGNAME}-${VERSION}"
|
<reponame>benoitc/pypy
"""StdObjSpace custom opcode implementations"""
import operator
from pypy.rlib.unroll import unrolling_iterable
from pypy.interpreter import pyopcode
from pypy.interpreter.pyframe import PyFrame
from pypy.interpreter.error import OperationError
from pypy.objspace.std import intobject, smallintobject
from pypy.objspace.std.multimethod import FailedToImplement
from pypy.objspace.std.listobject import W_ListObject
class BaseFrame(PyFrame):
    """These opcodes are always overridden.

    Note: this is RPython (PyPy 2) source; frame methods conventionally use
    ``f`` instead of ``self``.  SOURCE indentation was stripped by extraction
    and has been restored here to conventional 4-space layout.
    """

    def LIST_APPEND(f, oparg, next_instr):
        # Append TOS to the list located oparg-1 entries below it on the
        # value stack.  Only W_ListObject is expected here, hence the
        # AssertionError on any other type.
        w = f.popvalue()
        v = f.peekvalue(oparg - 1)
        if type(v) is W_ListObject:
            v.append(w)
        else:
            raise AssertionError

    def small_int_BINARY_ADD(f, oparg, next_instr):
        # Fast-path BINARY_ADD for two W_SmallIntObject operands; falls back
        # to the generic space.add for any other types or on overflow
        # (FailedToImplement).
        w_2 = f.popvalue()
        w_1 = f.popvalue()
        if (type(w_1) is smallintobject.W_SmallIntObject and
            type(w_2) is smallintobject.W_SmallIntObject):
            try:
                w_result = smallintobject.add__SmallInt_SmallInt(f.space, w_1, w_2)
            except FailedToImplement:
                w_result = f.space.add(w_1, w_2)
        else:
            w_result = f.space.add(w_1, w_2)
        f.pushvalue(w_result)

    def int_BINARY_ADD(f, oparg, next_instr):
        # Same fast path as above but for plain W_IntObject operands.
        w_2 = f.popvalue()
        w_1 = f.popvalue()
        if (type(w_1) is intobject.W_IntObject and
            type(w_2) is intobject.W_IntObject):
            try:
                w_result = intobject.add__Int_Int(f.space, w_1, w_2)
            except FailedToImplement:
                w_result = f.space.add(w_1, w_2)
        else:
            w_result = f.space.add(w_1, w_2)
        f.pushvalue(w_result)

    def list_BINARY_SUBSCR(f, oparg, next_instr):
        # Fast-path list[int] indexing; out-of-range indices are converted to
        # the application-level IndexError.  Other operand types fall back to
        # the generic space.getitem.
        w_2 = f.popvalue()
        w_1 = f.popvalue()
        if type(w_1) is W_ListObject and type(w_2) is intobject.W_IntObject:
            try:
                w_result = w_1.getitem(w_2.intval)
            except IndexError:
                raise OperationError(f.space.w_IndexError,
                                     f.space.wrap("list index out of range"))
        else:
            w_result = f.space.getitem(w_1, w_2)
        f.pushvalue(w_result)
# Maps COMPARE_OP oparg values (0-5) to operator-module function names; the
# list index must match CPython's COMPARE_OP encoding for these six rich
# comparisons.  unrolling_iterable lets the RPython translator unroll loops
# over this constant table.
compare_table = [
    "lt",   # "<"
    "le",   # "<="
    "eq",   # "=="
    "ne",   # "!="
    "gt",   # ">"
    "ge",   # ">="
    ]
unrolling_compare_ops = unrolling_iterable(enumerate(compare_table))
def fast_COMPARE_OP(f, testnum, next_instr):
    # Optimized COMPARE_OP: when both operands are plain W_IntObjects and the
    # oparg names one of the six rich comparisons, apply the matching
    # operator.* function directly on the unwrapped intvals.  Anything else
    # goes through the generic dispatch table; an oparg outside that table
    # raises BytecodeCorruption (Python 2 raise syntax — RPython/PyPy 2).
    w_2 = f.popvalue()
    w_1 = f.popvalue()
    w_result = None
    if (type(w_2) is intobject.W_IntObject and
        type(w_1) is intobject.W_IntObject and
        testnum < len(compare_table)):
        for i, attr in unrolling_compare_ops:
            if i == testnum:
                op = getattr(operator, attr)
                w_result = f.space.newbool(op(w_1.intval,
                                              w_2.intval))
                break
    else:
        for i, attr in pyopcode.unrolling_compare_dispatch_table:
            if i == testnum:
                w_result = getattr(f, attr)(w_1, w_2)
                break
        else:
            # for/else: no dispatch entry matched testnum.
            raise pyopcode.BytecodeCorruption, "bad COMPARE_OP oparg"
    f.pushvalue(w_result)
def build_frame(space):
    """Consider the objspace config and return a patched frame object."""
    class StdObjSpaceFrame(BaseFrame):
        pass
    # Install optimized opcode implementations according to the objspace
    # configuration: each option swaps one of the specialized functions
    # defined above in as an unbound frame method.
    if space.config.objspace.std.optimized_int_add:
        if space.config.objspace.std.withsmallint:
            StdObjSpaceFrame.BINARY_ADD = small_int_BINARY_ADD
        else:
            StdObjSpaceFrame.BINARY_ADD = int_BINARY_ADD
    if space.config.objspace.std.optimized_list_getitem:
        StdObjSpaceFrame.BINARY_SUBSCR = list_BINARY_SUBSCR
    if space.config.objspace.opcodes.CALL_METHOD:
        from pypy.objspace.std.callmethod import LOOKUP_METHOD, CALL_METHOD
        StdObjSpaceFrame.LOOKUP_METHOD = LOOKUP_METHOD
        StdObjSpaceFrame.CALL_METHOD = CALL_METHOD
    if space.config.objspace.std.optimized_comparison_op:
        StdObjSpaceFrame.COMPARE_OP = fast_COMPARE_OP
    if space.config.objspace.std.logspaceoptypes:
        # Known-broken branch, deliberately guarded by the assert below.
        # NOTE(review): StdObjSpace is not defined in this module — the line
        # after the assert presumably meant StdObjSpaceFrame; confirm before
        # ever enabling this option.
        assert 0, "logspaceoptypes: a few fixes a missing here"
        StdObjSpace._space_op_types = []
        for name, new in get_logging():
            setattr(StdObjSpaceFrame, name, new)
    return StdObjSpaceFrame
def get_logging():
    # Yield (opcode_name, logging_wrapper) pairs for every binary/unary
    # operation method found on PyFrame.  Each wrapper appends a textual
    # trace of the operation to f._space_op_types before delegating to the
    # object space.  NOTE: .iteritems() is Python 2 API, like the rest of
    # this module.
    for name, func in pyframe.PyFrame.__dict__.iteritems():
        if hasattr(func, 'binop'):
            operationname = func.binop
            # Factory function binds `operationname` per iteration, avoiding
            # the classic late-binding-closure-in-a-loop bug.
            def make_opimpl(operationname):
                def opimpl(f, *ignored):
                    operation = getattr(f.space, operationname)
                    w_2 = f.popvalue()
                    w_1 = f.popvalue()
                    if we_are_translated():
                        s = operationname + ' ' + str(w_1) + ' ' + str(w_2)
                    else:
                        # Untranslated: log class names, not reprs.
                        names = (w_1.__class__.__name__ + ' ' +
                                 w_2.__class__.__name__)
                        s = operationname + ' ' + names
                    f._space_op_types.append(s)
                    w_result = operation(w_1, w_2)
                    f.pushvalue(w_result)
                return func_with_new_name(opimpl,
                                          "opcode_impl_for_%s" % operationname)
            yield name, make_opimpl(operationname)
        elif hasattr(func, 'unaryop'):
            operationname = func.unaryop
            def make_opimpl(operationname):
                def opimpl(f, *ignored):
                    operation = getattr(f.space, operationname)
                    w_1 = f.popvalue()
                    if we_are_translated():
                        s = operationname + ' ' + str(w_1)
                    else:
                        s = operationname + ' ' + w_1.__class__.__name__
                    f._space_op_types.append(s)
                    w_result = operation(w_1)
                    f.pushvalue(w_result)
                return func_with_new_name(opimpl,
                                          "opcode_impl_for_%s" % operationname)
            yield name, make_opimpl(operationname)
|
#!/bin/bash
# Build LMDB databases (test + trainval subsets) for the PASCAL VOC0712
# detection dataset by invoking tools/create_annoset.py once per subset.
redo=1
data_root_dir="data/VOCdevkit"
dataset_name="VOC0712"
mapfile="data/$dataset_name/labelmap_voc.prototxt"
anno_type="detection"
db="lmdb"
# 0 disables resizing / dimension checks in create_annoset.py
min_dim=0
max_dim=0
width=0
height=0
extra_cmd="--encode-type=jpg --encoded"
# Bug fix: '[ $redo ]' is true for ANY non-empty value, including 0, so the
# --redo flag could never be switched off.  Compare the value explicitly.
if [ "$redo" = 1 ]
then
  extra_cmd="$extra_cmd --redo"
fi
for subset in test trainval
do
  python tools/create_annoset.py \
    --anno-type=$anno_type \
    --label-map-file=$mapfile \
    --min-dim=$min_dim \
    --max-dim=$max_dim \
    --resize-width=$width \
    --resize-height=$height \
    --check-label \
    $extra_cmd $data_root_dir \
    data/$dataset_name/$subset.txt \
    $data_root_dir/$dataset_name/$db/$dataset_name"_"$subset"_"$db
done
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This is to find out why my R peaks at a value around 2021-07-01, that is
much higher than RIVM's.
Created on Fri Jul 23 12:52:53 2021
@author: hk_nien
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tools
import nlcovidstats as nlcs
def get_Rt_rivm(mindate, maxdate):
    """Return Series with R(rivm). Note timestamps are always at time 12:00:00."""
    rivm = nlcs.DFS['Rt_rivm'].copy()
    # Fill up to the first 4 'prognosis' rows (where R is NaN) with the
    # geometric mean of the published Rmin/Rmax band, rounded to 2 decimals.
    prognosis = rivm.loc[rivm['R'].isna()].iloc[:4]
    estimated_R = np.around(np.sqrt(prognosis['Rmin'] * prognosis['Rmax']), 2)
    rivm.loc[estimated_R.index, 'R'] = estimated_R
    known_R = rivm.loc[~rivm['R'].isna(), 'R']
    in_window = (known_R.index >= mindate) & (known_R.index <= maxdate)
    return known_R.loc[in_window]
def get_Rt_mine(mindate, maxdate, slide_delay=True, cdf=None):
    """Return my Rt estimate, sampled at 12:00 daily.

    Optionally provide cdf as test case; DataFrame with time index and
    'Delta7r' column (7-day rolling average daily positive cases).
    """
    from scipy.interpolate import interp1d
    # Either a sliding infection-to-report delay curve or a fixed 4-day delay.
    delay = nlcs.DELAY_INF2REP if slide_delay else 4.0
    if cdf is None:
        cdf, _npop = nlcs.get_region_data('Nederland', lastday=-1, correct_anomalies=True)
    # Skip the first 10 rows -- presumably to avoid start-up artefacts in the
    # 7-day rolling average; TODO confirm against nlcs.estimate_Rt_df.
    Rdf = nlcs.estimate_Rt_df(cdf['Delta7r'].iloc[10:], delay=delay, Tc=4.0)
    # Interpolate R over integer (nanosecond) timestamps; outside the
    # estimated range, clamp to the first/last R value.
    r_interp = interp1d(
        Rdf.index.astype(np.int64), Rdf['Rt'], bounds_error=False,
        fill_value=(Rdf['Rt'].iloc[0], Rdf['Rt'].iloc[-1])
    )
    # Resample at 12:00 each day, matching the RIVM series convention.
    tlims = [pd.to_datetime(t).strftime('%Y-%m-%dT12:00')
             for t in [mindate, maxdate]
             ]
    index = pd.date_range(*tlims, freq='1d')
    R_mine = pd.Series(r_interp(index.astype(int)), index=index)
    return R_mine
def get_Rt_test_case(mindate, maxdate, case='step', slide_delay=True):
    """Build a synthetic case curve and return the R estimate computed on it."""
    days = pd.date_range('2021-01-01', 'now', freq='1d')
    # Case data is reported 4 days after the nominal date.
    cdf = pd.DataFrame(index=days + pd.Timedelta(4, 'd'))
    cdf['Delta7r'] = 1000
    if case != 'step':
        raise ValueError(f'case={case!r}')
    # sudden factor 10 increase should result in
    # 1 day R=1e+4 or 2 days R=1e+2, which is the case.
    cdf.loc[days >= '2021-07-01', 'Delta7r'] = 10000
    return get_Rt_mine(mindate, maxdate, slide_delay=slide_delay, cdf=cdf)
#%%
if __name__ == '__main__':
    # Load/refresh the national dataset, then compare my R estimates against
    # RIVM's over the period around 2021-07-01.
    nlcs.reset_plots()
    nlcs.init_data(autoupdate=True)
    #%%
    Rt_mine_fixD = get_Rt_mine('2021-06-22', '2021-07-20', slide_delay=False)
    Rt_mine_varD = get_Rt_mine('2021-06-22', '2021-07-20', slide_delay=True)
    Rt_rivm = get_Rt_rivm('2021-06-22', '2021-07-13')
    # Rt_mine = get_Rt_test_case('2021-06-22', '2021-07-09', 'step', slide_delay=False)
    # Reported cases, raw and 7-day averaged, scaled back to absolute counts.
    cases_df, n_pop = nlcs.get_region_data('Nederland')
    cases = cases_df['Delta'] * n_pop
    cases7 = cases_df['Delta7r'] * n_pop
    cases_mask = (cases.index >= '2021-06-22') & (cases.index <= '2021-07-23')
    day = pd.Timedelta(1, 'd')
    cases = cases.loc[cases_mask]
    cases7 = cases7.loc[cases_mask]
    plt.close('all')
    fig, (axR, axC) = plt.subplots(2, 1, tight_layout=True, sharex=True,
                                   figsize=(6, 7))
    Tgen = 4.0  # generation interval
    cases_scale = 590
    # For each R series: plot it, and integrate it back into an implied daily
    # case curve n(t) = n(t-1) * R^(1/Tgen), shifted by the reporting delay.
    for Rt, label, delay, marker in [
            (Rt_mine_fixD, 'hk_nien, fixD', 4.5*day, 'o'),
            #(Rt_mine_varD, 'hk_nien, varD', 5.5*day, 'o'),
            (Rt_rivm, 'RIVM', 4*day, '^')
            ]:
        axR.plot(Rt, marker=marker, label=label)
        cases_from_R = Rt.cumprod() ** (1/Tgen)
        axC.plot(
            cases_from_R.index + delay,
            cases_from_R.values * cases_scale,
            marker=marker, label=f'Volgens R[{label}]'
        )
    axC.plot(cases.index, cases.values, marker='*', linestyle='', label='Gerapporteerd')
    axC.plot(cases7, marker='v', label='Gerapporteerd (7d gemid.)')
    axR.set_ylabel('Rt')
    axC.set_ylabel('Aantal positief')
    axC.set_yscale('log')
    axR.legend()
    axC.legend()
    # NOTE: Rt and delay here are whatever the last loop iteration left
    # behind (the RIVM series) -- the annotation is anchored to that curve.
    axC.annotate(f'Hier {cases_scale}',
                 (Rt.index[0] + delay, cases_scale),
                 xytext=(Rt.index[0] + delay, cases_scale * 2),
                 arrowprops=dict(arrowstyle='->'),
                 horizontalalignment='center')
    axR.set_title('R-schattingen')
    axC.set_title('R teruggerekend naar aantal positief per dag:\n'
                  r'$n_{pos}(t) = n_{pos}(t-1) \times R^{1/4}$')
    tools.set_xaxis_dateformat(axC)
    tools.set_xaxis_dateformat(axR)
    fig.show()
|
<gh_stars>100-1000
// Express router exposing the application-settings endpoints.
import { Router } from "express";
import isAuth from "../middleware/isAuth";
import * as SettingController from "../controllers/SettingController";

const settingRoutes = Router();

// List all settings (requires an authenticated session).
settingRoutes.get("/settings", isAuth, SettingController.index);

// routes.get("/settings/:settingKey", isAuth, SettingsController.show);

// change setting key to key in future
// Update a single setting identified by its key (requires auth).
settingRoutes.put("/settings/:settingKey", isAuth, SettingController.update);

export default settingRoutes;
|
#!/bin/bash
# Compile-and-run the Go program under ./allah, bracketed by start/end markers.
echo "start"
go run ./allah/main.go
echo "end"
#!/bin/bash
# Evaluate the oracle setup: a transformer classifier (ViT-B/16 at 384px)
# paired with a transformer oracle head trained with cross-entropy loss, on
# ImageNet-1k, starting from ImageNet-21k+2012 pretrained weights.
# Placeholders below (data dir, experiment checkpoint) must be edited first.
DATA='/path-to-data/'
PRETRAINED_MODEL='pretrained/imagenet21k+imagenet2012_ViT-B_16.pth'

python src/eval_oracle.py \
  --exp-name eval_oracle_ovit_cvit_ce \
  --n-gpu 4 \
  --classifier 'transformer' \
  --image-size 384 \
  --oracle-type 'transformer' \
  --oracle-feat-dim 768 \
  --oracle-loss 'ce' \
  --model-arch b16 \
  --checkpoint-path ${PRETRAINED_MODEL} \
  --batch-size 40 \
  --tensorboard \
  --data-dir ${DATA} \
  --dataset ImageNet \
  --num-classes 1000 \
  --train-epochs 1 \
  --lr 1e-5 \
  --wd 0 \
  --momentum 0.05 \
  --oracle-model-arch b16 \
  --oracle-checkpoint-path ${PRETRAINED_MODEL} \
  --oracle-pretrained '/path-to-the-experiment-folder/checkpoints/ep_01.pth'
|
<filename>app/src/main/java/com/jpb/music/helper/MusicLibraryHelper.java<gh_stars>0
package com.jpb.music.helper;
import android.annotation.SuppressLint;
import android.content.ContentUris;
import android.content.Context;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.media.MediaExtractor;
import android.media.MediaFormat;
import android.net.Uri;
import android.os.Build;
import android.os.FileUtils;
import android.os.ParcelFileDescriptor;
import android.provider.MediaStore;
import android.provider.OpenableColumns;
import android.webkit.MimeTypeMap;
import com.jpb.music.R;
import com.jpb.music.model.Music;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
/**
 * Static helpers for loading the device music library from the MediaStore
 * and for formatting track metadata (durations, dates, bit/sample rates).
 */
public class MusicLibraryHelper {

    /**
     * Queries the external MediaStore audio table for every row flagged
     * IS_MUSIC and maps each row to a {@link Music} model.
     *
     * @param context context used to reach the content resolver
     * @return the list of tracks; empty when the provider returns no cursor
     */
    public static List<Music> fetchMusicLibrary(Context context) {
        String collection;
        List<Music> musicList = new ArrayList<>();

        // On Q+ the containing-folder name comes from the bucket column;
        // older releases only expose the absolute DATA path.
        if (VersioningHelper.isVersionQ())
            collection = MediaStore.Audio.Media.BUCKET_DISPLAY_NAME;
        else
            collection = MediaStore.Audio.Media.DATA;

        String[] projection = new String[]{
                MediaStore.Audio.Media.ARTIST,
                MediaStore.Audio.Media.YEAR,
                MediaStore.Audio.Media.TRACK,
                MediaStore.Audio.Media.TITLE,
                MediaStore.Audio.Media.DISPLAY_NAME,
                MediaStore.Audio.Media.DURATION, // error from android side, it works < 29
                MediaStore.Audio.Media.ALBUM_ID,
                MediaStore.Audio.Media.ALBUM,
                collection,
                MediaStore.Audio.Media._ID,
                MediaStore.Audio.Media.DATE_MODIFIED,
                MediaStore.Audio.Media.DATA
        };
        String selection = MediaStore.Audio.Media.IS_MUSIC + " = 1";
        String sortOrder = MediaStore.Audio.Media.DEFAULT_SORT_ORDER;

        Cursor musicCursor = context.getContentResolver().query(
                MediaStore.Audio.Media.EXTERNAL_CONTENT_URI, projection, selection, null, sortOrder);
        // query() may legitimately return null (provider unavailable);
        // previously this caused an NPE on the first getColumnIndexOrThrow.
        if (musicCursor == null)
            return musicList;

        try {
            int artistInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.ARTIST);
            int yearInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.YEAR);
            int trackInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.TRACK);
            int titleInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.TITLE);
            int displayNameInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.DISPLAY_NAME);
            int durationInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.DURATION);
            int albumIdInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.ALBUM_ID);
            int albumInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.ALBUM);
            int relativePathInd = musicCursor.getColumnIndexOrThrow(collection);
            int idInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media._ID);
            int dateModifiedInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.DATE_MODIFIED);
            int contentUriInd = musicCursor.getColumnIndexOrThrow(MediaStore.Audio.Media.DATA);

            while (musicCursor.moveToNext()) {
                String artist = musicCursor.getString(artistInd);
                String title = musicCursor.getString(titleInd);
                String displayName = musicCursor.getString(displayNameInd);
                String album = musicCursor.getString(albumInd);
                String relativePath = musicCursor.getString(relativePathInd);
                String absolutePath = musicCursor.getString(contentUriInd);

                // Normalise the "folder" shown for the track: bucket name on
                // Q+, otherwise the parent directory of the absolute path.
                if (VersioningHelper.isVersionQ())
                    relativePath += "/";
                else if (relativePath != null) {
                    File check = new File(relativePath).getParentFile();
                    if (check != null) {
                        relativePath = check.getName() + "/";
                    }
                } else {
                    relativePath = "/";
                }

                int year = musicCursor.getInt(yearInd);
                int track = musicCursor.getInt(trackInd);
                int startFrom = 0;
                int dateAdded = musicCursor.getInt(dateModifiedInd);
                long id = musicCursor.getLong(idInd);
                long duration = musicCursor.getLong(durationInd);
                long albumId = musicCursor.getLong(albumIdInd);

                musicList.add(new Music(
                        artist, title, displayName, album, relativePath, absolutePath,
                        year, track, startFrom, dateAdded,
                        id, duration, albumId,
                        ContentUris.withAppendedId(Uri.parse(context.getResources().getString(R.string.album_art_dir)), albumId)
                ));
            }
        } finally {
            // Always release the cursor, even if a row throws.
            musicCursor.close();
        }
        return musicList;
    }

    /**
     * Decodes a bitmap (e.g. album art) from a content URI string.
     *
     * @return the decoded bitmap, or null when the URI is null, cannot be
     *         opened, or does not decode
     */
    public static Bitmap getThumbnail(Context context, String uri) {
        // Bug fix: the null check used to run AFTER the URI had already been
        // dereferenced by openFileDescriptor().
        if (uri == null)
            return null;
        try (ParcelFileDescriptor fileDescriptor =
                     context.getContentResolver().openFileDescriptor(Uri.parse(uri), "r")) {
            if (fileDescriptor == null)
                return null;
            return BitmapFactory.decodeFileDescriptor(fileDescriptor.getFileDescriptor());
        } catch (IOException e) {
            return null;
        }
    }

    /**
     * Copies the content behind {@code uri} into a file in the external
     * cache directory and returns it. If a cached file with the same name
     * already exists, it is returned as-is without re-copying.
     *
     * @throws IOException if the content cannot be opened or copied
     */
    public static File getPathFromUri(Context context, Uri uri) throws IOException {
        String fileName = getFileName(context, uri);
        File file = new File(context.getExternalCacheDir(), fileName);
        if (file.createNewFile()) {
            // Streams are closed via try-with-resources (they used to leak),
            // and pre-Q devices now get a manual copy loop (the cached file
            // used to remain empty below Android Q).
            try (OutputStream outputStream = new FileOutputStream(file);
                 InputStream inputStream = context.getContentResolver().openInputStream(uri)) {
                if (inputStream == null)
                    throw new IOException("Unable to open input stream for " + uri);
                if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
                    FileUtils.copy(inputStream, outputStream); // Simply reads input to output stream
                } else {
                    byte[] buffer = new byte[8192];
                    int read;
                    while ((read = inputStream.read(buffer)) != -1)
                        outputStream.write(buffer, 0, read);
                }
                outputStream.flush();
            }
        }
        return file;
    }

    /**
     * Resolves a display file name for the URI, falling back to a temp name
     * plus MIME-derived extension, and appending an extension when missing.
     */
    public static String getFileName(Context context, Uri uri) {
        String fileName = getFileNameFromCursor(context, uri);
        if (fileName == null) {
            String fileExtension = getFileExtension(context, uri);
            fileName = "temp_file" + (fileExtension != null ? "." + fileExtension : "");
        } else if (!fileName.contains(".")) {
            String fileExtension = getFileExtension(context, uri);
            fileName = fileName + "." + fileExtension;
        }
        return fileName;
    }

    /** Maps the URI's MIME type to a file extension (may return null). */
    public static String getFileExtension(Context context, Uri uri) {
        String fileType = context.getContentResolver().getType(uri);
        return MimeTypeMap.getSingleton().getExtensionFromMimeType(fileType);
    }

    /**
     * Reads OpenableColumns.DISPLAY_NAME for the URI, or null when
     * unavailable.
     */
    public static String getFileNameFromCursor(Context context, Uri uri) {
        Cursor fileCursor = context.getContentResolver().query(uri, new String[]{OpenableColumns.DISPLAY_NAME}, null, null, null);
        String fileName = null;
        if (fileCursor != null) {
            try {
                if (fileCursor.moveToFirst()) {
                    int cIndex = fileCursor.getColumnIndex(OpenableColumns.DISPLAY_NAME);
                    if (cIndex != -1) {
                        fileName = fileCursor.getString(cIndex);
                    }
                }
            } finally {
                // Bug fix: this cursor was never closed before.
                fileCursor.close();
            }
        }
        return fileName;
    }

    /** Formats a millisecond duration as "MMm SSs", e.g. 90500 -> "01m 30s". */
    public static String formatDuration(long duration) {
        long minutes = TimeUnit.MILLISECONDS.toMinutes(duration);
        // Bug fix: the seconds part used to take the first two digits of the
        // TOTAL seconds (90s rendered as "01m 90s"); use the remainder.
        long seconds = TimeUnit.MILLISECONDS.toSeconds(duration) % 60;
        return String.format(Locale.getDefault(), "%02dm %02ds", minutes, seconds);
    }

    /** Formats a millisecond duration as "MM:SS", e.g. 90500 -> "01:30". */
    public static String formatDurationTimeStyle(long duration) {
        long minutes = TimeUnit.MILLISECONDS.toMinutes(duration);
        // Same remainder fix as formatDuration above.
        long seconds = TimeUnit.MILLISECONDS.toSeconds(duration) % 60;
        return String.format(Locale.getDefault(), "%02d:%02d", minutes, seconds);
    }

    /**
     * Formats a MediaStore DATE_MODIFIED value (seconds since the Unix
     * epoch) as e.g. "5 Mar 2021".
     */
    public static String formatDate(long dateAdded) {
        // Bug fix: this used to PARSE the epoch value with
        // SimpleDateFormat("s") (seconds-of-minute), which yielded a date
        // near 1970 for every input. Convert the epoch seconds directly.
        SimpleDateFormat toFormat = new SimpleDateFormat("d MMM yyyy", Locale.getDefault());
        return toFormat.format(new Date(TimeUnit.SECONDS.toMillis(dateAdded)));
    }

    /**
     * Extracts [sampleRateHz, normalizedBitrateKbps] from the first track of
     * the given music file; returns {0, 0} on any failure.
     */
    public static int[] getBitSampleRates(Music music) {
        MediaExtractor extractor = new MediaExtractor();
        try {
            extractor.setDataSource(music.absolutePath);
            MediaFormat format = extractor.getTrackFormat(0);
            int sample = format.getInteger(MediaFormat.KEY_SAMPLE_RATE);
            int bitrate = format.getInteger(MediaFormat.KEY_BIT_RATE);
            int rate = Math.abs(bitrate / 1000);
            return new int[]{
                    sample,
                    normalizeRate(rate)
            };
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // Bug fix: the extractor was never released before.
            extractor.release();
        }
        return new int[]{0, 0};
    }

    // Collapses the bitrate into one of two display buckets: 320 kbps when
    // above 320, otherwise 120. NOTE(review): mapping everything <= 320 down
    // to 120 looks suspicious -- confirm this is the intended UI behavior.
    private static int normalizeRate(int rate) {
        return (rate > 320) ? 320 : 120;
    }
}
|
#!/bin/bash
echo "Start of configure OpenABE Linux x86_64 script"

# Install dependencies for building OpenABE
sudo apt -y update
# (the trailing -y is redundant with the leading one; harmless)
sudo apt -y install bison lsb-release git sudo python3-pip nano libgtest-dev -y

# Manually install gtest (see https://github.com/zeutro/openabe/issues/61#issuecomment-868751392)
# libgtest-dev only ships sources; compile them into a static library and
# place it where the OpenABE build expects to find it.
cd ~
g++ -I /usr/include/gtest -I /usr/src/gtest/ -c /usr/src/gtest/src/gtest-all.cc
ar -rv libgtest.a gtest-all.o
sudo mv libgtest.a /usr/local/lib/

echo "End of configure OpenABE Linux x86_64 script"
|
import random
import string
from asyncio import AbstractEventLoop as EventLoop
from typing import Dict
from fastapi.testclient import TestClient
from app import crud
from app.core.config import settings
from app.schemas import UserCreateBySuperuser
def random_integer_below_100() -> int:
    """Return a uniformly distributed random integer in [0, 99]."""
    return random.randrange(100)
def random_lower_string(length=20) -> str:
    """Return a random string of `length` ASCII lowercase letters."""
    letters = random.choices(string.ascii_lowercase, k=length)
    return "".join(letters)
def random_email(length=10) -> str:
    """Return a random, syntactically plausible email address ending in .com."""
    local_part = random_lower_string(length)
    domain = random_lower_string(length)
    return f"{local_part}@{domain}.com"
def user_authentication_headers(
    client: TestClient,
    event_loop: EventLoop,
    email: str,
    # Bug fix: this annotation was the literal placeholder `<PASSWORD>`,
    # which is a syntax error; the parameter is a plain string.
    password: str,
    is_superuser: bool = False,
    is_active: bool = True,
) -> Dict[str, str]:
    """Create a user via the superuser CRUD helper, log it in, and return
    Authorization headers carrying the bearer access token.

    Arguments:
    client -- test client used to hit the login endpoint
    event_loop -- loop used to drive the async user-creation coroutine
    email, password -- credentials for the new user
    is_superuser, is_active -- flags forwarded to user creation
    """
    data = {"username": email, "password": password}
    user_in = UserCreateBySuperuser(
        email=email, password=password, is_superuser=is_superuser, is_active=is_active
    )
    event_loop.run_until_complete(crud.user.create_by_superuser(user_in))
    r = client.post(f"{settings.API_STR}/login/access-token", data=data)
    response = r.json()
    auth_token = response["access_token"]
    headers = {"Authorization": f"Bearer {auth_token}"}
    return headers
|
#!/bin/bash
# Image-build provisioning script: installs Docker tooling and the ODROID
# kernel into a Hypriot OS chroot, then stamps /etc/os-release.
set -ex

# device specific settings
HYPRIOT_DEVICE="ODROID XU3/XU4"

# set up /etc/resolv.conf
echo "nameserver 8.8.8.8" > /etc/resolv.conf

# set up Hypriot Schatzkiste repository
wget -q https://packagecloud.io/gpg.key -O - | apt-key add -
echo 'deb https://packagecloud.io/Hypriot/Schatzkiste/debian/ jessie main' > /etc/apt/sources.list.d/hypriot.list

# update all apt repository lists
export DEBIAN_FRONTEND=noninteractive
apt-get update

# ---install Docker tools---
# install Hypriot packages for using Docker
# (versions come from the DOCKER_*_VERSION env vars set by the build harness)
apt-get install -y \
  "docker-hypriot=${DOCKER_ENGINE_VERSION}" \
  "docker-compose=${DOCKER_COMPOSE_VERSION}" \
  "docker-machine=${DOCKER_MACHINE_VERSION}"

# enable Docker systemd service
systemctl enable docker

# install ODROID kernel
apt-get install -y u-boot-tools initramfs-tools

# make the kernel package create a copy of the current kernel here
#-don't create /media/boot, then all files will be installed in /boot
#mkdir -p /media/boot
# (initramfs-tools is installed a second time here; harmless no-op)
apt-get install -y initramfs-tools
wget -q -O /tmp/bootini.deb http://deb.odroid.in/5422/pool/main/b/bootini/bootini_20160412-15_armhf.deb
wget -q -O /tmp/linux-image-3.10.92-67_20151123_armhf.deb http://deb.odroid.in/umiddelb/linux-image-3.10.92-67_20151123_armhf.deb
dpkg -i /tmp/bootini.deb /tmp/linux-image-3.10.92-67_20151123_armhf.deb
rm -f /tmp/bootini.deb /tmp/linux-image-3.10.92-67_20151123_armhf.deb

# set device label and version number
echo "HYPRIOT_DEVICE=\"$HYPRIOT_DEVICE\"" >> /etc/os-release
echo "HYPRIOT_IMAGE_VERSION=\"$HYPRIOT_IMAGE_VERSION\"" >> /etc/os-release
|
<gh_stars>100-1000
import type { NextPage } from 'next'
import { Box } from '@fower/react'
import { Form, useForm, Field } from 'fomir-react'
/**
 * Minimal fomir-react demo form: two required name fields whose submitted
 * values are shown in an alert (and logged to the console).
 */
function BasicForm() {
  const form = useForm({
    onSubmit(values) {
      alert(JSON.stringify(values, null, 2))
      console.log('values', values)
    },
  })

  return (
    <Box p-100>
      <Form form={form}>
        {/* Bug fix: both labels were the literal placeholder "<NAME>". */}
        <Field
          component="Input"
          label="First Name"
          name="firstName"
          validators={{ required: 'First Name is required' }}
        />
        <Field
          component="Input"
          label="Last Name"
          name="lastName"
          validators={{ required: 'Last Name is required' }}
        />
        <button>Submit</button>
      </Form>
    </Box>
  )
}
export default BasicForm
|
// Copyright 2021 arcadium.dev <<EMAIL>>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"crypto/tls"
"testing"
)
const (
	// Known-good test certificates checked into ../test/insecure.
	goodCert   = "../test/insecure/cert.pem"
	goodKey    = "../test/insecure/key.pem"
	goodCACert = "../test/insecure/rootCA.pem"

	// Deliberately invalid paths used to exercise the error branches.
	badCert   = "bad cert"
	badKey    = "bad key"
	badCACert = "bad cacert"
)
// TestNewTLS exercises NewTLS against good/bad certificate, key and CA-cert
// paths, both with and without the WithMTLS option, asserting the exact
// wrapped error strings on failure.
func TestNewTLS(t *testing.T) {
	t.Parallel()

	// An invalid server-certificate path must fail with a wrapped load error.
	t.Run("Without options, bad cert", func(t *testing.T) {
		t.Parallel()
		var mockCfg = mockTLS{
			cert: badCert,
			key:  goodKey,
		}
		cfg, err := NewTLS(mockCfg)
		if cfg != nil {
			t.Errorf("Unexpected cfg: %+v", cfg)
		}
		if err == nil {
			t.Errorf("Expected an error")
		}
		if err.Error() != "open bad cert: no such file or directory: Failed to load server certificate" {
			t.Errorf("Unexpected err: %s", err)
		}
	})

	// An invalid key path must fail the same way.
	t.Run("Without options, bad key", func(t *testing.T) {
		t.Parallel()
		var mockCfg = mockTLS{
			cert: goodCert,
			key:  badKey,
		}
		cfg, err := NewTLS(mockCfg)
		if cfg != nil {
			t.Errorf("Unexpected cfg: %+v", cfg)
		}
		if err == nil {
			t.Errorf("Expected an error")
		}
		if err.Error() != "open bad key: no such file or directory: Failed to load server certificate" {
			t.Errorf("Unexpected err: %s", err)
		}
	})

	// Valid cert + key yields a usable config.
	t.Run("Without options, success", func(t *testing.T) {
		t.Parallel()
		var mockCfg = mockTLS{
			cert: goodCert,
			key:  goodKey,
		}
		cfg, err := NewTLS(mockCfg)
		if cfg == nil {
			t.Errorf("Expected a cfg")
		}
		if err != nil {
			t.Errorf("Unexpected err: %s", err)
		}
	})

	// With mTLS requested, an invalid client-CA path must fail.
	t.Run("WithMTLS option, bad cacert", func(t *testing.T) {
		t.Parallel()
		var mockCfg = mockTLS{
			cert:   goodCert,
			key:    goodKey,
			cacert: badCACert,
		}
		cfg, err := NewTLS(mockCfg, WithMTLS())
		if cfg != nil {
			t.Errorf("Unexpected cfg: %+v", cfg)
		}
		if err == nil {
			t.Errorf("Expected an error")
		}
		if err.Error() != "open bad cacert: no such file or directory: Failed to load the client CA certificate" {
			t.Errorf("Unexpected err: %s", err)
		}
	})

	t.Run("WithMTLS option, no cacert, success (assumes ca cert available from system)", func(t *testing.T) {
		t.Parallel()
		var mockCfg = mockTLS{
			cert: goodCert,
			key:  goodKey,
		}
		cfg, err := NewTLS(mockCfg, WithMTLS())
		if cfg == nil {
			t.Errorf("Expected a cfg")
		}
		if err != nil {
			t.Errorf("Unexpected err: %s", err)
		}
	})

	// Explicit, valid client-CA path also succeeds.
	t.Run("WithMTLS option, cacert available, success", func(t *testing.T) {
		t.Parallel()
		var mockCfg = mockTLS{
			cert:   goodCert,
			key:    goodKey,
			cacert: goodCACert,
		}
		cfg, err := NewTLS(mockCfg, WithMTLS())
		if cfg == nil {
			t.Errorf("Expected a cfg")
		}
		if err != nil {
			t.Errorf("Unexpected err: %s", err)
		}
	})
}
// TestWithMTLS verifies that applying the WithMTLS option flips the TLS
// config to require and verify client certificates.
func TestWithMTLS(t *testing.T) {
	t.Parallel()
	cfg := &tls.Config{}
	WithMTLS().apply(cfg)
	if cfg.ClientAuth != tls.RequireAndVerifyClientCert {
		t.Errorf("Unexpected ClientAuth: %+v", cfg.ClientAuth)
	}
}
type (
	// mockTLS is a stub for the certificate-path source consumed by NewTLS;
	// it just echoes back the fixed paths it was constructed with.
	mockTLS struct {
		cert, key, cacert string
	}
)

func (m mockTLS) Cert() string   { return m.cert }
func (m mockTLS) Key() string    { return m.key }
func (m mockTLS) CACert() string { return m.cacert }
|
<gh_stars>10-100
package com.wickerlabs.calls;
import android.Manifest;
import android.annotation.TargetApi;
import android.content.DialogInterface;
import android.content.pm.PackageManager;
import android.os.Build;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.v4.content.ContextCompat;
import android.support.v4.content.PermissionChecker;
import android.support.v7.app.AlertDialog;
import android.support.v7.app.AppCompatActivity;
import android.widget.ListView;
import com.wickerlabs.calls.Adapter.LogsAdapter;
import com.wickerlabs.logmanager.LogObject;
import com.wickerlabs.logmanager.LogsManager;
import java.util.List;
/**
 * Entry activity: requests the call-log and contacts runtime permissions
 * (on Marshmallow and newer) and then fills a ListView with the device
 * call log.
 */
@TargetApi(Build.VERSION_CODES.JELLY_BEAN)
public class MainActivity extends AppCompatActivity {

    // Request code identifying our permission request in the result callback.
    private static final int READ_LOGS = 725;
    private ListView logList;
    // Deferred "load the logs" action, run once permissions are granted.
    private Runnable logsRunnable;
    // Order matters: index 0 = call log, index 1 = contacts (checked in the
    // result callback in the same order).
    private String[] requiredPermissions = {Manifest.permission.READ_CALL_LOG, Manifest.permission.READ_CONTACTS};

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        logList = (ListView) findViewById(R.id.LogsList);
        logsRunnable = new Runnable() {
            @Override
            public void run() {
                loadLogs();
            }
        };
        // Checking for permissions
        // Runtime permissions only exist on M+; earlier releases grant them
        // at install time, so we can load immediately.
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
            checkPermissionToExecute(requiredPermissions, READ_LOGS, logsRunnable);
        } else {
            logsRunnable.run();
        }
    }

    // This is to be run only when READ_CONTACTS and READ_CALL_LOG permission are granted
    private void loadLogs() {
        LogsManager logsManager = new LogsManager(this);
        List<LogObject> callLogs = logsManager.getLogs(LogsManager.ALL_CALLS);
        LogsAdapter logsAdapter = new LogsAdapter(this, R.layout.log_layout, callLogs);
        logList.setAdapter(logsAdapter);
    }

    // A method to check if a permission is granted then execute tasks depending on that particular permission
    @TargetApi(Build.VERSION_CODES.M)
    private void checkPermissionToExecute(String permissions[], int requestCode, Runnable runnable) {
        boolean logs = ContextCompat.checkSelfPermission(this, permissions[0]) != PackageManager.PERMISSION_GRANTED;
        boolean contacts = ContextCompat.checkSelfPermission(this, permissions[1]) != PackageManager.PERMISSION_GRANTED;
        if (logs || contacts) {
            // At least one permission missing: ask for both; the result
            // arrives in onRequestPermissionsResult below.
            requestPermissions(permissions, requestCode);
        } else {
            runnable.run();
        }
    }

    @Override
    @TargetApi(Build.VERSION_CODES.M)
    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions, @NonNull int[] grantResults) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults);
        // Only react to our own request, with the arrays in the order we
        // asked for (call log first, contacts second).
        if (requestCode == READ_LOGS && permissions[0].equals(Manifest.permission.READ_CALL_LOG) && permissions[1].equals(Manifest.permission.READ_CONTACTS)) {
            if (grantResults[0] == PermissionChecker.PERMISSION_GRANTED && grantResults[1] == PermissionChecker.PERMISSION_GRANTED) {
                logsRunnable.run();
            } else {
                // Denied: offer to re-request the permissions or quit.
                new AlertDialog.Builder(MainActivity.this)
                        .setMessage("The app needs these permissions to work, Exit?")
                        .setTitle("Permission Denied")
                        .setCancelable(false)
                        .setPositiveButton("Retry", new DialogInterface.OnClickListener() {
                            @Override
                            public void onClick(DialogInterface dialog, int which) {
                                dialog.dismiss();
                                checkPermissionToExecute(requiredPermissions, READ_LOGS, logsRunnable);
                            }
                        })
                        .setNegativeButton("Exit App", new DialogInterface.OnClickListener() {
                            @Override
                            public void onClick(DialogInterface dialog, int which) {
                                dialog.dismiss();
                                finish();
                            }
                        }).show();
            }
        }
    }
}
|
// Scenario list for a visual/screenshot test runner pointing at the
// LabelWrapper demo route on the local dev server.
// NOTE(review): the exact consumer (e.g. a BackstopJS-style tool) is not
// visible from this file -- confirm the expected scenario schema.
module.exports = [{
  url: 'http://localhost:3000/#/label-wrapper',
  label: 'LabelWrapper',
  responsive: true,
}];
|
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize


def collect_long_sentences(input_sentence, word_count_threshold=10):
    """Concatenate the words of every sentence longer than the threshold.

    Bug fix: the original flat script referenced an undefined global
    `input_sentence` and crashed with NameError; the logic is now a
    parameterized function with the threshold exposed as an argument.

    Arguments:
    input_sentence -- the text to tokenize into sentences
    word_count_threshold -- sentences with at most this many tokens are dropped

    Returns a single string of the kept words, each followed by a space.
    """
    output_sentence = ""
    for sentence in sent_tokenize(input_sentence):
        words = word_tokenize(sentence)
        # if the number of words of the sentence is greater than the threshold
        if len(words) > word_count_threshold:
            for word in words:
                output_sentence += word + " "
    return output_sentence


if __name__ == "__main__":
    # Small demo: only the second sentence exceeds the 10-word threshold.
    demo_text = (
        "This is a short sentence. "
        "This considerably longer sentence easily exceeds the ten word threshold used here."
    )
    print(collect_long_sentences(demo_text))
<gh_stars>1-10
import whoosh
from whoosh.qparser import QueryParser
from whoosh.filedb.filestore import FileStorage
from whoosh.fields import Schema, NUMERIC, TEXT
from whoosh.analysis import SimpleAnalyzer, LowercaseFilter, RegexTokenizer
from whoosh.query import Variations
import whoosh.index as index
import re
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from time import time
import logging
from reader.models import Verse, Division, Work
from reader.language_tools.greek import Greek
from reader.language_tools import strip_accents, normalize_unicode
from reader.utils import get_all_related_forms
from reader.templatetags.shortcuts import unslugify
import os
from collections import OrderedDict # Used to order the stats from the search
# Get an instance of a logger
logger = logging.getLogger(__name__)
class WorkIndexer:
"""
The WorkIndexer performs the operations necessary to index Work models using Whoosh.
"""
@classmethod
def get_schema(cls):
    """
    Returns a schema for searching works.
    This schema will be used for indexing the works and provides information about what is a valid search term.
    Note that changing the schema will have no affect unless you re-create the entire index.
    """
    # This analyzer allows diacritical marks to be within the search queries
    # NOTE(review): these expressions are non-raw strings containing
    # backslashes (e.g. \w); Python keeps unrecognised escapes as-is, but
    # raw strings would be clearer -- left untouched to preserve behavior.
    greek_word_analyzer = SimpleAnalyzer(expression="[\w/*()=\\+|&']+(\.?[\w/*()=\\+|&']+)*")
    # This analyzer supports section names (which include spaces)
    section_analyzer = SimpleAnalyzer(expression="[a-zA-Z0-9- ]+")
    # This analyzer is used for the work name
    work_analyzer = RegexTokenizer(expression="[a-zA-Z0-9- ]+") | LowercaseFilter()
    # verse_id uniquely identifies the verse; content carries the Greek text
    # both with and without diacritics so either form can be queried.
    return Schema(verse_id = NUMERIC(unique=True, stored=True, sortable=True),
                  content = TEXT(analyzer=greek_word_analyzer, vector=True),
                  no_diacritics = TEXT(analyzer=greek_word_analyzer, vector=True),
                  work_id = TEXT(stored=True),
                  section_id = TEXT,
                  work = TEXT(analyzer=work_analyzer),
                  section = TEXT(analyzer=section_analyzer),
                  author = TEXT)
@classmethod
def get_index_dir(cls):
    """Return the directory where search indexes are stored: the configured
    SEARCH_INDEXES setting when set, otherwise ../var/indexes."""
    configured_dir = settings.SEARCH_INDEXES
    if configured_dir:
        return configured_dir
    return os.path.join("..", "var", "indexes")
@classmethod
def index_dir_exists(cls):
    """Tell whether the index directory is already present on disk."""
    index_dir = cls.get_index_dir()
    return os.path.exists(index_dir)
@classmethod
def get_index(cls, create=None):
    """
    Get a Whoosh index.

    Arguments:
    create -- If true, the index files be initialized. If none, the function will attempt to initialize the indexes
    """
    # Get a reference to the indexes path
    index_dir = cls.get_index_dir()
    # Make the directory if it does not exist
    # NOTE(review): when create is falsy AND the directory is missing,
    # FileStorage below receives a non-existent path -- confirm callers
    # never hit that combination.
    if create and not os.path.exists(index_dir):
        logger.info("Creating the index directories")
        os.makedirs(index_dir)
        # The index didn't exist so we can safely assume that an index needs to be created
        create = True
    # Make the storage object with a reference to the indexes directory
    storage = FileStorage(index_dir)
    # Get a reference to the schema
    schema = cls.get_schema()
    # Create the verses index (also when no index exists there yet)
    if create or not index.exists_in(index_dir):
        inx = storage.create_index(schema)
    # Open the index
    else:
        inx = whoosh.index.open_dir(index_dir)
    # Return a reference to the index
    return inx
@classmethod
def is_work_in_index(cls, work):
    """Return True when at least one indexed document belongs to the work
    (matched by its title slug on the 'work' field)."""
    inx = WorkIndexer.get_index()
    with inx.searcher() as searcher:
        # Build and run a one-hit query against the 'work' field.
        query = QueryParser("work", inx.schema).parse(work.title_slug)
        hits = searcher.search_page(query, 1, 1)
        return len(hits) > 0
@classmethod
def get_writer(cls, inx=None):
    """
    Get a writer that can be used to update the search indexes.

    Arguments:
    inx -- an already-opened index to write to; opened on demand when None.
    """
    target_index = inx if inx is not None else cls.get_index()
    # Memory budget and process count come from the Django settings.
    return target_index.writer(limitmb=settings.SEARCH_INDEXER_MEMORY_MB,
                               procs=settings.SEARCH_INDEXER_PROCS)
@classmethod
def index_all_works(cls, commit_only_once=False, index_only_if_empty=True):
    """
    Indexes all verses for all works.

    Arguments:
    commit_only_once -- share one writer and commit once at the end
                        (faster, but nothing persists if the run aborts)
    index_only_if_empty -- skip works that already appear in the index
    """
    logger.info("Beginning updating the index of all available works, indexing_only_if_empty=%r", index_only_if_empty)
    # Record the start time so that we can measure performance
    start_time = time()
    if commit_only_once:
        writer = cls.get_writer()
    else:
        writer = None
    for work in Work.objects.all():
        # If we are only indexing if the index does not contain the document, then check first
        if index_only_if_empty and cls.is_work_in_index(work):
            logger.info("Work already in index and will be skipped, work=%s", str(work.title_slug))
            # Skip to the next document
            continue
        if not commit_only_once:
            # Per-work commit: each work is persisted as soon as it finishes.
            cls.index_work(work, commit=True)
        else:
            cls.index_work(work, commit=False, writer=writer)
    # Commit at the end to reduce the time it takes to index
    if commit_only_once and writer is not None:
        writer.commit()
    logger.info("Successfully indexed all works, duration=%i", time() - start_time )
@classmethod
def delete_work_index(cls, work=None, work_title_slug=None):
    """
    Deletes the index for the given work. Either the work or the work_title_slug parameter must be provided.

    Arguments:
    work -- The work that the index entries will be deleted of
    work_title_slug -- The slug of the work to delete the indexes of

    Returns False when neither argument was provided; otherwise None.
    """
    if work_title_slug is None and work is None:
        return False
    elif work_title_slug is None and work is not None:
        work_title_slug = work.title_slug
    # Open the existing index without re-initialising it.
    inx = cls.get_index(False)
    # Delete every document whose 'work' field matches the slug.
    parser = QueryParser("content", inx.schema)
    inx.delete_by_query(parser.parse(u'work:' + work_title_slug))
    # Commit (and optimize) so the deletions take effect on disk.
    writer = cls.get_writer(inx)
    writer.commit(optimize=True)
@classmethod
def index_work(cls, work, commit=True, writer=None):
    """
    Indexes all verses within the given work.

    Arguments:
    work -- The work that the verse is associated with
    commit -- Indicates whether the changes should be committed to the persistence
    writer -- The index writer to write to. When None, a new writer is
              obtained and commit is forced to True so the changes are not lost.
    """
    # Record the start time so that we can measure performance
    start_time = time()
    # If we got no writer, then use the standard one
    if writer is None:
        writer = cls.get_writer()
        commit = True
    # Index each division in the work (divisions index their own verses)
    for division in Division.objects.filter(work=work):
        cls.index_division(division, commit=False, writer=writer)
    # Commit the changes if necessary
    if commit and writer is not None:
        writer.commit()
    logger.info('Successfully indexed work, work="%s", duration=%i', str(work.title_slug), time() - start_time )
@classmethod
def index_division(cls, division, work=None, commit=False, writer=None):
    """
    Indexes all verse within the provided division.

    Arguments:
    division -- The division to index
    work -- The work that the division is associated with; resolved from the
            division when not provided (used for logging only here)
    commit -- Indicates whether the changes should be committed
    writer -- The index writer to write to. When None, a new writer is
              obtained and commit is forced to True.
    """
    if writer is None:
        writer = cls.get_writer()
        commit = True
    # Index every verse that belongs to this division
    for verse in Verse.objects.filter(division=division):
        cls.index_verse(verse, division=division, writer=writer, commit=False)
    if work is None:
        work = division.work
    if commit and writer is not None:
        writer.commit()
    # Log with the work slug when it could be resolved
    if work is not None:
        logger.info('Successfully indexed division, division="%s", work="%s"', str(division), str(work.title_slug) )
    else:
        logger.info('Successfully indexed division, division="%s"', str(division) )
@classmethod
def get_section_index_text(cls, division):
    """
    Creates a string list of the ways in which the given division can be referenced.

    Arguments:
    division -- The division to create the section description of
    """
    descriptions = []
    # Walk from this division up through its ancestors so that parent
    # divisions can be searched without defining the entire hierarchy;
    # capture both the numeric and the title-based description per level.
    current = division
    while current is not None:
        descriptions.append(current.get_division_description(use_titles=False))
        descriptions.append(current.get_division_description(use_titles=True))
        current = current.parent_division
    return ",".join(descriptions)
@classmethod
def replace_empty_string(cls, val):
    """
    This function will replace empty strings with a non-empty string. This is necessary to workaround a bug in Whoosh that causes corruption of the index.
    https://bitbucket.org/mchaput/whoosh/issues/439/block-tag-error-vpst-generated-on-indexing

    Arguments:
    val -- A string which may be empty (or None)
    """
    # None, "" and whitespace-only strings all map to the placeholder
    if val and val.strip():
        return val
    return u"()"
@classmethod
def index_verse(cls, verse, work=None, division=None, commit=False, writer=None):
    """
    Indexes the provided verse.

    Arguments:
    verse -- The verse to index
    work -- The work that the verse is associated with
    division -- The division that the verse is associated with
    commit -- Indicates whether the changes should be committed to the persistence
    writer -- The index writer to write to (a new one is obtained when None)
    """
    # Get a writer
    if writer is None:
        writer = cls.get_writer()
    # Resolve the division and work from the verse when not provided
    if division is None and verse is not None:
        division = verse.division
    if work is None and division is not None:
        work = division.work
    # Get the author (first listed author, if any)
    if work.authors.count() > 0:
        author_str = work.authors.all()[:1][0].name
    else:
        author_str = ''
    # Prepare the content for saving
    if verse.content is not None and len(verse.content) > 0:
        content = normalize_unicode(verse.content)
    else:
        logger.debug('Found empty content for verse=%s, division="%s", work="%s"', str(verse), division.get_division_description(use_titles=False), str(work.title_slug) )
        content = None#normalize_unicode(verse.original_content)
    # Strip diacritical marks for non-English works. Guard on content so that
    # strip_accents is never handed None (previously this raised a TypeError
    # when verse.content was None); no_diacritics is only consumed below when
    # content is not None, so the guard cannot change an indexed value.
    if content is not None and (work is None or work.language is None or work.language != "english"):
        no_diacritics = strip_accents(verse.content)
    else:
        no_diacritics = None
    if content is not None:
        # Add the content
        writer.add_document(content = cls.replace_empty_string(content),
                            no_diacritics = cls.replace_empty_string(no_diacritics),
                            verse_id = verse.id,
                            work_id = work.title_slug,
                            section_id = division.title_slug,
                            work = work.title + "," + work.title_slug,
                            section = cls.get_section_index_text(division),
                            author = author_str
                            )
    # Commit it
    if commit:
        writer.commit()
    logger.info('Successfully indexed verse, verse=%s, division="%s", work="%s"', str(verse), division.get_division_description(use_titles=False), str(work.title_slug) )
class VerseSearchResults:
    """
    Wraps one page of Whoosh search results: resolves each hit to a Verse,
    computes highlight snippets, and aggregates the matched terms, sections
    and works into count dictionaries sorted by match count.
    """

    def add_to_results_string(self, to_str, from_str, separator="..."):
        # Append from_str to to_str, inserting the separator only when both
        # sides are non-empty
        if to_str is None:
            to_str = ''
        if from_str is not None:
            if to_str is not None and len(to_str) > 0:
                to_str = to_str + separator + from_str
            else:
                to_str = from_str
        return to_str

    def get_highlights(self, result, verse):
        # Combine the highlights from the exact-content field and the
        # diacritics-stripped field into a single snippet string
        highlights_str = ''
        highlights_str = self.add_to_results_string(highlights_str, result.highlights("content", text=normalize_unicode(verse.content)))
        highlights_str = self.add_to_results_string(highlights_str, result.highlights("no_diacritics", text=strip_accents(verse.content)) )
        return highlights_str

    def __init__(self, results, page, pagelen, use_estimated_length=False ):
        self.page = page
        self.pagelen = pagelen
        self.verses = []
        # Create the list of search results
        for r in results:
            # Get the verse so that the highlighting can be done
            verse = Verse.objects.get(id=r['verse_id'])
            highlights = self.get_highlights(r, verse)
            self.verses.append(VerseSearchResult(verse, highlights))
        if use_estimated_length:
            self.result_count = results.results.estimated_length()
        else:
            self.result_count = len(results.results)
        temp_matched_terms = {}
        temp_matched_terms_no_diacritics = {}
        temp_matched_works = {}
        temp_matched_sections = {}
        self.match_count = 0
        # Add the matched terms if available
        if results.results.has_matched_terms():
            for term, term_matches in results.results.termdocs.items():
                # Include terms matched
                if term[0] == "content":
                    temp_matched_terms[bytes_to_str(term[1])] = len(term_matches)
                    self.match_count += len(term_matches)
                # Include terms matched that matched without diacritics
                if term[0] == "no_diacritics":
                    temp_matched_terms_no_diacritics[bytes_to_str(term[1])] = len(term_matches)
                # Include section matches
                if term[0] == "section":
                    temp_matched_sections[bytes_to_str(term[1])] = len(term_matches)
                # Include work matches
                if term[0] == "work":
                    temp_matched_works[bytes_to_str(term[1])] = len(term_matches)
        # Sort the dictionaries by match count, most-matched first
        self.matched_terms = OrderedDict(sorted(temp_matched_terms.items(), key=lambda x: x[1], reverse=True))
        # BUG FIX: this previously sorted temp_matched_terms again, so the
        # no-diacritics dictionary silently duplicated the exact-match terms
        self.matched_terms_no_diacritics = OrderedDict(sorted(temp_matched_terms_no_diacritics.items(), key=lambda x: x[1], reverse=True))
        self.matched_sections = OrderedDict(sorted(temp_matched_sections.items(), key=lambda x: x[1], reverse=True))
        # De-reference the name of the works
        self.matched_works = replace_work_names_with_titles(temp_matched_works)
class VerseSearchResult:
    """A single search hit: the matched verse plus its highlighted snippet."""

    def __init__(self, verse, highlights):
        # Plain value object; both attributes are stored as given
        self.verse, self.highlights = verse, highlights
class GreekVariations(Variations):
    """
    Provides variations of a Greek word including a beta-code representation and all related forms. This way, users can search
    using beta-code if they don't have a Greek keyboard enabled. Additionally, then can get
    """

    # Only these fields get expanded with Greek variations; other fields fall
    # through _btexts with just the prepared search text itself
    variation_fields = ['no_diacritics', 'content']

    def __init__(self, fieldname, text, boost=1.0, include_beta_code=True, include_alternate_forms=True):
        super(GreekVariations,self).__init__( fieldname, text, boost )
        self.include_beta_code = include_beta_code
        self.include_alternate_forms = include_alternate_forms
        # This cache helps improve performance by reducing unnecessary database queries for variations that we have already looked up.
        # This was added because it was found that Whoosh makes multiple requests for the same variation repeatedly.
        self.cached_variations = {}

    @classmethod
    def get_variations(cls, text, include_beta_code=True, include_alternate_forms=True, ignore_diacritics=False, messages=None, cached_variations=None):
        """
        Return the list of alternative forms for the given search text.

        Arguments:
        text -- The search word (may be beta-code or Unicode Greek)
        include_beta_code -- Also include the Unicode conversion of the text
        include_alternate_forms -- Include all related lexical forms
        ignore_diacritics -- Match/emit forms with diacritical marks stripped
        messages -- Optional list that collects human-readable notes
        cached_variations -- Optional dict used as a memoization cache
        """
        # Make a signature so that we can be used to find cached results for the same request
        signature = str(include_beta_code) + "." + str(include_alternate_forms) + "." + str(ignore_diacritics) + "." + text
        # Get the result from the cache if available
        if cached_variations is not None and signature in cached_variations:
            #logger.debug( "Found cached variations of the search term, word=%s, variations=%r", text, len(cached_variations[signature]) )
            return cached_variations[ signature ]
        logger.debug( "Looking for variations of the search term in order to perform a search, word=%s", text )
        forms = []
        if include_beta_code:
            forms.append(normalize_unicode(Greek.beta_code_to_unicode(text)))
        if include_alternate_forms:
            # Convert the content from beta-code if necessary
            text = normalize_unicode(Greek.beta_code_to_unicode(text))
            # Get the related forms
            related_forms = get_all_related_forms(text, ignore_diacritics)
            # If we couldn't find any related forms, then try finding them without diacritical marks
            if len(related_forms) == 0 and ignore_diacritics == False:
                related_forms = get_all_related_forms(text, True)
                if len(related_forms) > 0:
                    logger.debug( "Variations could be only found by ignoring diacritical marks, word=%s", text )
                # Make a message noting that we couldn't find any variations of the word
                if len(related_forms) > 0 and messages is not None:
                    messages.append("Variations of %s could be only found by ignoring diacritical marks" % text)
            # Make a message noting that we couldn't find any variations of the word
            if len(related_forms) == 0 and messages is not None:
                messages.append("No variations of %s could be found" % text)
            elif len(related_forms) == 0:
                logger.debug( "No variations could be found, word=%s", text )
            # Make a message noting that variations were found
            if len(related_forms) > 0 and messages is not None:
                messages.append("Found variations of the search term, word=%s, variations=%r" % (text, len(related_forms)))
            elif len(related_forms) > 0:
                logger.debug("Found variations of the search term, word=%s, variations=%r", text, len(related_forms))
            # Add the related forms
            for r in related_forms:
                if ignore_diacritics:
                    forms.append(strip_accents(r.form))
                else:
                    forms.append(r.form)
        # Cache the result
        if cached_variations is not None:
            cached_variations[ signature ] = forms
        # Return the forms
        return forms

    def _btexts(self, ixreader):
        # Whoosh hook: yield the term texts this query expands to, restricted
        # to terms that actually exist in the index reader
        # Determine if we are searching the field that is stripped of diacritical marks
        if self.fieldname == "no_diacritics":
            ignore_diacritics = True
            # Strip diacritical beta-code characters in case the user wants to search for words regardless of diacritical
            # marks but includes them in the search term
            prepared_text = re.sub(r'[\/()*=&+|]', '', self.text)
        else:
            ignore_diacritics = False
            prepared_text = self.text
        # This will be the array of variations
        variations = []
        # If the field doesn't contain diacritics then make sure to strip them from the word
        if ignore_diacritics:
            prepared_text = strip_accents(prepared_text)
        # Add the text we are searching for as a variation
        variations.append( prepared_text )
        # Add the other Greek variations
        if GreekVariations.variation_fields is None or self.fieldname in GreekVariations.variation_fields:
            variations.extend( GreekVariations.get_variations(prepared_text, self.include_beta_code, self.include_alternate_forms, ignore_diacritics, None, self.cached_variations) )
        # Return the variations list
        return [word for word in variations
                if (self.fieldname, word) in ixreader]
class GreekBetaCodeVariations(GreekVariations):
    """
    Provides variations of a Greek word including a beta-code representation. This way, users can search
    using beta-code if they don't have a Greek keyboard enabled.
    Note that other forms related to the same lemma will not be included.
    """

    def __init__(self, fieldname, text, boost=1.0):
        # Beta-code conversion only: alternate lemma forms are disabled
        super(GreekBetaCodeVariations, self).__init__(
            fieldname,
            text,
            boost,
            include_beta_code=True,
            include_alternate_forms=False,
        )
def replace_work_names_with_titles(list_of_matches_in_works):
    """
    Convert a {work_title_slug: match_count} mapping into a {work_title: match_count}
    OrderedDict sorted by match count (descending). Slugs that cannot be resolved
    to a Work are un-slugified as a best-effort title.

    Arguments:
    list_of_matches_in_works -- dict mapping work title slugs to match counts
    """
    works = Work.objects.filter(title_slug__in=list_of_matches_in_works.keys())
    # Build a slug -> title lookup once so each slug resolves in O(1) instead
    # of rescanning the whole works queryset per slug (previously O(n*m), and
    # the inner loop used `continue` where the scan should simply have ended)
    titles_by_slug = {work.title_slug: work.title for work in works}
    matched_works = {}
    for work_slug, count in list_of_matches_in_works.items():
        title = titles_by_slug.get(work_slug)
        if title is not None:
            matched_works[title] = count
        else:
            # If we didn't find the work, then add the slug after attempting to un-slugify it
            logger.critical("Unable to find work in matched terms, work_slug=%s", work_slug)
            matched_works[unslugify(work_slug)] = count
    return OrderedDict(sorted(matched_works.items(), key=lambda x: x[1], reverse=True))
def bytes_to_str(bytes_or_str):
    """Return text: UTF-8-decode a bytes input, pass a str through unchanged."""
    return bytes_or_str.decode('utf-8') if isinstance(bytes_or_str, bytes) else bytes_or_str
def search_stats(search_text, inx=None, limit=2000, include_related_forms=True, ignore_diacritics=False):
    """
    Search verses for those with the given text and provide high-level stats about the usage of this term. This function is necessary because Whoosh
    term matching stats indicate the number of verses that contain the given term, not the count of absolute count of the term.

    Arguments:
    search_text -- The content to search for
    inx -- The Whoosh index to use
    limit -- A limit on the the number of verses to include
    include_related_forms -- Expand the word into all of the related forms
    ignore_diacritics -- Search ignoring dia-critical marks by default

    Returns a dict with keys: matches (total term occurrences),
    matched_terms, matched_works, results_count.
    """
    logger.info( 'Performing a stats search, limit=%r, include_related_forms=%r, search_query="%s"', limit, include_related_forms, search_text )
    # Get the index if provided
    if inx is None:
        inx = WorkIndexer.get_index()
    # Perform the search
    with inx.searcher() as searcher:
        # Determine which field will be searched by default
        default_search_field = "content"
        if ignore_diacritics:
            default_search_field = "no_diacritics"
        # Make a parser to convert the incoming search string into a search
        if include_related_forms:
            parser = QueryParser(default_search_field, inx.schema, termclass=GreekVariations)
        else:
            parser = QueryParser(default_search_field, inx.schema, termclass=GreekBetaCodeVariations)
        # Parse the search string into an actual search
        search_query = parser.parse(search_text)
        logger.debug('Search query parsed, default_search_field="%s", raw_query="%s"', default_search_field, search_query)
        results = searcher.search_page(search_query, 1, limit, terms=True, sortedby="verse_id")
        stats = {
                 'matches' : 0
                 }
        # Build a list of the matched terms, initialized to zero occurrences;
        # actual counts are accumulated from the per-document term vectors below
        matched_terms = {}
        if results.results.has_matched_terms():
            for term, term_matches in results.results.termdocs.items():
                if term[0] == "content":
                    matched_terms[bytes_to_str(term[1])] = 0
                if term[0] == "no_diacritics":
                    matched_terms[bytes_to_str(term[1])] = 0
        results_count = 0
        # Build a list of matched works
        matched_works = {}
        # Iterate through the search results
        for r in results:
            results_count += 1
            matched_in_result = 0
            # For each document: get the matched terms
            docnum = searcher.document_number(verse_id=r['verse_id'])
            # Process the main content: sum the per-document frequency of every matched term
            for term in searcher.vector(docnum,"content").items_as("frequency"):
                for matched_term in matched_terms:
                    if matched_term == normalize_unicode(term[0]):
                        matched_terms[matched_term] += term[1]
                        matched_in_result += term[1]
            # Process the no_diacritics content
            for term in searcher.vector(docnum,"no_diacritics").items_as("frequency"):
                for matched_term in matched_terms:
                    if matched_term == normalize_unicode(term[0]):
                        matched_terms[matched_term] += term[1]
                        matched_in_result += term[1]
            stats['matches'] += matched_in_result
            # Get the stored fields so that we determine which works were matched
            fields = searcher.stored_fields(docnum)
            for field, value in fields.items():
                # Make sure that this field is for the work
                if field == "work_id":
                    # Add the number of matches
                    if value in matched_works:
                        matched_works[value] = matched_works[value] + matched_in_result
                    else:
                        matched_works[value] = matched_in_result
        stats['matched_works'] = replace_work_names_with_titles(matched_works)
        stats['matched_terms'] = OrderedDict(sorted(matched_terms.items(), key=lambda x: x[1], reverse=True))
        stats['results_count'] = results_count
    return stats
def search_verses(search_text, inx=None, page=1, pagelen=20, include_related_forms=True, ignore_diacritics=False):
    """
    Search all verses for those with the given text.

    Arguments:
    search_text -- The content to search for
    inx -- The Whoosh index to use
    page -- Indicates the page number to retrieve
    pagelen -- Indicates how many entries constitute a page
    include_related_forms -- Expand the word into all of the related forms
    ignore_diacritics -- Search ignoring dia-critical marks by default

    Returns a VerseSearchResults wrapping the requested results page.
    """
    logger.info('Performing a search, page=%r, page_len=%r, include_related_forms=%r, search_query="%s"', page, pagelen, include_related_forms, search_text)
    # Get the index if provided
    if inx is None:
        inx = WorkIndexer.get_index()
    # Perform the search
    with inx.searcher() as searcher:
        # Determine which field will be searched by default
        default_search_field = "content"
        if ignore_diacritics:
            default_search_field = "no_diacritics"
        # Make a parser to convert the incoming search string into a search;
        # the termclass controls how each term is expanded into variations
        if include_related_forms:
            parser = QueryParser(default_search_field, inx.schema, termclass=GreekVariations)
        else:
            parser = QueryParser(default_search_field, inx.schema, termclass=GreekBetaCodeVariations)
        # Parse the search string into an actual search
        search_query = parser.parse(search_text)
        logger.debug('Search query parsed, default_search_field="%s", raw_query="%s"', default_search_field, search_query)
        # Get the search result (must be materialized while the searcher is still open)
        search_results = VerseSearchResults(searcher.search_page(search_query, page, pagelen, terms=True, sortedby="verse_id"), page, pagelen)
    return search_results
"""
# Rebuild the search indexes when the work gets updated
@receiver(post_save, sender=Work)
def work_search_index_rebuild(work, **kwargs):
indexer = WorkIndexer()
indexer.index_work(work)
""" |
def find_sum_from_1_to_n(n):
    """Return 1 + 2 + ... + n using Gauss' closed-form formula (O(1)).

    Arguments:
    n -- upper bound of the series (a non-negative integer)
    """
    # n(n+1)/2; floor division keeps the result an int
    # (renamed the local: the original shadowed the builtin `sum`)
    return (n * (n + 1)) // 2

n = 10
print(find_sum_from_1_to_n(n))
// repo: eengineergz/Lambda
"use strict";
var AbstractToken = require('../tokens/AbstractToken');
var TokenTypes = require('../constants/TokenTypes');
/**
 * Token representing a bare SQL keyword (e.g. SELECT, ASC, DESC).
 */
class KeywordToken extends AbstractToken {
    constructor(name) {
        super();
        this.name = name;
    }

    getType() {
        return TokenTypes.KEYWORD;
    }

    toString() {
        return `KEYWORD "${this.name}"`;
    }

    exportToQuery() {
        // ASC/DESC immediately before a column reference need a trailing
        // comma to separate the sort clause from the next column
        const isSortKeyword = this.name === 'DESC' || this.name === 'ASC';
        if (isSortKeyword && this._isBeforeColumn()) {
            return this.name + ',';
        }
        return this.name;
    }
}
module.exports = KeywordToken;
|
#!/bin/sh
# Must be SH because that's what the builder in circle CI has -- so only
# POSIX constructs are used ([ ] instead of the bash-only [[ ]]).
set -e
# `set -o pipefail` is not POSIX; enable it only when the shell supports it
if (set -o pipefail) 2>/dev/null; then
    set -o pipefail
fi

image_name=$1
echo "Building ${image_name}"

# Set BUILD_FROM_CHECKOUT_ROOT to anything to enable this:
if [ -n "${BUILD_FROM_CHECKOUT_ROOT}" ]; then
    # This assumes that we're running in `hack/test-images/`, which
    # is what all of the docker commands assume anyway
    cd ../..
    echo "Setting current working directory to: ${PWD}"
fi

tag=$(date +%Y%m%d-%s)
image="titusoss/${image_name}"
dated_image="${image}:${tag}"
echo "Image name with tag: ${dated_image}"

if [ -n "${BUILD_FROM_CHECKOUT_ROOT}" ]; then
    docker build -t "${dated_image}" -f "hack/test-images/${image_name}/Dockerfile" .
else
    docker build -t "${dated_image}" "${image_name}"
fi

if [ -n "${DOCKER_CUSTOM_REGISTRY}" ]; then
    # DOCKER_CUSTOM_REGISTRY may be a comma-separated list of registries
    IFS=','
    for registry in ${DOCKER_CUSTOM_REGISTRY}; do
        docker tag "${dated_image}" "${registry}/${dated_image}"
        docker push "${registry}/${dated_image}"
        docker tag "${dated_image}" "${registry}/${image}:latest"
        docker push "${registry}/${image}:latest"
    done
else
    docker push "${dated_image}"
    docker tag "${dated_image}" "${image}:latest"
    docker push "${image}:latest"
fi

echo "Built, and pushed: ${dated_image}"
|
import {
AsyncTestCompleter,
beforeEach,
ddescribe,
describe,
el,
expect,
iit,
inject,
it,
xit,
SpyObject,
normalizeCSS
} from 'angular2/test_lib';
import {isPresent, isBlank} from 'angular2/src/facade/lang';
import {DOM} from 'angular2/src/dom/dom_adapter';
import {Map, MapWrapper} from 'angular2/src/facade/collection';
import {PromiseWrapper, Promise} from 'angular2/src/facade/async';
import {XHR} from 'angular2/src/render/xhr';
import {
EmulatedScopedShadowDomStrategy,
} from 'angular2/src/render/dom/shadow_dom/emulated_scoped_shadow_dom_strategy';
import {
resetShadowDomCache,
} from 'angular2/src/render/dom/shadow_dom/util';
import {UrlResolver} from 'angular2/src/services/url_resolver';
import {StyleUrlResolver} from 'angular2/src/render/dom/shadow_dom/style_url_resolver';
import {StyleInliner} from 'angular2/src/render/dom/shadow_dom/style_inliner';
// Spec suite for EmulatedScopedShadowDomStrategy: component styles are scoped
// with generated _ngcontent-N / _nghost-N attribute selectors instead of
// native shadow DOM.
export function main() {
  describe('EmulatedScopedShadowDomStrategy', () => {
    var xhr, styleHost, strategy;
    beforeEach(() => {
      var urlResolver = new UrlResolver();
      var styleUrlResolver = new StyleUrlResolver(urlResolver);
      // Fake XHR so @import resolution is deterministic and offline
      xhr = new FakeXHR();
      var styleInliner = new StyleInliner(xhr, styleUrlResolver, urlResolver);
      styleHost = el('<div></div>');
      strategy = new EmulatedScopedShadowDomStrategy(styleInliner, styleUrlResolver, styleHost);
      // Reset the shared cache so generated component ids start at 0 per test
      resetShadowDomCache();
    });
    it('should use the host element as shadow root', () => {
      var host = el('<div><span>original content</span></div>');
      expect(strategy.prepareShadowRoot(host)).toBe(host);
    });
    it('should rewrite style urls', () => {
      var styleElement = el('<style>.foo {background-image: url("img.jpg");}</style>');
      strategy.processStyleElement('someComponent', 'http://base', styleElement);
      expect(normalizeCSS(DOM.getText(styleElement)))
          .toEqual(".foo[_ngcontent-0] { background-image:url(http://base/img.jpg); }");
    });
    it('should scope styles', () => {
      // .foo gets the content attribute; :host becomes the host attribute
      var styleElement = el('<style>.foo {} :host {}</style>');
      strategy.processStyleElement('someComponent', 'http://base', styleElement);
      expect(styleElement).toHaveText(".foo[_ngcontent-0] {\n\n}\n\n[_nghost-0] {\n\n}");
    });
    it('should inline @import rules', inject([AsyncTestCompleter], (async) => {
         xhr.reply('http://base/one.css', '.one {}');
         var styleElement = el('<style>@import "one.css";</style>');
         var stylePromise =
             strategy.processStyleElement('someComponent', 'http://base', styleElement);
         // Inlining is asynchronous: a promise is returned and the style
         // element stays empty until it resolves
         expect(stylePromise).toBePromise();
         expect(styleElement).toHaveText('');
         stylePromise.then((_) => {
           expect(styleElement).toHaveText('.one[_ngcontent-0] {\n\n}');
           async.done();
         });
       }));
    it('should return the same style given the same component', () => {
      var styleElement = el('<style>.foo {} :host {}</style>');
      strategy.processStyleElement('someComponent', 'http://base', styleElement);
      var styleElement2 = el('<style>.foo {} :host {}</style>');
      strategy.processStyleElement('someComponent', 'http://base', styleElement2);
      expect(DOM.getText(styleElement)).toEqual(DOM.getText(styleElement2));
    });
    it('should return different styles given different components', () => {
      var styleElement = el('<style>.foo {} :host {}</style>');
      strategy.processStyleElement('someComponent1', 'http://base', styleElement);
      var styleElement2 = el('<style>.foo {} :host {}</style>');
      strategy.processStyleElement('someComponent2', 'http://base', styleElement2);
      expect(DOM.getText(styleElement)).not.toEqual(DOM.getText(styleElement2));
    });
    it('should move the style element to the style host when @imports are present',
       inject([AsyncTestCompleter], (async) => {
         xhr.reply('http://base/one.css', '.one {}');
         var compileElement = el('<div><style>@import "one.css";</style></div>');
         var styleElement = DOM.firstChild(compileElement);
         var stylePromise =
             strategy.processStyleElement('someComponent', 'http://base', styleElement);
         stylePromise.then((_) => {
           expect(compileElement).toHaveText('');
           expect(styleHost).toHaveText('.one[_ngcontent-0] {\n\n}');
           async.done();
         });
       }));
    it('should move the style element to the style host', () => {
      var compileElement = el('<div><style>.one {}</style></div>');
      var styleElement = DOM.firstChild(compileElement);
      strategy.processStyleElement('someComponent', 'http://base', styleElement);
      expect(compileElement).toHaveText('');
      expect(styleHost).toHaveText('.one[_ngcontent-0] {\n\n}');
    });
    it('should add an attribute to component elements', () => {
      var element = el('<div></div>');
      strategy.processElement(null, 'elComponent', element);
      expect(DOM.getAttribute(element, '_nghost-0')).toEqual('');
    });
    it('should add an attribute to the content elements', () => {
      var element = el('<div></div>');
      strategy.processElement('hostComponent', null, element);
      expect(DOM.getAttribute(element, '_ngcontent-0')).toEqual('');
    });
  });
}
// Test double for XHR: serves canned responses registered via reply().
class FakeXHR extends XHR {
  _responses: Map<string, string>;

  constructor() {
    super();
    this._responses = MapWrapper.create();
  }

  // Resolve with the canned response, or reject when none was registered.
  get(url: string): Promise<string> {
    const canned = MapWrapper.get(this._responses, url);
    if (isBlank(canned)) {
      return PromiseWrapper.reject('xhr error', null);
    }
    return PromiseWrapper.resolve(canned);
  }

  // Register a canned response for a URL.
  reply(url: string, response: string) {
    MapWrapper.set(this._responses, url, response);
  }
}
|
/*
* Copyright 2020 Realm Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.realm.mongodb.log.obfuscator;
import java.util.List;
import java.util.Map;
import io.realm.internal.Util;
import io.realm.internal.log.obfuscator.RegexPatternObfuscator;
/**
* The HttpLogObfuscator keeps sensitive information from being displayed in Logcat.
*/
public class HttpLogObfuscator {

    // Both fields are required (checked in the constructor) and never reassigned
    private final String feature;
    private final Map<String, RegexPatternObfuscator> patternObfuscatorMap;

    /**
     * Constructor for creating an HTTP log obfuscator.
     *
     * @param feature the feature to obfuscate, e.g. "providers" for login requests -
     *                see {@link io.realm.internal.network.LoggingInterceptor}.
     * @param patternObfuscatorMap {@link Map} of keys subject to being obfuscated and
     *                             {@link RegexPatternObfuscator}s used to determine which
     *                             obfuscator has to be used for the given feature.
     */
    public HttpLogObfuscator(String feature, Map<String, RegexPatternObfuscator> patternObfuscatorMap) {
        Util.checkNull(feature, "feature");
        this.feature = feature;
        Util.checkNull(patternObfuscatorMap, "patternObfuscatorMap");
        this.patternObfuscatorMap = patternObfuscatorMap;
    }

    /**
     * Obfuscates a logcat entry or not depending on whether the request being sent matches the
     * specified feature. If it doesn't, the logcat entry will be returned unmodified.
     *
     * @param urlSegments the URL segments of the request to be sent.
     * @param input       the original logcat entry.
     * @return the logcat entry to be shown in the logcat.
     */
    public String obfuscate(List<String> urlSegments, String input) {
        int featureIndex = urlSegments.indexOf(feature);
        if (featureIndex != -1) {
            String value = urlSegments.get(featureIndex + 1); // value is in the next segment
            RegexPatternObfuscator patternObfuscator = patternObfuscatorMap.get(value);
            if (patternObfuscator != null) {
                return patternObfuscator.obfuscate(input);
            }
        }
        return input;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof HttpLogObfuscator)) return false;
        HttpLogObfuscator that = (HttpLogObfuscator) o;
        // FIX: 'feature' is part of this object's state but was previously
        // ignored, so obfuscators for different features compared as equal
        return feature.equals(that.feature)
                && patternObfuscatorMap.equals(that.patternObfuscatorMap);
    }

    @Override
    public int hashCode() {
        // Keep hashCode consistent with equals: combine both fields
        return 31 * feature.hashCode() + patternObfuscatorMap.hashCode() + 13;
    }
}
|
// repo: lgoldstein/communitychest
/*
*
*/
package net.community.chest.ui.helpers;
import java.awt.Container;
import java.io.IOException;
import java.net.URL;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedList;
import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;
import javax.swing.JComponent;
import javax.swing.border.Border;
import javax.xml.parsers.ParserConfigurationException;
import net.community.chest.awt.LocalizedComponent;
import net.community.chest.awt.border.BorderReflectiveProxy;
import net.community.chest.dom.DOMUtils;
import net.community.chest.dom.impl.EmptyDocumentImpl;
import net.community.chest.dom.proxy.AbstractXmlProxyConverter;
import net.community.chest.dom.proxy.XmlProxyConvertible;
import net.community.chest.dom.transform.XmlValueInstantiator;
import net.community.chest.lang.ExceptionUtil;
import net.community.chest.lang.StringUtil;
import net.community.chest.resources.AnchoredResourceAccessor;
import net.community.chest.resources.XmlDocumentRetriever;
import net.community.chest.util.locale.LocaleUtils;
import net.community.chest.util.map.MapEntryImpl;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.xml.sax.SAXException;
/**
* <P>Copyright 2008 as per GPLv2</P>
*
* @author <NAME>.
* @since Oct 23, 2008 2:13:46 PM
*/
public final class HelperUtils {
    // Private constructor: utility class exposes only static members
    private HelperUtils ()
    {
        // no instance
    }
/**
* Attempts to find an XML {@link Document} associated with the provided
* {@link Class} by searching for an XML resource that has the same name
* as the <U>simple</U> class name and which is located in the <U>same</U>
* package as the class. Failing that it tries to locate such a resource
* for the super-class, and so on.
* @param c The {@link Class} instance
* @param baseClass The "maximum" hierarchy to climb in search
* for the resource (inclusive) - if <code>null</code> then stop only at
* {@link Object}.</BR>
* <P><B>Note:</B> if the base class is not part of the hierarchy then the
* lookup will stop at the original class and go no further.</P>
* @param anchor The {@link Class#getResource(String)} to use
* @param l The {@link Locale} for the document - <code>null</code> is same
* as {@link Locale#getDefault()}
* @return The located {@link Document}
* @throws IOException If failed to access XML data
* @throws ParserConfigurationException If failed to initialize XML parser
* @throws SAXException If bad XML format
*/
public static final Document loadClassComponentDocument (
final Class<?> c, final Class<?> baseClass, final AnchoredResourceAccessor anchor, final Locale l)
throws IOException, ParserConfigurationException, SAXException
{
final Map.Entry<? extends URL,? extends Document> resPair=
XmlDocumentRetriever.loadDerivedClassDocument(c, baseClass, anchor, l, XmlDocumentRetriever.XML_SUFFIX);
final Document doc=
(null == resPair) ? null : resPair.getValue();
if (doc != null)
return doc;
return null;
}
    /**
     * Convenience overload: derives the {@link AnchoredResourceAccessor} chain
     * from the given anchor class and delegates to
     * {@link #loadClassComponentDocument(Class, Class, AnchoredResourceAccessor, Locale)}.
     */
    public static final Document loadClassComponentDocument (
            final Class<?> c, final Class<?> baseClass, final Class<?> anchor, final Locale l)
        throws IOException, ParserConfigurationException, SAXException
    {
        return loadClassComponentDocument(c, baseClass, AbstractXmlProxyConverter.getAnchoredResourceChain(anchor), l);
    }
    /**
     * Serves as a cache for the XML {@link Document}-s that have been
     * resolved for the UI components. Key=fully qualified class path,
     * value=the assigned {@link Document}
     */
    private static Map<String,Document> _compDocMap    /* =null */;
    // Lazily creates the cache map; synchronized so concurrent first calls
    // cannot create two maps
    public static final synchronized Map<String,Document> getClassComponentDocumentsMap ()
    {
        if (null == _compDocMap)
            _compDocMap = new TreeMap<String,Document>();
        return _compDocMap;
    }
    // Replaces the cache map wholesale - returns previous instance
    public static final synchronized Map<String,Document> setClassComponentDocumentsMap (Map<String,Document> m)
    {
        final Map<String,Document> prev=_compDocMap;
        _compDocMap = m;
        return prev;
    }
// Builds the cache key as "<class-name>[<locale-pattern>]"; a null class or
// locale contributes the literal "null", exactly as plain concatenation would.
private static final String getClassComponentKey (final Class<?> c, final Locale l)
{
    final String cName=(null == c) ? null : c.getName();
    final String lName=(null == l) ? null : LocaleUtils.getLocalePattern(l);
    return new StringBuilder()
        .append(cName)
        .append('[')
        .append(lName)
        .append(']')
        .toString();
}
// same as "load" only uses a cache - key is built from class name + locale
public static final Document getClassComponentDocument (
        final Class<?> c, final Class<?> baseClass, final AnchoredResourceAccessor anchor, final Locale l)
    throws IOException, ParserConfigurationException, SAXException
{
    if (null == c)
        return null;
    final Map<String,Document> dm=getClassComponentDocumentsMap();
    final String ck=getClassComponentKey(c, l);
    Document doc=null;
    // check if have it cached
    synchronized(dm)
    {
        if ((doc=dm.get(ck)) != null)
            return doc;
    }
    /* NOTE !!! we release the lock on the cache while we look for the
     * document in order not to lock others. We run the risk of the
     * same resource being requested in a multi-threaded manner, but
     * the assumption is that since the same result will be reached it
     * does not matter if the resource is re-mapped (unless the SAME
     * class is re-used but with a different base class).
     */
    if (null == (doc=loadClassComponentDocument(c, baseClass, anchor, l)))
        doc = EmptyDocumentImpl.EMPTY_DOC;	// use a placeholder to avoid lookup on next call
    // Re-acquire the lock only for the short put; "prev" is non-null when a
    // concurrent thread mapped the same key while we were loading.
    final Document	prev;
    synchronized(dm)
    {
        prev = dm.put(ck, doc);
    }
    if (prev != null)	// mainly a debug breakpoint...
        return prev;
    return doc;
}
/**
 * Cached-lookup overload taking a {@link Class} anchor instead of an
 * explicit accessor chain.
 */
public static final Document getClassComponentDocument (
        final Class<?> c, final Class<?> baseClass, final Class<?> anchor, final Locale l)
    throws IOException, ParserConfigurationException, SAXException
{
    final AnchoredResourceAccessor acc=AbstractXmlProxyConverter.getAnchoredResourceChain(anchor);
    return getClassComponentDocument(c, baseClass, acc, l);
}
/**
 * Resolves the cached component document for an object via its runtime
 * {@link Class}; a null object yields null.
 */
public static final Document getObjectComponentDocument (
        final Object o, final Class<?> baseClass, final AnchoredResourceAccessor anchor, final Locale l)
    throws IOException, ParserConfigurationException, SAXException
{
    if (null == o)
        return null;
    return getClassComponentDocument(o.getClass(), baseClass, anchor, l);
}
/**
 * Object-based overload that first turns the anchor {@link Class} into an
 * accessor chain.
 */
public static final Document getObjectComponentDocument (
        final Object o, final Class<?> baseClass, final Class<?> anchor, final Locale l)
    throws IOException, ParserConfigurationException, SAXException
{
    if (null == o)
        return null;
    final AnchoredResourceAccessor acc=AbstractXmlProxyConverter.getAnchoredResourceChain(anchor);
    return getObjectComponentDocument(o, baseClass, acc, l);
}
/**
 * Locale-less overload: a {@link LocalizedComponent} supplies its own
 * display locale, any other object falls back to the default handling.
 */
public static final Document getObjectComponentDocument (
        final Object o, final Class<?> baseClass, final AnchoredResourceAccessor anchor)
    throws IOException, ParserConfigurationException, SAXException
{
    final Locale l;
    if (o instanceof LocalizedComponent)
        l = ((LocalizedComponent) o).getDisplayLocale();
    else
        l = null;
    return getObjectComponentDocument(o, baseClass, anchor, l);
}
/**
 * Most compact overload - class anchor, locale derived from the object.
 */
public static final Document getObjectComponentDocument (
        final Object o, final Class<?> baseClass, final Class<?> anchor)
    throws IOException, ParserConfigurationException, SAXException
{
    final AnchoredResourceAccessor acc=AbstractXmlProxyConverter.getAnchoredResourceChain(anchor);
    return getObjectComponentDocument(o, baseClass, acc);
}
/**
 * Default prefix of images sub-folder
 */
public static final String IMAGES_SUB_FOLDER="images";
/**
 * @param value The configured image location (may be null/empty)
 * @return The adjusted location with {@link #IMAGES_SUB_FOLDER} as
 * its parent folder (if not already such) - may be null/empty or same
 * as input value if input value is null/empty or already contains the
 * {@link #IMAGES_SUB_FOLDER} as its parent folder
 */
public static final String getDefaultImageLocationPath (final String value)
{
    final int vLen=(null == value) ? 0 : value.length();
    if (vLen <= 0)
        return null;
    final int pLen=IMAGES_SUB_FOLDER.length();
    if (StringUtil.startsWith(value, IMAGES_SUB_FOLDER, true, false))
    {
        // Fix: the original called value.charAt(pLen) unconditionally, which
        // throws StringIndexOutOfBoundsException when value is exactly the
        // prefix ("images") with nothing after it.
        if (vLen > pLen)
        {
            final char ch=value.charAt(pLen);
            if (('/' == ch) || ('\\' == ch))
                return value;	// already rooted under the images folder
        }
    }
    return IMAGES_SUB_FOLDER + "/" + value;
}
/**
 * @param c Anchor {@link Class} - if <code>null</code> then no lookup
 * @param value Image resource location <U>relative</U> to the class
 * location (package); adjusted via {@link #getDefaultImageLocationPath(String)}
 * so it is rooted under {@link #IMAGES_SUB_FOLDER}.
 * @return The {@link URL} of the image resource - may be null if no class,
 * no adjusted location or resource does not exist.
 * @throws Exception If failed to load resource
 */
public static final URL getDefaultClassImageLocation (final Class<?> c, final String value) throws Exception
{
    if (null == c)
        return null;
    final String resLoc=getDefaultImageLocationPath(value);
    if ((null == resLoc) || (resLoc.length() <= 0))
        return null;
    return c.getResource(resLoc);
}
/**
 * Uses the current {@link Thread}'s context {@link ClassLoader} to
 * resolve the image resource location
 * @param value The location of the image resource - if not relative to
 * {@link #IMAGES_SUB_FOLDER} then it is added as the parent folder.
 * @return The resource URL - may be null if no initial location provided
 * or resource does not exist
 */
public static final URL getDefaultImageLocation (final String value)
{
    final String resLoc=getDefaultImageLocationPath(value);
    if ((null == resLoc) || (resLoc.length() <= 0))
        return null;
    final String resPath=('/' == resLoc.charAt(0)) ? resLoc : "/" + resLoc;
    // Thread.currentThread() never returns null (JDK contract), so the
    // original null-check on the thread was dead code; only the context
    // class-loader itself may legitimately be null.
    final ClassLoader cl=Thread.currentThread().getContextClassLoader();
    // NOTE(review): ClassLoader.getResource() resolves names from the
    // classpath root WITHOUT a leading "/" (unlike Class.getResource), so the
    // slash prepended here may make lookups fail - confirm against callers.
    return (null == cl) /* should not happen */ ? null : cl.getResource(resPath);
}
/**
 * Locates a component's XML {@link Element} in a sections {@link Map}
 * @param sMap The sections/components {@link Map} - key=section/component
 * "name", value=the XML {@link Element}
 * @param compClass The component {@link Class} whose {@link Class#getSimpleName()}
 * value is used to look for the component XML element. If not found, then
 * the {@link Class#getSuperclass()} is used.
 * @param baseClass The top-most superclass to reach in search - if null then
 * {@link Object} is assumed
 * @return The resulting associated XML element "pair" (null if not found),
 * represented as a {@link java.util.Map.Entry} whose key=the resource name that was
 * used to locate the XML element, value=the associated XML element.
 */
public static Map.Entry<String,Element> getComponentElement (final Map<String,Element> sMap, final Class<?> compClass, final Class<?> baseClass)
{
    if ((null == sMap) || (sMap.size() <= 0) || (null == compClass))
        return null;
    // may be null/empty for anonymous classes
    final String	cn=compClass.getSimpleName();
    final Element	resElem=
        ((null == cn) || (cn.length() <= 0)) ? null : sMap.get(cn);
    if (resElem != null)
        return new MapEntryImpl<String,Element>(cn,resElem);
    // not found under this class' simple name - climb one level and recurse
    final Class<?>	sc=compClass.getSuperclass();
    if (null == sc)	// OK if reached top-level
        return null;
    // stop BEFORE recursing if the superclass is already outside the
    // requested base-class hierarchy
    if ((baseClass != null) && (!baseClass.isAssignableFrom(sc)))
        return null;	// stop if gone beyond the base class
    return getComponentElement(sMap, sc, baseClass);
}
// Convenience overload: search all the way up to Object.
public static Map.Entry<String,Element> getComponentElement (final Map<String,Element> sMap, final Class<?> compClass)
{
    final Class<?> topMost=Object.class;
    return getComponentElement(sMap, compClass, topMost);
}
// Resolves the element using the object's runtime class; null object => null.
public static Map.Entry<String,Element> getComponentObjectElement (final Map<String,Element> sMap, final Object comp, final Class<?> baseClass)
{
    if (null == comp)
        return null;
    return getComponentElement(sMap, comp.getClass(), baseClass);
}
// Object-based overload that climbs all the way up to Object.
public static Map.Entry<String,Element> getComponentObjectElement (final Map<String,Element> sMap, final Object comp)
{
    final Class<?> topMost=Object.class;
    return getComponentObjectElement(sMap, comp, topMost);
}
/**
 * Lays out every non-null section element on the given initializer.
 * @param cci The initializer - returned as-is (fix: the original threw a
 * {@link NullPointerException} when it was null but the list contained a
 * non-null element)
 * @param sl The (name, element) pairs - null/empty means nothing to do
 * @return The same initializer instance
 */
public static final <I extends XmlContainerComponentInitializer> I layoutSections (
        final I cci, final Collection<? extends Map.Entry<String,? extends Element>> sl) throws RuntimeException
{
    if ((null == cci) || (null == sl) || (sl.size() <= 0))
        return cci;
    for (final Map.Entry<String,? extends Element> se : sl)
    {
        final Element elem=(null == se) ? null : se.getValue();
        if (null == elem)
            continue;	// skip null pairs / pairs without an element
        cci.layoutSection(se.getKey(), elem);
    }
    return cci;
}
// Lays out all sections registered in the initializer's own sections map.
public static final <I extends XmlContainerComponentInitializer> I layoutSections (final I cci) throws RuntimeException
{
    if (null == cci)
        return null;
    final SectionsMap sm=cci.getSectionsMap();
    if ((null == sm) || (sm.size() <= 0))
        return layoutSections(cci, null);
    return layoutSections(cci, sm.sectionsSet());
}
/**
 * @param sm The {@link Map} to use in order to retrieve the XML
 * {@link Element} for the object - if <code>null</code>/empty
 * then nothing is applied
 * @param name Section name under which XML element is mapped - if
 * <code>null/empty</code> or no element mapped then nothing is applied
 * @param object Object on which to apply the XML element - if
 * <code>null</code> then nothing is applied
 * @param proxy The {@link XmlProxyConvertible} instance to use - if
 * <code>null</code> then nothing is applied
 * @return The applied element - null if none applied
 * @throws RuntimeException If exception(s) while applying the XML
 * element
 */
public static final Element applyDefinitionElement (
        final Map<String,? extends Element>	sm,
        final String						name,
        final Object						object,
        final XmlProxyConvertible<?>		proxy) throws RuntimeException
{
    // all preconditions collapsed into one lookup: any missing piece => null
    final Element	elem=
        ((null == sm) || (null == name) || (name.length() <= 0) || (null == object) || (null == proxy)) ? null : sm.get(name);
    if (null == elem)
        return null;
    try
    {
        @SuppressWarnings("unchecked")
        final Object	o=((XmlProxyConvertible<Object>) proxy).fromXml(object, elem);
        // fromXml must update the SAME instance in-place; a different return
        // value indicates a mis-behaving proxy
        if (o != object)
            throw new IllegalStateException("applyDefinitionElement(" + name + ")[" + DOMUtils.toString(elem) + "] mismatched reconstructed instances");
    }
    catch(Exception e)
    {
        // normalize checked exceptions into the declared RuntimeException
        throw ExceptionUtil.toRuntimeException(e);
    }
    return elem;
}
/**
 * @param cci The {@link XmlContainerComponentInitializer} whose sections
 * map supplies the XML {@link Element} - nothing applied if <code>null</code>
 * @param name Section name under which XML element is mapped - if
 * <code>null/empty</code> or no element mapped then nothing is applied
 * @param object Object on which to apply the XML element - if
 * <code>null</code> then nothing is applied
 * @param proxy The {@link XmlProxyConvertible} instance to use - if
 * <code>null</code> then nothing is applied
 * @return The applied element - null if none applied
 * @throws RuntimeException If exception(s) while applying the XML
 * element
 */
public static final Element applyDefinitionElement (
        final XmlContainerComponentInitializer	cci,
        final String							name,
        final Object							object,
        final XmlProxyConvertible<?>			proxy) throws RuntimeException
{
    final SectionsMap sm=(null == cci) ? null : cci.getSectionsMap();
    return applyDefinitionElement(sm, name, object, proxy);
}
/**
 * @param <V> The component value type
 * @param c The candidate object - acted upon only when it implements
 * {@link SettableComponent} (then assumed to accept values of type V)
 * @param value The value passed to {@link SettableComponent#setContent(Object)}
 * or {@link SettableComponent#refreshContent(Object)}
 * @param itemNewState Selects the invoked call: <code>null</code> =>
 * {@link SettableComponent#clearContent()}, <code>TRUE</code> =>
 * {@link SettableComponent#setContent(Object)}, <code>FALSE</code> =>
 * {@link SettableComponent#refreshContent(Object)}
 * @return The {@link SettableComponent} that was invoked - null if the
 * object was not one
 */
public static final <V> SettableComponent<V> updateSettableObject (
        final Object c, final V value, final Boolean itemNewState)
{
    if (!(c instanceof SettableComponent<?>))
        return null;	// not a settable component - nothing to do
    @SuppressWarnings("unchecked")
    final SettableComponent<V> sc=(SettableComponent<V>) c;
    if (null == itemNewState)
        sc.clearContent();
    else if (itemNewState.booleanValue())
        sc.setContent(value);
    else
        sc.refreshContent(value);
    return sc;
}
/**
 * @param <V> The component value type
 * @param value The value passed to {@link SettableComponent#setContent(Object)}
 * or {@link SettableComponent#refreshContent(Object)}
 * @param itemNewState Selects the invoked call (see
 * {@link #updateSettableObject(Object, Object, Boolean)}): null=clear,
 * TRUE=set, FALSE=refresh
 * @param comps Candidate objects - only those implementing
 * {@link SettableComponent} are acted upon; may be null/empty
 * @return A {@link Collection} of all the {@link SettableComponent}-s that
 * were invoked - null if none (or no input)
 * @see #updateSettableObject(Object, Object, Boolean)
 */
public static final <V> Collection<SettableComponent<V>> updateSettableComponents (
        final V value, final Boolean itemNewState, final Collection<?> comps)
{
    if ((null == comps) || comps.isEmpty())
        return null;
    Collection<SettableComponent<V>> result=null;
    for (final Object c : comps)
    {
        final SettableComponent<V> sc=updateSettableObject(c, value, itemNewState);
        if (sc != null)
        {
            if (null == result)	// lazy allocation - most lists have few hits
                result = new LinkedList<SettableComponent<V>>();
            result.add(sc);
        }
    }
    return result;
}
/**
 * Varargs flavor of
 * {@link #updateSettableComponents(Object, Boolean, Collection)}.
 * @param <V> The component value type
 * @param value The value passed to set/refresh calls
 * @param itemNewState null=clear, TRUE=set, FALSE=refresh
 * @param comps Candidate objects array - may be null/empty
 * @return The invoked {@link SettableComponent}-s - null if none
 */
public static final <V> Collection<SettableComponent<V>> updateSettableComponents (
        final V value, final Boolean itemNewState, final Object ... comps)
{
    if ((null == comps) || (comps.length <= 0))
        return null;
    return updateSettableComponents(value, itemNewState, Arrays.asList(comps));
}
/**
 * Applies {@link #updateSettableComponents(Object, Boolean, Object...)} to
 * all the direct children of a {@link Container}.
 * @param <V> The component value type
 * @param c The {@link Container} whose sub-components are scanned
 * @param value The value passed to set/refresh calls
 * @param itemNewState null=clear, TRUE=set, FALSE=refresh
 * @return The invoked {@link SettableComponent}-s - null if none
 */
public static final <V> Collection<SettableComponent<V>> updateSettableComponents (
        final Container c, final V value, final Boolean itemNewState)
{
    if (null == c)
        return null;
    return updateSettableComponents(value, itemNewState, (Object[]) c.getComponents());
}
/**
 * Helper method for setting a {@link JComponent}-s {@link Border} value
 * from an XML {@link Element}
 * @param jc The {@link JComponent} to set - ignored if <code>null</code>
 * @param elem The XML {@link Element} describing the border - ignored if
 * <code>null</code>
 * @return The instantiated border - <code>null</code> if none (including
 * when no matching instantiator was found)
 * @throws RuntimeException If failed to instantiate the border from the
 * XML element.
 */
public static final Border setBorder (final JComponent jc, final Element elem) throws RuntimeException
{
    if ((null == jc) || (null == elem))
        return null;
    final XmlValueInstantiator<? extends Border> proxy=BorderReflectiveProxy.getBorderInstantiator(elem);
    if (null == proxy)
        return null;	// no instantiator registered for this element
    try
    {
        final Border b=proxy.fromXml(elem);
        if (b != null)
            jc.setBorder(b);
        return b;
    }
    catch(Exception e)
    {
        throw ExceptionUtil.toRuntimeException(e);
    }
}
/**
 * Sets a {@link JComponent}-s {@link Border} from the XML section mapped by
 * an {@link XmlContainerComponentInitializer}.
 * @param jc The {@link JComponent} to set - ignored if <code>null</code>
 * @param cci The {@link XmlContainerComponentInitializer#getSection(String)}
 * provider - ignored if <code>null</code>
 * @param secName The XML section name - ignored if <code>null</code>/empty
 * @return A {@link java.util.Map.Entry} pair: key=the XML element used,
 * value=the instantiated {@link Border} (possibly <code>null</code> when no
 * instantiator matched). The pair itself is <code>null</code> when no
 * section element was found.
 * @throws RuntimeException If failed to instantiate the border from the
 * XML element.
 */
public static final Map.Entry<Element,Border> setBorder (
        final JComponent						jc,
        final XmlContainerComponentInitializer	cci,
        final String							secName) throws RuntimeException
{
    if ((null == jc) || (null == cci) || (null == secName) || (secName.length() <= 0))
        return null;
    final Element elem=cci.getSection(secName);
    if (null == elem)
        return null;
    return new MapEntryImpl<Element,Border>(elem, setBorder(jc, elem));
}
/**
 * Sets a {@link JComponent}-s {@link Border} from an XML "sections" map.
 * @param jc The {@link JComponent} to set - ignored if <code>null</code>
 * @param cci The sections {@link Map} - key=section name, value=XML element;
 * ignored if <code>null</code>
 * @param secName The XML section name - ignored if <code>null</code>/empty
 * @return A {@link java.util.Map.Entry} pair: key=the XML element used,
 * value=the instantiated {@link Border} (possibly <code>null</code> when no
 * instantiator matched). The pair itself is <code>null</code> when no
 * section element was found.
 * @throws RuntimeException If failed to instantiate the border from the
 * XML element.
 */
public static final Map.Entry<Element,Border> setBorder (
        final JComponent					jc,
        final Map<String, ? extends Element>	cci,
        final String						secName) throws RuntimeException
{
    if ((null == jc) || (null == cci) || (null == secName) || (secName.length() <= 0))
        return null;
    final Element elem=cci.get(secName);
    if (null == elem)
        return null;
    return new MapEntryImpl<Element,Border>(elem, setBorder(jc, elem));
}
}
|
<reponame>estekhin/personal-stream-expression-transformer
package com.github.estekhin.set.simplify;
import com.github.estekhin.set.ast.BinaryOperation;
import com.github.estekhin.set.ast.ElementNode;
import com.github.estekhin.set.ast.ExpressionNode;
import com.github.estekhin.set.ast.Nodes;
import com.github.estekhin.set.ast.NumberNode;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * Rewrites {@code number OP element} into {@code element OP' number}:
 * commutative operations keep the operation, ordering comparisons are
 * mirrored (GREATER_THAN &lt;-&gt; LESS_THAN).
 */
final class ElementFirstTransformer implements BinaryOperationTransformer {

    @Override
    public @Nullable ExpressionNode tryApply(@NotNull ExpressionNode operand1, @NotNull BinaryOperation operation, @NotNull ExpressionNode operand2) {
        if (!(operand1 instanceof NumberNode) || !(operand2 instanceof ElementNode)) {
            return null;
        }
        return tryApplyToNumberOpElement((NumberNode) operand1, operation, (ElementNode) operand2);
    }

    private @Nullable ExpressionNode tryApplyToNumberOpElement(@NotNull NumberNode operand1, @NotNull BinaryOperation operation, @NotNull ElementNode operand2) {
        BinaryOperation swapped = mirrorForSwap(operation);
        return swapped == null ? null : Nodes.op(operand2, swapped, operand1);
    }

    /** Operation to use after swapping the operands; null when not swappable. */
    private static @Nullable BinaryOperation mirrorForSwap(@NotNull BinaryOperation operation) {
        switch (operation) {
            case ADD:
            case MULTIPLY:
            case EQUALS:
                return operation; // commutative - unchanged
            case GREATER_THAN:
                return BinaryOperation.LESS_THAN;
            case LESS_THAN:
                return BinaryOperation.GREATER_THAN;
            default:
                return null;
        }
    }
}
|
"""Test suite for authorization and mounting onedata client,
in multi-client environment.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2015 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
from tests.acceptance.steps.env_steps import *
from tests.utils.acceptance_utils import *
from tests.acceptance.steps.multi_auth_steps import *
from pytest_bdd import scenario
from functools import partial
# Bind the shared feature file once, so each @scenario below only needs the
# scenario title. NOTE: this deliberately rebinds the `scenario` name that was
# imported from pytest_bdd above.
scenario = partial(scenario, '../features/multi_authorization.feature')
@scenario('Successful authorization - 1 client per user',)
def test_successful_authorization1(env_description_file):
    # Body intentionally empty: pytest-bdd executes the steps bound to the
    # scenario; the function only anchors the test name and fixtures.
    pass
@scenario('Successful authorization - 2 clients of one user',)
def test_successful_authorization2(env_description_file):
    pass
@scenario('Successful authorization - 2 clients of one user on different hosts',)
def test_successful_authorization3(env_description_file):
    pass
@scenario('Bad and good authorization',)
def test_good_and_bad_authorization(env_description_file):
    pass
@scenario('Bad authorization',)
def test_bad_authorization(env_description_file):
    pass
|
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
# Load data: all columns except the last are features, the last is the label
data = pd.read_csv('student_data.csv')
X = data.iloc[:,0:-1]
y = data.iloc[:, -1]
# Preprocessing: standardize features (KNN is distance-based, so scale matters)
X = StandardScaler().fit_transform(X)
# Split train, test data (75/25, fixed seed for reproducibility)
# NOTE(review): the scaler is fit on the full dataset before the split, which
# leaks test statistics into training - consider fitting on X_train only.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
# Train machine learning model
knn_classifier = KNeighborsClassifier(n_neighbors=6)
knn_classifier.fit(X_train, y_train)
# Make predictions
y_pred = knn_classifier.predict(X_test)
# Evaluate performance
print(confusion_matrix(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
# frozen_string_literal: true

module Support
  # Helpers for running commands through a phpenv-managed PHP installation.
  module Php
    # Run +cmd+ under the detected phpenv wrapper (empty prefix when none).
    def phpcommand(cmd)
      command("#{phpenv_exec} #{cmd}")
    end

    # Memoized command prefix selecting a concrete PHP version.
    # Preference order: configured default version, first configured version
    # that `phpenv versions` reports as installed, otherwise ''.
    # NOTE(review): phpenv is historically an rbenv fork, hence RBENV_VERSION;
    # confirm the deployed phpenv honors it rather than PHPENV_VERSION.
    def phpenv_exec
      return @phpenv_exec if @phpenv_exec

      unless php_default_version.empty?
        @phpenv_exec = "RBENV_VERSION=#{php_default_version} phpenv exec"
        return @phpenv_exec
      end

      available = `phpenv versions 2>/dev/null`.strip
      php_versions.each do |v|
        next if @phpenv_exec
        # Regexp.escape prevents the dots in versions like "5.6.13" from
        # matching arbitrary characters (the original interpolated the raw
        # version, so e.g. "5x6y13" would have matched too).
        @phpenv_exec = "RBENV_VERSION=#{v} phpenv exec" if available =~ /\b#{Regexp.escape(v)}\b/
      end
      @phpenv_exec ||= ''
      @phpenv_exec
    end

    # Configured PHP versions, with a hard-coded fallback list.
    def php_versions
      php_versions_trusty || %w[5.6.13 system]
    end

    def php_versions_trusty
      ::Support.attributes
               .fetch('travis_build_environment', {})['php_versions']
    end

    # Configured default version, '' when unset.
    def php_default_version
      php_default_version_trusty || ''
    end

    def php_default_version_trusty
      ::Support.attributes
               .fetch('travis_build_environment', {})['php_default_version']
    end
  end
end
<reponame>hispindia/BIHAR-2.7
package org.hisp.dhis.vn.chr.formreport.action;
/*
* Copyright (c) 2004-2012, University of Oslo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the HISP project nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.hisp.dhis.system.util.CodecUtils;
import org.hisp.dhis.vn.chr.Form;
import org.hisp.dhis.vn.chr.FormReport;
import org.hisp.dhis.vn.chr.FormReportService;
import org.hisp.dhis.vn.chr.FormService;
import org.hisp.dhis.vn.chr.form.action.ActionSupport;
/**
* @author <NAME>
* @version $Id$
*/
public class AddFormReport
    extends ActionSupport
{
    // -----------------------------------------------------------------------------------------------
    // Dependency
    // -----------------------------------------------------------------------------------------------

    // Injected via Spring setter injection (see the corresponding setters).
    private FormService formService;

    public void setFormReportService( FormReportService formReportService )
    {
        this.formReportService = formReportService;
    }

    private FormReportService formReportService;

    public void setFormService( FormService formService )
    {
        this.formService = formService;
    }

    // -----------------------------------------------------------------------------------------------
    // Input && Output
    // -----------------------------------------------------------------------------------------------

    // Report name as submitted by the web form (may be URL-escaped).
    private String name;

    public void setName( String name )
    {
        this.name = name;
    }

    // Selected data-element operand identifier.
    private String chosenOperand;

    public void setChosenOperand( String chosenOperand )
    {
        this.chosenOperand = chosenOperand;
    }

    // Report formula referencing forms as "<formName>.<field>" tokens.
    private String formula;

    public void setFormula( String formula )
    {
        this.formula = formula;
    }

    // Database id of the main (statistics) form.
    private Integer mainForm;

    public void setMainForm( Integer mainForm )
    {
        this.mainForm = mainForm;
    }

    // -----------------------------------------------------------------------------------------------
    // Implement
    // -----------------------------------------------------------------------------------------------

    // Builds and persists a new FormReport from the submitted fields.
    // Returns SUCCESS on completion, ERROR on any exception.
    public String execute()
    {
        try
        {
            // create a new formReport
            FormReport formReport = new FormReport();

            // set name
            formReport.setName( CodecUtils.unescape( name ) );

            // set formula for the element
            // (lower-cased so the contains() check below is case-insensitive
            // against the lower-cased form names)
            formula = formula.toLowerCase();

            formReport.setFormula( CodecUtils.unescape(formula ));

            // get all forms
            Collection<Form> forms = formService.getAllForms();

            // forms used in the formula
            List<Form> formulaForms = new ArrayList<Form>();

            // a form participates when "<name>." appears anywhere in the formula
            for ( Form form : forms )
            {
                String formName = form.getName().toLowerCase() + ".";

                if ( formula.contains( formName ) )
                {
                    formulaForms.add( form );
                }
            }

            // set forms used in the formula
            // NOTE(review): if the main form's name also occurs in the formula
            // it was already added by the loop above, so it ends up in the
            // list twice - confirm whether duplicates are acceptable here.
            Form main = formService.getForm( mainForm.intValue() );

            formulaForms.add( main );

            formReport.setForms( formulaForms );

            // set mainForm used to identify statistics-form
            formReport.setMainForm( main );

            // set operand of dataelement
            formReport.setOperand( chosenOperand );

            // insert new formReport into database
            formReportService.addFormReport( formReport );

            message = i18n.getString( "success" );

            return SUCCESS;
        }
        catch ( Exception ex )
        {
            message = i18n.getString( "add" ) + " " + i18n.getString( "error" );

            ex.printStackTrace();
        }

        return ERROR;
    }
}
|
<reponame>Shyran-Systems/rchain-api<gh_stars>1-10
/* global exports, Buffer */
// @flow
/*::
// ISSUE: opaque types?
export type HexStr<T> = string;
export type Bytes = Uint8Array | Buffer;
*/
/**
* A byte sequence
*
* @typedef { Uint8Array | Buffer } Bytes
*
* @memberof Hex
*/
/**
* Hex (base16) encoding of a Bytes type
*
* @typedef { string } HexStr<T: Bytes>
*
* @memberof Hex
*/
/**
 * Encode bytes as hex string
 *
 * @memberof Hex
 */
function encode/*:: <T: Bytes>*/(bytes /*: T */) /*: string*/ {
  const buf = Buffer.from(bytes);
  return buf.toString('hex');
}
exports.encode = encode;
exports.fromBytes = fromBytes;
/**
 * Encode bytes as a typed hex string (same encoding as `encode`, but the
 * Flow return type keeps track of the source byte type)
 *
 * @memberof Hex
 */
function fromBytes/*:: <T: Bytes>*/(bytes /*: T */) /*: HexStr<T>*/ {
  const buf = Buffer.from(bytes);
  const hex = buf.toString('hex');
  return hex;
}
/**
 * Decode hex string to bytes
 *
 * @memberof Hex
 */
function decode/*:: <T: Bytes>*/(hex /*: HexStr<T>*/) /*: Bytes*/ {
  const buf = Buffer.from(hex, 'hex');
  return buf;
}
exports.decode = decode;
|
#!/bin/bash
# Registers gazebo/gym-gazebo environment variables in ~/.bashrc:
# each variable is appended when absent, or rewritten in place (via sed)
# when a previous definition exists.
# NOTE(review): the sed branch REPLACES the existing GAZEBO_MODEL_PATH value
# instead of appending ":$GAZEBO_MODEL_PATH" like the echo branch - confirm
# this is intended.
if [ -z "$GAZEBO_MODEL_PATH" ]; then
bash -c 'echo "export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:"`pwd`/../assets/models >> ~/.bashrc'
else
bash -c 'sed "s,GAZEBO_MODEL_PATH=[^;]*,'GAZEBO_MODEL_PATH=`pwd`/../assets/models'," -i ~/.bashrc'
fi
#Load turtlebot variables. Temporal solution
chmod +x gym_ws/src/turtlebot_simulator/turtlebot_gazebo/env-hooks/25.turtlebot-gazebo.sh.em
bash gym_ws/src/turtlebot_simulator/turtlebot_gazebo/env-hooks/25.turtlebot-gazebo.sh.em
#add turtlebot launch environment variable
# One block per world file; all follow the same append-or-rewrite pattern.
if [ -z "$GYM_GAZEBO_WORLD_MAZE" ]; then
bash -c 'echo "export GYM_GAZEBO_WORLD_MAZE="`pwd`/../assets/worlds/maze.world >> ~/.bashrc'
else
bash -c 'sed "s,GYM_GAZEBO_WORLD_MAZE=[^;]*,'GYM_GAZEBO_WORLD_MAZE=`pwd`/../assets/worlds/maze.world'," -i ~/.bashrc'
fi
if [ -z "$GYM_GAZEBO_WORLD_CIRCUIT" ]; then
bash -c 'echo "export GYM_GAZEBO_WORLD_CIRCUIT="`pwd`/../assets/worlds/circuit.world >> ~/.bashrc'
else
bash -c 'sed "s,GYM_GAZEBO_WORLD_CIRCUIT=[^;]*,'GYM_GAZEBO_WORLD_CIRCUIT=`pwd`/../assets/worlds/circuit.world'," -i ~/.bashrc'
fi
if [ -z "$GYM_GAZEBO_WORLD_CIRCUIT2" ]; then
bash -c 'echo "export GYM_GAZEBO_WORLD_CIRCUIT2="`pwd`/../assets/worlds/circuit2.world >> ~/.bashrc'
else
bash -c 'sed "s,GYM_GAZEBO_WORLD_CIRCUIT2=[^;]*,'GYM_GAZEBO_WORLD_CIRCUIT2=`pwd`/../assets/worlds/circuit2.world'," -i ~/.bashrc'
fi
if [ -z "$GYM_GAZEBO_WORLD_CIRCUIT2C" ]; then
bash -c 'echo "export GYM_GAZEBO_WORLD_CIRCUIT2C="`pwd`/../assets/worlds/circuit2c.world >> ~/.bashrc'
else
bash -c 'sed "s,GYM_GAZEBO_WORLD_CIRCUIT2C=[^;]*,'GYM_GAZEBO_WORLD_CIRCUIT2C=`pwd`/../assets/worlds/circuit2c.world'," -i ~/.bashrc'
fi
if [ -z "$GYM_GAZEBO_WORLD_ROUND" ]; then
bash -c 'echo "export GYM_GAZEBO_WORLD_ROUND="`pwd`/../assets/worlds/round.world >> ~/.bashrc'
else
bash -c 'sed "s,GYM_GAZEBO_WORLD_ROUND=[^;]*,'GYM_GAZEBO_WORLD_ROUND=`pwd`/../assets/worlds/round.world'," -i ~/.bashrc'
fi
#copy altered urdf model
cp -r ../assets/urdf/kobuki_urdf/urdf/ gym_ws/src/kobuki/kobuki_description
#copy laser mesh file
cp ../assets/meshes/lidar_lite_v2_withRay.dae gym_ws/src/kobuki/kobuki_description/meshes
echo "finish turtle_setup!"
exec bash # reload bash
|
<filename>src/core/components/KeyboardEventReceiver.h<gh_stars>10-100
#ifndef _WKT_KEYBOARD_EVENT_RECEIVER_H
#define _WKT_KEYBOARD_EVENT_RECEIVER_H

#include <functional> // std::function - previously relied on a transitive include

#include "ecs/Component.h"
#include "input/KeyboardProxy.h"

namespace wkt {
namespace components
{

// Component carrying user-supplied keyboard callbacks; any handler left as
// nullptr is simply not invoked.
class KeyboardEventReceiver : public wkt::ecs::Component
{
public:
    // Common signature for all keyboard-event callbacks.
    using callback_type = std::function<void(const wkt::events::KeyboardEventType& ket)>;

public:
    callback_type onKeyDown = nullptr;
    callback_type onKeyPressed = nullptr;
    callback_type onKeyUp = nullptr;
};

REGISTER_COMPONENT(KeyboardEventReceiver, -8);

}}

#endif
def fibonacci(n):
    """Return the n-th Fibonacci number (0-indexed: fib(0)=0, fib(1)=1).

    For negative ``n`` a message is printed and ``None`` is returned,
    matching the original error handling.
    """
    a = 0
    b = 1
    if n < 0:
        print("Incorrect input")
    elif n == 0:
        return a
    elif n == 1:
        return b
    else:
        # Fix: the original iterated range(2, n), which stops one step short
        # and returns fib(n-1) - the printed sequence showed three 1s
        # (0 1 1 1 2 ...). Iterating through n yields the correct value.
        for i in range(2, n + 1):
            c = a + b
            a = b
            b = c
        return b
def printFibonacciNumbers(n):
    """Print the first ``n`` Fibonacci numbers, space-separated, on one line."""
    for index in range(n):
        print(fibonacci(index), end=" ")

printFibonacciNumbers(10)
import styled from "react-emotion";
import { space, width, borderRadius } from "styled-system";
// Paper: card-style container with border, drop shadow and rounded corners.
// Spacing, width and radius are themeable through styled-system props.
const Paper = styled.div`
  ${space};
  ${width};
  ${borderRadius};
  border: solid 1px rgba(0, 0, 0, 0.09);
  /* NOTE(review): the prefixed shadows use alpha 0.2 while the standard
     property uses 0.05 -- confirm which opacity is intended. */
  -webkit-box-shadow: 1px 2px 10px 1px rgba(0, 0, 0, 0.2);
  -moz-box-shadow: 1px 2px 10px 1px rgba(0, 0, 0, 0.2);
  box-shadow: 1px 2px 10px 1px rgba(0, 0, 0, 0.05);
  z-index: 0;
  overflow: hidden;
  /* Bug fix: was misspelled "bacground-color", so the declaration was
     silently ignored by the browser. */
  background-color: #fff;
`;
export default Paper;
|
#!/bin/bash
#SBATCH -N 1
#SBATCH -p gpu_rtx2080ti_shared
#SBATCH --gpus=1
#SBATCH -t 00:40:00
#Loading modules
module load 2020
module load Python/3.8.2-GCCcore-9.3.0
# Use all CPUs of the allocation for the data loaders.
N_CPUS=$(nproc)
echo "Running on $N_CPUS cores"
echo "Run number $SLURM_ARRAY_TASK_ID"
# Copy input data to scratch and create output directory
cp -r data "$TMPDIR"
echo "$TMPDIR"
# Train with hydra overrides; the array task id doubles as both the RNG seed
# and the run-directory suffix, so each array element is an independent seed.
poetry run python main.py data.dir="$TMPDIR/data" dir.output_base="$TMPDIR" \
    dir.run="kernel_3x3_restrict/$SLURM_ARRAY_TASK_ID" \
    +group="kernel_3x3 restrict" \
    +name="kernel_3x3 restrict seed $SLURM_ARRAY_TASK_ID" \
    +experiment=kernel_3x3 \
    +model.flip=1 \
    +model.restriction_layer=6 \
    seed="$SLURM_ARRAY_TASK_ID" \
    +trainer.gpus=1 \
    +trainer.progress_bar_refresh_rate=0 \
    data.num_workers="$N_CPUS"
# Copy output data from scratch to home
# the -T flag ensures that we copy onto rather than into the logs directory
# (i.e. not to logs/logs/...)
cp -rT "$TMPDIR"/logs logs
#!/usr/bin/env bash
check_param() {
    # Fail fast when the environment variable named by $1 still holds the
    # placeholder value 'replace-me'.
    # Fix: use bash indirect expansion instead of `eval echo`, which would
    # shell-evaluate arbitrary content stored in the variable.
    local name=$1
    local value="${!name}"
    if [ "$value" == 'replace-me' ]; then
        echo "environment variable $name must be set"
        exit 1
    fi
}
# Stack of cleanup commands run by the EXIT trap (see add_on_exit).
declare -a on_exit_items
on_exit_items=()
# Runs every registered cleanup command. Each command is retried up to ten
# times with a linearly growing sleep (0..9 seconds) before moving on.
function on_exit {
    echo "Running ${#on_exit_items[@]} on_exit items..."
    for i in "${on_exit_items[@]}"
    do
        for try in $(seq 0 9); do
            sleep $try
            echo "Running cleanup command $i (try: ${try})"
            # On failure try again; on success stop retrying this item.
            eval $i || continue
            break
        done
    done
}
function add_on_exit {
    # Register a cleanup command to run when the shell exits; commands are
    # prepended, so the most recently added one runs first. The EXIT trap
    # is installed lazily on the first registration.
    if [[ ${#on_exit_items[@]} -eq 0 ]]; then
        trap on_exit EXIT
    fi
    on_exit_items=("$*" "${on_exit_items[@]}")
}
function print_title () {
    # Print the given message framed by divider lines.
    # Fixes: 'divider' is now local (it previously leaked into the global
    # scope) and expansions are quoted so the message is not word-split or
    # glob-expanded.
    local divider="==========================================================================="
    echo "${divider}"
    echo "$1"
    echo "${divider}"
}
import React from 'react'
import StdProps from '../../../../common/std-props'
//@ts-ignore
import styles from './index.css'
import { IconButton, Icon } from 'rsuite'
import PProps from '../PProps'
/**
 * Full-screen presentation slide. Renders a short message block and a
 * bouncing "scroll down" button that calls `props.onRequestNext` when
 * clicked. All received props are spread onto the outermost <div>.
 */
export default function (props: PProps) {
    return <div {...props}>
        <div className={`${styles.bg} ${styles.f} d-flex flex-column w-100 h-100 justify-content-center`}>
            <div className="d-flex flex-column justify-content-center container">
                <br/>
                <br/>
                <h1>好吧?</h1>
                <h3>确实讲了许多云里雾里的话</h3>
                <h2>我只是,只是想说...</h2>
            </div>
            <div className="animated fadeIn delay-7s">
                <br/><br/>
                <IconButton size="lg" onClick={props.onRequestNext} className="animated infinite slow bounce d-block mr-auto ml-auto bg-transparent"
                    icon={<Icon icon="angle-double-down" />} circle />
                <br /><br />
            </div>
        </div>
    </div>
}
#!/bin/bash
set -e
# Configures kubectl for the PAAS_<ENVIRONMENT>_* cluster: downloads the CLI
# if needed, then sets cluster, credentials (token file, injected TOKEN, or
# client certs) and context.
function logInToPaas() {
    local redownloadInfra="${REDOWNLOAD_INFRA}"
    local ca="PAAS_${ENVIRONMENT}_CA"
    local k8sCa="${!ca}"
    local clientCert="PAAS_${ENVIRONMENT}_CLIENT_CERT"
    local k8sClientCert="${!clientCert}"
    local clientKey="PAAS_${ENVIRONMENT}_CLIENT_KEY"
    local k8sClientKey="${!clientKey}"
    local tokenPath="PAAS_${ENVIRONMENT}_CLIENT_TOKEN_PATH"
    local k8sTokenPath="${!tokenPath}"
    local clusterName="PAAS_${ENVIRONMENT}_CLUSTER_NAME"
    local k8sClusterName="${!clusterName}"
    local clusterUser="PAAS_${ENVIRONMENT}_CLUSTER_USERNAME"
    local k8sClusterUser="${!clusterUser}"
    local systemName="PAAS_${ENVIRONMENT}_SYSTEM_NAME"
    local k8sSystemName="${!systemName}"
    local api="PAAS_${ENVIRONMENT}_API_URL"
    local apiUrl="${!api:-192.168.99.100:8443}"
    local cliInstalled
    cliInstalled="$("${KUBECTL_BIN}" version && echo "true" || echo "false")"
    local cliDownloaded
    cliDownloaded="$(test -r "${KUBECTL_BIN}" && echo "true" || echo "false")"
    echo "CLI Installed? [${cliInstalled}], CLI Downloaded? [${cliDownloaded}]"
    if [[ ${cliInstalled} == "false" && ( ${cliDownloaded} == "false" || ${cliDownloaded} == "true" && ${redownloadInfra} == "true" ) ]]; then
        echo "Downloading CLI"
        # Bug fix: the download URL was hard coded to the darwin binary even
        # on Linux; use the OS detected by system() (exported as SYSTEM).
        curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/${SYSTEM}/amd64/kubectl" --fail
        cliDownloaded="true"
    else
        echo "CLI is already installed or was already downloaded but the flag to redownload was disabled"
    fi
    if [[ ${cliDownloaded} == "true" ]]; then
        echo "Adding CLI to PATH"
        PATH="${PATH}:$(pwd)"
        chmod +x "${KUBECTL_BIN}"
    fi
    echo "Removing current Kubernetes configuration"
    rm -rf "${KUBE_CONFIG_PATH}" || echo "Failed to remove Kube config. Continuing with the script"
    echo "Logging in to Kubernetes API [${apiUrl}], with cluster name [${k8sClusterName}] and user [${k8sClusterUser}]"
    "${KUBECTL_BIN}" config set-cluster "${k8sClusterName}" --server="https://${apiUrl}" --certificate-authority="${k8sCa}" --embed-certs=true
    # TOKEN will get injected as a credential if present
    if [[ "${TOKEN}" != "" ]]; then
        "${KUBECTL_BIN}" config set-credentials "${k8sClusterUser}" --token="${TOKEN}"
    elif [[ "${k8sTokenPath}" != "" ]]; then
        local tokenContent
        tokenContent="$(cat "${k8sTokenPath}")"
        "${KUBECTL_BIN}" config set-credentials "${k8sClusterUser}" --token="${tokenContent}"
    else
        "${KUBECTL_BIN}" config set-credentials "${k8sClusterUser}" --certificate-authority="${k8sCa}" --client-key="${k8sClientKey}" --client-certificate="${k8sClientCert}"
    fi
    "${KUBECTL_BIN}" config set-context "${k8sSystemName}" --cluster="${k8sClusterName}" --user="${k8sClusterUser}"
    "${KUBECTL_BIN}" config use-context "${k8sSystemName}"
    echo "CLI version"
    "${KUBECTL_BIN}" version
}
# Deploys backing services and this application (smoke-test profile) to the
# TEST environment.
function testDeploy() {
    local appName
    appName=$(retrieveAppName)
    # Log in to PaaS to start deployment
    logInToPaas
    deployServices
    # deploy app
    deployAndRestartAppWithNameForSmokeTests "${appName}" "${PIPELINE_VERSION}"
}
# Redeploys the latest production version (taken from the prod/* tag) to the
# TEST environment so smoke tests can check backwards compatibility.
function testRollbackDeploy() {
    rm -rf "${OUTPUT_FOLDER}/test.properties"
    local latestProdTag="${1}"
    local appName
    appName=$(retrieveAppName)
    local latestProdVersion
    # Strip the "prod/" prefix from the tag to get the bare version.
    latestProdVersion="${latestProdTag#prod/}"
    echo "Last prod version equals ${latestProdVersion}"
    logInToPaas
    parsePipelineDescriptor
    deployAndRestartAppWithNameForSmokeTests "${appName}" "${latestProdVersion}"
    # Adding latest prod tag
    echo "LATEST_PROD_TAG=${latestProdTag}" >>"${OUTPUT_FOLDER}/test.properties"
}
# Dispatches deployment of one backing service based on its type
# (rabbitmq | mysql | eureka | stubrunner). ${3} may carry optional
# "artifact:version" coordinates used by eureka and stubrunner.
function deployService() {
    local serviceType
    serviceType="$(toLowerCase "${1}")"
    local serviceName
    serviceName="${2}"
    local serviceCoordinates
    # jq emits the literal string "null" when no coordinates were given.
    serviceCoordinates="$(if [[ "${3}" == "null" ]]; then
        echo "";
    else
        echo "${3}";
    fi)"
    local coordinatesSeparator=":"
    echo "Will deploy service with type [${serviceType}] name [${serviceName}] and coordinates [${serviceCoordinates}]"
    case ${serviceType} in
    rabbitmq)
        deployRabbitMq "${serviceName}"
        ;;
    mysql)
        deployMySql "${serviceName}"
        ;;
    eureka)
        local previousIfs
        previousIfs="${IFS}"
        # Split "artifact:version" on ':' into the two EUREKA_* variables.
        IFS=${coordinatesSeparator} read -r EUREKA_ARTIFACT_ID EUREKA_VERSION <<<"${serviceCoordinates}"
        IFS="${previousIfs}"
        deployEureka "${EUREKA_ARTIFACT_ID}:${EUREKA_VERSION}" "${serviceName}"
        ;;
    stubrunner)
        local uniqueEurekaName
        uniqueEurekaName="$(eurekaName)"
        local uniqueRabbitName
        uniqueRabbitName="$(rabbitMqName)"
        local previousIfs
        previousIfs="${IFS}"
        IFS=${coordinatesSeparator} read -r STUBRUNNER_ARTIFACT_ID STUBRUNNER_VERSION <<<"${serviceCoordinates}"
        IFS="${previousIfs}"
        local parsedStubRunnerUseClasspath
        parsedStubRunnerUseClasspath="$(echo "${PARSED_YAML}" | jq -r --arg x "${LOWER_CASE_ENV}" '.[$x].services[] | select(.type == "stubrunner") | .useClasspath')"
        local stubRunnerUseClasspath
        stubRunnerUseClasspath=$(if [[ "${parsedStubRunnerUseClasspath}" == "null" ]]; then
            echo "false";
        else
            echo "${parsedStubRunnerUseClasspath}";
        fi)
        deployStubRunnerBoot "${STUBRUNNER_ARTIFACT_ID}:${STUBRUNNER_VERSION}" "${REPO_WITH_BINARIES_FOR_UPLOAD}" "${uniqueRabbitName}" "${uniqueEurekaName}" "${serviceName}"
        ;;
    *)
        echo "Unknown service [${serviceType}]"
        return 1
        ;;
    esac
}
# The four helpers below read the pipeline descriptor (PARSED_YAML) and print
# the configured name/database of a given service type for the current env.
function eurekaName() {
    echo "${PARSED_YAML}" | jq -r --arg x "${LOWER_CASE_ENV}" '.[$x].services[] | select(.type == "eureka") | .name'
}
function rabbitMqName() {
    echo "${PARSED_YAML}" | jq -r --arg x "${LOWER_CASE_ENV}" '.[$x].services[] | select(.type == "rabbitmq") | .name'
}
function mySqlName() {
    echo "${PARSED_YAML}" | jq -r --arg x "${LOWER_CASE_ENV}" '.[$x].services[] | select(.type == "mysql") | .name'
}
function mySqlDatabase() {
    echo "${PARSED_YAML}" | jq -r --arg x "${LOWER_CASE_ENV}" '.[$x].services[] | select(.type == "mysql") | .database'
}
# Builds the -D system properties pointing the app at whichever backing
# services (eureka / rabbitmq / mysql) are declared in the descriptor.
# jq prints "null" for missing entries, hence the double check below.
function appSystemProps() {
    local systemProps
    systemProps=""
    # TODO: Not every system needs Eureka or Rabbit. But we need to bind this somehow...
    local eurekaName
    eurekaName="$(eurekaName)"
    local rabbitMqName
    rabbitMqName="$(rabbitMqName)"
    local mySqlName
    mySqlName="$(mySqlName)"
    local mySqlDatabase
    mySqlDatabase="$(mySqlDatabase)"
    if [[ "${eurekaName}" != "" && "${eurekaName}" != "null" ]]; then
        systemProps="${systemProps} -Deureka.client.serviceUrl.defaultZone=http://${eurekaName}:8761/eureka"
    fi
    if [[ "${rabbitMqName}" != "" && "${rabbitMqName}" != "null" ]]; then
        systemProps="${systemProps} -DSPRING_RABBITMQ_ADDRESSES=${rabbitMqName}:5672"
    fi
    if [[ "${mySqlName}" != "" && "${mySqlName}" != "null" ]]; then
        systemProps="${systemProps} -Dspring.datasource.url=jdbc:mysql://${mySqlName}/${mySqlDatabase}"
    fi
    echo "${systemProps}"
}
# Removes every Kubernetes object that may exist under the service's name.
# Note: the serviceType argument is currently unused.
function deleteService() {
    local serviceType="${1}"
    local serviceName="${2}"
    echo "Deleting all possible entries with name [${serviceName}]"
    deleteAppByName "${serviceName}"
}
# Deploys a RabbitMQ deployment + service from the k8s/ templates. On STAGE
# an already-deployed instance is reused; on TEST it is recreated.
function deployRabbitMq() {
    local serviceName="${1:-rabbitmq-github}"
    local objectDeployed
    objectDeployed="$(objectDeployed "service" "${serviceName}")"
    if [[ "${ENVIRONMENT}" == "STAGE" && "${objectDeployed}" == "true" ]]; then
        echo "Service [${serviceName}] already deployed. Won't redeploy for stage"
        return
    fi
    echo "Waiting for RabbitMQ to start"
    local originalDeploymentFile="${__ROOT}/k8s/rabbitmq.yml"
    local originalServiceFile="${__ROOT}/k8s/rabbitmq-service.yml"
    local outputDirectory
    outputDirectory="$(outputFolder)/k8s"
    mkdir -p "${outputDirectory}"
    # Work on copies so the templates under __ROOT stay pristine.
    cp "${originalDeploymentFile}" "${outputDirectory}"
    cp "${originalServiceFile}" "${outputDirectory}"
    local deploymentFile="${outputDirectory}/rabbitmq.yml"
    local serviceFile="${outputDirectory}/rabbitmq-service.yml"
    substituteVariables "appName" "${serviceName}" "${deploymentFile}"
    substituteVariables "appName" "${serviceName}" "${serviceFile}"
    if [[ "${ENVIRONMENT}" == "TEST" ]]; then
        deleteAppByFile "${deploymentFile}"
        deleteAppByFile "${serviceFile}"
    fi
    replaceApp "${deploymentFile}"
    replaceApp "${serviceFile}"
}
# kubectl create from a manifest file.
function deployApp() {
    local fileName="${1}"
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" create -f "${fileName}"
}
# kubectl replace --force from a manifest file (delete+recreate semantics).
function replaceApp() {
    local fileName="${1}"
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" replace --force -f "${fileName}"
}
# Best-effort delete of every object kind that may carry the given name;
# each failure is swallowed so the remaining kinds are still attempted.
function deleteAppByName() {
    local serviceName="${1}"
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" delete secret "${serviceName}" || result=""
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" delete persistentvolumeclaim "${serviceName}" || result=""
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" delete pod "${serviceName}" || result=""
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" delete deployment "${serviceName}" || result=""
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" delete service "${serviceName}" || result=""
}
# Best-effort delete of everything described by a manifest file.
function deleteAppByFile() {
    local file="${1}"
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" delete -f "${file}" || echo "Failed to delete app by [${file}] file. Continuing with the script"
}
function system {
    # Print the lower-case OS identifier ("linux" or "darwin") used to pick
    # platform-specific binaries; abort for any other uname value.
    case "$(uname -s)" in
        Linux*)  echo "linux" ;;
        Darwin*) echo "darwin" ;;
        *) echo "Unsupported system" && exit 1
    esac
}
# Replaces every {{variableName}} placeholder in fileName with the given
# substitution, in place. The value is sed-escaped first; macOS sed needs
# the extra empty '' argument after -i.
function substituteVariables() {
    local variableName="${1}"
    local substitution="${2}"
    local fileName="${3}"
    local escapedSubstitution
    escapedSubstitution=$(escapeValueForSed "${substitution}")
    #echo "Changing [${variableName}] -> [${escapedSubstitution}] for file [${fileName}]"
    if [[ "${SYSTEM}" == "darwin" ]]; then
        sed -i "" "s/{{${variableName}}}/${escapedSubstitution}/" "${fileName}"
    else
        sed -i "s/{{${variableName}}}/${escapedSubstitution}/" "${fileName}"
    fi
}
# Deploys a MySQL deployment + service from the k8s/ templates, generating a
# per-app secret with the MYSQL_* credentials. STAGE reuses an existing
# instance; TEST recreates it.
function deployMySql() {
    local serviceName="${1:-mysql-github}"
    local objectDeployed
    objectDeployed="$(objectDeployed "service" "${serviceName}")"
    if [[ "${ENVIRONMENT}" == "STAGE" && "${objectDeployed}" == "true" ]]; then
        echo "Service [${serviceName}] already deployed. Won't redeploy for stage"
        return
    fi
    local secretName
    secretName="mysql-$(retrieveAppName)"
    echo "Waiting for MySQL to start"
    local originalDeploymentFile="${__ROOT}/k8s/mysql.yml"
    local originalServiceFile="${__ROOT}/k8s/mysql-service.yml"
    local outputDirectory
    outputDirectory="$(outputFolder)/k8s"
    mkdir -p "${outputDirectory}"
    # Work on copies so the templates under __ROOT stay pristine.
    cp "${originalDeploymentFile}" "${outputDirectory}"
    cp "${originalServiceFile}" "${outputDirectory}"
    local deploymentFile="${outputDirectory}/mysql.yml"
    local serviceFile="${outputDirectory}/mysql-service.yml"
    local mySqlDatabase
    mySqlDatabase="$(mySqlDatabase)"
    echo "Generating secret with name [${secretName}]"
    # Recreate the secret from scratch; deletion failures are non-fatal.
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" delete secret "${secretName}" || echo "Failed to delete secret [${serviceName}]. Continuing with the script"
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" create secret generic "${secretName}" --from-literal=username="${MYSQL_USER}" --from-literal=password="${MYSQL_PASSWORD}" --from-literal=rootpassword="${MYSQL_ROOT_PASSWORD}"
    substituteVariables "appName" "${serviceName}" "${deploymentFile}"
    substituteVariables "secretName" "${secretName}" "${deploymentFile}"
    substituteVariables "mysqlDatabase" "${mySqlDatabase}" "${deploymentFile}"
    substituteVariables "appName" "${serviceName}" "${serviceFile}"
    if [[ "${ENVIRONMENT}" == "TEST" ]]; then
        deleteAppByFile "${deploymentFile}"
        deleteAppByFile "${serviceFile}"
    fi
    replaceApp "${deploymentFile}"
    replaceApp "${serviceFile}"
}
# Lists the pods labelled app=<serviceName> whose first column matches the
# name (wide output).
function findAppByName() {
    local serviceName
    serviceName="${1}"
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" get pods -o wide -l app="${serviceName}" | awk -v "app=${serviceName}" '$1 ~ app {print($0)}'
}
# Deploys the given jar under the current environment profile and restarts it.
function deployAndRestartAppWithName() {
    local appName="${1}"
    local jarName="${2}"
    local env="${LOWER_CASE_ENV}"
    echo "Deploying and restarting app with name [${appName}] and jar name [${jarName}]"
    deployAppWithName "${appName}" "${jarName}" "${env}" 'true'
    restartApp "${appName}"
}
# Renders deployment.yml/service.yml for the given version with the
# smoke+kubernetes profiles, recreates the objects and waits for readiness.
function deployAndRestartAppWithNameForSmokeTests() {
    local appName="${1}"
    local version="${2}"
    local profiles="smoke,kubernetes"
    local lowerCaseAppName
    lowerCaseAppName=$(toLowerCase "${appName}")
    local originalDeploymentFile="deployment.yml"
    local originalServiceFile="service.yml"
    local outputDirectory
    outputDirectory="$(outputFolder)/k8s"
    mkdir -p "${outputDirectory}"
    # Work on copies so the checked-in templates stay pristine.
    cp "${originalDeploymentFile}" "${outputDirectory}"
    cp "${originalServiceFile}" "${outputDirectory}"
    local deploymentFile="${outputDirectory}/deployment.yml"
    local serviceFile="${outputDirectory}/service.yml"
    local systemProps
    systemProps="-Dspring.profiles.active=${profiles} $(appSystemProps)"
    substituteVariables "dockerOrg" "${DOCKER_REGISTRY_ORGANIZATION}" "${deploymentFile}"
    substituteVariables "version" "${version}" "${deploymentFile}"
    substituteVariables "appName" "${appName}" "${deploymentFile}"
    substituteVariables "labelAppName" "${appName}" "${deploymentFile}"
    substituteVariables "containerName" "${appName}" "${deploymentFile}"
    substituteVariables "systemProps" "${systemProps}" "${deploymentFile}"
    substituteVariables "appName" "${appName}" "${serviceFile}"
    deleteAppByFile "${deploymentFile}"
    deleteAppByFile "${serviceFile}"
    deployApp "${deploymentFile}"
    deployApp "${serviceFile}"
    waitForAppToStart "${appName}"
}
# Same as the smoke-test variant but with the e2e profile and the pipeline
# version baked in.
function deployAndRestartAppWithNameForE2ETests() {
    local appName="${1}"
    local profiles="e2e,kubernetes"
    local lowerCaseAppName
    lowerCaseAppName=$(toLowerCase "${appName}")
    local originalDeploymentFile="deployment.yml"
    local originalServiceFile="service.yml"
    local outputDirectory
    outputDirectory="$(outputFolder)/k8s"
    mkdir -p "${outputDirectory}"
    cp "${originalDeploymentFile}" "${outputDirectory}"
    cp "${originalServiceFile}" "${outputDirectory}"
    local deploymentFile="${outputDirectory}/deployment.yml"
    local serviceFile="${outputDirectory}/service.yml"
    local systemProps="-Dspring.profiles.active=${profiles}"
    substituteVariables "dockerOrg" "${DOCKER_REGISTRY_ORGANIZATION}" "${deploymentFile}"
    substituteVariables "version" "${PIPELINE_VERSION}" "${deploymentFile}"
    substituteVariables "appName" "${appName}" "${deploymentFile}"
    substituteVariables "labelAppName" "${appName}" "${deploymentFile}"
    substituteVariables "containerName" "${appName}" "${deploymentFile}"
    substituteVariables "systemProps" "${systemProps}" "${deploymentFile}"
    substituteVariables "appName" "${appName}" "${serviceFile}"
    deleteAppByFile "${deploymentFile}"
    deleteAppByFile "${serviceFile}"
    deployApp "${deploymentFile}"
    deployApp "${serviceFile}"
    waitForAppToStart "${appName}"
}
function toLowerCase() {
    # Print the first argument converted to lower case.
    tr '[:upper:]' '[:lower:]' <<<"${1}"
}
# Prints the current ENVIRONMENT (e.g. TEST/STAGE/PROD) in lower case.
function lowerCaseEnv() {
    echo "${ENVIRONMENT}" | tr '[:upper:]' '[:lower:]'
}
# Deletes every Kubernetes object registered under the lower-cased app name.
function deleteAppInstance() {
    local serviceName="${1}"
    local lowerCaseAppName
    lowerCaseAppName=$(toLowerCase "${serviceName}")
    echo "Deleting application [${lowerCaseAppName}]"
    deleteAppByName "${lowerCaseAppName}"
}
# Deploys a Eureka server from the k8s/ templates using the given docker
# image. STAGE reuses an existing instance; TEST recreates it. Blocks until
# the app responds on its health endpoint.
function deployEureka() {
    local imageName="${1}"
    local appName="${2}"
    local objectDeployed
    objectDeployed="$(objectDeployed "service" "${appName}")"
    if [[ "${ENVIRONMENT}" == "STAGE" && "${objectDeployed}" == "true" ]]; then
        echo "Service [${appName}] already deployed. Won't redeploy for stage"
        return
    fi
    echo "Deploying Eureka. Options - image name [${imageName}], app name [${appName}], env [${ENVIRONMENT}]"
    local originalDeploymentFile="${__ROOT}/k8s/eureka.yml"
    local originalServiceFile="${__ROOT}/k8s/eureka-service.yml"
    local outputDirectory
    outputDirectory="$(outputFolder)/k8s"
    mkdir -p "${outputDirectory}"
    cp "${originalDeploymentFile}" "${outputDirectory}"
    cp "${originalServiceFile}" "${outputDirectory}"
    local deploymentFile="${outputDirectory}/eureka.yml"
    local serviceFile="${outputDirectory}/eureka-service.yml"
    substituteVariables "appName" "${appName}" "${deploymentFile}"
    # The in-cluster URL is <service>.<namespace>.
    substituteVariables "appUrl" "${appName}.${PAAS_NAMESPACE}" "${deploymentFile}"
    substituteVariables "eurekaImg" "${imageName}" "${deploymentFile}"
    substituteVariables "appName" "${appName}" "${serviceFile}"
    if [[ "${ENVIRONMENT}" == "TEST" ]]; then
        deleteAppByFile "${deploymentFile}"
        deleteAppByFile "${serviceFile}"
    fi
    replaceApp "${deploymentFile}"
    replaceApp "${serviceFile}"
    waitForAppToStart "${appName}"
}
# Escapes forward slashes so the value can be used inside a sed 's/…/…/'.
function escapeValueForSed() {
    echo "${1//\//\\/}"
}
# Deploys a Stub Runner Boot instance wired to the given RabbitMQ and Eureka
# services. Stub coordinates come from retrieveStubRunnerIds; unless the
# stubs are on the classpath, the binaries repo is passed as repositoryRoot.
function deployStubRunnerBoot() {
    local imageName="${1}"
    local repoWithJars="${2}"
    local rabbitName="${3}.${PAAS_NAMESPACE}"
    local eurekaName="${4}.${PAAS_NAMESPACE}"
    local stubRunnerName="${5:-stubrunner}"
    local stubRunnerUseClasspath="${stubRunnerUseClasspath:-false}"
    echo "Deploying Stub Runner. Options - image name [${imageName}], app name [${stubRunnerName}]"
    local stubrunnerIds
    stubrunnerIds="$(retrieveStubRunnerIds)"
    echo "Found following stub runner ids [${stubrunnerIds}]"
    local originalDeploymentFile="${__ROOT}/k8s/stubrunner.yml"
    local originalServiceFile="${__ROOT}/k8s/stubrunner-service.yml"
    local outputDirectory
    outputDirectory="$(outputFolder)/k8s"
    local systemProps=""
    mkdir -p "${outputDirectory}"
    cp "${originalDeploymentFile}" "${outputDirectory}"
    cp "${originalServiceFile}" "${outputDirectory}"
    local deploymentFile="${outputDirectory}/stubrunner.yml"
    local serviceFile="${outputDirectory}/stubrunner-service.yml"
    if [[ "${stubRunnerUseClasspath}" == "false" ]]; then
        systemProps="${systemProps} -Dstubrunner.repositoryRoot=${repoWithJars}"
    fi
    substituteVariables "appName" "${stubRunnerName}" "${deploymentFile}"
    substituteVariables "stubrunnerImg" "${imageName}" "${deploymentFile}"
    substituteVariables "systemProps" "${systemProps}" "${deploymentFile}"
    substituteVariables "rabbitAppName" "${rabbitName}" "${deploymentFile}"
    substituteVariables "eurekaAppName" "${eurekaName}" "${deploymentFile}"
    if [[ "${stubrunnerIds}" != "" ]]; then
        substituteVariables "stubrunnerIds" "${stubrunnerIds}" "${deploymentFile}"
    else
        substituteVariables "stubrunnerIds" "" "${deploymentFile}"
    fi
    substituteVariables "appName" "${stubRunnerName}" "${serviceFile}"
    if [[ "${ENVIRONMENT}" == "TEST" ]]; then
        deleteAppByFile "${deploymentFile}"
        deleteAppByFile "${serviceFile}"
    fi
    replaceApp "${deploymentFile}"
    replaceApp "${serviceFile}"
    waitForAppToStart "${stubRunnerName}"
}
# Resolves the app and stub-runner host:port pairs and exports them as
# APPLICATION_URL / STUBRUNNER_URL for the smoke-test phase.
function prepareForSmokeTests() {
    echo "Retrieving group and artifact id - it can take a while..."
    local appName
    appName="$(retrieveAppName)"
    mkdir -p "${OUTPUT_FOLDER}"
    logInToPaas
    local applicationPort
    applicationPort="$(portFromKubernetes "${appName}")"
    local stubrunnerAppName
    stubrunnerAppName="stubrunner-${appName}"
    local stubrunnerPort
    stubrunnerPort="$(portFromKubernetes "${stubrunnerAppName}")"
    local applicationHost
    applicationHost="$(applicationHost "${appName}")"
    local stubRunnerUrl
    stubRunnerUrl="$(applicationHost "${stubrunnerAppName}")"
    export APPLICATION_URL="${applicationHost}:${applicationPort}"
    export STUBRUNNER_URL="${stubRunnerUrl}:${stubrunnerPort}"
}
# Resolves and exports APPLICATION_URL for the e2e-test phase.
function prepareForE2eTests() {
    echo "Retrieving group and artifact id - it can take a while..."
    local appName
    appName="$(retrieveAppName)"
    mkdir -p "${OUTPUT_FOLDER}"
    logInToPaas
    local applicationPort
    applicationPort="$(portFromKubernetes "${appName}")"
    local applicationHost
    applicationHost="$(applicationHost "${appName}")"
    export APPLICATION_URL="${applicationHost}:${applicationPort}"
}
# Prints the host under which the app is reachable: the minikube node's IP
# for local clusters, otherwise the in-cluster <service>.<namespace> name.
function applicationHost() {
    local appName="${1}"
    if [[ "${KUBERNETES_MINIKUBE}" == "true" ]]; then
        local apiUrlProp="PAAS_${ENVIRONMENT}_API_URL"
        # host:port -> host
        echo "${!apiUrlProp}" | awk -F: '{print $1}'
    else
        echo "${appName}.${PAAS_NAMESPACE}"
    fi
}
# Prints the service port: the nodePort for minikube, the cluster port
# otherwise. (The { } around the if is a redundant grouping, kept as-is.)
function portFromKubernetes() {
    local appName="${1}"
    local jsonPath
    { if [[ "${KUBERNETES_MINIKUBE}" == "true" ]]; then
        jsonPath="{.spec.ports[0].nodePort}"
    else
        jsonPath="{.spec.ports[0].port}"
    fi
    }
    # '8080' -> 8080
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" get svc "${appName}" -o jsonpath="${jsonPath}"
}
# Blocks until the named app answers on its health endpoint (or fails hard).
function waitForAppToStart() {
    local appName="${1}"
    local port
    port="$(portFromKubernetes "${appName}")"
    local applicationHost
    applicationHost="$(applicationHost "${appName}")"
    isAppRunning "${applicationHost}" "${port}"
}
# Prints the host:port under which the current application is reachable.
function retrieveApplicationUrl() {
    local appName
    appName="$(retrieveAppName)"
    local port
    port="$(portFromKubernetes "${appName}")"
    local kubHost
    kubHost="$(applicationHost "${appName}")"
    echo "${kubHost}:${port}"
}
# Polls host:port/health up to 50 times, 5 seconds apart (5s curl timeout);
# exits the script with status 1 if the app never comes up.
function isAppRunning() {
    local host="${1}"
    local port="${2}"
    local waitTime=5
    local retries=50
    local running=1
    local healthEndpoint="health"
    echo "Checking if app [${host}:${port}] is running at [/${healthEndpoint}] endpoint"
    for i in $(seq 1 "${retries}"); do
        curl -m 5 "${host}:${port}/${healthEndpoint}" && running=0 && break
        echo "Fail #$i/${retries}... will try again in [${waitTime}] seconds"
        sleep "${waitTime}"
    done
    if [[ "${running}" == 1 ]]; then
        echo "App failed to start"
        exit 1
    fi
    echo ""
    echo "App started successfully!"
}
# Loads key=value pairs from a properties file into shell variables, turning
# dots in keys into underscores. Values are eval'd, so the file is trusted.
function readTestPropertiesFromFile() {
    local fileLocation="${1:-${OUTPUT_FOLDER}/test.properties}"
    local key
    local value
    if [ -f "${fileLocation}" ]
    then
        echo "${fileLocation} found."
        while IFS='=' read -r key value
        do
            key="$(echo "${key}" | tr '.' '_')"
            eval "${key}='${value}'"
        done <"${fileLocation}"
    else
        echo "${fileLocation} not found."
    fi
}
# Sets key=value as a label on the named deployment.
function label() {
    local appName="${1}"
    local key="${2}"
    local value="${3}"
    local type="deployment"
    "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" label "${type}" "${appName}" "${key}"="${value}"
}
# Prints "true"/"false" depending on whether the object of the given type
# and name exists in the namespace.
function objectDeployed() {
    local appType="${1}"
    local appName="${2}"
    local result
    result="$("${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" get "${appType}" "${appName}" --ignore-not-found=true)"
    if [[ "${result}" != "" ]]; then
        echo "true"
    else
        echo "false"
    fi
}
# Deploys backing services and the app (e2e profile) to the STAGE env.
function stageDeploy() {
    local appName
    appName="$(retrieveAppName)"
    # Log in to PaaS to start deployment
    logInToPaas
    deployServices
    # deploy app
    deployAndRestartAppWithNameForE2ETests "${appName}"
}
# Deploys the already-tested version to PROD (blue-green: the new deployment
# gets a versioned label, the service is created only if missing).
function prodDeploy() {
    # TODO: Consider making it less JVM specific
    local appName
    appName="$(retrieveAppName)"
    # Log in to PaaS to start deployment
    logInToPaas
    # deploy app
    performProductionDeploymentOfTestedApplication "${appName}"
}
function performProductionDeploymentOfTestedApplication() {
    local appName="${1}"
    local lowerCaseAppName
    lowerCaseAppName=$(toLowerCase "${appName}")
    local profiles="kubernetes"
    local originalDeploymentFile="deployment.yml"
    local originalServiceFile="service.yml"
    local outputDirectory
    outputDirectory="$(outputFolder)/k8s"
    mkdir -p "${outputDirectory}"
    cp "${originalDeploymentFile}" "${outputDirectory}"
    cp "${originalServiceFile}" "${outputDirectory}"
    local deploymentFile="${outputDirectory}/deployment.yml"
    local serviceFile="${outputDirectory}/service.yml"
    local changedAppName
    # The label carries the version so old and new deployments can coexist.
    changedAppName="$(escapeValueForDns "${appName}-${PIPELINE_VERSION}")"
    echo "Will name the application [${changedAppName}]"
    local systemProps="-Dspring.profiles.active=${profiles}"
    substituteVariables "dockerOrg" "${DOCKER_REGISTRY_ORGANIZATION}" "${deploymentFile}"
    substituteVariables "version" "${PIPELINE_VERSION}" "${deploymentFile}"
    # The name will contain also the version
    substituteVariables "labelAppName" "${changedAppName}" "${deploymentFile}"
    substituteVariables "appName" "${appName}" "${deploymentFile}"
    substituteVariables "containerName" "${appName}" "${deploymentFile}"
    substituteVariables "systemProps" "${systemProps}" "${deploymentFile}"
    substituteVariables "appName" "${appName}" "${serviceFile}"
    deployApp "${deploymentFile}"
    local serviceDeployed
    serviceDeployed="$(objectDeployed "service" "${appName}")"
    echo "Service already deployed? [${serviceDeployed}]"
    if [[ "${serviceDeployed}" == "false" ]]; then
        deployApp "${serviceFile}"
    fi
    waitForAppToStart "${appName}"
}
function escapeValueForDns() {
    # Make a value usable as a DNS label: dots and underscores become
    # dashes, and the result is lower-cased.
    local dashed
    dashed="$(echo "${1}" | sed -e 's/\./-/g' -e 's/_/-/g')"
    toLowerCase "${dashed}"
}
# Blue-green switch-over: removes the oldest deployment of the app, keeping
# the freshly promoted one.
function completeSwitchOver() {
    local appName
    appName="$(retrieveAppName)"
    # Log in to CF to start deployment
    logInToPaas
    # find the oldest version and remove it
    local oldestDeployment
    oldestDeployment="$(oldestDeployment "${appName}")"
    if [[ "${oldestDeployment}" != "" ]]; then
        echo "Deleting deployment with name [${oldestDeployment}]"
        "${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" delete deployment "${oldestDeployment}"
    else
        echo "There's no blue instance to remove, skipping this step"
    fi
}
# Prints the alphabetically-first deployment labelled name=<appName> that is
# not the current version (empty string when there is none).
function oldestDeployment() {
    local appName="${1}"
    local changedAppName
    changedAppName="$(escapeValueForDns "${appName}-${PIPELINE_VERSION}")"
    local deployedApps
    deployedApps="$("${KUBECTL_BIN}" --context="${K8S_CONTEXT}" --namespace="${PAAS_NAMESPACE}" get deployments -lname="${appName}" --no-headers | awk '{print $1}' | grep -v "${changedAppName}")"
    local oldestDeployment
    oldestDeployment="$(echo "${deployedApps}" | sort | head -n 1)"
    echo "${oldestDeployment}"
}
# --- Script-level initialisation (runs when this file is sourced) ---
__ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
export LOWER_CASE_ENV
LOWER_CASE_ENV="$(lowerCaseEnv)"
# Resolve the namespace from PAAS_<ENVIRONMENT>_NAMESPACE unless already set.
export PAAS_NAMESPACE_VAR="PAAS_${ENVIRONMENT}_NAMESPACE"
[[ -z "${PAAS_NAMESPACE}" ]] && PAAS_NAMESPACE="${!PAAS_NAMESPACE_VAR}"
export KUBERNETES_NAMESPACE="${PAAS_NAMESPACE}"
export SYSTEM
SYSTEM="$(system)"
export KUBE_CONFIG_PATH
KUBE_CONFIG_PATH="${KUBE_CONFIG_PATH:-${HOME}/.kube/config}"
export KUBECTL_BIN
KUBECTL_BIN="${KUBECTL_BIN:-kubectl}"
# CURRENTLY WE ONLY SUPPORT JVM BASED PROJECTS OUT OF THE BOX
# shellcheck source=/dev/null
[[ -f "${__ROOT}/projectType/pipeline-jvm.sh" ]] && source "${__ROOT}/projectType/pipeline-jvm.sh" || \
	echo "No projectType/pipeline-jvm.sh found"
|
#!/usr/bin/env bash
# A script to download and install the latest version
# Detect the OS, resolve the release version (latest unless given as $1) and
# install the nixta binary under a per-version directory, then point the
# stable 'nixta' symlink at it.
OS=$(uname)
if [[ "$OS" == "Linux" || "$OS" == "Darwin" ]]; then
    case "$OS" in
        'Linux')
            PLATFORM="linux-x64"
            if [ -z "$1" ]; then
                VERSION=$(curl --silent "https://api.github.com/repos/stencila/nixta/releases/latest" | grep -Po '"tag_name": "\K.*?(?=")')
            else
                VERSION=$1
            fi
            INSTALL_PATH="$HOME/.local/bin"
            ;;
        'Darwin')
            PLATFORM="macos-x64"
            if [ -z "$1" ]; then
                # macOS grep has no -P, so extract the tag with sed instead.
                VERSION=$(curl --silent "https://api.github.com/repos/stencila/nixta/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
            else
                VERSION=$1
            fi
            INSTALL_PATH="/usr/local/bin"
            ;;
    esac
    echo "Downloading Nixta $VERSION"
    # Fix: quote every path/URL expansion — $HOME (and thus $INSTALL_PATH)
    # may contain spaces, which previously broke word splitting.
    curl -Lo /tmp/nixta.tar.gz "https://github.com/stencila/nixta/releases/download/$VERSION/nixta-$PLATFORM.tar.gz"
    tar xvf /tmp/nixta.tar.gz
    rm -f /tmp/nixta.tar.gz
    echo "Installing nixta to $INSTALL_PATH/nixta-$VERSION/nixta"
    mkdir -p "$INSTALL_PATH/nixta-$VERSION"
    mv -f nixta "$INSTALL_PATH/nixta-$VERSION"
    # Unpack `node_modules` etc into the $INSTALL_PATH/nixta-$VERSION
    "$INSTALL_PATH/nixta-$VERSION/nixta" --version
    echo "Pointing nixta to $INSTALL_PATH/nixta-$VERSION/nixta"
    ln -sf "nixta-$VERSION/nixta" "$INSTALL_PATH/nixta"
else
    echo "Sorry, I don't know how to install on this OS, please see https://github.com/stencila/nixta#install"
fi
|
<reponame>greentown-ideallife/Charts-Tool
import { getPropsFromJson } from '@/utils/util';
/**
 * Generates the source code of a standalone bizcharts React component from a
 * chart configuration object.
 *
 * Bug fix: `GeomProps.map(...)` was interpolated directly into the template
 * literal; arrays stringify with commas between elements, which corrupted
 * the generated JSX. It is now joined with `.join('')`.
 *
 * NOTE(review): this function mutates its argument (ChartProps.data is
 * deleted) — confirm callers do not reuse chartConfig afterwards.
 */
export default chartConfig => {
  let transform = null; // data transform (first entry of ExtendProps.transforms, if any)
  const {
    ChartProps,
    XAxisProps,
    YAxisProps,
    TooltipProps,
    GeomProps,
    CoordProps,
    LegendProps,
    LabelProps,
    GuideProps,
    HtmlProps,
    ExtendProps: { transforms } = {},
  } = chartConfig;
  if (transforms && transforms.length) {
    [transform] = transforms;
  }
  // Pull the data out so it is emitted as a separate constant rather than
  // serialized inside the <Chart> props.
  const { data } = ChartProps;
  delete ChartProps.data;
  return `
import React from 'react';
import { Chart, Geom, Axis, Tooltip, Coord, Legend, Label, Guide } from 'bizcharts';
${transform ? `import DataSet from '@antv/data-set';` : ''}
const { Html } = Guide;
export default () => {
  const data = ${JSON.stringify(data)}
  ${transform
    ? `const { DataView } = DataSet;
  const dv = new DataView();
  dv.source(data).transform(${JSON.stringify(transform)});
  ` : ''}
  return (
    <Chart ${getPropsFromJson(ChartProps)} data=${transform ? `{dv}` : '{data}'}>
      ${XAxisProps ? `<Axis ${getPropsFromJson(XAxisProps)} />` : ''}
      ${YAxisProps ? `<Axis ${getPropsFromJson(YAxisProps)} />` : ''}
      ${TooltipProps ? `<Tooltip ${getPropsFromJson(TooltipProps)} />` : ''}
      ${Array.isArray(GeomProps) ? GeomProps.map(item => `<Geom ${getPropsFromJson(item)} />`).join('') : `<Geom ${getPropsFromJson(GeomProps)}>${LabelProps ? `<Label ${getPropsFromJson(LabelProps)} />` : ''}</Geom>`}
      ${CoordProps ? `<Coord ${getPropsFromJson(CoordProps)} />` : ''}
      ${LegendProps ? `<Legend ${getPropsFromJson(LegendProps)} />` : ''}
      ${GuideProps ? `<Guide ${getPropsFromJson(GuideProps)}>${HtmlProps ? `<Html ${getPropsFromJson(HtmlProps)} />` : ''}</Guide>` : ''}
    </Chart>
  );
};
`;
};
|
package com.growingwiththeweb.sorting;
public class InsertionSort {
public static <T extends Comparable<T>> void sort(T[] array) {
for (int i = 1; i < array.length; i++) {
T item = array[i];
int indexHole = i;
while (indexHole > 0 && array[indexHole - 1].compareTo(item) > 0) {
array[indexHole] = array[--indexHole];
}
array[indexHole] = item;
}
}
}
|
from sqlalchemy.sql.schema import ForeignKey
from api.database import Base
from sqlalchemy import Column, Integer, String, Float
class Area(Base):
    """Commercial area/district with its location, category and status."""
    __tablename__ = "AREA_TB"
    areaCode = Column("area_code", Integer, primary_key=True)
    areaName = Column("area_name", String(20))
    latitude = Column("latitude", Float)
    longitude = Column("longitude", Float)
    areaCategory = Column("area_category", String(10))
    status = Column('status', Integer)
class Businesss(Base):
    """Business-type lookup table.

    NOTE(review): the class name has a triple 's' ('Businesss'); kept as-is
    because renaming would break existing importers -- confirm before fixing.
    """
    __tablename__ = "BUSINESS_TB"
    businessCode = Column("business_code", Integer, primary_key=True)
    businessName = Column("business_name", String(20))
    businesssCategory = Column("business_category", String(10))
class Store(Base):
    """A store located in an area, classified by business type."""
    __tablename__ = "STORE_TB"
    id = Column("id", Integer, primary_key=True, index=True)
    storeName = Column("store_name", String(20))
    areaCode = Column("area_code_fk", Integer, ForeignKey('AREA_TB.area_code'))
    businessCode = Column("business_code_fk", Integer, ForeignKey('BUSINESS_TB.business_code'))
    latitude = Column("latitude", Float)
    longitude = Column("longitude", Float)
class Change(Base):
    """Closure rate per (area, business type) pair."""
    __tablename__ = "CHANGE_TB"
    id = Column("id", Integer, primary_key=True, index=True)
    areaCode = Column("area_code_fk", Integer, ForeignKey('AREA_TB.area_code'))
    businessCode = Column("business_code_fk", Integer, ForeignKey('BUSINESS_TB.business_code'))
    closure = Column("closure", Float)
class Sales(Base):
    """Sales amount per (area, business type) pair."""
    __tablename__ = "SALES_TB"
    id = Column("id", Integer, primary_key=True, index=True)
    areaCode = Column("area_code_fk", Integer, ForeignKey('AREA_TB.area_code'))
    businessCode = Column("business_code_fk", Integer, ForeignKey('BUSINESS_TB.business_code'))
    amount = Column("amount", Integer)
class DaySales(Base):
    """Per-weekday breakdown (ratios) of a Sales row."""
    __tablename__ = "DAYSALES_TB"
    id = Column("id", Integer, primary_key=True, index=True)
    salesId = Column("sales_id_fk", Integer, ForeignKey('SALES_TB.id'))
    mondayRatio = Column("monday_ratio", Integer)
    tuesdayRatio = Column("tuesday_ratio", Integer)
    wednesdayRatio = Column("wednesday_ratio", Integer)
    thursdayRatio = Column("thursday_ratio", Integer)
    fridayRatio = Column("friday_ratio", Integer)
    saturdayRatio = Column("saturday_ratio", Integer)
    sundayRatio = Column("sunday_ratio", Integer)
class TimeSales(Base):
__tablename__ = "TIMESALES_TB"
id = Column("id", Integer, primary_key=True, index=True)
salesId = Column("sales_id_fk", Integer, ForeignKey('SALES_TB.id'))
time0006 = Column("time_0006", Integer)
time0611 = Column("time_0611", Integer)
time1114 = Column("time_1114", Integer)
time1417 = Column("time_1417", Integer)
time1721 = Column("time_1721", Integer)
time2124 = Column("time_2124", Integer)
class CustomerSales(Base):
__tablename__ = "CUSTOMMERSALES_TB"
id = Column("id", Integer, primary_key=True, index=True)
salesId = Column("sales_id_fk", Integer, ForeignKey('SALES_TB.id'))
man = Column("man", Integer)
woman = Column("woman", Integer)
age10 = Column("age_10", Integer)
age20 = Column("age_20", Integer)
age30 = Column("age_30", Integer)
age40 = Column("age_40", Integer)
age50 = Column("age_50", Integer)
age60 = Column("age_60", Integer)
|
#!/bin/bash
# Builds the runtime and the functional-test LLVM IR both before (stashed) and
# after the current working-tree changes, assembles the IR to native asm, and
# writes a unified diff between the two trees so codegen deltas can be reviewed.
#
# Fix: the script uses [[ ]], which is a bash construct; the original
# '#!/bin/sh' shebang breaks on POSIX shells such as dash.

# If anything fails we should fail
set -e

if [[ $# -eq 0 ]] ; then
    echo "Usage: $0 output-dir"
    exit 1
fi

BUILD_DIR=build/
# Fix: variable was inconsistently capitalised as BUIlD_CONFIGURATION.
BUILD_CONFIGURATION="-DCMAKE_BUILD_TYPE=Before"

OUTPUT_DIR=$1
BEFORE_DIR=$OUTPUT_DIR/before
AFTER_DIR=$OUTPUT_DIR/after
DELTA_PATCH=$OUTPUT_DIR/delta.patch

# Rebuild the native runtime from scratch with CMake + Ninja.
rebuild_runtime() {
    rm -Rf $BUILD_DIR
    mkdir $BUILD_DIR
    cd $BUILD_DIR
    cmake -GNinja $BUILD_CONFIGURATION ../runtime
    ninja
    cd ..
}

# Optimise and assemble every .ll file in the directory given as $1.
assemble_llvm_ir() {
    for file in "$1"/*.ll
    do
        opt -O2 -- "$file" | llc -O2 -o "$file.s"
    done
}

# Run the functional test suite capturing LLVM IR into $1, then assemble it.
build_and_assemble() {
    rebuild_runtime
    mkdir "$1"
    LLAMBDA_FUNCTIONAL_TEST_ONLY_OPTIMISED=1 LLAMBDA_FUNCTIONAL_TEST_LLVMIR_DIR=$1 sbt "testOnly io.llambda.compiler.functional.*"
    assemble_llvm_ir "$1"
}

mkdir -p $OUTPUT_DIR

# Be cautious and only delete things we create
rm -Rf $BEFORE_DIR
rm -Rf $AFTER_DIR
rm -f $DELTA_PATCH

# "Before" = the tree without the current uncommitted changes.
git stash push -u -m "functional-asm-delta"
build_and_assemble $BEFORE_DIR
git stash pop
build_and_assemble $AFTER_DIR

diff -u $BEFORE_DIR $AFTER_DIR > $DELTA_PATCH
|
import pandas as pd
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Flatten, Embedding, Dropout
# Fix: train_test_split was used below but never imported.
from sklearn.model_selection import train_test_split

# Maximum vocabulary size; must equal the Embedding layer's input_dim below.
VOCAB_SIZE = 1000

# Read in the dataset (expects 'text' and 'label' columns).
data = pd.read_csv('text_classification_data.csv')

# Preprocess the data
# Tokenize the text. Capping num_words keeps every token index < VOCAB_SIZE;
# the original unbounded Tokenizer could emit indices >= 1000 and make the
# Embedding(1000, ...) lookup fail on any reasonably large vocabulary.
tokenizer = Tokenizer(num_words=VOCAB_SIZE)
tokenizer.fit_on_texts(data.text)
x = tokenizer.texts_to_sequences(data.text)

# Pad the sequences to a uniform length.
x = pad_sequences(x, padding='post')

# One-hot encode the labels.
y = pd.get_dummies(data.label).values

# Split the dataset into train and test set
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

# Build the neural network model
model = Sequential()
model.add(Embedding(VOCAB_SIZE, 32, input_length=x.shape[1]))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
# NOTE(review): assumes the dataset has exactly 5 distinct labels — confirm.
model.add(Dense(5, activation='softmax'))

# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, epochs=5, batch_size=32)

# Evaluate the model
score, acc = model.evaluate(x_test, y_test)
print(f'Test score: {score}, accuracy: {acc}')
<filename>libs/data-handling/src/repositories/crud-repository.interface.ts<gh_stars>0
import { Observable } from 'rxjs/Observable';
import { Identifiable } from '../models/identifiable.interface';
import { Pageable } from '../models/pageable.class';
import { PagedResponse } from '../models/paged-response.interface';
/**
 * Generic CRUD contract over entities identified by an ID of type `ID`.
 * All operations are asynchronous and return RxJS observables.
 */
export interface CrudRepository<T extends Identifiable<ID>, ID> {
  /** Fetch a page of entities (all entities when `pageable` is omitted). */
  getAll( pageable?: Pageable ): Observable<PagedResponse<T>>;
  /** Fetch a single entity; emits `undefined` when no entity has `id`. */
  getById( id: ID ): Observable<T | undefined>;
  /** Persist a new entity and emit the created record. */
  create( entity: T ): Observable<T>;
  /** Persist changes to an existing entity and emit the updated record. */
  update( entity: T ): Observable<T>;
  /**
   * Call create() OR update() based on existence of entity.id.
   * @param {T} entity to create/update
   * @returns {Observable<T extends Identifiable<ID>>} Observable of created/updated entity
   */
  save?( entity: T ): Observable<T>;
  /** Delete the entity with the given id; completes when done. */
  remove( id: ID ): Observable<void>;
}
|
import random
def get_user_choice():
    """Prompt repeatedly until the user types a valid move or 'q' to quit.

    Returns the lower-cased choice: 'rock', 'paper', 'scissors', or 'q'.
    """
    valid = ('rock', 'paper', 'scissors', 'q')
    while True:
        choice = input("Enter your choice (rock, paper, or scissors), or 'q' to quit: ").lower()
        if choice in valid:
            return choice
        print("Invalid choice. Please enter 'rock', 'paper', or 'scissors'.")
def get_computer_choice():
    """Return a uniformly random move for the computer."""
    moves = ['rock', 'paper', 'scissors']
    return random.choice(moves)
def determine_winner(user_choice, computer_choice):
    """Score one round from the user's point of view.

    Returns 'draw', 'win', or 'lose'.
    """
    # Each move maps to the move it defeats.
    beats = {'rock': 'scissors', 'scissors': 'paper', 'paper': 'rock'}
    if user_choice == computer_choice:
        return "draw"
    return "win" if beats.get(user_choice) == computer_choice else "lose"
def play_game():
    """Run the interactive rock-paper-scissors loop until the user quits."""
    while True:
        user_choice = get_user_choice()
        # 'q' is the quit sentinel returned by get_user_choice().
        if user_choice == 'q':
            print("Thanks for playing!")
            break
        computer_choice = get_computer_choice()
        print(f"Your choice: {user_choice}")
        print(f"Computer's choice: {computer_choice}")
        result = determine_winner(user_choice, computer_choice)
        print(f"Result: {result}\n")
# Start the game when the script is executed.
play_game()
package simulation;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.StringTokenizer;
/**
*
* @author exponential-e
* 백준 3425번: 고스택
*
* @see https://www.acmicpc.net/problem/3425/
*
*/
public class Boj3425 {
    // Accumulates all program output; printed once at the end.
    private static StringBuilder sb = new StringBuilder();
    // GoStack machine opcodes (see problem statement).
    private static final String Q = "QUIT";
    private static final String NO = "NUM";
    private static final String P = "POP";
    private static final String I = "INV";
    private static final String A = "ADD";
    private static final String DU = "DUP";
    private static final String DI = "DIV";
    private static final String SW = "SWP";
    private static final String SU = "SUB";
    private static final String MU = "MUL";
    private static final String MO = "MOD";
    private static final String EN = "END";
    private static final String E = "ERROR";
    private static final String NEW_LINE = "\n";
    // Any value with magnitude above 10^9 is an error per the problem limits.
    private static final int INF = 1_000_000_000;
    public static void main(String[] args) throws Exception {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        while(true) {
            String cmd = br.readLine();
            // A lone QUIT line terminates the whole input.
            if(Q.equals(cmd)) break;
            // Collect one program: every line up to and including END.
            ArrayList<String> queries = new ArrayList<>();
            queries.add(cmd);
            if(!EN.equals(cmd)) {
                String input;
                while (!(input = br.readLine()).equals(EN)) {
                    queries.add(input);
                }
                queries.add(EN);
            }
            // Run the program once per test value.
            int N = Integer.parseInt(br.readLine());
            for(int i = 0; i < N; i++) {
                execute(queries, Long.parseLong(br.readLine()));
            }
            sb.append(NEW_LINE);
            // Consume the blank separator line between test cases.
            br.readLine();
        }
        System.out.println(sb.toString());
    }
    /**
     * Runs one GoStack program on a stack seeded with {@code value} and
     * appends the single remaining value — or ERROR — to the output buffer.
     * Stack underflow, division by zero, and overflow past INF all report
     * ERROR via validator(INF + 1).
     */
    private static void execute(ArrayList<String> query, long value) {
        ArrayDeque<Long> stack = new ArrayDeque<>();
        stack.push(value);
        for(String q: query) {
            StringTokenizer st = new StringTokenizer(q);
            String cmd = st.nextToken();
            if(EN.equals(cmd)) break;
            // NUM pushes its literal operand.
            if(NO.equals(cmd)) {
                stack.push(Long.parseLong(st.nextToken()));
                continue;
            }
            long peek;
            long sec;
            switch (cmd) {
                case P:
                    if(stack.isEmpty()) {
                        validator(INF + 1);
                        return;
                    }
                    stack.pop();
                    break;
                case I:
                    // INV negates the top of the stack.
                    if(stack.isEmpty()) {
                        validator(INF + 1);
                        return;
                    }
                    stack.push(-stack.pop());
                    break;
                case A:
                    if(stack.size() < 2) {
                        validator(INF + 1);
                        return;
                    }
                    peek = stack.pop();
                    peek += stack.pop();
                    if(validator(peek)) return;
                    stack.push(peek);
                    break;
                case DU:
                    if(stack.isEmpty()) {
                        validator(INF + 1);
                        return;
                    }
                    peek = stack.peek();
                    stack.push(peek);
                    break;
                case DI:
                    // DIV truncates toward zero: divide magnitudes, then
                    // negate when operand signs differ.
                    if(stack.size() < 2) {
                        validator(INF + 1);
                        return;
                    }
                    peek = stack.pop();
                    sec = stack.pop();
                    if(peek == 0) { // divide by zero
                        validator(INF + 1);
                        return;
                    }
                    long div = Math.abs(sec) / Math.abs(peek);
                    if((sec < 0 && peek >= 0) || (sec >= 0 && peek < 0)) div *= -1;
                    stack.push(div);
                    break;
                case SU:
                    // SUB: second-from-top minus top.
                    if(stack.size() < 2) {
                        validator(INF + 1);
                        return;
                    }
                    peek = stack.pop();
                    sec = stack.pop();
                    sec -= peek;
                    if(validator(sec)) return;
                    stack.push(sec);
                    break;
                case SW:
                    // SWP exchanges the two topmost values.
                    if(stack.size() < 2) {
                        validator(INF + 1);
                        return;
                    }
                    peek = stack.pop();
                    sec = stack.pop();
                    stack.push(peek);
                    stack.push(sec);
                    break;
                case MU:
                    if(stack.size() < 2) {
                        validator(INF + 1);
                        return;
                    }
                    peek = stack.pop() * stack.pop();
                    if(validator(peek)) return;
                    stack.push(peek);
                    break;
                case MO:
                    // MOD: remainder carries the sign of the dividend.
                    if(stack.size() < 2) {
                        validator(INF + 1);
                        return;
                    }
                    peek = stack.pop();
                    sec = stack.pop();
                    if(peek == 0) { // divide by zero
                        validator(INF + 1);
                        return;
                    }
                    long mod = Math.abs(sec) % Math.abs(peek);
                    if(sec < 0) mod *= -1;
                    stack.push(mod);
                    break;
            }
        }
        // A valid program must end with exactly one value on the stack.
        if(stack.size() != 1) {
            sb.append(E).append(NEW_LINE);
            return;
        }
        sb.append(stack.pop()).append(NEW_LINE);
    }
    // Appends ERROR and returns true when 'value' is out of the legal range.
    // Callers deliberately pass INF + 1 to signal underflow/div-by-zero too.
    private static boolean validator (long value) {
        if(value > INF || value < -INF){
            sb.append(E).append(NEW_LINE);
            return true;
        }
        return false;
    }
}
|
package stage
import (
"fmt"
"github.com/flant/werf/pkg/build/builder"
"github.com/flant/werf/pkg/container_runtime"
"github.com/flant/werf/pkg/image"
)
// newUserWithGitPatchStage constructs a user stage paired with a git patch
// stage; both share the same BaseStage instance.
func newUserWithGitPatchStage(builder builder.Builder, name StageName, gitPatchStageOptions *NewGitPatchStageOptions, baseStageOptions *NewBaseStageOptions) *UserWithGitPatchStage {
	s := &UserWithGitPatchStage{}
	s.UserStage = newUserStage(builder, name, baseStageOptions)
	s.GitPatchStage = newGitPatchStage(name, gitPatchStageOptions, baseStageOptions)
	// Share one BaseStage between the user stage and its git patch stage.
	s.GitPatchStage.BaseStage = s.BaseStage
	return s
}
// UserWithGitPatchStage is a user-defined build stage that can additionally
// apply a git patch on top of the previously built image.
type UserWithGitPatchStage struct {
	*UserStage
	GitPatchStage *GitPatchStage
}
// SelectSuitableStage filters cached stages to ancestors of the current git
// mappings, then picks the one with the oldest creation timestamp.
func (s *UserWithGitPatchStage) SelectSuitableStage(stages []*image.StageDescription) (*image.StageDescription, error) {
	ancestorsImages, err := s.selectStagesAncestorsByGitMappings(stages)
	if err != nil {
		return nil, fmt.Errorf("unable to select cache images ancestors by git mappings: %s", err)
	}
	return s.selectStageByOldestCreationTimestamp(ancestorsImages)
}
// GetNextStageDependencies delegates to the base stage's git dependencies.
func (s *UserWithGitPatchStage) GetNextStageDependencies(c Conveyor) (string, error) {
	return s.BaseStage.getNextStageGitDependencies(c)
}
// PrepareImage runs the base preparation and, when the git patch for the
// previous image is non-empty, also applies the git patch stage preparation.
func (s *UserWithGitPatchStage) PrepareImage(c Conveyor, prevBuiltImage, image container_runtime.ImageInterface) error {
	if err := s.BaseStage.PrepareImage(c, prevBuiltImage, image); err != nil {
		return err
	}
	if isPatchEmpty, err := s.GitPatchStage.IsEmpty(c, prevBuiltImage); err != nil {
		return err
	} else if !isPatchEmpty {
		if err := s.GitPatchStage.prepareImage(c, prevBuiltImage, image); err != nil {
			return err
		}
	}
	return nil
}
|
class TreeNode:
    """A node in a general (n-ary) tree: a value plus an ordered child list."""

    def __init__(self, val):
        self.val = val        # payload stored at this node
        self.children = []    # child TreeNode instances, in insertion order

    def add_child(self, child):
        """Append ``child`` as the rightmost child of this node."""
        self.children.append(child)
def construct_tree(input):
    """Build a tree from a spec list.

    The first entry is the root's value; every later entry is a
    ``"parent child"`` pair of values, attached in order.
    """
    root = TreeNode(input[0])
    for edge in input[1:]:
        parent_val, child_val = edge.split()
        parent = find_node(root, parent_val)
        parent.add_child(TreeNode(child_val))
    return root
def find_node(root, val):
    """Depth-first search for the first node whose value equals ``val``.

    Returns the matching node, or None when ``root`` is falsy or no node
    in the subtree matches.
    """
    if not root:
        return None
    if root.val == val:
        return root
    for child in root.children:
        match = find_node(child, val)
        if match:
            return match
    return None
# Demo: build the tree A -> (B -> D, C -> E) from root value + edge pairs.
input = ["A", "A B", "A C", "B D", "C E"]
tree = construct_tree(input)
<reponame>zaquest/requests_throttled<filename>setup.py
from setuptools import setup

# Packaging metadata for the requests_throttled distribution.
# NOTE(review): author_email holds the '<EMAIL>' anonymisation placeholder —
# restore the real address before publishing.
setup(name='requests_throttled',
      version='0.3',
      description='Throttling requests.Session.',
      url='http://github.com/zaquest/requests_throttled',
      author='zaquest',
      author_email='<EMAIL>',
      license='MIT',
      install_requires=['requests'],
      py_modules=['requests_throttled'],
      zip_safe=True)
|
// Format a sample value to exactly two decimal places.
var x = 2.4586;
// Number#toFixed returns a STRING ("2.46"), not a number.
var formattedNumber = x.toFixed(2);
<reponame>ApexTech/yt<filename>lib/yt/models/id.rb<gh_stars>100-1000
module Yt
  module Models
    # @private
    # Encapsulates information about the ID that YouTube uses to uniquely
    # identify a resource.
    # Subclassing String keeps full string behaviour while allowing code to
    # type-check specifically for Id values.
    class Id < String
    end
  end
end
import {
SET_DIRECTIONS,
SET_WALKING_TIME,
SET_ADDRESS,
SET_SURGE,
DISABLE_SURGE,
ROUTE_SELECTED,
ROUTE_DESELECTED,
REQUEST_ROUTES,
REQUEST_EXPANDED_ROUTES,
RECEIVE_ROUTES_UBER,
RECEIVE_ROUTES_LYFT,
RECEIVE_EXPANDED_ROUTES,
NO_EXPANDED_ROUTES,
INVALID_ROUTES
} from './types';
/** Action creator: store the computed directions for the current trip. */
export function setDirections(directions) {
  return {
    type: SET_DIRECTIONS,
    payload: directions
  }
}
/** Action creator: store the walking-time data derived from directions. */
export function setWalkingTime(directions) {
  return {
    type: SET_WALKING_TIME,
    payload: directions
  }
}
/** Action creator: store the user's entered address. */
export function setAddress(address) {
  return {
    type: SET_ADDRESS,
    payload: address
  };
}
/**
 * Action creator: store the current surge multiplier.
 * NOTE(review): the name is misspelled ('Multipler') but is part of the
 * public API of this module — renaming would break importers.
 */
export function setSurgeMultipler(surge) {
  return {
    type: SET_SURGE,
    payload: surge
  }
}
/** Action creator: turn surge pricing handling off. */
export function disableSurge() {
  return {
    type: DISABLE_SURGE
  }
}
/** Action creator: mark the given route as selected. */
export function selectRoute(route) {
  return {
    type: ROUTE_SELECTED,
    payload: route
  };
}
/** Action creator: clear the current route selection. */
export function deselectRoute() {
  return {
    type: ROUTE_DESELECTED,
  };
}
/**
 * Action creator: signal that a route request has started.
 * NOTE(review): 'coords' is accepted but unused — kept for caller compatibility.
 */
export function requestRoutes(coords) {
  return {
    type: REQUEST_ROUTES
  }
}
/**
 * Action creator: signal that an expanded-routes request has started.
 * NOTE(review): 'coords' is accepted but unused — kept for caller compatibility.
 */
export function requestExpandedRoutes(coords) {
  return {
    type: REQUEST_EXPANDED_ROUTES
  }
}
/**
 * Action creator for received Uber routes, sorted by average estimate
 * (cheapest first).
 * Fix: sort a copy — Array.prototype.sort mutates in place, so the original
 * silently reordered the caller's array.
 */
export function receiveRoutesUber(coords, data) {
  let routes = data.slice().sort(function(a, b) {
    return a.avg_estimate - b.avg_estimate;
  });
  return {
    type: RECEIVE_ROUTES_UBER,
    coords: coords,
    routes: routes
  }
}
/**
 * Action creator for received Lyft routes, sorted by average estimate
 * (cheapest first).
 * Fix: sort a copy — Array.prototype.sort mutates in place, so the original
 * silently reordered the caller's array.
 */
export function receiveRoutesLyft(coords, data) {
  let routes = data.slice().sort(function(a, b) {
    return a.avg_estimate - b.avg_estimate;
  });
  return {
    type: RECEIVE_ROUTES_LYFT,
    coords: coords,
    routes: routes
  }
}
export function receiveRoutesExpanded(routes, name) {
return {
type: RECEIVE_EXPANDED_ROUTES,
routes: routes,
name: name
}
}
export function noExpandedRoutes() {
return {
type: NO_EXPANDED_ROUTES
}
}
export function invalidRoutes() {
return {
type: INVALID_ROUTES
}
}
|
import * as tslib_1 from "tslib";
import { Tone } from "../Tone";
import { optionsFromArguments } from "../util/Defaults";
import { noOp } from "../util/Interface";
import { isString } from "../util/TypeCheck";
import { ToneAudioBuffer } from "./ToneAudioBuffer";
import { assert } from "../util/Debug";
/**
* A data structure for holding multiple buffers in a Map-like datastructure.
*
* @example
* import { Player, ToneAudioBuffers } from "tone";
* const pianoSamples = new ToneAudioBuffers({
* C1: "https://tonejs.github.io/examples/audio/casio/C1.mp3",
* C2: "https://tonejs.github.io/examples/audio/casio/C2.mp3",
* }, () => {
* const player = new Player().toDestination();
* // play one of the samples when they all load
* player.buffer = pianoSamples.get("C2");
* player.start();
* });
* @example
* import { ToneAudioBuffers } from "tone";
* // To pass in additional parameters in the second parameter
* const buffers = new ToneAudioBuffers({
* urls: {
* C1: "C1.mp3",
* C2: "C2.mp3",
* },
* onload: () => console.log("loaded"),
* baseUrl: "https://tonejs.github.io/examples/audio/casio/"
* });
* @category Core
*/
// NOTE(review): this is compiled ES5 output (tslib helpers, .js.map reference
// below) — presumably emitted from ToneAudioBuffers.ts. Prefer editing the
// TypeScript source rather than this artifact.
var ToneAudioBuffers = /** @class */ (function (_super) {
    tslib_1.__extends(ToneAudioBuffers, _super);
    function ToneAudioBuffers() {
        var _this = _super.call(this) || this;
        _this.name = "ToneAudioBuffers";
        /**
         * All of the buffers
         */
        _this._buffers = new Map();
        /**
         * Keep track of the number of loaded buffers
         */
        _this._loadingCount = 0;
        var options = optionsFromArguments(ToneAudioBuffers.getDefaults(), arguments, ["urls", "onload", "baseUrl"], "urls");
        _this.baseUrl = options.baseUrl;
        // add each one
        Object.keys(options.urls).forEach(function (name) {
            _this._loadingCount++;
            var url = options.urls[name];
            _this.add(name, url, _this._bufferLoaded.bind(_this, options.onload), options.onerror);
        });
        return _this;
    }
    ToneAudioBuffers.getDefaults = function () {
        return {
            baseUrl: "",
            onerror: noOp,
            onload: noOp,
            urls: {},
        };
    };
    /**
     * True if the buffers object has a buffer by that name.
     * @param name The key or index of the buffer.
     */
    ToneAudioBuffers.prototype.has = function (name) {
        return this._buffers.has(name.toString());
    };
    /**
     * Get a buffer by name. If an array was loaded,
     * then use the array index.
     * @param name The key or index of the buffer.
     */
    ToneAudioBuffers.prototype.get = function (name) {
        assert(this.has(name), "ToneAudioBuffers has no buffer named: " + name);
        return this._buffers.get(name.toString());
    };
    /**
     * A buffer was loaded. decrement the counter.
     */
    ToneAudioBuffers.prototype._bufferLoaded = function (callback) {
        this._loadingCount--;
        // Invoke the user's onload only after the last pending buffer lands.
        if (this._loadingCount === 0 && callback) {
            callback();
        }
    };
    Object.defineProperty(ToneAudioBuffers.prototype, "loaded", {
        /**
         * If the buffers are loaded or not
         */
        get: function () {
            return Array.from(this._buffers).every(function (_a) {
                var _b = tslib_1.__read(_a, 2), _ = _b[0], buffer = _b[1];
                return buffer.loaded;
            });
        },
        enumerable: true,
        configurable: true
    });
    /**
     * Add a buffer by name and url to the Buffers
     * @param name A unique name to give the buffer
     * @param url Either the url of the bufer, or a buffer which will be added with the given name.
     * @param callback The callback to invoke when the url is loaded.
     * @param onerror Invoked if the buffer can't be loaded
     */
    ToneAudioBuffers.prototype.add = function (name, url, callback, onerror) {
        if (callback === void 0) { callback = noOp; }
        if (onerror === void 0) { onerror = noOp; }
        if (isString(url)) {
            this._buffers.set(name.toString(), new ToneAudioBuffer(this.baseUrl + url, callback, onerror));
        }
        else {
            this._buffers.set(name.toString(), new ToneAudioBuffer(url, callback, onerror));
        }
        return this;
    };
    ToneAudioBuffers.prototype.dispose = function () {
        _super.prototype.dispose.call(this);
        this._buffers.forEach(function (buffer) { return buffer.dispose(); });
        this._buffers.clear();
        return this;
    };
    return ToneAudioBuffers;
}(Tone));
export { ToneAudioBuffers };
//# sourceMappingURL=ToneAudioBuffers.js.map |
<reponame>lamby/django-filebased-email-backend-ng<filename>django_filebased_email_backend_ng/backend.py
from __future__ import print_function
import os
import shutil
import mimetypes
from django.conf import settings
from django.core.mail.message import DEFAULT_ATTACHMENT_MIME_TYPE
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
    """File-based email backend that dumps each message to its own directory.

    On every send, ``settings.EMAIL_FILE_PATH`` is wiped and recreated.
    Message N is written under ``<EMAIL_FILE_PATH>/<N>/`` as a raw dump
    (``raw.log``) plus one file per alternative part and per attachment.
    """

    def send_messages(self, messages):
        # Start from a clean slate; ignore "directory does not exist".
        try:
            shutil.rmtree(settings.EMAIL_FILE_PATH)
        except OSError:
            pass

        os.makedirs(settings.EMAIL_FILE_PATH)

        # Fix: the inner loops below used to reuse the name 'idx', shadowing
        # the message index; distinct names make the intent unambiguous.
        for msg_idx, message in enumerate(messages):
            base = os.path.join(settings.EMAIL_FILE_PATH, '%s' % msg_idx)
            os.makedirs(base)

            # Write out raw email
            with open(os.path.join(base, 'raw.log'), 'w') as f:
                print('%s' % message.message().as_string(), file=f)
                print('-' * 79, file=f)

            # Write out alternatives (e.g. the HTML part of a multipart mail).
            alternatives = getattr(message, 'alternatives', ())
            for alt_idx, (content, mimetype) in enumerate(alternatives):
                # Fix: the fallback used to be '.%s' % DEFAULT_ATTACHMENT_MIME_TYPE,
                # i.e. '.application/octet-stream' — a filename containing a path
                # separator. Use a plain '.bin' extension instead.
                filename = os.path.join(base, 'alternative-%d%s' % (
                    alt_idx,
                    mimetypes.guess_extension(mimetype) or '.bin',
                ))
                with open(filename, 'wb') as f:
                    f.write(content.encode('utf8'))

            # Write out attachments
            for att_idx, (_, content, mimetype) in enumerate(message.attachments):
                if mimetype is None:
                    mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
                filename = os.path.join(base, 'attachment-%d%s' % (
                    att_idx,
                    mimetypes.guess_extension(mimetype) or '.txt',
                ))
                with open(filename, 'wb') as f:
                    f.write(content)
|
# Load shared build settings (defines ${dockerPrefix}).
source ../scripts/constants.inc
# Build the etcd image from the current directory, tagged with the shared prefix.
docker build -t ${dockerPrefix}etcd .
|
#! /bin/bash
# SLURM batch job: REXI finite-difference scalability run (896 MPI ranks,
# 2 OpenMP threads each) on the mpp2 cluster.
#SBATCH -o /home/hpc/pr63so/di69fol/workspace/SWEET_2016_01_16/benchmarks_performance/rexi_tests/2016_01_06_scalability_rexi_fd_run3/run_rexi_fd_par_m4096_t002_n0128_r0896_a1.txt
###SBATCH -e /home/hpc/pr63so/di69fol/workspace/SWEET_2016_01_16/benchmarks_performance/rexi_tests/2016_01_06_scalability_rexi_fd_run3/run_rexi_fd_par_m4096_t002_n0128_r0896_a1.err
#SBATCH -J rexi_fd_par_m4096_t002_n0128_r0896_a1
#SBATCH --get-user-env
#SBATCH --clusters=mpp2
#SBATCH --ntasks=896
#SBATCH --cpus-per-task=2
#SBATCH --exclusive
#SBATCH --export=NONE
#SBATCH --time=00:05:00
#declare -x NUMA_BLOCK_ALLOC_VERBOSITY=1
# Pin OpenMP threads (compact placement), two threads per MPI rank.
declare -x KMP_AFFINITY="granularity=thread,compact,1,0"
declare -x OMP_NUM_THREADS=2
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
echo
# Load the toolchain expected by the SWEET build.
. /etc/profile.d/modules.sh
module unload gcc
module unload fftw
module unload python
module load python/2.7_anaconda_nompi
module unload intel
module load intel/16.0
module unload mpi.intel
module load mpi.intel/5.1
module load gcc/5
cd /home/hpc/pr63so/di69fol/workspace/SWEET_2016_01_16/benchmarks_performance/rexi_tests/2016_01_06_scalability_rexi_fd_run3
cd ../../../
. local_software/env_vars.sh
# force to use FFTW WISDOM data
declare -x SWEET_FFTW_LOAD_WISDOM_FROM_FILE="FFTW_WISDOM_nofreq_T0"
# Launch: 896 ranks, 14 ranks per node, 2 OpenMP threads per rank.
time -p mpiexec.hydra -genv OMP_NUM_THREADS 2 -envall -ppn 14 -n 896 ./build/rexi_fd_par_m_tno_a1 --initial-freq-x-mul=2.0 --initial-freq-y-mul=1.0 -f 1 -g 1 -H 1 -X 1 -Y 1 --compute-error 1 -t 50 -R 4 -C 0.3 -N 128 -U 0 -S 0 --use-specdiff-for-complex-array 0 --rexi-h 0.8 --timestepping-mode 1 --staggering 0 --rexi-m=4096 -C -5.0
|
// Seed data: mock user handles paired with avatar image URLs.
// NOTE(review): "willsmith" and "tylerperry" each appear twice with different
// images — confirm whether the duplicates are intentional.
const users = [
  {
    user: "brightokpocha",
    image: "https://i.insider.com/5b881eec3cccd119008b4582?width=700"
  },
  {
    user: "celinedion",
    image: "https://images.fandango.com/ImageRenderer/0/0/redesign/static/img/default_poster.png/0/images/masterrepository/other/1622M04_JO079_H.JPG"
  },
  {
    user: "innocentidibia",
    image: "https://i.insider.com/5b881e7a3cccd121008b4583?width=600&format=jpeg&auto=webp"
  },
  {
    user: "princeharry",
    image: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRzNgUvEChN4ez921U3dbV-iObi_4nPkulxTA&usqp=CAU"
  },
  {
    user: "willsmith",
    image: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSQExiD8qwVO7OqGENTCVyVy5WMBsn_9zc8kA&usqp=CAU"
  },
  {
    user: "tylerperry",
    image: "https://cdn.newsday.com/polopoly_fs/1.5878007.1504286085!/httpImage/image.JPG_gen/derivatives/display_960/image.JPG"
  },
  {
    user: "willsmith",
    image: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQeFAilRwcIpM5VbL3TIvZ7woWfOR524SLA_v-BsZH3DiBkni0DrTjy0xrlE1Uy5cBjs6o&usqp=CAU"
  },
  {
    user: "tylerperry",
    image: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQYME_Z3Sk_9gg-H0OCruINa2F29vE8mXnJGA&usqp=CAU"
  },
  {
    user: "toyinabraham",
    image: "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQUVLfJw9T6W8e-rFskGPRAYHw66nsVLRfm8Q&usqp=CAU"
  }
]
export default users;
# kubectl delete namespace xwiki
# kubectl create namespace xwiki
# git clone https://github.com/xwiki-contrib/xwiki-helm
cd xwiki-helm
helm dependency update
# Install the chart from the current directory into the xwiki namespace.
# Fix: the original was missing the line-continuation backslash after
# --namespace=xwiki, which split the helm invocation into two commands.
helm install \
  --values=./values.yaml \
  --namespace=xwiki \
  --name=xwiki \
  .
#!/bin/bash
# Activates the correct Anaconda environment, and runs the command passed to the container.
set -e
set -x

source activate rapids

nvidia-smi

# Capture the container command line verbatim.
ARGS=( "$@" )

python --version
echo "Calling: 'python ${ARGS[@]}'"
# Fix: quote the array expansion — the original unquoted ${ARGS[@]} re-split
# and glob-expanded any argument containing spaces or wildcards.
python "${ARGS[@]}"
# NOTE: with 'set -e', a non-zero python exit aborts before this line runs.
echo "Python call returned: $?"
Selection sort is a sorting algorithm that works by repeatedly selecting the smallest element from the unsorted portion of the array and swapping it into the first unsorted position. It then advances past that position and repeats the process until all elements have been sorted. The algorithm is O(n²) in time complexity and O(1) in space complexity.
public class MyStack {

    /** Backing array for the stack contents. */
    private int[] elements;
    /** Index of the top element; -1 when the stack is empty. */
    private int topIndex;
    /** Fixed capacity chosen at construction time. */
    private int capacity;

    /**
     * Creates a stack able to hold at most {@code size} ints.
     */
    public MyStack(int size) {
        this.capacity = size;
        this.elements = new int[this.capacity];
        this.topIndex = -1;
    }

    /** @return true when no elements are stored */
    public boolean isEmpty() {
        return topIndex == -1;
    }

    /** @return true when the stack is at full capacity */
    public boolean isFull() {
        return topIndex == capacity - 1;
    }

    /**
     * Pushes a value onto the stack; prints a warning when already full.
     */
    public void push(int data) {
        if (isFull()) {
            System.out.println("Stack is full!");
            return;
        }
        elements[++topIndex] = data;
    }

    /**
     * Removes and returns the top value; prints a warning and returns the
     * sentinel -1 when the stack is empty.
     */
    public int pop() {
        if (isEmpty()) {
            System.out.println("Stack is empty!");
            return -1;
        }
        return elements[topIndex--];
    }

    /**
     * Returns the top value without removing it; prints a warning and
     * returns the sentinel -1 when the stack is empty.
     */
    public int peek() {
        if (isEmpty()) {
            System.out.println("Stack is empty!");
            return -1;
        }
        return elements[topIndex];
    }
}
var LocalStorage = require('builder/components/local-storage/local-storage');
var CoreView = require('backbone/core-view');
var template = require('./analysis-view.tpl');
var Analyses = require('builder/data/analyses');
// localStorage namespace used to remember dismissed onboarding panels.
var STORAGE_KEY = 'onboarding';
/**
 * Onboarding/completion panel shown for an analysis node. Renders a
 * type-specific onboarding template and lets the user close the panel or
 * jump straight to styling.
 */
module.exports = CoreView.extend({
  className: 'AnalysisCompletionDetails is-opening',
  events: {
    'click .js-close': '_close',
    'click .js-style': '_onStyle'
  },
  initialize: function (opts) {
    // Required collaborators — fail fast when the caller forgets one.
    if (!opts.modalModel) throw new Error('modalModel is required');
    if (!opts.userModel) throw new Error('userModel is required');
    if (!opts.editorModel) throw new Error('editorModel is required');
    this._modalModel = opts.modalModel;
    this._userModel = opts.userModel;
    this._editorModel = opts.editorModel;
    LocalStorage.init(STORAGE_KEY, {
      userModel: this._userModel
    });
    this._initBinds();
  },
  render: function () {
    this.$el.html(template({
      type: this._getGenericAnalysisType()
    }));
    // Inject the analysis-type-specific onboarding content, when defined.
    var onboardingTemplate = this._typeDef().onboardingTemplate;
    var html = onboardingTemplate && onboardingTemplate(this.model.attributes);
    this.$('.js-content').html(html);
    this._onChangeEditorModelEdition(this._editorModel);
    return this;
  },
  _initBinds: function () {
    this.listenTo(this.model, 'destroy', this._close);
    this.listenTo(this._editorModel, 'change:edition', this._onChangeEditorModelEdition);
    this.add_related_model(this._editorModel);
  },
  // Reflect the editor's edition state as a CSS class on the root element.
  _onChangeEditorModelEdition: function (mdl) {
    var isEditing = !!mdl.get('edition');
    this.$el.toggleClass('is-editing', isEditing);
  },
  _close: function () {
    this._forget();
    this.trigger('close', this);
  },
  _onStyle: function () {
    this._forget();
    this.trigger('customEvent', 'style', this);
  },
  // Remember the dismissal so the same onboarding is not shown again.
  _forget: function () {
    LocalStorage.set(this._getGenericAnalysisType(), true);
  },
  _typeDef: function (type) {
    type = type || this.model.get('type');
    return Analyses.getAnalysisByType(type);
  },
  // Prefer the generic type of the analysis definition; fall back to the raw type.
  _getGenericAnalysisType: function () {
    var typeDef = this._typeDef();
    return typeDef && typeDef.genericType || this.model.get('type');
  }
});
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package hh;
import java.util.StringTokenizer;
/**
*
* @author FABIO
*/
public class Hh {
    /**
     * @param args the command line arguments
     */
    // NOTE(review): this file does not compile as-is — 'lea' (presumably a
    // java.util.Scanner), 'sistemas', and 'crear_carpeta' are referenced but
    // never declared, and Sistema/Carpeta/java.util.Date are not imported.
    // Confirm the missing declarations against the rest of the project.
    public static void main(String args[]) {
        System.out.println("Ingrese el nombre del sistema: ");
        String n = lea.next();
        System.out.println("Ingrese el usuario: ");
        String usuario = lea.next();
        System.out.println("Ingrese la Capacidad (Bytes): ");
        double capacidad = lea.nextDouble();
        sistemas = new Sistema(n, usuario, capacidad);
        System.out.println("SEPARE EL COMANDO DE LA INSTRUCCION POR UNA COMA (,)");
        System.out.print(sistemas.toString());
        String comando = lea.next();
        // Command format: "<command>,<name>,<size>".
        String coman = comando.split(",")[0];
        String name = comando.split(",")[1];
        String size = comando.split(",")[2];
        //
        System.out.println(coman+" "+name+" "+size);
        if (coman.equals(crear_carpeta)) {
            sistemas.getArchivos().add(new Carpeta(name, size, new Date(), new Date()));
        }
        sistemas.setCarpeta_Raiz(name);
        System.out.println("");
        System.out.println("SEPARE EL COMANDO DE LA INSTRUCCION POR UNA COMA (,)");
        System.out.print(sistemas.toString());
        comando = lea.next();
    }
}
|
#code based on example found at:
#http://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
import time as t
# construct the argument parse and parse the arguments
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to the (optional) image file")
ap.add_argument("-b", "--buffer", type=int, default=32,
    help="max buffer size")
ap.add_argument("-r", "--real", type=str, default="real",
    help="real image or mask")
ap.add_argument("-t", "--type", type=str, default="blank",
    help="type of find")
args = vars(ap.parse_args())

real = args.get("real")
typ = args.get("type")

# HSV colour bounds per object type. Fix: initialise to the "blank"
# (match-nothing) range so an unrecognised --type value no longer raises
# NameError when the bounds are used below.
greenLower = (0, 0, 0)
greenUpper = (0, 0, 0)
if typ == "oxy":
    greenLower = (23, 157, 148)
    greenUpper = (32, 196, 238)
elif typ == "ppbo":
    greenLower = (14, 20, 157)
    greenUpper = (40, 228, 246)
elif typ == "TAC":
    greenLower = (0, 16, 46)
    greenUpper = (183, 170, 105)
elif typ == "bby":
    greenLower = (17, 121, 76)
    greenUpper = (52, 228, 218)
elif typ == "plier":
    greenLower = (73, 108, 78)
    greenUpper = (100, 201, 149)
# my eyes(broken)
# greenLower = (106, 84, 38)
# greenUpper = (138, 143, 55)

# Trail of recently seen centroids, newest first.
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference to the webcam
if not args.get("image", False):
    camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the image file
else:
    frame = cv2.imread(args["image"])

# Process a single frame.
if 1:
    # resize the frame and convert it to the HSV color space
    frame = imutils.resize(frame, width=600)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # construct a mask for the requested colour range, then erode/dilate
    # to remove any small blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask; [-2] keeps compatibility across OpenCV
    # versions whose findContours return shapes differ
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use it to compute the
        # minimum enclosing circle and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        # NOTE(review): assumes M["m00"] != 0 for the largest contour.
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # update the points queue
    pts.appendleft(center)

    # loop over the set of tracked points, drawing a fading trail.
    # Fix: 'xrange' is Python 2 only; this script otherwise targets Python 3.
    for i in range(1, len(pts)):
        # Fix: the original used 'pass' here, so cv2.line was still called
        # with None endpoints and crashed; 'continue' actually skips them.
        if pts[i - 1] is None or pts[i] is None:
            continue
        # compute the thickness of the line and draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 0), thickness)

    # show the frame to our screen
    print(hsv)
    if real == "real":
        cv2.imshow("Frame", frame)
    elif real == "mask":
        cv2.imshow("Frame", mask)
    else:
        cv2.imshow("Frame", hsv)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop (no-op for a single frame)
    if key == ord("q"):
        pass

t.sleep(10)
cv2.imshow("asd", frame)
# cleanup the camera and close any open windows
#camera.release()
#cv2.destroyAllWindows()
#!/bin/sh
# NOTE(review): 'function', 'set -o pipefail', 'trap ... ERR' and arrays are
# bash features; the '#!/bin/sh' shebang only works where sh is bash.
# Presumably CocoaPods-generated boilerplate — regenerated by 'pod install'.
set -e
set -u
set -o pipefail

# Report the script location and line number of any unexpected failure.
function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
# install_dsym <path-to-.framework.dSYM>
#   Copies the dSYM into the target's derived-files dir, strips architectures
#   not being built, and moves it to DWARF_DSYM_FOLDER_PATH. When stripping
#   was skipped entirely, touches a placeholder so Xcode's input/output
#   dependency tracking doesn't re-run this phase for a "missing" file.
install_dsym() {
  local source="$1"
  if [ -r "$source" ]; then
    # Copy the dSYM into a the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
    local basename
    basename="$(basename -s .framework.dSYM "$source")"
    binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
    # Strip invalid architectures so "fat" simulator / device frameworks work on device
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary"
    fi
    # STRIP_BINARY_RETVAL == 1 means strip_invalid_archs actually processed
    # the binary (see that function); 0 means it bailed out.
    if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
      # Move the stripped file into its final destination.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
    fi
  fi
}
# Copies the bcsymbolmap files of a vendored framework
# install_bcsymbolmap <path-to-.bcsymbolmap>
#   rsyncs the bitcode symbol map into the built-products directory.
# NOTE(review): unlike the echoes in install_framework/install_dsym, the inner
# double quotes in this echo are NOT escaped, so the logged command line is
# mangled by word splitting — cosmetic only; the actual rsync below is correct.
install_bcsymbolmap() {
    local bcsymbolmap_path="$1"
    local destination="${BUILT_PRODUCTS_DIR}"
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
# code_sign_if_enabled <path>
#   Re-signs <path> with EXPANDED_CODE_SIGN_IDENTITY unless signing is
#   disabled by the build settings. When COCOAPODS_PARALLEL_CODE_SIGN is
#   "true" the codesign command is backgrounded with `&` (joined by the
#   `wait` at the bottom of the script); this is why the command is built
#   as a string and run through `eval`.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Strip invalid architectures
# strip_invalid_archs <binary>
#   Removes (via lipo) every architecture slice of <binary> that is not in
#   $ARCHS, in place. Communicates its outcome through the global
#   STRIP_BINARY_RETVAL: 1 = processed, 0 = no arch overlap (warned and
#   bailed). Callers (install_dsym) branch on that global.
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    STRIP_BINARY_RETVAL=0
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=1
}
# Embed the pod frameworks. The framework list is identical for the Debug and
# Release configurations, so handle both in a single case branch and iterate
# the pod names instead of repeating each install_framework call per config.
case "$CONFIGURATION" in
  Debug|Release)
    for pod in RxSwift Cuckoo Nimble Quick RxBlocking RxTest; do
      install_framework "${BUILT_PRODUCTS_DIR}/${pod}/${pod}.framework"
    done
    ;;
esac
# If code signing was parallelized, join the backgrounded codesign jobs.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
// Load States from CSV
// Setup for a one-shot loader script: connects to MongoDB (Mongoose) and to
// the BigchainDB test network, then prepares a CSV reader for haciendas.csv.
var States = require('../models/loc_countries');
var mongoose = require('mongoose');
const bcDb_driver = require('bigchaindb-driver');
//const API_PATH = 'http://localhost:9984/api/v1/';
//const conn = new driver.Connection(API_PATH);
// NOTE(review): this connection (connDb) is never used below — the script
// opens a second, identical connection (`conn`) further down; one of the two
// is redundant.
const connDb = new bcDb_driver.Connection(
    "https://test.bigchaindb.com/api/v1/",
    {
        app_id: '385b62f9',
        app_key: '<KEY>'
    });
// Ed25519 keypair used to sign every CREATE transaction below.
var public_key = "<KEY>";
var private_key = "<KEY>";
//mongoose.connect('mongodb://1172.16.31.10:27017/casupo');
// SECURITY(review): connection string embeds credentials in source — move to
// an environment variable or config file.
mongoose.connect('mongodb://testuser:<EMAIL>:17175/shopping');
//mongoose.connect('mongodb://invoice:Inv.1984@admapps01:27017/invoice2pay');
var db = mongoose.connection;
mongoose.connection.on('open', function() {
    console.log("Connected to Mongoose...");
});
db.on('error', console.error.bind(console, 'connection error:'));
// ---------------------
// Blockchain
const driver = require('bigchaindb-driver');
// SECURITY(review): keys and app credentials are logged and hardcoded —
// avoid printing secrets and move the app_key out of source control.
console.log("public_key:", public_key);
console.log("private_key:", private_key);
const conn = new driver.Connection(
    'https://test.bigchaindb.com/api/v1/',
    { app_id: '385b62f9',
      app_key: '185a4d7d619448dbf0c725f3485fa0b6' });
var csv = require('ya-csv');
// Each CSV record is one hacienda (farm) to register as a BigchainDB asset.
var reader = csv.createCsvFileReader('haciendas.csv');
var data = [];
var arrayobject = [];
// Counter of processed rows (incremented in the 'data' handler).
var done = 0;
// Mongoose-style field definitions describing the immutable asset part of a
// hacienda record (ownership, location, registry data).
// NOTE(review): neither schema object below is passed to mongoose.Schema or
// otherwise referenced in this chunk — presumably documentation of the CSV
// layout, or dead code; verify before removing.
var asset_schema =
{
    publicKey_owner: {type: String, required: true},
    asset_type: {type: String, required: true},
    asset_name: {type: String, required: true},
    year_foundation: {type: String, required: true},
    asset_cuit: {type: String, required: true},
    asset_tipo_doc: {type: String, required: true},
    asset_num_doc: {type: String, required: true},
    asset_bank_register: {type: String, required: true},
    cadastro: {type: String, required: true},
    country: {type: String, required: true},
    state: {type: String, required: true},
    city: {type: String, required: true},
    location: {type: String, required: true},
    latitude: {type: String, required: true},
    longitude: {type: String, required: true},
    surface_total: {type: String, required: true},
    surface_agricultura: {type: String, required: true},
    owner_name: {type: String, required: true},
    owner_cuit: {type: String, required: true},
    owner_doc_type: {type: String, required: true},
    owner_num_type: {type: String, required: true},
    owner_bank_register: {type: String, required: true}
};
// Mutable per-season production data attached as transaction metadata.
var metadata_schema =
{
    products: {type: String, required: true},
    period: {type: String, required: true},
    agricultural_system: {type: String, required: true},
    surface_planted: {type: String, required: true},
    tons_harvested: {type: String, required: true},
    income_gross: {type: String, required: true},
    income_net: {type: String, required: true}
};
// For every CSV line: split the semicolon-separated fields, log them, build
// the asset/metadata payloads, and post a signed CREATE transaction to
// BigchainDB. The 'end' handler tears the script down.
reader.on('data', function(rec)
{
    //console.log(rec);
    // Fix: declare locals with `var` — the original assigned to undeclared
    // identifiers, creating implicit globals (and a ReferenceError in strict
    // mode / ES modules).
    var vals = rec[0];
    //console.log(vals);
    var vals1 = vals.split(";");
    console.log("asset_type: "+vals1[0]);
    console.log("asset_name: "+vals1[1]);
    console.log("year_foundation :"+vals1[2]);
    console.log("asset_cuit :"+vals1[3]);
    console.log("asset_tipo_doc :"+vals1[4]);
    console.log("asset_num_doc: :"+ vals1[5]);
    console.log("asset_bank_register :"+ vals1[6]);
    console.log("cadastro :"+ vals1[7]);
    console.log("country :"+ vals1[8]);
    console.log("state :"+ vals1[9]);
    console.log("city :"+ vals1[10]);
    console.log("location :"+ vals1[11]);
    console.log("latitude :"+ vals1[12]);
    console.log("longitude:"+ vals1[13]);
    console.log("surface_total:"+ vals1[14]);
    console.log("surface_agricultura:"+ vals1[15]);
    console.log("owner_name:"+ vals1[16]);
    console.log("owner_cuit:"+ vals1[17]);
    console.log("owner_doc_type:"+ vals1[18]);
    console.log("owner_num_type:"+ vals1[19]);
    console.log("owner_bank_register:"+ vals1[20]);
    console.log("products:"+ vals1[21]);
    console.log("period:"+ vals1[22]);
    console.log("agricultural_system:"+ vals1[23]);
    console.log("surface planted:"+ vals1[24]);
    console.log("tons_harvested:"+ vals1[25]);
    console.log("income_gross:"+ vals1[26]);
    console.log("income_net:"+vals1[27]);
    console.log("amount_requested:"+vals1[28]);
    console.log("numbers_payment:"+vals1[29]);
    console.log("first_payment:"+vals1[30]);
    console.log("interest_rate:"+vals1[31]);
    console.log("frecuency:"+vals1[32]);
    //console.log("rec:",done," Supplier creation in progress..", vals1[0]);
    // arrayobject.push(
    //   rec_schema({
    // Full record (asset + loan request fields) — this is what is actually
    // sent as the BigchainDB asset payload below.
    var asset_alldata =
    {
        user_publicKey: "<KEY>",
        asset_type: vals1[0],
        asset_name: vals1[1],
        year_foundation: vals1[2],
        asset_cuit : vals1[3],
        asset_tipo_doc: vals1[4],
        asset_num_doc: vals1[5],
        asset_bank_register: vals1[6],
        cadastro: vals1[7],
        country: vals1[8],
        state: vals1[9],
        city: vals1[10],
        location: vals1[11],
        latitude: vals1[12],
        longitude: vals1[13],
        surface_total: vals1[14],
        surface_agricultura: vals1[15],
        owner_name: vals1[16],
        owner_cuit: vals1[17],
        owner_doc_type: vals1[18],
        owner_num_type: vals1[19],
        owner_bank_register: vals1[20],
        products: vals1[21],
        period: vals1[22],
        agricultural_system: vals1[23],
        surface_planted: vals1[24],
        tons_harvested: vals1[25],
        income_gross: vals1[26],
        income_net: vals1[27],
        amount_requested: vals1[28],
        numbers_payment: vals1[29],
        first_payment: vals1[30],
        interest_rate: vals1[31],
        frecuency : vals1[32]
    };
    // NOTE(review): asset_data is built but never used (only asset_alldata
    // and asset_metadata reach the transaction) — kept for compatibility,
    // candidate for removal.
    var asset_data =
    {
        user_publicKey: "<KEY>7HWsSmck",
        asset_type: vals1[0],
        asset_name: vals1[1],
        year_foundation: vals1[2],
        asset_cuit : vals1[3],
        asset_tipo_doc: vals1[4],
        asset_num_doc: vals1[5],
        asset_bank_register: vals1[6],
        cadastro: vals1[7],
        country: vals1[8],
        state: vals1[9],
        city: vals1[10],
        location: vals1[11],
        latitude: vals1[12],
        longitude: vals1[13],
        surface_total: vals1[14],
        surface_agricultura: vals1[15],
        owner_name: vals1[16],
        owner_cuit: vals1[17],
        owner_doc_type: vals1[18],
        owner_num_type: vals1[19],
        owner_bank_register: vals1[20]
    };
    // Per-season production data, attached as mutable transaction metadata.
    var asset_metadata =
    {
        products: vals1[21],
        period: vals1[22],
        agricultural_system: vals1[23],
        surface_planted: vals1[24],
        tons_harvested: vals1[25],
        income_gross: vals1[26],
        income_net: vals1[27]
    };
    //   }));
    // One-output CREATE transaction owned by the module-level keypair.
    const tx = driver.Transaction.makeCreateTransaction(
        asset_alldata,
        asset_metadata,
        [ driver.Transaction.makeOutput(
            driver.Transaction.makeEd25519Condition(public_key))],
        public_key)
    const txSigned = driver.Transaction.signTransaction(tx, private_key);
    console.log("antes de commit");
    // Fix: postTransactionCommit returns a Promise; the original dropped it,
    // so network/validation failures were silently lost (and an unhandled
    // rejection terminates newer Node versions). Surface errors explicitly.
    conn.postTransactionCommit(txSigned)
        .catch(function(err) {
            console.error("postTransactionCommit failed for row", done, err);
        });
    // NOTE(review): this log fires before the async commit resolves — the
    // transaction is merely submitted, not confirmed, at this point.
    console.log("after commit:");
    console.log(txSigned);
    done++;
    // data.push(rec);
}).on('end', function()
{
    // 'end' fires as soon as the CSV is fully read; commits submitted above
    // may still be in flight when exit() closes the Mongo connection.
    exit();
});
// Final teardown once the CSV stream ends: close the Mongoose connection so
// the Node process can terminate naturally.
// NOTE(review): this does not wait for in-flight BigchainDB commits from the
// 'data' handler — pending transactions may still be outstanding when the
// process exits; confirm ordering is acceptable.
function exit() {
    console.log("exit.");
    mongoose.disconnect();
    console.log("exit - despues de mongoose.");
}
|
import { createContext } from 'react'
// React context carrying the app's root store.
// NOTE(review): `null as any` smuggles a null default past the non-nullable
// IStore type — a consumer rendered outside RootStoreProvider gets null at
// runtime with no compile-time warning. Consider
// `createContext<IStore | undefined>(undefined)` plus a checked accessor
// hook; left unchanged here because it alters the exported context's type.
const StoreContext = createContext<IStore>(null as any)

// Props accepted by RootStoreProvider.
interface IProps {
  store: IStore // root store instance to expose to the subtree
  children: React.ReactNode // subtree that may consume StoreContext
}
/**
 * Makes the root store available to the React tree via StoreContext.
 *
 * Fix: pass the store instance directly instead of `{ ...store }`. The
 * spread built a brand-new object on every render, which (a) gave the
 * context a fresh identity each time, forcing every consumer to re-render,
 * and (b) copies only own enumerable properties — prototype methods and
 * getters are lost if IStore is a class instance (e.g. a MobX store).
 */
const RootStoreProvider = ({ store, children }: IProps) => {
  return (
    <StoreContext.Provider value={store}>
      {children}
    </StoreContext.Provider>
  )
}
export { RootStoreProvider, StoreContext }
#!/bin/bash
############################################
# Authors : Julien bSimard and cedric audy #
# Session : winter 2019                    #
############################################
# Update the local checkout with the latest changes from origin's master
# branch (fetch + merge into the current branch).
git pull origin master
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.