text stringlengths 1 1.05M |
|---|
<gh_stars>0
import { LitElement, html, css } from 'lit';
import { customElement, property, state } from 'lit/decorators.js';
import { repeat } from 'lit/directives/repeat.js';
import { Message, MessageType } from '../models/Message';
import { UserType, User } from '../models/User';
import { scrollStyle } from '../stylesheets/scroll.js';
import { chattyButtonStyle } from '../stylesheets/chatty-button';
@customElement('chat-conversation')
export class ChatConversation extends LitElement {
@property({ type: Object })
currentUser: User;
@property({ type: Array })
messages: Message[];
@property({ type: String })
answeringMessageId?: string;
@state()
_messageToShowQuestion?: Message | null;
constructor(
currentUser: User,
messages: Message[],
answeringMessageId: string
) {
super();
this.currentUser = currentUser;
this.messages = messages;
this.answeringMessageId = answeringMessageId;
}
answerButton(message: Message) {
const isQuestionMessage =
message.type === MessageType.Question &&
message.user.id !== this.currentUser.id;
const isWelcomeMessage = message.type === MessageType.Welcome;
if (isQuestionMessage || isWelcomeMessage)
return html`<div class="answer-action">
<button @click=${() => this._handleAnswer(message)}>
${message.id === this.answeringMessageId ? 'Answering...' : 'Answer'}
</button>
</div>`;
}
showAnsweredQuestion(message: Message) {
console.log('message', message, this._messageToShowQuestion);
// I could made this section with expend animation.
// this was made at 23:21 after really long day with 3 hours of sleep the night before. ¯\_(ツ)_/¯
if (message.answeredQuestion) {
console.log(
'show',
this._messageToShowQuestion &&
message.id === this._messageToShowQuestion.id
);
if (
this._messageToShowQuestion &&
message.id === this._messageToShowQuestion.id
) {
return html`<div class="answered-question">
<h4>
${message.answeredQuestion.user.username}
${message.answeredQuestion.user.type === UserType.Bot
? html`<small>(Bot)</small>`
: null}
</h4>
<p>${message.answeredQuestion.text}</p>
<button
@click=${() => this._handleShowQuestionClick(<Message>message)}
class="expend-button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
height="16px"
viewBox="0 0 24 24"
width="16px"
fill="#FFFFFF"
>
<path d="M0 0h24v24H0V0z" fill="none" />
<path d="M12 8l-6 6 1.41 1.41L12 10.83l4.59 4.58L18 14l-6-6z" />
</svg>
</button>
</div>`;
} else {
return html`<button
@click=${() => this._handleShowQuestionClick(<Message>message)}
class="expend-button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
enable-background="new 0 0 20 20"
height="16px"
viewBox="0 0 20 20"
width="16px"
fill="#FFFFFF"
>
<g><rect fill="none" height="20" width="20" x="0" /></g>
<g>
<path
d="M9.29,10.58c0.6-1.07,1.73-1.7,2.39-2.65c0.7-0.99,0.31-2.85-1.67-2.85c-1.3,0-1.94,0.98-2.2,1.8l-2-0.84 C6.35,4.41,7.83,3,9.99,3c1.81,0,3.05,0.82,3.68,1.85c0.54,0.88,0.86,2.54,0.02,3.77c-0.92,1.36-1.81,1.78-2.28,2.65 c-0.19,0.35-0.27,0.58-0.27,1.72H8.91C8.91,12.4,8.82,11.42,9.29,10.58z M11.5,16c0,0.83-0.67,1.5-1.5,1.5 c-0.83,0-1.5-0.67-1.5-1.5c0-0.83,0.67-1.5,1.5-1.5C10.83,14.5,11.5,15.17,11.5,16z"
/>
</g>
</svg>
</button>`;
}
}
}
render() {
return html`
<div class="container scrollable">
<ul>
${repeat(
this.messages,
message => message.id,
message => {
// I would make this into a ChatMessage component.
return html`
<li
class="${message.user.type !== UserType.Bot &&
message.user.id === this.currentUser?.id
? 'me'
: ''}"
>
<section>
<h4>
${message.user.username}
${message.user.type === UserType.Bot
? html`<small>(Bot)</small>`
: null}
</h4>
<p>${message.text}</p>
${this.answerButton(message)}
${this.showAnsweredQuestion(message)}
</section>
<small> ${this._formatDate(message.timestamp)}</small>
</li>
`;
}
)}
</ul>
</div>
`;
}
_handleShowQuestionClick(message: Message) {
if (message.id === this._messageToShowQuestion?.id)
this._messageToShowQuestion = null;
else this._messageToShowQuestion = message;
}
_handleAnswer(message: Message) {
const isWelcomeMessage = message.type === MessageType.Welcome;
this.dispatchEvent(
new CustomEvent('onAnswer', {
detail: {
id: this.answeringMessageId === message.id ? null : message.id,
welcomeMessage: isWelcomeMessage ? message : null,
},
bubbles: true,
composed: true,
})
);
}
_formatDate(ms: number) {
return new Date(ms).toLocaleDateString('en', {
year: '2-digit',
month: '2-digit',
day: '2-digit',
hour: '2-digit',
minute: '2-digit',
});
}
static styles = [
chattyButtonStyle,
scrollStyle,
css`
:host {
--x-padding: 2.5rem;
--message-border-radius: 20px;
display: flex;
box-sizing: border-box;
padding: 2rem var(--x-padding);
position: relative;
}
:host::before {
content: '';
box-sizing: border-box;
background: linear-gradient(to bottom, white 30%, transparent);
position: absolute;
z-index: 1;
top: 0;
left: 0;
height: 6rem;
width: 100%;
}
.container {
width: 100%;
display: flex;
width: 100%;
flex-direction: column-reverse;
}
ul {
display: flex;
flex-direction: column;
margin: 0;
padding: 0;
gap: 1.5rem;
}
li {
display: flex;
width: fit-content;
flex-direction: column;
gap: 1rem;
align-self: flex-end;
margin: 0.25rem;
animation: message 0.4s ease-out forwards;
max-width: 65%;
}
li:first-child {
margin-top: 1rem;
}
li.me {
align-self: flex-start;
}
li.me section {
box-shadow: var(--shadow-color) 2px 5px 5px 0px;
background-color: #d1d1de54;
border-bottom-left-radius: unset;
border-bottom-right-radius: var(--message-border-radius);
}
section {
position: relative;
border: 1px solid var(--light-border-color);
border-radius: var(--message-border-radius);
border-bottom-right-radius: unset;
padding: 1rem 0.5rem;
box-shadow: var(--shadow-color) -2px 5px 5px 0px;
}
.answered-question {
margin: 1rem;
border: 1px solid;
border-radius: 20px;
padding: 0.5rem;
border-bottom-right-radius: 0;
background: #fffdece0;
}
.expend-button {
padding: 0px;
border-radius: 36px;
height: 18px;
width: 18px;
position: absolute;
bottom: -8px;
left: -8px;
}
li.me .expend-button {
right: -8px;
left: unset;
}
li.me .answered-question {
background: white;
}
@keyframes message {
0% {
transform: scale(0.5);
}
100% {
transform: scale(1);
}
}
h4 {
color: #5d88d8ed;
margin: 0;
margin-bottom: 0.5rem;
}
p {
margin: 0;
margin-bottom: 0.25rem;
white-space: pre-wrap;
}
small {
margin: 0px 0.5rem;
color: #979797;
font-size: 0.75rem;
}
.answer-action {
display: flex;
justify-content: center;
padding: 0.5rem;
}
`,
];
}
|
<filename>src/context/index.ts<gh_stars>1-10
export * from './client-config'
export * from './env-enum'
export * from './log-config'
|
def calculate_logits(self, inputs):
    # Build a MadryModel sized to this object's class count and run a
    # forward pass to obtain the unnormalized class scores (logits).
    # NOTE(review): a fresh model is instantiated on every call, so no
    # weights are shared between calls -- confirm that is intentional.
    model = madry_model.MadryModel(n_classes=self.n_classes)  # Instantiate MadryModel with the number of classes
    logits = model.fprop(inputs)  # Calculate logits using the fprop method of MadryModel
    return logits
def linear_search(arr, x):
    """Return the index of the first element equal to ``x``, or -1 if absent."""
    for index, value in enumerate(arr):
        if value == x:
            return index
    return -1
class smart_attr(object):
name = None
def __init__(self, factory, *a, **k):
self.creation_data = factory, a, k
def __get__(self, obj, clas=None):
if self.name is None:
raise RuntimeError, ("class %r uses a smart_attr, so its "
"metaclass should be MetaSmart, but is %r instead" %
(clas, type(clas)))
factory, a, k = self.creation_data
setattr(obj, name, factory(*a, **k))
return getattr(obj, name)
class MetaSmart(type):
    # Metaclass companion to smart_attr: at class-creation time it tells every
    # smart_attr descriptor in the class body which attribute name it was
    # bound to, so the descriptor can cache its value under that name.
    def __new__(mcl, clasname, bases, clasdict):
        # set all names for smart_attr attributes
        for k, v in clasdict.iteritems():
            if isinstance(v, smart_attr):
                v.name = k
        # delegate the rest to the supermetaclass
        return super(MetaSmart, mcl).__new__(mcl, clasname, bases, clasdict)
# let's let any class use our custom metaclass by inheriting from smart_object
class smart_object:
    # Python 2 idiom: setting __metaclass__ makes MetaSmart process this class
    # and every subclass, wiring up any smart_attr descriptors they declare.
    __metaclass__ = MetaSmart
|
#!/bin/bash -u
set -e
#
# Copyright (c) 2010, 2011 Tresys Technology LLC, Columbia, Maryland, USA
#
# This software was developed by Tresys Technology LLC
# with U.S. Government sponsorship.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Remediation should not be installing packages. We may have to remove this.
/bin/rpm -q rsyslog && /usr/bin/yum install -q -y rsyslog
|
# Output single horizontally-arranged image and the JSON
# bounding boxes found.
./datapiece -i pngs/contract2.png -b boxes_contract.json -o out/contract2_one_line.png --dpi 300 --jsonout out/contract2.json
# Output series of images named out/contract2_<field>.png with --split. Clean a 2 pixel border around each output image.
./datapiece -i pngs/contract2.png -b boxes_contract.json -o out/contract2_ --dpi 300 --split -B 2
# Output JSON without images with --findonly.
./datapiece -i pngs/contract2.png -b boxes_contract.json --dpi 300 --jsonout out/contract2_findonly.json --findonly
|
import { Injectable } from '@angular/core';
@Injectable({
providedIn: 'root'
})
export class PoductsService {
public essenceQuantities = [];
public caloporteursQuantities = [];
public dieselQuantities = [];
public graissesQuantities = [];
public hmgazQuantities = [];
public htransmissionQuantities = [];
public httempsQuantities = [];
public hydrauliquesQuantities = [];
public marinsQuantities = [];
constructor() { }
}
|
AFRAME.registerComponent('camera-logger', {
schema: {
timestamp: {type: 'int'},
seconds: {type: 'int'} // default 0
},
log : function () {
var cameraEl = this.el.sceneEl.camera.el;
var rotation = cameraEl.getAttribute('rotation');
var worldPos = new THREE.Vector3();
worldPos.setFromMatrixPosition(cameraEl.object3D.matrixWorld);
console.log("Time: " + this.data.seconds
+ "; Camera Position: (" + worldPos.x.toFixed(2) + ", " + worldPos.y.toFixed(2) + ", " + worldPos.z.toFixed(2)
+ "); Camera Rotation: (" + rotation.x.toFixed(2) + ", " + rotation.y.toFixed(2) + ", " + rotation.z.toFixed(2) + ")");
},
play: function () {
this.data.timestamp = Date.now();
this.log();
},
tick: function () {
if (Date.now() - this.data.timestamp > 1000) {
this.data.timestamp += 1000;
this.data.seconds += 1;
this.log();
}
},
}); |
<gh_stars>0
# taxon.py
from Bio import Entrez
def format_name(name):
    """Abbreviate a binomial name, e.g. 'Homo sapiens' -> 'H. sapiens'."""
    parts = name.split()
    return '%s. %s' % (parts[0][0], ' '.join(parts[1:]))
def get_tax_dict(id_list, reformat=True):
    # Fetch taxonomy records for the given ids from NCBI via Entrez (network
    # call) and return a mapping {int tax_id: scientific name}.  When
    # ``reformat`` is true the genus is abbreviated via format_name
    # ('Homo sapiens' -> 'H. sapiens').  A sentinel 'Other' key mapping to ''
    # is always added so callers can look up the 'Other' bucket safely.
    # NOTE(review): Entrez.email is expected to be configured by the caller
    # before use -- not visible from this file; confirm.
    handle = Entrez.efetch(db='taxonomy', id=[str(x) for x in id_list])
    record = Entrez.read(handle)
    if reformat:
        tax_dict = {int(rec['TaxId']): format_name(rec['ScientificName']) for rec in record}
    else:
        tax_dict = {int(rec['TaxId']): rec['ScientificName'] for rec in record}
    tax_dict['Other'] = ''
    return tax_dict
def get_tax_name_counts(counts, reformat=True, threshold=0):
    # Resolve the taxon ids indexing ``counts`` to scientific names.
    #
    # counts: assumed to be a pandas Series indexed by taxon id, possibly
    #         including an 'Other' bucket -- TODO confirm against callers.
    # reformat: passed through to get_tax_dict (abbreviate genus names).
    # threshold: only ids whose count is strictly greater are looked up.
    #
    # Returns a list aligned with counts.index; ids that were filtered out
    # (or not returned by the lookup) map to the empty string.  Involves a
    # network call through get_tax_dict.
    id_list = counts[counts > threshold].index
    id_list = [x for x in id_list if x != 'Other']
    if id_list == []:
        return []
    else:
        tax_dict = get_tax_dict(id_list, reformat)
        return [tax_dict[tax_id] if tax_id in tax_dict.keys() else '' for tax_id in counts.index]
def get_strain(tax_id, tax_rank_dict, parent_tax_dict):
    """Return the strain-level taxon id for ``tax_id``.

    Walks up the lineage via ``parent_tax_dict`` until a node whose rank
    (per ``tax_rank_dict``) is 'species' or 'superkingdom' is reached,
    recording every id visited.  Returns -1 when the walk ends at
    Bacteria (tax id 2); otherwise selects an id near the species end of
    the recorded branch using the original -3/-2/-1 preference.
    """
    new_id = tax_id
    id_branch = [tax_id]
    while tax_rank_dict[new_id] not in ['species', 'superkingdom']:
        new_id = parent_tax_dict[new_id]
        # BUG FIX: the original appended ``tax_id`` on every iteration, so
        # the branch never recorded the ancestors actually visited and the
        # [-3]/[-2] selections below returned the starting id regardless
        # of lineage depth.  Record the node we just walked to instead.
        id_branch.append(new_id)
    if new_id == 2:  # Bacteria
        return -1
    elif len(id_branch) >= 3:
        return id_branch[-3]
    elif len(id_branch) >= 2:
        return id_branch[-2]
    else:
        return id_branch[-1]
def get_species(tax_id, tax_rank_dict, parent_tax_dict):
    """Climb the taxonomy from ``tax_id`` to the nearest node ranked
    'species' (or 'superkingdom') and return its id; -1 for Bacteria (2)."""
    node = tax_id
    while tax_rank_dict[node] not in ('species', 'superkingdom'):
        node = parent_tax_dict[node]
    return -1 if node == 2 else node
def get_class(tax_id, tax_rank_dict, parent_tax_dict):
    """Climb the taxonomy from ``tax_id`` to the nearest node ranked
    'class' (or 'superkingdom') and return its id; -1 for Bacteria (2)."""
    node = tax_id
    while tax_rank_dict[node] not in ('class', 'superkingdom'):
        node = parent_tax_dict[node]
    return -1 if node == 2 else node
|
#!/bin/zsh
#$ -l h_gpu=1
#$ -l m_mem_free=45G
#$ -cwd
#$ -V
#$ -e error_log_$JOB_ID
#$ -o out_log_$JOB_ID
#$ -l h_rt=12:00:00
###$ -l hostname=maxg01
###$ -l cuda_name=Tesla-V100-SXM2-16GB
export CUDA_VISIBLE_DEVICES=0
python run_auccpvloss.py "$@"
retVal=$?
if [ $retVal -ne 0 ]; then
echo "Error"
exit 100
fi
|
import re
def remove_characters(s):
    """Return ``s`` with every character outside [0-9A-Za-z] removed."""
    return re.sub(r'[^0-9A-Za-z]', '', s)


s_new = remove_characters(";Hello world!@#$")
print(s_new)  # -> 'Helloworld' (the space is stripped along with punctuation)
require 'json'
require 'net/http'
module DataFetchers
# Fetches variant records from the CIViC API, transparently paging
# through the whole result set.
class Civic
# Lazily stream every variant across all pages.  The enumerator's size
# hint is the server-reported total variant count.
def variants
page = get_page(1)
Enumerator.new(page.total_count) do |y|
page.variants.each { |v| y << v }
while page.current_page_num < page.total_pages do
page = get_page(page.current_page_num + 1)
page.variants.each { |v| y << v }
end
end
end
private
# GET one page of variants; raises StandardError on any non-200 response.
def get_page(page_number = 1)
uri = variant_base_url(page_number)
res = Net::HTTP.get_response(uri)
raise StandardError.new("Request Failed!") unless res.code == '200'
CivicResponse.new(res.body)
end
# Build the variants endpoint URI with the page number as a query parameter.
def variant_base_url(page_number)
URI.parse("https://civic.genome.wustl.edu/api/docm/variants").tap do |uri|
uri.query = URI.encode_www_form({page: page_number})
end
end
end
# Value object wrapping one page of the CIViC variants API response:
# exposes the record list and the pagination metadata under '_meta'.
class CivicResponse
attr_reader :data
# body - raw JSON response body (String); parsed eagerly.
# NOTE(review): depends on the JSON module, which this file never
# requires explicitly -- confirm the host application loads it.
def initialize(body)
@data = JSON.parse(body)
end
# The variant records on this page.
def variants
data['records']
end
# Total number of variants across all pages.
def total_count
data['_meta']['total_count']
end
# Total number of pages available.
def total_pages
data['_meta']['total_pages']
end
# 1-based index of this page.
def current_page_num
data['_meta']['current_page']
end
end
end
|
<reponame>objektwerks/python.scikit.learn
"""
Random Selection test on ads click-thru-rate data.
"""
import random
import matplotlib.pyplot as plt
import pandas as pd
# Load the click-through observations: one row per impression round,
# one column per ad (cell value is the reward for showing that ad).
df = pd.read_csv('./../../data/ads.ctr.csv')

N = 10000  # number of rounds (impressions) to simulate
d = 10     # number of candidate ads

ads_selected = []
total_reward = 0
for n in range(0, N):
    # BUG FIX: the original read ``ad = random.randrange(d`` -- a missing
    # closing parenthesis that made the whole script a SyntaxError.
    ad = random.randrange(d)
    ads_selected.append(ad)
    reward = df.values[n, ad]
    total_reward = total_reward + reward

plt.hist(ads_selected)
plt.title('Histogram of Ad Selections')
plt.xlabel('Ads')
plt.ylabel('Number of times each Ad was Selected')
plt.show()
|
import { Component, OnInit } from '@angular/core';
import {ActivatedRoute, Params, Router} from "@angular/router";
import {AuthenticationService} from "../_services/authentication.service";
@Component({
selector: 'app-reset',
templateUrl: './reset.component.html',
styleUrls: ['./reset.component.scss']
})
export class ResetComponent implements OnInit {
private _token:string='';
private _newPassword:string;
private _cnPassword:string;
private _forgotError:boolean = false;
private _errorMsg:string;
private _success:boolean=false;
private _successMsg:string;
public loading = false;
constructor(private _activatedRoute:ActivatedRoute,private _authenticationService:AuthenticationService,private _router:Router) { }
ngOnInit() {
this._activatedRoute.queryParams.subscribe(
data => {
this._token = data['token'];
});
}
onSubmit(formValue:any){
this.loading = true;
this._token = formValue.token;
this._newPassword = <PASSWORD>Value.<PASSWORD>;
this._cnPassword = formValue.<PASSWORD>;
this._authenticationService.reset({token:this._token,newpassword:this._newPassword,cnpassword:this._<PASSWORD>})
.subscribe(
data => {
this._success= true;
this._successMsg = "Your password has been successfully reset";
setTimeout(() =>
{
this._router.navigate(['/login'])
},
2000);
},
error => {
this.loading= false;
let errordata = JSON.parse(error._body);
if(errordata.status == 0){
this._forgotError = true;
this._errorMsg = errordata.message;
}
});
}
}
|
<reponame>Wayne-Sun/api-hub<gh_stars>0
/**
* Copyright 2021 Wayne
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.wayne.apihub.model;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.wayne.apihub.modules.common.entity.SqlParam;
import com.wayne.apihub.modules.dataapi.conf.BaseApiConf;
import com.wayne.apihub.modules.datasource.conf.BaseSourceConf;
import io.swagger.v3.oas.annotations.media.Schema;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.ToString;
import java.util.Date;
/**
* @author Wayne
*/
@Data
@ToString
@EqualsAndHashCode
@Schema(name = "BaseDataObject", description = "Base class for DB entity", subTypes = {BaseApiConf.class, BaseSourceConf.class, SqlParam.class})
public class BaseDataObject {
@Schema(description = "Primary Key")
private Long id;
@JsonIgnore
private Integer status;
@JsonIgnore
private Date createTime;
@JsonIgnore
private String createBy;
@JsonIgnore
private Date updateTime;
@JsonIgnore
private String updateBy;
}
|
<reponame>navikt/familie-felles-frontend<gh_stars>1-10
// Enum
export enum AdresseType {
BOSTEDSADRESSE = 'BOSTEDSADRESSE',
MIDLERTIDIG_POSTADRESSE_NORGE = 'MIDLERTIDIG_POSTADRESSE_NORGE',
MIDLERTIDIG_POSTADRESSE_UTLAND = 'MIDLERTIDIG_POSTADRESSE_UTLAND',
POSTADRESSE = 'POSTADRESSE',
POSTADRESSE_UTLAND = 'POSTADRESSE_UTLAND',
UKJENT_ADRESSE = 'UKJENT_ADRESSE',
}
export enum FamilieRelasjonRolle {
BARN = 'BARN',
FAR = 'FAR',
MEDMOR = 'MEDMOR',
MOR = 'MOR',
EKTE = 'EKTE',
}
export enum kjønnType {
KVINNE = 'KVINNE',
MANN = 'MANN',
UKJENT = 'UKJENT',
}
export enum Adressebeskyttelsegradering {
STRENGT_FORTROLIG = 'STRENGT_FORTROLIG',
STRENGT_FORTROLIG_UTLAND = 'STRENGT_FORTROLIG_UTLAND',
FORTROLIG = 'FORTROLIG',
UGRADERT = 'UGRADERT',
}
export const adressebeskyttelsestyper: Record<Adressebeskyttelsegradering, string> = {
STRENGT_FORTROLIG: 'strengt fortrolig',
STRENGT_FORTROLIG_UTLAND: 'strengt fortrolig utland',
FORTROLIG: 'fortrolig',
UGRADERT: 'ugradert',
};
|
/** Minimal shape required for keying: anything carrying a numeric id. */
interface Entity {
  id: number;
}

/** Lookup table of entities keyed by their numeric id. */
export interface EntityMap<T> {
  [id: number]: T;
}

/**
 * Index an array of entities by their id.
 * When two entries share an id, the later entry wins.
 */
export function entityArrayToObject<T extends Entity>(
  array: T[]
): EntityMap<T> {
  return array.reduce((obj: EntityMap<T>, item) => {
    obj[item.id] = item;
    return obj;
  }, {});
}

/**
 * Flatten an entity map back into an array of its values.
 * Uses Object.values directly instead of the original
 * keys -> Number -> index round trip; same result, less code.
 */
export function entityObjectToArray<T>(object: EntityMap<T>): T[] {
  return Object.values(object);
}
|
#!/bin/bash
set -e
# Create a Certificate Signing Request (CSR) for our admission webhook service
# See https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ for more detail
CSR_NAME='demo-csr.kube-exec-controller'
kubectl delete csr $CSR_NAME 2>/dev/null || true
rm -rf server*
# Install cfssl/cfssljson tools to generate the CSR required files
curl -o cfssl -sL https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_darwin_amd64
curl -o cfssljson -sL https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_darwin_amd64
chmod +x cfssl cfssljson
cat demo/csr.json | ./cfssl genkey - | ./cfssljson -bare server
cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
name: $CSR_NAME
spec:
request: $(cat server.csr | base64 | tr -d '\n')
signerName: kubernetes.io/kubelet-serving
usages:
- digital signature
- key encipherment
- server auth
EOF
# Get the above CSR approved, download the issued certificate, and save it to a file
kubectl certificate approve $CSR_NAME
kubectl get csr $CSR_NAME -o jsonpath='{.status.certificate}' | base64 --decode >| server.crt
# Create a Namespace and a K8s Secret object containing the above TLS key-pair
NAMESPACE='kube-exec-controller'
kubectl delete namespace $NAMESPACE 2>/dev/null || true
kubectl create namespace $NAMESPACE
kubectl create secret tls demo-secret --cert=server.crt --key=server-key.pem -n $NAMESPACE
# Apply the demo app (Deployment, Service, and required RBAC objects)
kubectl apply -f demo/app.yaml
# Add the K8s cluster CA cert in our admission webhook configuration and apply it
clusterCA=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}')
webhookConfig=$(cat "demo/admission-webhook.yaml.template" | sed "s/{{CABUNDLE_VALUE}}/$clusterCA/g")
echo "$webhookConfig" | kubectl apply -f -
|
const webpack = require('webpack')
const merge = require('webpack-merge')
const path = require('path')
const config = require('../config')
const webpackBaseConfig = require('./webpack.base.conf')
const HtmlWebpackPlugin = require('html-webpack-plugin')
const CleanWebpackPlugin = require('clean-webpack-plugin')
const FriendlyErrorsPlugin = require('friendly-errors-webpack-plugin')
Object.keys(webpackBaseConfig.entry).forEach(name => {
webpackBaseConfig.entry[name] = [
'./build/dev-client'
].concat(webpackBaseConfig.entry[name])
})
module.exports = merge(webpackBaseConfig, {
output: {
filename: '[name].js',
path: path.resolve(__dirname, '../dist'),
publicPath: '/'
},
devtool: 'inline-source-map',
plugins: [
new CleanWebpackPlugin(),
new webpack.DefinePlugin({
'process.env': config.dev.env
}),
new webpack.HotModuleReplacementPlugin(),
new webpack.NoEmitOnErrorsPlugin(),
...['index'].map(
v =>
new HtmlWebpackPlugin({
statPrefix: 'dev-',
filename: v + '.html',
template: 'src/index.html',
inject: true,
chunks: [v]
})
),
new FriendlyErrorsPlugin()
]
}) |
// Copyright © 2021 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package band
import "go.thethings.network/lorawan-stack/v3/pkg/ttnpb"
const (
// KR_920_923 is the ID of the Korean frequency plan
KR_920_923 = "KR_920_923"
kr920923BeaconFrequency = 923100000
)
var (
kr920923DefaultChannels = []Channel{
{
Frequency: 922100000,
MaxDataRate: ttnpb.DataRateIndex_DATA_RATE_5,
},
{
Frequency: 922300000,
MaxDataRate: ttnpb.DataRateIndex_DATA_RATE_5,
},
{
Frequency: 922500000,
MaxDataRate: ttnpb.DataRateIndex_DATA_RATE_5,
},
}
kr920923DownlinkDRTable = [6][6]ttnpb.DataRateIndex{
{0, 0, 0, 0, 0, 0},
{1, 0, 0, 0, 0, 0},
{2, 1, 0, 0, 0, 0},
{3, 2, 1, 0, 0, 0},
{4, 3, 2, 1, 0, 0},
{5, 4, 3, 2, 1, 0},
}
)
|
#!/bin/bash
# profiles = xccdf_org.ssgproject.content_profile_C2S
# remediation = bash
. $SHARED/auditd_utils.sh
prepare_auditd_test_enviroment
set_parameters_value /etc/audit/auditd.conf "admin_space_left_action" "syslog"
|
#!/usr/bin/env bash
# Attempt to migration from legacy to latest
LEGACY_OUT=`./snabb lwaftr migrate-configuration -f legacy \
program/lwaftr/tests/configdata/legacy.conf`
if [[ "$?" -ne "0" ]]; then
echo "Legacy configuration migration failed (status code != 0)"
echo "$LEGACY_OUT"
exit 1
fi
# Attempt to migrate part way through the chain
V320_OUT=`./snabb lwaftr migrate-configuration -f 3.2.0 \
program/lwaftr/tests/configdata/3.2.0.conf`
if [[ "$?" -ne "0" ]]; then
echo "3.2.0 configuration migration failed (status code != 0)"
echo "$V320_OUT"
exit 1
fi
|
# ================================================================
# Custom user config
# ================================================================
|
<gh_stars>1-10
//
// Copyright 2021 <NAME> <<EMAIL>>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package color
import (
"github.com/DataDrake/flair/escape"
"strconv"
)
// CSI Character Attributes (SGR) - Colors
// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html
// Default Colors
var (
DefaultFG = escape.Sequence{Pre: "39", Post: ""} // DefaultFG resets the foreground color
DefaultBG = escape.Sequence{Pre: "49", Post: ""} // DefaultBG resets the background color
)
// 16 colors
const (
Black Color = iota
Red
Green
Yellow
Blue
Magenta
Cyan
LightGray
DarkGray
LightRed
LightGreen
LightYellow
LightBlue
LightMagenta
LightCyan
White
)
const (
fg = "38" + escape.Separator + "5" + escape.Separator
bg = "48" + escape.Separator + "5" + escape.Separator
)
// Color represents a 256 color mode sequence
type Color uint8
// seq generates the necessary escape Sequence for this color, using the smallest encoding
func (c Color) seq(suffix string, eight, sixteen uint8) escape.Sequence {
var pre string
switch {
case c < 8, c == 9:
pre = strconv.Itoa(int(uint8(c) + eight))
case c < 16:
pre = strconv.Itoa(int(uint8(c) + sixteen - 8))
default:
pre = suffix + strconv.Itoa(int(c))
}
return escape.Sequence{Pre: pre, Post: ""}
}
// FG generates an escape Sequence which will set this color as the Foreground
func (c Color) FG() escape.Sequence {
return c.seq(fg, 30, 90)
}
// BG generates an escape Sequence which will set this color as the Background
func (c Color) BG() escape.Sequence {
return c.seq(bg, 40, 100)
}
|
fn create_new_xkb_state(conn: &Connection, xkb_context: &Context, xkb_device_id: i32) -> (Keymap, State) {
// Retrieve the XKB keymap for the specified device using the XKB context and connection
let keymap_str = xkb::x11::get_keymap(conn, xkb_device_id, xkb::x11::KEYMAP_COMPILE_NO_FLAGS);
let xkb_keymap = Keymap::new_from_string(xkb_context, &keymap_str, xkb::KEYMAP_FORMAT_TEXT_V1, xkb::KEYMAP_COMPILE_NO_FLAGS).expect("Failed to create keymap");
// Initialize the XKB state using the retrieved keymap and the specified device ID
let xkb_state = State::new(&xkb_keymap);
// Return a tuple containing the created XKB keymap and state
(xkb_keymap, xkb_state)
} |
/*
* Copyright 2018 Akashic Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.yggdrash.core.store.datasource;
import io.yggdrash.util.FileUtil;
import org.junit.AfterClass;
import org.junit.Test;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
import static io.yggdrash.TestUtils.randomBytes;
import static org.assertj.core.api.Assertions.assertThat;
public class LevelDbDataSourceTest {
private static final String dbPath = "testOutput";
@AfterClass
public static void destroy() {
FileUtil.recursiveDelete(Paths.get(dbPath));
}
@Test
public void shouldBeUpdateByBatch() {
LevelDbDataSource ds = new LevelDbDataSource(dbPath, "batch-test");
ds.init();
Map<byte[], byte[]> rows = new HashMap<>();
byte[] key = randomBytes(32);
byte[] value = randomBytes(32);
rows.put(key, value);
rows.put(randomBytes(32), randomBytes(32));
rows.put(randomBytes(32), randomBytes(32));
rows.put(randomBytes(32), randomBytes(32));
ds.updateByBatch(rows);
byte[] foundValue = ds.get(key);
assertThat(foundValue).isEqualTo(value);
}
@Test
public void shouldBeReset() {
LevelDbDataSource ds = new LevelDbDataSource(dbPath, "reset-test");
ds.init();
byte[] key = randomBytes(32);
byte[] value = putDummyRow(ds, key);
byte[] foundValue = ds.get(key);
assertThat(foundValue).isEqualTo(value);
ds.reset();
foundValue = ds.get(key);
assertThat(foundValue).isNull();
}
@Test
public void shouldPutSomeThing() {
LevelDbDataSource ds = new LevelDbDataSource(dbPath, "put-test");
ds.init();
byte[] key = randomBytes(32);
byte[] value = putDummyRow(ds, key);
byte[] foundValue = ds.get(key);
assertThat(foundValue).isEqualTo(value);
}
private byte[] putDummyRow(LevelDbDataSource ds, byte[] key) {
byte[] value = randomBytes(32);
ds.put(key, value);
return value;
}
@Test
public void shouldInitialize() {
String dbName = "initial-test";
LevelDbDataSource ds = new LevelDbDataSource(dbPath, dbName);
ds.init();
assertThat(ds).isNotNull();
assertThat(FileUtil.isExists(Paths.get(dbPath, dbName))).isTrue();
}
}
|
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Movie List</title>
<style>
.card{
width: 300px;
height:200px;
padding: 0.5em;
margin: 0.5em;
border: 2px solid #cccccc;
background-color: #cccccc;
font-family: sans-serif;
float: left;
}
.title{
font-size: 1.2em;
font-weight: bold;
padding-bottom: 0.2em;
}
</style>
</head>
<body>
<h1>Movies List</h1>
<div class="card">
<h2 class="title">Spider-Man Homecoming</h2>
<p><strong>Genre:</strong> Action/Adventure</p>
<p><strong>Director:</strong> Jon Watts</p>
<p><strong>Release Year:</strong> 2017</p>
</div>
<div class="card">
<h2 class="title">A Quiet Place</h2>
<p><strong>Genre:</strong> Drama/Thriller</p>
<p><strong>Director:</strong> John Krasinski</p>
<p><strong>Release Year:</strong> 2018</p>
</div>
<div class="card">
<h2 class="title">Black Panther</h2>
<p><strong>Genre:</strong> Action/Adventure</p>
<p><strong>Director:</strong> Ryan Coogler</p>
<p><strong>Release Year:</strong> 2018</p>
</div>
</body>
</html> |
<filename>springcloud-alibaba/shop-order/src/main/java/com/ylesb/service/fallback/ProductServiceFallbackFactory.java
package com.ylesb.service.fallback;
/**
* @title: ProductServiceFallbackFactory
* @projectName springcloud-alibaba
* @description: TODO
* @author White
* @site : [www.ylesb.com]
* @date 2022/1/1216:41
*/
import com.ylesb.domain.Product;
import com.ylesb.service.ProductService;
import feign.hystrix.FallbackFactory;
/**
* @className : ProductServiceFallbackFactory
* @description : [描述说明该类的功能]
* @author : [XuGuangchao]
* @site : [www.ylesb.com]
* @version : [v1.0]
* @createTime : [2022/1/12 16:41]
* @updateUser : [XuGuangchao]
* @updateTime : [2022/1/12 16:41]
* @updateRemark : [描述说明本次修改内容]
*/
//容错工厂类实现接口传递接口调用哪个接口产生的容错类
public class ProductServiceFallbackFactory implements FallbackFactory<ProductService> {
@Override
public ProductService create(Throwable throwable) {
//匿名内部类
return new ProductService() {
@Override
public Product findByPid(Integer pid) {
Product product = new Product();
product.setPid(pid);
product.setPname("出现异常进入容错逻辑");
return product;
}
};
}
}
|
// End-to-end tests: creating each content type (page, file, image, news item,
// folder) through the toolbar as a logged-in editor.
describe('Add Content Tests', () => {
  beforeEach(() => {
    // give a logged in editor and the site root
    cy.autologin();
    cy.visit('/');
    // Wait for the initial API requests so the toolbar is interactive.
    cy.waitForResourceToLoad('@navigation');
    cy.waitForResourceToLoad('@breadcrumbs');
    cy.waitForResourceToLoad('@actions');
    cy.waitForResourceToLoad('@types');
    // NOTE(review): '' presumably waits for the current content object itself —
    // confirm against the waitForResourceToLoad implementation.
    cy.waitForResourceToLoad('');
  });

  it('As editor I can add a page', function () {
    // when I add a page
    cy.get('#toolbar-add').click();
    cy.get('#toolbar-add-document').click();
    cy.get('.documentFirstHeading > .public-DraftStyleDefault-block')
      .type('My Page')
      .get('.documentFirstHeading span[data-text]')
      .contains('My Page');

    // then I a new page has been created
    cy.get('#toolbar-save').click();
    cy.url().should('eq', Cypress.config().baseUrl + '/my-page');
    cy.get('.navigation .item.active').should('have.text', 'My Page');
  });

  it('As editor I can add a page with a text block', function () {
    // when I add a page with a text block
    cy.get('#toolbar-add').click();
    cy.get('#toolbar-add-document').click();
    cy.get('.documentFirstHeading > .public-DraftStyleDefault-block')
      .type('My Page')
      .get('.documentFirstHeading span[data-text]')
      .contains('My Page');
    // Type into the draft-js text block below the title.
    cy.get('.block.inner.text .public-DraftEditor-content')
      .type('This is the text.')
      .get('span[data-text]')
      .contains('This is the text');
    cy.get('#toolbar-save').click();
    cy.url().should('eq', Cypress.config().baseUrl + '/my-page');

    // then a new page with a text block has been added
    cy.get('.navigation .item.active').should('have.text', 'My Page');
  });

  it('As editor I can add a file', function () {
    // when I add a file
    cy.get('#toolbar-add').click();
    cy.get('#toolbar-add-file').click();
    cy.get('input[name="title"]')
      .type('My File')
      .should('have.value', 'My File');
    // Upload a fixture PDF via the file input.
    cy.get('input[id="field-file"]').attachFile('file.pdf', {
      subjectType: 'input',
    });
    cy.get('#toolbar-save').focus().click();

    // then a new file should have been created
    cy.url().should('eq', Cypress.config().baseUrl + '/file.pdf');
    cy.contains('My File');
  });

  it('As editor I can add an image', function () {
    // when I add an image
    cy.get('#toolbar-add').click();
    cy.get('#toolbar-add-image').click();
    cy.get('input[name="title"]')
      .type('My image')
      .should('have.value', 'My image');
    // Read the fixture as base64, convert it to a Blob and attach it to the
    // image field, then check the file name is displayed.
    cy.fixture('image.png', 'base64')
      .then((fc) => {
        return Cypress.Blob.base64StringToBlob(fc);
      })
      .then((fileContent) => {
        cy.get('input#field-image').attachFile(
          { fileContent, fileName: 'image.png', mimeType: 'image/png' },
          { subjectType: 'input' },
        );
        cy.get('#field-image-image').parent().parent().contains('image.png');
      });
    cy.get('#toolbar-save').click();

    // then a new image should have been created
    cy.url().should('eq', Cypress.config().baseUrl + '/image.png');
    cy.contains('My image');
  });

  it('As editor I can add a news item', function () {
    // when I add a news item
    cy.get('#toolbar-add').click();
    cy.get('#toolbar-add-news-item').click();
    cy.get('input[name="title"]')
      .type('My News Item')
      .should('have.value', 'My News Item');
    cy.get('#toolbar-save').click();

    // then a new news item should have been created
    cy.url().should('eq', Cypress.config().baseUrl + '/my-news-item');
    cy.get('.navigation .item.active').should('have.text', 'My News Item');
  });

  it('As editor I can add a folder', function () {
    // when I add a folder
    cy.get('#toolbar-add').click();
    cy.get('#toolbar-add-folder').click();
    cy.get('input[name="title"]')
      .type('My Folder')
      .should('have.value', 'My Folder');
    cy.get('#toolbar-save').click();

    // then a new folder should have been created
    cy.url().should('eq', Cypress.config().baseUrl + '/my-folder');
    cy.get('.navigation .item.active').should('have.text', 'My Folder');
  });
});
|
def is_convex(points):
    """Return True if ``points`` form a convex polygon.

    The vertices must be (x, y) pairs given in traversal order, either
    clockwise or counter-clockwise. Convexity is detected by requiring
    every non-zero cross product of consecutive edge vectors to have the
    same sign; collinear runs of vertices are tolerated.

    The previous implementation compared the wrong vectors and rejected
    any polygon producing a single negative cross product, so it returned
    False even for an axis-aligned square.

    :param points: sequence of at least three (x, y) pairs
    :return: True if convex, False otherwise (including fewer than 3 points)
    """
    n = len(points)
    # A polygon needs at least 3 vertices.
    if n < 3:
        return False

    sign = 0  # sign of the first non-zero cross product seen
    for i in range(n):
        ox, oy = points[i]
        ax, ay = points[(i + 1) % n]   # wrap around at the end
        bx, by = points[(i + 2) % n]
        # z-component of (a - o) x (b - o): turn direction at vertex a.
        cross = (ax - ox) * (by - oy) - (ay - oy) * (bx - ox)
        if cross != 0:
            if sign == 0:
                sign = 1 if cross > 0 else -1
            elif (cross > 0) != (sign > 0):
                # Turn direction changed: the polygon is concave.
                return False

    return True
export const traceSector = sector => (ctx, offset) => {
const { left, top } = sector.getBoundingBox(offset);
ctx.beginPath();
ctx.moveTo(left + sector.radius, top + sector.radius);
ctx.arc(
left + sector.radius,
top + sector.radius,
sector.radius - sector.borderWidth / 2,
(sector.startAngle ?? 0) - Math.PI / 2,
(sector.endAngle ?? Math.PI * 2) - Math.PI / 2,
sector.counterclockwise ?? false
);
ctx.closePath();
return true;
}
|
<reponame>RenukaGurumurthy/Gooru-Core-API
package org.ednovo.gooru.core.exception;
import java.io.Serializable;
/**
 * Simple serializable value object describing an API error: a numeric code
 * plus a human-readable status string. Field names are part of the Java
 * serialization contract, so they must not be renamed casually.
 */
public class ErrorObject implements Serializable {

    /** Serialization version, kept stable for Serializable compatibility. */
    private static final long serialVersionUID = 2324523124118900807L;

    /** Numeric error code. */
    private int code;

    /** Human-readable status text. */
    private String status;

    /**
     * Creates an error object.
     *
     * @param code numeric error code
     * @param status human-readable status text
     */
    public ErrorObject(int code, String status) {
        this.code = code;
        this.status = status;
    }

    /** @return the numeric error code */
    public int getCode() {
        return code;
    }

    /** @param code the numeric error code to set */
    public void setCode(int code) {
        this.code = code;
    }

    /** @return the status text */
    public String getStatus() {
        return status;
    }

    /** @param status the status text to set */
    public void setStatus(String status) {
        this.status = status;
    }
}
|
def add_numbers(x, y):
    """Return the sum of ``x`` and ``y``."""
    return x + y
#!/usr/bin/env bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
#=================================================
# System Required: All
# Description: Python HTTP Server
# Version: 1.0.2
# Author: Toyo
#=================================================
# Interactively collect the HTTP port and the directory to serve, then print a
# confirmation summary. Sets the globals $httpport and $httpfile used by the
# other functions. (User-facing prompts are intentionally kept in Chinese.)
sethttp(){
	# --- Port selection: loop until a numeric port in 1-65535 is entered ---
	while true
	do
		echo -e "请输入要开放的HTTP服务端口 [1-65535]"
		read -e -p "(默认端口: 8000):" httpport
		[[ -z "$httpport" ]] && httpport="8000"
		# expr exits non-zero when the input is not an integer.
		expr ${httpport} + 0 &>/dev/null
		if [[ $? -eq 0 ]]; then
			if [[ ${httpport} -ge 1 ]] && [[ ${httpport} -le 65535 ]]; then
				echo
				echo -e "	端口 : \033[41;37m ${httpport} \033[0m"
				echo
				break
			else
				echo "输入错误, 请输入正确的端口。"
			fi
		else
			echo "输入错误, 请输入正确的端口。"
		fi
	done
	# --- Directory selection (defaults to the current working directory) ---
	echo "请输入要开放的目录(绝对路径)"
	read -e -p "(直接回车, 默认当前文件夹):" httpfile
	if [[ ! -z $httpfile ]]; then
		# Abort when the given path does not exist / is not accessible.
		[[ ! -e $httpfile ]] && echo -e "\033[41;37m [错误] \033[0m 输入的目录不存在 或 当前用户无权限访问, 请检查!" && exit 1
	else
		httpfile=`echo $PWD`
	fi
	# --- Final confirmation before anything is started ---
	echo
	echo "========================"
	echo "	请检查配置是否正确 !"
	echo
	echo -e "	端口	: \033[41;37m ${httpport} \033[0m"
	echo -e "	目录	: \033[41;37m ${httpfile} \033[0m"
	echo "========================"
	echo
	read -e -p "按任意键继续,如有错误,请使用 Ctrl + C 退出." var
}
# Open the chosen port (global $httpport, set by sethttp) for new inbound
# TCP and UDP connections.
iptables_add(){
	iptables -I INPUT -m state --state NEW -m tcp -p tcp --dport ${httpport} -j ACCEPT
	iptables -I INPUT -m state --state NEW -m udp -p udp --dport ${httpport} -j ACCEPT
}
# Remove the firewall rules added by iptables_add. Uses the global $port
# (detected from the running process in stophttp), not $httpport.
iptables_del(){
	iptables -D INPUT -m state --state NEW -m tcp -p tcp --dport ${port} -j ACCEPT
	iptables -D INPUT -m state --state NEW -m udp -p udp --dport ${port} -j ACCEPT
}
# Start the HTTP server in the background after collecting settings.
# Refuses to start when a SimpleHTTPServer instance is already running.
starthttp(){
	PID=`ps -ef | grep SimpleHTTPServer | grep -v grep | awk '{print $2}'`
	[[ ! -z $PID ]] && echo -e "\033[41;37m [错误] \033[0m SimpleHTTPServer 正着运行,请检查 !" && exit 1
	sethttp
	iptables_add
	cd ${httpfile}
	# SimpleHTTPServer is the Python 2 module; output goes to httpserver.log
	# inside the served directory.
	nohup python -m SimpleHTTPServer $httpport >> httpserver.log 2>&1 &
	sleep 2s
	# Re-check the process list to confirm the server actually came up.
	PID=`ps -ef | grep SimpleHTTPServer | grep -v grep | awk '{print $2}'`
	if [[ -z $PID ]]; then
		echo -e "\033[41;37m [错误] \033[0m SimpleHTTPServer 启动失败 !" && exit 1
	else
		# Best-effort external-IP lookup; falls back to a placeholder.
		ip=`curl -m 10 -s http://members.3322.org/dyndns/getip`
		[[ -z "$ip" ]] && ip="VPS_IP"
		echo
		echo "HTTP服务 已启动 !"
		echo -e "浏览器访问,地址: \033[41;37m http://${ip}:${httpport} \033[0m "
		echo
	fi
}
# Stop the running SimpleHTTPServer: detect its PID and listening port,
# remove the matching firewall rules, then kill the process.
stophttp(){
	PID=`ps -ef | grep SimpleHTTPServer | grep -v grep | awk '{print $2}'`
	[[ -z $PID ]] && echo -e "\033[41;37m [错误] \033[0m 没有发现 SimpleHTTPServer 进程运行,请检查 !" && exit 1
	# Recover the port from netstat so iptables_del removes the right rules.
	port=`netstat -lntp | grep ${PID} | awk '{print $4}' | awk -F ":" '{print $2}'`
	iptables_del
	kill -9 ${PID}
	sleep 2s
	# Verify the process is really gone.
	PID=`ps -ef | grep SimpleHTTPServer | grep -v grep | awk '{print $2}'`
	if [[ ! -z $PID ]]; then
		echo -e "\033[41;37m [错误] \033[0m SimpleHTTPServer 停止失败 !" && exit 1
	else
		echo
		echo "HTTP服务 已停止 !"
		echo
	fi
}
# Entry point: dispatch on the first CLI argument. Defaults to "start" when
# no argument is given; "start"/"stop" call starthttp/stophttp respectively.
action=$1
[[ -z $1 ]] && action=start
case "$action" in
	start|stop)
	${action}http
	;;
	*)
	echo "输入错误 !"
	echo "用法: {start|stop}"
	;;
esac
|
<filename>UnSynGAN/utils/patches.py
"""
This software is governed by the CeCILL-B license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL-B
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL-B license and that you accept its terms.
"""
import numpy as np
from itertools import product
from sklearn.feature_extraction.image import extract_patches
def array_to_patches(arr, patch_shape=(3,3,3), extraction_step=1, normalization=False):
    """Extract dense 3-D patches from ``arr`` stacked along the first axis.

    Uses ``numpy.lib.stride_tricks.sliding_window_view`` instead of the
    private sklearn helper ``extract_patches``, which was deprecated and
    removed from scikit-learn; the window layout and ordering are the same.

    Parameters
    ----------
    arr : 3darray
        3-dimensional array of which patches are to be extracted.
    patch_shape : tuple of length arr.ndim
        Shape of the patches to be extracted (a tuple is required; the
        result is reshaped using its three entries).
    extraction_step : integer or tuple of length arr.ndim
        Step size at which extraction shall be performed. If an integer
        is given, the step is uniform in all dimensions.
    normalization : bool
        If True, patches are standardised to zero mean / unit standard
        deviation per position across the patch axis.

    Returns
    -------
    patches : ndarray of shape (n_patches, p_x, p_y, p_z)
    """
    # Normalise the step to one entry per array dimension.
    step = ((extraction_step,) * arr.ndim
            if np.isscalar(extraction_step) else tuple(extraction_step))

    # All dense windows, then subsample window positions by the step.
    windows = np.lib.stride_tricks.sliding_window_view(arr, patch_shape)
    windows = windows[tuple(slice(None, None, s) for s in step)]

    # reshape copies, so `patches` is writable and independent of `arr`.
    patches = windows.reshape(-1, patch_shape[0], patch_shape[1], patch_shape[2])

    if normalization:
        # Out-of-place float arithmetic so integer inputs do not raise, and
        # constant positions (std == 0) do not divide by zero.
        mean = np.mean(patches, axis=0)
        std = np.std(patches, axis=0)
        patches = (patches - mean) / np.where(std == 0, 1, std)

    print('%.2d patches have been extracted' % patches.shape[0])
    return patches
def patches_to_array(patches, array_shape, patch_shape=(3,3,3) ):
#Adapted from 2D reconstruction from sklearn
#https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/feature_extraction/image.py
# SyntaxError: non-default argument follows default argument : exchange "array_shape" and "patch_shape"
patches = patches.reshape(len(patches),*patch_shape)
i_x, i_y, i_z = array_shape
p_x, p_y, p_z = patch_shape
array = np.zeros(array_shape)
# compute the dimensions of the patches array
n_x = i_x - p_x + 1
n_y = i_y - p_y + 1
n_z = i_z - p_z + 1
for p, (i, j, k) in zip(patches, product(range(n_x), range(n_y), range(n_z))):
array[i:i + p_x, j:j + p_y, k:k + p_z] += p
for (i, j, k) in product(range(i_x), range(i_y), range(i_z)):
array[i, j, k] /= float(min(i + 1, p_x, i_x - i) * min(j + 1, p_y, i_y - j) * min(k + 1, p_z, i_z - k))
return array
|
# Shell-profile PATH setup. Note: every block below PREPENDS to PATH, so the
# last assignment in this file ends up with the highest precedence.

# We want /usr/local/bin before /usr/bin
PATH="/usr/local/bin:$PATH"

# Xcode & Developer tools.
#
# https://developer.apple.com/technologies/tools/
# Tools like gcc or make are under this directory. These tools are provided
# by Apple once Xcode is installed.
#
PATH="/Applications/Xcode.app/Contents/Developer/usr/bin:$PATH"

# Ruby.
#
# https://www.ruby-lang.org
# Configures PATH for ruby interpreter.
#
PATH="$HOME/.rvm/bin:$PATH" # Add RVM to PATH for scripting

# MacPorts
#
# https://guide.macports.org/chunked/installing.shell.html
# Configures PATH to allow MacPorts in the shell.
# (Disabled: uncomment to re-enable MacPorts.)
#
#PATH="/opt/local/bin:/opt/local/sbin:$PATH"

# Torch
# Torch machine-learning framework binaries installed under $HOME/torch.
#
PATH="$HOME/torch/install/bin:$PATH"
import Phaser from 'phaser'
// Demo scene: plays the four gem animations from a texture atlas and logs
// the different ways an animation can be serialised to JSON.
export default class ExportJson extends Phaser.Scene {
    preload() {
        // Texture atlas holding every gem animation frame.
        this.load.atlas('gems', '/assets/tests/columns/gems.png', '/assets/tests/columns/gems.json')
    }

    create() {
        // [animation key, last frame index]; frame names are `${key}_NNNN`.
        const gemAnimations = [
            ['diamond', 15],
            ['prism', 6],
            ['ruby', 6],
            ['square', 14],
        ]

        // Register each looping animation and show a sprite playing it,
        // stacked vertically at x = 400.
        gemAnimations.forEach(([key, end], row) => {
            this.anims.create({
                key,
                frames: this.anims.generateFrameNames('gems', { prefix: key + '_', end, zeroPad: 4 }),
                repeat: -1,
            })
            this.add.sprite(400, 100 + row * 100, 'gems').play(key)
        })

        this.add.text(this.scale.width * 0.5, this.scale.height * 0.9, 'Open Console to See Output')
            .setOrigin(0.5)

        // Get a JSON representation of a single animation, or all animations:

        // You can extract the animation:
        const ruby = this.anims.get('ruby')

        // Then pass it to JSON.stringify
        console.log(JSON.stringify(ruby))

        // Or call toJSON directly (this returns an Object)
        console.log(ruby.toJSON())

        // You can also call 'this.anims.toJSON' and pass it the key of the animation you want:
        console.log(JSON.stringify(this.anims.toJSON('ruby')))

        // Or dump out ALL animations in the Animation Manager:
        console.log(JSON.stringify(this.anims))
    }
}
|
const { EventEmitter } = require('events')

// Minimal EventEmitter demo: register one listener, trigger it twice.
const emitter = new EventEmitter()

// Logs a greeting for every 'saySomething' event (Portuguese: "I heard you").
emitter.on('saySomething', (name) => {
  console.log(`Eu ouvi você ${name}`)
})

emitter.emit('saySomething', 'Christian')
emitter.emit('saySomething', 'Thayná')
<reponame>OotinnyoO1/N64Wasm
// ROM entries offered by the emulator front-end. Each entry has the shape
// {url, title}; the list ships empty and the commented sample shows the
// expected format.
var ROMLIST = [
/*
{url:"roms/baserom.us.z64",title:"Game 1"},
*/
];
#!/bin/bash
# For each object name read from objects-no-namespace-10.txt, look up its
# rdfs:label triple in yagoLabels.ttl; the raw triple is appended to
# label-triples.txt and the bare label text to labels.txt.
while read p; do
	OBJECT=$p
	# First grep narrows by object name, second keeps only label triples.
	triple=$(grep "$p" yagoLabels.ttl | grep rdfs:label)
	echo $triple >> label-triples.txt
	# The label is the text between the first pair of double quotes.
	label=$(echo $triple | cut -d '"' -f 2) # | sed 's/@.*$//' | sed 's/^.//' | sed 's/.$//')
	echo $label >> labels.txt
done < objects-no-namespace-10.txt
|
#!/bin/bash
# Build the project and publish the production bundle into the serve
# directory. Aborts on the first failing command (set -e), so a failed
# test run or build never wipes the currently served content.
set -e

echo "Starting deploy"

# Building the library (tests first so a broken build is never deployed).
npm run test
npm run build

CURRENTDIR=~/blindfold2
SERVEDIR=~/serve_content/blindfold2

cd "$CURRENTDIR"

# Recreate the serve directory from scratch so stale files are removed.
# -p keeps the mkdir idempotent if rm was interrupted on a previous run.
rm -rf "$SERVEDIR"
mkdir -p "$SERVEDIR"

# Quoted expansions keep the copies safe should the paths ever gain spaces.
cp -r "$CURRENTDIR/serve_content/prod" "$SERVEDIR"
cp -r "$CURRENTDIR/serve_content/shared" "$SERVEDIR"
cp "$CURRENTDIR/serve_content/index_prod.html" "$SERVEDIR/index.html"

echo "Completed deploy"
|
<reponame>newonexd/jDesign<gh_stars>1-10
package behavior.observer;
/**
 * Base class for observers in the observer-pattern demo: holds a reference
 * to the observed {@link Subject} and reacts to changes via {@link #update()}.
 */
public abstract class Observer {
    /** The subject this observer is attached to; assigned by subclasses. */
    protected Subject subject;

    /** Called by the subject when its message changes. */
    public abstract void update();
}
/** Concrete observer that prints the subject's current message. */
class Observer1 extends Observer {

    /** Display name used when logging received messages. */
    private String name;

    /**
     * Creates the observer and immediately registers it with the subject.
     *
     * @param name display name for log output
     * @param subject subject to observe; this constructor calls
     *                {@code subject.attach(this)} to subscribe
     */
    public Observer1(String name, Subject subject) {
        this.name = name;
        this.subject = subject;
        this.subject.attach(this);
    }

    @Override
    public void update() {
        // Prints "<name>收到消息:<message>" ("received message:" in Chinese);
        // the runtime string is intentionally left untranslated.
        System.out.println(this.name + "收到消息:" + this.subject.getMessage());
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.reactive.streams.api;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.camel.CamelContext;
import org.apache.camel.spi.FactoryFinder;
import org.apache.camel.util.ObjectHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This is the main entry-point for getting Camel streams associate to reactive-streams endpoints.
*
* It allows to retrieve the {@link CamelReactiveStreamsService} to access Camel streams.
* This class returns the default implementation of the service unless the client requests a named service,
*/
public final class CamelReactiveStreams {
    private static final Logger LOG = LoggerFactory.getLogger(CamelReactiveStreams.class);

    // Per-CamelContext record of the service name first requested ("" for the
    // default service). NOTE(review): entries are never removed, so JVMs that
    // create and discard many CamelContexts may retain stale keys — verify.
    private static Map<CamelContext, String> serviceNames = new ConcurrentHashMap<>();

    // Static utility class: no instances.
    private CamelReactiveStreams() {
    }

    /** Returns the default reactive streams service for the given context. */
    public static CamelReactiveStreamsService get(CamelContext context) {
        return get(context, null);
    }

    /**
     * Returns the reactive streams service bound to the given context,
     * resolving and registering it on first use.
     *
     * @param context the Camel context
     * @param serviceName named service to use, or null for the default
     * @throws IllegalArgumentException if serviceName is empty, or a
     *         different service name was already bound to this context
     * @throws IllegalStateException if the service cannot be added to the context
     */
    public static CamelReactiveStreamsService get(CamelContext context, String serviceName) {
        if (serviceName != null && serviceName.trim().length() == 0) {
            throw new IllegalArgumentException("the service name cannot be an empty String");
        }

        // Null means "default"; tracked internally as the empty string.
        String lookupName = serviceName != null ? serviceName : "";

        // The first caller for this context resolves the service and adds it
        // to the context as a managed service; later callers skip this and
        // are only validated against the recorded name below.
        serviceNames.computeIfAbsent(context, ctx -> {
            CamelReactiveStreamsService service = context.hasService(CamelReactiveStreamsService.class);
            if (service == null) {
                service = resolveReactiveStreamsService(context, serviceName);
                try {
                    context.addService(service, true, true);
                } catch (Exception ex) {
                    throw new IllegalStateException("Cannot add the CamelReactiveStreamsService to the Camel context", ex);
                }
            }

            return lookupName;
        });

        if (!ObjectHelper.equal(serviceNames.get(context), lookupName)) {
            // only a single implementation of the CamelReactiveStreamService can be present per Camel context
            throw new IllegalArgumentException("Cannot use two different implementations of CamelReactiveStreamsService in the same CamelContext: "
                    + "existing service name [" + serviceNames.get(context) + "] - requested [" + lookupName + "]");
        }

        // Return the instance actually registered with the context.
        return context.hasService(CamelReactiveStreamsService.class);
    }

    /**
     * Resolves the service implementation: by name from the registry (falling
     * back to the factory), or — when no name is given — a unique registry
     * binding, else the factory-discovered default.
     */
    private static CamelReactiveStreamsService resolveReactiveStreamsService(CamelContext context, String serviceName) {
        CamelReactiveStreamsService service = null;

        if (serviceName != null) {
            // lookup in the registry
            service = context.getRegistry().lookupByNameAndType(serviceName, CamelReactiveStreamsService.class);

            if (service == null) {
                service = resolveServiceUsingFactory(context, serviceName);
            }
        } else {
            Set<CamelReactiveStreamsService> set = context.getRegistry().findByType(CamelReactiveStreamsService.class);
            if (set.size() == 1) {
                service = set.iterator().next();
            }

            if (service == null) {
                LOG.info("Using default reactive stream service");
                service = resolveServiceUsingFactory(context, null);
            }
        }

        return service;
    }

    /**
     * Instantiates the service class referenced in the factory-finder
     * descriptor under META-INF/services; a null name selects
     * "default-service".
     */
    @SuppressWarnings("unchecked")
    private static CamelReactiveStreamsService resolveServiceUsingFactory(CamelContext context, String name) {
        if (name == null) {
            name = "default-service";
        }

        String path = "META-INF/services/org/apache/camel/reactive-streams/";
        Class<? extends CamelReactiveStreamsService> serviceClass = null;

        try {
            FactoryFinder finder = context.getFactoryFinder(path);
            LOG.trace("Using FactoryFinder: {}", finder);

            serviceClass = (Class<? extends CamelReactiveStreamsService>) finder.findClass(name);
            // NOTE(review): Class.newInstance() is deprecated since Java 9;
            // getDeclaredConstructor().newInstance() is the usual replacement.
            return serviceClass.newInstance();
        } catch (ClassNotFoundException e) {
            throw new IllegalStateException("Class referenced in '" + path + name + "' not found", e);
        } catch (Exception e) {
            throw new IllegalStateException("Unable to create the reactive stream service defined in '" + path + name + "'", e);
        }
    }
}
|
<filename>console/src/boost_1_78_0/libs/lexical_cast/test/typedefed_wchar_test.cpp
// Unit test for boost::lexical_cast.
//
// See http://www.boost.org for most recent version, including documentation.
//
// Copyright <NAME>, 2011-2021.
//
// Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt).
#include <boost/config.hpp>
#include <boost/static_assert.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/date_time/gregorian/gregorian.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
// Exercises the boost::date_time wide-character (wchar_t) parsers with a
// default locale and empty inputs; the parsed value is discarded. The point
// of this translation unit is that these templates instantiate and link.
void parseDate()
{
    std::locale locale;
    boost::date_time::format_date_parser<boost::gregorian::date, wchar_t> parser(L"", locale);
    boost::date_time::special_values_parser<boost::gregorian::date, wchar_t> svp;

    boost::gregorian::date date = parser.parse_date(L"", L"", svp);
    (void)date;  // silence unused-variable warnings
}
int main()
{
    parseDate();

    // NOTE(review): this returns 1 (true) when the wide-string cast succeeds,
    // i.e. a non-zero process exit status; presumably the boost test is
    // judged by compiling/running without throwing — verify the harness
    // before "fixing" the return value.
    return ::boost::lexical_cast<int>(L"1000") == 1000;
}
|
<reponame>arthurflor23/spelling-correction
"""Dataset reader and process"""
import os
import html
import string
import numpy as np
import xml.etree.ElementTree as ET
from glob import glob
from data import preproc as pp
class Dataset():
    """Reader/preprocessor for the supported spelling-correction corpora.

    Attributes:
        source: base path of the corpus (any file extension stripped).
        partitions: the three partition names used throughout.
        dataset: dict mapping partition name -> list of preprocessed sentences.
        size: dict with per-partition and 'total' sentence counts.
    """

    def __init__(self, source):
        self.source = os.path.splitext(source)[0]
        self.partitions = ['train', 'valid', 'test']
        self.dataset = dict()
        self.size = {'total': 0}

        for pt in self.partitions:
            self.size[pt] = 0

    def read_lines(self, maxlen):
        """Read sentences from dataset and preprocess"""

        name = os.path.basename(self.source)
        print(f"The {name} dataset will be transformed...")

        # Dispatch to the reader named after the corpus (e.g. _iam, _rimes).
        dataset = getattr(self, f"_{name}")()

        # Readers return either a flat list or a dict of partitions; flatten.
        if not isinstance(dataset, list):
            dataset = dataset['train'] + dataset['valid'] + dataset['test']

        # Expand to multigrams, cut to maxlen, normalise, then drop
        # punctuation-only / too-short sentences.
        dataset = [y for x in dataset for y in pp.generate_multigrams(x)]
        dataset = [y for x in dataset for y in pp.split_by_max_length(x, maxlen)]
        dataset = [pp.text_standardize(x) for x in dataset]
        dataset = [x for x in dataset if self.check_text(x)]

        # Deduplicate (set) and shuffle before re-partitioning 90/10.
        dataset = list(set(dataset))
        np.random.shuffle(dataset)

        index = int(len(dataset) * 0.1)
        self.dataset['train'] = dataset[index:]
        self.dataset['valid'] = dataset[:index]
        # NOTE(review): test overlaps valid (first 32 items) — presumably
        # intentional, per the original comment below.
        self.dataset['test'] = dataset[:32]  # just a sample

        del dataset

        for pt in self.partitions:
            self.size[pt] = len(self.dataset[pt])
            self.size['total'] += self.size[pt]

    def _bea2019(self):
        """BEA2019 dataset reader"""

        basedir = os.path.join(self.source, "m2")
        m2_list = next(os.walk(basedir))[2]
        lines = []

        # Every .m2 file in the folder contributes corrected sentences.
        for m2_file in m2_list:
            lines.extend(read_from_m2(os.path.join(basedir, m2_file)))

        return lines

    def _bentham(self):
        """Bentham dataset reader"""

        source = os.path.join(self.source, "BenthamDatasetR0-GT")
        pt_path = os.path.join(source, "Partitions")

        paths = {"train": open(os.path.join(pt_path, "TrainLines.lst")).read().splitlines(),
                 "valid": open(os.path.join(pt_path, "ValidationLines.lst")).read().splitlines(),
                 "test": open(os.path.join(pt_path, "TestLines.lst")).read().splitlines()}

        transcriptions = os.path.join(source, "Transcriptions")
        gt = os.listdir(transcriptions)
        gt_dict, dataset = dict(), dict()

        # Build id -> text, unescaping HTML entities and dropping <gap/> marks.
        for index, x in enumerate(gt):
            text = " ".join(open(os.path.join(transcriptions, x)).read().splitlines())
            text = html.unescape(text).replace("<gap/>", "")
            gt_dict[os.path.splitext(x)[0]] = " ".join(text.split())

        for i in self.partitions:
            dataset[i] = [gt_dict[x] for x in paths[i]]

        return dataset

    def _conll13(self):
        """CONLL13 dataset reader"""

        m2_file = os.path.join(self.source, "revised", "data", "official-preprocessed.m2")
        return read_from_m2(m2_file)

    def _conll14(self):
        """CONLL14 dataset reader"""

        m2_file = os.path.join(self.source, "alt", "official-2014.combined-withalt.m2")
        return read_from_m2(m2_file)

    def _google(self):
        """
        Google 1-Billion dataset reader.
        In this project, the google dataset only get 1M data from English and French partitions.
        """

        basedir = os.path.join(self.source)
        m2_list = next(os.walk(basedir))[2]
        lines_en, lines_fr = [], []

        # Pick the English 2010 shard and the French 2009 shard; dedupe with
        # set() and reverse the (arbitrary) order.
        for m2_file in m2_list:
            if "2010" in m2_file and ".en" in m2_file:
                with open(os.path.join(basedir, m2_file)) as f:
                    lines_en = list(set([line for line in f]))[::-1]
            elif "2009" in m2_file and ".fr" in m2_file:
                with open(os.path.join(basedir, m2_file)) as f:
                    lines_fr = list(set([line for line in f]))[::-1]

        # English and french will be 1% samples.
        lines_en = lines_en[:int(len(lines_en) * 0.01)]
        lines_fr = lines_fr[:int(len(lines_fr) * 0.01)]

        return (lines_en + lines_fr)

    def _iam(self):
        """IAM dataset reader"""

        pt_path = os.path.join(self.source, "largeWriterIndependentTextLineRecognitionTask")

        paths = {"train": open(os.path.join(pt_path, "trainset.txt")).read().splitlines(),
                 "valid": open(os.path.join(pt_path, "validationset1.txt")).read().splitlines(),
                 "test": open(os.path.join(pt_path, "testset.txt")).read().splitlines()}

        lines = open(os.path.join(self.source, "ascii", "lines.txt")).read().splitlines()
        gt_dict, dataset = dict(), dict()

        for line in lines:
            # Skip blank lines and '#' comment lines in lines.txt.
            if (not line or line[0] == "#"):
                continue

            split = line.split()

            # Keep only lines flagged "ok" by the segmentation; the text
            # starts at field 8 and uses '|' as the word separator.
            if split[1] == "ok":
                gt_dict[split[0]] = " ".join(split[8::]).replace("|", " ")

        for i in self.partitions:
            dataset[i] = [gt_dict[x] for x in paths[i] if x in gt_dict.keys()]

        return dataset

    def _rimes(self):
        """Rimes dataset reader"""

        def generate(xml, paths, validation=False):
            # Collect every <Line Value="..."> from each page of the XML.
            xml = ET.parse(os.path.join(self.source, xml)).getroot()
            dt = []

            for page_tag in xml:
                for i, line_tag in enumerate(page_tag.iter("Line")):
                    text = html.unescape(line_tag.attrib['Value'])
                    dt.append(" ".join(text.split()))

            if validation:
                # Split the training file 90/10 into train/valid.
                index = int(len(dt) * 0.9)
                paths['valid'] = dt[index:]
                paths['train'] = dt[:index]
            else:
                paths['test'] = dt

        dataset, paths = dict(), dict()
        generate("training_2011.xml", paths, validation=True)
        generate("eval_2011_annotated.xml", paths, validation=False)

        for i in self.partitions:
            dataset[i] = [x for x in paths[i]]

        return dataset

    def _saintgall(self):
        """Saint Gall dataset reader"""

        pt_path = os.path.join(self.source, "sets")

        paths = {"train": open(os.path.join(pt_path, "train.txt")).read().splitlines(),
                 "valid": open(os.path.join(pt_path, "valid.txt")).read().splitlines(),
                 "test": open(os.path.join(pt_path, "test.txt")).read().splitlines()}

        lines = open(os.path.join(self.source, "ground_truth", "transcription.txt")).read().splitlines()
        gt_dict = dict()

        for line in lines:
            split = line.split()
            # '-' joins characters, '|' separates words in the ground truth.
            split[1] = split[1].replace("-", "").replace("|", " ")
            gt_dict[split[0]] = split[1]

        img_path = os.path.join(self.source, "data", "line_images_normalized")
        dataset = dict()

        for i in self.partitions:
            dataset[i] = []

            # Each set entry is a page id; expand it to its line images and
            # map each line image back to its transcription.
            for line in paths[i]:
                glob_filter = os.path.join(img_path, f"{line}*")
                img_list = [x for x in glob(glob_filter, recursive=True)]

                for line in img_list:
                    line = os.path.splitext(os.path.basename(line))[0]
                    dataset[i].append(gt_dict[line])

        return dataset

    def _washington(self):
        """Washington dataset reader"""

        pt_path = os.path.join(self.source, "sets", "cv1")

        paths = {"train": open(os.path.join(pt_path, "train.txt")).read().splitlines(),
                 "valid": open(os.path.join(pt_path, "valid.txt")).read().splitlines(),
                 "test": open(os.path.join(pt_path, "test.txt")).read().splitlines()}

        lines = open(os.path.join(self.source, "ground_truth", "transcription.txt")).read().splitlines()
        gt_dict, dataset = dict(), dict()

        for line in lines:
            split = line.split()
            # Undo the corpus' escape codes (s_pt = '.', s_cm = ',', ...).
            split[1] = split[1].replace("-", "").replace("|", " ")
            split[1] = split[1].replace("s_pt", ".").replace("s_cm", ",")
            split[1] = split[1].replace("s_mi", "-").replace("s_qo", ":")
            split[1] = split[1].replace("s_sq", ";").replace("s_et", "V")
            split[1] = split[1].replace("s_bl", "(").replace("s_br", ")")
            split[1] = split[1].replace("s_qt", "'").replace("s_GW", "G.W.")
            split[1] = split[1].replace("s_", "")
            gt_dict[split[0]] = split[1]

        for i in self.partitions:
            # The comprehension variable shadows the partition name `i`;
            # harmless in Python 3, where comprehensions have their own scope.
            dataset[i] = [gt_dict[i] for i in paths[i]]

        return dataset

    @staticmethod
    def check_text(text):
        """Checks if the text has more characters instead of punctuation marks"""

        strip_punc = text.strip(string.punctuation).strip()
        no_punc = text.translate(str.maketrans("", "", string.punctuation)).strip()

        # Reject very short strings and strings that are (almost) entirely
        # punctuation.
        if len(text) <= 1 or len(strip_punc) <= 1 or len(no_punc) <= 1:
            return False

        return True
def read_from_txt(file_name):
    """Read a partitioned label/prediction file into train/valid/test dicts.

    Each non-empty line starts with a tag — TR_L/TR_P, VA_L/VA_P or
    TE_L/TE_P — where *_L carries a ground-truth ('gt') sentence and the
    following *_P line carries the matching prediction ('dt'). A 'gt'
    line pushes an empty 'dt' placeholder which the next *_P line fills.

    :param file_name: path of the UTF-8 text file to read
    :return: dict {'train'|'valid'|'test': {'dt': [...], 'gt': [...]}}
    """
    partitions = {"train": {"dt": [], "gt": []},
                  "valid": {"dt": [], "gt": []},
                  "test": {"dt": [], "gt": []}}
    prefix_to_partition = {"TR": "train", "VA": "valid", "TE": "test"}

    with open(file_name, "r", encoding="utf-8") as f:
        for raw in f.read().split("\n"):
            fields = raw.split()
            if not fields:
                continue

            sentence = " ".join(fields[1:])
            prefix, _, kind = fields[0].partition("_")
            target = prefix_to_partition.get(prefix)
            if target is None:
                continue

            if kind == "L":
                partitions[target]["gt"].append(sentence)
                partitions[target]["dt"].append("")
            elif kind == "P":
                partitions[target]["dt"][-1] = sentence

    return partitions
def read_from_m2(file_name):
    """
    Read the M2 file and return the sentences with the corrections.
    Tool to apply text error correction annotations in m2 format, available:
    URL: https://github.com/samueljamesbell/m2-correct
    """
    with open(file_name, "r", encoding="utf-8") as f:
        lines = f.read().split("\n")
        sentences, corrections = parse(lines)

    corrected = []

    # Apply each sentence's corrections and keep only non-empty results.
    # NOTE(review): c[0] selects annotator id 0; parse() only guarantees that
    # key when a sentence has no annotations at all, so a corpus annotated
    # exclusively by other annotator ids would raise KeyError — verify.
    for s, c in zip(sentences, corrections):
        coor = apply_corrections(s, c[0])

        if len(coor) > 0:
            corrected.append(coor)

    return corrected
def apply_corrections(sentence, corrections):
    """Return a new sentence with the given m2 corrections applied.

    ``sentence`` is a whitespace-separated tokenised string; ``corrections``
    is a list of correction tuples. Corrections are applied in order while
    tracking the token-index shift introduced by earlier edits.
    """
    tokens = sentence.split()
    shift = 0

    for correction in corrections:
        tokens, shift = _apply_correction(tokens, correction, shift)

    return " ".join(tokens)
def _apply_correction(tokens, correction, offset):
"""Apply a single correction to a list of tokens"""
start_token_offset, end_token_offset, _, insertion = correction
to_insert = insertion[0].split(" ")
end_token_offset += (len(to_insert) - 1)
to_insert_filtered = [t for t in to_insert if t != ""]
head = tokens[:start_token_offset + offset]
tail = tokens[end_token_offset + offset:]
new_tokens = head + to_insert_filtered + tail
new_offset = len(to_insert_filtered) - (end_token_offset - start_token_offset) + offset
return new_tokens, new_offset
"""
The `parse` and `paragraphs` functions are modifications
of code from the NUS M2 scorer (GNU GPL v2.0 license), available here:
https://github.com/nusnlp/m2scorer/
Below is the original preamble:
This file is part of the NUS M2 scorer.
The NUS M2 scorer is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The NUS M2 scorer is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
def parse(lines):
    """Parse M2-format lines into ``(source_sentences, gold_edits)``.

    Each paragraph starts with one or more "S " source lines followed by
    "A " annotation lines shaped
    ``A start end|||etype|||corrections|||...|||annotator``.
    Returns one entry per source sentence; each gold_edits entry maps
    annotator id -> list of (start, end, original, corrections) tuples.
    (Adapted from the NUS M2 scorer; see the module notice below.)
    """
    source_sentences = []
    gold_edits = []

    for item in paragraphs(lines):
        sentence = [line[2:].strip() for line in item if line.startswith("S ")]
        assert sentence != []

        annotations = {}

        for line in item[1:]:
            # Ignore identification and source lines within the paragraph.
            if line.startswith("I ") or line.startswith("S "):
                continue

            assert line.startswith("A ")

            line = line[2:]
            fields = line.split("|||")
            start_offset = int(fields[0].split()[0])
            end_offset = int(fields[0].split()[1])
            etype = fields[1]

            # "noop" annotations carry no span; flag them with -1/-1 so the
            # range filter below drops them.
            if etype == "noop":
                start_offset = -1
                end_offset = -1

            # "-NONE-" encodes an empty correction (deletion).
            corrections = [c.strip() if c != "-NONE-" else "" for c in fields[2].split("||")]
            # Note: the slice runs over ALL the paragraph's source tokens
            # joined together, not only the current sentence's.
            original = " ".join(" ".join(sentence).split()[start_offset:end_offset])
            annotator = int(fields[5])

            if annotator not in annotations.keys():
                annotations[annotator] = []

            annotations[annotator].append((start_offset, end_offset, original, corrections))

        tok_offset = 0

        # Attribute each annotator's edits to the sentence whose cumulative
        # token range contains them.
        for this_sentence in sentence:
            tok_offset += len(this_sentence.split())
            source_sentences.append(this_sentence)

            this_edits = {}

            for annotator, annotation in annotations.items():
                this_edits[annotator] = [edit for edit in annotation if edit[0] <=
                                         tok_offset and edit[1] <= tok_offset and edit[0] >= 0 and edit[1] >= 0]

            # Guarantee at least one (empty) annotator entry per sentence.
            if len(this_edits) == 0:
                this_edits[0] = []

            gold_edits.append(this_edits)

    return (source_sentences, gold_edits)
def paragraphs(lines):
    """Group ``lines`` into runs separated by empty strings.

    Yields each non-empty run as a list of lines. Note: a trailing run
    that is not followed by an empty line is NOT yielded — behaviour of
    the original NUS M2-scorer code, preserved here because parse()
    relies on paragraph-terminating blank lines.
    """
    block = []
    for line in lines:
        if line != "":
            block.append(line)
            continue
        if block:
            yield block
        block = []
|
<filename>sphinx-sources/Examples/Commands/plotresults.py
from LightPipes import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
# --- Simulation parameters ---
GridSize = 10*mm
GridDimension = 128
lambda_ = 1000*nm  # lambda_ is used because lambda is a Python build-in function.
R = 2.5*mm          # aperture radius
xs = 0*mm; ys = 0*mm  # aperture centre
T = 0.8             # aperture transmission

# Start a plane wave and pass it through a Gaussian aperture.
Field = Begin(GridSize, lambda_, GridDimension)
Field = GaussAperture(R, xs, ys, T, Field)

# Downsample the grid 4x before plotting to keep the surface plot light.
NewGridDimension = int(GridDimension/4)
Field = Interpol(GridSize, NewGridDimension, 0, 0, 0, 1, Field)
I = Intensity(0, Field)
I = np.array(I)

# plot cross section through the grid centre, x-axis in millimetres:
x = []
for i in range(NewGridDimension):
    x.append((-GridSize/2 + i*GridSize/NewGridDimension)/mm)
plt.plot(x, I[int(NewGridDimension/2)])
plt.xlabel('x [mm]'); plt.ylabel('Intensity [a.u.]')
plt.show()

# 3d surface plot of the full intensity distribution (axes in grid indices):
X = range(NewGridDimension)
Y = range(NewGridDimension)
X, Y = np.meshgrid(X, Y)
fig = plt.figure(figsize=(10, 6))
ax = plt.axes(projection='3d')
ax.plot_surface(X, Y, I,
                rstride=1,
                cstride=1,
                cmap='rainbow',
                linewidth=0.0,
                )
ax.set_zlabel('Intensity [a.u.]')
plt.show()
|
// Doxygen-generated search index fragment: each entry maps a lowercased
// search token to its display name and documentation anchor(s).
// NOTE(review): appears to be auto-generated output — do not edit by hand.
var searchData=
[
  ['ecc_5fdisabled',['ECC_DISABLED',['../core__ca_8h.html#ga06d94c0eaa22d713636acaff81485409a48ce2ec8ec49f0167a7d571081a9301f',1,'core_ca.h']]],
  ['ecc_5fenabled',['ECC_ENABLED',['../core__ca_8h.html#ga06d94c0eaa22d713636acaff81485409af0e84d9540ed9d79f01caad9841d414d',1,'core_ca.h']]],
  ['ethernet_5firqn',['Ethernet_IRQn',['../ARMCA9_8h.html#a7e1129cd8a196f4284d41db3e82ad5c8a58692bf577b8a17ec79fc8472d56ff05',1,'ARMCA9.h']]],
  ['execute',['EXECUTE',['../core__ca_8h.html#ga2fe1157deda82e66b9a1b19772309b63a887d2cbfd9131de5cc3745731421b34b',1,'core_ca.h']]]
];
|
def is_subset(string1, string2):
    """Return True if every character of ``string1`` also occurs in ``string2``.

    Bug fix: the original called ``string1[i].issubset(string2)``, but ``str``
    has no ``issubset`` method, so the function raised ``AttributeError`` for
    the string inputs its parameter names describe. The subset test is now
    performed on character sets.

    An empty ``string1`` is vacuously a subset of anything.
    """
    return set(string1) <= set(string2)
#!/bin/bash
# Launch the Django dev server under the project virtualenv, teeing all
# output to a timestamped log file under ./logs.
#
# Fixes: quote every expansion so paths containing spaces work, and use
# ${BASH_SOURCE[0]} (the array element) rather than the bare array name.
set -u

BASEDIR=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")

mkdir -p "${BASEDIR}/logs"

# Log file name, e.g. 2024_01_31-12_00_00.log
log_file=$(date "+%Y_%m_%d-%H_%M_%S")

echo '[*] Backend has been deployed.'

# Run the server; capture stdout and stderr to the log while echoing them.
"${BASEDIR}/env/bin/python" "${BASEDIR}/manage.py" runserver 2>&1 | tee "${BASEDIR}/logs/${log_file}.log"
|
<reponame>Bernardinhouessou/Projets_Autres
/* Copyright (c) 2001-2014, The HSQL Development Group
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of the HSQL Development Group nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.hsqldb;
import org.hsqldb.error.Error;
import org.hsqldb.error.ErrorCode;
import org.hsqldb.persist.CachedObject;
import org.hsqldb.persist.PersistentStore;
/**
* Manages rows involved in transactions
*
* @author <NAME> (<EMAIL>)
* @version 2.3.0
* @since 2.0.0
*/
public class TransactionManager2PL extends TransactionManagerCommon
implements TransactionManager {

    public TransactionManager2PL(Database db) {
        database = db;
        lobSession = database.sessionManager.getSysLobSession();
        // Two-phase-locking model: concurrency is controlled via table locks.
        txModel = LOCKS;
    }

    public long getGlobalChangeTimestamp() {
        return globalChangeTimestamp.get();
    }

    // 2PL keeps a single row version: no MVCC / multi-version rows.
    public boolean isMVRows() {
        return false;
    }

    public boolean isMVCC() {
        return false;
    }

    public int getTransactionControl() {
        return LOCKS;
    }

    public void setTransactionControl(Session session, int mode) {
        super.setTransactionControl(session, mode);
    }

    public void completeActions(Session session) {
        endActionTPL(session);
    }

    public boolean prepareCommitActions(Session session) {
        session.actionTimestamp = getNextGlobalChangeTimestamp();

        return true;
    }

    /**
     * Commits the session's transaction under the write lock: stamps the
     * commit timestamp, commits each queued RowAction, adjusts lob usage,
     * persists, and releases table locks via endTransactionTPL.
     * Returns false (no commit) if the transaction was flagged for abort.
     */
    public boolean commitTransaction(Session session) {
        if (session.abortTransaction) {
            return false;
        }

        writeLock.lock();

        try {
            int limit = session.rowActionList.size();

            // new actionTimestamp used for commitTimestamp
            session.actionTimestamp = getNextGlobalChangeTimestamp();
            session.transactionEndTimestamp = session.actionTimestamp;

            endTransaction(session);

            for (int i = 0; i < limit; i++) {
                RowAction action = (RowAction) session.rowActionList.get(i);

                action.commit(session);
            }

            adjustLobUsage(session);
            persistCommit(session);
            endTransactionTPL(session);
        } finally {
            writeLock.unlock();
        }

        session.tempSet.clear();

        return true;
    }

    /**
     * Rolls back the whole transaction (all actions since the transaction
     * timestamp), then releases table locks under the write lock.
     */
    public void rollback(Session session) {
        session.abortTransaction = false;
        session.actionTimestamp = getNextGlobalChangeTimestamp();
        session.transactionEndTimestamp = session.actionTimestamp;

        rollbackPartial(session, 0, session.transactionTimestamp);
        endTransaction(session);
        writeLock.lock();

        try {
            endTransactionTPL(session);
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * Rolls back to the given savepoint index, discarding any later
     * savepoints and their timestamps.
     */
    public void rollbackSavepoint(Session session, int index) {
        long timestamp = session.sessionContext.savepointTimestamps.get(index);
        Integer oi = (Integer) session.sessionContext.savepoints.get(index);
        int start = oi.intValue();

        // Drop all savepoints recorded after the target one.
        while (session.sessionContext.savepoints.size() > index + 1) {
            session.sessionContext.savepoints.remove(
                session.sessionContext.savepoints.size() - 1);
            session.sessionContext.savepointTimestamps.removeLast();
        }

        rollbackPartial(session, start, timestamp);
    }

    public void rollbackAction(Session session) {
        rollbackPartial(session, session.actionIndex,
                        session.actionStartTimestamp);
        endActionTPL(session);
    }

    /**
     * rollback the row actions from start index in list and
     * the given timestamp
     */
    public void rollbackPartial(Session session, int start, long timestamp) {
        int limit = session.rowActionList.size();

        if (start == limit) {
            return;
        }

        // Undo in reverse order of execution.
        for (int i = limit - 1; i >= start; i--) {
            RowAction action = (RowAction) session.rowActionList.get(i);

            if (action == null || action.type == RowActionBase.ACTION_NONE
                    || action.type == RowActionBase.ACTION_DELETE_FINAL) {
                continue;
            }

            Row row = action.memoryRow;

            if (row == null) {
                row = (Row) action.store.get(action.getPos(), false);
            }

            if (row == null) {
                continue;
            }

            action.rollback(session, timestamp);

            int type = action.mergeRollback(session, timestamp, row);

            action.store.rollbackRow(session, row, type, txModel);
        }

        session.rowActionList.setSize(start);
    }

    /**
     * Registers a delete action for the row and removes it from the store
     * immediately (2PL deletes eagerly; rollback restores the row).
     */
    public RowAction addDeleteAction(Session session, Table table,
                                     PersistentStore store, Row row,
                                     int[] colMap) {

        RowAction action;

        // Synchronize on the row while attaching the action to it.
        synchronized (row) {
            action = RowAction.addDeleteAction(session, table, row, colMap);
        }

        session.rowActionList.add(action);
        store.delete(session, row);

        row.rowAction = null;

        return action;
    }

    /**
     * Registers the insert action already attached to the row and indexes
     * the row. A missing action indicates an internal error.
     */
    public void addInsertAction(Session session, Table table,
                                PersistentStore store, Row row,
                                int[] changedColumns) {

        RowAction action = row.rowAction;

        if (action == null) {

            /*
            System.out.println("null insert action " + session + " "
                               + session.actionTimestamp);
            */
            throw Error.runtimeError(ErrorCode.GENERAL_ERROR,
                                     "null insert action ");
        }

        store.indexRow(session, row);
        session.rowActionList.add(action);

        row.rowAction = null;
    }

    // functional unit - accessibility of rows
    // Under 2PL, lock acquisition already guarantees visibility, so all
    // rows are readable once a statement is allowed to run.
    public boolean canRead(Session session, PersistentStore store, Row row,
                           int mode, int[] colMap) {
        return true;
    }

    public boolean canRead(Session session, PersistentStore store, long id,
                           int mode) {
        return true;
    }

    // No per-row transaction info is kept in the 2PL model.
    public void addTransactionInfo(CachedObject object) {}

    /**
     * add transaction info to a row just loaded from the cache. called only
     * for CACHED tables
     */
    public void setTransactionInfo(PersistentStore store,
                                   CachedObject object) {}

    public void removeTransactionInfo(CachedObject object) {}

    public void beginTransaction(Session session) {
        if (!session.isTransaction) {
            session.actionTimestamp = getNextGlobalChangeTimestamp();
            session.transactionTimestamp = session.actionTimestamp;
            session.isTransaction = true;

            transactionCount++;
        }
    }

    /**
     * add session to the end of queue when a transaction starts
     * (depending on isolation mode)
     */
    public void beginAction(Session session, Statement cs) {
        if (session.hasLocks(cs)) {
            return;
        }

        writeLock.lock();

        try {
            // Recompile the statement if the schema changed since compile.
            if (cs.getCompileTimestamp()
                    < database.schemaManager.getSchemaChangeTimestamp()) {
                cs = session.statementManager.getStatement(session, cs);
                session.sessionContext.currentStatement = cs;

                if (cs == null) {
                    return;
                }
            }

            boolean canProceed = setWaitedSessionsTPL(session, cs);

            if (canProceed) {
                if (session.tempSet.isEmpty()) {
                    lockTablesTPL(session, cs);

                    // we don't set other sessions that would now be waiting for this one too
                    // next lock release will do it
                } else {
                    setWaitingSessionTPL(session);
                }
            }
        } finally {
            writeLock.unlock();
        }
    }

    public void beginActionResume(Session session) {
        session.actionTimestamp = getNextGlobalChangeTimestamp();
        session.actionStartTimestamp = session.actionTimestamp;

        if (!session.isTransaction) {
            session.transactionTimestamp = session.actionTimestamp;
            session.isTransaction = true;

            transactionCount++;
        }
    }

    public void removeTransactionInfo(long id) {}

    public void resetSession(Session session, Session targetSession,
                             int mode) {
        super.resetSession(session, targetSession, mode);
    }

    void endTransaction(Session session) {
        if (session.isTransaction) {
            session.isTransaction = false;

            transactionCount--;
        }
    }
}
|
#!/bin/bash

# STIG validation/remediation module skeleton (GEN005521 / V0022470).
# NOTE(review): ${restore}, ${change}, ${verbose}, ${log}, ${stigid},
# ${author}, ${backup_path} and the report/print helpers are presumably
# defined by the sourced bootstrap — confirm against tools/bootstrap.sh.

# Module specific variables go here
# Files: file=/path/to/file
# Arrays: declare -a array_name
# Strings: foo="bar"
# Integers: x=9

###############################################
# Bootstrapping environment setup
###############################################

# Get our working directory
cwd="$(pwd)"

# Define our bootstrapper location
bootstrap="${cwd}/tools/bootstrap.sh"

# Bail if it cannot be found
if [ ! -f ${bootstrap} ]; then
  echo "Unable to locate bootstrap; ${bootstrap}" && exit 1
fi

# Load our bootstrap
source ${bootstrap}

###############################################
# Metrics start
###############################################

# Get EPOCH
s_epoch="$(gen_epoch)"

# Create a timestamp
timestamp="$(gen_date)"

# Whos is calling? 0 = singular, 1 is as group
caller=$(ps $PPID | grep -c stigadm)

###############################################
# Perform restoration
###############################################

# If ${restore} = 1 go to restoration mode
if [ ${restore} -eq 1 ]; then
  report "Not yet implemented" && exit 1
fi

###############################################
# STIG validation/remediation
###############################################

# Module specific validation code should go here
# Errors should go in ${errors[@]} array (which on remediation get handled)
# All inspected items should go in ${inspected[@]} array

# NOTE(review): validation is unimplemented — the STIG id is seeded into
# ${errors[@]} so the module always reports failure until work is done.
errors=("${stigid}")

# If ${change} = 1
#if [ ${change} -eq 1 ]; then

  # Create the backup env
  #backup_setup_env "${backup_path}"

  # Create a backup (configuration output, file/folde permissions output etc
  #bu_configuration "${backup_path}" "${author}" "${stigid}" "$(echo "${array_values[@]}" | tr ' ' '\n')"
  #bu_file "${backup_path}" "${author}" "${stigid}" "${file}"
  #if [ $? -ne 0 ]; then

    # Stop, we require a backup
    #report "Unable to create backup" && exit 1
  #fi

  # Iterate ${errors[@]}
  #for error in ${errors[@]}; do

    # Work to remediate ${error} should go here
  #done
#fi

# Remove dupes
#inspected=( $(remove_duplicates "${inspected[@]}") )

###############################################
# Results for printable report
###############################################

# If ${#errors[@]} > 0
if [ ${#errors[@]} -gt 0 ]; then

  # Set ${results} error message
  #results="Failed validation" UNCOMMENT ONCE WORK COMPLETE!
  results="Not yet implemented!"
fi

# Set ${results} passed message
[ ${#errors[@]} -eq 0 ] && results="Passed validation"

###############################################
# Report generation specifics
###############################################

# Apply some values expected for report footer
[ ${#errors[@]} -eq 0 ] && passed=1 || passed=0
[ ${#errors[@]} -gt 0 ] && failed=1 || failed=0

# Calculate a percentage from applied modules & errors incurred
percentage=$(percent ${passed} ${failed})

# If the caller was only independant
if [ ${caller} -eq 0 ]; then

  # Show failures
  [ ${#errors[@]} -gt 0 ] && print_array ${log} "errors" "${errors[@]}"

  # Provide detailed results to ${log}
  if [ ${verbose} -eq 1 ]; then

    # Print array of failed & validated items
    [ ${#inspected[@]} -gt 0 ] && print_array ${log} "validated" "${inspected[@]}"
  fi

  # Generate the report
  report "${results}"

  # Display the report
  cat ${log}
else

  # Since we were called from stigadm
  module_header "${results}"

  # Show failures
  [ ${#errors[@]} -gt 0 ] && print_array ${log} "errors" "${errors[@]}"

  # Provide detailed results to ${log}
  if [ ${verbose} -eq 1 ]; then

    # Print array of failed & validated items
    [ ${#inspected[@]} -gt 0 ] && print_array ${log} "validated" "${inspected[@]}"
  fi

  # Finish up the module specific report
  module_footer
fi

###############################################
# Return code for larger report
###############################################

# Return an error/success code (0/1)
exit ${#errors[@]}
# Date: 2018-06-29
#
# Severity: CAT-II
# Classification: UNCLASSIFIED
# STIG_ID: V0022470
# STIG_Version: SV-26763r1
# Rule_ID: GEN005521
#
# OS: Solaris
# Version: 10
# Architecture: X86
#
# Title: The SSH daemon must restrict login ability to specific users and/or groups.
# Description: Restricting SSH logins to a limited group of users, such as system administrators, prevents password-guessing and other SSH attacks from reaching system accounts and other accounts not authorized for SSH access.
|
package io.opensphere.mantle.data.merge;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Objects;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlRootElement;
import io.opensphere.mantle.data.impl.encoder.DiskDecodeHelper;
import io.opensphere.mantle.data.impl.encoder.DiskEncodeHelper;
import io.opensphere.mantle.data.impl.encoder.EncodeType;
/**
 * A single key mapping used during metadata merges: associates a key name in
 * the merged result with the corresponding key name in a source data type.
 */
@XmlRootElement(name = "MetaDataMergeKeyMapEntry")
@XmlAccessorType(XmlAccessType.FIELD)
public class MetaDataMergeKeyMapEntry
{
    /** The Merge key name. */
    @XmlAttribute(name = "mergeKeyName")
    private String myMergeKeyName;

    /** The Data type key name. */
    @XmlAttribute(name = "sourceKeyName")
    private String mySourceKeyName;

    /**
     * No-argument constructor (required for JAXB marshalling).
     */
    public MetaDataMergeKeyMapEntry()
    {
        /* intentionally blank */
    }

    /**
     * Copy constructor.
     *
     * @param other the MetaDataMergeKeyMapEntry to copy.
     */
    public MetaDataMergeKeyMapEntry(MetaDataMergeKeyMapEntry other)
    {
        this(other.myMergeKeyName, other.mySourceKeyName);
    }

    /**
     * Constructor taking both key names.
     *
     * @param mergeKeyName the merge key name
     * @param dataTypeKeyName the data type key name
     */
    public MetaDataMergeKeyMapEntry(String mergeKeyName, String dataTypeKeyName)
    {
        myMergeKeyName = mergeKeyName;
        mySourceKeyName = dataTypeKeyName;
    }

    /**
     * Reads both key names from the stream. A STRING tag is followed by the
     * encoded value; any other tag means the field was written as null.
     *
     * @param ois the ObjectInputStream
     * @throws IOException Signals that an I/O exception has occurred.
     */
    public void decode(ObjectInputStream ois) throws IOException
    {
        EncodeType tag = EncodeType.decode(ois);
        if (tag == EncodeType.STRING)
        {
            myMergeKeyName = DiskDecodeHelper.decodeString(ois);
        }
        else
        {
            myMergeKeyName = null;
        }

        tag = EncodeType.decode(ois);
        if (tag == EncodeType.STRING)
        {
            mySourceKeyName = DiskDecodeHelper.decodeString(ois);
        }
        else
        {
            mySourceKeyName = null;
        }
    }

    /**
     * Writes both key names to the stream, emitting a NULL tag for any field
     * that is not set.
     *
     * @param oos the ObjectOutputStream
     * @return the number of bytes written.
     * @throws IOException Signals that an I/O exception has occurred.
     */
    public int encode(ObjectOutputStream oos) throws IOException
    {
        int bytesWritten = 0;
        if (myMergeKeyName == null)
        {
            bytesWritten += EncodeType.NULL.encode(oos);
        }
        else
        {
            bytesWritten += DiskEncodeHelper.encodeString(oos, myMergeKeyName);
        }
        if (mySourceKeyName == null)
        {
            bytesWritten += EncodeType.NULL.encode(oos);
        }
        else
        {
            bytesWritten += DiskEncodeHelper.encodeString(oos, mySourceKeyName);
        }
        return bytesWritten;
    }

    @Override
    public boolean equals(Object obj)
    {
        if (obj == this)
        {
            return true;
        }
        if (obj == null || !getClass().equals(obj.getClass()))
        {
            return false;
        }
        MetaDataMergeKeyMapEntry that = (MetaDataMergeKeyMapEntry)obj;
        return Objects.equals(myMergeKeyName, that.myMergeKeyName)
                && Objects.equals(mySourceKeyName, that.mySourceKeyName);
    }

    /**
     * Gets the merge key name.
     *
     * @return the merge key name
     */
    public String getMergeKeyName()
    {
        return myMergeKeyName;
    }

    /**
     * Gets the source key name.
     *
     * @return the source key name
     */
    public String getSourceKeyName()
    {
        return mySourceKeyName;
    }

    @Override
    public int hashCode()
    {
        // Identical to the manual 31-based accumulation over
        // (mySourceKeyName, myMergeKeyName), in that order, with null -> 0.
        return Objects.hash(mySourceKeyName, myMergeKeyName);
    }

    /**
     * Sets the merge key name.
     *
     * @param mergeKeyName the new merge key name
     */
    public void setMergeKeyName(String mergeKeyName)
    {
        myMergeKeyName = mergeKeyName;
    }

    /**
     * Sets the source key name.
     *
     * @param sourceKeyName the new source key name
     */
    public void setSourceKeyName(String sourceKeyName)
    {
        mySourceKeyName = sourceKeyName;
    }

    @Override
    public String toString()
    {
        StringBuilder builder = new StringBuilder(100);
        builder.append(getClass().getSimpleName());
        builder.append(" MergeKeyName[").append(myMergeKeyName);
        builder.append("] SourceKeyName[").append(mySourceKeyName).append(']');
        return builder.toString();
    }
}
|
import json
def to_json(data):
    """Serialize a Python object to its JSON string representation."""
    serialized = json.dumps(data)
    return serialized
def from_json(data):
    """Deserialize a JSON string into the corresponding Python object."""
    parsed = json.loads(data)
    return parsed
<filename>src/com/opengamma/analytics/financial/curve/generator/GeneratorCurveAddYieldNb.java
/**
* Copyright (C) 2012 - present by OpenGamma Inc. and the OpenGamma group of companies
*
* Please see distribution for license.
*/
package com.opengamma.analytics.financial.curve.generator;
import java.util.Arrays;
import com.opengamma.analytics.financial.interestrate.InstrumentDerivative;
import com.opengamma.analytics.financial.interestrate.YieldCurveBundle;
import com.opengamma.analytics.financial.interestrate.market.description.IMarketBundle;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountAddZeroSpreadCurve;
import com.opengamma.analytics.financial.model.interestrate.curve.YieldAndDiscountCurve;
import com.opengamma.util.ArgumentChecker;
/**
 * Store the details and generate the required curve. The curve is the sum (or difference) of the curves
 * (operation on the continuously-compounded zero-coupon rates) produced by the array of generators.
 * The number of parameter for each curve is imposed.
 * The generated curve is a YieldAndDiscountAddZeroSpreadCurve.
 */
public class GeneratorCurveAddYieldNb extends GeneratorYDCurve {

  /**
   * The array of generators describing the different parts of the spread curve.
   */
  private final GeneratorYDCurve[] _generators;
  /**
   * The number of parameter associated to each generator.
   */
  private final int[] _nbParameters;
  /**
   * The total number of parameters (sum of _nbParameters). Computed once in the constructor.
   */
  private final int _totalNbParameters;
  /**
   * If true, the rate of all curves, except the first one, will be subtracted from the first one. If false, all the rates are added.
   */
  private final boolean _substract;
  /**
   * The number of generators.
   */
  private final int _nbGenerators;

  /**
   * Constructor.
   * @param generators The array of constructors for the component curves. Not null.
   * @param nbParameters The number of parameter associated to each generator. Not null; same length as generators.
   * @param substract If true, the rate of all curves, except the first one, will be subtracted from the first one. If false, all the rates are added.
   */
  public GeneratorCurveAddYieldNb(GeneratorYDCurve[] generators, int[] nbParameters, boolean substract) {
    ArgumentChecker.notNull(generators, "Generators");
    // Fix: nbParameters was previously dereferenced without a null check.
    ArgumentChecker.notNull(nbParameters, "Number of parameters");
    ArgumentChecker.isTrue(generators.length == nbParameters.length, "Number of parameters should be the same as the number of generators.");
    _generators = generators;
    _nbParameters = nbParameters;
    // Accumulate into a local so the field can be final.
    int total = 0;
    for (final int nb : nbParameters) {
      total += nb;
    }
    _totalNbParameters = total;
    _nbGenerators = generators.length;
    _substract = substract;
  }

  /**
   * Returns the total number of parameters reported by the component generators.
   * Note: this queries the generators, whereas _totalNbParameters sums the imposed counts.
   */
  @Override
  public int getNumberOfParameter() {
    int nbParam = 0;
    for (int loopgen = 0; loopgen < _nbGenerators; loopgen++) {
      nbParam += _generators[loopgen].getNumberOfParameter();
    }
    return nbParam;
  }

  /**
   * Builds the spread curve by slicing the parameter array across the component
   * generators in order and combining the resulting curves.
   */
  @Override
  public YieldAndDiscountCurve generateCurve(String name, double[] x) {
    ArgumentChecker.isTrue(x.length == getNumberOfParameter(), "Incorrect number of parameters");
    YieldAndDiscountCurve[] underlyingCurves = new YieldAndDiscountCurve[_nbGenerators];
    int index = 0;
    for (int loopgen = 0; loopgen < _nbGenerators; loopgen++) {
      double[] paramCurve = Arrays.copyOfRange(x, index, index + _generators[loopgen].getNumberOfParameter());
      index += _generators[loopgen].getNumberOfParameter();
      underlyingCurves[loopgen] = _generators[loopgen].generateCurve(name + "-" + loopgen, paramCurve);
    }
    return new YieldAndDiscountAddZeroSpreadCurve(name, _substract, underlyingCurves);
  }

  /**
   * The bundle is not used; delegates to {@link #generateCurve(String, double[])}.
   */
  @Override
  public YieldAndDiscountCurve generateCurve(String name, YieldCurveBundle bundle, double[] parameters) {
    return generateCurve(name, parameters);
  }

  /**
   * The bundle is not used; delegates to {@link #generateCurve(String, double[])}.
   */
  @Override
  public YieldAndDiscountCurve generateCurve(String name, IMarketBundle bundle, double[] parameters) {
    return generateCurve(name, parameters);
  }

  /**
   * Create the final generators.
   * The relevant array of instrument is passed to each generator. For all the generator, except the first, the last instrument of the
   * previous generator is also passed as an indication of the previous part (can be used in "anchor" for example).
   * @param data The array of instrument used to construct the curve.
   * @return The final generator.
   */
  @Override
  public GeneratorYDCurve finalGenerator(Object data) {
    ArgumentChecker.isTrue(data instanceof InstrumentDerivative[], "data should be an array of InstrumentDerivative");
    InstrumentDerivative[] instruments = (InstrumentDerivative[]) data;
    ArgumentChecker.isTrue(instruments.length == _totalNbParameters, "The data should have the size prescribed by the _nbParameters");
    GeneratorYDCurve[] finalGenerator = new GeneratorYDCurve[_nbGenerators];
    int nbDataUsed = 0;
    // First generator: its own instruments only.
    InstrumentDerivative[] instruments0 = new InstrumentDerivative[_nbParameters[0]];
    System.arraycopy(instruments, 0, instruments0, 0, _nbParameters[0]);
    finalGenerator[0] = _generators[0].finalGenerator(instruments0);
    nbDataUsed += _nbParameters[0];
    // Subsequent generators: prepend the last instrument of the previous part.
    for (int loopgen = 1; loopgen < _nbParameters.length; loopgen++) {
      InstrumentDerivative[] instrumentsCurrent = new InstrumentDerivative[_nbParameters[loopgen] + 1];
      System.arraycopy(instruments, nbDataUsed - 1, instrumentsCurrent, 0, _nbParameters[loopgen] + 1);
      finalGenerator[loopgen] = _generators[loopgen].finalGenerator(instrumentsCurrent);
      nbDataUsed += _nbParameters[loopgen];
    }
    return new GeneratorCurveAddYield(finalGenerator, _substract);
  }

  /**
   * Delegates the initial guess for each slice of rates to the matching generator.
   */
  @Override
  public double[] initialGuess(double[] rates) {
    double[] guess = new double[rates.length];
    int nbDataUsed = 0;
    int nbParam = 0;
    for (int loopgen = 0; loopgen < _nbGenerators; loopgen++) {
      nbParam = _generators[loopgen].getNumberOfParameter();
      double[] tmp = new double[nbParam];
      System.arraycopy(rates, nbDataUsed, tmp, 0, nbParam);
      System.arraycopy(_generators[loopgen].initialGuess(tmp), 0, guess, nbDataUsed, nbParam);
      nbDataUsed += nbParam;
    }
    return guess;
  }

}
|
<filename>astropyp/db_utils/index.py
"""
Build or load an index of decam files
"""
import os
import logging
import warnings
logger = logging.getLogger('astropyp.index')
def init_connection(connection, echo=False):
    """Return a SQLAlchemy engine for the given connection.

    Improvement: the dependency on ``astropy.extern.six`` (deprecated and
    removed from modern astropy) is replaced by a plain ``isinstance``
    check against ``str``, which is equivalent on Python 3.

    Parameters
    ----------
    connection: str or `~sqlalchemy.engine.base.Engine`
        Either a database URL string (a new engine is created from it) or
        an already-constructed engine, which is returned unchanged.
    echo: bool (optional)
        Passed to ``create_engine`` to enable SQL statement logging.

    Returns
    -------
    engine: `~sqlalchemy.engine.base.Engine`
    """
    if isinstance(connection, str):
        from sqlalchemy import create_engine
        engine = create_engine(connection, echo=echo)
    else:
        engine = connection
    return engine
def get_tables(connection):
    """
    Connect to a database and get the names and reflected definitions of
    all of its tables.

    Parameters
    ----------
    connection: str or `~sqlalchemy.engine.base.Engine`
        Connection to the index database

    Returns
    -------
    tbl_names: iterable of str
        Names of tables from database
    tables: dict-like of `~sqlalchemy.sql.schema.Table`
        Tables from database, keyed by table name
    """
    from sqlalchemy import MetaData
    engine = init_connection(connection)
    meta = MetaData()
    meta.reflect(engine)
    return meta.tables.keys(), meta.tables
def connect2idx(connection, tbl_name):
    """
    Connect to the index database and load a single table together with a
    session bound to the database.

    Parameters
    ----------
    connection: str or `~sqlalchemy.engine.base.Engine`
        Connection to the database
    tbl_name: str
        Name of the table to load

    Returns
    -------
    meta: `~sqlalchemy.schema.MetaData`
        Metadata reflected from the database
    tbl: `~sqlalchemy.sql.schema.Table` or None
        Table from database, or ``None`` if ``tbl_name`` is not present
    session: `~sqlalchemy.orm.session.Session`
        Session connected to database
    """
    from sqlalchemy import MetaData
    from sqlalchemy.orm import sessionmaker
    engine = init_connection(connection)
    meta = MetaData()
    meta.reflect(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    # The table may legitimately be absent (e.g. before add_tbl is called).
    if tbl_name in meta.tables:
        tbl = meta.tables[tbl_name]
    else:
        tbl = None
    return meta, tbl, session
def add_tbl(connection, columns, tbl_name, echo=False, overwrite=False):
    """
    Create or clear a set of tables for a decam file index and create the decam_keys
    table to link decam headers to table columns

    Parameters
    ----------
    connection: str or `~sqlalchemy.engine.base.Engine`_
        Connection to the index database
    columns: list of tuples
        Columns to create in the table. This should always be a list of
        tuples with 3 entries:
            1. column name (str)
            2. column data type (sqlalchemy data type, for example `Integer`)
            3. dictionary of optional parameters
        Example: ``[('EXPNUM', Integer, {'index': True}),('RA', Float, {})]``
    tbl_name: str
        Name of the table to create. If the table aleady exists nothing will be done
    echo: bool (optional)
        For debugging purposes you may wish to view the commands being sent
        from python to the database, in which case you should set ``echo=True``.
    overwrite: bool (optional)
        If ``True`` and the table already exists it is dropped and recreated;
        if ``False`` (default) an existing table is left untouched.

    Returns
    -------
    table_exists: bool
        If the table already exists ``True`` is returned, otherwise ``False`` is returned
    """
    from sqlalchemy import create_engine, MetaData, Table, Column, ForeignKey
    from sqlalchemy import Integer, Float, String
    engine = create_engine(connection, echo=echo)
    meta = MetaData()
    meta.reflect(engine)
    # check if the table name already exists
    if tbl_name in meta.tables.keys():
        if overwrite:
            meta.tables[tbl_name].drop(engine)
            meta.reflect(engine)
        else:
            warnings.warn('Table "{0}" already exists'.format(tbl_name))
            return True
    # Include default columns if the user keys included a '*' or if the user
    # didn't specify any header keys
    # NOTE(review): `default_columns` is not defined anywhere in this module
    # as shown — confirm it is provided elsewhere, otherwise this branch
    # raises NameError.
    if columns is None:
        columns = default_columns
    elif '*' in columns:
        columns = default_columns+columns
    # Create the table
    tbl_columns = [Column(col[0],col[1],**col[2]) for col in columns]
    tbl = Table(tbl_name, meta, *tbl_columns)
    tbl.create(engine, checkfirst=True)
    return False
def clone_tbl(tbl, metadata, new_name):
    """
    Clone a table in a database to another table with a new name
    """
    from sqlalchemy import Table
    copied_columns = [column.copy() for column in tbl.columns]
    return Table(new_name, metadata, *copied_columns)
def valid_ext(filename, extensions):
    """
    Check if a filename ends with a valid extension.

    Improvements: the dependency on ``astropy.extern.six`` is replaced by a
    stdlib ``isinstance(..., str)`` check (equivalent on Python 3), and the
    redundant ``if any(...): return True / return False`` is collapsed to a
    direct boolean return.

    Parameters
    ----------
    filename: str
        File name (or path) to test.
    extensions: str or list of str
        A single extension or a list of acceptable extensions.

    Returns
    -------
    valid: bool
        ``True`` if ``filename`` ends with one of ``extensions``.
    """
    if isinstance(extensions, str):
        extensions = [extensions]
    return any(filename.endswith(ext) for ext in extensions)
def get_files(path, extensions):
    """
    Get all the files in a directory
    """
    results = []
    for name in os.listdir(path):
        full_path = os.path.join(path, name)
        if os.path.isfile(full_path) and valid_ext(name, extensions):
            results.append(full_path)
    return results
def add_files(connection, tbl_name, filenames=None, paths=None, recursive=False,
        no_warnings=True, column_func=None, extensions=['.fits','.fits.fz']):
    # NOTE(review): mutable default argument; harmless here because
    # `extensions` is never mutated, but worth confirming.
    """
    Add fits header information to a database table.

    Parameters
    ----------
    connection: str or `~sqlalchemy.engine.base.Engine`
        Connection to the index database
    tbl_name: str
        Name of the table to add the files
    filenames: str or list (optional)
        Filename or list of filenames to add to the table
    paths: str or list (optional)
        Path or list of paths to search for files to add to the table. Only
        files that end with one of the ``extensions`` will be added.
    recursive: bool (optional)
        If ``recursive==True`` each path in ``paths`` will be recursively searched
        for files that end with one of the ``extensions``. The default value is
        ``False``.
    no_warnings: bool (optional)
        Often very old (or very new) fits files generate warnings in astropy.
        It is often useful to ignore these so the default is to ignore warnings
        (``no_warnings==True``).
    column_func: function (optional)
        It may be useful to calculate certain quantities like airmass that might not
        be in the fits header and store them in the table. If a ``column_func`` is
        passed to ``add_files`` it should receive the parameters ``hdulist`` and
        ``header_values``, a dictionary of all of the header values to be written
        to database. Any new keys should be added to ``header_values`` and the
        function should also return the modified ``header_values`` variable.
    extensions: list (optional)
        List of file extensions to search for in ``paths``. The default
        is ``extensions=['.fits','.fits.fz']``

    Returns
    -------
    new_files: list
        List of files added to the database
    duplicates: list
        List of files already contained in the database
    """
    from astropy.io import fits
    from sqlalchemy import create_engine, MetaData
    from sqlalchemy.orm import sessionmaker
    from astropy.extern import six
    # Sometimes older (or newer) fits files generate warnings by astropy, which a user
    # probably wants to suppress while building the image index
    if no_warnings:
        import warnings
        warnings.filterwarnings("ignore")
    # Open connection to database and load the table information
    engine = create_engine(connection)
    conn = engine.connect()
    meta = MetaData()
    meta.reflect(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    # Get information about objects in the database (to prevent duplicate entries)
    tbl = meta.tables[tbl_name]
    old_filepaths = [f[0] for f in session.query(tbl.columns.filename).all()]
    session.close()
    # Search all filepaths for fits files
    if paths is not None:
        import itertools
        if isinstance(paths, six.string_types):
            paths = [paths]
        logger.info('paths: {0}'.format(paths))
        filepaths = []
        for path in paths:
            if recursive:
                # Walk the whole tree, keeping only files with valid extensions.
                filepaths.append(list(itertools.chain.from_iterable(
                    [[os.path.join(root, f) for f in files if valid_ext(f, extensions)]
                        for root,dirs,files in os.walk(path)])))
            else:
                filepaths.append(get_files(path, extensions))
        # Flatten the list of lists
        filepaths = list(itertools.chain.from_iterable(filepaths))
    else:
        filepaths = []
    # Add any files specified by the user
    if filenames is not None:
        if isinstance(filenames, six.string_types):
            filenames = [filenames]
        for filename in filenames:
            if filename not in filepaths:
                filepaths.append(filename)
    logger.debug('filepaths: {0}'.format(filepaths))
    duplicates = []
    new_files = []
    # Iterate through paths to look for files to add
    for n, filepath in enumerate(filepaths):
        logger.info('{0} of {1}: {2}'.format(n, len(filepaths),filepath))
        if filepath not in old_filepaths:
            # Add header keys
            hdulist = fits.open(filepath, memmap=True)
            header_values = {}
            # Take each table column's value from the primary header, falling
            # back to the first extension header when present.
            for col in tbl.columns:
                key = col.name
                if key in hdulist[0].header:
                    header_values[key] = hdulist[0].header[key]
                elif len(hdulist)>1 and key in hdulist[1].header:
                    header_values[key] = hdulist[1].header[key]
            header_values['filename'] = filepath
            # Run a code to calculate custom columns
            #(for example if airmass is not in the header)
            if column_func is not None:
                header_values = column_func(hdulist, header_values)
            #Insert values into the table
            ins = tbl.insert().values(**header_values)
            conn.execute(ins)
            new_files.append(filepath)
        else:
            logger.info('duplicate: {0}'.format(filepath))
            duplicates.append(filepath)
    logger.debug('New files added: \n{0}'.format(new_files))
    logger.info('All duplicates: \n{0}'.format(duplicates))
    return new_files, duplicates
def query(sql, connection):
    """
    Query the index and return the result as an astropy Table.

    Parameters
    ----------
    sql: str
        SQL expression to execute on the database
    connection: str or `~sqlalchemy.engine.base.Engine`
        Connection to the index database

    Returns
    -------
    result: `~astropy.table.Table`
        Query result converted from a pandas DataFrame.
        (An earlier docstring advertised a ``none2nan`` option and a
        ``QTable`` return; neither is implemented here.)
    """
    # Imports are kept function-local so astropy/pandas are only required
    # when a query is actually run.
    from astropy.table import Table
    import pandas
    engine = init_connection(connection)
    df = pandas.read_sql(sql, engine)
    tbl = Table.from_pandas(df)
    return tbl
def get_distinct(connection, tbl, column):
    """
    Get the unique values in a column.

    Parameters
    ----------
    connection: str or `~sqlalchemy.engine.base.Engine`
        Connection to the index database
    tbl: str
        Name of the table containing the column
    column: str
        Name of the column to search for distinct values

    Returns
    -------
    distinct: list
        List of distinct values
    """
    _, idx_tbl, session = connect2idx(connection, tbl)
    # Resolve the column object by name, then let the DB deduplicate.
    col = getattr(idx_tbl.columns, column)
    return [row[0] for row in session.query(col).distinct().all()]
def get_multiplicity(connection, tbl, column):
    """
    Get the unique values of a column and the multiplicity of each value.

    Parameters
    ----------
    connection: str or `~sqlalchemy.engine.base.Engine`
        Connection to the index database
    tbl: str
        Name of the table containing the column
    column: str
        Name of the column to search for distinct values

    Returns
    -------
    multiplicity: dict
        Keys are the distinct entries in the table; values are the number of
        rows holding each entry.
    """
    _, idx_tbl, session = connect2idx(connection, tbl)
    col = getattr(idx_tbl.columns, column)
    values = [row[0] for row in session.query(col).distinct().all()]
    # NOTE(review): this issues one COUNT query per distinct value (N+1
    # pattern); a single GROUP BY would be cheaper. Kept as-is to preserve
    # behavior exactly.
    return {value: session.query(idx_tbl).filter(col == value).count()
            for value in values}
#!/bin/bash
# Download a selected set of World Development Indicator (WDI) time series
# from the World Bank API, one zip per indicator, and unpack into ./data.
#for the complete dataset ;)
#rm -f ./data/WDI*
#rm ./data/ip_jrn_art.csv
#rm ./data/sp_pop_totl
#load WDI data
#wget --directory-prefix=./data http://databank.worldbank.org/data/download/WDI_csv.zip
#unzip ./data/WDI_csv.zip -d ./data

# Indicator codes to fetch.
declare -a indicators=("IC.ISV.DURS" "SE.TER.ENRL.TC.ZS" "IP.PAT.RESD" "IP.PAT.NRES" "SE.PRM.ENRL.TC.ZS" "SE.SEC.ENRL.TC.ZS" "SE.PRE.ENRL.TC.ZS" "IC.REG.DURS" "IC.REG.PROC" "GB.XPD.RSDV.GD.ZS" "FR.INR.RINR" "IC.TAX.PRFT.CP.ZS" "SP.POP.1564.TO.ZS" "IC.BUS.NDNS.ZS" "CM.MKT.LCAP.GD.ZS" "IT.NET.USER.ZS" "HD.HCI.OVRL" "SE.XPD.TOTL.GD.ZS" "NY.GNP.PCAP.CN" "NY.GNP.PCAP.CD" "BM.KLT.DINV.WD.GD.ZS" "BX.KLT.DINV.WD.GD.ZS" "NE.CON.TOTL.ZS" "SP.DYN.TFRT.IN" "IC.BUS.DFRN.XQ" "IC.REG.COST.PC.ZS" "EN.ATM.CO2E.PC")

for i in "${indicators[@]}"
do
    echo "$i"
    # Quote the URL: '?' is a glob character and "$i" must not be word-split.
    wget --output-document ./data/tmp.zip "https://api.worldbank.org/v2/en/indicator/${i}?downloadformat=csv"
    unzip ./data/tmp.zip -d ./data
    rm ./data/tmp.zip
    mv ./data/API_*.csv ./data/data
    mv ./data/Metadata*.csv ./data/metadata
done
|
#!/usr/bin/env bashio
# ------------------------------------------------------------------------------
# Create the backup name by replacing all name patterns.
#
# Returns the final name on stdout
# ------------------------------------------------------------------------------
function generate-backup-name {
    local name="$BACKUP_NAME"
    local version type stamp

    # Without a user-configured name, fall back to the default pattern.
    if [ -z "$BACKUP_NAME" ]; then
        echo "Samba Backup $(date +'%Y-%m-%d %H:%M')"
        return
    fi

    # Gather the values substituted for {version}/{type}/{date}.
    version=$(ha core info --raw-json | jq -r .data.version)
    if [[ -n "$EXCLUDE_ADDONS" || -n "$EXCLUDE_FOLDERS" ]]; then
        type="Partial"
    else
        type="Full"
    fi
    stamp=$(date +'%Y-%m-%d %H:%M')

    name=${name/\{version\}/$version}
    name=${name/\{type\}/$type}
    name=${name/\{date\}/$stamp}
    echo "$name"
}
# ------------------------------------------------------------------------------
# Create a valid filename by replacing all forbidden characters.
#
# Arguments
#  $1 The original name
#
# Returns the final name on stdout
# ------------------------------------------------------------------------------
function generate-filename {
    local input="${1}"
    local prefix

    # Characters replaced with '_'. Note: the old list also contained '\..',
    # which could never match because the preceding '\.' entry has already
    # replaced every dot -- that dead entry has been removed.
    declare -a forbidden=('\/' '\\' '\<' '\>' '\:' '\"' '\|' '\?' '\*' '\.' '\ ' '\-')
    for fc in "${forbidden[@]}"; do
        input=${input//$fc/_}
    done

    # Prepend the "Samba_Backup_" prefix unless the name already carries it.
    prefix=${input:0:13}
    [ "$prefix" = "Samba_Backup_" ] && echo "${input}" || echo "Samba_Backup_${input}"
}
# ------------------------------------------------------------------------------
# Run a command and log its output (debug or warning).
#
# Arguments
#  $1 The command to run
#
# Returns 1 in case the command failed
# ------------------------------------------------------------------------------
function run-and-log {
    local cmd="$1"
    local result

    # Capture stdout of the (eval'd) command. On success the output is
    # logged at debug level; on failure the same captured output is logged
    # as a warning and the function reports failure to the caller.
    # NOTE(review): stderr is not captured, so a failing command's error
    # text may not appear in the warning -- confirm that is acceptable.
    result=$(eval "$cmd") \
        && bashio::log.debug "$result" \
        || { bashio::log.warning "$result"; return 1; }
}
# ------------------------------------------------------------------------------
# Checks if input is an extended trigger.
#
# Arguments
#  $1 The input to check
#
# Returns 0 (true) or 1 (false)
# ------------------------------------------------------------------------------
function is-extended-trigger {
    local input=${1}
    local cmd

    # An extended trigger is a JSON object whose .command equals "trigger";
    # anything jq cannot parse is rejected.
    cmd=$(echo "$input" | jq -r '.command' 2>/dev/null) || return 1
    [ "$cmd" = "trigger" ]
}
|
package libs.trustconnector.scdp.util.tlv.simpletlv;
import libs.trustconnector.scdp.util.tlv.*;
import libs.trustconnector.scdp.util.tlv.bertlv.*;
import libs.trustconnector.scdp.util.*;
import libs.trustconnector.scdp.util.ByteArray;
import libs.trustconnector.scdp.util.StringFormat;
import libs.trustconnector.scdp.util.tlv.Length;
import libs.trustconnector.scdp.util.tlv.Tag;
import libs.trustconnector.scdp.util.tlv.bertlv.BERLength;
/**
 * Simple-TLV "Text String" data object (tag 13). The first value byte holds
 * the data coding scheme (DCS); the remaining bytes are the encoded text.
 */
public class TextString extends SimpleTLV
{
    /** DCS value for the GSM default 7-bit alphabet. */
    public static final int DCS_GSM_7_BIT = 0;
    /** DCS value for 8-bit data. */
    public static final int DCS_GSM_8_BIT = 4;
    /** DCS value for the UCS2 alphabet. */
    public static final int DCS_UCS2 = 8;

    /** Simple-TLV tag assigned to Text String objects (was a magic 13). */
    private static final int TEXT_STRING_TAG = 13;

    /** Wraps an already-encoded Text String TLV value. */
    public TextString(final Tag tag, final Length len, final byte[] v, final int vOff) {
        super(tag, len, v, vOff);
    }

    /** Creates an empty Text String (tag only, zero length, no value). */
    public TextString() {
        this.tag = new SimpleTag(TEXT_STRING_TAG);
        this.len = new BERLength(0);
    }

    /**
     * Creates a Text String from a Java string.
     * NOTE(review): the DCS byte is set to UCS2 but the payload comes from
     * text.getBytes(), which uses the platform default charset -- confirm
     * callers depend on this exact (possibly non-UCS2) encoding.
     */
    public TextString(final String text) {
        this.tag = new SimpleTag(TEXT_STRING_TAG);
        (this.value = new ByteArray()).append((byte)DCS_UCS2);
        this.value.append(text.getBytes());
        this.len = new BERLength(this.value.length());
    }

    /** Creates a Text String from pre-encoded bytes plus an explicit DCS. */
    public TextString(final byte[] value, final int dcs) {
        this.tag = new SimpleTag(TEXT_STRING_TAG);
        (this.value = new ByteArray()).append((byte)dcs);
        this.value.append(value);
        this.len = new BERLength(this.value.length());
    }

    /** Returns the data coding scheme stored in the first value byte. */
    public byte getDCS() {
        return this.value.getByte(0);
    }

    /**
     * Decodes the text payload according to the DCS byte.
     *
     * @return the decoded string, or null for an unrecognized DCS.
     */
    public String getText() {
        final byte retDCS = this.getDCS();
        String retValue = null;
        final byte[] bV = this.value.toBytes(1, this.value.length() - 1);
        // Compare against the named DCS constants instead of magic 0/4/8.
        if (retDCS == DCS_GSM_7_BIT) {
            retValue = ByteArray.convert(bV, StringFormat.ASCII_7_BIT);
        }
        else if (retDCS == DCS_GSM_8_BIT) {
            retValue = ByteArray.convert(bV, StringFormat.ASCII);
        }
        else if (retDCS == DCS_UCS2) {
            retValue = ByteArray.convert(bV, StringFormat.UCS2);
        }
        return retValue;
    }

    @Override
    public String toString() {
        String res = "Text String:" + super.toString();
        res = res + "\n -Text=" + this.getText();
        return res;
    }
}
|
#!/usr/bin/env python3
import logging
import yaml
from os import environ as env
from typing import List
from fire import Fire
from gitlabdata.orchestration_utils import snowflake_engine_factory
from sqlalchemy.engine import Engine
def get_list_of_dbs_to_keep(yaml_path="analytics/load/snowflake/roles.yml"):
    """Return the lower-cased names of databases declared in the roles file."""
    with open(yaml_path, "r") as stream:
        role_config = yaml.load(stream, Loader=yaml.FullLoader)
    # Each entry is a single-key mapping {db_name: ...}; keep the key.
    return [next(iter(entry)).lower() for entry in role_config["databases"]]
def get_list_of_dev_schemas(engine: Engine) -> List[str]:
    """
    Get a list of all dev schemas.
    This will make sure sensitive data is not hanging around.

    Returns an empty list when the query fails. (The old code left both
    ``connection`` and ``schemas`` unbound on failure, so the ``finally``
    block and the ``return`` raised NameError instead of degrading cleanly.)
    """
    query = """
    SELECT distinct table_schema
    FROM analytics.information_schema.tables
    WHERE table_catalog IN ('ANALYTICS')
    AND lower(table_schema) LIKE '%scratch%'
    """
    schemas: List[str] = []
    connection = None
    try:
        logging.info("Getting list of schemas...")
        connection = engine.connect()
        schemas = [row[0] for row in connection.execute(query).fetchall()]
    except Exception:
        # Log the traceback instead of silently swallowing every error type.
        logging.exception("Failed to get list of schemas...")
    finally:
        if connection is not None:
            connection.close()
        engine.dispose()
    return schemas
def get_list_of_clones(engine: Engine) -> List[str]:
    """
    Get a list of all databases besides analytics and raw.
    This will delete clones for open MRs, so users may need to rerun the review job.

    Returns an empty list when the query fails. (The old code left both
    ``connection`` and ``databases`` unbound on failure, turning any
    connection error into a NameError.)
    """
    query = """
    SELECT DATABASE_NAME as database_name
    FROM INFORMATION_SCHEMA.DATABASES
    """
    databases: List[str] = []
    connection = None
    try:
        logging.info("Getting list of databases...")
        connection = engine.connect()
        databases = [row[0] for row in connection.execute(query).fetchall()]
    except Exception:
        logging.exception("Failed to get list of databases...")
    finally:
        if connection is not None:
            connection.close()
        engine.dispose()
    # Filter out the databases declared in roles.yml (e.g. analytics, raw).
    dbs_to_keep = get_list_of_dbs_to_keep()
    return [database for database in databases if database.lower() not in dbs_to_keep]
def drop_databases() -> None:
    """
    Drop each of the databases for the clones that exist.

    Failures on individual databases are logged and skipped; a connect
    failure no longer raises NameError from the ``finally`` block.
    """
    logging.info("Preparing to drop databases...")
    config_dict = env.copy()
    engine = snowflake_engine_factory(config_dict, "SYSADMIN")
    logging.info(f"Engine Created: {engine}")
    logging.info("Creating list of clones...")
    databases = get_list_of_clones(engine)
    for database in databases:
        # Quote the identifier so mixed-case / special names survive.
        drop_query = f"""DROP DATABASE "{database}";"""
        connection = None
        try:
            connection = engine.connect()
            connection.execute(drop_query)
        except Exception:
            logging.exception(f"Failed to drop database: {database}")
        finally:
            if connection is not None:
                connection.close()
            # NOTE(review): disposing inside the loop recreates the pool on
            # every iteration; kept to preserve the original behavior.
            engine.dispose()
def drop_dev_schemas() -> None:
    """
    Drop each of the schemas that have "scratch" in their name.

    Failures on individual schemas are logged and skipped; a connect
    failure no longer raises NameError from the ``finally`` block.
    """
    logging.info("Preparing to drop schemas...")
    config_dict = env.copy()
    engine = snowflake_engine_factory(config_dict, "SYSADMIN")
    logging.info(f"Engine Created: {engine}")
    schemas = get_list_of_dev_schemas(engine)
    logging.info(f"Dropping {len(schemas)} dev schemas...")
    for schema in schemas:
        drop_query = f"""DROP SCHEMA analytics."{schema}";"""
        logging.info(f"Dropping Schema: {schema}")
        connection = None
        try:
            connection = engine.connect()
            connection.execute(drop_query)
        except Exception:
            logging.exception(f"Failed to drop schema: {schema}")
        finally:
            if connection is not None:
                connection.close()
            # NOTE(review): per-iteration dispose kept from the original.
            engine.dispose()
    logging.info("Schemas dropped successfully.")
if __name__ == "__main__":
    # Level 20 == logging.INFO.
    logging.basicConfig(level=20)
    # Expose the two cleanup commands as a Fire CLI
    # (e.g. `python this_script.py drop_databases`).
    Fire({"drop_dev_schemas": drop_dev_schemas, "drop_databases": drop_databases})
    logging.info("Complete.")
|
// -----------------------------------------------------------------------------
// MIT License
//
// Copyright (c) 2020 <NAME>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// -----------------------------------------------------------------------------
/**
* \brief Utility functions for testing.
*/
#include "IllustratorSDK.h"
#include "testing_utlity.h"
#include "utility/string_functions.h"
#include "l2a_constants.h"
/**
 * Set the name used in the failure messages of subsequent Compare* calls.
 */
void L2A::TEST::UTIL::UnitTest::SetTestName(const ai::UnicodeString& test_name) { test_name_ = test_name; }
/**
 * Compare two integers and record the result; on mismatch an alert showing
 * the expected (val1) and actual (val2) values is displayed.
 */
void L2A::TEST::UTIL::UnitTest::CompareInt(const int& val1, const int& val2)
{
    test_count_++;
    if (val1 != val2)
    {
        ai::UnicodeString error_string = "Integer compare test for: " + test_name_ + " failed!\nExpected \"" +
            L2A::UTIL::IntegerToString(val1) + "\" got \"" + L2A::UTIL::IntegerToString(val2) + "\"";
        sAIUser->MessageAlert(error_string);
        return;
    }
    test_count_passed_++;
}
/**
*
*/
void L2A::TEST::UTIL::UnitTest::CompareFloat(const AIReal& val1, const AIReal& val2, const AIReal& eps)
{
test_count_++;
if (abs(val1 - val2) < eps)
test_count_passed_++;
else
sAIUser->MessageAlert(ai::UnicodeString("Float compare test failed!"));
}
/**
 * Compare two strings and record the result; on mismatch an alert showing
 * the expected (val1) and actual (val2) strings is displayed.
 */
void L2A::TEST::UTIL::UnitTest::CompareStr(const ai::UnicodeString& val1, const ai::UnicodeString& val2)
{
    test_count_++;
    if (val1 == val2)
    {
        test_count_passed_++;
        return;
    }

    // Build the failure message piece by piece.
    ai::UnicodeString message("String compare test for: ");
    message += test_name_;
    message += " failed!\nExpected \"";
    message += val1;
    message += "\" got \"";
    message += val2;
    message += "\"";
    sAIUser->MessageAlert(message);
}
/**
*
*/
void L2A::TEST::UTIL::UnitTest::CompareRect(const AIRealRect& val1, const AIRealRect& val2)
{
test_count_++;
if (abs(val1.bottom - val2.bottom) < L2A::CONSTANTS::eps_pos_ &&
abs(val1.left - val2.left) < L2A::CONSTANTS::eps_pos_ &&
abs(val1.right - val2.right) < L2A::CONSTANTS::eps_pos_ && abs(val1.top - val2.top) < L2A::CONSTANTS::eps_pos_)
{
test_count_passed_++;
}
else
{
ai::UnicodeString error_string("");
error_string += "Rectangle compair failed!\n";
error_string += "\nval1.bottom = ";
error_string += ai::UnicodeString(std::to_string(val1.bottom));
error_string += "\nval2.bottom = ";
error_string += ai::UnicodeString(std::to_string(val2.bottom));
error_string += "\n\nval1.left = ";
error_string += ai::UnicodeString(std::to_string(val1.left));
error_string += "\nval2.left = ";
error_string += ai::UnicodeString(std::to_string(val2.left));
error_string += "\n\nval1.right = ";
error_string += ai::UnicodeString(std::to_string(val1.right));
error_string += "\nval2.right = ";
error_string += ai::UnicodeString(std::to_string(val2.right));
error_string += "\n\nval1.top = ";
error_string += ai::UnicodeString(std::to_string(val1.top));
error_string += "\nval2.top = ";
error_string += ai::UnicodeString(std::to_string(val2.top));
sAIUser->MessageAlert(error_string);
}
}
/**
 * Compare two string vectors: sizes must match, then entries are compared
 * pairwise with CompareStr.
 */
void L2A::TEST::UTIL::UnitTest::CompareStringVector(
    const std::vector<ai::UnicodeString>& val1, const std::vector<ai::UnicodeString>& val2)
{
    CompareInt(static_cast<int>(val1.size()), static_cast<int>(val2.size()));
    for (size_t i = 0; i < val1.size(); ++i)
    {
        CompareStr(val1[i], val2[i]);
    }
}
/**
*
*/
void L2A::TEST::UTIL::UnitTest::PrintTestSummary(const bool print_status)
{
if (print_status)
{
ai::UnicodeString summary_string("");
summary_string += "Performed ";
summary_string += L2A::UTIL::IntegerToString(test_count_);
summary_string += " tests\n";
summary_string += L2A::UTIL::IntegerToString(test_count_passed_);
summary_string += " passed\n";
summary_string += L2A::UTIL::IntegerToString(test_count_ - test_count_passed_);
summary_string += " failed";
sAIUser->MessageAlert(summary_string);
}
}
|
#!/usr/bin/env bash
# Run the supernet architecture search (channels-48 config set) for the
# pix2pix maps model, direction BtoA, writing the result pickle next to the
# supernet checkpoints.
python search.py --phase train --dataroot database/maps \
  --restore_G_path logs/pix2pix/map2sat/supernet/checkpoints/latest_net_G.pth \
  --output_path logs/pix2pix/map2sat/supernet/result.pkl \
  --direction BtoA --batch_size 32 \
  --config_set channels-48 \
  --real_stat_path real_stat/maps_subtrain_A.npz \
  --meta_path datasets/metas/maps/train2.meta
|
<reponame>Jarunik/sm-team-finder
package org.slos.battle.abilities.attack;
import org.slos.battle.abilities.Ability;
import org.slos.battle.abilities.AbilityClassification;
import org.slos.battle.abilities.AbilityEffect;
import org.slos.battle.abilities.AbilityType;
import org.slos.battle.abilities.rule.target.SnaredRule;
import org.slos.battle.abilities.rule.target.TargetRuleset;
/**
 * Ability registered as {@link AbilityType#SNARED} with the
 * {@link AbilityClassification#TARGET_EVADE} classification. Its effect is
 * delegated to a {@link SnaredRule} and its target ruleset is
 * {@code TargetRuleset.SELF}.
 */
public class SnaredAbility extends Ability implements AbilityEffect {

    public SnaredAbility() {
        super(AbilityType.SNARED, AbilityClassification.TARGET_EVADE);
    }

    /** @return a new {@link SnaredRule} implementing this ability's effect. */
    @Override
    public AbilityEffect getEffect() {
        return new SnaredRule();
    }

    /** @return {@code TargetRuleset.SELF}. */
    @Override
    public TargetRuleset getTargetRuleset() {
        return TargetRuleset.SELF;
    }
}
|
#!/bin/bash
# Copyright 2012 Arnab Ghoshal
# Copyright 2010-2011 Microsoft Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# To be run from ..
# Flat start and monophone training, with delta-delta features.
# This script applies cepstral mean normalization (per speaker).
function error_exit () {
  # Print all arguments (interpreting backslash escapes) to stderr and abort.
  echo -e "$@" >&2
  exit 1
}
function readint () {
  # Extract an integer argument, accepting both "--switch=ARG" and plain
  # "ARG" forms; prints the normalized integer on stdout.
  local retval=${1/#*=/};  # In case --switch=ARG format was used
  # Strip ALL leading zeros: the old '${retval#0*}' pattern removed only a
  # single leading zero (shortest match), so e.g. "007" was rejected.
  while [[ "$retval" == 0?* ]]; do retval=${retval#0}; done
  [[ "$retval" =~ ^-?[1-9][0-9]*$ ]] \
    || error_exit "Argument \"$retval\" not an integer."
  echo $retval
}
# ---- Defaults for command-line options ----
nj=4       # Default number of jobs
stage=-4   # Default starting stage (start with calculating CMN/CVN stats)
qcmd=""    # Options for the submit_jobs.sh script
sjopts=""  # Options for the submit_jobs.sh script

PROG=`basename $0`;
usage="Usage: $PROG [options] <data-dir> <lang-dir> <exp-dir>\n
e.g.: $PROG data/train.1k data/lang exp/mono\n\n
Options:\n
--help\t\tPrint this message and exit\n
--num-jobs INT\tNumber of parallel jobs to run (default=$nj).\n
--qcmd STRING\tCommand for submitting a job to a grid engine (e.g. qsub) including switches.\n
--stage INT\tStarting stage (e.g. -4 for CMN/CVN stats; 2 for iter 2; default=$stage)\n
--sjopts STRING\tOptions for the 'submit_jobs.sh' script\n
";
# ---- Parse command-line options; stop at the first positional argument ----
while [ $# -gt 0 ]; do
  case "${1# *}" in  # ${1# *} strips any leading spaces from the arguments
    --help) echo -e $usage; exit 0 ;;
    --num-jobs)
      shift; nj=`readint $1`;
      [ $nj -lt 1 ] && error_exit "--num-jobs arg '$nj' not positive.";
      shift ;;
    --qcmd)
      shift; qcmd="--qcmd=${1}"; shift ;;
    --sjopts)
      shift; sjopts="$1"; shift ;;
    --stage)
      shift; stage=`readint $1`; shift ;;
    -*) echo "Unknown argument: $1, exiting"; echo -e $usage; exit 1 ;;
    *) break ;;  # end of options: interpreted as the data-dir
  esac
done

# Exactly three positional arguments are required.
if [ $# != 3 ]; then
  error_exit $usage;
fi
data=$1
lang=$2
dir=$3
[ -f path.sh ] && . ./path.sh

# Configuration:
scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
numiters=40    # Number of iterations of training
maxiterinc=30  # Last iter to increase #Gauss on.
numgauss=300   # Initial num-Gauss (must be more than #states=3*phones).
totgauss=1000  # Target #Gaussians.
incgauss=$[($totgauss-$numgauss)/$maxiterinc]  # per-iter increment for #Gauss
realign_iters="1 2 3 4 5 6 7 8 9 10 12 14 16 18 20 23 26 29 32 35 38";
oov_sym=`cat $lang/oov.txt`

mkdir -p $dir/log
# Re-split the data if the split is missing or older than feats.scp.
if [ ! -d $data/split$nj -o $data/split$nj -ot $data/feats.scp ]; then
  split_data.sh $data $nj
fi

if [ $stage -le -3 ]; then
  echo "Computing cepstral mean and variance statistics"
# for n in `get_splits.pl $nj`; do  # do this locally; it's fast.
  submit_jobs.sh "$qcmd" --njobs=$nj --log=$dir/log/cmvnTASK_ID.log $sjopts \
    compute-cmvn-stats --spk2utt=ark:$data/split$nj/TASK_ID/spk2utt \
    scp:$data/split$nj/TASK_ID/feats.scp ark:$dir/TASK_ID.cmvn \
    || error_exit "Computing CMN/CVN stats failed.";
fi

# Feature pipelines (CMVN + delta features): one over the whole data set and
# one per split, with TASK_ID substituted by submit_jobs.sh.
feats="ark:apply-cmvn --norm-vars=false --utt2spk=ark:$data/utt2spk \"ark:cat $dir/*.cmvn|\" scp:$data/feats.scp ark:- | add-deltas ark:- ark:- |"
# for n in `get_splits.pl $nj`; do
# for n in `seq 1 $nj`; do
featspart="ark:apply-cmvn --norm-vars=false --utt2spk=ark:$data/split$nj/TASK_ID/utt2spk ark:$dir/TASK_ID.cmvn scp:$data/split$nj/TASK_ID/feats.scp ark:- | add-deltas ark:- ark:- |"
if [ $stage -le -2 ]; then
  echo "Initializing monophone system."
  if [ -f $lang/phonesets_mono.txt ]; then
    echo "Using shared phones from $lang/phonesets_mono.txt"
    # In recipes with stress and position markers, this pools together
    # the stats for the different versions of the same phone (also for
    # the various silence phones).
    sym2int.pl $lang/phones.txt $lang/phonesets_mono.txt > $dir/phonesets.int
    shared_phones_opt="--shared-phones=$dir/phonesets.int"
  fi
  # 39 is the feature dimension passed to gmm-init-mono -- presumably
  # 13 cepstra x3 after add-deltas; TODO confirm against the feature config.
  gmm-init-mono $shared_phones_opt \
    "--train-feats=$feats subset-feats --n=10 ark:- ark:-|" $lang/topo 39 \
    $dir/0.mdl $dir/tree 2> $dir/log/init.log \
    || error_exit "Monophone model initialization failed.";
fi

if [ $stage -le -1 ]; then
  echo "Compiling training graphs"
  submit_jobs.sh "$qcmd" --njobs=$nj --log=$dir/log/compile_graphsTASK_ID.log \
    $sjopts compile-train-graphs $dir/tree $dir/0.mdl $lang/L.fst \
    "ark:sym2int.pl --map-oov '$oov_sym' --ignore-first-field $lang/words.txt < $data/split$nj/TASK_ID/text|" \
    "ark:|gzip -c >$dir/TASK_ID.fsts.gz" \
    || error_exit "Error compiling training graphs.";
fi
if [ $stage -le 0 ]; then
  echo "Aligning data equally (pass 0)"
# for n in `get_splits.pl $nj`; do
  submit_jobs.sh "$qcmd" --njobs=$nj --log=$dir/log/align.0.TASK_ID.log \
    $sjopts align-equal-compiled "ark:gunzip -c $dir/TASK_ID.fsts.gz|" \
    "$featspart" ark,t,f:- \| \
    gmm-acc-stats-ali --binary=true $dir/0.mdl "$featspart" \
    ark:- $dir/0.TASK_ID.acc \
    || error_exit "Error in pass 0 accumulation";

  # In the following steps, the --min-gaussian-occupancy=3 option is important,
  # otherwise we cannot est "rare" phones and later on, they never align properly.
  gmm-est --min-gaussian-occupancy=3 --mix-up=$numgauss \
    $dir/0.mdl "gmm-sum-accs - $dir/0.*.acc|" $dir/1.mdl \
    2> $dir/log/update.0.log || error_exit "Error in pass 0 estimation.";
  rm $dir/0.*.acc
fi  # Finished 0'th training iteration.
beam=6  # will change to 10 below after 1st pass
x=1
while [ $x -lt $numiters ]; do
  echo "Pass $x"
  if [ $stage -le $x ]; then
    # Re-align only on the iterations listed in realign_iters.
    if echo $realign_iters | grep -w $x >/dev/null; then
      echo "Aligning data"
      submit_jobs.sh "$qcmd" --njobs=$nj --log=$dir/log/align.$x.TASK_ID.log \
        $sjopts gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$[$beam*4] \
        $dir/$x.mdl "ark:gunzip -c $dir/TASK_ID.fsts.gz|" "$featspart" \
        "ark,t:|gzip -c >$dir/TASK_ID.ali.gz" \
        || error_exit "Error in pass $x alignment.";
    fi  # Realign iters
# for n in `get_splits.pl $nj`; do
    # Accumulate per-split stats, then re-estimate the model for iter x+1.
    submit_jobs.sh "$qcmd" --njobs=$nj --log=$dir/log/acc.$x.TASK_ID.log \
      $sjopts gmm-acc-stats-ali $dir/$x.mdl "$featspart" \
      "ark:gunzip -c $dir/TASK_ID.ali.gz|" $dir/$x.TASK_ID.acc \
      || error_exit "Error in pass $x accumulation.";
    submit_jobs.sh "$qcmd" --log=$dir/log/update.$x.log $sjopts \
      gmm-est --write-occs=$dir/$[$x+1].occs --mix-up=$numgauss $dir/$x.mdl \
      "gmm-sum-accs - $dir/$x.*.acc|" $dir/$[$x+1].mdl \
      || error_exit "Error in pass $x extimation.";
    rm -f $dir/$x.mdl $dir/$x.*.acc $dir/$x.occs
  fi  # Completed a training stage.
  # Grow the number of Gaussians each iteration until maxiterinc.
  if [ $x -le $maxiterinc ]; then
    numgauss=$[$numgauss+$incgauss];
  fi
  beam=10
  x=$[$x+1];
done
# Link final.mdl / final.occs to the last iteration's files.
( cd $dir; rm -f final.{mdl,occs}; ln -s $x.mdl final.mdl; \
  ln -s $x.occs final.occs; )

# Print out summary of the warning messages.
for x in $dir/log/*.log; do
  n=`grep WARNING $x | wc -l`;
  if [ $n -ne 0 ]; then echo $n warnings in $x; fi;
done
echo Done

# example of showing the alignments:
# show-alignments data/lang/phones.txt $dir/30.mdl "ark:gunzip -c $dir/0.ali.gz|" | head -4
|
#!/bin/sh -eux
# Provision an XFCE (xubuntu) desktop and enable remote access via xrdp.
apt-get install -y xubuntu-desktop;
apt-get install -y xrdp;
systemctl enable xrdp;
|
<reponame>dogezhou/lil-vue<gh_stars>0
interface Student {
name: string,
age: number,
}
const target: Student = {
name: 'haha',
age: 12,
}
const handler: ProxyHandler<Student> = {
get(obj, prop: keyof Student) {
console.log(`=== get target.${prop} ===`)
return obj[prop]
},
set(obj, prop: keyof Student, value) {
console.log(`=== set target.${prop} = ${value}`)
// @ts-ignore 先解决一下
obj[prop] = value
return true
}
}
const p = new Proxy(target, handler)
console.log(p.name)
console.log(p.age)
p.age = 1
console.log(target) |
// Profile collection
// NOTE(review): these inserts store the constructor functions themselves
// (String, Boolean, ObjectId, ...) as field values rather than example
// documents -- this reads like pseudo-schema documentation, not runnable
// seed data. Confirm the intent before executing against a live database.
db.createCollection('users_profiles')
db.users_profiles.insert({
  name: String,
  email: String,
  password: String
})

// Settings collection (one document per user, keyed by user_id)
db.createCollection('users_settings')
db.users_settings.insert({
  user_id: ObjectId,
  theme: String,
  notifications: Boolean
})

// Activities collection (event log entries referencing a user)
db.createCollection('users_activities')
db.users_activities.insert({
  user_id: ObjectId,
  activity_type: String,
  activity_data: Object
})
# Tiny binary classifier: one input feature -> 8 -> 4 -> 1 sigmoid output.
model = Sequential()
model.add(Dense(8, input_dim=1, activation='relu'))
model.add(Dense(4, activation='relu'))
# Sigmoid output pairs with the binary cross-entropy loss below.
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Assumes X_train / y_train are defined elsewhere (not visible in this file).
model.fit(X_train, y_train, epochs=50, verbose=0)
import coremltools as ct
def convert_and_save_models(tf_model_paths, dimensions):
    """
    Convert TensorFlow saved models to Core ML and save them to disk.

    Parameters
    ----------
    tf_model_paths: list of str
        Paths to the TensorFlow saved models.
    dimensions: list of (height, width) tuples
        Output dimensions, aligned index-for-index with ``tf_model_paths``.

    Returns
    -------
    list of str
        File names of the saved ``.mlmodel`` files.
    """
    saved_mlmodel_paths = []
    # zip() pairs each model with its dimensions instead of manual indexing.
    # (If the lists differ in length, zip truncates instead of raising the
    # IndexError the old index-based loop produced.)
    for tf_model_path, (height, width) in zip(tf_model_paths, dimensions):
        mlmodel = ct.convert(tf_model_path, source='tensorflow')
        saved_mlmodel_path = f"bisenetv2_celebamaskhq_{height}x{width}_float32.mlmodel"
        mlmodel.save(saved_mlmodel_path)
        saved_mlmodel_paths.append(saved_mlmodel_path)
    return saved_mlmodel_paths
# Example usage: convert the 448x448 and 480x640 saved-model variants and
# print the resulting .mlmodel file names.
tf_model_paths = ['saved_model_448x448', 'saved_model_480x640']
dimensions = [(448, 448), (480, 640)]
saved_paths = convert_and_save_models(tf_model_paths, dimensions)
print(saved_paths)
#!/bin/bash
# Guard against non-bash shells: the install function below relies on
# bash-only features such as [[ ]].
if test -z "$BASH_VERSION"; then
  echo "Please run this script using bash, not sh or any other shell." >&2
  exit 1
fi
# Download the carvel tool suite, verify checksums, and install the binaries
# into $K14SIO_INSTALL_BIN_DIR (default /usr/local/bin).
install() {
  set -euo pipefail

  dst_dir="${K14SIO_INSTALL_BIN_DIR:-/usr/local/bin}"

  # Prefer wget; fall back to curl for downloading release binaries.
  if [ -x "$(command -v wget)" ]; then
    dl_bin="wget -nv -O-"
  else
    dl_bin="curl -s -L"
  fi

  shasum -v 1>/dev/null 2>&1 || (echo "Missing shasum binary" && exit 1)

  ytt_version=v0.33.0
  kbld_version=v0.30.0
  kapp_version=v0.37.0
  kwt_version=v0.0.6
  imgpkg_version=v0.8.0
  vendir_version=v0.16.0

  if [[ `uname` == Darwin ]]; then
    binary_type=darwin-amd64
    ytt_checksum=7783d2b8a2087b18179674685dcac0de463cdc50269c676dbd365c590114d0ba
    kbld_checksum=73274d02b0c2837d897c463f820f2c8192e8c3f63fd90c526de5f23d4c6bdec4
    kapp_checksum=da6411b79c66138cd7437beb268675edf2df3c0a4a8be07fb140dd4ebde758c1
    kwt_checksum=555d50d5bed601c2e91f7444b3f44fdc424d721d7da72955725a97f3860e2517
    imgpkg_checksum=11439505cec7cd0c28182cdb2e269368be8c51376fdd57846289a4ad81460fde
    vendir_checksum=3e6af7ae5cd89579f6d153af6b6a4c0ab1cfcac22f5014b983d1d942feb8bab0
  else
    binary_type=linux-amd64
    ytt_checksum=de20cb812f6c5f66feeb66cfcc82607d21c2b726cb54cfd0eaddd112f062e5ca
    kbld_checksum=76c5c572e7a9095256b4c3ae2e076c370ef70ce9ff4eb138662f56828889a00c
    kapp_checksum=f845233deb6c87feac7c82d9b3f5e03ced9a4672abb1a14d4e5b74fe53bc4538
    kwt_checksum=92a1f18be6a8dca15b7537f4cc666713b556630c20c9246b335931a9379196a0
    imgpkg_checksum=d998c1628c5956ffc84b36f23bec6fd0145977e76f3c02a0c28962e6f8f233c2
    vendir_checksum=05cede475c2b947772a9fe552380927054d48158959c530122a150a93bf542dd
  fi

  echo "Installing ${binary_type} binaries..."

  # Download one carvel tool, verify its sha256, and install it.
  #   $1 tool name   $2 release version (vX.Y.Z)   $3 expected sha256
  install_binary() {
    local name="$1" version="$2" checksum="$3"
    echo "Installing ${name}..."
    $dl_bin "https://github.com/vmware-tanzu/carvel-${name}/releases/download/${version}/${name}-${binary_type}" > "/tmp/${name}"
    echo "${checksum}  /tmp/${name}" | shasum -c -
    mv "/tmp/${name}" "${dst_dir}/${name}"
    chmod +x "${dst_dir}/${name}"
    # Versions already carry the leading "v" (e.g. v0.33.0); the old
    # per-tool messages printed "v${version}", yielding "vv0.33.0".
    echo "Installed ${dst_dir}/${name} ${version}"
  }

  install_binary ytt "${ytt_version}" "${ytt_checksum}"
  install_binary kbld "${kbld_version}" "${kbld_checksum}"
  install_binary kapp "${kapp_version}" "${kapp_checksum}"
  install_binary kwt "${kwt_version}" "${kwt_checksum}"
  install_binary imgpkg "${imgpkg_version}" "${imgpkg_checksum}"
  install_binary vendir "${vendir_version}" "${vendir_checksum}"
}
# Kick off the installation.
install
|
# Evaluate the "1024+0+512-shuffled-N" checkpoint 7 on the WikiText-103 raw
# validation set, applying the shuffle_first_third_sixth augmentation and
# scoring only the last sixth of each input (per the flag names below).
python transformers/examples/language-modeling/run_language_modeling.py --model_name_or_path train-outputs/1024+0+512-shuffled-N/7-model --tokenizer_name model-configs/1536-config --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw --output_dir eval-outputs/1024+0+512-shuffled-N/7-512+512+512-shuffled-256 --do_eval --per_device_eval_batch_size 1 --dataloader_drop_last --augmented --augmentation_function shuffle_first_third_sixth --eval_function last_sixth_eval
#!/bin/sh
# Substitute {PLACEHOLDER} tokens in the cache-server config with values from
# the environment, then exec the container's main command.

CONFIG=/usr/local/skywalking/cache-server/config/config.properties

# replace_var NAME VALUE: substitute the first {NAME} token in $CONFIG.
# Double-quoted sed replaces the fragile `eval sed -i -e 's/.../'` the old
# script used; '|' as the delimiter keeps values containing '/' working.
replace_var() {
    echo "replace {$1} to $2"
    sed -i -e "s|{$1}|$2|" "$CONFIG"
}

replace_var REDIS_HOST "$REDIS_HOST"
replace_var REDIS_PORT "$REDIS_PORT"
replace_var MONGO_HOST "$MONGO_HOST"
replace_var MONGO_PORT "$MONGO_PORT"
replace_var ZK_ADDRESS "$ZK_ADDRESS"

exec "$@"
|
#!/bin/bash
# Change trail pictures' permissions
# Interactively updates the access level of each picture attached to a
# trail on the (jatrailmap.com) service, after logging in for a session
# token cookie.
#url="http://localhost:8080"
#url="http://localhost:3000"
url="https://jatrailmap.com:443"

# Require username, password and trail id.
if [ $# -lt 3 ]; then
  echo "usage: $0 username password trailid"
  exit 1
fi
user=$1
pass=$2
trailid=$3
cookie_file=cookie
get_cookie() {
    # Scan a cookie-jar file ($1) field-by-field and print the value of the
    # "token" cookie (columns 6/7 of each line); empty if absent.
    local token=""
    local line name value
    while IFS='' read -r line || [[ -n "$line" ]]; do
        name=`echo $line | awk -F ' ' '{print $6}'`
        value=`echo $line | awk -F ' ' '{print $7}'`
        if [ "$name" == "token" ]; then
            token=$value
        fi
    done < "$1"
    echo $token
}
# Log in and store the session cookie jar, then read the token back out.
echo "curl -s -k -X GET -c $cookie_file $url/login?username=${user}&password=${pass}"
curl -s -k -X GET -c $cookie_file "$url/login?username=${user}&password=${pass}"
if [ $? -ne 0 ]; then
  echo "curl failed";
  exit 1
fi
echo ""
token=$(get_cookie $cookie_file)

# Fetch the trail's track JSON (contains the pics array).
echo "curl -s -k -X GET --cookie token=$token $url/trail/${trailid}/track"
json=`curl -s -k -X GET --cookie token="$token" "$url/trail/${trailid}/track"`
if [ $? -ne 0 ]; then
  echo "curl failed";
  exit 1
fi
ids=`echo $json | jq -r '. | .pics[] | ._id, .imageid, .access, .picturename'`
#echo $ids
#exit
# above returns picture id and image id list
# first line is picture id
# and second line is image id
# The jq call above emits FOUR lines per picture: _id, imageid, access,
# picturename. Walk them with a 1..4 counter; the old 3-state counter went
# out of sync after the first picture because picturename was never consumed.
# NOTE(review): word-splitting on $ids means a picturename containing spaces
# would still desynchronize the walk -- confirm names are single-token.
num=1
for line in $ids; do
    if [ $num -eq 1 ]; then
        num=2
        picid=$line
    elif [ $num -eq 2 ]; then
        num=3
        imgid=$line
    elif [ $num -eq 3 ]; then
        num=4
        current_access=$line
    else
        num=1
        picturename=$line
        groups=()
        access=""
        read -p "Modify permissions of this image $imgid (current access=${current_access}) [y/n]? " resp
        # "$resp" is quoted so an empty answer no longer breaks the test.
        if [ "$resp" == "y" ]; then
            read -p "Set access: Private[p], Group[g] or Trail permissions[t]? " resp
            if [ "$resp" == "p" ]; then
                access="private"
            elif [ "$resp" == "g" ]; then
                access="group"
                read -p "Give a group list, space separated: " -a groups
            elif [ "$resp" == "t" ]; then
                access="public"
            else
                echo "Nothing selected"
            fi
            # Build "groups[]=a&groups[]=b" from the entered group list.
            groups_str=""
            first_group=1
            for i in ${groups[@]}; do
                if [ $first_group -eq 1 ]; then
                    first_group=0
                else
                    groups_str="${groups_str}&"
                fi
                groups_str="${groups_str}groups[]=$i"
            done
            data="access=${access}"
            if [ "$groups_str" != "" ]; then
                data="${data}&${groups_str}"
            fi
            echo "Data: $data"
            read -p "Send request to update picture permissions [y/n]? " resp
            if [ "$resp" == "y" ]; then
                echo "Yes"
                echo "curl -k --globoff -d $data -X PUT --cookie token=$token $url/trail/${trailid}/picture/${picid}/permissions"
                curl -k --globoff -d "$data" -X PUT --cookie token="$token" "$url/trail/${trailid}/picture/${picid}/permissions"
                if [ $? -ne 0 ]; then
                    echo "curl failed";
                    exit 1
                fi
            else
                echo "No"
            fi
        fi
        echo ""
        echo "----------------------------------------------------------------------"
    fi
done
echo "done"
|
#!/bin/sh
# Build a PDF of the guidelines, named after the latest git tag plus a
# build timestamp, into ./build.
file_name="cd-guidelines-$(git describe --abbrev=0 --tags)-$(date +'%Y%m%d-%H%M%S')"
mkdir -p build
gitbook pdf . build/${file_name}.pdf
|
#! /usr/bin/python
import os, sys, wave, struct, pandas
from contextlib import closing
# Help text printed when the script is started without the required path argument.
usage = """
Segments wave file based on csv file provided
Usage:
python SplitWave.py /path/to/csv
Example:
python SplitWave.py ~/Desktop/example.csv
"""
#Recursively get file name
def getFileNames(path, fileExt):
    """Recursively collect files under ``path`` whose extension is in ``fileExt``.

    path    -- directory to scan
    fileExt -- iterable of accepted extensions, e.g. [".wav", ".WAV"]
    Returns a list of full paths to every matching file.
    """
    fileList = []
    for entry in os.listdir(path):
        fullPathFile = os.path.join(path, entry)
        if os.path.isdir(fullPathFile):
            # Bug fix: the original only recursed when the *name's* extension
            # matched the filter (so ordinary directories were never visited)
            # and dropped the fileExt argument in the recursive call, which
            # raised a TypeError whenever recursion was reached.
            fileList.extend(getFileNames(fullPathFile, fileExt))
        else:
            (title, extension) = os.path.splitext(entry)
            if extension in fileExt:
                fileList.append(fullPathFile)
    return fileList
def main(path):
    """Pair each wave file under ``path`` with a CSV of the same title and split it."""
    wave_paths = getFileNames(path, [".wav", ".WAV"])
    csv_paths = getFileNames(path, [".csv"])
    for wave_path in wave_paths:
        _, base_name = os.path.split(wave_path)
        title, _ = os.path.splitext(base_name)
        # A CSV "matches" when the wave file's title appears anywhere in its path.
        matches = [candidate for candidate in csv_paths if title in candidate]
        if matches:
            split(wave_path, matches[0])
        else:
            # Original behavior: abort the whole run on the first unmatched file.
            print('Cannot process:', wave_path)
            sys.exit()
def split(waveFile, csvFile):
    """Cut ``waveFile`` into segments at the times (seconds) listed in ``csvFile``.

    Segments are written as 01.wav, 02.wav, ... into a directory named after
    the wave file's title, next to the original file.  The CSV's first column
    holds the segment start times; each segment runs to the next start time
    (or to the end of the file for the last one).
    """
    (filePath, fileName) = os.path.split(waveFile)
    (title, extension) = os.path.splitext(fileName)
    # Create the per-file output directory on first use.
    if not os.path.exists(filePath+'/'+title):
        os.makedirs(filePath+'/'+title)
        #os.makedirs(filePath+'/'+title+'/stereo')
        #os.makedirs(filePath+'/'+title+'/mono')
    with closing(wave.open(waveFile,'r')) as file:
        frames = file.getnframes()
        rate = float(file.getframerate())
        # Total duration in seconds; used to bound the final segment.
        duration=frames/rate
        # header=None: the CSV is a bare column of start times.
        df = pandas.read_csv(csvFile, index_col=False, header=None)
        durations = df[0]
        for i in range(durations.size):
            # Zero-pad single-digit segment numbers so names sort correctly.
            if i < 9:
                saveStereoName = filePath+'/'+title+"/0"+str(i+1)+extension
                #saveStereoName = filePath+'/'+title+"/stereo/0"+str(i+1)+extension
                #saveMonoName = filePath+'/'+title+"/mono/0"+str(i+1)+extension
            else:
                saveStereoName = filePath+'/'+title+"/"+str(i+1)+extension
                #saveStereoName = filePath+'/'+title+"/stereo/"+str(i+1)+extension
                #saveMonoName = filePath+'/'+title+"/mono/"+str(i+1)+extension
            #Save stereo file to keep originals
            with closing(wave.open(saveStereoName,'w')) as splitFile:
                #get duration to save in nframes
                # Segment length: gap to the next start time, or to end of file.
                if i+1 < durations.size:
                    frameDur = int((durations[i+1]-durations[i])*rate)
                else:
                    frameDur = int((duration - durations[i])*rate)
                #read data from original file
                file.setpos(int(durations[i]*rate))
                saveFrames = file.readframes(frameDur)
                #set parameters for a write file
                # Copy the source file's format so segments match the original.
                splitFile.setnchannels(file.getnchannels())
                splitFile.setsampwidth(file.getsampwidth())
                splitFile.setframerate(rate)
                splitFile.setcomptype(file.getcomptype(),file.getcompname())
                splitFile.setnframes(frameDur)
                #write
                splitFile.writeframes(saveFrames)
            #Save mono version for analysis
            #with closing(wave.open(saveMonoName,'w')) as splitFile:
            #get duration to save in nframes
            # splitFile = wave.open(saveMonoName,'w')
            #
            # if i+1 < durations.size:
            # frameDur = int((durations[i+1]-durations[i])*rate)
            # else:
            # frameDur = int((duration - durations[i])*rate)
            #
            # #read data from original file
            # file.setpos(int(durations[i]*rate))
            # saveFrames = file.readframes(frameDur)
            #
            # total_samples = frameDur * file.getnchannels()
            #
            # if file.getsampwidth() == 1:
            # fmt = "%iB" % total_samples # read unsigned chars
            # elif file.getsampwidth() == 2:
            # fmt = "%ih" % total_samples # read signed 2 byte shorts
            # else:
            # raise ValueError("Only supports 8 and 16 bit audio formats.")
            #
            # saveFrames = struct.unpack(fmt, saveFrames)
            # monoFrames = []
            # for j in xrange(0,len(saveFrames),2):
            # monoFrames.append((saveFrames[j]+saveFrames[j+1])/2)
            #
            # if file.getsampwidth() == 1:
            # fmt = "%iB" % frameDur # read unsigned chars
            # elif file.getsampwidth() == 2:
            # fmt = "%ih" % frameDur # read signed 2 byte shorts
            # else:
            # raise ValueError("Only supports 8 and 16 bit audio formats.")
            #
            # monoFrames = struct.pack(fmt,*monoFrames)
            #
            # #set parameters for a write file
            # splitFile.setnchannels(1)
            # splitFile.setsampwidth(file.getsampwidth())
            # splitFile.setframerate(rate)
            # splitFile.setcomptype(file.getcomptype(),file.getcompname())
            # splitFile.setnframes(frameDur)
            #
            # #write
            # splitFile.writeframes(monoFrames)
            # NOTE(review): redundant — closing() already closed splitFile above.
            splitFile.close()
if __name__ == '__main__':
    # Require exactly one argument: the root directory to scan for wave/csv pairs.
    try:
        path = sys.argv[1]
    except IndexError:
        # Bug fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # only a missing argument should print the usage text.  print(usage) with
        # a single parenthesized argument is valid in both Python 2 and 3,
        # unlike the original `print usage` statement.
        print(usage)
        sys.exit(-1)
    main(path)
|
# Export project settings for Terraform and locate the saved AWS credentials file.
export TF_VAR_project_name="${PROJECT_NAME}"
export TF_VAR_project_environment="${PROJECT_ENVIRONMENT}"
export AWS_CREDENTIALS_FILE="/home/${SHELL_USER}/.aws_credentials"

# First run: offer to store AWS credentials so later shells can source them.
if
[ ! -f "${AWS_CREDENTIALS_FILE}" ]
then
    create_credentials_file=""
    while \
    [ "${create_credentials_file}" != "y" ] && [ "${create_credentials_file}" != "n" ]
    do
        echo -n "Would you like to save your AWS credentials? [y/n]: "
        read create_credentials_file
    done
    if
    [ "${create_credentials_file}" == "n" ]
    then
        # Create an empty file so the user is not prompted again on the next run.
        # Fix: quote the path like every other reference to it.
        touch "${AWS_CREDENTIALS_FILE}"
    else
        access_key_id=""
        while \
        [ "${access_key_id}" == "" ]
        do
            echo -n "Please enter your AWS access key ID: "
            read access_key_id
        done
        secret_key=""
        while \
        [ "${secret_key}" == "" ]
        do
            echo -n "Please enter your AWS secret access key: "
            read secret_key
        done
        echo "export AWS_ACCESS_KEY_ID=\"${access_key_id}\"" >> "${AWS_CREDENTIALS_FILE}"
        echo "export AWS_SECRET_ACCESS_KEY=\"${secret_key}\"" >> "${AWS_CREDENTIALS_FILE}"
        sync
    fi
fi

# Warn user if deploy key is not found
if
[ ! -f /home/${SHELL_USER}/.ssh/deploy ]
then
    # Bug fix: the message interpolated ${SHELL_USER_HOMEDIR}, a different
    # variable than the one used in the check above; report the path that
    # was actually tested.
    echo "** Warning ** private deploy key not found (/home/${SHELL_USER}/.ssh/deploy)"
    echo "** Warning ** While you may still be able to start or stop new machines"
    echo "** Warning ** using Terraform, you will not be able to provision them"
    echo "** Warning ** using Ansible. Request the private key, and put it here:"
    echo "** Warning **"
    echo "** Warning ** /home/${SHELL_USER}/.ssh/deploy"
    echo "** Warning **"
    echo ""
fi

# Warn user if credentials file is empty
if
[ ! -s "${AWS_CREDENTIALS_FILE}" ]
then
    echo "** Warning ** You have not stored your AWS credentials"
    echo "** Warning ** Please make sure to add the following variables"
    echo "** Warning ** To your environment:"
    echo ""
    echo " AWS_ACCESS_KEY_ID"
    echo " AWS_SECRET_ACCESS_KEY"
    echo ""
fi

# Load whatever credentials were stored (a no-op for an empty file).
source "${AWS_CREDENTIALS_FILE}"
|
#!/bin/bash
# Resize the GKE cluster to 4 nodes without an interactive confirmation prompt.
set -ex
export my_zone=us-central1-a
export my_cluster=standard-cluster-1
# Bug fix: gcloud has no "-y" flag; the non-interactive confirmation flag is
# "--quiet" (alias -q), which auto-accepts the resize prompt.
gcloud container clusters resize $my_cluster --zone $my_zone --num-nodes=4 --quiet
|
<gh_stars>0
// Parse a delimited-text string (CSV by default) into an array of row arrays.
// Quoted fields may contain the delimiter, line breaks, and "" escapes.
function CSVToArray ( inputToJson, DelimiterOfCsv ) {
    // Default to a comma when no delimiter is supplied.
    DelimiterOfCsv = (DelimiterOfCsv || ",");
    // One match per field: group 1 = the separator that preceded the field
    // (delimiter, line break, or start of input), group 2 = a quoted field
    // body, group 3 = an unquoted field body.
    var RegexBase = new RegExp(
        (
            "(\\" + DelimiterOfCsv + "|\\r?\\n|\\r|^)" +
            "(?:\"([^\"]*(?:\"\"[^\"]*)*)\"|" +
            "([^\"\\" + DelimiterOfCsv + "\\r\\n]*))"
        ),
        "gi"
    );
    var resultsOfJsonConversion = [[]];
    var resultsOfTheComparation = null;
    // Walk the input one field at a time; the assignment inside the loop
    // condition is intentional (exec returns null when input is exhausted).
    while (resultsOfTheComparation = RegexBase.exec( inputToJson )){
        var matchedDelimiterOfCsv = resultsOfTheComparation[ 1 ];
        // Start a new row when the preceding separator was a line break.
        JudgeTheValuesToAddAnArray
        (matchedDelimiterOfCsv,
        DelimiterOfCsv,
        resultsOfJsonConversion);
        // Unescape the field value (quoted or plain).
        var strMatchedValue =
        ManipulateTheRegex(resultsOfTheComparation);
        // Append the field to the row currently being built (the last one).
        resultsOfJsonConversion[ resultsOfJsonConversion
        .length - 1 ]
        .push( strMatchedValue );
    }
    return( resultsOfJsonConversion );
}
// Append a fresh row to the accumulator when the separator that preceded the
// current field was a line break rather than the column delimiter (an empty
// separator means start-of-input, which also stays on the first row).
function JudgeTheValuesToAddAnArray(matchedDelimiterOfCsv, DelimiterOfCsv, resultsOfJsonConversion) {
    const startsNewRow =
        matchedDelimiterOfCsv.length > 0 &&
        matchedDelimiterOfCsv !== DelimiterOfCsv;
    if (startsNewRow) {
        resultsOfJsonConversion.push([]);
    }
}
// Extract the field text from a regex match produced by CSVToArray:
// capture group 2 holds a quoted field (with "" escapes), group 3 a plain one.
function ManipulateTheRegex(resultsOfTheComparation) {
    const quotedBody = resultsOfTheComparation[2];
    if (quotedBody) {
        // Collapse the CSV escape sequence "" back into a single quote.
        return quotedBody.replace(new RegExp("\"\"", "g"), "\"");
    }
    return resultsOfTheComparation[3];
}
export default CSVToArray; |
<reponame>soyacen/grpc-middleware
package grpcsonybreaker
import (
"context"
"github.com/sony/gobreaker"
"google.golang.org/grpc"
)
// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor that wraps
// every unary call in a sony/gobreaker circuit breaker identified by name.
// Additional Options are applied on top of the default breaker settings.
func UnaryClientInterceptor(name string, opts ...Option) grpc.UnaryClientInterceptor {
	// Idiom fix: Go parameters are camelCase; "Name" read like an exported
	// identifier. Parameter names are not part of the call-site interface.
	st := defaultSettings(name)
	apply(st, opts...)
	cb := gobreaker.NewCircuitBreaker(*st)
	return unaryClientInterceptor(cb)
}
// unaryClientInterceptor adapts a configured circuit breaker to the
// grpc.UnaryClientInterceptor signature: the actual invocation runs inside
// cb.Execute, so invocation failures feed the breaker and an open breaker
// rejects calls before they reach the transport.
func unaryClientInterceptor(cb *gobreaker.CircuitBreaker) grpc.UnaryClientInterceptor {
	return func(
		ctx context.Context,
		method string,
		req interface{},
		reply interface{},
		cc *grpc.ClientConn,
		invoker grpc.UnaryInvoker,
		opts ...grpc.CallOption,
	) error {
		// Execute returns either the invoker's error or the breaker's own
		// error (e.g. when the circuit is open); both are surfaced as-is.
		_, err := cb.Execute(func() (interface{}, error) {
			if callErr := invoker(ctx, method, req, reply, cc, opts...); callErr != nil {
				return nil, callErr
			}
			return nil, nil
		})
		return err
	}
}
|
# Find python file
alias pyfind='find . -name "*.py"'
# Remove python compiled byte-code and mypy cache in either current directory or in a
# list of specified directories
function pyclean() {
    # Default to the current directory; $* joins all arguments passed in.
    ZSH_PYCLEAN_PLACES=${*:-'.'}
    # Delete compiled byte-code files (.pyc/.pyo) and cache directories
    # under each given place.
    find ${ZSH_PYCLEAN_PLACES} -type f -name "*.py[co]" -delete
    find ${ZSH_PYCLEAN_PLACES} -type d -name "__pycache__" -delete
    find ${ZSH_PYCLEAN_PLACES} -type d -name ".mypy_cache" -delete
}
# Grep among .py files
alias pygrep='grep --include="*.py"'
|
package com.ytzb.chart.dataset;
/**
* Created by xinxin.wang on 18/5/2.
*/
/**
 * Data set contract for bar charts: in addition to the base {@link IDataSet}
 * behavior it exposes the two colors used to render bars, one for positive
 * and one for negative values.
 */
public interface IBarDataSet extends IDataSet {
    /** Color used to draw bars representing positive values. */
    int getPositiveColor();
    /** Color used to draw bars representing negative values. */
    int getNegativeColor();
}
|
/**
* <a href="http://www.openolat.org">
* OpenOLAT - Online Learning and Training</a><br>
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br>
* you may not use this file except in compliance with the License.<br>
* You may obtain a copy of the License at the
* <a href="http://www.apache.org/licenses/LICENSE-2.0">Apache homepage</a>
* <p>
* Unless required by applicable law or agreed to in writing,<br>
* software distributed under the License is distributed on an "AS IS" BASIS, <br>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
* See the License for the specific language governing permissions and <br>
* limitations under the License.
* <p>
* Initial code contributed and copyrighted by<br>
* frentix GmbH, http://www.frentix.com
* <p>
*/
package org.olat.admin.user.groups;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.olat.NewControllerFactory;
import org.olat.admin.user.groups.BusinessGroupTableModelWithType.Cols;
import org.olat.basesecurity.GroupRoles;
import org.olat.core.CoreSpringFactory;
import org.olat.core.gui.UserRequest;
import org.olat.core.gui.components.Component;
import org.olat.core.gui.components.link.Link;
import org.olat.core.gui.components.link.LinkFactory;
import org.olat.core.gui.components.table.BooleanColumnDescriptor;
import org.olat.core.gui.components.table.ColumnDescriptor;
import org.olat.core.gui.components.table.CustomCellRenderer;
import org.olat.core.gui.components.table.CustomRenderColumnDescriptor;
import org.olat.core.gui.components.table.DefaultColumnDescriptor;
import org.olat.core.gui.components.table.Table;
import org.olat.core.gui.components.table.TableController;
import org.olat.core.gui.components.table.TableEvent;
import org.olat.core.gui.components.table.TableMultiSelectEvent;
import org.olat.core.gui.components.velocity.VelocityContainer;
import org.olat.core.gui.control.Controller;
import org.olat.core.gui.control.Event;
import org.olat.core.gui.control.WindowControl;
import org.olat.core.gui.control.controller.BasicController;
import org.olat.core.gui.control.generic.closablewrapper.CloseableModalController;
import org.olat.core.gui.control.generic.modal.DialogBoxController;
import org.olat.core.gui.control.generic.modal.DialogBoxUIFactory;
import org.olat.core.id.Identity;
import org.olat.core.id.Roles;
import org.olat.core.util.Util;
import org.olat.core.util.mail.MailHelper;
import org.olat.core.util.mail.MailPackage;
import org.olat.group.BusinessGroup;
import org.olat.group.BusinessGroupMembership;
import org.olat.group.BusinessGroupModule;
import org.olat.group.BusinessGroupService;
import org.olat.group.model.AddToGroupsEvent;
import org.olat.group.model.BusinessGroupMembershipChange;
import org.olat.group.model.SearchBusinessGroupParams;
import org.olat.group.ui.main.BGRoleCellRenderer;
/**
* Description:<br>
* GroupOverviewController creates a model and displays a table with all groups a user is in.
* The following rows are shown: type of group, groupname,
* role of user in group (participant, owner, on waiting list), date of joining the group
*
* <P>
* Initial Date: 22.09.2008 <br>
* @author <NAME>, frentix GmbH, <EMAIL>
*/
public class GroupOverviewController extends BasicController {
	// Row-action identifiers fired by the group table.
	private static final String TABLE_ACTION_LAUNCH = "bgTblLaunch";
	private static final String TABLE_ACTION_UNSUBSCRIBE = "unsubscribe";

	private final VelocityContainer vc;
	private final TableController groupListCtr;
	private final BusinessGroupTableModelWithType tableDataModel;
	private Link addGroups;
	// Yes/no dialog asking whether notification mails should be sent on add.
	private DialogBoxController confirmSendMailBox;
	private CloseableModalController cmc;
	private GroupSearchController groupsCtrl;
	private GroupLeaveDialogBoxController removeFromGrpDlg;

	private final BusinessGroupModule groupModule;
	private final BusinessGroupService businessGroupService;
	// The identity whose group memberships are displayed and edited here.
	private final Identity identity;

	/**
	 * Builds the overview table of all groups the given identity belongs to.
	 *
	 * @param ureq current user request
	 * @param control window control
	 * @param identity the user whose memberships are listed
	 * @param canEdit when true, leave actions, multi-select and an
	 *        "add groups" button are rendered in addition to the table
	 */
	public GroupOverviewController(UserRequest ureq, WindowControl control, Identity identity, boolean canEdit) {
		super(ureq, control, Util.createPackageTranslator(BusinessGroupTableModelWithType.class, ureq.getLocale()));
		setTranslator(Util.createPackageTranslator(BGRoleCellRenderer.class, getLocale(), getTranslator()));
		this.identity = identity;
		groupModule = CoreSpringFactory.getImpl(BusinessGroupModule.class);
		businessGroupService = CoreSpringFactory.getImpl(BusinessGroupService.class);
		vc = createVelocityContainer("groupoverview");
		groupListCtr = new TableController(null, ureq, control, getTranslator());
		listenTo(groupListCtr);
		// Columns: launchable group name, hidden key, first/last visit, role.
		groupListCtr.addColumnDescriptor(new BusinessGroupNameColumnDescriptor(TABLE_ACTION_LAUNCH, getLocale()));
		groupListCtr.addColumnDescriptor(false, new DefaultColumnDescriptor(Cols.key.i18n(), Cols.key.ordinal(), null, getLocale()));
		groupListCtr.addColumnDescriptor(new DefaultColumnDescriptor(Cols.firstTime.i18n(), Cols.firstTime.ordinal(), null, getLocale()));
		groupListCtr.addColumnDescriptor(new DefaultColumnDescriptor(Cols.lastTime.i18n(), Cols.lastTime.ordinal(), null, getLocale()));
		CustomCellRenderer roleRenderer = new BGRoleCellRenderer(getLocale());
		groupListCtr.addColumnDescriptor(new CustomRenderColumnDescriptor(Cols.role.i18n(), Cols.role.ordinal(), null, getLocale(), ColumnDescriptor.ALIGNMENT_LEFT, roleRenderer));
		if(canEdit) {
			// Editors additionally get a per-row leave column, multi-select
			// leave, and a button to add the user to further groups.
			groupListCtr.addColumnDescriptor(new BooleanColumnDescriptor(Cols.allowLeave.i18n(), Cols.allowLeave.ordinal(),
					TABLE_ACTION_UNSUBSCRIBE, translate("table.header.leave"), null));
			groupListCtr.setMultiSelect(true);
			groupListCtr.addMultiSelectAction("table.leave", TABLE_ACTION_UNSUBSCRIBE);
			addGroups = LinkFactory.createButton("add.groups", vc, this);
		}
		tableDataModel = new BusinessGroupTableModelWithType(getTranslator(), 4);
		groupListCtr.setTableDataModel(tableDataModel);
		vc.put("table.groups", groupListCtr.getInitialComponent());
		updateModel();
		putInitialPanel(vc);
	}

	/**
	 * Reloads the table model with every group the identity is related to
	 * as owner, attendee or waiting-list member.
	 */
	private void updateModel() {
		SearchBusinessGroupParams params = new SearchBusinessGroupParams();
		params.setIdentity(identity);
		params.setOwner(true);
		params.setAttendee(true);
		params.setWaiting(true);
		List<BusinessGroup> groups = businessGroupService.findBusinessGroups(params, null, 0, -1);
		List<Long> groupKeysWithMembers;
		if(groups.size() > 50) {
			// More than 50 groups: pass null so the service loads all of the
			// user's memberships in one go instead of filtering by keys.
			groupKeysWithMembers = null;
		} else {
			groupKeysWithMembers = new ArrayList<>(groups.size());
			for(BusinessGroup view:groups) {
				groupKeysWithMembers.add(view.getKey());
			}
		}
		//retrieve all user's membership if there are more than 50 groups
		List<BusinessGroupMembership> groupsAsOwner = businessGroupService.getBusinessGroupMembership(groupKeysWithMembers, identity);
		Map<Long, BusinessGroupMembership> memberships = new HashMap<>();
		for(BusinessGroupMembership membership: groupsAsOwner) {
			memberships.put(membership.getGroupKey(), membership);
		}
		List<GroupOverviewRow> items = new ArrayList<>();
		for(BusinessGroup group:groups) {
			BusinessGroupMembership membership = memberships.get(group.getKey());
			GroupOverviewRow tableItem = new GroupOverviewRow(group, membership, Boolean.TRUE);
			items.add(tableItem);
		}
		tableDataModel.setEntries(items);
		groupListCtr.modelChanged();
	}

	@Override
	protected void doDispose() {
		//
	}

	@Override
	protected void event( UserRequest ureq, Component source, Event event) {
		// "Add groups" button: open the group search in a modal dialog.
		if (source == addGroups){
			groupsCtrl = new GroupSearchController(ureq, getWindowControl());
			listenTo(groupsCtrl);
			cmc = new CloseableModalController(getWindowControl(), translate("add.groups"), groupsCtrl.getInitialComponent(), true, translate("add.groups"), true);
			listenTo(cmc);
			cmc.activate();
		}
	}

	@Override
	protected void event(UserRequest ureq, Controller source, Event event) {
		super.event(ureq, source, event);
		if (source == groupListCtr){
			if (event.getCommand().equals(Table.COMMANDLINK_ROWACTION_CLICKED)) {
				// Single-row action: launch the group or leave it.
				TableEvent te = (TableEvent) event;
				GroupOverviewRow item = tableDataModel.getObject(te.getRowId());
				BusinessGroup currBusinessGroup = businessGroupService.loadBusinessGroup(item.getKey());
				if (currBusinessGroup==null) {
					//group seems to be removed meanwhile, reload table and show error
					showError("group.removed");
					updateModel();
				} else if (TABLE_ACTION_LAUNCH.equals(te.getActionId())) {
					NewControllerFactory.getInstance().launch("[BusinessGroup:" + currBusinessGroup.getKey() + "]", ureq, getWindowControl());
				} else if (TABLE_ACTION_UNSUBSCRIBE.equals(te.getActionId())){
					doLeave(ureq, Collections.singletonList(currBusinessGroup));
				}
			} else if (event instanceof TableMultiSelectEvent) {
				// Multi-select action: leave all selected groups at once.
				TableMultiSelectEvent mse = (TableMultiSelectEvent)event;
				List<GroupOverviewRow> items = tableDataModel.getObjects(mse.getSelection());
				if (TABLE_ACTION_UNSUBSCRIBE.equals(mse.getAction())){
					List<BusinessGroup> groups = toBusinessGroups(items);
					doLeave(ureq, groups);
				}
			}
		} else if (source == groupsCtrl && event instanceof AddToGroupsEvent){
			// Result of the group search dialog: add the user to the chosen groups.
			AddToGroupsEvent groupsEv = (AddToGroupsEvent) event;
			if (groupsEv.isEmpty()) {
				// no groups selected
				showWarning("group.add.result.none");
			} else {
				if (cmc != null) {
					cmc.deactivate();
				}
				// If enrolment mails are mandatory, send them without asking;
				// otherwise let the user decide via a yes/no dialog.
				boolean mailMandatory = groupModule.isMandatoryEnrolmentEmail(ureq.getUserSession().getRoles());
				if(mailMandatory) {
					doAddToGroups(groupsEv, true);
					updateModel();
				} else {
					confirmSendMailBox = activateYesNoDialog(ureq, null, translate("dialog.modal.bg.send.mail"), confirmSendMailBox);
					confirmSendMailBox.setUserObject(groupsEv);
				}
			}
			cleanUpPopups();
		} else if(source == confirmSendMailBox) {
			// Answer of the "send mail?" dialog; proceed with the stored event.
			boolean sendMail = DialogBoxUIFactory.isYesEvent(event) || DialogBoxUIFactory.isOkEvent(event);
			AddToGroupsEvent groupsEv = (AddToGroupsEvent)confirmSendMailBox.getUserObject();
			doAddToGroups(groupsEv, sendMail);
			updateModel();
		} else if (source == removeFromGrpDlg){
			// Confirmation of the leave dialog (may include group deletions).
			if(event == Event.DONE_EVENT) {
				boolean sendMail = removeFromGrpDlg.isSendMail();
				List<BusinessGroup> groupsToDelete = removeFromGrpDlg.getGroupsToDelete();
				List<BusinessGroup> groupsToLeave = removeFromGrpDlg.getGroupsToLeave();
				removeUserFromGroup(ureq, groupsToLeave, groupsToDelete, sendMail);
			}
			cmc.deactivate();
			cleanUpPopups();
		} else if (source == cmc) {
			cleanUpPopups();
		}
	}

	/** Dispose and forget all transient dialog/modal controllers. */
	private void cleanUpPopups() {
		removeAsListenerAndDispose(cmc);
		removeAsListenerAndDispose(removeFromGrpDlg);
		removeAsListenerAndDispose(groupsCtrl);
		cmc = null;
		groupsCtrl = null;
		removeFromGrpDlg = null;
	}

	/**
	 * Applies the membership additions carried by the event: the identity is
	 * added as tutor to the owner groups and as participant to the
	 * participant groups, optionally with notification mails.
	 */
	private void doAddToGroups(AddToGroupsEvent e, boolean sendMail) {
		List<BusinessGroupMembershipChange> changes = new ArrayList<>();
		if(e.getOwnerGroupKeys() != null && !e.getOwnerGroupKeys().isEmpty()) {
			for(Long tutorGroupKey:e.getOwnerGroupKeys()) {
				BusinessGroupMembershipChange change = new BusinessGroupMembershipChange(identity, tutorGroupKey);
				change.setTutor(Boolean.TRUE);
				changes.add(change);
			}
		}
		if(e.getParticipantGroupKeys() != null && !e.getParticipantGroupKeys().isEmpty()) {
			for(Long partGroupKey:e.getParticipantGroupKeys()) {
				BusinessGroupMembershipChange change = new BusinessGroupMembershipChange(identity, partGroupKey);
				change.setParticipant(Boolean.TRUE);
				changes.add(change);
			}
		}
		MailPackage mailing = new MailPackage(sendMail);
		businessGroupService.updateMemberships(getIdentity(), changes, mailing);
	}

	/**
	 * Opens the leave dialog. Groups where this identity is the last member
	 * (single owner with no participants, or single participant with no
	 * owners) are flagged for deletion rather than a plain leave.
	 */
	private void doLeave(UserRequest ureq, List<BusinessGroup> groupsToLeave) {
		List<BusinessGroup> groupsToDelete = new ArrayList<>(1);
		for(BusinessGroup group:groupsToLeave) {
			int numOfOwners = businessGroupService.countMembers(group, GroupRoles.coach.name());
			int numOfParticipants = businessGroupService.countMembers(group, GroupRoles.participant.name());
			if ((numOfOwners == 1 && numOfParticipants == 0) || (numOfOwners == 0 && numOfParticipants == 1)) {
				groupsToDelete.add(group);
			}
		}
		removeFromGrpDlg = new GroupLeaveDialogBoxController(ureq, getWindowControl(), identity, groupsToLeave, groupsToDelete);
		listenTo(removeFromGrpDlg);
		cmc = new CloseableModalController(getWindowControl(), translate("close"), removeFromGrpDlg.getInitialComponent(),
				true, translate("unsubscribe.title"));
		cmc.activate();
		listenTo(cmc);
	}

	/**
	 * Executes the confirmed leave: deletes groups in {@code groupsToDelete}
	 * entirely and removes the identity (as owner and/or participant) from
	 * the remaining groups, optionally sending notification mails.
	 */
	private void removeUserFromGroup(UserRequest ureq, List<BusinessGroup> groupsToLeave, List<BusinessGroup> groupsToDelete, boolean doSendMail) {
		Roles roles = ureq.getUserSession().getRoles();
		for(BusinessGroup group:groupsToLeave) {
			if (groupsToDelete.contains(group)) {
				// really delete the group as it has no more owners/participants
				if(doSendMail) {
					String businessPath = getWindowControl().getBusinessControl().getAsString();
					businessGroupService.deleteBusinessGroupWithMail(group, businessPath, getIdentity(), getLocale());
				} else {
					businessGroupService.deleteBusinessGroup(group);
				}
			} else {
				// 1) remove as owner
				if (businessGroupService.hasRoles(identity, group, GroupRoles.coach.name())) {
					businessGroupService.removeOwners(ureq.getIdentity(), Collections.singletonList(identity), group);
				}
				MailPackage mailing = new MailPackage(doSendMail);
				// 2) remove as participant
				businessGroupService.removeParticipants(getIdentity(), Collections.singletonList(identity), group, mailing);
				MailHelper.printErrorsAndWarnings(mailing.getResult(), getWindowControl(),
						roles.isAdministrator() || roles.isSystemAdmin(), getLocale());
			}
		}
		updateModel();
		// Confirm with a comma-separated list of the groups that were left.
		StringBuilder groupNames = new StringBuilder();
		for(BusinessGroup group:groupsToLeave) {
			if(groupNames.length() > 0) groupNames.append(", ");
			groupNames.append(group.getName());
		}
		showInfo("unsubscribe.successful", groupNames.toString());
	}

	/** Resolves table rows back to their BusinessGroup entities via their keys. */
	private List<BusinessGroup> toBusinessGroups(List<GroupOverviewRow> items) {
		List<Long> groupKeys = new ArrayList<>();
		for(GroupOverviewRow item:items) {
			groupKeys.add(item.getKey());
		}
		return businessGroupService.loadBusinessGroups(groupKeys);
	}
}
|
def permutations(elements):
    """Return every ordering (permutation) of ``elements`` as a list of lists.

    For n input elements there are n! permutations.

    Bug fix: the original enumerated len(elements)**len(elements) lists —
    i.e. the Cartesian product with repetition — so it produced duplicates
    such as [a, a] instead of the actual permutations its name and comments
    describe.
    """
    # The empty sequence has exactly one permutation: the empty ordering.
    if not elements:
        return [[]]
    result = []
    for index, element in enumerate(elements):
        # Fix `element` in front and permute the remaining items.
        rest = list(elements[:index]) + list(elements[index + 1:])
        for tail in permutations(rest):
            result.append([element] + tail)
    return result
permutations = permutations(elements) |
<gh_stars>0
import {createFetcher, FetchArgs} from "./fetcher/fetcher"
import {Zealot, ZealotPayload, SearchFormat} from "./types"
import {createTime} from "./util/time"
import {createZealot} from "./zealot"
import {createZealotMock, ZealotMock} from "./zealot_mock"
import * as zjson from "./zjson"
import * as zng from "./zng"
export {
zjson,
zng,
createZealot,
createZealotMock,
createTime,
Zealot,
ZealotPayload,
SearchFormat,
ZealotMock,
createFetcher,
FetchArgs
}
|
#!/bin/bash
# Launch the test runner in the background so this script returns immediately.
echo "Async!!"
# Quote the forwarded arguments so values containing spaces survive word splitting.
sh test-runner.sh "$1" "$2" &
|
const express = require('express')
const Utils = require('./utils')
const Params = require('./params')
const Middleware = require('./middleware')
const CustomValidator = require('./custom.validator')
const Router = require('./router')
const ModuleChildren = require('./module.children')
/**
 * Wraps an express Router and wires up a named module: its middlewares,
 * custom validators, routes and child modules are all mounted under the
 * module's own URL prefix.
 */
class Module {
  constructor () {
    this.server = express.Router()
  }

  /**
   * Resolve the module class for `name`, build its pipeline
   * (params -> middlewares -> routes -> children) and mount it on this
   * module's router. Throws when no module file can be resolved.
   */
  configure (name) {
    const ClassModule = Utils.requireFile(name, 'module')
    if (!ClassModule) {
      throw new Error(`Module not found: ${name}`)
    }
    const instance = new ClassModule()

    const middleware = new Middleware()
    middleware.configure(instance.middlewares())

    const customValidator = new CustomValidator()
    customValidator.load(instance.validators())

    const router = new Router()
    router.configure(name)

    const children = new ModuleChildren()
    children.configure(instance.children())

    this.server.use(
      instance.url(),
      Params.next(),
      middleware.next(),
      router.next(),
      children.next()
    )
  }

  /** Expose the underlying express Router for mounting by a parent. */
  next () {
    return this.server
  }
}
module.exports = Module |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.iceberg.nessie;
import com.facebook.airlift.configuration.Config;
import com.facebook.airlift.configuration.ConfigDescription;
import javax.validation.constraints.NotEmpty;
import java.util.Optional;
/**
 * Configuration bean for connecting the Iceberg connector to a Nessie
 * catalog server ("iceberg.nessie.*" properties). Unset values are exposed
 * as {@link Optional} so the client-builder code can distinguish "not
 * configured" from an explicit value.
 */
public class NessieConfig
{
    // "main" is used when no explicit Nessie reference (branch/tag) is configured.
    private String defaultReferenceName = "main";
    private String serverUri;
    private AuthenticationType authenticationType;
    private String username;
    private String password;
    private String bearerToken;
    private Integer readTimeoutMillis;
    private Integer connectTimeoutMillis;
    private String clientBuilderImpl;
    // Compression is on unless explicitly disabled via configuration.
    private boolean compressionEnabled = true;

    @NotEmpty(message = "must not be null or empty")
    public String getDefaultReferenceName()
    {
        return defaultReferenceName;
    }

    @Config("iceberg.nessie.ref")
    @ConfigDescription("The default Nessie reference to work on")
    public NessieConfig setDefaultReferenceName(String defaultReferenceName)
    {
        this.defaultReferenceName = defaultReferenceName;
        return this;
    }

    public Optional<String> getServerUri()
    {
        return Optional.ofNullable(serverUri);
    }

    @Config("iceberg.nessie.uri")
    @ConfigDescription("The URI to connect to the Nessie server")
    public NessieConfig setServerUri(String serverUri)
    {
        this.serverUri = serverUri;
        return this;
    }

    @Config("iceberg.nessie.auth.type")
    @ConfigDescription("The authentication type to use. Available values are BASIC | BEARER")
    public NessieConfig setAuthenticationType(AuthenticationType authenticationType)
    {
        this.authenticationType = authenticationType;
        return this;
    }

    public Optional<AuthenticationType> getAuthenticationType()
    {
        return Optional.ofNullable(authenticationType);
    }

    @Config("iceberg.nessie.auth.basic.username")
    @ConfigDescription("The username to use with BASIC authentication")
    public NessieConfig setUsername(String username)
    {
        this.username = username;
        return this;
    }

    public Optional<String> getUsername()
    {
        return Optional.ofNullable(username);
    }

    @Config("iceberg.nessie.auth.basic.password")
    @ConfigDescription("The password to use with BASIC authentication")
    public NessieConfig setPassword(String password)
    {
        this.password = password;
        return this;
    }

    public Optional<String> getPassword()
    {
        return Optional.ofNullable(password);
    }

    @Config("iceberg.nessie.auth.bearer.token")
    @ConfigDescription("The token to use with BEARER authentication")
    public NessieConfig setBearerToken(String bearerToken)
    {
        this.bearerToken = bearerToken;
        return this;
    }

    public Optional<String> getBearerToken()
    {
        return Optional.ofNullable(bearerToken);
    }

    @Config("iceberg.nessie.read-timeout-ms")
    @ConfigDescription("The read timeout in milliseconds for the client")
    public NessieConfig setReadTimeoutMillis(Integer readTimeoutMillis)
    {
        this.readTimeoutMillis = readTimeoutMillis;
        return this;
    }

    public Optional<Integer> getReadTimeoutMillis()
    {
        return Optional.ofNullable(readTimeoutMillis);
    }

    @Config("iceberg.nessie.connect-timeout-ms")
    @ConfigDescription("The connection timeout in milliseconds for the client")
    public NessieConfig setConnectTimeoutMillis(Integer connectTimeoutMillis)
    {
        this.connectTimeoutMillis = connectTimeoutMillis;
        return this;
    }

    public Optional<Integer> getConnectTimeoutMillis()
    {
        return Optional.ofNullable(connectTimeoutMillis);
    }

    @Config("iceberg.nessie.compression-enabled")
    @ConfigDescription("Configure whether compression should be enabled or not. Default: true")
    public NessieConfig setCompressionEnabled(boolean compressionEnabled)
    {
        this.compressionEnabled = compressionEnabled;
        return this;
    }

    public boolean isCompressionEnabled()
    {
        return compressionEnabled;
    }

    /** Convenience inverse of {@link #isCompressionEnabled()}. */
    public boolean isCompressionDisabled()
    {
        return !compressionEnabled;
    }

    @Config("iceberg.nessie.client-builder-impl")
    @ConfigDescription("Configure the custom ClientBuilder implementation class to be used")
    public NessieConfig setClientBuilderImpl(String clientBuilderImpl)
    {
        this.clientBuilderImpl = clientBuilderImpl;
        return this;
    }

    public Optional<String> getClientBuilderImpl()
    {
        return Optional.ofNullable(clientBuilderImpl);
    }
}
|
#!/usr/bin/env bash
#
# Copyright (C) 2016 The CyanogenMod Project
# Copyright (C) 2017 The LineageOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e

DEVICE=grus
VENDOR=xiaomi

# Load extract_utils and do some sanity checks
MY_DIR="${BASH_SOURCE%/*}"
if [[ ! -d "${MY_DIR}" ]]; then MY_DIR="${PWD}"; fi

AOSP_ROOT="${MY_DIR}/../../.."

HELPER="${AOSP_ROOT}/vendor/aosp/build/tools/extract_utils.sh"
if [ ! -f "${HELPER}" ]; then
    echo "Unable to find helper script at ${HELPER}"
    exit 1
fi
source "${HELPER}"

# Default to sanitizing the vendor folder before extraction
CLEAN_VENDOR=false

SECTION=
KANG=

# Parse flags; any non-flag argument is taken as the blob source directory.
while [ "$1" != "" ]; do
    case "$1" in
        -n | --no-cleanup ) CLEAN_VENDOR=false
            ;;
        -k | --kang) KANG="--kang"
            ;;
        -s | --section ) shift
            SECTION="$1"
            CLEAN_VENDOR=false
            ;;
        * ) SRC="$1"
            ;;
    esac
    shift
done

# No source given: pull the blobs straight from a connected device via adb.
if [ -z "${SRC}" ]; then
    SRC=adb
fi

# Initialize the helper
setup_vendor "${DEVICE}" "${VENDOR}" "${AOSP_ROOT}" false "${CLEAN_VENDOR}"

extract "${MY_DIR}/proprietary-files.txt" "${SRC}" ${KANG} --section "${SECTION}"

BLOB_ROOT="$AOSP_ROOT"/vendor/"$VENDOR"/"$DEVICE"/proprietary

# Strip the DT_NEEDED entry for the mtdservice HAL library from the payment
# blobs — presumably because this tree does not ship that library (confirm).
patchelf --remove-needed vendor.xiaomi.hardware.mtdservice@1.0.so "$BLOB_ROOT"/vendor/bin/mlipayd@1.1
patchelf --remove-needed vendor.xiaomi.hardware.mtdservice@1.0.so "$BLOB_ROOT"/vendor/lib64/libmlipay.so
patchelf --remove-needed vendor.xiaomi.hardware.mtdservice@1.0.so "$BLOB_ROOT"/vendor/lib64/libmlipay@1.1.so

"${MY_DIR}/setup-makefiles.sh"
|
<reponame>wolfchinaliu/gameCenter
package weixin.mailmanager;
import org.apache.log4j.Logger;
import org.jeecgframework.core.common.controller.BaseController;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.servlet.ModelAndView;
import weixin.mailmanager.entity.VersionEntity;
import weixin.mailmanager.mailHelper.MailUitl;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.sql.Date;
import java.util.Calendar;
/**
 * Created by xiaona on 2016/5/9.
 * Mail management: version-release mails, daily/weekly/monthly mails,
 * and operations (maintenance) mail sending.
 */
@Scope("prototype")
@Controller
@RequestMapping("/mailManagerController")
public class MailManagerController extends BaseController{
	/**
	 * Logger for this class
	 */
	private static final Logger logger = Logger.getLogger(MailManagerController.class);

	// Status/feedback message; exposed through the getter/setter below.
	private String message;

	public String getMessage() {
		return message;
	}

	public void setMessage(String message) {
		this.message = message;
	}

	/** Opens the version-release mail editor page. */
	@RequestMapping(params = "goMailVersion")
	public ModelAndView goMailVersion(HttpServletRequest request,HttpServletResponse response) {
		return new ModelAndView("weixin/mailmanager/mail_versionManager");
	}

	/**
	 * Builds a {@link VersionEntity} from the request parameters (recipients,
	 * subject, version number, publish date and the added/improved/removed
	 * change lists) and sends it as a version-release mail.
	 *
	 * @return true when the mail was sent successfully
	 */
	@RequestMapping(params = "sendVersionMail")
	@ResponseBody
	public Boolean sendVersionMail(HttpServletRequest request,HttpServletResponse response){
		MailUitl mailUitl = new MailUitl();
		String emailTo=request.getParameter("emailTo");
		String mailSubject= request.getParameter("mailSubject");
		String versionnum=request.getParameter("versionNo");
		String publishtime=request.getParameter("publishDate");
		String addcontent= request.getParameter("addContent");
		String improvecontent= request.getParameter("improveContent");
		String deletecontent= request.getParameter("deleteContent");
		VersionEntity versionEntity= new VersionEntity();
		versionEntity.setMailTo(emailTo);
		versionEntity.setVersionNO(versionnum);
		versionEntity.setPublishDate(publishtime);
		versionEntity.setAddContent(addcontent);
		versionEntity.setImproveContent(improvecontent);
		versionEntity.setDeleteContent(deletecontent);
		versionEntity.setMainsubject(mailSubject);
		boolean result =mailUitl.sendVersionMail(versionEntity);
		return result;
	}

	/** Opens the daily-mail management page. */
	@RequestMapping(params = "goDayMail")
	public ModelAndView goDayMail(HttpServletRequest request,HttpServletResponse response){
		return new ModelAndView("weixin/mailmanager/mail_dayManager");
	}
}
|
def max_subarray_sum(array):
    """Return the maximum sum of any non-empty contiguous subarray.

    Implements Kadane's algorithm: O(n) time, O(1) extra space.

    Args:
        array: A non-empty sequence of numbers.

    Returns:
        The largest sum attainable by a contiguous, non-empty slice.

    Raises:
        ValueError: If ``array`` is empty (previously an IndexError).
    """
    if not array:
        raise ValueError("max_subarray_sum() requires a non-empty array")
    current_max = array[0]
    global_max = array[0]
    for i in range(1, len(array)):
        # Either extend the running subarray or start fresh at this element.
        current_max = max(array[i], current_max + array[i])
        if current_max > global_max:
            global_max = current_max
    return global_max
import React from 'react';
import OuterContainer from './OuterContainer';
import style from '../styles/About.module.css';
import BackButton from './BackButton';
// About page: static, stateless informational screen with a back button.
// NOTE(review): '<NAME>' below looks like a redacted placeholder from the
// dataset anonymizer — restore the developer's real name before shipping.
const About = () => (
  <OuterContainer>
    <BackButton to="/" />
    <div className={style.container}>
      <h2 style={{ textAlign: 'center' }}>About</h2>
      <span style={{ textAlign: 'center' }}>
        Developed with: React & Redux and a bit of HTML & CSS.
        <br />
        This project is open source feel free to contribute to the repository.
        <a href="https://github.com/kazumaki/lol-champion-db" target="_blank" rel="noopener noreferrer">
          GitHub Repository.
        </a>
        <br />
        Developer:
        <a href="https://www.linkedin.com/in/vcamposcarvalho/" target="_blank" rel="noopener noreferrer">
          <NAME>
        </a>
        <br />
        This is a noncommercial project, all the content shown here is property of Riot Games Inc.
      </span>
    </div>
  </OuterContainer>
);

export default About;
|
# Experiment name; checkpoints and logs are written under snap/$name.
name=speaker

# Training flags for r2r_src/train.py (speaker model, CLIP ViT-B/16 image
# features, augmented environments, style embeddings).
flag="--attn soft --angleFeatSize 128
      --feature_size 512
      --feature_extract img_features/CLIP-ViT-B-16-views.tsv
      --aug_env img_features/CLIP-ViT-B-16-views-st-samefilter.tsv
      --train speaker
      --style_embedding style_original.tsv
      --train_env both
      --valid_env original
      --subout max --dropout 0.6 --optim adam --lr 1e-4 --iters 80000 --maxAction 35"

# Create the snapshot directory before training starts writing to it.
mkdir -p snap/$name

# First positional argument selects the GPU, e.g. ./run.sh 0
CUDA_VISIBLE_DEVICES=$1 python r2r_src/train.py $flag --name $name

# Try this for file logging
# CUDA_VISIBLE_DEVICES=$1 unbuffer python r2r_src/train.py $flag --name $name | tee snap/$name/log
|
#!/usr/bin/env bash
# Install (if needed) and configure the Docker daemon, restarting it only
# when the tracked config file has actually changed.
set -e

# Detect whether PID 1 is systemd so we know how to restart the service.
systemd=0
if [ "init-$(ps -o comm= 1)" == "init-systemd" ]; then
    systemd=1
fi

# Ensure docker is installed; 'command -v' is the POSIX-recommended probe
# (replaces the deprecated, non-portable 'which').
command -v docker >/dev/null || ./getdocker.sh

restart=0
target=/opt/docker
mkdir -p "$target"

# Compare the previously deployed config with the local one. 'git diff'
# fails when "$target/config" does not exist yet; the '|| echo "new"'
# fallback treats that as a change.
if [ -n "$(git diff "$target/config" "config" 2>&1 || echo "new")" ]; then
    echo "docker config changed"
    restart=1
fi

cp config /etc/default/docker

if [ "restart$restart" == "restart1" ]; then
    echo "docker restarting"
    if [ "systemd$systemd" == "systemd1" ]; then
        systemctl restart docker
    else
        # Pick 'start' when the daemon is stopped, 'restart' when running.
        cmd=restart
        service docker status | grep start >/dev/null || cmd=start
        service docker status | grep stop >/dev/null || cmd=restart
        service docker "$cmd"
    fi
    # Record the config we just deployed so the next run diffs against it.
    cp config "$target/config"
fi
|
<filename>google/ads/googleads/v8/googleads-ruby/lib/google/ads/googleads/v8/errors/conversion_value_rule_error_pb.rb
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v8/errors/conversion_value_rule_error.proto

require 'google/api/annotations_pb'
require 'google/protobuf'

# Registers the ConversionValueRuleErrorEnum message and its nested
# ConversionValueRuleError enum in the shared generated descriptor pool.
Google::Protobuf::DescriptorPool.generated_pool.build do
  add_file("google/ads/googleads/v8/errors/conversion_value_rule_error.proto", :syntax => :proto3) do
    add_message "google.ads.googleads.v8.errors.ConversionValueRuleErrorEnum" do
    end
    add_enum "google.ads.googleads.v8.errors.ConversionValueRuleErrorEnum.ConversionValueRuleError" do
      value :UNSPECIFIED, 0
      value :UNKNOWN, 1
      value :INVALID_GEO_TARGET_CONSTANT, 2
      value :CONFLICTING_INCLUDED_AND_EXCLUDED_GEO_TARGET, 3
      value :CONFLICTING_CONDITIONS, 4
      value :CANNOT_REMOVE_IF_INCLUDED_IN_VALUE_RULE_SET, 5
      value :CONDITION_NOT_ALLOWED, 6
      value :FIELD_MUST_BE_UNSET, 7
      value :CANNOT_PAUSE_UNLESS_VALUE_RULE_SET_IS_PAUSED, 8
      value :UNTARGETABLE_GEO_TARGET, 9
      value :INVALID_AUDIENCE_USER_LIST, 10
      value :INACCESSIBLE_USER_LIST, 11
      value :INVALID_AUDIENCE_USER_INTEREST, 12
      value :CANNOT_ADD_RULE_WITH_STATUS_REMOVED, 13
    end
  end
end

# Ruby constants resolving to the message class / enum module registered above.
module Google
  module Ads
    module GoogleAds
      module V8
        module Errors
          ConversionValueRuleErrorEnum = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v8.errors.ConversionValueRuleErrorEnum").msgclass
          ConversionValueRuleErrorEnum::ConversionValueRuleError = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v8.errors.ConversionValueRuleErrorEnum.ConversionValueRuleError").enummodule
        end
      end
    end
  end
end
|
# Build the application image and tag it for the ino99 Docker Hub repository.
docker build -t ino99/myweb-k8s:v2 .
|
<reponame>mini-crm/mini-crm
/*
* This file is generated by jOOQ.
*/
package tr.com.minicrm.productgroup.data.postgresql.generated.tables.pojos;
import java.io.Serializable;
/**
 * This class is generated by jOOQ.
 */
@SuppressWarnings({ "all", "unchecked", "rawtypes" })
public class ProductGroupTable implements Serializable {

    private static final long serialVersionUID = 1L;

    // Columns of public.product_group_table.
    private Long pgrId;
    private String groupName;
    private Integer version;

    public ProductGroupTable() {}

    // Copy constructor — shallow copy is sufficient; all fields are immutable.
    public ProductGroupTable(ProductGroupTable value) {
        this.pgrId = value.pgrId;
        this.groupName = value.groupName;
        this.version = value.version;
    }

    public ProductGroupTable(
        Long pgrId,
        String groupName,
        Integer version
    ) {
        this.pgrId = pgrId;
        this.groupName = groupName;
        this.version = version;
    }

    /**
     * Getter for <code>public.product_group_table.pgr_id</code>.
     */
    public Long getPgrId() {
        return this.pgrId;
    }

    /**
     * Setter for <code>public.product_group_table.pgr_id</code>.
     */
    public void setPgrId(Long pgrId) {
        this.pgrId = pgrId;
    }

    /**
     * Getter for <code>public.product_group_table.group_name</code>.
     */
    public String getGroupName() {
        return this.groupName;
    }

    /**
     * Setter for <code>public.product_group_table.group_name</code>.
     */
    public void setGroupName(String groupName) {
        this.groupName = groupName;
    }

    /**
     * Getter for <code>public.product_group_table.version</code>.
     */
    public Integer getVersion() {
        return this.version;
    }

    /**
     * Setter for <code>public.product_group_table.version</code>.
     */
    public void setVersion(Integer version) {
        this.version = version;
    }

    // Debug representation: "ProductGroupTable (pgrId, groupName, version)".
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ProductGroupTable (");

        sb.append(pgrId);
        sb.append(", ").append(groupName);
        sb.append(", ").append(version);

        sb.append(")");
        return sb.toString();
    }
}
|
<filename>src/tree/Tree.js
import React, { Component } from 'react';
import TreeNode from './TreeNode';
import './Tree.css';
class Tree extends Component {
render() {
const nodes = this.props.nodes || [];
const className = `tree ${this.props.className}`;
const TreeNodes = nodes.map((node, index) =>
<TreeNode node={node} key={index} />
);
return (
<ul className={className}>
{TreeNodes}
</ul>
);
}
}
export default Tree;
|
<filename>simulate_noisy_exp_growth.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Simple simulation on fitting exponential growth on noisy discrete samples.

Background: growth of the AY.33 SARS-CoV-2 variant relative to other
lineages, in the Netherlands. So far, we have collected about 400 samples
of this lineage. Hypothesis is a logistic growth rate of 0.0336 per day.
Since AY.33 is only a small part of the total, we can approximate it as
exponential growth.

If n is the number of AY.33 cases on a date, then sqrt(n) is the standard
deviation (Poisson statistics). I will approximate the Poisson distribution
as a Gaussian distribution so that the rule of thumb ±2σ = 95% CI can be used.

2021-10-10 // <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

# How often are samples taken (in days). Exact value doesn't matter much.
dt_sample = 2
# Hypothesized logistic/exponential growth rate per day (see docstring).
k = 0.0336

# the model is: n(t) = a0 * exp(k * t)
ts = np.arange(0, 105, dt_sample)  # 105 days = 15 weeks
txs = np.arange(0, 150, dt_sample)  # extended time series for extrapolation
ns = np.exp(k*ts)  # number of cases found at each time point.
Ntot = 400  # Total number of cases to be found.
# Scale so the total over all sample dates equals Ntot.
a0 = Ntot/ns.sum()
ns *= a0
# Poisson statistics: the standard deviation of a count n is sqrt(n).
sigmas = np.sqrt(ns)

# Fitting exponentials is tricky, so take the log and do linear regression.
# We'll fit on ideal data (no noise) and rely on the curve fitting to
# propagate the sample errors into a covariance matrix.
lgns = np.log(ns)
# Relative error of n equals the absolute error of log(n).
elgns = sigmas/ns

def linfunc(x, a0, a1):
    # Straight line in log space; a1 corresponds to the growth rate k.
    return a0+a1*x

# fitted log(a0), k values.
# NOTE(review): elgns is computed above but NOT passed as sigma= here;
# absolute_sigma=True without a sigma argument means the covariance is not
# scaled by the sample errors, which contradicts the comment above —
# confirm whether sigma=elgns was intended.
(fla0, fk), cov = curve_fit(linfunc, ts, lgns, absolute_sigma=True )

# a0, error a0
fa0 = np.exp(fla0)  # fit value for a0
ela0 = np.sqrt(cov[0, 0])  # error in log(a0)
# Delta method: d(exp x) = exp(x) dx, hence multiply by fa0.
ea0 = fa0 * ela0  # error in fit a0
print(f'Fit a0={fa0:.3g} ± {ea0:.3g} (model: {a0:.3g})')
ek = np.sqrt(cov[1, 1])
print(f'Fit k={fk:.3g} ± {ek:.3g} (model: {k:.3g})')

## generate curves for this covariance matrix (monte-carlo)
# We want the 95% CI, so doing 20 Monte-Carlo runs should cover roughly
# the 95% CI.
n_mc = 20
# random distribution of log(a0) and k, each shape (n_mc)
np.random.seed(2)
la0mcs, kmcs = np.random.multivariate_normal([fla0, fk], cov, size=n_mc).T
curves = np.exp(la0mcs + kmcs*txs.reshape(-1, 1))  # shape (num_t, num_mc)

# sample realizations
# Apply the error to log(n), not n, so that we don't end up with negative
# n values.
ns_sampled = np.exp(lgns + np.random.normal(0, elgns, size=lgns.shape))

plt.close('all')
fig, ax = plt.subplots(tight_layout=True, figsize=(7, 4))
ax.errorbar(ts, ns_sampled, 2*sigmas, fmt='o', label='samples ± 2σ')
# First realization carries the legend label; the rest are unlabeled below.
ax.plot(txs, curves[:, 0], color='gray', zorder=-10, alpha=0.2, linewidth=2,
        label='Realizations')
# other curves without label
ax.plot(txs, curves[:, 1:], color='gray', zorder=-10, alpha=0.2, linewidth=2)
ax.set_ylim(-1, 100)
ax.set_xlabel('Day number')
ax.set_ylabel('Number of cases')
ax.legend()
# ax.set_yscale('log')
fig.show()
|
<gh_stars>0
import { Router } from 'express';
import { celebrate, Segments, Joi } from 'celebrate';
import ensureAuthenticated from '@modules/users/infra/http/middlewares/ensureAuthenticated';
import PetsController from '../controllers/PetsController';
import PetsUserController from '../controllers/PetsUserController';
const petsRouter = Router();
const petsController = new PetsController();
const petsUserController = new PetsUserController();
petsRouter.post('/',
celebrate({
[Segments.BODY]: {
name: Joi.string().allow(''),
species: Joi.string().required(),
age: Joi.string().required(),
description: Joi.string().required(),
gender: Joi.string().required(),
is_adopt: Joi.boolean().required(),
location_lat: Joi.string().required(),
location_lon: Joi.string().required(),
city: Joi.string().required(),
state: Joi.string().required(),
}
}),
ensureAuthenticated,
petsController.create
);
petsRouter.get('/', petsController.index);
petsRouter.get('/find/:id', petsController.find);
petsRouter.get('/me', ensureAuthenticated, petsUserController.index);
petsRouter.put('/:id',
celebrate({
[Segments.BODY]: {
name: Joi.string().allow(''),
species: Joi.string().required(),
age: Joi.string().required(),
description: Joi.string().required(),
gender: Joi.string().required(),
is_adopt: Joi.boolean().required(),
location_lat: Joi.string().required(),
location_lon: Joi.string().required(),
city: Joi.string().required(),
state: Joi.string().required(),
}
}),
ensureAuthenticated,
petsController.update);
petsRouter.delete('/:id', ensureAuthenticated, petsController.delete);
export default petsRouter; |
#!/usr/bin/sh
# Update vendored libraries under <repo>/libraries (currently just hahalib).
BASEDIR=$(dirname "$0")

# Change to the repository root (two levels above this script); abort if the
# directory cannot be entered so later commands don't run in the wrong place.
# cd "$BASEDIR"/libraries/
cd "$BASEDIR"/../.. || exit 1
[ ! -d "libraries" ] && mkdir "libraries"

# Update
# -- hahalib
# Fix: guard the cd — previously a missing checkout made 'git pull' run in
# whatever directory we happened to be in.
cd "$BASEDIR"/../../libraries/hahalib || exit 1
git pull
# pwd
# read
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.