text stringlengths 1 1.05M |
|---|
#include <iostream>
#include <utility>

/// Stores the result of invoking a callable so it can be inspected later.
/// (The original code could not compile: a member function cannot share its
/// enclosing struct's name, the `result_of` expression was ill-formed for
/// non-class types, the static member was never defined, and `add` was
/// never declared.)
template<typename R>
struct Applier {
    /// Last value produced by apply(); value-initialized before first use.
    static R result;

    /// Invoke `f` with `args...` and record its return value in `result`.
    template<typename F, typename... Args>
    static void apply(F&& f, Args&&... args) {
        result = std::forward<F>(f)(std::forward<Args>(args)...);
    }
};

// Out-of-line definition required for the static data member.
template<typename R>
R Applier<R>::result{};

/// Sample callable used by the demo below.
static int add(int a, int b) { return a + b; }

int main() {
    // Example usage
    Applier<int>::apply(add, 3, 4);
    std::cout << Applier<int>::result << std::endl; // Output: 7
    return 0;
}
<reponame>zhpengg/spark<gh_stars>10-100
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.model
import org.apache.spark.annotation.DeveloperApi
/**
* :: DeveloperApi ::
* Information gain statistics for each split
* @param gain information gain value
* @param impurity current node impurity
* @param leftImpurity left node impurity
* @param rightImpurity right node impurity
* @param predict predicted value
* @param prob probability of the label (classification only)
*/
@DeveloperApi
class InformationGainStats(
    val gain: Double,
    val impurity: Double,
    val leftImpurity: Double,
    val rightImpurity: Double,
    val predict: Double,
    val prob: Double = 0.0) extends Serializable {

  /** Human-readable summary of all gain statistics, `%f`-formatted. */
  override def toString = {
    f"gain = $gain%f, impurity = $impurity%f, left impurity = $leftImpurity%f, " +
      f"right impurity = $rightImpurity%f, predict = $predict%f, prob = $prob%f"
  }
}
|
<filename>src/com/md/appuserconnect/core/services/internal/apps/AppRead.java
package com.md.appuserconnect.core.services.internal.apps;
import java.io.IOException;
import java.util.HashMap;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.md.appuserconnect.core.model.QNObjectManager;
import com.md.appuserconnect.core.model.apps.App;
import com.md.appuserconnect.core.utils.RRServices;
@SuppressWarnings("serial")
public class AppRead extends HttpServlet {

    private QNObjectManager objectManager = QNObjectManager.getInstance();

    /**
     * Handles GET requests: looks an App up by its "appid" or "appbundle"
     * request parameter and returns it as JSON when the caller is
     * authorized, otherwise responds with a JSON error.
     */
    public void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
        if (!RRServices.checkUserHasValidAccount(req, resp)) {
            return;
        }
        HashMap<String, Object> params = RRServices.loadInputParameters(req, false);
        String requestedAppId = (String) params.get("appid");
        String requestedBundle = (String) params.get("appbundle");

        // Resolve the app: the explicit app id takes precedence over the bundle id.
        App app = null;
        if (requestedAppId != null) {
            app = objectManager.getAppMgr().getAppByAppID(requestedAppId);
        } else if (requestedBundle != null) {
            app = objectManager.getAppMgr().getAppByBundleID(requestedBundle);
        }

        if (app == null) {
            RRServices.repsondErrorAsJSON(resp, "No app found or no parameters supplied");
        } else if (app.checkAuthorizationOfUser()) {
            App.sendAsJSON(resp, app);
        } else {
            RRServices.repsondErrorAsJSON(resp, "Not authorized to read this app");
        }
    }
}
|
#!/usr/bin/env bash
# Build and verify a Stackage nightly snapshot:
#   * install stack and the matching GHC,
#   * download the stackage-curator binary,
#   * run the curator pipeline against today's nightly target.
set -euxo pipefail

# Directory containing this script; quoted so paths with spaces survive.
ETC=$(cd "$(dirname "$0")" ; pwd)

# GHC version is read from build-constraints.yaml (line: ghc-version: "X.Y.Z").
export GHCVER=$(sed -n "s/^ghc-version: \"\(.*\)\"/\1/p" "$ETC/../build-constraints.yaml")

# Download and unpack the stack executable
mkdir -p ~/.local/bin
export PATH=$HOME/.local/bin:$PATH
curl -L https://www.stackage.org/stack/linux-x86_64 | tar xz --wildcards --strip-components=1 -C ~/.local/bin '*/stack'

# Get new Stackage curator
curl -L "https://download.fpcomplete.com/stackage-curator-2/curator-85b021a53833ff310fc66b3fdc5ca3f7828ce18b.bz2" | bunzip2 > curator
chmod +x curator

# Install GHC (version quoted to avoid word-splitting if it is ever empty).
stack setup "$GHCVER"

# curator's constraint command has target as a required parameter
# because of a different constraints handling in minor LTS version bumps
NIGHTLY="nightly-$(date +%Y-%m-%d)"

# New curator check
./curator update &&
./curator constraints --target="$NIGHTLY" &&
./curator snapshot-incomplete --target="$NIGHTLY" &&
./curator snapshot &&
stack --resolver "ghc-$GHCVER" exec ./curator check-snapshot
|
# -*- coding: utf-8 -*-
from app.definitions.models import Schema, Table, Column, Index, IndexColumn
from app.revisioner.models import Revision
from app.revisioner.collectors import ObjectCollector
from utils.contenttypes import get_content_type_for_model
from utils.postgres.paginators import RawQuerySetPaginator
from utils.postgres.types import ConflictAction
from utils.shortcuts import model_to_dict
class GenericModifyAction(object):
    """Generic mixin for a bulk MODIFIED action based on revisions.

    Subclasses must provide ``model_class`` (the model being revised) and
    ``get_queryset`` (the set of records eligible for modification).
    """

    # Gathers every un-applied MODIFIED (action = 2) revision for the run and
    # resource type, aggregating each resource's change metadata into a single
    # ``changes`` array so one resource is processed in one pass.
    sql = '''
    WITH revisioner_changes AS (
        SELECT resource_id, ARRAY_AGG(metadata) AS "changes"
        FROM revisioner_revision
        WHERE run_id = %(run)s
        AND resource_type_id = %(type)s
        AND action = 2
        GROUP BY resource_id
    )
    SELECT r.*, c.changes
    FROM revisioner_revision r
    JOIN revisioner_changes c
    ON r.resource_id = c.resource_id
    WHERE r.run_id = %(run)s
    AND r.resource_type_id = %(type)s
    AND r.action = 2
    AND r.applied_on IS NULL
    ORDER BY r.created_at
    '''

    def __init__(self, run, datastore, logger, *args, **kwargs):
        # The revisioner run whose MODIFIED revisions will be applied.
        self.run = run
        self.datastore = datastore
        self.logger = logger
        # Content type of the concrete model handled by this action.
        self.content_type = get_content_type_for_model(self.model_class)
        self.collector = self.get_collector()
        # ORM view of the same revisions the raw SQL targets (modified,
        # scoped to this resource type).
        self.revisions = (
            self.run.revisions
            .modified()
            .filter(resource_type=self.content_type)
        )

    def bulk_update(self, rows):
        """Perform a bulk UPSERT based on the provided ident.
        """
        self.model_class.objects.on_conflict(['id'], ConflictAction.UPDATE).bulk_insert(rows)

    def get_paginator_class(self):
        # Raw querysets cannot use the default Django paginator.
        return RawQuerySetPaginator

    def get_revisions(self):
        """Return the raw revision queryset for this run/resource type."""
        return Revision.objects.raw(self.sql, {'run': self.run.pk, 'type': self.content_type.pk})

    def apply(self, batch_size=250):
        """Apply the MODIFIED action in bulk.

        Pages through the aggregated revisions, applies every metadata change
        to the in-memory resource, and upserts each page in one statement.
        """
        revisions = self.get_revisions()
        paginator = self.get_paginator_class()(revisions, batch_size)
        # Resource ids already handled; the SQL groups changes per resource,
        # so a resource only needs to be applied once.
        processed = set()
        for page_num in paginator.page_range:
            page = paginator.get_page(page_num)
            data = []
            for revision in page.object_list:
                if revision.resource_id in processed:
                    continue
                resource = self.collector.find_by_pk(revision.resource_id)
                # Skip revisions whose resource is no longer in scope.
                if not resource:
                    continue
                # Apply each recorded field change via its (possibly custom)
                # modify function.
                for metadata in revision.changes:
                    set_attr = self.get_modify_function(metadata['field'])
                    set_attr(resource, revision=revision, **metadata)
                data.append(model_to_dict(resource, exclude=['columns']))
                processed.add(revision.resource_id)
            if len(data):
                self.bulk_update(data)
            self.logger.info(
                '[{0}] Modified {1} of {2}'.format(self.model_class.__name__, page.end_index(), paginator.count)
            )

    def get_collector(self):
        """Get the object collector.
        """
        return ObjectCollector(self.get_queryset())

    def get_modify_function(self, field):
        """Check if this field has a special handler function, otherwise
        use the default modify function.
        """
        try:
            return getattr(self, 'modify_%s' % field)
        except AttributeError:
            return self.default_modify_function

    def default_modify_function(self, resource, field, new_value, *args, **kwargs):
        """Set the attribute for the provided resource.
        """
        setattr(resource, field, new_value)
class SchemaModifyAction(GenericModifyAction):
    """Applies bulk MODIFIED revisions to Schema records."""

    model_class = Schema

    def get_queryset(self):
        """Get the schemas related to this datastore.
        """
        return self.model_class.objects.filter(datastore_id=self.datastore.id)
class TableModifyAction(GenericModifyAction):
    """Applies bulk MODIFIED revisions to Table records."""

    model_class = Table

    def get_queryset(self):
        """Get the tables related to this datastore.
        """
        return self.model_class.objects.filter(schema__datastore_id=self.datastore.id)

    def modify_schema_id(self, table, field, new_value, revision, *args, **kwargs):
        """If the schema has been renamed, we need to update it manually.

        When ``new_value`` is missing, the owning schema is resolved via the
        revision that created it, and the revision metadata is patched so the
        resolved primary key is persisted for later runs.
        """
        if not new_value:
            schema = Schema.objects.get(
                created_revision_id=revision.parent_resource_revision_id
            )
            new_value = schema.pk
        if new_value != revision.metadata['new_value']:
            revision.metadata['new_value'] = new_value
            revision.save()
        table.schema_id = new_value
class ColumnModifyAction(GenericModifyAction):
    """Applies bulk MODIFIED revisions to Column records."""

    model_class = Column

    def get_queryset(self):
        """Get the columns related to this datastore.
        """
        return self.model_class.objects.filter(table__schema__datastore_id=self.datastore.id)
class IndexModifyAction(GenericModifyAction):
    """Applies bulk MODIFIED revisions to Index records."""

    model_class = Index

    def get_queryset(self):
        """Get the indexes related to this datastore.
        """
        return self.model_class.objects.filter(table__schema__datastore_id=self.datastore.id)

    def modify_columns(self, index, field, new_value, *args, **kwargs):
        """If columns have been updated, we need to reflect that change.

        Upserts an IndexColumn row for every column listed in ``new_value``
        and deletes index columns that are no longer referenced.
        """
        collector = ObjectCollector(Column.objects.filter(table_id=index.table_id))
        index_columns = []
        for column_metadata in new_value:
            # Resolve the referenced column by name within the index's table.
            column = collector.find_by_name(column_metadata['column_name'])
            index_column = {
                'column_id': column.pk,
                'index_id': index.pk,
                'workspace_id': index.workspace_id,
                'ordinal_position': column_metadata['ordinal_position'],
            }
            index_columns.append(index_column)
        # Upsert on the natural key; only ordinal_position may change.
        results = IndexColumn.objects\
            .on_conflict(['workspace_id', 'index_id', 'column_id'], ConflictAction.UPDATE)\
            .bulk_insert(index_columns, only_fields=['ordinal_position'])
        # Remove index columns that were not part of the upsert results.
        index.index_columns.exclude(pk__in=[i['id'] for i in results]).delete()
def get_actions(*args, **kwargs):
    """Map revisable model names to their MODIFIED action handler classes.

    Extra positional/keyword arguments are accepted for interface
    compatibility but ignored. Returns the mapping's ``items()`` view.
    """
    return {
        'Schema': SchemaModifyAction,
        'Table': TableModifyAction,
        'Column': ColumnModifyAction,
        'Index': IndexModifyAction,
    }.items()
|
<reponame>AWolf81/create-next-app<filename>index.js
const createNextApp = require('./lib')
const messages = require('./lib/messages')
module.exports = {
messages: messages,
createNextApp: createNextApp
}
|
<filename>assethub/assets/email.py
from django.core.mail import send_mail
from notifications.signals import notify
from django_comments.models import Comment
# Module-level registry of all known event classes; populated via
# Event.register().
events_registry = []


class Event(object):
    """Describes one kind of notification event and how to email it.

    Concrete subclasses set ``model``/``parent``/``verb`` and implement the
    static hooks below; registered classes are looked up with
    :meth:`Event.get`, where a ``None`` attribute acts as a wildcard.
    """

    model = None
    parent = None
    verb = None

    @staticmethod
    def get_email_subject(instance, parent, actor, recipient):
        raise NotImplementedError

    @staticmethod
    def get_email_body_template(instance, parent, actor, recipient):
        """Should return template name to render email body."""
        raise NotImplementedError

    @staticmethod
    def get_template_data(instance, parent, actor, recipient):
        """Should return a tuple: (template name, context)."""
        raise NotImplementedError

    @staticmethod
    def is_user_subscribed(recipient):
        raise NotImplementedError

    @staticmethod
    def register(event):
        """Add an event class to the module-level registry."""
        events_registry.append(event)

    @staticmethod
    def get(model, parent, verb):
        """Return the first registered event matching (model, parent, verb).

        Returns ``None`` when nothing matches.
        """
        for candidate in events_registry:
            if candidate.model is not None and model != candidate.model:
                continue
            if candidate.parent is not None and parent != candidate.parent:
                continue
            if candidate.verb is not None and verb != candidate.verb:
                continue
            return candidate
        return None
class CommentPosted(Event):
model = Comment
parent = Asset
|
<filename>src/main/java/cn/xfyun/demo/TtsClientApp.java
package cn.xfyun.demo;
import cn.xfyun.api.TtsClient;
import cn.xfyun.config.PropertiesConfig;
import cn.xfyun.model.response.TtsResponse;
import cn.xfyun.service.tts.AbstractTtsWebSocketListener;
import okhttp3.Response;
import okhttp3.WebSocket;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.security.SignatureException;
/**
 * TTS (Text to Speech) demo: streams a piece of text to the speech
 * synthesis WebSocket API and writes the returned audio to a local file.
 *
 * @author yingpeng
 */
public class TtsClientApp {

    // Credentials loaded from the local properties configuration.
    private static final String appId = PropertiesConfig.getAppId();
    private static final String apiKey = PropertiesConfig.getApiKey();
    private static final String apiSecret = PropertiesConfig.getApiSecret();

    // Output audio file, relative to the resources directory.
    private static String filePath = "audio/tts1.mp3";
    private static String resourcePath = "src/main/resources/";

    public static void main(String[] args) throws MalformedURLException, SignatureException, UnsupportedEncodingException, FileNotFoundException {
        // Build a client signed with the application credentials.
        TtsClient ttsClient = new TtsClient.Builder()
                .signature(appId, apiKey, apiSecret)
                .build();
        File file = new File(resourcePath + filePath);
        try {
            // The listener writes synthesized audio into `file`; only the
            // failure callbacks need custom handling here.
            ttsClient.send("语音合成流式接口将文字信息转化为声音信息", new AbstractTtsWebSocketListener(file) {
                @Override
                public void onSuccess(byte[] bytes) {
                }

                @Override
                public void onFail(WebSocket webSocket, Throwable throwable, Response response) {
                    // Transport-level failure.
                    System.out.println(throwable.getMessage());
                }

                @Override
                public void onBusinessFail(WebSocket webSocket, TtsResponse ttsResponse) {
                    // API-level (business) failure.
                    System.out.println(ttsResponse.toString());
                }
            });
        }catch (Exception e){
            System.out.println(e.getMessage());
            System.out.println("错误码查询链接:https://www.xfyun.cn/document/error-code");
        }
    }
}
|
<reponame>RomeroGuDw/wavelet_networks<filename>experiments/UrbanSound8K/parser.py
import argparse
def _get_parser():
# Running settings
parser = argparse.ArgumentParser(description='FMA experiments.')
# Parse
parser.add_argument('--model', type=str, default='RR+_M3', metavar='M', help='type of model to use {, , }')
parser.add_argument('--batch_size', type=int, default=8, metavar='N', help='input batch size for training (default: 8)')
parser.add_argument("--device", type=str, default="cuda", help="Where to deploy the model {cuda, cpu}")
parser.add_argument('--seed', type=int, default=0, metavar='S', help='random seed (default: 0)')
parser.add_argument('--pretrained', default=False, action='store_true', help='use pre-trained model. If false, the model will be trained.')
parser.add_argument('--wavelet_loss', default=False, action='store_true', help='use wavelet loss. If false, it wont be used.')
parser.add_argument('--warm_up', default=False, action='store_true', help='warm-up model. If True, small lr will be used for few epochs.')
parser.add_argument('--extra_comment', type=str, default="")
# Return parser
return parser
def parse_args():
    """Parse ``sys.argv`` using the experiment parser and return the namespace."""
    return _get_parser().parse_args()
"use strict";
(function () {
  const RANDOM_SIZE = 1000;
  const container = document.getElementById("container");

  // Produce the sum of two uniform random values in [0, RANDOM_SIZE),
  // rounded to the nearest integer.
  function randomGenerator() {
    // Add logpoint below to log the generated values here
    ////////////////////////////////////////////////
    const first = Math.random() * RANDOM_SIZE;
    const second = Math.random() * RANDOM_SIZE;
    return Math.round(first + second);
  }

  // Show a fresh random value in the container on every click.
  document.querySelector(".encrypt").addEventListener("click", function () {
    container.innerText = randomGenerator();
  });
})();
|
<gh_stars>0
package com.g4mesoft.platporter.world.tile;
import com.g4mesoft.graphics.ColorPalette;
import com.g4mesoft.graphics.Screen2D;
import com.g4mesoft.platporter.world.PPWorld;
import com.g4mesoft.world.entity.EntityFacing;
public abstract class BeamTile extends Tile {

    /**
     * Draws the animated laser sprite for the tile at (xt, yt).
     * Wall-mounted lasers use sprite row 1 and mirror on X; floor lasers
     * use row 0 and mirror on Y.
     */
    public void renderLaser(PPWorld world, Screen2D screen, int xt, int yt) {
        boolean onWall = isOnWall(world, xt, yt);
        int sy = onWall ? 1 : 0;
        int flags = 0;
        if (isLaserMirrored(world, xt, yt)) {
            if (onWall) {
                flags |= Screen2D.MIRROR_X;
            } else {
                flags |= Screen2D.MIRROR_Y;
            }
        }
        // Cycle through 4 animation frames (sprite columns 4-7), advancing
        // every 3 world ticks, offset per tile via getAnimOffset so
        // neighbouring lasers are not in lockstep.
        int sx = (int)(world.worldTime / 3L + getAnimOffset(world, xt, yt)) % 4 + 4;
        // Tiles are 8x8 pixels; 213 is the palette entry for the beam color
        // (presumably — confirm against ColorPalette).
        screen.drawSprite(xt * 8, yt * 8, sx, sy, ColorPalette.getColors(-1, 213, -1, -1), flags);
    }

    /** @return true when this tile's beam emitter is mounted on a wall. */
    public abstract boolean isOnWall(PPWorld world, int xt, int yt);

    /** @return true when the laser sprite should be mirrored. */
    public abstract boolean isLaserMirrored(PPWorld world, int xt, int yt);

    /** Per-tile animation phase offset, added to the world time. */
    protected abstract int getAnimOffset(PPWorld world, int xt, int yt);

    /**
     * Derives the facing from the wall/mirrored state: bit 0 = on wall,
     * bit 1 = mirrored.
     */
    public EntityFacing getFacing(PPWorld world, int xt, int yt) {
        int index = isOnWall(world, xt, yt) ? 0x01 : 0x00;
        if (isLaserMirrored(world, xt, yt))
            index |= 0x02;
        return EntityFacing.fromIndex(index);
    }
}
|
<filename>database/repositories/config.js
import {Database} from '../database';
/**
 * Repository for key/value configuration entries stored in the `config`
 * table. Names are stored lower-case and reported upper-case.
 */
export default class Config {
    /**
     * @param {Object} database sqlite-style handle exposing get/run/all.
     */
    constructor(database) {
        this.database = database;
    }

    /**
     * Fetch a single configuration row by name (case-insensitive).
     * Resolves with the row (config_name upper-cased) or undefined.
     */
    getConfig(name) {
        return new Promise(resolve => {
            this.database.get('SELECT * FROM config WHERE config_name=?', [name.toLowerCase()], (err, row) => {
                if (row) {
                    row['config_name'] = row['config_name'].toUpperCase();
                }
                resolve(row);
            });
        });
    }

    /**
     * Insert a new configuration entry with a generated id.
     * Rejects with the driver error on failure.
     */
    addConfig(name, value, type) {
        return new Promise((resolve, reject) => {
            this.database.run('INSERT INTO config (config_id, config_name, value, type) VALUES (?,?,?,?)', [
                Database.generateID(20),
                name.toLowerCase(),
                value,
                type
            ], (err, row) => {
                if (err) {
                    // Bug fix: previously rejected with `row` (undefined for an
                    // INSERT), which discarded the actual error information.
                    reject(err);
                }
                else {
                    resolve(row);
                }
            });
        });
    }

    /**
     * Update a configuration value (and optionally its type) by name.
     */
    updateConfig(name, value, type) {
        name = name.toLowerCase();
        return new Promise(resolve => {
            // The SET clause grows a `type=?` placeholder only when a type is
            // supplied, so the parameter list must match that shape.
            this.database.run('UPDATE config SET value=?' + (type !== undefined ? ', type=?' : '') + ' WHERE config_name=?', [value].concat(type !== undefined ? [
                type,
                name
            ] : [name]), (err, row) => {
                resolve(row);
            });
        });
    }

    /**
     * Update a configuration value by primary key.
     */
    updateConfigByID(configID, value) {
        return new Promise((resolve, reject) => {
            // Bug fix: parameters were passed as [configID, value], binding the
            // id to `value=?` and the value to `config_id=?` — the update could
            // never match the intended row.
            this.database.run('UPDATE config SET value=? WHERE config_id=?', [
                value,
                configID
            ], (err, row) => {
                if (err) {
                    return reject(err.message);
                }
                resolve(row);
            });
        });
    }

    /**
     * List configuration rows matching the optional filters; names are
     * upper-cased in the result.
     */
    list(where, orderBy, limit) {
        return new Promise(resolve => {
            const {sql, parameters} = Database.buildQuery('SELECT * FROM config', where, orderBy, limit);
            this.database.all(sql, parameters, (err, rows) => {
                if (rows) {
                    rows.forEach(row => {
                        row['config_name'] = row['config_name'].toUpperCase();
                    });
                }
                resolve(rows);
            });
        });
    }
}
|
#!/bin/bash
# Removes obsolete ZeroOne database files from a datadir, detecting the
# datadir's on-disk format version first.

# Require the datadir as the only argument and work from inside it.
if [ -d "$1" ]; then
cd "$1"
else
echo "Usage: $0 <datadir>" >&2
echo "Removes obsolete ZeroOne database files" >&2
exit 1
fi

# Detect the datadir layout version by its marker files. Later checks
# overwrite LEVEL, so the highest matching version wins.
LEVEL=0
if [ -f wallet.dat -a -f addr.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=1; fi
if [ -f wallet.dat -a -f peers.dat -a -f blkindex.dat -a -f blk0001.dat ]; then LEVEL=2; fi
if [ -f wallet.dat -a -f peers.dat -a -f coins/CURRENT -a -f blktree/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=3; fi
if [ -f wallet.dat -a -f peers.dat -a -f chainstate/CURRENT -a -f blocks/index/CURRENT -a -f blocks/blk00000.dat ]; then LEVEL=4; fi

case $LEVEL in
0)
echo "Error: no ZeroOne datadir detected."
exit 1
;;
1)
echo "Detected old ZeroOne datadir (before 0.7)."
echo "Nothing to do."
exit 0
;;
2)
echo "Detected ZeroOne 0.7 datadir."
;;
3)
echo "Detected ZeroOne pre-0.8 datadir."
;;
4)
echo "Detected ZeroOne 0.8 datadir."
;;
esac

# Accumulate the files/directories that the detected version no longer
# uses. Globs expand here (relative to the datadir); unmatched patterns
# stay literal and are filtered by the -f test below.
FILES=""
DIRS=""
if [ $LEVEL -ge 3 ]; then FILES=$(echo $FILES blk????.dat blkindex.dat); fi
if [ $LEVEL -ge 2 ]; then FILES=$(echo $FILES addr.dat); fi
if [ $LEVEL -ge 4 ]; then DIRS=$(echo $DIRS coins blktree); fi

# Delete obsolete files.
for FILE in $FILES; do
if [ -f $FILE ]; then
echo "Deleting: $FILE"
rm -f $FILE
fi
done

# Delete obsolete directories.
for DIR in $DIRS; do
if [ -d $DIR ]; then
echo "Deleting: $DIR/"
rm -rf $DIR
fi
done

echo "Done."
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package br.sistemalojaroupas.view.stock;
import br.sistemalojaroupas.model.dao.ProductDao;
import br.sistemalojaroupas.model.entities.Product;
import java.awt.Color;
import java.text.ParseException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.swing.JOptionPane;
/**
*
* @author lukas
*/
public class Add_Product extends javax.swing.JDialog {

    // Product whose stock quantity is increased by this dialog.
    private Product product;

    /**
     * Creates new form Add_Product
     *
     * @param parent  owning frame (used for modality and centering)
     * @param modal   whether the dialog blocks its parent
     * @param product product whose quantity will be edited
     */
    public Add_Product(java.awt.Frame parent, boolean modal, Product product) {
        super(parent, modal);
        initComponents();
        // Fully transparent panel so the background image label shows through.
        jPanel1.setBackground(new Color(0,0,0,0));
        this.product = product;
        txt_ProductName.setText(product.getDescription());
    }

    /**
     * This method is called from within the constructor to initialize the form.
     * WARNING: Do NOT modify this code. The content of this method is always
     * regenerated by the Form Editor.
     */
    @SuppressWarnings("unchecked")
    // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
    private void initComponents() {

        jPanel1 = new javax.swing.JPanel();
        jLabel1 = new javax.swing.JLabel();
        jLabel3 = new javax.swing.JLabel();
        btn_Apply = new javax.swing.JLabel();
        btn_Cancel = new javax.swing.JLabel();
        txt_ProductName = new javax.swing.JLabel();
        spinner_Amount = new javax.swing.JSpinner();
        Filter_BackgroundSale = new javax.swing.JLabel();

        setDefaultCloseOperation(javax.swing.WindowConstants.DISPOSE_ON_CLOSE);
        setUndecorated(true);

        jPanel1.setLayout(new org.netbeans.lib.awtextra.AbsoluteLayout());

        jLabel1.setFont(new java.awt.Font("Tahoma", 1, 10)); // NOI18N
        jLabel1.setForeground(new java.awt.Color(102, 102, 102));
        jLabel1.setText("Inserir qtd. :");
        jPanel1.add(jLabel1, new org.netbeans.lib.awtextra.AbsoluteConstraints(30, 50, -1, 26));

        jLabel3.setFont(new java.awt.Font("Tahoma", 1, 10)); // NOI18N
        jLabel3.setForeground(new java.awt.Color(102, 102, 102));
        jLabel3.setText("Produto : ");
        jPanel1.add(jLabel3, new org.netbeans.lib.awtextra.AbsoluteConstraints(50, 20, -1, 20));

        btn_Apply.setIcon(new javax.swing.ImageIcon(getClass().getResource("/img/btn_aplicar.png"))); // NOI18N
        btn_Apply.addMouseListener(new java.awt.event.MouseAdapter() {
            public void mouseClicked(java.awt.event.MouseEvent evt) {
                btn_ApplyMouseClicked(evt);
            }
            public void mouseEntered(java.awt.event.MouseEvent evt) {
                btn_ApplyMouseEntered(evt);
            }
            public void mouseExited(java.awt.event.MouseEvent evt) {
                btn_ApplyMouseExited(evt);
            }
        });
        jPanel1.add(btn_Apply, new org.netbeans.lib.awtextra.AbsoluteConstraints(140, 100, -1, 30));

        btn_Cancel.setIcon(new javax.swing.ImageIcon(getClass().getResource("/img/btn_cancelar.png"))); // NOI18N
        btn_Cancel.addMouseListener(new java.awt.event.MouseAdapter() {
            public void mouseClicked(java.awt.event.MouseEvent evt) {
                btn_CancelMouseClicked(evt);
            }
            public void mouseEntered(java.awt.event.MouseEvent evt) {
                btn_CancelMouseEntered(evt);
            }
            public void mouseExited(java.awt.event.MouseEvent evt) {
                btn_CancelMouseExited(evt);
            }
        });
        jPanel1.add(btn_Cancel, new org.netbeans.lib.awtextra.AbsoluteConstraints(30, 100, -1, 30));

        txt_ProductName.setFont(new java.awt.Font("Tahoma", 1, 10)); // NOI18N
        txt_ProductName.setForeground(new java.awt.Color(102, 0, 153));
        txt_ProductName.setHorizontalAlignment(javax.swing.SwingConstants.CENTER);
        txt_ProductName.setBorder(javax.swing.BorderFactory.createMatteBorder(0, 0, 1, 0, new java.awt.Color(102, 102, 102)));
        jPanel1.add(txt_ProductName, new org.netbeans.lib.awtextra.AbsoluteConstraints(110, 20, 110, 20));

        spinner_Amount.setModel(new javax.swing.SpinnerNumberModel(0, 0, null, 1));
        jPanel1.add(spinner_Amount, new org.netbeans.lib.awtextra.AbsoluteConstraints(110, 50, 70, 26));

        Filter_BackgroundSale.setHorizontalAlignment(javax.swing.SwingConstants.CENTER);
        Filter_BackgroundSale.setIcon(new javax.swing.ImageIcon(getClass().getResource("/img/background_FiltroAplicado_Purple.png"))); // NOI18N
        Filter_BackgroundSale.addMouseListener(new java.awt.event.MouseAdapter() {
            public void mouseClicked(java.awt.event.MouseEvent evt) {
                Filter_BackgroundSaleMouseClicked(evt);
            }
        });
        jPanel1.add(Filter_BackgroundSale, new org.netbeans.lib.awtextra.AbsoluteConstraints(0, 0, 250, 140));

        getContentPane().add(jPanel1, java.awt.BorderLayout.CENTER);

        pack();
        setLocationRelativeTo(null);
    }// </editor-fold>//GEN-END:initComponents

    // Clicks on the background image are intentionally ignored.
    private void Filter_BackgroundSaleMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_Filter_BackgroundSaleMouseClicked
        // TODO add your handling code here:
    }//GEN-LAST:event_Filter_BackgroundSaleMouseClicked

    // Cancel: close the dialog without touching the product.
    private void btn_CancelMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_btn_CancelMouseClicked
        this.dispose();
    }//GEN-LAST:event_btn_CancelMouseClicked

    // Hover effects swap the cancel button image for its darker variant.
    private void btn_CancelMouseEntered(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_btn_CancelMouseEntered
        btn_Cancel.setIcon(new javax.swing.ImageIcon(getClass().getResource("/img/btn_cancelarEscuro.png")));
    }//GEN-LAST:event_btn_CancelMouseEntered

    private void btn_CancelMouseExited(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_btn_CancelMouseExited
        btn_Cancel.setIcon(new javax.swing.ImageIcon(getClass().getResource("/img/btn_cancelar.png")));
    }//GEN-LAST:event_btn_CancelMouseExited

    /**
     * Apply: validates the spinner value and, when positive, adds it to the
     * product's stock, persists the product and closes the dialog.
     */
    private void btn_ApplyMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_btn_ApplyMouseClicked
        try {
            // Commit any text typed into the spinner's editor before reading it.
            spinner_Amount.commitEdit();
        } catch (ParseException ex) {
            // Unparseable input falls back to zero (rejected below).
            spinner_Amount.setValue(0);
        }
        Integer value = (Integer) spinner_Amount.getValue();
        if (value == 0) {
            JOptionPane.showMessageDialog(this, "O valor deve ser maior do que zero para adicionar ao produto.",
                    "Erro", JOptionPane.ERROR_MESSAGE);
            return;
        }
        product.setQuantity(product.getQuantity() + value);
        ProductDao.update(product);
        JOptionPane.showMessageDialog(this, "Quantidade adicionada com sucesso! Nova quantidade do produto: " + product.getQuantity(),
                "Sucesso!", JOptionPane.INFORMATION_MESSAGE);
        this.dispose();
    }//GEN-LAST:event_btn_ApplyMouseClicked

    // Hover effects swap the apply button image for its darker variant.
    private void btn_ApplyMouseEntered(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_btn_ApplyMouseEntered
        btn_Apply.setIcon(new javax.swing.ImageIcon(getClass().getResource("/img/btn_aplicarEscuro.png")));
    }//GEN-LAST:event_btn_ApplyMouseEntered

    private void btn_ApplyMouseExited(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_btn_ApplyMouseExited
        btn_Apply.setIcon(new javax.swing.ImageIcon(getClass().getResource("/img/btn_aplicar.png")));
    }//GEN-LAST:event_btn_ApplyMouseExited

    // Variables declaration - do not modify//GEN-BEGIN:variables
    private javax.swing.JLabel Filter_BackgroundSale;
    private javax.swing.JLabel btn_Apply;
    private javax.swing.JLabel btn_Cancel;
    private javax.swing.JLabel jLabel1;
    private javax.swing.JLabel jLabel3;
    private javax.swing.JPanel jPanel1;
    private javax.swing.JSpinner spinner_Amount;
    private javax.swing.JLabel txt_ProductName;
    // End of variables declaration//GEN-END:variables
}
|
<reponame>MyScript/interactive-ink-examples-ios<gh_stars>10-100
// Copyright @ MyScript. All rights reserved.
#import <UIKit/UIKit.h>
/**
 * Responsible for initializing the editor and managing the different user actions.
 */
@interface MainViewController : UIViewController
@end
|
def words_count(text):
    """Count word occurrences in ``text``.

    Splits on any run of whitespace, so an empty string, consecutive
    spaces, tabs or newlines no longer produce spurious empty-string
    keys (the previous ``split(' ')`` counted ``''`` for those inputs).

    Args:
        text: string to analyse.

    Returns:
        dict mapping each word to its number of occurrences.
    """
    words_dict = {}
    for word in text.split():
        words_dict[word] = words_dict.get(word, 0) + 1
    return words_dict
# Demo: count the words of a sample sentence and show the result.
sample_counts = words_count("A quick brown fox jumps over the lazy dog")
print(sample_counts)
def rob(nums):
    """Return the maximum loot from houses arranged in a circle.

    Adjacent houses cannot both be robbed, and the first and last house
    count as adjacent. Runs in O(n) time and O(1) extra space.
    """
    if not nums:
        return 0
    if len(nums) == 1:
        return nums[0]

    def best_linear(values):
        # Classic linear house-robber: track the best totals that do and
        # do not include the current house.
        take, skip = 0, 0
        for amount in values:
            take, skip = skip + amount, max(take, skip)
        return max(take, skip)

    # Either exclude the last house or exclude the first one.
    return max(best_linear(nums[:-1]), best_linear(nums[1:]))
package com.lbs.api.json.model
/**
 * Deserialized OAuth-style token payload returned by the login endpoint, e.g.:
 *
 * {
 *   "access_token": "<KEY>",
 *   "expires_in": 599,
 *   "refresh_token": "<PASSWORD>",
 *   "token_type": "bearer"
 * }
 */
case class LoginResponse(accessToken: String, expiresIn: Int, refreshToken: String, tokenType: String) extends SerializableJsonObject
|
<reponame>AlexGillott/Agent_Based_Model
# Final model for GEOG5990M online portfolio. Model created in practical
# sessions during the 1st semester.
"""
Create agents to interact with each other and their environment.
Build agents in random location in environment. Give agents list of
other agents. Move agents around environment. Agents eat environment
and Agents communicate with nearby agents. Share resources with
neighbours. Display the environment in animation.
Arguments:
num_of_agents = Number of Agents in the model
num_of_iterations = Number of steps agents move
neighbourhood = Distance around each agent. If other agents within this
distance communicate and share resources.
Returns:
Animation of inputted agents interacting with the environment.
"""
# importing modules
import requests
import csv
import random
import datetime
import bs4
import tkinter
import matplotlib
import matplotlib.backends.backend_tkagg
import matplotlib
import matplotlib.pyplot
import matplotlib.animation
import agentframework
# Web scraping
# Web scraping: fetch the practical's data page and extract the agents'
# starting coordinates from the table cells classed "y" and "x".
r = requests.get\
("http://www.geog.leeds.ac.uk/courses/computing/practicals/python/\
agent-framework/part9/data.html")
content = r.text
soup = bs4.BeautifulSoup(content, 'html.parser')
td_ys = soup.find_all(attrs={"class" : "y"})
td_xs = soup.find_all(attrs={"class" : "x"})
print(td_ys)
print(td_xs)
# Function for getting time
def getTimeMS():
    """Return the current time of day expressed in microseconds.

    Note: only hour/minute/second/microsecond are combined, so the value
    resets daily; intervals spanning midnight would be wrong — TODO confirm
    this is acceptable for simple run-time measurement.
    """
    now = datetime.datetime.now()
    whole_seconds = now.second + now.minute * 60 + now.hour * 3600
    return now.microsecond + whole_seconds * 1000000
# Record the start time for the run-time measurement printed at the end.
start = getTimeMS()

# Start of Agent Based Modelling code
# Import csv file: each numeric row of in.txt becomes one row of the
# environment grid.
f = open('in.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
environment = []
for row in reader:
    rowlist = []
    for item in row:
        rowlist.append(item)
    environment.append(rowlist)

# Creating number of agents, iterations and neighbourhood variables
num_of_agents = 10
num_of_iterations = 10
neighbourhood = 20
agents = []

# Figure the animation frames are drawn into.
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])

# Make the agents, seeding each with coordinates scraped from the web page.
for i in range(num_of_agents):
    y = int(td_ys[i].text)
    x = int(td_xs[i].text)
    agents.append(agentframework.Agent(environment, agents, neighbourhood,\
        y, x))

# 1st part of stopping condition code. Continued line ""
carry_on = True
def update(frame_number):
    """Create frames for animation.

    Moves every agent for num_of_iterations steps (eating and sharing as
    they go), checks the stopping condition, then redraws the environment
    and the agents' positions.

    Argument:
        frame_number: frame index supplied by FuncAnimation (not used
            directly).
    Returns:
        A frame for each iteration of the model.
    """
    fig.clear()
    global carry_on
    # Moving agents: shuffle each step so no agent always moves first.
    for j in range(num_of_iterations):
        random.shuffle(agents)
        for i in range(num_of_agents):
            agents[i].move()
            agents[i].eat()
            agents[i].share_with_neighbours(neighbourhood)
    # Stopping condition: halt once the agents' combined store reaches 100000.
    total = 0
    for agent in agents:
        total += agent.store
    if total >= 100000 :
        carry_on = False
        print ("stopping conditon met")
    # plotting co-ordinates
    for i in range(num_of_agents):
        matplotlib.pyplot.scatter(agents[i].x,agents[i].y)
        print(agents[i].x,agents[i].y)
    matplotlib.pyplot.ylim(0, 99)
    matplotlib.pyplot.xlim(0, 99)
    matplotlib.pyplot.imshow(environment)
# For stopping condition to work.
def gen_function(b = [0]):
    """Yield animation frame indices until the iteration budget is spent
    or the stopping condition clears ``carry_on``.

    Requires no setup.
    """
    global carry_on
    frame = 0
    while frame < num_of_iterations and carry_on:
        yield frame
        frame = frame + 1
# Total amount stored by agents saved to file "store.txt" (appended, one
# total per run).
total = 0
for agent in agents:
    total += agent.store
with open('store.txt', 'a') as f3:
    f3.write(str(total) + "\n")

# Write out the environment grid as a CSV file.
f2 = open('Environment.txt', 'w', newline='')
writer = csv.writer(f2, delimiter=',')
for row in environment:
    writer.writerow(row)
f2.close()
# Function to run model animation
def run():
    """
    Run the animation.

    Builds the FuncAnimation (frames come from gen_function so the model
    can stop early) and redraws the embedded Tk canvas.
    Requires no setup.
    """
    animation = matplotlib.animation.FuncAnimation(fig, update,\
        frames=gen_function, repeat=False)
    # NOTE(review): FigureCanvasTkAgg.show() was replaced by draw() in newer
    # Matplotlib releases — confirm against the pinned version.
    canvas.show()
# Main window GUI
root = tkinter.Tk()
root.wm_title("Model")
canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
menu_bar = tkinter.Menu(root)
root.config(menu=menu_bar)
model_menu = tkinter.Menu(menu_bar)
menu_bar.add_cascade(label="Model", menu=model_menu)
model_menu.add_command(label="Run model", command=run)
# End of Agent Based Modelling code
# Second part of get time code
end = getTimeMS()
print ("time =" + str(end - start))
# Proving that list of agents put into each agent
print (agents[2])
print (num_of_iterations)
tkinter.mainloop() |
<gh_stars>1-10
package cucumber.runtime;
import gherkin.I18n;
import gherkin.formatter.Argument;
import gherkin.formatter.model.Step;
import java.lang.reflect.Type;
import java.util.List;
/**
 * Binds a Gherkin step to the executable code that implements it, and
 * describes that binding (location, pattern, parameters) for reporting.
 */
public interface StepDefinition {
    /**
     * Returns a list of arguments. Return null if the step definition
     * doesn't match at all. Return an empty List if it matches with 0 arguments
     * and bigger sizes if it matches several.
     *
     * @param step the Gherkin step to match against this definition.
     */
    List<Argument> matchedArguments(Step step);

    /**
     * The source line where the step definition is defined.
     * Example: foo/bar/Zap.brainfuck:42
     *
     * @param detail true if extra detailed location information should be included.
     */
    String getLocation(boolean detail);

    /**
     * How many declared parameters this step definition has. Returns null if unknown.
     */
    Integer getParameterCount();

    /**
     * The parameter type at index n. A hint about the raw parameter type is passed to make
     * it easier for the implementation to make a guess based on runtime information.
     * <p/>
     * Statically typed languages will typically ignore the {@code argumentType} while dynamically
     * typed ones will use it to infer a "good type". It's also ok to return null.
     *
     * @param n zero-based parameter index.
     * @param argumentType hint about the raw type of the matched argument.
     */
    ParameterInfo getParameterType(int n, Type argumentType) throws IndexOutOfBoundsException;

    /**
     * Invokes the step definition. The method should raise a Throwable
     * if the invocation fails, which will cause the step to fail.
     *
     * @param i18n language of the feature file, for locale-aware conversions.
     * @param args arguments produced by {@link #matchedArguments(Step)}.
     */
    void execute(I18n i18n, Object[] args) throws Throwable;

    /**
     * Return true if this matches the location. This is used to filter
     * stack traces.
     */
    boolean isDefinedAt(StackTraceElement stackTraceElement); // TODO: redundant with getLocation?

    /**
     * @return the pattern associated with this instance. Used for error reporting only.
     */
    String getPattern();

    /**
     * @return true if this instance is scoped to a single scenario, or false if it can be reused across scenarios.
     */
    boolean isScenarioScoped();
}
|
import argparse
from ucsmsdk.ucshandle import UcsHandle
def main():
    """Parse CLI arguments, connect to UCSM and add/remove the given VLAN.

    Always logs out of the UCSM session -- even if the VLAN update
    raises -- so server-side sessions are not leaked (UCSM enforces a
    per-user session limit).
    """
    parser = argparse.ArgumentParser(description='Manage VLAN assignments for service profiles in UCSM')
    parser.add_argument('vlan', type=int, help='VLAN ID to assign or remove')
    parser.add_argument('--remove', action='store_true', help='Remove the VLAN assignment from the service profile')
    parser.add_argument('ucsm_ip', help='UCSM IP address')
    parser.add_argument('username', help='UCSM username')
    parser.add_argument('password', help='UCSM password')
    args = parser.parse_args()
    handle = connect_to_ucsm(args)
    try:
        assign_vlan_to_sp_vnic(handle, args)
    finally:
        # Release the UCSM session regardless of success or failure.
        handle.logout()
def connect_to_ucsm(args):
    """Open and return an authenticated UcsHandle session.

    Uses args.ucsm_ip, args.username and args.password from the parsed
    command line.
    """
    session = UcsHandle(args.ucsm_ip, args.username, args.password)
    session.login()
    return session
def assign_vlan_to_sp_vnic(handle, args,
                           service_profile_name='example_service_profile',
                           vnic_name='example_vnic'):
    """Add or remove a VLAN on a service profile's vNIC.

    :param handle: an authenticated UcsHandle session.
    :param args: parsed CLI namespace; uses args.vlan (int) and
        args.remove (bool).
    :param service_profile_name: target service profile. Previously a
        hard-coded placeholder; now overridable while keeping the old
        default for backward compatibility.
    :param vnic_name: target vNIC on the service profile (same note).

    NOTE(review): add_vlan_to_vnic / remove_vlan_from_vnic are not part
    of the stock ucsmsdk UcsHandle API -- presumably provided by a local
    helper or mixin; confirm against the ucsmsdk version in use.
    """
    if args.remove:
        handle.remove_vlan_from_vnic(service_profile_name, vnic_name, args.vlan)
        print(f"VLAN {args.vlan} removed from vNIC {vnic_name} of service profile {service_profile_name}")
    else:
        handle.add_vlan_to_vnic(service_profile_name, vnic_name, args.vlan)
        print(f"VLAN {args.vlan} assigned to vNIC {vnic_name} of service profile {service_profile_name}")
# Run the CLI only when executed as a script, not on import.
if __name__ == '__main__':
    main()
<gh_stars>0
package de.busse_apps.bakcalculator.common;
/**
 * Blood-alcohol (Widmark-style) calculator.
 * Set all inputs via the setters, call {@link #calculate()}, then read the
 * results through the getters. All calculate_* methods implement exactly
 * the formulas documented on them (previously they were 1.0 stubs).
 */
public class Calculator {

    /** Sex constant: male (selects the male total-body-water formula). */
    public static final int SEX_MALE = 1;
    /** Sex constant: female (selects the female total-body-water formula). */
    public static final int SEX_FEMALE = 0;

    // Calculation inputs (set via the setters below)
    private int weight;              // body weight in kilograms (kg)
    private int height;              // body height in centimetres (cm)
    private int age;                 // age in years
    private int sex;                 // SEX_MALE or SEX_FEMALE
    private double volume_drunk;     // consumed drink volume in litres (l)
    private double percent_alcohol;  // alcohol by volume in percent, e.g. 5.0

    // Results (populated by calculate())
    private double volume_alcohol;            // pure alcohol consumed, in grams (g)
    private double body_water_content;        // total body water ("GKW")
    private double blood_distribution_factor; // blood distribution factor r
    private double blood_alcohol_content;     // blood alcohol, in per mille
    private double time;                      // hours until fully eliminated

    private double rho_alcohol = 0.8;  // density of ethanol in kg/l
    private double rho_blood = 1.055;  // density of blood in kg/l

    /** Average elimination rate, in per mille per hour. */
    private static final double ELIMINATION_RATE = 0.15;

    /**
     * Mass of pure alcohol consumed, in grams.
     * A = V * (e / 100) * rho_alcohol * 1000
     * V: consumed volume (l); e: alcohol by volume (%); rho_alcohol: kg/l.
     */
    private double calculate_volumeAlcohol() {
        return volume_drunk * (percent_alcohol / 100.0) * rho_alcohol * 1000.0;
    }

    /**
     * Total body water ("GKW"), sex dependent (Watson-type formula).
     * GKW(m) = 2.447 - 0.09516*t + 0.1074*h + 0.3362*m
     * GKW(f) = 0.203 - 0.07*t + 0.1069*h + 0.2466*m
     * t: age (years); h: height (cm); m: weight (kg).
     */
    private double calculate_bodywatercontent() {
        if (sex == SEX_MALE) {
            return 2.447 - 0.09516 * age + 0.1074 * height + 0.3362 * weight;
        }
        return 0.203 - 0.07 * age + 0.1069 * height + 0.2466 * weight;
    }

    /**
     * Blood distribution factor.
     * r = (rho_blood * GKW) / (0.8 * m)
     */
    private double calculate_bloodDistributionFactor() {
        return (rho_blood * body_water_content) / (0.8 * weight);
    }

    /**
     * Blood alcohol concentration in per mille.
     * c = A / (m * r), reduced by 10% because the alcohol is not fully
     * absorbed (A in grams, m in kg yields g/kg, i.e. per mille).
     */
    private double calculate_bloodAlcoholContent() {
        return 0.9 * volume_alcohol / (weight * blood_distribution_factor);
    }

    /**
     * Hours until the alcohol is no longer detectable, assuming an average
     * elimination rate of 0.15 per mille per hour.
     */
    private double calculate_time() {
        return blood_alcohol_content / ELIMINATION_RATE;
    }

    /**
     * Main entry point: runs the full calculation chain. The order matters,
     * because each step consumes the previous step's result field.
     * Call all setters first; read results via the getters afterwards.
     */
    public void calculate() {
        volume_alcohol = calculate_volumeAlcohol();
        body_water_content = calculate_bodywatercontent();
        blood_distribution_factor = calculate_bloodDistributionFactor();
        blood_alcohol_content = calculate_bloodAlcoholContent();
        time = calculate_time();
    }

    public void set_weight(int weight) {
        this.weight = weight;
    }

    public void set_height(int height) {
        this.height = height;
    }

    public void set_age(int age) {
        this.age = age;
    }

    public void set_sex(int sex) {
        this.sex = sex;
    }

    public void set_volume_drunk(double volume) {
        this.volume_drunk = volume;
    }

    public void set_percent_alcohol(double percent) {
        this.percent_alcohol = percent;
    }

    // Result accessors (previously the computed values were private and
    // unreachable from outside the class).
    public double get_volume_alcohol() {
        return volume_alcohol;
    }

    public double get_body_water_content() {
        return body_water_content;
    }

    public double get_blood_distribution_factor() {
        return blood_distribution_factor;
    }

    public double get_blood_alcohol_content() {
        return blood_alcohol_content;
    }

    public double get_time() {
        return time;
    }
}
|
#!/usr/bin/env bash
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Exit on the first error and on use of any unset variable.
set -eu
# Directory containing this script; used below to locate quickstart.cc.
BINDIR=$(dirname "$0")
readonly BINDIR
# The initial portion of the preamble needs variable expansion:
cat <<EOF
# Google Cloud Platform C++ Client Libraries
<!-- This file is automatically generated by ci/test-markdown/$(basename "$0") -->
EOF
cat <<"EOF"
[![GCB CI status][gcb-clang-tidy-shield]][gcb-clang-tidy-link]
[![Kokoro CI status][kokoro-windows-cmake-shield]][kokoro-windows-cmake-link]
[![Kokoro CI status][kokoro-macos-cmake-shield]][kokoro-macos-cmake-link]
[![Codecov Coverage status][codecov-shield]][codecov-link]<br>
[![GCB CI status][gcb-asan-shield]][gcb-asan-link]
[![Kokoro CI status][kokoro-windows-bazel-shield]][kokoro-windows-bazel-link]
[![Kokoro CI status][kokoro-macos-bazel-shield]][kokoro-macos-bazel-link]
[kokoro-windows-cmake-shield]: https://storage.googleapis.com/cloud-cpp-kokoro-status/kokoro-windows-cmake.svg
[kokoro-windows-cmake-link]: https://storage.googleapis.com/cloud-cpp-kokoro-status/kokoro-windows-cmake-link.html
[kokoro-windows-bazel-shield]: https://storage.googleapis.com/cloud-cpp-kokoro-status/kokoro-windows-bazel.svg
[kokoro-windows-bazel-link]: https://storage.googleapis.com/cloud-cpp-kokoro-status/kokoro-windows-bazel-link.html
[kokoro-macos-cmake-shield]: https://storage.googleapis.com/cloud-cpp-kokoro-status/macos/kokoro-cmake-vcpkg.svg
[kokoro-macos-cmake-link]: https://storage.googleapis.com/cloud-cpp-kokoro-status/macos/kokoro-cmake-vcpkg-link.html
[kokoro-macos-bazel-shield]: https://storage.googleapis.com/cloud-cpp-kokoro-status/macos/kokoro-bazel.svg
[kokoro-macos-bazel-link]: https://storage.googleapis.com/cloud-cpp-kokoro-status/macos/kokoro-bazel-link.html
[codecov-shield]: https://codecov.io/gh/googleapis/google-cloud-cpp/branch/main/graph/badge.svg
[codecov-link]: https://codecov.io/gh/googleapis/google-cloud-cpp
[gcb-clang-tidy-shield]: https://storage.googleapis.com/cloud-cpp-community-publiclogs/badges/google-cloud-cpp/main/clang-tidy.svg
[gcb-clang-tidy-link]: https://storage.googleapis.com/cloud-cpp-community-publiclogs/badges/google-cloud-cpp/main/clang-tidy.html
[gcb-asan-shield]: https://storage.googleapis.com/cloud-cpp-community-publiclogs/badges/google-cloud-cpp/main/asan.svg
[gcb-asan-link]: https://storage.googleapis.com/cloud-cpp-community-publiclogs/badges/google-cloud-cpp/main/asan.html
This repository contains idiomatic C++ client libraries for the following
[Google Cloud Platform](https://cloud.google.com/) services.
* [Google Cloud Bigtable](google/cloud/bigtable/README.md) [[quickstart]](google/cloud/bigtable/quickstart/README.md)
* [Google Cloud Spanner](google/cloud/spanner/README.md) [[quickstart]](google/cloud/spanner/quickstart/README.md)
* [Google Cloud Pub/Sub](google/cloud/pubsub/README.md) [[quickstart]](google/cloud/pubsub/quickstart/README.md)
* [Google Cloud Storage](google/cloud/storage/README.md) [[quickstart]](google/cloud/storage/quickstart/README.md)
See each library's `README.md` file for more information about:
* Where to find the documentation for the library and the service.
* How to get started using the library.
* How to incorporate the library into your build system.
* The library's support status if not Generally Available (GA); unless noted in
a library's `README.md`, these libraries are all GA and supported by Google.
## Building and Installing
This is a quickstart guide for developers wanting to compile the libraries and
run the examples included with the libraries.
* Packaging maintainers or developers that prefer to install the library in a
fixed directory (such as `/usr/local` or `/opt`) should consult the
[packaging guide](/doc/packaging.md).
* Developers wanting to use the libraries as part of a larger CMake or Bazel
project should consult the [quickstart guides](#quickstart) for the library
or libraries they want to use.
* Developers wanting to compile the library just to run some of the examples or
tests should read the current document.
* Contributors and developers to `google-cloud-cpp` should consult the guide to
[setup a development workstation][howto-setup-dev-workstation].
[howto-setup-dev-workstation]: /doc/contributor/howto-guide-setup-development-workstation.md
### Building with Bazel
This library requires Bazel >= 3.0. From the top-level directory, run the usual
commands.
```shell
bazel build //...
```
### Building with CMake
This library requires CMake >= 3.5. If you are planning to install the libraries
please consult the [packaging guide](/doc/packaging.md), these instructions will
**NOT** produce artifacts that you can put in `/usr/local`, or share with your
colleagues.
From the top-level directory of `google-cloud-cpp` run these commands:
```shell
git -C $HOME clone https://github.com/microsoft/vcpkg.git
$HOME/vcpkg/bootstrap-vcpkg.sh
cmake -H. -Bcmake-out/ -DCMAKE_TOOLCHAIN_FILE=$HOME/vcpkg/scripts/buildsystems/vcpkg.cmake
cmake --build cmake-out -- -j $(nproc)
```
The binary artifacts, such as examples, will be placed in
`cmake-out/super/src/google_cloud_cpp_project-build/`.
## Quickstart
Each library (linked above) contains a directory named `quickstart/` that's
intended to help you get up and running in a matter of minutes. This
`quickstart/` directory contains a minimal "Hello World" program demonstrating
how to use the library, along with minimal build files for common build
systems, such as CMake and Bazel.
* [Google Cloud Bigtable Quickstart](google/cloud/bigtable/quickstart/README.md)
* [Google Cloud Spanner Quickstart](google/cloud/spanner/quickstart/README.md)
* [Google Cloud Pub/Sub Quickstart](google/cloud/pubsub/quickstart/README.md)
* [Google Cloud Storage Quickstart](google/cloud/storage/quickstart/README.md)
As an example, the following code snippet, taken from [Google Cloud
Storage](google/cloud/storage/README.md), should give you a taste of what it's
like to use one of these C++ libraries.
```cc
EOF
# Dumps the contents of GCS's quickstart.cc starting at the first #include, so
# we skip the license header comment.
sed -n '/^#/,$p' "${BINDIR}/../../google/cloud/storage/quickstart/quickstart.cc"
cat <<"EOF"
```
## Support
* This project supports Windows, macOS, Linux
* This project supports C++11 (and higher) compilers (we test with GCC >= 5.4,
Clang >= 6.0, and MSVC >= 2019)
* This project supports Bazel and CMake builds. See the [Quickstart examples](https://github.com/googleapis/google-cloud-cpp#quickstart)
* This project uses dependencies described in [doc/packaging.md](https://github.com/googleapis/google-cloud-cpp/blob/main/doc/packaging.md)
* This project works with or without exceptions enabled
* This project cuts [monthly releases](https://github.com/googleapis/google-cloud-cpp/releases) with detailed release notes
## Public API and API Breaking Changes
In general, we avoid making backwards incompatible changes to our C++ APIs (see
below for the definition of "API"). Sometimes such changes yield benefits to
our customers, in the form of better performance, easier-to-understand APIs,
and/or more consistent APIs across services. When these benefits warrant it, we
will announce these changes prominently in our `CHANGELOG.md` file and in the
affected release's notes. Nevertheless, though we take commercially reasonable
efforts to prevent this, it is possible that backwards incompatible changes go
undetected and, therefore, undocumented. We apologize if this is the case and
welcome feedback or bug reports to rectify the problem.
By "API" we mean the C++ API exposed by public header files in this repo. We
are not talking about the gRPC or REST APIs exposed by Google Cloud servers. We
are also talking only about A**P**I stability -- the ABI is subject to change
without notice. You should not assume that binary artifacts (e.g. static
libraries, shared objects, dynamically loaded libraries, object files) created
with one version of the library are usable with newer/older versions of the
library. The ABI may, and does, change on "minor revisions", and even patch
releases.
We request that our customers adhere to the following guidelines to avoid
accidentally depending on parts of the library we do not consider to be part of
the public API and therefore may change (including removal) without notice:
Previous versions of the library will remain available on the [GitHub Releases
page](https://github.com/googleapis/google-cloud-cpp/releases). In many cases,
you will be able to use an older version even if a newer version has changes
that you are unable (or do not have time) to adopt.
Note that this document has no bearing on the Google Cloud Platform deprecation
policy described at https://cloud.google.com/terms.
### C++ Symbols and Files
* You should only include headers matching the `google/cloud/${library}/*.h`,
`google/cloud/${library}/mock/*.h` or `google/cloud/*.h` patterns.
* You should **NOT** directly include headers in any subdirectories, such as
`google/cloud/${library}/internal`.
* The files *included from* our public headers are **not part of our public
API**. Depending on indirect includes may break your build in the future, as
we may change a header `"foo.h"` to stop including `"bar.h"` if `"foo.h"` no
longer needs the symbols in `"bar.h"`. To avoid having your code broken, you
should directly include the public headers that define all the symbols you
use (this is sometimes known as
[include-what-you-use](https://include-what-you-use.org/)).
* Any file or symbol that lives within a directory or namespace containing
`internal`, `impl`, `test`, `detail`, `benchmark`, `sample`, or `example`, is
explicitly **not part of our public API**.
* Any file or symbol with `Impl` or `impl` in its name is **not part of our
public API**.
* Any symbol with `experimental` in its name is not part of the public API.
## Beyond the C++ API
Applications developers interact with a C++ library through more than just
the C++ symbols and headers. They also need to reference the name of the
library in their build scripts. Depending of the build system they use
this may be a CMake target, a Bazel rule, a pkg-config module, or just the
name of some object in the file system.
As with the C++ API, we try to avoid breaking changes to these interface
points. Sometimes such changes yield benefits to our customers, in the form of
easier-to-understand names for services, or more consistency
across services. When these benefits warrant it, we will announce these changes
prominently in our `CHANGELOG.md` file and in the affected release's notes.
Nevertheless, though we take commercially reasonable efforts to prevent this,
it is possible that backwards incompatible changes go undetected and,
therefore, undocumented. We apologize if this is the case and welcome feedback
or bug reports to rectify the problem.
### Experimental Libraries
From time to time we add libraries to `google-cloud-cpp` to validate new
designs, expose experimental (or otherwise not generally available) GCP
features, or simply because a library is not yet complete. Such libraries
will have `experimental` in their CMake target and Bazel rule names. The
README file for these libraries will also document that they are experimental.
Such libraries are subject to change, including removal, without notice.
This includes, but it is not limited to, all their symbols, pre-processor
macros, files, targets, rules, and installed artifacts.
### Bazel rules
Only the rules exported at the top-level directory are intended for customer
use, e.g.,`//:spanner`. Experimental rules have `experimental` in their name,
e.g. `//:experimental-firestore`. As previously stated, experimental rules are
subject to change or removal without notice.
Previously some of the rules in subdirectories
(e.g. `//google/cloud/bigtable:bigtable_client`) had public visibility. These
rules are deprecated as of 2021-02-15, and will be become inaccessible
(or removed) on or shortly after **2022-02-15**.
### CMake targets and packages
Only CMake packages starting with the `google_cloud_cpp_` prefix are intended
for customer use. Only targets starting with `google-cloud-cpp::`, are intended
for customer use. Experimental targets have `experimental` in their name (e.g.
`google-cloud-cpp::experimental-iam`). As previously stated, experimental
targets are subject to change or removal without notice.
In previous versions we released packages with other prefixes (or without
specific prefixes), these are deprecated as of 2021-02-15, and will be retired
on or shortly after **2022-02-15**. Same applies to any targets exported with
other prefixes (or without a specific prefix).
### pkg-config modules
Only modules starting with `google_cloud_cpp_` are intended for customer use.
In previous versions we released modules with other prefixes (or without
specific prefixes), these are deprecated as of 2021-02-15, and will be retired
on or shortly after **2022-02-15**.
### Unsupported use cases
We try to provide stable names for the previously described mechanisms:
* Bazel rules,
* CMake targets loaded via `find_package()`,
* pkg-config modules
It is certainly possible to use the library using other approaches. While
these may work, we may accidentally break these from time to time. Examples of
such, and the recommended alternatives, include:
* CMake's `FetchContent` and/or git submodules: in these approaches the
`google-cloud-cpp` library becomes a sub-directory of a larger CMake build.
We do not test `google-cloud-cpp` in these configurations, and we find them
brittle as **all** CMake targets become visible to the larger project.
This is both prone to conflicts, and makes it impossible to enforce that
some targets are only for testing or are implementation details.
Applications may want to consider source package managers, such as
`vcpkg`, or CMake super builds via `ExternalProject_Add()` as alternatives.
* Using library names directly: applications should not use the
library names, e.g., by using `-lgoogle_cloud_cpp_bigtable`
in build scripts. We may need to split or merge libraries over time,
making such names unstable. Applications should use CMake targets,
e.g., `google-cloud-cpp::bigtable`, or pkg-config modules, e.g.,
`$(pkg-config google_cloud_cpp_bigtable --libs)` instead.
### Documentation and Comments
The documentation (and its links) is intended for human consumption and not
third party websites, or automation (such as scripts scrapping the contents).
The contents and links of our documentation may change without notice.
### Other Interface Points
We think this covers all interface points, if we missed something please
file a [GitHub issue][github-issue].
## Contact us
* [GitHub Discussions] -- For questions and general comments
* [GitHub Issues] -- For reporting bugs and feature requests
[GitHub Discussions]: https://github.com/googleapis/google-cloud-cpp/discussions
[GitHub Issues]: https://github.com/googleapis/google-cloud-cpp/issues
## Contributing changes
See [`CONTRIBUTING.md`](CONTRIBUTING.md) for details on how to contribute to
this project, including how to build and test your changes as well as how to
properly format your code.
## Licensing
Apache 2.0; see [`LICENSE`](LICENSE) for details.
EOF
|
from django.contrib import admin
from .models import OrderForWeek
# Register your models here.
@admin.register(OrderForWeek)
class OrderForWeekAdmin(admin.ModelAdmin):
    """Admin configuration for weekly orders."""

    # Two-pane selector widget for each weekday's many-to-many field.
    filter_horizontal = (
        'monday',
        'tuesday',
        'wednesday',
        'thursday',
        'friday',
    )
    # Form layout: header fields first, then the per-day selections.
    fieldsets = (
        (None, {'fields': (('name', 'status'), 'date')}),
        ('Dates', {'fields': ('monday', 'tuesday', 'wednesday', 'thursday', 'friday')}),
    )
def merge_sort(arr):
    """Sort arr in place, ascending, using recursive merge sort.

    Runs in O(n log n) time with O(n) auxiliary space. Stable: equal
    elements keep their relative order (ties are taken from the left
    half; the original used '<', which made the sort unstable).

    :param arr: mutable sequence (e.g. list) of mutually comparable items.
    :return: None; arr is sorted in place.
    """
    if len(arr) > 1:
        # Split at the midpoint and sort each half recursively.
        mid = len(arr) // 2
        left = arr[:mid]
        right = arr[mid:]
        merge_sort(left)
        merge_sort(right)
        # Merge the two sorted halves back into arr.
        i = j = k = 0
        while i < len(left) and j < len(right):
            # '<=' (not '<') keeps the sort stable.
            if left[i] <= right[j]:
                arr[k] = left[i]
                i += 1
            else:
                arr[k] = right[j]
                j += 1
            k += 1
        # Copy any leftovers; at most one of these loops runs.
        while i < len(left):
            arr[k] = left[i]
            i += 1
            k += 1
        while j < len(right):
            arr[k] = right[j]
            j += 1
            k += 1
#!/bin/bash
# This script updates the knowledge graph
# cd $HOME/notebooks/dataprep
# Daily log directory under $NEO4J_HOME/logs, e.g. logs/2024-01-31.
# $(...) replaces the deprecated backtick form; the whole value is quoted
# so a NEO4J_HOME containing spaces cannot break word splitting.
LOGDIR="$NEO4J_HOME/logs/$(date +%Y-%m-%d)"
mkdir -p "$LOGDIR"
# Import GeoNames data into the graph (absolute path to the import script).
python3 /home/pseudo/Coding/GeoGraph/python-files/import-data/geonames.py
# $HOME/scripts/run_notebooks.sh
# enable conda in bash (see: https://github.com/conda/conda/issues/7980)
# eval "$(conda shell.bash hook)"
# # create conda environment
# conda env remove -n Dundy &>> $LOGDIR/update.log
# conda env create -f $DUNDY/environment.yml &>> $LOGDIR/update.log
# conda activate Dundy &>> $LOGDIR/update.log
# run Jupyter Notebooks to download, clean, and standardize data for the knowledge graph
# To check for any errors, look at the executed notebooks in the $LOGDIR directory
# for f in *.ipynb
# do
# echo "Processing $f file.."
# papermill $f "$LOGDIR"/$f
# done
# deactivate conda environment
# conda deactivate &>> $LOGDIR/update.log
|
"use strict";

// Auto-generated module exporting a single SVG icon description
// ("map" glyph, 512x512 viewBox). Each node mirrors a parsed SVG element:
// tag `name`, attribute map `attribs`, and nested `children`.
// NOTE(review): each polygon/path carries an identical copy of itself as
// its only child -- this looks like generator output; do not hand-edit.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.map = void 0;
var map = {
  "viewBox": "0 0 512 512",
  "children": [{
    "name": "g",
    "attribs": {},
    "children": [{
      "name": "polygon",
      "attribs": {
        "points": "135.5,368 199.5,336 199.5,144 135.5,176 \t"
      },
      "children": [{
        "name": "polygon",
        "attribs": {
          "points": "135.5,368 199.5,336 199.5,144 135.5,176 \t"
        },
        "children": []
      }]
    }, {
      "name": "polygon",
      "attribs": {
        "points": "215.5,336 279.5,368 279.5,176 215.5,144 \t"
      },
      "children": [{
        "name": "polygon",
        "attribs": {
          "points": "215.5,336 279.5,368 279.5,176 215.5,144 \t"
        },
        "children": []
      }]
    }, {
      "name": "polygon",
      "attribs": {
        "points": "295.5,368 359.5,336 359.5,144 295.5,176 \t"
      },
      "children": [{
        "name": "polygon",
        "attribs": {
          "points": "295.5,368 359.5,336 359.5,144 295.5,176 \t"
        },
        "children": []
      }]
    }, {
      "name": "path",
      "attribs": {
        "d": "M256,0C114.625,0,0,114.609,0,256s114.625,256,256,256c141.406,0,256-114.609,256-256S397.406,0,256,0z M256,472\r\n\t\tc-119.281,0-216-96.703-216-216S136.719,40,256,40c119.312,0,216,96.703,216,216S375.312,472,256,472z"
      },
      "children": [{
        "name": "path",
        "attribs": {
          "d": "M256,0C114.625,0,0,114.609,0,256s114.625,256,256,256c141.406,0,256-114.609,256-256S397.406,0,256,0z M256,472\r\n\t\tc-119.281,0-216-96.703-216-216S136.719,40,256,40c119.312,0,216,96.703,216,216S375.312,472,256,472z"
        },
        "children": []
      }]
    }]
  }]
};
exports.map = map;
<gh_stars>0
import numpy as np
def bag_of_words_compare(stemmed_words, corpus_words):
    """Build a binary bag-of-words vector for a tokenized input.

    :param stemmed_words: iterable of (stemmed) tokens from the input.
    :param corpus_words: ordered vocabulary; defines the vector positions.
    :return: np.ndarray of 0/1 ints, one entry per vocabulary word, where
        1 means that word occurs anywhere in stemmed_words.
    """
    # Set membership replaces the original O(len(corpus) * len(tokens))
    # double loop with an O(len(corpus) + len(tokens)) scan.
    present = set(stemmed_words)
    return np.array([1 if word in present else 0 for word in corpus_words])
def bag_of_words_Xy(words, labels, docs_x, docs_y):
    """Vectorize training documents into bag-of-words X and one-hot y.

    :param words: vocabulary; one feature column per word.
    :param labels: list of class labels; one output column per label.
    :param docs_x: list of tokenized documents.
    :param docs_y: label of each document, aligned with docs_x.
    :return: (train_X, train_y) as numpy arrays.
    """
    feature_rows = []
    target_rows = []
    for doc_index, tokens in enumerate(docs_x):
        # 1 where the vocabulary word appears in this document, else 0.
        feature_rows.append([1 if vocab_word in tokens else 0
                             for vocab_word in words])
        # One-hot encode this document's label.
        one_hot = [0] * len(labels)
        one_hot[labels.index(docs_y[doc_index])] = 1
        target_rows.append(one_hot)
    return np.array(feature_rows), np.array(target_rows)
|
/*
*
*/
package net.community.chest.jfree.jfreechart.axis.value;
import net.community.chest.convert.ValueStringInstantiator;
import net.community.chest.jfree.jfreechart.data.time.RegularTimePeriodValueStringInstantiator;
import org.jfree.chart.axis.PeriodAxis;
import org.jfree.data.time.RegularTimePeriod;
/**
* <P>Copyright GPLv2</P>
*
* @param <A> Type of {@link PeriodAxis} being reflected
* @author <NAME>.
* @since May 6, 2009 9:55:30 AM
*/
public class PeriodAxisReflectiveProxy<A extends PeriodAxis> extends ValueAxisReflectiveProxy<A> {
    /**
     * @param objClass concrete {@link PeriodAxis} subclass being reflected.
     * @param registerAsDefault true to register this proxy as the default
     * for the class (passed through to the superclass).
     */
    protected PeriodAxisReflectiveProxy (Class<A> objClass, boolean registerAsDefault)
        throws IllegalArgumentException, IllegalStateException
    {
        super(objClass, registerAsDefault);
    }

    /** Convenience constructor: never registers as the default proxy. */
    public PeriodAxisReflectiveProxy (Class<A> objClass) throws IllegalArgumentException
    {
        this(objClass, false);
    }

    /*
     * @see net.community.chest.jfree.jfreechart.ChartReflectiveAttributesProxy#resolveAttributeInstantiator(java.lang.String, java.lang.Class)
     */
    @SuppressWarnings("unchecked")
    @Override
    protected <C> ValueStringInstantiator<C> resolveAttributeInstantiator (String name, Class<C> type) throws Exception
    {
        // Attributes typed as RegularTimePeriod (or a subclass) get the
        // dedicated time-period instantiator; everything else falls back
        // to the superclass resolution.
        if ((type != null) && RegularTimePeriod.class.isAssignableFrom(type))
            return (ValueStringInstantiator<C>) RegularTimePeriodValueStringInstantiator.DEFAULT;

        return super.resolveAttributeInstantiator(name, type);
    }

    /** Shared default proxy for plain {@link PeriodAxis}, registered as default. */
    public static final PeriodAxisReflectiveProxy<PeriodAxis> PERIOD=
        new PeriodAxisReflectiveProxy<PeriodAxis>(PeriodAxis.class, true);
}
|
<reponame>karzuo/merlin<gh_stars>1-10
// Copyright 2020 The Merlin Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package batch
import (
"bytes"
"context"
"fmt"
"github.com/ghodss/yaml"
"github.com/gogo/protobuf/jsonpb"
"github.com/gojek/merlin-pyspark-app/pkg/spec"
"github.com/gojek/merlin/log"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// ManifestManager creates and deletes the Kubernetes resources a batch
// prediction job needs: its job-spec ConfigMap, its Secret, and the RBAC
// objects (service account, role, role binding) used by the Spark driver.
type ManifestManager interface {
	// CreateJobSpec stores the prediction job spec as a ConfigMap in the
	// given namespace and returns the ConfigMap name.
	CreateJobSpec(ctx context.Context, predictionJobName string, namespace string, spec *spec.PredictionJob) (string, error)
	// DeleteJobSpec removes the job-spec ConfigMap; a missing object is
	// not treated as an error.
	DeleteJobSpec(ctx context.Context, predictionJobName string, namespace string) error
	// CreateSecret stores data as a Secret named after the prediction job
	// and returns the Secret name.
	CreateSecret(ctx context.Context, predictionJobName string, namespace string, data string) (string, error)
	// DeleteSecret removes the prediction job's Secret.
	DeleteSecret(ctx context.Context, predictionJobName string, namespace string) error
	// CreateDriverAuthorization ensures the Spark driver's service account,
	// role, and role binding exist; returns the service account name.
	CreateDriverAuthorization(ctx context.Context, namespace string) (string, error)
	// DeleteDriverAuthorization removes the driver's RBAC objects.
	DeleteDriverAuthorization(ctx context.Context, namespace string) error
}
var (
	// jsonMarshaller serializes protobuf job specs to JSON (used by the
	// spec-to-YAML conversion elsewhere in this package).
	jsonMarshaller = &jsonpb.Marshaler{}
	// defaultSparkDriverRoleRules grants the Spark driver full control over
	// pods and services in its namespace — presumably what spark-on-k8s
	// needs to launch executor pods and expose the driver service (confirm
	// against the Spark operator requirements).
	defaultSparkDriverRoleRules = []v1.PolicyRule{
		{
			// Allow driver to manage pods
			APIGroups: []string{
				"", // indicates the core API group
			},
			Resources: []string{
				"pods",
			},
			Verbs: []string{
				"*",
			},
		},
		{
			// Allow driver to manage services
			APIGroups: []string{
				"", // indicates the core API group
			},
			Resources: []string{
				"services",
			},
			Verbs: []string{
				"*",
			},
		},
	}
)
// manifestManager is the default ManifestManager implementation, backed by
// a Kubernetes client.
type manifestManager struct {
	kubeClient kubernetes.Interface
}
// NewManifestManager returns a ManifestManager backed by the given
// Kubernetes client.
func NewManifestManager(kubeClient kubernetes.Interface) ManifestManager {
	mgr := &manifestManager{kubeClient: kubeClient}
	return mgr
}
// CreateJobSpec serializes the prediction job spec to YAML and stores it as
// a ConfigMap named after the prediction job. Returns the ConfigMap name.
func (m *manifestManager) CreateJobSpec(ctx context.Context, predictionJobName string, namespace string, spec *spec.PredictionJob) (string, error) {
	configYaml, err := toYamlString(spec)
	if err != nil {
		// Detailed cause is logged; callers get a stable, generic message.
		log.Errorf("failed converting prediction job spec to yaml: %v", err)
		return "", errors.New("failed converting prediction job spec to yaml")
	}
	cm, err := m.kubeClient.CoreV1().ConfigMaps(namespace).Create(ctx, &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      predictionJobName,
			Namespace: namespace,
		},
		Data: map[string]string{
			// jobSpecFileName (declared elsewhere in this package) keys the
			// YAML payload inside the ConfigMap.
			jobSpecFileName: configYaml,
		},
	}, metav1.CreateOptions{})
	if err != nil {
		log.Errorf("failed creating job specification config map %s in namespace %s: %v", predictionJobName, namespace, err)
		return "", errors.New("failed creating job specification config map")
	}
	return cm.Name, nil
}
// DeleteJobSpec deletes the job-spec ConfigMap for the given prediction job.
// A not-found error counts as success, so the call is idempotent.
func (m *manifestManager) DeleteJobSpec(ctx context.Context, predictionJobName string, namespace string) error {
	configMaps := m.kubeClient.CoreV1().ConfigMaps(namespace)
	deleteErr := configMaps.Delete(ctx, predictionJobName, metav1.DeleteOptions{})
	if client.IgnoreNotFound(deleteErr) != nil {
		log.Errorf("failed deleting configmap %s in namespace %s: %v", predictionJobName, namespace, deleteErr)
		return errors.Errorf("failed deleting configmap %s in namespace %s", predictionJobName, namespace)
	}
	return nil
}
// CreateDriverAuthorization ensures the RBAC plumbing the Spark driver
// needs exists in the namespace: a service account, a role granting the
// rules in defaultSparkDriverRoleRules, and a binding between the two.
// Each object is created only when a Get reports it missing (get-or-create),
// so the call is idempotent. Returns the service account name.
func (m *manifestManager) CreateDriverAuthorization(ctx context.Context, namespace string) (string, error) {
	serviceAccountName, driverRoleName, driverRoleBindingName := createAuthorizationResourceNames(namespace)
	// create service account
	sa, err := m.kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, serviceAccountName, metav1.GetOptions{})
	if err != nil {
		if !kerrors.IsNotFound(err) {
			// Any error other than "missing" is unexpected: give up.
			return "", errors.Errorf("failed getting status of driver service account %s in namespace %s", serviceAccountName, namespace)
		}
		sa, err = m.kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, &corev1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
				Name:      serviceAccountName,
				Namespace: namespace,
			},
		}, metav1.CreateOptions{})
		if err != nil {
			return "", errors.Errorf("failed creating driver service account %s in namespace %s", serviceAccountName, namespace)
		}
	}
	// create role
	role, err := m.kubeClient.RbacV1().Roles(namespace).Get(ctx, driverRoleName, metav1.GetOptions{})
	if err != nil {
		if !kerrors.IsNotFound(err) {
			return "", errors.Errorf("failed getting status of driver role %s in namespace %s", driverRoleName, namespace)
		}
		role, err = m.kubeClient.RbacV1().Roles(namespace).Create(ctx, &v1.Role{
			ObjectMeta: metav1.ObjectMeta{
				Name:      driverRoleName,
				Namespace: namespace,
			},
			// Pod/service permissions for the driver; see the package-level
			// defaultSparkDriverRoleRules.
			Rules: defaultSparkDriverRoleRules,
		}, metav1.CreateOptions{})
		if err != nil {
			return "", errors.Errorf("failed creating driver roles %s in namespace %s", driverRoleName, namespace)
		}
	}
	// create role binding (links the service account to the role above)
	_, err = m.kubeClient.RbacV1().RoleBindings(namespace).Get(ctx, driverRoleBindingName, metav1.GetOptions{})
	if err != nil {
		if !kerrors.IsNotFound(err) {
			return "", errors.Errorf("failed getting status of driver rolebinding %s in namespace %s", driverRoleBindingName, namespace)
		}
		_, err = m.kubeClient.RbacV1().RoleBindings(namespace).Create(ctx, &v1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:      driverRoleBindingName,
				Namespace: namespace,
			},
			Subjects: []v1.Subject{
				{
					Kind:      "ServiceAccount",
					Namespace: namespace,
					Name:      sa.Name,
				},
			},
			RoleRef: v1.RoleRef{
				APIGroup: "rbac.authorization.k8s.io",
				Kind:     "Role",
				Name:     role.Name,
			},
		}, metav1.CreateOptions{})
		if err != nil {
			return "", errors.Errorf("failed creating driver roles binding %s in namespace %s", driverRoleBindingName, namespace)
		}
	}
	return sa.Name, nil
}
// DeleteDriverAuthorization tears down the driver RBAC resources for the
// namespace in reverse creation order: role binding, then role, then service
// account. Not-found errors are ignored so the call is idempotent.
func (m *manifestManager) DeleteDriverAuthorization(ctx context.Context, namespace string) error {
	serviceAccountName, driverRoleName, driverRoleBindingName := createAuthorizationResourceNames(namespace)
	if err := m.kubeClient.RbacV1().RoleBindings(namespace).Delete(ctx, driverRoleBindingName, metav1.DeleteOptions{}); client.IgnoreNotFound(err) != nil {
		return errors.Errorf("failed deleting driver roles binding %s in namespace %s", driverRoleBindingName, namespace)
	}
	if err := m.kubeClient.RbacV1().Roles(namespace).Delete(ctx, driverRoleName, metav1.DeleteOptions{}); client.IgnoreNotFound(err) != nil {
		return errors.Errorf("failed deleting driver roles %s in namespace %s", driverRoleName, namespace)
	}
	if err := m.kubeClient.CoreV1().ServiceAccounts(namespace).Delete(ctx, serviceAccountName, metav1.DeleteOptions{}); client.IgnoreNotFound(err) != nil {
		return errors.Errorf("failed deleting service account %s in namespace %s", serviceAccountName, namespace)
	}
	return nil
}
// CreateSecret stores the given payload as an Opaque Secret named after the
// prediction job, under the serviceAccountFileName key. Returns the created
// secret's name.
func (m *manifestManager) CreateSecret(ctx context.Context, predictionJobName string, namespace string, data string) (string, error) {
	secretSpec := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      predictionJobName,
			Namespace: namespace,
		},
		StringData: map[string]string{
			serviceAccountFileName: data,
		},
		Type: corev1.SecretTypeOpaque,
	}
	created, err := m.kubeClient.CoreV1().Secrets(namespace).Create(ctx, secretSpec, metav1.CreateOptions{})
	if err != nil {
		log.Errorf("failed creating secret %s in namespace %s: %v", predictionJobName, namespace, err)
		return "", errors.Errorf("failed creating secret %s in namespace %s", predictionJobName, namespace)
	}
	return created.Name, nil
}
// DeleteSecret removes the prediction job's Secret; a Secret that is already
// gone is not treated as an error.
func (m *manifestManager) DeleteSecret(ctx context.Context, predictionJobName string, namespace string) error {
	deleteErr := m.kubeClient.CoreV1().Secrets(namespace).Delete(ctx, predictionJobName, metav1.DeleteOptions{})
	if client.IgnoreNotFound(deleteErr) == nil {
		return nil
	}
	log.Errorf("failed deleting secret %s in namespace %s: %v", predictionJobName, namespace, deleteErr)
	return errors.Errorf("failed deleting secret %s in namespace %s", predictionJobName, namespace)
}
// toYamlString serializes a PredictionJob to a YAML string by first
// marshalling it to JSON (via the package-level jsonMarshaller, preserving the
// spec's JSON field names) and then converting the JSON bytes to YAML.
//
// The parameter was previously named "spec", which shadowed the imported spec
// package inside the function body; it is renamed to avoid that foot-gun.
func toYamlString(predictionJob *spec.PredictionJob) (string, error) {
	buf := new(bytes.Buffer)
	if err := jsonMarshaller.Marshal(buf, predictionJob); err != nil {
		return "", err
	}
	res, err := yaml.JSONToYAML(buf.Bytes())
	if err != nil {
		return "", err
	}
	return string(res), nil
}
// createAuthorizationResourceNames derives the per-namespace names of the
// driver's RBAC objects: service account, role, and role binding.
func createAuthorizationResourceNames(namespace string) (serviceAccountName, driverRoleName, driverRoleBindingName string) {
	serviceAccountName = namespace + "-driver-sa"
	driverRoleName = namespace + "-driver-role"
	driverRoleBindingName = namespace + "-driver-role-binding"
	return serviceAccountName, driverRoleName, driverRoleBindingName
}
|
package io.opensphere.server.services;
import io.opensphere.core.geometry.renderproperties.TileRenderProperties;
import io.opensphere.mantle.data.MapVisualizationType;
import io.opensphere.mantle.data.impl.DefaultMapTileVisualizationInfo;
/**
 * The Class ServerMapVisualizationInfo.
 *
 * <p>Tile visualization metadata for server-provided layers: a thin extension
 * of {@link DefaultMapTileVisualizationInfo} that always passes {@code true}
 * as the third super-constructor argument for server layers (see the parent
 * class for that flag's meaning).</p>
 */
public class ServerMapVisualizationInfo extends DefaultMapTileVisualizationInfo
{
    /**
     * Instantiates a new map visualization info for server layers.
     *
     * @param visType the {@link MapVisualizationType}
     * @param props the props used to render the layer's tiles
     */
    public ServerMapVisualizationInfo(MapVisualizationType visType, TileRenderProperties props)
    {
        // NOTE(review): the boolean flag is hard-coded true for all server
        // layers; confirm against DefaultMapTileVisualizationInfo if changed.
        super(visType, props, true);
    }
}
|
#!/bin/bash
# Copyright Red Hat
#
# Generates self-signed TLS material for the dex-gitops endpoints (only when
# ./ssl does not yet exist) and applies the Argo CD secret and config map to
# the cluster currently targeted by "oc".

# Derive the cluster base domain from the API server URL. This must run on
# EVERY invocation, not just when certificates are generated: the heredocs
# below expand ${BASE_DOMAIN} even when ssl/ already exists (previously the
# variable was only set inside the certificate-generation branch, leaving it
# empty on reruns).
export BASE_DOMAIN=$(oc cluster-info | grep api | sed 's/.*api.//g' | cut -d':' -f1)

if [ ! -d ssl ]; then
    mkdir -p ssl

    # OpenSSL request config carrying the SANs required by the dex services.
    cat << EOF > ssl/req.cnf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = dex-gitops
DNS.2 = dex-gitops-grpc
DNS.3 = dex-gitops.svc.cluster.local
DNS.4 = dex-gitops.apps.${BASE_DOMAIN}
EOF

    # Short-lived (10 day) CA, then a server key/cert signed by that CA with
    # the v3_req extensions (SANs) from the config above.
    openssl genrsa -out ssl/ca-key.pem 2048
    openssl req -x509 -new -nodes -key ssl/ca-key.pem -days 10 -out ssl/ca.pem -subj "/CN=kube-ca"
    openssl genrsa -out ssl/key.pem 2048
    openssl req -new -key ssl/key.pem -out ssl/csr.pem -subj "/CN=kube-ca" -config ssl/req.cnf
    openssl x509 -req -in ssl/csr.pem -CA ssl/ca.pem -CAkey ssl/ca-key.pem -CAcreateserial -out ssl/cert.pem -days 10 -extensions v3_req -extfile ssl/req.cnf
fi

# Apply the Argo CD secret. "base64 -w0" disables line wrapping: the default
# 76-column wrapping would inject newlines into the YAML scalars and corrupt
# the manifest.
cat <<EOF | oc apply -f -
apiVersion: v1
data:
  admin.password: JDJhJDEwJHMyR0FEZm5zWmxITTVLOU0wSzN5aWVaOUVibHFzYTFtS3BVUE9NMnJCOVhNdlNLeno4aVNl
  admin.passwordMtime: MjAyMS0wNy0xNFQyMDoxMzo0OFo=
  server.secretkey: ejJtMDFiOVdwNE5vbEhTdE1HVWk=
  tls.crt: $(base64 -w0 < ssl/cert.pem)
  tls.key: $(base64 -w0 < ssl/key.pem)
kind: Secret
metadata:
  labels:
    app.kubernetes.io/managed-by: openshift-gitops
    app.kubernetes.io/name: argocd-secret
    app.kubernetes.io/part-of: argocd
  name: argocd-secret
type: Opaque
EOF

# Apply the Argo CD configuration: dex OpenShift connector, Tekton resource
# exclusions, and the externally-reachable URL.
cat <<EOF | oc apply -f -
apiVersion: v1
data:
  admin.enabled: "true"
  application.instanceLabelKey: ""
  configManagementPlugins: ""
  dex.config: |
    connectors:
    - config:
        clientID: system:serviceaccount:dex-gitops:gitops-argocd-dex-server
        clientSecret: eyJhbGciOiJSUzI1NiIsImtpZCI6ImgzdXNfT1JLZE9tUlE3cXFHUzV4RmxoODJHWEVJVlFfeHJsa0xpa0k2U2MifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJvcGVuc2hpZnQtZ2l0b3BzIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6Im9wZW5zaGlmdC1naXRvcHMtYXJnb2NkLWRleC1zZXJ2ZXItdG9rZW4tN245azUiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoib3BlbnNoaWZ0LWdpdG9wcy1hcmdvY2QtZGV4LXNlcnZlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6Ijc4Zjg5ZTYwLWQ4YmMtNGUyYS1iNWFjLWRhYTBjOWNmMmFjYiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpvcGVuc2hpZnQtZ2l0b3BzOm9wZW5zaGlmdC1naXRvcHMtYXJnb2NkLWRleC1zZXJ2ZXIifQ.n94sLiToDOIlnLEqiQ8Mc8EPG8_tVP2hNoM0A9mFTxTYeL14B6fUZcjdNDjA5vG7Wnmz-LDr0qG888cLxdvau4VjkthT_y9YfheWkpPN8S4nEob-1ddIFVmp1eangnr5A9qodAJpjSsqxpfCv4932Z0reUCklsJVlJDl-22mmP0oO1DWcuUM4hm6c4NZUyHP0rkSekYf0A1DKVM_5tjsmrvVfBYLlsfYHKPPuSO8-6oXqnFnQVpGFTMnrP8akJjqfadUQXG4spwiR3RA21kmeJBKuCxmt6ZtdGRy3X6l6fK6fIXTtW6NTsNkfqv5Aq0K58pqkISmzaCG03-7JMBvIB2IUdRs7JbQtAUsKZBTcLIatz0q5-DbdvSj0PLiqQZN5Elf5arWXuFlU8tpijBYbaebOZrl8l9yqRWgFJSPK75xpKmYrMgA964o-krtRUCFqRQtpHhjZoWutQYOQ_kpiekE1fQMJQTTzRfAJ0A_QX9VHJpfZijtluTOZuvPBB3-gPoLYndI7MtRabRTnOsc0IrzH_cFtjV28YDj-dz93DQs6ORUdAjED8bAwpqtYCQ5CSPe9cr8zpHFbkiTDUj8Gz2qLU6Mq6KQkBIkPIkDMq5KsJqR72-YDzWV8rhcuZdh0ZnhvLUK6KDjkPHUcjnOiGkocFaqjRrShzfWT8Uspes
        insecureCA: true
        issuer: https://kubernetes.default.svc
        redirectURI: https://dex-gitops.apps.${BASE_DOMAIN}/api/dex/callback
      id: openshift
      name: OpenShift
      type: openshift
  ga.anonymizeusers: "false"
  ga.trackingid: ""
  help.chatText: ""
  help.chatUrl: ""
  kustomize.buildOptions: ""
  repositories: ""
  repository.credentials: ""
  resource.exclusions: |
    - apiGroups:
      - tekton.dev
      clusters:
      - '*'
      kinds:
      - TaskRun
      - PipelineRun
  resource.inclusions: ""
  statusbadge.enabled: "false"
  url: https://dex-gitops.apps.${BASE_DOMAIN}
  users.anonymous.enabled: "false"
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/managed-by: openshift-gitops
    app.kubernetes.io/name: argocd-cm
    app.kubernetes.io/part-of: argocd
  name: argocd-cm
EOF
|
#!/bin/bash

# Exit with status 1 when interrupted or terminated. printf is used instead of
# echo because bash's builtin echo does not interpret "\n" escapes by default,
# so the original message printed a literal "\n".
trap 'printf "\nCaught signal, exiting...\n"; exit 1' SIGINT SIGTERM

# When true, failing template comparisons overwrite the expected data with the
# rendered output instead of failing the test (see compare_templates).
: ${UPDATE_EXPECTED_DATA:=false}

CALICOCTL="calicoctl --allow-version-mismatch"
# Execute the suite of tests. It is assumed the following environment variables will
# have been set up beforehand:
# - DATASTORE_TYPE + other calico datastore envs
# - LOGPATH
execute_test_suite() {
    # This is needed for two reasons:
    # - to substitute for "NODENAME" in some of the cond TOML files
    # - for the confd Calico client to select which node to listen to for key events.
    export NODENAME="kube-master"

    # Make sure the log and rendered templates paths are created and old test run data is
    # deleted.
    mkdir -p $LOGPATH
    mkdir -p $LOGPATH/rendered
    rm $LOGPATH/log* || true
    rm $LOGPATH/rendered/*.cfg || true

    # Extra tests that apply only to a particular datastore type.
    if [ "$DATASTORE_TYPE" = kubernetes ]; then
        run_extra_test test_node_mesh_bgp_password
        run_extra_test test_bgp_password_deadlock
    fi

    if [ "$DATASTORE_TYPE" = etcdv3 ]; then
        run_extra_test test_node_mesh_bgp_password
        run_extra_test test_bgp_password
        run_extra_test test_bgp_sourceaddr_gracefulrestart
        run_extra_test test_node_deletion
        run_extra_test test_idle_peers
        run_extra_test test_router_id_hash
        echo "Extra etcdv3 tests passed"
    fi

    # Run the set of tests using confd in oneshot mode.
    echo "Execute oneshot-mode tests"
    execute_tests_oneshot
    echo "Oneshot-mode tests passed"

    # Now run a set of tests with confd running continuously.
    # Note that changes to the node to node mesh config option will result in a restart of
    # confd, so order the tests accordingly. We'll start with a set of tests that use the
    # node mesh enabled, so turn it on now before we start confd.
    echo "Execute daemon-mode tests"
    turn_mesh_on
    for i in $(seq 1 2); do
        execute_tests_daemon
    done
    echo "Daemon-mode tests passed"
}
# Run a single named extra test function, framed by banner output.
#
# $1 - name of the test function to invoke.
run_extra_test() {
    test_fn=$1
    echo
    echo "Run test: $test_fn"
    echo "==============================="
    # Invoke the function directly; the original assigned test_fn but then
    # used "eval $1", which both ignored the variable and re-parsed its
    # argument unnecessarily.
    "$test_fn"
    echo "==============================="
}
# Verify BGPPeer password handling (etcdv3 datastore):
#  - peerings render without passwords while the referenced Secrets are absent;
#  - creating, updating, and deleting Secrets is reflected in the rendered BIRD
#    config (compiled_templates/password/step1..step6);
#  - the passwords must never appear in the confd debug log.
test_bgp_password() {
    # Run confd as a background process.
    echo "Running confd as background process"
    NODENAME=node1 BGP_LOGSEVERITYSCREEN="debug" confd -confdir=/etc/calico/confd >$LOGPATH/logd1 2>&1 &
    CONFD_PID=$!
    echo "Running with PID " $CONFD_PID

    # Turn the node-mesh off.
    turn_mesh_off

    # Create 4 nodes with various password peerings.
    $CALICOCTL apply -f - <<EOF
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node1
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.1/24
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node2
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.2/24
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node3
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.3/24
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node4
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.4/24
---
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: bgppeer-1
spec:
  nodeSelector: has(node)
  peerIP: 10.24.0.2
  asNumber: 64512
  password:
    secretKeyRef:
      name: my-secrets-1
      key: a
---
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: bgppeer-2
spec:
  nodeSelector: has(node)
  peerIP: 10.24.0.3
  asNumber: 64512
  password:
    secretKeyRef:
      name: my-secrets-1
      key: b
---
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: bgppeer-3
spec:
  node: node1
  peerIP: 10.24.10.10
  asNumber: 64512
  password:
    secretKeyRef:
      name: my-secrets-2
      key: c
EOF

    # Expect 3 peerings, all with no password because we haven't
    # created the secrets yet.
    test_confd_templates password/step1

    # Create my-secrets-1 secret with only one of the required keys.
    kubectl create -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: my-secrets-1
  namespace: kube-system
type: Opaque
stringData:
  b: password-b
EOF

    # Expect password now on the peering using my-secrets-1/b.
    test_confd_templates password/step2

    # Update my-secrets-1 secret with the other required key.
    kubectl replace -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: my-secrets-1
  namespace: kube-system
type: Opaque
stringData:
  b: password-b
  a: password-a
EOF

    # Also create my-secrets-2 secret.
    kubectl create -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: my-secrets-2
  namespace: kube-system
type: Opaque
stringData:
  c: password-c
EOF

    # Expect passwords on all peerings.
    test_confd_templates password/step3

    # Delete a secret.
    kubectl delete secret my-secrets-2 -n kube-system

    # Expect password-c to have disappeared.
    test_confd_templates password/step4

    # Change the passwords in the other secret.
    kubectl replace -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: my-secrets-1
  namespace: kube-system
type: Opaque
stringData:
  b: new-password-b
  a: new-password-a
EOF

    # Expect peerings to have new passwords.
    test_confd_templates password/step5

    # Delete one of the keys from that secret.
    kubectl replace -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: my-secrets-1
  namespace: kube-system
type: Opaque
stringData:
  b: new-password-b
EOF

    # Expect new-password-a to have disappeared.
    test_confd_templates password/step6

    # Delete the remaining secret.
    kubectl delete secret my-secrets-1 -n kube-system

    # Kill confd.
    kill -9 $CONFD_PID

    # Turn the node-mesh back on.
    turn_mesh_on

    # Delete remaining resources.
    # NOTE(review): bgppeer-3 is not deleted here -- confirm it is cleaned up
    # elsewhere or acceptable to leave behind in the test environment.
    $CALICOCTL delete node node1
    $CALICOCTL delete node node2
    $CALICOCTL delete node node3
    $CALICOCTL delete node node4
    $CALICOCTL delete bgppeer bgppeer-1
    $CALICOCTL delete bgppeer bgppeer-2

    # Check that passwords were not logged.
    password_logs="`grep 'password-' $LOGPATH/logd1 || true`"
    echo "$password_logs"
    if [ "$password_logs" ]; then
        echo "ERROR: passwords were logged"
        return 1
    fi
}
# Regression test for a deadlock observed when confd's initial iteration
# through BGPPeers (each requiring a Secret lookup) takes longer than 100ms.
# Unlike test_bgp_password, the datastore is fully populated BEFORE confd
# starts, and Typha is used.
test_bgp_password_deadlock() {
    # For this test we populate the datastore before starting confd.
    # Also we use Typha.
    start_typha

    # Clean up the output directory.
    rm -f /etc/calico/confd/config/*

    # Turn the node-mesh off.
    turn_mesh_off

    # Adjust this number until confd's iteration through BGPPeers
    # takes longer than 100ms. That is what's needed to see the
    # deadlock.
    SCALE=99

    # Create $SCALE nodes and BGPPeer configs.
    for ii in `seq 1 $SCALE`; do
        # Kubernetes Node first, then the matching Calico Node + BGPPeer.
        kubectl apply -f - <<EOF
apiVersion: v1
kind: Node
metadata:
  annotations:
    node.alpha.kubernetes.io/ttl: "0"
    volumes.kubernetes.io/controller-managed-attach-detach: "true"
  labels:
    beta.kubernetes.io/arch: amd64
    beta.kubernetes.io/os: linux
    kubernetes.io/hostname: node$ii
  name: node$ii
  namespace: ""
spec:
  externalID: node$ii
EOF
        $CALICOCTL apply -f - <<EOF
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node$ii
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.$ii/24
---
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: bgppeer-$ii
spec:
  node: node$ii
  peerIP: 10.24.0.2
  asNumber: 64512
  password:
    secretKeyRef:
      name: my-secrets-1
      key: a
EOF
    done

    # Create the required secret.
    kubectl create -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: my-secrets-1
  namespace: kube-system
type: Opaque
stringData:
  a: password-a
EOF

    # Run confd as a background process.
    echo "Running confd as background process"
    NODENAME=node1 BGP_LOGSEVERITYSCREEN="debug" confd -confdir=/etc/calico/confd >$LOGPATH/logd1 2>&1 &
    CONFD_PID=$!
    echo "Running with PID " $CONFD_PID

    # Expect BIRD config to be generated.
    test_confd_templates password-deadlock

    # Kill confd.
    kill -9 $CONFD_PID

    # Kill Typha.
    kill_typha

    # Turn the node-mesh back on.
    turn_mesh_on

    # Delete resources.
    kubectl delete secret my-secrets-1 -n kube-system
    for ii in `seq 1 $SCALE`; do
        $CALICOCTL delete bgppeer bgppeer-$ii
        kubectl delete node node$ii
    done
}
# Verify that deleting a Calico Node removes the corresponding peerings from
# the rendered BIRD config (3 selector-derived peerings drop to 2 when node3
# is deleted).
test_node_deletion() {
    # Run confd as a background process.
    echo "Running confd as background process"
    NODENAME=node1 BGP_LOGSEVERITYSCREEN="debug" confd -confdir=/etc/calico/confd >$LOGPATH/logd1 2>&1 &
    CONFD_PID=$!
    echo "Running with PID " $CONFD_PID

    # Turn the node-mesh off.
    turn_mesh_off

    # Create 4 nodes with a mesh of peerings.
    $CALICOCTL apply -f - <<EOF
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node1
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.1/24
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node2
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.2/24
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node3
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.3/24
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node4
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.4/24
---
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: bgppeer-1
spec:
  nodeSelector: has(node)
  peerSelector: has(node)
EOF

    # Expect 3 peerings.
    expect_peerings 3

    # Delete one of the nodes.
    $CALICOCTL delete node node3

    # Expect just 2 peerings.
    expect_peerings 2

    # Kill confd.
    kill -9 $CONFD_PID

    # Turn the node-mesh back on.
    turn_mesh_on

    # Delete remaining resources.
    $CALICOCTL delete node node1
    $CALICOCTL delete node node2
    $CALICOCTL delete node node4
    $CALICOCTL delete bgppeer bgppeer-1
}
# Test that when BGPPeers generate overlapping global and node-specific peerings, we reliably
# only see the global peerings in the v1 data model.
test_idle_peers() {
    # Run confd as a background process.
    echo "Running confd as background process"
    NODENAME=node1 BGP_LOGSEVERITYSCREEN="debug" confd -confdir=/etc/calico/confd >$LOGPATH/logd1 2>&1 &
    CONFD_PID=$!
    echo "Running with PID " $CONFD_PID

    # Turn the node-mesh off.
    turn_mesh_off

    # Create 2 nodes, a global peering between them, and a node-specific peering between them.
    $CALICOCTL apply -f - <<EOF
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node1
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.1/24
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node2
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.2/24
---
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: node-specific
spec:
  node: node1
  peerSelector: has(node)
---
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: global
spec:
  peerSelector: has(node)
EOF

    # Expect 1 peering.
    expect_peerings 1

    # 10 times, touch a Node resource to cause peerings to be recomputed, and check that we
    # always see just one peering.
    for n in `seq 1 10`; do
        $CALICOCTL apply -f - <<EOF
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node1
  labels:
    node: yes
spec:
  bgp:
    ipv4Address: 10.24.0.1/24
EOF
        sleep 0.25
        expect_peerings 1
    done

    # Kill confd.
    kill -9 $CONFD_PID

    # Turn the node-mesh back on.
    turn_mesh_on

    # Delete resources. Note that deleting Node node1 also deletes the node-specific BGPPeer.
    $CALICOCTL delete node node1
    $CALICOCTL delete node node2
    $CALICOCTL delete bgppeer global
}
# Poll the rendered BIRD config until it contains exactly $1 "protocol bgp"
# stanzas. Retries once per second; after more than 5 failed attempts the
# config is dumped and the function returns 2.
expect_peerings() {
    want=$1
    tries=0
    while true; do
        sleep 1
        # Print the matching lines for the log, then count them.
        grep "protocol bgp" /etc/calico/confd/config/bird.cfg
        have=`grep "protocol bgp" /etc/calico/confd/config/bird.cfg | wc -l`
        if [ "$have" = "$want" ]; then
            return 0
        fi
        tries=$((tries + 1))
        echo Failed attempts = $tries
        if [ "$tries" -gt 5 ]; then
            echo Test failed
            cat /etc/calico/confd/config/bird.cfg
            return 2
        fi
    done
}
# Execute a set of tests using daemon mode.
#
# confd runs continuously in the background while test data is applied; each
# scenario is run twice to check the rendered output is stable across repeats.
execute_tests_daemon() {
    # For KDD, run Typha.
    if [ "$DATASTORE_TYPE" = kubernetes ]; then
        start_typha
    fi

    # Run confd as a background process.
    echo "Running confd as background process"
    BGP_LOGSEVERITYSCREEN="debug" confd -confdir=/etc/calico/confd >$LOGPATH/logd1 2>&1 &
    CONFD_PID=$!
    echo "Running with PID " $CONFD_PID

    # Run the node-mesh-enabled tests.
    for i in $(seq 1 2); do
        run_individual_test 'mesh/bgp-export'
        run_individual_test 'mesh/ipip-always'
        run_individual_test 'mesh/ipip-cross-subnet'
        run_individual_test 'mesh/ipip-off'
        run_individual_test 'mesh/route-reflector-mesh-enabled'
        run_individual_test 'mesh/static-routes'
        run_individual_test 'mesh/static-routes-exclude-node'
        run_individual_test 'mesh/communities'
        run_individual_test 'mesh/restart-time'
    done

    # Turn the node-mesh off.
    turn_mesh_off

    # Run the explicit peering tests.
    for i in $(seq 1 2); do
        run_individual_test 'explicit_peering/global'
        run_individual_test 'explicit_peering/global-external'
        run_individual_test 'explicit_peering/global-ipv6'
        run_individual_test 'explicit_peering/specific_node'
        run_individual_test 'explicit_peering/selectors'
        run_individual_test 'explicit_peering/route_reflector'
        run_individual_test 'explicit_peering/keepnexthop'
        run_individual_test 'explicit_peering/keepnexthop-global'
        run_individual_test 'explicit_peering/local-as'
        run_individual_test 'explicit_peering/local-as-global'
    done

    # Turn the node-mesh back on.
    turn_mesh_on

    # Kill confd.
    kill -9 $CONFD_PID

    # For KDD, kill Typha.
    if [ "$DATASTORE_TYPE" = kubernetes ]; then
        kill_typha
    fi
}
# Execute a set of tests using oneshot mode.
execute_tests_oneshot() {
    # Note that changes to the node to node mesh config option will result in a restart of
    # confd, so order the tests accordingly. Since the default nodeToNodeMeshEnabled setting
    # is true, perform the mesh tests first. Then run the explicit peering tests - we should
    # see confd terminate when we turn of the mesh.
    for i in $(seq 1 2); do
        run_individual_test_oneshot 'mesh/bgp-export'
        run_individual_test_oneshot 'mesh/ipip-always'
        run_individual_test_oneshot 'mesh/ipip-cross-subnet'
        run_individual_test_oneshot 'mesh/ipip-off'
        run_individual_test_oneshot 'mesh/vxlan-always'
        run_individual_test_oneshot 'explicit_peering/global'
        run_individual_test_oneshot 'explicit_peering/specific_node'
        run_individual_test_oneshot 'explicit_peering/selectors'
        run_individual_test_oneshot 'explicit_peering/route_reflector'
        run_individual_test_oneshot 'explicit_peering/route_reflector_v6_by_ip'
        run_individual_test_oneshot 'mesh/static-routes'
        run_individual_test_oneshot 'mesh/static-routes-exclude-node'
        run_individual_test_oneshot 'mesh/communities'
        run_individual_test_oneshot 'mesh/restart-time'
        run_individual_test_oneshot 'explicit_peering/keepnexthop'
        run_individual_test_oneshot 'explicit_peering/keepnexthop-global'
        # Exercise rendering with an explicitly-configured router ID and no
        # IPv4 address on the node, then clear the override for later tests.
        export CALICO_ROUTER_ID=10.10.10.10
        run_individual_test_oneshot 'mesh/static-routes-no-ipv4-address'
        export -n CALICO_ROUTER_ID
        unset CALICO_ROUTER_ID
    done
}
# Turn the node-to-node mesh off.
# Note: toggling this setting causes a running confd to restart (see callers).
turn_mesh_off() {
    $CALICOCTL apply -f - <<EOF
kind: BGPConfiguration
apiVersion: projectcalico.org/v3
metadata:
  name: default
spec:
  nodeToNodeMeshEnabled: false
EOF
}
# Turn the node-to-node mesh on.
# Note: toggling this setting causes a running confd to restart (see callers).
turn_mesh_on() {
    $CALICOCTL apply -f - <<EOF
kind: BGPConfiguration
apiVersion: projectcalico.org/v3
metadata:
  name: default
spec:
  nodeToNodeMeshEnabled: true
EOF
}
# Run an individual test using confd in daemon mode:
# - apply a set of resources using calicoctl
# - verify the templates generated by confd as a result.
#
# $1 - test directory under /tests/mock_data/calicoctl (e.g. mesh/global).
run_individual_test() {
    testdir=$1
    testdir_save=$testdir

    # Populate Calico using calicoctl to load the input.yaml test data.
    echo "Populating calico with test data using calicoctl: " $testdir
    $CALICOCTL apply -f /tests/mock_data/calicoctl/${testdir}/input.yaml

    # Populate Kubernetes API with data if it exists for this test.
    if [[ -f /tests/mock_data/calicoctl/${testdir}/kubectl-input.yaml ]]; then
        KUBECONFIG=/home/user/certs/kubeconfig kubectl apply -f /tests/mock_data/calicoctl/${testdir}/kubectl-input.yaml
    fi

    # Check the confd templates are updated.
    test_confd_templates $testdir

    # Optional second step: apply further config changes and re-verify.
    if [ -f /tests/mock_data/calicoctl/${testdir}/step2/input.yaml ]; then
        echo "Config changes for step 2"
        $CALICOCTL apply -f /tests/mock_data/calicoctl/${testdir}/step2/input.yaml

        # Check config changes as expected.
        test_confd_templates ${testdir}/step2
        # That changes testdir, so undo that change.
        testdir=$testdir_save
    fi

    # Remove any resource that does not need to be persisted due to test environment
    # limitations.
    echo "Preparing Calico data for next test"
    if [[ -f /tests/mock_data/calicoctl/${testdir}/kubectl-delete.yaml ]]; then
        KUBECONFIG=/home/user/certs/kubeconfig kubectl delete -f /tests/mock_data/calicoctl/${testdir}/kubectl-delete.yaml
    fi
    if [ -f /tests/mock_data/calicoctl/${testdir}/step2/delete.yaml ]; then
        $CALICOCTL delete -f /tests/mock_data/calicoctl/${testdir}/step2/delete.yaml
    fi
    $CALICOCTL delete -f /tests/mock_data/calicoctl/${testdir}/delete.yaml
}
# Run an individual test using oneshot mode:
# - applying a set of resources using calicoctl
# - run confd in oneshot mode
# - verify the templates generated by confd as a result.
#
# $1 - test directory under /tests/mock_data/calicoctl (e.g. mesh/global).
run_individual_test_oneshot() {
    testdir=$1

    # Populate Calico using calicoctl to load the input.yaml test data.
    echo "Populating calico with test data using calicoctl: " $testdir
    $CALICOCTL apply -f /tests/mock_data/calicoctl/${testdir}/input.yaml

    # Populate Kubernetes API with data if it exists for this test.
    if [[ -f /tests/mock_data/calicoctl/${testdir}/kubectl-input.yaml ]]; then
        KUBECONFIG=/home/user/certs/kubeconfig kubectl apply -f /tests/mock_data/calicoctl/${testdir}/kubectl-input.yaml
    fi

    # For KDD, run Typha.
    if [ "$DATASTORE_TYPE" = kubernetes ]; then
        start_typha
    fi

    # Clean up the output directory.
    rm -f /etc/calico/confd/config/*

    # Run confd in oneshot mode. "|| true" because confd's exit status is not
    # what is being tested here; the rendered templates are checked below.
    BGP_LOGSEVERITYSCREEN="debug" confd -confdir=/etc/calico/confd -onetime >$LOGPATH/logss 2>&1 || true

    # Check the confd templates are updated.
    test_confd_templates $testdir

    # For KDD, kill Typha.
    if [ "$DATASTORE_TYPE" = kubernetes ]; then
        kill_typha
    fi

    # Remove any resource that does not need to be persisted due to test environment
    # limitations.
    echo "Preparing Calico data for next test"
    if [[ -f /tests/mock_data/calicoctl/${testdir}/kubectl-delete.yaml ]]; then
        KUBECONFIG=/home/user/certs/kubeconfig kubectl delete -f /tests/mock_data/calicoctl/${testdir}/kubectl-delete.yaml
    fi
    $CALICOCTL delete -f /tests/mock_data/calicoctl/${testdir}/delete.yaml
}
# Start Typha in the background (used in KDD mode) and export the environment
# variables confd needs to connect to it. Stores the PID in TYPHA_PID for
# kill_typha.
start_typha() {
    echo "Starting Typha"
    TYPHA_DATASTORETYPE=kubernetes \
    KUBECONFIG=/home/user/certs/kubeconfig \
    TYPHA_LOGSEVERITYSCREEN=debug \
    TYPHA_LOGSEVERITYSYS=none \
    TYPHA_LOGFILEPATH=none \
    typha >$LOGPATH/typha 2>&1 &
    TYPHA_PID=$!

    # Set variables needed for confd to connect to Typha.
    export FELIX_TYPHAADDR=127.0.0.1:5473
    export FELIX_TYPHAREADTIMEOUT=50

    # Allow a little time for Typha to start up and start listening.
    #
    # If Typha isn't ready when confd tries to connect to it, confd drops a FATAL
    # log and exits. You might think that confd should retry, but our general
    # design (e.g. what Felix also does) here is to exit and be restarted by the
    # surrounding service framework.
    sleep 0.25

    # Avoid getting bash's "Killed" message in the output when we kill Typha.
    disown %?typha
}
# Stop the background Typha started by start_typha. stderr is suppressed in
# case the process has already exited.
kill_typha() {
    echo "Killing Typha"
    kill -9 $TYPHA_PID 2>/dev/null
}
# Tests that confd generates the required set of templates for the test.
# $1 would be the tests you want to run e.g. mesh/global
test_confd_templates() {
    # Compare the templates until they match (for a max of 10s).
    testdir=$1
    for i in $(seq 1 10); do echo "comparing templates attempt $i" && compare_templates $testdir 0 false && break || sleep 1; done
    # Final comparison with diff output enabled; when UPDATE_EXPECTED_DATA is
    # true this records the rendered output as the new expected data instead
    # of failing.
    compare_templates $testdir 1 ${UPDATE_EXPECTED_DATA}
}
# Compares the generated templates against the known good templates
# $1 would be the tests you want to run e.g. mesh/global
# $2 is whether or not we should output the diff results (0=no)
# $3 when "true", a mismatch overwrites the expected file with the rendered
#    output instead of failing (UPDATE_EXPECTED_DATA mode).
#
# Returns 0 on match, 1 on quiet mismatch, 2 on reported mismatch (with
# datastore state dumped to $LOGPATH for diagnosis).
compare_templates() {
    # Check the generated templates against known compiled templates.
    testdir=$1
    output=$2
    record=$3
    rc=0
    for f in `ls /tests/compiled_templates/${testdir}`; do
        if [ $f = step2 ]; then
            # Some tests have a "step2" subdirectory. If so, the BIRD
            # config in that subdir will be used when
            # compare_templates is called again with ${testdir}/step2.
            # This time through, we should skip "step2" because there
            # is nothing matching it in the actual generated config at
            # /etc/calico/confd/config/.
            continue
        fi
        expected=/tests/compiled_templates/${testdir}/${f}
        actual=/etc/calico/confd/config/${f}
        if ! diff --ignore-blank-lines -q ${expected} ${actual} 1>/dev/null 2>&1; then
            if ! $record; then
                rc=1;
            fi
            if [ $output -ne 0 ]; then
                echo "Failed: $f templates do not match, showing diff of expected vs received"
                set +e
                diff ${expected} ${actual}
                if $record; then
                    echo "Updating expected result..."
                    cp ${actual} ${expected}
                else
                    echo "Copying confd rendered output to ${LOGPATH}/rendered/${f}"
                    cp ${actual} ${LOGPATH}/rendered/${f}
                    set -e
                    rc=2
                fi
            fi
        fi
    done
    # On a reported failure, dump the relevant datastore state to the log
    # directory to help diagnose the mismatch.
    if [ $rc -eq 2 ]; then
        echo "Copying nodes to ${LOGPATH}/nodes.yaml"
        $CALICOCTL get nodes -o yaml > ${LOGPATH}/nodes.yaml
        echo "Copying bgp config to ${LOGPATH}/bgpconfig.yaml"
        $CALICOCTL get bgpconfigs -o yaml > ${LOGPATH}/bgpconfig.yaml
        echo "Copying bgp peers to ${LOGPATH}/bgppeers.yaml"
        $CALICOCTL get bgppeers -o yaml > ${LOGPATH}/bgppeers.yaml
        echo "Copying ip pools to ${LOGPATH}/ippools.yaml"
        $CALICOCTL get ippools -o yaml > ${LOGPATH}/ippools.yaml
        echo "Listing running processes"
        ps
    fi
    return $rc
}
# Verify rendering with CALICO_ROUTER_ID=hash (router ID derived by hashing
# rather than taken from an IPv4 address).
test_router_id_hash() {
    export CALICO_ROUTER_ID=hash
    run_individual_test_oneshot 'mesh/hash'
    # Clear the override so later tests are unaffected.
    export -n CALICO_ROUTER_ID
    unset CALICO_ROUTER_ID
}
# Verify sourceAddress and maxRestartTime handling on a node-specific BGPPeer:
# step1 expects a "direct" peering, step2 drops the source address, step3 adds
# "graceful restart time 10".
test_bgp_sourceaddr_gracefulrestart() {
    # Run confd as a background process.
    echo "Running confd as background process"
    NODENAME=node1 BGP_LOGSEVERITYSCREEN="debug" confd -confdir=/etc/calico/confd >$LOGPATH/logd1 2>&1 &
    CONFD_PID=$!
    echo "Running with PID " $CONFD_PID

    # Turn the node-mesh off.
    turn_mesh_off

    # Create 2 nodes with IPs directly on a local subnet, and a
    # peering between them.
    $CALICOCTL apply -f - <<EOF
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node1
spec:
  bgp:
    ipv4Address: 172.17.0.5/24
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: node2
spec:
  bgp:
    ipv4Address: 172.17.0.6/24
---
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: bgppeer-1
spec:
  node: node1
  peerIP: 172.17.0.6
  asNumber: 64512
EOF

    # Expect a "direct" peering.
    test_confd_templates sourceaddr_gracefulrestart/step1

    # Change the peering to omit source address.
    $CALICOCTL apply -f - <<EOF
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: bgppeer-1
spec:
  node: node1
  peerIP: 172.17.0.6
  asNumber: 64512
  sourceAddress: None
EOF

    # Expect direct peering without source address.
    test_confd_templates sourceaddr_gracefulrestart/step2

    # Change the peering to specify max restart time.
    $CALICOCTL apply -f - <<EOF
kind: BGPPeer
apiVersion: projectcalico.org/v3
metadata:
  name: bgppeer-1
spec:
  node: node1
  peerIP: 172.17.0.6
  asNumber: 64512
  sourceAddress: None
  maxRestartTime: 10s
EOF

    # Expect "graceful restart time 10".
    test_confd_templates sourceaddr_gracefulrestart/step3

    # Kill confd.
    kill -9 $CONFD_PID

    # Turn the node-mesh back on.
    turn_mesh_on

    # Delete remaining resources.
    # NOTE(review): bgppeer-1 is not deleted here -- confirm it is cleaned up
    # elsewhere or acceptable to leave behind in the test environment.
    $CALICOCTL delete node node1
    $CALICOCTL delete node node2
}
# Verify node-to-node mesh BGP password handling (runs for both KDD and
# etcdv3 datastores): mesh peerings pick up a password from the Secret
# referenced by the default BGPConfiguration, track secret update/deletion,
# and the passwords must never appear in the confd log.
test_node_mesh_bgp_password() {
    # For KDD, run Typha and clean up the output directory.
    if [ "$DATASTORE_TYPE" = kubernetes ]; then
        start_typha
        rm -f /etc/calico/confd/config/*
    fi

    # Run confd as a background process.
    echo "Running confd as background process"
    BGP_LOGSEVERITYSCREEN="debug" confd -confdir=/etc/calico/confd >$LOGPATH/logd1 2>&1 &
    CONFD_PID=$!
    echo "Running with PID " $CONFD_PID

    # Create 3 nodes and enable node mesh BGP password
    $CALICOCTL apply -f - <<EOF
kind: BGPConfiguration
apiVersion: projectcalico.org/v3
metadata:
  name: default
spec:
  logSeverityScreen: Info
  nodeToNodeMeshEnabled: true
  nodeMeshPassword:
    secretKeyRef:
      name: my-secrets-1
      key: a
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: kube-master
spec:
  bgp:
    ipv4Address: 10.192.0.2/16
    ipv6Address: "2001::103/64"
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: kube-node-1
spec:
  bgp:
    ipv4Address: 10.192.0.3/16
    ipv6Address: "2001::102/64"
---
kind: Node
apiVersion: projectcalico.org/v3
metadata:
  name: kube-node-2
spec:
  bgp:
    ipv4Address: 10.192.0.4/16
    ipv6Address: "2001::104/64"
---
kind: IPPool
apiVersion: projectcalico.org/v3
metadata:
  name: ippool-1
spec:
  cidr: 192.168.0.0/16
  ipipMode: Never
  natOutgoing: true
---
kind: IPPool
apiVersion: projectcalico.org/v3
metadata:
  name: ippool-2
spec:
  cidr: 2002::/64
  ipipMode: Never
  vxlanMode: Never
  natOutgoing: true
EOF

    # Expect 3 peerings, all with no password because we haven't
    # created the secrets yet.
    test_confd_templates mesh/password/step1

    # Create my-secrets-1 secret with only one of the required keys.
    kubectl create -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: my-secrets-1
  namespace: kube-system
type: Opaque
stringData:
  a: password-a
EOF

    # Expect the password now on all the peerings using my-secrets-1/a.
    test_confd_templates mesh/password/step2

    # Change the passwords in the other secret.
    kubectl replace -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: my-secrets-1
  namespace: kube-system
type: Opaque
stringData:
  a: new-password-a
EOF

    # Expect peerings to have new passwords.
    test_confd_templates mesh/password/step3

    # Change the password to an unreferenced key.
    kubectl replace -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: my-secrets-1
  namespace: kube-system
type: Opaque
stringData:
  b: password-b
EOF

    # Expect the password to have disappeared
    test_confd_templates mesh/password/step1

    # Delete a secret.
    kubectl delete secret my-secrets-1 -n kube-system

    # Expect password-a to still be gone.
    test_confd_templates mesh/password/step1

    # Kill confd.
    kill -9 $CONFD_PID

    # Delete remaining resources.
    # Only delete the ippools in KDD mode since calicoctl cannot remove the nodes
    $CALICOCTL delete ippool ippool-1
    $CALICOCTL delete ippool ippool-2
    if [ "$DATASTORE_TYPE" = etcdv3 ]; then
        $CALICOCTL delete node kube-master
        $CALICOCTL delete node kube-node-1
        $CALICOCTL delete node kube-node-2
    fi

    # For KDD, kill Typha.
    if [ "$DATASTORE_TYPE" = kubernetes ]; then
        kill_typha
    fi

    # Revert BGPConfig changes
    $CALICOCTL apply -f - <<EOF
kind: BGPConfiguration
apiVersion: projectcalico.org/v3
metadata:
  name: default
spec:
EOF

    # Check that passwords were not logged.
    password_logs="`grep 'password-' $LOGPATH/logd1 || true`"
    echo "$password_logs"
    if [ "$password_logs" ]; then
        echo "ERROR: passwords were logged"
        return 1
    fi
}
|
#!/bin/sh
set -e

# Docker entrypoint for a Redis image.
#
# If the first argument looks like an option (starts with "-") or like a
# config file (ends in ".conf"), assume the caller meant to pass arguments to
# redis-server and prepend the binary name.
#
# "${1#-}" != "$1" is true when stripping a leading "-" changed the string,
# i.e. $1 begins with a dash; "${1%.conf}" != "$1" likewise detects a ".conf"
# suffix.
if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then
	set -- redis-server "$@"
fi

# Replace the shell with the requested command so it runs as PID 1 and
# receives signals directly.
exec "$@"
|
<reponame>eengineergz/Lambda<filename>7-assets/past-student-repos/_MOST-USEFUL/Boilerplate-master/Apollo/imports/api/resolvers.js<gh_stars>0
import { Random } from 'meteor/random'
// GraphQL resolver map.
// NOTE: the original object declared `Query` twice; in a JS object literal the
// second key silently overwrites the first, so the `user` resolver was lost.
// Both query resolvers are merged into a single Query block here.
export const resolvers = {
  Query: {
    // Current authenticated user, taken from the request context.
    user(root, args, context) {
      return context.user
    },
    // Simple demo/health-check query.
    hi: () => 'Hello!',
  },
  User: {
    emails: ({ emails }) => emails,
    // Fresh random id on every resolution (demonstrates per-request fields).
    randomString: () => Random.id()
  },
}
# frozen_string_literal: true
module Etsource
  # Loads data relating to the calculation of molecule flows based on the energy graph.
  module Molecules
    module_function

    # Internal: Keys of molecule-graph nodes flagged with `from_energy`.
    #
    # These nodes will receive a demand based on flows in the energy graph.
    # The result is memoised in the Rails cache.
    #
    # Returns a sorted Array of Symbols.
    def from_energy_keys
      Rails.cache.fetch('molecules.from_energy_keys') do
        nodes = Atlas::MoleculeNode.all.select(&:from_energy)
        nodes.map(&:key).sort
      end
    end

    # Internal: Keys of energy-graph nodes flagged with `from_molecules`.
    #
    # These nodes will receive a demand based on flows in the molecule graph.
    # The result is memoised in the Rails cache.
    #
    # Returns a sorted Array of Symbols.
    def from_molecules_keys
      Rails.cache.fetch('molecules.from_molecules_keys') do
        nodes = Atlas::EnergyNode.all.select(&:from_molecules)
        nodes.map(&:key).sort
      end
    end
  end
end
|
import numpy as np
from pcit.IndependenceTest import FDRcontrol, PCIT
from pcit.MetaEstimator import MetaEstimator
def find_neighbours(X, estimator = MetaEstimator(), confidence = 0.05):
    '''
    Undirected graph skeleton learning routine.

    For every pair of variables (i, j), tests whether they are conditionally
    independent given all remaining variables, and builds the skeleton of the
    undirected graph from the results.
    ----------------
    Attributes:
        - X: data set for undirected graph estimation, size: [samples x dimensions]
        - estimator: object of the MetaEstimator class
        - confidence: false-discovery rate level
    Returns:
        - skeleton: Matrix (graph) with entries being the p-values for each individual test
        - skeleton_adj: Matrix (graph) with skeleton, after application of FDR control
    '''
    # NOTE(review): the default `MetaEstimator()` is evaluated once at import
    # time and shared across calls -- fine if MetaEstimator is stateless;
    # verify before relying on the default in concurrent use.
    p = X.shape[1]
    # p x p matrix of zeros; will hold one test statistic per variable pair
    skeleton = np.reshape(np.zeros(p**2), (p,p))
    # Loop over all unordered pairs (i, j) of columns of X
    for i in range(p-1):
        for j in range(i + 1, p):
            # Column vectors for the pair under test
            input_var = np.reshape(X[:,i], (-1,1))
            output_var = np.reshape(X[:,j], (-1,1))
            # All remaining variables form the conditioning set
            conditioning_set = np.delete(X, (i,j), 1)
            # Conditional independence test conditional on all other variables
            p_values_adj, independent, ci = PCIT(output_var, input_var,
                                z = conditioning_set, confidence = confidence, estimator = estimator)
            # P-value of the null hypothesis that the pair is independent given
            # all other variables.  NOTE(review): this stores `independent[1]`
            # rather than `p_values_adj` -- presumably PCIT's second return
            # value carries the p-value used here; confirm against the PCIT API.
            skeleton[j,i] = independent[1]
            # Mirror the entry so the matrix stays symmetric
            skeleton[i,j] = skeleton[j,i]
    # Apply FDR control, then threshold at the confidence level: entry 1 means
    # the independence null was rejected (an edge is kept), 0 means no edge.
    skeleton_adj = (FDRcontrol(skeleton, confidence)[0] < confidence) * 1
    return skeleton, skeleton_adj
<filename>open-sphere-plugins/open-sensor-hub/src/main/java/io/opensphere/osh/results/video/VideoWindow.java
package io.opensphere.osh.results.video;
import java.awt.Dimension;
import java.awt.EventQueue;
import java.awt.Window;
import java.io.ByteArrayInputStream;
import javafx.application.Platform;
import javafx.embed.swing.JFXPanel;
import javafx.scene.Group;
import javafx.scene.Scene;
import javafx.scene.image.Image;
import javafx.scene.image.ImageView;
import javax.swing.JDialog;
import io.opensphere.mantle.data.DataTypeInfo;
/** A video window: a modeless dialog that displays a stream of video frames. */
public class VideoWindow extends JDialog
{
    /** Serial version UID. */
    private static final long serialVersionUID = 1L;

    /** Shows the most recently supplied video frame. */
    private ImageView imageView;

    /** True until the first frame arrives; used to size the dialog once. */
    private boolean awaitingFirstFrame = true;

    /**
     * Constructor.
     *
     * @param owner the owner
     * @param dataType the data type
     */
    public VideoWindow(Window owner, DataTypeInfo dataType)
    {
        super(owner, dataType.getDisplayName(), ModalityType.MODELESS);

        setDefaultCloseOperation(HIDE_ON_CLOSE);
        setMinimumSize(new Dimension(300, 300));
        setSize(600, 600);
        setLocationRelativeTo(owner);

        JFXPanel fxPanel = new JFXPanel();
        add(fxPanel);
        // JavaFX scene-graph setup must run on the FX application thread.
        Platform.runLater(() -> initFx(fxPanel));
    }

    /**
     * Sets the image bytes.
     *
     * @param bytes the image bytes
     */
    public void setImageBytes(byte[] bytes)
    {
        Platform.runLater(() ->
        {
            Image frame = new Image(new ByteArrayInputStream(bytes));
            imageView.setImage(frame);

            // Resize the dialog once, to match the first frame's dimensions.
            if (awaitingFirstFrame)
            {
                awaitingFirstFrame = false;
                int width = (int)frame.getWidth();
                int height = (int)frame.getHeight();
                // Swing sizing must happen on the AWT event-dispatch thread.
                EventQueue.invokeLater(() -> setSize(width, height));
            }
        });
    }

    /**
     * Initializes the JavaFX stuff.
     *
     * @param fxPanel the JFXPanel
     */
    private void initFx(JFXPanel fxPanel)
    {
        imageView = new ImageView();
        Scene scene = new Scene(new Group(imageView));
        fxPanel.setScene(scene);

        // Let the displayed frame scale with the dialog.
        imageView.fitWidthProperty().bind(scene.widthProperty());
        imageView.fitHeightProperty().bind(scene.heightProperty());
    }
}
|
<form action="process.php" method="post">
    <!-- Text input: label associated via matching for/id -->
    <label for="name">Name:</label>
    <input type="text" name="name" id="name" />
    <br />
    <!-- Radio group: the old <label for="gender"> pointed at a non-existent
         id; each input now has its own id and clickable label -->
    Gender:
    <input type="radio" name="gender" id="gender-female" value="female" />
    <label for="gender-female">Female</label>
    <input type="radio" name="gender" id="gender-male" value="male" />
    <label for="gender-male">Male</label>
    <br />
    <!-- Checkbox group: one id + label per box (for="drinks" was dangling) -->
    Drinks:
    <input type="checkbox" name="drinks" id="drinks-soda" value="soda"/>
    <label for="drinks-soda">Soda</label>
    <input type="checkbox" name="drinks" id="drinks-coffee" value="coffee"/>
    <label for="drinks-coffee">Coffee</label>
    <input type="checkbox" name="drinks" id="drinks-tea" value="tea"/>
    <label for="drinks-tea">Tea</label>
    <br />
    <!-- Select: id added so the existing for="language" actually resolves -->
    <label for="language">Language:</label>
    <select name="language" id="language">
        <option value="en">English</option>
        <option value="es">Spanish</option>
        <option value="fr">French</option>
    </select>
    <br />
    <input type="submit" value="Submit" />
</form>
/**
 * Draw `count` random integers uniformly from [0, limit) and return the
 * largest one.  Defaults preserve the original behavior (1000 draws from
 * 0..999).  Tracks the maximum directly instead of materialising an array
 * and spreading it into Math.max -- O(1) extra space and no risk of hitting
 * the engine's argument-count limit for large draws.
 *
 * @param {number} count  how many random integers to draw (default 1000)
 * @param {number} limit  exclusive upper bound of each draw (default 1000)
 * @returns {number} the maximum draw, or -Infinity when count is 0
 */
const randomNumbers = (count = 1000, limit = 1000) => {
  let max = -Infinity;
  for (let i = 0; i < count; i++) {
    const value = Math.floor(Math.random() * limit);
    if (value > max) {
      max = value;
    }
  }
  return max;
};

console.log(randomNumbers());
package io.miti.jarman.data;
import io.miti.jarman.gui.Jarman;
import io.miti.jarman.util.WindowState;
import java.awt.event.KeyEvent;
import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import javax.swing.JMenu;
import javax.swing.JMenuItem;
import io.miti.jarman.data.FileList;
import io.miti.jarman.data.JarData;
import io.miti.jarman.data.MenuItemAction;
/**
 * This class manages the most-recently opened (MRU) files listed in the
 * File menu item.  The list is kept in most-recently-used order, persisted
 * through a Properties object, and rendered as numbered menu items.
 *
 * @author mwallace
 * @version 1.0
 */
public final class FileList
{
  /**
   * The one instance of this class (lazily created singleton).
   */
  private static FileList data = null;

  /**
   * The list of files, most recently used first.
   */
  private List<String> files = null;

  /**
   * The maximum number of files to keep in the list.
   */
  private static final int MAX_FILES = 4;

  /**
   * The property key for the number of files.
   */
  private static final String numFilesProp = "num.files";

  /**
   * Default constructor.
   */
  private FileList()
  {
    super();
    files = new ArrayList<String>(MAX_FILES);
  }

  /**
   * Return the one instance of this class.
   *
   * @return the one instance of this class
   */
  public static FileList getInstance()
  {
    if (data == null)
    {
      data = new FileList();
    }
    return data;
  }

  /**
   * Load the saved file list from the properties file.
   *
   * @param props the properties file instance
   */
  public void loadData(final Properties props)
  {
    // Number of saved files, clamped to the maximum we keep
    final int count = Math.min(MAX_FILES, WindowState.parseInteger(props, numFilesProp));
    if (count < 1)
    {
      return;
    }

    // Read each "file.N" entry, skipping missing or blank values
    for (int i = 0; i < count; ++i)
    {
      String key = "file." + Integer.toString(i);
      String val = (String) props.get(key);
      if ((val == null) || (val.trim().length() < 1))
      {
        continue;
      }
      files.add(val.trim());
    }
  }

  /**
   * Add a file to the top of the MRU list, de-duplicating by absolute path.
   *
   * @param file the file to add
   */
  public void addFile(final File file)
  {
    // List.indexOf performs the same equals()-based linear search the
    // previous hand-rolled loop did
    final String name = file.getAbsolutePath();
    final int match = files.indexOf(name);

    if (match == 0)
    {
      // The file is already at the top of the list, so do nothing
      return;
    }
    else if (match > 0)
    {
      // Present lower down; move it to the top
      files.remove(match);
      files.add(0, name);
    }
    else
    {
      // Not present; add to the top of the list
      files.add(0, name);

      // Evict the oldest entry if the list grew past the maximum
      if (files.size() > MAX_FILES)
      {
        files.remove(MAX_FILES);
      }
    }
  }

  /**
   * Remove the entry with the specified index.
   *
   * @param index the index of the file to remove
   */
  public void removeFile(final int index)
  {
    files.remove(index);
  }

  /**
   * Rebuild the MRU section of the File menu.
   */
  public void updateMenu()
  {
    // The File menu is assumed to be the first menu and to contain 8 fixed
    // items; anything beyond that is the MRU separator plus file entries
    // added by a previous call.
    // NOTE(review): the constants 8, 7 and 6 below mirror Jarman's menu
    // construction -- confirm there before changing the File menu layout.
    JMenu menu = Jarman.getApp().getFrame().getJMenuBar().getMenu(0);
    final int count = menu.getItemCount();
    int numLeft = count - 8;

    // Remove any files and separator after the first separator
    while (numLeft > 0)
    {
      menu.remove(7);
      --numLeft;
    }

    // If no files to list, return
    final int size = files.size();
    if (size < 1)
    {
      return;
    }

    // Add a separator after File | List All
    int menuIndex = 6;
    menu.insertSeparator(menuIndex++);

    // Add one numbered, mnemonic-equipped menu item per file
    int fileIndex = 0;
    for (int i = 0; i < size; ++i)
    {
      String name = parseFilename(files.get(i));
      if (name == null)
      {
        continue;
      }

      // Create the menu item ("1 foo.jar", "2 bar.jar", ...)
      String text = Integer.toString(fileIndex + 1) + " " + name;
      MenuItemAction action = new MenuItemAction(text, i, files.get(i));
      JMenuItem item = new JMenuItem(action);
      item.setMnemonic(KeyEvent.VK_1 + fileIndex);
      menu.insert(item, menuIndex++);
      ++fileIndex;
    }
  }

  /**
   * Parse the name of the file from the full path.
   *
   * @param fullPath the full name of the file
   * @return the filename
   */
  private static String parseFilename(final String fullPath)
  {
    final int slashIndex = JarData.getLastSlashIndex(fullPath);
    final String fname = ((slashIndex >= 0) ? fullPath.substring(
        slashIndex + 1) : fullPath);
    return fname;
  }

  /**
   * Save the file list to the properties file.
   *
   * @param props the properties file instance
   */
  public void saveData(final Properties props)
  {
    // Save the count, then one "file.N" property per entry
    final int size = files.size();
    props.put(numFilesProp, Integer.toString(size));
    for (int i = 0; i < size; ++i)
    {
      String key = "file." + Integer.toString(i);
      props.put(key, files.get(i));
    }
  }
}
|
<reponame>manuel-hegner/conquery<gh_stars>0
import { css } from "@emotion/react";
import styled from "@emotion/styled";
import React, { ReactNode } from "react";
import { IndexPrefix } from "../common/components/IndexPrefix";
import { exists } from "../common/helpers/exists";
import InfoTooltip from "../tooltip/InfoTooltip";
import Label from "./Label";
import Optional from "./Optional";
// Outer <label> wrapper; when `fullWidth` is set it stretches itself and any
// nested <input> to the full available width.
const Root = styled("label")<{ fullWidth?: boolean }>`
  ${({ fullWidth }) =>
    fullWidth &&
    css`
      width: 100%;
      input {
        width: 100%;
      }
    `};
`;
/** Props accepted by the Labeled wrapper component. */
interface Props {
  /** Content rendered inside the Label element. */
  label: ReactNode;
  /** When set, renders a "# n" prefix before the label. */
  indexPrefix?: number;
  /** Class applied to the outer label wrapper. */
  className?: string;
  /** Render the label in its tiny variant. */
  tinyLabel?: boolean;
  /** Render the label in its large variant. */
  largeLabel?: boolean;
  /** Stretch the wrapper (and any nested input) to 100% width. */
  fullWidth?: boolean;
  /** Form control(s) the label describes. */
  children?: React.ReactNode;
  /** Marks the field as optional (renders the Optional indicator). */
  optional?: boolean;
  /** When set, shows an info tooltip next to the label. */
  tooltip?: string;
  /** Forwarded to the label's htmlFor attribute. */
  htmlFor?: string;
}
const Labeled = ({
indexPrefix,
className,
fullWidth,
label,
tinyLabel,
largeLabel,
tooltip,
optional,
htmlFor,
children,
}: Props) => {
return (
<Root className={className} fullWidth={fullWidth} htmlFor={htmlFor}>
<Label fullWidth={fullWidth} tiny={tinyLabel} large={largeLabel}>
{exists(indexPrefix) && <IndexPrefix># {indexPrefix}</IndexPrefix>}
{optional && <Optional />}
{label}
{exists(tooltip) && <InfoTooltip text={tooltip} />}
</Label>
{children}
</Root>
);
};
export default Labeled;
|
# Emits the C forward declarations that the generated vp8_rtcd header needs
# before the function prototypes defined below.  The heredoc body is pasted
# verbatim into the generated header, so it must remain valid C.
vp8_common_forward_decls() {
cat <<EOF
/*
 * VP8
 */
struct blockd;
struct macroblockd;
struct loop_filter_info;
/* Encoder forward decls */
struct block;
struct macroblock;
struct variance_vtable;
union int_mv;
struct yv12_buffer_config;
EOF
}
# Register the declarations with the rtcd header generator.
forward_decls vp8_common_forward_decls
#
# system state
#
prototype void vp8_clear_system_state ""
specialize vp8_clear_system_state mmx
vp8_clear_system_state_mmx=vpx_reset_mmx_state
#
# Dequant
#
prototype void vp8_dequantize_b "struct blockd*, short *dqc"
specialize vp8_dequantize_b mmx media neon
vp8_dequantize_b_media=vp8_dequantize_b_v6
prototype void vp8_dequant_idct_add "short *input, short *dq, unsigned char *output, int stride"
specialize vp8_dequant_idct_add mmx media neon dspr2
vp8_dequant_idct_add_media=vp8_dequant_idct_add_v6
vp8_dequant_idct_add_dspr2=vp8_dequant_idct_add_dspr2
prototype void vp8_dequant_idct_add_y_block "short *q, short *dq, unsigned char *dst, int stride, char *eobs"
specialize vp8_dequant_idct_add_y_block mmx sse2 media neon dspr2
vp8_dequant_idct_add_y_block_media=vp8_dequant_idct_add_y_block_v6
vp8_dequant_idct_add_y_block_dspr2=vp8_dequant_idct_add_y_block_dspr2
prototype void vp8_dequant_idct_add_uv_block "short *q, short *dq, unsigned char *dst_u, unsigned char *dst_v, int stride, char *eobs"
specialize vp8_dequant_idct_add_uv_block mmx sse2 media neon dspr2
vp8_dequant_idct_add_uv_block_media=vp8_dequant_idct_add_uv_block_v6
# Fixed copy-paste error: this line previously re-assigned the y_block dspr2
# symbol (already set in the y_block section above) instead of mapping the
# uv_block dspr2 variant declared by the specialize line.
vp8_dequant_idct_add_uv_block_dspr2=vp8_dequant_idct_add_uv_block_dspr2
#
# Loopfilter
#
prototype void vp8_loop_filter_mbv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_mbv mmx sse2 media neon dspr2
vp8_loop_filter_mbv_media=vp8_loop_filter_mbv_armv6
vp8_loop_filter_mbv_dspr2=vp8_loop_filter_mbv_dspr2
prototype void vp8_loop_filter_bv "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_bv mmx sse2 media neon dspr2
vp8_loop_filter_bv_media=vp8_loop_filter_bv_armv6
vp8_loop_filter_bv_dspr2=vp8_loop_filter_bv_dspr2
prototype void vp8_loop_filter_mbh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_mbh mmx sse2 media neon dspr2
vp8_loop_filter_mbh_media=vp8_loop_filter_mbh_armv6
vp8_loop_filter_mbh_dspr2=vp8_loop_filter_mbh_dspr2
prototype void vp8_loop_filter_bh "unsigned char *y, unsigned char *u, unsigned char *v, int ystride, int uv_stride, struct loop_filter_info *lfi"
specialize vp8_loop_filter_bh mmx sse2 media neon dspr2
vp8_loop_filter_bh_media=vp8_loop_filter_bh_armv6
vp8_loop_filter_bh_dspr2=vp8_loop_filter_bh_dspr2
prototype void vp8_loop_filter_simple_mbv "unsigned char *y, int ystride, const unsigned char *blimit"
specialize vp8_loop_filter_simple_mbv mmx sse2 media neon
vp8_loop_filter_simple_mbv_c=vp8_loop_filter_simple_vertical_edge_c
vp8_loop_filter_simple_mbv_mmx=vp8_loop_filter_simple_vertical_edge_mmx
vp8_loop_filter_simple_mbv_sse2=vp8_loop_filter_simple_vertical_edge_sse2
vp8_loop_filter_simple_mbv_media=vp8_loop_filter_simple_vertical_edge_armv6
vp8_loop_filter_simple_mbv_neon=vp8_loop_filter_mbvs_neon
prototype void vp8_loop_filter_simple_mbh "unsigned char *y, int ystride, const unsigned char *blimit"
specialize vp8_loop_filter_simple_mbh mmx sse2 media neon
vp8_loop_filter_simple_mbh_c=vp8_loop_filter_simple_horizontal_edge_c
vp8_loop_filter_simple_mbh_mmx=vp8_loop_filter_simple_horizontal_edge_mmx
vp8_loop_filter_simple_mbh_sse2=vp8_loop_filter_simple_horizontal_edge_sse2
vp8_loop_filter_simple_mbh_media=vp8_loop_filter_simple_horizontal_edge_armv6
vp8_loop_filter_simple_mbh_neon=vp8_loop_filter_mbhs_neon
prototype void vp8_loop_filter_simple_bv "unsigned char *y, int ystride, const unsigned char *blimit"
specialize vp8_loop_filter_simple_bv mmx sse2 media neon
vp8_loop_filter_simple_bv_c=vp8_loop_filter_bvs_c
vp8_loop_filter_simple_bv_mmx=vp8_loop_filter_bvs_mmx
vp8_loop_filter_simple_bv_sse2=vp8_loop_filter_bvs_sse2
vp8_loop_filter_simple_bv_media=vp8_loop_filter_bvs_armv6
vp8_loop_filter_simple_bv_neon=vp8_loop_filter_bvs_neon
prototype void vp8_loop_filter_simple_bh "unsigned char *y, int ystride, const unsigned char *blimit"
specialize vp8_loop_filter_simple_bh mmx sse2 media neon
vp8_loop_filter_simple_bh_c=vp8_loop_filter_bhs_c
vp8_loop_filter_simple_bh_mmx=vp8_loop_filter_bhs_mmx
vp8_loop_filter_simple_bh_sse2=vp8_loop_filter_bhs_sse2
vp8_loop_filter_simple_bh_media=vp8_loop_filter_bhs_armv6
vp8_loop_filter_simple_bh_neon=vp8_loop_filter_bhs_neon
#
# IDCT
#
#idct16
prototype void vp8_short_idct4x4llm "short *input, unsigned char *pred, int pitch, unsigned char *dst, int dst_stride"
specialize vp8_short_idct4x4llm mmx media neon dspr2
vp8_short_idct4x4llm_media=vp8_short_idct4x4llm_v6_dual
vp8_short_idct4x4llm_dspr2=vp8_short_idct4x4llm_dspr2
#iwalsh1
prototype void vp8_short_inv_walsh4x4_1 "short *input, short *output"
specialize vp8_short_inv_walsh4x4_1 dspr2
vp8_short_inv_walsh4x4_1_dspr2=vp8_short_inv_walsh4x4_1_dspr2
# no asm yet
#iwalsh16
prototype void vp8_short_inv_walsh4x4 "short *input, short *output"
specialize vp8_short_inv_walsh4x4 mmx sse2 media neon dspr2
vp8_short_inv_walsh4x4_media=vp8_short_inv_walsh4x4_v6
vp8_short_inv_walsh4x4_dspr2=vp8_short_inv_walsh4x4_dspr2
#idct1_scalar_add
prototype void vp8_dc_only_idct_add "short input, unsigned char *pred, int pred_stride, unsigned char *dst, int dst_stride"
specialize vp8_dc_only_idct_add mmx media neon dspr2
vp8_dc_only_idct_add_media=vp8_dc_only_idct_add_v6
vp8_dc_only_idct_add_dspr2=vp8_dc_only_idct_add_dspr2
#
# RECON
#
prototype void vp8_copy_mem16x16 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem16x16 mmx sse2 media neon dspr2
vp8_copy_mem16x16_media=vp8_copy_mem16x16_v6
vp8_copy_mem16x16_dspr2=vp8_copy_mem16x16_dspr2
prototype void vp8_copy_mem8x8 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem8x8 mmx media neon dspr2
vp8_copy_mem8x8_media=vp8_copy_mem8x8_v6
vp8_copy_mem8x8_dspr2=vp8_copy_mem8x8_dspr2
prototype void vp8_copy_mem8x4 "unsigned char *src, int src_pitch, unsigned char *dst, int dst_pitch"
specialize vp8_copy_mem8x4 mmx media neon dspr2
vp8_copy_mem8x4_media=vp8_copy_mem8x4_v6
vp8_copy_mem8x4_dspr2=vp8_copy_mem8x4_dspr2
prototype void vp8_build_intra_predictors_mby_s "struct macroblockd *x, unsigned char * yabove_row, unsigned char * yleft, int left_stride, unsigned char * ypred_ptr, int y_stride"
specialize vp8_build_intra_predictors_mby_s sse2 ssse3
#TODO: fix assembly for neon
prototype void vp8_build_intra_predictors_mbuv_s "struct macroblockd *x, unsigned char * uabove_row, unsigned char * vabove_row, unsigned char *uleft, unsigned char *vleft, int left_stride, unsigned char * upred_ptr, unsigned char * vpred_ptr, int pred_stride"
specialize vp8_build_intra_predictors_mbuv_s sse2 ssse3
prototype void vp8_intra4x4_predict "unsigned char *Above, unsigned char *yleft, int left_stride, int b_mode, unsigned char *dst, int dst_stride, unsigned char top_left"
specialize vp8_intra4x4_predict media
vp8_intra4x4_predict_media=vp8_intra4x4_predict_armv6
#
# Postproc
#
if [ "$CONFIG_POSTPROC" = "yes" ]; then
prototype void vp8_mbpost_proc_down "unsigned char *dst, int pitch, int rows, int cols,int flimit"
specialize vp8_mbpost_proc_down mmx sse2
vp8_mbpost_proc_down_sse2=vp8_mbpost_proc_down_xmm
prototype void vp8_mbpost_proc_across_ip "unsigned char *dst, int pitch, int rows, int cols,int flimit"
specialize vp8_mbpost_proc_across_ip sse2
vp8_mbpost_proc_across_ip_sse2=vp8_mbpost_proc_across_ip_xmm
prototype void vp8_post_proc_down_and_across_mb_row "unsigned char *src, unsigned char *dst, int src_pitch, int dst_pitch, int cols, unsigned char *flimits, int size"
specialize vp8_post_proc_down_and_across_mb_row sse2
prototype void vp8_plane_add_noise "unsigned char *s, char *noise, char blackclamp[16], char whiteclamp[16], char bothclamp[16], unsigned int w, unsigned int h, int pitch"
specialize vp8_plane_add_noise mmx sse2
vp8_plane_add_noise_sse2=vp8_plane_add_noise_wmt
prototype void vp8_blend_mb_inner "unsigned char *y, unsigned char *u, unsigned char *v, int y1, int u1, int v1, int alpha, int stride"
# no asm yet
prototype void vp8_blend_mb_outer "unsigned char *y, unsigned char *u, unsigned char *v, int y1, int u1, int v1, int alpha, int stride"
# no asm yet
prototype void vp8_blend_b "unsigned char *y, unsigned char *u, unsigned char *v, int y1, int u1, int v1, int alpha, int stride"
# no asm yet
prototype void vp8_filter_by_weight16x16 "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, int src_weight"
specialize vp8_filter_by_weight16x16 sse2
prototype void vp8_filter_by_weight8x8 "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, int src_weight"
specialize vp8_filter_by_weight8x8 sse2
prototype void vp8_filter_by_weight4x4 "unsigned char *src, int src_stride, unsigned char *dst, int dst_stride, int src_weight"
# no asm yet
fi
#
# Subpixel
#
prototype void vp8_sixtap_predict16x16 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
specialize vp8_sixtap_predict16x16 mmx sse2 ssse3 media neon dspr2
vp8_sixtap_predict16x16_media=vp8_sixtap_predict16x16_armv6
vp8_sixtap_predict16x16_dspr2=vp8_sixtap_predict16x16_dspr2
prototype void vp8_sixtap_predict8x8 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
specialize vp8_sixtap_predict8x8 mmx sse2 ssse3 media neon dspr2
vp8_sixtap_predict8x8_media=vp8_sixtap_predict8x8_armv6
vp8_sixtap_predict8x8_dspr2=vp8_sixtap_predict8x8_dspr2
prototype void vp8_sixtap_predict8x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
specialize vp8_sixtap_predict8x4 mmx sse2 ssse3 media neon dspr2
vp8_sixtap_predict8x4_media=vp8_sixtap_predict8x4_armv6
vp8_sixtap_predict8x4_dspr2=vp8_sixtap_predict8x4_dspr2
prototype void vp8_sixtap_predict4x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
specialize vp8_sixtap_predict4x4 mmx ssse3 media neon dspr2
vp8_sixtap_predict4x4_media=vp8_sixtap_predict4x4_armv6
vp8_sixtap_predict4x4_dspr2=vp8_sixtap_predict4x4_dspr2
prototype void vp8_bilinear_predict16x16 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
specialize vp8_bilinear_predict16x16 mmx sse2 ssse3 media neon
vp8_bilinear_predict16x16_media=vp8_bilinear_predict16x16_armv6
prototype void vp8_bilinear_predict8x8 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
specialize vp8_bilinear_predict8x8 mmx sse2 ssse3 media neon
vp8_bilinear_predict8x8_media=vp8_bilinear_predict8x8_armv6
prototype void vp8_bilinear_predict8x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
specialize vp8_bilinear_predict8x4 mmx media neon
vp8_bilinear_predict8x4_media=vp8_bilinear_predict8x4_armv6
prototype void vp8_bilinear_predict4x4 "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"
specialize vp8_bilinear_predict4x4 mmx media neon
vp8_bilinear_predict4x4_media=vp8_bilinear_predict4x4_armv6
#
# Whole-pixel Variance
#
prototype unsigned int vp8_variance4x4 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp8_variance4x4 mmx sse2
vp8_variance4x4_sse2=vp8_variance4x4_wmt
prototype unsigned int vp8_variance8x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp8_variance8x8 mmx sse2 media neon
vp8_variance8x8_sse2=vp8_variance8x8_wmt
vp8_variance8x8_media=vp8_variance8x8_armv6
prototype unsigned int vp8_variance8x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp8_variance8x16 mmx sse2 neon
vp8_variance8x16_sse2=vp8_variance8x16_wmt
prototype unsigned int vp8_variance16x8 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp8_variance16x8 mmx sse2 neon
vp8_variance16x8_sse2=vp8_variance16x8_wmt
prototype unsigned int vp8_variance16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp8_variance16x16 mmx sse2 media neon
vp8_variance16x16_sse2=vp8_variance16x16_wmt
vp8_variance16x16_media=vp8_variance16x16_armv6
#
# Sub-pixel Variance
#
prototype unsigned int vp8_sub_pixel_variance4x4 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
specialize vp8_sub_pixel_variance4x4 mmx sse2
vp8_sub_pixel_variance4x4_sse2=vp8_sub_pixel_variance4x4_wmt
prototype unsigned int vp8_sub_pixel_variance8x8 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
specialize vp8_sub_pixel_variance8x8 mmx sse2 media neon
vp8_sub_pixel_variance8x8_sse2=vp8_sub_pixel_variance8x8_wmt
vp8_sub_pixel_variance8x8_media=vp8_sub_pixel_variance8x8_armv6
prototype unsigned int vp8_sub_pixel_variance8x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
specialize vp8_sub_pixel_variance8x16 mmx sse2
vp8_sub_pixel_variance8x16_sse2=vp8_sub_pixel_variance8x16_wmt
prototype unsigned int vp8_sub_pixel_variance16x8 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
specialize vp8_sub_pixel_variance16x8 mmx sse2 ssse3
vp8_sub_pixel_variance16x8_sse2=vp8_sub_pixel_variance16x8_wmt
prototype unsigned int vp8_sub_pixel_variance16x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
specialize vp8_sub_pixel_variance16x16 mmx sse2 ssse3 media neon
vp8_sub_pixel_variance16x16_sse2=vp8_sub_pixel_variance16x16_wmt
vp8_sub_pixel_variance16x16_media=vp8_sub_pixel_variance16x16_armv6
prototype unsigned int vp8_variance_halfpixvar16x16_h "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp8_variance_halfpixvar16x16_h mmx sse2 media neon
vp8_variance_halfpixvar16x16_h_sse2=vp8_variance_halfpixvar16x16_h_wmt
vp8_variance_halfpixvar16x16_h_media=vp8_variance_halfpixvar16x16_h_armv6
prototype unsigned int vp8_variance_halfpixvar16x16_v "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp8_variance_halfpixvar16x16_v mmx sse2 media neon
vp8_variance_halfpixvar16x16_v_sse2=vp8_variance_halfpixvar16x16_v_wmt
vp8_variance_halfpixvar16x16_v_media=vp8_variance_halfpixvar16x16_v_armv6
prototype unsigned int vp8_variance_halfpixvar16x16_hv "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp8_variance_halfpixvar16x16_hv mmx sse2 media neon
vp8_variance_halfpixvar16x16_hv_sse2=vp8_variance_halfpixvar16x16_hv_wmt
vp8_variance_halfpixvar16x16_hv_media=vp8_variance_halfpixvar16x16_hv_armv6
#
# Single block SAD
#
prototype unsigned int vp8_sad4x4 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp8_sad4x4 mmx sse2 neon
vp8_sad4x4_sse2=vp8_sad4x4_wmt
prototype unsigned int vp8_sad8x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp8_sad8x8 mmx sse2 neon
vp8_sad8x8_sse2=vp8_sad8x8_wmt
prototype unsigned int vp8_sad8x16 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp8_sad8x16 mmx sse2 neon
vp8_sad8x16_sse2=vp8_sad8x16_wmt
prototype unsigned int vp8_sad16x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp8_sad16x8 mmx sse2 neon
vp8_sad16x8_sse2=vp8_sad16x8_wmt
prototype unsigned int vp8_sad16x16 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int max_sad"
specialize vp8_sad16x16 mmx sse2 sse3 media neon
vp8_sad16x16_sse2=vp8_sad16x16_wmt
vp8_sad16x16_media=vp8_sad16x16_armv6
#
# Multi-block SAD, comparing a reference to N blocks 1 pixel apart horizontally
#
prototype void vp8_sad4x4x3 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp8_sad4x4x3 sse3
prototype void vp8_sad8x8x3 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp8_sad8x8x3 sse3
prototype void vp8_sad8x16x3 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp8_sad8x16x3 sse3
prototype void vp8_sad16x8x3 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp8_sad16x8x3 sse3 ssse3
prototype void vp8_sad16x16x3 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sad_array"
specialize vp8_sad16x16x3 sse3 ssse3
# Note the only difference in the following prototypes is that they return into
# an array of short
prototype void vp8_sad4x4x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
specialize vp8_sad4x4x8 sse4_1
vp8_sad4x4x8_sse4_1=vp8_sad4x4x8_sse4
prototype void vp8_sad8x8x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
specialize vp8_sad8x8x8 sse4_1
vp8_sad8x8x8_sse4_1=vp8_sad8x8x8_sse4
prototype void vp8_sad8x16x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
specialize vp8_sad8x16x8 sse4_1
vp8_sad8x16x8_sse4_1=vp8_sad8x16x8_sse4
prototype void vp8_sad16x8x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
specialize vp8_sad16x8x8 sse4_1
vp8_sad16x8x8_sse4_1=vp8_sad16x8x8_sse4
prototype void vp8_sad16x16x8 "const unsigned char *src_ptr, int src_stride, const unsigned char *ref_ptr, int ref_stride, unsigned short *sad_array"
specialize vp8_sad16x16x8 sse4_1
vp8_sad16x16x8_sse4_1=vp8_sad16x16x8_sse4
#
# Multi-block SAD, comparing a reference to N independent blocks
#
prototype void vp8_sad4x4x4d "const unsigned char *src_ptr, int src_stride, const unsigned char * const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp8_sad4x4x4d sse3
prototype void vp8_sad8x8x4d "const unsigned char *src_ptr, int src_stride, const unsigned char * const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp8_sad8x8x4d sse3
prototype void vp8_sad8x16x4d "const unsigned char *src_ptr, int src_stride, const unsigned char * const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp8_sad8x16x4d sse3
prototype void vp8_sad16x8x4d "const unsigned char *src_ptr, int src_stride, const unsigned char * const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp8_sad16x8x4d sse3
prototype void vp8_sad16x16x4d "const unsigned char *src_ptr, int src_stride, const unsigned char * const ref_ptr[], int ref_stride, unsigned int *sad_array"
specialize vp8_sad16x16x4d sse3
#
# Encoder functions below this point.
#
if [ "$CONFIG_VP8_ENCODER" = "yes" ]; then
#
# Sum of squares (vector)
#
prototype unsigned int vp8_get_mb_ss "const short *"
specialize vp8_get_mb_ss mmx sse2
#
# SSE (Sum Squared Error)
#
prototype unsigned int vp8_sub_pixel_mse16x16 "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"
specialize vp8_sub_pixel_mse16x16 mmx sse2
vp8_sub_pixel_mse16x16_sse2=vp8_sub_pixel_mse16x16_wmt
# Run-time CPU detection (RTCD) configuration, presumably libvpx's rtcd DSL — TODO confirm.
# `prototype <ret> <name> "<args>"` declares a function pointer slot,
# `specialize <name> <isa...>` lists the per-ISA optimized implementations, and
# `<name>_<isa>=<symbol>` remaps an ISA variant to a differently named symbol.
prototype unsigned int vp8_mse16x16 "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"
specialize vp8_mse16x16 mmx sse2 media neon
# The SSE2 and ARMv6 ("media") builds export these under historical symbol names.
vp8_mse16x16_sse2=vp8_mse16x16_wmt
vp8_mse16x16_media=vp8_mse16x16_armv6
prototype unsigned int vp8_get4x4sse_cs "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride"
specialize vp8_get4x4sse_cs mmx neon
#
# Block copy
#
# vp8_copy32xn only has x86 implementations, so it is only declared for x86 targets.
case $arch in
x86*)
prototype void vp8_copy32xn "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int n"
specialize vp8_copy32xn sse2 sse3
;;
esac
#
# Structured Similarity (SSIM)
#
# SSIM helpers are only built when internal statistics are enabled; the SSE2
# variants exist only on x86_64 ($sse2_on_x86_64 stays empty elsewhere).
if [ "$CONFIG_INTERNAL_STATS" = "yes" ]; then
[ $arch = "x86_64" ] && sse2_on_x86_64=sse2
prototype void vp8_ssim_parms_8x8 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
specialize vp8_ssim_parms_8x8 $sse2_on_x86_64
prototype void vp8_ssim_parms_16x16 "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"
specialize vp8_ssim_parms_16x16 $sse2_on_x86_64
fi
#
# Forward DCT
#
prototype void vp8_short_fdct4x4 "short *input, short *output, int pitch"
specialize vp8_short_fdct4x4 mmx sse2 media neon
vp8_short_fdct4x4_media=vp8_short_fdct4x4_armv6
prototype void vp8_short_fdct8x4 "short *input, short *output, int pitch"
specialize vp8_short_fdct8x4 mmx sse2 media neon
vp8_short_fdct8x4_media=vp8_short_fdct8x4_armv6
prototype void vp8_short_walsh4x4 "short *input, short *output, int pitch"
specialize vp8_short_walsh4x4 sse2 media neon
vp8_short_walsh4x4_media=vp8_short_walsh4x4_armv6
#
# Quantizer
#
prototype void vp8_regular_quantize_b "struct block *, struct blockd *"
specialize vp8_regular_quantize_b sse2 #sse4_1
# TODO(johann) Update sse4 implementation and re-enable
#vp8_regular_quantize_b_sse4_1=vp8_regular_quantize_b_sse4
prototype void vp8_fast_quantize_b "struct block *, struct blockd *"
specialize vp8_fast_quantize_b sse2 ssse3 media neon
vp8_fast_quantize_b_media=vp8_fast_quantize_b_armv6
prototype void vp8_regular_quantize_b_pair "struct block *b1, struct block *b2, struct blockd *d1, struct blockd *d2"
# no asm yet
prototype void vp8_fast_quantize_b_pair "struct block *b1, struct block *b2, struct blockd *d1, struct blockd *d2"
specialize vp8_fast_quantize_b_pair neon
prototype void vp8_quantize_mb "struct macroblock *"
specialize vp8_quantize_mb neon
prototype void vp8_quantize_mby "struct macroblock *"
specialize vp8_quantize_mby neon
prototype void vp8_quantize_mbuv "struct macroblock *"
specialize vp8_quantize_mbuv neon
#
# Block subtraction
#
prototype int vp8_block_error "short *coeff, short *dqcoeff"
specialize vp8_block_error mmx sse2
vp8_block_error_sse2=vp8_block_error_xmm
prototype int vp8_mbblock_error "struct macroblock *mb, int dc"
specialize vp8_mbblock_error mmx sse2
vp8_mbblock_error_sse2=vp8_mbblock_error_xmm
prototype int vp8_mbuverror "struct macroblock *mb"
specialize vp8_mbuverror mmx sse2
vp8_mbuverror_sse2=vp8_mbuverror_xmm
prototype void vp8_subtract_b "struct block *be, struct blockd *bd, int pitch"
specialize vp8_subtract_b mmx sse2 media neon
vp8_subtract_b_media=vp8_subtract_b_armv6
prototype void vp8_subtract_mby "short *diff, unsigned char *src, int src_stride, unsigned char *pred, int pred_stride"
specialize vp8_subtract_mby mmx sse2 media neon
vp8_subtract_mby_media=vp8_subtract_mby_armv6
prototype void vp8_subtract_mbuv "short *diff, unsigned char *usrc, unsigned char *vsrc, int src_stride, unsigned char *upred, unsigned char *vpred, int pred_stride"
specialize vp8_subtract_mbuv mmx sse2 media neon
vp8_subtract_mbuv_media=vp8_subtract_mbuv_armv6
#
# Motion search
#
prototype int vp8_full_search_sad "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, int sad_per_bit, int distance, struct variance_vtable *fn_ptr, int *mvcost[2], union int_mv *center_mv"
specialize vp8_full_search_sad sse3 sse4_1
# The SSE3/SSE4.1 full-search variants compute 3 and 8 SADs at a time.
vp8_full_search_sad_sse3=vp8_full_search_sadx3
vp8_full_search_sad_sse4_1=vp8_full_search_sadx8
prototype int vp8_refining_search_sad "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, int sad_per_bit, int distance, struct variance_vtable *fn_ptr, int *mvcost[2], union int_mv *center_mv"
specialize vp8_refining_search_sad sse3
vp8_refining_search_sad_sse3=vp8_refining_search_sadx4
# NOTE(review): vp8_diamond_search_sad has an sse3 symbol remap below but no
# `specialize ... sse3` line — confirm whether that is intentional.
prototype int vp8_diamond_search_sad "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, union int_mv *best_mv, int search_param, int sad_per_bit, int *num00, struct variance_vtable *fn_ptr, int *mvcost[2], union int_mv *center_mv"
vp8_diamond_search_sad_sse3=vp8_diamond_search_sadx4
#
# Alt-ref Noise Reduction (ARNR)
#
# Temporal filtering is an offline feature, excluded from realtime-only builds.
if [ "$CONFIG_REALTIME_ONLY" != "yes" ]; then
prototype void vp8_temporal_filter_apply "unsigned char *frame1, unsigned int stride, unsigned char *frame2, unsigned int block_size, int strength, int filter_weight, unsigned int *accumulator, unsigned short *count"
specialize vp8_temporal_filter_apply sse2
fi
#
# Pick Loopfilter
#
prototype void vp8_yv12_copy_partial_frame "struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc"
specialize vp8_yv12_copy_partial_frame neon
#
# Denoiser filter
#
if [ "$CONFIG_TEMPORAL_DENOISING" = "yes" ]; then
prototype int vp8_denoiser_filter "struct yv12_buffer_config* mc_running_avg, struct yv12_buffer_config* running_avg, struct macroblock* signal, unsigned int motion_magnitude2, int y_offset, int uv_offset"
specialize vp8_denoiser_filter sse2 neon
fi
# End of encoder only functions
fi
|
<reponame>jameseden1/lorawan-stack
// Copyright © 2019 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gatewayserver contains the structs and methods necessary to start a gRPC Gateway Server
package gatewayserver
import (
"context"
"fmt"
stdio "io"
stdlog "log"
"math"
"net"
"net/http"
"strings"
"sync"
"time"
pbtypes "github.com/gogo/protobuf/types"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"go.thethings.network/lorawan-stack/v3/pkg/cluster"
"go.thethings.network/lorawan-stack/v3/pkg/component"
"go.thethings.network/lorawan-stack/v3/pkg/config"
"go.thethings.network/lorawan-stack/v3/pkg/config/tlsconfig"
"go.thethings.network/lorawan-stack/v3/pkg/encoding/lorawan"
"go.thethings.network/lorawan-stack/v3/pkg/errors"
"go.thethings.network/lorawan-stack/v3/pkg/events"
"go.thethings.network/lorawan-stack/v3/pkg/frequencyplans"
"go.thethings.network/lorawan-stack/v3/pkg/gatewayserver/io"
iogrpc "go.thethings.network/lorawan-stack/v3/pkg/gatewayserver/io/grpc"
"go.thethings.network/lorawan-stack/v3/pkg/gatewayserver/io/mqtt"
"go.thethings.network/lorawan-stack/v3/pkg/gatewayserver/io/udp"
"go.thethings.network/lorawan-stack/v3/pkg/gatewayserver/io/ws"
"go.thethings.network/lorawan-stack/v3/pkg/gatewayserver/io/ws/lbslns"
"go.thethings.network/lorawan-stack/v3/pkg/gatewayserver/upstream"
"go.thethings.network/lorawan-stack/v3/pkg/gatewayserver/upstream/ns"
"go.thethings.network/lorawan-stack/v3/pkg/gatewayserver/upstream/packetbroker"
"go.thethings.network/lorawan-stack/v3/pkg/log"
"go.thethings.network/lorawan-stack/v3/pkg/random"
"go.thethings.network/lorawan-stack/v3/pkg/rpcmetadata"
"go.thethings.network/lorawan-stack/v3/pkg/rpcmiddleware/hooks"
"go.thethings.network/lorawan-stack/v3/pkg/rpcmiddleware/rpclog"
"go.thethings.network/lorawan-stack/v3/pkg/task"
"go.thethings.network/lorawan-stack/v3/pkg/ttnpb"
"go.thethings.network/lorawan-stack/v3/pkg/types"
"go.thethings.network/lorawan-stack/v3/pkg/unique"
"go.thethings.network/lorawan-stack/v3/pkg/workerpool"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// GatewayServer implements the Gateway Server component.
//
// The Gateway Server exposes the Gs, GtwGs and NsGs services and MQTT and UDP frontends for gateways.
type GatewayServer struct {
	*component.Component
	// ctx is the long-lived Gateway Server context, carrying the
	// "gatewayserver" log namespace.
	ctx context.Context
	// config is the configuration this Gateway Server was built with.
	config *Config
	// requireRegisteredGateways, when true, rejects traffic from gateways
	// that are not registered in the entity registry.
	requireRegisteredGateways bool
	// forward maps upstream handler names ("" meaning the cluster) to the
	// DevAddr prefixes whose traffic is forwarded to that upstream.
	forward map[string][]types.DevAddrPrefix
	// entityRegistry is used to fetch and update gateway entities; it
	// defaults to NewIS(c) and can be overridden with WithRegistry.
	entityRegistry EntityRegistry
	// upstreamHandlers contains the configured upstream handlers, keyed by
	// name (e.g. "cluster", "packetbroker").
	upstreamHandlers map[string]upstream.Handler
	connections sync.Map // string to connectionEntry
	// statsRegistry persists per-gateway connection statistics; it may be
	// nil, in which case stats are not recorded.
	statsRegistry GatewayConnectionStatsRegistry
}
// Option configures GatewayServer.
// Options are applied in order by New, after the default fields are set.
type Option func(*GatewayServer)
// WithRegistry returns an Option that overrides the entity registry used by
// the Gateway Server (by default the Identity Server client built by NewIS).
func WithRegistry(registry EntityRegistry) Option {
	return func(gs *GatewayServer) { gs.entityRegistry = registry }
}
// Context returns the context of the Gateway Server.
// This is the component context extended with the "gatewayserver" log namespace.
func (gs *GatewayServer) Context() context.Context {
	return gs.ctx
}
var (
	// errListenFrontend is returned by frontend serve tasks when a listener
	// cannot be started on the configured address.
	errListenFrontend = errors.DefineFailedPrecondition(
		"listen_frontend",
		"failed to start frontend listener `{protocol}` on address `{address}`",
	)
	// errNotConnected indicates that no connection entry exists for the gateway.
	errNotConnected = errors.DefineNotFound("not_connected", "gateway `{gateway_uid}` not connected")
	// errSetupUpstream wraps failures of upstream.Handler.Setup in New.
	errSetupUpstream = errors.DefineFailedPrecondition("upstream", "failed to setup upstream `{name}`")
	// errInvalidUpstreamName is returned by New for forwarding-table entries
	// that name an unknown upstream (only "cluster" and "packetbroker" exist).
	errInvalidUpstreamName = errors.DefineInvalidArgument("invalid_upstream_name", "upstream `{name}` is invalid")
	// Well-known gateway attribute keys.
	modelAttribute = "model"
	firmwareAttribute = "firmware"
)
// New returns a new *GatewayServer.
//
// It builds the forwarding table from conf, sets up the configured upstream
// handlers ("cluster" and/or "packetbroker"), registers the gRPC services and
// hooks on c, and registers serve tasks for the UDP, MQTT (v3 and v2) and
// Basic Station web socket frontends. Frontend listeners are started lazily by
// their tasks, not by New itself.
//
// A non-nil error is returned when the forwarding table cannot be built, an
// upstream name is unknown, an upstream handler fails to set up, or a web
// socket frontend cannot be constructed.
func New(c *component.Component, conf *Config, opts ...Option) (gs *GatewayServer, err error) {
	forward, err := conf.ForwardDevAddrPrefixes()
	if err != nil {
		return nil, err
	}
	if len(forward) == 0 {
		// Default to forwarding all DevAddrs to the cluster Network Server.
		// Build a fresh map rather than assigning into the returned one,
		// which may be nil when empty.
		forward = map[string][]types.DevAddrPrefix{"": {{}}}
	}
	ctx := log.NewContextWithField(c.Context(), "namespace", "gatewayserver")
	gs = &GatewayServer{
		Component:                 c,
		ctx:                       ctx,
		config:                    conf,
		requireRegisteredGateways: conf.RequireRegisteredGateways,
		forward:                   forward,
		upstreamHandlers:          make(map[string]upstream.Handler),
		statsRegistry:             conf.Stats,
		entityRegistry:            NewIS(c),
	}
	for _, opt := range opts {
		opt(gs)
	}
	// Setup forwarding table.
	for name, prefix := range gs.forward {
		if len(prefix) == 0 {
			continue
		}
		// The empty name is an alias for the cluster Network Server.
		if name == "" {
			name = "cluster"
		}
		var handler upstream.Handler
		switch name {
		case "cluster":
			handler = ns.NewHandler(gs.Context(), c, c, prefix)
		case "packetbroker":
			handler = packetbroker.NewHandler(gs.Context(), packetbroker.Config{
				GatewayRegistry: gs.entityRegistry,
				Cluster:         c,
				DevAddrPrefixes: prefix,
				UpdateInterval:  conf.PacketBroker.UpdateGatewayInterval,
				UpdateJitter:    conf.PacketBroker.UpdateGatewayJitter,
				OnlineTTLMargin: conf.PacketBroker.OnlineTTLMargin,
			})
		default:
			return nil, errInvalidUpstreamName.WithAttributes("name", name)
		}
		if err := handler.Setup(gs.Context()); err != nil {
			return nil, errSetupUpstream.WithCause(err).WithAttributes("name", name)
		}
		gs.upstreamHandlers[name] = handler
	}
	// Register gRPC services.
	hooks.RegisterUnaryHook("/ttn.lorawan.v3.NsGs", rpclog.NamespaceHook, rpclog.UnaryNamespaceHook("gatewayserver"))
	hooks.RegisterUnaryHook("/ttn.lorawan.v3.NsGs", cluster.HookName, c.ClusterAuthUnaryHook())
	c.RegisterGRPC(gs)
	// Start UDP listeners.
	for addr, fallbackFrequencyPlanID := range conf.UDP.Listeners {
		addr := addr
		fallbackFrequencyPlanID := fallbackFrequencyPlanID
		gs.RegisterTask(&task.Config{
			Context: gs.Context(),
			ID:      fmt.Sprintf("serve_udp/%s", addr),
			Func: func(ctx context.Context) error {
				var conn *net.UDPConn
				// Use a locally scoped error here: this task runs after New
				// has returned, so assigning to New's named return value
				// `err` (as the original code did) would be a data race on
				// the enclosing function's return variable.
				conn, listenErr := gs.ListenUDP(addr)
				if listenErr != nil {
					return errListenFrontend.WithCause(listenErr).WithAttributes(
						"protocol", "udp",
						"address", addr,
					)
				}
				defer conn.Close()
				lisCtx := ctx
				if fallbackFrequencyPlanID != "" {
					lisCtx = frequencyplans.WithFallbackID(ctx, fallbackFrequencyPlanID)
				}
				return udp.Serve(lisCtx, gs, conn, conf.UDP.Config)
			},
			Restart: task.RestartOnFailure,
			Backoff: task.DefaultBackoffConfig,
		})
	}
	// Start MQTT listeners, for both the v3 and the v2 protobuf formats.
	for _, version := range []struct {
		Format mqtt.Format
		Config config.MQTT
	}{
		{
			Format: mqtt.NewProtobuf(gs.ctx),
			Config: conf.MQTT,
		},
		{
			Format: mqtt.NewProtobufV2(gs.ctx),
			Config: conf.MQTTV2,
		},
	} {
		for _, endpoint := range []component.Endpoint{
			component.NewTCPEndpoint(version.Config.Listen, "MQTT"),
			component.NewTLSEndpoint(version.Config.ListenTLS, "MQTT"),
		} {
			// Rebind loop variables captured by the task closure below.
			version := version
			endpoint := endpoint
			if endpoint.Address() == "" {
				continue
			}
			gs.RegisterTask(&task.Config{
				Context: gs.Context(),
				ID:      fmt.Sprintf("serve_mqtt/%s", endpoint.Address()),
				Func: func(ctx context.Context) error {
					l, err := gs.ListenTCP(endpoint.Address())
					if err != nil {
						return errListenFrontend.WithCause(err).WithAttributes(
							"address", endpoint.Address(),
							"protocol", endpoint.Protocol(),
						)
					}
					lis, err := endpoint.Listen(l)
					if err != nil {
						return errListenFrontend.WithCause(err).WithAttributes(
							"address", endpoint.Address(),
							"protocol", endpoint.Protocol(),
						)
					}
					defer lis.Close()
					return mqtt.Serve(ctx, gs, lis, version.Format, endpoint.Protocol())
				},
				Restart: task.RestartOnFailure,
				Backoff: task.DefaultBackoffConfig,
			})
		}
	}
	// Start Web Socket listeners.
	type listenerConfig struct {
		fallbackFreqPlanID string
		listen             string
		listenTLS          string
		frontend           ws.Config
	}
	for _, version := range []struct {
		Name           string
		Formatter      ws.Formatter
		listenerConfig listenerConfig
	}{
		{
			Name:      "basicstation",
			Formatter: lbslns.NewFormatter(conf.BasicStation.MaxValidRoundTripDelay),
			listenerConfig: listenerConfig{
				fallbackFreqPlanID: conf.BasicStation.FallbackFrequencyPlanID,
				listen:             conf.BasicStation.Listen,
				listenTLS:          conf.BasicStation.ListenTLS,
				frontend:           conf.BasicStation.Config,
			},
		},
	} {
		ctx := gs.Context()
		if version.listenerConfig.fallbackFreqPlanID != "" {
			ctx = frequencyplans.WithFallbackID(ctx, version.listenerConfig.fallbackFreqPlanID)
		}
		web, err := ws.New(ctx, gs, version.Formatter, version.listenerConfig.frontend)
		if err != nil {
			return nil, err
		}
		for _, endpoint := range []component.Endpoint{
			component.NewTCPEndpoint(version.listenerConfig.listen, version.Name),
			component.NewTLSEndpoint(version.listenerConfig.listenTLS, version.Name, tlsconfig.WithNextProtos("h2", "http/1.1")),
		} {
			endpoint := endpoint
			if endpoint.Address() == "" {
				continue
			}
			gs.RegisterTask(&task.Config{
				Context: gs.Context(),
				ID:      fmt.Sprintf("serve_%s/%s", version.Name, endpoint.Address()),
				Func: func(ctx context.Context) error {
					l, err := gs.ListenTCP(endpoint.Address())
					if err != nil {
						return errListenFrontend.WithCause(err).WithAttributes(
							"address", endpoint.Address(),
							"protocol", endpoint.Protocol(),
						)
					}
					lis, err := endpoint.Listen(l)
					if err != nil {
						return errListenFrontend.WithCause(err).WithAttributes(
							"address", endpoint.Address(),
							"protocol", endpoint.Protocol(),
						)
					}
					defer lis.Close()
					srv := http.Server{
						Handler:           web,
						ReadTimeout:       120 * time.Second,
						ReadHeaderTimeout: 5 * time.Second,
						// Discard http.Server's own logging; errors surface
						// through the task's return value instead.
						ErrorLog: stdlog.New(stdio.Discard, "", 0),
					}
					// Shut the server down when the task context is canceled.
					go func() {
						<-ctx.Done()
						srv.Close()
					}()
					return srv.Serve(lis)
				},
				Restart: task.RestartOnFailure,
				Backoff: task.DefaultBackoffConfig,
			})
		}
	}
	return gs, nil
}
// RegisterServices registers services provided by gs at s.
// It registers the Gs, NsGs and GtwGs services; the GtwGs service is served by
// the gRPC frontend, wired with providers for the v3 and v2 MQTT configurations.
func (gs *GatewayServer) RegisterServices(s *grpc.Server) {
	ttnpb.RegisterGsServer(s, gs)
	ttnpb.RegisterNsGsServer(s, gs)
	// Note: the locals are named cfg to avoid shadowing the config package.
	mqttProvider := config.MQTTConfigProviderFunc(func(ctx context.Context) (*config.MQTT, error) {
		cfg, err := gs.GetConfig(ctx)
		if err != nil {
			return nil, err
		}
		return &cfg.MQTT, nil
	})
	mqttV2Provider := config.MQTTConfigProviderFunc(func(ctx context.Context) (*config.MQTT, error) {
		cfg, err := gs.GetConfig(ctx)
		if err != nil {
			return nil, err
		}
		return &cfg.MQTTV2, nil
	})
	ttnpb.RegisterGtwGsServer(s, iogrpc.New(gs,
		iogrpc.WithMQTTConfigProvider(mqttProvider),
		iogrpc.WithMQTTV2ConfigProvider(mqttV2Provider),
	))
}
// RegisterHandlers registers gRPC handlers.
// It wires the Gs and GtwGs HTTP gateway handlers onto s, proxying to conn.
// NOTE(review): the registration errors are discarded here — confirm that
// this matches the convention of the other components in this project.
func (gs *GatewayServer) RegisterHandlers(s *runtime.ServeMux, conn *grpc.ClientConn) {
	ttnpb.RegisterGsHandler(gs.Context(), s, conn)
	ttnpb.RegisterGtwGsHandler(gs.Context(), s, conn)
}
// Roles returns the roles that the Gateway Server fulfills.
// It always reports exactly the GATEWAY_SERVER cluster role.
func (gs *GatewayServer) Roles() []ttnpb.ClusterRole {
	return []ttnpb.ClusterRole{ttnpb.ClusterRole_GATEWAY_SERVER}
}
var (
	// errGatewayEUINotRegistered is returned by FillGatewayContext when the
	// EUI is unknown and unregistered gateways are not allowed.
	errGatewayEUINotRegistered = errors.DefineNotFound(
		"gateway_eui_not_registered",
		"gateway EUI `{eui}` is not registered",
	)
	// errEmptyIdentifiers is returned when neither a gateway ID nor a usable
	// EUI is provided.
	errEmptyIdentifiers = errors.Define("empty_identifiers", "empty identifiers")
)
// FillGatewayContext fills the given context and identifiers.
//
// When the identifiers carry only an EUI, the gateway ID is resolved through
// the entity registry. If the EUI is not registered and unregistered gateways
// are allowed, a synthetic "eui-<eui>" ID is generated instead.
//
// This method should only be used for request contexts.
func (gs *GatewayServer) FillGatewayContext(ctx context.Context, ids ttnpb.GatewayIdentifiers) (context.Context, ttnpb.GatewayIdentifiers, error) {
	ctx = gs.FillContext(ctx)
	if ids.IsZero() || (ids.Eui != nil && ids.Eui.IsZero()) {
		return nil, ttnpb.GatewayIdentifiers{}, errEmptyIdentifiers.New()
	}
	if ids.GatewayId != "" {
		// Already have an ID; nothing to resolve.
		return ctx, ids, nil
	}
	extIDs, err := gs.entityRegistry.GetIdentifiersForEUI(ctx, &ttnpb.GetGatewayIdentifiersForEUIRequest{
		Eui: ids.Eui,
	})
	switch {
	case err == nil:
		ids = *extIDs
	case errors.IsNotFound(err):
		if gs.requireRegisteredGateways {
			return nil, ttnpb.GatewayIdentifiers{}, errGatewayEUINotRegistered.WithAttributes("eui", *ids.Eui).WithCause(err)
		}
		ids.GatewayId = fmt.Sprintf("eui-%v", strings.ToLower(ids.Eui.String()))
	default:
		return nil, ttnpb.GatewayIdentifiers{}, err
	}
	return ctx, ids, nil
}
var (
	// errGatewayNotRegistered is returned by Connect when the gateway is
	// unknown and unregistered gateways are not allowed.
	errGatewayNotRegistered = errors.DefineNotFound(
		"gateway_not_registered",
		"gateway `{gateway_uid}` is not registered",
	)
	// errNoFallbackFrequencyPlan is returned by Connect when an unregistered
	// gateway connects and the frontend context carries no fallback plan.
	errNoFallbackFrequencyPlan = errors.DefineNotFound(
		"no_fallback_frequency_plan",
		"gateway `{gateway_uid}` is not registered and no fallback frequency plan defined",
	)
	// errUnauthenticatedGatewayConnection is returned when the registered
	// gateway requires authentication but none was presented.
	errUnauthenticatedGatewayConnection = errors.DefineUnauthenticated(
		"unauthenticated_gateway_connection",
		"gateway requires an authenticated connection",
	)
	// errNewConnection is used to disconnect an existing connection when the
	// same gateway connects again.
	errNewConnection = errors.DefineAborted(
		"new_connection",
		"new connection from same gateway",
	)
)
// connectionEntry is the value stored in GatewayServer.connections: an active
// gateway connection together with a WaitGroup tracking its per-connection tasks.
type connectionEntry struct {
	*io.Connection
	// tasksDone is waited on before a replacement connection for the same
	// gateway UID is stored (see Connect).
	tasksDone *sync.WaitGroup
}
// Connect connects a gateway by its identifiers to the Gateway Server, and returns a io.Connection for traffic and
// control.
//
// The caller must hold RIGHT_GATEWAY_LINK on ids. If the gateway is not
// registered and RequireRegisteredGateways is disabled, an ad-hoc gateway is
// built from the fallback frequency plan carried in ctx. Any existing
// connection for the same gateway UID is disconnected first, and this method
// waits for that connection's tasks to finish before storing the new entry.
func (gs *GatewayServer) Connect(ctx context.Context, frontend io.Frontend, ids ttnpb.GatewayIdentifiers) (*io.Connection, error) {
	if err := gs.entityRegistry.AssertGatewayRights(ctx, ids, ttnpb.Right_RIGHT_GATEWAY_LINK); err != nil {
		return nil, err
	}
	uid := unique.ID(ctx, ids)
	logger := log.FromContext(ctx).WithFields(log.Fields(
		"protocol", frontend.Protocol(),
		"gateway_uid", uid,
	))
	ctx = log.NewContext(ctx, logger)
	// Tag all events of this connection with a connection-scoped correlation ID.
	ctx = events.ContextWithCorrelationID(ctx, fmt.Sprintf("gs:conn:%s", events.NewCorrelationID()))
	// The connection counts as authenticated if forwarded call credentials are present.
	var isAuthenticated bool
	if _, err := rpcmetadata.WithForwardedAuth(ctx, gs.AllowInsecureForCredentials()); err == nil {
		isAuthenticated = true
	}
	// Fetch only the gateway fields that the connection and its tasks need.
	gtw, err := gs.entityRegistry.Get(ctx, &ttnpb.GetGatewayRequest{
		GatewayIds: &ids,
		FieldMask: &pbtypes.FieldMask{
			Paths: []string{
				"antennas",
				"attributes",
				"disable_packet_broker_forwarding",
				"downlink_path_constraint",
				"enforce_duty_cycle",
				"frequency_plan_id",
				"frequency_plan_ids",
				"location_public",
				"require_authenticated_connection",
				"schedule_anytime_delay",
				"schedule_downlink_late",
				"status_public",
				"update_location_from_status",
			},
		},
	})
	if errors.IsNotFound(err) {
		if gs.requireRegisteredGateways {
			return nil, errGatewayNotRegistered.WithAttributes("gateway_uid", uid).WithCause(err)
		}
		fpID, ok := frequencyplans.FallbackIDFromContext(ctx)
		if !ok {
			return nil, errNoFallbackFrequencyPlan.WithAttributes("gateway_uid", uid)
		}
		logger.Warn("Connect unregistered gateway")
		// Build a minimal gateway from the fallback frequency plan.
		gtw = &ttnpb.Gateway{
			Ids:                    &ids,
			FrequencyPlanId:        fpID,
			FrequencyPlanIds:       []string{fpID},
			EnforceDutyCycle:       true,
			DownlinkPathConstraint: ttnpb.DownlinkPathConstraint_DOWNLINK_PATH_CONSTRAINT_NONE,
			Antennas:               []*ttnpb.GatewayAntenna{},
		}
	} else if err != nil {
		return nil, err
	}
	if gtw.RequireAuthenticatedConnection && !isAuthenticated {
		return nil, errUnauthenticatedGatewayConnection.New()
	}
	// Use the registry's canonical identifiers from here on.
	ids = *gtw.GetIds()
	fps, err := gs.FrequencyPlansStore(ctx)
	if err != nil {
		return nil, err
	}
	conn, err := io.NewConnection(ctx, frontend, gtw, fps, gtw.EnforceDutyCycle, ttnpb.StdDuration(gtw.ScheduleAnytimeDelay))
	if err != nil {
		return nil, err
	}
	wg := &sync.WaitGroup{}
	// The tasks will always start once the entry is stored.
	// As such, we must ensure any new connection waits for
	// all of the upstream tasks to finish.
	wg.Add(len(gs.upstreamHandlers))
	connEntry := connectionEntry{
		Connection: conn,
		tasksDone:  wg,
	}
	// Replace any existing connection for this UID: disconnect it, wait for
	// its tasks to drain, then retry the store until we win.
	for existing, exists := gs.connections.LoadOrStore(uid, connEntry); exists; existing, exists = gs.connections.LoadOrStore(uid, connEntry) {
		existingConnEntry := existing.(connectionEntry)
		logger.Warn("Disconnect existing connection")
		existingConnEntry.Disconnect(errNewConnection.New())
		existingConnEntry.tasksDone.Wait()
	}
	registerGatewayConnect(ctx, ids, frontend.Protocol())
	logger.Info("Connected")
	// Per-connection background tasks.
	gs.startDisconnectOnChangeTask(connEntry)
	gs.startHandleUpstreamTask(connEntry)
	gs.startUpdateConnStatsTask(connEntry)
	gs.startHandleLocationUpdatesTask(connEntry)
	gs.startHandleVersionUpdatesTask(connEntry)
	// One task per upstream handler; each signals wg.Done when it ends.
	for name, handler := range gs.upstreamHandlers {
		connCtx := log.NewContextWithField(conn.Context(), "upstream_handler", name)
		handler := handler
		gs.StartTask(&task.Config{
			Context: connCtx,
			ID:      fmt.Sprintf("%s_connect_gateway_%s", name, ids.GatewayId),
			Func: func(ctx context.Context) error {
				return handler.ConnectGateway(ctx, ids, conn)
			},
			Done:    wg.Done,
			Restart: task.RestartOnFailure,
			Backoff: task.DialBackoffConfig,
		})
	}
	return conn, nil
}
// GetConnection returns the *io.Connection for the given gateway. If not found, this method returns nil, false.
func (gs *GatewayServer) GetConnection(ctx context.Context, ids ttnpb.GatewayIdentifiers) (*io.Connection, bool) {
	if entry, ok := gs.connections.Load(unique.ID(ctx, ids)); ok {
		return entry.(connectionEntry).Connection, true
	}
	return nil, false
}
// requireDisconnect reports whether a connected gateway must be disconnected
// because its registry entry (current) diverged from the snapshot taken at
// connect time (connected).
func requireDisconnect(connected, current *ttnpb.Gateway) bool {
	if !sameAntennaLocations(connected.GetAntennas(), current.GetAntennas()) {
		// Gateway Server may update the location from status messages. If the locations aren't the same, but if the new
		// location is a GPS location, do not disconnect the gateway. This is to avoid that updating the location from a
		// gateway status message results in disconnecting the gateway.
		antennas := current.Antennas
		if len(antennas) > 0 && antennas[0].Location != nil && antennas[0].Location.Source != ttnpb.LocationSource_SOURCE_GPS {
			return true
		}
	}
	// Any change to a connection-relevant scalar field requires a disconnect.
	switch {
	case connected.DownlinkPathConstraint != current.DownlinkPathConstraint:
		return true
	case connected.DisablePacketBrokerForwarding != current.DisablePacketBrokerForwarding:
		return true
	case connected.EnforceDutyCycle != current.EnforceDutyCycle:
		return true
	case connected.LocationPublic != current.LocationPublic:
		return true
	case connected.RequireAuthenticatedConnection != current.RequireAuthenticatedConnection:
		return true
	case ttnpb.StdDurationOrZero(connected.ScheduleAnytimeDelay) != ttnpb.StdDurationOrZero(current.ScheduleAnytimeDelay):
		return true
	case connected.ScheduleDownlinkLate != current.ScheduleDownlinkLate:
		return true
	case connected.StatusPublic != current.StatusPublic:
		return true
	case connected.UpdateLocationFromStatus != current.UpdateLocationFromStatus:
		return true
	case connected.FrequencyPlanId != current.FrequencyPlanId:
		return true
	case len(connected.FrequencyPlanIds) != len(current.FrequencyPlanIds):
		return true
	}
	// Same length at this point; compare the frequency plan IDs element-wise.
	for i, id := range connected.FrequencyPlanIds {
		if id != current.FrequencyPlanIds[i] {
			return true
		}
	}
	return false
}
var errGatewayChanged = errors.Define("gateway_changed", "gateway changed in registry")
// startDisconnectOnChangeTask starts a task that periodically re-fetches the
// gateway from the entity registry and disconnects the connection when
// connection-relevant fields changed, or when the gateway (or its API key)
// was removed. RestartAlways plus the jittered sleep makes each task run one
// poll cycle.
func (gs *GatewayServer) startDisconnectOnChangeTask(conn connectionEntry) {
	conn.tasksDone.Add(1)
	gs.StartTask(&task.Config{
		Context: conn.Context(),
		ID:      fmt.Sprintf("disconnect_on_change_%s", unique.ID(conn.Context(), conn.Gateway().GetIds())),
		Func: func(ctx context.Context) error {
			// Jitter the fetch interval to avoid synchronized registry load.
			d := random.Jitter(gs.config.FetchGatewayInterval, gs.config.FetchGatewayJitter)
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(d):
			}
			// Fetch only the fields that requireDisconnect compares.
			gtw, err := gs.entityRegistry.Get(ctx, &ttnpb.GetGatewayRequest{
				GatewayIds: conn.Gateway().GetIds(),
				FieldMask: &pbtypes.FieldMask{
					Paths: []string{
						"antennas",
						"disable_packet_broker_forwarding",
						"downlink_path_constraint",
						"enforce_duty_cycle",
						"frequency_plan_id",
						"frequency_plan_ids",
						"location_public",
						"require_authenticated_connection",
						"schedule_anytime_delay",
						"schedule_downlink_late",
						"status_public",
						"update_location_from_status",
					},
				},
			})
			if err != nil {
				if errors.IsUnauthenticated(err) || errors.IsPermissionDenied(err) {
					// Since there is an active connection, the `Get` request will not return a `NotFound` error as the gateway existed during the connect, since the rights assertion fails first.
					// Instead,
					// 1. If the gateway is connected with an API key and is deleted, the IS returns an `Unauthenticated`, since the API Key is also deleted.
					// 2. If the gateway is connected without an API key (UDP, LBS in unauthenticated mode) and is deleted the IS returns an `PermissionDenied` as there are no rights for these IDs.
					log.FromContext(ctx).WithError(err).Debug("Gateway was deleted and/or the API key used to link the gateway was invalidated")
					conn.Disconnect(err)
				} else {
					log.FromContext(ctx).WithError(err).Warn("Failed to get gateway")
				}
				// Returning the error triggers the task backoff before the next poll.
				return err
			}
			if requireDisconnect(conn.Gateway(), gtw) {
				log.FromContext(ctx).Info("Gateway changed in registry, disconnect")
				conn.Disconnect(errGatewayChanged.New())
			}
			return nil
		},
		Done:    conn.tasksDone.Done,
		Restart: task.RestartAlways,
		Backoff: task.DialBackoffConfig,
	})
}
// startHandleUpstreamTask starts the task that drains the connection's uplink,
// status and TxAck channels and fans them out to the upstream handlers.
// It runs once per connection (RestartNever) and signals tasksDone on exit.
func (gs *GatewayServer) startHandleUpstreamTask(conn connectionEntry) {
	conn.tasksDone.Add(1)
	gs.StartTask(&task.Config{
		Context: conn.Context(),
		ID:      fmt.Sprintf("handle_upstream_%s", unique.ID(conn.Context(), conn.Gateway().GetIds())),
		Func: func(ctx context.Context) error {
			gs.handleUpstream(ctx, conn)
			return nil
		},
		Done:    conn.tasksDone.Done,
		Restart: task.RestartNever,
		Backoff: task.DialBackoffConfig,
	})
}
// startUpdateConnStatsTask starts the task that publishes connection statistics
// to the stats registry. It is a no-op when no stats registry is configured.
func (gs *GatewayServer) startUpdateConnStatsTask(conn connectionEntry) {
	if gs.statsRegistry == nil {
		return
	}
	conn.tasksDone.Add(1)
	gs.StartTask(&task.Config{
		Context: conn.Context(),
		ID:      fmt.Sprintf("update_connection_stats_%s", unique.ID(conn.Context(), conn.Gateway().GetIds())),
		Func: func(ctx context.Context) error {
			gs.updateConnStats(ctx, conn)
			return nil
		},
		Done:    conn.tasksDone.Done,
		Restart: task.RestartNever,
		Backoff: task.DialBackoffConfig,
	})
}
// startHandleLocationUpdatesTask starts the task that pushes antenna locations
// from gateway status messages to the entity registry. It is a no-op unless
// the gateway has update_location_from_status enabled.
func (gs *GatewayServer) startHandleLocationUpdatesTask(conn connectionEntry) {
	if !conn.Gateway().GetUpdateLocationFromStatus() {
		return
	}
	conn.tasksDone.Add(1)
	gs.StartTask(&task.Config{
		Context: conn.Context(),
		ID:      fmt.Sprintf("handle_location_updates_%s", unique.ID(conn.Context(), conn.Gateway().GetIds())),
		Func: func(ctx context.Context) error {
			gs.handleLocationUpdates(ctx, conn)
			return nil
		},
		Done:    conn.tasksDone.Done,
		Restart: task.RestartNever,
		Backoff: task.DialBackoffConfig,
	})
}
// startHandleVersionUpdatesTask starts the task that processes version
// information updates for the connection (see handleVersionInfoUpdates).
func (gs *GatewayServer) startHandleVersionUpdatesTask(conn connectionEntry) {
	conn.tasksDone.Add(1)
	gs.StartTask(&task.Config{
		Context: conn.Context(),
		ID:      fmt.Sprintf("handle_version_updates_%s", unique.ID(conn.Context(), conn.Gateway().GetIds())),
		Func: func(ctx context.Context) error {
			gs.handleVersionInfoUpdates(ctx, conn)
			return nil
		},
		Done:    conn.tasksDone.Done,
		Restart: task.RestartNever,
		Backoff: task.DialBackoffConfig,
	})
}
// errHostHandle wraps upstream handler failures when forwarding a message.
var errHostHandle = errors.Define("host_handle", "host `{host}` failed to handle message")
// upstreamHost couples an upstream handler with the worker pool and metadata
// used to forward one gateway connection's traffic to it.
type upstreamHost struct {
	name string
	handler upstream.Handler
	// pool fans messages out to workers running handlePacket.
	pool workerpool.WorkerPool
	gtw *ttnpb.Gateway
	// correlationID is appended to every message forwarded on behalf of this host.
	correlationID string
}
// handlePacket forwards one message (uplink, status or TxAck) to the host's
// upstream handler, registering forward/drop events accordingly. It runs on
// the host's worker pool.
func (host *upstreamHost) handlePacket(ctx context.Context, item interface{}) {
	ctx = events.ContextWithCorrelationID(ctx, host.correlationID)
	logger := log.FromContext(ctx)
	gtw := host.gtw
	switch msg := item.(type) {
	case *ttnpb.GatewayUplinkMessage:
		// Shallow-copy the message and rebuild the correlation ID slice so
		// appending the host correlation ID does not mutate the message
		// shared with the other upstream hosts.
		up := *msg.Message
		msg = &ttnpb.GatewayUplinkMessage{
			BandId:  msg.BandId,
			Message: &up,
		}
		msg.Message.CorrelationIds = append(make([]string, 0, len(msg.Message.CorrelationIds)+1), msg.Message.CorrelationIds...)
		msg.Message.CorrelationIds = append(msg.Message.CorrelationIds, host.correlationID)
		// drop registers the message as dropped, logging the device
		// identifiers that are present.
		drop := func(ids *ttnpb.EndDeviceIdentifiers, err error) {
			logger := logger.WithError(err)
			if ids.JoinEui != nil {
				logger = logger.WithField("join_eui", *ids.JoinEui)
			}
			if ids.DevEui != nil && !ids.DevEui.IsZero() {
				logger = logger.WithField("dev_eui", *ids.DevEui)
			}
			if ids.DevAddr != nil && !ids.DevAddr.IsZero() {
				logger = logger.WithField("dev_addr", *ids.DevAddr)
			}
			logger.Debug("Drop message")
			registerDropUplink(ctx, gtw, msg, host.name, err)
		}
		ids := up.Payload.EndDeviceIdentifiers()
		// Forward only if the DevAddr matches one of the host's prefixes;
		// messages without a DevAddr (e.g. join-requests) always pass.
		var pass bool
		switch {
		case ids.DevAddr != nil:
			for _, prefix := range host.handler.DevAddrPrefixes() {
				if ids.DevAddr.HasPrefix(prefix) {
					pass = true
					break
				}
			}
		default:
			pass = true
		}
		if !pass {
			break
		}
		// Only the listed transport/internal error codes count as a drop;
		// anything else (including no error) is registered as forwarded.
		switch err := host.handler.HandleUplink(ctx, *gtw.Ids, ids, msg); codes.Code(errors.Code(err)) {
		case codes.Canceled, codes.DeadlineExceeded,
			codes.Unknown, codes.Internal,
			codes.Unimplemented, codes.Unavailable:
			drop(ids, errHostHandle.WithCause(err).WithAttributes("host", host.name))
		default:
			registerForwardUplink(ctx, gtw, msg.Message, host.name)
		}
	case *ttnpb.GatewayStatus:
		if err := host.handler.HandleStatus(ctx, *gtw.Ids, msg); err != nil {
			registerDropStatus(ctx, gtw, msg, host.name, err)
		} else {
			registerForwardStatus(ctx, gtw, msg, host.name)
		}
	case *ttnpb.TxAcknowledgment:
		if err := host.handler.HandleTxAck(ctx, *gtw.Ids, msg); err != nil {
			registerDropTxAck(ctx, gtw, msg, host.name, err)
		} else {
			registerForwardTxAck(ctx, gtw, msg, host.name)
		}
	}
}
// handleUpstream drains the connection's uplink, status and TxAck channels and
// publishes each message to a worker pool per upstream host, until ctx is done.
// On return it removes the connection entry and registers the disconnect.
func (gs *GatewayServer) handleUpstream(ctx context.Context, conn connectionEntry) {
	var (
		gtw      = conn.Gateway()
		protocol = conn.Frontend().Protocol()
		logger   = log.FromContext(ctx)
	)
	defer func() {
		gs.connections.Delete(unique.ID(ctx, gtw.GetIds()))
		registerGatewayDisconnect(ctx, *gtw.GetIds(), protocol, ctx.Err())
		logger.Info("Disconnected")
	}()
	// Build one worker pool per upstream host; gateways with Packet Broker
	// forwarding disabled skip the "packetbroker" host entirely.
	hosts := make([]*upstreamHost, 0, len(gs.upstreamHandlers))
	for name, handler := range gs.upstreamHandlers {
		if name == "packetbroker" && gtw.DisablePacketBrokerForwarding {
			continue
		}
		host := &upstreamHost{
			name:          name,
			handler:       handler,
			gtw:           gtw,
			correlationID: fmt.Sprintf("gs:up:host:%s", events.NewCorrelationID()),
		}
		wp := workerpool.NewWorkerPool(workerpool.Config{
			Component:  gs,
			Context:    ctx,
			Name:       fmt.Sprintf("upstream_handlers_%v", name),
			Handler:    host.handlePacket,
			MinWorkers: -1,
			MaxWorkers: 32,
			QueueSize:  -1,
		})
		// Deferred inside the loop on purpose: all pools are awaited when
		// this function returns.
		defer wp.Wait()
		host.pool = wp
		hosts = append(hosts, host)
	}
	for {
		var (
			// Shadow ctx so per-message correlation IDs do not accumulate
			// across loop iterations.
			ctx = ctx
			val interface{}
		)
		select {
		case <-ctx.Done():
			return
		case msg := <-conn.Up():
			ctx = events.ContextWithCorrelationID(ctx, fmt.Sprintf("gs:uplink:%s", events.NewCorrelationID()))
			msg.Message.CorrelationIds = append(msg.Message.CorrelationIds, events.CorrelationIDsFromContext(ctx)...)
			// Lazily decode the raw payload if the frontend did not.
			if msg.Message.Payload == nil {
				msg.Message.Payload = &ttnpb.Message{}
				if err := lorawan.UnmarshalMessage(msg.Message.RawPayload, msg.Message.Payload); err != nil {
					registerDropUplink(ctx, gtw, msg, "validation", err)
					continue
				}
			}
			val = msg
			registerReceiveUplink(ctx, gtw, msg.Message, protocol)
		case msg := <-conn.Status():
			ctx = events.ContextWithCorrelationID(ctx, fmt.Sprintf("gs:status:%s", events.NewCorrelationID()))
			val = msg
			registerReceiveStatus(ctx, gtw, msg, protocol)
		case msg := <-conn.TxAck():
			ctx = events.ContextWithCorrelationID(ctx, fmt.Sprintf("gs:tx_ack:%s", events.NewCorrelationID()))
			if d := msg.DownlinkMessage; d != nil {
				d.CorrelationIds = append(d.CorrelationIds, events.CorrelationIDsFromContext(ctx)...)
			}
			if msg.Result == ttnpb.TxAcknowledgment_SUCCESS {
				registerSuccessDownlink(ctx, gtw, protocol)
			} else {
				registerFailDownlink(ctx, gtw, msg, protocol)
			}
			val = msg
			registerReceiveTxAck(ctx, gtw, msg, protocol)
		}
		// Fan the message out to every upstream host; a failed publish is
		// registered as a drop for that host only.
		for _, host := range hosts {
			err := host.pool.Publish(ctx, val)
			if err == nil {
				continue
			}
			logger.WithField("name", host.name).WithError(err).Warn("Upstream handler publish failed")
			switch msg := val.(type) {
			case *ttnpb.GatewayUplinkMessage:
				registerDropUplink(ctx, gtw, msg, host.name, err)
			case *ttnpb.GatewayStatus:
				registerDropStatus(ctx, gtw, msg, host.name, err)
			case *ttnpb.TxAcknowledgment:
				registerDropTxAck(ctx, gtw, msg, host.name, err)
			default:
				panic("unreachable")
			}
		}
	}
}
// updateConnStats publishes this connection's statistics to the stats registry
// until ctx is done: an initial record on connect, refreshes whenever the
// stats change or the TTL is about to expire, and a disconnect record on exit.
func (gs *GatewayServer) updateConnStats(ctx context.Context, conn connectionEntry) {
	// Use a decoupled context so the deferred disconnect write below still
	// succeeds after the request context is canceled.
	decoupledCtx := gs.FromRequestContext(ctx)
	logger := log.FromContext(ctx)
	ids := conn.Connection.Gateway().GetIds()
	connectTime := conn.Connection.ConnectTime()
	stats := &ttnpb.GatewayConnectionStats{
		ConnectedAt: ttnpb.ProtoTimePtr(connectTime),
		Protocol:    conn.Connection.Frontend().Protocol(),
	}
	// Refresh at half the TTL so the record never expires while connected.
	refreshTTLTimer := time.NewTicker(gs.config.ConnectionStatsTTL / 2)
	defer refreshTTLTimer.Stop()
	// Initial update, so that the gateway appears connected.
	if err := gs.statsRegistry.Set(decoupledCtx, *ids, stats, ttnpb.GatewayConnectionStatsFieldPathsTopLevel, gs.config.ConnectionStatsTTL); err != nil {
		logger.WithError(err).Warn("Failed to initialize connection stats")
	}
	defer func() {
		logger.Debug("Delete connection stats")
		// Record the disconnect time; the record expires after the
		// (shorter) disconnect TTL.
		if err := gs.statsRegistry.Set(
			decoupledCtx, *ids, &ttnpb.GatewayConnectionStats{
				ConnectedAt:    nil,
				DisconnectedAt: ttnpb.ProtoTimePtr(time.Now()),
			},
			[]string{"connected_at", "disconnected_at"},
			gs.config.ConnectionStatsDisconnectTTL,
		); err != nil {
			logger.WithError(err).Warn("Failed to clear connection stats")
		}
	}()
	for {
		select {
		case <-ctx.Done():
			return
		case <-conn.StatsChanged():
		case <-refreshTTLTimer.C:
		}
		stats, paths := conn.Stats()
		if err := gs.statsRegistry.Set(decoupledCtx, *ids, stats, paths, gs.config.ConnectionStatsTTL); err != nil {
			logger.WithError(err).Warn("Failed to update connection stats")
		}
	}
}
const (
	// allowedLocationDelta is the maximum difference in latitude/longitude
	// degrees for two locations to be considered the same (see sameLocation).
	allowedLocationDelta = 0.00001
)
// sameLocation reports whether a and b describe the same location: identical
// altitude and accuracy, and coordinates within allowedLocationDelta degrees.
func sameLocation(a, b ttnpb.Location) bool {
	if a.Altitude != b.Altitude || a.Accuracy != b.Accuracy {
		return false
	}
	closeEnough := func(x, y float64) bool {
		return math.Abs(x-y) <= allowedLocationDelta
	}
	return closeEnough(a.Latitude, b.Latitude) && closeEnough(a.Longitude, b.Longitude)
}
// sameAntennaLocations reports whether two antenna slices have pairwise
// equal locations. Slices of different lengths never match, and a pair in
// which exactly one antenna has a nil location does not match either.
func sameAntennaLocations(left, right []*ttnpb.GatewayAntenna) bool {
	if len(left) != len(right) {
		return false
	}
	for i, l := range left {
		r := right[i]
		switch {
		case l.Location == nil && r.Location == nil:
			// Both unset: considered equal.
		case l.Location == nil || r.Location == nil:
			return false
		case !sameLocation(*l.Location, *r.Location):
			return false
		}
	}
	return true
}
// statusLocationFields are the location field paths that may be copied from
// a status-reported antenna location: every location field except "source",
// which is set explicitly by the caller.
var statusLocationFields = ttnpb.ExcludeFields(ttnpb.LocationFieldPathsNested, "source")
// handleLocationUpdates mirrors antenna locations reported in gateway
// status messages into the gateway's antenna records in the entity
// registry. On every location change it overlays the status-reported
// locations onto the registered antennas, skips the registry write when
// nothing changed since the last successful update, and debounces before
// handling the next change. Runs until ctx is cancelled.
func (gs *GatewayServer) handleLocationUpdates(ctx context.Context, conn connectionEntry) {
	var (
		gtw          = conn.Gateway()
		lastAntennas []*ttnpb.GatewayAntenna // last successfully written antennas
	)
	for {
		select {
		case <-ctx.Done():
			return
		case <-conn.LocationChanged():
			status, _, ok := conn.StatusStats()
			if ok && len(status.AntennaLocations) > 0 {
				// Construct the union of antennas that are in the gateway fetched from the entity registry with the antennas
				// that are in the status message.
				gtwAntennas := gtw.GetAntennas()
				c := len(gtwAntennas)
				if cs := len(status.AntennaLocations); cs > c {
					c = cs
				}
				antennas := make([]*ttnpb.GatewayAntenna, c)
				for i := range antennas {
					antennas[i] = &ttnpb.GatewayAntenna{}
					// Start from the registered antenna, when one exists at this index.
					if i < len(gtwAntennas) {
						if err := antennas[i].SetFields(
							gtwAntennas[i],
							ttnpb.GatewayAntennaFieldPathsNested...,
						); err != nil {
							log.FromContext(ctx).WithError(err).Warn("Failed to clone antenna")
						}
					}
					// Overlay the status-reported location; the source is
					// always forced to SOURCE_GPS.
					if i < len(status.AntennaLocations) && status.AntennaLocations[i] != nil {
						antennas[i].Location = &ttnpb.Location{
							Source: ttnpb.LocationSource_SOURCE_GPS,
						}
						if err := antennas[i].Location.SetFields(
							status.AntennaLocations[i],
							statusLocationFields...,
						); err != nil {
							log.FromContext(ctx).WithError(err).Warn("Failed to clone antenna location")
						}
					}
				}
				// Nothing changed since the last write: skip the registry call.
				if lastAntennas != nil && sameAntennaLocations(lastAntennas, antennas) {
					break
				}
				err := gs.entityRegistry.UpdateAntennas(ctx, *gtw.GetIds(), antennas)
				if err != nil {
					log.FromContext(ctx).WithError(err).Warn("Failed to update antennas")
				} else {
					lastAntennas = antennas
				}
			}
			// Debounce further location changes.
			timeout := time.After(gs.config.UpdateGatewayLocationDebounceTime)
			select {
			case <-ctx.Done():
				return
			case <-timeout:
			}
		}
	}
}
// handleVersionInfoUpdates updates gateway attributes with version info.
// This function runs exactly once; only for the first status message of each
// connection, since version information should not change within the same
// connection.
func (gs *GatewayServer) handleVersionInfoUpdates(ctx context.Context, conn connectionEntry) {
	select {
	case <-ctx.Done():
		return
	case <-conn.VersionInfoChanged():
		// Check ok before dereferencing status: the original read
		// status.Versions first, which panics when no status is available.
		status, _, ok := conn.StatusStats()
		if !ok {
			return
		}
		versionsFromStatus := status.Versions
		// NOTE(review): these literal keys are presumably the values of
		// modelAttribute/firmwareAttribute used below — confirm.
		if versionsFromStatus["model"] == "" || versionsFromStatus["firmware"] == "" {
			return
		}
		gtwAttributes := conn.Gateway().Attributes
		// Skip the update when the registry already holds the same values.
		if versionsFromStatus[modelAttribute] == gtwAttributes[modelAttribute] && versionsFromStatus[firmwareAttribute] == gtwAttributes[firmwareAttribute] {
			return
		}
		attributes := map[string]string{
			modelAttribute:    versionsFromStatus[modelAttribute],
			firmwareAttribute: versionsFromStatus[firmwareAttribute],
		}
		// Jitter the delay so many gateways reconnecting at once do not
		// write to the registry simultaneously.
		d := random.Jitter(gs.config.UpdateVersionInfoDelay, 0.25)
		select {
		case <-ctx.Done():
			return
		case <-time.After(d):
		}
		err := gs.entityRegistry.UpdateAttributes(conn.Context(), *conn.Gateway().Ids, gtwAttributes, attributes)
		if err != nil {
			log.FromContext(ctx).WithError(err).Debug("Failed to update version information")
		}
	}
}
// GetFrequencyPlans gets the frequency plans by the gateway identifiers.
// When the gateway is not registered, the fallback frequency plan from the
// context is used instead; any other registry error is returned as-is.
func (gs *GatewayServer) GetFrequencyPlans(ctx context.Context, ids ttnpb.GatewayIdentifiers) (map[string]*frequencyplans.FrequencyPlan, error) {
	gtw, err := gs.entityRegistry.Get(ctx, &ttnpb.GetGatewayRequest{
		GatewayIds: &ids,
		FieldMask:  &pbtypes.FieldMask{Paths: []string{"frequency_plan_ids"}},
	})
	var planIDs []string
	switch {
	case err == nil:
		planIDs = gtw.FrequencyPlanIds
	case errors.IsNotFound(err):
		// Unregistered gateway: fall back to the plan from the context.
		fallbackID, ok := frequencyplans.FallbackIDFromContext(ctx)
		if !ok {
			return nil, err
		}
		planIDs = []string{fallbackID}
	default:
		return nil, err
	}
	store, err := gs.FrequencyPlansStore(ctx)
	if err != nil {
		return nil, err
	}
	result := make(map[string]*frequencyplans.FrequencyPlan, len(planIDs))
	for _, id := range planIDs {
		plan, err := store.GetByID(id)
		if err != nil {
			return nil, err
		}
		result[id] = plan
	}
	return result, nil
}
// ClaimDownlink claims the downlink path for the given gateway.
func (gs *GatewayServer) ClaimDownlink(ctx context.Context, ids ttnpb.GatewayIdentifiers) error {
	gtwIDs := &ids
	return gs.ClaimIDs(ctx, gtwIDs)
}
// UnclaimDownlink releases the claim of the downlink path for the given gateway.
func (gs *GatewayServer) UnclaimDownlink(ctx context.Context, ids ttnpb.GatewayIdentifiers) error {
	gtwIDs := &ids
	return gs.UnclaimIDs(ctx, gtwIDs)
}
// ValidateGatewayID implements io.Server.
// It delegates identifier validation to the entity registry.
func (gs *GatewayServer) ValidateGatewayID(ctx context.Context, ids ttnpb.GatewayIdentifiers) error {
	registry := gs.entityRegistry
	return registry.ValidateGatewayID(ctx, ids)
}
// ctxConfigKeyType is the context key type under which a request-scoped
// *Config may be stored; see GetConfig.
type ctxConfigKeyType struct{}
// GetConfig returns the Gateway Server config based on the context.
// A *Config stored in the context overrides the server-wide config.
func (gs *GatewayServer) GetConfig(ctx context.Context) (*Config, error) {
	// NOTE(review): the lookup key is a pointer to a freshly allocated
	// zero-size struct; this only works if the value was stored with an
	// identical &ctxConfigKeyType{} expression and the runtime gives all
	// zero-size allocations the same address — confirm against the
	// corresponding context.WithValue call.
	if val, ok := ctx.Value(&ctxConfigKeyType{}).(*Config); ok {
		return val, nil
	}
	return gs.config, nil
}
// GetMQTTConfig returns the MQTT frontend configuration based on the context.
func (gs *GatewayServer) GetMQTTConfig(ctx context.Context) (*config.MQTT, error) {
	// Named conf (not config) to avoid shadowing the config package.
	conf, err := gs.GetConfig(ctx)
	if err != nil {
		return nil, err
	}
	return &conf.MQTT, nil
}
|
package org.egovframe.rte.ptl.mvc.async;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.*;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = "classpath*:META-INF/spring/async/test_servlet.xml")
public class AsyncReqTest {

    /**
     * Verifies that GET /callable.do starts asynchronous request processing
     * and completes with the async result "result".
     *
     * @throws Exception if building or performing the request fails
     */
    @Test
    public void responseBodyHandler() throws Exception {
        // Standalone setup: only the controller under test, no full web context.
        MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new AsyncReqTestController()).build();
        mockMvc.perform(get("/callable.do")).andExpect(request().asyncStarted()).andExpect(request().asyncResult("result"));
    }
}
|
<filename>src/test/java/io/redbarn/TestUtils.java
package io.redbarn;
import com.google.common.base.Stopwatch;
import org.slf4j.Logger;
import javax.script.ScriptException;
import java.io.IOException;
import static org.slf4j.LoggerFactory.getLogger;
import static org.testng.Assert.*;
/**
* Exposes utilities useful for working with Redbarn's automated tests.
*
* @author <NAME>
* @since 0.1.0
*/
public class TestUtils {

    public static final Logger LOG = getLogger(TestUtils.class);

    /**
     * Renders the given template and asserts that the output is
     * markup-equivalent to the companion "*.expected.html" resource,
     * logging the render time.
     *
     * @param renderer     The renderer under test.
     * @param templatePath The classpath location of the template to render.
     * @param args         The arguments passed to the template.
     * @throws ScriptException If the template engine fails.
     * @throws IOException     If the template or expected resource cannot be read.
     */
    public static void assertRenderSuccess(
            TemplateRenderer renderer,
            String templatePath,
            Object[] args) throws ScriptException, IOException {
        Stopwatch watch = Stopwatch.createStarted();
        String actual = renderer.render(templatePath, args);
        watch.stop();
        LOG.info("Render time for {}: {}", templatePath, watch);
        String expectedPath = templatePath.replace(".html", ".expected.html");
        String expected = ResourceUtils.getResourceString(expectedPath);
        assertMarkupEquivalent(actual, expected);
    }

    /**
     * An assertion to prove that two markup strings are the same despite case
     * and whitespace / line breaks between tags.
     *
     * @param actual   The markup that you want to prove from, say, a test.
     * @param expected The markup you expect.
     */
    public static void assertMarkupEquivalent(String actual, String expected) {
        // Shares normalization with isMarkupEquivalent so the two checks
        // can never drift apart.
        assertEquals(normalize(actual), normalize(expected));
    }

    /**
     * Determines if two markup strings are the same despite case and
     * whitespace / line breaks between tags.
     *
     * @param actual   The markup that you want to prove from, say, a test.
     * @param expected The markup you expect.
     * @return true if the two markup strings contain similar tags and content.
     */
    public static boolean isMarkupEquivalent(String actual, String expected) {
        return normalize(actual).equals(normalize(expected));
    }

    /**
     * Removes all whitespace and line breaks in an HTML or other tag based
     * string.
     *
     * @param markup The markup to minify.
     * @return The minified markup.
     */
    public static String minifyMarkup(String markup) {
        return markup.replaceAll(">\\s+", ">")
                .replaceAll("\\s+<", "<")
                .replaceAll("\\{\\s+", "{")
                .replaceAll("\\s+}", "}");
    }

    /**
     * Lower-cases and minifies markup: the canonical form used by the
     * equivalence checks above.
     *
     * @param markup The markup to normalize.
     * @return The normalized markup.
     */
    private static String normalize(String markup) {
        return minifyMarkup(markup.toLowerCase());
    }
}
|
using System;
using System.Collections.Generic;
using System.Globalization;
using System.Resources;
using Microsoft.Extensions.FileProviders;
/// <summary>
/// Loads per-culture <see cref="ResourceSet"/>s from a file provider for the
/// cultures listed in <c>enabledFiles</c>, and serves string lookups from them.
/// </summary>
public class FileResourceManagerWithCultureStringLocalizer
{
    private readonly ResourceManager _resourceManager;
    private readonly IDictionary<string, ResourceSet> _resourceSets;

    public FileResourceManagerWithCultureStringLocalizer(
        ResourceManager resourceManager,
        IFileProvider fileProvider,
        string resourcePath,
        string baseName,
        IEnumerable<string> enabledFiles
    )
    {
        _resourceManager = resourceManager ?? throw new ArgumentNullException(nameof(resourceManager));
        _resourceSets = new Dictionary<string, ResourceSet>();

        // Materialize once into a HashSet: the original called
        // IEnumerable<string>.Contains inside the loop, which (a) requires
        // System.Linq — not imported here, a compile error — and (b)
        // re-enumerates the source for every culture.
        var enabled = new HashSet<string>(enabledFiles ?? throw new ArgumentNullException(nameof(enabledFiles)));

        foreach (var culture in CultureInfo.GetCultures(CultureTypes.AllCultures))
        {
            if (!enabled.Contains(culture.Name))
            {
                continue;
            }
            var cultureSpecificPath = $"{resourcePath}/{culture.Name}/{baseName}";
            var fileInfo = fileProvider.GetFileInfo(cultureSpecificPath);
            if (fileInfo.Exists)
            {
                // ResourceSet eagerly reads the .resources binary stream.
                _resourceSets[culture.Name] = new ResourceSet(fileInfo.CreateReadStream());
            }
        }
    }

    /// <summary>
    /// Looks up <paramref name="key"/> in the resource set loaded for
    /// <paramref name="culture"/>. Returns null when the culture has no
    /// loaded resource set or the key is missing.
    /// </summary>
    public string GetLocalizedString(string culture, string key)
    {
        if (_resourceSets.TryGetValue(culture, out var resourceSet))
        {
            return resourceSet.GetString(key);
        }
        return null; // or throw exception based on requirements
    }
}
export class PlatformGroups {
}
PlatformGroups.PlayStation = [8, 165, 38, 9, 48, 167, 7, 390];
PlatformGroups.Nintendo = [130, 159, 47, 4, 18, 37, 19, 20, 21, 137];
PlatformGroups.Xbox = [11, 49, 169, 12];
PlatformGroups.PC = [6, 14];
PlatformGroups.Mobile = [34, 74];
|
// Capture the current time once so hour and minutes come from the same
// instant; the original constructed two Date objects, which could straddle
// a minute (or hour) boundary and report inconsistent values.
const now = new Date();
let hour = now.getHours();
let minutes = now.getMinutes();
console.log(`Current hour: ${hour} and minutes: ${minutes}`);
package crd
import (
	"context"
	"fmt"
	"time"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	extensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)
// WaitForCRDEstablished polls the CRD until the apiextensions-apiserver
// reports it as Established (i.e. it can be served), or the 10 second
// timeout expires. The apiserver validates the CRD's names and publishes
// the result on the status conditions; a name conflict can never become
// established, so it is returned as an error immediately instead of
// polling until the timeout.
func WaitForCRDEstablished(extClientSet extensionsclientset.Interface, crdName string) error {
	return wait.Poll(1250*time.Millisecond, 10*time.Second, func() (done bool, err error) {
		crd, err := extClientSet.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crdName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range crd.Status.Conditions {
			switch cond.Type {
			case apiextensionsv1.NamesAccepted:
				// The CRD's names failed consistency validation or
				// conflict with an existing CRD: stop polling.
				if cond.Status == apiextensionsv1.ConditionFalse {
					klog.Error("CRD Name conflict")
					return false, fmt.Errorf("crd %s name conflict: %s", crdName, cond.Message)
				}
			case apiextensionsv1.Established:
				// The apiserver is ready to serve the custom resource.
				if cond.Status == apiextensionsv1.ConditionTrue {
					return true, nil
				}
			}
		}
		return false, nil
	})
}
|
import numpy as np
import matplotlib.pyplot as plt
def calculate_and_plot_savings(MedicalExample, M, P):
    """Plot end-of-period savings against market resources for every
    discrete medical-need shock value, holding permanent income fixed.

    Parameters
    ----------
    MedicalExample : object
        A solved model exposing ``MedShkDstn`` (with ``pmf`` and ``X``),
        ``solution[0].cFunc`` / ``solution[0].MedFunc`` and ``MedPrice``.
        NOTE(review): presumably a HARK medical-shock consumer type —
        confirm against the caller.
    M : numpy.ndarray
        Grid of market-resource levels to evaluate.
    P : numpy.ndarray or float
        Permanent income level(s) paired with ``M``.

    Returns
    -------
    None
        Shows a matplotlib figure as a side effect.
    """
    # One savings curve per discrete medical shock value.
    for j in range(MedicalExample.MedShkDstn[0].pmf.size):
        # Broadcast the j-th shock value across the whole M grid.
        MedShk = MedicalExample.MedShkDstn[0].X[j] * np.ones_like(M)
        # Savings = resources - consumption - medical spending.
        Sav = (
            M
            - MedicalExample.solution[0].cFunc(M, P, MedShk)
            - MedicalExample.MedPrice[0] * MedicalExample.solution[0].MedFunc(M, P, MedShk)
        )
        plt.plot(M, Sav)
    plt.title("End of period savings by medical need shock (constant permanent income)")
    plt.xlabel("M_temp")
    plt.ylabel("Savings")
    plt.show()
/* Utility class: slightly rounded corners with uniform outer spacing. */
.rounded {
    border-radius: 5px;
    margin: 10px;
}
#!/bin/bash
# Print a bold blue status line of the form "- <message>".
# All arguments are joined into a single message string.
echo_status() {
    local message="$*"
    tput setaf 4
    tput bold
    echo -e "- ${message}"
    tput sgr0
}
echo_status "Starting up..."

# Create Salt Config
# A single heredoc writes the whole minion file in one pass; variables are
# expanded exactly as the original per-line echo redirections did.
cat > /etc/salt/minion <<EOF
file_client: local
postgres.host: '${TETHYS_DB_HOST}'
postgres.port: '${TETHYS_DB_PORT}'
postgres.user: '${TETHYS_DB_USERNAME}'
postgres.pass: '${TETHYS_DB_PASSWORD}'
postgres.bins_dir: '${CONDA_HOME}/envs/${CONDA_ENV_NAME}/bin'
EOF

# Apply States
echo_status "Enforcing start state... (This might take a bit)"
salt-call --local state.apply
|
/*
胡杠碰吃出过 操作基类
*/
package roombase
import (
//"container/list"
"sync"
)
// maxOpt is the number of operation kinds, used as an ordering sentinel:
// lower values have higher priority.
var maxOpt = 6
// NeedWait is one seat's pending action request.
type NeedWait struct {
	Index    int   // Seat number, decoupled from the user.
	IsPass   bool  // Whether the seat chose to pass.
	CanTools []int // Available choices {0 win, 1 kong, 2 pong, 3 chow, 4 discard, 5 pass}; <=0 not allowed, >0 allowed; several may be set.
	Choice   int   // The seat's choice {0 win, 1 kong, 2 pong, 3 chow, 4 discard, 5 pass}; -1 = no action yet.
	TopOpt   int   // Highest-priority (lowest-index) available operation.
	Param    []int // Card ids (cids) for the operation.
}
// RoomWaitOpts tracks the set of actions currently being waited on.
type RoomWaitOpts struct {
	IsSelf       bool        // Whether this concerns the player's own turn.
	WaitOpts     []int       // Users being waited on (a multi-win discard may wait on several users).
	OptIndex     []int       // Acting seats: 0,1,2,3.
	NeedWaitTool []*NeedWait // Pending per-seat action requests.
	mx           *sync.Mutex // Guards the fields above; lazily created by the methods.
}
// ClearAll resets every pending action and all bookkeeping fields.
func (rw *RoomWaitOpts) ClearAll() {
	// NOTE(review): lazy mutex creation is itself unsynchronized — two
	// goroutines hitting a fresh value concurrently could each create a
	// mutex; confirm callers initialize rw before sharing it.
	if rw.mx == nil {
		rw.mx = new(sync.Mutex)
	}
	rw.mx.Lock()
	rw.IsSelf = false
	rw.WaitOpts = []int{}
	rw.OptIndex = []int{}
	if len(rw.NeedWaitTool) > 0 {
		// Invalidate each entry before dropping the slice so lingering
		// pointers elsewhere see a cleared entry.
		for i := 0; i < len(rw.NeedWaitTool); i++ {
			rw.NeedWaitTool[i].Index = -1
			rw.NeedWaitTool[i].Choice = maxOpt
			rw.NeedWaitTool[i] = nil
		}
	}
	rw.NeedWaitTool = nil
	rw.NeedWaitTool = []*NeedWait{}
	rw.mx.Unlock()
}
// ClearUser removes the pending entries belonging to seat uIndex (and
// drops any nil entries encountered along the way).
func (rw *RoomWaitOpts) ClearUser(uIndex int) {
	if rw.mx == nil {
		rw.mx = new(sync.Mutex)
	}
	rw.mx.Lock()
	// Only advance the index when nothing was removed: the original
	// incremented i after an in-place removal, which skipped the element
	// that shifted into position i (adjacent matches were left behind).
	for i := 0; i < len(rw.NeedWaitTool); {
		if rw.NeedWaitTool[i] == nil || rw.NeedWaitTool[i].Index == uIndex {
			rw.NeedWaitTool = append(rw.NeedWaitTool[:i], rw.NeedWaitTool[i+1:]...)
		} else {
			i++
		}
	}
	rw.mx.Unlock()
}
// AddCanTool registers (or merges into) the allowed operations for seat
// uIndex. Each argument is a per-operation flag: <=0 not allowed,
// >0 allowed.
func (rw *RoomWaitOpts) AddCanTool(uIndex int, iWin int, iKong int, iPeng int, iChow int, iPut int, iPass int) {
	findcount := -1
	for i := 0; i < len(rw.NeedWaitTool); i++ {
		if rw.NeedWaitTool[i].Index == uIndex { // Entry already exists: merge the flags.
			rw.NeedWaitTool[i].CanTools[0] += iWin
			rw.NeedWaitTool[i].CanTools[1] += iKong
			rw.NeedWaitTool[i].CanTools[2] += iPeng
			rw.NeedWaitTool[i].CanTools[3] += iChow
			rw.NeedWaitTool[i].CanTools[4] += iPut
			rw.NeedWaitTool[i].CanTools[5] += iPass
			rw.NeedWaitTool[i].TopOpt = rw.getUserTopOpt(uIndex)
			findcount = i
			break
		}
	}
	if findcount == -1 {
		nw := NeedWait{ // Add a new entry for this seat.
			Index: uIndex, Choice: -1,
			CanTools: []int{
				iWin,
				iKong,
				iPeng,
				iChow,
				iPut,
				iPass,
			},
		}
		rw.NeedWaitTool = append(rw.NeedWaitTool, &nw)
		// TopOpt is set after appending because getUserTopOpt finds the
		// entry via GetOpt; &nw is already in the slice, so this write is
		// visible there too.
		nw.TopOpt = rw.getUserTopOpt(uIndex)
	}
}
// getUserTopOpt returns the highest-priority (lowest-index) operation the
// seat can perform, or -1 when the seat has no pending entry or no
// available operation.
func (rw *RoomWaitOpts) getUserTopOpt(uIndex int) int {
	needWait := rw.GetOpt(uIndex)
	if needWait == nil {
		return -1
	}
	// CanTools is ordered by priority, so the first positive slot wins.
	for t, can := range needWait.CanTools {
		if can > 0 {
			return t
		}
	}
	return -1
}
// SetUserOpt records the choice of the user at the given seat.
// The choice is only accepted when the user has not acted yet and the
// requested operation is among their allowed operations.
func (rw *RoomWaitOpts) SetUserOpt(index int, optType int, param []int) {
	for i := 0; i < len(rw.NeedWaitTool); i++ {
		if rw.NeedWaitTool[i].Index == index { // Entry exists for this seat.
			if rw.NeedWaitTool[i].Choice == -1 && rw.NeedWaitTool[i].CanTools[optType] > 0 { // Only a seat that has not acted yet may choose.
				rw.NeedWaitTool[i].Choice = optType // Record the choice.
				rw.NeedWaitTool[i].Param = param
				if optType == Pass {
					rw.NeedWaitTool[i].IsPass = true // Marked as passed.
				}
			}
			break
		}
	}
}
// Count returns the number of pending action entries.
func (rw *RoomWaitOpts) Count() int {
	return len(rw.NeedWaitTool)
}
// GetOpt returns the pending entry for the given seat, or nil when the
// seat has no entry.
func (rw *RoomWaitOpts) GetOpt(userIndex int) *NeedWait {
	for _, entry := range rw.NeedWaitTool {
		if entry != nil && entry.Index == userIndex {
			return entry
		}
	}
	return nil
}
// GetOptCanTools returns the seat's allowed-operation vector, or a nil
// slice when the seat has no pending entry.
func (rw *RoomWaitOpts) GetOptCanTools(userIndex int) []int {
	// Look the entry up once; the original called GetOpt twice, doing a
	// second linear scan for the same result.
	if opt := rw.GetOpt(userIndex); opt != nil {
		return opt.CanTools
	}
	var rValue []int
	return rValue
}
// CheckGetCpt checks whether the pending actions are resolved and, if so,
// returns the highest-priority one to execute.
// Returns (win-user index list, acting seat index, operation type, done);
// done == true means the highest-priority action may be executed now.
func (rw *RoomWaitOpts) CheckGetCpt() ([]int, int, int, bool) {
	huList := make([]int, 0)
	// Find the highest-priority operation and the seat that owns it.
	topOpt := maxOpt // Highest-priority operation seen so far.
	topIndex := -1
	for i := 0; i < len(rw.NeedWaitTool); i++ {
		needWait := rw.NeedWaitTool[i]
		if needWait != nil && !needWait.IsPass && topOpt >= needWait.TopOpt {
			topOpt = needWait.TopOpt
			topIndex = needWait.Index
		}
	}
	// Every seat passed.
	if topIndex == -1 {
		return huList, -1, Pass, true
	}
	if topOpt == 0 { // Collect the full list of seats that declared a win (multi-win discard).
		for i := 0; i < len(rw.NeedWaitTool); i++ {
			if !rw.NeedWaitTool[i].IsPass {
				if rw.NeedWaitTool[i].CanTools[0] > 0 && rw.NeedWaitTool[i].Choice == 0 {
					huList = append(huList, rw.NeedWaitTool[i].Index)
					topIndex = rw.NeedWaitTool[i].Index
				}
				if rw.NeedWaitTool[i].CanTools[0] > 0 && rw.NeedWaitTool[i].Choice == -1 {
					// At least one eligible winner has not acted yet.
					return huList, -1, -1, false
				}
			}
		}
	}
	// Check whether the highest-priority seat has acted.
	nw := rw.GetOpt(topIndex)
	if nw != nil {
		// fmt.Println("nw choice ", nw.Choice)
		// fmt.Println("nw ", nw.CanTools)
		// fmt.Println("nw index ", nw.Index)
	}
	if nw != nil && nw.Choice >= 0 { // The highest-priority seat has acted.
		return huList, nw.Index, nw.Choice, true
	} else {
		return huList, -1, -1, false
	}
}
|
package com.huatuo.adapter;
import java.util.ArrayList;
import org.json.JSONObject;
import com.huatuo.R;
import com.huatuo.util.CommonUtil;
import com.huatuo.util.ImageLoader_DisplayImageOptions;
import com.nostra13.universalimageloader.core.ImageLoader;
import android.content.Context;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.BaseAdapter;
import android.widget.ImageView;
import android.widget.TextView;
/**
 * ListView adapter rendering consumption-code ("xiaofeima") entries backed
 * by raw JSONObjects.
 */
public class XiaoFeiMaAdapter extends BaseAdapter {

    private LayoutInflater inflater;
    private ArrayList<JSONObject> arrayList;
    private Context mContext;

    public XiaoFeiMaAdapter(Context mContext) {
        this.mContext = mContext;
        inflater = LayoutInflater.from(mContext);
        arrayList = new ArrayList<JSONObject>();
    }

    /** Appends a single item and refreshes the list. */
    public void add(JSONObject item) {
        arrayList.add(item);
        notifyDataSetChanged();
    }

    /** Appends all items from the given list (skipped when empty) and refreshes. */
    public void add(ArrayList<JSONObject> arrayList) {
        if (!CommonUtil.emptyListToString3(arrayList)) {
            // addAll replaces the original element-by-element copy loop.
            this.arrayList.addAll(arrayList);
        }
        notifyDataSetChanged();
    }

    /** Removes all items and refreshes so stale rows disappear. */
    public void clear() {
        this.arrayList.clear();
        // The original did not notify after clearing, leaving the ListView
        // rendering rows for data that no longer exists.
        notifyDataSetChanged();
    }

    @Override
    public int getCount() {
        return arrayList.size();
    }

    @Override
    public Object getItem(int position) {
        return arrayList.get(position);
    }

    @Override
    public long getItemId(int position) {
        return position;
    }

    @Override
    public View getView(int position, View convertView, ViewGroup parent) {
        final ViewHolder holder;
        JSONObject jsonObject = arrayList.get(position);
        if (convertView == null) {
            convertView = inflater.inflate(
                    R.layout.activity_xiaofeima_list_listview_item, null);
            holder = new ViewHolder();
            holder.iv_icon = (ImageView) convertView.findViewById(R.id.iv_icon);
            holder.iv_status = (ImageView) convertView
                    .findViewById(R.id.iv_Status);
            holder.tv_storeName = (TextView) convertView
                    .findViewById(R.id.tv_storeName);
            holder.tv_xiaofeima = (TextView) convertView
                    .findViewById(R.id.tv_xiaofeima);
            holder.tv_xiangmuName = (TextView) convertView
                    .findViewById(R.id.tv_xiangmuName);
            holder.tv_yuyueTime = (TextView) convertView
                    .findViewById(R.id.tv_yuyueTime);
            convertView.setTag(holder);
        } else {
            holder = (ViewHolder) convertView.getTag();
        }
        // "2" = not used yet, "3" = used (per the drawable names).
        String state = jsonObject.optString("state", "");
        if ("2".equals(state)) {
            holder.iv_status.setBackgroundResource(R.drawable.img_code_notused);
        } else if ("3".equals(state)) {
            holder.iv_status.setBackgroundResource(R.drawable.img_code_used);
        } else {
            // Recycled views keep the previous row's background unless it is
            // reset; clear it for any other state.
            holder.iv_status.setBackgroundResource(0);
        }
        ImageLoader.getInstance().displayImage(
                jsonObject.optString("icon", ""),
                holder.iv_icon,
                ImageLoader_DisplayImageOptions.getInstance()
                        .setDefaultImageSmallImg()); // item icon
        holder.tv_storeName.setText(jsonObject.optString("storeName", ""));
        holder.tv_xiaofeima.setText(jsonObject.optString("exchangeCode", ""));
        holder.tv_xiangmuName.setText("服务项目:"
                + jsonObject.optString("servName", ""));
        holder.tv_yuyueTime.setText("预约时间:"
                + jsonObject.optString("serviceTime", ""));
        return convertView;
    }

    /** Cached child-view references for one list row. */
    static class ViewHolder {
        TextView tv_storeName, tv_xiaofeima, tv_xiangmuName, tv_yuyueTime;
        ImageView iv_icon, iv_status;
    }
}
|
# Factory for TeamManager::PlayerVideo records: associates a player and
# fills in a sample YouTube URL and description.
FactoryBot.define do
  factory :player_video, class: TeamManager::PlayerVideo do
    player
    video_url { 'https://youtu.be/Yfi9nqA0MVU' }
    description { 'Test Video' }
  end
end
|
"""
Design a code which can accept two strings as input and check whether they are anagrams or not.
"""
def are_anagrams(s1, s2):
    """Return True when the two strings are anagrams of each other.

    Two strings are anagrams when they contain exactly the same
    characters with the same multiplicities.
    """
    # Different lengths can never be anagrams; bail out early.
    if len(s1) != len(s2):
        return False
    # Count characters of s1, then subtract the counts of s2; the strings
    # are anagrams exactly when every count returns to zero.
    counts = {}
    for ch in s1:
        counts[ch] = counts.get(ch, 0) + 1
    for ch in s2:
        counts[ch] = counts.get(ch, 0) - 1
    return all(v == 0 for v in counts.values())
if __name__ == '__main__':
    # Demo: "listen" and "silent" are anagrams, so this prints True.
    s1, s2 = "listen", "silent"
    print(are_anagrams(s1, s2))
<reponame>googleapis/googleapis-gen
# Generated by the protocol buffer compiler. DO NOT EDIT!
# Source: google/cloud/datacatalog/v1beta1/policytagmanager.proto for package 'Google.Cloud.DataCatalog.V1beta1'
# Original file comments:
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'grpc'
require 'google/cloud/datacatalog/v1beta1/policytagmanager_pb'
module Google
  module Cloud
    module DataCatalog
      module V1beta1
        module PolicyTagManager
          # The policy tag manager API service allows clients to manage their taxonomies
          # and policy tags.
          #
          # NOTE: this file is generated from policytagmanager.proto (see the
          # header); do not hand-edit the rpc definitions below.
          class Service

            include ::GRPC::GenericService

            self.marshal_class_method = :encode
            self.unmarshal_class_method = :decode
            self.service_name = 'google.cloud.datacatalog.v1beta1.PolicyTagManager'

            # Creates a taxonomy in the specified project.
            rpc :CreateTaxonomy, ::Google::Cloud::DataCatalog::V1beta1::CreateTaxonomyRequest, ::Google::Cloud::DataCatalog::V1beta1::Taxonomy
            # Deletes a taxonomy. This operation will also delete all
            # policy tags in this taxonomy along with their associated policies.
            rpc :DeleteTaxonomy, ::Google::Cloud::DataCatalog::V1beta1::DeleteTaxonomyRequest, ::Google::Protobuf::Empty
            # Updates a taxonomy.
            rpc :UpdateTaxonomy, ::Google::Cloud::DataCatalog::V1beta1::UpdateTaxonomyRequest, ::Google::Cloud::DataCatalog::V1beta1::Taxonomy
            # Lists all taxonomies in a project in a particular location that the caller
            # has permission to view.
            rpc :ListTaxonomies, ::Google::Cloud::DataCatalog::V1beta1::ListTaxonomiesRequest, ::Google::Cloud::DataCatalog::V1beta1::ListTaxonomiesResponse
            # Gets a taxonomy.
            rpc :GetTaxonomy, ::Google::Cloud::DataCatalog::V1beta1::GetTaxonomyRequest, ::Google::Cloud::DataCatalog::V1beta1::Taxonomy
            # Creates a policy tag in the specified taxonomy.
            rpc :CreatePolicyTag, ::Google::Cloud::DataCatalog::V1beta1::CreatePolicyTagRequest, ::Google::Cloud::DataCatalog::V1beta1::PolicyTag
            # Deletes a policy tag. Also deletes all of its descendant policy tags.
            rpc :DeletePolicyTag, ::Google::Cloud::DataCatalog::V1beta1::DeletePolicyTagRequest, ::Google::Protobuf::Empty
            # Updates a policy tag.
            rpc :UpdatePolicyTag, ::Google::Cloud::DataCatalog::V1beta1::UpdatePolicyTagRequest, ::Google::Cloud::DataCatalog::V1beta1::PolicyTag
            # Lists all policy tags in a taxonomy.
            rpc :ListPolicyTags, ::Google::Cloud::DataCatalog::V1beta1::ListPolicyTagsRequest, ::Google::Cloud::DataCatalog::V1beta1::ListPolicyTagsResponse
            # Gets a policy tag.
            rpc :GetPolicyTag, ::Google::Cloud::DataCatalog::V1beta1::GetPolicyTagRequest, ::Google::Cloud::DataCatalog::V1beta1::PolicyTag
            # Gets the IAM policy for a taxonomy or a policy tag.
            rpc :GetIamPolicy, ::Google::Iam::V1::GetIamPolicyRequest, ::Google::Iam::V1::Policy
            # Sets the IAM policy for a taxonomy or a policy tag.
            rpc :SetIamPolicy, ::Google::Iam::V1::SetIamPolicyRequest, ::Google::Iam::V1::Policy
            # Returns the permissions that a caller has on the specified taxonomy or
            # policy tag.
            rpc :TestIamPermissions, ::Google::Iam::V1::TestIamPermissionsRequest, ::Google::Iam::V1::TestIamPermissionsResponse
          end

          # Stub is the generated gRPC client class for this service.
          Stub = Service.rpc_stub_class
        end
      end
    end
  end
end
|
<filename>android/src/com/virgilsecurity/voip/MainActivity.java
/**
* Custom Activity is used to:
* - pass application context to the WebRTC;
* - request voice recording permissions.
*
* Code to request permissions is taken from:
* https://developer.here.com/documentation/android-premium/3.15/dev_guide/topics/request-android-permissions.html
*/
package com.virgilsecurity.voip;
import android.os.Bundle;
import android.content.Context;
import android.content.pm.PackageManager;
import android.Manifest;
import android.util.Log;
import android.widget.Toast;
import androidx.annotation.NonNull;
import androidx.core.content.ContextCompat;
import androidx.core.app.ActivityCompat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.qtproject.qt5.android.bindings.QtActivity;
import org.webrtc.ContextUtils;
public class MainActivity extends QtActivity {

    private static final String TAG = MainActivity.class.getName();

    /**
     * permissions request code
     */
    private final static int REQUEST_CODE_ASK_PERMISSIONS = 1;

    /**
     * Permissions that need to be explicitly requested from end user.
     */
    private static final String[] REQUIRED_PERMISSIONS = new String[] {
        Manifest.permission.RECORD_AUDIO
    };

    @Override
    public void onCreate(Bundle savedInstanceState) {
        Log.d(TAG, "Initialize MainActivity");
        // WebRTC needs the application context before the Qt activity starts.
        ContextUtils.initialize(getApplicationContext());
        super.onCreate(savedInstanceState);
        checkPermissions();
    }

    /**
     * Checks the dynamically-controlled permissions and requests missing permissions from end user.
     */
    protected void checkPermissions() {
        final List<String> missingPermissions = new ArrayList<String>();
        // check all required dynamic permissions
        for (final String permission : REQUIRED_PERMISSIONS) {
            final int result = ContextCompat.checkSelfPermission(this, permission);
            if (result != PackageManager.PERMISSION_GRANTED) {
                missingPermissions.add(permission);
            }
        }
        if (!missingPermissions.isEmpty()) {
            // request all missing permissions
            // toArray(new String[0]) is the idiomatic (and on modern VMs
            // faster) form of the presized-array call.
            final String[] permissions = missingPermissions.toArray(new String[0]);
            ActivityCompat.requestPermissions(this, permissions, REQUEST_CODE_ASK_PERMISSIONS);
        } else {
            // Nothing missing: synthesize an all-granted result so the
            // common handling below runs exactly once either way.
            final int[] grantResults = new int[REQUIRED_PERMISSIONS.length];
            Arrays.fill(grantResults, PackageManager.PERMISSION_GRANTED);
            onRequestPermissionsResult(REQUEST_CODE_ASK_PERMISSIONS, REQUIRED_PERMISSIONS, grantResults);
        }
    }

    @Override
    public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions,
            @NonNull int[] grantResults) {
        switch (requestCode) {
            case REQUEST_CODE_ASK_PERMISSIONS:
                for (int index = permissions.length - 1; index >= 0; --index) {
                    if (grantResults[index] != PackageManager.PERMISSION_GRANTED) {
                        // exit the app if one permission is not granted
                        Toast.makeText(this, "Required permission '" + permissions[index] +
                                "' not granted, exiting", Toast.LENGTH_LONG).show();
                        finish();
                        return;
                    }
                }
                Log.d(TAG, "All permissions were granted.");
                break;
            default:
                // Not a request we issued; nothing to do.
                break;
        }
    }
}
|
<filename>Projects/RPI4/EMAR.py
############################################################################################
#
# Project: Peter Moss COVID-19 AI Research Project
# Repository: EMAR Mini, Emergency Assistance Robot
#
# Author: <NAME> (<EMAIL>)
# Contributors:
# Title: EMAR Mini Emergency Assistance Robot Class
# Description: The EMAR Mini Emergency Assistance Robot Class is the the core
# for the EMAR Mini software.
# License: MIT License
# Last Modified: 2020-07-12
#
############################################################################################
import geocoder, json, psutil, sys, threading, time
import RPi.GPIO as GPIO
from threading import Thread
from Classes.Helpers import Helpers
from Classes.iotJumpWay import Device as iotJumpWay
from Classes.RealsenseRead import RealsenseRead
from Classes.RealsenseStream import RealsenseStream
class EMAR():
    """ EMAR Mini Emergency Assistance Robot Class

    The EMAR Mini Emergency Assistance Robot Class is the the core wrapper class
    for the EMAR Mini software.
    """

    def __init__(self):
        """ Initializes the class: connects to the iotJumpWay, subscribes to
        the commands channel and registers the commands callback. """

        self.Helpers = Helpers("EMAR")

        # Starts the iotJumpWay
        self.iotJumpWay = iotJumpWay()
        self.iotJumpWay.connect()

        # Subscribes to the EMAR Mini commands topic
        self.iotJumpWay.channelSub("Commands")

        # Sets the EMAR Mini commands callback function
        self.iotJumpWay.commandsCallback = self.commands

        self.Helpers.logger.info("EMAR Mini awaiting commands.")
        self.Helpers.logger.info("EMAR Mini Emergency Assistance Robot Class initialization complete.")

    def _init_servo(self, pin):
        """ Configures a BCM GPIO pin for 50Hz PWM, pulses the servo to the
        7% duty-cycle position and stops the pulse. Returns the PWM object.

        Extracted because the original repeated the identical setup
        sequence for all three servos. """
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(pin, GPIO.OUT)
        servo = GPIO.PWM(pin, 50)
        servo.start(7)
        time.sleep(0.5)
        servo.ChangeDutyCycle(0)
        return servo

    def hardware(self):
        """ Loads the EMAR Mini hardware modules. """

        # Head servo 1 (pin 18), arm servo 1 (pin 12), arm servo 2 (pin 13).
        self.h1 = self._init_servo(18)
        self.a1 = self._init_servo(12)
        self.a2 = self._init_servo(13)

        self.Helpers.logger.info("EMAR Mini hardware modules loaded.")

    def commands(self, topic, payload):
        """
        iotJumpWay Commands Callback

        The callback function that is triggerend in the event of a
        command communication from the iotJumpWay. Decodes the JSON
        payload and pulses the matching servo to the requested position.
        """
        self.Helpers.logger.info("Recieved iotJumpWay Command Data : " + payload.decode())
        command = json.loads(payload.decode("utf-8"))

        cycle = 0
        servo = None

        if command["Type"] == "Head":
            if command["Value"] == "RIGHT":
                cycle = 2.0
                servo = self.h1
            if command["Value"] == "LEFT":
                cycle = 12.0
                servo = self.h1
            if command["Value"] == "CENTER":
                cycle = 7.0
                servo = self.h1

        if command["Type"] == "Arm":
            if command["Value"] == "2UP":
                cycle = 7.0
                servo = self.a1
            if command["Value"] == "2DOWN":
                cycle = 12.0
                servo = self.a1
            if command["Value"] == "UP":
                cycle = 7.0
                servo = self.a2
            if command["Value"] == "DOWN":
                cycle = 12.0
                servo = self.a2

        # Guard against unknown Type/Value combinations: the original
        # called servo.ChangeDutyCycle with servo still None, crashing the
        # callback on any unrecognized command.
        if servo is None:
            self.Helpers.logger.info("Ignoring unrecognized command.")
            return

        servo.ChangeDutyCycle(cycle)
        time.sleep(0.5)
        servo.ChangeDutyCycle(0)

    def life(self):
        """ Sends vital statistics to HIAS and reschedules itself every
        60 seconds. """

        # Gets vitals
        cpu = psutil.cpu_percent()
        mem = psutil.virtual_memory()[2]
        hdd = psutil.disk_usage('/').percent
        tmp = psutil.sensors_temperatures()['cpu-thermal'][0].current
        g = geocoder.ip('me')

        self.Helpers.logger.info("EMAR Mini Life (TEMPERATURE): " + str(tmp) + "\u00b0")
        self.Helpers.logger.info("EMAR Mini Life (CPU): " + str(cpu) + "%")
        self.Helpers.logger.info("EMAR Mini Life (Memory): " + str(mem) + "%")
        self.Helpers.logger.info("EMAR Mini Life (HDD): " + str(hdd) + "%")
        self.Helpers.logger.info("EMAR Mini Life (LAT): " + str(g.latlng[0]))
        self.Helpers.logger.info("EMAR Mini Life (LNG): " + str(g.latlng[1]))

        # Send iotJumpWay notification
        self.iotJumpWay.channelPub("Life", {
            "CPU": cpu,
            "Memory": mem,
            "Diskspace": hdd,
            "Temperature": tmp,
            "Latitude": g.latlng[0],
            "Longitude": g.latlng[1]
        })

        # Life thread
        threading.Timer(60.0, self.life).start()

    def threading(self):
        """ Starts the EMAR Mini software threads. """

        # Life thread
        threading.Timer(60.0, self.life).start()
        # Realsense threads
        Thread(target=RealsenseRead().run).start()
        Thread(target=RealsenseStream().run).start()

    def shutdown(self):
        """ Shuts down the EMAR Mini software: stops the servos, releases
        the GPIO pins and disconnects from the iotJumpWay. """

        # Shutdown servos
        self.h1.stop()
        self.a1.stop()
        self.a2.stop()
        GPIO.cleanup()
        # Disconnect from iotJumpWay
        self.iotJumpWay.disconnect()
        self.Helpers.logger.info("EMAR Mini Exiting")
        sys.exit()
# Instantiate the robot. NOTE(review): this rebinds the module-level name
# "EMAR" from the class to the instance, so the class cannot be referenced
# (or instantiated again) after this line.
EMAR = EMAR()
def main():
    """Program entry point: loads the hardware, starts the worker threads,
    then idles until interrupted with CTRL+C, at which point the software
    shuts down cleanly."""
    try:
        EMAR.hardware()
        EMAR.threading()
        # Idle without burning a CPU core: the original spun in a tight
        # `while True: continue` busy loop (and the shutdown call placed
        # after it was unreachable).
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        # Catches CTRL + C and exits the program
        EMAR.shutdown()
if __name__ == "__main__":
main()
|
#!/bin/bash
# Load the Userspace I/O (UIO) kernel modules for the generic platform,
# dynamic-memory and Xilinx AXI Performance Monitor drivers.
modprobe uio_dmem_genirq
modprobe uio_pdrv_genirq
modprobe uio_xilinx_apm
# Verify the UIO device nodes appeared and show their sysfs names.
ls -l /dev/uio?
ls -l /sys/class/uio/uio?/
cat /sys/class/uio/uio?/name
# Do the same for the DMA device nodes.
ls -l /dev/dma?
ls -l /sys/class/uio/dma?/
cat /sys/class/uio/dma?/name
|
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Run the test suite under the Google App Engine stubs (nose-gae plugin).
# GAE_ROOT must point at the App Engine SDK; test paths may be passed as
# arguments and default to the current directory.
nosetests --with-gae --gae-lib-root=${GAE_ROOT} "${@:-.}"
|
<filename>client/src/lib/Client.ts
import { ExposedPadlockClient } from "@llkennedy/padlock-api"

/**
 * Local alias for the generated ExposedPadlockClient interface, re-exported
 * so the rest of the app does not import the API package path directly.
 */
export interface Client extends ExposedPadlockClient { }
export default Client;
import java.lang.*;
import java.util.*;

/**
 * Reads an integer n and an encoded string of length n from stdin, then
 * reconstructs the original string by placing characters outward from the
 * middle, stepping left or right depending on the parity of (n - i).
 */
public class decoding {
    public static void main(String args[]) {
        Scanner input = new Scanner(System.in);
        int length = input.nextInt();
        // First nextLine() consumes the remainder of the integer line;
        // the second reads the actual encoded payload.
        String encoded = input.nextLine();
        encoded = input.nextLine();
        char[] source = encoded.toCharArray();
        char[] decoded = new char[length];
        int position = (length - 1) / 2;
        for (int i = 0; i < length; i++) {
            // Alternate the write position around the centre.
            position = ((length - i) % 2 == 0) ? position - i : position + i;
            decoded[position] = source[i];
        }
        System.out.print(new String(decoded));
    }
}
|
import { get, set } from '@ember/object';
import { inject as service } from '@ember/service';
import Route from '@ember/routing/route';
import { hash } from 'rsvp';

// Route for the cluster edit wizard: loads the cluster plus every lookup
// collection the form needs, all resolved in parallel via RSVP.hash.
export default Route.extend({
  access: service(),
  globalStore: service(),
  roleTemplateService: service('roleTemplate'),

  model() {
    const globalStore = this.get('globalStore');
    const cluster = this.modelFor('authenticated.cluster');

    return hash({
      originalCluster: cluster,
      // Edit a clone so changes can be discarded without touching the
      // original record.
      cluster: cluster.clone(),
      kontainerDrivers: globalStore.findAll('kontainerDriver'),
      nodeTemplates: globalStore.findAll('nodeTemplate'),
      nodeDrivers: globalStore.findAll('nodeDriver'),
      psps: globalStore.findAll('podSecurityPolicyTemplate'),
      roleTemplates: get(this, 'roleTemplateService').get('allFilteredRoleTemplates'),
      users: globalStore.findAll('user'),
      clusterRoleTemplateBinding: globalStore.findAll('clusterRoleTemplateBinding'),
      me: get(this, 'access.principal'),
    });
  },

  setupController(controller/* , model*/) {
    this._super(...arguments);
    // Always start the wizard on its first step.
    set(controller, 'step', 1);
  },

  resetController(controller, isExisting /* , transition*/ ) {
    // Clear transient form state when leaving an existing-cluster edit.
    if (isExisting) {
      controller.set('errors', null);
      controller.set('provider', null);
    }
  }
});
|
import inspect
def add_tracing_prints_to_all_methods(class_object):
    """Instrument every method of class_object with tracing prints."""
    # inspect.getmembers yields (name, member) pairs; only names are needed.
    method_names = [name for name, _ in inspect.getmembers(class_object, inspect.ismethod)]
    for name in method_names:
        add_tracing_prints_to_method(class_object, name)
|
def update_delivery_stats(data: dict, parsed_json: dict) -> None:
    """Copy RabbitMQ basic.get (no-ack) delivery counters from a parsed
    stats payload into ``data`` (mutated in place; returns None)."""
    if 'get_no_ack' in parsed_json.get('message_stats', {}):
        get_no_ack_count = parsed_json['message_stats']['get_no_ack']
        data['delivered_basicGet_messages'] = get_no_ack_count
        # NOTE(review): the total is read back immediately after being set to
        # get_no_ack_count, so the rate below is always 1 (or 0). Confirm
        # whether the total was meant to accumulate across calls or be taken
        # from a different counter.
        total_basicGet_messages = data.get('delivered_basicGet_messages', 0)
        if total_basicGet_messages > 0:
            data['delivered_basicGet_messages_rate'] = get_no_ack_count / total_basicGet_messages
        else:
            data['delivered_basicGet_messages_rate'] = 0
#!/bin/bash
# Launch the GPT-3 request processor. The absolute path is machine-specific;
# adjust for other environments.
python /Users/xx/PycharmProjects/GPT_3_server/debugFile/gpt_process.py
import tensorflow as tf
import numpy as np
from sklearn import svm

# Training data: four 3-feature samples with binary labels.
features = np.array([[0, 0, 1], [1, 1, 1], [1, 0, 1], [0, 1, 1]])
labels = np.array([0, 1, 1, 0])

# Build a linear support vector classifier and train it on the data.
classifier = svm.SVC(kernel='linear', C=1.0)
classifier.fit(features, labels)

# Report accuracy on the training set itself.
accuracy = classifier.score(features, labels)
print('Accuracy: ' + str(accuracy))
# Build the Gibson v2 image for the Jenkins CI user, matching the host
# jenkins UID/GID so files created in bind mounts keep sane ownership.
docker build -t gibsonchallenge/gibsonv2:jenkins2 --build-arg USER_ID=$(id -u jenkins) --build-arg GROUP_ID=$(id -g jenkins) .
|
"use strict";
Object.defineProperty(exports, "__esModule", {
value: true
});
exports.ic_copyright = void 0;
var ic_copyright = {
"viewBox": "0 0 24 24",
"children": [{
"name": "g",
"attribs": {},
"children": [{
"name": "rect",
"attribs": {
"fill": "none",
"height": "24",
"width": "24",
"x": "0"
},
"children": [{
"name": "rect",
"attribs": {
"fill": "none",
"height": "24",
"width": "24",
"x": "0"
},
"children": []
}]
}]
}, {
"name": "g",
"attribs": {},
"children": [{
"name": "g",
"attribs": {},
"children": [{
"name": "g",
"attribs": {},
"children": [{
"name": "g",
"attribs": {},
"children": [{
"name": "g",
"attribs": {},
"children": [{
"name": "path",
"attribs": {
"d": "M11.88,9.14c1.28,0.06,1.61,1.15,1.63,1.66h1.79c-0.08-1.98-1.49-3.19-3.45-3.19C9.64,7.61,8,9,8,12.14 c0,1.94,0.93,4.24,3.84,4.24c2.22,0,3.41-1.65,3.44-2.95h-1.79c-0.03,0.59-0.45,1.38-1.63,1.44C10.55,14.83,10,13.81,10,12.14 C10,9.25,11.28,9.16,11.88,9.14z M12,2C6.48,2,2,6.48,2,12s4.48,10,10,10s10-4.48,10-10S17.52,2,12,2z M12,20c-4.41,0-8-3.59-8-8 s3.59-8,8-8s8,3.59,8,8S16.41,20,12,20z"
},
"children": [{
"name": "path",
"attribs": {
"d": "M11.88,9.14c1.28,0.06,1.61,1.15,1.63,1.66h1.79c-0.08-1.98-1.49-3.19-3.45-3.19C9.64,7.61,8,9,8,12.14 c0,1.94,0.93,4.24,3.84,4.24c2.22,0,3.41-1.65,3.44-2.95h-1.79c-0.03,0.59-0.45,1.38-1.63,1.44C10.55,14.83,10,13.81,10,12.14 C10,9.25,11.28,9.16,11.88,9.14z M12,2C6.48,2,2,6.48,2,12s4.48,10,10,10s10-4.48,10-10S17.52,2,12,2z M12,20c-4.41,0-8-3.59-8-8 s3.59-8,8-8s8,3.59,8,8S16.41,20,12,20z"
},
"children": []
}]
}]
}]
}]
}]
}]
}]
};
exports.ic_copyright = ic_copyright; |
#!/usr/bin/env bash
# Pre-seed debconf answers so phpMyAdmin installs non-interactively
# (Vagrant provisioning): configure its database, use "vagrant" as the
# app/MySQL passwords, and wire it into apache2.
debconf-set-selections <<< 'phpmyadmin phpmyadmin/dbconfig-install boolean true'
debconf-set-selections <<< 'phpmyadmin phpmyadmin/app-password-confirm password vagrant'
debconf-set-selections <<< 'phpmyadmin phpmyadmin/mysql/admin-pass password vagrant'
debconf-set-selections <<< 'phpmyadmin phpmyadmin/mysql/app-pass password vagrant'
debconf-set-selections <<< 'phpmyadmin phpmyadmin/reconfigure-webserver multiselect apache2'
apt-get install -y phpmyadmin
|
// @flow
import React, { useState } from 'react';
import PropTypes from 'prop-types';
import { Switch, Route, withRouter } from 'react-router-dom';
import { Menu, Icon } from 'antd';
import Docker from './Docker';
import Error404 from './Error';
import ContainerId from './Container/ContainerId';
import Volume from './Volume/Volume';
import Network from './Network/Network';
const Home = props => {
const { history } = props;
const [menu, setMenu] = useState('/home');
if (history.location.pathname === '/') {
history.push('/home');
}
const handleClick = e => {
history.push(`/${e.key}`);
setMenu(e.key);
};
return (
<div>
<Menu onClick={handleClick} selectedKeys={[menu]} mode="horizontal">
<Menu.Item key="home">
<Icon type="mail" />
Container
</Menu.Item>
<Menu.Item key="volume">
<Icon type="appstore" />
Volume
</Menu.Item>
<Menu.Item key="network">
<Icon type="appstore" />
Network
</Menu.Item>
</Menu>
<div>
<Switch location={history.location}>
<Route path="/home" component={Docker} />
<Route path="/volume" component={Volume} />
<Route path="/network" component={Network} />
<Route path="*" component={Error404} />
<Route path="/container/:id" component={ContainerId} />
</Switch>
</div>
</div>
);
};
Home.propTypes = {
history: PropTypes.shape({}).isRequired
};
export default withRouter(Home);
|
#!/bin/bash
# Abort on the first failing command.
set -e
# Clone and build Facebook's Buck from source at the filesystem root.
pushd /
git clone https://github.com/facebook/buck.git
cd buck
# Give ant extra heap; the default target produces the buck binary.
ANT_OPTS=-Xmx1024m ant default
popd
|
<reponame>dcoloma/gaia<filename>shared/js/advanced_timer.js
/* -*- Mode: js; js-indent-level: 2; indent-tabs-mode: nil -*- */
/* vim: set shiftwidth=2 tabstop=2 autoindent cindent expandtab: */
'use strict';
var advanced_timer = {
  /**
   * Maps between user Ids and navigator ones
   */
  timers: {},

  /**
   * Register a new timer with the user's timerId.
   * NOTE(review): re-using a timerId overwrites the bookkeeping entry but
   * does not cancel the previous setTimeout — call stop() first if re-arming.
   */
  start: function(timerId, timeout, callback) {
    if (typeof(callback) != 'function') {
      callback = function() {};
    }
    var self = this;
    var _id = setTimeout(function advTimer() {
      // The timer fired: drop the bookkeeping before running the callback.
      delete(self.timers[timerId]);
      callback();
    }, timeout);
    this.timers[timerId] = {
      'timeout': timeout,
      'internalTimerId': _id,
      'timestamp': new Date().getTime()
    };
  },

  /**
   * Stops timer and returns the pending time (0 for unknown timers).
   */
  stop: function(timerId) {
    var timer = this.timers[timerId];
    if (!timer) {
      return 0;
    }
    clearTimeout(timer.internalTimerId);
    // BUG FIX: queryPendingTime was called without the timer id, so stop()
    // always returned 0. Compute the remaining time before removing the
    // bookkeeping entry.
    var pendingTime = this.queryPendingTime(timerId);
    delete(this.timers[timerId]);
    return pendingTime;
  },

  /**
   * Returns the pending time to timeout the timer (0 for unknown timers).
   */
  queryPendingTime: function(timerId) {
    var timer = this.timers[timerId];
    if (!timer) {
      return 0;
    }
    return timer.timeout - (new Date().getTime() - timer.timestamp);
  }
};
|
#! /bin/bash
#################################################
# Title: mk-install-iso #
# Date: 2014-11-26 #
# Version: 1.0 #
# Author: dthaluru@vmware.com #
# Options: #
#################################################
# Overview
# Generates a photon iso
# End
#
set -e
set -x
SCRIPT_PATH=$(dirname $(realpath -s $0))
PRGNAME=${0##*/} # script name minus the path
# Positional arguments: installer tree, scratch dir, then (after shift)
# output iso name, RPM repo path, package list, rpm list, stage dir,
# extra stage files, output data path, and the package set to install.
INSTALLER_PATH=$1
WORKINGDIR=$2
shift 2
ISO_OUTPUT_NAME=$1
RPMS_PATH=$2
PACKAGE_LIST_FILE=$3
RPM_LIST=$4
STAGE_PATH=$5
ADDITIONAL_FILES_TO_COPY_FROM_STAGE=$6
OUTPUT_DATA_PATH=$7
PHOTON_COMMON_DIR=$(dirname "${PACKAGE_LIST_FILE}")
PACKAGE_LIST_FILE_BASE_NAME=$(basename "${PACKAGE_LIST_FILE}")
INITRD=${WORKINGDIR}/photon-chroot
PACKAGES=$8
# Start from a clean scratch area and create the initrd chroot.
rm -rf $WORKINGDIR/*
mkdir -p $INITRD
chmod 755 $INITRD
cp $SCRIPT_PATH/open_source_license.txt $WORKINGDIR/
cp $STAGE_PATH/NOTICE $WORKINGDIR/
# 1. install rpms into initrd path
cat > ${WORKINGDIR}/photon-local.repo <<EOF
[photon-local]
name=VMware Photon Linux
baseurl=file://${RPMS_PATH}
gpgcheck=0
enabled=1
skip_if_unavailable=True
EOF
cat > ${WORKINGDIR}/tdnf.conf <<EOF
[main]
gpgcheck=0
installonly_limit=3
clean_requirements_on_remove=true
repodir=${WORKINGDIR}
EOF
rpm --root $INITRD --initdb --dbpath /var/lib/rpm
TDNF_CMD="tdnf install -y --installroot $INITRD --rpmverbosity 10 -c ${WORKINGDIR}/tdnf.conf -q $PACKAGES"
# run host's tdnf, if fails - try one from photon:3.0 docker image
$TDNF_CMD || docker run -v $RPMS_PATH:$RPMS_PATH -v $WORKINGDIR:$WORKINGDIR photon:3.0 $TDNF_CMD
rm -f ${WORKINGDIR}/photon-local.repo ${WORKINGDIR}/tdnf.conf
# 2. copy installer code to initrd
cp -r $INSTALLER_PATH $INITRD
# 3. finalize initrd system (mk-finalize-system.sh)
chroot ${INITRD} /usr/sbin/pwconv
chroot ${INITRD} /usr/sbin/grpconv
chroot ${INITRD} /bin/systemd-machine-id-setup
echo "LANG=en_US.UTF-8" > $INITRD/etc/locale.conf
echo "photon-installer" > $INITRD/etc/hostname
# locales/en_GB should be moved to glibc main package to make it working
#chroot ${INITRD} /usr/bin/localedef -c -i en_US -f UTF-8 en_US.UTF-8
# Importing the pubkey (photon-repos required)
#chroot ${INITRD} rpm --import /etc/pki/rpm-gpg/*
# Stage the isolinux/grub boot assets for the ISO image.
cp -r $SCRIPT_PATH/BUILD_DVD/isolinux $SCRIPT_PATH/BUILD_DVD/boot ${WORKINGDIR}/
mkdir ${WORKINGDIR}/boot/grub2/fonts/
cp $INSTALLER_PATH/boot/ascii.pf2 ${WORKINGDIR}/boot/grub2/fonts/
mkdir -p ${WORKINGDIR}/boot/grub2/themes/photon/
cp $INSTALLER_PATH/boot/splash.png ${WORKINGDIR}/boot/grub2/themes/photon/photon.png
cp $INSTALLER_PATH/boot/terminal_*.tga ${WORKINGDIR}/boot/grub2/themes/photon/
cp $INSTALLER_PATH/boot/theme.txt ${WORKINGDIR}/boot/grub2/themes/photon/
echo ${WORKINGDIR}
cp $SCRIPT_PATH/BUILD_DVD/isolinux/splash.png ${INITRD}/installer/boot/.
mkdir -p ${INITRD}/installer/EFI/BOOT
cp $INSTALLER_PATH/EFI_$(uname -m)/BOOT/* ${INITRD}/installer/EFI/BOOT/
#Generate efiboot image
# efiboot is a fat16 image that has at least EFI/BOOT/bootx64.efi
EFI_IMAGE=boot/grub2/efiboot.img
EFI_FOLDER=`readlink -f ${STAGE_PATH}/efiboot`
dd if=/dev/zero of=${WORKINGDIR}/${EFI_IMAGE} bs=3K count=1024
mkdosfs ${WORKINGDIR}/${EFI_IMAGE}
mkdir $EFI_FOLDER
# Loop-mount the FAT image to populate EFI/BOOT, then detach it.
mount -o loop ${WORKINGDIR}/${EFI_IMAGE} $EFI_FOLDER
mkdir $EFI_FOLDER/EFI
mkdir ${WORKINGDIR}/EFI
cp -r $INSTALLER_PATH/EFI_$(uname -m)/BOOT $EFI_FOLDER/EFI/
cp -r $INSTALLER_PATH/EFI_$(uname -m)/BOOT ${WORKINGDIR}/EFI/
ls -lR $EFI_FOLDER
umount $EFI_FOLDER
rm -rf $EFI_FOLDER
#mcopy -s -i ${WORKINGDIR}/${EFI_IMAGE} ./EFI '::/'
cp $INSTALLER_PATH/sample_ks.cfg ${WORKINGDIR}/isolinux/
mv ${INITRD}/boot/vmlinuz* ${WORKINGDIR}/isolinux/vmlinuz
rm -f ${INITRD}/installer/*.pyc
# Copy package list json files, dereference symlinks
cp -rf -L $OUTPUT_DATA_PATH/*.json ${INITRD}/installer/
#ID in the initrd.gz now is PHOTON_VMWARE_CD . This is how we recognize that the cd is actually ours. touch this file there.
touch ${WORKINGDIR}/PHOTON_VMWARE_CD
# Step 4.5 Create necessary devices
mkfifo ${INITRD}/dev/initctl
mknod ${INITRD}/dev/ram0 b 1 0
mknod ${INITRD}/dev/ram1 b 1 1
mknod ${INITRD}/dev/ram2 b 1 2
mknod ${INITRD}/dev/ram3 b 1 3
mknod ${INITRD}/dev/sda b 8 0
#- Step 5 - Creating the boot script
mkdir -p ${INITRD}/etc/systemd/scripts
# Step 6 create fstab
cp $SCRIPT_PATH/BUILD_DVD/fstab ${INITRD}/etc/fstab
mkdir -p ${INITRD}/etc/yum.repos.d
cat > ${INITRD}/etc/yum.repos.d/photon-iso.repo << EOF
[photon-iso]
name=VMWare Photon Linux 1.0(x86_64)
baseurl=file:///mnt/media/RPMS
gpgkey=file:///etc/pki/rpm-gpg/VMWARE-RPM-GPG-KEY
gpgcheck=1
enabled=1
skip_if_unavailable=True
EOF
#- Step 7 - Create installer script (escaped \$ expand inside the target
# system at boot time, unescaped variables expand now)
cat >> ${INITRD}/bin/bootphotoninstaller << EOF
#!/bin/bash
cd /installer
ACTIVE_CONSOLE="\$(< /sys/devices/virtual/tty/console/active)"
install() {
  LANG=en_US.UTF-8 ./isoInstaller.py --json-file=$PACKAGE_LIST_FILE_BASE_NAME && shutdown -r now
}
try_run_installer() {
  if [ "\$ACTIVE_CONSOLE" == "tty0" ]; then
    [ "\$(tty)" == '/dev/tty1' ] && install
  else
    [ "\$(tty)" == "/dev/\$ACTIVE_CONSOLE" ] && install
  fi
}
try_run_installer || exec /bin/bash
EOF
chmod 755 ${INITRD}/bin/bootphotoninstaller
cat >> ${INITRD}/init << EOF
mount -t proc proc /proc
/lib/systemd/systemd
EOF
chmod 755 ${INITRD}/init
#adding autologin to the root user
# and set TERM=linux for installer
sed -i "s/ExecStart.*/ExecStart=-\/sbin\/agetty --autologin root --noclear %I linux/g" ${INITRD}/lib/systemd/system/getty@.service
sed -i "s/ExecStart.*/ExecStart=-\/sbin\/agetty --autologin root --keep-baud 115200,38400,9600 %I screen/g" ${INITRD}/lib/systemd/system/serial-getty@.service
# Step 8 - Make the installer script root's login shell so it runs on autologin
sed -i "s/root:.*/root:x:0:0:root:\/root:\/bin\/bootphotoninstaller/g" ${INITRD}/etc/passwd
mkdir -p ${INITRD}/mnt/photon-root/photon-chroot
rm -rf ${INITRD}/RPMS
echo ${RPMS_PATH}
#cp -r ${RPMS_PATH} ${WORKINGDIR}/
# Copy only the listed RPMs into the ISO, preserving repo sub-paths.
(
cd ${RPMS_PATH}
mkdir ${WORKINGDIR}/RPMS
for rpm_name in $RPM_LIST; do
  cp --parent $rpm_name ${WORKINGDIR}/RPMS/
  chmod 644 ${WORKINGDIR}/RPMS/$rpm_name
done
)
# Work in sub-shell using ( ... ) to come back to original folder.
(
cd $STAGE_PATH
for file_name in $ADDITIONAL_FILES_TO_COPY_FROM_STAGE; do
  [ -n "$file_name" ] && cp $file_name ${WORKINGDIR}
done
)
#creating rpm repo in cd..
createrepo --database ${WORKINGDIR}/RPMS
repodatadir=${WORKINGDIR}/RPMS/repodata
if [ -d $repodatadir ]; then
  pushd $repodatadir
  metaDataFile=`find -type f -name "*primary.xml.gz"`
  ln -sfv $metaDataFile primary.xml.gz
  popd
fi
rm -rf ${INITRD}/LOGS
# Cleaning up: strip ELF binaries and delete everything the live installer
# does not need, to keep the initrd small.
find ${INITRD}/usr/lib/ -maxdepth 1 -mindepth 1 -type f | xargs -i sh -c "grep ELF {} >/dev/null 2>&1 && strip {} || :"
rm -rf ${INITRD}/home/* \
${INITRD}/var/lib/rpm \
${INITRD}/cache \
${INITRD}/boot \
${INITRD}/usr/include \
${INITRD}/usr/sbin/sln \
${INITRD}/usr/bin/iconv \
${INITRD}/usr/bin/oldfind \
${INITRD}/usr/bin/localedef \
${INITRD}/usr/bin/sqlite3 \
${INITRD}/usr/bin/grub2-* \
${INITRD}/usr/bin/bsdcpio \
${INITRD}/usr/bin/bsdtar \
${INITRD}/usr/bin/networkctl \
${INITRD}/usr/bin/machinectl \
${INITRD}/usr/bin/pkg-config \
${INITRD}/usr/bin/openssl \
${INITRD}/usr/bin/timedatectl \
${INITRD}/usr/bin/localectl \
${INITRD}/usr/bin/systemd-cgls \
${INITRD}/usr/bin/systemd-analyze \
${INITRD}/usr/bin/systemd-nspawn \
${INITRD}/usr/bin/systemd-inhibit \
${INITRD}/usr/bin/systemd-studio-bridge \
${INITRD}/usr/lib/python2.7/lib2to3 \
${INITRD}/usr/lib/python2.7/lib-tk \
${INITRD}/usr/lib/python2.7/ensurepip \
${INITRD}/usr/lib/python2.7/distutils \
${INITRD}/usr/lib/python2.7/pydoc_data \
${INITRD}/usr/lib/python2.7/idlelib \
${INITRD}/usr/lib/python2.7/unittest \
${INITRD}/usr/lib/librpmbuild.so* \
${INITRD}/usr/lib/libdb_cxx* \
${INITRD}/usr/lib/libnss_compat* \
${INITRD}/usr/lib/grub/i386-pc/*.module \
${INITRD}/usr/lib/grub/x86_64-efi/*.module \
${INITRD}/lib64/libmvec* \
${INITRD}/usr/lib64/gconv
find "${INITRD}/usr/sbin" -mindepth 1 -maxdepth 1 -name "grub2*" \
    ! -name grub2-install -exec rm -rvf {} \;
find "${INITRD}/usr/share" -mindepth 1 -maxdepth 1 \
    ! -name terminfo \
    ! -name cracklib \
    ! -name grub \
    ! -name factory \
    ! -name dbus-1 -exec rm -rvf {} \;
# Set password max days to 99999 (disable aging)
chroot ${INITRD} /bin/bash -c "chage -M 99999 root"
# Generate the initrd
pushd $INITRD
(find . | cpio -o -H newc --quiet | gzip -9) > ${WORKINGDIR}/isolinux/initrd.img
popd
rm -rf $INITRD
#Step 9 Generate the ISO!!!!
pushd $WORKINGDIR
mkisofs -R -l -L -D -b isolinux/isolinux.bin -c isolinux/boot.cat \
    -no-emul-boot -boot-load-size 4 -boot-info-table \
    -eltorito-alt-boot -e ${EFI_IMAGE} -no-emul-boot \
    -V "PHOTON_$(date +%Y%m%d)" \
    $WORKINGDIR >$ISO_OUTPUT_NAME
popd
#!/bin/sh
# Regenerate and measure coverage for one ur5 sample query ($1 names the
# sample). Requires KIN_GEN_ROOT to point at the kin-gen checkout.
rm -rf generated
rm -rf compiled
COVERAGE_ROOT=$KIN_GEN_ROOT/tests/coverage
SAMPLE_QUERIES=$KIN_GEN_ROOT/tests/sample-queries
cd $KIN_GEN_ROOT
# Generate the ILK sources for the requested query.
./ilk-generator.sh --robot $KIN_GEN_ROOT/tests/models/ur5/ur5-kul.kindsl --query $SAMPLE_QUERIES/sample_"$1"/model/ur5.dtdsl --output-dir $COVERAGE_ROOT/generated
cd $KIN_GEN_ROOT/ilk-compiler
# Compile the generated ILK to Eigen-backed C++.
./ilk-compiler.lua -b eigen --indir $COVERAGE_ROOT/generated --outdir $COVERAGE_ROOT/compiled
cd $COVERAGE_ROOT/compiled
# Build and run the debug timing binary once, then collect coverage.
make ur5_"$1"_timing_dbg
./ur5_"$1"_timing_dbg 1 1
gcov --relative-only ur5_"$1"_timing.cpp
lcov --no-external -t "blah" -o ur5_"$1"_timing.info -c -d `pwd` -d `pwd`
genhtml -o ../coverage_html_"$1"/ ur5_"$1"_timing.info
|
<reponame>kuihao/KuihaoFL<filename>Implement_google_AdaptiveFL/test.py<gh_stars>1-10
A = 1
print(
f"**** {A} ****\n\
*** 12312313 ***"
)
|
#!/bin/bash
# Tear down the devops1 IAM user: AWS requires removing the login profile,
# attached policies and access keys before the user itself can be deleted.
cd ~/Desktop/DevOps/Scripts/Bash
# delete login profile
aws iam delete-login-profile --user-name devops1
# Detach policy
aws iam detach-user-policy --user-name devops1 --policy-arn arn:aws:iam::aws:policy/AdministratorAccess
# Delete key of the user <change --access-key>
aws iam delete-access-key --access-key AKIASOAGHVG6I5C4PEVY --user-name devops1
# delete user
aws iam delete-user --user-name devops1
<div class="main-panel">
<?php include_once('lib/navbar.php'); ?>
<div class="content">
<div class="container-fluid">
<div class="row">
<?php if(isset($_GET['aksi']) && $_GET['aksi'] == 'edit') : ?>
<?php include_once('content/edit_pelanggan.php'); ?>
<?php else: ?>
<?php include_once('content/pelanggan.php'); ?>
<?php endif; ?>
</div>
</div>
</div>
</div> |
#!/bin/bash
# Fetch the Elastos wallet SDK submodule, build its iOS package and copy the
# resulting framework into this project's frameworks/ directory.
set -o errexit
set -o nounset
CURRENT_DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd);
PROJECT_DIR=$(dirname "$CURRENT_DIR")
DEPENDS_DIR="$PROJECT_DIR/depends";
# Ensure the dependency submodules are present and up to date.
git -C "$PROJECT_DIR" submodule init;
git -C "$PROJECT_DIR" submodule update;
export PACKAGE_PLATFORM=iOS;
"$DEPENDS_DIR/Elastos.SDK.Wallet.C/scripts/package-mobile.sh";
# Replace any previous framework output with the freshly built one.
rm -rf "$PROJECT_DIR/frameworks";
mkdir "$PROJECT_DIR/frameworks";
cp -rv "$DEPENDS_DIR/Elastos.SDK.Wallet.C/build/package/Elastos.SDK.Wallet.C.framework" "$PROJECT_DIR/frameworks";
|
package io.opensphere.core.server;

/**
 * The different application types that we can post to an {@link HttpServer}.
 */
public enum ContentType
{
    /** JSON application type. */
    JSON,

    /** XML application type. */
    XML,
}
|
package com.zyf.algorithm.test.queue09;
import com.zyf.algorithm.bean.ListNode;

/**
 * A FIFO queue backed by a singly linked list.
 */
public class LinkedQueue {

    private ListNode head; // first element (dequeue side)
    private ListNode tail; // last element (enqueue side)

    /** Appends a value at the tail of the queue. */
    private void enqueue(int value) {
        ListNode newNode = new ListNode(value);
        if (tail == null) {
            // Empty queue: the new node becomes both head and tail.
            head = newNode;
            tail = newNode;
            return;
        }
        tail.next = newNode;
        tail = newNode;
    }

    /** Removes and returns the head value, or -1 when the queue is empty. */
    private int dequeue() {
        if (head == null) {
            return -1;
        }
        int value = head.val;
        head = head.next;
        if (head == null) {
            // Queue drained: reset the tail pointer as well.
            tail = null;
        }
        return value;
    }

    public static void main(String[] args) {
        LinkedQueue queue = new LinkedQueue();
        for (int i = 1; i <= 5; i++) {
            queue.enqueue(i);
        }
        printAll(queue.head);
        for (int i = 0; i < 5; i++) {
            System.out.println("dequeue: " + queue.dequeue());
        }
        printAll(queue.head);
        queue.enqueue(6);
        printAll(queue.head);
        queue.enqueue(7);
        printAll(queue.head);
    }

    /** Prints the list from the given node, or an empty-list message. */
    private static void printAll(ListNode node) {
        if (node == null) {
            System.out.println("链表为空");
            return;
        }
        while (node != null) {
            if (node.next != null) {
                System.out.print(node.val + " --> ");
            } else {
                System.out.println(node.val);
            }
            node = node.next;
        }
    }
}
|
package org.usfirst.frc2791.robot2014.utils;
/**
* @author 2791
*/
public class Latch {
private boolean output;
private boolean firstSwitch = true;
public Latch(boolean defaultState) { output = defaultState; }
public void setLatchInput(boolean input) {
if (input) {
if (firstSwitch) { // first time the button was hit after being released
output = !output;
firstSwitch = false;
} // otherwise do nothing
} else { // button released
firstSwitch = true;
}
}
public void setManual(boolean newOutput) { output = newOutput; }
public boolean getLatchOutput() { return output; }
public boolean get() { return getLatchOutput(); }
}
|
$.extend({
  // Convert variables into a plain object keyed by their names.
  // Usage: name = '小明', age = 10;
  //        $.argUtil('name' ,'age');
  //        $.argUtil({name: 'name',age: 'age'});
  // NOTE(review): `j` is never advanced, so every element is compared
  // against the FIRST argument's type — appears intentional for the two
  // documented call shapes, but verify. Also, eval() resolves names from
  // the enclosing scope: a clarity/security hazard if names are untrusted.
  argUtil: function(){
    var obj = {};
    var args = arguments;
    var length = args.length;
    var i = 0;
    var j = 0;
    for(i in args){
      if(typeof args[j] === "object" && typeof args[i] === "string" ){
        obj[i] = args[i];
      }else if(typeof args[j] === "string" && args[i] !== null){
        try{
          obj[args[i]] = eval(args[i]);
        }catch(e){
          // Variable name could not be resolved in scope.
          console.log('变量' + args[i] + "不存在");
        }
      }
    }
    return obj;
  },
  // Return the Chinese weekday name for today, or for a parseable date
  // string passed as the first argument.
  // Usage: $.formateWeek();  $.formateWeek('2020-01-01');
  formateWeek: function(){
    var week,weekStr;
    if(arguments.length==0){
      week = new Date().getDay();
    }else if(isNaN(arguments[0])&&!isNaN(Date.parse(arguments[0]))){
      week = new Date(arguments[0]).getDay();
    }else{
      // Unparseable input: return an error message instead of a weekday.
      return "日期格式错误,请输入此种格式(yyyy-MM-dd)";
    }
    switch(week){
      case 0:
        weekStr = '星期日';
        break;
      case 1:
        weekStr = '星期一';
        break;
      case 2:
        weekStr = '星期二';
        break;
      case 3:
        weekStr = '星期三';
        break;
      case 4:
        weekStr = '星期四';
        break;
      case 5:
        weekStr = '星期五';
        break;
      case 6:
        weekStr = '星期六';
        break;
    }
    return weekStr;
  },
  // Test whether a string represents a (possibly negative) decimal number.
  isNumber:function(val) {
    var regPos = /^\d+(\.\d+)?$/; // non-negative float
    var regNeg = /^(-(([0-9]+\.[0-9]*[1-9][0-9]*)|([0-9]*[1-9][0-9]*\.[0-9]+)|([0-9]*[1-9][0-9]*)))$/; // negative float
    if(regPos.test(val) || regNeg.test(val)) {
      return true;
    } else {
      return false;
    }
  },
  // Format a 10-digit (seconds) or 13-digit (milliseconds) Unix timestamp.
  // fomate: "yyyy-MM-dd hh:mm:ss","yyyy-MM-dd"
  // Usage: $.fomateData(timestamp, format);
  // NOTE(review): the misspelled name "fomateData" is kept for caller
  // compatibility.
  fomateData:function(value,format){
    format = format || "yyyy-MM-dd hh:mm:ss";
    if(value.length === 10 && ($.isNumber(value) || typeof value ==="number")){
      var data = new Date(parseInt(value) * 1000);
    }else if(value.length === 13 && ($.isNumber(value) || typeof value ==="number")){
      var data = new Date(parseInt(value));
    }else{
      // Neither a 10- nor 13-digit timestamp.
      return '时间戳错误'
    }
    var o = {
      "M+" : data.getMonth()+1, //month
      "d+" : data.getDate(), //day
      "h+" : data.getHours(), //hour
      "m+" : data.getMinutes(), //minute
      "s+" : data.getSeconds(), //second
    }
    if(/(y+)/.test(format)) {
      format = format.replace(RegExp.$1, (data.getFullYear()+"").substr(4 - RegExp.$1.length));
    }
    for(var k in o) {
      if(new RegExp("("+ k +")").test(format)) {
        format = format.replace(RegExp.$1, RegExp.$1.length==1 ? o[k] : ("00"+ o[k]).substr((""+ o[k]).length));
      }
    }
    return format;
  }
})
// Placeholder for future jQuery element-level (prototype) helpers.
$.fn.extend({
})
|
<reponame>miriamtech/sqlite3_json_rails4<gh_stars>0
# frozen_string_literal: true
# This class is taken from the Rails v5.2.6 tree and rewritten for the Rails 4
# API.
module Sqlite3JsonRails4
  module Extensions
    module ActiveRecord
      module Type
        # JSON attribute type for the Rails 4 attribute API, backported from
        # the Rails v5.2.6 implementation (see file header).
        class Json < ::ActiveRecord::Type::Value
          include ::ActiveRecord::Type::Mutable

          # Symbolic name of this attribute type.
          def type
            :json
          end

          # Deserialize a database string into a Ruby object. Non-string
          # values pass through; invalid JSON deliberately decodes to nil.
          def type_cast_from_database(value)
            return value unless value.is_a?(::String)
            begin
              ActiveSupport::JSON.decode(value)
            rescue StandardError
              nil
            end
          end

          # Serialize a Ruby object to JSON for storage; nil stays nil.
          def type_cast_for_database(value)
            ActiveSupport::JSON.encode(value) unless value.nil?
          end

          # Detect in-place mutation by re-decoding the raw DB value and
          # comparing against the current value.
          def changed_in_place?(raw_old_value, new_value)
            type_cast_from_database(raw_old_value) != new_value
          end

          # Store accessor used when this type backs a keyed store column.
          def accessor
            ActiveRecord::Store::StringKeyedHashAccessor
          end
        end
      end
    end
  end
end
|
<filename>src/main/java/com/badlogic/gdx/scenes/scene2d/ui/StageLayout.java
package com.badlogic.gdx.scenes.scene2d.ui;
import com.badlogic.gdx.files.FileHandle;
import com.badlogic.gdx.json.AnnotatedJson;
import com.badlogic.gdx.json.annotations.*;
import com.badlogic.gdx.scenes.scene2d.Actor;
import com.badlogic.gdx.scenes.scene2d.Stage;
import com.badlogic.gdx.utils.*;
import java.io.IOException;
/**
 * Builds and resizes a scene2d UI from a JSON layout description.
 * Actor layouts are read once from file; {@link #create(Skin)} instantiates
 * them onto the stage, and {@link #resizeAll()} re-applies layout rules.
 */
public class StageLayout {

    private final Stage stage;
    private final StageLayout.JsonData json;
    private final StageLayoutListener listener;

    // Top-level actors created from the layout (children live inside groups).
    private Array<Actor> rootActors = new Array<>();

    /**
     * @param stage      stage that created actors are added to
     * @param layoutPath JSON layout description to read
     * @param listener   callback hook handed to each created actor layout
     * @throws IOException if the layout file cannot be read/parsed
     */
    public StageLayout(Stage stage, FileHandle layoutPath, StageLayoutListener listener) throws IOException {
        this.stage = stage;
        this.json = readJson(layoutPath);
        this.listener = listener;
    }

    /** Instantiates every top-level layout and adds the actors to the stage. */
    public void create(Skin skin) {
        rootActors.clear();
        for (ActorLayout<?> layout : json.actors) {
            Actor actor = create(skin, layout, layout.actorClass);
            stage.addActor(actor);
        }
    }

    // Creates one actor from its layout and checks it against the expected type.
    private <T extends Actor> T create(Skin skin, ActorLayout layout, Class<T> clazz) {
        Actor actor = layout.create(skin, listener);
        if (!clazz.isAssignableFrom(actor.getClass())) {
            throw new GdxRuntimeException("Type mismatch for actor layout: " + layout.name);
        }
        rootActors.add(actor);
        return (T) actor;
    }

    /** Finds a created actor by name, searching groups recursively. */
    public <T extends Actor> T get(String name, Class<T> clazz) {
        return get(rootActors, name, clazz);
    }

    // Depth-first search through the actor tree; returns null when not found.
    private <T extends Actor> T get(Array<Actor> actors, String name, Class<T> clazz) {
        for (Actor actor : actors) {
            if(actor.getName() == null)
                continue;
            if (actor.getName().equals(name)) {
                if (!clazz.isAssignableFrom(actor.getClass())) {
                    throw new GdxRuntimeException("Type mismatch for actor layout: " + name);
                }
                return (T) actor;
            }
            if (actor instanceof LayoutGroup) {
                LayoutGroup group = (LayoutGroup) actor;
                T foundActor = get(group.getChildren(), name, clazz);
                if (foundActor != null)
                    return foundActor;
            }
        }
        return null;
    }

    public Array<Actor> getRootActors() {
        return rootActors;
    }

    /** Re-applies layout rules to every top-level actor (and its children). */
    public void resizeAll() {
        for (Actor actor : rootActors) {
            resize(actor, true);
        }
    }

    /**
     * Re-applies the layout rule for one actor; recurses into LayoutGroups.
     * Unnamed actors are skipped because layouts are matched by name.
     */
    public void resize(Actor actor, boolean root) {
        if(actor.getName() == null)
            return;
        // Resize this actor layout
        ActorLayout<?> actorLayout = getLayoutByName(json.actors, actor.getName());
        if (actorLayout == null) {
            throw new GdxRuntimeException("No actor layout found for: " + actor.getName());
        }
        actorLayout.layout.resize(stage, actor, root);
        // If it happens to be a group, also resize any children
        if (actor instanceof LayoutGroup) {
            LayoutGroup groupActor = (LayoutGroup) actor;
            SnapshotArray<Actor> children = groupActor.getChildren();
            for (int i = 0; i < children.size; i++) {
                resize(children.get(i), false);
            }
        }
    }

    /** Finds a layout description by actor name, searching groups recursively. */
    public ActorLayout<?> getLayoutByName(String name) {
        return getLayoutByName(json.actors, name);
    }

    private ActorLayout<?> getLayoutByName(Array<ActorLayout<?>> layouts, String name) {
        if (name == null)
            return null;
        for (ActorLayout layout : layouts) {
            if (name.equals(layout.name)) {
                return layout;
            }
            if (layout instanceof GroupLayout) {
                GroupLayout groupLayout = (GroupLayout) layout;
                ActorLayout actorLayout = getLayoutByName(groupLayout.actors, name);
                if (actorLayout != null)
                    return actorLayout;
            }
        }
        return null;
    }

    /** Root JSON structure: the list of top-level actor layouts. */
    @JsonSerializable
    @SuppressWarnings("WeakerAccess")
    public static class JsonData {
        @JsonSerialize(array = @JsonArray(value = ActorLayout.class))
        public Array<ActorLayout<?>> actors;
    }

    // Shared reader configured to resolve ActorLayout subclasses by package.
    private static final Json reader = AnnotatedJson.newReader(JsonData.class, StageLayout::setupJson);

    private static void setupJson(Json json) {
        AnnotatedJson.registerSubclasses(json, ActorLayout.class,
                className -> className.startsWith("com.badlogic.gdx.scenes.scene2d.ui"));
    }

    private static JsonData readJson(FileHandle path) throws IOException {
        return AnnotatedJson.read(path, JsonData.class, reader);
    }
}
|
#!/bin/sh
# Container entrypoint: wait for dependent services, then replace this shell
# with the JVM so it receives signals directly (PID 1 friendly).
echo "The application will start in ${JHIPSTER_SLEEP}s..." && sleep ${JHIPSTER_SLEEP}
exec java ${JAVA_OPTS} -noverify -XX:+AlwaysPreTouch -Djava.security.egd=file:/dev/./urandom -cp /app/resources/:/app/classes/:/app/libs/* "com.salaboy.invoice.InvoiceApp" "$@"
|
#!/bin/bash
# get origin source code
# wget -c https://raw.github.com/creationix/nvm/master/install.sh
{ # this ensures the entire script is downloaded #
# Succeeds when the given command name resolves (binary, builtin, function
# or alias); all probe output is discarded.
nvm_has() {
  if type "$1" >/dev/null 2>&1; then
    return 0
  fi
  return 1
}
# Default the install location to ~/.nvm unless the caller overrides NVM_DIR.
if [ -z "$NVM_DIR" ]; then
  NVM_DIR="$HOME/.nvm"
fi
# The pinned nvm release that this installer fetches.
nvm_latest_version() {
  printf '%s\n' "v0.37.2"
}
#
# Outputs the location to NVM depending on:
# * The availability of $NVM_SOURCE
# * The method used ("script" or "git" in the script, defaults to "git")
# NVM_SOURCE always takes precedence unless the method is "script-nvm-exec"
#
nvm_source() {
local NVM_METHOD
NVM_METHOD="$1"
local NVM_SOURCE_URL
NVM_SOURCE_URL="$NVM_SOURCE"
if [ "_$NVM_METHOD" = "_script-nvm-exec" ]; then
NVM_SOURCE_URL="https://raw.githubusercontent.com/creationix/nvm/$(nvm_latest_version)/nvm-exec"
elif [ -z "$NVM_SOURCE_URL" ]; then
if [ "_$NVM_METHOD" = "_script" ]; then
NVM_SOURCE_URL="https://raw.githubusercontent.com/creationix/nvm/$(nvm_latest_version)/nvm.sh"
elif [ "_$NVM_METHOD" = "_git" ] || [ -z "$NVM_METHOD" ]; then
NVM_SOURCE_URL="https://github.com/creationix/nvm.git"
else
echo >&2 "Unexpected value \"$NVM_METHOD\" for \$NVM_METHOD"
return 1
fi
fi
echo "$NVM_SOURCE_URL"
}
# Fetch a URL with curl when available, otherwise emulate the curl flags
# with wget. Arguments are deliberately passed unquoted ($*), so this helper
# cannot handle URLs/paths containing spaces -- matches upstream behavior.
nvm_download() {
  if nvm_has "curl"; then
    curl -q $*
  elif nvm_has "wget"; then
    # Emulate curl with wget
    # Translate only the curl flags this installer actually uses to their
    # wget equivalents; everything else passes through unchanged.
    ARGS=$(echo "$*" | command sed -e 's/--progress-bar /--progress=bar /' \
                            -e 's/-L //' \
                            -e 's/-I /--server-response /' \
                            -e 's/-s /-q /' \
                            -e 's/-o /-O /' \
                            -e 's/-C - /-c /')
    wget $ARGS
  fi
}
# Install (or update) nvm by cloning the pinned release tag into $NVM_DIR.
install_nvm_from_git() {
  if [ -d "$NVM_DIR/.git" ]; then
    # Existing checkout: just fetch; the checkout below pins the tag.
    echo "=> nvm is already installed in $NVM_DIR, trying to update using git"
    printf "\r=> "
    cd "$NVM_DIR" && (command git fetch 2> /dev/null || {
      echo >&2 "Failed to update nvm, run 'git fetch' in $NVM_DIR yourself." && exit 1
    })
  else
    # Cloning to $NVM_DIR
    echo "=> Downloading nvm from git to '$NVM_DIR'"
    printf "\r=> "
    mkdir -p "$NVM_DIR"
    command git clone "$(nvm_source git)" "$NVM_DIR"
  fi
  # Pin the working tree to the release tag rather than a branch.
  cd "$NVM_DIR" && command git checkout --quiet $(nvm_latest_version)
  # Delete any local master branch -- presumably so a stale branch cannot
  # shadow the pinned tag (upstream behavior; confirm against nvm history).
  if [ ! -z "$(cd "$NVM_DIR" && git show-ref refs/heads/master)" ]; then
    if git branch --quiet 2>/dev/null; then
      cd "$NVM_DIR" && command git branch --quiet -D master >/dev/null 2>&1
    else
      echo >&2 "Your version of git is out of date. Please update it!"
      cd "$NVM_DIR" && command git branch -D master >/dev/null 2>&1
    fi
  fi
  return
}
# Install (or update) nvm by downloading nvm.sh and nvm-exec directly.
install_nvm_as_script() {
  local NVM_SOURCE_LOCAL
  NVM_SOURCE_LOCAL=$(nvm_source script)
  local NVM_EXEC_SOURCE
  NVM_EXEC_SOURCE=$(nvm_source script-nvm-exec)
  # Downloading to $NVM_DIR
  mkdir -p "$NVM_DIR"
  # Bug fix: nvm.sh is a regular file, so test with -f. The original used
  # -d (directory), which could never be true, making the "already
  # installed" message unreachable.
  if [ -f "$NVM_DIR/nvm.sh" ]; then
    echo "=> nvm is already installed in $NVM_DIR, trying to update the script"
  else
    echo "=> Downloading nvm as script to '$NVM_DIR'"
  fi
  nvm_download -s "$NVM_SOURCE_LOCAL" -o "$NVM_DIR/nvm.sh" || {
    echo >&2 "Failed to download '$NVM_SOURCE_LOCAL'"
    return 1
  }
  nvm_download -s "$NVM_EXEC_SOURCE" -o "$NVM_DIR/nvm-exec" || {
    echo >&2 "Failed to download '$NVM_EXEC_SOURCE'"
    return 2
  }
  chmod a+x "$NVM_DIR/nvm-exec" || {
    echo >&2 "Failed to mark '$NVM_DIR/nvm-exec' as executable"
    return 3
  }
}
#
# Detect the shell profile file to modify, unless one is specified via
# environment variable (eg: PROFILE=~/.myprofile).
# Prefers a profile matching the current $SHELL; otherwise falls back to
# $PROFILE and then the usual candidates. Echoes the chosen path, or nothing
# if no candidate was found. (Note: for zsh, ~/.zshrc is echoed even when it
# does not exist yet -- preserved upstream behavior.)
#
nvm_detect_profile() {
  local DETECTED_PROFILE
  DETECTED_PROFILE=''
  local SHELLTYPE
  SHELLTYPE="$(basename /$SHELL)"
  # Bug fix: quote all expansions. With $SHELL unset, the original unquoted
  # [ $SHELLTYPE = "bash" ] collapsed to [ = "bash" ] (a test syntax error),
  # and an unquoted $DETECTED_PROFILE broke on paths containing spaces.
  if [ "$SHELLTYPE" = "bash" ]; then
    if [ -f "$HOME/.bashrc" ]; then
      DETECTED_PROFILE="$HOME/.bashrc"
    elif [ -f "$HOME/.bash_profile" ]; then
      DETECTED_PROFILE="$HOME/.bash_profile"
    fi
  elif [ "$SHELLTYPE" = "zsh" ]; then
    DETECTED_PROFILE="$HOME/.zshrc"
  fi
  if [ -z "$DETECTED_PROFILE" ]; then
    if [ -f "$PROFILE" ]; then
      DETECTED_PROFILE="$PROFILE"
    elif [ -f "$HOME/.profile" ]; then
      DETECTED_PROFILE="$HOME/.profile"
    elif [ -f "$HOME/.bashrc" ]; then
      DETECTED_PROFILE="$HOME/.bashrc"
    elif [ -f "$HOME/.bash_profile" ]; then
      DETECTED_PROFILE="$HOME/.bash_profile"
    elif [ -f "$HOME/.zshrc" ]; then
      DETECTED_PROFILE="$HOME/.zshrc"
    fi
  fi
  if [ ! -z "$DETECTED_PROFILE" ]; then
    echo "$DETECTED_PROFILE"
  fi
}
#
# Check whether the user has any globally-installed npm modules in their system
# Node, and warn them if so.
#
nvm_check_global_modules() {
  # Bail out silently when npm is missing or its version is unparsable.
  command -v npm >/dev/null 2>&1 || return 0
  local NPM_VERSION
  NPM_VERSION="$(npm --version)"
  NPM_VERSION="${NPM_VERSION:--1}"
  [ "${NPM_VERSION%%[!-0-9]*}" -gt 0 ] || return 0
  # Global modules, minus npm itself and "(empty)" placeholder lines.
  local NPM_GLOBAL_MODULES
  NPM_GLOBAL_MODULES="$(
  npm list -g --depth=0 |
  sed '/ npm@/d' |
  sed '/ (empty)$/d'
  )"
  local MODULE_COUNT
  MODULE_COUNT="$(
  printf %s\\n "$NPM_GLOBAL_MODULES" |
  sed -ne '1!p' | # Remove the first line
  wc -l | tr -d ' ' # Count entries
  )"
  if [ $MODULE_COUNT -ne 0 ]; then
  cat <<-'END_MESSAGE'
	=> You currently have modules installed globally with `npm`. These will no
	=> longer be linked to the active version of Node when you install a new node
	=> with `nvm`; and they may (depending on how you construct your `$PATH`)
	=> override the binaries of modules installed with `nvm`:
	END_MESSAGE
  printf %s\\n "$NPM_GLOBAL_MODULES"
  cat <<-'END_MESSAGE'
	=> If you wish to uninstall them at a later point (or re-install them under your
	=> `nvm` Nodes), you can remove them from the system Node as follows:
	     $ nvm use system
	     $ npm uninstall -g a_module
	END_MESSAGE
  fi
}
# Top-level installer: picks an install method (git or script), installs nvm,
# wires the source line into the user's shell profile, and warns about
# globally-installed npm modules.
nvm_do_install() {
  if [ -z "$METHOD" ]; then
    # Autodetect install method
    if nvm_has "git"; then
      install_nvm_from_git
    elif nvm_has "nvm_download"; then
      install_nvm_as_script
    else
      echo >&2 "You need git, curl, or wget to install nvm"
      exit 1
    fi
  elif [ "~$METHOD" = "~git" ]; then
    if ! nvm_has "git"; then
      echo >&2 "You need git to install nvm"
      exit 1
    fi
    install_nvm_from_git
  elif [ "~$METHOD" = "~script" ]; then
    if ! nvm_has "nvm_download"; then
      echo >&2 "You need curl or wget to install nvm"
      exit 1
    fi
    install_nvm_as_script
  fi
  echo
  local NVM_PROFILE
  NVM_PROFILE=$(nvm_detect_profile)
  SOURCE_STR="\nexport NVM_DIR=\"$NVM_DIR\"\n[ -s \"\$NVM_DIR/nvm.sh\" ] && . \"\$NVM_DIR/nvm.sh\" # This loads nvm"
  if [ -z "$NVM_PROFILE" ] ; then
    # Bug fix: NVM_PROFILE is always empty on this branch, so the original
    # messages interpolated nothing ("Tried ", "touch "). Reference the
    # user-supplied $PROFILE instead, and drop the redundant duplicate line.
    echo "=> Profile not found. Tried \$PROFILE ('${PROFILE}'), ~/.bashrc, ~/.bash_profile, ~/.zshrc, and ~/.profile."
    echo "=> Create one of them and run this script again"
    echo " OR"
    echo "=> Append the following lines to the correct file yourself:"
    printf "$SOURCE_STR"
    echo
  else
    if ! command grep -qc '/nvm.sh' "$NVM_PROFILE"; then
      echo "=> Appending source string to $NVM_PROFILE"
      printf "$SOURCE_STR\n" >> "$NVM_PROFILE"
    else
      echo "=> Source string already in $NVM_PROFILE"
    fi
  fi
  nvm_check_global_modules
  echo "=> Close and reopen your terminal to start using nvm"
  nvm_reset
}
#
# Clean up the installer's namespace: remove every function this script
# defined so nothing leaks into the user's interactive shell.
#
nvm_reset() {
  unset -f nvm_reset nvm_has nvm_latest_version
  unset -f nvm_source nvm_download
  unset -f install_nvm_as_script install_nvm_from_git
  unset -f nvm_detect_profile nvm_check_global_modules nvm_do_install
}
[ "_$NVM_ENV" = "_testing" ] || nvm_do_install
} # this ensures the entire script is downloaded #
|
#!/bin/bash
# Attach the mac-agent Java agent to the JVM whose PID is given as $1.
# Quote "$0"/"$current_dir" so paths with spaces work, and abort if cd fails.
current_dir=$(dirname "$0")
app_jar=/mnt/d/code/e4/mac-agent/build/libs/mac-agent-1.0.jar
cd "$current_dir" || exit 1
JAVA_HOME=/home/wei/jdk1.8.0_212
LIB="$JAVA_HOME/lib/tools.jar:$JAVA_HOME/jre/lib/rt.jar"
OPTIONS="-Djava.library.path=$JAVA_HOME/jre/bin -Dbundles.list=/mnt/d/code/e4/mac-agent/sample/bundles"
echo "$LIB" > /tmp/a.log
if [ -n "$1" ]; then
    # ${JAVA_HOME}/bin/java ${OPTIONS} -cp $LIB -jar ${app_jar} $1 enable
    # NOTE(review): JAVA_HOME/LIB are computed but the PATH `java` is used --
    # presumably intentional after the line above was commented out; confirm.
    java ${OPTIONS} -jar "${app_jar}" "$1" enable
else
    echo "please provide PID as inputs"
fi
<gh_stars>10-100
angular.module("wust.config").config(HumaneConfig);
HumaneConfig.$inject = [];
// Configures the global `marked` markdown renderer used by the app.
function HumaneConfig() {
    // Highlight code blocks with highlight.js; auto-detect the language
    // when none was given on the fenced block.
    function highlightCode(code, lang) {
        return hljs.highlightAuto(code, lang ? [lang] : undefined).value;
    }
    var options = {
        renderer: new marked.Renderer(),
        gfm: true,
        tables: true,
        breaks: false,
        pedantic: false,
        sanitize: true, // additionally escape all input html elements
        smartLists: true,
        smartypants: false,
        highlight: highlightCode
    };
    marked.setOptions(options);
}
|
#! /bin/bash
# SLURM batch job: SWEET REXI finite-difference scalability run
# (m=512, 1 thread/task, 1218 MPI ranks on the mpp2 cluster).
#SBATCH -o /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2016_01_03_scalability_rexi_fd_high_res_run2/run_rexi_fd_par_m0512_t001_n0128_r1218_a1.txt
###SBATCH -e /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2016_01_03_scalability_rexi_fd_high_res_run2/run_rexi_fd_par_m0512_t001_n0128_r1218_a1.err
#SBATCH -J rexi_fd_par_m0512_t001_n0128_r1218_a1
#SBATCH --get-user-env
#SBATCH --clusters=mpp2
#SBATCH --ntasks=1218
#SBATCH --cpus-per-task=1
#SBATCH --exclusive
#SBATCH --export=NONE
#SBATCH --time=03:00:00
#declare -x NUMA_BLOCK_ALLOC_VERBOSITY=1
# Pin one OpenMP thread per task, packed onto adjacent hardware threads.
declare -x KMP_AFFINITY="granularity=thread,compact,1,0"
declare -x OMP_NUM_THREADS=1
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
echo
# Load the toolchain expected by this benchmark build (Intel 16 + Intel MPI 5.1).
. /etc/profile.d/modules.sh
module unload gcc
module unload fftw
module unload python
module load python/2.7_anaconda_nompi
module unload intel
module load intel/16.0
module unload mpi.intel
module load mpi.intel/5.1
module load gcc/5
cd /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2016_01_03_scalability_rexi_fd_high_res_run2
cd ../../../
. local_software/env_vars.sh
# force to use FFTW WISDOM data
declare -x SWEET_FFTW_LOAD_WISDOM_FROM_FILE="FFTW_WISDOM_nofreq_T0"
# 1218 ranks, 28 ranks per node; `time -p` reports wall clock for the run.
time -p mpiexec.hydra -genv OMP_NUM_THREADS 1 -envall -ppn 28 -n 1218 ./build/rexi_fd_par_m_tno_a1 --initial-freq-x-mul=2.0 --initial-freq-y-mul=1.0 -f 1 -g 1 -H 1 -X 1 -Y 1 --compute-error 1 -t 50 -R 4 -C 0.3 -N 128 -U 0 -S 0 --use-specdiff-for-complex-array 0 --rexi-h 0.8 --timestepping-mode 1 --staggering 0 --rexi-m=512 -C -5.0
|
#!/usr/bin/env zsh
# Interactive zsh key bindings and line-editor widget setup.
# Register the widgets used below with the line editor.
zle -N up-line-or-beginning-search
zle -N down-line-or-beginning-search
zle -N edit-command-line
zle -N zle-keymap-select
# NOTE(review): calls auto-fu's keymap hook at load time; this fails if the
# auto-fu plugin is not already loaded -- confirm load order.
auto-fu-zle-keymap-select
### History Substring Search ###
bindkey '^[[A' history-substring-search-up
bindkey '^[[B' history-substring-search-down
bindkey -M vicmd 'k' history-substring-search-up
bindkey -M vicmd 'j' history-substring-search-down
# fuzzy find: start to type
bindkey "$terminfo[kcuu1]" up-line-or-beginning-search
bindkey "$terminfo[kcud1]" down-line-or-beginning-search
bindkey "$terminfo[cuu1]" up-line-or-beginning-search
bindkey "$terminfo[cud1]" down-line-or-beginning-search
# backward and forward word with option+left/right
bindkey '^[^[[D' backward-word
bindkey '^[b' backward-word
bindkey '^[^[[C' forward-word
bindkey '^[f' forward-word
# go to the beginning/end of line with fn+left/right or home/end
bindkey "${terminfo[khome]}" beginning-of-line
bindkey '^[[H' beginning-of-line
bindkey "${terminfo[kend]}" end-of-line
bindkey '^[[F' end-of-line
# delete char with backspace and delete
bindkey '^[[3~' delete-char
bindkey '^?' backward-delete-char
# delete word with ctrl+backspace
bindkey '^[[3;5~' backward-delete-word
# bindkey '^[[3~' backward-delete-word
# edit command line in $EDITOR
autoload -U edit-command-line
zle -N edit-command-line
bindkey '^e' edit-command-line
# search history with fzf if installed, default otherwise
if test -d /usr/local/opt/fzf/shell; then
  # shellcheck disable=SC1091
  . /usr/local/opt/fzf/shell/key-bindings.zsh
else
  bindkey '^R' history-incremental-search-backward
fi
# Show dots while waiting to complete. Useful for systems with slow net access,
# like those places where they use giant, slow NFS solutions. (Hint.)
expand-or-complete-with-dots() {
  echo -n "\e[31m......\e[0m"
  zle expand-or-complete
  zle redisplay
}
zle -N expand-or-complete-with-dots
bindkey "^I" expand-or-complete-with-dots
# This inserts a tab after completing a redirect. You want this.
# (Source: http://www.zsh.org/mla/users/2006/msg00690.html)
self-insert-redir() {
  integer l=$#LBUFFER
  zle self-insert
  (( $l >= $#LBUFFER )) && LBUFFER[-1]=" $LBUFFER[-1]"
}
zle -N self-insert-redir
for op in \| \< \> \& ; do
  bindkey "$op" self-insert-redir
done
# Automatically quote URLs when pasted
autoload -U url-quote-magic
zle -N self-insert url-quote-magic
# ZSH KEYBINDINGS {{{1
# First, primarily use emacs key bindings
bindkey -e
# One keystroke to cd ..
bindkey -s '\eu' '\eq^Ucd ..; ls^M'
# Smart less-adder
bindkey -s "\el" "^E 2>&1|less^M"
# This lets me use ^Z to toggle between open text editors.
bindkey -s '^Z' '^Ufg^M'
# Edit the current command line with Meta-e
# NOTE(review): edit-command-line is autoloaded/registered twice in this file
# (also bound to ^e above) -- harmless but redundant; consider consolidating.
autoload -U edit-command-line
zle -N edit-command-line
bindkey '\ee' edit-command-line
# Let ^W delete to slashes - zsh-users list, 4 Nov 2005
# (I can't live without this)
backward-delete-to-slash() {
  local WORDCHARS=${WORDCHARS//\//}
  zle .backward-delete-word
}
zle -N backward-delete-to-slash
bindkey "^W" backward-delete-to-slash
# AUTO_PUSHD is set so we can always use popd
bindkey -s '\ep' '^Upopd >/dev/null; dirs -v^M'
|
package cdek
import "time"
//NewStatusReportReq creates an empty Order Status Report request ready for
//configuration via the fluent Set*/Add* methods.
func NewStatusReportReq() *StatusReport {
	return &StatusReport{}
}
//SetShowHistory The attribute indicating that the order history must be loaded (1 – yes, 0 – no).
//Returns the receiver for call chaining.
//NOTE(review): takes the raw 1/0 int while the other Show* setters take bool --
//presumably mirrors the CDEK API field type; confirm before changing.
func (req *StatusReport) SetShowHistory(showHistory int) *StatusReport {
	req.ShowHistory = &showHistory
	return req
}
//SetShowReturnOrder The attribute indicating that the list of return orders must be loaded.
//Returns the receiver for call chaining.
func (req *StatusReport) SetShowReturnOrder(showReturnOrder bool) *StatusReport {
	req.ShowReturnOrder = &showReturnOrder
	return req
}
//SetShowReturnOrderHistory The attribute indicating that the history of return orders must be loaded.
//Returns the receiver for call chaining.
func (req *StatusReport) SetShowReturnOrderHistory(showReturnOrderHistory bool) *StatusReport {
	req.ShowReturnOrderHistory = &showReturnOrderHistory
	return req
}
//SetChangePeriod The period during which the order status has changed.
//Returns the receiver for call chaining.
func (req *StatusReport) SetChangePeriod(changePeriod ChangePeriod) *StatusReport {
	req.ChangePeriod = &changePeriod
	return req
}
//AddOrder Add Shipment (order) to the report request.
//The argument is passed by value, so the appended pointer refers to a private
//copy; later changes to the caller's value do not affect the request.
//Returns the receiver for call chaining.
func (req *StatusReport) AddOrder(order StatusReportOrderReq) *StatusReport {
	req.Order = append(req.Order, &order)
	return req
}
//NewChangePeriod ChangePeriod builder.
// dateFirst: start date of the requested period, stored as YYYY-MM-DD.
func NewChangePeriod(dateFirst time.Time) *ChangePeriod {
	formatted := dateFirst.Format("2006-01-02")
	period := ChangePeriod{DateFirst: &formatted}
	return &period
}
//SetDateLast End date of requested period, stored as YYYY-MM-DD.
//Returns the receiver for call chaining.
func (changePeriod *ChangePeriod) SetDateLast(date time.Time) *ChangePeriod {
	dateFormatted := date.Format("2006-01-02")
	changePeriod.DateLast = &dateFormatted
	return changePeriod
}
//NewStatusReportOrderReq StatusReportOrderReq builder. All three identifiers
//are set; the date is stored as YYYY-MM-DD.
// dispatchNumber: CDEK shipment number (assigned when orders are imported). Order identifier in the CDEK IS.
// number: Client's shipment number. Order identifier in the IS of the CDEK Client.
// date: Date of an acceptance certificate, based on which the order has been transferred.
func NewStatusReportOrderReq(dispatchNumber int, number string, date time.Time) *StatusReportOrderReq {
	dateFormatted := date.Format("2006-01-02")
	return &StatusReportOrderReq{
		DispatchNumber: &dispatchNumber,
		Number:         &number,
		Date:           &dateFormatted,
	}
}
|
#!/bin/sh
# Install the DirectHW kext, framework and libraries system-wide.
#sudo mount -uw /
#sudo killall Finder
# Ineffective on Big Sur and later (the system volume is sealed/read-only).
sudo mkdir -p /usr/local/lib
sudo mkdir -p /usr/local/share
# Quote the path (spaces in $0) and abort if cd fails; continuing after a
# failed cd would make the sudo copies below grab files from the wrong place.
cd "$(dirname "$0")/Resources" || exit 1
sudo cp -r ./DirectHW.kext /usr/local/share/DirectHW.kext
sudo cp -r ./DirectHW.framework /Library/Frameworks/DirectHW.framework
sudo cp -r ./libDirectHW.a /usr/local/lib/libDirectHW.a
sudo cp -r ./libDirectHW.dylib /usr/local/lib/libDirectHW.dylib
# Kexts/frameworks must be root:wheel with 755 to be accepted by the loader.
sudo chmod -R 755 /usr/local/share/DirectHW.kext
sudo chmod -R 755 /Library/Frameworks/DirectHW.framework
sudo chmod 644 /usr/local/lib/libDirectHW.a
sudo chmod 644 /usr/local/lib/libDirectHW.dylib
sudo chown -R root:wheel /usr/local/share/DirectHW.kext
sudo chown -R root:wheel /Library/Frameworks/DirectHW.framework
echo "Done!"
|
package dijkstra;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.PriorityQueue;
import java.util.StringTokenizer;
/**
 * BOJ 10473: Human Cannonball (백준 10473번: 인간 대포).
 *
 * Shortest travel time from a start point to an end point when you can walk
 * at 5 m/s or use cannons that launch you exactly 50 m in 2 seconds.
 * Modeled as a graph (node 0 = start, 1..N = cannons, N+1 = end) and solved
 * with Dijkstra's algorithm.
 *
 * @author exponential-e
 * @see https://www.acmicpc.net/problem/10473/
 */
public class Boj10473 {
    private static Coordinate start;
    private static Coordinate end;
    private static ArrayList<Node>[] graph;
    // Larger than any reachable travel time; serves as the "unvisited" marker.
    private static final double INF = 10_000_000;

    /** A 2-D point (metres). */
    private static class Coordinate {
        double x;
        double y;

        public Coordinate(double x, double y) {
            this.x = x;
            this.y = y;
        }
    }

    /** Edge target plus accumulated cost; ordered by ascending cost for the PQ. */
    private static class Node implements Comparable<Node> {
        int node;
        double cost;

        public Node(int node, double cost) {
            this.node = node;
            this.cost = cost;
        }

        @Override
        public int compareTo(Node n) {
            // Double.compare returns 0 for equal costs; the original's
            // `cost < n.cost ? -1 : 1` never did, which violates the
            // compareTo contract even though the PQ result is the same.
            return Double.compare(this.cost, n.cost);
        }
    }

    public static void main(String[] args) throws Exception {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StringTokenizer st = new StringTokenizer(br.readLine());
        start = new Coordinate(Double.parseDouble(st.nextToken()), Double.parseDouble(st.nextToken()));
        st = new StringTokenizer(br.readLine());
        end = new Coordinate(Double.parseDouble(st.nextToken()), Double.parseDouble(st.nextToken()));
        int N = Integer.parseInt(br.readLine());
        Coordinate[] fires = new Coordinate[N + 2];
        fires[0] = start;
        fires[N + 1] = end;
        for (int i = 1; i <= N; i++) {
            st = new StringTokenizer(br.readLine());
            fires[i] = new Coordinate(Double.parseDouble(st.nextToken()), Double.parseDouble(st.nextToken()));
        }
        settings(N, fires);
        System.out.println(dijkstra(N));
    }

    /** Standard Dijkstra from node 0; returns the minimal time to node n+1. */
    private static double dijkstra(int n) {
        double[] cost = new double[n + 2];
        Arrays.fill(cost, INF);
        PriorityQueue<Node> pq = new PriorityQueue<>();
        pq.offer(new Node(0, 0));
        cost[0] = 0;
        while (!pq.isEmpty()) {
            Node current = pq.poll();
            if (current.cost > cost[current.node]) continue; // stale queue entry
            for (Node next : graph[current.node]) {
                if (cost[next.node] <= cost[current.node] + next.cost) continue;
                cost[next.node] = cost[current.node] + next.cost;
                pq.offer(new Node(next.node, cost[next.node]));
            }
        }
        return cost[n + 1];
    }

    /**
     * Builds the travel-time graph. Walking a distance d costs d/5 seconds;
     * firing a cannon costs 2 seconds (50 m flight) plus |d - 50|/5 to walk
     * the remaining distance.
     */
    private static void settings(int n, Coordinate[] f) {
        graph = new ArrayList[n + 2];
        for (int i = 0; i < n + 2; i++) {
            graph[i] = new ArrayList<>();
        }
        graph[0].add(new Node(n + 1, getDistance(start, end) / 5)); // start -> end on foot
        for (int i = 1; i <= n; i++) {
            double d = getDistance(start, f[i]);
            double walk = d / 5;
            graph[0].add(new Node(i, walk)); // start -> cannon (walking only)
            d = getDistance(end, f[i]);
            // Bug fix: from a cannon you may either walk to the end or be
            // launched and walk the difference; the original only allowed the
            // launch here, overestimating cost when the end is close by,
            // even though the cannon->cannon edges below already take the min.
            graph[i].add(new Node(n + 1, Math.min(d / 5, Math.abs(d - 50) / 5 + 2))); // cannon -> end
        }
        for (int node1 = 1; node1 <= n; node1++) {
            for (int node2 = 1; node2 <= n; node2++) {
                if (node1 == node2) continue;
                double d = getDistance(f[node1], f[node2]);
                double walk = d / 5;
                graph[node1].add(new Node(node2, Math.min(walk, Math.abs(d - 50) / 5 + 2))); // cannon -> cannon
            }
        }
    }

    /** Euclidean distance between two points. */
    private static double getDistance(Coordinate c1, Coordinate c2) {
        double x = c1.x - c2.x;
        double y = c1.y - c2.y;
        return Math.sqrt(x * x + y * y);
    }
}
|
<reponame>Iraecio/cb-api<gh_stars>0
import { AuthModule } from './resolvers/auth/auth.module';
import { ConfigModule, ConfigService } from '@nestjs/config';
import { CacheModule, Module } from '@nestjs/common';
import { CoreModule } from './@core/core.module';
import { join } from 'path';
import { ApolloDriver, ApolloDriverConfig } from '@nestjs/apollo';
import { GraphQLModule } from '@nestjs/graphql';
import {
RedisClientOptions,
RedisModule,
RedisModuleOptions,
RedisService,
} from '@liaoliaots/nestjs-redis';
import { ThrottlerModule } from '@nestjs/throttler';
import { ThrottlerStorageRedisService } from './@core/@redis/throttler';
import * as redisStore from 'cache-manager-redis-store';
import { UserModule } from './resolvers/user/user.module';
// Root application module: global env config, Redis (app cache + throttling
// storage), GraphQL via Apollo (code-first schema), and the feature modules.
@Module({
  imports: [
    // Environment variables, available app-wide and cached after first read.
    ConfigModule.forRoot({
      isGlobal: true,
      cache: true,
    }),
    // Primary Redis connection, registered under the "cabir" namespace.
    RedisModule.forRootAsync({
      imports: [ConfigModule],
      inject: [ConfigService],
      useFactory(config: ConfigService): RedisModuleOptions {
        return {
          closeClient: true,
          readyLog: true,
          config: {
            namespace: 'cabir',
            host: config.get('REDIS_HOST'),
            port: config.get('REDIS_PORT'),
            password: config.get('REDIS_PASSWORD'),
          },
        };
      },
    }),
    // HTTP-level cache backed by the same Redis instance (120 s TTL).
    CacheModule.registerAsync<RedisClientOptions>({
      inject: [ConfigService],
      useFactory(config: ConfigService) {
        return {
          isGlobal: true,
          store: redisStore,
          host: config.get('REDIS_HOST'),
          port: config.get('REDIS_PORT'),
          password: config.get('REDIS_PASSWORD'),
          ttl: 120,
        };
      },
    }),
    // Rate limiting: 10 requests per 60 s, counters shared via Redis so the
    // limit holds across multiple app instances.
    ThrottlerModule.forRootAsync({
      inject: [RedisService],
      useFactory(redisService: RedisService) {
        const redis = redisService.getClient('cabir');
        return {
          ttl: 60,
          limit: 10,
          storage: new ThrottlerStorageRedisService(redis),
        };
      },
    }),
    // Code-first GraphQL: the schema is generated into src/schema.gql.
    GraphQLModule.forRoot<ApolloDriverConfig>({
      driver: ApolloDriver,
      buildSchemaOptions: {
        numberScalarMode: 'integer',
      },
      cors: true,
      debug: true,
      sortSchema: true,
      autoSchemaFile: join(process.cwd(), 'src/schema.gql'),
      playground: {
        settings: {
          'request.credentials': 'include',
        },
      },
      autoTransformHttpErrors: true,
      // Expose the HTTP request on the GraphQL context (used by auth guards).
      context: ({ req }) => ({ req }),
    }),
    CoreModule,
    AuthModule,
    UserModule,
  ],
  providers: [],
})
export class AppModule {}
|
package com.cgfy.mybatis.bussApi.utils.excel;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import com.cgfy.mybatis.bussApi.utils.excel.bean.WriteData;
import com.cgfy.mybatis.bussApi.utils.excel.handler.MyCellWriteHandler;
import com.alibaba.excel.EasyExcel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * title:
 * Description: Minimal EasyExcel write demo, including an image-URL column
 * rendered through a custom cell write handler.
 * Author: Administrator
 * Date: 2021/6/4
 */
public class WriteTest {
    private static final Logger log = LoggerFactory.getLogger(WriteTest.class);

    /**
     * Simplest possible write:
     * <p>
     * 1. Create the entity class that maps to the Excel columns
     * <p>
     * 2. Write the rows directly
     */
    public static void main(String[] args) throws MalformedURLException {
        // Build ten sample rows covering string, date, double and image cells.
        List<WriteData> list = new ArrayList<WriteData>();
        for (int i = 0; i < 10; i++) {
            WriteData data = new WriteData();
            data.setStringData("字符串" + i);
            data.setDateData(new Date());
            data.setDoubleData(0.56);
            data.setImgUrl(new URL("http://file.fangzuobiao.com:9000/5,04e1833f2957e9.jpg"));
            list.add(data);
        }
        // Variant 1: write to a local file path.
        String fileName = "E:/upload/simpleWrite" + System.currentTimeMillis() + ".xlsx";
        // Specify which class describes the columns, write to the first sheet;
        // EasyExcel closes the stream automatically.
        // EasyExcel.write(fileName, WriteData.class).sheet("模板").doWrite(list);
        EasyExcel.write(fileName, WriteData.class).registerWriteHandler(new MyCellWriteHandler()).sheet("Excel导出测试").doWrite(list);
    }
}
|
<filename>examples/angular/taskeditor/src/app/app.module.ts
/**
* @author Saki
* @date 2019-05-24 09:05:15
* @Last Modified by: Saki
* @Last Modified time: 2019-05-24 09:05:39
*/
import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { AppComponent } from './app.component';
import { BryntumAngularSharedModule } from 'bryntum-angular-shared';
// Root module: wires the Bryntum shared component library into the app and
// bootstraps the single top-level component.
@NgModule({
    declarations: [
        AppComponent
    ],
    imports: [
        BrowserModule,
        BryntumAngularSharedModule
    ],
    providers: [],
    bootstrap: [AppComponent]
})
export class AppModule { }
// eof |
<filename>Config/PostBuildOSXApp.py
#!/usr/bin/env python
import sys
import os
import shutil
from subprocess import call
def Run(args):
    """Run *args* as a child process via subprocess.call and wait for it."""
    call(args)
# CLI: post_build.py <app bundle> <Qt lib dir> <framework names...> [+1 extra arg]
appFile = sys.argv[1]
qtPath = sys.argv[2]
# NOTE(review): the slice drops the FINAL argv entry as well -- presumably a
# trailing sentinel passed by the build system; confirm against the caller.
libs = sys.argv[3:-1]
binaryName = os.path.splitext(os.path.basename(appFile))[0]
# Update DLL/Binary stoof
# Copy each Qt framework into the bundle's Frameworks folder.
frameworkFolder = appFile + "/Contents/Frameworks"
Run(["mkdir", "-p", frameworkFolder])
for lib in libs:
    Run(["cp", "-R", qtPath+"/"+lib+".framework", frameworkFolder])
# Rewrite each copied framework's install name (release and _debug variants)
# to be relative to the executable.
for lib in libs:
    Run(["install_name_tool", "-id",
        "@executable_path/../Frameworks/"+lib+".framework/"+lib,
        appFile+"/Contents/Frameworks/"+lib+".framework/"+lib])
    Run(["install_name_tool", "-id",
        "@executable_path/../Frameworks/"+lib+".framework/"+lib+"_debug",
        appFile+"/Contents/Frameworks/"+lib+".framework/"+lib+"_debug"])
# Point the app binary's load commands at the bundled frameworks.
for lib in libs:
    Run(["install_name_tool", "-change",
        qtPath+"/"+lib+".framework/"+lib,
        "@executable_path/../Frameworks/"+lib+".framework/"+lib,
        appFile+"/Contents/MacOS/"+binaryName])
    Run(["install_name_tool", "-change",
        qtPath+"/"+lib+".framework/"+lib,
        "@executable_path/../Frameworks/"+lib+".framework/"+lib+"_debug",
        appFile+"/Contents/MacOS/"+binaryName])
# Fix framework-to-framework references among the bundled copies.
for lib in libs:
    for libB in libs:
        Run(["install_name_tool", "-change",
            qtPath+"/"+lib+".framework/"+lib,
            "@executable_path/../Frameworks/"+lib+".framework/"+lib,
            appFile+"/Contents/Frameworks/"+libB+".framework/"+libB])
        Run(["install_name_tool", "-change",
            qtPath+"/"+lib+".framework/"+lib+"_debug",
            "@executable_path/../Frameworks/"+lib+".framework/"+lib+"_debug",
            appFile+"/Contents/Frameworks/"+libB+".framework/"+libB+"_debug"])
# Copy The pain... So much pain
# Bundle the cocoa platform plugin and repoint its Qt references too.
platformsFolder = appFile+"/Contents/Frameworks/platforms/"
Run(["mkdir", "-p", platformsFolder])
Run(["cp", "-R", qtPath+"/"+"../plugins/platforms/libqcocoa.dylib", platformsFolder])
for lib in libs:
    Run(["install_name_tool", "-change",
        qtPath+"/"+lib+".framework/"+lib,
        "@executable_path/../Frameworks/"+lib+".framework/"+lib,
        appFile+"/Contents/Frameworks/platforms/libqcocoa.dylib"])
    Run(["install_name_tool", "-change",
        qtPath+"/"+lib+".framework/"+lib+"_debug",
        "@executable_path/../Frameworks/"+lib+".framework/"+lib+"_debug",
        appFile+"/Contents/Frameworks/platforms/libqcocoa.dylib"])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.