text stringlengths 1 1.05M |
|---|
package stream
import (
"bytes"
"context"
"encoding/gob"
"fmt"
"net/http"
"github.com/google/zoekt"
"github.com/google/zoekt/query"
)
// NewClient returns a client which implements StreamSearch. If httpClient is
// nil, http.DefaultClient is used.
func NewClient(address string, httpClient *http.Client) *Client {
	registerGob()
	c := &Client{
		address:    address,
		httpClient: httpClient,
	}
	// Fall back to the shared default client when none was supplied.
	if c.httpClient == nil {
		c.httpClient = http.DefaultClient
	}
	return c
}
// Client is an HTTP client for StreamSearch. Do not create directly, call
// NewClient.
type Client struct {
	// address is the HTTP address of zoekt-webserver. StreamSearch queries
	// against address + DefaultSSEPath.
	address string
	// httpClient is the client used for requests; NewClient guarantees it is
	// non-nil (http.DefaultClient when the caller passed nil).
	httpClient *http.Client
}
// SenderFunc is an adapter to allow the use of ordinary functions as Sender.
// If f is a function with the appropriate signature, SenderFunc(f) is a Sender
// that calls f.
type SenderFunc func(result *zoekt.SearchResult)

// Send calls f(result), satisfying the zoekt.Sender interface.
func (f SenderFunc) Send(result *zoekt.SearchResult) {
	f(result)
}
// StreamSearch returns search results as stream by calling streamer.Send(event)
// for each event returned by the server.
//
// Error events returned by the server are returned as error. Context errors are
// recreated and returned on a best-efforts basis.
func (c *Client) StreamSearch(ctx context.Context, q query.Q, opts *zoekt.SearchOptions, streamer zoekt.Sender) error {
	// Encode query and opts into a gob payload for the request body.
	buf := new(bytes.Buffer)
	args := &searchArgs{
		q, opts,
	}
	enc := gob.NewEncoder(buf)
	err := enc.Encode(args)
	if err != nil {
		return fmt.Errorf("error during encoding: %w", err)
	}
	// Send request. The context carries cancellation/deadline for the whole
	// streaming call.
	req, err := http.NewRequestWithContext(ctx, "POST", c.address+DefaultSSEPath, buf)
	if err != nil {
		return err
	}
	// Ask the server for a gob-encoded event stream kept open until done.
	req.Header.Set("Accept", "application/x-gob-stream")
	req.Header.Set("Cache-Control", "no-cache")
	req.Header.Set("Connection", "keep-alive")
	req.Header.Set("Transfer-Encoding", "chunked")
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Decode events off the response body until the server signals completion
	// (eventDone) or an error occurs.
	dec := gob.NewDecoder(resp.Body)
	for {
		reply := &searchReply{}
		err := dec.Decode(reply)
		if err != nil {
			// NOTE(review): an io.EOF here (server closed the stream without
			// sending a done event) is also surfaced as a decode error.
			return fmt.Errorf("error during decoding: %w", err)
		}
		switch reply.Event {
		case eventMatches:
			// Forward each result batch to the caller's streamer.
			if res, ok := reply.Data.(*zoekt.SearchResult); ok {
				streamer.Send(res)
			} else {
				return fmt.Errorf("event of type %s could not be converted to *zoekt.SearchResult", eventMatches.string())
			}
		case eventError:
			// Server-side errors arrive as a string payload.
			if errString, ok := reply.Data.(string); ok {
				return fmt.Errorf("error received from zoekt: %s", errString)
			} else {
				return fmt.Errorf("data for event of type %s could not be converted to string", eventError.string())
			}
		case eventDone:
			// Normal termination of the stream.
			return nil
		default:
			return fmt.Errorf("unknown event type")
		}
	}
}
|
<reponame>Tri-stone/xupercore
package p2pv1
import (
xctx "github.com/xuperchain/xupercore/kernel/common/xcontext"
"testing"
"time"
"github.com/xuperchain/xupercore/kernel/mock"
nctx "github.com/xuperchain/xupercore/kernel/network/context"
"github.com/xuperchain/xupercore/kernel/network/p2p"
pb "github.com/xuperchain/xupercore/protos"
)
// Handler answers an incoming message with an empty response of the matching
// response message type, propagating the request's log id.
func Handler(ctx xctx.XContext, msg *pb.XuperMessage) (*pb.XuperMessage, error) {
	respType := p2p.GetRespMessageType(msg.Header.Type)
	return p2p.NewMessage(respType, msg, p2p.WithLogId(msg.Header.Logid)), nil
}
// startNode1 boots the first test node and registers two subscribers:
// a channel-based one for POSTTX and a handler-based one for GET_BLOCK.
func startNode1(t *testing.T) {
	ecfg, _ := mock.NewEnvConfForTest("p2pv1/node1/conf/env.yaml")
	ctx, _ := nctx.NewNetCtx(ecfg)
	node := NewP2PServerV1()
	if err := node.Init(ctx); err != nil {
		t.Errorf("server init error: %v", err)
		return
	}
	node.Start()
	// POSTTX messages are delivered into this buffered channel.
	ch := make(chan *pb.XuperMessage, 1024)
	if err := node.Register(p2p.NewSubscriber(ctx, pb.XuperMessage_POSTTX, ch)); err != nil {
		t.Errorf("register subscriber error: %v", err)
	}
	if err := node.Register(p2p.NewSubscriber(ctx, pb.XuperMessage_GET_BLOCK, p2p.HandleFunc(Handler))); err != nil {
		t.Errorf("register subscriber error: %v", err)
	}
	// Logs only the first POSTTX message, then the goroutine exits.
	go func(t *testing.T) {
		select {
		case msg := <-ch:
			t.Logf("recv msg: log_id=%v, msgType=%s", msg.GetHeader().GetLogid(), msg.GetHeader().GetType())
		}
	}(t)
}
// startNode2 boots the second test node and registers a GET_BLOCK handler
// so it can answer requests from the other nodes.
func startNode2(t *testing.T) {
	ecfg, _ := mock.NewEnvConfForTest("p2pv1/node2/conf/env.yaml")
	ctx, _ := nctx.NewNetCtx(ecfg)
	node := NewP2PServerV1()
	if err := node.Init(ctx); err != nil {
		t.Errorf("server init error: %v", err)
		return
	}
	node.Start()
	if err := node.Register(p2p.NewSubscriber(ctx, pb.XuperMessage_GET_BLOCK, p2p.HandleFunc(Handler))); err != nil {
		t.Errorf("register subscriber error: %v", err)
	}
}
// startNode3 boots the third test node and exercises both send paths:
// fire-and-forget SendMessage (POSTTX) and request/response
// SendMessageWithResponse (GET_BLOCK).
func startNode3(t *testing.T) {
	ecfg, _ := mock.NewEnvConfForTest("p2pv1/node3/conf/env.yaml")
	ctx, _ := nctx.NewNetCtx(ecfg)
	node := NewP2PServerV1()
	if err := node.Init(ctx); err != nil {
		t.Errorf("server init error: %v", err)
		return
	}
	node.Start()
	msg := p2p.NewMessage(pb.XuperMessage_POSTTX, nil)
	if err := node.SendMessage(ctx, msg); err != nil {
		t.Errorf("sendMessage error: %v", err)
	}
	msg = p2p.NewMessage(pb.XuperMessage_GET_BLOCK, nil)
	if responses, err := node.SendMessageWithResponse(ctx, msg); err != nil {
		t.Errorf("sendMessage error: %v", err)
	} else {
		for i, resp := range responses {
			// NOTE(review): the format says log_id but prints the whole
			// response message — confirm whether resp.GetHeader().GetLogid()
			// was intended.
			t.Logf("resp[%d]: log_id=%v", i, resp)
		}
	}
}
// TestP2PServerV1 brings up three nodes in sequence, pausing one second
// after each so the network can settle before the next one joins.
func TestP2PServerV1(t *testing.T) {
	mock.InitLogForTest()
	for _, start := range []func(*testing.T){startNode1, startNode2, startNode3} {
		start(t)
		time.Sleep(time.Second)
	}
}
|
<reponame>smagill/opensphere-desktop<filename>open-sphere-plugins/infinity/src/main/java/io/opensphere/infinity/model/InfinitySettingsModel.java<gh_stars>10-100
package io.opensphere.infinity.model;
import javafx.application.Platform;
import javafx.beans.property.BooleanProperty;
import io.opensphere.core.preferences.BooleanPreferenceBinding;
import io.opensphere.core.preferences.Preferences;
import io.opensphere.core.util.CompositeService;
import io.opensphere.core.util.javafx.ConcurrentBooleanProperty;
/** Infinity settings model. */
public class InfinitySettingsModel extends CompositeService
{
    /**
     * The enabled setting. Defaults to {@code true}. Backed by a
     * ConcurrentBooleanProperty — presumably safe for cross-thread access;
     * confirm against that class's contract.
     */
    private final BooleanProperty myEnabled = new ConcurrentBooleanProperty(this, "enabled", true);

    /**
     * Constructor.
     *
     * @param preferences the preferences
     */
    public InfinitySettingsModel(Preferences preferences)
    {
        // Keep myEnabled in sync with the stored preference; updates are
        // marshalled onto the JavaFX application thread via Platform::runLater.
        addService(new BooleanPreferenceBinding(myEnabled, preferences, Platform::runLater));
    }

    /**
     * Gets the enabled property.
     *
     * @return the enabled property
     */
    public BooleanProperty enabledProperty()
    {
        return myEnabled;
    }
}
|
<filename>polyfills/String/prototype/sup/tests.js
/* eslint-env mocha */
/* globals proclaim */

// Detect strict-mode support: inside a strict-mode function invoked with
// .call(null), `this` stays null; in sloppy mode it is coerced to the
// global object, so the comparison is false.
var hasStrictMode = (function () {
	return this === null;
}).call(null);

// The TypeError-on-null/undefined assertions only hold in strict mode.
var ifHasStrictModeIt = hasStrictMode ? it : it.skip;

it('is a function', function () {
	proclaim.isFunction(String.prototype.sup);
});

it('has correct arity', function () {
	proclaim.arity(String.prototype.sup, 0);
});

it('has correct name', function () {
	proclaim.hasName(String.prototype.sup, 'sup');
});

it('is not enumerable', function () {
	proclaim.isNotEnumerable(String.prototype, 'sup');
});

ifHasStrictModeIt('should throw a TypeError when called with undefined context', function () {
	proclaim.throws(function () {
		String.prototype.sup.call(undefined);
	}, TypeError);
});

ifHasStrictModeIt('should throw a TypeError when called with null context', function () {
	proclaim.throws(function () {
		String.prototype.sup.call(null);
	}, TypeError);
});

// Per spec, the content is not HTML-escaped ('<' stays '<') and non-string
// receivers are coerced with ToString.
it('works on strings correctly', function() {
	proclaim.deepEqual('_'.sup(), '<sup>_</sup>');
	proclaim.deepEqual('<'.sup(), '<sup><</sup>');
	proclaim.deepEqual(String.prototype.sup.call(1234), '<sup>1234</sup>');
});
<gh_stars>0
package pl.jacob_the_liar.tic_tac_toe.core;
/**
 * Model of a tic-tac-toe match: tracks the 3x3 board, whose turn it is,
 * and running statistics (games played, wins per player, draws).
 */
public class TicTacToe {

    /** Index triplets of all eight winning lines (rows, columns, diagonals). */
    private static final int[][] WIN_ROW = {
            {0, 1, 2}, {0, 4, 8}, {0, 3, 6}, {1, 4, 7},
            {2, 4, 6}, {2, 5, 8}, {3, 4, 5}, {6, 7, 8}};

    /** The nine fields of the board; Player.NONE marks an empty field. */
    private Player[] board;
    /** The player whose move it currently is. */
    private Player currentPlayer;
    /** Number of games started via startNewGame(). */
    private int games;
    private int circleWins;
    private int crossWins;
    private int draws;

    public TicTacToe() {
        board = new Player[9];
        // Fix: fill the board with Player.NONE immediately. Previously the
        // cells stayed null until the first startNewGame() call, so
        // getFieldStatus() returned null and isAvailableMove() was false for
        // every field before a game was started.
        clearBoard();
        games = 0;
        circleWins = 0;
        crossWins = 0;
        draws = 0;
        currentPlayer = Player.NONE;
    }

    /** Clears the board, picks a starting player if needed, and counts the game. */
    public void startNewGame() {
        clearBoard();
        if (currentPlayer == Player.NONE)
            setNextPlayer();
        games++;
    }

    /**
     * Returns the owner of the given field (Player.NONE when empty).
     *
     * @param fieldNumber board index in the range 0..8
     */
    public Player getFieldStatus(int fieldNumber) {
        return board[fieldNumber];
    }

    /** @return number of games started so far */
    public int getGamesCount() {
        return games;
    }

    /** @return number of games won by the circle player */
    public int getCircleWins() {
        return circleWins;
    }

    /** @return number of games won by the cross player */
    public int getCrossWins() {
        return crossWins;
    }

    /** @return number of drawn games */
    public int getDraws() {
        return draws;
    }

    /** @return the player whose move it currently is */
    public Player getCurrentPlayer() {
        return currentPlayer;
    }

    /** @return true if the given field is empty and may be played */
    public boolean isAvailableMove(int fieldNumber) {
        return getFieldStatus(fieldNumber) == Player.NONE;
    }

    /**
     * Places the current player's mark on the given field (if the field is
     * free and the game is still running) and passes the turn.
     */
    public void setMove(int fieldNumber) {
        if (isAvailableMove(fieldNumber) && !isEndGame()) {
            board[fieldNumber] = currentPlayer;
            setNextPlayer();
        }
    }

    /**
     * Returns whether the game has ended (win or draw). As a side effect the
     * score counters are updated exactly once per game (guarded inside
     * setPlayerScore), preserving the original contract relied on by callers.
     */
    public boolean isEndGame() {
        boolean result = false;
        result |= isWin(Player.CIRCLE);
        result |= isWin(Player.CROSS);
        result |= isDraw();
        if (result)
            setScore();
        return result;
    }

    /**
     * Returns the winner of a finished game, or Player.NONE for a draw or a
     * game still in progress.
     */
    public Player whoWonGame() {
        if (isEndGame()) {
            if (isDraw())
                return Player.NONE;
            else if (isWin(Player.CROSS))
                return Player.CROSS;
            else if (isWin(Player.CIRCLE))
                return Player.CIRCLE;
        }
        return Player.NONE;
    }

    /** Credits the finished game to the winner (or the draw counter). */
    private void setScore() {
        if (isDraw())
            setPlayerScore(Player.NONE);
        if (isWin(Player.CIRCLE))
            setPlayerScore(Player.CIRCLE);
        if (isWin(Player.CROSS))
            setPlayerScore(Player.CROSS);
    }

    /** Alternates the turn; also picks CIRCLE as the very first player. */
    private void setNextPlayer() {
        if (currentPlayer != Player.CIRCLE)
            currentPlayer = Player.CIRCLE;
        else
            currentPlayer = Player.CROSS;
    }

    /** Resets every field to Player.NONE. */
    private void clearBoard() {
        for (int i = 0; i < board.length; i++)
            board[i] = Player.NONE;
    }

    /** A draw means the board is full and neither player has a winning line. */
    private boolean isDraw() {
        for (int i = 0; i < 9; i++)
            if (isAvailableMove(i))
                return false;
        boolean playerWin = false;
        playerWin |= isWin(Player.CIRCLE);
        playerWin |= isWin(Player.CROSS);
        return !playerWin;
    }

    /** Returns true if the given player occupies any complete winning line. */
    private boolean isWin(Player player) {
        for (int i = 0; i < WIN_ROW.length; i++)
            if (isRowWin(player, WIN_ROW[i]))
                return true;
        return false;
    }

    /**
     * Increments the proper counter, but only while the score for the current
     * game has not been recorded yet (isEndGame may be called repeatedly).
     */
    private void setPlayerScore(Player player) {
        if (getGamesCount() != getDraws() + getCircleWins() + getCrossWins()) {
            if (player == Player.NONE)
                draws++;
            if (player == Player.CROSS)
                crossWins++;
            if (player == Player.CIRCLE)
                circleWins++;
        }
    }

    /** Returns true if the player owns all three fields of the given line. */
    private boolean isRowWin(Player player, int[] check) {
        boolean result = true;
        result &= board[check[0]] == player;
        result &= board[check[1]] == player;
        result &= board[check[2]] == player;
        return result;
    }
}
|
/**
 * Returns the use boxes reachable from this box: for a base box its own use
 * boxes followed by the base box itself, then the index box's use boxes.
 * A visited set guards against revisiting boxes within one traversal.
 */
public List<Box> getUseBoxes() {
    List<Box> useBoxes = new ArrayList<>();
    Set<Box> visited = new HashSet<>();
    collectUseBoxes(this, useBoxes, visited);
    return useBoxes;
}
private void collectUseBoxes(Box currentBox, List<Box> useBoxes, Set<Box> visited) {
if (currentBox == null || visited.contains(currentBox)) {
return;
}
visited.add(currentBox);
if (currentBox.baseBox != null) {
useBoxes.addAll(currentBox.baseBox.getUseBoxes());
useBoxes.add(currentBox.baseBox);
}
if (currentBox.indexBox != null) {
useBoxes.addAll(currentBox.indexBox.getUseBoxes());
}
} |
<reponame>Sherlock92/greentop
/**
* Copyright 2017 <NAME>. Distributed under the MIT license.
*/
#include "greentop/account/GetAffiliateRelationRequest.h"
namespace greentop {
namespace account {
// Default constructor: request with an empty vendor client id list
// (isValid() will be false until ids are added).
GetAffiliateRelationRequest::GetAffiliateRelationRequest() {
}

// Constructs a request for the given vendor client ids.
GetAffiliateRelationRequest::GetAffiliateRelationRequest(const std::vector<std::string>& vendorClientIds) :
    vendorClientIds(vendorClientIds) {
}
// Populates the request from JSON, appending every entry of the optional
// "vendorClientIds" array to the current list.
void GetAffiliateRelationRequest::fromJson(const Json::Value& json) {
    if (json.isMember("vendorClientIds")) {
        const Json::Value& ids = json["vendorClientIds"];
        for (unsigned i = 0; i < ids.size(); ++i) {
            vendorClientIds.push_back(ids[i].asString());
        }
    }
}
// Serializes the request to JSON. The "vendorClientIds" member is only
// created when at least one id is present (the append in the loop body
// materializes it lazily), matching the previous behaviour.
Json::Value GetAffiliateRelationRequest::toJson() const {
    Json::Value json(Json::objectValue);
    for (unsigned i = 0; i < vendorClientIds.size(); ++i) {
        json["vendorClientIds"].append(vendorClientIds[i]);
    }
    return json;
}
// A request is valid once it carries at least one vendor client id.
bool GetAffiliateRelationRequest::isValid() const {
    return !vendorClientIds.empty();
}
// Accessor for the vendor client id list.
const std::vector<std::string>& GetAffiliateRelationRequest::getVendorClientIds() const {
    return vendorClientIds;
}

// Replaces the vendor client id list with a copy of the given vector.
void GetAffiliateRelationRequest::setVendorClientIds(const std::vector<std::string>& vendorClientIds) {
    this->vendorClientIds = vendorClientIds;
}
}
}
|
<reponame>saiichihashimoto/lint-my-app
#!/usr/bin/env node
/* istanbul ignore file */
import updateNotifier from 'update-notifier';
import { Command } from 'commander';
import pkg from '../package';
import lintMyAppFix from './lint-my-app-fix';
import lintMyAppLint from './lint-my-app-lint';
import lintMyAppStaged from './lint-my-app-staged';
// Nudge the user when a newer version of this package has been published.
updateNotifier({ pkg }).notify();

// Set by whichever sub-command commander matches below.
let action;

const program = new Command()
	.version(pkg.version);

program
	.command('lint')
	.option('--no-pkg-ok')
	.option('--no-eslint')
	.option('--no-stylelint')
	.option('--no-jsonlint')
	.option('--no-dot')
	.action(() => { action = lintMyAppLint; });

program
	.command('fix')
	.option('--no-sort-package-json')
	.option('--no-eslint')
	.option('--no-stylelint')
	.option('--no-fixjson')
	.option('--no-imagemin')
	.option('--no-dot')
	.action(() => { action = lintMyAppFix; });

program
	.command('staged')
	.action(() => { action = lintMyAppStaged; });

program.parse(process.argv);

// NOTE(review): when no recognized sub-command is given, `action` is still
// undefined here and this call throws a TypeError — confirm whether commander
// rejects missing/unknown commands before reaching this point.
action(program)
	.catch((err) => { /* eslint-disable-line promise/prefer-await-to-callbacks */
		// Walk the (possibly aggregated) error tree breadth-first and print
		// the most useful field of each node: nested errors, full output,
		// stderr, stdout, or the error itself. "Staged Failed" is a
		// deliberate silent failure marker.
		const queue = [err];
		while (queue.length) {
			const currentErr = queue.shift();
			if (currentErr.errors) {
				queue.push(...currentErr.errors);
			} else if (currentErr.all) {
				console.log(currentErr.all); /* eslint-disable-line no-console */
			} else if (currentErr.stderr) {
				console.error(currentErr.stderr); /* eslint-disable-line no-console */
			} else if (currentErr.stdout) {
				console.log(currentErr.stdout); /* eslint-disable-line no-console */
			} else if (currentErr.message !== 'Staged Failed') {
				console.error(currentErr); /* eslint-disable-line no-console */
			}
		}
		process.exit(1);
	});
|
#!/bin/bash
# Print the largest of three integer arguments.
#
# Usage: max3 NUM1 NUM2 NUM3

# Fix: validate the argument count up front; previously a missing argument
# produced a cryptic "unary operator expected" error from `[`.
if [ "$#" -ne 3 ]; then
	echo "usage: $0 NUM1 NUM2 NUM3" >&2
	exit 1
fi

# Quote the arguments so empty/odd values fail the test cleanly.
if [ "$1" -gt "$2" ] && [ "$1" -gt "$3" ]; then
	echo "$1"
elif [ "$2" -gt "$3" ]; then
	echo "$2"
else
	echo "$3"
fi
#!/bin/bash
#---------------------------------------------
# Accords Platform Installation Validity Check
#---------------------------------------------
# Non-empty => progress messages are printed; empty => quiet mode.
verbose=1
# Recursion guard for failure(): set while a restart attempt is in flight.
level=

# success CATEGORY — report a successfully resolved category.
# In quiet mode it only normalizes $verbose to 0.
function success()
{
	if [ -z "$verbose" ]; then
		verbose=0
	else
		echo "--" $1 ": OK"
	fi
}
# failure CATEGORY SERVER — handle a missing category.
# First attempt: restart the category server and re-check once ($level acts
# as a recursion guard). Second attempt: after user confirmation, restart
# the whole Accords platform.
function failure()
{
	echo "------------------------------------------"
	echo "The category $1 is missing "
	echo "The category server $2 is not running and "
	echo "needs to be restarted. "
	echo "------------------------------------------"
	if [ -z "$level" ]; then
		# First failure for this check: try restarting just this server.
		echo "Launching run-$2 "
		echo "------------------------------------------"
		level=1
		sleep 1
		/usr/local/bin/run-$2 &
		sleep 5
		echo "Rechecking $1 $2 "
		echo "------------------------------------------"
		sleep 2
		# Re-check with the guard set so a second failure falls through
		# to the full platform restart below.
		check $1 $2
		level=
	else
		# Still failing after the restart attempt: restart the platform.
		echo "Validate to restart the accords platform "
		echo "------------------------------------------"
		# Waits for the user to press enter before proceeding.
		read p
		echo "------------------------------------------"
		echo "Shuting down the Accords Platform in 5 sec"
		echo "------------------------------------------"
		sleep 5
		co-stop
		echo "------------------------------------------"
		echo " Awaiting TIME-WAIT and CLOSE completion "
		echo "------------------------------------------"
		sleep 30
		echo "Starting up the Accords Platform in 5 sec"
		echo "------------------------------------------"
		sleep 5
		co-start
		exit 0
	fi
}
# check CATEGORY SERVER — resolve CATEGORY via co-resolver; report success,
# or hand off to failure() (which may restart SERVER) when it is missing.
function check()
{
	v=$(/usr/local/bin/co-resolver $1)
	if [ -n "$v" ]; then
		success $1
	else
		failure $1 $2
	fi
}
# verification — probe every Accords category and the server expected to
# provide it; each line is "check CATEGORY SERVER".
function verification()
{
	check import parser
	check manifest parser
	check node procci
	check infrastructure procci
	check compute procci
	check storage procci
	check network conets
	check port conets
	check image ezvm
	check system ezvm
	check package ezvm
	check configuration parser
	check action broker
	check release parser
	check interface parser
	check security coss
	check account coobas
	check user coss
	check plan broker
	check service broker
	check instance broker
	check contract procci
	check instruction parser
	check firewall conets
	check authorization coss
	check provider broker
	check profile broker
	check monitor comons
	check session comons
	check consumer comons
	check event comons
	check alert comons
	check placement coes
	check quota coes
	check algorithm coes
	check schedule cosched
	check vm ezvm
	check application coips
	check price coobas
	check invoice coobas
	check agreement slam
	check terms slam
	check term slam
	check variable slam
	check guarantee slam
	check business slam
	check control slam
	check penalty slam
	check openstack osprocci
	check opennebula onprocci
	check windowsazure azprocci
	check jpaas jpaasprocci
	check paas_application copass
	check paas_deployable copass
	check paas_link copass
	check paas_environment copass
	check paas_manifest copass
	check paas_node copass
	check paas_version copass
	check paas_relation copass
	check paas_version_instance copass
	check paas_configuration_option copass
	check paas_configuration_template copass
}
# Print the banner, run all checks, print the footer. As shipped verbose=1,
# so both banners are always printed; the quiet branches are no-ops.
if [ -z "$verbose" ]; then
	verbose=
else
	echo "-- Verification of Accords Platform Category Managers --"
fi
verification
if [ -z "$verbose" ]; then
	verbose=
else
	echo "-- Verification Terminated -- "
fi
|
import path from 'path';
import fs from 'fs';
import {serial as test} from 'ava';
import tempy from 'tempy';
import makeDir from 'make-dir';
import del from '.';
// Remember the real working directory so tests that chdir() can restore it.
const processCwd = process.cwd();

// Assert that every given path exists inside the per-test temp directory.
function exists(t, files) {
	for (const file of files) {
		t.true(fs.existsSync(path.join(t.context.tmp, file)));
	}
}

// Assert that none of the given paths exist inside the per-test temp directory.
function notExists(t, files) {
	for (const file of files) {
		t.false(fs.existsSync(path.join(t.context.tmp, file)));
	}
}

// Directory names created fresh for every test by the beforeEach hook.
const fixtures = [
	'1.tmp',
	'2.tmp',
	'3.tmp',
	'4.tmp',
	'.dot.tmp'
];

// Each test runs against its own temp directory pre-populated with fixtures.
test.beforeEach(t => {
	t.context.tmp = tempy.directory();
	for (const fixture of fixtures) {
		makeDir.sync(path.join(t.context.tmp, fixture));
	}
});
// Basic globbing: negated patterns are kept; dotfiles are skipped by default.
test('delete files - async', async t => {
	await del(['*.tmp', '!1*'], {cwd: t.context.tmp});
	exists(t, ['1.tmp', '.dot.tmp']);
	notExists(t, ['2.tmp', '3.tmp', '4.tmp']);
});

test('delete files - sync', t => {
	del.sync(['*.tmp', '!1*'], {cwd: t.context.tmp});
	exists(t, ['1.tmp', '.dot.tmp']);
	notExists(t, ['2.tmp', '3.tmp', '4.tmp']);
});

// The `dot` option makes the globs match dotfiles too.
test('take options into account - async', async t => {
	await del(['*.tmp', '!1*'], {
		cwd: t.context.tmp,
		dot: true
	});
	exists(t, ['1.tmp']);
	notExists(t, ['2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
});

test('take options into account - sync', t => {
	del.sync(['*.tmp', '!1*'], {
		cwd: t.context.tmp,
		dot: true
	});
	exists(t, ['1.tmp']);
	notExists(t, ['2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
});

// del resolves and returns the absolute paths of what it deleted.
test('return deleted files - async', async t => {
	t.deepEqual(
		await del('1.tmp', {cwd: t.context.tmp}),
		[path.join(t.context.tmp, '1.tmp')]
	);
});

test('return deleted files - sync', t => {
	t.deepEqual(
		del.sync('1.tmp', {cwd: t.context.tmp}),
		[path.join(t.context.tmp, '1.tmp')]
	);
});

// dryRun reports what would be deleted without touching the filesystem.
test('don\'t delete files, but return them - async', async t => {
	const deletedFiles = await del(['*.tmp', '!1*'], {
		cwd: t.context.tmp,
		dryRun: true
	});
	exists(t, ['1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	t.deepEqual(deletedFiles, [
		path.join(t.context.tmp, '2.tmp'),
		path.join(t.context.tmp, '3.tmp'),
		path.join(t.context.tmp, '4.tmp')
	]);
});

test('don\'t delete files, but return them - sync', t => {
	const deletedFiles = del.sync(['*.tmp', '!1*'], {
		cwd: t.context.tmp,
		dryRun: true
	});
	exists(t, ['1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	t.deepEqual(deletedFiles, [
		path.join(t.context.tmp, '2.tmp'),
		path.join(t.context.tmp, '3.tmp'),
		path.join(t.context.tmp, '4.tmp')
	]);
});
// Currently this is only testable locally on macOS.
// https://github.com/sindresorhus/del/issues/68
// Repeatedly recreate and delete a nested tree to provoke the EINVAL race.
test('does not throw EINVAL - async', async t => {
	await del('**/*', {
		cwd: t.context.tmp,
		dot: true
	});
	const nestedFile = path.resolve(t.context.tmp, 'a/b/c/nested.js');
	const totalAttempts = 200;
	let count = 0;
	while (count !== totalAttempts) {
		makeDir.sync(nestedFile);
		// eslint-disable-next-line no-await-in-loop
		const removed = await del('**/*', {
			cwd: t.context.tmp,
			dot: true
		});
		const expected = [
			path.resolve(t.context.tmp, 'a'),
			path.resolve(t.context.tmp, 'a/b'),
			path.resolve(t.context.tmp, 'a/b/c'),
			path.resolve(t.context.tmp, 'a/b/c/nested.js')
		];
		t.deepEqual(removed, expected);
		count += 1;
	}
	notExists(t, [...fixtures, 'a']);
	t.is(count, totalAttempts);
});

test('does not throw EINVAL - sync', t => {
	del.sync('**/*', {
		cwd: t.context.tmp,
		dot: true
	});
	const nestedFile = path.resolve(t.context.tmp, 'a/b/c/nested.js');
	const totalAttempts = 200;
	let count = 0;
	while (count !== totalAttempts) {
		makeDir.sync(nestedFile);
		const removed = del.sync('**/*', {
			cwd: t.context.tmp,
			dot: true
		});
		const expected = [
			path.resolve(t.context.tmp, 'a'),
			path.resolve(t.context.tmp, 'a/b'),
			path.resolve(t.context.tmp, 'a/b/c'),
			path.resolve(t.context.tmp, 'a/b/c/nested.js')
		];
		t.deepEqual(removed, expected);
		count += 1;
	}
	notExists(t, [...fixtures, 'a']);
	t.is(count, totalAttempts);
});
// The cwd option scopes deletion to a directory other than process.cwd().
test('delete relative files outside of process.cwd using cwd - async', async t => {
	await del(['1.tmp'], {cwd: t.context.tmp});
	exists(t, ['2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	notExists(t, ['1.tmp']);
});

test('delete relative files outside of process.cwd using cwd - sync', t => {
	del.sync(['1.tmp'], {cwd: t.context.tmp});
	exists(t, ['2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	notExists(t, ['1.tmp']);
});

test('delete absolute files outside of process.cwd using cwd - async', async t => {
	const absolutePath = path.resolve(t.context.tmp, '1.tmp');
	await del([absolutePath], {cwd: t.context.tmp});
	exists(t, ['2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	notExists(t, ['1.tmp']);
});

test('delete absolute files outside of process.cwd using cwd - sync', t => {
	const absolutePath = path.resolve(t.context.tmp, '1.tmp');
	del.sync([absolutePath], {cwd: t.context.tmp});
	exists(t, ['2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	notExists(t, ['1.tmp']);
});

// Safety rails: deleting the current working directory requires force: true.
test('cannot delete actual working directory without force: true - async', async t => {
	process.chdir(t.context.tmp);
	await t.throwsAsync(del([t.context.tmp]), {
		instanceOf: Error,
		message: 'Cannot delete the current working directory. Can be overridden with the `force` option.'
	});
	exists(t, ['', '1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	process.chdir(processCwd);
});

test('cannot delete actual working directory without force: true - sync', t => {
	process.chdir(t.context.tmp);
	t.throws(() => {
		del.sync([t.context.tmp]);
	}, {
		instanceOf: Error,
		message: 'Cannot delete the current working directory. Can be overridden with the `force` option.'
	});
	exists(t, ['', '1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	process.chdir(processCwd);
});

test('cannot delete actual working directory with cwd option without force: true - async', async t => {
	process.chdir(t.context.tmp);
	await t.throwsAsync(del([t.context.tmp], {cwd: __dirname}), {
		instanceOf: Error,
		message: 'Cannot delete the current working directory. Can be overridden with the `force` option.'
	});
	exists(t, ['', '1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	process.chdir(processCwd);
});

test('cannot delete actual working directory with cwd option without force: true - sync', t => {
	process.chdir(t.context.tmp);
	t.throws(() => {
		del.sync([t.context.tmp], {cwd: __dirname});
	}, {
		instanceOf: Error,
		message: 'Cannot delete the current working directory. Can be overridden with the `force` option.'
	});
	exists(t, ['', '1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	process.chdir(processCwd);
});
// Deleting outside the effective cwd also requires force: true.
test('cannot delete files outside cwd without force: true - async', async t => {
	const absolutePath = path.resolve(t.context.tmp, '1.tmp');
	await t.throwsAsync(del([absolutePath]), {
		instanceOf: Error,
		message: 'Cannot delete files/directories outside the current working directory. Can be overridden with the `force` option.'
	});
	exists(t, ['1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
});

test('cannot delete files outside cwd without force: true - sync', t => {
	const absolutePath = path.resolve(t.context.tmp, '1.tmp');
	t.throws(() => {
		del.sync([absolutePath]);
	}, {
		instanceOf: Error,
		message: 'Cannot delete files/directories outside the current working directory. Can be overridden with the `force` option.'
	});
	exists(t, ['', '1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
});

// The cwd option wins over process.cwd() for the outside-cwd check.
test('cannot delete files inside process.cwd when outside cwd without force: true - async', async t => {
	process.chdir(t.context.tmp);
	const removeFile = path.resolve(t.context.tmp, '2.tmp');
	const cwd = path.resolve(t.context.tmp, '1.tmp');
	await t.throwsAsync(del([removeFile], {cwd}), {
		instanceOf: Error,
		message: 'Cannot delete files/directories outside the current working directory. Can be overridden with the `force` option.'
	});
	exists(t, ['1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	process.chdir(processCwd);
});

test('cannot delete files inside process.cwd when outside cwd without force: true - sync', t => {
	process.chdir(t.context.tmp);
	const removeFile = path.resolve(t.context.tmp, '2.tmp');
	const cwd = path.resolve(t.context.tmp, '1.tmp');
	t.throws(() => {
		del.sync([removeFile], {cwd});
	}, {
		instanceOf: Error,
		message: 'Cannot delete files/directories outside the current working directory. Can be overridden with the `force` option.'
	});
	exists(t, ['1.tmp', '2.tmp', '3.tmp', '4.tmp', '.dot.tmp']);
	process.chdir(processCwd);
});

// path.resolve produces backslash separators on Windows; del must accept them.
test('windows can pass absolute paths with "\\" - async', async t => {
	const filePath = path.resolve(t.context.tmp, '1.tmp');
	const removeFiles = await del([filePath], {cwd: t.context.tmp, dryRun: true});
	t.deepEqual(removeFiles, [filePath]);
});

test('windows can pass absolute paths with "\\" - sync', t => {
	const filePath = path.resolve(t.context.tmp, '1.tmp');
	const removeFiles = del.sync([filePath], {cwd: t.context.tmp, dryRun: true});
	t.deepEqual(removeFiles, [filePath]);
});

test('windows can pass relative paths with "\\" - async', async t => {
	const nestedFile = path.resolve(t.context.tmp, 'a/b/c/nested.js');
	makeDir.sync(nestedFile);
	const removeFiles = await del([nestedFile], {cwd: t.context.tmp, dryRun: true});
	t.deepEqual(removeFiles, [nestedFile]);
});

test('windows can pass relative paths with "\\" - sync', t => {
	const nestedFile = path.resolve(t.context.tmp, 'a/b/c/nested.js');
	makeDir.sync(nestedFile);
	const removeFiles = del.sync([nestedFile], {cwd: t.context.tmp, dryRun: true});
	t.deepEqual(removeFiles, [nestedFile]);
});
|
use std::collections::HashMap;
/// Parses `key => value` attribute pairs out of a macro invocation string.
///
/// Looks at the text between the first `"!("` and the last `')'` and splits
/// it on commas, so values that themselves contain commas are not supported;
/// fragments without a `=>` separator are skipped.
///
/// Panics if the input contains no `"!("` or no `')'`.
fn parse_svg_macro(macro_invocation: &str) -> HashMap<String, String> {
    let mut attributes_map = HashMap::new();
    let start_index = macro_invocation.find("!(").unwrap();
    let end_index = macro_invocation.rfind(')').unwrap();
    // Fix 1: the slice expression was mojibake-corrupted ("&macro_" had
    // become "¯o_"), which did not compile.
    // Fix 2: skip both characters of "!(" — the original skipped only the
    // '!', leaving a stray '(' glued to the first attribute key.
    let attributes_str = &macro_invocation[start_index + 2..end_index];
    for attribute_pair in attributes_str.split(',') {
        let parts: Vec<&str> = attribute_pair.trim().split("=>").collect();
        // Fix 3: fragments produced by a comma that did not separate a pair
        // (e.g. a stray ")" or trailing comma) have no "=>" and would
        // previously panic on the parts[1] index.
        if parts.len() < 2 {
            continue;
        }
        let attribute = parts[0].trim().trim_matches(|c| c == '"' || c == ' ').to_string();
        let value = parts[1].trim().trim_matches(|c| c == '"' || c == ' ').to_string();
        attributes_map.insert(attribute, value);
    }
    attributes_map
}
fn main() {
    // Sample invocation copied from a Seed-style `path![attrs!(...)]` macro;
    // note the nested `attrs!(...)` list and trailing top-level attributes.
    let macro_invocation = r#"path![attrs!(
At::from("d") => "M13.828 10.172a4 4 0 00-5.656 0l-4 4a4 4 0 105.656 5.656l1.102-1.101m-.758-4.899a4 4 0 005.656 0l4-4a4 4 0 00-5.656-5.656l-1.1 1.1",
At::from("stroke-linecap") => "round",
),
At::from("fill") => "none",
At::from("stroke") => "currentColor",
At::from("viewBox") => "0 0 24 24",
)"#;
    // Parse and dump the extracted attribute map (debug formatting).
    let result = parse_svg_macro(macro_invocation);
    println!("{:?}", result);
}
#!/bin/bash
# Removing, locally and from the remote, the branches already merged in master.
# Be careful with reverted branches: `git branch --merged` does not know about reverts.
git checkout master
# Update our list of remotes
git fetch
git remote prune origin
# Remove local fully merged branches
git branch --merged master | grep -v 'master$' | xargs git branch -d
# Show remote fully merged branches in master
echo "The following remote branches are fully merged in master and will be removed:"
git branch -r --merged master | sed 's/ *origin\///' | grep -v 'master$'
# `read -p` with no variable name stores the answer in $REPLY (checked below).
read -p "Are you sure there are no reverted branches ? Continue (y/n)? "
if [ "$REPLY" == "y" ]
then
	# Remove remote fully merged branches by pushing an empty ref to each.
	git branch -r --merged master | sed 's/ *origin\///' \
		| grep -v 'master$' | xargs -I% git push origin :%
	echo "Done!"
	echo "Old branches are removed"
fi
|
#!/bin/bash
# Configure and build pxScene/pxCore out-of-tree in ./build.
mkdir -p build
pushd build
# Feature and warning switches handed straight to CMake; warnings-as-errors
# for the project targets is disabled, but -Werror is still set in CXX flags.
cmake \
-DPKG_CONFIG_DISABLE_NODE=OFF \
-DPKG_CONFIG_DISABLE_NODE8=OFF \
-DPXSCENE_COMPILE_WARNINGS_AS_ERRORS=OFF \
-DPXCORE_COMPILE_WARNINGS_AS_ERRORS=OFF \
-DSUPPORT_NODE=ON \
-DSUPPORT_DUKTAPE=ON \
-DPXCORE_WAYLAND_DISPLAY_READ_EVENTS=OFF \
-DDISABLE_DEBUG_MODE=OFF \
-DBUILD_PX_TESTS=ON \
-DBUILD_PXSCENE_RASTERIZER_PATH=ON \
-DBUILD_RTREMOTE_LIBS=ON \
-DBUILD_WITH_STATIC_NODE=OFF \
-DPREFER_SYSTEM_LIBRARIES=ON \
-DBUILD_PXSCENE_STATIC_LIB=ON \
-DPXSCENE_TEST_HTTP_CACHE=OFF \
-DBUILD_DEBUG_METRICS=ON \
-DCMAKE_CXX_FLAGS="-fno-delete-null-pointer-checks -Wno-unused-parameter -Wno-sign-compare -Wno-deprecated-declarations -Wformat=2 -Wno-format-y2k -Wall -Werror -Wextra -Woverloaded-virtual -Wno-strict-aliasing -Wno-unused-function -DMESA_EGL_NO_X11_HEADERS -O0 -g0 -DPXSCENE_DISABLE_WST_DECODER" ..
# Alternative CXX flag sets kept for reference (newer-GCC suppressions / debug build):
# -DCMAKE_CXX_FLAGS="-fno-delete-null-pointer-checks -Wno-unused-parameter -Wno-sign-compare -Wno-deprecated-declarations -Wformat=2 -Wno-format-y2k -Wall -Werror -Wextra -Wno-cast-function-type -Wno-class-memaccess -Woverloaded-virtual -Wno-strict-aliasing -Wno-unused-function -DMESA_EGL_NO_X11_HEADERS -O0 -g0 -DPXSCENE_DISABLE_WST_DECODER" ..
# -DCMAKE_CXX_FLAGS="-fno-delete-null-pointer-checks -Wall -Werror -Wextra -Werror=unused-but-set-variable -Wno-unused-parameter -Wno-deprecated-declarations -Wno-sign-compare -Wno-unused-parameter -Wno-deprecated-declarations -DMESA_EGL_NO_X11_HEADERS -O0 -g3 -DPXSCENE_DISABLE_WST_DECODER -std=gnu++14" ..
# Parallel build with timing; prints "ok" only on success.
time make -j$(nproc) VERBOSE=1 && echo "ok"
popd
|
/** A function that maps a state to its successor state. */
export type StateUpdater<S> = (state: S) => S;

/**
 * A function that derives the next state from the current state and a
 * read-only view of the previous state.
 */
export type StateSynchronizer<S> = (state: S, previousState: Readonly<S>) => S;

export interface ComposableStateSynchronizer<S, K extends keyof any = keyof S> {
	/**
	 * The name of a piece of state that the synchronizer updates
	 */
	stateKey: K;
	/**
	 * Names of pieces of state that the synchronizer depends on
	 */
	dependenciesKeys: K[];
	/**
	 * The function that recomputes `stateKey` — presumably invoked when one
	 * of the dependencies changes; confirm against the composition logic.
	 */
	synchronizer: StateSynchronizer<S>;
}
|
<filename>twilio-studio/functions/scrape-c4k-for-supporting-documents-processing-status.js
// Care 4 Kids "provider information / status" page to scrape.
const SITE_URL = "https://www.ctcare4kids.com/provider-information/status/";

// Twilio Function entry point: scrapes the C4K status page and hands the
// supporting-documents processing-status dates back to the Studio flow.
exports.handler = async function(context, event, callback) {
	// Load the shared web-scraper helper from this Function's private assets.
	const webscraperPath = Runtime.getAssets()['/web-scraper.js'].path;
	const WebScraper = require(webscraperPath);
	const webscraper = new WebScraper(SITE_URL);
	await webscraper.init();
	// NOTE(review): this call is not awaited — confirm it is synchronous
	// once init() has completed.
	const redeterminationsProcessingStatusDates = webscraper.getSupportingDocumentsProcessingStatusDates();
	callback(null, redeterminationsProcessingStatusDates);
};
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package library.helper.ui.addbook;
import com.jfoenix.controls.JFXButton;
import com.jfoenix.controls.JFXTextField;
import java.net.URL;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ResourceBundle;
import java.util.logging.Level;
import java.util.logging.Logger;
import javafx.event.ActionEvent;
import javafx.fxml.FXML;
import javafx.fxml.Initializable;
import javafx.scene.control.Alert;
import javafx.scene.control.Label;
import javafx.scene.layout.AnchorPane;
import javafx.scene.text.Text;
import javafx.stage.Stage;
import library.helper.database.DatabaseHandler;
import library.helper.fxdialogs.FxAlerts;
import library.helper.ui.booklist.Book;
/**
*
* @author kumaq
*/
public class AddBookController implements Initializable {

    // NOTE(review): never injected (no @FXML annotation) and only referenced by
    // the dead handleButtonAction method below; calling it would NPE.
    private Label label;
    @FXML
    private JFXTextField title;
    @FXML
    private JFXTextField id;
    @FXML
    private JFXTextField author;
    @FXML
    private JFXButton saveButton;
    @FXML
    private JFXButton cancelButton;
    @FXML
    private JFXTextField publisher;
    // Inline validation messages shown next to each input field.
    @FXML
    private Text titleError;
    @FXML
    private Text idError;
    @FXML
    private Text authorError;
    @FXML
    private Text publisherError;
    // Shared database access object (singleton, obtained in initialize()).
    DatabaseHandler handler;
    @FXML
    private AnchorPane rootPane;
    // True when the window was opened via getBookDataInEdit() to edit an existing book.
    private Boolean isEditing = Boolean.FALSE;

    // Dead code: not wired to FXML (no @FXML) and never called from this class;
    // kept only to avoid breaking anything outside this view of the file.
    private void handleButtonAction(ActionEvent event) {
        System.out.println("You clicked me!");
        label.setText("Hello World!");
    }

    @Override
    public void initialize(URL url, ResourceBundle rb) {
        handler = DatabaseHandler.getHandlerObject();
    }

    /**
     * Adds a new book to the database, or delegates to editBook() when the
     * controller is in edit mode. Triggered by the "save" button.
     */
    @FXML
    private void addBook(ActionEvent event) {
        // 1. read the form fields
        String bookTitle = title.getText();
        String bookID = id.getText();
        String bookAuthor = author.getText();
        String bookPublisher = publisher.getText();
        if (isEditing) {
            editBook();
            return;
        }
        // 2. validate and insert; isBookAlreadyAdded also reports duplicates to the user
        if (isBookInfoValid(bookTitle, bookID, bookAuthor, bookPublisher) && !isBookAlreadyAdded(bookID)) {
            // SECURITY(review): statement built by string concatenation, so a quote
            // in any field corrupts the SQL / allows injection. DatabaseHandler only
            // exposes execAction(String); it should be migrated to PreparedStatement.
            String INSERT = "INSERT INTO " + handler.getBookTableName() + " VALUES ("
                    + "'" + bookID + "',"
                    + "'" + bookTitle + "',"
                    + "'" + bookAuthor + "',"
                    + "'" + bookPublisher + "',"
                    + "'" + true + "'"
                    + ")";
            if (handler.execAction(INSERT)) {
                // BUG FIX: success was previously reported through showError();
                // use showInformation() for consistency with editBook().
                FxAlerts.showInformation("Confirmation", "Book is successfully added.");
                // close add-book gui after book is successfully added
                Stage stage = (Stage) rootPane.getScene().getWindow();
                stage.close();
            } else {
                FxAlerts.showError("Failed", "An error occurred.\nThis book cannot be added.");
            }
        }
    }

    // Close the add-book window when the user clicks the "cancel" button.
    @FXML
    private void cancel(ActionEvent event) {
        Stage stage = (Stage) rootPane.getScene().getWindow();
        stage.close();
    }

    /**
     * Validates the four form fields, writing a message into the matching
     * error Text node for every invalid field.
     *
     * @return true when every field is non-empty, the ID is exactly
     *         MAX_ID_LENGTH characters, and no other field exceeds MAX_LENGTH
     */
    public boolean isBookInfoValid(String title, String id, String author, String publisher) {
        String[] info = {title, id, author, publisher};
        Text[] texts = {titleError, idError, authorError, publisherError};
        boolean isValid = true;
        for (int i = 0; i < info.length; i++) {
            if (info[i].length() == 0) {
                texts[i].setText("You cannot leave this field blank");
                isValid = false;
            } else if (i == 1 && info[i].length() != MAX_ID_LENGTH) { // index 1 is the Book ID
                texts[i].setText("Book ID must consist of " + MAX_ID_LENGTH + " characters.");
                isValid = false;
            } else if (i != 1 && info[i].length() > MAX_LENGTH) {
                texts[i].setText("Text entered must not exceed a maximum length of " + MAX_LENGTH + ".");
                isValid = false;
            }
        }
        return isValid;
    }

    /**
     * Returns true (and warns the user) when a book with the given ID
     * already exists in the database.
     */
    public boolean isBookAlreadyAdded(String id) {
        try {
            // SECURITY(review): concatenated query; see addBook().
            String SEARCH = "SELECT * FROM " + handler.getBookTableName()
                    + " WHERE id = '" + id + "'";
            ResultSet result = handler.execQuery(SEARCH);
            if (result.next()) {
                // BUG FIX: user-facing typo "diffrent" -> "different".
                FxAlerts.showError("Failed", "This book already exists.\nPlease add a different Book ID.");
                return true;
            }
        } catch (SQLException ex) {
            Logger.getLogger(AddBookController.class.getName()).log(Level.SEVERE, null, ex);
        }
        return false;
    }

    /** Pre-fills the form from an existing book and switches the controller to edit mode. */
    public void getBookDataInEdit(Book book) {
        title.setText(book.getTitle());
        id.setText(book.getId());
        author.setText(book.getAuthor());
        publisher.setText(book.getPublisher());
        id.setEditable(false); // the primary key must not change during an edit
        isEditing = Boolean.TRUE;
    }

    /** Persists the edited book and closes the window on success. */
    private void editBook() {
        Book aBook = new Book(title.getText(), id.getText(), author.getText(), publisher.getText(), true);
        if (handler.editBook(aBook)) {
            FxAlerts.showInformation("Completed", "Book '" + aBook.getTitle() + "' (ID: " +
                    aBook.getId() + ") is edited.");
            Stage stage = (Stage) rootPane.getScene().getWindow();
            stage.close();
        } else {
            FxAlerts.showError("Error", "Cannot edit the book information, please revise the information");
        }
    }

    // Validation limits.
    private final int NUM_OF_FIELDS = 4;  // NOTE(review): unused; kept for compatibility
    private final int MAX_ID_LENGTH = 4;
    private final int MAX_LENGTH = 100;
}
|
<reponame>mk12/mycraft
/*
* Copyright (c) 2002-2008 LWJGL Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of 'LWJGL' nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mach-o/dyld.h>
#include <stdlib.h>
#include <string.h>
#include <CoreFoundation/CoreFoundation.h>
#include "extcl.h"
#include "common_tools.h"
/**
 * OpenCL library management.
 *
 * At most one of these handles is expected to be non-NULL at a time:
 * handleOCL is set when the dylib is located via NSAddImage (extcl_LoadLibrary);
 * opencl_bundle is set when the OpenCL.framework bundle is loaded
 * (Java_org_lwjgl_opencl_CL_nCreateDefault). Symbol resolution prefers
 * handleOCL (see extcl_NativeGetFunctionPointer).
 */
static const struct mach_header* handleOCL = NULL;
static CFBundleRef opencl_bundle = NULL;
/*
 * Resolves an OpenCL entry point by name, preferring the image loaded via
 * NSAddImage (handleOCL) and falling back to the CFBundle handle.
 * Returns NULL when no library is loaded, allocation fails, or the symbol
 * cannot be found.
 */
void *extcl_NativeGetFunctionPointer(const char *function) {
	if (handleOCL != NULL) {
		/* Mach-O C symbols carry a leading underscore; build "_<function>". */
		size_t name_len = strlen(function);
		char *mac_symbol_name = (char *)malloc(name_len + 2);
		if (mac_symbol_name == NULL)
			return NULL;
		mac_symbol_name[0] = '_';
		memcpy(&mac_symbol_name[1], function, name_len + 1);
		NSSymbol symbol = NSLookupSymbolInImage(handleOCL, mac_symbol_name, NSLOOKUPSYMBOLINIMAGE_OPTION_RETURN_ON_ERROR);
		free(mac_symbol_name);
		return (symbol != NULL) ? NSAddressOfSymbol(symbol) : NULL;
	}
	if (opencl_bundle != NULL) {
		CFStringRef cf_function = CFStringCreateWithCString(NULL, function, kCFStringEncodingUTF8);
		void *address = CFBundleGetFunctionPointerForName(opencl_bundle, cf_function);
		CFRelease(cf_function);
		return address;
	}
	return NULL;
}
/*
 * Attempts to create a CFBundle for the system OpenCL framework.
 * Returns the bundle, or NULL on any failure (logged via printfDebugJava).
 * The local is named "bundle" so it no longer shadows the global opencl_bundle.
 */
static CFBundleRef tryLoadFramework(JNIEnv *env) {
	CFStringRef framework_path = CFSTR("/System/Library/Frameworks/OpenCL.framework");
	if (framework_path == NULL) {
		printfDebugJava(env, "Failed to allocate string");
		return NULL;
	}
	CFURLRef url = CFURLCreateWithFileSystemPath(NULL, framework_path, kCFURLPOSIXPathStyle, TRUE);
	if (url == NULL) {
		printfDebugJava(env, "Failed to allocate URL");
		return NULL;
	}
	CFBundleRef bundle = CFBundleCreate(NULL, url);
	CFRelease(url);
	return bundle;
}
/*
 * Tries to load the OpenCL dylib at the given Java string path via NSAddImage,
 * storing the image handle in the global handleOCL on success.
 * Throws a Java exception when the image cannot be loaded.
 */
void extcl_LoadLibrary(JNIEnv *env, jstring path) {
	const char *path_str = (*env)->GetStringUTFChars(env, path, NULL);
	printfDebugJava(env, "Testing '%s'", path_str);
	/* RETURN_ON_ERROR makes NSAddImage return NULL instead of aborting. */
	handleOCL = NSAddImage(path_str, NSADDIMAGE_OPTION_RETURN_ON_ERROR);
	if (handleOCL != NULL) {
		printfDebugJava(env, "Found OpenCL at '%s'", path_str);
	} else {
		throwException(env, "Could not load OpenCL library");
	}
	(*env)->ReleaseStringUTFChars(env, path, path_str);
}
/**
 * Unloads the OpenCL Library.
 *
 * Releases the CFBundle handle if one was created. NOTE(review): handleOCL is
 * never reset here, so extcl_NativeGetFunctionPointer keeps resolving through
 * a previously NSAddImage-loaded image after this call — presumably because
 * such images cannot be unloaded, but confirm that is intended.
 */
void extcl_UnloadLibrary() {
	if (opencl_bundle != NULL) {
		CFRelease(opencl_bundle);
		opencl_bundle = NULL;
	}
}
/*
 * JNI entry point: loads the default system OpenCL framework bundle into the
 * global opencl_bundle, throwing a Java exception on failure.
 */
JNIEXPORT void JNICALL Java_org_lwjgl_opencl_CL_nCreateDefault(JNIEnv *env, jclass clazz) {
	opencl_bundle = tryLoadFramework(env);
	if (opencl_bundle == NULL) {
		throwException(env, "Could not load OpenCL framework");
		return;
	}
	printfDebugJava(env, "Found OpenCL Bundle");
}
|
import os
def process_configuration(module: str, request_config: dict) -> str:
    """Build a runtime conf file for the given module and return its path.

    Only the "upload" module is supported; anything else raises.
    Note: mutates request_config by absolutizing its 'file' entry.
    """
    if module != "upload":
        raise Exception('can not support this operating: {}'.format(module))
    detect_utils.check_config(request_config, required_arguments=['file', 'param1', 'param2'])
    # Resolve a relative file path against the project base directory.
    file_path = request_config['file']
    if not os.path.isabs(file_path):
        request_config["file"] = os.path.join(file_utils.get_project_base_directory(), file_path)
    return new_runtime_conf(job_dir=_job_dir, method=data_func, module=module)
<filename>rover/core/servers/imu/miscImu/misc/angularVelocity.py
import json
import requests
from time import sleep
###### global variables for counter, slp (sleep time), angle1, and angle2 ######
# NOTE(review): "global" at module scope is a no-op; these statements only have
# meaning inside functions and could be dropped without changing behavior.
global i
i = 0        # sample counter; a velocity is only computed once i > 0
global slp
slp = 0.1    # polling interval in seconds (also the dt of the velocity calc)
global ang1
ang1 = 0     # most recent heading reading (degrees)
global ang2
ang2 = 0     # previous heading reading (degrees)
###### This function calculates the angular velocity in deg/sec ######
def angularVelocity(ang1, ang2):
    """Return the angular speed (deg/sec) between two headings sampled slp seconds apart."""
    # The original if/else picked the positive difference; abs() is equivalent.
    return abs(ang1 - ang2) / slp
###### This loop polls deepstream every slp seconds and prints the angular velocity ######
while True:
    try:
        payload = {"body" : [{"topic" : "record", "action" : "read", "recordName" : "rover/imu"}]}
        request = requests.post('http://127.0.0.1:4080', json=payload)
        # response content may arrive as bytes or str; normalize before parsing
        if isinstance(request.content, bytes):
            response = json.loads(request.content.decode('utf-8'))
        elif isinstance(request.content, str):
            response = json.loads(request.content)
        # BUG FIX: the original compared with "is", which tests object identity,
        # not equality -- it only ever worked by string-interning accident.
        if response["result"] == "SUCCESS":
            ang1 = response["body"][0]["data"]["heading"]
        elif response["result"] == "FAILURE":
            print("NO RECORD FOUND")
        # Need two distinct samples before a velocity can be computed.
        if i > 0 and ang1 != ang2:
            velocity = angularVelocity(ang1, ang2)
        else:
            velocity = 0
        print("Heading:", ang1, "Angular velocity:", velocity)
        sleep(slp)
        ang2 = ang1
        i += 1
    except Exception:
        # BUG FIX: the bare "except:" also swallowed KeyboardInterrupt/SystemExit,
        # making the loop impossible to stop with Ctrl-C.
        print("Can't load Deepstream")
|
#!/bin/bash
######################################
# Publish the seafile base image to docker
# registry. This script should only be called during a travis build trigger by a tag.
######################################

# Never use "set -x" or it would expose the docker credentials in the travis logs!
set -e
set -o pipefail

## Always use the base image we build manually to reduce the download size of the end user.
# ref: https://docs.travis-ci.com/user/docker/#branch-based-registry-pushes
# --password-stdin keeps the password out of the process list and shell history.
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin

# Build and push from image/ inside a subshell so the cwd is restored afterwards.
(
    cd image
    make push-base
)
|
import os
import threading
import time
from unittest import TestCase
from galaxy.util import bunch
from galaxy.jobs.runners import local
from galaxy.jobs import metrics
from galaxy import model
from tools_support import (
UsesApp,
UsesTools
)
class TestLocalJobRunner( TestCase, UsesApp, UsesTools ):
    """Tests for galaxy.jobs.runners.local.LocalJobRunner, driven through a
    MockJobWrapper instead of a real Galaxy job."""

    def setUp( self ):
        self.setup_app()
        self._init_tool()
        self.app.job_metrics = metrics.JobMetrics()
        self.job_wrapper = MockJobWrapper( self.app, self.test_directory, self.tool )

    def tearDown( self ):
        self.tear_down_app()

    def test_run( self ):
        # Happy path: the command's stdout is captured on the wrapper by finish().
        self.job_wrapper.command_line = "echo HelloWorld"
        runner = local.LocalJobRunner( self.app, 1 )
        runner.queue_job( self.job_wrapper )
        assert self.job_wrapper.stdout.strip() == "HelloWorld"

    def test_galaxy_lib_on_path( self ):
        # The runner should put galaxy_lib_dir on PYTHONPATH for the child process.
        self.job_wrapper.command_line = '''python -c "import galaxy.util"'''
        runner = local.LocalJobRunner( self.app, 1 )
        runner.queue_job( self.job_wrapper )
        assert self.job_wrapper.exit_code == 0

    def test_default_slots( self ):
        # With no destination override, GALAXY_SLOTS defaults to 1.
        self.job_wrapper.command_line = '''echo $GALAXY_SLOTS'''
        runner = local.LocalJobRunner( self.app, 1 )
        runner.queue_job( self.job_wrapper )
        assert self.job_wrapper.stdout.strip() == "1"

    def test_slots_override( self ):
        # Set local_slots in job destination to specify slots for
        # local job runner.
        self.job_wrapper.job_destination.params[ "local_slots" ] = 3
        self.job_wrapper.command_line = '''echo $GALAXY_SLOTS'''
        runner = local.LocalJobRunner( self.app, 1 )
        runner.queue_job( self.job_wrapper )
        assert self.job_wrapper.stdout.strip() == "3"

    def test_exit_code( self ):
        # Non-zero exit codes must be propagated to the wrapper.
        self.job_wrapper.command_line = '''sh -c "exit 4"'''
        runner = local.LocalJobRunner( self.app, 1 )
        runner.queue_job( self.job_wrapper )
        assert self.job_wrapper.exit_code == 4

    def test_metadata_gets_set( self ):
        # The wrapper's metadata_command touches mock_metadata_path when run.
        runner = local.LocalJobRunner( self.app, 1 )
        runner.queue_job( self.job_wrapper )
        assert os.path.exists( self.job_wrapper.mock_metadata_path )

    def test_metadata_gets_set_if_embedded( self ):
        self.job_wrapper.job_destination.params[ "embed_metadata_in_job" ] = "True"
        # Kill off cruft for _handle_metadata_externally and make sure job stil works...
        self.job_wrapper.external_output_metadata = None
        self.app.datatypes_registry.set_external_metadata_tool = None
        runner = local.LocalJobRunner( self.app, 1 )
        runner.queue_job( self.job_wrapper )
        assert os.path.exists( self.job_wrapper.mock_metadata_path )

    def test_stopping_job( self ):
        # Queue a long-sleeping job on a background thread, wait until the runner
        # reports an external (process) id, then ask the runner to stop it.
        self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
        runner = local.LocalJobRunner( self.app, 1 )

        def queue():
            runner.queue_job( self.job_wrapper )
        t = threading.Thread(target=queue)
        t.start()
        # NOTE(review): unbounded busy-wait -- this hangs forever if the job
        # never starts; consider adding a deadline.
        while True:
            if self.job_wrapper.external_id:
                break
            time.sleep( .01 )
        external_id = self.job_wrapper.external_id
        # Minimal Job stand-in exposing just what stop_job() reads.
        mock_job = bunch.Bunch(
            get_external_output_metadata=lambda: None,
            get_job_runner_external_id=lambda: str(external_id),
            get_id=lambda: 1
        )
        runner.stop_job( mock_job )
        t.join(1)
class MockJobWrapper( object ):
    """Minimal stand-in for Galaxy's JobWrapper, exposing just the attributes
    and hooks the local runner touches during these tests."""

    def __init__( self, app, test_directory, tool ):
        working_directory = os.path.join( test_directory, "workdir" )
        os.makedirs( working_directory )
        self.app = app
        self.tool = tool
        self.state = model.Job.states.QUEUED
        self.command_line = "echo HelloWorld"
        self.environment_variables = []
        self.commands_in_new_shell = False
        self.prepare_called = False
        self.write_version_cmd = None
        self.dependency_shell_commands = None
        self.working_directory = working_directory
        self.requires_setting_metadata = True
        self.job_destination = bunch.Bunch( id="default", params={} )
        self.galaxy_lib_dir = os.path.abspath( "lib" )
        self.job_id = 1
        self.external_id = None
        self.output_paths = [ '/tmp/output1.dat' ]
        # The metadata tests assert this file exists after the runner sets metadata.
        self.mock_metadata_path = os.path.abspath( os.path.join( test_directory, "METADATA_SET" ) )
        self.metadata_command = "touch %s" % self.mock_metadata_path

        # Cruft for setting metadata externally, axe at some point.
        self.external_output_metadata = bunch.Bunch(
            set_job_runner_external_pid=lambda pid, session: None
        )
        self.app.datatypes_registry.set_external_metadata_tool = bunch.Bunch(
            build_dependency_shell_commands=lambda: []
        )

    def prepare( self ):
        self.prepare_called = True

    def set_job_destination( self, job_destination, external_id ):
        # The runner reports the spawned process id through this hook.
        self.external_id = external_id

    def get_command_line( self ):
        return self.command_line

    def get_id_tag( self ):
        return "1"

    def get_state( self ):
        return self.state

    def change_state( self, state ):
        self.state = state

    def get_output_fnames( self ):
        return []

    def get_job( self ):
        return model.Job()

    def setup_external_metadata( self, **kwds ):
        return self.metadata_command

    def get_env_setup_clause( self ):
        return ""

    def has_limits( self ):
        return False

    def finish( self, stdout, stderr, exit_code ):
        # Capture the completed job's results for the assertions above.
        self.stdout = stdout
        self.stderr = stderr
        self.exit_code = exit_code
|
# Measure test coverage of the lfucache package, excluding its own test files
# from both the run and the report.
coverage run --source=$PYTHONPATH/lfucache --omit=$PYTHONPATH/lfucache/test/* all_tests.py
# Per-file summary; -m lists the line numbers that were not executed.
coverage report --omit=$PYTHONPATH/lfucache/test/* -m
|
<reponame>arwilczek90/Operation-Pyramid
package SelfDrivingCar;
/**
* Created by awilczek on 3/18/14.
*/
public class Seat {
    private int recline;   // angle position of seat; the higher, the more bent the user is
    private int yPosition; // height, up/down position
    private int xPosition; // forward/back position; lower is forward
    private final int max = 100; // upper bound for all three attributes
    private final int min = 1;   // lower bound for all three attributes

    /**
     * Creates a seat in the default position.
     *
     * BUG FIX: the original declared {@code public void Seat()} — a regular
     * method, not a constructor — so it never ran and every field stayed at 0,
     * below the documented minimum of 1. This real constructor runs on
     * {@code new Seat()} and applies the defaults.
     */
    public Seat() {
        reset();
    }

    /**
     * @deprecated Kept only for source compatibility with callers of the old
     * pseudo-constructor; use the constructor or {@link #reset()} instead.
     */
    @Deprecated
    public void Seat() {
        reset();
    }

    /** Sets all three attributes at once (no bounds checks, matching original behavior). */
    public void setX_Y_R(int x, int y, int rec) {
        recline = rec;
        yPosition = y;
        xPosition = x;
    }

    public int getXPosition() {
        return xPosition;
    }

    public int getYPosition() {
        return yPosition;
    }

    public int getRecline() {
        return recline;
    }

    /** Moves the seat forward by one step (decreases x), clamped at min. */
    public void forward() {
        if (xPosition > min)
            xPosition--;
    }

    /** Moves the seat backward by one step (increases x), clamped at max. */
    public void backward() {
        if (xPosition < max)
            xPosition++;
    }

    /** Resets all attributes to the default position. */
    public void reset() {
        recline = 50;
        yPosition = 25;
        xPosition = 75;
    }

    /** Raises the seat by one step, clamped at max. */
    public void raise() {
        if (yPosition < max)
            yPosition++;
    }

    /** Lowers the seat by one step, clamped at min. */
    public void lower() {
        if (yPosition > min)
            yPosition--;
    }

    /** Increases the recline angle by one step, clamped at max. */
    public void recline() {
        if (recline < max)
            recline++;
    }

    /** Decreases the recline angle by one step, clamped at min. */
    public void decline() {
        if (recline > min)
            recline--;
    }
}
|
#!/usr/bin/env bash
# Pipeline: dedupe raw tweets, attach references and articles, score them
# against misinformation targets (BERT score + BM25), and build the
# train/dev/test splits. Commented-out stages are previously-run or manual
# steps kept for reference.
#export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-11.0.7.10-0.fc30.x86_64/

# 1. Drop near-duplicate tweets (Jaccard threshold 0.5).
python preprocess/filter_unique.py \
  --input_path data/raw-v1 \
  --output_path data/unique-v1.jsonl \
  --min_jaccard 0.5

# 2. Attach reference metadata to each unique tweet.
python preprocess/add_references.py \
  --input_path data/unique-v1.jsonl \
  --output_path data/unique-ref-v1.jsonl

# (previously run) download and parse the referenced articles
#python preprocess/download_articles.py \
#  --input_path data/unique-ref-v1.jsonl \
#  --output_path data/unique-ref-articles-v1.jsonl
#
#python preprocess/parse_articles.py \
#  --input_path data/unique-ref-articles-v1.jsonl \
#  --output_path data/unique-ref-parsed-articles-v1.jsonl

# 3. Merge the parsed articles back into the tweet records.
python preprocess/add_articles.py \
  --input_path data/unique-ref-v1.jsonl \
  --articles_path data/unique-ref-parsed-articles-v1.jsonl \
  --output_path data/unique-art-v1.jsonl

# (previously run) build a pyserini/Lucene index and BM25 candidates
#mkdir data/unique-art-v1-index-data
#
#python preprocess/convert_tweets_to_jsonl.py \
#  --input_path data/unique-art-v1.jsonl \
#  --output_path data/unique-art-v1-index-data/unique-art-v1-index.jsonl
#
#python -m pyserini.index \
#  -collection JsonCollection \
#  -generator DefaultLuceneDocumentGenerator \
#  -threads 8 \
#  -input data/unique-art-v1-index-data \
#  -index data/unique-v1 \
#  -storePositions \
#  -storeDocvectors \
#  -storeRaw
#
#python preprocess/search_index.py \
#  --index_path data/unique-v1 \
#  --query_path data/misinfo.json \
#  --output_path data/bm25-scores-v1.json \
#  --top_k 200

#python preprocess/select_candidates.py \
#  --input_path data/unique-art-v1.jsonl \
#  --misinfo_path data/misinfo.json \
#  --score_path data/bm25-scores-v1.json \
#  --output_path data/unique-art-v1-bm25-candidates.jsonl \
#  --top_k 200

# 4. BERT-score tweets against the primary misinformation texts, keep top 200.
python preprocess/run_bert_score.py \
  --input_path data/unique-art-v1.jsonl \
  --misinfo_path data/misinfo.json \
  --output_path data/scores.json \
  --device cuda:4 \
  --batch_size 32

python preprocess/select_candidates.py \
  --input_path data/unique-art-v1.jsonl \
  --misinfo_path data/misinfo.json \
  --score_path data/scores.json \
  --output_path data/unique-art-v1-candidates.jsonl \
  --top_k 200

# 5. Same scoring/selection against the alternate misinformation phrasings.
python preprocess/run_bert_score.py \
  --input_path data/unique-art-v1.jsonl \
  --misinfo_path data/misinfo.json \
  --misinfo_text_type alternate_text \
  --output_path data/alternate-scores.json \
  --device cuda:4 \
  --batch_size 32

python preprocess/select_candidates.py \
  --input_path data/unique-art-v1.jsonl \
  --misinfo_path data/misinfo.json \
  --misinfo_text_type alternate_text \
  --score_path data/alternate-scores.json \
  --output_path data/unique-art-v1-candidates-alternate.jsonl \
  --top_k 200

# 6. Merge primary + alternate candidate sets, then merge in BM25 candidates.
python preprocess/merge_candidates.py \
  --input_path data/unique-art-v1-candidates.jsonl \
  --alternate_path data/unique-art-v1-candidates-alternate.jsonl \
  --output_path data/unique-art-v1-candidates-merged.jsonl

python preprocess/merge_candidates.py \
  --input_path data/unique-art-v1-candidates-merged.jsonl \
  --alternate_path data/unique-art-v1-bm25-candidates.jsonl \
  --output_path data/unique-art-v1-candidates-bert-bm25-merged.jsonl

# 7. Build the dataset splits (v1: random, v2/v3: zero-shot by misinfo id).
mkdir artifacts/v1
# code for merging annotations is in notebooks on GPU04
# Train size: 3735, Dev size: 415, Test size: 1038
python preprocess/create_split.py \
  --input_path data/unique-art-v1-annotated-bert-bm25-merged.jsonl \
  --output_path data/v1 \
  --test_size 0.2 \
  --dev_size 0.1

mkdir artifacts/v2
# Train size: 3637, Dev size: 387, Test size: 1164
python preprocess/create_zero_split.py \
  --input_path data/unique-art-v1-annotated-bert-bm25-merged.jsonl \
  --output_path data/v2 \
  --misinfo_path data/misinfo.json \
  --dev_mids 8 \
  --test_mids 4,5,7

mkdir artifacts/v3
# Train size: 3070, Dev size: 679, Test size: 1439
python preprocess/create_zero_split.py \
  --input_path data/unique-art-v1-annotated-bert-bm25-merged.jsonl \
  --output_path data/v3 \
  --misinfo_path data/misinfo.json \
  --dev_mids 2,10 \
  --test_mids 1,4,5,7
|
const MongoClient = require('mongodb').MongoClient;
// Connection string template — the <username>/<password>/<cluster>/<database>
// placeholders must be replaced before running; the literal will not connect.
const uri = "mongodb+srv://<username>:<password>@<cluster>/<database>";
const client = new MongoClient(uri, {
    useNewUrlParser: true
});
// Connect to the database
client.connect(err => {
    // Check for connection error.
    // NOTE(review): throwing inside this callback cannot be caught by the
    // caller and will crash the process; consider logging and returning.
    if (err) throw err;
    // Specify database
    const db = client.db("database");
    // Specify collection
    const collection = db.collection("users");
    // Find all documents whose name field is "John"
    collection.find({ name: "John" }).toArray((err, result) => {
        // Check for error
        if (err) throw err;
        // Print result
        console.log(result);
        // Close connection (only reached on success; errors above leak the client)
        client.close();
    });
});
# Build the four MoogStokes f2py variants (Alpha..Delta) and install each .so
# into ../MoogTools/. Refactored from four copy-pasted command pairs into one
# loop so the long, identical Fortran source list is maintained in one place.
set -e  # stop at the first failed build instead of copying a stale .so

# Fortran sources shared by every variant.
SOURCES="Begin.f Infile.f Getasci.f Nansi.f Getcount.f Synth.f Synspec.f Finish.f Opacit.f OpacHydrogen.f OpacHelium.f Opacmetals.f Opacscat.f Opaccouls.f Nearly.f Eqlib.f Partnew.f Cdcalc.f Jexpint.f Linlimit.f Prinfo.f Sunder.f Gammabark.f Rinteg.f Batom.f Bmolec.f Inlines.f Params.f Putasci.f Taukap.f Voigt.f Discov.f Inmodel.f Partfn.f Damping.f Ucalc.f Invert.f Trudamp.f Lineinfo.f SynStokes.f Spline.f SplineDriver.f WaveGrid.f Curfit.f CalcGeom.f DELOQuad.f CalcOpacities.f ComplexVoigt.f StokesTrace.f StokesDipStick.f"

for variant in Alpha Bravo Charlie Delta; do
    # $SOURCES is deliberately unquoted: word splitting into file names is required.
    f2py --f77flags=-fcheck=all -c "MoogStokesPy_${variant}.pyf" MoogStokessilent.f -L/usr/lib64/atlas/ -llapack $SOURCES
    cp "MoogStokesPy_${variant}.so" ../MoogTools/
done
|
# frozen_string_literal: true
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
require "google/cloud/errors"
require "google/cloud/datacatalog/v1/datacatalog_pb"
module Google
module Cloud
module DataCatalog
module V1
module DataCatalog
##
# Client for the DataCatalog service.
#
# Data Catalog API service allows you to discover, understand, and manage
# your data.
#
class Client
include Paths
# @private
attr_reader :data_catalog_stub
##
# Configure the DataCatalog Client class.
#
# See {::Google::Cloud::DataCatalog::V1::DataCatalog::Client::Configuration}
# for a description of the configuration fields.
#
# @example
#
# # Modify the configuration for all DataCatalog clients
# ::Google::Cloud::DataCatalog::V1::DataCatalog::Client.configure do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
def self.configure
  # Build (and memoize) the class-level configuration on first access.
  @configure ||= begin
    namespace = ["Google", "Cloud", "DataCatalog", "V1"]
    # Walk outward through the enclosing namespaces and inherit configuration
    # from the nearest ancestor that responds to .configure.
    parent_config = while namespace.any?
      parent_name = namespace.join "::"
      parent_const = const_get parent_name
      break parent_const.configure if parent_const.respond_to? :configure
      namespace.pop
    end
    default_config = Client::Configuration.new parent_config

    # Service-wide default timeout plus per-RPC timeouts and retry policies.
    # retry_codes [14] is the gRPC UNAVAILABLE status code.
    default_config.timeout = 60.0
    default_config.rpcs.search_catalog.timeout = 60.0
    default_config.rpcs.search_catalog.retry_policy = {
      initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
    }
    default_config.rpcs.get_entry_group.timeout = 60.0
    default_config.rpcs.get_entry_group.retry_policy = {
      initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
    }
    default_config.rpcs.list_entry_groups.timeout = 60.0
    default_config.rpcs.list_entry_groups.retry_policy = {
      initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
    }
    default_config.rpcs.get_entry.timeout = 60.0
    default_config.rpcs.get_entry.retry_policy = {
      initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
    }
    default_config.rpcs.lookup_entry.timeout = 60.0
    default_config.rpcs.lookup_entry.retry_policy = {
      initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
    }
    default_config.rpcs.list_entries.timeout = 60.0
    default_config.rpcs.list_entries.retry_policy = {
      initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
    }
    default_config.rpcs.list_tags.timeout = 60.0
    default_config.rpcs.list_tags.retry_policy = {
      initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
    }
    default_config.rpcs.get_iam_policy.timeout = 60.0
    default_config.rpcs.get_iam_policy.retry_policy = {
      initial_delay: 0.1, max_delay: 60.0, multiplier: 1.3, retry_codes: [14]
    }
    default_config
  end
  yield @configure if block_given?
  @configure
end
##
# Configure the DataCatalog Client instance.
#
# The configuration is set to the derived mode, meaning that values can be changed,
# but structural changes (adding new fields, etc.) are not allowed. Structural changes
# should be made on {Client.configure}.
#
# See {::Google::Cloud::DataCatalog::V1::DataCatalog::Client::Configuration}
# for a description of the configuration fields.
#
# @yield [config] Configure the Client client.
# @yieldparam config [Client::Configuration]
#
# @return [Client::Configuration]
#
def configure
  # Yield the instance-level (derived) configuration for mutation, then return it.
  yield @config if block_given?
  @config
end
##
# Create a new DataCatalog client object.
#
# @example
#
# # Create a client using the default configuration
# client = ::Google::Cloud::DataCatalog::V1::DataCatalog::Client.new
#
# # Create a client using a custom configuration
# client = ::Google::Cloud::DataCatalog::V1::DataCatalog::Client.new do |config|
# config.timeout = 10.0
# end
#
# @yield [config] Configure the DataCatalog client.
# @yieldparam config [Client::Configuration]
#
def initialize
  # These require statements are intentionally placed here to initialize
  # the gRPC module only when it's required.
  # See https://github.com/googleapis/toolkit/issues/446
  require "gapic/grpc"
  require "google/cloud/datacatalog/v1/datacatalog_services_pb"

  # Create the configuration object, derived from the class-level defaults.
  @config = Configuration.new Client.configure

  # Yield the configuration if needed
  yield @config if block_given?

  # Create credentials
  credentials = @config.credentials
  # Use self-signed JWT if the endpoint is unchanged from default,
  # but only if the default endpoint does not have a region prefix.
  enable_self_signed_jwt = @config.endpoint == Client.configure.endpoint &&
                           !@config.endpoint.split(".").first.include?("-")
  credentials ||= Credentials.default scope: @config.scope,
                                      enable_self_signed_jwt: enable_self_signed_jwt
  # Plain string/hash credentials are wrapped in a Credentials object.
  if credentials.is_a?(::String) || credentials.is_a?(::Hash)
    credentials = Credentials.new credentials, scope: @config.scope
  end
  # Explicitly configured quota project wins; otherwise take the credentials'.
  @quota_project_id = @config.quota_project
  @quota_project_id ||= credentials.quota_project_id if credentials.respond_to? :quota_project_id

  # The stub performs the actual gRPC calls for every service method.
  @data_catalog_stub = ::Gapic::ServiceStub.new(
    ::Google::Cloud::DataCatalog::V1::DataCatalog::Stub,
    credentials: credentials,
    endpoint: @config.endpoint,
    channel_args: @config.channel_args,
    interceptors: @config.interceptors
  )
end
# Service calls
##
# Searches Data Catalog for multiple resources like entries and tags that
# match a query.
#
# This is a [Custom Method]
# (https://cloud.google.com/apis/design/custom_methods) that doesn't return
# all information on a resource, only its ID and high level fields. To get
# more information, you can subsequently call specific get methods.
#
# Note: Data Catalog search queries don't guarantee full recall. Results
# that match your query might not be returned, even in subsequent
# result pages. Additionally, returned (and not returned) results can vary
# if you repeat search queries.
#
# For more information, see [Data Catalog search syntax]
# (https://cloud.google.com/data-catalog/docs/how-to/search-reference).
#
# @overload search_catalog(request, options = nil)
# Pass arguments to `search_catalog` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::SearchCatalogRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::SearchCatalogRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload search_catalog(scope: nil, query: nil, page_size: nil, page_token: nil, order_by: nil)
# Pass arguments to `search_catalog` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param scope [::Google::Cloud::DataCatalog::V1::SearchCatalogRequest::Scope, ::Hash]
# Required. The scope of this search request.
#
# The `scope` is invalid if `include_org_ids`, `include_project_ids` are
# empty AND `include_gcp_public_datasets` is set to `false`. In this case,
# the request returns an error.
# @param query [::String]
# Optional. The query string with a minimum of 3 characters and specific syntax.
# For more information, see
# [Data Catalog search syntax](https://cloud.google.com/data-catalog/docs/how-to/search-reference).
#
# An empty query string returns all data assets (in the specified scope)
# that you have access to.
#
# A query string can be a simple `xyz` or qualified by predicates:
#
# * `name:x`
# * `column:y`
# * `description:z`
# @param page_size [::Integer]
# Number of results to return in a single search page.
#
# Can't be negative or 0, defaults to 10 in this case.
# The maximum number is 1000. If exceeded, throws an "invalid argument"
# exception.
# @param page_token [::String]
# Optional. Pagination token that, if specified, returns the next page of search
# results. If empty, returns the first page.
#
# This token is returned in the {::Google::Cloud::DataCatalog::V1::SearchCatalogResponse#next_page_token SearchCatalogResponse.next_page_token}
# field of the response to a previous
# {::Google::Cloud::DataCatalog::V1::DataCatalog::Client#search_catalog SearchCatalogRequest}
# call.
# @param order_by [::String]
# Specifies the order of results.
#
# Currently supported case-sensitive values are:
#
# * `relevance` that can only be descending
# * `last_modified_timestamp [asc|desc]` with descending (`desc`) as default
#
# If this parameter is omitted, it defaults to the descending `relevance`.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Google::Cloud::DataCatalog::V1::SearchCatalogResult>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Google::Cloud::DataCatalog::V1::SearchCatalogResult>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def search_catalog request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::SearchCatalogRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.search_catalog.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.search_catalog.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.search_catalog.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :search_catalog, request, options: options do |response, operation|
    # Wrap the raw response so callers can enumerate across pages transparently.
    response = ::Gapic::PagedEnumerable.new @data_catalog_stub, :search_catalog, request, response, operation, options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Creates an entry group.
#
# An entry group contains logically related entries together with [Cloud
# Identity and Access Management](/data-catalog/docs/concepts/iam) policies.
# These policies specify users who can create, edit, and view entries
# within entry groups.
#
# Data Catalog automatically creates entry groups with names that start with
# the `@` symbol for the following resources:
#
# * BigQuery entries (`@bigquery`)
# * Pub/Sub topics (`@pubsub`)
# * Dataproc Metastore services (`@dataproc_metastore_{SERVICE_NAME_HASH}`)
#
# You can create your own entry groups for Cloud Storage fileset entries
# and custom entries together with the corresponding IAM policies.
# User-created entry groups can't contain the `@` symbol; it is reserved
# for automatically created groups.
#
# Entry groups, like entries, can be searched.
#
# A maximum of 10,000 entry groups may be created per organization across all
# locations.
#
# You must enable the Data Catalog API in the project identified by
# the `parent` parameter. For more information, see [Data Catalog resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload create_entry_group(request, options = nil)
# Pass arguments to `create_entry_group` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::CreateEntryGroupRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::CreateEntryGroupRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
#
# @overload create_entry_group(parent: nil, entry_group_id: nil, entry_group: nil)
# Pass arguments to `create_entry_group` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The names of the project and location that the new entry group belongs to.
#
# Note: The entry group itself and its child resources might not be
# stored in the location specified in its name.
# @param entry_group_id [::String]
# Required. The ID of the entry group to create.
#
# The ID must contain only letters (a-z, A-Z), numbers (0-9),
# underscores (_), and must start with a letter or underscore.
# The maximum size is 64 bytes when encoded in UTF-8.
# @param entry_group [::Google::Cloud::DataCatalog::V1::EntryGroup, ::Hash]
# The entry group to create. Defaults to empty.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::EntryGroup]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::EntryGroup]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def create_entry_group request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::CreateEntryGroupRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.create_entry_group.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Build the implicit routing header from the request's resource field.
  routing_params = {}
  routing_params["parent"] = request.parent if request.parent
  call_metadata[:"x-goog-request-params"] ||=
    routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.create_entry_group.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.create_entry_group.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :create_entry_group, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets an entry group.
#
# @overload get_entry_group(request, options = nil)
# Pass arguments to `get_entry_group` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::GetEntryGroupRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::GetEntryGroupRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
#
# @overload get_entry_group(name: nil, read_mask: nil)
# Pass arguments to `get_entry_group` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the entry group to get.
# @param read_mask [::Google::Protobuf::FieldMask, ::Hash]
# The fields to return. If empty or omitted, all fields are returned.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::EntryGroup]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::EntryGroup]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def get_entry_group request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::GetEntryGroupRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.get_entry_group.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Build the implicit routing header from the request's resource field.
  routing_params = {}
  routing_params["name"] = request.name if request.name
  call_metadata[:"x-goog-request-params"] ||=
    routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.get_entry_group.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.get_entry_group.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :get_entry_group, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Updates an entry group.
#
# You must enable the Data Catalog API in the project identified by
# the `entry_group.name` parameter. For more information, see [Data Catalog
# resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload update_entry_group(request, options = nil)
# Pass arguments to `update_entry_group` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::UpdateEntryGroupRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::UpdateEntryGroupRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
#
# @overload update_entry_group(entry_group: nil, update_mask: nil)
# Pass arguments to `update_entry_group` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param entry_group [::Google::Cloud::DataCatalog::V1::EntryGroup, ::Hash]
# Required. Updates for the entry group. The `name` field must be set.
# @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
# Names of fields whose values to overwrite on an entry group.
#
# If this parameter is absent or empty, all modifiable fields
# are overwritten. If such fields are non-required and omitted in the
# request body, their values are emptied.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::EntryGroup]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::EntryGroup]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def update_entry_group request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::UpdateEntryGroupRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.update_entry_group.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Build the implicit routing header from the nested resource name.
  routing_params = {}
  routing_params["entry_group.name"] = request.entry_group.name if request.entry_group&.name
  call_metadata[:"x-goog-request-params"] ||=
    routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.update_entry_group.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.update_entry_group.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :update_entry_group, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Deletes an entry group.
#
# You must enable the Data Catalog API in the project
# identified by the `name` parameter. For more information, see [Data Catalog
# resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload delete_entry_group(request, options = nil)
# Pass arguments to `delete_entry_group` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::DeleteEntryGroupRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::DeleteEntryGroupRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
#
# @overload delete_entry_group(name: nil, force: nil)
# Pass arguments to `delete_entry_group` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the entry group to delete.
# @param force [::Boolean]
# Optional. If true, deletes all entries in the entry group.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Protobuf::Empty]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Protobuf::Empty]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def delete_entry_group request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::DeleteEntryGroupRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.delete_entry_group.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Build the implicit routing header from the request's resource field.
  routing_params = {}
  routing_params["name"] = request.name if request.name
  call_metadata[:"x-goog-request-params"] ||=
    routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.delete_entry_group.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.delete_entry_group.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :delete_entry_group, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Lists entry groups.
#
# @overload list_entry_groups(request, options = nil)
# Pass arguments to `list_entry_groups` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::ListEntryGroupsRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::ListEntryGroupsRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
#
# @overload list_entry_groups(parent: nil, page_size: nil, page_token: nil)
# Pass arguments to `list_entry_groups` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The name of the location that contains the entry groups to list.
#
# Can be provided as a URL.
# @param page_size [::Integer]
# Optional. The maximum number of items to return.
#
# Default is 10. Maximum limit is 1000.
# Throws an "invalid argument" exception if `page_size` is greater than 1000.
# @param page_token [::String]
# Optional. Pagination token that specifies the next page to return.
# If empty, returns the first page.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Google::Cloud::DataCatalog::V1::EntryGroup>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Google::Cloud::DataCatalog::V1::EntryGroup>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def list_entry_groups request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::ListEntryGroupsRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.list_entry_groups.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Build the implicit routing header from the request's resource field.
  routing_params = {}
  routing_params["parent"] = request.parent if request.parent
  call_metadata[:"x-goog-request-params"] ||=
    routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.list_entry_groups.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.list_entry_groups.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :list_entry_groups, request, options: options do |response, operation|
    # Wrap the raw response so callers can enumerate across pages transparently.
    response = ::Gapic::PagedEnumerable.new @data_catalog_stub, :list_entry_groups, request, response, operation, options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Creates an entry.
#
# You can create entries only with 'FILESET', 'CLUSTER', 'DATA_STREAM',
# or custom types. Data Catalog automatically creates entries with other
# types during metadata ingestion from integrated systems.
#
# You must enable the Data Catalog API in the project identified by
# the `parent` parameter. For more information, see [Data Catalog resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# An entry group can have a maximum of 100,000 entries.
#
# @overload create_entry(request, options = nil)
# Pass arguments to `create_entry` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::CreateEntryRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::CreateEntryRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
#
# @overload create_entry(parent: nil, entry_id: nil, entry: nil)
# Pass arguments to `create_entry` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The name of the entry group this entry belongs to.
#
# Note: The entry itself and its child resources might not be stored in
# the location specified in its name.
# @param entry_id [::String]
# Required. The ID of the entry to create.
#
# The ID must contain only letters (a-z, A-Z), numbers (0-9),
# and underscores (_).
# The maximum size is 64 bytes when encoded in UTF-8.
# @param entry [::Google::Cloud::DataCatalog::V1::Entry, ::Hash]
# Required. The entry to create.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::Entry]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::Entry]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def create_entry request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::CreateEntryRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.create_entry.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Build the implicit routing header from the request's resource field.
  routing_params = {}
  routing_params["parent"] = request.parent if request.parent
  call_metadata[:"x-goog-request-params"] ||=
    routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.create_entry.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.create_entry.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :create_entry, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Updates an existing entry.
#
# You must enable the Data Catalog API in the project identified by
# the `entry.name` parameter. For more information, see [Data Catalog
# resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload update_entry(request, options = nil)
# Pass arguments to `update_entry` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::UpdateEntryRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::UpdateEntryRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
#
# @overload update_entry(entry: nil, update_mask: nil)
# Pass arguments to `update_entry` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param entry [::Google::Cloud::DataCatalog::V1::Entry, ::Hash]
# Required. Updates for the entry. The `name` field must be set.
# @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
# Names of fields whose values to overwrite on an entry.
#
# If this parameter is absent or empty, all modifiable fields
# are overwritten. If such fields are non-required and omitted in the
# request body, their values are emptied.
#
# You can modify only the fields listed below.
#
# For entries with type `DATA_STREAM`:
#
# * `schema`
#
# For entries with type `FILESET`:
#
# * `schema`
# * `display_name`
# * `description`
# * `gcs_fileset_spec`
# * `gcs_fileset_spec.file_patterns`
#
# For entries with `user_specified_type`:
#
# * `schema`
# * `display_name`
# * `description`
# * `user_specified_type`
# * `user_specified_system`
# * `linked_resource`
# * `source_system_timestamps`
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::Entry]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::Entry]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def update_entry request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::UpdateEntryRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.update_entry.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Build the implicit routing header from the nested resource name.
  routing_params = {}
  routing_params["entry.name"] = request.entry.name if request.entry&.name
  call_metadata[:"x-goog-request-params"] ||=
    routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.update_entry.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.update_entry.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :update_entry, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Deletes an existing entry.
#
# You can delete only the entries created by the
# {::Google::Cloud::DataCatalog::V1::DataCatalog::Client#create_entry CreateEntry}
# method.
#
# You must enable the Data Catalog API in the project identified by
# the `name` parameter. For more information, see [Data Catalog
# resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload delete_entry(request, options = nil)
# Pass arguments to `delete_entry` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::DeleteEntryRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::DeleteEntryRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
#
# @overload delete_entry(name: nil)
# Pass arguments to `delete_entry` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the entry to delete.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Protobuf::Empty]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Protobuf::Empty]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def delete_entry request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::DeleteEntryRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.delete_entry.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Build the implicit routing header from the request's resource field.
  routing_params = {}
  routing_params["name"] = request.name if request.name
  call_metadata[:"x-goog-request-params"] ||=
    routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.delete_entry.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.delete_entry.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :delete_entry, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets an entry.
#
# @overload get_entry(request, options = nil)
# Pass arguments to `get_entry` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::GetEntryRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::GetEntryRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
#     Overrides the default settings for this call, e.g. timeout, retries, etc. Optional.
#
# @overload get_entry(name: nil)
# Pass arguments to `get_entry` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the entry to get.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::Entry]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::Entry]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def get_entry request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::GetEntryRequest

  # Normalize a Hash (or nil) into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Start from the metadata configured for this particular RPC.
  call_metadata = @config.rpcs.get_entry.metadata.to_h

  # Identify the client library and the billing/quota project to the service.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Build the implicit routing header from the request's resource field.
  routing_params = {}
  routing_params["name"] = request.name if request.name
  call_metadata[:"x-goog-request-params"] ||=
    routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer in per-RPC defaults first, then client-wide defaults.
  options.apply_defaults timeout:      @config.rpcs.get_entry.timeout,
                         metadata:     call_metadata,
                         retry_policy: @config.rpcs.get_entry.retry_policy
  options.apply_defaults timeout:      @config.timeout,
                         metadata:     @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :get_entry, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets an entry by its target resource name.
#
# The resource name comes from the source Google Cloud Platform service.
#
# @overload lookup_entry(request, options = nil)
# Pass arguments to `lookup_entry` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::LookupEntryRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::LookupEntryRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload lookup_entry(linked_resource: nil, sql_resource: nil, fully_qualified_name: nil)
# Pass arguments to `lookup_entry` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param linked_resource [::String]
# The full name of the Google Cloud Platform resource the Data Catalog
# entry represents. For more information, see [Full Resource Name]
# (https://cloud.google.com/apis/design/resource_names#full_resource_name).
#
# Full names are case-sensitive. For example:
#
# * `//bigquery.googleapis.com/projects/{PROJECT_ID}/datasets/{DATASET_ID}/tables/{TABLE_ID}`
# * `//pubsub.googleapis.com/projects/{PROJECT_ID}/topics/{TOPIC_ID}`
# @param sql_resource [::String]
# The SQL name of the entry. SQL names are case-sensitive.
#
# Examples:
#
# * `pubsub.topic.{PROJECT_ID}.{TOPIC_ID}`
# * `pubsub.topic.{PROJECT_ID}.`\``{TOPIC.ID.SEPARATED.WITH.DOTS}`\`
# * `bigquery.table.{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}`
# * `bigquery.dataset.{PROJECT_ID}.{DATASET_ID}`
# * `datacatalog.entry.{PROJECT_ID}.{LOCATION_ID}.{ENTRY_GROUP_ID}.{ENTRY_ID}`
#
# Identifiers (`*_ID`) should comply with the
# [Lexical structure in Standard SQL]
# (https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical).
# @param fully_qualified_name [::String]
# Fully qualified name (FQN) of the resource.
#
# FQNs take two forms:
#
# * For non-regionalized resources:
#
# `{SYSTEM}:{PROJECT}.{PATH_TO_RESOURCE_SEPARATED_WITH_DOTS}`
#
# * For regionalized resources:
#
# `{SYSTEM}:{PROJECT}.{LOCATION_ID}.{PATH_TO_RESOURCE_SEPARATED_WITH_DOTS}`
#
# Example for a DPMS table:
#
# `dataproc_metastore:{PROJECT_ID}.{LOCATION_ID}.{INSTANCE_ID}.{DATABASE_ID}.{TABLE_ID}`
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::Entry]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::Entry]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def lookup_entry request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or message) into the proto request type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::LookupEntryRequest

  # Accept a plain Hash or nil wherever CallOptions is expected.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  call_metadata = @config.rpcs.lookup_entry.metadata.to_h

  # Attach client-identification and quota-project headers.
  # (No routing header for this RPC.)
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Layer defaults: RPC-specific settings first, then client-wide ones.
  options.apply_defaults timeout: @config.rpcs.lookup_entry.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.lookup_entry.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :lookup_entry, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Lists entries.
#
# @overload list_entries(request, options = nil)
# Pass arguments to `list_entries` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::ListEntriesRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::ListEntriesRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload list_entries(parent: nil, page_size: nil, page_token: nil, read_mask: nil)
# Pass arguments to `list_entries` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The name of the entry group that contains the entries to list.
#
# Can be provided in URL format.
# @param page_size [::Integer]
# The maximum number of items to return. Default is 10. Maximum limit is
# 1000. Throws an invalid argument if `page_size` is more than 1000.
# @param page_token [::String]
# Pagination token that specifies the next page to return. If empty, the
# first page is returned.
# @param read_mask [::Google::Protobuf::FieldMask, ::Hash]
# The fields to return for each entry. If empty or omitted, all
# fields are returned.
#
# For example, to return a list of entries with only the `name` field,
# set `read_mask` to only one path with the `name` value.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Google::Cloud::DataCatalog::V1::Entry>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Google::Cloud::DataCatalog::V1::Entry>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def list_entries request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or message) into the proto request type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::ListEntriesRequest

  # Accept a plain Hash or nil wherever CallOptions is expected.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  call_metadata = @config.rpcs.list_entries.metadata.to_h

  # Attach client-identification and quota-project headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Derive the implicit routing header from the parent resource.
  routing_params = {}
  routing_params["parent"] = request.parent if request.parent
  call_metadata[:"x-goog-request-params"] ||= routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: RPC-specific settings first, then client-wide ones.
  options.apply_defaults timeout: @config.rpcs.list_entries.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.list_entries.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :list_entries, request, options: options do |response, operation|
    # Wrap the raw page so callers can enumerate across all pages.
    response = ::Gapic::PagedEnumerable.new @data_catalog_stub, :list_entries, request, response, operation, options
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Creates a tag template.
#
# You must enable the Data Catalog API in the project identified by the
# `parent` parameter.
# For more information, see [Data Catalog resource project]
# (https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload create_tag_template(request, options = nil)
# Pass arguments to `create_tag_template` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::CreateTagTemplateRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::CreateTagTemplateRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload create_tag_template(parent: nil, tag_template_id: nil, tag_template: nil)
# Pass arguments to `create_tag_template` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The name of the project and the template location
# [region](https://cloud.google.com/data-catalog/docs/concepts/regions).
# @param tag_template_id [::String]
# Required. The ID of the tag template to create.
#
# The ID must contain only lowercase letters (a-z), numbers (0-9),
# or underscores (_), and must start with a letter or underscore.
# The maximum size is 64 bytes when encoded in UTF-8.
# @param tag_template [::Google::Cloud::DataCatalog::V1::TagTemplate, ::Hash]
# Required. The tag template to create.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::TagTemplate]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::TagTemplate]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def create_tag_template request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or message) into the proto request type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::CreateTagTemplateRequest

  # Accept a plain Hash or nil wherever CallOptions is expected.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  call_metadata = @config.rpcs.create_tag_template.metadata.to_h

  # Attach client-identification and quota-project headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Derive the implicit routing header from the parent resource.
  routing_params = {}
  routing_params["parent"] = request.parent if request.parent
  call_metadata[:"x-goog-request-params"] ||= routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: RPC-specific settings first, then client-wide ones.
  options.apply_defaults timeout: @config.rpcs.create_tag_template.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.create_tag_template.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :create_tag_template, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets a tag template.
#
# @overload get_tag_template(request, options = nil)
# Pass arguments to `get_tag_template` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::GetTagTemplateRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::GetTagTemplateRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload get_tag_template(name: nil)
# Pass arguments to `get_tag_template` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the tag template to get.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::TagTemplate]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::TagTemplate]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def get_tag_template request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or message) into the proto request type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::GetTagTemplateRequest

  # Accept a plain Hash or nil wherever CallOptions is expected.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  call_metadata = @config.rpcs.get_tag_template.metadata.to_h

  # Attach client-identification and quota-project headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Derive the implicit routing header from the resource name.
  routing_params = {}
  routing_params["name"] = request.name if request.name
  call_metadata[:"x-goog-request-params"] ||= routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: RPC-specific settings first, then client-wide ones.
  options.apply_defaults timeout: @config.rpcs.get_tag_template.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.get_tag_template.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :get_tag_template, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Updates a tag template.
#
# You can't update template fields with this method. These fields are
# separate resources with their own create, update, and delete methods.
#
# You must enable the Data Catalog API in the project identified by
# the `tag_template.name` parameter. For more information, see [Data Catalog
# resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload update_tag_template(request, options = nil)
# Pass arguments to `update_tag_template` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::UpdateTagTemplateRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::UpdateTagTemplateRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload update_tag_template(tag_template: nil, update_mask: nil)
# Pass arguments to `update_tag_template` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param tag_template [::Google::Cloud::DataCatalog::V1::TagTemplate, ::Hash]
# Required. The template to update. The `name` field must be set.
# @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
# Names of fields whose values to overwrite on a tag template. Currently,
# only `display_name` can be overwritten.
#
# If this parameter is absent or empty, all modifiable fields
# are overwritten. If such fields are non-required and omitted in the
# request body, their values are emptied.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::TagTemplate]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::TagTemplate]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def update_tag_template request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or message) into the proto request type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::UpdateTagTemplateRequest

  # Accept a plain Hash or nil wherever CallOptions is expected.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  call_metadata = @config.rpcs.update_tag_template.metadata.to_h

  # Attach client-identification and quota-project headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Derive the implicit routing header from the nested template name.
  routing_params = {}
  routing_params["tag_template.name"] = request.tag_template.name if request.tag_template&.name
  call_metadata[:"x-goog-request-params"] ||= routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: RPC-specific settings first, then client-wide ones.
  options.apply_defaults timeout: @config.rpcs.update_tag_template.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.update_tag_template.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :update_tag_template, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Deletes a tag template and all tags that use it.
#
# You must enable the Data Catalog API in the project identified by
# the `name` parameter. For more information, see [Data Catalog resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload delete_tag_template(request, options = nil)
# Pass arguments to `delete_tag_template` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::DeleteTagTemplateRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::DeleteTagTemplateRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload delete_tag_template(name: nil, force: nil)
# Pass arguments to `delete_tag_template` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the tag template to delete.
# @param force [::Boolean]
# Required. If true, deletes all tags that use this template.
#
# Currently, `true` is the only supported value.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Protobuf::Empty]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Protobuf::Empty]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def delete_tag_template request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or message) into the proto request type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::DeleteTagTemplateRequest

  # Accept a plain Hash or nil wherever CallOptions is expected.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  call_metadata = @config.rpcs.delete_tag_template.metadata.to_h

  # Attach client-identification and quota-project headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Derive the implicit routing header from the resource name.
  routing_params = {}
  routing_params["name"] = request.name if request.name
  call_metadata[:"x-goog-request-params"] ||= routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: RPC-specific settings first, then client-wide ones.
  options.apply_defaults timeout: @config.rpcs.delete_tag_template.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.delete_tag_template.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :delete_tag_template, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Creates a field in a tag template.
#
# You must enable the Data Catalog API in the project identified by
# the `parent` parameter. For more information, see [Data Catalog resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload create_tag_template_field(request, options = nil)
# Pass arguments to `create_tag_template_field` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::CreateTagTemplateFieldRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::CreateTagTemplateFieldRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload create_tag_template_field(parent: nil, tag_template_field_id: nil, tag_template_field: nil)
# Pass arguments to `create_tag_template_field` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The name of the project and the template location
# [region](https://cloud.google.com/data-catalog/docs/concepts/regions).
# @param tag_template_field_id [::String]
# Required. The ID of the tag template field to create.
#
# Note: Adding a required field to an existing template is *not* allowed.
#
# Field IDs can contain letters (both uppercase and lowercase), numbers
# (0-9), underscores (_) and dashes (-). Field IDs must be at least 1
# character long and at most 128 characters long. Field IDs must also be
# unique within their template.
# @param tag_template_field [::Google::Cloud::DataCatalog::V1::TagTemplateField, ::Hash]
# Required. The tag template field to create.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::TagTemplateField]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::TagTemplateField]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def create_tag_template_field request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or message) into the proto request type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::CreateTagTemplateFieldRequest

  # Accept a plain Hash or nil wherever CallOptions is expected.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  call_metadata = @config.rpcs.create_tag_template_field.metadata.to_h

  # Attach client-identification and quota-project headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Derive the implicit routing header from the parent resource.
  routing_params = {}
  routing_params["parent"] = request.parent if request.parent
  call_metadata[:"x-goog-request-params"] ||= routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: RPC-specific settings first, then client-wide ones.
  options.apply_defaults timeout: @config.rpcs.create_tag_template_field.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.create_tag_template_field.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :create_tag_template_field, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Updates a field in a tag template.
#
# You can't update the field type with this method.
#
# You must enable the Data Catalog API in the project
# identified by the `name` parameter. For more information, see [Data Catalog
# resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload update_tag_template_field(request, options = nil)
# Pass arguments to `update_tag_template_field` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::UpdateTagTemplateFieldRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::UpdateTagTemplateFieldRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload update_tag_template_field(name: nil, tag_template_field: nil, update_mask: nil)
# Pass arguments to `update_tag_template_field` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the tag template field.
# @param tag_template_field [::Google::Cloud::DataCatalog::V1::TagTemplateField, ::Hash]
# Required. The template to update.
# @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
# Optional. Names of fields whose values to overwrite on an individual field of a tag
# template. The following fields are modifiable:
#
# * `display_name`
# * `type.enum_type`
# * `is_required`
#
# If this parameter is absent or empty, all modifiable fields
# are overwritten. If such fields are non-required and omitted in the request
# body, their values are emptied with one exception: when updating an enum
# type, the provided values are merged with the existing values. Therefore,
# enum values can only be added, existing enum values cannot be deleted or
# renamed.
#
# Additionally, updating a template field from optional to required is
# *not* allowed.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::TagTemplateField]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::TagTemplateField]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def update_tag_template_field request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or message) into the proto request type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::UpdateTagTemplateFieldRequest

  # Accept a plain Hash or nil wherever CallOptions is expected.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  call_metadata = @config.rpcs.update_tag_template_field.metadata.to_h

  # Attach client-identification and quota-project headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Derive the implicit routing header from the resource name.
  routing_params = {}
  routing_params["name"] = request.name if request.name
  call_metadata[:"x-goog-request-params"] ||= routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: RPC-specific settings first, then client-wide ones.
  options.apply_defaults timeout: @config.rpcs.update_tag_template_field.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.update_tag_template_field.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :update_tag_template_field, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Renames a field in a tag template.
#
# You must enable the Data Catalog API in the project identified by the
# `name` parameter. For more information, see [Data Catalog resource project]
# (https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload rename_tag_template_field(request, options = nil)
# Pass arguments to `rename_tag_template_field` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::RenameTagTemplateFieldRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::RenameTagTemplateFieldRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g., timeout, retries, etc. Optional.
#
# @overload rename_tag_template_field(name: nil, new_tag_template_field_id: nil)
# Pass arguments to `rename_tag_template_field` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the tag template.
# @param new_tag_template_field_id [::String]
# Required. The new ID of this tag template field. For example, `my_new_field`.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::TagTemplateField]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::TagTemplateField]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def rename_tag_template_field request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or message) into the proto request type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::RenameTagTemplateFieldRequest

  # Accept a plain Hash or nil wherever CallOptions is expected.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Seed the call metadata from the per-RPC configuration.
  call_metadata = @config.rpcs.rename_tag_template_field.metadata.to_h

  # Attach client-identification and quota-project headers.
  call_metadata[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_metadata[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Derive the implicit routing header from the resource name.
  routing_params = {}
  routing_params["name"] = request.name if request.name
  call_metadata[:"x-goog-request-params"] ||= routing_params.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: RPC-specific settings first, then client-wide ones.
  options.apply_defaults timeout: @config.rpcs.rename_tag_template_field.timeout,
                         metadata: call_metadata,
                         retry_policy: @config.rpcs.rename_tag_template_field.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :rename_tag_template_field, request, options: options do |response, operation|
    yield response, operation if block_given?
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Renames an enum value in a tag template.
#
# Within a single enum field, enum values must be unique.
#
# @overload rename_tag_template_field_enum_value(request, options = nil)
# Pass arguments to `rename_tag_template_field_enum_value` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::RenameTagTemplateFieldEnumValueRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::RenameTagTemplateFieldEnumValueRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload rename_tag_template_field_enum_value(name: nil, new_enum_value_display_name: nil)
# Pass arguments to `rename_tag_template_field_enum_value` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the enum field value.
# @param new_enum_value_display_name [::String]
# Required. The new display name of the enum value. For example, `my_new_enum_value`.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::TagTemplateField]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::TagTemplateField]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def rename_tag_template_field_enum_value request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass through a proto) into the request message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::RenameTagTemplateFieldEnumValueRequest

  # Normalize hash/nil options into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Build call metadata: per-RPC configured headers plus standard x-goog-* headers.
  call_md = @config.rpcs.rename_tag_template_field_enum_value.metadata.to_h
  call_md[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_md[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's `name` field.
  routing = {}
  routing["name"] = request.name if request.name
  call_md[:"x-goog-request-params"] ||= routing.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: per-RPC settings first, then service-wide settings.
  options.apply_defaults timeout: @config.rpcs.rename_tag_template_field_enum_value.timeout,
                         metadata: call_md,
                         retry_policy: @config.rpcs.rename_tag_template_field_enum_value.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :rename_tag_template_field_enum_value, request, options: options do |response, operation|
    yield response, operation if block_given?
    # `return` inside the block exits the enclosing method with the response.
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Deletes a field in a tag template and all uses of this field from the tags
# based on this template.
#
# You must enable the Data Catalog API in the project identified by
# the `name` parameter. For more information, see [Data Catalog resource
# project](https://cloud.google.com/data-catalog/docs/concepts/resource-project).
#
# @overload delete_tag_template_field(request, options = nil)
# Pass arguments to `delete_tag_template_field` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::DeleteTagTemplateFieldRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::DeleteTagTemplateFieldRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload delete_tag_template_field(name: nil, force: nil)
# Pass arguments to `delete_tag_template_field` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the tag template field to delete.
# @param force [::Boolean]
# Required. If true, deletes this field from any tags that use it.
#
# Currently, `true` is the only supported value.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Protobuf::Empty]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Protobuf::Empty]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def delete_tag_template_field request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass through a proto) into the request message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::DeleteTagTemplateFieldRequest

  # Normalize hash/nil options into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Build call metadata: per-RPC configured headers plus standard x-goog-* headers.
  call_md = @config.rpcs.delete_tag_template_field.metadata.to_h
  call_md[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_md[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's `name` field.
  routing = {}
  routing["name"] = request.name if request.name
  call_md[:"x-goog-request-params"] ||= routing.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: per-RPC settings first, then service-wide settings.
  options.apply_defaults timeout: @config.rpcs.delete_tag_template_field.timeout,
                         metadata: call_md,
                         retry_policy: @config.rpcs.delete_tag_template_field.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :delete_tag_template_field, request, options: options do |response, operation|
    yield response, operation if block_given?
    # `return` inside the block exits the enclosing method with the response.
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Creates a tag and assigns it to:
#
# * An {::Google::Cloud::DataCatalog::V1::Entry Entry} if the method name is
# ``projects.locations.entryGroups.entries.tags.create``.
# * Or {::Google::Cloud::DataCatalog::V1::EntryGroup EntryGroup} if the method
# name is ``projects.locations.entryGroups.tags.create``.
#
# Note: The project identified by the `parent` parameter for the [tag]
# (https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.entryGroups.entries.tags/create#path-parameters)
# and the [tag template]
# (https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.tagTemplates/create#path-parameters)
# used to create the tag must be in the same organization.
#
# @overload create_tag(request, options = nil)
# Pass arguments to `create_tag` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::CreateTagRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::CreateTagRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload create_tag(parent: nil, tag: nil)
# Pass arguments to `create_tag` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The name of the resource to attach this tag to.
#
# Tags can be attached to entries or entry groups. An entry can have up to
# 1000 attached tags.
#
# Note: The tag and its child resources might not be stored in
# the location specified in its name.
# @param tag [::Google::Cloud::DataCatalog::V1::Tag, ::Hash]
# Required. The tag to create.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::Tag]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::Tag]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def create_tag request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass through a proto) into the request message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::CreateTagRequest

  # Normalize hash/nil options into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Build call metadata: per-RPC configured headers plus standard x-goog-* headers.
  call_md = @config.rpcs.create_tag.metadata.to_h
  call_md[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_md[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's `parent` field.
  routing = {}
  routing["parent"] = request.parent if request.parent
  call_md[:"x-goog-request-params"] ||= routing.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: per-RPC settings first, then service-wide settings.
  options.apply_defaults timeout: @config.rpcs.create_tag.timeout,
                         metadata: call_md,
                         retry_policy: @config.rpcs.create_tag.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :create_tag, request, options: options do |response, operation|
    yield response, operation if block_given?
    # `return` inside the block exits the enclosing method with the response.
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Updates an existing tag.
#
# @overload update_tag(request, options = nil)
# Pass arguments to `update_tag` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::UpdateTagRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::UpdateTagRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload update_tag(tag: nil, update_mask: nil)
# Pass arguments to `update_tag` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param tag [::Google::Cloud::DataCatalog::V1::Tag, ::Hash]
# Required. The updated tag. The "name" field must be set.
# @param update_mask [::Google::Protobuf::FieldMask, ::Hash]
# Names of fields whose values to overwrite on a tag. Currently, a tag has
# the only modifiable field with the name `fields`.
#
# In general, if this parameter is absent or empty, all modifiable fields
# are overwritten. If such fields are non-required and omitted in the
# request body, their values are emptied.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Cloud::DataCatalog::V1::Tag]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Cloud::DataCatalog::V1::Tag]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def update_tag request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass through a proto) into the request message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::UpdateTagRequest

  # Normalize hash/nil options into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Build call metadata: per-RPC configured headers plus standard x-goog-* headers.
  call_md = @config.rpcs.update_tag.metadata.to_h
  call_md[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_md[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the nested `tag.name` field (safe-navigated,
  # since `tag` itself may be unset).
  routing = {}
  routing["tag.name"] = request.tag.name if request.tag&.name
  call_md[:"x-goog-request-params"] ||= routing.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: per-RPC settings first, then service-wide settings.
  options.apply_defaults timeout: @config.rpcs.update_tag.timeout,
                         metadata: call_md,
                         retry_policy: @config.rpcs.update_tag.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :update_tag, request, options: options do |response, operation|
    yield response, operation if block_given?
    # `return` inside the block exits the enclosing method with the response.
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Deletes a tag.
#
# @overload delete_tag(request, options = nil)
# Pass arguments to `delete_tag` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::DeleteTagRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::DeleteTagRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload delete_tag(name: nil)
# Pass arguments to `delete_tag` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param name [::String]
# Required. The name of the tag to delete.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Protobuf::Empty]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Protobuf::Empty]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def delete_tag request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass through a proto) into the request message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::DeleteTagRequest

  # Normalize hash/nil options into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Build call metadata: per-RPC configured headers plus standard x-goog-* headers.
  call_md = @config.rpcs.delete_tag.metadata.to_h
  call_md[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_md[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's `name` field.
  routing = {}
  routing["name"] = request.name if request.name
  call_md[:"x-goog-request-params"] ||= routing.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: per-RPC settings first, then service-wide settings.
  options.apply_defaults timeout: @config.rpcs.delete_tag.timeout,
                         metadata: call_md,
                         retry_policy: @config.rpcs.delete_tag.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :delete_tag, request, options: options do |response, operation|
    yield response, operation if block_given?
    # `return` inside the block exits the enclosing method with the response.
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Lists tags assigned to an {::Google::Cloud::DataCatalog::V1::Entry Entry}.
#
# @overload list_tags(request, options = nil)
# Pass arguments to `list_tags` via a request object, either of type
# {::Google::Cloud::DataCatalog::V1::ListTagsRequest} or an equivalent Hash.
#
# @param request [::Google::Cloud::DataCatalog::V1::ListTagsRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload list_tags(parent: nil, page_size: nil, page_token: nil)
# Pass arguments to `list_tags` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param parent [::String]
# Required. The name of the Data Catalog resource to list the tags of.
#
# The resource can be an {::Google::Cloud::DataCatalog::V1::Entry Entry}
# or an {::Google::Cloud::DataCatalog::V1::EntryGroup EntryGroup}
# (without `/entries/{entries}` at the end).
# @param page_size [::Integer]
# The maximum number of tags to return. Default is 10. Maximum limit is 1000.
# @param page_token [::String]
# Pagination token that specifies the next page to return. If empty, the
# first page is returned.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Gapic::PagedEnumerable<::Google::Cloud::DataCatalog::V1::Tag>]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Gapic::PagedEnumerable<::Google::Cloud::DataCatalog::V1::Tag>]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def list_tags request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass through a proto) into the request message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Cloud::DataCatalog::V1::ListTagsRequest

  # Normalize hash/nil options into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Build call metadata: per-RPC configured headers plus standard x-goog-* headers.
  call_md = @config.rpcs.list_tags.metadata.to_h
  call_md[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_md[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's `parent` field.
  routing = {}
  routing["parent"] = request.parent if request.parent
  call_md[:"x-goog-request-params"] ||= routing.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: per-RPC settings first, then service-wide settings.
  options.apply_defaults timeout: @config.rpcs.list_tags.timeout,
                         metadata: call_md,
                         retry_policy: @config.rpcs.list_tags.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :list_tags, request, options: options do |response, operation|
    # Wrap the raw page response in a lazily-paging enumerable before yielding.
    response = ::Gapic::PagedEnumerable.new @data_catalog_stub, :list_tags, request, response, operation, options
    yield response, operation if block_given?
    # `return` inside the block exits the enclosing method with the enumerable.
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Sets an access control policy for a resource. Replaces any existing
# policy.
#
# Supported resources are:
#
# - Tag templates
# - Entry groups
#
# Note: This method sets policies only within Data Catalog and can't be
# used to manage policies in BigQuery, Pub/Sub, Dataproc Metastore, and any
# external Google Cloud Platform resources synced with the Data Catalog.
#
# To call this method, you must have the following Google IAM permissions:
#
# - `datacatalog.tagTemplates.setIamPolicy` to set policies on tag
# templates.
# - `datacatalog.entryGroups.setIamPolicy` to set policies on entry groups.
#
# @overload set_iam_policy(request, options = nil)
# Pass arguments to `set_iam_policy` via a request object, either of type
# {::Google::Iam::V1::SetIamPolicyRequest} or an equivalent Hash.
#
# @param request [::Google::Iam::V1::SetIamPolicyRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload set_iam_policy(resource: nil, policy: nil)
# Pass arguments to `set_iam_policy` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param resource [::String]
# REQUIRED: The resource for which the policy is being specified.
# See the operation documentation for the appropriate value for this field.
# @param policy [::Google::Iam::V1::Policy, ::Hash]
# REQUIRED: The complete policy to be applied to the `resource`. The size of
# the policy is limited to a few 10s of KB. An empty policy is a
# valid policy but certain Cloud Platform services (such as Projects)
# might reject them.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Iam::V1::Policy]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Iam::V1::Policy]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def set_iam_policy request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass through a proto) into the request message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Iam::V1::SetIamPolicyRequest

  # Normalize hash/nil options into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Build call metadata: per-RPC configured headers plus standard x-goog-* headers.
  call_md = @config.rpcs.set_iam_policy.metadata.to_h
  call_md[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_md[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's `resource` field.
  routing = {}
  routing["resource"] = request.resource if request.resource
  call_md[:"x-goog-request-params"] ||= routing.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: per-RPC settings first, then service-wide settings.
  options.apply_defaults timeout: @config.rpcs.set_iam_policy.timeout,
                         metadata: call_md,
                         retry_policy: @config.rpcs.set_iam_policy.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :set_iam_policy, request, options: options do |response, operation|
    yield response, operation if block_given?
    # `return` inside the block exits the enclosing method with the response.
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets the access control policy for a resource.
#
# May return:
#
# * A `NOT_FOUND` error if the resource doesn't exist or you don't have the
# permission to view it.
# * An empty policy if the resource exists but doesn't have a set policy.
#
# Supported resources are:
#
# - Tag templates
# - Entry groups
#
# Note: This method doesn't get policies from Google Cloud Platform
# resources ingested into Data Catalog.
#
# To call this method, you must have the following Google IAM permissions:
#
# - `datacatalog.tagTemplates.getIamPolicy` to get policies on tag
# templates.
# - `datacatalog.entryGroups.getIamPolicy` to get policies on entry groups.
#
# @overload get_iam_policy(request, options = nil)
# Pass arguments to `get_iam_policy` via a request object, either of type
# {::Google::Iam::V1::GetIamPolicyRequest} or an equivalent Hash.
#
# @param request [::Google::Iam::V1::GetIamPolicyRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload get_iam_policy(resource: nil, options: nil)
# Pass arguments to `get_iam_policy` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param resource [::String]
# REQUIRED: The resource for which the policy is being requested.
# See the operation documentation for the appropriate value for this field.
# @param options [::Google::Iam::V1::GetPolicyOptions, ::Hash]
# OPTIONAL: A `GetPolicyOptions` object for specifying options to
# `GetIamPolicy`. This field is only used by Cloud IAM.
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Iam::V1::Policy]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Iam::V1::Policy]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def get_iam_policy request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass through a proto) into the request message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Iam::V1::GetIamPolicyRequest

  # Normalize hash/nil options into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Build call metadata: per-RPC configured headers plus standard x-goog-* headers.
  call_md = @config.rpcs.get_iam_policy.metadata.to_h
  call_md[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_md[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's `resource` field.
  routing = {}
  routing["resource"] = request.resource if request.resource
  call_md[:"x-goog-request-params"] ||= routing.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: per-RPC settings first, then service-wide settings.
  options.apply_defaults timeout: @config.rpcs.get_iam_policy.timeout,
                         metadata: call_md,
                         retry_policy: @config.rpcs.get_iam_policy.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :get_iam_policy, request, options: options do |response, operation|
    yield response, operation if block_given?
    # `return` inside the block exits the enclosing method with the response.
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Gets your permissions on a resource.
#
# Returns an empty set of permissions if the resource doesn't exist.
#
# Supported resources are:
#
# - Tag templates
# - Entry groups
#
# Note: This method gets policies only within Data Catalog and can't be
# used to get policies from BigQuery, Pub/Sub, Dataproc Metastore, and any
# external Google Cloud Platform resources ingested into Data Catalog.
#
# No Google IAM permissions are required to call this method.
#
# @overload test_iam_permissions(request, options = nil)
# Pass arguments to `test_iam_permissions` via a request object, either of type
# {::Google::Iam::V1::TestIamPermissionsRequest} or an equivalent Hash.
#
# @param request [::Google::Iam::V1::TestIamPermissionsRequest, ::Hash]
# A request object representing the call parameters. Required. To specify no
# parameters, or to keep all the default parameter values, pass an empty Hash.
# @param options [::Gapic::CallOptions, ::Hash]
# Overrides the default settings for this call, e.g, timeout, retries, etc. Optional.
#
# @overload test_iam_permissions(resource: nil, permissions: nil)
# Pass arguments to `test_iam_permissions` via keyword arguments. Note that at
# least one keyword argument is required. To specify no parameters, or to keep all
# the default parameter values, pass an empty Hash as a request object (see above).
#
# @param resource [::String]
# REQUIRED: The resource for which the policy detail is being requested.
# See the operation documentation for the appropriate value for this field.
# @param permissions [::Array<::String>]
# The set of permissions to check for the `resource`. Permissions with
# wildcards (such as '*' or 'storage.*') are not allowed. For more
# information see
# [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
#
# @yield [response, operation] Access the result along with the RPC operation
# @yieldparam response [::Google::Iam::V1::TestIamPermissionsResponse]
# @yieldparam operation [::GRPC::ActiveCall::Operation]
#
# @return [::Google::Iam::V1::TestIamPermissionsResponse]
#
# @raise [::Google::Cloud::Error] if the RPC is aborted.
#
def test_iam_permissions request, options = nil
  raise ::ArgumentError, "request must be provided" if request.nil?

  # Coerce a Hash (or pass through a proto) into the request message type.
  request = ::Gapic::Protobuf.coerce request, to: ::Google::Iam::V1::TestIamPermissionsRequest

  # Normalize hash/nil options into a CallOptions object.
  options = ::Gapic::CallOptions.new(**options.to_h) if options.respond_to? :to_h

  # Build call metadata: per-RPC configured headers plus standard x-goog-* headers.
  call_md = @config.rpcs.test_iam_permissions.metadata.to_h
  call_md[:"x-goog-api-client"] ||= ::Gapic::Headers.x_goog_api_client \
    lib_name: @config.lib_name, lib_version: @config.lib_version,
    gapic_version: ::Google::Cloud::DataCatalog::V1::VERSION
  call_md[:"x-goog-user-project"] = @quota_project_id if @quota_project_id

  # Routing header derived from the request's `resource` field.
  routing = {}
  routing["resource"] = request.resource if request.resource
  call_md[:"x-goog-request-params"] ||= routing.map { |k, v| "#{k}=#{v}" }.join("&")

  # Layer defaults: per-RPC settings first, then service-wide settings.
  options.apply_defaults timeout: @config.rpcs.test_iam_permissions.timeout,
                         metadata: call_md,
                         retry_policy: @config.rpcs.test_iam_permissions.retry_policy
  options.apply_defaults timeout: @config.timeout,
                         metadata: @config.metadata,
                         retry_policy: @config.retry_policy

  @data_catalog_stub.call_rpc :test_iam_permissions, request, options: options do |response, operation|
    yield response, operation if block_given?
    # `return` inside the block exits the enclosing method with the response.
    return response
  end
rescue ::GRPC::BadStatus => e
  raise ::Google::Cloud::Error.from_error(e)
end
##
# Configuration class for the DataCatalog API.
#
# This class represents the configuration for DataCatalog,
# providing control over timeouts, retry behavior, logging, transport
# parameters, and other low-level controls. Certain parameters can also be
# applied individually to specific RPCs. See
# {::Google::Cloud::DataCatalog::V1::DataCatalog::Client::Configuration::Rpcs}
# for a list of RPCs that can be configured independently.
#
# Configuration can be applied globally to all clients, or to a single client
# on construction.
#
# @example
#
# # Modify the global config, setting the timeout for
# # search_catalog to 20 seconds,
# # and all remaining timeouts to 10 seconds.
# ::Google::Cloud::DataCatalog::V1::DataCatalog::Client.configure do |config|
# config.timeout = 10.0
# config.rpcs.search_catalog.timeout = 20.0
# end
#
# # Apply the above configuration only to a new client.
# client = ::Google::Cloud::DataCatalog::V1::DataCatalog::Client.new do |config|
# config.timeout = 10.0
# config.rpcs.search_catalog.timeout = 20.0
# end
#
# @!attribute [rw] endpoint
# The hostname or hostname:port of the service endpoint.
# Defaults to `"datacatalog.googleapis.com"`.
# @return [::String]
# @!attribute [rw] credentials
# Credentials to send with calls. You may provide any of the following types:
# * (`String`) The path to a service account key file in JSON format
# * (`Hash`) A service account key as a Hash
# * (`Google::Auth::Credentials`) A googleauth credentials object
# (see the [googleauth docs](https://googleapis.dev/ruby/googleauth/latest/index.html))
# * (`Signet::OAuth2::Client`) A signet oauth2 client object
# (see the [signet docs](https://googleapis.dev/ruby/signet/latest/Signet/OAuth2/Client.html))
# * (`GRPC::Core::Channel`) a gRPC channel with included credentials
# * (`GRPC::Core::ChannelCredentials`) a gRPC credentials object
# * (`nil`) indicating no credentials
# @return [::Object]
# @!attribute [rw] scope
# The OAuth scopes
# @return [::Array<::String>]
# @!attribute [rw] lib_name
# The library name as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] lib_version
# The library version as recorded in instrumentation and logging
# @return [::String]
# @!attribute [rw] channel_args
# Extra parameters passed to the gRPC channel. Note: this is ignored if a
# `GRPC::Core::Channel` object is provided as the credential.
# @return [::Hash]
# @!attribute [rw] interceptors
# An array of interceptors that are run before calls are executed.
# @return [::Array<::GRPC::ClientInterceptor>]
# @!attribute [rw] timeout
# The call timeout in seconds.
# @return [::Numeric]
# @!attribute [rw] metadata
# Additional gRPC headers to be sent with the call.
# @return [::Hash{::Symbol=>::String}]
# @!attribute [rw] retry_policy
# The retry policy. The value is a hash with the following keys:
# * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
# * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
# * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
# * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
# trigger a retry.
# @return [::Hash]
# @!attribute [rw] quota_project
# A separate project against which to charge quota.
# @return [::String]
#
# Client configuration for the generated DataCatalog gRPC client.
# GAPIC-generated boilerplate: each `config_attr` declares a setting with a
# default value and the types its validator accepts (a trailing `nil` means
# the attribute may be unset).
class Configuration
  extend ::Gapic::Config

  # Default service endpoint for DataCatalog.
  config_attr :endpoint, "datacatalog.googleapis.com", ::String
  # Credentials may be given in several forms; the block validates that the
  # supplied value is one of the documented credential types.
  config_attr :credentials, nil do |value|
    allowed = [::String, ::Hash, ::Proc, ::Symbol, ::Google::Auth::Credentials, ::Signet::OAuth2::Client, nil]
    allowed += [::GRPC::Core::Channel, ::GRPC::Core::ChannelCredentials] if defined? ::GRPC
    allowed.any? { |klass| klass === value }
  end
  config_attr :scope, nil, ::String, ::Array, nil
  config_attr :lib_name, nil, ::String, nil
  config_attr :lib_version, nil, ::String, nil
  config_attr(:channel_args, { "grpc.service_config_disable_resolution" => 1 }, ::Hash, nil)
  config_attr :interceptors, nil, ::Array, nil
  config_attr :timeout, nil, ::Numeric, nil
  config_attr :metadata, nil, ::Hash, nil
  config_attr :retry_policy, nil, ::Hash, ::Proc, nil
  config_attr :quota_project, nil, ::String, nil

  # @private
  # Unset attributes fall back to the parent configuration when one is given.
  def initialize parent_config = nil
    @parent_config = parent_config unless parent_config.nil?

    yield self if block_given?
  end

  ##
  # Configurations for individual RPCs
  # @return [Rpcs]
  #
  def rpcs
    # Lazily built; inherits from the parent configuration's Rpcs when available.
    @rpcs ||= begin
      parent_rpcs = nil
      parent_rpcs = @parent_config.rpcs if defined?(@parent_config) && @parent_config.respond_to?(:rpcs)
      Rpcs.new parent_rpcs
    end
  end

  ##
  # Configuration RPC class for the DataCatalog API.
  #
  # Includes fields providing the configuration for each RPC in this service.
  # Each configuration object is of type `Gapic::Config::Method` and includes
  # the following configuration fields:
  #
  # * `timeout` (*type:* `Numeric`) - The call timeout in seconds
  # * `metadata` (*type:* `Hash{Symbol=>String}`) - Additional gRPC headers
  # * `retry_policy` (*type:* `Hash`) - The retry policy. The policy fields
  #   include the following keys:
  #     * `:initial_delay` (*type:* `Numeric`) - The initial delay in seconds.
  #     * `:max_delay` (*type:* `Numeric`) - The max delay in seconds.
  #     * `:multiplier` (*type:* `Numeric`) - The incremental backoff multiplier.
  #     * `:retry_codes` (*type:* `Array<String>`) - The error codes that should
  #       trigger a retry.
  #
  class Rpcs
    ##
    # RPC-specific configuration for `search_catalog`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :search_catalog
    ##
    # RPC-specific configuration for `create_entry_group`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :create_entry_group
    ##
    # RPC-specific configuration for `get_entry_group`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :get_entry_group
    ##
    # RPC-specific configuration for `update_entry_group`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_entry_group
    ##
    # RPC-specific configuration for `delete_entry_group`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_entry_group
    ##
    # RPC-specific configuration for `list_entry_groups`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_entry_groups
    ##
    # RPC-specific configuration for `create_entry`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :create_entry
    ##
    # RPC-specific configuration for `update_entry`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_entry
    ##
    # RPC-specific configuration for `delete_entry`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_entry
    ##
    # RPC-specific configuration for `get_entry`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :get_entry
    ##
    # RPC-specific configuration for `lookup_entry`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :lookup_entry
    ##
    # RPC-specific configuration for `list_entries`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_entries
    ##
    # RPC-specific configuration for `create_tag_template`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :create_tag_template
    ##
    # RPC-specific configuration for `get_tag_template`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :get_tag_template
    ##
    # RPC-specific configuration for `update_tag_template`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_tag_template
    ##
    # RPC-specific configuration for `delete_tag_template`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_tag_template
    ##
    # RPC-specific configuration for `create_tag_template_field`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :create_tag_template_field
    ##
    # RPC-specific configuration for `update_tag_template_field`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_tag_template_field
    ##
    # RPC-specific configuration for `rename_tag_template_field`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :rename_tag_template_field
    ##
    # RPC-specific configuration for `rename_tag_template_field_enum_value`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :rename_tag_template_field_enum_value
    ##
    # RPC-specific configuration for `delete_tag_template_field`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_tag_template_field
    ##
    # RPC-specific configuration for `create_tag`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :create_tag
    ##
    # RPC-specific configuration for `update_tag`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :update_tag
    ##
    # RPC-specific configuration for `delete_tag`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :delete_tag
    ##
    # RPC-specific configuration for `list_tags`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :list_tags
    ##
    # RPC-specific configuration for `set_iam_policy`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :set_iam_policy
    ##
    # RPC-specific configuration for `get_iam_policy`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :get_iam_policy
    ##
    # RPC-specific configuration for `test_iam_permissions`
    # @return [::Gapic::Config::Method]
    #
    attr_reader :test_iam_permissions

    # @private
    # Each per-RPC config chains to the corresponding config of the parent
    # Rpcs object (when the parent responds to that RPC's name).
    def initialize parent_rpcs = nil
      search_catalog_config = parent_rpcs.search_catalog if parent_rpcs.respond_to? :search_catalog
      @search_catalog = ::Gapic::Config::Method.new search_catalog_config
      create_entry_group_config = parent_rpcs.create_entry_group if parent_rpcs.respond_to? :create_entry_group
      @create_entry_group = ::Gapic::Config::Method.new create_entry_group_config
      get_entry_group_config = parent_rpcs.get_entry_group if parent_rpcs.respond_to? :get_entry_group
      @get_entry_group = ::Gapic::Config::Method.new get_entry_group_config
      update_entry_group_config = parent_rpcs.update_entry_group if parent_rpcs.respond_to? :update_entry_group
      @update_entry_group = ::Gapic::Config::Method.new update_entry_group_config
      delete_entry_group_config = parent_rpcs.delete_entry_group if parent_rpcs.respond_to? :delete_entry_group
      @delete_entry_group = ::Gapic::Config::Method.new delete_entry_group_config
      list_entry_groups_config = parent_rpcs.list_entry_groups if parent_rpcs.respond_to? :list_entry_groups
      @list_entry_groups = ::Gapic::Config::Method.new list_entry_groups_config
      create_entry_config = parent_rpcs.create_entry if parent_rpcs.respond_to? :create_entry
      @create_entry = ::Gapic::Config::Method.new create_entry_config
      update_entry_config = parent_rpcs.update_entry if parent_rpcs.respond_to? :update_entry
      @update_entry = ::Gapic::Config::Method.new update_entry_config
      delete_entry_config = parent_rpcs.delete_entry if parent_rpcs.respond_to? :delete_entry
      @delete_entry = ::Gapic::Config::Method.new delete_entry_config
      get_entry_config = parent_rpcs.get_entry if parent_rpcs.respond_to? :get_entry
      @get_entry = ::Gapic::Config::Method.new get_entry_config
      lookup_entry_config = parent_rpcs.lookup_entry if parent_rpcs.respond_to? :lookup_entry
      @lookup_entry = ::Gapic::Config::Method.new lookup_entry_config
      list_entries_config = parent_rpcs.list_entries if parent_rpcs.respond_to? :list_entries
      @list_entries = ::Gapic::Config::Method.new list_entries_config
      create_tag_template_config = parent_rpcs.create_tag_template if parent_rpcs.respond_to? :create_tag_template
      @create_tag_template = ::Gapic::Config::Method.new create_tag_template_config
      get_tag_template_config = parent_rpcs.get_tag_template if parent_rpcs.respond_to? :get_tag_template
      @get_tag_template = ::Gapic::Config::Method.new get_tag_template_config
      update_tag_template_config = parent_rpcs.update_tag_template if parent_rpcs.respond_to? :update_tag_template
      @update_tag_template = ::Gapic::Config::Method.new update_tag_template_config
      delete_tag_template_config = parent_rpcs.delete_tag_template if parent_rpcs.respond_to? :delete_tag_template
      @delete_tag_template = ::Gapic::Config::Method.new delete_tag_template_config
      create_tag_template_field_config = parent_rpcs.create_tag_template_field if parent_rpcs.respond_to? :create_tag_template_field
      @create_tag_template_field = ::Gapic::Config::Method.new create_tag_template_field_config
      update_tag_template_field_config = parent_rpcs.update_tag_template_field if parent_rpcs.respond_to? :update_tag_template_field
      @update_tag_template_field = ::Gapic::Config::Method.new update_tag_template_field_config
      rename_tag_template_field_config = parent_rpcs.rename_tag_template_field if parent_rpcs.respond_to? :rename_tag_template_field
      @rename_tag_template_field = ::Gapic::Config::Method.new rename_tag_template_field_config
      rename_tag_template_field_enum_value_config = parent_rpcs.rename_tag_template_field_enum_value if parent_rpcs.respond_to? :rename_tag_template_field_enum_value
      @rename_tag_template_field_enum_value = ::Gapic::Config::Method.new rename_tag_template_field_enum_value_config
      delete_tag_template_field_config = parent_rpcs.delete_tag_template_field if parent_rpcs.respond_to? :delete_tag_template_field
      @delete_tag_template_field = ::Gapic::Config::Method.new delete_tag_template_field_config
      create_tag_config = parent_rpcs.create_tag if parent_rpcs.respond_to? :create_tag
      @create_tag = ::Gapic::Config::Method.new create_tag_config
      update_tag_config = parent_rpcs.update_tag if parent_rpcs.respond_to? :update_tag
      @update_tag = ::Gapic::Config::Method.new update_tag_config
      delete_tag_config = parent_rpcs.delete_tag if parent_rpcs.respond_to? :delete_tag
      @delete_tag = ::Gapic::Config::Method.new delete_tag_config
      list_tags_config = parent_rpcs.list_tags if parent_rpcs.respond_to? :list_tags
      @list_tags = ::Gapic::Config::Method.new list_tags_config
      set_iam_policy_config = parent_rpcs.set_iam_policy if parent_rpcs.respond_to? :set_iam_policy
      @set_iam_policy = ::Gapic::Config::Method.new set_iam_policy_config
      get_iam_policy_config = parent_rpcs.get_iam_policy if parent_rpcs.respond_to? :get_iam_policy
      @get_iam_policy = ::Gapic::Config::Method.new get_iam_policy_config
      test_iam_permissions_config = parent_rpcs.test_iam_permissions if parent_rpcs.respond_to? :test_iam_permissions
      @test_iam_permissions = ::Gapic::Config::Method.new test_iam_permissions_config

      yield self if block_given?
    end
  end
end
end
end
end
end
end
end
|
// Copyright 2018 <EMAIL>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package meta
import (
"github.com/storyicon/grbac/pkg/path"
)
// Resource defines resources as a (host, path, method) triple.
// Each field may contain wildcards; matching is delegated to pkg/path.
type Resource struct {
	// Host defines the host of the resource, allowing wildcards to be used.
	Host string `json:"host" yaml:"host"`
	// Path defines the path of the resource, allowing wildcards to be used.
	Path string `json:"path" yaml:"path"`
	// Method defines the method of the resource, allowing wildcards to be used.
	Method string `json:"method" yaml:"method"`
}
// Match is used to calculate whether the query matches the resource.
// The resource's host, path and method patterns are compared pairwise
// against the query's arguments; the first pattern-match error or
// mismatch ends the comparison.
func (r *Resource) Match(query *Query) (bool, error) {
	queryArgs := query.GetArguments()
	for i, pattern := range r.GetArguments() {
		ok, err := path.Match(pattern, queryArgs[i])
		if err != nil {
			return false, err
		}
		if !ok {
			return false, nil
		}
	}
	return true, nil
}
// GetArguments is used to convert the current resource to a string slice,
// in the fixed order host, path, method.
func (r *Resource) GetArguments() []string {
	args := make([]string, 0, 3)
	args = append(args, r.Host, r.Path, r.Method)
	return args
}
// IsValid is used to test the validity of the resource: every field must
// be non-empty, otherwise ErrFieldIncomplete is returned.
func (r *Resource) IsValid() error {
	switch {
	case r.Host == "", r.Method == "", r.Path == "":
		return ErrFieldIncomplete
	default:
		return nil
	}
}
|
<gh_stars>1-10
const yaml = require('js-yaml');
const fs = require('fs');
// Read a YAML file and return its top-level `expectations` list.
// Read/parse errors are logged and undefined is returned (best-effort
// behaviour preserved from the original).
const getExpectationsList = (filePath) => {
  try {
    const raw = fs.readFileSync(filePath, 'utf8');
    // NOTE(review): yaml.safeLoad was removed in js-yaml v4 (v4 uses
    // yaml.load, which is safe by default) — confirm the pinned js-yaml
    // version before upgrading.
    const doc = yaml.safeLoad(raw);
    // safeLoad returns undefined for an empty document; guard instead of
    // letting `doc.expectations` throw into the catch block.
    return doc ? doc.expectations : undefined;
  } catch (e) {
    console.log(e);
  }
};

module.exports = { getExpectationsList };
#!/usr/bin/env bash
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
# Launch the ML Suite docker image. Usage: ./run.sh <image-tag>

tag=$1
if [ -z "$tag" ]; then
    echo "Usage: $0 <docker-image-tag>" >&2
    exit 1
fi

# The ML Suite checkout is assumed to be the parent of this directory.
mlsuite=$(dirname "$PWD")

# Prefix with sudo if your user is not in the docker group.
#sudo \
docker run \
  --name "anup_container" \
  --rm \
  --net=host \
  --privileged=true \
  -it \
  -v /dev:/dev \
  -v /opt/xilinx:/opt/xilinx \
  -v "$mlsuite":/opt/ml-suite \
  -v /wrk/acceleration/models:/opt/models \
  -v /wrk/acceleration/shareData:/opt/data \
  -w /opt/ml-suite \
  xilinxatg/ml_suite:"$tag" \
  bash
#&& cd test_deephi && bash -x nw_list.sh"
|
#!/bin/bash
# List local accounts that belong to the "admin" group.
#
# Bug fixed: the original ran `id -u $u`, which prints ONLY the numeric
# user id, so grepping its output for "groups=..." could never match and
# the script printed no admins at all. `id -nG` prints the user's group
# names, which is what we actually want to search.

# Get a list of user names from the local account database.
users=$(cut -d: -f1 /etc/passwd)

echo "Admin users:"
# Loop over the users
for u in $users; do
  # Check if the user is an admin: match "admin" as a whole word among
  # the user's group names. Errors for vanished users are suppressed.
  if id -nG "$u" 2>/dev/null | grep -qw "admin"; then
    echo "$u"
  fi
done
#!/usr/bin/env bash
# Compile (unless "previous" is passed) and flash an mbed binary onto an
# xDot device mounted as a USB volume (macOS-style /Volumes mount).

REPO_NAME=${PWD##*/}
DEVICE_NAME="XDOT"
# Single source of truth for the binary path (was duplicated before).
BINARY="./BUILD/xdot_l151cc/GCC_ARM/${REPO_NAME}.bin"

# Skip compiling and flash the existing binary. Use "previous" argument.
# ./flash.sh previous
if [ "$1" != "previous" ]; then
    # Remove the previous build if any.
    if [ -d ./BUILD ]; then
        rm -r ./BUILD/*
        echo "[INFO] Removed the previous build."
    fi
    # Compile it for xdot platform with ARM gcc.
    mbed compile -m xdot_l151cc -t GCC_ARM
fi

if [ ! -f "${BINARY}" ]; then
    echo "[ERROR] No binary file to flash. Are there any compiling errors?"
    exit 1
fi

if [ ! -d "/Volumes/${DEVICE_NAME}" ]; then
    echo "[ERROR] The XDOT device is not connected."
    exit 2
fi

# Flash it on your connected device.
echo "[INFO] Flashing XDOT device ..."
echo "[INFO] Check the blinking led on the device, when it's finished press the reset button, on the device."
cp "${BINARY}" "/Volumes/${DEVICE_NAME}"
-- Most-purchased item over the last 7 days.
-- Fixed: `CURDATE() - 7` performs NUMERIC subtraction on the date's
-- YYYYMMDD representation, which yields wrong results across month/year
-- boundaries; date arithmetic must use INTERVAL.
SELECT item_name, COUNT(*) AS num_purchases
FROM Purchases
WHERE purchase_date > CURDATE() - INTERVAL 7 DAY
GROUP BY item_name
ORDER BY num_purchases DESC
LIMIT 1;
#!/usr/bin/env sh
# Uninstall log2ram: stop and disable the service, then remove every
# installed file. Must be run as root.
if [ "$(id -u)" -eq 0 ]
then
    service log2ram stop
    systemctl disable log2ram
    # -f: don't abort or print errors when a file was already removed
    # by hand (partial installs were noisy with plain rm).
    rm -f /etc/systemd/system/log2ram.service
    rm -f /usr/local/bin/log2ram
    rm -f /etc/log2ram.conf
    rm -f /etc/cron.daily/log2ram
    rm -f /etc/logrotate.d/log2ram
    if [ -d /var/hdd.log ]; then
        rm -r /var/hdd.log
    fi
    echo "Log2Ram is uninstalled, removing the uninstaller in progress"
    # Remove this uninstaller itself last.
    rm -f /usr/local/bin/uninstall-log2ram.sh
    echo "##### Reboot isn't needed #####"
else
    echo "You need to be ROOT (sudo can be used)"
fi
const arr = [1, 2, 3, 4];
// Sum the elements. The explicit 0 seed makes reduce safe on empty
// arrays (without it, reduce throws a TypeError on []).
let total = arr.reduce((acc, currVal) => acc + currVal, 0);
<reponame>rafax/sourcegraph
package lsifstore
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/internal/database/dbtesting"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
// TestDatabaseHover verifies that Store.Hover returns the expected hover
// text and range for a known position in the pre-populated test bundle.
func TestDatabaseHover(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	db := dbtesting.GetDB(t)
	populateTestStore(t)
	store := NewStore(db, &observation.TestContext)

	// `\tcontents, err := findContents(pkgs, p, f, obj)`
	//                    ^^^^^^^^^^^^
	gotText, gotRange, found, err := store.Hover(context.Background(), testBundleID, "internal/index/indexer.go", 628, 20)
	if err != nil {
		t.Fatalf("unexpected error %s", err)
	}
	if !found {
		t.Errorf("no hover found")
		return
	}

	docstring := "findContents returns contents used as hover info for given object."
	signature := "func findContents(pkgs []*Package, p *Package, f *File, obj Object) ([]MarkedString, error)"
	wantText := "```go\n" + signature + "\n```\n\n---\n\n" + docstring
	wantRange := newRange(628, 18, 628, 30)

	if gotText != wantText {
		t.Errorf("unexpected hover text. want=%s have=%s", wantText, gotText)
	}
	if diff := cmp.Diff(wantRange, gotRange); diff != "" {
		t.Errorf("unexpected hover range (-want +got):\n%s", diff)
	}
}
|
<gh_stars>1-10
// Shared string constants naming store events/mutations. Values are the
// identifiers consumed elsewhere in the app — do not change them without
// updating their consumers.

/* request lifecycle flags */
export const FETCH_START = 'isFetching'
export const FETCH_END = 'isFetched'
/* for room */
export const SET_ROOMS = 'setRooms'
export const ADD_ROOM = 'addRoom'
export const REMOVE_ROOM = 'removeRoom'
/* for user */
export const SET_USER = 'setUser'
/* for view */
export const TOGGLE_CREATE_ROOM_DIALOG = 'toggleCreateRoomDialog'
/* for post */
export const SET_POSTS = 'setPosts'
export const ADD_POST = 'addPost'
<reponame>randoum/geocoder
# frozen_string_literal: true
require 'geocoder/lookups/base'
require 'geocoder/results/db_ip_com'
module Geocoder
  module Lookup
    # IP-address geocoding lookup backed by the DB-IP.com API.
    class DbIpCom < Base
      def name
        'DB-IP.com'
      end

      def supported_protocols
        %i[https http]
      end

      def required_api_key_parts
        ['api_key']
      end

      private # ----------------------------------------------------------------

      def base_query_url(query)
        "#{protocol}://api.db-ip.com/v2/#{configuration.api_key}/#{query.sanitized_text}?"
      end

      ##
      # Same as query_url but without the api key.
      #
      def cache_key(query)
        "#{protocol}://api.db-ip.com/v2/#{query.sanitized_text}?" + hash_to_query(cache_key_params(query))
      end

      # Returns the parsed results, or raises/logs on API-reported errors.
      #
      # Bug fixed: the original wrote `raise_error Geocoder::X || Geocoder.log(...)`,
      # which Ruby parses as `raise_error(Geocoder::X || Geocoder.log(...))`; the
      # class constant is always truthy, so the fallback warning was never logged
      # when raising is disabled. Parenthesizing raise_error restores the intended
      # "raise, or else log" behavior.
      def results(query)
        return [] unless (doc = fetch_data(query))
        case doc['error']
        when 'maximum number of queries per day exceeded'
          raise_error(Geocoder::OverQueryLimitError) ||
            Geocoder.log(:warn, 'DB-API query limit exceeded.')
        when 'invalid API key'
          raise_error(Geocoder::InvalidApiKey) ||
            Geocoder.log(:warn, 'Invalid DB-IP API key.')
        when nil
          [doc]
        else
          raise_error(Geocoder::Error) ||
            Geocoder.log(:warn, "Request failed: #{doc['error']}")
        end
      end
    end
  end
end
|
#!/bin/bash
# SLURM batch job: fine-tune METER (CLIP-16 vision + RoBERTa text) on the
# expanded VQA2 dataset with norm clipping 0.7; stdout goes to the results
# file named in the -o directive below.
#SBATCH -N 1
#SBATCH -p res-gpu-small
#SBATCH -c 4
#SBATCH -t 2-00:00
#SBATCH -x gpu[0-8]
#SBATCH --qos short
#SBATCH --job-name vqa2_mao-3_normonly-expanded-nc-pt7_default_METER
#SBATCH --mem 20G
#SBATCH --gres gpu:1
#SBATCH -o ../../../results/vqa2_mao-3_normonly-expanded-nc-pt7_default_METER.out

# Run from the project root with its virtualenv active.
cd ../../..
source venv/bin/activate
# Sacred-style `with key=value` overrides; loads the pretrained METER
# checkpoint and fine-tunes on the VQA task.
python run.py with norm_clipping=0.7 normonly_flag=expanded data_root=data/vqa2_Expanded-nc-gt0.7_occ_gte3_arrow num_gpus=1 num_nodes=1 task_finetune_vqa_clip_bert per_gpu_batchsize=200 load_path=checkpoints/meter_clip16_224_roberta_pretrain.ckpt clip16 text_roberta image_size=224 clip_randaug num_workers=4 loss_type=default vqav2_label_size=209
# When the current working directory changes, run a method that checks for a .env file, then sources it. Happy days.
autoload -U add-zsh-hook

# Source ./.env into the current shell, if one is present here.
load-local-conf() {
  # check file exists, is regular file and is readable:
  if [[ -f .env && -r .env ]]; then
    source .env
  fi
}

# Register the function to run after every directory change (chpwd hook).
add-zsh-hook chpwd load-local-conf
/* -*-mode:java; c-basic-offset:2; indent-tabs-mode:nil -*- */
/* JOrbis
* Copyright (C) 2000 ymnk, JCraft,Inc.
*
* Written by: 2000 ymnk<<EMAIL>>
*
* Many thanks to
* Monty <<EMAIL>> and
* The XIPHOPHORUS Company http://www.xiph.org/ .
* JOrbis has been based on their awesome works, Vorbis codec.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public License
* as published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
package org.jcraft.jorbis;
import org.jcraft.jogg.*;
/**
 * A static (template) codebook as stored in a Vorbis stream header:
 * codeword lengths plus an optional quantized value mapping. Instances
 * are serialized to / deserialized from an ogg bit buffer (pack/unpack)
 * and expanded to float vectors via unquantize().
 */
class StaticCodeBook{
  int dim; // codebook dimensions (elements per vector)
  int entries; // codebook entries
  int[] lengthlist; // codeword lengths in bits

  // mapping
  int maptype; // 0=none
  // 1=implicitly populated values from map column
  // 2=listed arbitrary values

  // The below does a linear, single monotonic sequence mapping.
  int q_min; // packed 32 bit float; quant value 0 maps to minval
  int q_delta; // packed 32 bit float; val 1 - val 0 == delta
  int q_quant; // bits: 0 < quant <= 16
  int q_sequencep; // bitflag

  // additional information for log (dB) mapping; the linear mapping
  // is assumed to actually be values in dB. encodebias is used to
  // assign an error weight to 0 dB. We have two additional flags:
  // zeroflag indicates if entry zero is to represent -Inf dB; negflag
  // indicates if we're to represent negative linear values in a
  // mirror of the positive mapping.
  int[] quantlist; // map == 1: (int)(entries/dim) element column map
  // map == 2: list of dim*entries quantized entry vals

  StaticCodeBook(){
  }

  // Serializes this codebook into the bit buffer.
  // Returns 0 on success, -1 on an inconsistent book (missing quantlist
  // or an unknown maptype).
  int pack(Buffer opb){
    int i;
    boolean ordered=false;

    opb.write(0x564342, 24); // codebook sync pattern ("BCV", written LSB-first)
    opb.write(dim, 16);
    opb.write(entries, 24);

    // pack the codewords. There are two packings; length ordered and
    // length random. Decide between the two now.
    for(i=1; i<entries; i++){
      if(lengthlist[i]<lengthlist[i-1])
        break;
    }
    if(i==entries)
      ordered=true;

    if(ordered){
      // length ordered. We only need to say how many codewords of
      // each length. The actual codewords are generated
      // deterministically
      int count=0;
      opb.write(1, 1); // ordered
      opb.write(lengthlist[0]-1, 5); // 1 to 32

      for(i=1; i<entries; i++){
        int _this=lengthlist[i];
        int _last=lengthlist[i-1];
        if(_this>_last){
          for(int j=_last; j<_this; j++){
            opb.write(i-count, Util.ilog(entries-count));
            count=i;
          }
        }
      }
      opb.write(i-count, Util.ilog(entries-count));
    }
    else{
      // length random. Again, we don't code the codeword itself, just
      // the length. This time, though, we have to encode each length
      opb.write(0, 1); // unordered

      // algorithmic mapping has use for 'unused entries', which we tag
      // here. The algorithmic mapping happens as usual, but the unused
      // entry has no codeword.
      for(i=0; i<entries; i++){
        if(lengthlist[i]==0)
          break;
      }

      if(i==entries){
        opb.write(0, 1); // no unused entries
        for(i=0; i<entries; i++){
          opb.write(lengthlist[i]-1, 5);
        }
      }
      else{
        opb.write(1, 1); // we have unused entries; thus we tag
        for(i=0; i<entries; i++){
          if(lengthlist[i]==0){
            opb.write(0, 1);
          }
          else{
            opb.write(1, 1);
            opb.write(lengthlist[i]-1, 5);
          }
        }
      }
    }

    // is the entry number the desired return value, or do we have a
    // mapping? If we have a mapping, what type?
    opb.write(maptype, 4);
    switch(maptype){
      case 0:
        // no mapping
        break;
      case 1:
      case 2:
        // implicitly populated value mapping
        // explicitly populated value mapping
        if(quantlist==null){
          // no quantlist? error
          return (-1);
        }
        // values that define the dequantization
        opb.write(q_min, 32);
        opb.write(q_delta, 32);
        opb.write(q_quant-1, 4);
        opb.write(q_sequencep, 1);
        {
          int quantvals=0;
          switch(maptype){
            case 1:
              // a single column of (c->entries/c->dim) quantized values for
              // building a full value list algorithmically (square lattice)
              quantvals=maptype1_quantvals();
              break;
            case 2:
              // every value (c->entries*c->dim total) specified explicitly
              quantvals=entries*dim;
              break;
          }
          // quantized values
          for(i=0; i<quantvals; i++){
            opb.write(Math.abs(quantlist[i]), q_quant);
          }
        }
        break;
      default:
        // error case; we don't have any other map types now
        return (-1);
    }
    return (0);
  }

  // unpacks a codebook from the packet buffer into the codebook struct,
  // readies the codebook auxiliary structures for decode
  // Returns 0 on success, -1 on sync loss / truncated data (the book is
  // cleared before returning the error).
  int unpack(Buffer opb){
    int i;
    //memset(s,0,sizeof(static_codebook));

    // make sure alignment is correct
    if(opb.read(24)!=0x564342){
      // goto _eofout;
      clear();
      return (-1);
    }

    // first the basic parameters
    dim=opb.read(16);
    entries=opb.read(24);
    if(entries==-1){
      // goto _eofout;
      clear();
      return (-1);
    }

    // codeword ordering.... length ordered or unordered?
    switch(opb.read(1)){
      case 0:
        // unordered
        lengthlist=new int[entries];

        // allocated but unused entries?
        if(opb.read(1)!=0){
          // yes, unused entries
          for(i=0; i<entries; i++){
            if(opb.read(1)!=0){
              int num=opb.read(5);
              if(num==-1){
                // goto _eofout;
                clear();
                return (-1);
              }
              lengthlist[i]=num+1;
            }
            else{
              lengthlist[i]=0;
            }
          }
        }
        else{
          // all entries used; no tagging
          for(i=0; i<entries; i++){
            int num=opb.read(5);
            if(num==-1){
              // goto _eofout;
              clear();
              return (-1);
            }
            lengthlist[i]=num+1;
          }
        }
        break;
      case 1:
        // ordered
        {
          int length=opb.read(5)+1;
          lengthlist=new int[entries];

          // each field gives a run of codewords sharing the same length
          for(i=0; i<entries;){
            int num=opb.read(Util.ilog(entries-i));
            if(num==-1){
              // goto _eofout;
              clear();
              return (-1);
            }
            for(int j=0; j<num; j++, i++){
              lengthlist[i]=length;
            }
            length++;
          }
        }
        break;
      default:
        // EOF
        return (-1);
    }

    // Do we have a mapping to unpack?
    switch((maptype=opb.read(4))){
      case 0:
        // no mapping
        break;
      case 1:
      case 2:
        // implicitly populated value mapping
        // explicitly populated value mapping
        q_min=opb.read(32);
        q_delta=opb.read(32);
        q_quant=opb.read(4)+1;
        q_sequencep=opb.read(1);
        {
          int quantvals=0;
          switch(maptype){
            case 1:
              quantvals=maptype1_quantvals();
              break;
            case 2:
              quantvals=entries*dim;
              break;
          }

          // quantized values
          quantlist=new int[quantvals];
          for(i=0; i<quantvals; i++){
            quantlist[i]=opb.read(q_quant);
          }
          // -1 on the last read indicates the buffer ran dry
          if(quantlist[quantvals-1]==-1){
            // goto _eofout;
            clear();
            return (-1);
          }
        }
        break;
      default:
        // goto _eofout;
        clear();
        return (-1);
    }
    // all set
    return (0);
    // _errout:
    // _eofout:
    // vorbis_staticbook_clear(s);
    // return(-1);
  }

  // there might be a straightforward one-line way to do the below
  // that's portable and totally safe against roundoff, but I haven't
  // thought of it. Therefore, we opt on the side of caution
  // Computes the largest integer v with v^dim <= entries (the per-scalar
  // value count for maptype 1), correcting the floating-point estimate
  // by integer search.
  private int maptype1_quantvals(){
    int vals=(int)(Math.floor(Math.pow(entries, 1./dim)));

    // the above *should* be reliable, but we'll not assume that FP is
    // ever reliable when bitstream sync is at stake; verify via integer
    // means that vals really is the greatest value of dim for which
    // vals^b->bim <= b->entries
    // treat the above as an initial guess
    while(true){
      int acc=1;
      int acc1=1;
      for(int i=0; i<dim; i++){
        acc*=vals;
        acc1*=vals+1;
      }
      if(acc<=entries&&acc1>entries){
        return (vals);
      }
      else{
        if(acc>entries){
          vals--;
        }
        else{
          vals++;
        }
      }
    }
  }

  // No auxiliary decode structures are allocated in this port, so there
  // is nothing to free here (kept for parity with the C original).
  void clear(){
  }

  // unpack the quantized list of values for encode/decode
  // we need to deal with two map types: in map type 1, the values are
  // generated algorithmically (each column of the vector counts through
  // the values in the quant vector). in map type 2, all the values came
  // in in an explicit list. Both value lists must be unpacked
  // Returns a dim-per-entry float array, or null when maptype==0.
  float[] unquantize(){
    if(maptype==1||maptype==2){
      int quantvals;
      float mindel=float32_unpack(q_min);
      float delta=float32_unpack(q_delta);
      float[] r=new float[entries*dim];

      // maptype 1 and 2 both use a quantized value vector, but
      // different sizes
      switch(maptype){
        case 1:
          // most of the time, entries%dimensions == 0, but we need to be
          // well defined. We define that the possible vales at each
          // scalar is values == entries/dim. If entries%dim != 0, we'll
          // have 'too few' values (values*dim<entries), which means that
          // we'll have 'left over' entries; left over entries use zeroed
          // values (and are wasted). So don't generate codebooks like that
          quantvals=maptype1_quantvals();
          for(int j=0; j<entries; j++){
            float last=0.f;
            int indexdiv=1;
            for(int k=0; k<dim; k++){
              int index=(j/indexdiv)%quantvals;
              float val=quantlist[index];
              val=Math.abs(val)*delta+mindel+last;
              if(q_sequencep!=0)
                last=val;
              r[j*dim+k]=val;
              indexdiv*=quantvals;
            }
          }
          break;
        case 2:
          for(int j=0; j<entries; j++){
            float last=0.f;
            for(int k=0; k<dim; k++){
              float val=quantlist[j*dim+k];
              //if((j*dim+k)==0){System.err.println(" | 0 -> "+val+" | ");}
              val=Math.abs(val)*delta+mindel+last;
              if(q_sequencep!=0)
                last=val;
              r[j*dim+k]=val;
              //if((j*dim+k)==0){System.err.println(" $ r[0] -> "+r[0]+" | ");}
            }
          }
          //System.err.println("\nr[0]="+r[0]);
      }
      return (r);
    }
    return (null);
  }

  // 32 bit float (not IEEE; nonnormalized mantissa +
  // biased exponent) : neeeeeee eeemmmmm mmmmmmmm mmmmmmmm
  // Why not IEEE? It's just not that important here.
  static final int VQ_FEXP=10;
  static final int VQ_FMAN=21;
  static final int VQ_FEXP_BIAS=768; // bias toward values smaller than 1.

  // doesn't currently guard under/overflow
  // NOTE(review): the arithmetic below is all 32-bit int; when the sign
  // bit is set the int result is negative and gets sign-extended into the
  // returned long (upper 32 bits become 1s). Presumably callers use only
  // the low 32 bits — confirm before relying on the long value directly.
  static long float32_pack(float val){
    int sign=0;
    int exp;
    int mant;
    if(val<0){
      sign=0x80000000;
      val=-val;
    }
    exp=(int)Math.floor(Math.log(val)/Math.log(2));
    mant=(int)Math.rint(Math.pow(val, (VQ_FMAN-1)-exp));
    exp=(exp+VQ_FEXP_BIAS)<<VQ_FMAN;
    return (sign|exp|mant);
  }

  // Inverse of float32_pack: expand the packed sign/exponent/mantissa
  // representation back to a float.
  static float float32_unpack(int val){
    float mant=val&0x1fffff;
    float exp=(val&0x7fe00000)>>>VQ_FMAN;
    if((val&0x80000000)!=0)
      mant=-mant;
    return (ldexp(mant, ((int)exp)-(VQ_FMAN-1)-VQ_FEXP_BIAS));
  }

  // foo * 2^e, like C's ldexp (no denormal/overflow special-casing).
  static float ldexp(float foo, int e){
    return (float)(foo*Math.pow(2, e));
  }
}
|
require 'uri'
module EMRPC
# Pid is a abbreviation for "process id". Pid represents so-called lightweight process (like in Erlang OTP)
# Pids can be created, connected, disconnected, spawned, killed.
# When pid is created, it exists on its own.
# When someone connects to the pid, connection is established.
# When pid is killed, all its connections are unbinded.
module Pid
attr_accessor :uuid, :connections, :killed, :options
attr_accessor :_em_server_signature, :_protocol, :_bind_address
include DefaultCallbacks
include ProtocolMapper
# FIXME: doesn't override user-defined callbacks
include DebugPidCallbacks if $DEBUG
# shorthand for console testing
def self.new(*attributes)
# We create random global const to workaround Marshal.dump issue:
# >> Marshal.dump(Class.new.new)
# TypeError: can't dump anonymous class #<Class:0x5b5338>
#
const_set("DynamicPidClass#{rand(2**128).to_s(16).upcase}", Class.new {
include Pid
attr_accessor(*attributes)
}).new
end
def initialize(*args, &blk)
@uuid = _random_uuid
@options = {:uuid => @uuid}
_common_init
super(*args, &blk) rescue nil
end
def spawn(cls, *args, &blk)
pid = cls.new(*args, &blk)
connect(pid)
pid
end
def tcp_spawn(addr, cls, *args, &blk)
pid = spawn(cls, *args, &blk)
pid.bind(addr)
pid
end
def thread_spawn(cls, *args, &blk)
# TODO: think about thread-safe passing messages back to sender.
end
def bind(addr)
raise "Pid is already binded!" if @_em_server_signature
@_bind_address = addr.parsed_uri
this = self
@_em_server_signature = make_server_connection(@_bind_address, _protocol) do |conn|
conn.local_pid = this
conn.address = addr
end
end
# 1. Connect to the pid.
# 2. When connection is established, asks for uuid.
# 3. When uuid is received, triggers callback on the client.
# (See Protocol for details)
def connect(addr, connected_callback = nil, disconnected_callback = nil)
c = if addr.is_a?(Pid) && pid = addr
LocalConnection.new(self, pid)
else
this = self
make_client_connection(addr, _protocol) do |conn|
conn.local_pid = this
conn.address = addr
end
end
c.connected_callback = connected_callback
c.disconnected_callback = disconnected_callback
c
end
def disconnect(pid, disconnected_callback = nil)
c = @connections[pid.uuid]
c.disconnected_callback = disconnected_callback if disconnected_callback
c.close_connection_after_writing
end
def kill
return if @killed
if @_em_server_signature
EventMachine.stop_server(@_em_server_signature)
end
@connections.each do |uuid, conn|
conn.close_connection_after_writing
end
@connections.clear
@killed = true
end
# TODO:
# When connecting to a spawned pid, we should transparantly discard TCP connection
# in favor of local connection.
def connection_established(pid, conn)
@connections[pid.uuid] ||= conn
__send__(conn.connected_callback, pid)
@connections[pid.uuid].remote_pid || pid # looks like hack, but it is not.
end
def connection_unbind(pid, conn)
@connections.delete(pid.uuid)
__send__(conn.disconnected_callback, pid)
end
#
# Util
#
def options=(opts)
@options = opts
@options[:uuid] = @uuid
@options
end
def killed?
@killed
end
def find_pid(uuid)
return self if uuid == @uuid
((conn = @connections[uuid]) and conn.remote_pid) or raise "Pid #{_uid} was not found in a #{self.inspect}"
end
def marshal_dump
@uuid
end
def marshal_load(uuid)
_common_init
@uuid = uuid
end
def connection_uuids
(@connections || {}).keys
end
def pid_class_name
"Pid"
end
def inspect
return "#<#{pid_class_name}:#{_uid} KILLED>" if @killed
"#<#{pid_class_name}:#{_uid} connected to #{connection_uuids.map{|u|_uid(u)}.inspect}>"
end
# Two pids are equal when both are Pids sharing the same uuid.
def ==(other)
  return false unless other.is_a?(Pid)
  other.uuid == @uuid
end
# Shortened uuid (first six characters) for pretty output; passes nil
# (and false) through unchanged.
def _uid(uuid = @uuid)
  uuid ? uuid[0, 6] : uuid
end
#
# Private, but accessible from outside methods are prefixed with underscore.
#
# Lazily builds and memoizes the protocol module. Goes through the
# private writer via __send__ so the composition logic lives in one place.
def _protocol
  @_protocol ||= self.__send__(:_protocol=, RemoteConnection)
end
# Composes the base protocol +p+ with marshalling and message-framing
# layers (plus a debug layer when $DEBUG is set) into the module used
# for every connection.
def _protocol=(p)
  @_protocol = Util.combine_modules(
    p,
    MarshalProtocol.new(Marshal),
    FastMessageProtocol,
    $DEBUG ? DebugConnection : Module.new
  )
end
# TODO: remove this in favor of using codec.rb
# Rewrites pid placeholders inside +args+ in place (monkey-patched
# helper on Array) before forwarding to #send.
def _send_dirty(*args)
  args._initialize_pids_recursively_d4d309bd!(self)
  send(*args)
end
private
# Shared initialization for freshly created and unmarshalled pids.
def _common_init
  @connections = {} # pid.uuid -> connection
end
# Generates a random 128-bit identifier as 32 lowercase hex characters.
# Resolves the FIXME: uses SecureRandom (CSPRNG) instead of Kernel#rand,
# and is always fixed-width (the old rand(2**128).to_s(16) dropped
# leading zeros, yielding variable-length ids).
def _random_uuid
  require 'securerandom'
  SecureRandom.hex(16)
end
end # Pid
end # EMRPC
|
import React from 'react';
import { createUseStyles } from 'react-jss';
// JSS styles for the heading bar. The border color comes from a
// JupyterLab CSS variable (--jp-*), so the divider follows the active theme.
const useStyles = createUseStyles({
heading: {
borderBottom: '1px solid var(--jp-border-color2)',
margin: 0,
padding: '24px 16px 8px 16px',
fontWeight: 'bold',
textAlign: 'start',
fontSize: '9pt',
textTransform: 'uppercase'
}
});
interface HorizontalHeadingProps {
/** Text rendered inside the heading bar (displayed uppercased via CSS). */
title: string;
}
/** Section heading: bold, uppercased text with a bottom divider. */
export const HorizontalHeading: React.FC<HorizontalHeadingProps> = ({ title }) => {
const classes = useStyles();
return <div className={classes.heading}>{title}</div>;
};
|
import datetime
import json
import time
import okex.futures_api as future
from common_helper import convert_tv2ok_resolution, convert_timestamp2ok
from config_helper import diamond
# Load OKEx API credentials from the central config store.
okex_cfg = diamond.get_exchange_auth('okex')
api_key = okex_cfg.get('api_key')
secret = okex_cfg.get('secret')
passphrase = okex_cfg.get('passphrase')
# Module-level futures API client shared by all requests.
# NOTE(review): the final positional True flag's meaning (use-server-time?)
# should be confirmed against okex.futures_api.FutureAPI.
okex = future.FutureAPI(api_key, secret, passphrase, True)
def convert_iso2timestamp(iso_time):
    """Convert an ISO-8601 UTC string (e.g. '1984-06-02T19:05:00.000Z')
    to seconds since the Unix epoch, as a float."""
    epoch = datetime.datetime(1970, 1, 1)
    parsed = datetime.datetime.strptime(iso_time, '%Y-%m-%dT%H:%M:%S.%fZ')
    return (parsed - epoch).total_seconds()
def convert_timestamp2iso(timestamp):
    """Convert epoch seconds to an ISO-8601 UTC string with millisecond
    precision, e.g. 455051100.0 -> '1984-06-02T19:05:00.000Z'.

    Inverse of convert_iso2timestamp. Bug fix: the original format string
    used '%.3f', which is not a valid strftime directive, so the function
    never produced the intended '.mmmZ' suffix.
    """
    utc_dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp)
    millis = utc_dt.microsecond // 1000
    return utc_dt.strftime('%Y-%m-%dT%H:%M:%S') + '.%03dZ' % millis
def get_history_kline(symbol, time_from, time_to, resolution):
    """Fetch OHLCV candles for ``symbol`` between epoch seconds ``time_from``
    and ``time_to`` and return a TradingView-style JSON string with keys
    s/t/o/h/l/c/v. Returns None after more than 10 consecutive failures.

    The exchange caps each request at ``limit_num`` candles, so the range
    is walked in windows of ``resolution * limit_num`` seconds.

    Bug fixes vs. the original:
    - ``retry_cnt`` was never incremented, so the >10 bail-out could never
      trigger and a flaky API caused an endless print/sleep loop.
    - a failed request fell through to the window-advance logic, silently
      skipping that window's data; we now retry the same window.
    """
    resolution = convert_tv2ok_resolution(resolution)
    complete_kline = []
    start_time = time_from
    end_time = time_to
    limit_num = 200  # per-request candle cap imposed by the exchange
    retry_cnt = 0
    while True:
        if end_time > time_to:
            end_time = time_to
        if end_time - start_time > resolution * limit_num:
            # Window would exceed the request limit; shrink it.
            end_time = start_time + resolution * limit_num
        try:
            resp = okex.get_kline(instrument_id=symbol,
                                  start=convert_timestamp2ok(start_time),
                                  end=convert_timestamp2ok(end_time),
                                  granularity=resolution)
            # The API returns newest-first; store oldest-first.
            complete_kline.extend(resp[::-1])
            retry_cnt = 0  # success resets the failure budget
        except Exception:
            print('get kline error.')
            retry_cnt += 1
            if retry_cnt > 10:
                return None
            time.sleep(5)
            continue  # retry the same window instead of skipping it
        if end_time >= time_to:
            break
        start_time += resolution * limit_num
        end_time = start_time + resolution * limit_num
    print(len(complete_kline))
    # print(complete_kline)
    t, o, h, l, c, v = [], [], [], [], [], []
    for elem in complete_kline:
        # elem layout: [iso_time, open, high, low, close, volume]
        t.append(int(float(convert_iso2timestamp(elem[0]))))
        o.append(float(elem[1]))
        h.append(float(elem[2]))
        l.append(float(elem[3]))
        c.append(float(elem[4]))
        v.append(float(elem[5]))
    result = {
        "s": "ok",
        "t": t,
        "o": o,
        "h": h,
        "l": l,
        "c": c,
        "v": v
    }
    return json.dumps(result)
if __name__ == '__main__':
    # test = get_history_kline('EOS-USD-181228', 1544371200, 1544457600, '5')
    # print(test)
    # Smoke-check the ISO -> epoch conversion.
    t = '1984-06-02T19:05:00.000Z'
    t2 = convert_iso2timestamp(t)
    print(t2)
|
"use strict";

// Auto-generated (transpiled) icon definition: SVG viewBox and path data
// for the pauseCircleO glyph. Do not edit the path data by hand.
Object.defineProperty(exports, "__esModule", {
  value: true
});
exports.pauseCircleO = void 0;
var pauseCircleO = {
  "viewBox": "0 0 1536 1792",
  "children": [{
    "name": "path",
    "attribs": {
      "d": "M768 128q209 0 385.5 103t279.5 279.5 103 385.5-103 385.5-279.5 279.5-385.5 103-385.5-103-279.5-279.5-103-385.5 103-385.5 279.5-279.5 385.5-103zM768 1440q148 0 273-73t198-198 73-273-73-273-198-198-273-73-273 73-198 198-73 273 73 273 198 198 273 73zM864 1216q-14 0-23-9t-9-23v-576q0-14 9-23t23-9h192q14 0 23 9t9 23v576q0 14-9 23t-23 9h-192zM480 1216q-14 0-23-9t-9-23v-576q0-14 9-23t23-9h192q14 0 23 9t9 23v576q0 14-9 23t-23 9h-192z"
    }
  }]
};
exports.pauseCircleO = pauseCircleO;
<filename>impl/src/main/java/com/example/demo/model/SystemUser.java
package com.example.demo.model;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import lombok.ToString;
import org.hibernate.annotations.GenericGenerator;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.ManyToMany;
import javax.persistence.Transient;
import javax.transaction.Transactional;
import java.util.Set;
/**
 * Persistent user account: login credentials plus the set of granted roles.
 *
 * Review fixes:
 * - Removed {@code @Transactional}: transaction demarcation belongs on
 *   service/repository methods, not on a JPA entity class, where the
 *   annotation has no effect and is misleading.
 * - Excluded the password fields from the Lombok-generated
 *   {@code toString()} so credentials cannot leak into logs.
 */
@Entity
@Getter
@Setter
@NoArgsConstructor
@ToString(exclude = {"password", "passwordConfirm"})
@JsonIgnoreProperties({"hibernateLazyInitializer", "handler"})
public class SystemUser {

    /** Primary key, generated as a UUID string by Hibernate on insert. */
    @Id
    @GeneratedValue(generator = "UUID")
    @GenericGenerator(
            name = "UUID",
            strategy = "org.hibernate.id.UUIDGenerator"
    )
    private String id;

    private String login;

    private String password;

    /** Confirmation field used during registration only; never persisted. */
    @Transient
    private String passwordConfirm;

    /**
     * NOTE(review): CascadeType.ALL on a many-to-many means deleting a user
     * cascades to shared Role rows — confirm this is intended.
     */
    @ManyToMany(cascade = CascadeType.ALL, fetch = FetchType.EAGER)
    private Set<Role> roles;

    public SystemUser(String login, String password) {
        this.login = login;
        this.password = password;
    }
}
|
<reponame>lautr/slide-monitor-webapp<filename>js/app.js
// Contentful delivery client; space ID and access token come from the
// page-level `config` object.
const client = contentful.createClient({
  // This is the space ID. A space is like a project folder in Contentful terms
  space: config.contentful.space,
  // This is the access token for this space. Normally you get both ID and the token in the Contentful web app
  accessToken: config.contentful.accessToken
});
(function(angular) {
// Shows a small UIkit toast unless notifications are disabled via
// config.notify === false.
// BUG FIX: the original condition `true === (config.notify || true)` could
// never be turned off (false || true is true) and, conversely, any truthy
// non-`true` value (e.g. a string) silently disabled notifications.
const notify = (message) => {
  if (config.notify !== false) {
    UIkit.notification({
      message: message,
      status: 'primary',
      pos: 'top-right',
      timeout: 2000
    });
  }
}
const app = angular.module('slideMonitorWebapp', ['ngSanitize']);
// Page title: the configured title with all HTML tags stripped.
app.controller('TitleController', function($scope) {
  $scope.pageTitle = String((config.title || '<b>EXAMPLE</b>-TV')).replace(/<[^>]+>/gm, '')
})
// Periodically fetches the current Berlin weather and exposes it (plus a
// matching background image) on the scope.
// NOTE(review): the OpenWeatherMap API key is hardcoded and the request
// uses plain http — move the key into config and switch to https.
app.controller('WeatherController', function ($scope) {
  $scope.weatherImage = ''
  $scope.weather = {}
  var updateWeather = () => {
    axios.get('http://api.openweathermap.org/data/2.5/weather?q=Berlin&appid=68f132334c5ac35049c0c1afbc431dde&units=metric')
      .then(function (response) {
        return response.data
      })
      .catch(function (error) {
        console.log(error)
      })
      .then(function (data) {
        // BUG FIX: when the request fails, the catch above resolves the
        // chain with undefined and the original crashed on data.weather[0].
        if (!data) return
        notify('Updating Weather')
        $scope.weather = data
        $scope.weatherImage = 'url('+config.weather.backgrounds[data.weather[0].description]+')'
        console.log($scope.weatherImage)
        $scope.$apply()
      })
  }
  updateWeather()
  setInterval(() => {
    updateWeather()
  }, config.frequency)
})
// Periodically loads slide images from Contentful and (re)initializes the
// UIkit slider with them.
app.controller('SlideController', function($scope) {
  var updateSlider = () => {
    $scope.slides = []
    client.getEntries({
      'content_type': config.contentful.identifiers.slides.type
    }).then((entries) => {
      $scope.$apply(function() {
        entries.items.forEach((item, index) => {
          // Build CSS url() values; image width capped via the ?w= query param.
          $scope.slides.push('url(https:'+item.fields[config.contentful.identifiers.slides.property].fields.file.url+'?w='+config.slide.maxWidth+')')
        })
      })
      $scope.$apply()
      // NOTE(review): a new slider instance is created on every refresh —
      // confirm UIkit tears the previous one down.
      var slider = UIkit.slider('#slider-element', {
        center: true,
        autoplay: true,
        autoplayInterval: config.slide.speed
      });
      notify('Updating Slider')
    }).catch(err => console.log(err))
  }
  updateSlider()
  setInterval(() => {
    updateSlider()
  }, config.frequency)
})
// Periodically loads ticker texts from Contentful and feeds them into a
// dynamic-marquee loop.
app.controller('MarqueeController', function($scope) {
  var $marquee = document.getElementById('MarqueeOutput');
  // Also stored on window (window.m / window.l below) for console debugging.
  var marquee = window.m = new dynamicMarquee.Marquee($marquee, { rate: config.text.speed });
  var updateMarquee = () => {
    client.getEntries({
      'content_type': config.contentful.identifiers.texts.type
    }).then((entries) => {
      $scope.$apply(function() {
        marquee.clear()
        var tempTexts = []
        entries.items.forEach((item, index) => {
          // dynamicMarquee.loop expects item factories, hence the wrappers.
          tempTexts.push(function () {
            return item.fields[config.contentful.identifiers.texts.property]
          })
        })
        window.l = dynamicMarquee.loop(marquee, tempTexts, function() {
          // Separator element inserted between looped texts.
          var $separator = document.createElement('div');
          $separator.innerHTML = config.text.separator;
          return $separator;
        });
      })
      notify('Updating Marquee')
    }).catch(err => console.log(err))
  }
  updateMarquee()
  setInterval(() => {
    updateMarquee()
  }, config.frequency)
})
// Title shown in the top bar; falls back to the demo default when the
// config does not provide one.
app.controller('TopController', function($scope) {
  var configuredTitle = config.title;
  $scope.monitorTitle = configuredTitle || '<b>EXAMPLE</b>-TV'
})
})(window.angular); |
def calculate_total_cost(prices, quantities):
    """Return the sum of price * quantity over paired items.

    Pairing follows ``zip`` semantics: extra elements in the longer
    sequence are ignored. Empty inputs yield 0.
    """
    return sum(price * qty for price, qty in zip(prices, quantities))
/**
 * Returns true when wordA and wordB are anagrams of each other, ignoring
 * whitespace and letter case.
 *
 * Fixes vs. the original: the old version only compared counts of a-z, so
 * strings differing only in digits, punctuation or non-ASCII letters
 * (e.g. "ab1" vs "ab2") were wrongly reported as anagrams. It also
 * re-scanned both strings 26 times; this version counts every character
 * in a single pass per string.
 */
function checkAnagram(wordA, wordB) {
  const a = wordA.replace(/\s/g, '').toLowerCase();
  const b = wordB.replace(/\s/g, '').toLowerCase();
  if (a.length != b.length) return false;
  const counts = new Map();
  for (const ch of a) counts.set(ch, (counts.get(ch) || 0) + 1);
  for (const ch of b) {
    const remaining = counts.get(ch);
    if (!remaining) return false; // char absent or over-used in b
    counts.set(ch, remaining - 1);
  }
  return true;
}
<filename>src/bonespring/implicit_euler/SpringFloat.ts
import SpringBase from './SpringBase';
// implicit euler spring
// Scalar damped spring integrated with an implicit (backward) Euler step,
// which remains numerically stable even at large time steps.
class SpringFloat extends SpringBase {
  // #region MAIN
  vel = 0; // Velocity
  val = 0; // Current Value
  tar = 0; // Target Value
  // #endregion ///////////////////////////////////////////////////////////////////
  // #region SETTERS / GETTERS

  /** Sets the value the spring settles toward; chainable. */
  setTarget( v: number ){ this.tar = v; return this; }

  /** Zeroes the velocity and pins value/target to v (or to 0 when omitted); chainable. */
  reset( v ?: number ): this{
    this.vel = 0;
    if( v != undefined ){
      this.val = v;
      this.tar = v;
    }else{
      this.val = 0
      this.tar = 0;
    }
    return this;
  }
  // #endregion ///////////////////////////////////////////////////////////////////

  /**
   * Advances the spring by dt seconds.
   * Returns false when already at rest (no state change); true otherwise.
   * When velocity and distance-to-target both drop below epsilon, the
   * value snaps exactly onto the target to terminate the motion.
   * NOTE(review): damping, oscPerSec and epsilon are inherited from
   * SpringBase — confirm their units there (oscPerSec is squared into the
   * stiffness term below).
   */
  update( dt: number ): boolean{
    if( this.vel == 0 && this.tar == this.val ) return false;
    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if ( Math.abs( this.vel ) < this.epsilon && Math.abs( this.tar - this.val ) < this.epsilon ) {
      this.vel = 0;
      this.val = this.tar;
      return true;
    }
    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // Implicit-Euler update: solve velocity first, then value; det_inv is
    // the inverse determinant of the 2x2 implicit system.
    let friction = 1.0 + 2.0 * dt * this.damping * this.oscPerSec,
      dt_osc = dt * this.oscPerSec**2,
      dt2_osc = dt * dt_osc,
      det_inv = 1.0 / ( friction + dt2_osc );
    this.vel = ( this.vel + dt_osc * ( this.tar - this.val ) ) * det_inv;
    this.val = ( friction * this.val + dt * this.vel + dt2_osc * this.tar ) * det_inv;
    return true;
  }
}
export default SpringFloat;
#!/bin/sh
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
# BEGIN environment bootstrap section
# Do not edit between here and END as this section should stay identical in all scripts
# findpath: sets $mypath to the directory this script lives in (absolute
# when invoked via a relative path), falling back to $PWD; exits fatally
# when the location cannot be determined.
# NOTE(review): part of the shared bootstrap section that must stay
# identical across scripts — sync any change everywhere.
findpath () {
    myname=${0}
    mypath=${myname%/*}
    myname=${myname##*/}
    # Empty when $mypath is absolute (starts with '/').
    empty_if_start_slash=${mypath%%/*}
    if [ "${empty_if_start_slash}" ]; then
        mypath=$(pwd)/${mypath}
    fi
    if [ "$mypath" ] && [ -d "$mypath" ]; then
        return
    fi
    mypath=$(pwd)
    if [ -f "${mypath}/${myname}" ]; then
        return
    fi
    echo "FATAL: Could not figure out the path where $myname lives from $0"
    exit 1
}
COMMON_ENV=libexec/vespa/common-env.sh
# source_common_env: sources $VESPA_HOME/$COMMON_ENV when VESPA_HOME points
# at a valid install; returns non-zero otherwise so callers can keep probing.
source_common_env () {
    if [ "$VESPA_HOME" ] && [ -d "$VESPA_HOME" ]; then
        export VESPA_HOME
        common_env=$VESPA_HOME/$COMMON_ENV
        if [ -f "$common_env" ]; then
            . $common_env
            return
        fi
    fi
    return 1
}
# findroot: locates VESPA_HOME by trying, in order: the existing
# VESPA_HOME, $ROOT, then each ancestor of the script's own directory;
# exits fatally when no directory contains $COMMON_ENV.
findroot () {
    source_common_env && return
    if [ "$VESPA_HOME" ]; then
        echo "FATAL: bad VESPA_HOME value '$VESPA_HOME'"
        exit 1
    fi
    if [ "$ROOT" ] && [ -d "$ROOT" ]; then
        VESPA_HOME="$ROOT"
        source_common_env && return
    fi
    findpath
    # Walk up from the script directory until common-env.sh is found.
    while [ "$mypath" ]; do
        VESPA_HOME=${mypath}
        source_common_env && return
        mypath=${mypath%/*}
    done
    echo "FATAL: missing VESPA_HOME environment variable"
    echo "Could not locate $COMMON_ENV anywhere"
    exit 1
}
# findhost: resolves and exports VESPA_HOSTNAME (several fallbacks down to
# "localhost") and validates it with vespa-validate-hostname when present.
findhost () {
    if [ "${VESPA_HOSTNAME}" = "" ]; then
        VESPA_HOSTNAME=$(vespa-detect-hostname || hostname -f || hostname || echo "localhost") || exit 1
    fi
    validate="${VESPA_HOME}/bin/vespa-validate-hostname"
    if [ -f "$validate" ]; then
        "$validate" "${VESPA_HOSTNAME}" || exit 1
    fi
    export VESPA_HOSTNAME
}
findroot
findhost
# END environment bootstrap section

ROOT=${VESPA_HOME%/}
export ROOT
# Fix: quote $ROOT — an installation path containing spaces would
# word-split with the original unquoted cd.
cd "$ROOT" || { echo "Cannot cd to $ROOT" 1>&2; exit 1; }
# JVM options for the logserver: small fixed heap, bounded metaspace and
# direct memory, deep stack traces kept for diagnostics.
addopts="-server -Xms32m -Xmx256m -XX:CompressedClassSpaceSize=32m -XX:MaxDirectMemorySize=32m -XX:ThreadStackSize=448 -XX:MaxJavaStackTraceDepth=1000 -XX:ActiveProcessorCount=2 -XX:-OmitStackTraceInFastThrow -Djava.io.tmpdir=${VESPA_HOME}/tmp"
oomopt="-XX:+ExitOnOutOfMemoryError"
jar="-jar $ROOT/lib/jars/logserver-jar-with-dependencies.jar"
export MALLOC_ARENA_MAX=1 #Does not need fast allocation
# $addopts and $jar are intentionally unquoted so they word-split into
# separate JVM arguments.
exec java $addopts "$oomopt" "$@" $jar
|
<reponame>SadashivBirajdar/RealmDagger2MVP<gh_stars>1-10
package sample.sadashiv.examplerealmmvp.application;
import android.app.Application;
import android.content.Context;
import io.realm.Realm;
import io.realm.RealmConfiguration;
import sample.sadashiv.examplerealmmvp.di.component.ApplicationComponent;
import sample.sadashiv.examplerealmmvp.di.component.DaggerApplicationComponent;
import sample.sadashiv.examplerealmmvp.di.module.ApplicationModule;
/**
 * Application singleton: initializes the default Realm configuration and
 * builds the Dagger application component at process start.
 */
public class BooksApplication extends Application {
    private static BooksApplication sInstance;
    private ApplicationComponent mApplicationComponent;

    @Override
    public void onCreate() {
        super.onCreate();
        sInstance = this;
        // Realm is set up before Dagger; NOTE(review): confirm whether any
        // Dagger module depends on Realm being initialized first.
        initRealmConfiguration();
        initDagger();
    }

    /** Convenience accessor via any Context. */
    public static BooksApplication get(Context context) {
        return (BooksApplication) context.getApplicationContext();
    }

    /** Process-wide singleton instance (set in onCreate). */
    public static BooksApplication getInstance() {
        return sInstance;
    }

    // Default Realm: file "sample.realm", schema v0; on schema mismatch the
    // local database is deleted rather than migrated.
    private void initRealmConfiguration() {
        Realm.init(this);
        RealmConfiguration realmConfiguration = new RealmConfiguration.Builder().name(
                "sample.realm").schemaVersion(0).deleteRealmIfMigrationNeeded().build();
        Realm.setDefaultConfiguration(realmConfiguration);
    }

    // Builds the application-scoped Dagger graph and injects this instance.
    private void initDagger() {
        mApplicationComponent = DaggerApplicationComponent
                .builder()
                .applicationModule(new ApplicationModule(sInstance))
                .build();
        mApplicationComponent.inject(this);
    }

    /** Application-scoped Dagger component for downstream injection. */
    public ApplicationComponent component() {
        return mApplicationComponent;
    }
}
|
# Start only the proxy and client services from docker-compose.yml
# (foreground, with aggregated logs).
docker-compose up proxy client
<gh_stars>1-10
package com.github.ayltai.hknews.util;
import android.os.Build;
import org.junit.Assert;
import org.junit.Test;
import com.github.ayltai.hknews.UnitTest;
/**
 * Unit tests for DevUtils: environment detection flags and the StrictMode
 * policies it builds.
 */
public final class DevUtilsTest extends UnitTest {
    /** Logging must be disabled when running as a unit test. */
    @Test
    public void testIsLoggable() {
        Assert.assertFalse(DevUtils.isLoggable());
    }

    @Test
    public void testIsRunningTests() {
        Assert.assertTrue(DevUtils.isRunningTests());
    }

    @Test
    public void testIsRunningUnitTest() {
        Assert.assertTrue(DevUtils.isRunningUnitTest());
    }

    /** This suite is a JVM unit test, never an instrumented (on-device) test. */
    @Test
    public void testIsRunningInstrumentedTest() {
        Assert.assertFalse(DevUtils.isRunningInstrumentedTest());
    }

    /**
     * The expected strings are toString() renderings of StrictMode
     * ThreadPolicy bitmasks; each SDK range enables a different detection
     * set, hence one expected mask per Build.VERSION branch. SDK < 19 is
     * intentionally not asserted.
     */
    @Test
    public void testNewThreadPolicy() {
        if (Build.VERSION.SDK_INT >= 26) {
            Assert.assertEquals("[StrictMode.ThreadPolicy; mask=1114172]", DevUtils.newThreadPolicy().toString());
        } else if (Build.VERSION.SDK_INT >= 23) {
            Assert.assertEquals("[StrictMode.ThreadPolicy; mask=1114140]", DevUtils.newThreadPolicy().toString());
        } else if (Build.VERSION.SDK_INT >= 19) {
            Assert.assertEquals("[StrictMode.ThreadPolicy; mask=2076]", DevUtils.newThreadPolicy().toString());
        }
    }

    /** Same pattern as above, for the VmPolicy bitmasks. */
    @Test
    public void testNewVmPolicy() {
        if (Build.VERSION.SDK_INT >= 28) {
            Assert.assertEquals("[StrictMode.VmPolicy; mask=1073854208]", DevUtils.newVmPolicy().toString());
        } else if (Build.VERSION.SDK_INT >= 26) {
            Assert.assertEquals("[StrictMode.VmPolicy; mask=112384]", DevUtils.newVmPolicy().toString());
        } else if (Build.VERSION.SDK_INT >= 23) {
            Assert.assertEquals("[StrictMode.VmPolicy; mask=79616]", DevUtils.newVmPolicy().toString());
        } else if (Build.VERSION.SDK_INT >= 19) {
            Assert.assertEquals("[StrictMode.VmPolicy; mask=28176]", DevUtils.newVmPolicy().toString());
        }
    }
}
|
import { BaseBlockInterface } from '../block.interface';
import { FileObjectInterface } from '../common/file-object.interface';
/** Content block whose payload is a file attachment. */
export interface FileBlockInterface extends BaseBlockInterface {
  /** The attached file object (see FileObjectInterface for its fields). */
  file: FileObjectInterface;
}
|
/**
 * The high score handler in the game
 *
 * Shows ten high scores with the name of the owner of the high score.
 *
 * @param {Object} canvas - Canvas DOM element; used for event listeners
 *                          and mouse hit testing.
 * @param {Object} status - Game status object whose setGameStatus(name)
 *                          switches screens.
 */
/*global Vector */
/*global isIntersect */
function HighScore(canvas, status) {
  this.canvas = canvas;
  this.status = status;
  this.isHoverOverStart = false;
  this.isHoverOverContinue = false;
  this.highScoreList = null;
  // Pagination: index of the first displayed entry, page size of ten.
  this.highScoreOffset = 0;
  this.numItemHighScoreList = 10;
  this.yPosStart = 160;
  this.yPosLeftArrow = 0;
  this.yPosRightArrow = 0;
  // Mystery ship (decorative sprite cruising across the screen)
  this.position = new Vector(10, 50);
  this.velocity = new Vector(2, 2);
  this.width = 35;
  this.height = 15;
  this.arrowSize = 20;
  this.img = new window.Image();
  this.img.src = "img/game/mystery_ship.png";
  this.reachedBorder = false;
  this.direction = "right";
  // High scores arrows (white = idle, green = hovered)
  this.isHooverOverLeftArrow = false;
  this.isHooverOverRightArrow = false;
  this.arrowWhiteLeftImg = new window.Image();
  this.arrowWhiteLeftImg.src = "img/game/arrow_white_left.png";
  this.arrowWhiteRightImg = new window.Image();
  this.arrowWhiteRightImg.src = "img/game/arrow_white_right.png";
  this.arrowGreenLeftImg = new window.Image();
  this.arrowGreenLeftImg.src = "img/game/arrow_green_left.png";
  this.arrowGreenRightImg = new window.Image();
  this.arrowGreenRightImg.src = "img/game/arrow_green_right.png";
  // Event listeners. Bound once so the same references can be passed to
  // removeEventListener later (needed when removing bound functions).
  this.onMouseClickPlay = this.checkPlayGame.bind(this);
  this.onMouseClickContinue = this.checkContinue.bind(this);
  this.onMouseClickLeftArrow = this.checkLeftArrow.bind(this);
  this.onMouseClickRightArrow = this.checkRightArrow.bind(this);
  this.onMouseMove = this.mouseMove.bind(this);
}
/**
 * The prototype of the high score describing the characteristics of the high
 * score.
 *
 * @type {Object}
 */
HighScore.prototype = {
  /**
   * Gets the first ten high scores on the high score list from the database
   * (one extra row is requested so we can tell whether a next page exists).
   * Adds the click and mouse move event listeners.
   *
   * @return {void}
   */
  start: function() {
    this.highScoreOffset = 0;
    this.isHoverOverStart = false;
    this.isHoverOverContinue = false;
    this.isHooverOverLeftArrow = false;
    this.isHooverOverRightArrow = false;
    this.getHighScoreList(this.highScoreOffset, this.numItemHighScoreList + 1);
    this.canvas.addEventListener("click", this.onMouseClickPlay, false);
    this.canvas.addEventListener("click", this.onMouseClickContinue, false);
    this.canvas.addEventListener("click", this.onMouseClickLeftArrow, false);
    this.canvas.addEventListener("click", this.onMouseClickRightArrow, false);
    this.canvas.addEventListener("mousemove", this.onMouseMove, false);
  },
  /**
   * Draws the high score list on the canvas. The mystery ship is cruising
   * above the high score list. Paging arrows are drawn only when a
   * previous/next page exists.
   * NOTE(review): 980 below is presumably the canvas width — confirm.
   *
   * @param {Object} ct - The canvas context.
   *
   * @return {void}
   */
  draw: function(ct) {
    ct.save();
    ct.translate(this.position.x, this.position.y);
    ct.drawImage(this.img, 0, 0, this.width, this.height);
    ct.restore();
    ct.save();
    ct.translate(980 / 2, this.yPosStart);
    ct.font = "normal lighter 24px arcade, monospace";
    ct.fillStyle = "rgb(79, 255, 48)";
    ct.fillText('NAME', -200, -50);
    ct.fillText('SCORE', 200, -50);
    ct.fillStyle = "#fff";
    var yPos = 0;
    if (this.highScoreList !== null) {
      // Never show the extra look-ahead row.
      var numberOfItems = this.highScoreList.scoreList.length;
      if (numberOfItems > this.numItemHighScoreList) {
        numberOfItems = this.numItemHighScoreList;
      }
      for (var i = 0; i < numberOfItems; i++) {
        ct.fillText((i + this.highScoreOffset + 1) + ".", -280, yPos);
        ct.fillText(this.highScoreList.scoreList[i].name, -200, yPos);
        ct.fillText(this.highScoreList.scoreList[i].score, 200, yPos);
        yPos += 35;
      }
    }
    // Left arrow: only when a previous page exists.
    if (this.highScoreOffset >= this.numItemHighScoreList) {
      this.yPosLeftArrow = yPos - 15;
      if (this.isHooverOverLeftArrow) {
        ct.drawImage(this.arrowGreenLeftImg, -320, this.yPosLeftArrow, this.arrowSize, this.arrowSize);
      } else {
        ct.drawImage(this.arrowWhiteLeftImg, -320, this.yPosLeftArrow, this.arrowSize, this.arrowSize);
      }
    }
    // Right arrow: only when the look-ahead row proved a next page exists.
    if (this.highScoreList !== null) {
      if (this.highScoreList.scoreList.length == this.numItemHighScoreList + 1) {
        this.yPosRightArrow = yPos - 15;
        if (this.isHooverOverRightArrow) {
          ct.drawImage(this.arrowGreenRightImg, 280, this.yPosRightArrow, this.arrowSize, this.arrowSize);
        } else {
          ct.drawImage(this.arrowWhiteRightImg, 280, this.yPosRightArrow, this.arrowSize, this.arrowSize);
        }
      }
    }
    if (this.isHoverOverStart) {
      ct.fillStyle = "rgb(79, 255, 48)";
      ct.fillText('PLAY GAME', -105, 380);
    } else {
      ct.fillStyle = "#fff";
      ct.fillText('PLAY GAME', -105, 380);
    }
    if (this.isHoverOverContinue) {
      ct.fillStyle = "rgb(79, 255, 48)";
      ct.fillText('CONTINUE', -99, 420);
    } else {
      ct.fillStyle = "#fff";
      ct.fillText('CONTINUE', -99, 420);
    }
    ct.restore();
  },
  /**
   * Gets a page of high scores from the database using Ajax and Json.
   * The result from the request is stored asynchronously in
   * this.highScoreList.
   *
   * @param {number} offset - Index of the first entry to fetch.
   * @param {number} limit  - Maximum number of entries to fetch.
   *
   * @return {void}
   */
  getHighScoreList: function(offset, limit) {
    var that = this;
    $.ajax({
      type: 'post',
      url: 'game/highScores.php?action=getHighScoreList',
      data: {
        offset: offset,
        limit: limit
      },
      dataType: 'json',
      success: function(data) {
        that.highScoreList = data;
      },
      error: function(jqXHR, textStatus, errorThrown) {
        console.log('Ajax request failed: ' + textStatus + ', ' + errorThrown);
      }
    });
  },
  /**
   * Moves the mystery ship to the left with one pixel multiplied with the velocity.
   *
   * @return {void}
   */
  moveLeft: function() {
    this.position.x -= 1 * this.velocity.x;
  },
  /**
   * Moves the mystery ship to the right with one pixel multiplied with the velocity.
   *
   * @return {void}
   */
  moveRight: function() {
    this.position.x += 1 * this.velocity.x;
  },
  /**
   * Updates the move of the mystery ship and controls that the ship changes
   * direction when reaching the left or right border of the game board.
   *
   * @return {void}
   */
  update: function() {
    if (this.direction === "right") {
      this.moveRight();
    } else {
      this.moveLeft();
    }
    this.stayInArea();
  },
  /**
   * Reverses the mystery ship's direction and flags that a border was
   * reached when it crosses the left or right edge of the game board.
   *
   * @return {void}
   */
  stayInArea: function() {
    if (this.position.x < -150) {
      this.direction = "right";
      this.reachedBorder = true;
    }
    if (this.position.x + this.width > 1020) {
      this.direction = "left";
      this.reachedBorder = true;
    }
  },
  /**
   * Checks if the text "PLAY GAME" is clicked to play a new game.
   * The hit box coordinates are hardcoded to match the draw() layout.
   *
   * @param {Object} event - the click event.
   *
   * @return {void}
   */
  checkPlayGame: function(event) {
    var pos = this.getMousePos(event);
    if (isIntersect(pos.x, pos.y, 1, 1, 387, 524, 127, 20)) {
      this.removeListeners();
      this.status.setGameStatus("game");
    }
  },
  /**
   * Checks if the text "CONTINUE" is clicked to be redirected to the intro.
   * Could be used if the player will not save the result to the high score
   * list.
   *
   * @param {Object} event - the click event.
   *
   * @return {void}
   */
  checkContinue: function(event) {
    var pos = this.getMousePos(event);
    if (isIntersect(pos.x, pos.y, 1, 1, 391, 566, 117, 20)) {
      this.removeListeners();
      this.status.setGameStatus("intro");
    }
  },
  /**
   * Checks if the left arrow below the list is clicked to page back ten
   * entries in the high score list (only possible past the first page).
   *
   * @param {Object} event - the click event.
   *
   * @return {void}
   */
  checkLeftArrow: function(event) {
    var pos = this.getMousePos(event);
    var yPos = this.yPosStart + this.yPosLeftArrow;
    if (this.highScoreOffset >= this.numItemHighScoreList) {
      if (isIntersect(pos.x, pos.y, 1, 1, 172, yPos, 20, 20)) {
        this.highScoreOffset -= 10;
        this.getHighScoreList(this.highScoreOffset, this.numItemHighScoreList + 1);
      }
    }
  },
  /**
   * Checks if the right arrow below the list is clicked to page forward
   * ten entries in the high score list.
   *
   * @param {Object} event - the click event.
   *
   * @return {void}
   */
  checkRightArrow: function(event) {
    var pos = this.getMousePos(event);
    var yPos = this.yPosStart + this.yPosRightArrow;
    if (isIntersect(pos.x, pos.y, 1, 1, 770, yPos, 20, 20)) {
      this.highScoreOffset += 10;
      this.getHighScoreList(this.highScoreOffset, this.numItemHighScoreList + 1);
    }
  },
  /**
   * Checks if the mouse is moved and updates the hover state of every
   * clickable element.
   *
   * @param {Object} event - the mouse move event
   *
   * @return {void}
   */
  mouseMove: function(event) {
    var pos = this.getMousePos(event);
    this.hooverOverPlayGame(pos.x, pos.y);
    this.hoverOverContinue(pos.x, pos.y);
    this.hooverOverLeftArrow(pos.x, pos.y);
    this.hooverOverRightArrow(pos.x, pos.y);
  },
  /**
   * Gets the mouse position relative to the canvas.
   *
   * @param {Object} event - the mouse move event.
   *
   * @return {Object} the position as {x, y} in canvas coordinates.
   */
  getMousePos: function(event) {
    var rect = this.canvas.getBoundingClientRect();
    return {
      x: event.clientX - rect.left,
      y: event.clientY - rect.top
    };
  },
  /**
   * Checks if the mouse is hovering over the text "PLAY GAME".
   *
   * @param {number} ax - the position in x led for the mouse on canvas.
   * @param {number} ay - the position in y led for the mouse on canvas.
   *
   * @return {void}
   */
  hooverOverPlayGame: function(ax, ay) {
    if (isIntersect(ax, ay, 1, 1, 387, 524, 127, 20)) {
      this.isHoverOverStart = true;
    } else {
      this.isHoverOverStart = false;
    }
  },
  /**
   * Checks if the mouse is hovering over the text "CONTINUE".
   *
   * @param {number} ax - the position in x led for the mouse on canvas.
   * @param {number} ay - the position in y led for the mouse on canvas.
   *
   * @return {void}
   */
  hoverOverContinue: function(ax, ay) {
    if (isIntersect(ax, ay, 1, 1, 391, 566, 117, 20)) {
      this.isHoverOverContinue = true;
    } else {
      this.isHoverOverContinue = false;
    }
  },
  /**
   * Checks if the mouse is hovering over the left arrow below the high score
   * list.
   *
   * @param {number} ax - the position in x led for the mouse on canvas.
   * @param {number} ay - the position in y led for the mouse on canvas.
   *
   * @return {void}
   */
  hooverOverLeftArrow: function(ax, ay) {
    var yPos = this.yPosStart + this.yPosLeftArrow;
    if (isIntersect(ax, ay, 1, 1, 172, yPos, 20, 20)) {
      this.isHooverOverLeftArrow = true;
    } else {
      this.isHooverOverLeftArrow = false;
    }
  },
  /**
   * Checks if the mouse is hovering over the right arrow below the high score
   * list.
   *
   * @param {number} ax - the position in x led for the mouse on canvas.
   * @param {number} ay - the position in y led for the mouse on canvas.
   *
   * @return {void}
   */
  hooverOverRightArrow: function(ax, ay) {
    var yPos = this.yPosStart + this.yPosRightArrow;
    if (isIntersect(ax, ay, 1, 1, 770, yPos, 20, 20)) {
      this.isHooverOverRightArrow = true;
    } else {
      this.isHooverOverRightArrow = false;
    }
  },
  /**
   * Removes all event listeners created when the file was started (initiated).
   *
   * @return {void}
   */
  removeListeners: function() {
    this.canvas.removeEventListener("mousemove", this.onMouseMove, false);
    this.canvas.removeEventListener("click", this.onMouseClickRightArrow, false);
    this.canvas.removeEventListener("click", this.onMouseClickLeftArrow, false);
    this.canvas.removeEventListener("click", this.onMouseClickContinue, false);
    this.canvas.removeEventListener("click", this.onMouseClickPlay, false);
  }
};
|
package integrationtests
import (
"context"
"fmt"
"testing"
diskv1 "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1"
"github.com/kubernetes-csi/csi-proxy/client/api/volume/v1"
diskv1client "github.com/kubernetes-csi/csi-proxy/client/groups/disk/v1"
v1client "github.com/kubernetes-csi/csi-proxy/client/groups/volume/v1"
)
// v1VolumeTests exercises the v1 volume API end to end against a fresh
// test VHD: list volumes, format, stats, resize, disk-number lookup,
// mount and unmount.
//
// Fix: the unmount failure message was copy-pasted from the mount step
// and reported "mount to path" for a failed unmount.
func v1VolumeTests(t *testing.T) {
	var volumeClient *v1client.Client
	var diskClient *diskv1client.Client
	var err error
	if volumeClient, err = v1client.NewClient(); err != nil {
		t.Fatalf("Client new error: %v", err)
	}
	defer volumeClient.Close()
	if diskClient, err = diskv1client.NewClient(); err != nil {
		t.Fatalf("DiskClient new error: %v", err)
	}
	defer diskClient.Close()
	vhd, vhdCleanup := diskInit(t)
	defer vhdCleanup()
	listRequest := &v1.ListVolumesOnDiskRequest{
		DiskNumber: vhd.DiskNumber,
	}
	listResponse, err := volumeClient.ListVolumesOnDisk(context.TODO(), listRequest)
	if err != nil {
		t.Fatalf("List response: %v", err)
	}
	volumeIDsLen := len(listResponse.VolumeIds)
	if volumeIDsLen != 1 {
		t.Fatalf("Number of volumes not equal to 1: %d", volumeIDsLen)
	}
	volumeID := listResponse.VolumeIds[0]
	// A fresh VHD must start unformatted; format it and verify the flip.
	isVolumeFormattedRequest := &v1.IsVolumeFormattedRequest{
		VolumeId: volumeID,
	}
	isVolumeFormattedResponse, err := volumeClient.IsVolumeFormatted(context.TODO(), isVolumeFormattedRequest)
	if err != nil {
		t.Fatalf("Is volume formatted request error: %v", err)
	}
	if isVolumeFormattedResponse.Formatted {
		t.Fatal("Volume formatted. Unexpected !!")
	}
	formatVolumeRequest := &v1.FormatVolumeRequest{
		VolumeId: volumeID,
	}
	_, err = volumeClient.FormatVolume(context.TODO(), formatVolumeRequest)
	if err != nil {
		t.Fatalf("Volume format failed. Error: %v", err)
	}
	isVolumeFormattedResponse, err = volumeClient.IsVolumeFormatted(context.TODO(), isVolumeFormattedRequest)
	if err != nil {
		t.Fatalf("Is volume formatted request error: %v", err)
	}
	if !isVolumeFormattedResponse.Formatted {
		t.Fatal("Volume should be formatted. Unexpected !!")
	}
	t.Logf("VolumeId %v", volumeID)
	volumeStatsRequest := &v1.GetVolumeStatsRequest{
		VolumeId: volumeID,
	}
	volumeStatsResponse, err := volumeClient.GetVolumeStats(context.TODO(), volumeStatsRequest)
	if err != nil {
		t.Fatalf("VolumeStats request error: %v", err)
	}
	// For a volume formatted with 1GB it should be around 1GB, in practice it was 1056947712 bytes or 0.9844GB
	// let's compare with a range of +- 20MB
	if !sizeIsAround(t, volumeStatsResponse.TotalBytes, vhd.InitialSize) {
		t.Fatalf("volumeStatsResponse.TotalBytes reported is not valid, it is %v", volumeStatsResponse.TotalBytes)
	}
	// Resize the disk to twice its size (from 1GB to 2GB)
	// To resize a volume we need to resize the virtual hard disk first and then the partition
	cmd := fmt.Sprintf("Resize-VHD -Path %s -SizeBytes %d", vhd.Path, int64(vhd.InitialSize*2))
	if out, err := runPowershellCmd(t, cmd); err != nil {
		t.Fatalf("Error: %v. Command: %q. Out: %s.", err, cmd, out)
	}
	// Resize the volume to 1.5GB
	oldVolumeSize := volumeStatsResponse.TotalBytes
	newVolumeSize := int64(float32(oldVolumeSize) * 1.5)
	// This is the max partition size when doing a resize to 2GB
	//
	// Get-PartitionSupportedSize -DiskNumber 7 -PartitionNumber 2 | ConvertTo-Json
	// {
	//     "SizeMin": 404725760,
	//     "SizeMax": 2130689536
	// }
	resizeVolumeRequest := &v1.ResizeVolumeRequest{
		VolumeId: volumeID,
		// resize the partition to 1.5x times instead
		SizeBytes: newVolumeSize,
	}
	t.Logf("Attempt to resize volume from sizeBytes=%d to sizeBytes=%d", oldVolumeSize, newVolumeSize)
	_, err = volumeClient.ResizeVolume(context.TODO(), resizeVolumeRequest)
	if err != nil {
		t.Fatalf("Volume resize request failed. Error: %v", err)
	}
	volumeStatsResponse, err = volumeClient.GetVolumeStats(context.TODO(), volumeStatsRequest)
	if err != nil {
		t.Fatalf("VolumeStats request after resize error: %v", err)
	}
	// resizing from 1GB to approximately 1.5GB
	if !sizeIsAround(t, volumeStatsResponse.TotalBytes, newVolumeSize) {
		t.Fatalf("VolumeSize reported should be greater than the old size, it is %v", volumeStatsResponse.TotalBytes)
	}
	volumeDiskNumberRequest := &v1.GetDiskNumberFromVolumeIDRequest{
		VolumeId: volumeID,
	}
	volumeDiskNumberResponse, err := volumeClient.GetDiskNumberFromVolumeID(context.TODO(), volumeDiskNumberRequest)
	if err != nil {
		t.Fatalf("GetDiskNumberFromVolumeID failed: %v", err)
	}
	diskStatsRequest := &diskv1.GetDiskStatsRequest{
		DiskNumber: volumeDiskNumberResponse.DiskNumber,
	}
	diskStatsResponse, err := diskClient.GetDiskStats(context.TODO(), diskStatsRequest)
	if err != nil {
		t.Fatalf("DiskStats request error: %v", err)
	}
	if diskStatsResponse.TotalBytes < 0 {
		t.Fatalf("Invalid disk size was returned %v", diskStatsResponse.TotalBytes)
	}
	// Mount the volume
	mountVolumeRequest := &v1.MountVolumeRequest{
		VolumeId:   volumeID,
		TargetPath: vhd.Mount,
	}
	_, err = volumeClient.MountVolume(context.TODO(), mountVolumeRequest)
	if err != nil {
		t.Fatalf("Volume id %s mount to path %s failed. Error: %v", volumeID, vhd.Mount, err)
	}
	// Unmount the volume
	unmountVolumeRequest := &v1.UnmountVolumeRequest{
		VolumeId:   volumeID,
		TargetPath: vhd.Mount,
	}
	_, err = volumeClient.UnmountVolume(context.TODO(), unmountVolumeRequest)
	if err != nil {
		t.Fatalf("Volume id %s unmount from path %s failed. Error: %v", volumeID, vhd.Mount, err)
	}
}
|
import os
from pymongo import MongoClient
# Fallback connection string when MONGODB_HOST is not set in the environment.
# NOTE(review): credentials are hard-coded here — presumably dev-only; confirm
# before shipping.
DEFAULT_MONGODB_HOST = "mongodb://mongo:password@127.0.0.1:27017"
def create_client() -> MongoClient:
    """Create a MongoClient from MONGODB_HOST, or the default host string."""
    host = os.getenv("MONGODB_HOST", DEFAULT_MONGODB_HOST)
    return MongoClient(host)
# Example usage
client = create_client()  # created at module import time
db = client.test_database
-- Largest single-order total per customer name.
-- Customers with no orders are excluded by the inner join; customers that
-- share a name are merged into one group.
SELECT c.name, MAX(o.total)
FROM customers c
JOIN orders o ON o.customer_id = c.id
GROUP BY c.name
import { Component, OnInit, ViewChild } from "@angular/core";
import { MatPaginator } from "@angular/material/paginator";
import { MatSort } from "@angular/material/sort";
import { DataTableDataSource } from "./data-table-datasource";

/**
 * Table view that wires an Angular Material paginator and sort header into
 * a DataTableDataSource.
 */
@Component({
  selector: "app-data-table",
  templateUrl: "./data-table.component.html",
  styleUrls: ["./data-table.component.css"],
})
export class DataTableComponent implements OnInit {
  // `static: true` resolves the view children before ngOnInit runs, so they
  // can be handed to the data source there.
  @ViewChild(MatPaginator, { static: true }) paginator: MatPaginator;
  @ViewChild(MatSort, { static: true }) sort: MatSort;
  dataSource: DataTableDataSource;
  /** Columns displayed in the table. Columns IDs can be added, removed, or reordered. */
  displayedColumns = ["id", "name", "amount"];
  ngOnInit() {
    // Build the data source once the paginator/sort view children exist.
    this.dataSource = new DataTableDataSource(this.paginator, this.sort);
  }
  onRowClicked(row) {
    // Currently diagnostic only; no navigation/selection is performed.
    console.log("Row clicked: ", row);
  }
}
from typing import List
import re

# Compiled once at import time so repeated calls do not re-parse the pattern.
_ALERT_TYPE_RE = re.compile(r'<alert type="([^"]+)"')

def extractAlertTypes(xmlString: str) -> List[str]:
    """Return the value of every ``type`` attribute on ``<alert>`` tags.

    Matching is purely textual (regex, not an XML parser), so only
    ``<alert type="...">`` where ``type`` immediately follows the tag name
    is found.

    Args:
        xmlString: raw XML text to scan.

    Returns:
        The matched type strings in document order (possibly empty).
    """
    return _ALERT_TYPE_RE.findall(xmlString)
<filename>index.js
// Minimal proxy endpoint: fetches the Google homepage and relays its HTML.
var express = require('express')
var app = express()

// Load `request` once at startup rather than on every incoming request.
var request = require('request')

app.get('/', function (req, res) {
  request('http://www.google.com', function (error, response, body) {
    // BUG FIX: the original ignored `error`, sending an undefined body when
    // the upstream call failed. Report a 502 instead.
    if (error) {
      res.status(502).send('Upstream request failed: ' + error.message)
      return
    }
    res.send(body)
  })
})

app.listen(3000, function () {
  console.log('Example app listening on port 3000!');
});
#!/bin/sh
# Succeeds when ${item} is on PATH and the target its symlink chain resolves
# to is executable. (readlink -m is a GNU extension — presumably fine for the
# platforms this script targets; confirm for macOS without coreutils.)
do_bin_check()
{
    # Quote all expansions so binaries living in paths with spaces do not
    # word-split and silently break the test.
    which "${item}" && test -x "$(readlink -m "$(which "${item}")")"
}
# Report whether the binary named $1 is usable; record it missing otherwise.
check_bin()
{
    item="$1"
    echo "${item}?"
    if ! do_bin_check; then
        missing_items
    fi
}
# Report whether the path named $1 exists; record it missing otherwise.
check_exists()
{
    item="$1"
    echo "${item}?"
    if ! test -e "$item"; then
        missing_items
    fi
}
# Prerequisites required on every platform.
do_common()
{
    check_bin "aspell"
    check_bin "dot"
    check_bin "gcc"
}
# Linux has no extra prerequisites beyond the common set.
do_linux()
{
    true
}
# macOS-specific prerequisites.
do_osx()
{
    check_exists "/Applications/Emacs.app"
    check_bin "brew"
}
# Print a message and abort the whole script.
fail()
{
    echo "$1"
    exit 1
}
# Called whenever any prerequisite check fails.
missing_items()
{
    fail "missing items!!"
}
# A marker file in $HOME lets the user opt out of prerequisite checking.
if [ -f ~/.sperry-no-prereqs ]; then
    echo "Skipping checks."
    exit 0
fi
do_common
# Dispatch on the kernel name for platform-specific checks.
case "$(uname -s)" in
    Linux)
        do_linux
        ;;
    Darwin)
        do_osx
        ;;
    *)
        fail "Unknown OS"
        ;;
esac
|
package com.growingwiththeweb.sorting;
public class BubbleSortOptimised {
public static void sort(Integer[] array) {
int unsortedBelow = array.length;
while (unsortedBelow != 0) {
int lastSwap = 0;
for (int i = 1; i < unsortedBelow; i++) {
if (array[i - 1] > array[i]) {
swap(array, i, i - 1);
lastSwap = i;
}
}
unsortedBelow = lastSwap;
}
}
private static void swap(Integer[] array, int a, int b) {
Integer temp = array[a];
array[a] = array[b];
array[b] = temp;
}
}
|
from flask import Flask
# Single module-level Flask application instance.
app = Flask(__name__)
# NOTE(review): "<KEY>" looks like a redacted placeholder — the real secret
# should come from configuration/environment, not source control. Confirm.
app.config['SECRET_KEY'] = "<KEY>"
# Imported for its side effect of registering routes on `app`; kept at the
# bottom, presumably to avoid a circular import with weather_app.routes.
from weather_app import routes
<gh_stars>0
package com.wagner.javaconfig;
import com.wagner.javaconfig.coach.Coach;
import com.wagner.javaconfig.config.ApplicationConfig;
import com.wagner.javaconfig.logging.LoggerConfig;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
public class JavaConfigApp {
public static void main(String[] args) {
ApplicationContext context = new AnnotationConfigApplicationContext(LoggerConfig.class, ApplicationConfig.class);
Coach tennisCoach = context.getBean("tennisCoach" , Coach.class);
Coach baseballCoach = context.getBean("baseballCoach", Coach.class);
Coach swimCoach = context.getBean("swimCoach" , Coach.class);
System.out.println("Tennis Coach");
System.out.println(tennisCoach.getDailyWorkout());
System.out.println(tennisCoach.getDailyFortune());
System.out.println();
System.out.println("Baseball Coach");
System.out.println(baseballCoach.getDailyWorkout());
System.out.println(baseballCoach.getDailyFortune());
System.out.println();
System.out.println("Swim Coach");
System.out.println(swimCoach.getDailyWorkout());
System.out.println(swimCoach.getDailyFortune());
((ConfigurableApplicationContext) context).close();
}
} |
package com.waflo.cooltimediaplattform.backend.service;
import com.waflo.cooltimediaplattform.backend.ResourceType;
import java.io.IOException;
import java.io.InputStream;
/**
 * Abstraction over a storage backend for uploading, renaming, downloading
 * and destroying media resources addressed by URI.
 */
public interface IUploadService {
    /**
     * Uploads a stream to the backing store.
     *
     * @param toUpload is the stream which should be saved
     * @param filePath is a unique URI to the resource, you can separate with /, must not contain the appropriate file endings
     * @param typeOfResource decides whether the resource is an image, movie (=audio), or raw (other types of resources)
     * @return the URI to the resource, in most cases equal to given filePath, except it's already used
     * @throws IOException if the upload fails
     */
    String upload(InputStream toUpload, String filePath, ResourceType typeOfResource) throws IOException;
    /**
     * Renames a resources URI
     *
     * @param fromURI the current URI of the resource
     * @param toURI the URI to rename to
     * @param resourceType type of the Resource (Video, Image, Raw)
     * @return the URI which the resource was renamed to
     * @throws IOException if the rename fails
     */
    String rename(String fromURI, String toURI, ResourceType resourceType) throws IOException;
    /**
     * Resolves a download location for a resource.
     *
     * @param URI the URI of the resource to download
     * @param format the delivery format requested by the caller
     * @return the URI where the resource can be downloaded
     * @throws IOException if resolving the download fails
     */
    String download(String URI, String format) throws IOException;
    /**
     * Destroys the given resource
     *
     * @param URI is the unique identifier of the resource to destroy
     * @return true on success, false otherwise
     * @throws IOException if the destroy operation fails
     */
    boolean destroy(String URI) throws IOException;
}
# Create a project
# NOTE(review): the two bare command lines below are shell commands, not
# Python — this snippet is tutorial text and is not importable as one module.
django-admin startproject MyProject
# Create an app
python manage.py startapp MyApp
# Create the 'User' model in 'models.py'
from django.db import models
class User(models.Model):
    # User fields
    # Display name; age must be >= 0 (PositiveIntegerField).
    name = models.CharField(max_length=255)
    age = models.PositiveIntegerField()
    def __str__(self):
        # Shown in the admin and shell representations.
        return self.name
# Create the 'Post' model in 'models.py'
class Post(models.Model):
    # Post fields
    title = models.CharField(max_length=255)
    content = models.TextField()
    # Deleting a User cascades to their posts.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.title
# Register the models in 'admin.py'
from django.contrib import admin
from .models import User, Post
admin.site.register(User)
admin.site.register(Post)
package pkcs8_test
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"encoding/pem"
"fmt"
"testing"
"github.com/youmark/pkcs8"
)
// PEM test fixtures. The `<KEY>` bodies are redacted placeholders for the
// real base64 key material; the BEGIN/END armor indicates whether each
// fixture is an encrypted or clear-text PKCS#8 key.
const rsa2048 = `-----<KEY>
-----END PRIVATE KEY-----
`
const encryptedRSA2048aes = `-----BEGIN ENCRYPTED PRIVATE KEY-----
<KEY>
-----END ENCRYPTED PRIVATE KEY-----
`
const encryptedRSA2048des3 = `-----BEGIN ENCRYPTED PRIVATE KEY-----
<KEY>
-----END ENCRYPTED PRIVATE KEY-----
`
const ec256 = `-----BEGIN PRIVATE KEY-----
<KEY>
-----END PRIVATE KEY-----
`
const encryptedEC256aes = `-----BEGIN ENCRYPTED PRIVATE KEY-----
<KEY>
`
const ec128 = `-----<KEY>
-----END PRIVATE KEY-----`
const encryptedEC128aes = `-----BEGIN ENCRYPTED PRIVATE KEY-----
<KEY>
-----END ENCRYPTED PRIVATE KEY-----`
// TestParsePKCS8PrivateKeyRSA checks that encrypted RSA fixtures parse with
// the correct password, fail with a wrong or missing password, and that the
// clear-text fixture parses without a password.
func TestParsePKCS8PrivateKeyRSA(t *testing.T) {
	keyList := []struct {
		name      string
		clear     string
		encrypted string
	}{
		{
			name:      "encryptedRSA2048aes",
			clear:     rsa2048,
			encrypted: encryptedRSA2048aes,
		},
		{
			name:      "encryptedRSA2048des3",
			clear:     rsa2048,
			encrypted: encryptedRSA2048des3,
		},
	}
	for i, key := range keyList {
		t.Run(key.name, func(t *testing.T) {
			block, _ := pem.Decode([]byte(key.encrypted))
			// pem.Decode returns nil on malformed input; fail explicitly
			// instead of panicking on block.Bytes below.
			if block == nil {
				t.Fatalf("%d: invalid PEM in encrypted fixture", i)
			}
			_, err := pkcs8.ParsePKCS8PrivateKeyRSA(block.Bytes, []byte("password"))
			if err != nil {
				t.Errorf("%d: ParsePKCS8PrivateKeyRSA returned: %s", i, err)
			}
			_, err = pkcs8.ParsePKCS8PrivateKeyRSA(block.Bytes, []byte("wrong password"))
			if err == nil {
				t.Errorf("%d: should have failed", i)
			}
			_, err = pkcs8.ParsePKCS8PrivateKeyRSA(block.Bytes)
			if err == nil {
				t.Errorf("%d: should have failed", i)
			}
			block, _ = pem.Decode([]byte(key.clear))
			if block == nil {
				t.Fatalf("%d: invalid PEM in clear fixture", i)
			}
			_, err = pkcs8.ParsePKCS8PrivateKeyRSA(block.Bytes)
			if err != nil {
				t.Errorf("%d: ParsePKCS8PrivateKeyRSA returned: %s", i, err)
			}
		})
	}
}
// TestParsePKCS8PrivateKeyECDSA mirrors the RSA test for the EC fixtures:
// encrypted keys need the right password, and the clear key parses bare.
func TestParsePKCS8PrivateKeyECDSA(t *testing.T) {
	keyList := []struct {
		name      string
		clear     string
		encrypted string
	}{
		{
			name:      "encryptedEC256aes",
			clear:     ec256,
			encrypted: encryptedEC256aes,
		},
	}
	for i, key := range keyList {
		t.Run(key.name, func(t *testing.T) {
			block, _ := pem.Decode([]byte(key.encrypted))
			// Guard against a nil block so a bad fixture fails the test
			// instead of panicking.
			if block == nil {
				t.Fatalf("%d: invalid PEM in encrypted fixture", i)
			}
			_, err := pkcs8.ParsePKCS8PrivateKeyECDSA(block.Bytes, []byte("password"))
			if err != nil {
				t.Errorf("%d: ParsePKCS8PrivateKeyECDSA returned: %s", i, err)
			}
			_, err = pkcs8.ParsePKCS8PrivateKeyECDSA(block.Bytes, []byte("wrong password"))
			if err == nil {
				t.Errorf("%d: should have failed", i)
			}
			_, err = pkcs8.ParsePKCS8PrivateKeyECDSA(block.Bytes)
			if err == nil {
				t.Errorf("%d: should have failed", i)
			}
			block, _ = pem.Decode([]byte(key.clear))
			if block == nil {
				t.Fatalf("%d: invalid PEM in clear fixture", i)
			}
			_, err = pkcs8.ParsePKCS8PrivateKeyECDSA(block.Bytes)
			if err != nil {
				t.Errorf("%d: ParsePKCS8PrivateKeyECDSA returned: %s", i, err)
			}
		})
	}
}
// TestParsePKCS8PrivateKey runs the generic parser across every fixture
// (RSA and EC, AES and DES3), checking password handling for each.
func TestParsePKCS8PrivateKey(t *testing.T) {
	keyList := []struct {
		name      string
		clear     string
		encrypted string
	}{
		{
			name:      "encryptedRSA2048aes",
			clear:     rsa2048,
			encrypted: encryptedRSA2048aes,
		},
		{
			name:      "encryptedRSA2048des3",
			clear:     rsa2048,
			encrypted: encryptedRSA2048des3,
		},
		{
			name:      "encryptedEC256aes",
			clear:     ec256,
			encrypted: encryptedEC256aes,
		},
		{
			name:      "encryptedEC128aes",
			clear:     ec128,
			encrypted: encryptedEC128aes,
		},
	}
	for i, key := range keyList {
		t.Run(key.name, func(t *testing.T) {
			block, _ := pem.Decode([]byte(key.encrypted))
			// Fail loudly on a malformed fixture rather than dereferencing
			// a nil *pem.Block below.
			if block == nil {
				t.Fatalf("%d: invalid PEM in encrypted fixture", i)
			}
			_, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte("password"))
			if err != nil {
				t.Errorf("%d: ParsePKCS8PrivateKey returned: %s", i, err)
			}
			_, err = pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte("wrong password"))
			if err == nil {
				t.Errorf("%d: should have failed", i)
			}
			_, err = pkcs8.ParsePKCS8PrivateKey(block.Bytes)
			if err == nil {
				t.Errorf("%d: should have failed", i)
			}
			block, _ = pem.Decode([]byte(key.clear))
			if block == nil {
				t.Fatalf("%d: invalid PEM in clear fixture", i)
			}
			_, err = pkcs8.ParsePKCS8PrivateKey(block.Bytes)
			if err != nil {
				t.Errorf("%d: ParsePKCS8PrivateKey returned: %s", i, err)
			}
		})
	}
}
// TestConvertPrivateKeyToPKCS8 round-trips freshly generated RSA and ECDSA
// keys through ConvertPrivateKeyToPKCS8 and ParsePKCS8PrivateKey, once
// without and once with an encryption password, and verifies that the
// private exponent D survives the round trip.
func TestConvertPrivateKeyToPKCS8(t *testing.T) {
	for i, password := range [][]byte{nil, []byte("password")} {
		// args stays empty in the unencrypted case, so the same variadic
		// calls below exercise both the password and no-password paths.
		var args [][]byte
		if password != nil {
			args = append(args, password)
		}
		rsaPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			t.Fatalf("%d: ConvertPrivateKeyToPKCS8 returned: %s", i, err)
		}
		der, err := pkcs8.ConvertPrivateKeyToPKCS8(rsaPrivateKey, args...)
		if err != nil {
			t.Fatalf("%d: ConvertPrivateKeyToPKCS8 returned: %s", i, err)
		}
		decodedRSAPrivateKey, err := pkcs8.ParsePKCS8PrivateKey(der, args...)
		if err != nil {
			t.Fatalf("%d: ParsePKCS8PrivateKey returned: %s", i, err)
		}
		// Comparing D is enough to show the same private key came back.
		if rsaPrivateKey.D.Cmp(decodedRSAPrivateKey.(*rsa.PrivateKey).D) != 0 {
			t.Fatalf("%d: Decoded key does not match original key", i)
		}
		// Repeat the round trip for each supported NIST curve.
		for _, curve := range []elliptic.Curve{
			elliptic.P224(), elliptic.P256(), elliptic.P384(), elliptic.P521(),
		} {
			ecPrivateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
			if err != nil {
				t.Fatalf("%d, %s: ConvertPrivateKeyToPKCS8 returned: %s", i, curve, err)
			}
			der, err = pkcs8.ConvertPrivateKeyToPKCS8(ecPrivateKey, args...)
			if err != nil {
				t.Fatalf("%d, %s: ConvertPrivateKeyToPKCS8 returned: %s", i, curve, err)
			}
			decodedECPrivateKey, err := pkcs8.ParsePKCS8PrivateKey(der, args...)
			if err != nil {
				t.Fatalf("%d, %s: ParsePKCS8PrivateKey returned: %s", i, curve, err)
			}
			if ecPrivateKey.D.Cmp(decodedECPrivateKey.(*ecdsa.PrivateKey).D) != 0 {
				t.Fatalf("%d, %s: Decoded key does not match original key", i, curve)
			}
		}
	}
}
// unknown is a key type the pkcs8 package has no encoder for.
type unknown int

// TestUnknownTypeFailure verifies that converting an unsupported key type
// fails with the documented error message rather than succeeding silently.
func TestUnknownTypeFailure(t *testing.T) {
	badInput := unknown(0)
	_, err := pkcs8.ConvertPrivateKeyToPKCS8(badInput, []byte("password"))
	if err == nil {
		t.Fatal("expected error")
	}
	want := fmt.Sprintf("unsupported key type: %T", badInput)
	if got := err.Error(); got != want {
		t.Fatalf("unexpected error: %s", got)
	}
}
|
<filename>frontend/frontend.pqd.config.js
// Network/runtime configuration for the Phu Quoc Dog block explorer frontend.
export const network = {
  id: 'phuquocdog',
  // NOTE(review): '<NAME>' looks like a redacted placeholder — restore the
  // real display name before shipping.
  name: '<NAME>',
  tokenSymbol: 'PQD',
  tokenDecimals: 10,
  ss58Format: 42,
  coinGeckoDenom: 'phuquocdog',
  //backendWs: 'ws://localhost:8082/v1/graphql',
  //backendHttp: 'http://localhost:8082/v1/graphql',
  nodeWs: 'wss://rpc.phuquoc.dog',
  backendWs: 'wss://hasura.phuquoc.dog/',
  backendHttp: 'https://hasura.phuquoc.dog',
  googleAnalytics: 'UA-144344973-1',
  theme: '@/assets/scss/themes/polkastats.scss',
  // ranking
  historySize: 84, // 84 days
  erasPerDay: 1,
  validatorSetSize: 24,
}
// Page-size choices offered by paginated tables.
export const paginationOptions = [10, 20, 50, 100]
|
from wordpal import puzzicon
from collections import defaultdict
from typing import Dict, Tuple, List, Set, Iterable, Callable
import logging
import itertools
import sys
_log = logging.getLogger(__name__)
_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Characters treated as full-alphabet wildcard blanks in templates.
_BLANKS = '?_.'
_GARBAGE = ' '
# Penalty added by Evaluator when a metric returns True.
_DEFAULT_PENALTY = -50
# Each letter maps to a distinct prime, so the product of a word's letter
# values (its "soul") uniquely identifies the word's letter multiset —
# anagrams share the same soul.
_LETTER_SOULS = {
    'A': 2, 'B': 3, 'C': 5, 'D': 7, 'E': 11,
    'F': 13, 'G': 17, 'H': 19, 'I': 23, 'J': 29,
    'K': 31, 'L': 37, 'M': 41, 'N': 43, 'O': 47,
    'P': 53, 'Q': 59, 'R': 61, 'S': 67, 'T': 71,
    'U': 73, 'V': 79, 'W': 83, 'X': 89, 'Y': 97,
    'Z': 101,
}
def _NOOP(*args, **kwargs):
pass
def _to_flat_list(blah):
if isinstance(blah, str):
return list(blah)
else:
lists = [list(x) for x in blah]
master = []
for l in lists:
master += l
return master
def compute_soul(word):
    """Return the product of per-letter prime values over `word`.

    `word` may be a string or an iterable of strings; anagrams yield
    identical souls because multiplication is order-independent.
    """
    product = 1
    for letter in _to_flat_list(word):
        product *= _LETTER_SOULS[letter.upper()]
    return product
class Evaluator(object):
    """Scores a puzzeme by summing metric results over its rendering.

    A metric may return a bool (True adds the default penalty) or a
    number (added directly); anything else is an error.
    """

    def __init__(self, metrics=None):
        if metrics is None:
            # Default penalties: contains an apostrophe, or the first
            # character is already uppercase.
            metrics = [
                lambda rendering: "'" in rendering,
                lambda rendering: rendering[0].upper() == rendering[0]
            ]
        self.metrics = metrics

    def evaluate(self, puzzeme):
        total = 0
        for metric in self.metrics:
            outcome = metric(puzzeme.rendering)
            if outcome is True:
                total += _DEFAULT_PENALTY
            elif isinstance(outcome, (int, float)):
                total += outcome
            else:
                raise ValueError("bad measurement by metric " + str(metric))
        return total
class Soothsayer(object):
    """Anagram dictionary keyed by "soul" (the product of per-letter primes),
    so all words/phrases with the same letter multiset share one key."""

    def __init__(self, wordmap: Dict[int, List[Tuple[str, ...]]]):
        # wordmap maps a soul to the list of word tuples having that soul.
        assert isinstance(wordmap, dict), "wordmap must be a dictionary"
        self.wordmap = wordmap

    @classmethod
    def build(cls, canonicals: Iterable[str], nwords=1):
        """Index `canonicals` as n-grams of up to `nwords` words (max 3;
        the table size grows exponentially with nwords)."""
        assert nwords <= 3, "anagrams must be at most 3 words"
        _log.debug("building word map (max words %d)", nwords)
        wordmap = defaultdict(list)
        if nwords > 1:
            # The multi-word pass iterates canonicals repeatedly, so
            # materialize a possibly one-shot iterable.
            canonicals = list(canonicals)
        for canonical in canonicals:
            soul = compute_soul(canonical)
            wordmap[soul].append((canonical,))
        if nwords > 1:
            for i in range(nwords - 1):
                _log.debug("building dimension %d of word map", i + 2)
                # Snapshot the values first: we mutate wordmap while extending.
                wordmap_values = list(wordmap.values())
                for ngrams in wordmap_values:
                    for ngram in ngrams:
                        for canonical in canonicals:
                            newgram = tuple(list(ngram) + [canonical])
                            soul = compute_soul(newgram)
                            wordmap[soul].append(newgram)
        _log.debug("%d souls in word map", len(wordmap))
        return Soothsayer(wordmap)

    def lookup(self, word: str) -> Set[Tuple[str, ...]]:
        """Return the set of indexed n-grams that are anagrams of `word`."""
        soul = compute_soul(word)
        # BUG FIX: the old `self.wordmap[soul]` inside try/except never raised
        # KeyError on a defaultdict — instead, every miss silently inserted an
        # empty list and grew the map. .get() works for dict and defaultdict.
        return frozenset(self.wordmap.get(soul, ()))

    def values(self):
        """Yield every indexed n-gram as a space-joined string."""
        for ngrams in self.wordmap.values():
            for ngram in ngrams:
                yield ' '.join(ngram)
class Template(object):
    """A pattern of known letters plus "unknown" slots, each slot drawing
    from its own pool of candidate letters."""
    def __init__(self, known_pool, unknown_pools):
        # known_pool: the fixed letters; unknown_pools: one iterable of
        # candidate letters per blank position.
        self.known_pool = tuple(known_pool)
        self.unknown_pools = tuple(unknown_pools)
        self.length = len(self.known_pool) + len(self.unknown_pools)
    def iterate_unknowns(self):
        # Returns [''] when there are no unknown slots, so
        # iterate_possibles still yields the known letters exactly once.
        if self.unknown_pools:
            return itertools.product(*(self.unknown_pools))
        else:
            return ['']
    def iterate_possibles(self):
        # Yield every candidate letter string: knowns followed by one
        # combination of unknown-slot letters.
        blankproducts = self.iterate_unknowns()
        for blankproduct in blankproducts:
            _log.debug("yielding %s + %s", self.known_pool, blankproduct)
            combo = ''.join(self.known_pool) + ''.join(blankproduct)
            yield combo
    def count_unknown_combos(self):
        # Number of combinations iterate_unknowns() will yield (product of
        # pool sizes; 1 when there are no pools).
        product = 1
        for pool in self.unknown_pools:
            product *= len(pool)
        return product
    @classmethod
    def create(cls, seq: str):
        # Parse a template string: letters are knowns, any of _BLANKS is a
        # full-alphabet blank, and [...] declares a restricted pool for one
        # blank. Anything else is ignored.
        assert isinstance(seq, str), "expect string argument"
        chars = list(seq)
        blankpools = []
        clean_chars = []
        pool = None  # non-None while inside a [...] group
        for ch in chars:
            ch = ch.upper()
            if pool is None:
                if ch in _BLANKS:
                    blankpools.append(_ALPHABET)
                elif ch == '[':
                    pool = []
                elif ch in _ALPHABET:
                    clean_chars.append(ch)
                else:
                    _log.debug("ignored: '%s'", ch)
            else:
                if ch == ']':
                    blankpools.append(pool)
                    pool = None
                else:
                    pool.append(ch)
        clean_chars = ''.join(clean_chars)
        blankpools = list(map(lambda pool: ''.join(pool), blankpools))
        return Template(clean_chars, blankpools)
    def empty(self) -> bool:
        # True when the template has neither known letters nor blanks.
        return len(self.known_pool) == 0 and len(self.unknown_pools) == 0
def do_lookups(provided: str, dictionary=None, callback: Callable = None, puzzeme_threshold: int = None, max_words: int = 1):
    """Find anagrams matching the template string `provided`, printing each
    unique hit and returning the set of found phrases.

    dictionary may be None (default puzzemes), a set of puzzemes, '-' (read
    from stdin) or a file path. puzzeme_threshold, when given, filters
    puzzemes by Evaluator score; max_words bounds anagram phrase length.
    """
    callback = callback or _NOOP
    template = Template.create(provided)
    found = set()
    if template.empty():
        # FIX: Logger.warn is a deprecated alias; warning() is the
        # supported spelling.
        _log.warning("no valid letters provided")
        return found
    # Resolve the puzzeme source.
    if dictionary is None:
        puzzemes = puzzicon.load_default_puzzemes()
    elif isinstance(dictionary, set):
        puzzemes = dictionary
    else:
        if dictionary == '-':
            puzzemes = puzzicon.create_puzzeme_set(sys.stdin)
        else:
            puzzemes = puzzicon.read_puzzeme_set(dictionary)
    if puzzeme_threshold is not None:
        evaluator = Evaluator()
        puzzemes = filter(lambda p: evaluator.evaluate(p) >= puzzeme_threshold, puzzemes)
    canonicals = map(lambda p: p.canonical, puzzemes)
    soothsayer = Soothsayer.build(canonicals, nwords=max_words)
    nlookups, ndupes = 0, 0
    for word in template.iterate_possibles():
        nlookups += 1
        answers = soothsayer.lookup(word)  # a set of tuples
        for answer in answers:  # answer is a tuple of strings
            joined = ' '.join(answer)
            if joined not in found:
                found.add(joined)
                print(joined)
            else:
                _log.debug("duplicate %s", joined)
                ndupes += 1
    _log.debug("%d words found out of %d lookups (%d duplicates)", len(found), nlookups, ndupes)
    return found
|
#!/bin/bash
# Locate (or install) an office suite, then launch the kkFileView server.
DIR_HOME=("/opt/openoffice.org3" "/opt/libreoffice7.0" "/opt/openoffice4" "/usr/lib/openoffice" "/usr/lib/libreoffice")
FLAG=
OFFICE_HOME=
KKFILEVIEW_BIN_FOLDER=$(cd "$(dirname "$0")";pwd)
export KKFILEVIEW_BIN_FOLDER=$KKFILEVIEW_BIN_FOLDER
cd "$KKFILEVIEW_BIN_FOLDER"
echo "Using KKFILEVIEW_BIN_FOLDER $KKFILEVIEW_BIN_FOLDER"
# BUG FIX: the original piped through `grep '!^#'`, which searches for the
# literal text "!^#" and never matches, so a customized office.home was never
# detected. `grep -v '^#'` keeps only uncommented office.home lines.
grep 'office\.home' ../config/application.properties | grep -v '^#'
if [ $? -eq 0 ]; then
    echo "Using customized office.home"
else
    # Probe the well-known install locations for a soffice binary.
    for i in "${DIR_HOME[@]}"
    do
        if [ -f "$i/program/soffice.bin" ]; then
            FLAG=true
            OFFICE_HOME=${i}
            break
        fi
    done
    if [ ! -n "${FLAG}" ]; then
        echo "Installing OpenOffice"
        sh ./install.sh
    else
        echo "Detected office component has been installed in $OFFICE_HOME"
    fi
fi
echo "Starting kkFileView..."
echo "Please execute ./showlog.sh to check log for more information"
echo "You can get help in our official homesite: https://kkFileView.keking.cn"
echo "If this project is helpful to you, please star it on https://gitee.com/kekingcn/file-online-preview/stargazers"
nohup java -Dfile.encoding=UTF-8 -Dsun.java2d.cmm=sun.java2d.cmm.kcms.KcmsServiceProvider -Dspring.config.location=../config/application.properties -jar kkFileView-2.2.1.jar > ../log/kkFileView.log 2>&1 &
#!/bin/bash
# Used to set the environment variables needed
# while provisioning the machines.
# Takes as input the configuration parameters supplied
# at its launch by the Vagrantfile.
STARTING_IP=$1
REGISTRY_DOMAIN=$2
SWARM_DOMAIN=$3
SWARM_NODE_PREFIX=$4
SWARM_NUM=$5
# Split the dotted-quad IP into its four octets.
read IP_A IP_B IP_C IP_D <<<"${STARTING_IP//./ }"
IP_PREFIX=${IP_A}.${IP_B}.${IP_C}.
IP_STARTING_NUM=${IP_D}
SSL_DNS="${SWARM_DOMAIN},${REGISTRY_DOMAIN}"
# Build comma-separated SSL_IP and SSL_DNS lists, one entry per swarm node.
for ((i=1; i<=${SWARM_NUM}; i++))
do
    CURRENT_NUM=$(($IP_STARTING_NUM+$i))
    CURRENT_IP=${IP_PREFIX}${CURRENT_NUM}
    # Only prefix a comma from the second entry onwards.
    if [[ $i != 1 ]];
    then
        SSL_IP+=",${CURRENT_IP}"
    else
        SSL_IP+="${CURRENT_IP}"
    fi
    SSL_DNS="${SSL_DNS},${SWARM_NODE_PREFIX}$i"
done
# Persist everything so later provisioning shells inherit the values.
echo "export STARTING_IP=$1" >> ~/.profile
echo "export REGISTRY_DOMAIN=$2" >> ~/.profile
echo "export SWARM_DOMAIN=$3" >> ~/.profile
echo "export SWARM_NODE_PREFIX=$4" >> ~/.profile
echo "export SWARM_NUM=$5" >> ~/.profile
echo "export SSL_DNS=${SSL_DNS}" >> ~/.profile
echo "export SSL_IP=${SSL_IP}" >> ~/.profile
echo "export IP_PREFIX=${IP_PREFIX}" >> ~/.profile
echo "export IP_STARTING_NUM=${IP_STARTING_NUM}" >> ~/.profile
import tkinter as tk
from tkinter import ttk
# Create window
root = tk.Tk()
root.title("Health Monitor")
# Create label
ttk.Label(root, text = "Enter your vitals:").grid(row = 0, column = 0)
# Create Labels
# One label per vital sign, column 0; matching entry widgets go in column 1.
ttk.Label(root, text = "Temperature :").grid(row = 1, column = 0)
ttk.Label(root, text = "Heart Rate :").grid(row = 2, column = 0)
ttk.Label(root, text = "Blood Pressure :").grid(row = 3, column = 0)
# Create entry boxes
temp_entry = ttk.Entry(root, width = 20)
heart_rate_entry = ttk.Entry(root, width = 20)
blood_pressure_entry = ttk.Entry(root, width = 20)
temp_entry.grid(row = 1, column = 1, padx = 5, pady = 5)
heart_rate_entry.grid(row = 2, column = 1, padx = 5, pady = 5)
blood_pressure_entry.grid(row = 3, column = 1, padx = 5, pady = 5)
# Record updated to text box
def update_record():
    """Append the current entry values to user_records.txt as one line."""
    temp = temp_entry.get()
    heart_rate = heart_rate_entry.get()
    blood_pressure = blood_pressure_entry.get()
    # FIX: use a context manager so the file handle is closed even if a
    # write raises (the original relied on an explicit close()).
    with open("user_records.txt", "a") as records:
        records.write("Temperature: "+temp+"\t")
        records.write("Heart Rate: "+heart_rate+"\t")
        records.write("Blood Pressure: "+blood_pressure+"\n")
# Create submit button
# Writes the current entry values to user_records.txt via update_record.
ttk.Button(root, text = "Submit",
command = update_record).grid(row = 4, column = 1, pady = 5)
# Create quit button
ttk.Button(root, text = "Quit",
command = root.destroy).grid(row = 4, column = 0, pady = 5)
# Keep window open
root.mainloop()
<filename>project/scripts/hdo_download.py
#!python
# This script is used to download completed (converted) video files back to the source machine
import os
import boto3
import json
import logging
from lock import lock_file_exists, make_lock_file, release_lock_file
from botocore.exceptions import ClientError
from dotenv import load_dotenv
def hdo_get_sqs_message(sqs_queue_url,region_name):
    """Poll SQS once and return (receipt_handle, input_key).

    input_key is None when the message carries no S3 Records; raises
    ValueError when the queue is empty so the caller's loop can stop.
    """
    # Establish session using environment variables from config/env
    AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
    session = boto3.session.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,region_name=region_name)
    #Setting up the S3 bucket for upload
    sqs = session.client('sqs')
    # Receive message from SQS queue
    response = sqs.receive_message(
        QueueUrl = sqs_queue_url,
        AttributeNames = [
            'SentTimestamp'
        ],
        MaxNumberOfMessages = 1,
        MessageAttributeNames = [
            'All'
        ]
        ,VisibilityTimeout = 60 #declaring here will override the "default" (set in the queue)
    )
    if 'Messages' in response:
        receipt_handle = response['Messages'][0]['ReceiptHandle']
        logging.info('--------receipt_handle--------')
        logging.info(receipt_handle)
        # The SQS body wraps an SNS envelope; the S3 event is in 'Message'.
        body = response['Messages'][0]['Body']
        message = json.loads(body)['Message']
        if 'Records' in json.loads(message):
            original_input_key = json.loads(message)['Records'][0]['s3']['object']['key']
            logging.info(original_input_key)
            input_key = original_input_key.replace('+',' ') # S3 event replacing spaces with +
            logging.info('-------input_key---------')
            logging.info(input_key)
        else:
            logging.info('there are no Records in this SQS message. Skip')
            input_key = None
        return receipt_handle, input_key
    else:
        raise ValueError('SQS has no messages... Terminating')
else:
raise ValueError('SQS has no messages... Terminating')
def hdo_download_file(key,bucket,transcode_load_folder):
    """Download s3://bucket/key into transcode_load_folder.

    NOTE(review): key[7:] strips the first 7 characters of the key —
    presumably a fixed prefix folder (e.g. "output/"); confirm against the
    bucket layout before changing key naming.
    """
    # Establish session using environment variables from config/env
    AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
    session = boto3.session.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
    #Setting up the S3 bucket for upload
    s3 = session.client('s3')
    #set the S3 object and local download destination
    s3_file_path = key
    file_path = transcode_load_folder + os.sep + key[7:]
    #download file
    s3.download_file(bucket, s3_file_path, file_path)
    logging.info(f'{key} downloaded successfully from S3')
def hdo_delete_file(key,bucket):
    """Delete s3://bucket/key (called after a successful download)."""
    # Establish session using environment variables from config/env
    AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
    session = boto3.session.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
    #Setting up the S3 bucket for upload
    s3 = session.client('s3')
    #set the S3 object and local download destination
    s3_file_path = key
    s3.delete_object(Bucket=bucket,Key=s3_file_path)
    logging.info(f'{key} deleted successfully from S3')
def hdo_delete_sqs_message(receipt_handle,sqs_queue_url,region_name):
    """Acknowledge (delete) the SQS message identified by receipt_handle."""
    # Establish session using environment variables from config/env
    AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
    session = boto3.session.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,aws_secret_access_key=AWS_SECRET_ACCESS_KEY,region_name=region_name)
    #Setting up the S3 bucket for upload
    sqs = session.client('sqs', region_name=region_name)
    sqs.delete_message(
        QueueUrl = sqs_queue_url,
        ReceiptHandle = receipt_handle
    )
    logging.info('Received and deleted SQS message')
def main():
    """Poll SQS in a loop and download/clean up each completed video.

    A lock file prevents concurrent runs; the loop ends when the queue is
    empty (hdo_get_sqs_message raises) and the lock is then released.
    """
    logging.basicConfig(
        # filename='hdo_download.log',
        encoding='utf-8',
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S')
    #Set aws calls back to WARNING to avoid verbose messages
    logging.getLogger('botocore').setLevel(logging.WARNING)
    #Load environment variables
    load_dotenv()
    region_name = os.getenv('region_name')
    sqs_queue_url = os.getenv('sqs_queue_url')
    bucket = os.getenv('bucket')
    transcode_load_folder = os.getenv('download')
    logging.info('----- BEGIN PROCESS -----')
    if not lock_file_exists('python_running.lock'):
        make_lock_file('python_running.lock','Running HDOrganizer downloads')
        while True:
            logging.info('Poll Amazon SQS')
            try:
                sqs_message_details = hdo_get_sqs_message(sqs_queue_url,region_name)
            except Exception as ex:
                # Queue empty (or polling failed): leave the loop.
                logging.error(ex)
                break
                # logging.info('----- END PROCESS -----')
                # exit()
            receipt_handle = sqs_message_details[0]
            input_key = sqs_message_details[1]
            if input_key:
                try:
                    hdo_download_file(input_key,bucket,transcode_load_folder)
                    hdo_delete_sqs_message(receipt_handle,sqs_queue_url,region_name)
                    hdo_delete_file(input_key,bucket)
                except Exception as e:
                    # The message is NOT deleted on failure, so it becomes
                    # visible again after the visibility timeout and retries.
                    logging.warning(f'{input_key} did not download, sqs message retained and will return to the queue after visibility timeout')
                    logging.error(e)
                    continue
            else:
                # No S3 Records in this message: keep polling.
                continue
        release_lock_file('python_running.lock')
        logging.info('----- END PROCESS -----')
    else:
        logging.info('----- END PROCESS -----')
if __name__ == '__main__':
    main()
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2009 <NAME> All rights reserved.
#
"""Check a site's robots.txt for whether the PyMOTW agent may fetch paths.

NOTE: Python 2 only — uses the `robotparser`/`urlparse` module names and
print statements (these became urllib.robotparser / urllib.parse in
Python 3).
"""
#end_pymotw_header
import robotparser
import urlparse
AGENT_NAME = 'PyMOTW'
URL_BASE = 'http://www.doughellmann.com/'
parser = robotparser.RobotFileParser()
parser.set_url(urlparse.urljoin(URL_BASE, 'robots.txt'))
parser.read()  # fetches robots.txt over the network
PATHS = [
    '/',
    '/PyMOTW/',
    '/admin/',
    '/downloads/PyMOTW-1.92.tar.gz',
]
for path in PATHS:
    # Check both the bare path and its absolute-URL form.
    print '%6s : %s' % (parser.can_fetch(AGENT_NAME, path), path)
    url = urlparse.urljoin(URL_BASE, path)
    print '%6s : %s' % (parser.can_fetch(AGENT_NAME, url), url)
    print
<gh_stars>1-10
package kbasesearchengine.events.handler;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import com.google.common.base.Optional;
import kbasesearchengine.tools.Utils;
import us.kbase.common.service.UObject;
/** A data package loaded from a particular data source.
 * @author <EMAIL>
 *
 */
public class SourceData {
    private final UObject data;
    private final String name;
    private final String creator;
    private final Optional<String> copier;
    private final Optional<String> module;
    private final Optional<String> method;
    private final Optional<String> commitHash;
    private final Optional<String> version;
    private final Optional<String> md5;
    private final Set<String> sourceTags;
    private SourceData(
            final UObject data,
            final String name,
            final String creator,
            final Optional<String> copier,
            final Optional<String> module,
            final Optional<String> method,
            final Optional<String> commitHash,
            final Optional<String> version,
            final Optional<String> md5,
            final Set<String> sourceTags) {
        this.data = data;
        this.name = name;
        this.creator = creator;
        this.copier = copier;
        this.module = module;
        this.method = method;
        this.commitHash = commitHash;
        this.version = version;
        this.md5 = md5;
        // Expose the tag set read-only so instances stay immutable.
        this.sourceTags = Collections.unmodifiableSet(sourceTags);
    }
    /** Get the data.
     * @return the data.
     */
    public UObject getData() {
        return data;
    }
    /** Get the name of the data.
     * @return the data name.
     */
    public String getName() {
        return name;
    }
    /** Get the user that created the data.
     * @return the creator.
     */
    public String getCreator() {
        return creator;
    }
    /** Get the user that copied the data, if any.
     * @return the user that copied the data or absent.
     */
    public Optional<String> getCopier() {
        return copier;
    }
    /** Get the name of the module that was used to create the data, if available.
     * @return the module name or absent.
     */
    public Optional<String> getModule() {
        return module;
    }
    /** Get the method that was used to create the data, if available.
     * @return the method or absent.
     */
    public Optional<String> getMethod() {
        return method;
    }
    /** Get the commit hash of the software that was used to create the data, if available.
     * @return the commit hash or absent.
     */
    public Optional<String> getCommitHash() {
        return commitHash;
    }
    /** Get the version of the software that created the object, if available.
     * @return the version or absent.
     */
    public Optional<String> getVersion() {
        return version;
    }
    /** Get the md5 of the object, if available.
     * @return the md5 or absent.
     */
    public Optional<String> getMD5() {
        return md5;
    }
    /** Return any search tags the data source has applied to the data.
     * @return the search tags from the data source.
     */
    public Set<String> getSourceTags() {
        return sourceTags;
    }
    /** Get a builder for a SourceData instance.
     * @param data the data.
     * @param name the name of the data.
     * @param creator the creator of the data.
     * @return a builder.
     */
    public static Builder getBuilder(final UObject data, final String name, final String creator) {
        return new Builder(data, name, creator);
    }
    /** A builder for SourceData instances.
     * @author <EMAIL>
     *
     */
    public static class Builder {
        private final UObject data;
        private final String name;
        private final String creator;
        private Optional<String> copier = Optional.absent();
        private Optional<String> module = Optional.absent();
        private Optional<String> method = Optional.absent();
        private Optional<String> commitHash = Optional.absent();
        private Optional<String> version = Optional.absent();
        private Optional<String> md5 = Optional.absent();
        private final Set<String> sourceTags = new HashSet<>();
        private Builder(final UObject data, final String name, final String creator) {
            Utils.nonNull(data, "data");
            Utils.notNullOrEmpty(name, "name cannot be null or the empty string");
            Utils.notNullOrEmpty(creator, "creator cannot be null or the empty string");
            this.data = data;
            this.name = name;
            this.creator = creator;
        }
        // Normalizes null/empty input strings to absent().
        private Optional<String> checkNullOrEmpty(final String s) {
            // Consistency fix: braces on both branches (the original mixed
            // a brace-less if with a braced else).
            if (Utils.isNullOrEmpty(s)) {
                return Optional.absent();
            } else {
                return Optional.of(s);
            }
        }
        /** Add the name of the user that copied the data.
         * @param copier the user that copied the data.
         * @return this builder.
         */
        public Builder withNullableCopier(final String copier) {
            this.copier = checkNullOrEmpty(copier);
            return this;
        }
        /** Add the software module that was used to create the data.
         * @param module the software module.
         * @return this builder.
         */
        public Builder withNullableModule(final String module) {
            this.module = checkNullOrEmpty(module);
            return this;
        }
        /** Add the software method that was used to create the data.
         * @param method the software method.
         * @return this builder.
         */
        public Builder withNullableMethod(final String method) {
            this.method = checkNullOrEmpty(method);
            return this;
        }
        /** Add the commit hash of the software that was used to create this data.
         * @param commit the commit hash.
         * @return this builder.
         */
        public Builder withNullableCommitHash(final String commit) {
            this.commitHash = checkNullOrEmpty(commit);
            return this;
        }
        /** Add the version of the software that was used to create this data.
         * @param version the software version.
         * @return this builder.
         */
        public Builder withNullableVersion(final String version) {
            this.version = checkNullOrEmpty(version);
            return this;
        }
        /** Add the md5 of the data.
         * @param md5 the md5.
         * @return this builder.
         */
        public Builder withNullableMD5(final String md5) {
            this.md5 = checkNullOrEmpty(md5);
            return this;
        }
        /** Add a search tag applied to the data at the external data source.
         * @param sourceTag the tag.
         * @return this builder.
         */
        public Builder withSourceTag(final String sourceTag) {
            Utils.notNullOrEmpty(sourceTag, "sourceTag cannot be null or whitespace only");
            sourceTags.add(sourceTag);
            return this;
        }
        /** Build the SourceData instance.
         * @return the SourceData.
         */
        public SourceData build() {
            return new SourceData(data, name, creator, copier, module, method, commitHash,
                    version, md5, sourceTags);
        }
    }
}
|
<filename>particlesystem/src/main/java/com/github/particlesystem/modifiers/ParticleModifier.java
package com.github.particlesystem.modifiers;
import com.github.particlesystem.Particle;
public interface ParticleModifier {

	/**
	 * Modifies a particle's state for the given elapsed time.
	 * Implementations mutate the particle in place.
	 *
	 * @param particle     the particle to modify
	 * @param milliseconds elapsed time in milliseconds
	 *                     (NOTE(review): exact time base — since emission vs. since
	 *                     system start — is not visible here; confirm against callers)
	 */
	void apply(Particle particle, long milliseconds);
}
|
#!/bin/bash
# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# ex: ts=8 sw=4 sts=4 et filetype=sh
# Install a systemd unit shipped with this dracut module and wire it into an
# Ignition target as a requirement.
#   $1 = unit file name (relative to $moddir)
#   $2 = Ignition target suffix (optional, defaults to "complete")
install_ignition_unit() {
    local unit=$1; shift
    local target=${1:-complete}
    inst_simple "$moddir/$unit" "$systemdsystemunitdir/$unit"
    # note we `|| exit 1` here so we error out if e.g. the units are missing
    # see https://github.com/coreos/fedora-coreos-config/issues/799
    systemctl -q --root="$initdir" add-requires "ignition-${target}.target" "$unit" || exit 1
}
# dracut module entry point: install the multipath propagation script, its
# Ignition-ordered service, the generator, and the units the generator enables.
install() {
    inst_script "$moddir/coreos-propagate-multipath-conf.sh" \
        "/usr/sbin/coreos-propagate-multipath-conf"
    install_ignition_unit coreos-propagate-multipath-conf.service subsequent
    inst_simple "$moddir/coreos-multipath-generator" \
        "$systemdutildir/system-generators/coreos-multipath-generator"
    # we don't enable these; they're enabled dynamically via the generator
    inst_simple "$moddir/coreos-multipath-wait.target" \
        "$systemdsystemunitdir/coreos-multipath-wait.target"
    inst_simple "$moddir/coreos-multipath-trigger.service" \
        "$systemdsystemunitdir/coreos-multipath-trigger.service"
}
|
# Build every example for the linuxarmv6l platform.
# Walks each example category (skipping addons/ios/android) and runs `make`
# with the platform template Makefile, reporting per-example results.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PARENT_DIR="$(dirname "$DIR")"
MAKEFILE_PATH=$PARENT_DIR/templates/linuxarmv6l/Makefile
cd "${DIR}/../../examples"
for category in $(ls -1d *)
do
    if [ "$category" != "addons" ] && [ "$category" != "ios" ] && [ "$category" != "android" ] && [[ -d $category ]]; then
        # BUG FIX: the original used `echo "text >"+$var` which printed a
        # literal `+` (string-concat idiom from another language); use plain
        # shell interpolation instead.
        echo "CHANGED TO CATEGORY > $category"
        cd "$category"
        for j in $(ls -1d *)
        do
            echo ">>$j"
            cd "$j"
            # make clean -f $MAKEFILE_PATH
            make -f "$MAKEFILE_PATH"
            ret=$?
            if [ $ret -ne 0 ];
            then
                echo "error compiling: $j"
            else
                echo "successfully compiled: $j"
            fi
            cd ../
        done
        cd ../
    fi
done
|
import type { IResponseSending, IReponseContacts, IReponseAttendants, IReponseDepartment, IContactFilter } from '../../interfaces/index';
import { BaseProvider, IResultError } from '../BaseProvider';
import { ForWhoType, IProvider } from '../BaseProvider/IProvider';
import { ReqType } from './constants';
import type { MaxbotOptions } from './types/api';
import type { IResponseStatus } from './types/status';
/**
 * Class to interact with maxbot server
 * @see https://mbr.maxbot.com.br/doc-api-v1.php
 */
export declare class Maxbot extends BaseProvider implements IProvider {
    private config;
    private ready;
    /** NOTE(review): presumably the media file extensions accepted by the send* methods — confirm in the implementation. */
    private allowedExt;
    constructor(options: MaxbotOptions);
    private isValidPayload;
    /** Low-level POST helper used by the typed methods below. */
    apiPost(type: ReqType, payload?: {}): Promise<any>;
    /** Fetch the current connection/session status from the server. */
    getStatus(): Promise<IResponseStatus>;
    /** Whether the bot is ready; `force` appears to bypass a cached flag — TODO confirm. */
    isReady(force?: boolean): Promise<boolean>;
    /** Send a plain text message to the given WhatsApp recipient. */
    sendText(whatsapp: ForWhoType, text: string): Promise<IResponseSending | IResultError>;
    sendImage(whatsapp: ForWhoType, urlImage: string, _text?: string): Promise<IResponseSending | IResultError>;
    sendFile(whatsapp: ForWhoType, urlFile: string): Promise<IResponseSending | IResultError>;
    sendSound(whatsapp: ForWhoType, urlSound: string): Promise<IResponseSending | IResultError>;
    sendVideo(whatsapp: ForWhoType, urlVideo: string, _text?: string): Promise<IResponseSending | IResultError>;
    /** List service sectors/departments. */
    getServiceSector(): Promise<IReponseDepartment>;
    /** List contacts matching the given filter. */
    getContacts(filter: IContactFilter): Promise<IReponseContacts>;
    /** List attendants. */
    getAttendant(): Promise<IReponseAttendants>;
}
|
#!/bin/sh
# Run this to generate configure and Makefile
# Bootstraps the autotools build system: checks that autoconf/automake (and
# libtool, when configure.ac needs it) are installed, regenerates the build
# files, then runs ./configure forwarding this script's arguments.
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
THEDIR=`pwd`
(
cd $srcdir
die=0
# Probe each required tool via `--version`; record failure in `die` so every
# missing tool is reported before exiting.
(autoconf --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "You must have autoconf installed."
echo "Download the appropriate package for your distribution,"
echo "or see http://www.gnu.org/software/autoconf"
die=1
}
# Require libtool only if one of LT_INIT,
# AC_PROG_LIBTOOL, AM_PROG_LIBTOOL is used in configure.ac.
grep -E '^[[:blank:]]*(LT_INIT|A[CM]_PROG_LIBTOOL)' configure.ac >/dev/null \
&& {
(libtool --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "You must have libtool installed."
echo "Download the appropriate package for your distribution,"
echo "or see http://www.gnu.org/software/libtool"
die=1
}
}
(automake --version) < /dev/null > /dev/null 2>&1 || {
echo
die=1
echo "You must have automake installed."
echo "Download the appropriate package for your distribution,"
echo "or see http://www.gnu.org/software/automake"
}
test $die = 1 && exit 1
# Sanity check: must be run from the project's top-level directory.
test -f cockpit-ovirt.spec.in || {
echo "You must run this script in the top-level directory"
exit 1
}
if test -z "$*"; then
echo "I am going to run ./configure with no arguments - if you wish "
echo "to pass any to it, please specify them on the $0 command line."
fi
aclocal
# Run autoheader only if needed
grep '^[[:blank:]]*AC_CONFIG_HEADERS' configure.ac >/dev/null && autoheader
automake --add-missing
autoconf
./configure "$@"
)
# Optionally continue in an out-of-tree build directory.
if test "x$OBJ_DIR" != x; then
mkdir -p "$OBJ_DIR"
cd "$OBJ_DIR"
fi
|
<reponame>Guad/msfs2020-go
package simconnect
//go:generate go-bindata -pkg simconnect -o bindata.go -modtime 1 -prefix "../_vendor" "../_vendor/MSFS-SDK/SimConnect SDK/lib/SimConnect.dll"
// MSFS-SDK/SimConnect\ SDK/include/SimConnect.h
// MSFS-SDK/SimConnect\ SDK/lib/SimConnect.dll
// Everything is stubbed out on linux
import (
"fmt"
"reflect"
"unsafe"
)
// SimConnect is a stub handle for non-Windows builds; every method in this
// file is a no-op (see the file header: "Everything is stubbed out on linux").
type SimConnect struct {
	handle unsafe.Pointer
	// DefineMap maps struct type names to data-definition IDs; the special
	// "_last" key holds the next free ID (see GetDefineID).
	DefineMap map[string]DWORD
	// LastEventID is the next client event ID handed out by GetEventID.
	LastEventID DWORD
}
// New would open a SimConnect connection; in this stub build it returns
// (nil, nil).
// NOTE(review): callers get a nil *SimConnect together with a nil error, so
// any method call on the result will nil-deref (e.g. GetEventID) — confirm
// this constructor is never exercised on non-Windows platforms.
func New(name string) (*SimConnect, error) {
	return nil, nil
}
// GetEventID hands out the next sequential client event ID.
func (s *SimConnect) GetEventID() DWORD {
	next := s.LastEventID
	s.LastEventID++
	return next
}
// GetDefineID returns the data-definition ID for a's concrete struct type,
// allocating a fresh ID (tracked under the "_last" key) on first sight.
func (s *SimConnect) GetDefineID(a interface{}) DWORD {
	typeName := reflect.TypeOf(a).Elem().Name()
	if id, ok := s.DefineMap[typeName]; ok {
		return id
	}
	next := s.DefineMap["_last"]
	s.DefineMap[typeName] = next
	s.DefineMap["_last"] = next + 1
	return next
}
// RegisterDataDefinition reflects over the struct behind a and registers one
// datum per field, using the `name` and `unit` struct tags.
// NOTE(review): iteration starts at field 1, skipping field 0 — presumably a
// SimConnect header field embedded first; confirm against the struct layouts.
func (s *SimConnect) RegisterDataDefinition(a interface{}) error {
	defineID := s.GetDefineID(a)

	v := reflect.ValueOf(a).Elem()
	for j := 1; j < v.NumField(); j++ {
		fieldName := v.Type().Field(j).Name
		nameTag, _ := v.Type().Field(j).Tag.Lookup("name")
		unitTag, _ := v.Type().Field(j).Tag.Lookup("unit")

		// Byte arrays are encoded as "[N]byte" for derefDataType.
		fieldType := v.Field(j).Kind().String()
		if fieldType == "array" {
			fieldType = fmt.Sprintf("[%d]byte", v.Field(j).Type().Len())
		}

		if nameTag == "" {
			return fmt.Errorf("%s name tag not found", fieldName)
		}

		dataType, err := derefDataType(fieldType)
		if err != nil {
			return err
		}

		// BUG FIX: the original discarded this error; propagate it so callers
		// learn about registration failures.
		if err := s.AddToDataDefinition(defineID, nameTag, unitTag, dataType); err != nil {
			return err
		}
	}
	return nil
}
// Close is a no-op in this stub build; it never fails.
// (Comment below fixed: it previously showed SimConnect_Open by copy-paste.)
func (s *SimConnect) Close() error {
	// SimConnect_Close(
	//   HANDLE hSimConnect
	// );
	return nil
}
// AddToDataDefinition is a no-op stub; on Windows it wraps the native call
// shown below.
func (s *SimConnect) AddToDataDefinition(defineID DWORD, name, unit string, dataType DWORD) error {
	// SimConnect_AddToDataDefinition(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_DATA_DEFINITION_ID DefineID,
	//   const char * DatumName,
	//   const char * UnitsName,
	//   SIMCONNECT_DATATYPE DatumType = SIMCONNECT_DATATYPE_FLOAT64,
	//   float fEpsilon = 0,
	//   DWORD DatumID = SIMCONNECT_UNUSED
	// );
	return nil
}
// SubscribeToSystemEvent is a no-op stub for the native call shown below.
func (s *SimConnect) SubscribeToSystemEvent(eventID DWORD, eventName string) error {
	// SimConnect_SubscribeToSystemEvent(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_CLIENT_EVENT_ID EventID,
	//   const char * SystemEventName
	// );
	return nil
}
// RequestDataOnSimObjectType is a no-op stub for the native call shown below.
func (s *SimConnect) RequestDataOnSimObjectType(requestID, defineID, radius, simobjectType DWORD) error {
	// SimConnect_RequestDataOnSimObjectType(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_DATA_REQUEST_ID RequestID,
	//   SIMCONNECT_DATA_DEFINITION_ID DefineID,
	//   DWORD dwRadiusMeters,
	//   SIMCONNECT_SIMOBJECT_TYPE type
	// );
	return nil
}
// RequestDataOnSimObject is a no-op stub for the native call shown below.
func (s *SimConnect) RequestDataOnSimObject(requestID, defineID, objectID, period, flags, origin, interval, limit DWORD) error {
	// SimConnect_RequestDataOnSimObject(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_DATA_REQUEST_ID RequestID,
	//   SIMCONNECT_DATA_DEFINITION_ID DefineID,
	//   SIMCONNECT_OBJECT_ID ObjectID,
	//   SIMCONNECT_PERIOD Period,
	//   SIMCONNECT_DATA_REQUEST_FLAG Flags = 0,
	//   DWORD origin = 0,
	//   DWORD interval = 0,
	//   DWORD limit = 0
	// );
	return nil
}
// TransmitClientEvent is a no-op stub (non-Windows build).
func (s *SimConnect) TransmitClientEvent(event, data DWORD) error {
	return nil
}
// SetDataOnSimObject is a no-op stub for the native call shown below.
func (s *SimConnect) SetDataOnSimObject(defineID, simobjectType, flags, arrayCount, size DWORD, buf unsafe.Pointer) error {
	// Example call from the Windows implementation:
	//   s.SetDataOnSimObject(defineID, simconnect.OBJECT_ID_USER, 0, 0, size, buf)
	// SimConnect_SetDataOnSimObject(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_DATA_DEFINITION_ID DefineID,
	//   SIMCONNECT_OBJECT_ID ObjectID,
	//   SIMCONNECT_DATA_SET_FLAG Flags,
	//   DWORD ArrayCount,
	//   DWORD cbUnitSize,
	//   void * pDataSet
	// );
	return nil
}
// SubscribeToFacilities is a no-op stub for the native call shown below.
func (s *SimConnect) SubscribeToFacilities(facilityType, requestID DWORD) error {
	// SimConnect_SubscribeToFacilities(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_FACILITY_LIST_TYPE type,
	//   SIMCONNECT_DATA_REQUEST_ID RequestID
	// );
	return nil
}
// UnsubscribeToFacilities is a no-op stub for the native call shown below.
func (s *SimConnect) UnsubscribeToFacilities(facilityType DWORD) error {
	// SimConnect_UnsubscribeToFacilities(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_FACILITY_LIST_TYPE type
	// );
	return nil
}
// RequestFacilitiesList is a no-op stub for the native call shown below.
func (s *SimConnect) RequestFacilitiesList(facilityType, requestID DWORD) error {
	// SimConnect_RequestFacilitiesList(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_FACILITY_LIST_TYPE type,
	//   SIMCONNECT_DATA_REQUEST_ID RequestID
	// );
	return nil
}
// MapClientEventToSimEvent is a no-op stub for the native call shown below.
func (s *SimConnect) MapClientEventToSimEvent(eventID DWORD, eventName string) error {
	// SimConnect_MapClientEventToSimEvent(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_CLIENT_EVENT_ID EventID,
	//   const char * EventName = ""
	// );
	return nil
}
// MenuAddItem is a no-op stub for the native call shown below.
func (s *SimConnect) MenuAddItem(menuItem string, menuEventID, Data DWORD) error {
	// SimConnect_MenuAddItem(
	//   HANDLE hSimConnect,
	//   const char * szMenuItem,
	//   SIMCONNECT_CLIENT_EVENT_ID MenuEventID,
	//   DWORD dwData
	// );
	return nil
}
// MenuDeleteItem is a no-op stub for the native call shown below.
// NOTE(review): the native API only takes the MenuEventID; the extra
// menuItem/Data parameters here appear to mirror MenuAddItem — confirm.
func (s *SimConnect) MenuDeleteItem(menuItem string, menuEventID, Data DWORD) error {
	// SimConnect_MenuDeleteItem(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_CLIENT_EVENT_ID MenuEventID
	// );
	return nil
}
// AddClientEventToNotificationGroup is a no-op stub for the native call below.
func (s *SimConnect) AddClientEventToNotificationGroup(groupID, eventID DWORD) error {
	// SimConnect_AddClientEventToNotificationGroup(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_NOTIFICATION_GROUP_ID GroupID,
	//   SIMCONNECT_CLIENT_EVENT_ID EventID,
	//   BOOL bMaskable = FALSE
	// );
	return nil
}
// SetNotificationGroupPriority is a no-op stub for the native call below.
func (s *SimConnect) SetNotificationGroupPriority(groupID, priority DWORD) error {
	// SimConnect_SetNotificationGroupPriority(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_NOTIFICATION_GROUP_ID GroupID,
	//   DWORD uPriority
	// );
	return nil
}
// ShowText is a no-op stub for the native SimConnect_Text call shown below.
func (s *SimConnect) ShowText(textType DWORD, duration float64, eventID DWORD, text string) error {
	// SimConnect_Text(
	//   HANDLE hSimConnect,
	//   SIMCONNECT_TEXT_TYPE type,
	//   float fTimeSeconds,
	//   SIMCONNECT_CLIENT_EVENT_ID EventID,
	//   DWORD cbUnitSize,
	//   void * pDataSet
	// );
	return nil
}
// GetNextDispatch is a no-op stub; it always reports no pending message.
func (s *SimConnect) GetNextDispatch() (unsafe.Pointer, int32, error) {
	return nil, 0, nil
}
|
#!/usr/bin/env bash

# Build/run the arm32v7 Ubuntu 16.04 XBB docker image interactively.

# -----------------------------------------------------------------------------
# Safety settings (see https://gist.github.com/ilg-ul/383869cbb01f61a51c4d).

if [[ ! -z ${DEBUG} ]]
then
  set ${DEBUG} # Activate the expand mode if DEBUG is anything but empty.
else
  DEBUG=""
fi

set -o errexit # Exit if command failed.
set -o pipefail # Exit if pipe failed.
set -o nounset # Exit if variable not set.

# Remove the initial space and instead use '\n'.
IFS=$'\n\t'

# -----------------------------------------------------------------------------
# Identify the script location, to reach, for example, the helper scripts.

script_path="$0"
if [[ "${script_path}" != /* ]]
then
  # Make relative path absolute.
  script_path="$(pwd)/$0"
fi

script_name="$(basename "${script_path}")"
script_folder_path="$(dirname "${script_path}")"
script_folder_name="$(basename "${script_folder_path}")"

# =============================================================================

# Walk two steps up.
helper_folder_path="$(dirname $(dirname "${script_folder_path}"))/helper"

source "${helper_folder_path}/common-functions-source.sh"
source "${helper_folder_path}/common-docker-functions-source.sh"

# -----------------------------------------------------------------------------

version="3.3"

arch="arm32v7"
distro="ubuntu"
release="16.04"

from="ilegeul/${distro}:${arch}-${release}-xbb-bootstrap-v${version}"

layer="xbb"

# -----------------------------------------------------------------------------

detect_host

host_init_docker_env

host_init_docker_input \
"$(dirname $(dirname "${script_folder_path}"))/ca-bundle/ca-bundle.crt"

# BUG FIX: the line above previously ended with a trailing backslash, which
# turned the next command into an *argument* of host_init_docker_input instead
# of executing it. Run it as its own command.
host_run_docker_it_with_image

host_clean_docker_input

echo
echo "Done."

# -----------------------------------------------------------------------------
|
#!/bin/bash
#
# pre-req.sh - Server pre-configuration with Ansible
#
# Site  : https://alejunio.cloud
# Author: Alex Junio <contato@alejunio.cloud>
#
# ---------------------------------------------------------
# Installs everything the stack needs so that ansible and
# its tasks run correctly.
# ---------------------------------------------------------
#
# History:
#
#   v1.0 17/03/2021, Alex Junio.
#       - Initial version of the script
#

# Install Ansible and basic tooling.
apt update
apt-get install python3 git unzip htop -y
apt install software-properties-common -y
apt-add-repository --yes --update ppa:ansible/ansible
apt install ansible -y

# Configure the Ansible hosts inventory.
# `rm -f` makes the script re-runnable: the original plain `rm` aborted the
# `&&` chain (and the download) when the file was already gone.
cd /etc/ansible/ && rm -f hosts && wget https://raw.githubusercontent.com/alejunio/ansible-wordpress/main/ansible/hosts

# Create the tasks directory (idempotent; plain `mkdir` failed on re-run).
mkdir -p /home/ansible

# Install Certbot for SSL certificates.
snap install core
snap install --classic certbot
# `-f` so an existing symlink from a previous run does not make this fail.
ln -sf /snap/bin/certbot /usr/bin/certbot
|
import Foundation
// A single alarm as persisted (JSON-encoded) in UserDefaults.
struct Alarm: Codable {
    let time: String
    let label: String
    // Add any other properties as needed
}
// Collection of alarms persisted to UserDefaults under the "alarms" key.
struct Alarms {
    var list: [Alarm] = []

    // Load alarms from UserDefaults, replacing the current list.
    // BUG FIX: must be `mutating` — the original non-mutating version assigned
    // to `list` on a value type and would not compile.
    mutating func localRead() {
        if let jsonData = UserDefaults.standard.data(forKey: "alarms") {
            do {
                list = try JSONDecoder().decode([Alarm].self, from: jsonData)
            } catch {
                print("Error decoding JSON data: \(error)")
            }
        }
    }

    // Build an Alarms value pre-populated from local storage.
    // BUG FIX: the original used a Kotlin-style `.apply { ... }` helper that
    // does not exist in the Swift standard library.
    static func fromLocal() -> Alarms {
        var alarms = Alarms()
        alarms.localRead()
        return alarms
    }

    // Append a new alarm and persist the updated list immediately.
    mutating func addAlarm(_ newAlarm: Alarm) {
        list.append(newAlarm)
        saveToLocalStorage()
    }

    // Serialize the current list back to UserDefaults.
    private func saveToLocalStorage() {
        do {
            let jsonData = try JSONEncoder().encode(list)
            UserDefaults.standard.set(jsonData, forKey: "alarms")
        } catch {
            print("Error encoding JSON data: \(error)")
        }
    }
}
<reponame>NAVEENRH/FULL_STACK_TRAINING
import React from "react";
import { Route, Switch } from "react-router-dom";
import Container from "./components/Container";
import ErrorPage from "./components/ErrorPage";
import PrivateRoute from "./components/PrivateRoute";
import Login from "./containers/Login";
import ProductDetail from "./containers/ProductDetail";
import ProductList from "./containers/ProductList";
// import Profile from "./containers/Profile";
import Demo from "./Demo";
const LazyProfile = React.lazy(() => import("./containers/Profile"));
const AppRouter: React.FC = (props) => {
return (
<main>
<Container fluid>
<React.Suspense fallback={<div>Loading...</div>}>
<Switch>
<Route path={"/"} component={Demo} exact />
<Route path={"/products"} component={ProductList} />
<Route path={"/login"} component={Login} />
<PrivateRoute path={"/profile"} component={LazyProfile} />
<Route path={"/productdetail/:id"} component={ProductDetail} />
{/* 404 Route */}
<Route component={ErrorPage} />
</Switch>
</React.Suspense>
</Container>
</main>
);
};
export default AppRouter;
|
<filename>Godeps/_workspace/src/github.com/wellington/go-libsass/libs/func.go
package libs
// #include <stdlib.h>
// #include "sass_context.h"
//
// extern union Sass_Value* GoBridge( union Sass_Value* s_args, void* cookie);
//
// union Sass_Value* CallSassFunction( union Sass_Value* s_args, Sass_Function_Entry cb, struct Sass_Options* opts ) {
// void* cookie = sass_function_get_cookie(cb);
// union Sass_Value* ret;
// ret = GoBridge(s_args, cookie);
// return ret;
// }
//
import "C"
import "unsafe"
type SassFunc C.Sass_Function_Entry
// SassMakeFunction binds a Go pointer to a Sass function signature by
// building a libsass Sass_Function_Entry whose callback is the C shim
// CallSassFunction and whose cookie is ptr.
// NOTE(review): csign (allocated by C.CString) is never freed here —
// presumably libsass retains the signature pointer for the life of the
// function entry; confirm before adding a C.free.
func SassMakeFunction(signature string, ptr unsafe.Pointer) SassFunc {
	csign := C.CString(signature)
	fn := C.sass_make_function(
		csign,
		C.Sass_Function_Fn(C.CallSassFunction),
		ptr)
	return (SassFunc)(fn)
}
// BindFuncs attaches a slice of Functions to a sass options. Signatures
// are already defined in the SassFunc. The entries are copied into a
// libsass-allocated function list which is then installed on opts.
func BindFuncs(opts SassOptions, funcs []SassFunc) {
	sz := C.size_t(len(funcs))
	cfuncs := C.sass_make_function_list(sz)
	for i, cfn := range funcs {
		C.sass_function_set_list_entry(cfuncs, C.size_t(i), cfn)
	}
	C.sass_option_set_c_functions(opts, cfuncs)
}
|
#!/bin/bash

# Container entrypoint for the Strimzi Topic Operator: wires up optional
# Log4j2 config and TLS key material, then execs the Java launcher.

# Use the mounted custom Log4j2 configuration when present.
if [ -f /opt/topic-operator/custom-config/log4j2.properties ];
then
    export JAVA_OPTS="${JAVA_OPTS} -Dlog4j2.configurationFile=file:/opt/topic-operator/custom-config/log4j2.properties"
fi

if [ "$STRIMZI_TLS_ENABLED" = "true" ]; then
    # Generate temporary keystore password
    export CERTS_STORE_PASSWORD=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c32)

    mkdir -p /tmp/topic-operator

    # Import certificates into keystore and truststore
    /bin/tls_prepare_certificates.sh

    export STRIMZI_TRUSTSTORE_LOCATION=/tmp/topic-operator/replication.truststore.p12
    export STRIMZI_TRUSTSTORE_PASSWORD=$CERTS_STORE_PASSWORD
    export STRIMZI_KEYSTORE_LOCATION=/tmp/topic-operator/replication.keystore.p12
    export STRIMZI_KEYSTORE_PASSWORD=$CERTS_STORE_PASSWORD
fi

# Quote "$1" so an argument containing whitespace survives word splitting
# (the original passed it unquoted).
exec /bin/launch_java.sh "$1"
|
# this script's home dir is trunk/installer/linux/X11
#
# Builds the UGENE Linux binary bundle: collects binaries, data, Qt and system
# libraries, plugins, and packs everything into versioned tar.gz archives.
# Relies on helper functions (add-binary, add-core-library, add-qt-library,
# add-plugin) sourced from create_bundle_common.sh.
PRODUCT_NAME="ugene"
# Derive the version string from ugene_version.pri, expanding the qmake
# major/minor placeholders.
VERSION_MAJOR=`cat ../../../src/ugene_version.pri | grep 'UGENE_VER_MAJOR=' | awk -F'=' '{print $2}'`
VERSION_MINOR=`cat ../../../src/ugene_version.pri | grep 'UGENE_VER_MINOR=' | awk -F'=' '{print $2}'`
VERSION=`cat ../../../src/ugene_version.pri | grep UGENE_VERSION | awk -F'=' '{print $2}' | \
sed -e 's/$${UGENE_VER_MAJOR}/'"$VERSION_MAJOR"'/g' \
-e 's/$${UGENE_VER_MINOR}/'"$VERSION_MINOR"'/g'`
RELEASE_DIR=../../../src/_release
SYMBOLS_DIR=symbols
DUMP_SYMBOLS_LOG=dump_symbols_log.txt
DATA_DIR=../../../data
TARGET_APP_DIR="${PRODUCT_NAME}-${VERSION}"
PACKAGE_TYPE="linux"
ARCH=`uname -m`
source create_bundle_common.sh
if [ -z "$PATH_TO_QT_LIBS" ]; then
echo PATH_TO_QT_LIBS environment variable is not set!
exit -1
fi
# NOTE(review): unlike the QT check above, a missing PATH_TO_LIBPNG12 only
# warns and does not exit — libpng12 is optional (see the guarded copy below).
if [ -z "$PATH_TO_LIBPNG12" ]; then
echo PATH_TO_LIBPNG12 environment variable is not set!
fi
# Remove artifacts from any previous bundle run.
echo cleaning previous bundle
rm -rf ${TARGET_APP_DIR}
rm -rf "${SYMBOLS_DIR}"
rm -f "${DUMP_SYMBOLS_LOG}"
rm -rf *.tar.gz
mkdir $TARGET_APP_DIR
mkdir "${SYMBOLS_DIR}"
# Copy the main executables (add-binary also strips/dumps symbols via the
# sourced helpers).
echo
echo copying ugenecl
add-binary ugenecl
echo copying ugeneui
add-binary ugeneui
echo copying ugenem
add-binary ugenem
echo copying plugins_checker
add-binary plugins_checker
echo copying ugene startup script
cp -v $RELEASE_DIR/ugene "$TARGET_APP_DIR"
echo copying man page for UGENE
mkdir -v "$TARGET_APP_DIR/man1"
cp -v ../../_common_data/ugene.1.gz "$TARGET_APP_DIR/man1"
echo copying README file
cp -v ../../_common_data/README "$TARGET_APP_DIR"
echo copying LICENSE file
cp -v ../../_common_data/LICENSE "$TARGET_APP_DIR"
echo copying LICENSE.3rd_party file
cp -v ../../_common_data/LICENSE.3rd_party "$TARGET_APP_DIR"
echo copying file association script files
cp -v ../../_common_data/Associate_files_to_UGENE.sh "$TARGET_APP_DIR"
cp -v ../../_common_data/icons.tar.gz "$TARGET_APP_DIR"
cp -v ../../_common_data/application-x-ugene.xml "$TARGET_APP_DIR"
cp -v ../../_common_data/ugene.desktop "$TARGET_APP_DIR"
cp -v ../../_common_data/ugene.png "$TARGET_APP_DIR"
mkdir "${TARGET_APP_DIR}/plugins"
echo copying translations
cp -v $RELEASE_DIR/transl_en.qm "$TARGET_APP_DIR"
cp -v $RELEASE_DIR/transl_ru.qm "$TARGET_APP_DIR"
echo copying data dir
cp -R "$RELEASE_DIR/../../data" "${TARGET_APP_DIR}"
# Optional cistrome data set; moved back out again after packaging (see end).
if [ ! -z $UGENE_CISTROME_PATH ]; then
echo "Copying cistrome data"
mkdir -p "${TARGET_APP_DIR}/data/cistrome"
mv $UGENE_CISTROME_PATH/* ${TARGET_APP_DIR}/data/cistrome/
fi
echo
#include external tools package if applicable
echo copying tools dir
if [ -e "$RELEASE_DIR/../../tools" ]; then
cp -R "$RELEASE_DIR/../../tools" "${TARGET_APP_DIR}/"
find $TARGET_APP_DIR -name ".svn" | xargs rm -rf
PACKAGE_TYPE="linux-full"
if [ ! -z $UGENE_R_DIST_PATH ]; then
echo "Copying R tool"
cp -R $UGENE_R_DIST_PATH "${TARGET_APP_DIR}/tools"
fi
fi
echo
echo copying core shared libs
add-core-library U2Algorithm
add-core-library U2Core
add-core-library U2Designer
add-core-library U2Formats
add-core-library U2Gui
add-core-library U2Lang
add-core-library U2Private
add-core-library U2Test
add-core-library U2View
add-core-library ugenedb
add-core-library breakpad
echo
echo copying qt libraries
add-qt-library Qt5Core
add-qt-library Qt5DBus
add-qt-library Qt5Gui
add-qt-library Qt5Multimedia
add-qt-library Qt5MultimediaWidgets
add-qt-library Qt5Network
add-qt-library Qt5OpenGL
add-qt-library Qt5Positioning
add-qt-library Qt5PrintSupport
add-qt-library Qt5Qml
add-qt-library Qt5Quick
add-qt-library Qt5Script
add-qt-library Qt5ScriptTools
add-qt-library Qt5Sensors
add-qt-library Qt5Sql
add-qt-library Qt5Svg
add-qt-library Qt5Test
add-qt-library Qt5WebChannel
add-qt-library Qt5WebKit
add-qt-library Qt5WebKitWidgets
add-qt-library Qt5Widgets
add-qt-library Qt5Xml
# Optional extra system libraries controlled by environment variables.
if [ ! -z "$PATH_TO_LIBPNG12" ]; then
cp -v "$PATH_TO_LIBPNG12/libpng12.so.0" "${TARGET_APP_DIR}"
strip -v "${TARGET_APP_DIR}/libpng12.so.0"
fi
if [ ! -z "$PATH_TO_LIBPROC" ]; then
cp -v "$PATH_TO_LIBPROC" "${TARGET_APP_DIR}"
strip -v "${TARGET_APP_DIR}"
fi
if [ ! -z "$PATH_TO_INCLUDE_LIBS" ]; then
cp -v "$PATH_TO_INCLUDE_LIBS"/* "${TARGET_APP_DIR}"
fi
# Qt plugins (SQL drivers, platform plugins, image formats) and their
# transitive dependencies discovered via ldd.
mkdir "${TARGET_APP_DIR}/sqldrivers"
cp -v "$PATH_TO_QT_LIBS/../plugins/sqldrivers/libqsqlmysql.so" "${TARGET_APP_DIR}/sqldrivers"
strip -v "${TARGET_APP_DIR}/sqldrivers/libqsqlmysql.so"
cp -r -v "$PATH_TO_QT_LIBS/../plugins/platforms" "${TARGET_APP_DIR}"
strip -v "${TARGET_APP_DIR}/platforms"/*.so
cp -r -v "$PATH_TO_QT_LIBS/../plugins/imageformats" "${TARGET_APP_DIR}"
strip -v ${TARGET_APP_DIR}/imageformats/*.so
PATH_TO_MYSQL_CLIENT_LIB=`ldd "${TARGET_APP_DIR}/sqldrivers/libqsqlmysql.so" |grep libmysqlclient |cut -d " " -f3`
cp -v "$PATH_TO_MYSQL_CLIENT_LIB" "${TARGET_APP_DIR}"
PATH_TO_ICU_DATA_LIB=`ldd "${PATH_TO_QT_LIBS}/libQt5Widgets.so.5" |grep libicudata.so |cut -d " " -f3`
cp -v -L "$PATH_TO_ICU_DATA_LIB" "${TARGET_APP_DIR}"
PATH_TO_ICU_I18N_LIB=`ldd "${PATH_TO_QT_LIBS}/libQt5Widgets.so.5" |grep libicui18n.so |cut -d " " -f3`
cp -v -L "$PATH_TO_ICU_I18N_LIB" "${TARGET_APP_DIR}"
PATH_TO_ICU_UUC_LIB=`ldd "${PATH_TO_QT_LIBS}/libQt5Widgets.so.5" |grep libicuuc.so |cut -d " " -f3`
cp -v -L "$PATH_TO_ICU_UUC_LIB" "${TARGET_APP_DIR}"
if [ "$1" == "-test" ]
then
cp "$PATH_TO_QT_LIBS/libQtTest.so.4" "${TARGET_APP_DIR}"
fi
echo copying plugins
add-plugin annotator
add-plugin ball
add-plugin biostruct3d_view
add-plugin chroma_view
add-plugin circular_view
add-plugin clark_support
add-plugin dbi_bam
add-plugin diamond_support
add-plugin dna_export
add-plugin dna_flexibility
add-plugin dna_graphpack
add-plugin dna_stat
add-plugin dotplot
add-plugin enzymes
add-plugin external_tool_support
add-plugin genome_aligner
add-plugin gor4
add-plugin hmm2
add-plugin kalign
add-plugin kraken_support
add-plugin linkdata_support
add-plugin metaphlan2_support
add-plugin ngs_reads_classification
add-plugin orf_marker
add-plugin opencl_support
add-plugin pcr
add-plugin phylip
add-plugin primer3
add-plugin psipred
add-plugin ptools
add-plugin query_designer
add-plugin remote_blast
add-plugin repeat_finder
add-plugin sitecon
add-plugin smith_waterman
add-plugin umuscle
add-plugin variants
add-plugin weight_matrix
add-plugin wevote_support
add-plugin workflow_designer
if [ "$1" == "-test" ]; then
add-plugin test_runner
fi
# remove svn dirs
find $TARGET_APP_DIR -name ".svn" | xargs rm -rf
# Resolve the VCS revision: prefer the TeamCity-provided variable, fall back
# to querying svn directly.
REVISION=$BUILD_VCS_NUMBER_new_trunk
if [ -z "$REVISION" ]; then
REVISION=`svn status -u | sed -n -e '/revision/p' | awk '{print $4}'`
fi
DATE=`date '+%d_%m_%H-%M'`
if [ "$1" == "-test" ]; then
TEST="-test"
fi
# Pack the symbols and the application bundle into versioned archives.
PACKAGE_NAME=$PRODUCT_NAME"-"$VERSION"-$PACKAGE_TYPE-"$ARCH"-r"$REVISION$TEST
tar -czf ${SYMBOLS_DIR}.tar.gz $SYMBOLS_DIR/
tar -czf $PACKAGE_NAME.tar.gz $TARGET_APP_DIR/
# Move the cistrome data back to its original location (it was mv'd in above).
if [ ! -z $UGENE_CISTROME_PATH ]; then
echo "Copying cistrome data"
mv ${TARGET_APP_DIR}/data/cistrome/* $UGENE_CISTROME_PATH
fi
|
#!/usr/bin/env bash
# A script file to generate an IntelliJ project using the IntelliJ Pants Plugin.
# Note: for any modification in this file please modify ExportIntegrationTest#test_intellij_integration
# We don't want to include targets which are used in unit tests in our project so let's exclude them.
./pants export src/python/:: tests/python/pants_test:: contrib/:: \
  --exclude-target-regexp='.*go/examples.*' \
  --exclude-target-regexp='.*scrooge/tests/thrift.*' \
  --exclude-target-regexp='.*spindle/tests/thrift.*' \
  --exclude-target-regexp='.*spindle/tests/jvm.*'
|
#!/usr/bin/env bash
# Open an interactive bash shell inside the "php" docker-compose service.
docker-compose run php bash
<gh_stars>0
package com.jackson0714.passjava.question.service.impl;
import com.jackson0714.common.to.es.QuestionEsModel;
import com.jackson0714.common.utils.R;
import com.jackson0714.passjava.question.entity.TypeEntity;
import com.jackson0714.passjava.question.feign.SearchFeignService;
import com.jackson0714.passjava.question.service.ITypeService;
import org.apache.commons.lang.StringUtils;
import org.springframework.beans.BeanUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.Map;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.jackson0714.common.utils.PageUtils;
import com.jackson0714.common.utils.Query;
import com.jackson0714.passjava.question.dao.QuestionDao;
import com.jackson0714.passjava.question.entity.QuestionEntity;
import com.jackson0714.passjava.question.service.IQuestionService;
@Service("questionService")
public class QuestionServiceImpl extends ServiceImpl<QuestionDao, QuestionEntity> implements IQuestionService {
@Autowired
ITypeService typeService;
@Autowired
SearchFeignService searchFeignService;
@Override
public PageUtils queryPage(Map<String, Object> params) {
//1.get key
String key = (String) params.get("key");
QueryWrapper<QuestionEntity> queryWrapper = new QueryWrapper<>();
if (!StringUtils.isEmpty(key)) {
queryWrapper.eq("id", key).or().like("title", key).or().like("answer", key);
}
String type = (String) params.get("type");
if (!StringUtils.isEmpty(type)) {
queryWrapper.eq("type", type);
}
IPage<QuestionEntity> page = this.page(
new Query<QuestionEntity>().getPage(params),
queryWrapper
);
return new PageUtils(page);
}
@Override
public boolean saveQuestion(QuestionEntity question) {
boolean saveResult = save(question);
//saveEs(question);
return true;
}
@Override
public boolean updateQuestion(QuestionEntity question) {
updateById(question);
//saveEs(question);
return true;
}
private boolean saveEs(QuestionEntity question) {
// 1.创建 ES model
QuestionEsModel esModel = new QuestionEsModel();
// 2.复制属性
// 2.1 复制属性
BeanUtils.copyProperties(question, esModel);
// 2.2 获取“题目类型”的名称
TypeEntity typeEntity = typeService.getById(question.getType());
String typeName = typeEntity.getType();
// 2.3 给 ES model 的“类型”字段赋值
esModel.setTypeName(typeName);
System.out.println("-----------------esModel:" + esModel);
// 3. 调用 passjava-search 服务,将数据发送到 ES 中保存。
R r = searchFeignService.saveQuestion(esModel);
System.out.println("r:" + r);
return true;
}
} |
require 'csv'
# View helpers for listing, copying, and exporting network events.
module NetworkEventsHelper
  # True when the request carries event ids selected for copying.
  def copy_network_events?
    params[:network_event_ids].present?
  end

  # Start-date filter from the request, defaulting to today.
  def default_start_date
    if params[:start_date].present?
      params[:start_date]
    else
      Date.today
    end
  end

  # End-date filter from the request, defaulting to three months out.
  def default_end_date
    if params[:end_date].present?
      params[:end_date]
    else
      3.months.from_now
    end
  end

  # Submit-button label: create for unsaved records, update otherwise.
  def event_button_name(event)
    if event.new_record?
      'Create Event'
    else
      'Update Event'
    end
  end

  # Comma-separated names of the given records; "" when the list is blank.
  def clip_from(event_list)
    names = ""
    if event_list.present?
      for item in event_list
        names += item.name + ", "
      end
    end
    names.chomp(', ')
  end

  # Multi-line plain-text summary of an event (e.g. for copy-to-clipboard).
  def clip_event_info(event)
    if event.scheduled_at.present?
      event_schedule_time = event.try(:scheduled_at).to_formatted_s(:long)
    else
      event_schedule_time = "Unscheduled"
    end
    if event.stop_time.present?
      ends_at = event.stop_time.to_formatted_s(:long)
    else
      ends_at = "Unscheduled"
    end
    event_info = "Name: " + event.name + "\n" +
      "Program: " + event.program.name + "\n" +
      "Location: " + event.location.name + "\n" +
      "Organizations: " + clip_from(event.organizations) + "\n" +
      "Site Contacts: " + clip_from(event.site_contacts) + "\n" +
      "School Contacts: " + clip_from(event.school_contacts) + "\n" +
      "Volunteers: " + clip_from(event.volunteers) + "\n" +
      "Schools: " + clip_from(event.schools) + "\n" +
      "Graduating Classes: " + clip_from(event.graduating_classes) + "\n" +
      "Cohorts: " + clip_from(event.cohorts) + "\n" +
      "Scheduled at: " + event_schedule_time + "\n" +
      "Ends at: " + ends_at + "\n" +
      "Notes: " + event.try(:notes).to_s
    return event_info
  end

  # Filter params forwarded to the CSV download link.
  def network_events_download_query_params
    request.query_parameters.slice(
      :start_date,
      :end_date,
      :program_ids,
      :school_ids,
      :organization_ids,
      :cohort_ids,
      :graduating_class_ids)
  end

  # CSV export of the given events.
  # BUG FIX: the original ignored the `events` argument and iterated the
  # @network_events instance variable instead; iterate the argument.
  def csv(events)
    CSV.generate(headers: true, :quote_char=>'"', :force_quotes => true,) do |csv|
      csv << %w{name status program location organizations date start_time end_time school cohorts classes school_contacts site_contacts notes}
      events.each do |event|
        csv << [
          event.name,
          event.status,
          event.program_name,
          event.location_name,
          event.organizations.map(&:name).join(', '),
          if event.date != nil then event.date.strftime('%m-%d-%Y') else "" end,
          if event.start_time != nil then event.start_time.strftime("%I:%M") else "" end,
          if event.stop_time != nil then event.stop_time.strftime("%I:%M") else "" end,
          event.schools.map(&:name).join(', '),
          event.cohorts.map(&:name).join(', '),
          event.graduating_classes.map(&:year).join(', '),
          event.school_contacts.map(&:name).join(', '),
          event.site_contacts.map(&:name).join(', '),
          event.notes
        ]
      end
    end
  end
end
|
// <NAME> Copyright (c) 2019
// New Beginnings - Capstone Project
// filename: f_userListDelete.c
#include "headers.h"
/*
 * Free every node of the user list L. When p is nonzero, progress messages
 * are printed. Always returns 0.
 * Note: matches the original behavior exactly — L->head is NOT reset to NULL.
 */
int userListDelete(struct userList* L, int p)
{
    if (p) { printf("deleting user list..."); }

    if (L->head == NULL)
    {
        if (p) { printf("no head, returning...\n"); }
        return 0;
    }

    /* Walk the list, saving the successor before each node is released. */
    struct userData* node = L->head;
    while (node != NULL)
    {
        struct userData* next = node->next;
        free(node);
        node = next;
    }

    if (p) { printf("finished deleting list\n"); }
    return 0;
}
|
/*
*
*/
package net.community.apps.tools.adm.config;
import net.community.chest.dom.proxy.XmlProxyConvertible;
import net.community.chest.ui.helpers.table.EnumTableColumn;
import org.w3c.dom.Element;
/**
 * <P>Copyright 2009 as per GPLv2</P>
 *
 * Table-column descriptor for the values table, keyed by the
 * ValuesTableColumn enum.
 *
 * @author <NAME>.
 * @since Oct 15, 2009 10:50:38 AM
 */
public class ValuesTableColInfo extends EnumTableColumn<ValuesTableColumn> {
	/**
	 * @param colIndex column identifier
	 * @param colName display name for the column header
	 * @param colWidth initial column width
	 */
	public ValuesTableColInfo (ValuesTableColumn colIndex, String colName, int colWidth)
	{
		super(ValuesTableColumn.class, colIndex, colWidth);
		setColumnName(colName);
	}
	/** Uses the inherited DEFAULT_WIDTH for the column width. */
	public ValuesTableColInfo (ValuesTableColumn colIndex, String colName)
	{
		this(colIndex, colName, DEFAULT_WIDTH);
	}
	/** Derives the display name from the enum constant itself (null-safe). */
	public ValuesTableColInfo (ValuesTableColumn colIndex)
	{
		this(colIndex, (null == colIndex) ? null : colIndex.toString());
	}
	/** Reconstructs the column from its XML definition. */
	public ValuesTableColInfo (Element elem) throws Exception
	{
		super(ValuesTableColumn.class, elem);
	}
	/*
	 * @see net.community.chest.swing.component.table.BaseTableColumn#getColumnConverter(org.w3c.dom.Element)
	 */
	@Override
	protected XmlProxyConvertible<?> getColumnConverter (Element elem)
		throws Exception
	{
		return (null == elem) ? null : ValuesTableColInfoReflectiveProxy.VALSTBLCOLS;
	}
}
|
<filename>src/default-stats-capture.d.ts
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { Capture } from './interfaces';
/** Default `Capture` implementation provided by this package (see ./interfaces for the contract). */
export declare const defaultStatsCapture: Capture;
|
<filename>src/lib/jssTheme.js
// Shared JSS theme: font stacks and brand palette used across components.
const typography = {
  fontFamily: {
    serif: "'Playfair Display', serif",
    sansSerif: "'Raleway', sans-serif",
    serif2: 'Alegreya, serif',
    sansSerif2: "'Alegreya Sans', sans-serif",
    serif3: "'Bree Serif', serif",
  },
};

const colors = {
  primary: {
    main: '#D93F2A',
  },
};

const theme = { typography, colors };

export default theme;
|
package handlers
import (
"fmt"
"strings"
"time"
"github.com/Not-Cyrus/GoGuardian/utils"
"github.com/bwmarrin/discordgo"
)
// readAudits scans up to 25 recent audit-log entries of the given type for
// guildID and bans the first user whose counted actions reach the configured
// "Threshold" within the configured "Seconds" window.
//
// Returns the banned user's ID, or "" when nobody crossed the threshold or an
// API call failed (failures are reported to the guild owner via DM).
func readAudits(s *discordgo.Session, guildID string, auditType int) string {
	parsedData, configData := utils.FindConfig(guildID)
	// auditMap de-duplicates entries already counted; userMap tallies recent
	// actions per user ID.
	auditMap := make(map[string]string)
	userMap := make(map[string]int)
	audits, err := s.GuildAuditLog(guildID, "", "", auditType, 25)
	if err != nil {
		utils.SendMessage(s, fmt.Sprintf("I can't read audits : %s", err.Error()), utils.GetGuildOwner(s, guildID))
		return ""
	}
	for _, entry := range audits.AuditLogEntries {
		// The threshold check runs before this entry is counted, so a user is
		// punished only once earlier iterations have accumulated enough hits.
		if userMap[entry.UserID] >= configData.GetInt("Config", "Threshold") {
			// Anti-hijack: the flagged actor is this bot's own account.
			if entry.UserID == DGUser.ID && configData.GetBool("Config", "AntiHijackEnabled") {
				if strings.Contains(entry.Reason, "https://github.com/Not-Cyrus/GoGuardian") {
					return "" // lazy as fuck, but it'll do the trick to stop false flags. I'll probably make this better soon idk
				}
				utils.SendMessage(s, "The bot has been comprimised. I have left the guild for your safety.", utils.GetGuildOwner(s, guildID)) // this is an important message so we'll DM the owner. NOTE(review): "comprimised" is a typo in a user-facing string — fix as "compromised".
				// NOTE(review): GuildLeave's error is ignored and execution
				// falls through to the ban call below even after leaving the
				// guild — confirm an early return isn't intended here.
				s.GuildLeave(guildID)
			}
			err := s.GuildBanCreateWithReason(guildID, entry.UserID, "You just got destroyed by https://github.com/Not-Cyrus/GoGuardian", 0)
			if err != nil {
				utils.SendMessage(s, fmt.Sprintf("I have no perms to ban <@!%s>: %s", entry.UserID, err.Error()), utils.GetGuildOwner(s, guildID)) // this is an important message so we'll DM the owner.
				return ""
			}
			return entry.UserID
		}
		current := time.Now()
		// Entry IDs are snowflakes, so the creation time is recoverable from
		// the ID itself.
		entryTime, err := discordgo.SnowflakeTimestamp(entry.ID)
		if err != nil {
			utils.SendMessage(s, fmt.Sprintf("how the fuck did this happen: %s", err.Error()), "")
			return ""
		}
		// Count the entry only when it is recent enough, not yet counted, and
		// its author is not whitelisted.
		if current.Sub(entryTime).Round(1*time.Second).Seconds() <= configData.GetFloat64("Config", "Seconds") {
			if _, ok := auditMap[entry.ID]; !ok {
				inArray, _ := utils.InArray(guildID, "WhitelistedIDs", parsedData, entry.UserID)
				if !inArray {
					auditMap[entry.ID] = entry.ID
					userMap[entry.UserID]++
				}
			}
		}
	}
	return ""
}
// findAudit returns the most recent audit-log entry of the given type whose
// target matches targetID, or nil when no such entry exists (or the log
// cannot be read, in which case the guild owner is notified via DM).
func findAudit(s *discordgo.Session, guildID, targetID string, auditType int) *discordgo.AuditLogEntry {
	log, err := s.GuildAuditLog(guildID, "", "", auditType, 10) // we really don't need 25 here so we'll use 10 instead (I could probably just use one but whatever)
	if err != nil {
		utils.SendMessage(s, fmt.Sprintf("I can't read audits: %s | if you think this is a mistake make an issue at https://github.com/Not-Cyrus/GoGuardian/issues", err.Error()), utils.GetGuildOwner(s, guildID))
		return nil
	}
	for i := range log.AuditLogEntries {
		if candidate := log.AuditLogEntries[i]; candidate.TargetID == targetID {
			return candidate
		}
	}
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.