text stringlengths 1 1.05M |
|---|
<gh_stars>0
/**
 * Models an airport with a weather source and a fixed-capacity hangar.
 * @param {Object} weather  - object exposing a boolean `storm` property.
 * @param {number} capacity - maximum number of planes the hangar holds.
 */
function Airport(weather = new Weather(), capacity = 1) {
  this.weather = weather;
  // NOTE: property name kept as `hanger` (historic misspelling) so existing
  // callers that read airport.hanger keep working.
  this.hanger = [];
  this.capacity = capacity;
}

// Lands a plane into the hangar; rejects during storms or when full.
Airport.prototype.landPlane = function(plane) {
  if (this.weather.storm) { throw("Too stormy to land"); }
  if (this.hanger.length >= this.capacity) { throw('Unable to land; no hangar space!'); }
  this.hanger.push(plane);
};

// Removes the given plane from the hangar and returns it in a one-element
// array (splice's return shape, kept for compatibility).
// FIX: previously a plane that was NOT in the hangar yielded indexOf === -1,
// and splice(-1, 1) silently removed the *last* plane instead. We now throw,
// matching the string-throw style used by the other guards.
Airport.prototype.takeOffPlane = function(plane) {
  if (this.weather.storm) { throw("Too stormy to take off"); }
  var plane_index = this.hanger.indexOf(plane);
  if (plane_index === -1) { throw('Plane is not in this hangar!'); }
  return this.hanger.splice(plane_index, 1);
};

// Replaces the current weather source (e.g. a new forecast object).
Airport.prototype.updateWeather = function(weather) {
  this.weather = weather;
};
|
# Keep only the non-integer entries of `lst`, preserving their order.
lst = [1, 2, 3, "a", "b", "c", 4, 5]
new_lst = [item for item in lst if not isinstance(item, int)]
print(new_lst)
#!/bin/bash
##===----------------------------------------------------------------------===##
##
## This source file is part of the SwiftNIO open source project
##
## Copyright (c) 2017-2018 Apple Inc. and the SwiftNIO project authors
## Licensed under Apache License v2.0
##
## See LICENSE.txt for license information
## See CONTRIBUTORS.txt for the list of SwiftNIO project authors
##
## SPDX-License-Identifier: Apache-2.0
##
##===----------------------------------------------------------------------===##
# Shared test helpers: create_token, start_server, get_* accessors, stop_server.
source defines.sh

token=$(create_token)
# Start a plain-TCP server with half-closure disabled for this test run.
start_server --disable-half-closure "$token" tcp
htdocs=$(get_htdocs "$token")
server_pid=$(get_server_pid "$token")
ip=$(get_server_ip "$token")
port=$(get_server_port "$token")
# Sanity check: the server process must be alive before we start.
kill -0 $server_pid

# try to simulate a TCP connection reset, works really well on Darwin but not on
# Linux over loopback. On Linux however
# `test_19_connection_drop_while_waiting_for_response_uds.sh` tests a very
# similar situation.
# Flood the server with pipelined requests via nc, then SIGKILL the client
# mid-stream so the kernel aborts the connection.
yes "$( echo -e 'GET /dynamic/write-delay HTTP/1.1\r\n\r\n')" | nc "$ip" "$port" > /dev/null & sleep 0.5; kill -9 $!
sleep 0.2
stop_server "$token"
|
<gh_stars>0
/**
 * Describes how far `value` lies from now, in whole days.
 * Dates more than one week in the past collapse to a fixed string; every
 * other date is delegated to Intl.RelativeTimeFormat.
 * @param {number|string|Date} value  - anything accepted by `new Date()`.
 * @param {string} locale             - BCP 47 locale tag for the formatter.
 * @returns {string}
 */
export function formatRelativeTime(value, locale) {
  const MS_PER_DAY = 1000 * 60 * 60 * 24;
  const deltaDays = (new Date(value).getTime() - Date.now()) / MS_PER_DAY;
  // Formatter is constructed before the early return on purpose: an invalid
  // locale must throw for *every* input, exactly as the original did.
  const formatter = new Intl.RelativeTimeFormat(locale);
  if (deltaDays < -7) {
    return "Greater than 1 week";
  }
  return formatter.format(Math.round(deltaDays), 'days');
}
<filename>splashpy/faker/client.py
# -*- coding: utf-8 -*-
#
# This file is part of SplashSync Project.
#
# Copyright (C) 2015-2020 Splash Sync <www.splashsync.com>
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
from splashpy.models.client import ClientInfo
class FakerClient(ClientInfo):
    """Define General Information about this Splash Client (fake/test mode)."""

    def __init__(self):
        # No constructor-time state: every field is filled in by complete().
        # NOTE(review): ClientInfo.__init__ is deliberately not called here —
        # confirm the base class tolerates being left uninitialized.
        pass

    def complete(self):
        """Populate this client description with fake values for testing."""
        # Use Default Icons Set
        self.loadDefaultIcons()
        # ====================================================================#
        # Override info to say we are in Faker mode.
        # FIX: short description previously read "Splash Py sFake Client".
        self.short_desc = "Splash Py Fake Client"
        self.long_desc = "Fake Client for Testing purpose Only..."
        # ====================================================================#
        # Company Information (placeholder values)
        self.company = "Splash Sync"
        self.address = "Street Address"
        self.zip = "12345"
        self.town = "Town"
        self.country = "Country"
        self.www = "www.splashsync.com"
        self.email = "<EMAIL>"
        self.phone = "060606060"
#!/bin/sh
# This file is a part of Julia. License is MIT: https://julialang.org/license

# Run as: fixup-libstdc++.sh <libdir> <private_libdir>
#
# Copies the libstdc++ that libjulia-internal.so links against into the
# private library directory (presumably so the shipped build bundles the
# exact libstdc++ it was compiled with — see Julia build docs).

if [ "$#" -ne 2 ]; then
    echo "Usage: $0 <libdir> <private_libdir>"
    exit 1
fi

libdir="$1"
private_libdir="$2"

if [ ! -f "$private_libdir/libjulia-internal.so" ]; then
    echo "ERROR: Could not open $private_libdir/libjulia-internal.so" >&2
    exit 2
fi

# Prints the full path of the shared library matching pattern $2 that
# binary $1 links against (empty when $1 does not exist).
find_shlib ()
{
    if [ -f "$1" ]; then
        # FIX: quote both expansions — unquoted $1/$2 would word-split
        # paths or patterns containing whitespace.
        ldd "$1" | grep "$2" | cut -d' ' -f3 | xargs
    fi
}

# Discover libstdc++ location and name
LIBSTD=$(find_shlib "$private_libdir/libjulia-internal.so" "libstdc++.so")
# FIX: quote $LIBSTD — basename/dirname would mis-handle paths with spaces.
LIBSTD_NAME=$(basename "$LIBSTD")
LIBSTD_DIR=$(dirname "$LIBSTD")

# Bundle it next to libjulia-internal.so unless a copy is already present.
if [ ! -f "$private_libdir/$LIBSTD_NAME" ] && [ -f "$LIBSTD_DIR/$LIBSTD_NAME" ]; then
    cp -v "$LIBSTD_DIR/$LIBSTD_NAME" "$private_libdir"
    chmod 755 "$private_libdir/$LIBSTD_NAME"
fi
|
package runner
// runnerError is a minimal error type carrying only a message string.
type runnerError struct {
	msg string
}

// Error returns the stored message, satisfying the standard error interface.
func (e runnerError) Error() string {
	return e.msg
}
|
#!/bin/bash
##
# This script installs npm and builds webpack in all directories given as argument
##
# Installs stuff from package.json and bower.json
# $1 - build directory
function install_all_assets() {
    local build_dir="$1"
    # FIX: bail out when the directory cannot be entered — previously a
    # failed cd silently ran npm/bower installs in the *current* directory.
    cd "$build_dir" || return 1
    if [ -f package.json ]; then
        # Honor the user's choice of package manager; bootstrap yarn on demand.
        if [[ "$PACKAGE_INSTALLER" == "yarn" ]]; then
            if ! is_cmd_installed yarn; then
                npm install -g yarn
            fi
            yarn install
        else
            npm install
        fi
    fi
    if [ -f bower.json ]; then
        if ! is_cmd_installed bower; then
            npm install -g bower
        fi
        bower install --allow-root
    fi
}
# Builds assets defined in Gruntfile.js, Gulpfile.js or webpack.js
# $1 - build directory
function build_all_assets() {
    local build_dir="$1"
    # FIX: bail out when the directory cannot be entered (see install_all_assets).
    cd "$build_dir" || return 1
    # Match build-file names case insensitively, and restore the caller's
    # previous option state afterwards — the original leaked `nocasematch`
    # into the rest of the shell session.
    local restore_nocasematch
    restore_nocasematch=$(shopt -p nocasematch)
    shopt -s nocasematch
    for file in * ; do
        case "$file" in
            "gruntfile.js" )
                run_local_module grunt
                ;;
            "webpack.js" )
                run_local_module webpack
                ;;
            "gulpfile.js" )
                run_local_module gulp
                ;;
        esac
    done
    $restore_nocasematch
}
# Uses the local node binary from node_modules if it's available.
# Falls back to the globally installed binary otherwise.
# $1 - module/binary name (e.g. grunt, gulp, webpack)
function run_local_module() {
    local bin=$1
    if [ -f ./node_modules/$bin/bin/$bin.js ]; then
        ./node_modules/$bin/bin/$bin.js
    else
        $bin
    fi
}
# Finds and returns (echoes) the unique absolute paths of directories that
# contain a package.json or bower.json.
# $1 - path to search from
# $2 - maximum depth passed to find
function collect_asset_directories() {
    local find_from_path=$1
    local find_max_depth=$2
    declare -a build_locations
    # Find all paths with package.json/bower.json and append their relative path to absolute path
    # NOTE(review): word-splitting the find output breaks on paths containing
    # whitespace — acceptable only if build paths never contain spaces.
    for package in $(find $find_from_path -maxdepth $find_max_depth \( -name "package.json" -o -name "bower.json" \) ); do
        build_locations+=($(realpath $(dirname $package)))
    done
    # This is a bit of a hacky way to echo only unique elements.
    # It is useful so that we don't do double the work when both package.json
    # and bower.json are present in the same directory.
    echo "${build_locations[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' '
}
# Use the node version the user provided via the $NODE_VERSION env variable.
# $1 - node version to install and activate through nvm
function prepare_node_version() {
    local version=$1
    # Load nvm into this shell, then install the requested version, make it
    # the default for future shells, and activate it for this one.
    source $NVM_DIR/nvm.sh
    nvm install $version
    nvm alias default $version
    nvm use $version
}
# Returns success (exit status 0) when the given command exists in $PATH.
# $1 - command name to look up
function is_cmd_installed() {
    command -v $1 > /dev/null 2>&1
}
# Installs and activates the wanted node version
prepare_node_version $NODE_VERSION
# Set default variables for asset builder
MAX_DEPTH=${MAX_DEPTH-1}   # how deep to search for package.json/bower.json
BUILD_DIR=${1-/build}      # root directory to scan (first CLI argument)
# Loop only unique items from build locations
for build_dir in $(collect_asset_directories $BUILD_DIR $MAX_DEPTH); do
    # Installs all stuff from package.json/bower.json
    install_all_assets $build_dir
    # Builds Gruntfile.js, gulpfile.js and webpack.js
    build_all_assets $build_dir
done
var Auction = artifacts.require("SecondPriceAuction");
// Advances the test chain's clock by `addSeconds` and mines an empty block
// so the new timestamp becomes observable by subsequent calls (ganache's
// evm_increaseTime / evm_mine RPC methods).
const increaseTime = addSeconds => {
  web3.currentProvider.send({jsonrpc: "2.0", method: "evm_increaseTime", params: [addSeconds], id: 0});
  web3.currentProvider.send({jsonrpc: "2.0", method: "evm_mine", params: [], id: 1});
}
// Walks the SecondPriceAuction through its whole lifecycle (before start,
// active, after end) by advancing the EVM clock, asserting the observable
// state at each phase. The chain is intentionally order-dependent.
contract('auction', function(accounts) {
  it("Constant time flow.", function() {
    var auction;
    var tokenCap;
    var endTime;
    // Phase 1: before the sale starts — inactive, not finalised, and
    // querying the price must revert.
    return Auction.deployed().then(function(instance) {
      auction = instance;
      return auction.tokenCap.call();
    }).then(function(cap) {
      tokenCap = cap.toNumber();
      assert.isAbove(tokenCap, 0, "Selling some tokens.");
      return auction.isActive.call();
    }).then(function(isActive) {
      assert.equal(isActive, false, "The sale has not started.");
      return auction.allFinalised.call();
    }).then(function(allFinalised) {
      assert.equal(allFinalised, false, "The sale is not finalised.");
      return auction.currentPrice.call();
    }).then(assert.fail).catch(function(error) {
      assert.include(error.message, 'invalid opcode', "No price before the sale.");
      return auction.calculateEndTime.call();
    }).then(function(end) {
      endTime = end.toNumber();
      assert.isAbove(endTime, web3.eth.getBlock(web3.eth.blockNumber).timestamp, "Sale ends later.");
      // Phase 2: jump past the start time so the sale becomes active.
      increaseTime(1000);
      return auction.isActive.call();
    }).then(function(isActive) {
      assert.equal(isActive, true, "The sale has started.");
      return auction.allFinalised.call();
    }).then(function(allFinalised) {
      assert.equal(allFinalised, false, "The sale is not finalised.");
      return auction.currentPrice.call();
    }).then(function(currentPrice) {
      assert.isAbove(currentPrice, 0, "Price is greater than 0 during the sale.");
      return auction.calculateEndTime.call();
    }).then(function(end) {
      assert.equal(end.toNumber(), endTime, "No contributions means that the end estimate is the same.");
      return auction.tokensAvailable.call();
    }).then(function(available) {
      assert.equal(available.toNumber(), tokenCap, "All tokens available.");
      return auction.maxPurchase.call();
    }).then(function(purchase) {
      assert.isAbove(purchase.toNumber(), 0, "Can purchase tokens.");
      return auction.bonus.call(100);
    }).then(function(extra) {
      assert.equal(extra.toNumber(), 15, "Gets bonus at the start.");
      return auction.theDeal.call(100);
    }).then(function(deal) {
      assert.equal(deal[0].toNumber(), 115, "Accounted with bonus.");
      assert.equal(deal[1], false, "No refund needed.");
      assert.isAbove(deal[2].toNumber(), 0, "Positive price.");
      // Phase 3: bonus behavior after the maximum bonus duration elapses.
      return auction.BONUS_MAX_DURATION.call();
    }).then(function(duration) {
      increaseTime(duration.toNumber());
      return auction.bonus.call(100);
    }).then(function(extra) {
      assert.equal(extra.toNumber(), 15, "Bonus later if no empty blocks.");
      // Phase 4: jump past the end of the sale — inactive, finalised, and
      // the price query must revert again.
      increaseTime(endTime);
      return auction.isActive.call();
    }).then(function(isActive) {
      assert.equal(isActive, false, "The sale has ended.");
      return auction.allFinalised.call();
    }).then(function(allFinalised) {
      assert.equal(allFinalised, true, "No tokens sold, all finalised.");
      return auction.currentPrice.call();
    }).then(assert.fail).catch(function(error) {
      assert.include(error.message, 'invalid opcode', "No price after the sale.");
    });
  });
});
|
/// Decodes a JSON payload of the shape {"errors":[{"errorID":Int}, ...]}
/// and returns every `errorID` in order. Returns an empty array when the
/// string is not valid UTF-8 or the payload does not decode; decoding
/// failures are logged to stdout.
func extractErrorIDs(from jsonResponse: String) -> [Int] {
    struct ErrorObject: Codable { let errorID: Int }
    struct ErrorResponse: Codable { let errors: [ErrorObject] }

    guard let payload = jsonResponse.data(using: .utf8) else {
        return []
    }

    do {
        let decoded = try JSONDecoder().decode(ErrorResponse.self, from: payload)
        return decoded.errors.map { $0.errorID }
    } catch {
        print("Error decoding JSON: \(error)")
        return []
    }
}
#include <iostream>
#include <ctime>
#include <vector>
#include <algorithm>
// Calculates the difference between two calendar times, in years
// (365.25-day years, so leap years average out).
// FIX: the parameters are now taken BY VALUE. std::mktime both reads and
// normalizes (writes to) the struct it is given, so the previous
// const_cast on const references let mktime mutate caller-owned const
// objects — undefined behavior. Copies are mutated safely instead.
// Call sites are source-compatible with the old const& signature.
double calculateTimeDifference(std::tm startTime, std::tm endTime) {
    std::time_t start = std::mktime(&startTime);
    std::time_t end = std::mktime(&endTime);
    double differenceInSeconds = std::difftime(end, start);
    const double secondsInYear = 60 * 60 * 24 * 365.25; // accounting for leap years
    return differenceInSeconds / secondsInYear;
}
int main() {
    // Demonstrate calculateTimeDifference with two fixed dates.
    std::tm startTime = {0, 0, 0, 1, 0, 100}; // 1 Jan 2000 (tm_year = years since 1900)
    std::tm endTime = {0, 0, 0, 1, 0, 105};   // 1 Jan 2005
    std::cout << "Time difference in years: "
              << calculateTimeDifference(startTime, endTime) << std::endl;

    // Print every element except those listed in elementsToSkip.
    const std::vector<int> dataStructure = {1, 2, 3, 4, 5};
    const std::vector<int> elementsToSkip = {2, 4};
    for (int element : dataStructure) {
        const bool skip =
            std::find(elementsToSkip.begin(), elementsToSkip.end(), element) != elementsToSkip.end();
        if (!skip) {
            std::cout << element << " ";
        }
    }
    std::cout << std::endl;
    return 0;
}
/**
 * Stores the given token in the PHP session under the 'user_token' key.
 *
 * NOTE(review): this returns a *new* CfgAuditoria instance rather than
 * $this, which breaks the usual fluent-setter convention — confirm that
 * callers do not expect to keep chaining on the original object.
 *
 * @param string $token
 *
 * @return CfgAuditoria
 */
public function setToken($token)
{
    // Assuming $token is securely stored in the session
    // Example: Storing the token in a secure session variable
    $_SESSION['user_token'] = $token;
    // Return an instance of the CfgAuditoria class
    return new CfgAuditoria();
}
/**
* Copyright 2018 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author ebidel@ (<NAME>)
*/
/**
* Uses Puppeteer and the browser's online/offline events to monitor internet
* connection status.
*/
const util = require('util');
const dns = require('dns');
const puppeteer = require('puppeteer');
/**
 * Best-effort connectivity probe: performs a reverse DNS lookup against
 * 8.8.8.8:53 and reports whether it succeeded.
 * @returns {Promise<boolean>} true if the lookup resolved, false on any error.
 */
async function isConnected() {
  try {
    const lookupService = util.promisify(dns.lookupService);
    // FIX: the resolved value was previously bound to an unused `result`
    // variable; only success/failure matters here.
    await lookupService('8.8.8.8', 53);
    return true;
  } catch (err) {
    return false;
  }
}
// Launch a headless browser and turn the page's online/offline DOM events
// into 'online'/'offline' events on the Puppeteer page object, each one
// double-checked with a real DNS lookup.
puppeteer.launch().then(async browser => {
  const page = await browser.newPage();
  page.on('online', () => console.info('Online!'));
  page.on('offline', () => console.info('Offline!'));
  // Adds window.connectionChange in page.
  await page.exposeFunction('connectionChange', async online => {
    // Since online/offline events aren't 100% reliable, do an
    // actual dns lookup to verify connectivity.
    const isReallyConnected = await isConnected();
    page.emit(isReallyConnected ? 'online' : 'offline');
  });
  // Monitor browser online/offline events in the page.
  await page.evaluateOnNewDocument(() => {
    window.addEventListener('online', e => window.connectionChange(navigator.onLine));
    window.addEventListener('offline', e => window.connectionChange(navigator.onLine));
  });
  // Kick off a navigation so evaluateOnNewDocument runs.
  await page.goto('data:text/html,hi');
  // ... do other stuff ...
  // await browser.close(); // Don't close the browser so we can monitor!
});
|
<gh_stars>0
#include <fstream>
#include <iostream>
#include <string>
#include <emscripten/emscripten.h>
#include <emscripten/bind.h>
#define GLFW_INCLUDE_ES3
#define GL_SILENCE_DEPRECATION
#include <GLFW/glfw3.h>
// GLFW window-size callback: logs the new width/height.
void callback_resize(
    [[maybe_unused]] GLFWwindow *window,
    int width,
    int height) {
  printf("window_size_callback received width: %i, height: %i\n", width, height);
}
// GLFW key callback: ESC (on release) requests window close; ENTER logs a
// message on every event phase, since `action` is not checked in that branch.
// NOTE(review): "\n" followed by std::endl prints a blank line — confirm
// that the double newline is intentional.
void callback_key(
    GLFWwindow *window,
    int key,
    [[maybe_unused]] int scancode,
    int action,
    [[maybe_unused]] int modifier) {
  if (key == GLFW_KEY_ESCAPE && action == GLFW_RELEASE)
    glfwSetWindowShouldClose(window, 1);
  if (key == GLFW_KEY_ENTER)
    std::cout << "Enter was hit\n" << std::endl;
}
// GLFW mouse-button callback: logs every button press/release.
void callback_mouse(
    [[maybe_unused]] GLFWwindow *window,
    [[maybe_unused]] int button,
    [[maybe_unused]] int action,
    [[maybe_unused]] int mods) {
  //assert(window != NULL); (void)button; (void)action; (void)mods;
  printf("Mouse button!\n"); // FIX: log message typo, was "Mouse buttion! "
}
// State shared between the JS-exposed button handler (hoge) and the
// per-frame render loop (do_frame).
struct MyData {
  bool is_click = false;        // toggled by hoge(); selects the clear color
  GLFWwindow *window = nullptr; // main window, created in main()
};
// Renders one frame: the clear color reflects the click toggle, then swaps
// buffers and pumps GLFW events. Installed as the emscripten main loop.
void do_frame(MyData *data) {
  if( data->is_click ){
    glClearColor(0.8f, 1.0f, 1.0f, 1.0f);
  }
  else{
    glClearColor(1.0f, 1.0f, 0.8f, 1.0f);
  }
  glClear(GL_COLOR_BUFFER_BIT);
  glfwSwapBuffers(data->window);
  glfwPollEvents();
}
// ======================
// global data definition
MyData data;
// =================
// exposed functions
// Exposed to JavaScript via EMSCRIPTEN_BINDINGS below: toggles the clear
// color used by do_frame each time the HTML button is clicked.
void hoge () {
  std::cout << "button clicked" << std::endl;
  data.is_click = !data.is_click;
}
int main() {
  // Initialize GLFW; clean up and exit on failure.
  if (glfwInit() != GL_TRUE) {
    printf("glfwInit() failed\n");
    glfwTerminate();
    return 0;
  }
  printf("glfwInit() success\n");
  // Create the 512x512 window and make its GL context current.
  data.window = glfwCreateWindow(
      512, 512,
      "GLFW test", nullptr, nullptr);
  if (!data.window) {
    printf("glfwCreateWindow() failed\n");
    glfwTerminate();
    return 0;
  }
  printf("glfwCreateWindow() success\n");
  glfwMakeContextCurrent(data.window);
  int windowWidth;
  int windowHeight;
  glfwGetFramebufferSize(data.window, &windowWidth, &windowHeight);
  // Register resize/mouse/key callbacks, then hand the frame loop to
  // emscripten (per the emscripten API: fps=0 lets the browser drive the
  // rate, final 1 simulates an infinite loop, so glfwTerminate below only
  // runs if the loop is cancelled).
  glfwSetWindowSizeCallback(data.window, callback_resize);
  glfwSetMouseButtonCallback(data.window, callback_mouse);
  glfwSetKeyCallback(data.window, callback_key);
  emscripten_set_main_loop_arg((em_arg_callback_func) do_frame, &data, 0, 1);
  glfwTerminate();
  return EXIT_SUCCESS;
}
// Export hoge() to JavaScript (callable as Module.hoge from the page).
EMSCRIPTEN_BINDINGS(wabc) {
  emscripten::function("hoge", &hoge);
}
#!/bin/bash
# SLURM directives: 1 node, 1 task, 14 cores, 1200-minute wall clock,
# output written to <jobname>-<jobid>.out.
#SBATCH -N 1
#SBATCH -n 1
#SBATCH -c 14
#SBATCH -t 1200
#SBATCH --output="%x-%j.out"
# module load PrgEnv/gcc10-openmpi-python
# Make the locally built pitts modules importable, and flush Python output
# immediately so the SLURM log streams in real time.
export PYTHONPATH=$PYTHONPATH:~/pitts/build/src/:~/pitts/examples/
export PYTHONUNBUFFERED=1
# Remove the stack-size limit (NOTE(review): presumably required by the
# solver — confirm).
ulimit -s unlimited
# Pin the run to cores 0-13 with likwid and launch the preconditioned
# TT-GMRES example.
srun likwid-pin -c 0-13 python tt_gmres_precond.py -n 20 -d 6 --eps 1.e-4 --maxIter=40 --variant right_precond --adaptive
<reponame>tanmba/10Vision
import { Component } from '@angular/core';
import { AuthService } from '../core/auth.service'
import { Router, Params } from '@angular/router';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
@Component({
  selector: 'app-register',
  templateUrl: './register.component.html',
  styleUrls: ['./register.component.scss']
})
/**
 * Registration/login screen: toggles between the register and login panels
 * and delegates authentication to AuthService (email/password plus
 * Facebook, Twitter and Google social logins).
 */
export class RegisterComponent {
  registerForm: FormGroup;
  errorMessage: string = '';
  successMessage: string = '';
  loginForm: FormGroup;
  // FIX: these flags were written as `showLogin: false;` etc., which
  // TypeScript parses as a *type annotation* (the literal type `false`)
  // with NO initializer — the properties started out `undefined` and
  // assigning the opposite boolean was a type error. They are now real
  // boolean initializations.
  showLogin = false;
  showRegister = true;
  loadRegister = false;
  loadLogin = false;
  isActive = false;

  constructor(
    public authService: AuthService,
    private router: Router,
    private fb: FormBuilder
  ) {
    this.createForm();
    this.createFormLogin();
  }

  ngOnInit() {
    this.showRegister = true;
  }

  // Toggle the register panel's expanded state.
  loadRegisterComponent() {
    this.showRegister = !this.showRegister;
    this.loadRegister = !this.loadRegister;
  }

  // Toggle the login panel's expanded state.
  loadLoginComponent() {
    this.showLogin = !this.showLogin;
    this.loadLogin = !this.loadLogin;
  }

  // Switch the view to the login panel.
  showLoginComponent() {
    this.showLogin = !this.showLogin;
    this.showRegister = false;
    this.loadRegister = false;
    this.isActive = !this.isActive;
  }

  // Switch the view back to the register panel.
  showRegisterComponent() {
    this.loadLogin = false;
    this.showRegister = true;
    this.showLogin = false;
    this.isActive = !this.isActive;
  }

  // Reset both panels to the initial (register visible) state.
  goingBack() {
    this.loadRegister = false;
    this.loadLogin = false;
    this.showRegister = true;
  }

  createForm() {
    this.registerForm = this.fb.group({
      email: ['', [Validators.email, Validators.required]],
      password: ['', [Validators.required]],
    });
  }

  createFormLogin() {
    this.loginForm = this.fb.group({
      email: ['', Validators.required],
      password: ['', Validators.required]
    });
  }

  tryFacebookLogin() {
    this.authService.doFacebookLogin()
      .then(res => {
        this.router.navigate(['/register-city']);
      }, err => console.log(err)
      )
  }

  tryTwitterLogin() {
    this.authService.doTwitterLogin()
      .then(res => {
        this.router.navigate(['/user']);
      }, err => console.log(err)
      )
  }

  tryGoogleLogin() {
    this.authService.doGoogleLogin()
      .then(res => {
        this.router.navigate(['/register-city']);
      }, err => console.log(err)
      )
  }

  tryRegister() {
    const data = this.registerForm.value;
    // FIX: removed `console.log(data)` — it wrote the user's plaintext
    // password to the browser console.
    this.authService.doRegister(data)
      .then(res => {
        this.errorMessage = "";
        this.successMessage = "Votre compte a été créé !";
        this.router.navigate(['/register-city']);
      }, err => {
        console.log(err);
        this.errorMessage = err.message;
        this.successMessage = "";
      })
  }

  tryLogin(value) {
    this.authService.doLogin(value)
      .then(res => {
        this.router.navigate(['/user']);
      }, err => {
        console.log(err);
        this.errorMessage = 'Entrez des informations valides';
      })
  }
}
|
#!/bin/bash
# SLURM batch job: processes one raw video clip on a single GPU node.
#SBATCH --account=def-dkulic
#SBATCH --gres=gpu:1              # request GPU generic resource
#SBATCH --cpus-per-task=2  #Maximum of CPU cores per GPU request: 6 on Cedar, 16 on Graham.
#SBATCH --mem=8000M               # memory per node
#SBATCH --time=0-01:30            # time (DD-HH:MM)
#SBATCH --output=./job_script_output/Camera1_Sep_19_1430_1500_Prescribed_Behavior_0_%N-%j.out  # %N for node name, %j for jobID

## Main processing command
## -v: path to the raw video file
## -o: directory to save processed video
python ./process_video_low_frequent_frame.py -v ../ROM_raw_videos_clips/Sep_19/Camera1_Sep_19_1430_1500_Prescribed_Behavior_0.mp4 -o ../ROM_raw_videos_clips_processed_camera2/Sep_19
#!/usr/bin/env python2
# Launches an stftpd TFTP server serving files from ./dst/ on port 5000.
import logging
from stftpd import Server

# DEBUG level so every transfer and packet event is logged to the console.
logging.basicConfig(
    level=logging.DEBUG
)

# Blocks until interrupted.
s = Server(root_path="dst/", port=5000)
s.run()
|
def find_reverse(kmer):
    """Return the reverse complement of a DNA k-mer (A<->T, C<->G)."""
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(pairs[base] for base in kmer[::-1])
def uniq_kmers(kmers):
    """Return [(kmer, reverse_complement)] for each distinct kmer,
    preserving first-seen order.

    FIX: membership testing previously used a list (`kmer in didthat`),
    making the function O(n^2); a set gives O(1) lookups while the output
    list still preserves the original ordering.
    """
    unique_pairs = []
    seen = set()
    for kmer in kmers:
        if kmer not in seen:
            seen.add(kmer)
            unique_pairs.append((kmer, find_reverse(kmer)))
    return unique_pairs
// Code generated by entc, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"log"
"github.com/blushft/strana/modules/sink/reporter/store/ent/migrate"
"github.com/google/uuid"
"github.com/blushft/strana/modules/sink/reporter/store/ent/action"
"github.com/blushft/strana/modules/sink/reporter/store/ent/alias"
"github.com/blushft/strana/modules/sink/reporter/store/ent/app"
"github.com/blushft/strana/modules/sink/reporter/store/ent/browser"
"github.com/blushft/strana/modules/sink/reporter/store/ent/campaign"
"github.com/blushft/strana/modules/sink/reporter/store/ent/connectivity"
"github.com/blushft/strana/modules/sink/reporter/store/ent/device"
"github.com/blushft/strana/modules/sink/reporter/store/ent/event"
"github.com/blushft/strana/modules/sink/reporter/store/ent/extra"
"github.com/blushft/strana/modules/sink/reporter/store/ent/group"
"github.com/blushft/strana/modules/sink/reporter/store/ent/library"
"github.com/blushft/strana/modules/sink/reporter/store/ent/location"
"github.com/blushft/strana/modules/sink/reporter/store/ent/network"
"github.com/blushft/strana/modules/sink/reporter/store/ent/oscontext"
"github.com/blushft/strana/modules/sink/reporter/store/ent/page"
"github.com/blushft/strana/modules/sink/reporter/store/ent/referrer"
"github.com/blushft/strana/modules/sink/reporter/store/ent/screen"
"github.com/blushft/strana/modules/sink/reporter/store/ent/session"
"github.com/blushft/strana/modules/sink/reporter/store/ent/timing"
"github.com/blushft/strana/modules/sink/reporter/store/ent/user"
"github.com/blushft/strana/modules/sink/reporter/store/ent/viewport"
"github.com/facebook/ent/dialect"
"github.com/facebook/ent/dialect/sql"
"github.com/facebook/ent/dialect/sql/sqlgraph"
)
// Client is the client that holds all ent builders.
//
// NOTE: this file is generated by entc (see the "DO NOT EDIT" header); one
// typed sub-client exists per schema type. Regenerate rather than
// hand-editing when the schema changes.
type Client struct {
	config
	// Schema is the client for creating, migrating and dropping schema.
	Schema *migrate.Schema
	// Action is the client for interacting with the Action builders.
	Action *ActionClient
	// Alias is the client for interacting with the Alias builders.
	Alias *AliasClient
	// App is the client for interacting with the App builders.
	App *AppClient
	// Browser is the client for interacting with the Browser builders.
	Browser *BrowserClient
	// Campaign is the client for interacting with the Campaign builders.
	Campaign *CampaignClient
	// Connectivity is the client for interacting with the Connectivity builders.
	Connectivity *ConnectivityClient
	// Device is the client for interacting with the Device builders.
	Device *DeviceClient
	// Event is the client for interacting with the Event builders.
	Event *EventClient
	// Extra is the client for interacting with the Extra builders.
	Extra *ExtraClient
	// Group is the client for interacting with the Group builders.
	Group *GroupClient
	// Library is the client for interacting with the Library builders.
	Library *LibraryClient
	// Location is the client for interacting with the Location builders.
	Location *LocationClient
	// Network is the client for interacting with the Network builders.
	Network *NetworkClient
	// OSContext is the client for interacting with the OSContext builders.
	OSContext *OSContextClient
	// Page is the client for interacting with the Page builders.
	Page *PageClient
	// Referrer is the client for interacting with the Referrer builders.
	Referrer *ReferrerClient
	// Screen is the client for interacting with the Screen builders.
	Screen *ScreenClient
	// Session is the client for interacting with the Session builders.
	Session *SessionClient
	// Timing is the client for interacting with the Timing builders.
	Timing *TimingClient
	// User is the client for interacting with the User builders.
	User *UserClient
	// Viewport is the client for interacting with the Viewport builders.
	Viewport *ViewportClient
}

// NewClient creates a new client configured with the given options.
func NewClient(opts ...Option) *Client {
	cfg := config{log: log.Println, hooks: &hooks{}}
	cfg.options(opts...)
	client := &Client{config: cfg}
	client.init()
	return client
}

// init wires one sub-client per schema type onto the shared config.
func (c *Client) init() {
	c.Schema = migrate.NewSchema(c.driver)
	c.Action = NewActionClient(c.config)
	c.Alias = NewAliasClient(c.config)
	c.App = NewAppClient(c.config)
	c.Browser = NewBrowserClient(c.config)
	c.Campaign = NewCampaignClient(c.config)
	c.Connectivity = NewConnectivityClient(c.config)
	c.Device = NewDeviceClient(c.config)
	c.Event = NewEventClient(c.config)
	c.Extra = NewExtraClient(c.config)
	c.Group = NewGroupClient(c.config)
	c.Library = NewLibraryClient(c.config)
	c.Location = NewLocationClient(c.config)
	c.Network = NewNetworkClient(c.config)
	c.OSContext = NewOSContextClient(c.config)
	c.Page = NewPageClient(c.config)
	c.Referrer = NewReferrerClient(c.config)
	c.Screen = NewScreenClient(c.config)
	c.Session = NewSessionClient(c.config)
	c.Timing = NewTimingClient(c.config)
	c.User = NewUserClient(c.config)
	c.Viewport = NewViewportClient(c.config)
}

// Open opens a database/sql.DB specified by the driver name and
// the data source name, and returns a new client attached to it.
// Optional parameters can be added for configuring the client.
func Open(driverName, dataSourceName string, options ...Option) (*Client, error) {
	switch driverName {
	case dialect.MySQL, dialect.Postgres, dialect.SQLite:
		drv, err := sql.Open(driverName, dataSourceName)
		if err != nil {
			return nil, err
		}
		return NewClient(append(options, Driver(drv))...), nil
	default:
		return nil, fmt.Errorf("unsupported driver: %q", driverName)
	}
}

// Tx returns a new transactional client. The provided context
// is used until the transaction is committed or rolled back.
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
	// Nested transactions are not supported: a client whose driver is
	// already a txDriver is itself transactional.
	if _, ok := c.driver.(*txDriver); ok {
		return nil, fmt.Errorf("ent: cannot start a transaction within a transaction")
	}
	tx, err := newTx(ctx, c.driver)
	if err != nil {
		return nil, fmt.Errorf("ent: starting a transaction: %v", err)
	}
	cfg := config{driver: tx, log: c.log, debug: c.debug, hooks: c.hooks}
	return &Tx{
		ctx:          ctx,
		config:       cfg,
		Action:       NewActionClient(cfg),
		Alias:        NewAliasClient(cfg),
		App:          NewAppClient(cfg),
		Browser:      NewBrowserClient(cfg),
		Campaign:     NewCampaignClient(cfg),
		Connectivity: NewConnectivityClient(cfg),
		Device:       NewDeviceClient(cfg),
		Event:        NewEventClient(cfg),
		Extra:        NewExtraClient(cfg),
		Group:        NewGroupClient(cfg),
		Library:      NewLibraryClient(cfg),
		Location:     NewLocationClient(cfg),
		Network:      NewNetworkClient(cfg),
		OSContext:    NewOSContextClient(cfg),
		Page:         NewPageClient(cfg),
		Referrer:     NewReferrerClient(cfg),
		Screen:       NewScreenClient(cfg),
		Session:      NewSessionClient(cfg),
		Timing:       NewTimingClient(cfg),
		User:         NewUserClient(cfg),
		Viewport:     NewViewportClient(cfg),
	}, nil
}

// BeginTx returns a transactional client with options.
func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
	if _, ok := c.driver.(*txDriver); ok {
		return nil, fmt.Errorf("ent: cannot start a transaction within a transaction")
	}
	// Unlike Tx, this path requires the underlying sql.Driver so the
	// sql.TxOptions can be forwarded to the database.
	tx, err := c.driver.(*sql.Driver).BeginTx(ctx, opts)
	if err != nil {
		return nil, fmt.Errorf("ent: starting a transaction: %v", err)
	}
	cfg := config{driver: &txDriver{tx: tx, drv: c.driver}, log: c.log, debug: c.debug, hooks: c.hooks}
	return &Tx{
		config:       cfg,
		Action:       NewActionClient(cfg),
		Alias:        NewAliasClient(cfg),
		App:          NewAppClient(cfg),
		Browser:      NewBrowserClient(cfg),
		Campaign:     NewCampaignClient(cfg),
		Connectivity: NewConnectivityClient(cfg),
		Device:       NewDeviceClient(cfg),
		Event:        NewEventClient(cfg),
		Extra:        NewExtraClient(cfg),
		Group:        NewGroupClient(cfg),
		Library:      NewLibraryClient(cfg),
		Location:     NewLocationClient(cfg),
		Network:      NewNetworkClient(cfg),
		OSContext:    NewOSContextClient(cfg),
		Page:         NewPageClient(cfg),
		Referrer:     NewReferrerClient(cfg),
		Screen:       NewScreenClient(cfg),
		Session:      NewSessionClient(cfg),
		Timing:       NewTimingClient(cfg),
		User:         NewUserClient(cfg),
		Viewport:     NewViewportClient(cfg),
	}, nil
}

// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
//
//	client.Debug().
//		Action.
//		Query().
//		Count(ctx)
//
func (c *Client) Debug() *Client {
	if c.debug {
		return c
	}
	cfg := config{driver: dialect.Debug(c.driver, c.log), log: c.log, debug: true, hooks: c.hooks}
	client := &Client{config: cfg}
	client.init()
	return client
}

// Close closes the database connection and prevents new queries from starting.
func (c *Client) Close() error {
	return c.driver.Close()
}

// Use adds the mutation hooks to all the entity clients.
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
func (c *Client) Use(hooks ...Hook) {
	c.Action.Use(hooks...)
	c.Alias.Use(hooks...)
	c.App.Use(hooks...)
	c.Browser.Use(hooks...)
	c.Campaign.Use(hooks...)
	c.Connectivity.Use(hooks...)
	c.Device.Use(hooks...)
	c.Event.Use(hooks...)
	c.Extra.Use(hooks...)
	c.Group.Use(hooks...)
	c.Library.Use(hooks...)
	c.Location.Use(hooks...)
	c.Network.Use(hooks...)
	c.OSContext.Use(hooks...)
	c.Page.Use(hooks...)
	c.Referrer.Use(hooks...)
	c.Screen.Use(hooks...)
	c.Session.Use(hooks...)
	c.Timing.Use(hooks...)
	c.User.Use(hooks...)
	c.Viewport.Use(hooks...)
}
// ActionClient is a client for the Action schema.
type ActionClient struct {
config
}
// NewActionClient returns a client for the Action from the given config.
func NewActionClient(c config) *ActionClient {
return &ActionClient{config: c}
}
// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `action.Hooks(f(g(h())))`.
func (c *ActionClient) Use(hooks ...Hook) {
c.hooks.Action = append(c.hooks.Action, hooks...)
}
// Create returns a create builder for Action.
func (c *ActionClient) Create() *ActionCreate {
	mutation := newActionMutation(c.config, OpCreate)
	return &ActionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Action entities.
func (c *ActionClient) CreateBulk(builders ...*ActionCreate) *ActionCreateBulk {
	return &ActionCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Action.
func (c *ActionClient) Update() *ActionUpdate {
	mutation := newActionMutation(c.config, OpUpdate)
	return &ActionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *ActionClient) UpdateOne(a *Action) *ActionUpdateOne {
	mutation := newActionMutation(c.config, OpUpdateOne, withAction(a))
	return &ActionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *ActionClient) UpdateOneID(id int) *ActionUpdateOne {
	mutation := newActionMutation(c.config, OpUpdateOne, withActionID(id))
	return &ActionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Action.
func (c *ActionClient) Delete() *ActionDelete {
	mutation := newActionMutation(c.config, OpDelete)
	return &ActionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *ActionClient) DeleteOne(a *Action) *ActionDeleteOne {
	return c.DeleteOneID(a.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *ActionClient) DeleteOneID(id int) *ActionDeleteOne {
	builder := c.Delete().Where(action.ID(id))
	// Override the op so hooks observe a single-entity delete (OpDeleteOne),
	// not the predicate-based OpDelete created above.
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &ActionDeleteOne{builder}
}

// Query returns a query builder for Action.
func (c *ActionClient) Query() *ActionQuery {
	return &ActionQuery{config: c.config}
}

// Get returns an Action entity by its id.
func (c *ActionClient) Get(ctx context.Context, id int) (*Action, error) {
	return c.Query().Where(action.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *ActionClient) GetX(ctx context.Context, id int) *Action {
	a, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return a
}

// QueryEvent queries the event edge of an Action.
func (c *ActionClient) QueryEvent(a *Action) *EventQuery {
	query := &EventQuery{config: c.config}
	// The traversal path is resolved lazily: a.ID is read when the query executes.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := a.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(action.Table, action.FieldID, id),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2O, true, action.EventTable, action.EventColumn),
		)
		fromV = sqlgraph.Neighbors(a.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// Hooks returns the client hooks.
func (c *ActionClient) Hooks() []Hook {
	return c.hooks.Action
}
// AliasClient is the API client for operations on the Alias schema.
type AliasClient struct {
	config
}

// NewAliasClient builds an Alias client on top of the given config.
func NewAliasClient(c config) *AliasClient {
	return &AliasClient{config: c}
}

// Use registers mutation hooks on the Alias hook stack.
// `Use(f, g, h)` is equivalent to `alias.Hooks(f(g(h())))`.
func (c *AliasClient) Use(hooks ...Hook) {
	c.hooks.Alias = append(c.hooks.Alias, hooks...)
}

// Create returns a builder for creating a single Alias.
func (c *AliasClient) Create() *AliasCreate {
	return &AliasCreate{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAliasMutation(c.config, OpCreate),
	}
}

// CreateBulk returns a builder for creating several Alias entities at once.
func (c *AliasClient) CreateBulk(builders ...*AliasCreate) *AliasCreateBulk {
	return &AliasCreateBulk{config: c.config, builders: builders}
}

// Update returns a builder that updates Alias entities matched by predicates.
func (c *AliasClient) Update() *AliasUpdate {
	return &AliasUpdate{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAliasMutation(c.config, OpUpdate),
	}
}

// UpdateOne returns a builder that updates the given Alias entity.
func (c *AliasClient) UpdateOne(a *Alias) *AliasUpdateOne {
	return &AliasUpdateOne{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAliasMutation(c.config, OpUpdateOne, withAlias(a)),
	}
}

// UpdateOneID returns a builder that updates the Alias with the given id.
func (c *AliasClient) UpdateOneID(id int) *AliasUpdateOne {
	return &AliasUpdateOne{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAliasMutation(c.config, OpUpdateOne, withAliasID(id)),
	}
}

// Delete returns a builder that deletes Alias entities matched by predicates.
func (c *AliasClient) Delete() *AliasDelete {
	return &AliasDelete{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newAliasMutation(c.config, OpDelete),
	}
}

// DeleteOne returns a builder that deletes the given Alias entity.
func (c *AliasClient) DeleteOne(a *Alias) *AliasDeleteOne {
	return c.DeleteOneID(a.ID)
}

// DeleteOneID returns a builder that deletes the Alias with the given id.
func (c *AliasClient) DeleteOneID(id int) *AliasDeleteOne {
	del := c.Delete().Where(alias.ID(id))
	// Narrow the op so hooks see a single-entity delete.
	del.mutation.id = &id
	del.mutation.op = OpDeleteOne
	return &AliasDeleteOne{del}
}

// Query returns a query builder for the Alias schema.
func (c *AliasClient) Query() *AliasQuery {
	return &AliasQuery{config: c.config}
}

// Get fetches the Alias entity with the given id.
func (c *AliasClient) Get(ctx context.Context, id int) (*Alias, error) {
	return c.Query().Where(alias.ID(id)).Only(ctx)
}

// GetX fetches the Alias entity with the given id and panics on error.
func (c *AliasClient) GetX(ctx context.Context, id int) *Alias {
	obj, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return obj
}

// QueryEvent returns a query over the event edge of the given Alias.
func (c *AliasClient) QueryEvent(a *Alias) *EventQuery {
	q := &EventQuery{config: c.config}
	q.path = func(ctx context.Context) (*sql.Selector, error) {
		// Resolved lazily: a.ID is read when the query executes.
		step := sqlgraph.NewStep(
			sqlgraph.From(alias.Table, alias.FieldID, a.ID),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2O, true, alias.EventTable, alias.EventColumn),
		)
		return sqlgraph.Neighbors(a.driver.Dialect(), step), nil
	}
	return q
}

// QueryUser returns a query over the user edge of the given Alias.
func (c *AliasClient) QueryUser(a *Alias) *UserQuery {
	q := &UserQuery{config: c.config}
	q.path = func(ctx context.Context) (*sql.Selector, error) {
		// Resolved lazily: a.ID is read when the query executes.
		step := sqlgraph.NewStep(
			sqlgraph.From(alias.Table, alias.FieldID, a.ID),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, alias.UserTable, alias.UserColumn),
		)
		return sqlgraph.Neighbors(a.driver.Dialect(), step), nil
	}
	return q
}

// Hooks returns the hooks registered for Alias mutations.
func (c *AliasClient) Hooks() []Hook {
	return c.hooks.Alias
}
// AppClient is a client for the App schema.
type AppClient struct {
	config
}

// NewAppClient returns a client for the App from the given config.
func NewAppClient(c config) *AppClient {
	return &AppClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `app.Hooks(f(g(h())))`.
func (c *AppClient) Use(hooks ...Hook) {
	c.hooks.App = append(c.hooks.App, hooks...)
}

// Create returns a create builder for App.
func (c *AppClient) Create() *AppCreate {
	mutation := newAppMutation(c.config, OpCreate)
	return &AppCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of App entities.
func (c *AppClient) CreateBulk(builders ...*AppCreate) *AppCreateBulk {
	return &AppCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for App.
func (c *AppClient) Update() *AppUpdate {
	mutation := newAppMutation(c.config, OpUpdate)
	return &AppUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *AppClient) UpdateOne(a *App) *AppUpdateOne {
	mutation := newAppMutation(c.config, OpUpdateOne, withApp(a))
	return &AppUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *AppClient) UpdateOneID(id int) *AppUpdateOne {
	mutation := newAppMutation(c.config, OpUpdateOne, withAppID(id))
	return &AppUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for App.
func (c *AppClient) Delete() *AppDelete {
	mutation := newAppMutation(c.config, OpDelete)
	return &AppDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *AppClient) DeleteOne(a *App) *AppDeleteOne {
	return c.DeleteOneID(a.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *AppClient) DeleteOneID(id int) *AppDeleteOne {
	builder := c.Delete().Where(app.ID(id))
	// Override the op so hooks observe a single-entity delete (OpDeleteOne).
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &AppDeleteOne{builder}
}

// Query returns a query builder for App.
func (c *AppClient) Query() *AppQuery {
	return &AppQuery{config: c.config}
}

// Get returns an App entity by its id.
func (c *AppClient) Get(ctx context.Context, id int) (*App, error) {
	return c.Query().Where(app.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *AppClient) GetX(ctx context.Context, id int) *App {
	a, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return a
}

// QueryEvents queries the events edge of an App.
func (c *AppClient) QueryEvents(a *App) *EventQuery {
	query := &EventQuery{config: c.config}
	// The traversal path is resolved lazily: a.ID is read when the query executes.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := a.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(app.Table, app.FieldID, id),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, true, app.EventsTable, app.EventsColumn),
		)
		fromV = sqlgraph.Neighbors(a.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// Hooks returns the client hooks.
func (c *AppClient) Hooks() []Hook {
	return c.hooks.App
}
// BrowserClient is the API client for operations on the Browser schema.
type BrowserClient struct {
	config
}

// NewBrowserClient builds a Browser client on top of the given config.
func NewBrowserClient(c config) *BrowserClient {
	return &BrowserClient{config: c}
}

// Use registers mutation hooks on the Browser hook stack.
// `Use(f, g, h)` is equivalent to `browser.Hooks(f(g(h())))`.
func (c *BrowserClient) Use(hooks ...Hook) {
	c.hooks.Browser = append(c.hooks.Browser, hooks...)
}

// Create returns a builder for creating a single Browser.
func (c *BrowserClient) Create() *BrowserCreate {
	return &BrowserCreate{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newBrowserMutation(c.config, OpCreate),
	}
}

// CreateBulk returns a builder for creating several Browser entities at once.
func (c *BrowserClient) CreateBulk(builders ...*BrowserCreate) *BrowserCreateBulk {
	return &BrowserCreateBulk{config: c.config, builders: builders}
}

// Update returns a builder that updates Browser entities matched by predicates.
func (c *BrowserClient) Update() *BrowserUpdate {
	return &BrowserUpdate{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newBrowserMutation(c.config, OpUpdate),
	}
}

// UpdateOne returns a builder that updates the given Browser entity.
func (c *BrowserClient) UpdateOne(b *Browser) *BrowserUpdateOne {
	return &BrowserUpdateOne{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newBrowserMutation(c.config, OpUpdateOne, withBrowser(b)),
	}
}

// UpdateOneID returns a builder that updates the Browser with the given id.
func (c *BrowserClient) UpdateOneID(id int) *BrowserUpdateOne {
	return &BrowserUpdateOne{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newBrowserMutation(c.config, OpUpdateOne, withBrowserID(id)),
	}
}

// Delete returns a builder that deletes Browser entities matched by predicates.
func (c *BrowserClient) Delete() *BrowserDelete {
	return &BrowserDelete{
		config:   c.config,
		hooks:    c.Hooks(),
		mutation: newBrowserMutation(c.config, OpDelete),
	}
}

// DeleteOne returns a builder that deletes the given Browser entity.
func (c *BrowserClient) DeleteOne(b *Browser) *BrowserDeleteOne {
	return c.DeleteOneID(b.ID)
}

// DeleteOneID returns a builder that deletes the Browser with the given id.
func (c *BrowserClient) DeleteOneID(id int) *BrowserDeleteOne {
	del := c.Delete().Where(browser.ID(id))
	// Narrow the op so hooks see a single-entity delete.
	del.mutation.id = &id
	del.mutation.op = OpDeleteOne
	return &BrowserDeleteOne{del}
}

// Query returns a query builder for the Browser schema.
func (c *BrowserClient) Query() *BrowserQuery {
	return &BrowserQuery{config: c.config}
}

// Get fetches the Browser entity with the given id.
func (c *BrowserClient) Get(ctx context.Context, id int) (*Browser, error) {
	return c.Query().Where(browser.ID(id)).Only(ctx)
}

// GetX fetches the Browser entity with the given id and panics on error.
func (c *BrowserClient) GetX(ctx context.Context, id int) *Browser {
	obj, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return obj
}

// QueryEvent returns a query over the event edge of the given Browser.
func (c *BrowserClient) QueryEvent(b *Browser) *EventQuery {
	q := &EventQuery{config: c.config}
	q.path = func(ctx context.Context) (*sql.Selector, error) {
		// Resolved lazily: b.ID is read when the query executes.
		step := sqlgraph.NewStep(
			sqlgraph.From(browser.Table, browser.FieldID, b.ID),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2O, true, browser.EventTable, browser.EventColumn),
		)
		return sqlgraph.Neighbors(b.driver.Dialect(), step), nil
	}
	return q
}

// Hooks returns the hooks registered for Browser mutations.
func (c *BrowserClient) Hooks() []Hook {
	return c.hooks.Browser
}
// CampaignClient is a client for the Campaign schema.
type CampaignClient struct {
	config
}

// NewCampaignClient returns a client for the Campaign from the given config.
func NewCampaignClient(c config) *CampaignClient {
	return &CampaignClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `campaign.Hooks(f(g(h())))`.
func (c *CampaignClient) Use(hooks ...Hook) {
	c.hooks.Campaign = append(c.hooks.Campaign, hooks...)
}

// Create returns a create builder for Campaign.
func (c *CampaignClient) Create() *CampaignCreate {
	mutation := newCampaignMutation(c.config, OpCreate)
	return &CampaignCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Campaign entities.
func (c *CampaignClient) CreateBulk(builders ...*CampaignCreate) *CampaignCreateBulk {
	return &CampaignCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Campaign.
func (c *CampaignClient) Update() *CampaignUpdate {
	mutation := newCampaignMutation(c.config, OpUpdate)
	return &CampaignUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *CampaignClient) UpdateOne(ca *Campaign) *CampaignUpdateOne {
	mutation := newCampaignMutation(c.config, OpUpdateOne, withCampaign(ca))
	return &CampaignUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *CampaignClient) UpdateOneID(id int) *CampaignUpdateOne {
	mutation := newCampaignMutation(c.config, OpUpdateOne, withCampaignID(id))
	return &CampaignUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Campaign.
func (c *CampaignClient) Delete() *CampaignDelete {
	mutation := newCampaignMutation(c.config, OpDelete)
	return &CampaignDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *CampaignClient) DeleteOne(ca *Campaign) *CampaignDeleteOne {
	return c.DeleteOneID(ca.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *CampaignClient) DeleteOneID(id int) *CampaignDeleteOne {
	builder := c.Delete().Where(campaign.ID(id))
	// Override the op so hooks observe a single-entity delete (OpDeleteOne).
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &CampaignDeleteOne{builder}
}

// Query returns a query builder for Campaign.
func (c *CampaignClient) Query() *CampaignQuery {
	return &CampaignQuery{config: c.config}
}

// Get returns a Campaign entity by its id.
func (c *CampaignClient) Get(ctx context.Context, id int) (*Campaign, error) {
	return c.Query().Where(campaign.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *CampaignClient) GetX(ctx context.Context, id int) *Campaign {
	ca, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return ca
}

// QueryEvent queries the event edge of a Campaign.
func (c *CampaignClient) QueryEvent(ca *Campaign) *EventQuery {
	query := &EventQuery{config: c.config}
	// The traversal path is resolved lazily: ca.ID is read when the query executes.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := ca.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(campaign.Table, campaign.FieldID, id),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, true, campaign.EventTable, campaign.EventColumn),
		)
		fromV = sqlgraph.Neighbors(ca.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// Hooks returns the client hooks.
func (c *CampaignClient) Hooks() []Hook {
	return c.hooks.Campaign
}
// ConnectivityClient is a client for the Connectivity schema.
type ConnectivityClient struct {
	config
}

// NewConnectivityClient returns a client for the Connectivity from the given config.
func NewConnectivityClient(c config) *ConnectivityClient {
	return &ConnectivityClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `connectivity.Hooks(f(g(h())))`.
func (c *ConnectivityClient) Use(hooks ...Hook) {
	c.hooks.Connectivity = append(c.hooks.Connectivity, hooks...)
}

// Create returns a create builder for Connectivity.
func (c *ConnectivityClient) Create() *ConnectivityCreate {
	mutation := newConnectivityMutation(c.config, OpCreate)
	return &ConnectivityCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Connectivity entities.
func (c *ConnectivityClient) CreateBulk(builders ...*ConnectivityCreate) *ConnectivityCreateBulk {
	return &ConnectivityCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Connectivity.
func (c *ConnectivityClient) Update() *ConnectivityUpdate {
	mutation := newConnectivityMutation(c.config, OpUpdate)
	return &ConnectivityUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *ConnectivityClient) UpdateOne(co *Connectivity) *ConnectivityUpdateOne {
	mutation := newConnectivityMutation(c.config, OpUpdateOne, withConnectivity(co))
	return &ConnectivityUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *ConnectivityClient) UpdateOneID(id int) *ConnectivityUpdateOne {
	mutation := newConnectivityMutation(c.config, OpUpdateOne, withConnectivityID(id))
	return &ConnectivityUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Connectivity.
func (c *ConnectivityClient) Delete() *ConnectivityDelete {
	mutation := newConnectivityMutation(c.config, OpDelete)
	return &ConnectivityDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *ConnectivityClient) DeleteOne(co *Connectivity) *ConnectivityDeleteOne {
	return c.DeleteOneID(co.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *ConnectivityClient) DeleteOneID(id int) *ConnectivityDeleteOne {
	builder := c.Delete().Where(connectivity.ID(id))
	// Override the op so hooks observe a single-entity delete (OpDeleteOne).
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &ConnectivityDeleteOne{builder}
}

// Query returns a query builder for Connectivity.
func (c *ConnectivityClient) Query() *ConnectivityQuery {
	return &ConnectivityQuery{config: c.config}
}

// Get returns a Connectivity entity by its id.
func (c *ConnectivityClient) Get(ctx context.Context, id int) (*Connectivity, error) {
	return c.Query().Where(connectivity.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *ConnectivityClient) GetX(ctx context.Context, id int) *Connectivity {
	co, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return co
}

// QueryEvent queries the event edge of a Connectivity.
func (c *ConnectivityClient) QueryEvent(co *Connectivity) *EventQuery {
	query := &EventQuery{config: c.config}
	// The traversal path is resolved lazily: co.ID is read when the query executes.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := co.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(connectivity.Table, connectivity.FieldID, id),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2O, true, connectivity.EventTable, connectivity.EventColumn),
		)
		fromV = sqlgraph.Neighbors(co.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// Hooks returns the client hooks.
func (c *ConnectivityClient) Hooks() []Hook {
	return c.hooks.Connectivity
}
// DeviceClient is a client for the Device schema.
type DeviceClient struct {
	config
}

// NewDeviceClient returns a client for the Device from the given config.
func NewDeviceClient(c config) *DeviceClient {
	return &DeviceClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `device.Hooks(f(g(h())))`.
func (c *DeviceClient) Use(hooks ...Hook) {
	c.hooks.Device = append(c.hooks.Device, hooks...)
}

// Create returns a create builder for Device.
func (c *DeviceClient) Create() *DeviceCreate {
	mutation := newDeviceMutation(c.config, OpCreate)
	return &DeviceCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Device entities.
func (c *DeviceClient) CreateBulk(builders ...*DeviceCreate) *DeviceCreateBulk {
	return &DeviceCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Device.
func (c *DeviceClient) Update() *DeviceUpdate {
	mutation := newDeviceMutation(c.config, OpUpdate)
	return &DeviceUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *DeviceClient) UpdateOne(d *Device) *DeviceUpdateOne {
	mutation := newDeviceMutation(c.config, OpUpdateOne, withDevice(d))
	return &DeviceUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
// Note: Device uses string ids, unlike most other schemas here.
func (c *DeviceClient) UpdateOneID(id string) *DeviceUpdateOne {
	mutation := newDeviceMutation(c.config, OpUpdateOne, withDeviceID(id))
	return &DeviceUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Device.
func (c *DeviceClient) Delete() *DeviceDelete {
	mutation := newDeviceMutation(c.config, OpDelete)
	return &DeviceDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *DeviceClient) DeleteOne(d *Device) *DeviceDeleteOne {
	return c.DeleteOneID(d.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *DeviceClient) DeleteOneID(id string) *DeviceDeleteOne {
	builder := c.Delete().Where(device.ID(id))
	// Override the op so hooks observe a single-entity delete (OpDeleteOne).
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &DeviceDeleteOne{builder}
}

// Query returns a query builder for Device.
func (c *DeviceClient) Query() *DeviceQuery {
	return &DeviceQuery{config: c.config}
}

// Get returns a Device entity by its id.
func (c *DeviceClient) Get(ctx context.Context, id string) (*Device, error) {
	return c.Query().Where(device.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *DeviceClient) GetX(ctx context.Context, id string) *Device {
	d, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return d
}

// QueryEvents queries the events edge of a Device.
func (c *DeviceClient) QueryEvents(d *Device) *EventQuery {
	query := &EventQuery{config: c.config}
	// The traversal path is resolved lazily: d.ID is read when the query executes.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := d.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(device.Table, device.FieldID, id),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, true, device.EventsTable, device.EventsColumn),
		)
		fromV = sqlgraph.Neighbors(d.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// Hooks returns the client hooks.
func (c *DeviceClient) Hooks() []Hook {
	return c.hooks.Device
}
// EventClient is a client for the Event schema.
type EventClient struct {
	config
}

// NewEventClient returns a client for the Event from the given config.
func NewEventClient(c config) *EventClient {
	return &EventClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `event.Hooks(f(g(h())))`.
func (c *EventClient) Use(hooks ...Hook) {
	c.hooks.Event = append(c.hooks.Event, hooks...)
}

// Create returns a create builder for Event.
func (c *EventClient) Create() *EventCreate {
	mutation := newEventMutation(c.config, OpCreate)
	return &EventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Event entities.
func (c *EventClient) CreateBulk(builders ...*EventCreate) *EventCreateBulk {
	return &EventCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Event.
func (c *EventClient) Update() *EventUpdate {
	mutation := newEventMutation(c.config, OpUpdate)
	return &EventUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *EventClient) UpdateOne(e *Event) *EventUpdateOne {
	mutation := newEventMutation(c.config, OpUpdateOne, withEvent(e))
	return &EventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
// Note: Event uses uuid.UUID ids.
func (c *EventClient) UpdateOneID(id uuid.UUID) *EventUpdateOne {
	mutation := newEventMutation(c.config, OpUpdateOne, withEventID(id))
	return &EventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Event.
func (c *EventClient) Delete() *EventDelete {
	mutation := newEventMutation(c.config, OpDelete)
	return &EventDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *EventClient) DeleteOne(e *Event) *EventDeleteOne {
	return c.DeleteOneID(e.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *EventClient) DeleteOneID(id uuid.UUID) *EventDeleteOne {
	builder := c.Delete().Where(event.ID(id))
	// Override the op so hooks observe a single-entity delete (OpDeleteOne).
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &EventDeleteOne{builder}
}

// Query returns a query builder for Event.
func (c *EventClient) Query() *EventQuery {
	return &EventQuery{config: c.config}
}

// Get returns an Event entity by its id.
func (c *EventClient) Get(ctx context.Context, id uuid.UUID) (*Event, error) {
	return c.Query().Where(event.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *EventClient) GetX(ctx context.Context, id uuid.UUID) *Event {
	e, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return e
}

// QueryAction queries the action edge of an Event.
func (c *EventClient) QueryAction(e *Event) *ActionQuery {
	query := &ActionQuery{config: c.config}
	// The traversal path is resolved lazily: e.ID is read when the query executes.
	// The same pattern is used by every Query* method below.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(action.Table, action.FieldID),
			sqlgraph.Edge(sqlgraph.O2O, false, event.ActionTable, event.ActionColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryAlias queries the alias edge of an Event.
func (c *EventClient) QueryAlias(e *Event) *AliasQuery {
	query := &AliasQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(alias.Table, alias.FieldID),
			sqlgraph.Edge(sqlgraph.O2O, false, event.AliasTable, event.AliasColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryApp queries the app edge of an Event.
func (c *EventClient) QueryApp(e *Event) *AppQuery {
	query := &AppQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(app.Table, app.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.AppTable, event.AppColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryBrowser queries the browser edge of an Event.
func (c *EventClient) QueryBrowser(e *Event) *BrowserQuery {
	query := &BrowserQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(browser.Table, browser.FieldID),
			sqlgraph.Edge(sqlgraph.O2O, false, event.BrowserTable, event.BrowserColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryCampaign queries the campaign edge of an Event.
func (c *EventClient) QueryCampaign(e *Event) *CampaignQuery {
	query := &CampaignQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(campaign.Table, campaign.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.CampaignTable, event.CampaignColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryConnectivity queries the connectivity edge of an Event.
func (c *EventClient) QueryConnectivity(e *Event) *ConnectivityQuery {
	query := &ConnectivityQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(connectivity.Table, connectivity.FieldID),
			sqlgraph.Edge(sqlgraph.O2O, false, event.ConnectivityTable, event.ConnectivityColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryDevice queries the device edge of an Event.
func (c *EventClient) QueryDevice(e *Event) *DeviceQuery {
	query := &DeviceQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(device.Table, device.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.DeviceTable, event.DeviceColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryExtra queries the extra edge of an Event.
func (c *EventClient) QueryExtra(e *Event) *ExtraQuery {
	query := &ExtraQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(extra.Table, extra.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.ExtraTable, event.ExtraColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryGroup queries the group edge of an Event.
func (c *EventClient) QueryGroup(e *Event) *GroupQuery {
	query := &GroupQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(group.Table, group.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.GroupTable, event.GroupColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryLibrary queries the library edge of an Event.
func (c *EventClient) QueryLibrary(e *Event) *LibraryQuery {
	query := &LibraryQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(library.Table, library.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.LibraryTable, event.LibraryColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryLocation queries the location edge of an Event.
func (c *EventClient) QueryLocation(e *Event) *LocationQuery {
	query := &LocationQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(location.Table, location.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.LocationTable, event.LocationColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryNetwork queries the network edge of an Event.
func (c *EventClient) QueryNetwork(e *Event) *NetworkQuery {
	query := &NetworkQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(network.Table, network.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.NetworkTable, event.NetworkColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryOs queries the os edge of an Event.
func (c *EventClient) QueryOs(e *Event) *OSContextQuery {
	query := &OSContextQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(oscontext.Table, oscontext.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.OsTable, event.OsColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryPage queries the page edge of an Event.
func (c *EventClient) QueryPage(e *Event) *PageQuery {
	query := &PageQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(page.Table, page.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.PageTable, event.PageColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryReferrer queries the referrer edge of an Event.
func (c *EventClient) QueryReferrer(e *Event) *ReferrerQuery {
	query := &ReferrerQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(referrer.Table, referrer.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.ReferrerTable, event.ReferrerColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryScreen queries the screen edge of an Event.
func (c *EventClient) QueryScreen(e *Event) *ScreenQuery {
	query := &ScreenQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := e.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(event.Table, event.FieldID, id),
			sqlgraph.To(screen.Table, screen.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, event.ScreenTable, event.ScreenColumn),
		)
		fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}
// QuerySession queries the session edge of a Event.
func (c *EventClient) QuerySession(e *Event) *SessionQuery {
query := &SessionQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
id := e.ID
step := sqlgraph.NewStep(
sqlgraph.From(event.Table, event.FieldID, id),
sqlgraph.To(session.Table, session.FieldID),
sqlgraph.Edge(sqlgraph.M2O, false, event.SessionTable, event.SessionColumn),
)
fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
return fromV, nil
}
return query
}
// QueryTiming queries the timing edge of a Event.
func (c *EventClient) QueryTiming(e *Event) *TimingQuery {
query := &TimingQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
id := e.ID
step := sqlgraph.NewStep(
sqlgraph.From(event.Table, event.FieldID, id),
sqlgraph.To(timing.Table, timing.FieldID),
sqlgraph.Edge(sqlgraph.M2O, false, event.TimingTable, event.TimingColumn),
)
fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
return fromV, nil
}
return query
}
// QueryViewport queries the viewport edge of a Event.
func (c *EventClient) QueryViewport(e *Event) *ViewportQuery {
query := &ViewportQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
id := e.ID
step := sqlgraph.NewStep(
sqlgraph.From(event.Table, event.FieldID, id),
sqlgraph.To(viewport.Table, viewport.FieldID),
sqlgraph.Edge(sqlgraph.M2O, false, event.ViewportTable, event.ViewportColumn),
)
fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
return fromV, nil
}
return query
}
// QueryUser queries the user edge of a Event.
func (c *EventClient) QueryUser(e *Event) *UserQuery {
query := &UserQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
id := e.ID
step := sqlgraph.NewStep(
sqlgraph.From(event.Table, event.FieldID, id),
sqlgraph.To(user.Table, user.FieldID),
sqlgraph.Edge(sqlgraph.M2O, false, event.UserTable, event.UserColumn),
)
fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
return fromV, nil
}
return query
}
// Hooks returns the client hooks.
func (c *EventClient) Hooks() []Hook {
return c.hooks.Event
}
// ExtraClient is a client for the Extra schema.
// NOTE(review): entc-generated code — prefer regenerating over hand-editing.
type ExtraClient struct {
    // Embedded shared configuration (driver, dialect, hooks).
    config
}

// NewExtraClient returns a client for the Extra from the given config.
func NewExtraClient(c config) *ExtraClient {
    return &ExtraClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `extra.Hooks(f(g(h())))`.
func (c *ExtraClient) Use(hooks ...Hook) {
    c.hooks.Extra = append(c.hooks.Extra, hooks...)
}

// Create returns a create builder for Extra.
func (c *ExtraClient) Create() *ExtraCreate {
    mutation := newExtraMutation(c.config, OpCreate)
    return &ExtraCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Extra entities.
func (c *ExtraClient) CreateBulk(builders ...*ExtraCreate) *ExtraCreateBulk {
    return &ExtraCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Extra.
func (c *ExtraClient) Update() *ExtraUpdate {
    mutation := newExtraMutation(c.config, OpUpdate)
    return &ExtraUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *ExtraClient) UpdateOne(e *Extra) *ExtraUpdateOne {
    mutation := newExtraMutation(c.config, OpUpdateOne, withExtra(e))
    return &ExtraUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *ExtraClient) UpdateOneID(id int) *ExtraUpdateOne {
    mutation := newExtraMutation(c.config, OpUpdateOne, withExtraID(id))
    return &ExtraUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Extra.
func (c *ExtraClient) Delete() *ExtraDelete {
    mutation := newExtraMutation(c.config, OpDelete)
    return &ExtraDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *ExtraClient) DeleteOne(e *Extra) *ExtraDeleteOne {
    return c.DeleteOneID(e.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *ExtraClient) DeleteOneID(id int) *ExtraDeleteOne {
    builder := c.Delete().Where(extra.ID(id))
    // Narrow the bulk-delete mutation to a single-row delete-one op.
    builder.mutation.id = &id
    builder.mutation.op = OpDeleteOne
    return &ExtraDeleteOne{builder}
}

// Query returns a query builder for Extra.
func (c *ExtraClient) Query() *ExtraQuery {
    return &ExtraQuery{config: c.config}
}

// Get returns an Extra entity by its id.
func (c *ExtraClient) Get(ctx context.Context, id int) (*Extra, error) {
    return c.Query().Where(extra.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *ExtraClient) GetX(ctx context.Context, id int) *Extra {
    e, err := c.Get(ctx, id)
    if err != nil {
        panic(err)
    }
    return e
}

// QueryEvent queries the event edge of an Extra.
// The traversal step is built lazily when the returned query executes.
func (c *ExtraClient) QueryEvent(e *Extra) *EventQuery {
    query := &EventQuery{config: c.config}
    query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
        id := e.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(extra.Table, extra.FieldID, id),
            sqlgraph.To(event.Table, event.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, true, extra.EventTable, extra.EventColumn),
        )
        fromV = sqlgraph.Neighbors(e.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// Hooks returns the client hooks registered for Extra mutations.
func (c *ExtraClient) Hooks() []Hook {
    return c.hooks.Extra
}
// GroupClient is a client for the Group schema.
// NOTE(review): entc-generated code — prefer regenerating over hand-editing.
type GroupClient struct {
    // Embedded shared configuration (driver, dialect, hooks).
    config
}

// NewGroupClient returns a client for the Group from the given config.
func NewGroupClient(c config) *GroupClient {
    return &GroupClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `group.Hooks(f(g(h())))`.
func (c *GroupClient) Use(hooks ...Hook) {
    c.hooks.Group = append(c.hooks.Group, hooks...)
}

// Create returns a create builder for Group.
func (c *GroupClient) Create() *GroupCreate {
    mutation := newGroupMutation(c.config, OpCreate)
    return &GroupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Group entities.
func (c *GroupClient) CreateBulk(builders ...*GroupCreate) *GroupCreateBulk {
    return &GroupCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Group.
func (c *GroupClient) Update() *GroupUpdate {
    mutation := newGroupMutation(c.config, OpUpdate)
    return &GroupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *GroupClient) UpdateOne(gr *Group) *GroupUpdateOne {
    mutation := newGroupMutation(c.config, OpUpdateOne, withGroup(gr))
    return &GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *GroupClient) UpdateOneID(id int) *GroupUpdateOne {
    mutation := newGroupMutation(c.config, OpUpdateOne, withGroupID(id))
    return &GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Group.
func (c *GroupClient) Delete() *GroupDelete {
    mutation := newGroupMutation(c.config, OpDelete)
    return &GroupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *GroupClient) DeleteOne(gr *Group) *GroupDeleteOne {
    return c.DeleteOneID(gr.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *GroupClient) DeleteOneID(id int) *GroupDeleteOne {
    builder := c.Delete().Where(group.ID(id))
    // Narrow the bulk-delete mutation to a single-row delete-one op.
    builder.mutation.id = &id
    builder.mutation.op = OpDeleteOne
    return &GroupDeleteOne{builder}
}

// Query returns a query builder for Group.
func (c *GroupClient) Query() *GroupQuery {
    return &GroupQuery{config: c.config}
}

// Get returns a Group entity by its id.
func (c *GroupClient) Get(ctx context.Context, id int) (*Group, error) {
    return c.Query().Where(group.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *GroupClient) GetX(ctx context.Context, id int) *Group {
    gr, err := c.Get(ctx, id)
    if err != nil {
        panic(err)
    }
    return gr
}

// QueryEvents queries the events edge of a Group.
// The traversal step is built lazily when the returned query executes.
func (c *GroupClient) QueryEvents(gr *Group) *EventQuery {
    query := &EventQuery{config: c.config}
    query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
        id := gr.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(group.Table, group.FieldID, id),
            sqlgraph.To(event.Table, event.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, true, group.EventsTable, group.EventsColumn),
        )
        fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// QueryUsers queries the users edge of a Group.
// Many-to-many edge resolved through the join table's primary-key columns.
func (c *GroupClient) QueryUsers(gr *Group) *UserQuery {
    query := &UserQuery{config: c.config}
    query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
        id := gr.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(group.Table, group.FieldID, id),
            sqlgraph.To(user.Table, user.FieldID),
            sqlgraph.Edge(sqlgraph.M2M, false, group.UsersTable, group.UsersPrimaryKey...),
        )
        fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// Hooks returns the client hooks registered for Group mutations.
func (c *GroupClient) Hooks() []Hook {
    return c.hooks.Group
}
// LibraryClient is a client for the Library schema.
// NOTE(review): entc-generated code — prefer regenerating over hand-editing.
type LibraryClient struct {
    // Embedded shared configuration (driver, dialect, hooks).
    config
}

// NewLibraryClient returns a client for the Library from the given config.
func NewLibraryClient(c config) *LibraryClient {
    return &LibraryClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `library.Hooks(f(g(h())))`.
func (c *LibraryClient) Use(hooks ...Hook) {
    c.hooks.Library = append(c.hooks.Library, hooks...)
}

// Create returns a create builder for Library.
func (c *LibraryClient) Create() *LibraryCreate {
    mutation := newLibraryMutation(c.config, OpCreate)
    return &LibraryCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Library entities.
func (c *LibraryClient) CreateBulk(builders ...*LibraryCreate) *LibraryCreateBulk {
    return &LibraryCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Library.
func (c *LibraryClient) Update() *LibraryUpdate {
    mutation := newLibraryMutation(c.config, OpUpdate)
    return &LibraryUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *LibraryClient) UpdateOne(l *Library) *LibraryUpdateOne {
    mutation := newLibraryMutation(c.config, OpUpdateOne, withLibrary(l))
    return &LibraryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *LibraryClient) UpdateOneID(id int) *LibraryUpdateOne {
    mutation := newLibraryMutation(c.config, OpUpdateOne, withLibraryID(id))
    return &LibraryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Library.
func (c *LibraryClient) Delete() *LibraryDelete {
    mutation := newLibraryMutation(c.config, OpDelete)
    return &LibraryDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *LibraryClient) DeleteOne(l *Library) *LibraryDeleteOne {
    return c.DeleteOneID(l.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *LibraryClient) DeleteOneID(id int) *LibraryDeleteOne {
    builder := c.Delete().Where(library.ID(id))
    // Narrow the bulk-delete mutation to a single-row delete-one op.
    builder.mutation.id = &id
    builder.mutation.op = OpDeleteOne
    return &LibraryDeleteOne{builder}
}

// Query returns a query builder for Library.
func (c *LibraryClient) Query() *LibraryQuery {
    return &LibraryQuery{config: c.config}
}

// Get returns a Library entity by its id.
func (c *LibraryClient) Get(ctx context.Context, id int) (*Library, error) {
    return c.Query().Where(library.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *LibraryClient) GetX(ctx context.Context, id int) *Library {
    l, err := c.Get(ctx, id)
    if err != nil {
        panic(err)
    }
    return l
}

// QueryEvents queries the events edge of a Library.
// The traversal step is built lazily when the returned query executes.
func (c *LibraryClient) QueryEvents(l *Library) *EventQuery {
    query := &EventQuery{config: c.config}
    query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
        id := l.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(library.Table, library.FieldID, id),
            sqlgraph.To(event.Table, event.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, true, library.EventsTable, library.EventsColumn),
        )
        fromV = sqlgraph.Neighbors(l.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// Hooks returns the client hooks registered for Library mutations.
func (c *LibraryClient) Hooks() []Hook {
    return c.hooks.Library
}
// LocationClient is a client for the Location schema.
// NOTE(review): entc-generated code — prefer regenerating over hand-editing.
type LocationClient struct {
    // Embedded shared configuration (driver, dialect, hooks).
    config
}

// NewLocationClient returns a client for the Location from the given config.
func NewLocationClient(c config) *LocationClient {
    return &LocationClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `location.Hooks(f(g(h())))`.
func (c *LocationClient) Use(hooks ...Hook) {
    c.hooks.Location = append(c.hooks.Location, hooks...)
}

// Create returns a create builder for Location.
func (c *LocationClient) Create() *LocationCreate {
    mutation := newLocationMutation(c.config, OpCreate)
    return &LocationCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Location entities.
func (c *LocationClient) CreateBulk(builders ...*LocationCreate) *LocationCreateBulk {
    return &LocationCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Location.
func (c *LocationClient) Update() *LocationUpdate {
    mutation := newLocationMutation(c.config, OpUpdate)
    return &LocationUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *LocationClient) UpdateOne(l *Location) *LocationUpdateOne {
    mutation := newLocationMutation(c.config, OpUpdateOne, withLocation(l))
    return &LocationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *LocationClient) UpdateOneID(id int) *LocationUpdateOne {
    mutation := newLocationMutation(c.config, OpUpdateOne, withLocationID(id))
    return &LocationUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Location.
func (c *LocationClient) Delete() *LocationDelete {
    mutation := newLocationMutation(c.config, OpDelete)
    return &LocationDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *LocationClient) DeleteOne(l *Location) *LocationDeleteOne {
    return c.DeleteOneID(l.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *LocationClient) DeleteOneID(id int) *LocationDeleteOne {
    builder := c.Delete().Where(location.ID(id))
    // Narrow the bulk-delete mutation to a single-row delete-one op.
    builder.mutation.id = &id
    builder.mutation.op = OpDeleteOne
    return &LocationDeleteOne{builder}
}

// Query returns a query builder for Location.
func (c *LocationClient) Query() *LocationQuery {
    return &LocationQuery{config: c.config}
}

// Get returns a Location entity by its id.
func (c *LocationClient) Get(ctx context.Context, id int) (*Location, error) {
    return c.Query().Where(location.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *LocationClient) GetX(ctx context.Context, id int) *Location {
    l, err := c.Get(ctx, id)
    if err != nil {
        panic(err)
    }
    return l
}

// QueryEvents queries the events edge of a Location.
// The traversal step is built lazily when the returned query executes.
func (c *LocationClient) QueryEvents(l *Location) *EventQuery {
    query := &EventQuery{config: c.config}
    query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
        id := l.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(location.Table, location.FieldID, id),
            sqlgraph.To(event.Table, event.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, true, location.EventsTable, location.EventsColumn),
        )
        fromV = sqlgraph.Neighbors(l.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// Hooks returns the client hooks registered for Location mutations.
func (c *LocationClient) Hooks() []Hook {
    return c.hooks.Location
}
// NetworkClient is a client for the Network schema.
// NOTE(review): entc-generated code — prefer regenerating over hand-editing.
type NetworkClient struct {
    // Embedded shared configuration (driver, dialect, hooks).
    config
}

// NewNetworkClient returns a client for the Network from the given config.
func NewNetworkClient(c config) *NetworkClient {
    return &NetworkClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `network.Hooks(f(g(h())))`.
func (c *NetworkClient) Use(hooks ...Hook) {
    c.hooks.Network = append(c.hooks.Network, hooks...)
}

// Create returns a create builder for Network.
func (c *NetworkClient) Create() *NetworkCreate {
    mutation := newNetworkMutation(c.config, OpCreate)
    return &NetworkCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Network entities.
func (c *NetworkClient) CreateBulk(builders ...*NetworkCreate) *NetworkCreateBulk {
    return &NetworkCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Network.
func (c *NetworkClient) Update() *NetworkUpdate {
    mutation := newNetworkMutation(c.config, OpUpdate)
    return &NetworkUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *NetworkClient) UpdateOne(n *Network) *NetworkUpdateOne {
    mutation := newNetworkMutation(c.config, OpUpdateOne, withNetwork(n))
    return &NetworkUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *NetworkClient) UpdateOneID(id int) *NetworkUpdateOne {
    mutation := newNetworkMutation(c.config, OpUpdateOne, withNetworkID(id))
    return &NetworkUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Network.
func (c *NetworkClient) Delete() *NetworkDelete {
    mutation := newNetworkMutation(c.config, OpDelete)
    return &NetworkDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *NetworkClient) DeleteOne(n *Network) *NetworkDeleteOne {
    return c.DeleteOneID(n.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *NetworkClient) DeleteOneID(id int) *NetworkDeleteOne {
    builder := c.Delete().Where(network.ID(id))
    // Narrow the bulk-delete mutation to a single-row delete-one op.
    builder.mutation.id = &id
    builder.mutation.op = OpDeleteOne
    return &NetworkDeleteOne{builder}
}

// Query returns a query builder for Network.
func (c *NetworkClient) Query() *NetworkQuery {
    return &NetworkQuery{config: c.config}
}

// Get returns a Network entity by its id.
func (c *NetworkClient) Get(ctx context.Context, id int) (*Network, error) {
    return c.Query().Where(network.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *NetworkClient) GetX(ctx context.Context, id int) *Network {
    n, err := c.Get(ctx, id)
    if err != nil {
        panic(err)
    }
    return n
}

// QueryEvents queries the events edge of a Network.
// The traversal step is built lazily when the returned query executes.
func (c *NetworkClient) QueryEvents(n *Network) *EventQuery {
    query := &EventQuery{config: c.config}
    query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
        id := n.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(network.Table, network.FieldID, id),
            sqlgraph.To(event.Table, event.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, true, network.EventsTable, network.EventsColumn),
        )
        fromV = sqlgraph.Neighbors(n.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// Hooks returns the client hooks registered for Network mutations.
func (c *NetworkClient) Hooks() []Hook {
    return c.hooks.Network
}
// OSContextClient is a client for the OSContext schema.
// NOTE(review): entc-generated code — prefer regenerating over hand-editing.
type OSContextClient struct {
    // Embedded shared configuration (driver, dialect, hooks).
    config
}

// NewOSContextClient returns a client for the OSContext from the given config.
func NewOSContextClient(c config) *OSContextClient {
    return &OSContextClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `oscontext.Hooks(f(g(h())))`.
func (c *OSContextClient) Use(hooks ...Hook) {
    c.hooks.OSContext = append(c.hooks.OSContext, hooks...)
}

// Create returns a create builder for OSContext.
func (c *OSContextClient) Create() *OSContextCreate {
    mutation := newOSContextMutation(c.config, OpCreate)
    return &OSContextCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of OSContext entities.
func (c *OSContextClient) CreateBulk(builders ...*OSContextCreate) *OSContextCreateBulk {
    return &OSContextCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for OSContext.
func (c *OSContextClient) Update() *OSContextUpdate {
    mutation := newOSContextMutation(c.config, OpUpdate)
    return &OSContextUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *OSContextClient) UpdateOne(oc *OSContext) *OSContextUpdateOne {
    mutation := newOSContextMutation(c.config, OpUpdateOne, withOSContext(oc))
    return &OSContextUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *OSContextClient) UpdateOneID(id int) *OSContextUpdateOne {
    mutation := newOSContextMutation(c.config, OpUpdateOne, withOSContextID(id))
    return &OSContextUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for OSContext.
func (c *OSContextClient) Delete() *OSContextDelete {
    mutation := newOSContextMutation(c.config, OpDelete)
    return &OSContextDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *OSContextClient) DeleteOne(oc *OSContext) *OSContextDeleteOne {
    return c.DeleteOneID(oc.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *OSContextClient) DeleteOneID(id int) *OSContextDeleteOne {
    builder := c.Delete().Where(oscontext.ID(id))
    // Narrow the bulk-delete mutation to a single-row delete-one op.
    builder.mutation.id = &id
    builder.mutation.op = OpDeleteOne
    return &OSContextDeleteOne{builder}
}

// Query returns a query builder for OSContext.
func (c *OSContextClient) Query() *OSContextQuery {
    return &OSContextQuery{config: c.config}
}

// Get returns an OSContext entity by its id.
func (c *OSContextClient) Get(ctx context.Context, id int) (*OSContext, error) {
    return c.Query().Where(oscontext.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *OSContextClient) GetX(ctx context.Context, id int) *OSContext {
    oc, err := c.Get(ctx, id)
    if err != nil {
        panic(err)
    }
    return oc
}

// QueryEvents queries the events edge of an OSContext.
// The traversal step is built lazily when the returned query executes.
func (c *OSContextClient) QueryEvents(oc *OSContext) *EventQuery {
    query := &EventQuery{config: c.config}
    query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
        id := oc.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(oscontext.Table, oscontext.FieldID, id),
            sqlgraph.To(event.Table, event.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, true, oscontext.EventsTable, oscontext.EventsColumn),
        )
        fromV = sqlgraph.Neighbors(oc.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// Hooks returns the client hooks registered for OSContext mutations.
func (c *OSContextClient) Hooks() []Hook {
    return c.hooks.OSContext
}
// PageClient is a client for the Page schema.
// NOTE(review): entc-generated code — prefer regenerating over hand-editing.
type PageClient struct {
    // Embedded shared configuration (driver, dialect, hooks).
    config
}

// NewPageClient returns a client for the Page from the given config.
func NewPageClient(c config) *PageClient {
    return &PageClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `page.Hooks(f(g(h())))`.
func (c *PageClient) Use(hooks ...Hook) {
    c.hooks.Page = append(c.hooks.Page, hooks...)
}

// Create returns a create builder for Page.
func (c *PageClient) Create() *PageCreate {
    mutation := newPageMutation(c.config, OpCreate)
    return &PageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Page entities.
func (c *PageClient) CreateBulk(builders ...*PageCreate) *PageCreateBulk {
    return &PageCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Page.
func (c *PageClient) Update() *PageUpdate {
    mutation := newPageMutation(c.config, OpUpdate)
    return &PageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *PageClient) UpdateOne(pa *Page) *PageUpdateOne {
    mutation := newPageMutation(c.config, OpUpdateOne, withPage(pa))
    return &PageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *PageClient) UpdateOneID(id int) *PageUpdateOne {
    mutation := newPageMutation(c.config, OpUpdateOne, withPageID(id))
    return &PageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Page.
func (c *PageClient) Delete() *PageDelete {
    mutation := newPageMutation(c.config, OpDelete)
    return &PageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *PageClient) DeleteOne(pa *Page) *PageDeleteOne {
    return c.DeleteOneID(pa.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *PageClient) DeleteOneID(id int) *PageDeleteOne {
    builder := c.Delete().Where(page.ID(id))
    // Narrow the bulk-delete mutation to a single-row delete-one op.
    builder.mutation.id = &id
    builder.mutation.op = OpDeleteOne
    return &PageDeleteOne{builder}
}

// Query returns a query builder for Page.
func (c *PageClient) Query() *PageQuery {
    return &PageQuery{config: c.config}
}

// Get returns a Page entity by its id.
func (c *PageClient) Get(ctx context.Context, id int) (*Page, error) {
    return c.Query().Where(page.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *PageClient) GetX(ctx context.Context, id int) *Page {
    pa, err := c.Get(ctx, id)
    if err != nil {
        panic(err)
    }
    return pa
}

// QueryEvents queries the events edge of a Page.
// The traversal step is built lazily when the returned query executes.
func (c *PageClient) QueryEvents(pa *Page) *EventQuery {
    query := &EventQuery{config: c.config}
    query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
        id := pa.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(page.Table, page.FieldID, id),
            sqlgraph.To(event.Table, event.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, true, page.EventsTable, page.EventsColumn),
        )
        fromV = sqlgraph.Neighbors(pa.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// Hooks returns the client hooks registered for Page mutations.
func (c *PageClient) Hooks() []Hook {
    return c.hooks.Page
}
// ReferrerClient is a client for the Referrer schema.
// NOTE(review): entc-generated code — prefer regenerating over hand-editing.
type ReferrerClient struct {
    // Embedded shared configuration (driver, dialect, hooks).
    config
}

// NewReferrerClient returns a client for the Referrer from the given config.
func NewReferrerClient(c config) *ReferrerClient {
    return &ReferrerClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `referrer.Hooks(f(g(h())))`.
func (c *ReferrerClient) Use(hooks ...Hook) {
    c.hooks.Referrer = append(c.hooks.Referrer, hooks...)
}

// Create returns a create builder for Referrer.
func (c *ReferrerClient) Create() *ReferrerCreate {
    mutation := newReferrerMutation(c.config, OpCreate)
    return &ReferrerCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Referrer entities.
func (c *ReferrerClient) CreateBulk(builders ...*ReferrerCreate) *ReferrerCreateBulk {
    return &ReferrerCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Referrer.
func (c *ReferrerClient) Update() *ReferrerUpdate {
    mutation := newReferrerMutation(c.config, OpUpdate)
    return &ReferrerUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *ReferrerClient) UpdateOne(r *Referrer) *ReferrerUpdateOne {
    mutation := newReferrerMutation(c.config, OpUpdateOne, withReferrer(r))
    return &ReferrerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *ReferrerClient) UpdateOneID(id int) *ReferrerUpdateOne {
    mutation := newReferrerMutation(c.config, OpUpdateOne, withReferrerID(id))
    return &ReferrerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Referrer.
func (c *ReferrerClient) Delete() *ReferrerDelete {
    mutation := newReferrerMutation(c.config, OpDelete)
    return &ReferrerDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *ReferrerClient) DeleteOne(r *Referrer) *ReferrerDeleteOne {
    return c.DeleteOneID(r.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *ReferrerClient) DeleteOneID(id int) *ReferrerDeleteOne {
    builder := c.Delete().Where(referrer.ID(id))
    // Narrow the bulk-delete mutation to a single-row delete-one op.
    builder.mutation.id = &id
    builder.mutation.op = OpDeleteOne
    return &ReferrerDeleteOne{builder}
}

// Query returns a query builder for Referrer.
func (c *ReferrerClient) Query() *ReferrerQuery {
    return &ReferrerQuery{config: c.config}
}

// Get returns a Referrer entity by its id.
func (c *ReferrerClient) Get(ctx context.Context, id int) (*Referrer, error) {
    return c.Query().Where(referrer.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *ReferrerClient) GetX(ctx context.Context, id int) *Referrer {
    r, err := c.Get(ctx, id)
    if err != nil {
        panic(err)
    }
    return r
}

// QueryEvents queries the events edge of a Referrer.
// The traversal step is built lazily when the returned query executes.
func (c *ReferrerClient) QueryEvents(r *Referrer) *EventQuery {
    query := &EventQuery{config: c.config}
    query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
        id := r.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(referrer.Table, referrer.FieldID, id),
            sqlgraph.To(event.Table, event.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, true, referrer.EventsTable, referrer.EventsColumn),
        )
        fromV = sqlgraph.Neighbors(r.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// Hooks returns the client hooks registered for Referrer mutations.
func (c *ReferrerClient) Hooks() []Hook {
    return c.hooks.Referrer
}
// ScreenClient is a client for the Screen schema.
// NOTE(review): entc-generated code — prefer regenerating over hand-editing.
type ScreenClient struct {
    // Embedded shared configuration (driver, dialect, hooks).
    config
}

// NewScreenClient returns a client for the Screen from the given config.
func NewScreenClient(c config) *ScreenClient {
    return &ScreenClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `screen.Hooks(f(g(h())))`.
func (c *ScreenClient) Use(hooks ...Hook) {
    c.hooks.Screen = append(c.hooks.Screen, hooks...)
}

// Create returns a create builder for Screen.
func (c *ScreenClient) Create() *ScreenCreate {
    mutation := newScreenMutation(c.config, OpCreate)
    return &ScreenCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of Screen entities.
func (c *ScreenClient) CreateBulk(builders ...*ScreenCreate) *ScreenCreateBulk {
    return &ScreenCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Screen.
func (c *ScreenClient) Update() *ScreenUpdate {
    mutation := newScreenMutation(c.config, OpUpdate)
    return &ScreenUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *ScreenClient) UpdateOne(s *Screen) *ScreenUpdateOne {
    mutation := newScreenMutation(c.config, OpUpdateOne, withScreen(s))
    return &ScreenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *ScreenClient) UpdateOneID(id int) *ScreenUpdateOne {
    mutation := newScreenMutation(c.config, OpUpdateOne, withScreenID(id))
    return &ScreenUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Screen.
func (c *ScreenClient) Delete() *ScreenDelete {
    mutation := newScreenMutation(c.config, OpDelete)
    return &ScreenDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *ScreenClient) DeleteOne(s *Screen) *ScreenDeleteOne {
    return c.DeleteOneID(s.ID)
}
// DeleteOneID returns a delete builder for the given id.
func (c *ScreenClient) DeleteOneID(id int) *ScreenDeleteOne {
builder := c.Delete().Where(screen.ID(id))
builder.mutation.id = &id
builder.mutation.op = OpDeleteOne
return &ScreenDeleteOne{builder}
}
// Query returns a query builder for Screen.
func (c *ScreenClient) Query() *ScreenQuery {
return &ScreenQuery{config: c.config}
}
// Get returns a Screen entity by its id.
func (c *ScreenClient) Get(ctx context.Context, id int) (*Screen, error) {
return c.Query().Where(screen.ID(id)).Only(ctx)
}
// GetX is like Get, but panics if an error occurs.
func (c *ScreenClient) GetX(ctx context.Context, id int) *Screen {
s, err := c.Get(ctx, id)
if err != nil {
panic(err)
}
return s
}
// QueryEvents queries the events edge of a Screen.
func (c *ScreenClient) QueryEvents(s *Screen) *EventQuery {
query := &EventQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
id := s.ID
step := sqlgraph.NewStep(
sqlgraph.From(screen.Table, screen.FieldID, id),
sqlgraph.To(event.Table, event.FieldID),
sqlgraph.Edge(sqlgraph.O2M, true, screen.EventsTable, screen.EventsColumn),
)
fromV = sqlgraph.Neighbors(s.driver.Dialect(), step)
return fromV, nil
}
return query
}
// Hooks returns the client hooks.
func (c *ScreenClient) Hooks() []Hook {
return c.hooks.Screen
}
// NOTE(review): generated ent client code — regenerate from the schema
// definitions rather than editing by hand. Session ids are uuid.UUID.

// SessionClient is a client for the Session schema.
type SessionClient struct {
	config
}

// NewSessionClient returns a client for the Session from the given config.
func NewSessionClient(c config) *SessionClient {
	return &SessionClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `session.Hooks(f(g(h())))`.
func (c *SessionClient) Use(hooks ...Hook) {
	c.hooks.Session = append(c.hooks.Session, hooks...)
}

// Create returns a create builder for Session.
func (c *SessionClient) Create() *SessionCreate {
	mutation := newSessionMutation(c.config, OpCreate)
	return &SessionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// BulkCreate returns a builder for creating a bulk of Session entities.
func (c *SessionClient) CreateBulk(builders ...*SessionCreate) *SessionCreateBulk {
	return &SessionCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Session.
func (c *SessionClient) Update() *SessionUpdate {
	mutation := newSessionMutation(c.config, OpUpdate)
	return &SessionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *SessionClient) UpdateOne(s *Session) *SessionUpdateOne {
	mutation := newSessionMutation(c.config, OpUpdateOne, withSession(s))
	return &SessionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *SessionClient) UpdateOneID(id uuid.UUID) *SessionUpdateOne {
	mutation := newSessionMutation(c.config, OpUpdateOne, withSessionID(id))
	return &SessionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Session.
func (c *SessionClient) Delete() *SessionDelete {
	mutation := newSessionMutation(c.config, OpDelete)
	return &SessionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *SessionClient) DeleteOne(s *Session) *SessionDeleteOne {
	return c.DeleteOneID(s.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *SessionClient) DeleteOneID(id uuid.UUID) *SessionDeleteOne {
	builder := c.Delete().Where(session.ID(id))
	// Pin the id and op on the underlying mutation so the delete
	// targets exactly one row.
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &SessionDeleteOne{builder}
}

// Query returns a query builder for Session.
func (c *SessionClient) Query() *SessionQuery {
	return &SessionQuery{config: c.config}
}

// Get returns a Session entity by its id.
func (c *SessionClient) Get(ctx context.Context, id uuid.UUID) (*Session, error) {
	return c.Query().Where(session.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *SessionClient) GetX(ctx context.Context, id uuid.UUID) *Session {
	s, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return s
}

// QueryEvents queries the events edge of a Session.
func (c *SessionClient) QueryEvents(s *Session) *EventQuery {
	query := &EventQuery{config: c.config}
	// The graph step is resolved lazily, when the query executes.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := s.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(session.Table, session.FieldID, id),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, true, session.EventsTable, session.EventsColumn),
		)
		fromV = sqlgraph.Neighbors(s.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// Hooks returns the client hooks.
func (c *SessionClient) Hooks() []Hook {
	return c.hooks.Session
}
// NOTE(review): generated ent client code — regenerate from the schema
// definitions rather than editing by hand.

// TimingClient is a client for the Timing schema.
type TimingClient struct {
	config
}

// NewTimingClient returns a client for the Timing from the given config.
func NewTimingClient(c config) *TimingClient {
	return &TimingClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `timing.Hooks(f(g(h())))`.
func (c *TimingClient) Use(hooks ...Hook) {
	c.hooks.Timing = append(c.hooks.Timing, hooks...)
}

// Create returns a create builder for Timing.
func (c *TimingClient) Create() *TimingCreate {
	mutation := newTimingMutation(c.config, OpCreate)
	return &TimingCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// BulkCreate returns a builder for creating a bulk of Timing entities.
func (c *TimingClient) CreateBulk(builders ...*TimingCreate) *TimingCreateBulk {
	return &TimingCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Timing.
func (c *TimingClient) Update() *TimingUpdate {
	mutation := newTimingMutation(c.config, OpUpdate)
	return &TimingUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *TimingClient) UpdateOne(t *Timing) *TimingUpdateOne {
	mutation := newTimingMutation(c.config, OpUpdateOne, withTiming(t))
	return &TimingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *TimingClient) UpdateOneID(id int) *TimingUpdateOne {
	mutation := newTimingMutation(c.config, OpUpdateOne, withTimingID(id))
	return &TimingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Timing.
func (c *TimingClient) Delete() *TimingDelete {
	mutation := newTimingMutation(c.config, OpDelete)
	return &TimingDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *TimingClient) DeleteOne(t *Timing) *TimingDeleteOne {
	return c.DeleteOneID(t.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *TimingClient) DeleteOneID(id int) *TimingDeleteOne {
	builder := c.Delete().Where(timing.ID(id))
	// Pin the id and op on the underlying mutation so the delete
	// targets exactly one row.
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &TimingDeleteOne{builder}
}

// Query returns a query builder for Timing.
func (c *TimingClient) Query() *TimingQuery {
	return &TimingQuery{config: c.config}
}

// Get returns a Timing entity by its id.
func (c *TimingClient) Get(ctx context.Context, id int) (*Timing, error) {
	return c.Query().Where(timing.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *TimingClient) GetX(ctx context.Context, id int) *Timing {
	t, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return t
}

// QueryEvents queries the events edge of a Timing.
func (c *TimingClient) QueryEvents(t *Timing) *EventQuery {
	query := &EventQuery{config: c.config}
	// The graph step is resolved lazily, when the query executes.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := t.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(timing.Table, timing.FieldID, id),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, true, timing.EventsTable, timing.EventsColumn),
		)
		fromV = sqlgraph.Neighbors(t.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// Hooks returns the client hooks.
func (c *TimingClient) Hooks() []Hook {
	return c.hooks.Timing
}
// NOTE(review): generated ent client code — regenerate from the schema
// definitions rather than editing by hand. User ids are strings.

// UserClient is a client for the User schema.
type UserClient struct {
	config
}

// NewUserClient returns a client for the User from the given config.
func NewUserClient(c config) *UserClient {
	return &UserClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `user.Hooks(f(g(h())))`.
func (c *UserClient) Use(hooks ...Hook) {
	c.hooks.User = append(c.hooks.User, hooks...)
}

// Create returns a create builder for User.
func (c *UserClient) Create() *UserCreate {
	mutation := newUserMutation(c.config, OpCreate)
	return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// BulkCreate returns a builder for creating a bulk of User entities.
func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk {
	return &UserCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for User.
func (c *UserClient) Update() *UserUpdate {
	mutation := newUserMutation(c.config, OpUpdate)
	return &UserUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *UserClient) UpdateOne(u *User) *UserUpdateOne {
	mutation := newUserMutation(c.config, OpUpdateOne, withUser(u))
	return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *UserClient) UpdateOneID(id string) *UserUpdateOne {
	mutation := newUserMutation(c.config, OpUpdateOne, withUserID(id))
	return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for User.
func (c *UserClient) Delete() *UserDelete {
	mutation := newUserMutation(c.config, OpDelete)
	return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *UserClient) DeleteOne(u *User) *UserDeleteOne {
	return c.DeleteOneID(u.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *UserClient) DeleteOneID(id string) *UserDeleteOne {
	builder := c.Delete().Where(user.ID(id))
	// Pin the id and op on the underlying mutation so the delete
	// targets exactly one row.
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &UserDeleteOne{builder}
}

// Query returns a query builder for User.
func (c *UserClient) Query() *UserQuery {
	return &UserQuery{config: c.config}
}

// Get returns a User entity by its id.
func (c *UserClient) Get(ctx context.Context, id string) (*User, error) {
	return c.Query().Where(user.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *UserClient) GetX(ctx context.Context, id string) *User {
	u, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return u
}

// QueryAliases queries the aliases edge of a User.
func (c *UserClient) QueryAliases(u *User) *AliasQuery {
	query := &AliasQuery{config: c.config}
	// The graph step is resolved lazily, when the query executes.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := u.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(user.Table, user.FieldID, id),
			sqlgraph.To(alias.Table, alias.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, false, user.AliasesTable, user.AliasesColumn),
		)
		fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryEvents queries the events edge of a User.
func (c *UserClient) QueryEvents(u *User) *EventQuery {
	query := &EventQuery{config: c.config}
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := u.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(user.Table, user.FieldID, id),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, true, user.EventsTable, user.EventsColumn),
		)
		fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// QueryGroups queries the groups edge of a User.
func (c *UserClient) QueryGroups(u *User) *GroupQuery {
	query := &GroupQuery{config: c.config}
	// Many-to-many edge: traversal goes through the join table's
	// composite primary key.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := u.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(user.Table, user.FieldID, id),
			sqlgraph.To(group.Table, group.FieldID),
			sqlgraph.Edge(sqlgraph.M2M, true, user.GroupsTable, user.GroupsPrimaryKey...),
		)
		fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// Hooks returns the client hooks.
func (c *UserClient) Hooks() []Hook {
	return c.hooks.User
}
// NOTE(review): generated ent client code — regenerate from the schema
// definitions rather than editing by hand.

// ViewportClient is a client for the Viewport schema.
type ViewportClient struct {
	config
}

// NewViewportClient returns a client for the Viewport from the given config.
func NewViewportClient(c config) *ViewportClient {
	return &ViewportClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `viewport.Hooks(f(g(h())))`.
func (c *ViewportClient) Use(hooks ...Hook) {
	c.hooks.Viewport = append(c.hooks.Viewport, hooks...)
}

// Create returns a create builder for Viewport.
func (c *ViewportClient) Create() *ViewportCreate {
	mutation := newViewportMutation(c.config, OpCreate)
	return &ViewportCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// BulkCreate returns a builder for creating a bulk of Viewport entities.
func (c *ViewportClient) CreateBulk(builders ...*ViewportCreate) *ViewportCreateBulk {
	return &ViewportCreateBulk{config: c.config, builders: builders}
}

// Update returns an update builder for Viewport.
func (c *ViewportClient) Update() *ViewportUpdate {
	mutation := newViewportMutation(c.config, OpUpdate)
	return &ViewportUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *ViewportClient) UpdateOne(v *Viewport) *ViewportUpdateOne {
	mutation := newViewportMutation(c.config, OpUpdateOne, withViewport(v))
	return &ViewportUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *ViewportClient) UpdateOneID(id int) *ViewportUpdateOne {
	mutation := newViewportMutation(c.config, OpUpdateOne, withViewportID(id))
	return &ViewportUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Viewport.
func (c *ViewportClient) Delete() *ViewportDelete {
	mutation := newViewportMutation(c.config, OpDelete)
	return &ViewportDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a delete builder for the given entity.
func (c *ViewportClient) DeleteOne(v *Viewport) *ViewportDeleteOne {
	return c.DeleteOneID(v.ID)
}

// DeleteOneID returns a delete builder for the given id.
func (c *ViewportClient) DeleteOneID(id int) *ViewportDeleteOne {
	builder := c.Delete().Where(viewport.ID(id))
	// Pin the id and op on the underlying mutation so the delete
	// targets exactly one row.
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &ViewportDeleteOne{builder}
}

// Query returns a query builder for Viewport.
func (c *ViewportClient) Query() *ViewportQuery {
	return &ViewportQuery{config: c.config}
}

// Get returns a Viewport entity by its id.
func (c *ViewportClient) Get(ctx context.Context, id int) (*Viewport, error) {
	return c.Query().Where(viewport.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *ViewportClient) GetX(ctx context.Context, id int) *Viewport {
	v, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return v
}

// QueryEvents queries the events edge of a Viewport.
func (c *ViewportClient) QueryEvents(v *Viewport) *EventQuery {
	query := &EventQuery{config: c.config}
	// The graph step is resolved lazily, when the query executes.
	query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
		id := v.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(viewport.Table, viewport.FieldID, id),
			sqlgraph.To(event.Table, event.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, true, viewport.EventsTable, viewport.EventsColumn),
		)
		fromV = sqlgraph.Neighbors(v.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}

// Hooks returns the client hooks.
func (c *ViewportClient) Hooks() []Hook {
	return c.hooks.Viewport
}
|
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script will set up and run rsyncd to allow data to move into and out of
# our dockerized build system. This is used for syncing sources and changes of
# sources into the docker-build-container. It is also used to transfer built binaries
# and generated files back out.
#
# When run as root (rare) it'll preserve the file ids as sent from the client.
# Usually it'll be run as non-dockerized UID/GID and end up translating all file
# ownership to that.
# Fail fast: abort on any error, on use of unset variables, and on
# failures anywhere inside a pipeline.
set -o errexit
set -o nounset
set -o pipefail
# The directory that gets sync'd
VOLUME=${HOME}
# Assume that this is running in Docker on a bridge. Allow connections from
# anything on the local subnet.
ALLOW=$(ip route | awk '/^default via/ { reg = "^[0-9./]+ dev "$5 } ; $0 ~ reg { print $1 }')
CONFDIR="/tmp/rsync.k8s"
PIDFILE="${CONFDIR}/rsyncd.pid"
CONFFILE="${CONFDIR}/rsyncd.conf"
SECRETS="${CONFDIR}/rsyncd.secrets"
mkdir -p "${CONFDIR}"
# A previous daemon may still be running (and holding the port);
# kill it before writing a fresh config.
if [[ -f "${PIDFILE}" ]]; then
PID=$(cat "${PIDFILE}")
echo "Cleaning up old PID file: ${PIDFILE}"
kill $PID &> /dev/null || true
rm "${PIDFILE}"
fi
# The rsync password is expected to be mounted at /rsyncd.password.
PASSWORD=$(</rsyncd.password)
cat <<EOF >"${SECRETS}"
k8s:${PASSWORD}
EOF
# rsyncd refuses to start if the secrets file is group/world readable.
chmod go= "${SECRETS}"
USER_CONFIG=
if [[ "$(id -u)" == "0" ]]; then
USER_CONFIG=" uid = 0"$'\n'" gid = 0"
fi
# ALLOW_HOST may optionally be set by the caller to permit one extra host.
cat <<EOF >"${CONFFILE}"
pid file = ${PIDFILE}
use chroot = no
log file = /dev/stdout
reverse lookup = no
munge symlinks = no
port = 8730
[k8s]
numeric ids = true
$USER_CONFIG
hosts deny = *
hosts allow = ${ALLOW} ${ALLOW_HOST-}
auth users = k8s
secrets file = ${SECRETS}
read only = false
path = ${VOLUME}
filter = - /.make/ - /_tmp/
EOF
# Replace this shell with the daemon so signals reach rsync directly.
exec /usr/bin/rsync --no-detach --daemon --config="${CONFFILE}" "$@"
|
<reponame>amukherj/concurrencyts
#ifndef CONCURRENCYTS_H
#define CONCURRENCYTS_H

// Umbrella header: pulls in all concurrency TS primitives provided by
// this library.
#include "future.h"
#include "latch.h"
#include "barrier.h"

// NOTE(review): adding declarations to namespace std (including
// std::experimental) is undefined behavior per [namespace.std] in the
// C++ standard. This alias presumably exists so user code written
// against std::experimental compiles unchanged — confirm it is
// intentional before relying on it.
namespace std { namespace experimental {
using namespace concurrencyts;
}}

#endif /* CONCURRENCYTS_H */
|
<filename>models/masters/Village.js
/**
 * Create the `Village` master table if it does not already exist.
 *
 * @param {object} db - Database handle exposing `query(sql, callback)`
 *                      (assumed mysql/mysql2-style API — TODO confirm).
 * @throws Re-throws the driver error from inside the query callback when
 *         table creation fails (note: this throw happens asynchronously).
 */
const run = (db) => {
  console.log("Creating Village Table...");
  // `const run =` replaces the original bare `run =`, which created an
  // implicit global.
  const crquery = "CREATE TABLE IF NOT EXISTS `Village` (`village_id` VARCHAR(10) NOT NULL,`village_name` VARCHAR(30) NOT NULL,`taluka_id` VARCHAR(10) NOT NULL, PRIMARY KEY (`village_id`));";
  db.query(crquery, (err, result) => {
    if (err) {
      console.log("Error while creating Village Table!!!");
      throw err;
    }
    console.log("Village Table Successfully Created...");
  });
};
// Expose the table-creation routine so migration callers can invoke it
// with their own db handle.
module.exports = run;
<filename>src/app/Component/mixing-deck/journey-detail/journey-detail.component.ts
import { Component, OnInit, Inject } from '@angular/core';
import { MatTableDataSource } from '@angular/material/table';
import { SearchSolutionService } from 'src/app/Services/search-solution.service';
import { MAT_DIALOG_DATA, MatDialog,MatDialogRef } from '@angular/material/dialog';
import {RouteDetailsRequest} from 'src/app/models/journey/route-details-request';
import { RouteDetailsResponse, CallPoints } from 'src/app/models/journey/route-details-response';
import { ResponseData } from 'src/app/models/common/response';
import { SessionExpiredComponent } from 'src/app/Component/session-expired/session-expired.component';
import { NgxSpinnerService } from 'ngx-spinner';
import { AlertMessage } from 'src/app/models/common/alert-message';
import { StorageDataService } from 'src/app/Services/SharedCache.service';
import { SharedService } from 'src/app/Services/shared.service';
@Component({
  selector: 'app-journey-detail',
  templateUrl: './journey-detail.component.html',
  styleUrls: ['./journey-detail.component.css']
})
/**
 * Dialog component showing the detailed route (calling points) for a
 * selected travel solution. Opened with MAT_DIALOG_DATA carrying the
 * travel-solution identifiers plus the change count and duration.
 */
export class JourneyDetailComponent implements OnInit {
  // Journey summary passed in via dialog data.
  changes: number;
  duration: string;
  // Request/response models for the route-details lookup.
  routeDetailsRequest: RouteDetailsRequest;
  routeDetailsResponse: RouteDetailsResponse;
  responseData: ResponseData;
  traveLChanges: RouteDetailsResponse[];
  showCallPoints: CallPoints[];
  AlertMessageBody: AlertMessage;
  // Error state surfaced to the template when route details are unavailable.
  IsErrorinResponse: boolean = false;
  ErrorMessage: string;
  SourceStation: string;
  DestinationStation: string;

  constructor(public sharedStorage: StorageDataService, public spinnerService: NgxSpinnerService, public dialog: MatDialog, private searchSolutionService: SearchSolutionService,
    @Inject(MAT_DIALOG_DATA) public data: any, public dialogRef: MatDialogRef<JourneyDetailComponent>,
    public sharedService: SharedService) {
    this.routeDetailsRequest = new RouteDetailsRequest;
    this.AlertMessageBody = new AlertMessage();
  }

  /** Build the route-details request from the dialog data and fetch it. */
  ngOnInit() {
    this.routeDetailsRequest.TravelSolutionCode = this.data.TravelSolutionCode;
    this.routeDetailsRequest.TravelSolutionId = this.data.TravelSolutionId;
    this.routeDetailsRequest.TravelSolutionServiceId = this.data.TravelSolutionServiceId;
    this.changes = this.data.Changes;
    this.duration = this.data.Duration;
    this.getRouteDetailsData();
  }

  /**
   * Fetch route details for the current travel solution. On success the
   * response is stored for the template; on an empty or faulty payload an
   * error message is shown, the session is flagged expired and the shared
   * state is re-persisted. (Removed a large block of commented-out dead
   * code that previously lived here.)
   */
  getRouteDetailsData() {
    this.spinnerService.show();
    this.searchSolutionService.getRouteDetails(this.routeDetailsRequest).subscribe(
      res => {
        if (res != null) {
          this.responseData = res as ResponseData;
          if (this.responseData.responseCode == '200') {
            localStorage.setItem('sessioncode', this.responseData.sessionCode);
            if (this.responseData.data.length == 0 || this.responseData.data[0].errorDetails != null || this.responseData.data[0].callingPoints.length == 0) {
              this.IsErrorinResponse = true;
              this.ErrorMessage = 'Route details are not available right now, Please try later';
              if (this.sharedStorage.getStorageData('storedAllServices', true) != null) {
                this.sharedService = this.sharedStorage.getStorageData('storedAllServices', true);
                this.SourceStation = this.sharedService.SourceStation;
                this.DestinationStation = this.sharedService.DestinationStation;
              }
              this.sharedService.IsSessionExpired = true;
              this.sharedStorage.clearStorageData("storedAllServices");
              this.sharedStorage.setStorageData("storedAllServices", this.sharedService, true);
            }
            this.routeDetailsResponse = this.responseData.data as RouteDetailsResponse;
            this.traveLChanges = this.responseData.data;
            this.spinnerService.hide();
          }
          else {
            // NOTE(review): `ResponseMessage` casing differs from the
            // lower-camel `responseCode`/`sessionCode` used above — confirm
            // the property name on ResponseData.
            console.log(this.responseData.ResponseMessage);
            // Fix: the spinner previously stayed up forever on error responses.
            this.spinnerService.hide();
          }
        }
        else {
          // Fix: the spinner previously stayed up forever on null responses.
          this.spinnerService.hide();
        }
      });
  }

  /** Toggle display of intermediate calling points for one leg. */
  onChangeCallingPoints(checked, change) {
    change.callingPoints.showCallingPoints = !!checked;
  }
}
|
//Windows stuff.
#define WINVER 0x0500
#define WIN32_WINNT 0x0500
#include <windows.h>
#include <cassert>
#include <cstdarg>
#include <algorithm>
// SDK
#include "scssdk_telemetry.h"
#include "eurotrucks2/scssdk_eut2.h"
#include "eurotrucks2/scssdk_telemetry_eut2.h"
#include "amtrucks/scssdk_ats.h"
#include "amtrucks/scssdk_telemetry_ats.h"
// Plug-in
#include "scs-telemetry-common.hpp"
#include "sharedmemory.hpp"
#include "scs_config_handlers.hpp"
#include <log.hpp>
#define UNUSED(x)
/**
 * These macros are shortcuts for registering channels inside the scs_telemetry_init function.
* They require the channel definition name (without prefix SCS_TELEMETRY_), type and destination.
* Not all channel types are implemented; the handler function for a type should be created like so:
* telemetry_store_[Type](const scs_string_t name, const scs_u32_t index, const scs_value_t *const value, const scs_context_t context)
*
* RegisterSpecificChannel allows for your own handler name, without the telemetry_store_ prefix.
*/
#define REGISTER_CHANNEL(name, type, to) version_params->register_for_channel(SCS_TELEMETRY_##name, SCS_U32_NIL, SCS_VALUE_TYPE_##type, SCS_TELEMETRY_CHANNEL_FLAG_no_value, telemetry_store_##type, &( to ));
#define REGISTER_CHANNEL_INDEX(name, type, to, index) version_params->register_for_channel(SCS_TELEMETRY_##name, index, SCS_VALUE_TYPE_##type, SCS_TELEMETRY_CHANNEL_FLAG_no_value, telemetry_store_##type, &( to ));
#define REGISTER_SPECIFIC_CHANNEL(name, type, handler,to) version_params->register_for_channel(SCS_TELEMETRY_##name, SCS_U32_NIL, SCS_VALUE_TYPE_##type, SCS_TELEMETRY_CHANNEL_FLAG_no_value, handler, &( to ));
// Handle to the shared-memory mapping that backs the telemetry data.
SharedMemory* telem_mem;
// Typed view of the mapped region; all telemetry writes go through this.
scsTelemetryMap_t* telem_ptr;

// const: scs_mmf_name
// Name/Location of the Shared Memory
const wchar_t* scs_mmf_name = SCS_PLUGIN_MMF_NAME;

// ptr: game_log
// Used to write to the game log; nullptr until the game provides it.
scs_log_t game_log = nullptr;
// About: Game log
//
// - Use function log_line(const scs_log_type_t type, const char*const text,...) to write to the in-game console log with the chosen message type
// - or use log_line(const char*const text, ...) to write to the in game console log with error type (more for debugging purpose)
// use for values
// char buff[100];
// snprintf(buff, sizeof(buff), "%f", value->value_dplacement.position.x);
// log_line(SCS_LOG_TYPE_warning, buff);
// Function: log_line_va
// Shared implementation for both log_line overloads: formats `text` with
// `args` into a bounded buffer and forwards it to the in-game console log
// with the given message type. No-op until the game hands us its log
// callback (game_log).
static void log_line_va(const scs_log_type_t type, const char* const text, va_list args) {
    if (!game_log) {
        return;
    }
    char formated[1000];
    vsnprintf_s(formated, sizeof formated, _TRUNCATE, text, args);
    formated[sizeof formated - 1] = 0; // belt-and-braces NUL termination
    game_log(type, formated);
}

// Function: log_line
// Used to write to the in game console log
void log_line(const scs_log_type_t type, const char* const text, ...) {
    va_list args;
    va_start(args, text);
    log_line_va(type, text, args);
    va_end(args);
}

// Function: log_line
// Used to write to the in game console log as error (debugging)
void log_line(const char* const text, ...) {
    va_list args;
    va_start(args, text);
    log_line_va(SCS_LOG_TYPE_error, text, args);
    va_end(args);
}
// Function: log_configs
// Prints every configuration event that appears to the in-game log.
// Careful: this produces a lot of log lines, so fast-changing sections can become unreadable in the log window.
// Dumps one configuration event: the config id followed by one line per
// attribute, dispatched on the attribute's value type.
void log_configs(const scs_telemetry_configuration_t* info) {
    log_line("Configuration: %s", info->id);
    // The attribute array is terminated by an entry with a null name.
    for (auto current = info->attributes; current->name; ++current) {
        if (current->index != SCS_U32_NIL) {
            // Index logging is intentionally disabled (too noisy);
            // re-enable the line below if per-index detail is needed.
            // log_line("[%u]", static_cast<unsigned>(current->index));
        }
        switch (current->value.type) {
        case SCS_VALUE_TYPE_INVALID: {
            log_line(" %s none", current->name);
            break;
        }
        case SCS_VALUE_TYPE_bool: {
            log_line(" %s bool = %s", current->name, current->value.value_bool.value ? "true" : "false");
            break;
        }
        case SCS_VALUE_TYPE_s32: {
            log_line(" %s s32 = %d", current->name, static_cast<int>(current->value.value_s32.value));
            break;
        }
        case SCS_VALUE_TYPE_u32: {
            log_line(" %s u32 = %u", current->name, static_cast<unsigned>(current->value.value_u32.value));
            break;
        }
        case SCS_VALUE_TYPE_u64: {
            log_line(" %s u64 = %" SCS_PF_U64, current->name, current->value.value_u64.value);
            break;
        }
        case SCS_VALUE_TYPE_float: {
            log_line(" %s float = %f", current->name, current->value.value_float.value);
            break;
        }
        case SCS_VALUE_TYPE_double: {
            log_line(" %s double = %f", current->name, current->value.value_double.value);
            break;
        }
        case SCS_VALUE_TYPE_fvector: {
            log_line(
                " %s fvector = (%f,%f,%f)", current->name,
                current->value.value_fvector.x,
                current->value.value_fvector.y,
                current->value.value_fvector.z
            );
            break;
        }
        case SCS_VALUE_TYPE_dvector: {
            log_line(
                " %s dvector = (%f,%f,%f)", current->name,
                current->value.value_dvector.x,
                current->value.value_dvector.y,
                current->value.value_dvector.z
            );
            break;
        }
        case SCS_VALUE_TYPE_euler: {
            // Orientations arrive normalized to [0,1); scale to degrees.
            log_line(
                " %s euler = h:%f p:%f r:%f", current->name,
                current->value.value_euler.heading * 360.0f,
                current->value.value_euler.pitch * 360.0f,
                current->value.value_euler.roll * 360.0f
            );
            break;
        }
        case SCS_VALUE_TYPE_fplacement: {
            log_line(
                " %s fplacement = (%f,%f,%f) h:%f p:%f r:%f", current->name,
                current->value.value_fplacement.position.x,
                current->value.value_fplacement.position.y,
                current->value.value_fplacement.position.z,
                current->value.value_fplacement.orientation.heading * 360.0f,
                current->value.value_fplacement.orientation.pitch * 360.0f,
                current->value.value_fplacement.orientation.roll * 360.0f
            );
            break;
        }
        case SCS_VALUE_TYPE_dplacement: {
            log_line(
                " %s dplacement = (%f,%f,%f) h:%f p:%f r:%f", current->name,
                current->value.value_dplacement.position.x,
                current->value.value_dplacement.position.y,
                current->value.value_dplacement.position.z,
                current->value.value_dplacement.orientation.heading * 360.0f,
                current->value.value_dplacement.orientation.pitch * 360.0f,
                current->value.value_dplacement.orientation.roll * 360.0f
            );
            break;
        }
        case SCS_VALUE_TYPE_string: {
            log_line(" %s string = %s", current->name, current->value.value_string.value);
            break;
        }
        default: {
            log_line(" %s unknown", current->name);
            break;
        }
        }
    }
}
/**
 * @brief Last timestamp we received.
 */
scs_timestamp_t last_timestamp = static_cast<scs_timestamp_t>(-1);
// Timestamp of the event currently being processed.
scs_timestamp_t timestamp;
// NOTE(review): presumably a frame counter used to decide when to clear
// stale job data — confirm against its usage further down the file.
static auto clear_job_ticker = 0;
// Function: set_job_values_zero
// Resets every job (cargo) related field in the shared telemetry map:
// numeric fields to 0, string buffers to all-zero bytes.
void set_job_values_zero() {
    // Numeric job fields.
    telem_ptr->config_o.jobIncome = 0;
    telem_ptr->config_ui.time_abs_delivery = 0;
    telem_ptr->config_f.cargoMass = 0;
    // String job fields: wipe each fixed-size buffer in one pass.
    char* const job_strings[] = {
        telem_ptr->config_s.compDstId,
        telem_ptr->config_s.compSrcId,
        telem_ptr->config_s.cityDstId,
        telem_ptr->config_s.citySrcId,
        telem_ptr->config_s.citySrc,
        telem_ptr->config_s.cityDst,
        telem_ptr->config_s.compSrc,
        telem_ptr->config_s.compDst,
        telem_ptr->config_s.cargoId,
        telem_ptr->config_s.cargoAcessoryId,
        telem_ptr->config_s.cargo,
    };
    for (char* const buffer : job_strings) {
        memset(buffer, 0, stringsize);
    }
}
// Function: set_trailer_values_zero
// set every trailer value 0/empty string
// Called when a "trailer" configuration event arrives with no attributes,
// i.e. no trailer is connected any more (see telemetry_configuration).
void set_trailer_values_zero() {
    telem_ptr->truck_f.wearTrailer = 0;
    // Per-wheel arrays (up to 16 wheels).
    std::fill(telem_ptr->truck_ui.trailer_wheelSubstance, telem_ptr->truck_ui.trailer_wheelSubstance+16, 0u);
    std::fill(telem_ptr->truck_f.trailer_wheelSuspDeflection, telem_ptr->truck_f.trailer_wheelSuspDeflection + 16, 0.0f);
    std::fill(telem_ptr->truck_f.trailer_wheelVelocity, telem_ptr->truck_f.trailer_wheelVelocity + 16, 0.0f);
    std::fill(telem_ptr->truck_f.trailer_wheelSteering, telem_ptr->truck_f.trailer_wheelSteering + 16, 0.0f);
    std::fill(telem_ptr->truck_f.trailer_wheelRotation, telem_ptr->truck_f.trailer_wheelRotation + 16, 0.0f);
    std::fill(telem_ptr->truck_b.trailer_wheelOnGround, telem_ptr->truck_b.trailer_wheelOnGround + 16, false);
    // Linear/angular velocity and acceleration vectors
    // (lv = linear velocity, av = angular velocity,
    //  la = linear acceleration, aa = angular acceleration).
    telem_ptr->truck_fv.trailer_lv_accelerationX = 0;
    telem_ptr->truck_fv.trailer_lv_accelerationY = 0;
    telem_ptr->truck_fv.trailer_lv_accelerationZ = 0;
    telem_ptr->truck_fv.trailer_av_accelerationX = 0;
    telem_ptr->truck_fv.trailer_av_accelerationY = 0;
    telem_ptr->truck_fv.trailer_av_accelerationZ = 0;
    telem_ptr->truck_fv.trailer_la_accelerationX = 0;
    telem_ptr->truck_fv.trailer_la_accelerationY = 0;
    telem_ptr->truck_fv.trailer_la_accelerationZ = 0;
    telem_ptr->truck_fv.trailer_aa_accelerationX = 0;
    telem_ptr->truck_fv.trailer_aa_accelerationY = 0;
    telem_ptr->truck_fv.trailer_aa_accelerationZ = 0;
    // Hook position and world placement of the trailer.
    telem_ptr->config_fv.trailerHookPositionX = 0;
    telem_ptr->config_fv.trailerHookPositionY = 0;
    telem_ptr->config_fv.trailerHookPositionZ = 0;
    telem_ptr->truck_dp.trailer_coordinateX = 0;
    telem_ptr->truck_dp.trailer_coordinateY = 0;
    telem_ptr->truck_dp.trailer_coordinateZ = 0;
    telem_ptr->truck_dp.trailer_rotationX = 0;
    telem_ptr->truck_dp.trailer_rotationY = 0;
    telem_ptr->truck_dp.trailer_rotationZ = 0;
    // Static wheel configuration data.
    telem_ptr->config_ui.trailerWheelCount = 0;
    std::fill(telem_ptr->config_f.trailerWheelRadius, telem_ptr->config_f.trailerWheelRadius + 16, 0.0f);
    std::fill(telem_ptr->config_b.trailerWheelSimulated, telem_ptr->config_b.trailerWheelSimulated + 16, false);
    std::fill(telem_ptr->config_b.trailerWheelLiftable, telem_ptr->config_b.trailerWheelLiftable + 16, false);
    std::fill(telem_ptr->config_b.trailerWheelPowered, telem_ptr->config_b.trailerWheelPowered + 16, false);
    std::fill(telem_ptr->config_b.trailerWheelSteerable, telem_ptr->config_b.trailerWheelSteerable + 16, false);
    std::fill(telem_ptr->config_fv.trailerWheelPositionX, telem_ptr->config_fv.trailerWheelPositionX + 16, 0.0f);
    std::fill(telem_ptr->config_fv.trailerWheelPositionY, telem_ptr->config_fv.trailerWheelPositionY + 16, 0.0f);
    std::fill(telem_ptr->config_fv.trailerWheelPositionZ, telem_ptr->config_fv.trailerWheelPositionZ + 16, 0.0f);
    memset(telem_ptr->config_s.trailerId, 0, stringsize);
}
// Function: telemetry_frame_start
// Advances the plugin's continuous timestamp and performs per-frame upkeep
// of the shared memory map.
SCSAPI_VOID telemetry_frame_start(const scs_event_t UNUSED(event), const void*const event_info,
                                  scs_context_t UNUSED(context)) {
    const auto info = static_cast<const scs_telemetry_frame_start_t *>(event_info);
    // First frame we ever see: pretend time started just now so the output
    // timeline is continuous from zero.
    if (last_timestamp == static_cast<scs_timestamp_t>(-1)) {
        last_timestamp = info->paused_simulation_time;
    }
    // The game may restart its timer (e.g. after load); keep our own output
    // continuous by re-basing the delta.
    if (info->flags & SCS_TELEMETRY_FRAME_START_FLAG_timer_restart) {
        last_timestamp = 0;
    }
    // Accumulate the delta since the previous frame.
    timestamp += info->paused_simulation_time - last_timestamp;
    last_timestamp = info->paused_simulation_time;
    // Guard clause: nothing to publish until the shared memory is hooked.
    if (telem_ptr == nullptr) {
        return;
    }
    /* Copy over the game timestamp to our telemetry memory */
    telem_ptr->time = static_cast<unsigned int>(timestamp);
    // Derived flag, refreshed every frame: cruise control is considered "on"
    // whenever a target speed is set.
    telem_ptr->truck_b.cruiseControl = telem_ptr->truck_f.cruiseControlSpeed > 0;
    // A finished job keeps its data visible for a few frames, then clears.
    if (telem_ptr->special_b.jobFinished) {
        clear_job_ticker++;
        if (clear_job_ticker > 10) {
            set_job_values_zero();
            telem_ptr->special_b.jobFinished = false;
        }
    }
}
// Function: telemetry_pause
// Handler for both the "paused" and "started" game events; records whether
// the simulation is currently paused.
SCSAPI_VOID telemetry_pause(const scs_event_t event, const void*const UNUSED(event_info),
                            scs_context_t UNUSED(context)) {
#if LOGGING
    logger::flush();
#endif
    if (telem_ptr == nullptr) {
        return;
    }
    // Both events route here; the event id tells them apart.
    telem_ptr->paused = (event == SCS_TELEMETRY_EVENT_paused);
}
// Function: telemetry_configuration
// Called when the game fires a configuration event. Decodes which config
// block the event describes (truck, trailer, job, ...), stores every
// attribute via handleCfg, and maintains the derived job/trailer flags.
SCSAPI_VOID telemetry_configuration(const scs_event_t event, const void*const event_info,
                                    scs_context_t UNUSED(context)) {
    // On configuration change, this function is called.
    const auto info = static_cast<const scs_telemetry_configuration_t *>(
        event_info);
    // Robustness fix: the sibling handlers (telemetry_frame_start,
    // telemetry_pause) guard against an unhooked shared memory buffer;
    // do the same here before telem_ptr is dereferenced below.
    if (telem_ptr == nullptr) {
        return;
    }
    // check which type the event has
    configType type = {};
    if(strcmp(info->id,"substances")==0) {
        type = substances;
    }else if (strcmp(info->id, "controls") == 0) {
        type = controls;
    }else if (strcmp(info->id, "hshifter") == 0) {
        type = hshifter;
    }else if (strcmp(info->id, "truck") == 0) {
        type = truck;
    }else if (strcmp(info->id, "trailer") == 0) {
        type = trailer;
    }else if (strcmp(info->id, "job") == 0) {
        type = job;
    }else {
        // NOTE(review): an unrecognized id leaves type == {} (the first
        // enumerator) and still runs handleCfg below - confirm intended.
        log_line(SCS_LOG_TYPE_warning, "Something went wrong with this %s",info->id);
    }
    // uncomment to log every config, should work but with function not tested ^^`
    //log_configs(info);
    // The attribute array is terminated by an entry with a null name; if the
    // loop body never runs the config block is empty.
    auto is_empty = true;
    for ( auto current = info->attributes; current->name; ++current) {
        if(!handleCfg(current, type)) {
            // actually only for testing/debug purpose, so should there be a message in game with that line there is missed something
            log_line("attribute not handled id: %i attribute: %s", type, current->name);
        }
        is_empty = false;
    }
    // An empty "job" config while on a job means the job just finished.
    if (type==job && is_empty && telem_ptr->special_b.onJob) {
        telem_ptr->special_b.onJob = false;
        telem_ptr->special_b.jobFinished = true;
        clear_job_ticker = 0;
    }
    else if (!telem_ptr->special_b.onJob && type == job && !is_empty) {
        // Job data arrived while no job was active: a new job started.
        telem_ptr->special_b.onJob = true;
    }
    // An empty "trailer" config means no trailer is connected any more.
    if(type==trailer && is_empty) {
        set_trailer_values_zero();
        telem_ptr->special_b.trailerConnected = false;
    }else if(type == trailer && !is_empty && !telem_ptr->special_b.trailerConnected) {
        // Trailer data arrived while flagged as disconnected - fix the flag.
        telem_ptr->special_b.trailerConnected = true;
    }
}
/******* STORING OF SEVERAL SCS DATA TYPES *******/
// Store a float channel value into the float slot given by 'context'.
// A null 'value' means the channel currently has no data; nothing is stored.
SCSAPI_VOID telemetry_store_float(const scs_string_t name, const scs_u32_t index, const scs_value_t*const value,
                                  scs_context_t context) {
    if (!value) return;
    // Fix: removed the redundant assert(value) - unreachable after the
    // null check above.
    assert(value->type == SCS_VALUE_TYPE_float);
    assert(context);
    *static_cast<float *>(context) = value->value_float.value;
}
// Store a signed 32-bit channel value into the int slot given by 'context'.
// A null 'value' means the channel currently has no data; nothing is stored.
SCSAPI_VOID telemetry_store_s32(const scs_string_t name, const scs_u32_t index, const scs_value_t*const value,
                                scs_context_t context) {
    if (!value) return;
    // Fix: removed the redundant assert(value) - unreachable after the
    // null check above.
    assert(value->type == SCS_VALUE_TYPE_s32);
    assert(context);
    *static_cast<int *>(context) = value->value_s32.value;
}
// Store an unsigned 32-bit channel value into the slot given by 'context'.
// A null 'value' means the channel currently has no data; nothing is stored.
SCSAPI_VOID telemetry_store_u32(const scs_string_t name, const scs_u32_t index, const scs_value_t*const value,
                                scs_context_t context) {
    if (!value) return;
    // Fix: removed the redundant assert(value) - unreachable after the
    // null check above.
    assert(value->type == SCS_VALUE_TYPE_u32);
    assert(context);
    *static_cast<unsigned int *>(context) = value->value_u32.value;
}
// Store a boolean channel value into the bool slot given by 'context'.
// A missing (null) value is treated the same as an explicit false.
SCSAPI_VOID telemetry_store_bool(const scs_string_t name, const scs_u32_t index, const scs_value_t*const value,
                                 scs_context_t context) {
    if (!context) return;
    *static_cast<bool *>(context) = (value != nullptr) && (value->value_bool.value != 0);
}
// Store a float vector (x,y,z) into three consecutive floats starting at
// 'context'. A null 'value' means no data; nothing is stored.
SCSAPI_VOID telemetry_store_fvector(const scs_string_t name, const scs_u32_t index, const scs_value_t*const value,
                                    scs_context_t context) {
    if (!value) return;
    // Fix: removed the redundant assert(value) - unreachable after the
    // null check above.
    assert(value->type == SCS_VALUE_TYPE_fvector);
    assert(context);
    // The three target fields must be laid out contiguously in the
    // shared memory struct.
    *(static_cast<float *>(context) + 0) = value->value_fvector.x;
    *(static_cast<float *>(context) + 1) = value->value_fvector.y;
    *(static_cast<float *>(context) + 2) = value->value_fvector.z;
}
// Store a double placement (position + Euler orientation) into six
// consecutive doubles starting at 'context'. Null 'value' means no data.
SCSAPI_VOID telemetry_store_dplacement(const scs_string_t name, const scs_u32_t index, const scs_value_t*const value,
                                       scs_context_t context) {
    if (!value) return;
    // Fix: removed the redundant assert(value) - unreachable after the
    // null check above.
    assert(value->type == SCS_VALUE_TYPE_dplacement);
    assert(context);
    // Messy hack to store the acceleration and orientation values into our telemetry struct
    // It is neccesary that these are put together, otherwise it may overwrite over values.
    *(static_cast<double *>(context) + 0) = value->value_dplacement.position.x;
    *(static_cast<double *>(context) + 1) = value->value_dplacement.position.y;
    *(static_cast<double *>(context) + 2) = value->value_dplacement.position.z;
    *(static_cast<double *>(context) + 3) = value->value_dplacement.orientation.heading;
    *(static_cast<double *>(context) + 4) = value->value_dplacement.orientation.pitch;
    *(static_cast<double *>(context) + 5) = value->value_dplacement.orientation.roll;
}
// Store a float placement (position + Euler orientation) into six
// consecutive floats starting at 'context'. Null 'value' means no data.
SCSAPI_VOID telemetry_store_fplacement(const scs_string_t name, const scs_u32_t index, const scs_value_t*const value,
                                       scs_context_t context) {
    if (!value) return;
    // Fix: removed the redundant assert(value) - unreachable after the
    // null check above.
    assert(value->type == SCS_VALUE_TYPE_fplacement);
    assert(context);
    // Messy hack to store the acceleration and orientation values into our telemetry struct
    // It is neccesary that these are put together, otherwise it may overwrite over values.
    *(static_cast<float *>(context) + 0) = value->value_fplacement.position.x;
    *(static_cast<float *>(context) + 1) = value->value_fplacement.position.y;
    *(static_cast<float *>(context) + 2) = value->value_fplacement.position.z;
    *(static_cast<float *>(context) + 3) = value->value_fplacement.orientation.heading;
    *(static_cast<float *>(context) + 4) = value->value_fplacement.orientation.pitch;
    *(static_cast<float *>(context) + 5) = value->value_fplacement.orientation.roll;
}
/**
 * @brief Telemetry API initialization function.
 *
 * Hooks up the shared memory buffer, stores static game/SDK version data
 * and registers every event and channel this plugin consumes.
 *
 * See scssdk_telemetry.h
 *
 * @param version SDK version offered by the game.
 * @param params  Initialization parameters supplied by the game.
 * @return SCS_RESULT_ok on success, an error result otherwise.
 */
SCSAPI_RESULT scs_telemetry_init(const scs_u32_t version, const scs_telemetry_init_params_t*const params) {
    // We currently support only one version.
    if (version != SCS_TELEMETRY_VERSION_1_00) {
        return SCS_RESULT_unsupported;
    }
    const auto version_params = static_cast<const scs_telemetry_init_params_v100_t *>(params); // NOLINT(cppcoreguidelines-pro-type-static-cast-downcast)
    // Fix: the null check must happen BEFORE the first dereference (the
    // original read version_params->common.log first, which would crash on
    // a null params pointer).
    if (version_params == nullptr) {
        return SCS_RESULT_generic_error;
    }
    game_log = version_params->common.log;
#if LOGGING
    log_line("LOGGING is active find at %s", logger::path.c_str());
    logger::out << "start logging" << '\n';
#endif
    /*** ACQUIRE SHARED MEMORY BUFFER ***/
    telem_mem = new SharedMemory(scs_mmf_name, SCS_PLUGIN_MMF_SIZE);
    if (telem_mem == nullptr) {
        return SCS_RESULT_generic_error;
    }
    if (!telem_mem->Hooked()) {
        return SCS_RESULT_generic_error;
    }
    telem_ptr = static_cast<scsTelemetryMap_t*>(telem_mem->GetBuffer());
    if (telem_ptr == nullptr) {
        return SCS_RESULT_generic_error;
    }
    memset(telem_ptr, 0, SCS_PLUGIN_MMF_SIZE);
    /*** INITIALIZE TELEMETRY MAP TO DEFAULT ***/
    telem_ptr->paused = true;
    telem_ptr->time = 0;
    // Get SCS Game Version and Set Plugin Version
    telem_ptr->scs_values.telemetry_plugin_revision = PLUGIN_REVID;
    telem_ptr->scs_values.version_major = SCS_GET_MAJOR_VERSION(version_params->common.game_version);
    telem_ptr->scs_values.version_minor = SCS_GET_MINOR_VERSION(version_params->common.game_version);
    // Set Game ID (1 = ETS2, 2 = ATS, 0 = unknown).
    if (strcmp(version_params->common.game_id, SCS_GAME_ID_EUT2) == 0) {
        telem_ptr->scs_values.game = 1;
        telem_ptr->scs_values.telemetry_version_game_major = SCS_GET_MAJOR_VERSION(
            SCS_TELEMETRY_EUT2_GAME_VERSION_CURRENT);
        telem_ptr->scs_values.telemetry_version_game_minor = SCS_GET_MINOR_VERSION(
            SCS_TELEMETRY_EUT2_GAME_VERSION_CURRENT);
    }
    else if (strcmp(version_params->common.game_id, SCS_GAME_ID_ATS) == 0) {
        telem_ptr->scs_values.game = 2;
        telem_ptr->scs_values.telemetry_version_game_major = SCS_GET_MAJOR_VERSION(
            SCS_TELEMETRY_ATS_GAME_VERSION_CURRENT);
        telem_ptr->scs_values.telemetry_version_game_minor = SCS_GET_MINOR_VERSION(
            SCS_TELEMETRY_ATS_GAME_VERSION_CURRENT);
    }
    else {
        // unknown game
        log_line(SCS_LOG_TYPE_error, "Unknown Game SDK will not work correctly");
        telem_ptr->scs_values.game = 0;
        telem_ptr->scs_values.telemetry_version_game_major = 0;
        telem_ptr->scs_values.telemetry_version_game_minor = 0;
    }
    // Model & trailer type are stored in configuration event.
    /*** REGISTER GAME EVENTS (Pause/Unpause/Start/Time) ***/
    const auto events_registered =
        version_params->register_for_event(SCS_TELEMETRY_EVENT_frame_start, telemetry_frame_start, nullptr) ==
        SCS_RESULT_ok &&
        version_params->register_for_event(SCS_TELEMETRY_EVENT_paused, telemetry_pause, nullptr) == SCS_RESULT_ok &&
        version_params->register_for_event(SCS_TELEMETRY_EVENT_started, telemetry_pause, nullptr) == SCS_RESULT_ok;
    // Register configuration event, because it sends data like truck make, etc.
    version_params->register_for_event(SCS_TELEMETRY_EVENT_configuration, telemetry_configuration, nullptr);
    if (!events_registered) {
        return SCS_RESULT_generic_error;
    }
    /*** REGISTER ALL TELEMETRY CHANNELS TO OUR SHARED MEMORY MAP ***/
    REGISTER_CHANNEL(CHANNEL_game_time, u32, telem_ptr->common_ui.time_abs);
    REGISTER_CHANNEL(TRAILER_CHANNEL_connected, bool, telem_ptr->truck_b.trailer_attached);
    REGISTER_CHANNEL(TRUCK_CHANNEL_speed, float, telem_ptr->truck_f.speed);
    REGISTER_CHANNEL(TRUCK_CHANNEL_local_linear_acceleration, fvector, telem_ptr->truck_fv.accelerationX);
    REGISTER_CHANNEL(TRUCK_CHANNEL_local_linear_velocity, fvector, telem_ptr->truck_fv.lv_accelerationX);
    REGISTER_CHANNEL(TRUCK_CHANNEL_local_angular_acceleration, fvector, telem_ptr->truck_fv.aa_accelerationX);
    REGISTER_CHANNEL(TRUCK_CHANNEL_local_angular_velocity, fvector, telem_ptr->truck_fv.av_accelerationX);
    REGISTER_CHANNEL(TRUCK_CHANNEL_world_placement, dplacement, telem_ptr->truck_dp.coordinateX);
    REGISTER_CHANNEL(TRUCK_CHANNEL_engine_gear, s32, telem_ptr->truck_i.gear);
    REGISTER_CHANNEL(TRUCK_CHANNEL_displayed_gear, s32, telem_ptr->truck_i.gearDashboard);
    REGISTER_CHANNEL(TRUCK_CHANNEL_engine_rpm, float, telem_ptr->truck_f.engineRpm);
    REGISTER_CHANNEL(TRUCK_CHANNEL_fuel, float, telem_ptr->truck_f.fuel);
    REGISTER_CHANNEL(TRUCK_CHANNEL_fuel_average_consumption, float, telem_ptr->truck_f.fuelAvgConsumption);
    REGISTER_CHANNEL(TRUCK_CHANNEL_input_steering, float, telem_ptr->truck_f.userSteer);
    REGISTER_CHANNEL(TRUCK_CHANNEL_input_throttle, float, telem_ptr->truck_f.userThrottle);
    REGISTER_CHANNEL(TRUCK_CHANNEL_input_brake, float, telem_ptr->truck_f.userBrake);
    REGISTER_CHANNEL(TRUCK_CHANNEL_input_clutch, float, telem_ptr->truck_f.userClutch);
    REGISTER_CHANNEL(TRUCK_CHANNEL_effective_steering, float, telem_ptr->truck_f.gameSteer);
    REGISTER_CHANNEL(TRUCK_CHANNEL_effective_throttle, float, telem_ptr->truck_f.gameThrottle);
    REGISTER_CHANNEL(TRUCK_CHANNEL_effective_brake, float, telem_ptr->truck_f.gameBrake);
    REGISTER_CHANNEL(TRUCK_CHANNEL_effective_clutch, float, telem_ptr->truck_f.gameClutch);
    // Auxilliary stuff:
    REGISTER_CHANNEL(TRUCK_CHANNEL_retarder_level, u32, telem_ptr->truck_ui.retarderBrake);
    REGISTER_CHANNEL(TRUCK_CHANNEL_hshifter_slot, u32, telem_ptr->truck_ui.shifterSlot);
    for (auto i = scs_u32_t(0); i < scs_u32_t(2); i++) {
        REGISTER_CHANNEL_INDEX(TRUCK_CHANNEL_hshifter_selector, bool, telem_ptr->truck_b.shifterToggle[i], i);
    }
    // Booleans
    REGISTER_CHANNEL(TRUCK_CHANNEL_wipers, bool, telem_ptr->truck_b.wipers);
    REGISTER_CHANNEL(TRUCK_CHANNEL_parking_brake, bool, telem_ptr->truck_b.parkBrake);
    REGISTER_CHANNEL(TRUCK_CHANNEL_motor_brake, bool, telem_ptr->truck_b.motorBrake);
    REGISTER_CHANNEL(TRUCK_CHANNEL_electric_enabled, bool, telem_ptr->truck_b.electricEnabled);
    REGISTER_CHANNEL(TRUCK_CHANNEL_engine_enabled, bool, telem_ptr->truck_b.engineEnabled);
    REGISTER_CHANNEL(TRUCK_CHANNEL_lblinker, bool, telem_ptr->truck_b.blinkerLeftActive);
    REGISTER_CHANNEL(TRUCK_CHANNEL_rblinker, bool, telem_ptr->truck_b.blinkerRightActive);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_lblinker, bool, telem_ptr->truck_b.blinkerLeftOn);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_rblinker, bool, telem_ptr->truck_b.blinkerRightOn);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_parking, bool, telem_ptr->truck_b.lightsParking);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_low_beam, bool, telem_ptr->truck_b.lightsBeamLow);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_high_beam, bool, telem_ptr->truck_b.lightsBeamHigh);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_aux_front, u32, telem_ptr->truck_ui.lightsAuxFront);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_aux_roof, u32, telem_ptr->truck_ui.lightsAuxRoof);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_beacon, bool, telem_ptr->truck_b.lightsBeacon);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_brake, bool, telem_ptr->truck_b.lightsBrake);
    REGISTER_CHANNEL(TRUCK_CHANNEL_light_reverse, bool, telem_ptr->truck_b.lightsReverse);
    REGISTER_CHANNEL(TRUCK_CHANNEL_battery_voltage_warning, bool, telem_ptr->truck_b.batteryVoltageWarning);
    REGISTER_CHANNEL(TRUCK_CHANNEL_brake_air_pressure_warning, bool, telem_ptr->truck_b.airPressureWarning);
    REGISTER_CHANNEL(TRUCK_CHANNEL_brake_air_pressure_emergency, bool, telem_ptr->truck_b.airPressureEmergency);
    REGISTER_CHANNEL(TRUCK_CHANNEL_adblue_warning, bool, telem_ptr->truck_b.adblueWarning);
    REGISTER_CHANNEL(TRUCK_CHANNEL_oil_pressure_warning, bool, telem_ptr->truck_b.oilPressureWarning);
    REGISTER_CHANNEL(TRUCK_CHANNEL_water_temperature_warning, bool, telem_ptr->truck_b.waterTemperatureWarning);
    // Floats
    REGISTER_CHANNEL(TRUCK_CHANNEL_brake_air_pressure, float, telem_ptr->truck_f.airPressure);
    REGISTER_CHANNEL(TRUCK_CHANNEL_brake_temperature, float, telem_ptr->truck_f.brakeTemperature);
    REGISTER_CHANNEL(TRUCK_CHANNEL_fuel_warning, bool, telem_ptr->truck_b.fuelWarning);
    REGISTER_CHANNEL(TRUCK_CHANNEL_adblue, float, telem_ptr->truck_f.adblue);
    //registerChannel(TRUCK_CHANNEL_adblue_average_consumption, float, telem_ptr->tel_rev3.adblueConsumption); // seems not to work in ets2/ATS at 19/10 skd 1.9 and actual game versions
    REGISTER_CHANNEL(TRUCK_CHANNEL_oil_pressure, float, telem_ptr->truck_f.oilPressure);
    REGISTER_CHANNEL(TRUCK_CHANNEL_oil_temperature, float, telem_ptr->truck_f.oilTemperature);
    REGISTER_CHANNEL(TRUCK_CHANNEL_water_temperature, float, telem_ptr->truck_f.waterTemperature);
    REGISTER_CHANNEL(TRUCK_CHANNEL_battery_voltage, float, telem_ptr->truck_f.batteryVoltage);
    REGISTER_CHANNEL(TRUCK_CHANNEL_dashboard_backlight, float, telem_ptr->truck_f.lightsDashboard);
    REGISTER_CHANNEL(TRUCK_CHANNEL_wear_engine, float, telem_ptr->truck_f.wearEngine);
    REGISTER_CHANNEL(TRUCK_CHANNEL_wear_transmission, float, telem_ptr->truck_f.wearTransmission);
    REGISTER_CHANNEL(TRUCK_CHANNEL_wear_cabin, float, telem_ptr->truck_f.wearCabin);
    REGISTER_CHANNEL(TRUCK_CHANNEL_wear_chassis, float, telem_ptr->truck_f.wearChassis);
    REGISTER_CHANNEL(TRUCK_CHANNEL_wear_wheels, float, telem_ptr->truck_f.wearWheels);
    REGISTER_CHANNEL(TRAILER_CHANNEL_wear_chassis, float, telem_ptr->truck_f.wearTrailer);
    REGISTER_CHANNEL(TRUCK_CHANNEL_odometer, float, telem_ptr->truck_f.truckOdometer);
    REGISTER_CHANNEL(TRUCK_CHANNEL_cruise_control, float, telem_ptr->truck_f.cruiseControlSpeed);
    REGISTER_CHANNEL(TRUCK_CHANNEL_navigation_speed_limit, float, telem_ptr->truck_f.speedLimit);
    REGISTER_CHANNEL(TRUCK_CHANNEL_navigation_distance, float, telem_ptr->truck_f.routeDistance);
    REGISTER_CHANNEL(TRUCK_CHANNEL_navigation_time, float, telem_ptr->truck_f.routeTime);
    REGISTER_CHANNEL(TRUCK_CHANNEL_fuel_range, float, telem_ptr->truck_f.fuelRange);
    REGISTER_CHANNEL(TRAILER_CHANNEL_world_placement, dplacement, telem_ptr->truck_dp.trailer_coordinateX);
    REGISTER_CHANNEL(TRAILER_CHANNEL_local_linear_velocity, fvector, telem_ptr->truck_fv.trailer_lv_accelerationX);
    // Fix: angular velocity previously targeted trailer_la_accelerationX,
    // colliding with local_linear_acceleration below and leaving the
    // trailer_av_* fields (see set_trailer_values_zero) never written.
    REGISTER_CHANNEL(TRAILER_CHANNEL_local_angular_velocity, fvector, telem_ptr->truck_fv.trailer_av_accelerationX);
    REGISTER_CHANNEL(TRAILER_CHANNEL_local_linear_acceleration, fvector, telem_ptr->truck_fv.trailer_la_accelerationX);
    REGISTER_CHANNEL(TRAILER_CHANNEL_local_angular_acceleration, fvector, telem_ptr->truck_fv.trailer_aa_accelerationX);
    // Per-wheel channels for both trailer and truck (indexed registration).
    for (auto i = scs_u32_t(0); i < WHEEL_SIZE; i++) {
        REGISTER_CHANNEL_INDEX(TRAILER_CHANNEL_wheel_on_ground, bool, telem_ptr->truck_b.trailer_wheelOnGround[i], i);
        REGISTER_CHANNEL_INDEX(TRAILER_CHANNEL_wheel_substance, u32, telem_ptr->truck_ui.trailer_wheelSubstance[i], i);
        REGISTER_CHANNEL_INDEX(TRAILER_CHANNEL_wheel_velocity, float, telem_ptr->truck_f.trailer_wheelVelocity[i], i);
        REGISTER_CHANNEL_INDEX(TRAILER_CHANNEL_wheel_steering, float, telem_ptr->truck_f.trailer_wheelSteering[i], i);
        REGISTER_CHANNEL_INDEX(TRAILER_CHANNEL_wheel_rotation, float, telem_ptr->truck_f.trailer_wheelRotation[i], i);
        REGISTER_CHANNEL_INDEX(TRAILER_CHANNEL_wheel_susp_deflection, float, telem_ptr->truck_f.trailer_wheelSuspDeflection[i], i);
        REGISTER_CHANNEL_INDEX(TRUCK_CHANNEL_wheel_on_ground, bool, telem_ptr->truck_b.truck_wheelOnGround[i], i);
        REGISTER_CHANNEL_INDEX(TRUCK_CHANNEL_wheel_substance, u32, telem_ptr->truck_ui.truck_wheelSubstance[i], i);
        REGISTER_CHANNEL_INDEX(TRUCK_CHANNEL_wheel_velocity, float, telem_ptr->truck_f.truck_wheelVelocity[i], i);
        REGISTER_CHANNEL_INDEX(TRUCK_CHANNEL_wheel_steering, float, telem_ptr->truck_f.truck_wheelSteering[i], i);
        REGISTER_CHANNEL_INDEX(TRUCK_CHANNEL_wheel_rotation, float, telem_ptr->truck_f.truck_wheelRotation[i], i);
        REGISTER_CHANNEL_INDEX(TRUCK_CHANNEL_wheel_susp_deflection, float, telem_ptr->truck_f.truck_wheelSuspDeflection[i], i);
        REGISTER_CHANNEL_INDEX(TRUCK_CHANNEL_wheel_lift, float, telem_ptr->truck_f.truck_wheelLift[i], i);
        REGISTER_CHANNEL_INDEX(TRUCK_CHANNEL_wheel_lift_offset, float, telem_ptr->truck_f.truck_wheelLiftOffset[i], i);
    }
    REGISTER_CHANNEL(TRUCK_CHANNEL_cabin_offset, fplacement, telem_ptr->truck_fp.cabinOffsetX);
    REGISTER_CHANNEL(TRUCK_CHANNEL_cabin_angular_velocity, fvector, telem_ptr->truck_fv.cabinAVX);
    REGISTER_CHANNEL(TRUCK_CHANNEL_cabin_angular_acceleration, fvector, telem_ptr->truck_fv.cabinAAX);
    REGISTER_CHANNEL(CHANNEL_next_rest_stop, s32, telem_ptr->common_i.restStop);
    REGISTER_CHANNEL(CHANNEL_local_scale, float, telem_ptr->common_f.scale);
    REGISTER_CHANNEL(TRUCK_CHANNEL_head_offset, fplacement, telem_ptr->truck_fp.headOffsetX);
    // MISSING: (Will be added on wish) see also on config handler
    // actually should nothing miss here (1.9)
    // Set the structure with defaults.
    timestamp = static_cast<scs_timestamp_t>(0);
    last_timestamp = static_cast<scs_timestamp_t>(-1);
    return SCS_RESULT_ok;
}
/**
 * @brief Telemetry API deinitialization function.
 *
 * See scssdk_telemetry.h
 */
SCSAPI_VOID scs_telemetry_shutdown() {
#if LOGGING
    logger::flush();
#endif
    // Release the memory-mapped file, if it was ever created.
    if (telem_mem == nullptr) {
        return;
    }
    telem_mem->Close();
}
// Telemetry api.
// ReSharper disable once CppInconsistentNaming
BOOL APIENTRY DllMain(
    HMODULE module,
    DWORD reason_for_call,
    LPVOID reserved
) {
    // Tear down the shared memory map when the game unloads the plugin.
    switch (reason_for_call) {
    case DLL_PROCESS_DETACH:
        scs_telemetry_shutdown();
        break;
    default:
        break;
    }
    return TRUE;
}
|
<filename>example/src/schema.ts<gh_stars>10-100
import { subscribe } from 'subscriptionless';
import gql from 'graphql-tag';
// GraphQL SDL for the example app: articles can be listed (Query),
// published (Mutation) and streamed as they appear (Subscription).
export const typeDefs = gql`
  type Article {
    id: ID!
    title: String!
    content: String!
  }
  type Query {
    articles: [Article!]!
  }
  type Mutation {
    publishArticle(title: String!, content: String!): Article!
  }
  type Subscription {
    newArticles: [Article!]!
  }
`;
// Resolver map matching the SDL above. Query/Mutation are stubs; the
// Subscription resolver wraps events published on the NEW_ARTICLE topic.
export const resolvers = {
  Query: {
    articles() {
      return [];
    },
  },
  Mutation: {
    publishArticle() {
      return {};
    },
  },
  Subscription: {
    newArticles: {
      resolve(event, args) {
        return [event.payload];
      },
      subscribe: subscribe('NEW_ARTICLE'),
      onSubscribe() {
        console.log('SUBSCRIBE!');
      },
      onComplete() {
        console.log('COMPLETE!');
      },
    },
  },
};
|
// Player input for controlling the sprite's movement
void Update()
{
    // Read the raw axis values supplied by Unity's input system.
    float dx = Input.GetAxis("Horizontal");
    float dy = Input.GetAxis("Vertical");
    // Translate in the XY plane, scaled by frame time and configured speed.
    Vector3 step = new Vector3(dx, dy, 0);
    transform.position += step * Time.deltaTime * movementSpeed;
}
// Collision detection and handling
void OnCollisionEnter2D(Collision2D collision)
{
    // Only obstacle collisions matter; ignore everything else.
    if (!collision.gameObject.CompareTag("Obstacle"))
    {
        return;
    }
    // Hide the sprite, stop further collisions and remember the hit.
    spriteRenderer.enabled = false;
    collider.enabled = false;
    hit = true;
    // Play the hit sound when an audio source is wired up.
    if (audioSource != null)
    {
        audioSource.Play();
    }
}
#!/bin/bash
# Abort on errors, unset variables and failed members of a pipeline.
set -euo pipefail
# based on https://stackoverflow.com/a/28709668/2693875 and https://stackoverflow.com/a/23006365/2693875
# cecho <color> <message> - print the message in the given color when the
# terminal supports it, plain echo otherwise.
# Colors: black/bk, red/r, green/g, yellow/y, blue/b, magenta/m, cyan/c,
# white/w (also used for any unrecognized name), or a raw digit 0-7.
cecho() {
  if [[ $TERM == "dumb" ]]; then
    # Fix: quote the expansion so the message is not word-split or
    # glob-expanded (an unquoted '*' in a message would list files).
    echo "$2"
  else
    local color=$1
    local exp=$2
    if ! [[ $color =~ ^[0-9]$ ]] ; then
      case $(echo "$color" | tr '[:upper:]' '[:lower:]') in
        bk | black) color=0 ;;
        r | red) color=1 ;;
        g | green) color=2 ;;
        y | yellow) color=3 ;;
        b | blue) color=4 ;;
        m | magenta) color=5 ;;
        c | cyan) color=6 ;;
        w | white|*) color=7 ;; # white or invalid color
      esac
    fi
    tput setaf "$color"
    # Fix: quoted here as well (the shellcheck SC2086 suppression is no
    # longer needed).
    echo "$exp"
    tput sgr0
  fi
}
# Resolve the directory this script lives in, so relative paths work no
# matter what the caller's working directory is.
script_directory="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
repo_root_directory="$script_directory/.."
# Optional first argument selects the GitLab image tag (default: latest).
if [[ $# == 1 ]] ; then
    gitlab_version="$1"
else
    gitlab_version="latest"
fi
cecho b "Pulling GitLab image version '$gitlab_version'..."
docker pull gitlab/gitlab-ee:$gitlab_version
cecho b "Preparing to start GitLab..."
# Stop and remove any previous container so the name and ports are free.
existing_gitlab_container_id=$(docker ps -a -f "name=gitlab" --format "{{.ID}}")
if [[ -n $existing_gitlab_container_id ]] ; then
    cecho b "Stopping and removing existing GitLab container..."
    docker stop --time=30 "$existing_gitlab_container_id"
    docker rm "$existing_gitlab_container_id"
fi
# Fix: quote the directory expansions - a repository path containing spaces
# would otherwise be word-split and create the wrong directories.
mkdir -p "$repo_root_directory/config"
if [[ -f Gitlab.gitlab-license ]] ; then
    cecho b "EE license file found - using it..."
    cp Gitlab.gitlab-license "$repo_root_directory/config/"
fi
mkdir -p "$repo_root_directory/logs"
mkdir -p "$repo_root_directory/data"
cecho b "Starting GitLab..."
# run GitLab with root password pre-set and as many unnecessary features disabled to speed up the startup
# The container reports health via the mounted healthcheck-and-setup.sh,
# which await-healthy.sh polls below.
container_id=$(docker run --detach \
    --hostname gitlab.foobar.com \
    --env GITLAB_OMNIBUS_CONFIG="gitlab_rails['initial_root_password'] = 'password'; registry['enable'] = false; grafana['enable'] = false; prometheus_monitoring['enable'] = false;" \
    --publish 443:443 --publish 80:80 --publish 2022:22 \
    --name gitlab \
    --restart always \
    --volume "$repo_root_directory/dev/healthcheck-and-setup.sh:/healthcheck-and-setup.sh" \
    --health-cmd '/healthcheck-and-setup.sh' \
    --health-interval 2s \
    --health-timeout 2m \
    gitlab/gitlab-ee:$gitlab_version)
cecho b "Waiting 3 minutes before starting to check if GitLab has started..."
cecho b "(Run this in another terminal you want to follow the instance logs:"
cecho y "docker logs -f ${container_id}"
cecho b ")"
# GitLab takes several minutes to boot; poll for health after a grace period.
sleep 3m
$script_directory/await-healthy.sh
# create files with params needed by the tests to access GitLab
# (we are using these files to pass values from this script to the outside bash shell
# - we cannot change its env variables from inside it)
echo "http://localhost" > "$repo_root_directory/gitlab_url.txt"
echo "token-string-here123" > "$repo_root_directory/gitlab_token.txt"
cecho b 'Starting GitLab complete!'
echo ''
cecho b 'GitLab version:'
curl -H "Authorization:Bearer $(cat "$repo_root_directory/gitlab_token.txt")" http://localhost/api/v4/version
echo ''
cecho b 'GitLab web UI URL (user: root, password: password)'
echo 'http://localhost'
echo ''
# NOTE(review): aliases are not expanded in non-interactive shells, so this
# only takes effect for users who *source* this script - confirm intent.
alias stop_gitlab='existing_gitlab_container_id=$(docker ps -a -f "name=gitlab" --format "{{.ID}}"); docker stop --time=30 $existing_gitlab_container_id ; docker rm $existing_gitlab_container_id'
cecho b 'Run this command to stop GitLab container:'
cecho r 'stop_gitlab'
echo ''
# Fix: repaired the garbled user-facing message ("will NOT existing any data").
cecho b 'To start GitLab container again, re-run this script. Note that GitLab will NOT keep any existing data'
cecho b 'so the start will take a lot of time again. (But this is the only way to make GitLab in Docker stable.)'
echo ''
cecho b 'Run this to start the acceptance tests (it will automatically load GITLAB_URL from gitlab_url.txt'
cecho b 'and GITLAB_TOKEN from gitlab_token.txt created by this script):'
echo ''
cecho y 'pytest tests/acceptance'
#!/bin/bash -f
#*********************************************************************************************************
# Vivado (TM) v2018.3 (64-bit)
#
# Filename : ov5640_hdmi_design.sh
# Simulator : Aldec Riviera-PRO Simulator
# Description : Simulation script for compiling, elaborating and verifying the project source files.
# The script will automatically create the design libraries sub-directories in the run
# directory, add the library logical mappings in the simulator setup file, create default
# 'do/prj' file, execute compilation, elaboration and simulation steps.
#
# Generated by Vivado on Sun Oct 24 18:33:22 +0300 2021
# SW Build 2405991 on Thu Dec 6 23:38:27 MST 2018
#
# Copyright 1986-2018 Xilinx, Inc. All Rights Reserved.
#
# usage: ov5640_hdmi_design.sh [-help]
# usage: ov5640_hdmi_design.sh [-lib_map_path]
# usage: ov5640_hdmi_design.sh [-noclean_files]
# usage: ov5640_hdmi_design.sh [-reset_run]
#
# Prerequisite:- To compile and run simulation, you must compile the Xilinx simulation libraries using the
# 'compile_simlib' TCL command. For more information about this command, run 'compile_simlib -help' in the
# Vivado Tcl Shell. Once the libraries have been compiled successfully, specify the -lib_map_path switch
# that points to these libraries and rerun export_simulation. For more information about this switch please
# type 'export_simulation -help' in the Tcl shell.
#
# You can also point to the simulation libraries by either replacing the <SPECIFY_COMPILED_LIB_PATH> in this
# script with the compiled library directory path or specify this path with the '-lib_map_path' switch when
# executing this script. Please type 'ov5640_hdmi_design.sh -help' for more information.
#
# Additional references - 'Xilinx Vivado Design Suite User Guide:Logic simulation (UG900)'
#
#*********************************************************************************************************
# Script info
echo -e "ov5640_hdmi_design.sh - Script generated by export_simulation (Vivado v2018.3 (64-bit)-id)\n"
# Main steps
# run <arg1> <arg2> - validate the CLI arguments, prepare the library
# mapping, then compile and simulate the design.
run()
{
  # NOTE: check_args receives the ARGUMENT COUNT as its first parameter and
  # the first argument as its second (see check_args below).
  check_args $# $1
  setup $1 $2
  compile
  simulate
}
# RUN_STEP: <compile>
compile()
{
  # Compile design files; capture stdout+stderr and append to compile.log.
  source compile.do 2>&1 | tee -a compile.log
}
# RUN_STEP: <simulate>
simulate()
{
  # Launch Riviera-PRO batch simulation, logging to simulate.log.
  runvsimsa -l simulate.log -do "do {simulate.do}"
}
# STEP: setup
# $1 - option switch, $2 - optional value (library path for -lib_map_path).
setup()
{
  case $1 in
    "-lib_map_path" )
      if [[ ($2 == "") ]]; then
        echo -e "ERROR: Simulation library directory path not specified (type \"./ov5640_hdmi_design.sh -help\" for more information)\n"
        exit 1
      fi
      map_setup_file $2
    ;;
    "-reset_run" )
      reset_run
      echo -e "INFO: Simulation run files deleted.\n"
      exit 0
    ;;
    "-noclean_files" )
      # do not remove previous data
    ;;
    * )
      # Default: map libraries via map_setup_file's hard-coded fallback path.
      map_setup_file $2
  esac
  # Add any setup/initialization commands here:-
  # <user specific commands>
}
# Map library.cfg file
# If <lib_map_path>/library.cfg exists, link the pre-compiled simulation
# libraries into the local simulator setup via 'vmap'.
# $1 - optional compiled-library directory (falls back to the generated
#      Vivado default path below).
map_setup_file()
{
  file="library.cfg"
  if [[ ($1 != "") ]]; then
    lib_map_path="$1"
  else
    lib_map_path="C:/Users/mi/Desktop/Programma/ZYNQ/XC7Z020_316_ES_ConerDetect/XC7Z020_316_ES_ConerDetect.cache/compile_simlib/riviera"
  fi
  if [[ ($lib_map_path != "") ]]; then
    src_file="$lib_map_path/$file"
    if [[ -e $src_file ]]; then
      # Fix: quote the path so a directory containing spaces survives
      # word splitting when passed to vmap.
      vmap -link "$lib_map_path"
    fi
  fi
}
# Delete generated data from the previous run
reset_run()
{
  # Remove every artifact produced by a previous compile/simulate cycle,
  # in the same order the generated script listed them.
  local artifacts=(compile.log elaboration.log simulate.log dataset.asdb work riviera)
  local artifact
  for artifact in "${artifacts[@]}"; do
    if [[ -e $artifact ]]; then
      rm -rf "$artifact"
    fi
  done
}
# Check command line arguments
# $1 - number of CLI arguments the script received, $2 - the first argument.
# Rejects a single argument that is not one of the known switches and
# prints usage (then exits) for -help/-h.
check_args()
{
  if [[ ($1 == 1 ) && ($2 != "-lib_map_path" && $2 != "-noclean_files" && $2 != "-reset_run" && $2 != "-help" && $2 != "-h") ]]; then
    echo -e "ERROR: Unknown option specified '$2' (type \"./ov5640_hdmi_design.sh -help\" for more information)\n"
    exit 1
  fi
  if [[ ($2 == "-help" || $2 == "-h") ]]; then
    usage
  fi
}
# Script usage
# Print the help text and exit (always exits with status 1, as generated by
# the Vivado template).
usage()
{
  msg="Usage: ov5640_hdmi_design.sh [-help]\n\
Usage: ov5640_hdmi_design.sh [-lib_map_path]\n\
Usage: ov5640_hdmi_design.sh [-reset_run]\n\
Usage: ov5640_hdmi_design.sh [-noclean_files]\n\n\
[-help] -- Print help information for this script\n\n\
[-lib_map_path <path>] -- Compiled simulation library directory path. The simulation library is compiled\n\
using the compile_simlib tcl command. Please see 'compile_simlib -help' for more information.\n\n\
[-reset_run] -- Recreate simulator setup files and library mappings for a clean run. The generated files\n\
from the previous run will be removed. If you don't want to remove the simulator generated files, use the\n\
-noclean_files switch.\n\n\
[-noclean_files] -- Reset previous run, but do not remove simulator generated files from the previous run.\n\n"
  echo -e $msg
  exit 1
}
# Launch script
run $1 $2
|
#!/bin/sh
# Copyright (c) 2007, 2008 Rocco Rutte <pdmef@gmx.net> and others.
# License: MIT <http://www.opensource.org/licenses/mit-license.php>
# Validate we have readlink or greadlink
READLINK="readlink"
if command -v greadlink > /dev/null; then
READLINK="greadlink" # Prefer greadlink over readlink
fi
# Validate we have readlink or greadlink
if ! ${READLINK} -f "$(which "$0")" > /dev/null 2>&1 ; then
ROOT="$(dirname "$(which "$0")")"
if [ ! -f "${ROOT}/hg-fast-export.py" ] ; then
echo "hg-fast-exports requires a readlink implementation which knows" \
" how to canonicalize paths in order to be called via a symlink."
exit 1
fi
else
# Set the root dir
ROOT="$(dirname "$(${READLINK} -f "$(which "$0")")")"
fi
# Vars
REPO=""
PFX="hg2git"
SFX_MAPPING="mapping"
SFX_MARKS="marks"
SFX_HEADS="heads"
SFX_STATE="state"
GFI_OPTS=""
# Check we have python installed
if [ -z "${PYTHON}" ]; then
# $PYTHON is not set, so we try to find a working python with mercurial:
for PYTHON_CMD in python2 python python3; do
if command -v ${PYTHON_CMD} > /dev/null; then
${PYTHON_CMD} -c 'from mercurial.scmutil import revsymbol' 2> /dev/null
# shellcheck disable=SC2181
if [ $? -eq 0 ]; then
PYTHON=${PYTHON_CMD}
break
fi
fi
done
fi
# Check we have python installed
if [ -z "${PYTHON}" ]; then
echo "Could not find a python interpreter with the mercurial module >= 4.6 available. " \
"Please use the 'PYTHON' environment variable to specify the interpreter to use."
exit 1
fi
USAGE="[--quiet] [-r <repo>] [--force] [--ignore-unnamed-heads]" \
"[-m <max>] [-s] [--hgtags] [-A <file>] [-B <file>] [-T <file>]" \
" [-M <name>] [-o <name>] [--hg-hash] [-e <encoding>]"
LONG_USAGE="Import hg repository <repo> up to either tip or <max>
If <repo> is omitted, use last hg repository as obtained from state file,
GIT_DIR/${PFX}-${SFX_STATE} by default.
Note: The argument order matters.
Options:
--quiet Passed to git-fast-import(1)
-r <repo> Mercurial repository to import
--force Ignore validation errors when converting, and pass --force
to git-fast-import(1)
-m <max> Maximum revision to import
-s Enable parsing Signed-off-by lines
--hgtags Enable exporting .hgtags files
-A <file> Read author map from file
(Same as in git-svnimport(1) and git-cvsimport(1))
-B <file> Read branch map from file
-T <file> Read tags map from file
-M <name> Set the default branch name (defaults to 'master')
-n Do not perform built-in (broken in many cases) sanitizing
of branch/tag names.
-o <name> Use <name> as branch namespace to track upstream (eg 'origin')
--hg-hash Annotate commits with the hg hash as git notes in the
hg namespace.
-e <encoding> Assume commit and author strings retrieved from
Mercurial are encoded in <encoding>
--fe <filename_encoding> Assume filenames from Mercurial are encoded
in <filename_encoding>
--mappings-are-raw Assume mappings are raw <key>=<value> lines
--filter-contents <cmd> Pipe contents of each exported file through <cmd>
with <file-path> <hg-hash> <is-binary> as arguments
--plugin <plugin=init> Add a plugin with the given init string (repeatable)
--plugin-path <plugin-path> Add an additional plugin lookup path
"
case "$1" in
-h|--help)
echo "usage: $(basename "$0") ${USAGE}"
echo ""
echo "${LONG_USAGE}"
exit 0
esac
IS_BARE=$(git rev-parse --is-bare-repository) \
|| (echo "Could not find git repo" ; exit 1)
if test "z$IS_BARE" != ztrue; then
# This is not a bare repo, cd to the toplevel
TOPLEVEL=$(git rev-parse --show-toplevel) \
|| (echo "Could not find git repo toplevel" ; exit 1)
cd "${TOPLEVEL}" || exit 1
fi
GIT_DIR=$(git rev-parse --git-dir) || (echo "Could not find git repo" ; exit 1)
IGNORECASEWARN=""
IGNORECASE=$(git config core.ignoreCase)
if [ "true" = "${IGNORECASE}" ]; then
IGNORECASEWARN="true"
fi
while case "$#" in 0) break ;; esac
do
case "$1" in
-r|--r|--re|--rep|--repo)
shift
REPO="$1"
;;
--q|--qu|--qui|--quie|--quiet)
GFI_OPTS="${GFI_OPTS} --quiet"
;;
--force)
# pass --force to git-fast-import and hg-fast-export.py
GFI_OPTS="${GFI_OPTS} --force"
IGNORECASEWARN="";
break
;;
-*)
# pass any other options down to hg2git.py
break
;;
*)
break
;;
esac
shift
done
if [ -n "${IGNORECASEWARN}" ]; then
echo "Error: The option core.ignoreCase is set to true in the git"
echo "repository. This will produce empty changesets for renames that just"
echo "change the case of the file name."
echo "Use --force to skip this check or change the option with"
echo "git config core.ignoreCase false"
exit 1
fi;
# Make a backup copy of each state file
for FILE in ${SFX_STATE} ${SFX_MARKS} ${SFX_MAPPING} ${SFX_HEADS} ; do
if [ -f "${GIT_DIR}/${PFX}-${FILE}" ] ; then
cp "${GIT_DIR}/${PFX}-${FILE}" "${GIT_DIR}/${PFX}-${FILE}~"
fi
done
# for convenience: get default repo from state file
# Only fall back to the state file when no -r <repo> was given; the
# original test was inverted ([ "${REPO}" ]) and clobbered a
# user-supplied repository with the remembered one.
if [ -z "${REPO}" ] && [ -f "${GIT_DIR}/${PFX}-${SFX_STATE}" ] ; then
  REPO="$(grep '^:repo ' "${GIT_DIR}/${PFX}-${SFX_STATE}" | cut -d ' ' -f 2)"
  echo "Using last hg repository \"${REPO}\""
fi
# Look to see if we have a repo
if [ -z "${REPO}" ]; then
echo "no repo given, use -r flag"
exit 1
fi
# make sure we have a marks cache
if [ ! -f "${GIT_DIR}/${PFX}-${SFX_MARKS}" ] ; then
touch "${GIT_DIR}/${PFX}-${SFX_MARKS}"
fi
# cleanup on exit
trap 'rm -f "${GIT_DIR}/${PFX}-${SFX_MARKS}.old" "${GIT_DIR}/${PFX}-${SFX_MARKS}.tmp"' 0
_ERROR1=''
_ERROR2=''
exec 3>&1
{ read -r _ERROR1 || :; read -r _ERROR2 || :; } <<-EOT
$(
exec 4>&3 3>&1 1>&4 4>&-
{
_E1=0
# shellcheck disable=SC2097,SC2098
GIT_DIR="${GIT_DIR}" "${PYTHON}" "${ROOT}/hg-fast-export.py" \
--repo "${REPO}" \
--marks "${GIT_DIR}/${PFX}-${SFX_MARKS}" \
--mapping "${GIT_DIR}/${PFX}-${SFX_MAPPING}" \
--heads "${GIT_DIR}/${PFX}-${SFX_HEADS}" \
--status "${GIT_DIR}/${PFX}-${SFX_STATE}" \
"$@" 3>&- || _E1=$?
echo ${_E1} >&3
} | \
{
_E2=0
# shellcheck disable=SC2086
git fast-import ${GFI_OPTS} --export-marks="${GIT_DIR}/${PFX}-${SFX_MARKS}.tmp" 3>&- || _E2=$?
echo ${_E2} >&3
}
)
EOT
exec 3>&-
[ "${_ERROR1}" = 0 ] && [ "${_ERROR2}" = 0 ] || exit 1
# move recent marks cache out of the way...
if [ -f "${GIT_DIR}/${PFX}-${SFX_MARKS}" ] ; then
mv "${GIT_DIR}/${PFX}-${SFX_MARKS}" "${GIT_DIR}/${PFX}-${SFX_MARKS}.old"
else
touch "${GIT_DIR}/${PFX}-${SFX_MARKS}.old"
fi
# ...to create a new merged one
cat "${GIT_DIR}/${PFX}-${SFX_MARKS}.old" "${GIT_DIR}/${PFX}-${SFX_MARKS}.tmp" \
| uniq > "${GIT_DIR}/${PFX}-${SFX_MARKS}"
# save SHA1s of current heads for incremental imports
# and connectivity (plus sanity checking)
for HEAD in $(git branch | sed 's#^..##') ; do
ID="$(git rev-parse "refs/heads/${HEAD}")"
echo ":${HEAD} ${ID}"
done > "${GIT_DIR}/${PFX}-${SFX_HEADS}"
|
<gh_stars>1-10
/*
* Copyright 2017 - 2019 KB Kontrakt LLC - All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package auth
//go:generate mockgen -source=acl_rep.go -package=auth -destination=acl_rep_mocks.go
import (
"errors"
"github.com/kbkontrakt/hlfabric-ccdevkit/extstub"
)
type (
	// ACLRepository provides read/write access to the persisted ACL.
	ACLRepository interface {
		Get() (ACL, error)
		Save(acl ACL) error
	}

	// aclRepositoryImpl stores the ACL under a fixed state key through a
	// marshalling chaincode stub.
	aclRepositoryImpl struct {
		aclKey string
		stub extstub.MarshalStub
	}

	// aclDocument is the JSON envelope the ACL is wrapped in on the ledger.
	aclDocument struct {
		ACList ACL `json:"acl"`
	}
)

// aclDefaultKeyName is the ledger state key the ACL document lives under.
const aclDefaultKeyName = "ACL"

// Save wraps the ACL in its JSON envelope and writes it to world state.
func (rep *aclRepositoryImpl) Save(acl ACL) error {
	return rep.stub.WriteState(rep.aclKey, aclDocument{acl})
}

// Get reads the ACL document from world state. A missing document is
// reported as the error "ACL not found"; other stub errors pass through.
func (rep *aclRepositoryImpl) Get() (ACL, error) {
	var acl aclDocument
	err := rep.stub.ReadState(rep.aclKey, &acl)
	if err == extstub.ErrNotFound {
		return nil, errors.New("ACL not found")
	}
	if err != nil {
		return nil, err
	}
	return acl.ACList, nil
}

// NewACLRepositoryImpl creates default acl implementation backed by the
// given marshalling stub and the default state key.
func NewACLRepositoryImpl(stub extstub.MarshalStub) ACLRepository {
	return &aclRepositoryImpl{
		aclDefaultKeyName,
		stub,
	}
}
|
#!/bin/bash
#
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Check for duplicate includes.
# Guard against accidental introduction of new Boost dependencies.
# Check includes: Check for duplicate includes. Enforce bracket syntax includes.
export LC_ALL=C
IGNORE_REGEXP="/(leveldb|secp256k1|univalue)/"
# Print all tracked files under src/ with extension $1, excluding the
# vendored subtrees matched by IGNORE_REGEXP.
filter_suffix() {
    git ls-files | grep -E "^src/.*\.${1}"'$' | grep -Ev "${IGNORE_REGEXP}"
}
EXIT_CODE=0
# Flag any header or implementation file that lists the same #include
# twice. The two original loops were identical except for the suffix and
# variable names, so they are folded into one loop over both suffixes;
# the reported messages are unchanged.
for SUFFIX in h cpp; do
    for SOURCE_FILE in $(filter_suffix "${SUFFIX}"); do
        DUPLICATE_INCLUDES=$(grep -E "^#include " < "${SOURCE_FILE}" | sort | uniq -d)
        if [[ ${DUPLICATE_INCLUDES} != "" ]]; then
            echo "Duplicate include(s) in ${SOURCE_FILE}:"
            echo "${DUPLICATE_INCLUDES}"
            echo
            EXIT_CODE=1
        fi
    done
done
INCLUDED_CPP_FILES=$(git grep -E "^#include [<\"][^>\"]+\.cpp[>\"]" -- "*.cpp" "*.h")
if [[ ${INCLUDED_CPP_FILES} != "" ]]; then
echo "The following files #include .cpp files:"
echo "${INCLUDED_CPP_FILES}"
echo
EXIT_CODE=1
fi
EXPECTED_BOOST_INCLUDES=(
boost/algorithm/string.hpp
boost/algorithm/string/case_conv.hpp
boost/algorithm/string/classification.hpp
boost/algorithm/string/join.hpp
boost/algorithm/string/predicate.hpp
boost/algorithm/string/replace.hpp
boost/algorithm/string/split.hpp
boost/assign/std/vector.hpp
boost/bind.hpp
boost/chrono/chrono.hpp
boost/date_time/posix_time/posix_time.hpp
boost/filesystem.hpp
boost/filesystem/detail/utf8_codecvt_facet.hpp
boost/filesystem/fstream.hpp
boost/interprocess/sync/file_lock.hpp
boost/multi_index/hashed_index.hpp
boost/multi_index/ordered_index.hpp
boost/multi_index/sequenced_index.hpp
boost/multi_index_container.hpp
boost/optional.hpp
boost/preprocessor/cat.hpp
boost/preprocessor/stringize.hpp
boost/program_options/detail/config_file.hpp
boost/scoped_array.hpp
boost/signals2/connection.hpp
boost/signals2/last_value.hpp
boost/signals2/signal.hpp
boost/test/unit_test.hpp
boost/thread.hpp
boost/thread/condition_variable.hpp
boost/thread/mutex.hpp
boost/thread/thread.hpp
boost/variant.hpp
boost/variant/apply_visitor.hpp
boost/variant/static_visitor.hpp
)
for BOOST_INCLUDE in $(git grep '^#include <boost/' -- "*.cpp" "*.h" | cut -f2 -d: | cut -f2 -d'<' | cut -f1 -d'>' | sort -u); do
IS_EXPECTED_INCLUDE=0
for EXPECTED_BOOST_INCLUDE in "${EXPECTED_BOOST_INCLUDES[@]}"; do
if [[ "${BOOST_INCLUDE}" == "${EXPECTED_BOOST_INCLUDE}" ]]; then
IS_EXPECTED_INCLUDE=1
break
fi
done
if [[ ${IS_EXPECTED_INCLUDE} == 0 ]]; then
EXIT_CODE=1
echo "A new Boost dependency in the form of \"${BOOST_INCLUDE}\" appears to have been introduced:"
git grep "${BOOST_INCLUDE}" -- "*.cpp" "*.h"
echo
fi
done
for EXPECTED_BOOST_INCLUDE in "${EXPECTED_BOOST_INCLUDES[@]}"; do
if ! git grep -q "^#include <${EXPECTED_BOOST_INCLUDE}>" -- "*.cpp" "*.h"; then
echo "Good job! The Boost dependency \"${EXPECTED_BOOST_INCLUDE}\" is no longer used."
echo "Please remove it from EXPECTED_BOOST_INCLUDES in $0"
echo "to make sure this dependency is not accidentally reintroduced."
echo
EXIT_CODE=1
fi
done
QUOTE_SYNTAX_INCLUDES=$(git grep '^#include "' -- "*.cpp" "*.h" | grep -Ev "${IGNORE_REGEXP}")
if [[ ${QUOTE_SYNTAX_INCLUDES} != "" ]]; then
echo "Please use bracket syntax includes (\"#include <foo.h>\") instead of quote syntax includes:"
echo "${QUOTE_SYNTAX_INCLUDES}"
echo
EXIT_CODE=1
fi
exit ${EXIT_CODE}
|
import tensorflow as tf
class GanTrainer:
    """Minimal GAN training harness (TensorFlow 2 eager / GradientTape)."""

    def __init__(self, generator, discriminator, config):
        # config is expected to expose learning_rate, batch_size and
        # noise_dim (plus epochs for the caller's outer loop) --
        # TODO confirm against the config object actually passed in.
        self.generator = generator
        self.discriminator = discriminator
        self.config = config
        # NOTE(review): a single Adam instance is shared by both
        # apply_gradients calls in train_step; GANs conventionally use a
        # separate optimizer per network so the moment estimates are not
        # mixed across variable sets -- confirm this sharing is intended.
        self.optimizer = tf.keras.optimizers.Adam(config.learning_rate)

    def train_step(self, real_images):
        """Run one adversarial update on a batch of real images."""
        # Fresh latent noise for the generator on every step.
        noise = tf.random.normal([self.config.batch_size, self.config.noise_dim])
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            generated_images = self.generator(noise, training=True)
            real_output = self.discriminator(real_images, training=True)
            fake_output = self.discriminator(generated_images, training=True)
            gen_loss = self.generator_loss(fake_output)
            disc_loss = self.discriminator_loss(real_output, fake_output)
        gradients_of_generator = gen_tape.gradient(gen_loss, self.generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, self.discriminator.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients_of_generator, self.generator.trainable_variables))
        self.optimizer.apply_gradients(zip(gradients_of_discriminator, self.discriminator.trainable_variables))

    def generator_loss(self, fake_output):
        # Generator wants the discriminator to label fakes as real (ones).
        return tf.reduce_mean(tf.keras.losses.BinaryCrossentropy(from_logits=True)(tf.ones_like(fake_output), fake_output))

    def discriminator_loss(self, real_output, fake_output):
        # Discriminator wants real -> 1 and fake -> 0; total is the sum.
        real_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)(tf.ones_like(real_output), real_output)
        fake_loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)(tf.zeros_like(fake_output), fake_output)
        return real_loss + fake_loss
def main():
    """Entry point: wire up the GAN components and run the training loop.

    NOTE(review): the original body assigned bare comments
    (``generator = # Instantiate the generator``), which is a syntax
    error; the guarded placeholders below keep the module importable
    while preserving the intended wiring.
    """
    # Create instances of the generator, discriminator, and configuration.
    generator = None  # TODO: instantiate the generator model
    discriminator = None  # TODO: instantiate the discriminator model
    config = None  # TODO: instantiate the configuration object
    dataset = None  # TODO: load and preprocess the training dataset
    if generator is None or discriminator is None or config is None or dataset is None:
        raise NotImplementedError(
            "generator, discriminator, config and dataset must be provided")

    # Initialize the GanTrainer and start the training process.
    trainer = GanTrainer(generator, discriminator, config)
    for epoch in range(config.epochs):
        for batch in dataset:
            trainer.train_step(batch)
if __name__ == "__main__":
main() |
def parse_config_file(file_path: str, key: str) -> str:
    """Return the value for ``key`` in a simple ``key = value`` config file.

    Lines may carry ``#`` comments (full-line or inline), which are
    stripped before parsing. The first matching key wins.

    Args:
        file_path: Path to the configuration file.
        key: Name of the setting to look up.

    Returns:
        The stripped value, or the sentinel string ``"Key not found"``
        when the key is absent (kept for backward compatibility).
    """
    with open(file_path, 'r') as file:
        for line in file:
            line = line.split('#')[0].strip()  # Remove comments and leading/trailing whitespaces
            if line and '=' in line:
                # Split on the FIRST '=' only, so values may themselves
                # contain '='; the original bare split() raised
                # ValueError on such lines.
                k, v = line.split('=', 1)
                if k.strip() == key:
                    return v.strip()
    return "Key not found"
public class DataProcessor
{
    /// <summary>
    /// Dispatches on the result code of a data-store sync and reports the
    /// outcome to the console; on success, iterates the returned items.
    /// </summary>
    /// <typeparam name="T">Entity type carried by the sync result.</typeparam>
    /// <param name="syncResult">Result object returned by the sync call.</param>
    public void ProcessSyncResult<T>(DataStoreSyncResult<T> syncResult) where T : class, ISyncEntity
    {
        switch (syncResult.Code)
        {
            case DataStoreSyncCode.Success:
                Console.WriteLine("Synchronization successful. Processing items...");
                // Process the items
                foreach (var item in syncResult.Items)
                {
                    // Process each item as required
                }
                break;
            case DataStoreSyncCode.NotAuthenticated:
                Console.WriteLine("Authentication error: User not authenticated.");
                break;
            case DataStoreSyncCode.RequiresResignin:
                Console.WriteLine("User needs to re-sign in for synchronization.");
                break;
            case DataStoreSyncCode.ErrorInServer:
                Console.WriteLine("Server error occurred during synchronization.");
                break;
            case DataStoreSyncCode.Unknown:
                Console.WriteLine("Unknown error occurred during synchronization.");
                break;
            default:
                // Future-proofing: surface any codes added to the enum later.
                Console.WriteLine("Unrecognized synchronization code: " + syncResult.Code);
                break;
        }
    }
}
# variants
export SPACK_SPEC="%gcc+mpi+python+babelflow+fides+adios2+dray+mfem"
# constraints
export SPACK_SPEC="${SPACK_SPEC} ^conduit@develop ^vtk-h@develop ^dray@develop"
# config
export SPACK_CONFIG="scripts/uberenv_configs/spack_configs/configs/alpinedav/ubuntu_18_devel/"
# Build third-party libraries with uberenv/spack into /uberenv_libs
# (-k skips SSL certificate checks inside the build container).
cd ascent && python scripts/uberenv/uberenv.py \
-k \
--spec="${SPACK_SPEC}" \
--spack-config-dir="${SPACK_CONFIG}" \
--prefix="/uberenv_libs"
# cleanup the spack build stuff to free up space
/uberenv_libs/spack/bin/spack clean --all
# change perms
chmod -R a+rX /uberenv_libs
|
// Doxygen-generated search index fragment (symbol -> HTML anchor).
// Auto-generated file; do not edit by hand.
var searchData=
[
  ['regex_5fenv_5fexport_0',['REGEX_ENV_EXPORT',['../namespaceproxen_1_1sysproxy.html#abde17ed1f86dd19a702595276a87bea1',1,'proxen::sysproxy']]],
  ['regex_5fproxy_5fexport_1',['REGEX_PROXY_EXPORT',['../namespaceproxen_1_1sysproxy.html#a479ad94f4a6054ebd432bf4c5eb89417',1,'proxen::sysproxy']]],
  ['rsync_5fproxy_2',['rsync_proxy',['../classproxen_1_1sysproxy_1_1_proxy.html#a881c6e42b8511243f82155549a850968',1,'proxen::sysproxy::Proxy']]]
];
|
<filename>src/components/header.js
import React from 'react';
import styled, { css } from 'react-emotion';
import { TextLink, Spacer, border } from './styled';
import { Router } from '@reach/router';
/** Flex header bar styled with the shared border token. */
const Head = styled('header')`
  font-family: Avenir;
  border-bottom: ${border};
  padding: 0.5em;
  display: flex;
  align-items: center;
`;

/** Decorative left angle bracket glyph used as the back indicator. */
export const Arrow = () => (
  <div
    className={css`
      font-weight: 900;
      font-size: 2em;
      margin: 1em;
    `}
  >
    ⟨
  </div>
);

/**
 * Back link: from a page, go up to its week; from a week, go home.
 * `week`/`page` are route params injected by @reach/router.
 */
const Back = ({ week, page }) => (
  <TextLink to={page ? `/${week}` : `/`}>
    <Arrow />
  </TextLink>
);

/** Placeholder that reserves the arrow's space on the home route. */
const FakeBack = () => (
  <Spacer>
    <Arrow />
  </Spacer>
);

/** Site header: route-aware back control plus the site title link. */
const Header = ({ siteTitle }) => (
  <Head>
    <Router>
      <Back path="/:week/:page" />
      <Back path="/:week" />
      <FakeBack default />
    </Router>
    <h1>
      <TextLink to="/">{siteTitle}</TextLink>
    </h1>
  </Head>
);

export default Header;
|
<filename>src/stl-util.ts<gh_stars>1-10
import * as fs from 'fs'
import * as path from 'path'
import { Geom3 } from "@jscad/modeling/src/geometries/types";
const stlSerializer = require('@jscad/stl-serializer')
/**
* Save a 3D geometery as an STL
* @param {Geom3} shape The 3D geometery to save as an STL
* @param {string} outDir The directory to save the file to
* @param {string} outFile The name of the file
*/
export function save_stl(shape: Geom3, outDir: string, outFile: string)
{
const rawData:[] = stlSerializer.serialize({binary: false}, shape)
fs.mkdirSync(outDir, {recursive: true});
rawData.forEach(element => {
fs.writeFileSync(path.join(outDir, outFile + '.stl'), element, 'utf-8')
});
} |
<filename>artifacts/spring-workshop/dto/src/main/java/com/vmware/spring/workshop/dto/banking/BranchDTO.java<gh_stars>1-10
package com.vmware.spring.workshop.dto.banking;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
import com.vmware.spring.workshop.dto.AbstractNamedIdentifiedDTO;
import com.vmware.spring.workshop.dto.LocatedDTO;
/**
* @author lgoldstein
*/
@XmlRootElement(name="branch")
@XmlType(name="branch")
@XmlAccessorType(XmlAccessType.PUBLIC_MEMBER)
public class BranchDTO extends AbstractNamedIdentifiedDTO implements LocatedDTO, Cloneable {
    private static final long serialVersionUID = 2940429393498337775L;

    // Free-form location string (serialized as the <location> element).
    private String _location;
    // Numeric branch identifier (serialized as the branchCode attribute).
    private int _branchCode;
    // Owning bank's ID (serialized as the bankId attribute).
    private Long _bankId;

    public BranchDTO() {
        super();
    }

    /** @return the ID of the bank this branch belongs to */
    @XmlAttribute(name="bankId",required=true)
    public Long getBankId () {
        return _bankId;
    }

    public void setBankId(Long bankId) {
        _bankId = bankId;
    }

    /** @return the numeric code identifying this branch */
    @XmlAttribute(name="branchCode",required=true)
    public int getBranchCode() {
        return _branchCode;
    }

    public void setBranchCode(int branchCode) {
        _branchCode = branchCode;
    }

    /** @return this branch's location (required, non-nillable in XML) */
    @XmlElement(name="location",required=true,nillable=false)
    public String getLocation() {
        return _location;
    }

    @Override
    public void setLocation(String location) {
        _location = location;
    }

    /**
     * Shallow copy via {@link Object#clone()}; the checked
     * CloneNotSupportedException is wrapped in a RuntimeException since
     * this class implements Cloneable and cloning is expected to succeed.
     */
    @Override
    public BranchDTO clone() {
        try {
            return getClass().cast(super.clone());
        } catch(CloneNotSupportedException e) {
            throw new RuntimeException("Failed to clone " + this + ": " + e.getMessage(), e);
        }
    }
}
|
def most_frequent(my_list):
    """Return the element of ``my_list`` that occurs most often.

    Ties are broken in favour of the element appearing first in the
    list; an empty list yields the empty string. Both behaviours match
    the original implementation.

    Args:
        my_list: Sequence of hashable items.

    Returns:
        The most frequent item, or "" for an empty input.
    """
    # Single O(n) counting pass replaces the original O(n^2)
    # list.count() call inside the loop.
    counts = {}
    for item in my_list:
        counts[item] = counts.get(item, 0) + 1
    max_count = 0
    frequent_string = ""
    for item in my_list:
        if counts[item] > max_count:
            max_count = counts[item]
            frequent_string = item
    return frequent_string
/**
* Copyright 2013 Illumina
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.illumina.basespace.entity;
import java.net.URI;
import java.util.Date;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.illumina.basespace.util.DateDeserializer;
/**
* The User resource is provided so that an app can get basic information about the user that is using the application
* @author bking
*
*/
public class User extends UserCompact
{
    // Serialized BaseSpace property: the user's e-mail address.
    @JsonProperty("Email")
    private String email;

    /**
     * Current user's email address
     * @return the email
     */
    public String getEmail()
    {
        return email;
    }

    /** Framework-use setter invoked during JSON deserialization. */
    protected void setEmail(String email)
    {
        this.email = email;
    }

    @JsonDeserialize(using=DateDeserializer.class)
    @JsonProperty("DateLastActive")
    private Date dateLastActive;

    /**
     * Get the date this user was last active
     * @return the last active date
     */
    public Date getDateLastActive()
    {
        return dateLastActive;
    }

    /** Framework-use setter invoked during JSON deserialization. */
    protected void setDateLastActive(Date dateLastActive)
    {
        this.dateLastActive = dateLastActive;
    }

    @JsonProperty("HrefRuns")
    private URI hrefRuns;

    /**
     * The runs that the user owns or has access to
     * @return the runs URI
     */
    public URI getHrefRuns()
    {
        return hrefRuns;
    }

    /** Framework-use setter invoked during JSON deserialization. */
    protected void setHrefRuns(URI hrefRuns)
    {
        this.hrefRuns = hrefRuns;
    }

    @JsonProperty("HrefProjects")
    private URI hrefProjects;

    /**
     * The projects that the user owns or has access to
     * @return the projects URI
     */
    public URI getHrefProjects()
    {
        return hrefProjects;
    }

    /** Framework-use setter invoked during JSON deserialization. */
    protected void setHrefProjects(URI hrefProjects)
    {
        this.hrefProjects = hrefProjects;
    }

    @JsonDeserialize(using=DateDeserializer.class)
    @JsonProperty("DateCreated")
    private Date dateCreated;

    /**
     * Get the date this entity was created
     * @return the create date
     */
    public Date getDateCreated()
    {
        return dateCreated;
    }

    /** Framework-use setter invoked during JSON deserialization. */
    protected void setDateCreated(Date dateCreated)
    {
        this.dateCreated = dateCreated;
    }
}
|
#!/bin/bash
if [ "$1" != "" ] && [ "$1" = "-h" ]; then
echo "Shipyard Deploy uses the following environment variables:"
echo " ACTION: this is the action to use (deploy, upgrade, node, remove)"
echo " DISCOVERY: discovery system used by Swarm (only if using 'node' action)"
echo " IMAGE: this overrides the default Shipyard image"
echo " PREFIX: prefix for container names"
echo " SHIPYARD_ARGS: these are passed to the Shipyard controller container as controller args"
echo " TLS_CERT_PATH: path to certs to enable TLS for Shipyard"
echo " PORT: specify the listen port for the controller (default: 8080)"
echo " IP: specify the address at which the controller or node will be available (default: eth0 ip)"
echo " PROXY_PORT: port to run docker proxy (default: 2375)"
exit 1
fi
if [ -z "`which docker`" ]; then
echo "You must have the Docker CLI installed on your \$PATH"
echo " See http://docs.docker.com for details"
exit 1
fi
ACTION=${ACTION:-deploy}
IMAGE=${IMAGE:-shipyard/shipyard:latest}
PREFIX=${PREFIX:-shipyard}
SHIPYARD_ARGS=${SHIPYARD_ARGS:-""}
TLS_CERT_PATH=${TLS_CERT_PATH:-}
CERT_PATH="/etc/shipyard"
PROXY_PORT=${PROXY_PORT:-2375}
SWARM_PORT=3375
SHIPYARD_PROTOCOL=http
SHIPYARD_PORT=${PORT:-8080}
SHIPYARD_IP=${IP}
DISCOVERY_BACKEND=etcd
DISCOVERY_PORT=4001
DISCOVERY_PEER_PORT=7001
ENABLE_TLS=0
CERT_FINGERPRINT=""
# NOTE: this was declared as LOCAL_CA_CERT, but check_certs() assigns
# and reads LOCAL_SSL_CA_CERT; declare the name that is actually used
# so the default is a defined empty string rather than an unset variable.
LOCAL_SSL_CA_CERT=""
LOCAL_SSL_CERT=""
LOCAL_SSL_KEY=""
LOCAL_SSL_CLIENT_CERT=""
LOCAL_SSL_CLIENT_KEY=""
SSL_CA_CERT=""
SSL_CERT=""
SSL_KEY=""
SSL_CLIENT_CERT=""
SSL_CLIENT_KEY=""
show_cert_help() {
echo "To use TLS in Shipyard, you must have existing certificates."
echo "The certs must be named ca.pem, server.pem, server-key.pem, cert.pem and key.pem"
echo "If you need to generate certificates, see https://github.com/ehazlett/certm for examples."
}
check_certs() {
if [ -z "$TLS_CERT_PATH" ]; then
return
fi
if [ ! -e $TLS_CERT_PATH ]; then
echo "Error: unable to find certificates in $TLS_CERT_PATH"
show_cert_help
exit 1
fi
if [ "$PROXY_PORT" = "2375" ]; then
PROXY_PORT=2376
fi
SWARM_PORT=3376
SHIPYARD_PROTOCOL=https
LOCAL_SSL_CA_CERT="$TLS_CERT_PATH/ca.pem"
LOCAL_SSL_CERT="$TLS_CERT_PATH/server.pem"
LOCAL_SSL_KEY="$TLS_CERT_PATH/server-key.pem"
LOCAL_SSL_CLIENT_CERT="$TLS_CERT_PATH/cert.pem"
LOCAL_SSL_CLIENT_KEY="$TLS_CERT_PATH/key.pem"
SSL_CA_CERT="$CERT_PATH/ca.pem"
SSL_CERT="$CERT_PATH/server.pem"
SSL_KEY="$CERT_PATH/server-key.pem"
SSL_CLIENT_CERT="$CERT_PATH/cert.pem"
SSL_CLIENT_KEY="$CERT_PATH/key.pem"
CERT_FINGERPRINT=$(openssl x509 -noout -in $LOCAL_SSL_CERT -fingerprint -sha256 | awk -F= '{print $2;}')
if [ ! -e $LOCAL_SSL_CA_CERT ] || [ ! -e $LOCAL_SSL_CERT ] || [ ! -e $LOCAL_SSL_KEY ] || [ ! -e $LOCAL_SSL_CLIENT_CERT ] || [ ! -e $LOCAL_SSL_CLIENT_KEY ]; then
echo "Error: unable to find certificates"
show_cert_help
exit 1
fi
ENABLE_TLS=1
}
# container functions
start_certs() {
ID=$(docker run \
-ti \
-d \
--restart=always \
--name $PREFIX-certs \
-v $CERT_PATH \
alpine \
sh)
if [ $ENABLE_TLS = 1 ]; then
docker cp $LOCAL_SSL_CA_CERT $PREFIX-certs:$SSL_CA_CERT
docker cp $LOCAL_SSL_CERT $PREFIX-certs:$SSL_CERT
docker cp $LOCAL_SSL_KEY $PREFIX-certs:$SSL_KEY
docker cp $LOCAL_SSL_CLIENT_CERT $PREFIX-certs:$SSL_CLIENT_CERT
docker cp $LOCAL_SSL_CLIENT_KEY $PREFIX-certs:$SSL_CLIENT_KEY
fi
}
remove_certs() {
docker rm -fv $PREFIX-certs > /dev/null 2>&1
}
get_ip() {
if [ -z "$SHIPYARD_IP" ]; then
SHIPYARD_IP=`docker run --rm --net=host alpine ip route get 8.8.8.8 | awk '{ print $7; }'`
fi
}
start_discovery() {
get_ip
ID=$(docker run \
-ti \
-d \
-p 4001:4001 \
-p 7001:7001 \
--restart=always \
--name $PREFIX-discovery \
microbox/etcd:latest -addr $SHIPYARD_IP:$DISCOVERY_PORT -peer-addr $SHIPYARD_IP:$DISCOVERY_PEER_PORT)
}
remove_discovery() {
docker rm -fv $PREFIX-discovery > /dev/null 2>&1
}
start_rethinkdb() {
ID=$(docker run \
-ti \
-d \
--restart=always \
--name $PREFIX-rethinkdb \
rethinkdb)
}
remove_rethinkdb() {
docker rm -fv $PREFIX-rethinkdb > /dev/null 2>&1
}
start_proxy() {
TLS_OPTS=""
if [ $ENABLE_TLS = 1 ]; then
TLS_OPTS="-e SSL_CA=$SSL_CA_CERT -e SSL_CERT=$SSL_CERT -e SSL_KEY=$SSL_KEY -e SSL_SKIP_VERIFY=1"
fi
# Note: we add SSL_SKIP_VERIFY=1 to skip verification of the client
# certificate in the proxy image. this will pass it to swarm that
# does verify. this helps with performance and avoids certificate issues
# when running through the proxy. ultimately if the cert is invalid
# swarm will fail to return.
ID=$(docker run \
-ti \
-d \
-p $PROXY_PORT:$PROXY_PORT \
--hostname=$HOSTNAME \
--restart=always \
--name $PREFIX-proxy \
-v /var/run/docker.sock:/var/run/docker.sock \
-e PORT=$PROXY_PORT \
--volumes-from=$PREFIX-certs $TLS_OPTS\
shipyard/docker-proxy:latest)
}
remove_proxy() {
docker rm -fv $PREFIX-proxy > /dev/null 2>&1
}
start_swarm_manager() {
get_ip
TLS_OPTS=""
if [ $ENABLE_TLS = 1 ]; then
TLS_OPTS="--tlsverify --tlscacert=$SSL_CA_CERT --tlscert=$SSL_CERT --tlskey=$SSL_KEY"
fi
EXTRA_RUN_OPTS=""
if [ -z "$DISCOVERY" ]; then
DISCOVERY="$DISCOVERY_BACKEND://discovery:$DISCOVERY_PORT"
EXTRA_RUN_OPTS="--link $PREFIX-discovery:discovery"
fi
ID=$(docker run \
-ti \
-d \
--restart=always \
--name $PREFIX-swarm-manager \
--volumes-from=$PREFIX-certs $EXTRA_RUN_OPTS \
swarm:latest \
m --replication --addr $SHIPYARD_IP:$SWARM_PORT --host tcp://0.0.0.0:$SWARM_PORT $TLS_OPTS $DISCOVERY)
}
remove_swarm_manager() {
docker rm -fv $PREFIX-swarm-manager > /dev/null 2>&1
}
# Join this host to the swarm as an agent, advertising the local proxy.
# NOTE(review): DISCOVERY and EXTRA_RUN_OPTS are intentionally NOT reset
# here. In the "deploy" path, start_swarm_manager has already set
# DISCOVERY to the local etcd URL and EXTRA_RUN_OPTS to the --link that
# makes the "discovery" hostname resolvable, and this function depends
# on those leaked values -- confirm before "cleaning this up".
start_swarm_agent() {
    get_ip
    if [ -z "$DISCOVERY" ]; then
        DISCOVERY="$DISCOVERY_BACKEND://discovery:$DISCOVERY_PORT"
        EXTRA_RUN_OPTS="--link $PREFIX-discovery:discovery"
    fi
    ID=$(docker run \
        -ti \
        -d \
        --restart=always \
        --name $PREFIX-swarm-agent $EXTRA_RUN_OPTS \
        swarm:latest \
        j --addr $SHIPYARD_IP:$PROXY_PORT $DISCOVERY)
}
remove_swarm_agent() {
docker rm -fv $PREFIX-swarm-agent > /dev/null 2>&1
}
start_controller() {
#-v $CERT_PATH:/etc/docker:ro \
TLS_OPTS=""
if [ $ENABLE_TLS = 1 ]; then
TLS_OPTS="--tls-ca-cert $SSL_CA_CERT --tls-cert=$SSL_CERT --tls-key=$SSL_KEY --shipyard-tls-ca-cert=$SSL_CA_CERT --shipyard-tls-cert=$SSL_CERT --shipyard-tls-key=$SSL_KEY"
fi
ID=$(docker run \
-ti \
-d \
--restart=always \
--name $PREFIX-controller \
--link $PREFIX-rethinkdb:rethinkdb \
--link $PREFIX-swarm-manager:swarm \
-p $SHIPYARD_PORT:$SHIPYARD_PORT \
--volumes-from=$PREFIX-certs \
$IMAGE \
--debug \
server \
--listen :$SHIPYARD_PORT \
-d tcp://swarm:$SWARM_PORT $TLS_OPTS $SHIPYARD_ARGS)
}
# Poll the controller endpoint until it answers an HTTP HEAD request.
# $1 - host/IP to probe, $2 - port. Blocks until success (1s between tries).
wait_for_available() {
    set +e                      # curl failures drive the retry loop
    IP=$1
    PORT=$2
    echo Waiting for Shipyard on $IP:$PORT
    docker pull ehazlett/curl > /dev/null 2>&1
    TLS_OPTS=""
    if [ $ENABLE_TLS = 1 ]; then
        TLS_OPTS="-k"           # self-signed certs: skip verification
    fi
    until $(docker run --rm ehazlett/curl --output /dev/null --connect-timeout 1 --silent --head --fail $TLS_OPTS $SHIPYARD_PROTOCOL://$IP:$PORT/ > /dev/null 2>&1); do
        printf '.'
        sleep 1
    done
    printf '\n'
}
remove_controller() {
docker rm -fv $PREFIX-controller > /dev/null 2>&1
}
if [ "$ACTION" = "deploy" ]; then
set -e
check_certs
get_ip
echo "Deploying Shipyard"
echo " -> Starting Database"
start_rethinkdb
echo " -> Starting Discovery"
start_discovery
echo " -> Starting Cert Volume"
start_certs
echo " -> Starting Proxy"
start_proxy
echo " -> Starting Swarm Manager"
start_swarm_manager
echo " -> Starting Swarm Agent"
start_swarm_agent
echo " -> Starting Controller"
start_controller
wait_for_available $SHIPYARD_IP $SHIPYARD_PORT
echo "Shipyard available at $SHIPYARD_PROTOCOL://$SHIPYARD_IP:$SHIPYARD_PORT"
if [ $ENABLE_TLS = 1 ] && [ ! -z "$CERT_FINGERPRINT" ]; then
echo "SSL SHA-256 Fingerprint: $CERT_FINGERPRINT"
fi
echo "Username: admin Password: shipyard"
elif [ "$ACTION" = "node" ]; then
set -e
if [ -z "$DISCOVERY" ]; then
echo "You must set the DISCOVERY environment variable"
echo "with the discovery system used with Swarm"
exit 1
fi
check_certs
echo "Adding Node"
echo " -> Starting Cert Volume"
start_certs
echo " -> Starting Proxy"
start_proxy
echo " -> Starting Swarm Manager"
start_swarm_manager $DISCOVERY
echo " -> Starting Swarm Agent"
start_swarm_agent
echo "Node added to Swarm: $SHIPYARD_IP"
elif [ "$ACTION" = "upgrade" ]; then
set -e
check_certs
get_ip
echo "Upgrading Shipyard"
echo " -> Pulling $IMAGE"
docker pull $IMAGE
echo " -> Upgrading Controller"
remove_controller
start_controller
wait_for_available $SHIPYARD_IP $SHIPYARD_PORT
echo "Shipyard controller updated"
elif [ "$ACTION" = "remove" ]; then
# ignore errors
set +e
echo "Removing Shipyard"
echo " -> Removing Database"
remove_rethinkdb
echo " -> Removing Discovery"
remove_discovery
echo " -> Removing Cert Volume"
remove_certs
echo " -> Removing Proxy"
remove_proxy
echo " -> Removing Swarm Agent"
remove_swarm_agent
echo " -> Removing Swarm Manager"
remove_swarm_manager
echo " -> Removing Controller"
remove_controller
echo "Done"
else
echo "Unknown action $ACTION"
exit 1
fi
|
from importer import *
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir_pcl', type=str, required=True,
help='Path to shapenet pointclouds')
parser.add_argument('--exp', type=str, required=True,
help='Name of Experiment')
parser.add_argument('--gpu', type=str, required=True,
help='GPU to use')
parser.add_argument('--category', type=str, required=True,
help='Category to train on : \
["all", "airplane", "bench", "cabinet", "car", "chair", "lamp", \
"monitor", "rifle", "sofa", "speaker", "table", "telephone", "vessel"]')
parser.add_argument('--bottleneck', type=int, required=True, default=512,
help='latent space size')
parser.add_argument('--batch_size', type=int, default=32,
help='Batch Size during training')
parser.add_argument('--lr', type=float, default=0.0005,
help='Learning Rate')
parser.add_argument('--max_epoch', type=int, default=500,
help='max num of epoch')
parser.add_argument('--bn_decoder', action='store_true',
help='Supply this parameter if you want batch norm in the decoder, otherwise ignore')
parser.add_argument('--print_n', type=int, default=100,
help='print output to terminal every n iterations')
FLAGS = parser.parse_args()
print '-='*50
print FLAGS
print '-='*50
os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.gpu)
BATCH_SIZE = FLAGS.batch_size # Batch size for training
NUM_POINTS = 2048 # Number of predicted points
GT_PCL_SIZE = 16384 # Number of points in GT point cloud
def fetch_batch(models, batch_num, batch_size):
    '''
    Load one minibatch of ground-truth point clouds from disk.

    Input:
        models: list of shapenet model paths (relative to FLAGS.data_dir_pcl)
        batch_num: index of this minibatch within the epoch
        batch_size: number of models per minibatch
    Returns:
        batch_gt: float array of shape (batch_size, 2048, 3)
    '''
    start = batch_num * batch_size
    selected = models[start:start + batch_size]
    # Each model directory holds a pre-sampled 2K-point ground-truth cloud.
    clouds = [np.load(join(FLAGS.data_dir_pcl, m, 'pointcloud_2048.npy'))
              for m in selected]
    return np.array(clouds)
def get_epoch_loss(val_models):
    '''
    Input:
        val_models: list of absolute path to models in validation set
    Returns:
        val_chamfer: chamfer distance calculated on scaled prediction and gt
        val_forward: forward distance calculated on scaled prediction and gt
        val_backward: backward distance calculated on scaled prediction and gt
        summ: merged tf summary evaluated on the accumulated val stats
    Description:
        Calculate val epoch metrics

    NOTE: relies on module-level globals created in __main__ (sess, loss,
    chamfer_distance_scaled, dists_forward_scaled, dists_backward_scaled,
    ph_summary, ph_dists_*, merged_summ, pcl_gt, BATCH_SIZE).
    '''
    # Switch batch-norm layers to inference mode for evaluation.
    tflearn.is_training(False, session=sess)
    batches = len(val_models)/BATCH_SIZE  # Python 2 integer division
    val_stats = {}
    val_stats = reset_stats(ph_summary, val_stats)
    for b in xrange(batches):
        batch_gt = fetch_batch(val_models, b, BATCH_SIZE)
        runlist = [loss, chamfer_distance_scaled, dists_forward_scaled, dists_backward_scaled]
        L,C,F,B = sess.run(runlist, feed_dict={pcl_gt:batch_gt})
        _summary_losses = [L, C, F, B]
        # Accumulate a running mean over the epoch (each batch weighted 1/batches).
        val_stats = update_stats(ph_summary, _summary_losses, val_stats, batches)
    summ = sess.run(merged_summ, feed_dict=val_stats)
    return val_stats[ph_dists_chamfer], val_stats[ph_dists_forward], val_stats[ph_dists_backward], summ
if __name__ == '__main__':

    # Create a folder for experiments and copy the training file
    create_folder(FLAGS.exp)
    train_filename = basename(__file__)
    os.system('cp %s %s'%(train_filename, FLAGS.exp))
    with open(join(FLAGS.exp, 'settings.txt'), 'w') as f:
        f.write(str(FLAGS)+'\n')

    # Create Placeholders
    pcl_gt = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINTS, 3))

    # Generate Prediction: point-cloud autoencoder (conv encoder -> fc decoder)
    bneck_size = FLAGS.bottleneck
    with tf.variable_scope('pointnet_ae') as scope:
        z = encoder_with_convs_and_symmetry(in_signal=pcl_gt, n_filters=[64,128,128,256,bneck_size],
            filter_sizes=[1],
            strides=[1],
            b_norm=True,
            verbose=True,
            scope=scope
            )
        out = decoder_with_fc_only(z, layer_sizes=[256,256,np.prod([NUM_POINTS, 3])],
            b_norm=FLAGS.bn_decoder,
            b_norm_finish=False,
            verbose=True,
            scope=scope
            )
    out = tf.reshape(out, (BATCH_SIZE, NUM_POINTS, 3))

    # Scale output and gt for val losses
    pcl_gt_scaled, out_scaled = scale(pcl_gt, out)

    # Calculate Chamfer Metrics
    dists_forward, dists_backward, chamfer_distance = [tf.reduce_mean(metric) for metric in get_chamfer_metrics(pcl_gt, out)]

    # Calculate Chamfer Metrics on scaled prediction and GT
    dists_forward_scaled, dists_backward_scaled, chamfer_distance_scaled = [tf.reduce_mean(metric) for metric in get_chamfer_metrics(pcl_gt_scaled, out_scaled)]

    # Define Loss to optimize on
    loss = (dists_forward + dists_backward/2.0)*10000

    # Get Training Models
    train_models, val_models, _, _ = get_shapenet_models(FLAGS)
    batches = len(train_models) / BATCH_SIZE  # Python 2 integer division

    # Training Setings
    lr = FLAGS.lr
    optim = tf.train.AdamOptimizer(FLAGS.lr, beta1=0.9).minimize(loss)

    start_epoch = 0
    max_epoch = FLAGS.max_epoch

    # Define Log Directories
    snapshot_folder = join(FLAGS.exp, 'snapshots')
    best_folder = join(FLAGS.exp, 'best')
    logs_folder = join(FLAGS.exp, 'logs')

    # Define Savers
    saver = tf.train.Saver(max_to_keep=2)

    # Define Summary Placeholders (epoch statistics are fed back in for logging)
    ph_loss = tf.placeholder(tf.float32, name='loss')
    ph_dists_chamfer = tf.placeholder(tf.float32, name='dists_chamfer')
    ph_dists_forward = tf.placeholder(tf.float32, name='dists_forward')
    ph_dists_backward = tf.placeholder(tf.float32, name='dists_backward')
    ph_summary = [ph_loss, ph_dists_chamfer, ph_dists_forward, ph_dists_backward]
    merged_summ = get_summary(ph_summary)

    # Create log directories
    create_folders([snapshot_folder, logs_folder, join(snapshot_folder, 'best'), best_folder])

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        train_writer = tf.summary.FileWriter(logs_folder+'/train', sess.graph_def)
        val_writer = tf.summary.FileWriter(logs_folder+'/val', sess.graph_def)

        sess.run(tf.global_variables_initializer())

        # Load Previous checkpoint (returns 0 when starting fresh)
        start_epoch = load_previous_checkpoint(snapshot_folder, saver, sess)

        ind = 0
        best_val_loss = 10000000  # sentinel so the first validation epoch always saves "best"
        since = time.time()

        print '*'*30,'\n','Training Started !!!\n', '*'*30

        PRINT_N = FLAGS.print_n

        for i in xrange(start_epoch, max_epoch):
            random.shuffle(train_models)
            stats = {}
            stats = reset_stats(ph_summary, stats)
            iter_start = time.time()

            # Batch-norm in training mode for the epoch.
            tflearn.is_training(True, session=sess)
            for b in xrange(batches):
                global_step = i*batches + b + 1
                batch_gt = fetch_batch(train_models, b, BATCH_SIZE)
                runlist = [loss, chamfer_distance, dists_forward, dists_backward, optim]
                L, C, F, B, _ = sess.run(runlist, feed_dict={pcl_gt:batch_gt})
                _summary_losses = [L, C, F, B]

                stats = update_stats(ph_summary, _summary_losses, stats, PRINT_N)

                # Log and reset the running stats every PRINT_N steps.
                if global_step % PRINT_N == 0:
                    summ = sess.run(merged_summ, feed_dict=stats)
                    train_writer.add_summary(summ, global_step)
                    till_now = time.time() - iter_start
                    print 'Loss = {} Iter = {} Minibatch = {} Time:{:.0f}m {:.0f}s'.format(
                        stats[ph_loss], global_step, b, till_now//60, till_now%60
                    )
                    stats = reset_stats(ph_summary, stats)
                    iter_start = time.time()

            print 'Saving Model ....................'
            saver.save(sess, join(snapshot_folder, 'model'), global_step=i)
            print '..................... Model Saved'

            # Full validation pass at the end of every epoch.
            val_epoch_chamfer, val_epoch_forward, val_epoch_backward, val_summ = get_epoch_loss(val_models)
            val_writer.add_summary(val_summ, global_step)

            time_elapsed = time.time() - since

            print '-'*65 + ' EPOCH ' + str(i) + ' ' + '-'*65
            print 'Val Chamfer: {:.8f} Forward: {:.8f} Backward: {:.8f} Time:{:.0f}m {:.0f}s'.format(
                val_epoch_chamfer, val_epoch_forward, val_epoch_backward, time_elapsed//60, time_elapsed%60
            )
            print '-'*140
            print

            # Keep a separate copy of the best-so-far model (lowest val chamfer).
            if (val_epoch_chamfer < best_val_loss):
                print 'Saving Best at Epoch %d ...............'%(i)
                saver.save(sess, join(snapshot_folder, 'best', 'best'))
                os.system('cp %s %s'%(join(snapshot_folder, 'best/*'), best_folder))
                best_val_loss = val_epoch_chamfer
                print '.............................Saved Best'
|
<gh_stars>1-10
import React, { useEffect } from "react";
import Popover from "@material-ui/core/Popover";
import { useSelector, useDispatch } from "react-redux";
import Slider from "@material-ui/core/Slider";
import Switch from "@material-ui/core/Switch";
import { CompactPicker } from "react-color";
import FormControlLabel from "@material-ui/core/FormControlLabel";
import Button from "@material-ui/core/Button";
import TextField from "@material-ui/core/TextField";
import socketIOClient from "socket.io-client";
import { SOCKET_SERVER_ENDPOINT } from "../../constants/base";
import {
setBrushColor,
setTotalPage,
setCurrentPage,
} from "../../store/actions/whiteboardActions";
import SVGIcon from "../UI/SVGIcon";
export default function LeftToolBar(props) {
const [anchorEl, setAnchorEl] = React.useState(null);
const [imgURL, setImgURL] = React.useState(
"https://files.gamebanana.com/img/ico/sprays/4ea2f4dad8d6f.png"
);
const [prevColor, setPrevColor] = React.useState("#000");
const {
canvasCLEAR,
canvasUNDO,
canvasREDO,
undoDisabled,
redoDisabled,
addText,
brushRadius,
setBrushRadius,
Tools,
selectTool,
removeSelected,
copyPasteDisabled,
copy,
paste,
fillWithColor,
toggleFillWithColor,
lineColor,
setLineColor,
fillColor,
setFillColor,
addImage,
controlDisabled,
} = props;
const {
brushColor,
canvasHeight,
canvasWidth,
currentPage,
totalPage,
} = useSelector((state) => state.whiteBoard);
const dispatch = useDispatch();
const socket = socketIOClient(SOCKET_SERVER_ENDPOINT);
useEffect(() => {
socket.on("changePage", (data) => {
console.log("Change page to ", data);
dispatch(setCurrentPage(data));
});
return () => {
socket.disconnect();
};
}, []);
const socketEmitData = (data) => {
console.log("Emmiting page...", data);
socket.emit("changePage", data);
};
const handlePopoverBtnClick = (event) => {
setAnchorEl(event.currentTarget);
};
const handlePopoverBtnClose = () => {
setAnchorEl(null);
};
const isOpen = (id) => {
return anchorEl ? anchorEl.id === id : false;
};
const changeColor = (color) => {
setLineColor(color);
handlePopoverBtnClose();
};
const handleBrusSizeChange = (event, newValue) => {
setBrushRadius(newValue);
};
const handleNextPage = (e) => {
if (currentPage < totalPage) {
const page = currentPage + 1;
dispatch(setCurrentPage(page));
socketEmitData(page);
}
};
const handlePrevPage = (e) => {
if (currentPage > 1) {
const page = currentPage - 1;
dispatch(setCurrentPage(page));
socketEmitData(page);
}
};
return (
<div className="toolbar_left">
{!controlDisabled ? (
<div className="board-tools tool_container">
{/* <span className="top-right-icon">
<img alt="" src={"/icons/SS15.svg"} />
</span> */}
<button
className="board-tool"
onClick={(e) => selectTool(Tools.Select)}
>
<span className="custom-icon">
<img alt="" src={"/icons/ARROW.svg"} />
</span>
</button>
<button
className="board-tool"
title="UNDO"
onClick={(e) => canvasUNDO()}
disabled={undoDisabled}
>
<span className="custom-icon">
<img alt="" src={"/icons/UNDO.svg"} />
</span>
</button>
<button
className="board-tool"
title="REDO"
onClick={(e) => canvasREDO()}
disabled={redoDisabled}
>
<span className="custom-icon">
<img alt="" src={"/icons/REDO.svg"} />
</span>
</button>
<button
className="board-tool"
id={"color-popover-btn"}
aria-describedby={"color-popover"}
variant="contained"
onClick={handlePopoverBtnClick}
>
<span className="custom-icon">
<img alt="" src={"/icons/COLOR.svg"} />
</span>
</button>
<Popover
id={"color-popover"}
className="tool-popover"
open={isOpen("color-popover-btn")}
anchorEl={anchorEl}
onClose={handlePopoverBtnClose}
anchorOrigin={{
vertical: "center",
horizontal: "right",
}}
transformOrigin={{
vertical: "center",
horizontal: "left",
}}
>
<div className="tool-popover-container">
{/* <div className="color-container">
<span id="black" onClick={(e) => changeColor("#000")}></span>
<span id="red" onClick={(e) => changeColor("#fc0303")}></span>
<span id="blue" onClick={(e) => changeColor("#0059ff")}></span>
<span id="green" onClick={(e) => changeColor("#06bf00")}></span>
<span id="yellow" onClick={(e) => changeColor("#fcba03")}></span>
</div>
<input
type="color"
value={brushColor}
onChange={(e) => dispatch(setBrushColor(e.target.value))}
/>
<span className="color-picker-icon">
<img alt="" src={"/icons/SS27.svg"} />
</span>
<br/> */}
<label htmlFor="lineColor">Line</label>
<br />
<CompactPicker
id="lineColor"
color={lineColor}
onChange={(color) => changeColor(color.hex)}
/>
<br />
<br />
<FormControlLabel
control={
<Switch
name="fillColor"
size="small"
checked={fillWithColor}
onChange={(e) => toggleFillWithColor(e.target.checked)}
/>
}
label="Fill"
/>
<br />
<CompactPicker
color={fillColor}
onChange={(color) => setFillColor(color.hex)}
/>
</div>
</Popover>
<button
className="board-tool"
id={"pen-popover-btn"}
title={"Select Pen"}
aria-describedby={"pen-popover"}
variant="contained"
onClick={handlePopoverBtnClick}
>
<span className="custom-icon">
<img alt="" src={"/icons/PEN.svg"} />
</span>
</button>
<Popover
id={"pen-popover"}
className="tool-popover"
open={isOpen("pen-popover-btn")}
anchorEl={anchorEl}
onClose={handlePopoverBtnClose}
anchorOrigin={{
vertical: "center",
horizontal: "right",
}}
transformOrigin={{
vertical: "center",
horizontal: "left",
}}
>
<div className="tool-popover-container">
<div className="tool-item-container pen-container">
<button
title="Marker"
onClick={(e) => {
selectTool(Tools.Pencil);
setBrushRadius(4);
changeColor("#444");
}}
>
<SVGIcon filepath="/icons/PEN.svg" />
</button>
<button
title="Pen"
onClick={(e) => {
selectTool(Tools.Pencil);
setBrushRadius(1);
changeColor("#000");
}}
>
<SVGIcon filepath="/icons/SS16.svg" />
</button>
<button
title="Highlighter"
onClick={(e) => {
selectTool(Tools.Pencil);
setBrushRadius(12);
changeColor("#ccff0058");
}}
>
<SVGIcon filepath="/icons/SS17.svg" />
</button>
{/* <button>
<SVGIcon filepath="/icons/SS19.svg" />
</button> */}
</div>
{/* <div className="fixed-line-toggle-container">
<p>Fixed line width </p>
<Switch
// checked={state.checkedB}
// onChange={handleChange}
size="small"
color="primary"
name="fixedLine"
inputProps={{ "aria-label": "primary checkbox" }}
/>
</div> */}
<div className="brush-size-slider-container">
<p>Pen Size</p>
<Slider
value={brushRadius}
min={1}
max={25}
valueLabelDisplay="auto"
onChange={handleBrusSizeChange}
aria-labelledby="continuous-slider"
/>
</div>
</div>
</Popover>
<button
className="board-tool"
title="Clear All"
onClick={(e) => canvasCLEAR()}
>
<span className="custom-icon">
<img alt="" src={"/icons/SS24.svg"} />
</span>
</button>
<button
className="board-tool"
title="Remove Selected"
onClick={(e) => removeSelected()}
>
<span className="custom-icon">
<img alt="" src={"/icons/SS23.svg"} />
</span>
</button>
{/* <button
className="board-tool"
id={"eraser-popover-btn"}
title="ERASER"
aria-describedby={"eraser-popover"}
variant="contained"
onClick={handlePopoverBtnClick}
>
<span className="custom-icon">
<img alt="" src={"/icons/ERASER.svg"} />
</span>
</button>
<Popover
id={"eraser-popover"}
className="tool-popover"
open={isOpen("eraser-popover-btn")}
anchorEl={anchorEl}
onClose={handlePopoverBtnClose}
anchorOrigin={{
vertical: "center",
horizontal: "right",
}}
transformOrigin={{
vertical: "center",
horizontal: "left",
}}
>
<div className="tool-popover-container">
<div className="tool-item-container eraser-container">
<button
title="White Eraser"
onClick={(e) => {
setBrushRadius(8);
changeColor("#fff");
}}
>
<SVGIcon filepath="/icons/SS21.svg" />
</button>
<button
title="Dark Eraser"
onClick={(e) => {
setBrushRadius(8);
changeColor("#000");
}}
>
<SVGIcon filepath="/icons/SS22.svg" />
</button>
<button>
<SVGIcon filepath="/icons/SS23.svg" />
</button>
<button onClick={(e) => canvasCLEAR()}>
<SVGIcon filepath="/icons/SS24.svg" />
</button>
</div>
<div className="fixed-line-toggle-container">
<p>Fixed line width </p>
<Switch
name="fixedLine"
size="small"
// checked={fillWithColor}
// onChange={(e) => toggleFillWithColor(e.target.checked)}
/>
</div>
<div className="brush-size-slider-container">
<p>Eraser Size</p>
<Slider
value={brushRadius}
min={1}
max={25}
valueLabelDisplay="auto"
onChange={handleBrusSizeChange}
aria-labelledby="continuous-slider"
/>
</div>
</div>
</Popover> */}
<button
className="board-tool"
id={"shape-popover-btn"}
title="SHAPE"
aria-describedby={"shape-popover"}
variant="contained"
onClick={handlePopoverBtnClick}
>
<span className="custom-icon">
<img alt="" src={"/icons/SHAPE.svg"} />
</span>
</button>
<Popover
id={"shape-popover"}
className="tool-popover"
open={isOpen("shape-popover-btn")}
anchorEl={anchorEl}
onClose={handlePopoverBtnClose}
anchorOrigin={{
vertical: "center",
horizontal: "right",
}}
transformOrigin={{
vertical: "center",
horizontal: "left",
}}
>
<div className="tool-popover-container">
<div className="tool-item-container eraser-container">
<button
title="Rectangle"
onClick={(e) => selectTool(Tools.Rectangle)}
>
<SVGIcon filepath="/icons/SHAPE.svg" />
</button>
<button
title="Circle"
onClick={(e) => selectTool(Tools.Circle)}
>
<SVGIcon filepath="/icons/circle.svg" id="circle-svg-img" />
</button>
<button onClick={(e) => selectTool(Tools.Line)}>
<SVGIcon filepath="/icons/SS20.svg" />
</button>
{/*
<button>
<SVGIcon filepath="/icons/SS18.svg" />
</button> */}
</div>
<div className="fixed-line-toggle-container">
<p>Filled shape </p>
<Switch
name="filledShape"
size="small"
checked={fillWithColor}
onChange={(e) => toggleFillWithColor(e.target.checked)}
/>
</div>
<div className="brush-size-slider-container">
<p>Stroke Size</p>
<Slider
value={brushRadius}
min={1}
max={25}
valueLabelDisplay="auto"
onChange={handleBrusSizeChange}
aria-labelledby="continuous-slider"
/>
</div>
{/* <div>
<TextField
label="Image URL"
helperText="Copy/Paste an image URL"
onChange={(e) => setImgURL(e.target.value)}
value={imgURL}
/>
<br />
<Button
variant="outlined"
size="small"
onClick={(e) => addImage(imgURL)}
>
Load Image from URL
</Button>
</div> */}
</div>
</Popover>
<button
className="board-tool"
onClick={(e) => {
selectTool(Tools.Select);
addText();
}}
>
<span className="custom-icon">
<img alt="" src={"/icons/FONT.svg"} />
</span>
</button>
</div>
) : null}
{/*
<div className="tool_container scale-tool tool">
<button className="board-tool">
<span className="custom-icon">
<img alt="" src={"/icons/SS.svg"} />
</span>
</button>
<span className="scale-value"> 12 </span>
</div> */}
<div className="pages tool">
<button onClick={handlePrevPage} disabled={controlDisabled}>
<i className="fa fa-chevron-left" />
</button>
<span>
{" "}
{currentPage}/{totalPage}{" "}
</span>
<button onClick={handleNextPage} disabled={controlDisabled}>
<i className="fa fa-chevron-right" />
</button>
</div>
</div>
);
}
|
<reponame>eengineergz/Lambda<gh_stars>0
const express = require('express');
const router = express.Router();
const actionDb = require('../data/helpers/actionModel');
const projectDb = require('../data/helpers/projectModel');
//GET

/**
 * GET / — list every action.
 */
router.get('/', (req, res) => {
    actionDb.get()
        .then(actions => {
            res.json(actions)
        })
        .catch(err => {
            res
                .status(500)
                .json({
                    message: "Could not fetch the actions."
                })
        })
});

/**
 * GET /:id — fetch one action; 404 when the id is unknown
 * (actionDb.get(id) rejects for missing ids).
 */
router.get('/:id', (req, res) => {
    const {id} = req.params;
    actionDb.get(id)
        .then(action => {
            res.json(action)
        })
        .catch(err => {
            res
                .status(404)
                .json({
                    message: "That action ID does not exist"
                })
        })
});

//POST

/**
 * POST / — create an action. Requires project_id, description and notes;
 * the project_id is validated against the projects table first.
 */
router.post('/', (req, res) => {
    const action = req.body;
    if(action.project_id && action.description && action.notes){
        projectDb.get(action.project_id)
            .then(response => {
                actionDb.insert(action)
                    .then(newAction => {
                        res.json(newAction)
                    })
                    .catch(err => {
                        res
                            .status(500)
                            .json({
                                message: "There was an error adding this new action."
                            })
                    })
            })
            .catch(err=>{
                res
                    .status(404)
                    .json({
                        message: "That project ID is invalid. Please use a valid one."
                    })
            })
    } else if (action.project_id && action.description) {
        res
            .status(400)
            .json({
                message: "New actions require notes."
            })
    } else if (action.project_id && action.notes) {
        res
            .status(400)
            .json({
                message: "New actions require a description."
            })
    } else if (action.description && action.notes) {
        res
            .status(400)
            .json({
                message: "New actions require a valid project ID."
            })
    } else {
        res
            .status(400)
            .json({
                message: "New actions require a project ID, description and notes."
            })
    }
});

//DELETE

/**
 * DELETE /:id — remove an action and echo the deleted record.
 *
 * Fixes over the original version:
 * - remove() failures are now caught on the inner chain and reported as 500
 *   (previously they fell through to the outer catch and were mislabeled 404,
 *   and a second chained .catch could never fire).
 * - a falsy delete count now answers 404 instead of leaving the request
 *   hanging with no response.
 */
router.delete('/:id', (req, res) => {
    const {id} = req.params;
    actionDb.get(id)
        .then(action => {
            const theAction = action;
            actionDb.remove(id)
                .then(count => {
                    if(count){
                        res.json(theAction);
                    } else {
                        res
                            .status(404)
                            .json({
                                message: "That action ID is invalid."
                            })
                    }
                })
                .catch(err => {
                    res
                        .status(500)
                        .json({
                            message: "This action could not be deleted."
                        })
                })
        })
        .catch(err => {
            res
                .status(404)
                .json({
                    message: "That action ID is invalid."
                })
        })
});

//PUT

/**
 * PUT /:id — replace an action. Requires project_id, description and notes;
 * validates the project first, then updates and returns the fresh record.
 */
router.put('/:id', (req, res) => {
    const {id} = req.params;
    const action = req.body;
    if (action.project_id && action.description && action.notes) {
        projectDb.get(action.project_id)
            .then(response => {
                console.log("the response is:", response);
                actionDb.update(id, action)
                    .then(count => {
                        // actionDb.update resolves null when the id is unknown.
                        if (count === null) {
                            res
                                .status(404)
                                .json({
                                    message: "That action ID is invalid."
                                })
                        } else {
                            actionDb.get(id)
                                .then(action => {
                                    res.json(action)
                                })
                        }
                    })
                    .catch(err => {
                        res
                            .status(500)
                            .json({
                                message: "Unable to update this action."
                            })
                    })
            })
            .catch(err => {
                res
                    .status(404)
                    .json({
                        message: "Invalid project ID."
                    })
            })
    } else if (action.project_id && action.description){
        res
            .status(400)
            .json({
                message: "Actions need notes."
            })
    } else if (action.project_id && action.notes) {
        res
            .status(400)
            .json({
                message: "Actions need a description."
            })
    } else if (action.notes && action.description) {
        res
            .status(400)
            .json({
                message: "Actions need a valid project ID."
            })
    } else {
        // Fixed message: actions have no "name" field — the required fields
        // are project ID, description and notes (matches the checks above).
        res
            .status(400)
            .json({
                message: "Actions need a valid project ID, description and notes."
            })
    }
});

module.exports = router;
<!doctype html>
<!-- Minimal factorial calculator: reads a number from #num and writes n!
     into #answer when the button is clicked. -->
<html>
<head>
<script Language="javascript">
// Compute n! iteratively for the value entered in #num.
// Non-numeric or negative input leaves fact at 1 (the loop never runs).
function show()
{
	var i, no, fact;
	fact=1;
	no=Number(document.getElementById("num").value);
	for(i=1; i<=no; i++)
	{
		fact= fact*i;
	}
	document.getElementById("answer").value= fact;
}
</script>
</head>
<body>
Enter Num: <input id="num">
<button onclick="show()">Factorial</button>
<input id="answer">
</body>
</html>
|
package com.example.httpheaderauth.controller;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
@RestController
@RequestMapping("/api/login")
public class LoginController {

    /**
     * GET /api/login — returns the fixed marker body "LOGIN!!!!!" with
     * HTTP 200, used to verify that header-based authentication let the
     * request through.
     */
    @GetMapping
    public ResponseEntity<String> getLogin() {
        return ResponseEntity.ok("LOGIN!!!!!");
    }
}
|
public class FibonacciSequence {

    /**
     * Prints the first {@code n} Fibonacci numbers (starting from 0) to
     * standard output, preceded by a short header. Output format matches
     * the original: header, then each value followed by a single space.
     *
     * @param n how many Fibonacci numbers to print
     */
    public static void printFibonacciSequence(int n)
    {
        System.out.print("Fibonacci Series of "+ n +" numbers:");
        int previous = 0;
        int current = 1;
        for (int printed = 0; printed < n; printed++)
        {
            System.out.print(previous + " ");
            int next = previous + current;
            previous = current;
            current = next;
        }
    }

    public static void main(String args[])
    {
        int n = 10;
        printFibonacciSequence(n);
    }
}
<gh_stars>1-10
package pl.tajchert.wear.businesscard.ui;
import android.os.Bundle;
import android.support.v4.app.Fragment;
import android.support.v4.app.FragmentActivity;
import android.support.v4.app.FragmentTransaction;
import android.view.Menu;
import android.view.MenuItem;
import pl.tajchert.businesscardwear.R;
/**
 * Entry activity of the wearable business-card app: hosts {@link MainFragment}
 * as the whole content view and wires up the action-bar menu.
 */
public class MainActivity extends FragmentActivity {
    private static final String TAG = "MainActivity";

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        // Replace the activity's content with the main fragment.
        FragmentTransaction ft = getSupportFragmentManager().beginTransaction();
        Fragment fb = new MainFragment().newInstance();
        ft.replace(android.R.id.content, fb);
        ft.commit();
    }

    @Override
    public boolean onCreateOptionsMenu(Menu menu) {
        // Inflate the menu; this adds items to the action bar if it is present.
        getMenuInflater().inflate(R.menu.main, menu);
        return true;
    }

    @Override
    public boolean onOptionsItemSelected(MenuItem item) {
        // Handle action bar item clicks here. The action bar will
        // automatically handle clicks on the Home/Up button, so long
        // as you specify a parent activity in AndroidManifest.xml.
        int id = item.getItemId();
        // "Save" simply finishes the activity (same as pressing back).
        if (id == R.id.action_save) {
            onBackPressed();
            return true;
        }
        return super.onOptionsItemSelected(item);
    }
}
|
#!/usr/bin/env bash
set -e

# Resolve the repository root. macOS has no `realpath`, so shim one first.
if [[ "$OSTYPE" == "darwin"* ]]; then
	realpath() { [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"; }
	ROOT=$(dirname "$(dirname "$(realpath "$0")")")
else
	ROOT=$(dirname "$(dirname "$(readlink -f $0)")")
	# If the script is running in Docker using the WSL2 engine, powershell.exe won't exist
	if grep -qi Microsoft /proc/version && type powershell.exe > /dev/null 2>&1; then
		IN_WSL=true
	fi
fi

# Launch the locally built Electron app with a development environment.
function code() {
	cd "$ROOT"

	# Locate the built Electron binary (bundle name differs per platform).
	if [[ "$OSTYPE" == "darwin"* ]]; then
		NAME=`node -p "require('./product.json').nameLong"`
		CODE="./.build/electron/$NAME.app/Contents/MacOS/Electron"
	else
		NAME=`node -p "require('./product.json').applicationName"`
		CODE=".build/electron/$NAME"
	fi

	# Get electron, compile, built-in extensions
	if [[ -z "${VSCODE_SKIP_PRELAUNCH}" ]]; then
		node build/lib/preLaunch.js
	fi

	# Manage built-in extensions
	if [[ "$1" == "--builtin" ]]; then
		exec "$CODE" build/builtin
		return
	fi

	# Configuration
	export NODE_ENV=development
	export VSCODE_DEV=1
	export VSCODE_CLI=1
	export ELECTRON_ENABLE_STACK_DUMPING=1
	export ELECTRON_ENABLE_LOGGING=1

	# Launch Code
	exec "$CODE" . "$@"
}

# Running inside WSL without WSLg: point DISPLAY at the Windows host's X
# server and, when the Remote-WSL extension is available, hand off to its
# dev launcher instead of running Electron in WSL directly.
function code-wsl()
{
	HOST_IP=$(echo "" | powershell.exe -noprofile -Command "& {(Get-NetIPAddress | Where-Object {\$_.InterfaceAlias -like '*WSL*' -and \$_.AddressFamily -eq 'IPv4'}).IPAddress | Write-Host -NoNewline}")
	export DISPLAY="$HOST_IP:0"

	# in a wsl shell
	ELECTRON="$ROOT/.build/electron/Code - OSS.exe"
	if [ -f "$ELECTRON" ]; then
		local CWD=$(pwd)
		cd $ROOT
		export WSLENV=ELECTRON_RUN_AS_NODE/w:VSCODE_DEV/w:$WSLENV
		local WSL_EXT_ID="ms-vscode-remote.remote-wsl"
		local WSL_EXT_WLOC=$(echo "" | VSCODE_DEV=1 ELECTRON_RUN_AS_NODE=1 "$ROOT/.build/electron/Code - OSS.exe" "out/cli.js" --ms-enable-electron-run-as-node --locate-extension $WSL_EXT_ID)
		cd $CWD
		if [ -n "$WSL_EXT_WLOC" ]; then
			# replace \r\n with \n in WSL_EXT_WLOC
			local WSL_CODE=$(wslpath -u "${WSL_EXT_WLOC%%[[:cntrl:]]}")/scripts/wslCode-dev.sh
			$WSL_CODE "$ROOT" "$@"
			exit $?
		else
			echo "Remote WSL not installed, trying to run VSCode in WSL."
		fi
	fi
}

# Dispatch: plain WSL (no X display) goes through code-wsl; WSLg
# (/mnt/wslg exists) can run Electron directly but needs --disable-gpu.
if [ "$IN_WSL" == "true" ] && [ -z "$DISPLAY" ]; then
	code-wsl "$@"
elif [ -f /mnt/wslg/versions.txt ]; then
	code --disable-gpu "$@"
else
	code "$@"
fi

exit $?
|
#!/bin/bash
# Run the MultiSpam k-NN graph removal experiment 5 times on the cluster,
# clearing Spark event logs and sending a progress e-mail after each run.

CLASS="info.devatty.spark.knngraphs.eval.remove.MultiSpam"

### Local tests
#JAR="target/spark-knn-graphs-eval-0.1-SNAPSHOT.jar"
#SPARK="/opt/spark-1.6.2-bin-hadoop2.6/bin/spark-submit"
#OPTS=""
#DATASET="-"

### Eurecom cluster
JAR="spark-knn-graphs-eval-0.1-SNAPSHOT.jar"
SPARK="/home/ubuntu/usr/spark-1.5.1-dist/bin/spark-submit"
OPTS="--driver-memory 4g --num-executors 16 --executor-cores 4 --executor-memory 4g --master yarn --deploy-mode client --conf spark.yarn.executor.memoryOverhead=1024 --conf spark.rpc.askTimeout=300s"
DATASET="../datasets/spam-subject-200K.txt"

for i in {1..5}
do
	echo "Test $i"
	$SPARK $OPTS --class $CLASS $JAR $DATASET
	# Event logs grow quickly; clear them between iterations.
	rm /tmp/spark-events/*

	# Send e-mail when done
	../sendmail.py "MultiTest with SPAM dataset has completed iteration $i"
done
|
<filename>omicron_nl.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 12 18:20:28 2021
Twitter: @hk_nien
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tools
PLT_COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b',
'#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
def add_or_stats(df):
    """Add odds-ratio statistics to a DataFrame with ntest/npos columns.

    If an 'sdate' column is present it becomes the (datetime) index.
    Columns added:

    - or: odds ratio npos / (ntest - npos)
    - or_std: standard error of the odds ratio
    - or_fit: weighted exponential (log-linear) fit of `or` over time
    - ga1: growth advantage per day (logistic k parameter; the fit slope,
      identical on every row)

    Rows where or_std is NaN (e.g. npos == 0) are dropped before fitting.
    Returns a new DataFrame; the input is not modified.
    """
    df = df.copy()
    if 'sdate' in df.columns:
        df = df.set_index('sdate')

    n_negative = df['ntest'] - df['npos']
    odds = df['npos'] / n_negative
    df['or'] = odds
    df['or_std'] = odds * np.sqrt(1 / df['npos'] + 1 / n_negative)

    # A zero count makes the standard error undefined; drop those rows.
    # (Not a real binomial fit, but good enough here.)
    df = df.loc[df['or_std'].notna()].copy()

    # Weighted linear regression of log(or) against time in days.
    t_days = (df.index - df.index[0]) / pd.Timedelta('1 d')
    slope, intercept = np.polyfit(
        t_days, np.log(df['or']), w=df['or'] / df['or_std'], deg=1
    )
    df['or_fit'] = np.exp(intercept + slope * t_days)
    df['ga1'] = slope
    return df
def get_data_dk(plot=True, fig_ax=None):
    """Get DK omicron variant data, with odds-ratio statistics added.

    NOTE(review): the `plot` and `fig_ax` parameters are currently unused
    (plotting is done separately by plot()); kept for interface stability.

    Return:
    - df: DataFrame with index sdate (sample date), columns including
      ntest, npos, plus or/or_std/or_fit/ga1 from add_or_stats().

    Source: https://www.ssi.dk/
    # https://www.ssi.dk/-/media/cdn/files/covid19/omikron/statusrapport/rapport-omikronvarianten-18122021-wj25.pdf?la=da
    https://www.ssi.dk/-/media/cdn/files/covid19/omikron/statusrapport/rapport-omikronvarianten-21122021-14tk.pdf?la=da
    """
    # Columns: Sample_date, samples_total, samples_omicron, %omicron, CI
    data_dk = """\
22-11-2021 4,514 1 0.0% 0%-0.1%
23-11-2021 4,717 1 0.0% 0%-0.1%
24-11-2021 4,034 1 0.0% 0%-0.1%
25-11-2021 4,105 2 0.0% 0%-0.2%
26-11-2021 4,161 2 0.0% 0%-0.2%
27-11-2021 3,853 2 0.1% 0%-0.2%
28-11-2021 3,894 12 0.3% 0.2%-0.5%
29-11-2021 5,096 11 0.2% 0.1%-0.4%
30-11-2021 5,424 24 0.4% 0.3%-0.7%
01-12-2021 4,552 74 1.6% 1.3%-2%
02-12-2021 4,596 60 1.3% 1%-1.7%
03-12-2021 5,174 72 1.4% 1.1%-1.8%
04-12-2021 5,098 101 2.0% 1.6%-2.4%
05-12-2021 4,808 171 3.6% 3.1%-4.1%
06-12-2021 7,115 356 5.0% 4.5%-5.5%
07-12-2021 7,339 569 7.8% 7.2%-8.4%
08-12-2021 6,692 704 10.5% 9.8%-11.3%
09-12-2021 6,637 752 11.3% 10.6%-12.1%
10-12-2021 6,961 887 12.7% 12%-13.6%
11-12-2021 6,701 1,096 16.4% 15.5%-17.3%
12-12-2021 7,139 1,560 21.9% 20.9%-22.8%
13-12-2021 10,580 3,046 28.8% 27.9%-29.7%
14-12-2021 11,471 4,454 38.8% 37.9%-39.7%
15-12-2021 11,273 5,106 45.3% 44.4%-46.2%
"""
    # Strip thousands separators and percent signs so fields parse as floats.
    data_dk = data_dk.replace(',', '').replace('%', '')
    records = [x.split()[:4] for x in data_dk.splitlines() if '-202' in x]
    df = pd.DataFrame.from_records(records, columns=['sdate', 'ntest', 'npos', 'pos%'])
    df['sdate'] = pd.to_datetime(df['sdate'], format='%d-%m-%Y')
    df.set_index('sdate', inplace=True)
    for c in df.columns:
        df[c] = df[c].astype(float)
    df = add_or_stats(df)
    return df
def get_data_nl():
    """Return a DataFrame of Dutch omicron sampling data.

    Indexed by sample date after add_or_stats(); columns include ntest,
    npos, pos% plus the odds-ratio statistics or/or_std/or_fit/ga1.

    Only percentages were published, so ntest is an assumed constant of
    100 and npos is derived from pos%.

    Source: https://twitter.com/ARGOSamsterdam/status/1473390513646669831
    (earlier figures came from
    https://twitter.com/JosetteSchoenma/status/1471536542757883918)
    """
    # Columns: sample date, % omicron.
    txt = """\
2021-12-12 4
2021-12-13 11
2021-12-14 14
2021-12-15 25
2021-12-16 18.5
2021-12-17 21
2021-12-18 35.5
2021-12-19 35
2021-12-20 49
"""
    records = [li.split() for li in txt.splitlines() if '202' in li]
    df = pd.DataFrame.from_records(records, columns=['sdate', 'pos%'])
    df['sdate'] = pd.to_datetime(df['sdate'])
    # add_or_stats() promotes 'sdate' to the index itself. (The original
    # called df.set_index('sdate') without using the result — a no-op.)
    df['pos%'] = df['pos%'].astype(float)
    ntest = 100
    df['ntest'] = ntest
    df['npos'] = ntest/100 * df['pos%']
    df = add_or_stats(df)
    return df
def plot(dfdict, fig_ax=None):
    """Plot odds-ratio time series with exponential fits, one per region.

    Parameters:
    - dfdict: dict region_name -> DataFrame with columns or, or_std,
      or_fit, ga1 (as produced by add_or_stats).
    - fig_ax: optional (fig, ax) to draw into; a new figure is created
      when omitted.

    Returns:
    - (df, ga1) — NOTE(review): these are leftovers of the last loop
      iteration (last region in dfdict); with an empty dfdict this raises
      NameError. Confirm callers rely on this before changing.
    """
    if fig_ax is None:
        fig, ax = plt.subplots(tight_layout=True, figsize=(6, 4))
    else:
        fig, ax = fig_ax
    ax.set_yscale('log')
    ax.set_ylabel(r'Odds ratio omicron/other')
    ax.set_xlabel('Sampling date')
    pcycle = plt.rcParams['axes.prop_cycle']()
    for region_name, df in dfdict.items():
        # ga1 is constant per DataFrame; take it from the last row.
        ga1 = df['ga1'].iloc[-1]
        props = next(pcycle)
        label = f'{region_name} (k={ga1:.2f} per day)'
        yerr = 2*df['or_std']
        # big y error bars look ugly on log scale...
        yerr_big_mask = yerr >= df['or']
        yerr[yerr_big_mask] = df['or'][yerr_big_mask]*0.75
        ax.errorbar(df.index, df['or'], yerr=yerr, ls='none', marker='x',
                    label=label, **props)
        ax.plot(df.index, df['or_fit'],
                **props)
    # Odds ratio 1 marks the 50/50 omicron/delta crossover.
    ax.axhline(1, color='k', linestyle='--')
    ax.text(df.index[-1], 1.3, '50% omicron', va='bottom', ha='right')
    # ax.set_ylim(None, np.exp(a0 + ga1*(tm_days[-1] + 4)))
    ax.legend(loc='upper left')
    ax.set_title('Omicron/Delta ratios')
    tools.set_xaxis_dateformat(ax)
    fig.show()
    return df, ga1
def estimate_cases_nd_no(n_cases, gf7, f_o, ga1):
    """Estimate delta/omicron case splits and growth rates from today's status.

    Parameters:
    - n_cases: number of positive cases on the reference date.
    - gf7: growth factor compared to 7 days ago.
    - f_o: estimated fraction of omicron in today's cases, 0 < f_o < 1.
    - ga1: growth advantage per day (natural log).

    Return:
    - nc_d: number of delta cases
    - nc_o: number of omicron cases
    - k_d: log growth rate of delta
    - k_o: log growth rate of omicron
    """
    nc_o = f_o * n_cases
    nc_d = (1 - f_o) * n_cases
    # Back-project today's mix 7 days, then solve gf7 for the delta rate.
    mix_7d_ago = nc_d + nc_o * np.exp(-7 * ga1)
    k_d = np.log(gf7 * mix_7d_ago / n_cases) / 7
    k_o = k_d + ga1
    return nc_d, nc_o, k_d, k_o
if __name__ == '__main__':

    plt.close('all')
    # Region name -> DataFrame with odds-ratio statistics.
    dfdict = {
        'Denmark': get_data_dk(),
        'Netherlands': get_data_nl(),
    }
    plot(dfdict)
    ax = plt.gcf().get_axes()[0]
    ax.set_ylim(None, 10)
    ax.set_xlim(None, pd.to_datetime('2021-12-25'))
    ax.set_title('Omicron/Delta ratios - data source SSI, Argos - plot @hk_nien')

    #%%
    fig, axs = plt.subplots(2, 1, figsize=(7, 6), tight_layout=True, sharex=True)
    # DK data plot
    ax = axs[0]
    # Dutch growth advantage
    ga1_nl = 0.3
    plot(dfdict, fig_ax=(fig, axs[0]))
    ax.set_ylim(1e-4, 110)
    ax.set_xlabel(None)
    ax.set_title('Verhouding Omicron/Delta Denemarken en Nederland')

    # Here are the model parameters
    date_ref = pd.to_datetime('2021-12-18')
    ncases_ref = 15000
    daynos = np.arange(-21, 7)
    nc0_d, nc0_o, k_d, k_o = estimate_cases_nd_no(
        ncases_ref, gf7=0.9, f_o=0.9, ga1=ga1_nl
        )
    dates = (date_ref + pd.Series(daynos * pd.Timedelta('1 d'))).values
    # Exponential case curves for delta and omicron around the reference date.
    ncs_d = nc0_d * np.exp(daynos * k_d)
    ncs_o = nc0_o * np.exp(daynos * k_o)
    Tgen = 4.0  # assumed generation interval in days
    R_delta = np.exp(k_d * Tgen)
    R_om = np.exp(k_o * Tgen)
    ax.plot(dates, ncs_o/ncs_d, label='Nederland (model)', ls='--',
            color=PLT_COLORS[len(dfdict)]
            )
    ax.legend()

    #for ax in axs:
    #    ax.set_xlim(*dates[[0, -1]])

    ax = axs[1]
    i0 = 7 # skip old NL data on delta
    ax.plot(dates[i0:], (ncs_d+ncs_o)[i0:], label='Totaal')
    ax.scatter([date_ref], [ncases_ref])
    ax.plot(dates[i0:], ncs_d[i0:], label=f'Delta (R={R_delta:.2f})', ls='-.')
    ax.plot(dates, ncs_o, label=f'Omicron (R={R_om:.2f})', ls=':')
    ax.set_ylim(100, 1e5)
    ax.legend()
    ax.set_yscale('log')
    ax.set_title('Cases per dag Nederland (model)')
    ax.set_xlabel('Datum monstername')
    ax.text(pd.to_datetime('2021-11-25'), 200, 'RIVM:\ngeen Omicron', ha='center', va='bottom')
    tools.set_xaxis_dateformat(ax)
|
def collector(ds, **context):
    """Airflow task callable: produce the value consumed downstream.

    The returned string is pushed to XCom automatically by Airflow's
    PythonOperator, where the printer task can pull it.
    """
    greeting = "hello"
    return greeting
def printer(ds, **context):
    """Airflow task callable: pull the collector task's XCom value and print it."""
    task_instance = context['task_instance']
    payload = task_instance.xcom_pull(task_ids='collector')
    print(payload)
|
// Format a number with grouped thousands, PHP-style.
// Defaults follow European conventions: "," as decimal point and "." as
// thousands separator.
//   number_format(1234.5678, 2, '.', '')  -> "1234.57"
//   number_format(1234567.891, 2)         -> "1.234.567,89"
// Bug fix: the old sanitizing regex /[(,)|(.)]/g is a character class that
// also removed the decimal point (plus parens/pipes), so the documented
// example produced "12345678.00" instead of "1234.57". Only
// thousands-separator commas in string input are stripped now.
function number_format(number, decimals, dec_point, thousands_sep) {
    // * example 1: number_format(1234.5678, 2, '.', '');
    // * returns 1: 1234.57
    number = number.toString().replace(/,/g, "");
    var n = number, c = isNaN(decimals = Math.abs(decimals)) ? 2 : decimals;
    var d = dec_point == undefined ? "," : dec_point;
    var t = thousands_sep == undefined ? "." : thousands_sep, s = n < 0 ? "-" : "";
    // i: integer part as a string; j: length of the leading (short) group.
    var i = parseInt(n = Math.abs(+n || 0).toFixed(c)) + "", j = (j = i.length) > 3 ? j % 3 : 0;
    return s + (j ? i.substr(0, j) + t : "") + i.substr(j).replace(/(\d{3})(?=\d)/g, "$1" + t) + (c ? d + Math.abs(n - i).toFixed(c).slice(2) : "");
}
// Compute the number of days in a given month
// Return the number of days in month t (1-12) of Gregorian year y.
// Returns 0 for an invalid month number.
function day_count(t, y) {
    let d;
    switch (t) {
        case 1:
        case 3:
        case 5:
        case 7:
        case 8:
        case 10:
        case 12:
            d = 31;
            break;
        case 4:
        case 6:
        case 9:
        case 11:
            d = 30;
            break;
        case 2:
            // Full Gregorian leap-year rule: divisible by 4, except century
            // years, unless divisible by 400. The previous check omitted the
            // 400-year exception, so e.g. 2000 was wrongly given 28 days.
            if ((y % 4 == 0 && y % 100 != 0) || y % 400 == 0) {
                d = 29;
            } else {
                d = 28;
            }
            break;
        default:
            d = 0;
    }
    return d;
}
import halem.Mesh_maker as Mesh_maker
import halem.Functions as Functions
import halem.Calc_path as Calc_path
import halem.Flow_class as Flow_class
import pytest
import numpy as np
def test_nodes_on_land_Waddensea():
    # Regression test for Flow_class.nodes_on_land_Waddensea: starting from
    # 10 all-zero nodes with matching zero u/v/water-depth arrays (20 time
    # steps each), the function is expected to expand the grid to 212 nodes
    # while keeping velocities and water depth zero everywhere.
    # NOTE(review): 212 is an empirical fixture value presumably derived
    # from the Waddensea land mask -- confirm against Flow_class if the
    # mask changes.
    nodes = np.zeros((10, 2))
    WD = np.zeros((10, 20))
    u = 1.0 * WD  # copies of WD, all zeros
    v = 1.0 * WD
    new_nodes, new_u, new_v, new_WD = Flow_class.nodes_on_land_Waddensea(
        nodes, u, v, WD
    )
    assert new_nodes.shape == (212, 2)
    assert new_WD.shape == (212, 20)
    assert new_u.shape == (212, 20)
    assert new_v.shape == (212, 20)
    # All added land nodes must carry zero flow and zero water depth.
    np.testing.assert_array_equal(new_WD, np.zeros(new_WD.shape))
    np.testing.assert_array_equal(new_v, np.zeros(new_v.shape))
    np.testing.assert_array_equal(new_u, np.zeros(new_u.shape))
|
float areaOfTriangle(float a, float b, float c)
{
float s = (a+b+c)/2;
return sqrt(s*(s-a)*(s-b)*(s-c));
} |
#!/bin/bash
# Borg backup driver: optionally mounts an NFS backup target, saves the
# installed-package list, backs up a fixed set of directories, prunes old
# archives and prints repository stats. The repository location and
# passphrase are read from dot-files next to this script.

DIR=$(cd "$(dirname $0)" && pwd)
MNTFILE="$DIR/.borg-mntconfig"

export BORG_REPO=$(cat "$DIR/.borg-repo")
export BORG_PASSPHRASE=$(cat "$DIR/.borg-passphrase")

# NOTE(review): BACKUP_TARGETS is never set in this script -- presumably a
# leftover; this prints an empty line unless the caller exports it.
echo $BACKUP_TARGETS

echo "------------------------------------------------------------------------------"
echo "Starting backup on $(date)..."

## Mount drive (only when a mount configuration file exists)
if [ -f "$MNTFILE" ]; then
    echo
    echo "Mount backupdrive"
    source "$MNTFILE"
    [ ! -d "$MNTPOINT" ] && { mkdir $MNTPOINT; }
    if ! mountpoint -q $MNTPOINT; then
        echo "Mount $MNTPOINT"
        /bin/mount -t nfs $MNTSHARE $MNTPOINT
    fi
    # Repo lives on the mounted share in this configuration.
    export BORG_REPO="$MNTPOINT/"$(cat "$DIR/.borg-repo")
fi

## Create list of installed software
echo
echo "Create list of installed software ..."
dpkg --get-selections > "$DIR/software.list"
RESULT=$?; if [ ${RESULT} != 0 ]; then
    echo "****************************************************"
    echo " Backup error => exit code: ${RESULT}"
    echo "****************************************************"
    exit 1
fi

## Create database dumps
##echo
##echo "Creating database dumps ..."

## Perform Backup
# Build the list of folders to back up. The counter is initialized
# explicitly and incremented arithmetically: the previous `i+=1` on an
# unset variable performed *string* concatenation (indices 1, 11, 111, ...)
# and only worked by accident because bash arrays are sparse.
i=0
FOLDER[$i]="$DIR"
[ -d "/home" ] && { i=$((i+1)); FOLDER[$i]="/home"; }
[ -d "/root/dockerdata" ] && { i=$((i+1)); FOLDER[$i]="/root/dockerdata"; }
[ -d "/etc" ] && { i=$((i+1)); FOLDER[$i]="/etc"; }
[ -d "/var/lib" ] && { i=$((i+1)); FOLDER[$i]="/var/lib"; }
[ -d "/var/webmin" ] && { i=$((i+1)); FOLDER[$i]="/var/webmin"; }
[ -d "/var/www" ] && { i=$((i+1)); FOLDER[$i]="/var/www"; }

echo
echo "Folders to backup:"
for i in ${!FOLDER[@]}; do
    echo "${FOLDER[$i]}"
done

echo
echo "Create backup ..."
borg create -v --stats --compression lzma,6 ::'{hostname}-{now:%Y-%m-%d_%H-%M-%S}' ${FOLDER[@]}
RESULT=$?; if [ ${RESULT} != 0 ]; then
    echo "****************************************************"
    echo " Backup error => exit code: ${RESULT}"
    echo "****************************************************"
    exit 1
fi

## Prune old backups
echo
echo "Prune old backups ..."
borg prune -v --list --keep-daily=7 --keep-weekly=4 --keep-monthly=6
RESULT=$?; if [ ${RESULT} != 0 ]; then
    echo "****************************************************"
    echo " Backup error => exit code: ${RESULT}"
    echo "****************************************************"
    exit 1
fi

## Stats
echo
echo "Repository stats..."
borg info
RESULT=$?; if [ ${RESULT} != 0 ]; then
    echo "****************************************************"
    echo " Backup error => exit code: ${RESULT}"
    echo "****************************************************"
    exit 1
fi

## Unmount backupdrive
if [ -f "$MNTFILE" ]; then
    if mountpoint -q $MNTPOINT; then
        echo "Unmount $MNTPOINT"
        /bin/umount $MNTPOINT
    fi
fi

echo
echo "Finished backup on $(date)."
echo "------------------------------------------------------------------------------"
|
/*
Copyright 2016 WaizungTaam. All rights reserved.
License: Apache License 2.0
Email: <EMAIL>
Creation time: 2016-07-16
Last modified: 2016-07-17
Reference: <NAME>., & <NAME>. (2006).
Reducing the dimensionality of data with neural networks.
Science, 313(5786), 504-507.
*/
// Deep AutoEncoder Network
#ifndef DEEP_AE_H
#define DEEP_AE_H
#include <vector>
#include <string>
#include "./math/vector.h"
#include "./math/matrix.h"
#include "./math/utils.h"
// Multi-layer perceptron used for the fine-tuning (backpropagation) stage
// of the deep autoencoder. Its weight matrices can be shared with
// pre-trained RBMs through share_weight()/share_w_bias().
class MLP {
public:
  MLP() = default;
  // Construct from layer sizes plus the names of the hidden activation and
  // output function (defaults: "logistic" / "binary_step").
  MLP(const std::vector<int> &,
      const std::string & activ_func = "logistic",
      const std::string & output_func = "binary_step");
  MLP(const std::initializer_list<int> &,
      const std::string & activ_func = "logistic",
      const std::string & output_func = "binary_step");
  MLP(const MLP &) = default;
  MLP(MLP &&) = default;
  MLP & operator=(const MLP &) = default;
  MLP & operator=(MLP &&) = default;
  ~MLP() = default;
  // train(inputs, targets, <int>, <int>, <double>, <double>).
  // NOTE(review): the meaning of the unnamed int/double parameters (likely
  // epochs/batch size and learning rate/momentum) is not visible from this
  // header -- confirm against the implementation.
  void train(const Matrix &, const Matrix &,
             int, int, double, double);
  Matrix output(const Matrix &);
  Matrix predict(const Matrix &);
  Matrix & share_weight(int);  // share weight with rbm
  Matrix & share_w_bias(int);  // share w_bias with rbm
  bool debug;  // when true, emit diagnostics during training
private:
  std::string activ_func_name;
  std::string output_func_name;
  std::vector<Matrix> weights;
  std::vector<Matrix> ws_bias;
  std::vector<Matrix> delta_weights;
  std::vector<Matrix> delta_ws_bias;
  std::vector<Matrix> data_forward;     // per-layer forward activations
  std::vector<Matrix> local_fields;
  std::vector<Matrix> local_gradients;
  void forward(const Matrix &);
  void backward(const Matrix &);
  void update(double, double);
};
// Restricted Boltzmann Machine used for greedy layer-wise pre-training.
// The weight matrix and hidden bias are *references* to matrices owned
// elsewhere (shared with the MLP), so pre-training updates are directly
// visible to the network that shares them.
// NOTE(review): because of the reference members, the defaulted copy/move
// assignment operators below are implicitly deleted -- confirm assignment
// is never used on RBM instances.
class RBM {
public:
  RBM() = default;
  // RBM(weight, <const Matrix &>, hidden bias, activation name,
  //     visible layer type, hidden layer type).
  // NOTE(review): the role of the second (const) matrix parameter is not
  // visible from this header -- confirm against the implementation.
  RBM(Matrix &, const Matrix &, Matrix &,
      const std::string & activ_func = "logistic",
      const std::string & type_vis = "binary",
      const std::string & type_hid = "binary");
  RBM(const RBM &) = default;
  RBM(RBM &&) = default;
  RBM & operator=(const RBM &) = default;
  RBM & operator=(RBM &&) =default;
  ~RBM() = default;
  void train(const Matrix &, int, int, double, double);
  Matrix output(const Matrix &);
  Matrix reconstruct(const Matrix &);
  Matrix reconstruct(const Matrix &, const Vector &);  // for linear vis
  void set_type(const std::string &, const std::string &);  // set the layer type: binary, linear
  bool debug;  // when true, emit diagnostics during training
private:
  std::string activ_func_name;
  std::string type_vis;  // visible layer type: "binary" or "linear"
  std::string type_hid;  // hidden layer type: "binary" or "linear"
  Matrix & weight;       // shared with the MLP (reference member)
  Matrix w_bias_vis;
  Matrix & w_bias_hid;   // shared with the MLP (reference member)
  Matrix delta_weight;
  Matrix delta_w_bias_vis;
  Matrix delta_w_bias_hid;
  Matrix prop_vh(const Matrix &);
  Matrix prop_vh(const Matrix &, const Vector &);  // for linear vis
  Matrix prop_hv(const Matrix &);
  Matrix sample_vh(const Matrix &);
  Matrix sample_hv(const Matrix &);
  Matrix sample_hv(const Matrix &, const Vector &);  // for linear vis
};
// Deep autoencoder: a stack of RBMs for greedy layer-wise pre-training
// followed by an MLP for backpropagation fine-tuning, following
// Hinton & Salakhutdinov (2006).
class DeepAE {
public:
  DeepAE() = default;
  // continuous: true  -> Input being continuous data
  //             false -> Input being discrete (binary) data
  // debug:      true  -> Print the error variations during training
  //             false -> No printing
  DeepAE(const std::vector<int> &,
         bool debug = false, bool continuous = false);
  DeepAE(const std::initializer_list<int> &,
         bool debug = false, bool continuous = false);
  DeepAE(const DeepAE &) = default;
  DeepAE(DeepAE &&) = default;
  DeepAE & operator=(const DeepAE &) = default;
  DeepAE & operator=(DeepAE &&) =default;
  ~DeepAE() = default;
  // train(inputs, targets, then two (int, int, double, double) groups --
  // one for pre-training, one for fine-tuning).
  // NOTE(review): exact parameter meaning is not visible from this header;
  // confirm against the implementation.
  void train(const Matrix &, const Matrix &,
             int, int, double, double,
             int, int, double, double);
  Matrix predict(const Matrix &);
private:
  MLP mlp;                 // fine-tuning network
  std::vector<RBM> rbms;   // per-layer pre-training machines
  void pre_train(const Matrix &, int, int, double, double);
  void fine_tune(const Matrix &, const Matrix &,
                 int, int, double, double);
};
#endif // deep_ae.h |
def generateFibonacci(n):
    """Print the first `n` Fibonacci numbers (0, 1, 1, 2, ...) on one line.

    Fixes missing edge cases: n <= 0 prints no numbers and n == 1 prints
    only the first term (the original always printed at least "0 1").
    A trailing newline is emitted so the output line is terminated.
    """
    print("Fibonacci sequence:")
    if n <= 0:
        print()
        return
    if n == 1:
        print(0)
        return
    a, b = 0, 1
    print(a, b, end=" ")
    for _ in range(2, n):
        a, b = b, a + b
        print(b, end=" ")
    print()


generateFibonacci(10)
# Output: Fibonacci sequence: 0 1 1 2 3 5 8 13 21 34
package cn.celess.blog.entity.model;
import com.github.pagehelper.PageInfo;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
/**
* @Author: 小海
* @Date: 2020-05-25 17:13
* @Desc:
*/
@Data
@AllArgsConstructor
@NoArgsConstructor
public class PageData<T> {
    // Content of the current page.
    private List<T> list;
    // Total number of matching rows across all pages.
    private long total;
    // Requested page size.
    private int pageSize;
    // 1-based page number.
    private int pageNum;

    /**
     * Copies the paging metadata (page number, page size, total) from a
     * PageHelper {@code PageInfo}; the content list is left unset.
     * NOTE(review): {@code PageInfo} is used as a raw type here, so the
     * element type is not checked -- consider {@code PageInfo<?>}.
     */
    public PageData(PageInfo pageInfo) {
        this.pageNum = pageInfo.getPageNum();
        this.pageSize = pageInfo.getPageSize();
        this.total = pageInfo.getTotal();
    }

    /**
     * Copies paging metadata from {@code pageInfo} and sets {@code data}
     * as the page content (useful when the list was mapped to a different
     * element type than the one PageHelper paginated).
     */
    public PageData(PageInfo pageInfo, List<T> data) {
        this(pageInfo);
        this.list = data;
    }
}
|
import { Request, Response } from "express";
import { User, ListUser } from "../models/user";
import { UserService } from "../services/user.service";
export class UserController {
private userService = new UserService();
public users(req: Request, res: Response) {
const users: ListUser[] = this.userService.getAll();
res.json(users).status(200);
}
public user(req: Request, res: Response) {
const user: User = this.userService.get(Number(req.params["id"]));
res.json(user).status(200);
}
public deleteUser(req: Request, res: Response) {
const isDeleted = this.userService.delete(Number(req.params["id"]));
req.params["id"]
? res.json(isDeleted).status(200)
: res.json(isDeleted).status(404);
}
public saveOrUpdate(req: Request, res: Response) {
const userReq = req.body;
const user = this.userService.save(userReq);
res.json(user).status(200);
}
}
|
#!/bin/bash
# Git pre-push hook: require signed commits when pushing master to the
# canonical kazugold repository.
#
# git invokes the hook with $1 = remote name and $2 = remote URL. Only
# pushes to github.com/kazugold/kazugold (ssh or https form) are checked.
if ! [[ "$2" =~ ^(git@)?(www.)?github.com(:|/)kazugold/kazugold(.git)?$ ]]; then
    exit 0
fi
# git feeds one line per ref being pushed on stdin:
#   <local ref> <local sha1> <remote ref> <remote sha1>
while read LINE; do
    # Prefix a dummy word so the fields land on $2..$5:
    # $2 = local ref, $3 = local sha1, $4 = remote ref, $5 = remote sha1.
    set -- A $LINE
    if [ "$4" != "refs/heads/master" ]; then
        continue
    fi
    # Verify the signature chain up to the commit being pushed; re-run
    # verbosely on failure so the user sees which commit is unsigned.
    if ! ./contrib/verify-commits/verify-commits.sh $3 > /dev/null 2>&1; then
        echo "ERROR: A commit is not signed, can't push"
        ./contrib/verify-commits/verify-commits.sh
        exit 1
    fi
done < /dev/stdin
|
#!/bin/bash
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test the local_repository binding
#
# --- begin runfiles.bash initialization ---
# Copy-pasted from Bazel's Bash runfiles library (tools/bash/runfiles/runfiles.bash).
set -euo pipefail
if [[ ! -d "${RUNFILES_DIR:-/dev/null}" && ! -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
if [[ -f "$0.runfiles_manifest" ]]; then
export RUNFILES_MANIFEST_FILE="$0.runfiles_manifest"
elif [[ -f "$0.runfiles/MANIFEST" ]]; then
export RUNFILES_MANIFEST_FILE="$0.runfiles/MANIFEST"
elif [[ -f "$0.runfiles/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
export RUNFILES_DIR="$0.runfiles"
fi
fi
if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"
elif [[ -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
source "$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \
"$RUNFILES_MANIFEST_FILE" | cut -d ' ' -f 2-)"
else
echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"
exit 1
fi
# --- end runfiles.bash initialization ---
source "$(rlocation "io_bazel/src/test/shell/integration_test_setup.sh")" \
|| { echo "integration_test_setup.sh not found!" >&2; exit 1; }
# `uname` returns the current platform, e.g "MSYS_NT-10.0" or "Linux".
# `tr` converts all upper case letters to lower case.
# `case` matches the result if the `uname | tr` expression to string prefixes
# that use the same wildcards as names do in Bash, i.e. "msys*" matches strings
# starting with "msys", and "*" matches everything (it's the default case).
case "$(uname -s | tr [:upper:] [:lower:])" in
msys*)
# As of 2018-08-14, Bazel on Windows only supports MSYS Bash.
declare -r is_windows=true
;;
*)
declare -r is_windows=false
;;
esac
if "$is_windows"; then
# Disable MSYS path conversion that converts path-looking command arguments to
# Windows paths (even if they arguments are not in fact paths).
export MSYS_NO_PATHCONV=1
export MSYS2_ARG_CONV_EXCL="*"
fi
source "$(rlocation "io_bazel/src/test/shell/bazel/remote_helpers.sh")" \
|| { echo "remote_helpers.sh not found!" >&2; exit 1; }
# Basic test.
# Verifies that a Starlark macro loaded from //:test.bzl can define a
# local_repository plus a bind() alias, that targets in the external repo
# build through both @endangered and //external:mongoose, and that editing
# the macro or the external repo's contents triggers exactly the expected
# re-fetches (and no spurious ones).
function test_macro_local_repository() {
  # Create the external repository with a single public genrule.
  create_new_workspace
  repo2=$new_workspace_dir

  mkdir -p carnivore
  cat > carnivore/BUILD <<'EOF'
genrule(
    name = "mongoose",
    cmd = "echo 'Tra-la!' | tee $@",
    outs = ["moogoose.txt"],
    visibility = ["//visibility:public"],
)
EOF

  cd ${WORKSPACE_DIR}
  cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
load('//:test.bzl', 'macro')
macro('$repo2')
EOF

  # Empty package for the .bzl file
  echo -n >BUILD

  # Our macro
  cat >test.bzl <<EOF
def macro(path):
  print('bleh')
  native.local_repository(name='endangered', path=path)
  native.bind(name='mongoose', actual='@endangered//carnivore:mongoose')
EOF

  mkdir -p zoo
  cat > zoo/BUILD <<'EOF'
genrule(
    name = "ball-pit1",
    srcs = ["@endangered//carnivore:mongoose"],
    outs = ["ball-pit1.txt"],
    cmd = "cat $< >$@",
)

genrule(
    name = "ball-pit2",
    srcs = ["//external:mongoose"],
    outs = ["ball-pit2.txt"],
    cmd = "cat $< >$@",
)
EOF

  bazel build //zoo:ball-pit1 >& $TEST_log || fail "Failed to build"
  expect_log "bleh"
  expect_log "Tra-la!"  # Invalidation
  cat bazel-genfiles/zoo/ball-pit1.txt >$TEST_log
  expect_log "Tra-la!"

  bazel build //zoo:ball-pit1 >& $TEST_log || fail "Failed to build"
  expect_not_log "Tra-la!"  # No invalidation

  bazel build //zoo:ball-pit2 >& $TEST_log || fail "Failed to build"
  expect_not_log "Tra-la!"  # No invalidation
  cat bazel-genfiles/zoo/ball-pit2.txt >$TEST_log
  expect_log "Tra-la!"

  # Test invalidation of the WORKSPACE file
  create_new_workspace
  repo2=$new_workspace_dir

  mkdir -p carnivore
  cat > carnivore/BUILD <<'EOF'
genrule(
    name = "mongoose",
    cmd = "echo 'Tra-la-la!' | tee $@",
    outs = ["moogoose.txt"],
    visibility = ["//visibility:public"],
)
EOF
  cd ${WORKSPACE_DIR}
  cat >test.bzl <<EOF
def macro(path):
  print('blah')
  native.local_repository(name='endangered', path='$repo2')
  native.bind(name='mongoose', actual='@endangered//carnivore:mongoose')
EOF

  bazel build //zoo:ball-pit1 >& $TEST_log || fail "Failed to build"
  expect_log "blah"
  expect_log "Tra-la-la!"  # Invalidation
  cat bazel-genfiles/zoo/ball-pit1.txt >$TEST_log
  expect_log "Tra-la-la!"

  bazel build //zoo:ball-pit1 >& $TEST_log || fail "Failed to build"
  expect_not_log "Tra-la-la!"  # No invalidation

  bazel build //zoo:ball-pit2 >& $TEST_log || fail "Failed to build"
  expect_not_log "Tra-la-la!"  # No invalidation
  cat bazel-genfiles/zoo/ball-pit2.txt >$TEST_log
  expect_log "Tra-la-la!"
}
# Regression test: loading a .bzl file reached through a symlink that points
# outside the workspace root must still work.
function test_load_from_symlink_to_outside_of_workspace() {
  OTHER=$TEST_TMPDIR/other

  cat >> $(create_workspace_with_default_repos WORKSPACE)<<EOF
load("//a/b:c.bzl", "c")
EOF

  mkdir -p $OTHER/a/b
  touch $OTHER/a/b/BUILD
  cat > $OTHER/a/b/c.bzl <<EOF
def c():
  pass
EOF

  touch BUILD
  # //a is a symlink leaving the workspace.
  ln -s $TEST_TMPDIR/other/a a
  bazel build //:BUILD || fail "Failed to build"
  rm -fr $TEST_TMPDIR/other
}
# Test load from repository.
# A macro loaded from one external repository ("proxy") can itself declare a
# second external repository ("endangered") whose targets then build.
function test_external_load_from_workspace() {
  # External repo holding the actual target.
  create_new_workspace
  repo2=$new_workspace_dir

  mkdir -p carnivore
  cat > carnivore/BUILD <<'EOF'
genrule(
    name = "mongoose",
    cmd = "echo 'Tra-la-la!' | tee $@",
    outs = ["moogoose.txt"],
    visibility = ["//visibility:public"],
)
EOF

  # Second repo only exports the macro .bzl file.
  create_new_workspace
  repo3=$new_workspace_dir
  # Our macro
  cat >WORKSPACE
  cat >test.bzl <<EOF
def macro(path):
  print('bleh')
  native.local_repository(name='endangered', path=path)
EOF
  cat >BUILD <<'EOF'
exports_files(["test.bzl"])
EOF

  # Main workspace loads the macro from @proxy and instantiates @endangered.
  cd ${WORKSPACE_DIR}
  cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
local_repository(name='proxy', path='$repo3')
load('@proxy//:test.bzl', 'macro')
macro('$repo2')
EOF

  bazel build @endangered//carnivore:mongoose >& $TEST_log \
    || fail "Failed to build"
  expect_log "bleh"
}
# Test loading a repository with a load statement in the WORKSPACE file
# A repository whose own WORKSPACE file contains a load() can itself be
# loaded from (and used by) the main workspace.
function test_load_repository_with_load() {
  create_new_workspace
  repo2=$new_workspace_dir

  echo "Tra-la!" > data.txt
  cat <<'EOF' >BUILD
exports_files(["data.txt"])
EOF

  cat <<'EOF' >ext.bzl
def macro():
  print('bleh')
EOF

  cat <<'EOF' >WORKSPACE
workspace(name = "foo")
load("//:ext.bzl", "macro")
macro()
EOF

  # Main workspace declares @foo, then loads a macro *from* @foo.
  cd ${WORKSPACE_DIR}
  cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
local_repository(name='foo', path='$repo2')
load("@foo//:ext.bzl", "macro")
macro()
EOF

  cat > BUILD <<'EOF'
genrule(name = "foo", srcs=["@foo//:data.txt"], outs=["foo.txt"], cmd = "cat $< | tee $@")
EOF

  bazel build //:foo >& $TEST_log || fail "Failed to build"
  expect_log "bleh"
  expect_log "Tra-la!"
}
# Test cycle when loading a repository with a load statement in the WORKSPACE file that is not
# yet defined.
# Loading from @foo *before* declaring it in the WORKSPACE file must be
# reported as a failure to load the extension, not a crash or a package
# error.
function test_cycle_load_repository() {
  create_new_workspace
  repo2=$new_workspace_dir

  echo "Tra-la!" > data.txt
  cat <<'EOF' >BUILD
exports_files(["data.txt"])
EOF

  cat <<'EOF' >ext.bzl
def macro():
  print('bleh')
EOF

  cat >WORKSPACE

  # load() appears before the local_repository() that defines @foo.
  cd ${WORKSPACE_DIR}
  cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
load("@foo//:ext.bzl", "macro")
macro()
local_repository(name='foo', path='$repo2')
EOF

  local exitCode=0
  bazel build @foo//:data.txt >& $TEST_log || exitCode=$?
  [ $exitCode != 0 ] || fail "building @foo//:data.txt succeed while expected failure"

  expect_not_log "PACKAGE"
  expect_log "Failed to load Starlark extension '@foo//:ext.bzl'"
}
# Checks the error message when the WORKSPACE file loads from a repository
# that is never defined, for both `bazel build //...` and `bazel query //...`
# (the presence of a sub-workspace must not change the diagnosis).
function test_load_nonexistent_with_subworkspace() {
  mkdir ws2
  cat >ws2/WORKSPACE

  cat <<'EOF' >WORKSPACE
load("@does_not_exist//:random.bzl", "random")
EOF
  cat >BUILD

  # Test build //...
  # Bug fix: exitCode was never initialized (unbound under `set -u` if the
  # command unexpectedly succeeds) and was not reset before the query phase,
  # so a stale non-zero value from the build phase could mask a query that
  # wrongly succeeded.
  local exitCode=0
  bazel clean --expunge
  bazel build //... >& $TEST_log || exitCode=$?
  [ $exitCode != 0 ] || fail "building //... succeed while expected failure"
  expect_not_log "PACKAGE"
  expect_log "Failed to load Starlark extension '@does_not_exist//:random.bzl'"

  # Retest with query //...
  exitCode=0
  bazel clean --expunge
  bazel query //... >& $TEST_log || exitCode=$?
  [ $exitCode != 0 ] || fail "querying //... succeed while expected failure"
  expect_not_log "PACKAGE"
  expect_log "Failed to load Starlark extension '@does_not_exist//:random.bzl'"
}
# A custom Starlark repository rule that symlinks an existing directory:
# Bazel must create the missing WORKSPACE file for the new repository and
# must not warn about a mismatched workspace name.
function test_starlark_local_repository() {
  create_new_workspace
  repo2=$new_workspace_dir
  # Remove the WORKSPACE file in the symlinked repo, so our Starlark rule has to
  # create one.
  rm $repo2/WORKSPACE
  cat > BUILD <<'EOF'
genrule(name='bar', cmd='echo foo | tee $@', outs=['bar.txt'])
EOF

  cd ${WORKSPACE_DIR}
  cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
load('//:test.bzl', 'repo')
repo(name='foo', path='$repo2')
EOF

  # Our custom repository rule
  cat >test.bzl <<EOF
def _impl(repository_ctx):
  repository_ctx.symlink(repository_ctx.path(repository_ctx.attr.path), repository_ctx.path(""))

repo = repository_rule(
    implementation=_impl,
    local=True,
    attrs={"path": attr.string(mandatory=True)})
EOF
  # Need to be in a package
  cat > BUILD

  bazel build @foo//:bar >& $TEST_log || fail "Failed to build"
  expect_log "foo"
  expect_not_log "Workspace name in .*/WORKSPACE (.*) does not match the name given in the repository's definition (@foo)"
  cat bazel-genfiles/external/foo/bar.txt >$TEST_log
  expect_log "foo"
}
# Shared fixture: creates an external repo exporting //:bar and a main
# workspace that instantiates it via a `repo` rule which the caller must
# define in test.bzl. Leaves $repo2 pointing at the external repo.
function setup_starlark_repository() {
  create_new_workspace
  repo2=$new_workspace_dir

  cat > bar.txt
  echo "filegroup(name='bar', srcs=['bar.txt'])" > BUILD

  cd "${WORKSPACE_DIR}"
  cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
load('//:test.bzl', 'repo')
repo(name = 'foo')
EOF
  # Need to be in a package
  cat > BUILD
}
# Starlark flags (here the testing-only canary flag, which appends a marker
# to every print()) must propagate into repository rule implementation
# functions.
function test_starlark_flags_affect_repository_rule() {
  setup_starlark_repository
  cat >test.bzl <<EOF
def _impl(repository_ctx):
  print("In repo rule: ")
  # Symlink so a repository is created
  repository_ctx.symlink(repository_ctx.path("$repo2"), repository_ctx.path(""))

repo = repository_rule(implementation=_impl, local=True)
EOF

  MARKER="<== Starlark flag test ==>"

  bazel build @foo//:bar >& $TEST_log \
    || fail "Expected build to succeed"
  expect_log "In repo rule: " "Did not find repository rule print output"
  expect_not_log "$MARKER" \
    "Marker string '$MARKER' was seen even though \
--internal_starlark_flag_test_canary wasn't passed"

  # Build with the special testing flag that appends a marker string to all
  # print() calls.
  bazel build @foo//:bar --internal_starlark_flag_test_canary >& $TEST_log \
    || fail "Expected build to succeed"
  expect_log "In repo rule: $MARKER" \
    "Starlark flags are not propagating to repository rule implementation \
function evaluation"
}
# Exercises repository_ctx.which() (for a system binary and for one found
# only through the *client* PATH) and repository_ctx.execute().
function test_starlark_repository_which_and_execute() {
  setup_starlark_repository

  echo "#!/bin/sh" > bin.sh
  echo "exit 0" >> bin.sh
  chmod +x bin.sh

  # Our custom repository rule
  cat >test.bzl <<EOF
def _impl(repository_ctx):
  # Symlink so a repository is created
  repository_ctx.symlink(repository_ctx.path("$repo2"), repository_ctx.path(""))
  bash = repository_ctx.which("bash")
  if bash == None:
    fail("Bash not found!")
  bin = repository_ctx.which("bin.sh")
  if bin == None:
    fail("bin.sh not found!")
  result = repository_ctx.execute([bash, "--version"], 10, {"FOO": "BAR"})
  if result.return_code != 0:
    fail("Non-zero return code from bash: " + str(result.return_code))
  if result.stderr != "":
    fail("Non-empty error output: " + result.stderr)
  print(result.stdout)

repo = repository_rule(implementation=_impl, local=True)
EOF

  # Test we are using the client environment, not the server one
  bazel info &> /dev/null  # Start up the server.
  FOO="BAZ" PATH="${PATH}:${PWD}" bazel build @foo//:bar >& $TEST_log \
    || fail "Failed to build"
  expect_log "version"
}
# repository_ctx.execute() must capture stderr and the non-zero exit code;
# with quiet=False the subprocess stderr is forwarded to the user.
function test_starlark_repository_execute_stderr() {
  setup_starlark_repository
  cat >test.bzl <<EOF
def _impl(repository_ctx):
  # Symlink so a repository is created
  repository_ctx.symlink(repository_ctx.path("$repo2"), repository_ctx.path(""))
  result = repository_ctx.execute([str(repository_ctx.which("bash")), "-c", "echo erf >&2; exit 1"])
  if result.return_code != 1:
    fail("Incorrect return code from bash: %s != 1\n%s" % (result.return_code, result.stderr))
  if result.stdout != "":
    fail("Non-empty output: %s (stderr was %s)" % (result.stdout, result.stderr))
  print(result.stderr)
  repository_ctx.execute([str(repository_ctx.which("bash")), "-c", "echo shhhh >&2"], quiet = False)

repo = repository_rule(implementation=_impl, local=True)
EOF

  bazel build @foo//:bar >& $TEST_log || fail "Failed to build"
  expect_log "erf"
  expect_log "shhhh"
}
# repository_ctx.execute() must honor the environment dict and the
# working_directory argument (path is cygpath-translated on Windows for
# the comparison).
function test_starlark_repository_execute_env_and_workdir() {
  setup_starlark_repository
  cat >test.bzl <<EOF
def _impl(repository_ctx):
  # Symlink so a repository is created
  repository_ctx.symlink(repository_ctx.path("$repo2"), repository_ctx.path(""))
  result = repository_ctx.execute(
    [str(repository_ctx.which("bash")), "-c", "echo PWD=\$PWD TOTO=\$TOTO"],
    1000000,
    { "TOTO": "titi" },
    working_directory = "$repo2")
  if result.return_code != 0:
    fail("Incorrect return code from bash: %s != 0\n%s" % (result.return_code, result.stderr))
  print(result.stdout)
repo = repository_rule(implementation=_impl, local=True)
EOF

  bazel build @foo//:bar >& $TEST_log || fail "Failed to build"
  if "$is_windows"; then
    repo2="$(cygpath $repo2)"
  fi
  expect_log "PWD=$repo2 TOTO=titi"
}
# A repository rule that reads os.environ: changing the variable (after
# clean --expunge) re-runs the rule, server restarts alone do not,
# --action_env overrides the client environment, and editing test.bzl
# invalidates the repository.
function test_starlark_repository_environ() {
  setup_starlark_repository

  # Our custom repository rule
  cat >test.bzl <<EOF
def _impl(repository_ctx):
  print(repository_ctx.os.environ["FOO"])
  # Symlink so a repository is created
  repository_ctx.symlink(repository_ctx.path("$repo2"), repository_ctx.path(""))
repo = repository_rule(implementation=_impl, local=False)
EOF

  # TODO(dmarting): We should seriously have something better to force a refetch...
  bazel clean --expunge
  FOO=BAR bazel build @foo//:bar >& $TEST_log || fail "Failed to build"
  expect_log "BAR"

  FOO=BAR bazel clean --expunge >& $TEST_log
  FOO=BAR bazel info >& $TEST_log

  FOO=BAZ bazel build @foo//:bar >& $TEST_log || fail "Failed to build"
  expect_log "BAZ"

  # Test that we don't re-run on server restart.
  FOO=BEZ bazel build @foo//:bar >& $TEST_log || fail "Failed to build"
  expect_not_log "BEZ"

  bazel shutdown >& $TEST_log
  FOO=BEZ bazel build @foo//:bar >& $TEST_log || fail "Failed to build"
  expect_not_log "BEZ"

  # Test that --action_env value is taken
  # TODO(dmarting): The current implementation cannot invalidate on environment
  # but the incoming change can declare environment dependency, once this is
  # done, maybe we should update this test to remove clean --expunge and use the
  # invalidation mechanism instead?
  bazel clean --expunge
  FOO=BAZ bazel build --action_env=FOO=BAZINGA @foo//:bar >& $TEST_log \
    || fail "Failed to build"
  expect_log "BAZINGA"

  bazel clean --expunge
  FOO=BAZ bazel build --action_env=FOO @foo//:bar >& $TEST_log \
    || fail "Failed to build"
  expect_log "BAZ"
  expect_not_log "BAZINGA"

  # Test modifying test.bzl invalidate the repository
  cat >test.bzl <<EOF
def _impl(repository_ctx):
  print(repository_ctx.os.environ["BAR"])
  # Symlink so a repository is created
  repository_ctx.symlink(repository_ctx.path("$repo2"), repository_ctx.path(""))
repo = repository_rule(implementation=_impl, local=True)
EOF
  BAR=BEZ bazel build @foo//:bar >& $TEST_log || fail "Failed to build"
  expect_log "BEZ"

  # Shutdown and modify again
  bazel shutdown
  cat >test.bzl <<EOF
def _impl(repository_ctx):
  print(repository_ctx.os.environ["BAZ"])
  # Symlink so a repository is created
  repository_ctx.symlink(repository_ctx.path("$repo2"), repository_ctx.path(""))
repo = repository_rule(implementation=_impl, local=True)
EOF
  BAZ=BOZ bazel build @foo//:bar >& $TEST_log || fail "Failed to build"
  expect_log "BOZ"
}
# Writes test.bzl with a repository rule that reads FOO/BAR/BAZ, templates
# them into bar.txt, and bumps the run counter stored in $1. $2 is the
# `environ` attribute list the rule declares, i.e. which variables trigger
# a re-fetch when they change.
function write_environ_starlark() {
  local execution_file="$1"
  local environ="$2"
  cat >test.bzl <<EOF
load("//:environ.bzl", "environ")

def _impl(repository_ctx):
  # This might cause a function restart, do it first
  foo = environ(repository_ctx, "FOO")
  bar = environ(repository_ctx, "BAR")
  baz = environ(repository_ctx, "BAZ")
  repository_ctx.template("bar.txt", Label("//:bar.tpl"), {
      "%{FOO}": foo,
      "%{BAR}": bar,
      "%{BAZ}": baz}, False)
  exe_result = repository_ctx.execute(["cat", "${execution_file}"]);
  execution = int(exe_result.stdout.strip()) + 1
  repository_ctx.execute(["bash", "-c", "echo %s >${execution_file}" % execution])
  print("<%s> FOO=%s BAR=%s BAZ=%s" % (execution, foo, bar, baz))
  repository_ctx.file("BUILD", "filegroup(name='bar', srcs=['bar.txt'])")

repo = repository_rule(implementation=_impl, environ=[${environ}])
EOF
}
# Fixture for the invalidation tests: sets up the Starlark repository,
# writes environ.bzl/bar.tpl, declares environ=["FOO", "BAR"], resets the
# run counter file, and echoes the counter path for the caller to capture.
function setup_invalidation_test() {
  local startup_flag="${1-}"
  setup_starlark_repository

  # We use a counter to avoid other invalidation to hide repository
  # invalidation (e.g., --action_env will cause all action to re-run).
  local execution_file="${TEST_TMPDIR}/execution"

  # Our custom repository rule
  cat >environ.bzl <<EOF
def environ(r_ctx, var):
  return r_ctx.os.environ[var] if var in r_ctx.os.environ else "undefined"
EOF

  write_environ_starlark "${execution_file}" '"FOO", "BAR"'

  cat <<EOF >bar.tpl
FOO=%{FOO} BAR=%{BAR} BAZ=%{BAZ}
EOF

  bazel ${startup_flag} clean --expunge
  echo 0 >"${execution_file}"
  echo "${execution_file}"
}
# Test invalidation based on environment variable
# Template: with environ=["FOO", "BAR"] declared, changing (or removing)
# FOO or BAR re-fetches the repository while other variables do not; after
# extending the declared list to include BAZ, BAZ changes invalidate too.
# The counter in $execution_file counts actual implementation runs.
function environ_invalidation_test_template() {
  local startup_flag="${1-}"
  local execution_file="$(setup_invalidation_test)"
  FOO=BAR bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  expect_log "<1> FOO=BAR BAR=undefined BAZ=undefined"
  assert_equals 1 $(cat "${execution_file}")
  FOO=BAR bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  assert_equals 1 $(cat "${execution_file}")

  # Test that changing FOO is causing a refetch
  FOO=BAZ bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  expect_log "<2> FOO=BAZ BAR=undefined BAZ=undefined"
  assert_equals 2 $(cat "${execution_file}")
  FOO=BAZ bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  assert_equals 2 $(cat "${execution_file}")

  # Test that changing BAR is causing a refetch
  FOO=BAZ BAR=FOO bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  expect_log "<3> FOO=BAZ BAR=FOO BAZ=undefined"
  assert_equals 3 $(cat "${execution_file}")
  FOO=BAZ BAR=FOO bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  assert_equals 3 $(cat "${execution_file}")

  # Test that changing BAZ is not causing a refetch
  FOO=BAZ BAR=FOO BAZ=BAR bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  assert_equals 3 $(cat "${execution_file}")

  # Test more change in the environment
  FOO=BAZ BAR=FOO BEZ=BAR bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  assert_equals 3 $(cat "${execution_file}")

  # Test that removing BEZ is not causing a refetch
  FOO=BAZ BAR=FOO bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  assert_equals 3 $(cat "${execution_file}")

  # Test that removing BAR is causing a refetch
  FOO=BAZ bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  expect_log "<4> FOO=BAZ BAR=undefined BAZ=undefined"
  assert_equals 4 $(cat "${execution_file}")
  FOO=BAZ bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  assert_equals 4 $(cat "${execution_file}")

  # Now try to depends on more variables
  write_environ_starlark "${execution_file}" '"FOO", "BAR", "BAZ"'

  # The Starlark rule has changed, so a rebuild should happen
  FOO=BAZ bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  expect_log "<5> FOO=BAZ BAR=undefined BAZ=undefined"
  assert_equals 5 $(cat "${execution_file}")
  FOO=BAZ bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  assert_equals 5 $(cat "${execution_file}")

  # Now a change to BAZ should trigger a rebuild
  FOO=BAZ BAZ=BEZ bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  expect_log "<6> FOO=BAZ BAR=undefined BAZ=BEZ"
  assert_equals 6 $(cat "${execution_file}")
  FOO=BAZ BAZ=BEZ bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  assert_equals 6 $(cat "${execution_file}")
}
# Like environ_invalidation_test_template, but drives the variable changes
# through --action_env instead of the client environment; only a change to
# a *declared* environ variable's value triggers a re-fetch.
function environ_invalidation_action_env_test_template() {
  local startup_flag="${1-}"
  setup_starlark_repository
  # NOTE(review): setup_invalidation_test (called below) runs
  # setup_starlark_repository itself, so this first call looks redundant --
  # confirm before removing it.

  # We use a counter to avoid other invalidation to hide repository
  # invalidation (e.g., --action_env will cause all action to re-run).
  local execution_file="$(setup_invalidation_test)"

  # Set to FOO=BAZ BAR=FOO
  FOO=BAZ BAR=FOO bazel ${startup_flag} build @foo//:bar >& $TEST_log \
      || fail "Failed to build"
  expect_log "<1> FOO=BAZ BAR=FOO BAZ=undefined"
  assert_equals 1 $(cat "${execution_file}")

  # Test with changing using --action_env
  bazel ${startup_flag} build \
      --action_env FOO=BAZ --action_env BAR=FOO --action_env BEZ=BAR \
      @foo//:bar >& $TEST_log || fail "Failed to build"
  assert_equals 1 $(cat "${execution_file}")
  bazel ${startup_flag} build \
      --action_env FOO=BAZ --action_env BAR=FOO --action_env BAZ=BAR \
      @foo//:bar >& $TEST_log || fail "Failed to build"
  assert_equals 1 $(cat "${execution_file}")
  bazel ${startup_flag} build \
      --action_env FOO=BAR --action_env BAR=FOO --action_env BAZ=BAR \
      @foo//:bar >& $TEST_log || fail "Failed to build"
  expect_log "<2> FOO=BAR BAR=FOO BAZ=BAR"
  assert_equals 2 $(cat "${execution_file}")
}
# Environ-based invalidation with a persistent Bazel server.
function test_starlark_repository_environ_invalidation() {
environ_invalidation_test_template
}
# Same test as previous but with server restart between each invocation
function test_starlark_repository_environ_invalidation_batch() {
environ_invalidation_test_template --batch
}
# Environ-based invalidation driven by --action_env, persistent server.
function test_starlark_repository_environ_invalidation_action_env() {
environ_invalidation_action_env_test_template
}
# Same as above, but with a server restart between invocations (--batch).
function test_starlark_repository_environ_invalidation_action_env_batch() {
environ_invalidation_action_env_test_template --batch
}
# Test invalidation based on change to the bzl files
# Template: a repository must be refetched when the .bzl file defining it
# (test.bzl) or a .bzl file it loads (environ.bzl) changes. The optional
# first argument is an extra bazel startup flag (e.g. --batch).
function bzl_invalidation_test_template() {
local startup_flag="${1-}"
local execution_file="$(setup_invalidation_test)"
# Pin the declared environment variables so only bzl changes can
# invalidate the repository.
local flags="--action_env FOO=BAR --action_env BAR=BAZ --action_env BAZ=FOO"
local bazel_build="bazel ${startup_flag} build ${flags}"
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
expect_log "<1> FOO=BAR BAR=BAZ BAZ=FOO"
assert_equals 1 $(cat "${execution_file}")
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
assert_equals 1 $(cat "${execution_file}")
# Changing the Starlark file cause a refetch
cat <<EOF >>test.bzl
# Just add a comment
EOF
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
expect_log "<2> FOO=BAR BAR=BAZ BAZ=FOO"
assert_equals 2 $(cat "${execution_file}")
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
assert_equals 2 $(cat "${execution_file}")
# But also changing the environ.bzl file does a refetch
cat <<EOF >>environ.bzl
# Just add a comment
EOF
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
expect_log "<3> FOO=BAR BAR=BAZ BAZ=FOO"
assert_equals 3 $(cat "${execution_file}")
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
assert_equals 3 $(cat "${execution_file}")
}
# bzl-file-based invalidation with a persistent Bazel server.
function test_starlark_repository_bzl_invalidation() {
bzl_invalidation_test_template
}
# Same test as previous but with server restart between each invocation
function test_starlark_repository_bzl_invalidation_batch() {
bzl_invalidation_test_template --batch
}
# Test invalidation based on change to the bzl files
# Template: a repository must be refetched when a plain file it consumes
# (here the template bar.tpl) changes. The optional first argument is an
# extra bazel startup flag (e.g. --batch).
function file_invalidation_test_template() {
local startup_flag="${1-}"
local execution_file="$(setup_invalidation_test)"
# Pin the declared environment variables so only file changes can
# invalidate the repository.
local flags="--action_env FOO=BAR --action_env BAR=BAZ --action_env BAZ=FOO"
local bazel_build="bazel ${startup_flag} build ${flags}"
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
expect_log "<1> FOO=BAR BAR=BAZ BAZ=FOO"
assert_equals 1 $(cat "${execution_file}")
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
assert_equals 1 $(cat "${execution_file}")
# Changing the template file (bar.tpl) causes a refetch
cat <<EOF >>bar.tpl
Add more stuff
EOF
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
expect_log "<2> FOO=BAR BAR=BAZ BAZ=FOO"
assert_equals 2 $(cat "${execution_file}")
${bazel_build} @foo//:bar >& $TEST_log || fail "Failed to build"
assert_equals 2 $(cat "${execution_file}")
}
# File-based invalidation with a persistent Bazel server.
function test_starlark_repository_file_invalidation() {
file_invalidation_test_template
}
# Same test as previous but with server restart between each invocation
function test_starlark_repository_file_invalidation_batch() {
file_invalidation_test_template --batch
}
# Test invalidation based on changes of the Starlark semantics
# Template: toggling a Starlark-semantics flag
# (--incompatible_run_shell_command_string) must invalidate the
# repository exactly once. Optional first argument: extra bazel startup
# flag (e.g. --batch).
function starlark_invalidation_test_template() {
local startup_flag="${1-}"
local execution_file="$(setup_invalidation_test)"
# Pin the declared environment variables so only the semantics flag can
# invalidate the repository.
local flags="--action_env FOO=BAR --action_env BAR=BAZ --action_env BAZ=FOO"
local bazel_build="bazel ${startup_flag} build ${flags}"
${bazel_build} --noincompatible_run_shell_command_string @foo//:bar \
>& ${TEST_log} || fail "Expected success"
expect_log "<1> FOO=BAR BAR=BAZ BAZ=FOO"
assert_equals 1 $(cat "${execution_file}")
# Dump the log for easier debugging of failures.
echo; cat ${TEST_log}; echo
${bazel_build} --noincompatible_run_shell_command_string @foo//:bar \
>& ${TEST_log} || fail "Expected success"
assert_equals 1 $(cat "${execution_file}")
echo; cat ${TEST_log}; echo
# Changing the starlark semantics should invalidate once
${bazel_build} --incompatible_run_shell_command_string @foo//:bar \
>& ${TEST_log} || fail "Expected success"
expect_log "<2> FOO=BAR BAR=BAZ BAZ=FOO"
assert_equals 2 $(cat "${execution_file}")
${bazel_build} --incompatible_run_shell_command_string @foo//:bar \
>& ${TEST_log} || fail "Expected success"
assert_equals 2 $(cat "${execution_file}")
}
# Semantics-flag invalidation with a persistent Bazel server.
function test_starlark_invalidation() {
starlark_invalidation_test_template
}
# Same as above, but with a server restart between invocations (--batch).
function test_starlark_invalidation_batch() {
starlark_invalidation_test_template --batch
}
# Verify --repo_env: a repository rule sees the value fixed by
# --repo_env regardless of the client environment, changing the client
# environment does not refetch, and switching the --repo_env value (via
# --config) refetches the repository without rerunning unrelated actions.
function test_repo_env() {
setup_starlark_repository
cat > test.bzl <<'EOF'
def _impl(ctx):
# Make a rule depending on the environment variable FOO,
# properly recording its value. Also add a time stamp
# to verify that the rule is rerun.
ctx.execute(["bash", "-c", "echo FOO=$FOO > env.txt"])
ctx.execute(["bash", "-c", "date +%s >> env.txt"])
ctx.file("BUILD", 'exports_files(["env.txt"])')
repo = repository_rule(
implementation = _impl,
environ = ["FOO"],
)
EOF
cat > BUILD <<'EOF'
genrule(
name = "repoenv",
outs = ["repoenv.txt"],
srcs = ["@foo//:env.txt"],
cmd = "cp $< $@",
)
# Have a normal rule, unrelated to the external repository.
# To test if it was rerun, make it non-hermetic and record a
# time stamp.
genrule(
name = "unrelated",
outs = ["unrelated.txt"],
cmd = "date +%s > $@",
)
EOF
cat > .bazelrc <<EOF
build:foo --repo_env=FOO=foo
build:bar --repo_env=FOO=bar
EOF
bazel build --config=foo //:repoenv //:unrelated
cp `bazel info bazel-genfiles 2>/dev/null`/repoenv.txt repoenv1.txt
cp `bazel info bazel-genfiles 2> /dev/null`/unrelated.txt unrelated1.txt
echo; cat repoenv1.txt; echo; cat unrelated1.txt; echo
grep -q 'FOO=foo' repoenv1.txt \
|| fail "Expected FOO to be visible to repo rules"
sleep 2 # ensure any rerun will have a different time stamp
FOO=CHANGED bazel build --config=foo //:repoenv //:unrelated
# nothing should change, as actions don't see FOO and for repositories
# the value is fixed by --repo_env
cp `bazel info bazel-genfiles 2>/dev/null`/repoenv.txt repoenv2.txt
cp `bazel info bazel-genfiles 2> /dev/null`/unrelated.txt unrelated2.txt
echo; cat repoenv2.txt; echo; cat unrelated2.txt; echo
diff repoenv1.txt repoenv2.txt \
|| fail "Expected repository to not change"
diff unrelated1.txt unrelated2.txt \
|| fail "Expected unrelated action to not be rerun"
bazel build --config=bar //:repoenv //:unrelated
# The new config should be picked up, but the unrelated target should
# not be rerun
# (Bug fix: these redirections previously used "3>/dev/null", silencing
# the unused fd 3 instead of stderr as everywhere else in this test.)
cp `bazel info bazel-genfiles 2>/dev/null`/repoenv.txt repoenv3.txt
cp `bazel info bazel-genfiles 2> /dev/null`/unrelated.txt unrelated3.txt
echo; cat repoenv3.txt; echo; cat unrelated3.txt; echo
grep -q 'FOO=bar' repoenv3.txt \
|| fail "Expected FOO to be visible to repo rules"
diff unrelated1.txt unrelated3.txt \
|| fail "Expected unrelated action to not be rerun"
}
# Verify that a change of --repo_env refetches the repository, and that
# the recorded value survives a server restart (no spurious refetch).
function test_repo_env_invalidation() {
# regression test for https://github.com/bazelbuild/bazel/issues/8869
WRKDIR=$(mktemp -d "${TEST_TMPDIR}/testXXXXXX")
cd "${WRKDIR}"
cat > WORKSPACE <<'EOF'
load("//:my_repository_rule.bzl", "my_repository_rule")
my_repository_rule(
name = "my_repository_rule",
)
EOF
cat > my_repository_rule.bzl <<'EOF'
def _my_repository_rule_impl(rctx):
foo = rctx.os.environ.get("foo", default = "")
print('foo = ' + foo)
rctx.file("BUILD.bazel",
"exports_files(['update_time'], visibility=['//visibility:public'])")
rctx.execute(["bash", "-c", "date +%s > update_time"])
my_repository_rule = repository_rule(
environ = ["foo"],
implementation = _my_repository_rule_impl,
)
EOF
cat > BUILD <<'EOF'
genrule(
name = "repotime",
outs = ["repotime.txt"],
srcs = ["@my_repository_rule//:update_time"],
cmd = "cp $< $@",
)
EOF
bazel build //:repotime
cp `bazel info bazel-genfiles 2>/dev/null`/repotime.txt time1.txt
# Sleep so a refetch would produce a different timestamp.
sleep 2;
bazel build --repo_env=foo=bar //:repotime
cp `bazel info bazel-genfiles 2>/dev/null`/repotime.txt time2.txt
diff time1.txt time2.txt && fail "Expected repo to be refetched" || :
# Restart the server; the unchanged --repo_env value must not refetch.
bazel shutdown
sleep 2;
bazel build --repo_env=foo=bar //:repotime
cp `bazel info bazel-genfiles 2>/dev/null`/repotime.txt time3.txt
diff time2.txt time3.txt || fail "Expected repo to not be refetched"
}
# Verify that repository_ctx.file/template honor the executable flag:
# files created with executable=True get the x-bit, others do not.
function test_starlark_repository_executable_flag() {
if "$is_windows"; then
# There is no executable flag on Windows.
echo "Skipping test_starlark_repository_executable_flag on Windows"
return
fi
setup_starlark_repository
# Our custom repository rule
cat >test.bzl <<EOF
def _impl(repository_ctx):
repository_ctx.file("test.sh", "exit 0")
repository_ctx.file("BUILD", "sh_binary(name='bar',srcs=['test.sh'])", False)
repository_ctx.template("test2", Label("//:bar"), {}, False)
repository_ctx.template("test2.sh", Label("//:bar"), {}, True)
repo = repository_rule(implementation=_impl, local=True)
EOF
# Create the (empty, when run non-interactively) file "bar" used as the
# template source above.
cat >bar
bazel run @foo//:bar >& $TEST_log || fail "Execution of @foo//:bar failed"
output_base=$(bazel info output_base)
test -x "${output_base}/external/foo/test.sh" || fail "test.sh is not executable"
test -x "${output_base}/external/foo/test2.sh" || fail "test2.sh is not executable"
test ! -x "${output_base}/external/foo/BUILD" || fail "BUILD is executable"
test ! -x "${output_base}/external/foo/test2" || fail "test2 is executable"
}
# Verify repository_ctx.download: sha256 verification, content
# correctness, and the executable flag on downloaded files.
function test_starlark_repository_download() {
# Prepare HTTP server with Python
local server_dir="${TEST_TMPDIR}/server_dir"
mkdir -p "${server_dir}"
local download_with_sha256="${server_dir}/download_with_sha256.txt"
local download_executable_file="${server_dir}/download_executable_file.sh"
echo "This is a file" > "${download_with_sha256}"
echo "echo 'I am executable'" > "${download_executable_file}"
# head -c 64 keeps only the hex digest, dropping the file name.
file_sha256="$(sha256sum "${download_with_sha256}" | head -c 64)"
file_exec_sha256="$(sha256sum "${download_executable_file}" | head -c 64)"
# Start HTTP server with Python
startup_server "${server_dir}"
setup_starlark_repository
# Our custom repository rule
cat >test.bzl <<EOF
def _impl(repository_ctx):
repository_ctx.download(
"http://localhost:${fileserver_port}/download_with_sha256.txt",
"download_with_sha256.txt", "${file_sha256}")
repository_ctx.download(
"http://localhost:${fileserver_port}/download_executable_file.sh",
"download_executable_file.sh", executable=True, sha256="$file_exec_sha256")
repository_ctx.file("BUILD") # necessary directories should already created by download function
repo = repository_rule(implementation=_impl, local=False)
EOF
bazel build @foo//:all >& $TEST_log && shutdown_server \
|| fail "Execution of @foo//:all failed"
output_base="$(bazel info output_base)"
# Test download
test -e "${output_base}/external/foo/download_with_sha256.txt" \
|| fail "download_with_sha256.txt is not downloaded"
test -e "${output_base}/external/foo/download_executable_file.sh" \
|| fail "download_executable_file.sh is not downloaded"
# Test download
diff "${output_base}/external/foo/download_with_sha256.txt" \
"${download_with_sha256}" >/dev/null \
|| fail "download_with_sha256.txt is not downloaded successfully"
diff "${output_base}/external/foo/download_executable_file.sh" \
"${download_executable_file}" >/dev/null \
|| fail "download_executable_file.sh is not downloaded successfully"
# No executable flag for file on Windows
if "$is_windows"; then
return
fi
# Test executable
test ! -x "${output_base}/external/foo/download_with_sha256.txt" \
|| fail "download_with_sha256.txt is executable"
test -x "${output_base}/external/foo/download_executable_file.sh" \
|| fail "download_executable_file.sh is not executable"
}
# Verify that repository_ctx.download and download_and_extract return a
# struct carrying the sha256 of the payload, both when a checksum was
# provided and when it was computed by Bazel.
function test_starlark_repository_context_downloads_return_struct() {
# Prepare HTTP server with Python
local server_dir="${TEST_TMPDIR}/server_dir"
mkdir -p "${server_dir}"
local download_with_sha256="${server_dir}/download_with_sha256.txt"
local download_no_sha256="${server_dir}/download_no_sha256.txt"
local compressed_with_sha256="${server_dir}/compressed_with_sha256.txt"
local compressed_no_sha256="${server_dir}/compressed_no_sha256.txt"
echo "This is one file" > "${download_no_sha256}"
echo "This is another file" > "${download_with_sha256}"
echo "Compressed file with sha" > "${compressed_with_sha256}"
echo "Compressed file no sha" > "${compressed_no_sha256}"
zip "${compressed_with_sha256}".zip "${compressed_with_sha256}"
zip "${compressed_no_sha256}".zip "${compressed_no_sha256}"
# head -c 64 keeps only the hex digest, dropping the file name.
provided_sha256="$(sha256sum "${download_with_sha256}" | head -c 64)"
not_provided_sha256="$(sha256sum "${download_no_sha256}" | head -c 64)"
compressed_provided_sha256="$(sha256sum "${compressed_with_sha256}.zip" | head -c 64)"
compressed_not_provided_sha256="$(sha256sum "${compressed_no_sha256}.zip" | head -c 64)"
# Start HTTP server with Python
startup_server "${server_dir}"
# On Windows, a file url should be file:///C:/foo/bar,
# we need to add one more slash at the beginning.
if "$is_windows"; then
server_dir="/${server_dir}"
fi
setup_starlark_repository
# Our custom repository rule
cat >test.bzl <<EOF
def _impl(repository_ctx):
no_sha_return = repository_ctx.download(
url = "file://${server_dir}/download_no_sha256.txt",
output = "download_no_sha256.txt")
with_sha_return = repository_ctx.download(
url = "http://localhost:${fileserver_port}/download_with_sha256.txt",
output = "download_with_sha256.txt",
sha256 = "${provided_sha256}")
compressed_no_sha_return = repository_ctx.download_and_extract(
url = "file://${server_dir}/compressed_no_sha256.txt.zip",
output = "compressed_no_sha256.txt.zip")
compressed_with_sha_return = repository_ctx.download_and_extract(
url = "http://localhost:${fileserver_port}/compressed_with_sha256.txt.zip",
output = "compressed_with_sha256.txt.zip",
sha256 = "${compressed_provided_sha256}")
file_content = "no_sha_return " + no_sha_return.sha256 + "\n"
file_content += "with_sha_return " + with_sha_return.sha256 + "\n"
file_content += "compressed_no_sha_return " + compressed_no_sha_return.sha256 + "\n"
file_content += "compressed_with_sha_return " + compressed_with_sha_return.sha256
repository_ctx.file("returned_shas.txt", content = file_content, executable = False)
repository_ctx.file("BUILD") # necessary directories should already created by download function
repo = repository_rule(implementation = _impl, local = False)
EOF
# This test case explicitly verifies that a checksum is returned, even if
# none was provided by the call to download_and_extract. So we do have to
# allow a download without provided checksum, even though it is plain http;
# nevertheless, localhost is pretty safe against man-in-the-middle attacks.
bazel build @foo//:all \
>& $TEST_log && shutdown_server || fail "Execution of @foo//:all failed"
output_base="$(bazel info output_base)"
grep "no_sha_return $not_provided_sha256" $output_base/external/foo/returned_shas.txt \
|| fail "expected calculated sha256 $not_provided_sha256"
grep "with_sha_return $provided_sha256" $output_base/external/foo/returned_shas.txt \
|| fail "expected provided sha256 $provided_sha256"
grep "compressed_with_sha_return $compressed_provided_sha256" $output_base/external/foo/returned_shas.txt \
|| fail "expected provided sha256 $compressed_provided_sha256"
grep "compressed_no_sha_return $compressed_not_provided_sha256" $output_base/external/foo/returned_shas.txt \
|| fail "expected compressed calculated sha256 $compressed_not_provided_sha256"
}
# Verify that repository_ctx.download accepts a list of URLs together
# with keyword arguments (sha256, output) forwarded from rule attributes.
function test_starlark_repository_download_args() {
# Prepare HTTP server with Python
local server_dir="${TEST_TMPDIR}/server_dir"
mkdir -p "${server_dir}"
local download_1="${server_dir}/download_1.txt"
local download_2="${server_dir}/download_2.txt"
# Both files have identical content, so one sha256 matches either URL.
echo "The file's content" > "${download_1}"
echo "The file's content" > "${download_2}"
file_sha256="$(sha256sum "${download_1}" | head -c 64)"
# Start HTTP server with Python
startup_server "${server_dir}"
create_new_workspace
repo2=$new_workspace_dir
# Create an (empty, when run non-interactively) bar.txt for the //:bar
# filegroup below.
cat > bar.txt
echo "filegroup(name='bar', srcs=['bar.txt'])" > BUILD
cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
load('//:test.bzl', 'repo')
repo(name = 'foo',
urls = [
"http://localhost:${fileserver_port}/download_1.txt",
"http://localhost:${fileserver_port}/download_2.txt",
],
sha256 = "${file_sha256}",
output = "whatever.txt"
)
EOF
# Our custom repository rule
cat >test.bzl <<EOF
def _impl(repository_ctx):
repository_ctx.file("BUILD")
repository_ctx.download(
repository_ctx.attr.urls,
sha256 = repository_ctx.attr.sha256,
output=repository_ctx.attr.output,
)
repo = repository_rule(implementation=_impl,
local=False,
attrs = {
"urls" : attr.string_list(),
"output" : attr.string(),
"sha256" : attr.string(),
}
)
EOF
bazel build @foo//:all >& $TEST_log && shutdown_server \
|| fail "Execution of @foo//:all failed"
output_base="$(bazel info output_base)"
# Test download
test -e "${output_base}/external/foo/whatever.txt" \
|| fail "whatever.txt is not downloaded"
}
# Verify repository_ctx.download_and_extract for tar.gz and zip archives:
# positional and keyword argument forms, extraction into the repository
# root and into subdirectories, and cleanup of the temporary archives.
function test_starlark_repository_download_and_extract() {
# Prepare HTTP server with Python
local server_dir="${TEST_TMPDIR}/server_dir"
mkdir -p "${server_dir}"
local file_prefix="${server_dir}/download_and_extract"
# Create the archives relative to TEST_TMPDIR so the archive members are
# stored under the "server_dir/" prefix.
pushd ${TEST_TMPDIR}
echo "This is one file" > server_dir/download_and_extract1.txt
echo "This is another file" > server_dir/download_and_extract2.txt
echo "This is a third file" > server_dir/download_and_extract3.txt
tar -zcvf server_dir/download_and_extract1.tar.gz server_dir/download_and_extract1.txt
zip server_dir/download_and_extract2.zip server_dir/download_and_extract2.txt
zip server_dir/download_and_extract3.zip server_dir/download_and_extract3.txt
# head -c 64 keeps only the hex digest, dropping the file name.
file1_sha256="$(sha256sum server_dir/download_and_extract1.tar.gz | head -c 64)"
file2_sha256="$(sha256sum server_dir/download_and_extract2.zip | head -c 64)"
file3_sha256="$(sha256sum server_dir/download_and_extract3.zip | head -c 64)"
popd
# Start HTTP server with Python
startup_server "${server_dir}"
setup_starlark_repository
# Our custom repository rule
cat >test.bzl <<EOF
def _impl(repository_ctx):
repository_ctx.file("BUILD")
repository_ctx.download_and_extract(
"http://localhost:${fileserver_port}/download_and_extract1.tar.gz", "", sha256="${file1_sha256}")
repository_ctx.download_and_extract(
"http://localhost:${fileserver_port}/download_and_extract2.zip", "", "${file2_sha256}")
repository_ctx.download_and_extract(
"http://localhost:${fileserver_port}/download_and_extract1.tar.gz", "some/path", sha256="${file1_sha256}")
repository_ctx.download_and_extract(
"http://localhost:${fileserver_port}/download_and_extract3.zip", ".", "${file3_sha256}", "", "")
repository_ctx.download_and_extract(
url = ["http://localhost:${fileserver_port}/download_and_extract3.zip"],
output = "other/path",
sha256 = "${file3_sha256}"
)
repo = repository_rule(implementation=_impl, local=False)
EOF
bazel clean --expunge_async >& $TEST_log || fail "bazel clean failed"
bazel build @foo//:all >& $TEST_log && shutdown_server \
|| fail "Execution of @foo//:all failed"
output_base="$(bazel info output_base)"
# Test cleanup
test -e "${output_base}/external/foo/server_dir/download_and_extract1.tar.gz" \
&& fail "temp file was not deleted successfully" || true
test -e "${output_base}/external/foo/server_dir/download_and_extract2.zip" \
&& fail "temp file was not deleted successfully" || true
test -e "${output_base}/external/foo/server_dir/download_and_extract3.zip" \
&& fail "temp file was not deleted successfully" || true
# Test download_and_extract
diff "${output_base}/external/foo/server_dir/download_and_extract1.txt" \
"${file_prefix}1.txt" >/dev/null \
|| fail "download_and_extract1.tar.gz was not extracted successfully"
diff "${output_base}/external/foo/some/path/server_dir/download_and_extract1.txt" \
"${file_prefix}1.txt" >/dev/null \
|| fail "download_and_extract1.tar.gz was not extracted successfully in some/path"
diff "${output_base}/external/foo/server_dir/download_and_extract2.txt" \
"${file_prefix}2.txt" >/dev/null \
|| fail "download_and_extract2.zip was not extracted successfully"
diff "${output_base}/external/foo/server_dir/download_and_extract3.txt" \
"${file_prefix}3.txt" >/dev/null \
|| fail "download_and_extract3.zip was not extracted successfully"
# Bug fix: this failure message previously named a non-existent
# "download_and_extract3.tar.gz"; the artifact under test is the zip
# archive extracted into other/path.
diff "${output_base}/external/foo/other/path/server_dir/download_and_extract3.txt" \
"${file_prefix}3.txt" >/dev/null \
|| fail "download_and_extract3.zip was not extracted successfully in other/path"
}
# Test native.bazel_version
# Verify that native.bazel_version, printed from a WORKSPACE macro,
# matches the version reported by "bazel info release".
function test_bazel_version() {
create_new_workspace
repo2=$new_workspace_dir
cat > BUILD <<'EOF'
genrule(
name = "test",
cmd = "echo 'Tra-la!' | tee $@",
outs = ["test.txt"],
visibility = ["//visibility:public"],
)
EOF
cd ${WORKSPACE_DIR}
cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
load('//:test.bzl', 'macro')
macro('$repo2')
EOF
# Empty package for the .bzl file
echo -n >BUILD
# Our macro
cat >test.bzl <<EOF
def macro(path):
print(native.bazel_version)
native.local_repository(name='test', path=path)
EOF
local version="$(bazel info release)"
# On release, Bazel binary get stamped, else we might run with an unstamped version.
if [ "$version" == "development version" ]; then
version=""
else
# Strip the leading "release " prefix, keeping only the version number.
version="${version#* }"
fi
bazel build @test//:test >& $TEST_log || fail "Failed to build"
expect_log ": ${version}."
}
# Test native.existing_rule(s), regression test for #1277
# Verify native.existing_rule / native.existing_rules in a WORKSPACE
# macro: a declared repository is found, an undeclared one is not.
function test_existing_rule() {
create_new_workspace
setup_skylib_support
repo2=$new_workspace_dir
# Create an empty BUILD file (no heredoc input when run non-interactively).
cat > BUILD
cat >> WORKSPACE <<EOF
local_repository(name = 'existing', path='$repo2')
load('//:test.bzl', 'macro')
macro()
EOF
# Empty package for the .bzl file
echo -n >BUILD
# Our macro
cat >test.bzl <<EOF
def test(s):
print("%s = %s,%s" % (s,
native.existing_rule(s) != None,
s in native.existing_rules()))
def macro():
test("existing")
test("non_existing")
EOF
bazel query //... >& $TEST_log || fail "Failed to build"
expect_log "existing = True,True"
expect_log "non_existing = False,False"
}
# Verify that "bazel sync --configure" refetches only repositories
# declared with configure = True, leaving ordinary repositories alone.
function test_configure_like_repos() {
cat > repos.bzl <<'EOF'
def _impl(ctx):
print("Executing %s" % (ctx.attr.name,))
ref = ctx.path(ctx.attr.reference)
# Here we explicitly copy a file where we constructed the name
# completely outside any build interfaces, so it is not registered
# as a dependency of the external repository.
ctx.execute(["cp", "%s.shadow" % (ref,), ctx.path("it.txt")])
ctx.file("BUILD", "exports_files(['it.txt'])")
source = repository_rule(
implementation = _impl,
attrs = {"reference" : attr.label()},
)
configure = repository_rule(
implementation = _impl,
attrs = {"reference" : attr.label()},
configure = True,
)
EOF
cat > WORKSPACE <<'EOF'
load("//:repos.bzl", "configure", "source")
configure(name="configure", reference="@//:reference.txt")
source(name="source", reference="@//:reference.txt")
EOF
cat > BUILD <<'EOF'
[ genrule(
name = name,
srcs = ["@%s//:it.txt" % (name,)],
outs = ["%s.txt" % (name,)],
cmd = "cp $< $@",
) for name in ["source", "configure"] ]
EOF
echo "Just to get the path" > reference.txt
echo "initial" > reference.txt.shadow
bazel build //:source //:configure
grep 'initial' `bazel info bazel-genfiles`/source.txt \
|| fail '//:source not generated properly'
grep 'initial' `bazel info bazel-genfiles`/configure.txt \
|| fail '//:configure not generated properly'
# Change the hidden dependency, then sync only configure-like repos.
echo "new value" > reference.txt.shadow
bazel sync --configure --experimental_repository_resolved_file=resolved.bzl \
2>&1 || fail "Expected sync --configure to succeed"
grep -q 'name.*configure' resolved.bzl \
|| fail "Expected 'configure' to be synced"
grep -q 'name.*source' resolved.bzl \
&& fail "Expected 'source' not to be synced" || :
bazel build //:source //:configure
grep -q 'initial' `bazel info bazel-genfiles`/source.txt \
|| fail '//:source did not keep its old value'
grep -q 'new value' `bazel info bazel-genfiles`/configure.txt \
|| fail '//:configure not synced properly'
}
# Verify that repository_ctx.execute timeouts can be scaled with
# --experimental_scale_timeouts: a 1s timeout on a 10s command fails,
# but succeeds when timeouts are scaled by 100.
function test_timeout_tunable() {
cat >> $(create_workspace_with_default_repos WORKSPACE) <<'EOF'
load("//:repo.bzl", "with_timeout")
with_timeout(name="maytimeout")
EOF
touch BUILD
cat > repo.bzl <<'EOF'
def _impl(ctx):
st =ctx.execute(["bash", "-c", "sleep 10 && echo Hello world > data.txt"],
timeout=1)
if st.return_code:
fail("Command did not succeed")
ctx.file("BUILD", "exports_files(['data.txt'])")
with_timeout = repository_rule(attrs = {}, implementation = _impl)
EOF
bazel sync && fail "expected timeout" || :
bazel sync --experimental_scale_timeouts=100 \
|| fail "expected success now the timeout is scaled"
bazel build @maytimeout//... \
|| fail "expected success after successful sync"
}
# Verify "bazel sync --only <repo>": only the named repository is
# refetched, tracked via per-repository fetch counters.
function test_sync_only() {
# Set up two repositories that count how often they are fetched
cat >environ.bzl <<'EOF'
def environ(r_ctx, var):
return r_ctx.os.environ[var] if var in r_ctx.os.environ else "undefined"
EOF
cat <<'EOF' >bar.tpl
FOO=%{FOO} BAR=%{BAR} BAZ=%{BAZ}
EOF
write_environ_starlark "${TEST_TMPDIR}/executionFOO" ""
mv test.bzl testfoo.bzl
write_environ_starlark "${TEST_TMPDIR}/executionBAR" ""
mv test.bzl testbar.bzl
cat > WORKSPACE <<'EOF'
load("//:testfoo.bzl", foorepo="repo")
load("//:testbar.bzl", barrepo="repo")
foorepo(name="foo")
barrepo(name="bar")
EOF
touch BUILD
bazel clean --expunge
# Reset both fetch counters.
echo 0 > "${TEST_TMPDIR}/executionFOO"
echo 0 > "${TEST_TMPDIR}/executionBAR"
# Normal sync should hit both repositories
echo; echo bazel sync; echo
bazel sync
assert_equals 1 $(cat "${TEST_TMPDIR}/executionFOO")
assert_equals 1 $(cat "${TEST_TMPDIR}/executionBAR")
# Only foo
echo; echo bazel sync --only foo; echo
bazel sync --only foo
assert_equals 2 $(cat "${TEST_TMPDIR}/executionFOO")
assert_equals 1 $(cat "${TEST_TMPDIR}/executionBAR")
# Only bar
echo; echo bazel sync --only bar; echo
bazel sync --only bar
assert_equals 2 $(cat "${TEST_TMPDIR}/executionFOO")
assert_equals 2 $(cat "${TEST_TMPDIR}/executionBAR")
# Only bar, again: --only syncs unconditionally, so bar is fetched once
# more while foo stays untouched.
echo; echo bazel sync --only bar; echo
bazel sync --only bar
assert_equals 2 $(cat "${TEST_TMPDIR}/executionFOO")
assert_equals 3 $(cat "${TEST_TMPDIR}/executionBAR")
}
# Verify that the first error message for a failed repository fetch
# focuses on the root cause (the failing repository and reason) and does
# not mention unrelated repositories or targets in the dependency chain.
function test_download_failure_message() {
# Regression test for #7850
# Verify that for a failed download it is clearly indicated
# what was attempted to be downloaded and how it failed.
cat > BUILD <<'EOF'
genrule(
name = "it",
outs = ["it.txt"],
srcs = ["@ext_foo//:data.txt"],
cmd = "cp $< $@",
)
EOF
cat > repo.bzl <<'EOF'
def _impl(ctx):
ctx.file("BUILD", "exports_files(['data.txt'])")
ctx.symlink(ctx.attr.data, "data.txt")
trivial_repo = repository_rule(
implementation = _impl,
attrs = { "data" : attr.label() },
)
EOF
cat > root.bzl <<'EOF'
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def root_cause():
http_archive(
name = "this_is_the_root_cause",
urls = ["http://does.not.exist.example.com/some/file.tar"],
sha256 = "aba1fcb7781eb26c854d13446a4b3e8a906cc03676371bbb69eb4430926f5969",
)
EOF
# Chain ext_foo -> ext_bar -> ext_baz -> this_is_the_root_cause, so the
# failure originates several repositories away from the built target.
cat >> $(create_workspace_with_default_repos WORKSPACE) <<'EOF'
load("//:root.bzl", "root_cause")
load("//:repo.bzl", "trivial_repo")
root_cause()
trivial_repo(
name = "ext_baz",
data = "@this_is_the_root_cause//:data.txt",
)
trivial_repo(
name = "ext_bar",
data = "@ext_baz//:data.txt",
)
trivial_repo(
name = "ext_foo",
data = "@ext_bar//:data.txt",
)
EOF
bazel build //:it > "${TEST_log}" 2>&1 \
&& fail "Expected failure" || :
# Extract the first error message printed
#
# ERROR: An error occurred during the fetch of repository 'this_is_the_root_cause':
#    Traceback (most recent call last):
#       File ".../http.bzl", line 111, column 45, in _http_archive_impl
#          download_info = ctx.download_and_extract(
# Error in download_and_extract: java.io.IOException: Error downloading \
#   [http://does.not.exist.example.com/some/file.tar] to ...file.tar: \
#   Unknown host: does.not.exist.example.com
awk '/^ERROR/ {on=1} on {print} /^Error/ {exit}' < "${TEST_log}" > firsterror.log
echo; echo "first error message which should focus on the root cause";
echo "=========="; cat firsterror.log; echo "=========="
# We expect it to contain the root cause, and the failure ...
grep -q 'this_is_the_root_cause' firsterror.log \
|| fail "Root-cause repository not mentioned"
grep -q '[uU]nknown host.*does.not.exist.example.com' firsterror.log \
|| fail "Failure reason not mentioned"
# ...but not be cluttered with information not related to the root cause
grep 'ext_foo' firsterror.log && fail "unrelated repo mentioned" || :
grep 'ext_bar' firsterror.log && fail "unrelated repo mentioned" || :
grep 'ext_baz' firsterror.log && fail "unrelated repo mentioned" || :
grep '//:it' firsterror.log && fail "unrelated target mentioned" || :
# Verify that the same is true, if the error is caused by a fail statement.
cat > root.bzl <<'EOF'
def _impl(ctx):
fail("Here be dragons")
repo = repository_rule(implementation=_impl, attrs = {})
def root_cause():
repo(name = "this_is_the_root_cause")
EOF
bazel build //:it > "${TEST_log}" 2>&1 \
&& fail "Expected failure" || :
# Extract the first error message printed (see previous awk command).
awk '/^ERROR/ {on=1} on {print} /^Error/ {exit}' < "${TEST_log}" > firsterror.log
echo "=========="; cat firsterror.log; echo "=========="
grep -q 'this_is_the_root_cause' firsterror.log \
|| fail "Root-cause repository not mentioned"
grep -q 'Here be dragons' firsterror.log \
|| fail "fail error message not shown"
grep 'ext_foo' firsterror.log && fail "unrelated repo mentioned" || :
grep 'ext_bar' firsterror.log && fail "unrelated repo mentioned" || :
grep 'ext_baz' firsterror.log && fail "unrelated repo mentioned" || :
grep '//:it' firsterror.log && fail "unrelated target mentioned" || :
}
# Verify that a circular load between two .bzl files produces a clean
# error naming both files, not an internal error / stack trace.
function test_circular_load_error_message() {
cat >> $(create_workspace_with_default_repos WORKSPACE) <<'EOF'
load("//:a.bzl", "total")
EOF
touch BUILD
cat > a.bzl <<'EOF'
load("//:b.bzl", "b")
a = 10
total = a + b
EOF
cat > b.bzl <<'EOF'
load("//:a.bzl", "a")
b = 20
difference = b - a
EOF
bazel build //... > "${TEST_log}" 2>&1 && fail "Expected failure" || :
expect_not_log "[iI]nternal [eE]rror"
expect_not_log "IllegalStateException"
expect_log "//:a.bzl"
expect_log "//:b.bzl"
}
# Like the previous test, but the cycle is reached through a load path
# (WORKSPACE -> x.bzl -> y.bzl -> a.bzl <-> b.bzl); the error must name
# the whole path.
# NOTE(review): the function name misspells "circular" ("ciruclar");
# kept as-is since the test runner discovers tests by this name.
function test_ciruclar_load_error_with_path_message() {
cat >> $(create_workspace_with_default_repos WORKSPACE) <<'EOF'
load("//:x.bzl", "x")
EOF
touch BUILD
cat > x.bzl <<'EOF'
load("//:y.bzl", "y")
x = y
EOF
cat > y.bzl <<'EOF'
load("//:a.bzl", "total")
y = total
EOF
cat > a.bzl <<'EOF'
load("//:b.bzl", "b")
a = 10
total = a + b
EOF
cat > b.bzl <<'EOF'
load("//:a.bzl", "a")
b = 20
difference = b - a
EOF
bazel build //... > "${TEST_log}" 2>&1 && fail "Expected failure" || :
expect_not_log "[iI]nternal [eE]rror"
expect_not_log "IllegalStateException"
expect_log "WORKSPACE"
expect_log "//:x.bzl"
expect_log "//:y.bzl"
expect_log "//:a.bzl"
expect_log "//:b.bzl"
}
# Verify that download_and_extract can fetch a file protected by HTTP
# basic auth when credentials are passed via the "auth" parameter.
function test_auth_provided() {
mkdir x
echo 'exports_files(["file.txt"])' > x/BUILD
echo 'Hello World' > x/file.txt
tar cvf x.tar x
# head -c 64 keeps only the hex digest, dropping the file name.
sha256="$(sha256sum x.tar | head -c 64)"
# Serve the archive behind basic auth (helper defined elsewhere in the suite).
serve_file_auth x.tar
cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
load("//:auth.bzl", "with_auth")
with_auth(
name="ext",
url = "http://127.0.0.1:$nc_port/x.tar",
sha256 = "$sha256",
)
EOF
cat > auth.bzl <<'EOF'
def _impl(ctx):
ctx.download_and_extract(
url = ctx.attr.url,
sha256 = ctx.attr.sha256,
# Use the username/password pair hard-coded
# in the testing server.
auth = {ctx.attr.url : { "type": "basic",
"login" : "foo",
"password" : "bar"}}
)
with_auth = repository_rule(
implementation = _impl,
attrs = { "url" : attr.string(), "sha256" : attr.string() }
)
EOF
cat > BUILD <<'EOF'
genrule(
name = "it",
srcs = ["@ext//x:file.txt"],
outs = ["it.txt"],
cmd = "cp $< $@",
)
EOF
bazel build //:it \
|| fail "Expected success despite needing a file behind basic auth"
}
# Verify the read_netrc utility: it must parse an awkwardly formatted
# .netrc (macdef blocks, comments, entries split across lines, a default
# entry) into the expected dict structure.
function test_netrc_reading() {
# Write a badly formatted, but valid, .netrc file
cat > .netrc <<'EOF'
machine ftp.example.com
macdef init
cd pub
mget *
quit
# this is a comment
machine example.com login
myusername password mysecret default
login anonymous password myusername@example.com
EOF
# We expect that `read_netrc` can parse this file...
cat > def.bzl <<'EOF'
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "read_netrc")
def _impl(ctx):
rc = read_netrc(ctx, ctx.attr.path)
ctx.file("data.bzl", "netrc = %s" % (rc,))
ctx.file("BUILD", "")
ctx.file("WORKSPACE", "")
netrcrepo = repository_rule(
implementation = _impl,
attrs = {"path": attr.string()},
)
EOF
netrc_dir="$(pwd)"
# On Windows, convert the POSIX path to a mixed-style (C:/...) path.
if "$is_windows"; then
netrc_dir="$(cygpath -m ${netrc_dir})"
fi
cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
load("//:def.bzl", "netrcrepo")
netrcrepo(name = "netrc", path="${netrc_dir}/.netrc")
EOF
# ...and that from the parse result, we can read off the
# credentials for example.com.
cat > BUILD <<'EOF'
load("@netrc//:data.bzl", "netrc")
[genrule(
name = name,
outs = [ "%s.txt" % (name,)],
cmd = "echo %s > $@" % (netrc["example.com"][name],),
) for name in ["login", "password"]]
EOF
bazel build //:login //:password
grep 'myusername' `bazel info bazel-genfiles`/login.txt \
|| fail "Username not parsed correctly"
grep 'mysecret' `bazel info bazel-genfiles`/password.txt \
|| fail "Password not parsed correctly"
# Also check the precise value of parsed file
cat > expected.bzl <<'EOF'
expected = {
"ftp.example.com" : { "macdef init" : "cd pub\nmget *\nquit\n" },
"example.com" : { "login" : "myusername",
"password" : "mysecret",
},
"" : { "login": "anonymous",
"password" : "myusername@example.com" },
}
EOF
cat > verify.bzl <<'EOF'
load("@netrc//:data.bzl", "netrc")
load("//:expected.bzl", "expected")
def check_equal_expected():
print("Parsed value: %s" % (netrc,))
print("Expected value: %s" % (expected,))
if netrc == expected:
return "OK"
else:
return "BAD"
EOF
cat > BUILD <<'EOF'
load ("//:verify.bzl", "check_equal_expected")
genrule(
name = "check_expected",
outs = ["check_expected.txt"],
cmd = "echo %s > $@" % (check_equal_expected(),)
)
EOF
bazel build //:check_expected
grep 'OK' `bazel info bazel-genfiles`/check_expected.txt \
|| fail "Parsed dict not equal to expected value"
}
function test_use_netrc() {
# Test the starlark utility function use_netrc.
cat > .netrc <<'EOF'
machine foo.example.org
login foousername
password foopass
machine bar.example.org
login barusername
password passbar
machine oauthlife.com
password TOKEN
EOF
# Read a given .netrc file and combine it with a list of URL,
# and write the obtained authentication dictionary to disk; this
# is not the intended way of using, but makes testing easy.
cat > def.bzl <<'EOF'
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "read_netrc", "use_netrc")
def _impl(ctx):
rc = read_netrc(ctx, ctx.attr.path)
auth = use_netrc(rc, ctx.attr.urls, {"oauthlife.com": "Bearer <password>",})
ctx.file("data.bzl", "auth = %s" % (auth,))
ctx.file("BUILD", "")
ctx.file("WORKSPACE", "")
authrepo = repository_rule(
implementation = _impl,
attrs = {"path": attr.string(),
"urls": attr.string_list()
},
)
EOF
netrc_dir="$(pwd)"
# Convert to a Windows-style path when running under msys.
if "$is_windows"; then
netrc_dir="$(cygpath -m ${netrc_dir})"
fi
cat >> $(create_workspace_with_default_repos WORKSPACE) <<EOF
load("//:def.bzl", "authrepo")
authrepo(
name = "auth",
path="${netrc_dir}/.netrc",
urls = [
"http://example.org/public/null.tar",
"https://foo.example.org/file1.tar",
"https://foo.example.org:8080/file2.tar",
"https://bar.example.org/file3.tar",
"https://evil.com/bar.example.org/file4.tar",
"https://oauthlife.com/fizz/buzz/file5.tar",
],
)
EOF
# Here dicts give us the correct notion of equality, so we can simply
# compare against the expected value.
# Note: the plain-http URL and the evil.com URL must get no credentials.
cat > expected.bzl <<'EOF'
expected = {
"https://foo.example.org/file1.tar" : {
"type" : "basic",
"login": "foousername",
"password" : "foopass",
},
"https://foo.example.org:8080/file2.tar" : {
"type" : "basic",
"login": "foousername",
"password" : "foopass",
},
"https://bar.example.org/file3.tar" : {
"type" : "basic",
"login": "barusername",
"password" : "passbar",
},
"https://oauthlife.com/fizz/buzz/file5.tar": {
"type" : "pattern",
"pattern" : "Bearer <password>",
"password" : "TOKEN",
},
}
EOF
cat > verify.bzl <<'EOF'
load("@auth//:data.bzl", "auth")
load("//:expected.bzl", "expected")
def check_equal_expected():
print("Computed value: %s" % (auth,))
print("Expected value: %s" % (expected,))
if auth == expected:
return "OK"
else:
return "BAD"
EOF
cat > BUILD <<'EOF'
load ("//:verify.bzl", "check_equal_expected")
genrule(
name = "check_expected",
outs = ["check_expected.txt"],
cmd = "echo %s > $@" % (check_equal_expected(),)
)
EOF
bazel build //:check_expected
grep 'OK' `bazel info bazel-genfiles`/check_expected.txt \
|| fail "Authentication merged incorrectly"
}
# Bazel must refuse a plain-http http_archive that carries no checksum, and
# accept the same archive once a sha256 is added to the WORKSPACE entry.
function test_disallow_unverified_http() {
mkdir x
echo 'exports_files(["file.txt"])' > x/BUILD
echo 'Hello World' > x/file.txt
tar cvf x.tar x
sha256="$(sha256sum x.tar | head -c 64)"
serve_file x.tar
cat > WORKSPACE <<EOF
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name="ext",
url = "http://127.0.0.1:$nc_port/x.tar",
)
EOF
cat > BUILD <<'EOF'
genrule(
name = "it",
srcs = ["@ext//x:file.txt"],
outs = ["it.txt"],
cmd = "cp $< $@",
)
EOF
# (message typo "Expeceted" fixed)
bazel build //:it > "${TEST_log}" 2>&1 && fail "Expected failure" || :
expect_log 'plain http.*missing checksum'
# After adding a good checksum, we expect success
ed WORKSPACE <<EOF
/url
a
sha256 = "$sha256",
.
w
q
EOF
bazel build //:it || fail "Expected success once the checksum is given"
}
# Per-test teardown: stop the helper HTTP server and wipe its scratch
# directory (rm -rf is a no-op when the directory never existed).
function tear_down() {
shutdown_server
rm -rf "${TEST_TMPDIR}/server_dir"
true
}
function test_http_archive_netrc() {
mkdir x
echo 'exports_files(["file.txt"])' > x/BUILD
echo 'Hello World' > x/file.txt
tar cvf x.tar x
sha256=$(sha256sum x.tar | head -c 64)
# Serve the archive behind basic auth; credentials come from the explicit
# netrc attribute of http_archive below.
serve_file_auth x.tar
netrc_dir="$(pwd)"
# Convert to a Windows-style path when running under msys.
if "$is_windows"; then
netrc_dir="$(cygpath -m ${netrc_dir})"
fi
cat > WORKSPACE <<EOF
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name="ext",
url = "http://127.0.0.1:$nc_port/x.tar",
netrc = "${netrc_dir}/.netrc",
sha256="$sha256",
)
EOF
cat > .netrc <<'EOF'
machine 127.0.0.1
login foo
password bar
EOF
cat > BUILD <<'EOF'
genrule(
name = "it",
srcs = ["@ext//x:file.txt"],
outs = ["it.txt"],
cmd = "cp $< $@",
)
EOF
bazel build //:it \
|| fail "Expected success despite needing a file behind basic auth"
}
function test_http_archive_auth_patterns() {
mkdir x
echo 'exports_files(["file.txt"])' > x/BUILD
echo 'Hello World' > x/file.txt
tar cvf x.tar x
sha256=$(sha256sum x.tar | head -c 64)
serve_file_auth x.tar
netrc_dir="$(pwd)"
# Convert to a Windows-style path when running under msys.
if "$is_windows"; then
netrc_dir="$(cygpath -m ${netrc_dir})"
fi
# auth_patterns maps the host to a custom Authorization header template;
# <password> is substituted from the .netrc entry for that machine.
cat > WORKSPACE <<EOF
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name="ext",
url = "http://127.0.0.1:$nc_port/x.tar",
netrc = "${netrc_dir}/.netrc",
sha256="$sha256",
auth_patterns = {
"127.0.0.1": "Bearer <password>"
}
)
EOF
cat > .netrc <<'EOF'
machine 127.0.0.1
password TOKEN
EOF
cat > BUILD <<'EOF'
genrule(
name = "it",
srcs = ["@ext//x:file.txt"],
outs = ["it.txt"],
cmd = "cp $< $@",
)
EOF
bazel build //:it \
|| fail "Expected success despite needing a file behind bearer auth"
}
function test_implicit_netrc() {
mkdir x
echo 'exports_files(["file.txt"])' > x/BUILD
echo 'Hello World' > x/file.txt
tar cvf x.tar x
sha256=$(sha256sum x.tar | head -c 64)
serve_file_auth x.tar
# Point HOME (and USERPROFILE on Windows) at this directory so the .netrc
# written below is picked up implicitly — no netrc attribute is set.
export HOME=`pwd`
if "$is_windows"; then
export USERPROFILE="$(cygpath -m ${HOME})"
fi
cat > .netrc <<'EOF'
machine 127.0.0.1
login foo
password bar
EOF
mkdir main
cd main
cat > WORKSPACE <<EOF
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name="ext",
url = "http://127.0.0.1:$nc_port/x.tar",
sha256="$sha256",
)
EOF
cat > BUILD <<'EOF'
genrule(
name = "it",
srcs = ["@ext//x:file.txt"],
outs = ["it.txt"],
cmd = "cp $< $@",
)
EOF
bazel build //:it \
|| fail "Expected success despite needing a file behind basic auth"
}
function test_disable_download_should_prevent_downloading() {
mkdir x
echo 'exports_files(["file.txt"])' > x/BUILD
echo 'Hello World' > x/file.txt
tar cvf x.tar x
sha256=$(sha256sum x.tar | head -c 64)
serve_file x.tar
mkdir main
cd main
cat > WORKSPACE <<EOF
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name="ext",
url = "http://127.0.0.1:$nc_port/x.tar",
sha256="$sha256",
)
EOF
cat > BUILD <<'EOF'
genrule(
name = "it",
srcs = ["@ext//x:file.txt"],
outs = ["it.txt"],
cmd = "cp $< $@",
)
EOF
# Even though the file is being served, the flag must make the fetch fail.
bazel build --experimental_repository_disable_download //:it > "${TEST_log}" 2>&1 \
&& fail "Expected failure" || :
expect_log "Failed to download repo ext: download is disabled"
}
function test_disable_download_should_allow_distdir() {
mkdir x
echo 'exports_files(["file.txt"])' > x/BUILD
echo 'Hello World' > x/file.txt
tar cvf x.tar x
sha256=$(sha256sum x.tar | head -c 64)
# Note: no server is started; the archive is only available via --distdir.
mkdir main
cp x.tar main
cd main
cat > WORKSPACE <<EOF
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name="ext",
url = "http://127.0.0.1/x.tar",
sha256="$sha256",
)
EOF
cat > BUILD <<'EOF'
genrule(
name = "it",
srcs = ["@ext//x:file.txt"],
outs = ["it.txt"],
cmd = "cp $< $@",
)
EOF
bazel build --distdir="." --experimental_repository_disable_download //:it || fail "Failed to build"
}
function test_disable_download_should_allow_local_repository() {
mkdir x
echo 'exports_files(["file.txt"])' > x/BUILD
echo 'Hello World' > x/file.txt
touch x/WORKSPACE
mkdir main
cd main
# local_repository performs no download, so it must keep working even with
# downloads disabled.
cat > WORKSPACE <<EOF
local_repository(
name="ext",
path="../x",
)
EOF
cat > BUILD <<'EOF'
genrule(
name = "it",
srcs = ["@ext//:file.txt"],
outs = ["it.txt"],
cmd = "cp $< $@",
)
EOF
bazel build --experimental_repository_disable_download //:it || fail "Failed to build"
}
# Entry point: execute every test_* function defined above.
run_suite "local repository tests"
|
/// Maps a completion ratio to a progress-bar color name.
/// - Parameter progress: fraction complete; >= 0.7 yields "green",
///   >= 0.4 yields "yellow", anything else (including NaN) "red".
/// - Returns: the color name as a string.
func getProgressBarColor(progress: Double) -> String {
    switch progress {
    case 0.7...:
        return "green"
    case 0.4..<0.7:
        return "yellow"
    default:
        return "red"
    }
}
<reponame>sam-baumann/Tempest
#include "texture2d.h"
#include <Tempest/Device>
#include <algorithm>
using namespace Tempest;
// Wraps a backend texture handle together with its pixel dimensions and format.
// The Device argument is unused here; it presumably exists to restrict who can
// construct textures — TODO confirm against the header.
Texture2d::Texture2d(Device &, AbstractGraphicsApi::PTexture&& impl, uint32_t w, uint32_t h, TextureFormat frm)
:impl(std::move(impl)),texW(int(w)),texH(int(h)),frm(frm) {
}
// Move constructor: steals the backend handle and zeroes the source's
// dimensions so the moved-from texture reads as empty.
Texture2d::Texture2d(Texture2d&& other)
:impl(std::move(other.impl)), texW(other.texW), texH(other.texH), frm(other.frm) {
other.texW = 0;
other.texH = 0;
}
// Intentionally empty: releasing the underlying API texture is presumably
// handled by the `impl` member's own destructor — confirm in PTexture.
Texture2d::~Texture2d(){
}
// Move assignment by member-wise swap: this object's previous state is handed
// to `other` and gets released whenever `other` is destroyed.
Texture2d& Texture2d::operator=(Texture2d&& other) {
std::swap(impl, other.impl);
std::swap(texW, other.texW);
std::swap(texH, other.texH);
std::swap(frm, other.frm);
return *this;
}
// Number of mip levels of the texture; 0 when no backend handle is attached.
uint32_t Texture2d::mipCount() const {
  if(!impl.handler)
    return 0;
  return impl.handler->mipCount();
  }
|
<filename>test-data/comp-changes/old/src/main/constructorLessAccessible/ConstructorLessAccessiblePackPriv2Priv.java
package main.constructorLessAccessible;
// Test fixture ("old" side of a comp-changes pair): both constructors are
// package-private here; judging by the path, the "new" counterpart makes them
// private to exercise accessibility-change detection.
public class ConstructorLessAccessiblePackPriv2Priv {
ConstructorLessAccessiblePackPriv2Priv() {
}
ConstructorLessAccessiblePackPriv2Priv(int p) {
}
}
|
package id.duglegir.jagosholat.ui.tutorial;
import android.os.Bundle;
import androidx.fragment.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ListView;
import java.util.ArrayList;
import id.duglegir.jagosholat.util.JSONHelper;
import id.duglegir.jagosholat.model.NiatShalat;
import id.duglegir.jagosholat.R;
/**
 * Tutorial fragment that shows the list of "niat shalat" (prayer intention)
 * entries in a simple ListView.
 */
public class FeatureNiatFragment extends Fragment {
public FeatureNiatFragment() {
// Required empty public constructor
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
// Inflate the layout for this fragment
View rootView = inflater.inflate(R.layout.fragment_feature_tutor_text, container, false);
// -----------------------------------------------------------------------------------------
// Load the entries — presumably from bundled JSON; see JSONHelper.
ArrayList<NiatShalat> arrayWords = JSONHelper.extractNiatShalat();
// -----------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------
// Bind the entries to the list via the custom adapter.
ListView mListView = rootView.findViewById(R.id.listViewFeature);
NiatShalatAdapter call = new NiatShalatAdapter(getActivity(), arrayWords);
mListView.setAdapter(call);
// -----------------------------------------------------------------------------------------
return rootView;
}
}
|
const express = require('express');
const mysql = require('mysql');
// Create connection
// NOTE(review): credentials are hard-coded and the root password is empty —
// move these into configuration before any real deployment.
const db = mysql.createConnection({
host : 'localhost',
user : 'root',
password : '',
database : 'mydata'
});
// Connect
// A connection failure crashes the process immediately (throw in callback).
db.connect((err) => {
if(err){
throw err;
}
console.log('MySql Connected...');
});
// Create an Express app
const app = express();
// Create table
// GET /createdb creates the two-column table; repeating the request errors
// because the CREATE TABLE statement has no IF NOT EXISTS guard.
app.get('/createdb', (req, res) => {
let sql = 'CREATE TABLE mydata(mydataName varchar(255), mydataAddress varchar(255));';
db.query(sql, (err, result) => {
if(err) throw err;
console.log(result);
res.send('Database created...');
});
});
// Insert POST
// POST /addpost inserts a fixed sample row; the request body is not read.
app.post('/addpost', (req, res) => {
let post = {mydataName: 'John Doe', mydataAddress: 'Highway 71'};
let sql = 'INSERT INTO mydata SET ?';
let query = db.query(sql, post, (err, result) => {
if(err) throw err;
console.log(result);
res.send('Data added to database');
});
});
// Server listening
app.listen('3000', () => {
console.log('Server started on port 3000');
});
<filename>src/database/manual/DELETE-manual.js<gh_stars>1-10
const Database = require("../db")
// Manual maintenance script: set `id_up` to the id of the fosterHomes row you
// want to DELETE, then run it once.
Database.then(async (db) => {
/////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////// INSERT THE ** ID ** ///////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
const id_up = null // set the id of the row you want to DELETE
if (id_up == null || id_up == undefined) {
console.error("[!!! ERROR !!!] id = " + id_up + ", por favor insira um id no arquivo DELETE-manual.js[7,19] para continuar!") //ERROR! (id)
process.on("SIGINT", () => {
db.close()
})
// Bug fix: without this early return the DELETE below still executed with
// "id = null", silently issuing a query against a non-existent row.
return
}
// NOTE(review): id_up is developer-entered, but a parameterized query would
// still be safer than string concatenation if this is ever automated.
const fosterHomes = await db.all('DELETE FROM fosterHomes WHERE id = ' + id_up)
console.log(fosterHomes) // verify that the row was deleted
})
process.on("SIGINT", () => {
db.close()
})
/////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////// SÓ EXECUTE EM CASOS **ESPECÍFICOS** //////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
/***SÓ RODE UMA VEZ***///WARNINGX
/*
$ npm run DELETE-manual
$ "ENTER"
.......
.......
.......
Server started
$ "Ctrl + c"
*/ |
const mdsSdk = require('@maddonkeysoftware/mds-cloud-sdk-node');
const utils = require('../../../lib/utils');
// Asks the serverless-functions service to delete the function with `id`.
// NOTE(review): `handle` passes an `env` argument here, but this signature
// drops it and the client is created without it — confirm whether the
// environment should be forwarded to getServerlessFunctionsClient.
const deleteFunction = async (id) => {
const client = await mdsSdk.getServerlessFunctionsClient();
return client.deleteFunction(id);
};
// CLI action: report success, or surface the failure message to the user.
const handle = (id, env) =>
deleteFunction(id, env)
.then((resp) =>
utils.display(`Function removed successfully. Id: ${resp.id}`),
)
.catch((err) =>
utils.display(
`An error occurred while removing the function. Message: ${err.message}`,
),
);
// Command-module wiring (yargs-style exports).
exports.command = 'delete <id>';
exports.desc = 'Removes the function with the supplied id';
exports.builder = utils.extendBaseCommandBuilder();
exports.handler = (argv) => handle(argv.id, argv.env);
|
#!/bin/bash
# Container entrypoint for the ddev MariaDB service: restores or initializes
# the data directory (optionally from a snapshot), upgrades it across server
# versions when needed, and finally execs mysqld.
set -x
set -eu
set -o pipefail
SOCKET=/var/tmp/mysql.sock
rm -f /tmp/healthy
# Wait for mysql server to be ready.
function serverwait {
# Poll for up to ~60s; bail out early if the background mysqld died.
for i in {60..0};
do
if mysqladmin ping -uroot --socket=$SOCKET >/dev/null 2>&1; then
return 0
fi
# Test to make sure we got it started in the first place. kill -s 0 just tests to see if process exists.
if ! kill -s 0 $pid 2>/dev/null; then
echo "MariaDB initialization startup failed"
return 2
fi
sleep 1
done
return 1
}
# If we have a restore_snapshot arg, get the snapshot directory
# otherwise, fail and abort startup
if [ $# = "2" -a "${1:-}" = "restore_snapshot" ] ; then
snapshot_dir="/mnt/ddev_config/db_snapshots/${2:-nothingthere}"
if [ -d "$snapshot_dir" ] ; then
echo "Restoring from snapshot directory $snapshot_dir"
# Ugly macOS .DS_Store in this directory can break the restore
find ${snapshot_dir} -name .DS_Store -print0 | xargs rm -f
rm -rf /var/lib/mysql/*
else
echo "$snapshot_dir does not exist, not attempting restore of snapshot"
unset snapshot_dir
exit 101
fi
fi
# Major.minor of the mysqld binary, e.g. "10.4" (patch suffix stripped).
server_db_version=$(PATH=$PATH:/usr/sbin:/usr/local/bin:/usr/local/mysql/bin mysqld -V 2>/dev/null | awk '{sub( /\.[0-9]+(-.*)?$/, "", $3); print $3 }')
# If we have extra mariadb cnf files,, copy them to where they go.
if [ -d /mnt/ddev_config/mysql -a "$(echo /mnt/ddev_config/mysql/*.cnf)" != "/mnt/ddev_config/mysql/*.cnf" ] ; then
echo "!includedir /mnt/ddev_config/mysql" >/etc/mysql/conf.d/ddev.cnf
fi
# Prefer xtrabackup when present, otherwise fall back to mariabackup.
export BACKUPTOOL=mariabackup
if command -v xtrabackup; then BACKUPTOOL="xtrabackup"; fi
# If mariadb has not been initialized, copy in the base image from either the default starter image (/mysqlbase)
# or from a provided $snapshot_dir.
if [ ! -f "/var/lib/mysql/db_mariadb_version.txt" ]; then
# If snapshot_dir is not set, this is a normal startup, so
# tell healthcheck to wait by touching /tmp/initializing
if [ -z "${snapshot_dir:-}" ] ; then
touch /tmp/initializing
fi
target=${snapshot_dir:-/mysqlbase/}
name=$(basename $target)
rm -rf /var/lib/mysql/* /var/lib/mysql/.[a-z]*
${BACKUPTOOL} --prepare --skip-innodb-use-native-aio --target-dir "$target" --user=root --password=root --socket=$SOCKET 2>&1 | tee "/var/log/mariabackup_prepare_$name.log"
${BACKUPTOOL} --copy-back --skip-innodb-use-native-aio --force-non-empty-directories --target-dir "$target" --user=root --password=root --socket=$SOCKET 2>&1 | tee "/var/log/mariabackup_copy_back_$name.log"
echo "Database initialized from ${target}"
rm -f /tmp/initializing
fi
database_db_version=$(cat /var/lib/mysql/db_mariadb_version.txt)
# On a version mismatch, start a temporary grant-less server just to run
# mysql_upgrade against the old data directory, then stop it again.
if [ "${server_db_version}" != "${database_db_version}" ]; then
echo "Starting with db server version=${server_db_version} but database was created with '${database_db_version}'."
echo "Attempting upgrade, but it may not work, you may need to export your database, 'ddev delete --omit-snapshot', start, and reimport".
PATH=$PATH:/usr/sbin:/usr/local/bin:/usr/local/mysql/bin mysqld --skip-networking --skip-grant-tables --socket=$SOCKET >/tmp/mysqld_temp_startup.log 2>&1 &
pid=$!
set +x
if ! serverwait ; then
echo "Failed to get mysqld running to run mysql_upgrade"
exit 103
fi
set -x
echo "Attempting mysql_upgrade because db server version ${server_db_version} is not the same as database db version ${database_db_version}"
mysql_upgrade --socket=$SOCKET
kill $pid
fi
# And update the server db version we have here.
echo $server_db_version >/var/lib/mysql/db_mariadb_version.txt
cp -r /home/{.my.cnf,.bashrc} ~/
mkdir -p /mnt/ddev-global-cache/{bashhistory,mysqlhistory}/${HOSTNAME} || true
echo
echo 'MySQL init process done. Ready for start up.'
echo
echo "Starting mysqld."
tail -f /var/log/mysqld.log &
# Replace this shell with the real server so it receives signals directly.
exec mysqld --server-id=0
|
/**
 * Builds the standard response envelope used by the API handlers.
 *
 * @param {number} code - Status code (0 = success, negative = failure).
 * @param {string} message - Human-readable status text.
 * @param {*} data - Payload carried alongside the status.
 * @returns {{code: number, message: string, data: *}} the envelope object.
 */
function getData(code, message, data) {
  const envelope = { code, message, data };
  return envelope;
}
// Koa-style login handler: checks the posted username/password and replies
// with a token envelope (code 0 on success, -1 on failure).
// NOTE(review): the credentials are hard-coded and compared in plain text,
// and the tokens are fixed strings — this only makes sense as a mock/stub;
// replace with real credential storage and token generation otherwise.
let userLogin = (ctx) => {
let body = ctx.request.body;
let isValid = body.username === 'elys' && body.password === '<PASSWORD>'; // validate credentials
if(isValid) {
ctx.response.body = getData(0, '登录成功', {
token: 'effective_token'
})
} else {
ctx.response.body = getData(-1, '登录失败', {
token: 'invalid_token'
})
}
}
module.exports = {
userLogin
}
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v8/services/keyword_theme_constant_service.proto
require 'google/ads/googleads/v8/resources/keyword_theme_constant_pb'
require 'google/api/annotations_pb'
require 'google/api/client_pb'
require 'google/api/field_behavior_pb'
require 'google/api/resource_pb'
require 'google/protobuf'
# Generated code — registers the request/response message descriptors for the
# KeywordThemeConstantService in the shared descriptor pool. Regenerate from
# the .proto rather than editing by hand.
Google::Protobuf::DescriptorPool.generated_pool.build do
add_file("google/ads/googleads/v8/services/keyword_theme_constant_service.proto", :syntax => :proto3) do
add_message "google.ads.googleads.v8.services.GetKeywordThemeConstantRequest" do
optional :resource_name, :string, 1
end
add_message "google.ads.googleads.v8.services.SuggestKeywordThemeConstantsRequest" do
optional :query_text, :string, 1
optional :country_code, :string, 2
optional :language_code, :string, 3
end
add_message "google.ads.googleads.v8.services.SuggestKeywordThemeConstantsResponse" do
repeated :keyword_theme_constants, :message, 1, "google.ads.googleads.v8.resources.KeywordThemeConstant"
end
end
end
# Ruby constants resolving to the message classes registered above.
module Google
module Ads
module GoogleAds
module V8
module Services
GetKeywordThemeConstantRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v8.services.GetKeywordThemeConstantRequest").msgclass
SuggestKeywordThemeConstantsRequest = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v8.services.SuggestKeywordThemeConstantsRequest").msgclass
SuggestKeywordThemeConstantsResponse = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("google.ads.googleads.v8.services.SuggestKeywordThemeConstantsResponse").msgclass
end
end
end
end
end
|
#!/bin/bash
# Enable modules for tenant "testlib" by POSTing the JSON payload in
# okapi-enable-users.json to the local Okapi gateway.
curl -i -w '\n' -X POST -H 'Content-type: application/json' \
-d @okapi-enable-users.json http://localhost:9130/_/proxy/tenants/testlib/modules
<filename>player/src/main/java/fr/unice/polytech/si3/qgl/soyouz/ListLogHandler.java<gh_stars>0
package fr.unice.polytech.si3.qgl.soyouz;
import fr.unice.polytech.si3.qgl.soyouz.classes.utilities.Util;
import java.util.Queue;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.LogRecord;
import java.util.logging.SimpleFormatter;
/**
 * Log handler that mirrors each published record to stdout and to an
 * in-memory queue, filtering by {@code Util.currentLogLevel}.
 */
class ListLogHandler extends Handler
{
private final SimpleFormatter fmt = new SimpleFormatter();
private final Queue<String> logList;
/**
 * @param logList queue that receives every formatted log line
 */
public ListLogHandler(Queue<String> logList)
{
this.logList = logList;
Util.currentLogLevel = Level.CONFIG;
}
@Override
public void publish(LogRecord record)
{
// Filter before formatting: previously the record was formatted first,
// doing wasted work for every record that was then discarded.
if (record.getLevel().intValue() < Util.currentLogLevel.intValue())
{
return;
}
var str = fmt.format(record);
System.out.print(str);
logList.add(str);
}
@Override
public void flush()
{
//Not necessary to implement but you know.. override..
}
@Override
public void close()
{
//Not necessary to implement but you know.. override..
}
}
|
#!/usr/bin/env bash
# One-time developer setup: install the pre-commit hook and make hooks runnable.
set -eu
set -o pipefail
# Echo a command before executing it, so setup output shows what ran.
ee() {
echo "+ $*"
eval "$@"
}
# Work from the repository root (parent of this script's directory).
cd "$(dirname "$0")/.."
# Link the pre-commit hook unless one is already installed.
if [ ! -f .git/hooks/pre-commit ]; then
ee ln -s ../../githooks/pre-commit.sh .git/hooks/pre-commit
fi
ee chmod a+x githooks/*
|
# Bump the WSGI entry point's mtime — touching application.wsgi typically
# triggers an app reload under mod_wsgi; confirm against the deployment setup.
touch ../../application.wsgi
import { Component, OnInit, ViewChild, ElementRef, NgZone } from '@angular/core';
import { AgmMap, GoogleMapsAPIWrapper, MapsAPILoader } from '@agm/core';
import { Loc } from '../models/location';
import { HttpClient, HttpHeaders } from '@angular/common/http';
declare var google: any;
// A map pin: coordinates plus optional label and drag behavior.
interface Marker {
lat:number;
lng: number;
label?: string;
draggable: boolean;
}
// Map view state plus reverse-geocoded address components
// (address_level_2 = city, address_level_1 is declared but unused here).
interface Location {
lat:number;
lng: number;
viewport?: Object;
zoom: number;
address_level_1?: string;
address_level_2?: string;
address_country?: string;
address_zip?: string;
address_state?: string;
marker?: Marker;
}
@Component({
selector: 'app-geocoding',
templateUrl: './geocoding.component.html',
styleUrls: ['./geocoding.component.css']
})
export class GeocodingComponent implements OnInit {
// Google Maps geocoder, created once the Maps JS API has loaded.
geocoder: any;
// NOTE(review): hard-coded plain-http backend endpoint — consider moving it
// to environment configuration.
baseurl: string = "http://ec2-35-175-212-202.compute-1.amazonaws.com:9095/locations/"
testlocs: Loc[] = [ ]
// Current map state (center, marker, zoom) and parsed address parts.
public location:Location = {
lat: 40.70735,
lng: -100.21625,
marker: {
lat: 40.70735,
lng: -100.21625,
draggable: false
},
zoom: 5
}
title: string = 'AGM project';
latitude: number;
longitude: number;
zoom: number;
address: string;
@ViewChild('search')
public searchElementRef: ElementRef;
@ViewChild(AgmMap) map: AgmMap;
constructor(public mapsApiLoader: MapsAPILoader,
private zone: NgZone,
private wrapper: GoogleMapsAPIWrapper,
private http: HttpClient) {
this.mapsApiLoader = mapsApiLoader;
this.zone = zone;
this. wrapper = wrapper;
// Instantiate the geocoder only after the Maps API script is available.
this.mapsApiLoader.load().then(() => {
this.geocoder = new google.maps.Geocoder();
})
}
httpOptions = {headers: new HttpHeaders({'Content-Type':'application/json'})}
// Fetches all saved locations from the backend into `testlocs`.
getLocations() {
this.http.get<Loc[]>(this.baseurl).subscribe(locations => {
this.testlocs = locations
})
}
ngOnInit() {
//load Places Autocomplete
this.mapsApiLoader.load()
this.setCurrentLocation();
this.getLocations();
}
//Get Current Location Coordinates
// NOTE(review): the geolocation `position` argument is ignored — latitude
// and longitude are copied from the default `this.location` rather than
// position.coords. Confirm whether real GPS coordinates were intended.
private setCurrentLocation() {
if ('geolocation' in navigator) {
navigator.geolocation.getCurrentPosition((position) => {
this.latitude = this.location.lat;
this.longitude = this.location.lng;
this.zoom = 12;
});
}
}
// Geocodes a free-form address; on success, updates the map center, marker
// and parsed address components, otherwise alerts the user.
findLocation(address) {
if (!this.geocoder) this.geocoder = new google.maps.Geocoder()
this.geocoder.geocode({
'address': address
}, (results, status) => {
console.log(results);
if (status == google.maps.GeocoderStatus.OK) {
// Pull city / country / zip / state out of the address components.
for (var i = 0; i < results[0].address_components.length; i++) {
let types = results[0].address_components[i].types
if (types.indexOf('locality') != -1) {
this.location.address_level_2 = results[0].address_components[i].long_name
}
if (types.indexOf('country') != -1) {
this.location.address_country = results[0].address_components[i].long_name
}
if (types.indexOf('postal_code') != -1) {
this.location.address_zip = results[0].address_components[i].long_name
}
if (types.indexOf('administrative_area_level_1') != -1) {
this.location.address_state = results[0].address_components[i].long_name
}
}
// Recenter the map and move the marker to the geocoded point.
if (results[0].geometry.location) {
this.location.lat = results[0].geometry.location.lat();
console.log(this.location.lat)
this.location.lng = results[0].geometry.location.lng();
this.location.marker.lat = results[0].geometry.location.lat();
this.location.marker.lng = results[0].geometry.location.lng();
this.location.marker.draggable = false;
this.location.viewport = results[0].geometry.viewport;
this.latitude = results[0].geometry.location.lat();
this.longitude = results[0].geometry.location.lng();
}
this.map.triggerResize()
} else {
alert("Sorry, this search produced no results.");
}
})
}
// Re-geocode a saved location when it is clicked in the list.
updateClick(loc: Loc) {
this.findLocation(loc.city + ", " + loc.state);
}
initMap() {
}
}
#!/usr/bin/env bash
# Provision an Open edX devstack on a fresh host.
# ecs 139.159.161.176
# install git and clone devstack repo
# (-y added so yum does not block on an interactive prompt)
yum install -y git
git clone https://github.com/edx/devstack.git
# Bug fix: all make targets below must run inside the cloned repository,
# which the script previously never entered.
cd devstack
# overlay2 is tested and supported
docker info | grep -i 'storage driver'
# Install the requirements inside of a Python virtualenv.
make requirements
# pull the latest images
make pull
# customize the local repositories
make dev.clone
make dev.status
# Validate the devstack configuration
make validate
# Stop all services
make stop
# Remove all service containers and networks
make down
# Provision dev environment with all services stopped
make dev.provision
# start all of the devstack containers
make dev.up
# To see logs from containers
make logs
# To view the logs of a specific service container
make ecommerce-logs
# Remove all devstack-related containers, networks, and volumes
make destroy
# For information on all the available make commands, you can run:
make help
<filename>website/apps/monster/tests.py
from canvas.tests.tests_helpers import (CanvasTestCase, create_content, create_user, create_group, create_comment,
create_staff)
from services import Services, override_service
class TestApi(CanvasTestCase):
    """API tests for the monster app's mobile endpoints."""

    def test_no_completed_mobile_monsters(self):
        # A fresh test database should report an empty 'bottoms' list.
        self.assertFalse(self.api_post('/api/monster/all_completed_mobile_monsters')['bottoms'])
|
<filename>src/components/Keuntungan.js
import React from 'react'
import "../styles/keuntunga.scss"
/**
 * Renders the four "keuntungan" (benefits) panels; all visual content comes
 * from keuntunga.scss via the c1..c4 class names.
 */
function Keuntungan() {
  const panels = ["c1", "c2", "c3", "c4"];
  return (
    <div className="kContainer">
      {panels.map((panel) => (
        <div className={`contain ${panel}`} key={panel}>
        </div>
      ))}
    </div>
  );
}

export default Keuntungan
|
/**
 * Validates a user registration submission.
 *
 * @param array $formData Associative array with 'username', 'email', 'password'.
 * @return array Map of field name => error message; empty when everything is valid.
 */
function validateUserRegistrationForm($formData) {
    $errors = [];

    // Username: required, 3-20 word characters (letters, digits, underscore).
    if (empty($formData['username'])) {
        $errors['username'] = 'Username is required';
    } elseif (preg_match('/^\w{3,20}$/', $formData['username']) !== 1) {
        $errors['username'] = 'Username should contain only alphanumeric characters and underscores, and be between 3 and 20 characters long';
    }

    // Email: required and must satisfy PHP's built-in e-mail filter.
    if (empty($formData['email'])) {
        $errors['email'] = 'Email is required';
    } elseif (filter_var($formData['email'], FILTER_VALIDATE_EMAIL) === false) {
        $errors['email'] = 'Invalid email format';
    }

    // Password: required, at least 8 bytes with upper, lower and digit.
    if (empty($formData['password'])) {
        $errors['password'] = 'Password is required';
    } else {
        $password = $formData['password'];
        $isLongEnough = strlen($password) >= 8;
        $hasUpper = preg_match('/[A-Z]/', $password) === 1;
        $hasLower = preg_match('/[a-z]/', $password) === 1;
        $hasDigit = preg_match('/\d/', $password) === 1;
        if (!($isLongEnough && $hasUpper && $hasLower && $hasDigit)) {
            $errors['password'] = 'Password should be at least 8 characters long and contain at least one uppercase letter, one lowercase letter, and one digit';
        }
    }

    return $errors;
}
<reponame>smagill/opensphere-desktop<gh_stars>10-100
package io.opensphere.core.quantify;
import java.util.Set;
import javafx.beans.property.BooleanProperty;
/**
 * An interface defining a service that collects and periodically sends metrics
 * to a remote location.
 */
public interface QuantifyService
{
/**
 * Collects an instance of the metric identified by the key.
 *
 * @param key the metric key for which to collect the metric.
 */
void collectMetric(String key);
/**
 * The property used to maintain the enabled state of the plugin. This is a
 * JavaFX {@link BooleanProperty}, so observers may bind to or listen on it.
 *
 * @return the property in which the enabled state is tracked.
 */
BooleanProperty enabledProperty();
/**
 * Flushes, sends all collected metrics through the sender to the remote
 * collector. Also resets the internal metric store to empty so that metrics
 * will not be counted twice.
 */
void flush();
/**
 * Gets the set of senders to which metrics will be sent.
 *
 * @return the set of senders to which metrics will be sent.
 */
Set<QuantifySender> getSenders();
/** Terminates the service. */
void close();
}
|
<reponame>longwind09/sampling
package org.felix.ml.sampling.filter.combo;
import org.felix.ml.sampling.FilterContext;
import org.felix.ml.sampling.IFilter;
import org.felix.ml.sampling.SamplePackage;
import org.felix.ml.sampling.ScoreResult;
import org.felix.ml.sampling.exception.ConfigException;
import org.felix.ml.sampling.exception.FilterDependenceException;
import org.felix.ml.sampling.exception.FilterException;
import org.felix.ml.sampling.filter.BaseFilter;
import org.felix.ml.sampling.filter.FilterFactory;
import org.felix.ml.sampling.util.QueryUtil;
import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;
/**
 * Composite filter that ANDs several sub-filters configured via a
 * "filters=a,b,..." parameter. With short=true (default) each sub-filter runs
 * on the previous one's output (successive narrowing); with short=false every
 * sub-filter runs on the original input and the results are intersected,
 * preserving the original ordering.
 */
public class AndFilter extends BaseFilter {
// true: chain sub-filters; false: run each independently and merge.
private boolean isShort = true;
private List<IFilter> filters = new ArrayList<IFilter>();
public List<IFilter> getFilters() {
return filters;
}
public void setFilters(List<IFilter> filters) {
this.filters = filters;
}
public void setShort(boolean isShort) {
this.isShort = isShort;
}
// Parses "filters=<comma list>[&short=<bool>]"; requires at least two
// resolvable sub-filter names.
public void init(String param) throws ConfigException {
String[] filterNames = QueryUtil.parseUrlQueryString(param).get("filters");
if (StringUtils.isEmpty(param) || filterNames == null || filterNames.length != 1)
throw new ConfigException(
String.format("wrong config for %s,param:%s", getClass().getSimpleName(), param));
String[] items = filterNames[0].split(",");
for (String item : items) {
IFilter filter = FilterFactory.getFilter(StringUtils.trim(item));
if (filter == null)
throw new FilterDependenceException(
String.format("Init %s filter error, param:%s", getClass().getSimpleName(), param));
filters.add(filter);
}
if (filters.size() < 2)
throw new ConfigException(String.format("wrong config for %s,param:%s", getClass().getSimpleName(), param));
String[] shortArr = QueryUtil.parseUrlQueryString(param).get("short");
if (shortArr != null && shortArr.length == 1) {
isShort = BooleanUtils.toBoolean(shortArr[0]);
}
super.init(param);
}
// Dispatches to the chained or the full-intersection strategy.
public List<Integer> doFilter(SamplePackage spackage, ScoreResult scoreResult, FilterContext context)
throws FilterException {
if (isShort)
return doShort(spackage, scoreResult, context);
else
return doFull(spackage, scoreResult, context);
}
// Chained mode: feed each sub-filter's output into the next by temporarily
// replacing the package's filterBefore list; the original list is restored
// before returning.
private List<Integer> doShort(SamplePackage spackage, ScoreResult scoreResult, FilterContext context)
throws FilterException {
List<Integer> bakList = spackage.cloneFilterBefore();
List<Integer> ret = new ArrayList<Integer>();
for (IFilter filter : filters) {
List<Integer> filterResult = filter.filter(spackage, scoreResult, context);
ret = filterResult;
spackage.setFilterBefore(filterResult);
}
spackage.setFilterBefore(bakList);
return ret;
}
// Independent mode: run every sub-filter on the same input, then intersect.
private List<Integer> doFull(SamplePackage spackage, ScoreResult scoreResult, FilterContext context)
throws FilterException {
List<List<Integer>> all = new ArrayList<List<Integer>>();
List<Integer> orderList = spackage.cloneFilterBefore();
for (IFilter filter : filters) {
List<Integer> flterResult = filter.filter(spackage, scoreResult, context);
all.add(flterResult);
}
return merge(all, orderList);
}
// Intersection of all result lists, emitted in orderList order.
// Note: List.contains inside the loops makes this O(n*m); fine for small
// lists, consider HashSet lookups if result sizes grow.
protected List<Integer> merge(List<List<Integer>> all, List<Integer> orderList) {
List<Integer> ret = new ArrayList<Integer>();
if (all.size() == 0) return ret;
List<Integer> firstList = all.get(0);
SortedSet<Integer> set = new TreeSet<Integer>();
// Keep an element of the first list only if every other list contains it;
// the labeled continue skips to the next candidate on the first miss.
outer:
for (Integer i : firstList) {
for (int j = 1; j < all.size(); j++) {
if (!all.get(j).contains(i)) {
continue outer;
}
}
set.add(i);
}
for (Integer i : orderList)
if (set.contains(i)) ret.add(i);
return ret;
}
// Debug dump of this filter and its sub-filters, one level of indent per depth.
public String toString(String prefix) {
StringBuilder sb = new StringBuilder();
sb.append(super.toString(prefix));
sb.append(prefix + "start print sub filters:\n");
for (IFilter filter : filters) {
sb.append(filter.toString(prefix + "\t"));
}
sb.append(prefix + "end print sub filters:\n");
return sb.toString();
}
}
|
package ctag;
import ctag.exception.CTagInvalidException;
import ctag.exception.EndException;
import ctag.exception.NegativeLengthException;
import ctag.tags.*;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
/**
 * Decodes a ctag code from one of several byte-oriented sources.
 * The first byte of the stream selects the tag type; the remainder is
 * delegated to that tag class's {@code parse} method.
 * @since 1.0
 */
public class CTagDecoder {

    private CTagInput input;

    /**
     * Decodes from a {@link CTagInput} stream.
     * @param input The {@link CTagInput}
     * @since 1.0
     */
    public CTagDecoder(CTagInput input) {
        this.input = input;
    }

    /**
     * Decodes from an {@link InputStream}.
     * @param stream The {@link InputStream}
     * @since 1.0
     */
    public CTagDecoder(InputStream stream) {
        this.input = new CTagInput(stream);
    }

    /**
     * Decodes from a base 64 string.
     * @param string The base 64 string
     * @since 1.1
     */
    public CTagDecoder(String string) throws IOException {
        this.input = new CTagInput(new Base64InputStream(new ByteArrayInputStream(string.getBytes())));
    }

    /**
     * Decodes from a byte array.
     * @param bytes The byte array
     * @since 1.0
     */
    public CTagDecoder(byte[] bytes) {
        this.input = new CTagInput(new ByteArrayInputStream(bytes));
    }

    /**
     * Decodes from a {@link Binary}.
     * @param bytes The {@link Binary}
     * @since 1.0
     */
    public CTagDecoder(Binary bytes) {
        this.input = new CTagInput(new ByteArrayInputStream(bytes.getBytes()));
    }

    /**
     * Decodes the CTag code by reading the one-byte type prefix and
     * dispatching to the matching tag parser.
     * @return The {@link ITag} this CTag code stores
     * @exception IOException If {@link CTagInput}'s underlying input stream
     * throws an {@link IOException}.
     * @exception CTagInvalidException If the CTag code is invalid.
     * @since 1.0
     */
    public ITag decode() throws IOException, CTagInvalidException, EndException, NegativeLengthException {
        Binary type = input.read(1);
        byte typeByte = type.getByte(0);
        switch (typeByte) {
            case 0:
                // TagEnd is a structural marker only; it may not appear as the root type.
                throw new CTagInvalidException("Found TagEnd as main type. TagEnd should not be used.");
            case 1:
                return TagByte.parse(input);
            case 2:
                return TagShort.parse(input);
            case 3:
                return TagInteger.parse(input);
            case 4:
                return TagLong.parse(input);
            case 5:
                return TagFloat.parse(input);
            case 6:
                return TagDouble.parse(input);
            case 7:
                return TagString.parse(input);
            case 8:
                return TagArray.parse(input);
            case 9:
                return TagCompound.parse(input);
            case 10:
                return TagNull.parse(input);
            case 11:
                return TagBoolean.parse(input);
            case 12:
                return TagByteArray.parse(input);
            case 13:
                return TagShortArray.parse(input);
            case 14:
                return TagIntegerArray.parse(input);
            case 15:
                return TagLongArray.parse(input);
            case 16:
                return TagFloatArray.parse(input);
            case 17:
                return TagDoubleArray.parse(input);
            case 18:
                return TagBooleanArray.parse(input);
            case 19:
                return TagStringArray.parse(input);
            default:
                throw new CTagInvalidException("Found invalid prefix: '" + type + "'.");
        }
    }
}
|
<filename>lib/jsonpath.rb<gh_stars>0
# frozen_string_literal: true
require 'strscan'
require 'multi_json'
require 'jsonpath/proxy'
require 'jsonpath/dig'
require 'jsonpath/enumerable'
require 'jsonpath/version'
require 'jsonpath/parser'
# JsonPath: initializes the class with a given JsonPath and parses that path
# into a token array.
# Represents a parsed JSONPath expression. The constructor tokenizes the path
# string into +@path+; evaluation over a document is delegated to
# JsonPath::Enumerable via #enum_on.
class JsonPath
  PATH_ALL = '$..*'

  # Per-instance option defaults, overridable via the opts hash.
  DEFAULT_OPTIONS = {
    :default_path_leaf_to_null => false,
    :symbolize_keys => false,
    :use_symbols => false,
    :allow_send => true
  }
  attr_accessor :path

  # Tokenizes +path+ with StringScanner. Bare words become ['word'] tokens,
  # quoted strings and bracket groups are kept whole, bare dots are dropped,
  # and any other character is appended to the previous token.
  def initialize(path, opts = {})
    @opts = DEFAULT_OPTIONS.merge(opts)
    scanner = StringScanner.new(path.strip)
    @path = []
    until scanner.eos?
      if (token = scanner.scan(/\$\B|@\B|\*|\.\./))
        @path << token
      elsif (token = scanner.scan(/[$@a-zA-Z0-9:{}_-]+/))
        @path << "['#{token}']"
      elsif (token = scanner.scan(/'(.*?)'/))
        @path << "[#{token}]"
      elsif (token = scanner.scan(/\[/))
        @path << find_matching_brackets(token, scanner)
      elsif (token = scanner.scan(/\]/))
        raise ArgumentError, 'unmatched closing bracket'
      elsif (token = scanner.scan(/\(.*\)/))
        @path << token
      elsif scanner.scan(/\./)
        # Plain dots only separate tokens; nothing is emitted.
        nil
      elsif (token = scanner.scan(/[><=] \d+/))
        @path.last << token
      elsif (token = scanner.scan(/./))
        begin
          @path.last << token
        rescue RuntimeError
          # Appending to a frozen/immutable token means this character has no
          # valid place in the query.
          raise ArgumentError, "character '#{token}' not supported in query"
        end
      end
    end
  end

  # Consumes scanner input until the bracket opened by +token+ is balanced;
  # returns the full bracketed token (nested brackets included).
  def find_matching_brackets(token, scanner)
    count = 1
    until count.zero?
      if (t = scanner.scan(/\[/))
        token << t
        count += 1
      elsif (t = scanner.scan(/\]/))
        token << t
        count -= 1
      elsif (t = scanner.scan(/[^\[\]]+/))
        token << t
      elsif scanner.eos?
        raise ArgumentError, 'unclosed bracket'
      end
    end
    token
  end

  # Returns a new JsonPath whose tokens are this path's followed by
  # +join_path+'s; neither operand is modified.
  def join(join_path)
    res = deep_clone
    res.path += JsonPath.new(join_path).path
    res
  end

  # Evaluates this path against +obj_or_str+ (object or JSON string) and
  # returns all matches as an Array. With :symbolize_keys, hash results get
  # symbol keys.
  def on(obj_or_str, opts = {})
    a = enum_on(obj_or_str).to_a
    if opts[:symbolize_keys]
      a.map! do |e|
        e.each_with_object({}) { |(k, v), memo| memo[k.to_sym] = v; }
      end
    end
    a
  end

  # Returns every JSONPath expression reachable in +obj+, starting from '$'.
  def self.fetch_all_path(obj)
    all_paths = ['$']
    find_path(obj, '$', all_paths, obj.class == Array)
    return all_paths
  end

  # Recursive helper for fetch_all_path: walks Hashes (key/value pairs) and
  # Arrays (index-addressed) and appends each discovered path to +all_paths+.
  def self.find_path(obj, root_key, all_paths, is_array = false)
    obj.each do |key, value|
      table_params = { key: key, root_key: root_key}
      is_loop = value.class == Array || value.class == Hash
      if is_loop
        path_exp = construct_path(table_params)
        all_paths << path_exp
        find_path(value, path_exp, all_paths, value.class == Array)
      elsif is_array
        # For arrays, `key` is the element itself; address it by index.
        table_params[:index] = obj.find_index(key)
        path_exp = construct_path(table_params)
        find_path(key, path_exp, all_paths, key.class == Array) if key.class == Hash || key.class == Array
        all_paths << path_exp
      else
        all_paths << construct_path(table_params)
      end
    end
  end

  # Builds 'root[i]' when :index is present, otherwise 'root.key'.
  def self.construct_path(table_row)
    if table_row[:index]
      return table_row[:root_key] + '['+ table_row[:index].to_s + ']'
    else
      return table_row[:root_key] + '.'+ table_row[:key]
    end
  end

  # First match (or first +n+ matches) of this path on +obj_or_str+.
  def first(obj_or_str, *args)
    enum_on(obj_or_str).first(*args)
  end

  # Lazy evaluation: returns the Enumerable without materializing results.
  def enum_on(obj_or_str, mode = nil)
    JsonPath::Enumerable.new(self, self.class.process_object(obj_or_str), mode,
                             @opts)
  end
  alias [] enum_on

  # One-shot convenience: JsonPath.on(doc, '$..x').
  def self.on(obj_or_str, path, opts = {})
    new(path, opts).on(process_object(obj_or_str))
  end

  # Wraps the document in a Proxy for chained path operations.
  def self.for(obj_or_str)
    Proxy.new(process_object(obj_or_str))
  end

  private

  # Parses JSON strings into Ruby objects; passes anything else through.
  # NOTE(review): `private` does not apply to singleton methods defined with
  # `def self.`; this method remains publicly callable — confirm intent.
  def self.process_object(obj_or_str)
    obj_or_str.is_a?(String) ? MultiJson.decode(obj_or_str) : obj_or_str
  end

  # Deep copy via Marshal so #join cannot mutate the receiver's token array.
  def deep_clone
    Marshal.load Marshal.dump(self)
  end
end
package study.business.application.jobs.person.listener;
import javax.batch.api.chunk.listener.RetryWriteListener;
import javax.enterprise.context.Dependent;
import javax.inject.Named;
import java.util.List;
@Dependent
@Named("RetryWriteListenerImpl")
public class RetryWriteListenerImpl implements RetryWriteListener {
    /**
     * No-op hook invoked when a chunk write is retried after an exception.
     * Intentionally empty: this implementation only satisfies the batch
     * artifact contract and performs no logging or recovery.
     */
    @Override
    public void onRetryWriteException(List<Object> items, Exception ex) throws Exception {
        // Intentionally left blank.
    }
}
|
<gh_stars>1-10
#include "cglass/species_base.hpp"
/* Seeds the per-species RNG; all other state is set via the setters below. */
SpeciesBase::SpeciesBase(unsigned long seed) : rng_(seed) {}

void SpeciesBase::SetSID(species_id sid) { sid_ = sid; }

/* Static functions*/
void SpeciesBase::SetParams(system_parameters *params) { params_ = params; }
void SpeciesBase::SetSpace(SpaceBase *space) { space_ = space; }

/* Definitions of the static members shared by all species instances.
 * (Fixed a stray duplicate semicolon on the params_ definition.) */
const SpaceBase *SpeciesBase::space_ = nullptr;
system_parameters *SpeciesBase::params_ = nullptr;
/* Opens the binary <run>_<sid>_<name>.posit output file and writes its
 * three-field header: n_steps (int), n_posit (int), delta (double). */
void SpeciesBase::InitPositFile(std::string run_name) {
  std::string sid_str = sid_._to_string();
  std::string posit_file_name =
      run_name + "_" + sid_str + "_" + GetSpeciesName() + ".posit";
  oposit_file_.open(posit_file_name, std::ios::out | std::ios::binary);
  if (!oposit_file_.is_open()) {
    Logger::Error("Output file %s did not open", posit_file_name.c_str());
  }
  int n_posit = GetNPosit();
  int n_steps = params_->n_steps;
  double delta = params_->delta;
  oposit_file_.write(reinterpret_cast<char *>(&n_steps), sizeof(int));
  oposit_file_.write(reinterpret_cast<char *>(&n_posit), sizeof(int));
  oposit_file_.write(reinterpret_cast<char *>(&delta), sizeof(double));
}
/* Opens the binary .posit input file, validates its header against the
 * current parameters (warning only — playback continues on mismatch), and
 * reads the first posit frame. */
void SpeciesBase::InitPositFileInput(std::string run_name) {
  std::string sid_str = sid_._to_string();
  std::string posit_file_name =
      run_name + "_" + sid_str + "_" + GetSpeciesName() + ".posit";
  iposit_file_.open(posit_file_name, std::ios::in | std::ios::binary);
  if (!iposit_file_.is_open()) {
    Logger::Error("Input file %s did not open", posit_file_name.c_str());
  }
  // long n_steps;
  int n_posit, n_steps;
  double delta;
  iposit_file_.read(reinterpret_cast<char *>(&n_steps), sizeof(int));
  iposit_file_.read(reinterpret_cast<char *>(&n_posit), sizeof(int));
  iposit_file_.read(reinterpret_cast<char *>(&delta), sizeof(double));
  // Header mismatch is non-fatal; warn so the user can spot stale files.
  if (n_steps != params_->n_steps || n_posit != GetNPosit() ||
      delta != params_->delta) {
    Logger::Warning("Input file %s does not match parameter file: "
                    "n_steps: %d %d, "
                    "n_spec: %d %d, "
                    "delta: %2.2f %2.2f ",
                    posit_file_name.c_str(), n_steps, params_->n_steps, n_posit,
                    GetNPosit(), delta, params_->delta);
  }
  ReadPosits();
}
/* Opens the binary <run>_<sid>_<name>.spec output file and writes its
 * header: n_steps (int), n_spec (int), delta (double). */
void SpeciesBase::InitSpecFile(std::string run_name) {
  std::string sid_str = sid_._to_string();
  std::string spec_file_name =
      run_name + "_" + sid_str + "_" + GetSpeciesName() + ".spec";
  Logger::Trace("Initializing spec file %s", spec_file_name.c_str());
  ospec_file_.open(spec_file_name, std::ios::out | std::ios::binary);
  if (!ospec_file_.is_open()) {
    Logger::Error("Output file %s did not open", spec_file_name.c_str());
  }
  int n_spec = GetNSpec();
  int n_steps = params_->n_steps;
  double delta = params_->delta;
  ospec_file_.write(reinterpret_cast<char *>(&n_steps), sizeof(int));
  ospec_file_.write(reinterpret_cast<char *>(&n_spec), sizeof(int));
  ospec_file_.write(reinterpret_cast<char *>(&delta), sizeof(double));
}
/* Called when the current input spec file hits EOF during playback. Builds
 * the name of the next reload file in the sequence (suffix _reload000,
 * _reload001, ...), accounting for optional "_reduced" suffixes, and switches
 * input to it. Returns false when no further file can be opened.
 * NOTE(review): spec_file_iterator_ starts at 0 (see InitInputFiles), so the
 * pre-incremented condition below is always true — confirm intended. */
bool SpeciesBase::HandleEOF() {
  if (++spec_file_iterator_) {
    std::ostringstream file_name, nload;
    std::string sid_str = sid_._to_string();
    file_name << params_->run_name;
    // Reload index, zero-padded to three digits.
    nload << std::setw(3) << std::setfill('0') << spec_file_iterator_;
    size_t pos;
    if ((pos = file_name.str().find("reload")) == std::string::npos) {
      // This is not a reload file currently
      if (params_->reduced) {
        /* The file is either a reduced file, or we are currently reducing */
        if ((pos = file_name.str().find("_reduced")) == std::string::npos) {
          /* we are currently reducing, so input file does not have reduce in
           * name */
          file_name << "_reload" << nload.str();
        } else {
          if (!params_->reload_reduce_switch) {
            /* need to (probably) prefix with reload, assuming the reduction
               came after the reload (most typical case) */
            file_name.seekp(pos);
            file_name << "_reload" << nload.str();
            file_name << "_reduced" << params_->reduced;
          } else {
            file_name << "_reload" << nload.str();
          }
        }
      } else {
        file_name << "_reload" << nload.str();
      }
    } else {
      // The file is currently a reload file, simply seek to beginning of
      // substring "reload"
      file_name.seekp(pos);
      file_name << "_reload" << nload.str();
      if (params_->reduced) {
        file_name << "_reduced" << params_->reduced;
      }
    }
    file_name << "_" << sid_str << "_" << GetSpeciesName() << ".spec";
    // Close the exhausted stream before reopening on the next file.
    if (ispec_file_.is_open()) {
      ispec_file_.close();
    }
    Logger::Info("Switching to new spec file, %s", file_name.str().c_str());
    return InitSpecFileInputFromFile(file_name.str(), false);
  } else {
    return false;
  }
}
/* Opens the named spec file for input and validates its header against the
 * current parameters (warning only). Returns false if the file could not be
 * opened. When convert is false the first spec frame is read immediately. */
bool SpeciesBase::InitSpecFileInputFromFile(std::string spec_file_name, bool convert) {
  ispec_file_.open(spec_file_name, std::ios::in | std::ios::binary);
  if (!ispec_file_.is_open()) {
    return false;
  }
  // long n_steps;
  int n_spec, n_steps;
  double delta;
  ispec_file_.read(reinterpret_cast<char *>(&n_steps), sizeof(int));
  ispec_file_.read(reinterpret_cast<char *>(&n_spec), sizeof(int));
  ispec_file_.read(reinterpret_cast<char *>(&delta), sizeof(double));
  if (n_steps != params_->n_steps || n_spec != GetNSpec() ||
      delta != params_->delta) {
    Logger::Warning("Input file %s does not match parameter file: "
                    "n_steps: %d %d, "
                    "n_spec: %d %d, "
                    "delta: %2.2f %2.2f ",
                    spec_file_name.c_str(), n_steps, params_->n_steps, n_spec,
                    GetNSpec(), delta, params_->delta);
  }
  if (!convert) ReadSpecs();
  return true;
}
/* Opens the spec file in convert mode and creates the companion
 * <run>_<sid>_<name>Spec.txt text file with a human-readable header.
 * If the binary spec file failed to open, spec_valid_ is false and no text
 * file is created. */
void SpeciesBase::InitConvertSpecFile(std::string run_name) {
  InitSpecFileInput(run_name, true);
  std::string sid_str = sid_._to_string();
  std::string text_file_name =
      run_name + "_" + sid_str + "_" + GetSpeciesName() + "Spec.txt";
  if (!spec_valid_) return; // Spec file did not open
  ospec_text_file_.open(text_file_name, std::ios::out);
  ospec_text_file_ << "n_steps n_spec delta" << std::endl;
  ospec_text_file_ << params_->n_steps << " " << GetNSpec();
  ospec_text_file_ << " " << params_->delta << std::endl;
}
/* Opens the spec input file for this run. Open failure is fatal during
 * normal playback, but only a warning (with spec_valid_ cleared) while
 * converting, so conversion of other species can continue. */
void SpeciesBase::InitSpecFileInput(std::string run_name, bool convert) {
  std::string sid_str = sid_._to_string();
  std::string spec_file_name =
      run_name + "_" + sid_str + "_" + GetSpeciesName() + ".spec";
  if (!InitSpecFileInputFromFile(spec_file_name, convert)) {
    if (convert) {
      Logger::Warning("Input file %s did not open", spec_file_name.c_str());
      // Delete converted text file and do not continue reading this spec file.
      spec_valid_ = false;
    } else {
      Logger::Error("Input file %s did not open", spec_file_name.c_str());
    }
  }
}
/* Opens whichever output channels (posit, spec, checkpoint) this species has
 * enabled through its flag accessors. */
void SpeciesBase::InitOutputFiles(std::string run_name) {
  Logger::Trace("Initializing output files for %s %s", sid_._to_string(),
                GetSpeciesName().c_str());
  if (GetPositFlag()) {
    InitPositFile(run_name);
  }
  if (GetSpecFlag()) {
    InitSpecFile(run_name);
  }
  if (GetCheckpointFlag()) {
    InitCheckpoints(run_name);
  }
}
/* Opens the input/output file pair used when converting binary spec data to
 * text; only applies when spec output is enabled for this species. */
void SpeciesBase::InitConvertFiles(std::string run_name) {
  Logger::Trace("Initializing conversion input/output files for %s %s",
                sid_._to_string(), GetSpeciesName().c_str());
  if (GetSpecFlag())
    InitConvertSpecFile(run_name);
}
/* Records the checkpoint file name for this species; the file itself is
 * written elsewhere (see WriteCheckpoints/ReadCheckpoints). */
void SpeciesBase::InitCheckpoints(std::string run_name) {
  std::string sid_str = sid_._to_string();
  checkpoint_file_ =
      run_name + "_" + sid_str + "_" + GetSpeciesName() + ".checkpoint";
}
/* Restores species state from the checkpoint written by a previous run
 * (checkpoint_run_name may differ from the current run_name). Fatal if the
 * parameter file does not enable checkpointing for this species. */
void SpeciesBase::LoadFromCheckpoints(std::string run_name,
                                      std::string checkpoint_run_name) {
  std::string sid_str = sid_._to_string();
  checkpoint_file_ = checkpoint_run_name + "_" + sid_str + "_" +
                     GetSpeciesName() + ".checkpoint";
  Logger::Trace("Loading %s %s from checkpoint file %s", sid_._to_string(),
                GetSpeciesName().c_str(), checkpoint_file_.c_str());
  if (!GetCheckpointFlag()) {
    Logger::Error("Checkpoint file %s not available for parameter file!",
                  checkpoint_file_.c_str());
  }
  ReadCheckpoints();
}
/* Selects and opens the playback input for this species: the posit stream
 * when posits_only is requested and available, otherwise the spec stream.
 * with_reloads additionally resets the reload-file counter so HandleEOF can
 * walk the _reloadNNN sequence from the start. */
void SpeciesBase::InitInputFiles(std::string run_name, bool posits_only,
                                 bool with_reloads) {
  if (posits_only && GetPositFlag()) {
    InitPositFileInput(run_name);
  } else if (GetSpecFlag()) {
    if (with_reloads) {
      spec_file_iterator_ = 0;
    }
    InitSpecFileInput(run_name, false);
  }
}
/* Closes every posit/spec stream that is still open. */
void SpeciesBase::CloseFiles() {
  Logger::Trace("Closing output files for %s %s", sid_._to_string(),
                GetSpeciesName().c_str());
  if (oposit_file_.is_open())
    oposit_file_.close();
  if (iposit_file_.is_open())
    iposit_file_.close();
  if (ospec_file_.is_open())
    ospec_file_.close();
  if (ispec_file_.is_open())
    ispec_file_.close();
  // BUG FIX: previously closed ospec_file_ a second time here, leaving the
  // text stream open (and potentially unflushed).
  if (ospec_text_file_.is_open())
    ospec_text_file_.close();
  // FinalizeAnalysis();
}
|
#!/bin/bash
# Changed shebang from /bin/sh: this script relies on bash-only features
# (the `function` keyword, arrays, `set -o pipefail`, and the ERR trap),
# which are not guaranteed under a POSIX /bin/sh.
set -e
set -u
set -o pipefail

# Report the script path and line number of any command that fails.
function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
# $1: dSYM bundle path; $2 (optional, default true): warn when no matching
# architecture is found. The dSYM is copied into DERIVED_FILES_DIR, its DWARF
# binary stripped of non-target architectures, and the stripped bundle moved
# into DWARF_DSYM_FOLDER_PATH.
install_dsym() {
  local source="$1"
  warn_missing_arch=${2:-true}
  if [ -r "$source" ]; then
    # Copy the dSYM into the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"

    local basename
    basename="$(basename -s .dSYM "$source")"
    binary_name="$(ls "$source/Contents/Resources/DWARF")"
    binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"

    # Strip invalid architectures so "fat" simulator / device frameworks work on device
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary" "$warn_missing_arch"
    fi

    if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
      # Move the stripped file into its final destination.
      # BUG FIX: the echoed command previously said "${basename}.framework.dSYM"
      # while the executed rsync used "${basename}.dSYM"; the log now matches.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
    fi
  fi
}
# Copies the bcsymbolmap files of a vendored framework
# $1: path to a .bcsymbolmap file, copied into BUILT_PRODUCTS_DIR.
install_bcsymbolmap() {
    local bcsymbolmap_path="$1"
    local destination="${BUILT_PRODUCTS_DIR}"
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
# $1: path to sign. No-op unless an expanded identity is set and code signing
# is both required and allowed by the build settings. When
# COCOAPODS_PARALLEL_CODE_SIGN is true the codesign invocation is backgrounded
# (the trailing `wait` at the bottom of this script collects those jobs).
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"

    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Strip invalid architectures
# $1: binary path; $2 (optional, default true): warn when no architecture
# matches. Sets STRIP_BINARY_RETVAL to 1 if the binary ends up usable
# (stripped or already matching), 0 if it contains no requested architecture.
strip_invalid_archs() {
  binary="$1"
  warn_missing_arch=${2:-true}
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    if [[ "$warn_missing_arch" == "true" ]]; then
      echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    fi
    STRIP_BINARY_RETVAL=0
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=1
}
# Dispatches a single artifact path to the matching installer based on its
# file extension (.framework, .dSYM, or .bcsymbolmap).
install_artifact() {
  artifact="$1"
  base="$(basename "$artifact")"
  case $base in
  *.framework)
    install_framework "$artifact"
    ;;
  *.dSYM)
    # Suppress arch warnings since XCFrameworks will include many dSYM files
    install_dsym "$artifact" "false"
    ;;
  *.bcsymbolmap)
    install_bcsymbolmap "$artifact"
    ;;
  *)
    echo "error: Unrecognized artifact "$artifact""
    ;;
  esac
}
# Reads artifact paths (one per line) from the file named by $1 and installs
# each via install_artifact.
copy_artifacts() {
  file_list="$1"
  # `read -r` keeps backslashes in paths literal; quoting the redirection
  # handles list-file paths containing spaces.
  while read -r artifact; do
    install_artifact "$artifact"
  done <"$file_list"
}
# Main: install everything listed in the per-configuration artifact file that
# CocoaPods generates, then the explicitly embedded frameworks.
ARTIFACT_LIST_FILE="${BUILT_PRODUCTS_DIR}/cocoapods-artifacts-${CONFIGURATION}.txt"
if [ -r "${ARTIFACT_LIST_FILE}" ]; then
  copy_artifacts "${ARTIFACT_LIST_FILE}"
fi

if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/WJLTestTrunk/WJLTestTrunk.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/WJLTestTrunk/WJLTestTrunk.framework"
fi
# Collect any codesign jobs that code_sign_if_enabled backgrounded.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
import React from "react"
import Responsive from "react-responsive"
import Img from "gatsby-image"
import { useStaticQuery, graphql, Link } from "gatsby"
import OutlineBtn from "./outlineBtn"
const WindowsPortfolio = () => {
const Desktop = props => <Responsive {...props} minWidth={760} />;
const Mobile = props => <Responsive {...props} maxWidth={759} />;
const data = useStaticQuery(graphql`
query {
windowwhite: file(absolutePath: { regex: "/logo-white.png/" }) {
childImageSharp {
fixed(width: 225, quality: 100) {
...GatsbyImageSharpFixed
}
}
}
replacementdisplay: file(absolutePath: { regex: "/replacement-windows-display.png/" }) {
childImageSharp {
fluid(maxHeight: 1000, quality: 100) {
...GatsbyImageSharpFluid
...GatsbyImageSharpFluidLimitPresentationSize
}
}
}
}
`)
return (
<div>
<Desktop>
<div className="numberSection" style={{
backgroundColor: '#71808E',
padding: '150px 0',
color: '#ffffff',
}}>
<div className="container">
<div style={{
display: 'flex',
alignItems: 'center'
}}>
<div style={{
width: '40%'
}}>
<Img fixed={data.windowwhite.childImageSharp.fixed} style={{ padding: 0, margin: 0 }} />
<h2 style={{
fontSize: '1.5rem',
padding: '50px 0 25px 0',
margin: '0'
}}>Windows for Every Budget</h2>
<hr style={{
border: '2px solid #ffffff',
width: '50px',
padding: '0',
margin: '0',
}} />
<p style={{
lineHeight: '1.7',
paddingTop: '25px',
margin: '0'
}}>Receive new windows with no long demo, no pushy salesman and no huge markeup. No matter your financial situation, they have a pricing structure that works for you!</p>
{/* <Link to={'https://replacementwindowsonline.com/'}>
<OutlineBtn buttonText="Learn More" />
</Link> */}
</div>
<div style={{
width: '60%'
}}>
<Img fluid={data.replacementdisplay.childImageSharp.fluid} style={{ padding: 0, margin: 0 }} />
</div>
</div>
</div>
</div>
</Desktop>
<Mobile>
<div className="numberSection" style={{
display: 'flex',
justifyContent: 'center',
alignItems: 'center',
textAlign: 'center',
backgroundColor: '#091841',
padding: '150px 0',
margin: '0',
color: '#ffffff',
}}>
<div className="container">
<Img fluid={data.replacementdisplay.childImageSharp.fluid} style={{ padding: 0, margin: '0 0 50px 0' }} />
<Img fixed={data.windowwhite.childImageSharp.fixed} style={{ padding: 0, margin: 0 }} />
<h2 style={{
fontSize: '1.5rem',
padding: '50px 0 25px 0',
margin: '0'
}}>Windows for Every Budget</h2>
<hr style={{
border: '2px solid #ffffff',
width: '50px',
padding: '0',
margin: '0 auto',
}} />
<p style={{
lineHeight: '1.7',
paddingTop: '25px',
margin: '0'
}}>Receive new windows with no long demo, no pushy salesman and no huge markeup. No matter your financial situation, they have a pricing structure that works for you!</p>
</div>
</div>
</Mobile>
</div>
)
}
export default WindowsPortfolio |
#!/bin/bash
# Renames every entry in the current directory by replacing everything from
# the first "-" onward with ".a" (e.g. "libfoo-1.2.3" -> "libfoo.a").
for i in *; do
  new="$(printf '%s\n' "$i" | sed "s/\([a-zA-Z0-9_]*\)-.*/\1\.a/")"
  # Skip names the pattern leaves unchanged — `mv x x` would fail with
  # "are identical". Quoting and `--` handle spaces and leading dashes.
  if [ "$i" != "$new" ]; then
    mv -- "$i" "$new"
  fi
done
#!/bin/bash
# Builds the Scrypta macOS binaries inside a Docker container and copies the
# resulting artifacts into macos/build.
# `set -e` aborts on the first failure (e.g. a failed `cd` or build) instead
# of blindly running the remaining docker commands.
set -e
cd macos
# -p: do not fail if build/ already exists from a previous run.
mkdir -p build
docker build -t scrypta:macos .
docker run -d --name macos -p 42222:42222 -p 42223:42223 scrypta:macos
docker cp macos:/opt/scrypta/src/lyrad ./build/lyrad
docker cp macos:/opt/scrypta/src/lyra-cli ./build/lyra-cli
docker cp macos:/opt/scrypta/src/qt/lyra-qt ./build/lyra-qt
docker cp macos:/opt/scrypta/Scrypta-Core.dmg ./build/Scrypta-Core.dmg
#pragma once
#include <memory>
#include <SFML/Graphics.hpp>
#include <SFML/System.hpp>
#include <SFML/Window.hpp>
#include <string>
namespace komodo::core
{
// Central game object: owns the SFML render window and clock, and drives the
// initialize/update/draw loop. Most subsystem hooks are currently commented
// out placeholders.
class Game
{
public:
#pragma region Constructors
    Game();
#pragma endregion
    ~Game();

#pragma region Static Members
    //static ContentManager contentManager;
#pragma endregion

#pragma region Accessors
    //weak_ptr<BehaviorSystem> getBehaviorSystem() const;
    //weak_ptr<CameraSystem> getCameraSystem() const;
    //weak_ptr<Shader> getDefaultSpriteShader() const;
    // Most recently measured FPS (see framesPerSecond member).
    float getFramesPerSecond() const;
    //weak_ptr<GraphicsManager> getGraphicsManager() const;
    bool getIsActive() const;
    //vector<PhysicsSystem> getPhysicsSystem() const;
    //vector<Render2DSystem> getRender2DSystem() const;
    //vector<Render3DSystem> getRender3DSystem() const;
    std::string getScreenDeviceName() const;
    //weak_ptr<SoundSystem> getSoundSystem() const;
    std::string getTitle() const;
#pragma endregion

#pragma region Mutators
    //void setDefaultSpriteShader(Shader value);
    void setIsActive(bool value);
    void setTitle(std::string value);
#pragma endregion

#pragma region Member Methods
    //std::shared_ptr<PhysicsSystem> createPhysicsSystem();
    //std::shared_ptr<Render2DSystem> createRender2DSystem();
    //std::shared_ptr<Render3DSystem> createRender3DSystem();
    // Renders one frame. NOTE(review): sf::Color components are 8-bit; the
    // float literals here convert implicitly to (0, 100, 100) — confirm the
    // intended default clear color.
    void draw(float dt, sf::Color clearColor = sf::Color(0.0f, 100.0f, 100.0f));
    void exit();
    void initialize();
    void run();
    void update(float dt);
#pragma region Event handlers
    //int addExitingEvent(const std::shared_ptr<function<bool(ExitEventArgs args)>> handler);
    //int addFocusGainedEvent(const std::shared_ptr<function<bool(FoxusGainedArgs args)>> handler);
    //int addFocusLostEvent(const std::shared_ptr<function<bool(FocusLostArgs args)>> handler);
    //int addKeyDownEvent(const std::shared_ptr<function<bool(KeyDownEventArgs args)>> handler);
    //int addScreenDeviceNameChangedEvent(const std::shared_ptr<function<bool(KeyUpArgs args)>> handler);
    //int addTextInputEvent(const std::shared_ptr<function<bool(TextInputEventArgs args)>> handler);
    //int addWindowSizeChangedEvent(const std::shared_ptr<function<bool(WindowSizeChangedEventArgs args)>> handler);
    //bool removeExitingEvent(const int handlerId);
    //bool removeFocusGainedEvent(const int handlerId);
    //bool removeFocusLostEvent(const int handlerId);
    //bool removeKeyDownEvent(const int handlerId);
    //bool removeScreenDeviceNameChangedEvent(const int handlerId);
    //bool removeTextInputEvent(const int handlerId);
    //bool removeWindowSizeChangedEvent(const int handlerId);
#pragma endregion
#pragma endregion Member Methods

private:
#pragma region Members
    //BehaviorSystem behaviorSystem;
    //CameraSystem cameraSystem;
    // Wall-clock source for frame timing.
    std::shared_ptr<sf::Clock> clock;
    //Shader defaultSpriteShader;
    float framesPerSecond = 0.0;
    //GraphicsManager graphicsManager;
    bool isActive = true;
    //vector<PhysicsSystem> physicsSystems;
    //vector<Render2DSystem> render2DSystems;
    //vector<Render2DSystem> render2DSystems;
    std::string screenDeviceName;
    // Set by exit(); checked by the run loop to terminate.
    bool shouldClose = false;
    //SoundSystem soundSystem;
    std::string title;
    std::shared_ptr<sf::RenderWindow> window;
#pragma endregion
};
} |
<reponame>ops-class/test161
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/ops-class/test161"
"github.com/parnurzeal/gorequest"
color "gopkg.in/fatih/color.v0"
"net/http"
"os"
"sort"
"strings"
)
// listRemoteFlag selects listing targets from the remote server configured in
// .test161.conf instead of the local environment.
var listRemoteFlag bool

// Flags for the `test161 list tags` subcommand: short output and an optional
// list of tag names to restrict the listing to.
var (
	listTagsShort bool
	listTagsList  []string
)
// doListCommand dispatches `test161 list <what>` to the matching lister and
// returns a process exit code (0 on success, 1 on usage error).
func doListCommand() int {
	if len(os.Args) < 3 {
		fmt.Fprintf(os.Stderr, "Missing argument to list command\n")
		return 1
	}

	switch os.Args[2] {
	case "targets":
		return doListTargets()
	case "tags":
		return doListTags()
	case "tests":
		return doListTests()
	case "all":
		return doListAll()
	case "tagnames":
		return doListTagnames()
	default:
		// BUG FIX: the message previously omitted the accepted "all" and
		// "tagnames" options; keep this list in sync with the cases above.
		fmt.Fprintf(os.Stderr, "Invalid option to 'test161 list'. Must be one of (targets, tags, tests, all, tagnames)\n")
		return 1
	}
}
type targetsByName []*test161.TargetListItem
func (t targetsByName) Len() int { return len(t) }
func (t targetsByName) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t targetsByName) Less(i, j int) bool { return t[i].Name < t[j].Name }
func doListTargets() int {
if err := getListArgs(); err != nil {
printRunError(err)
return 1
}
var targets *test161.TargetList
if listRemoteFlag {
var errs []error
if targets, errs = getRemoteTargets(); len(errs) > 0 {
printRunErrors(errs)
return 1
}
} else {
targets = env.TargetList()
}
sort.Sort(targetsByName(targets.Targets))
printTargets(targets)
return 0
}
func getRemoteTargets() (*test161.TargetList, []error) {
if len(clientConf.Server) == 0 {
return nil, []error{errors.New("server field missing in .test161.conf")}
}
endpoint := clientConf.Server + "/api-v1/targets"
request := gorequest.New()
resp, body, errs := request.Get(endpoint).End()
if errs != nil {
return nil, connectionError(endpoint, errs)
}
if resp.StatusCode != http.StatusOK {
return nil, []error{fmt.Errorf("Unable to retrieve remote targets: %v", resp.Status)}
}
targets := &test161.TargetList{}
if err := json.Unmarshal([]byte(body), targets); err != nil {
return nil, []error{err}
}
return targets, nil
}
func printTargets(list *test161.TargetList) {
var desc string
if listRemoteFlag {
desc = "Remote Target"
} else {
desc = "Local Target"
}
pd := &PrintData{
Headings: []*Heading{
&Heading{
Text: desc,
MinWidth: 20,
},
&Heading{
Text: "Type",
},
&Heading{
Text: "Version",
},
&Heading{
Text: "Points",
RightJustified: true,
},
},
Rows: make(Rows, 0),
Config: defaultPrintConf,
}
for _, t := range list.Targets {
row := []*Cell{
&Cell{Text: t.Name},
&Cell{Text: t.Type},
&Cell{Text: fmt.Sprintf("v%v", t.Version)},
&Cell{Text: fmt.Sprintf("%v", t.Points)},
}
pd.Rows = append(pd.Rows, row)
}
fmt.Println()
pd.Print()
fmt.Println()
}
func getListArgs() error {
listFlags := flag.NewFlagSet("test161 list-targets", flag.ExitOnError)
listFlags.Usage = usage
listFlags.BoolVar(&listRemoteFlag, "remote", false, "")
listFlags.BoolVar(&listRemoteFlag, "r", false, "")
listFlags.Parse(os.Args[3:]) // this may exit
if len(listFlags.Args()) > 0 {
return errors.New("test161 list-targets does not support positional arguments")
}
return nil
}
func getTagArgs() error {
flags := flag.NewFlagSet("test161 list-tags", flag.ExitOnError)
flags.Usage = usage
flags.BoolVar(&listTagsShort, "short", false, "")
flags.BoolVar(&listTagsShort, "s", false, "")
flags.Parse(os.Args[3:]) // this may exit
listTagsList = flags.Args()
return nil
}
func getAllTests() ([]*test161.Test, []error) {
conf := &test161.GroupConfig{
Tests: []string{"**/*.t"},
Env: env,
}
tg, errs := test161.GroupFromConfig(conf)
if len(errs) > 0 {
return nil, errs
}
// Sort the tests by ID
tests := make([]*test161.Test, 0)
for _, t := range tg.Tests {
tests = append(tests, t)
}
sort.Sort(testsByID(tests))
return tests, nil
}
// Hidden option for autocomplete
func doListTagnames() int {
// Load every test file
tests, errs := getAllTests()
if len(errs) > 0 {
printRunErrors(errs)
return 1
}
tags := make(map[string]bool)
for _, test := range tests {
for _, tag := range test.Tags {
tags[tag] = true
}
}
// Print tags
for key, _ := range tags {
fmt.Println(key)
}
return 0
}
func doListTags() int {
if err := getTagArgs(); err != nil {
printRunError(err)
return 1
}
tags := make(map[string][]*test161.Test)
desired := make(map[string]bool)
for _, t := range listTagsList {
desired[t] = true
}
// Load every test file
tests, errs := getAllTests()
if len(errs) > 0 {
printRunErrors(errs)
return 1
}
// Get a tagmap of tag name -> list of tests
for _, test := range tests {
for _, tag := range test.Tags {
if _, ok := tags[tag]; !ok {
tags[tag] = make([]*test161.Test, 0)
}
tags[tag] = append(tags[tag], test)
}
}
sorted := make([]string, 0)
for tag, _ := range tags {
sorted = append(sorted, tag)
}
sort.Strings(sorted)
// Printing
fmt.Println()
if listTagsShort {
// For the short version, we'll print a table to align the descriptions
pd := &PrintData{
Headings: []*Heading{
&Heading{
Text: "Tag",
},
&Heading{
Text: "Description",
},
},
Config: defaultPrintConf,
Rows: make(Rows, 0),
}
for _, tag := range sorted {
if len(desired) > 0 && !desired[tag] {
continue
}
desc := ""
if info, ok := env.Tags[tag]; ok {
desc = info.Description
}
pd.Rows = append(pd.Rows, []*Cell{
&Cell{Text: tag},
&Cell{Text: desc},
})
}
if len(pd.Rows) > 0 {
pd.Print()
}
fmt.Println()
} else {
bold := color.New(color.Bold)
for _, tag := range sorted {
if len(desired) > 0 && !desired[tag] {
continue
}
if info, ok := env.Tags[tag]; ok {
bold.Printf("%v:", tag)
fmt.Printf(" %v\n", info.Description)
} else {
bold.Print(tag)
}
for _, test := range tags[tag] {
fmt.Println(" ", test.DependencyID)
}
fmt.Println()
}
}
return 0
}
func doListTests() int {
pd := &PrintData{
Headings: []*Heading{
&Heading{
Text: "Test ID",
},
&Heading{
Text: "Name",
},
&Heading{
Text: "Description",
},
},
Rows: make(Rows, 0),
Config: defaultPrintConf,
}
// Load every test file
tests, errs := getAllTests()
if len(errs) > 0 {
printRunErrors(errs)
return 1
}
// Print ID, line, description for each tests
for _, test := range tests {
row := Row{
&Cell{Text: test.DependencyID},
&Cell{Text: test.Name},
&Cell{Text: strings.TrimSpace(test.Description)},
}
pd.Rows = append(pd.Rows, row)
}
fmt.Println()
pd.Print()
fmt.Println()
return 0
}
func doListAll() int {
// Load every test file
tests, errs := getAllTests()
if len(errs) > 0 {
printRunErrors(errs)
return 1
}
tags := make(map[string]bool)
for _, test := range tests {
fmt.Println(test.DependencyID)
for _, tag := range test.Tags {
tags[tag] = true
}
}
// Print tags
for key, _ := range tags {
fmt.Println(key)
}
// Print targets
for _, target := range env.Targets {
fmt.Println(target.Name)
}
return 0
}
|
from pathlib import Path
def needs_update(stamp, doxyfile_path, update_script_path):
if not stamp.exists():
return True
stamp_mtime = stamp.stat().st_mtime
if stamp_mtime <= doxyfile_path.stat().st_mtime:
return True
if stamp_mtime <= update_script_path.stat().st_mtime:
return True
return False |
#
# quick way to update code by pulling new code and removing and reinstalling virtual environment to install only python
# dependencies from python dependency file
#
# EXECUTE FROM ROOT OF PROJECT
#
git pull
rm -fr ./venv
virtualenv venv
. ./venv/bin/activate
pip install -r ./python_deps.txt
echo "Done" |
import com.google.common.collect.Multiset;
import java.util.HashSet;
import java.util.Set;
public class ResourceFilter {
public Set<String> filterResources(Multiset<String> executionCounts, double threshold) {
Set<String> authorized = new HashSet<>();
double avg = getAverage(executionCounts);
for (String res : executionCounts.elementSet()) {
if (executionCounts.count(res) >= threshold * avg) {
authorized.add(res);
}
}
return authorized;
}
private double getAverage(Multiset<String> executionCounts) {
int totalCount = 0;
for (String res : executionCounts.elementSet()) {
totalCount += executionCounts.count(res);
}
return (double) totalCount / executionCounts.elementSet().size();
}
} |
def char_freq(string):
map = {}
for char in string:
if char in map:
map[char] += 1
else:
map[char] = 1
return map |
const reducer = (accumulator, currentValue) => accumulator + currentValue;
const sum = array.reduce(reducer, 0);
console.log(sum); // 10 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.