text
stringlengths 1
1.05M
|
|---|
#!/usr/bin/env bash
stack build && \
echo "running ----" && \
stack exec haskell-diagrams-cellular-automata-exe -- -h 32 -w 1024 -o images/simple.gif
|
const regexp = new RegExp(/^t[a-z]*$/);
const strings = ['test', 'teststring', 'tower', 'tease'];
const matches = strings.filter(string => regexp.test(string));
console.log(matches); // ['test', 'tower', 'tease']
|
INSTALL_BUILDER_VERSION="18.10.0"
if [ "$(uname)" == "Darwin" ]; then
INSTALLBUILDERCLI="/Applications/Bitrock InstallBuilder Enterprise ${INSTALL_BUILDER_VERSION}/bin/Builder.app/Contents/MacOS/installbuilder.sh"
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
if [[ $WERCKER = true ]]; then
INSTALLBUILDERCLI="/opt/installbuilder-${INSTALL_BUILDER_VERSION}/bin/builder"
else
INSTALLBUILDERCLI="${HOME}/installbuilder-${INSTALL_BUILDER_VERSION}/bin/builder"
fi
fi
PATHTOBUILDFILE="flickrdownloadr.xml"
PATHTOLICENSEFILE="flickrdownloadrlicense.xml"
PLATFORM="mac"
if [ $# = 1 ]; then
PLATFORM=$1
fi
PACKVARIABLE="pack_${PLATFORM}_platform_files"
if [ -f $PATHTOLICENSEFILE ]; then
EXECCOMMAND="'$INSTALLBUILDERCLI' build $PATHTOBUILDFILE $PLATFORM --license $PATHTOLICENSEFILE --setvars project.version=$BUILDNUMBER $PACKVARIABLE=true"
else
EXECCOMMAND="'$INSTALLBUILDERCLI' build $PATHTOBUILDFILE $PLATFORM --setvars project.version=$BUILDNUMBER $PACKVARIABLE=true"
fi
echo "About to execute: $EXECCOMMAND"
eval $EXECCOMMAND
|
"""
Creating an AI Chatbot in Python
"""
import re
import random
bot_greetings = ["Hi!", "Hey there!", "Good morning!", "Good afternoon!"]
# Define a function that responds to a user's message
def respond_to_message(message):
# If the message is a greeting, then return a random greeting response
if re.search('(hey|hi|hello|morning|afternoon)', message):
return random.choice(bot_greetings)
# For other messages, you can provide a response that contains the appropriate task instructions
elif re.search('(ticket|booking|reservation)', message):
return "To book a ticket, you can enter your travel details here ."
elif re.search('(hotel|accomodation)', message):
return "To book a hotel room, you can enter your dates and other details here ."
else:
return "I don't understand your query. Please enter a valid input!"
if __name__ == '__main__':
user_message = input("User: ")
bot_response = respond_to_message(user_message)
print("Bot:", bot_response)
|
/// <reference types="ledgerhq__hw-transport" />
import Transport from '@ledgerhq/hw-transport';
export declare type LedgerTypes = 'hid' | 'u2f' | 'webusb';
export interface TransportDef {
create(): Promise<Transport>;
type: LedgerTypes;
}
|
import UIKit
class ViewController: UIViewController {
@IBOutlet weak var diceImageView1: UIImageView!
@IBOutlet weak var diceImageView2: UIImageView!
let diceImages = ["dice1", "dice2", "dice3", "dice4", "dice5", "dice6"]
override func viewDidLoad() {
super.viewDidLoad()
// Additional setup code if needed
}
@IBAction func rollButtonPressed(_ sender: UIButton) {
let randomIndex1 = Int.random(in: 0..<6)
let randomIndex2 = Int.random(in: 0..<6)
diceImageView1.image = UIImage(named: diceImages[randomIndex1])
diceImageView2.image = UIImage(named: diceImages[randomIndex2])
}
@IBAction func resetButtonPressed(_ sender: UIButton) {
diceImageView1.image = nil
diceImageView2.image = nil
}
}
|
#...............................................................................
#
# This file is part of the Doxyrest toolkit.
#
# Doxyrest is distributed under the MIT license.
# For details see accompanying license.txt file,
# the public copy of which is also available at:
# http://tibbo.com/downloads/archive/doxyrest/license.txt
#
#...............................................................................
THIS_DIR=`pwd`
mkdir axl/build
pushd axl/build
cmake .. -DTARGET_CPU=$TARGET_CPU -DCMAKE_BUILD_TYPE=$BUILD_CONFIGURATION
make
popd
echo "set(AXL_CMAKE_DIR $THIS_DIR/axl/cmake $THIS_DIR/axl/build/cmake)" >> paths.cmake
if [ "$BUILD_DOC" != "" ]; then
mkdir graco/build
pushd graco/build
cmake .. -DTARGET_CPU=$TARGET_CPU -DCMAKE_BUILD_TYPE=$BUILD_CONFIGURATION
make
popd
echo "set(GRACO_CMAKE_DIR $THIS_DIR/graco/cmake $THIS_DIR/graco/build/cmake)" >> paths.cmake
mkdir luadoxyxml/build
pushd luadoxyxml/build
cmake .. -DTARGET_CPU=$TARGET_CPU -DCMAKE_BUILD_TYPE=$BUILD_CONFIGURATION
make
popd
echo "set(LUADOXYXML_EXE $THIS_DIR/luadoxyxml/build/bin/$BUILD_CONFIGURATION/luadoxyxml)" >> paths.cmake
fi
mkdir build
pushd build
cmake .. -DTARGET_CPU=$TARGET_CPU -DCMAKE_BUILD_TYPE=$BUILD_CONFIGURATION
make
ctest --output-on-failure
if [ "$BUILD_PACKAGE" != "" ]; then
fakeroot cpack -G TXZ --config CPackConfig.cmake
fi
popd
if [ "$GET_COVERAGE" != "" ]; then
lcov --capture --directory . --no-external --output-file coverage.info
lcov --remove coverage.info '*/axl/*' --output-file coverage.info
lcov --list coverage.info
curl -s https://codecov.io/bash | bash
fi
if [ "$BUILD_DOC" != "" ]; then
pushd build
source doc/index/build-html.sh
source doc/build-guide/build-html.sh
source doc/manual/build-xml.sh
source doc/manual/build-rst.sh
source doc/manual/build-html.sh
source samples/libusb/build-rst.sh
source samples/libusb/build-html.sh -D html_theme=sphinxdoc
mv doc/html/samples/{libusb,libusb-sphinxdoc}
source samples/libusb/build-html.sh
source samples/libssh/build-rst.sh
source samples/libssh/build-html.sh -D html_theme=sphinxdoc
mv doc/html/samples/{libssh,libssh-sphinxdoc}
source samples/libssh/build-html.sh
source samples/alsa/build-rst.sh
source samples/alsa/build-html.sh -D html_theme=sphinxdoc
mv doc/html/samples/{alsa,alsa-sphinxdoc}
source samples/alsa/build-html.sh
source samples/apr/build-rst.sh
source samples/apr/build-html.sh -D html_theme=sphinxdoc
mv doc/html/samples/{apr,apr-sphinxdoc}
source samples/apr/build-html.sh
touch doc/html/.nojekyll
popd
fi
|
cd data/untrimmed_fastq
#download raw data
if [ ! -f SRR2589044_1.fastq.gz ]; then
curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/004/SRR2589044/SRR2589044_1.fastq.gz
fi
if [ ! -f SRR2589044_2.fastq.gz ]; then
curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/004/SRR2589044/SRR2589044_2.fastq.gz
fi
if [ ! -f SRR2584863_1.fastq.gz ]; then
curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/003/SRR2584863/SRR2584863_1.fastq.gz
fi
if [ ! -f SRR2584863_2.fastq.gz ]; then
curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/003/SRR2584863/SRR2584863_2.fastq.gz
fi
if [ ! -f SRR2584866_1.fastq.gz ]; then
curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/006/SRR2584866/SRR2584866_1.fastq.gz
fi
if [ ! -f SRR2584866_2.fastq.gz ]; then
curl -O ftp://ftp.sra.ebi.ac.uk/vol1/fastq/SRR258/006/SRR2584866/SRR2584866_2.fastq.gz
fi
#Check checksums of downloaded files
md5sum -c < CHECKSUMS.MD5
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package Example2;
/**
*
* @author Adminfdf
*/
public class Example2 {
public int[] arrayA = {1, 5, 3, 12, 54, 7, 45, 8, 9, 32, 4, 5, 23, 41};
public int[] arrayB = {1, 5, 7, 8};
int temp = 0;
int lengthC = arrayA.length - arrayB.length;
public int[] ArrayC = new int[lengthC];
int flag = 1;
private int sumArray = 0;
public void handleArray() {
for (int i = 0; i < arrayA.length; i++) {
for (int j = 0; j < arrayB.length; j++) {
if (arrayA[i] == arrayB[j]) {
flag = 2;
}
if (arrayA[i] == arrayA[j] && i > j) {
ArrayC[temp] = arrayA[i];
temp++;
flag = 2;
}
}
if (flag == 1) {
ArrayC[temp] = arrayA[i];
temp++;
}
flag = 1;
}
}
public int sumArray(int[] array) {
for (int i = 0; i < array.length; i++) {
sumArray += array[i];
}
return sumArray;
}
public boolean checkArraySame() {
if (sumArray(arrayB) == sumArray(arrayA)) {
return true;
}
return false;
}
public void displayArray() {
if(checkArraySame()){
System.out.println("True");
}
else System.out.println("False");
}
public static void main(String[] args) {
Example2 arrayExample = new Example2();
arrayExample.handleArray();
arrayExample.displayArray();
}
}
|
#!/bin/bash
#
# profiles = xccdf_org.ssgproject.content_profile_stig-rhel7-disa
SSSD_PAM_SERVICES_REGEX="^[\s]*\[sssd]([^\n]*\n+)+?[\s]*services.*pam.*$"
SSSD_PAM_SERVICES="[sssd]
services = pam"
SSSD_CONF="/etc/sssd/sssd.conf"
grep -q "$SSSD_PAM_SERVICES_REGEX" $SSSD_CONF && \
sed -i "s/$SSSD_PAM_SERVICES_REGEX/$SSD_PAM_SERVICES/" $SSSD_CONF || \
echo "$SSSD_PAM_SERVICES" >> $SSSD_CONF
|
#!/bin/bash
dieharder -d 202 -g 3 -S 1928803870
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
#include "pch.h"
#include "TestCommon.h"
#include <AppInstallerRuntime.h>
#include <AppInstallerVersions.h>
using namespace AppInstaller;
using namespace AppInstaller::Utility;
TEST_CASE("VersionParse", "[versions]")
{
Version version("1.2.3.4-alpha");
const auto& parts = version.GetParts();
REQUIRE(parts.size() == 4);
for (size_t i = 0; i < parts.size(); ++i)
{
INFO(i);
REQUIRE(parts[i].Integer == static_cast<uint64_t>(i + 1));
if (i != 3)
{
REQUIRE(parts[i].Other == "");
}
else
{
REQUIRE(parts[i].Other == "-alpha");
}
}
}
TEST_CASE("VersionParsePlusDash", "[versions]")
{
Version version("1.2.3.4-alpha", ".-");
const auto& parts = version.GetParts();
REQUIRE(parts.size() == 5);
for (size_t i = 0; i < 4; ++i)
{
INFO(i);
REQUIRE(parts[i].Integer == static_cast<uint64_t>(i + 1));
REQUIRE(parts[i].Other == "");
}
REQUIRE(parts[4].Other == "alpha");
}
TEST_CASE("VersionParseCorner", "[versions]")
{
Version version1("");
auto parts = version1.GetParts();
REQUIRE(parts.size() == 0);
Version version2(".");
parts = version2.GetParts();
REQUIRE(parts.size() == 0);
Version version3(".0");
parts = version3.GetParts();
REQUIRE(parts.size() == 0);
Version version4(".1");
parts = version4.GetParts();
REQUIRE(parts.size() == 2);
REQUIRE(parts[0].Integer == 0);
REQUIRE(parts[0].Other == "");
REQUIRE(parts[1].Integer == 1);
REQUIRE(parts[1].Other == "");
Version version5("version");
parts = version5.GetParts();
REQUIRE(parts.size() == 1);
REQUIRE(parts[0].Integer == 0);
REQUIRE(parts[0].Other == "version");
}
void RequireLessThan(std::string_view a, std::string_view b)
{
Version vA{ std::string(a) };
Version vB{ std::string(b) };
REQUIRE(vA < vB);
REQUIRE_FALSE(vB < vA);
REQUIRE(vA <= vB);
REQUIRE_FALSE(vB <= vA);
REQUIRE(vB > vA);
REQUIRE_FALSE(vA > vB);
REQUIRE(vB >= vA);
REQUIRE_FALSE(vA >= vB);
REQUIRE_FALSE(vA == vB);
REQUIRE(vA != vB);
}
void RequireEqual(std::string_view a, std::string_view b)
{
Version vA{ std::string(a) };
Version vB{ std::string(b) };
REQUIRE(vA == vB);
REQUIRE_FALSE(vA != vB);
REQUIRE(vA <= vB);
REQUIRE(vA >= vB);
REQUIRE_FALSE(vA < vB);
REQUIRE_FALSE(vA > vB);
}
TEST_CASE("VersionCompare", "[versions]")
{
RequireLessThan("1", "2");
RequireLessThan("1.0.0", "2.0.0");
RequireLessThan("0.0.1", "0.0.2");
RequireLessThan("0.0.1-alpha", "0.0.2-alpha");
RequireLessThan("0.0.1-beta", "0.0.2-alpha");
RequireLessThan("0.0.1-beta", "0.0.2-alpha");
RequireLessThan("13.9.8", "14.1");
RequireEqual("1.0", "1.0.0");
}
TEST_CASE("VersionAndChannelSort", "[versions]")
{
std::vector<VersionAndChannel> sortedList =
{
{ Version("15.0.0"), Channel("") },
{ Version("14.0.0"), Channel("") },
{ Version("13.2.0-bugfix"), Channel("") },
{ Version("13.2.0"), Channel("") },
{ Version("13.0.0"), Channel("") },
{ Version("16.0.0"), Channel("alpha") },
{ Version("15.8.0"), Channel("alpha") },
{ Version("15.1.0"), Channel("beta") },
};
std::vector<size_t> reorderList = { 4, 2, 1, 7, 6, 3, 5, 0 };
REQUIRE(sortedList.size() == reorderList.size());
std::vector<VersionAndChannel> jumbledList;
for (auto i : reorderList)
{
jumbledList.emplace_back(sortedList[i]);
}
std::sort(jumbledList.begin(), jumbledList.end());
for (size_t i = 0; i < jumbledList.size(); ++i)
{
const VersionAndChannel& sortedVAC = sortedList[i];
const VersionAndChannel& jumbledVAC = jumbledList[i];
INFO(i);
REQUIRE(sortedVAC.GetVersion().ToString() == jumbledVAC.GetVersion().ToString());
REQUIRE(sortedVAC.GetChannel().ToString() == jumbledVAC.GetChannel().ToString());
}
}
TEST_CASE("MinOsVersion_Check", "[versions]")
{
// Just verify that we are greater than Win 7 and less than far future Win 10.
// Unfortunately, an unmanifested process will also pass these validations,
// but an unmanifested process also can't use Windows APIs to determine the actual version.
REQUIRE(Runtime::IsCurrentOSVersionGreaterThanOrEqual(Version("6.1")));
REQUIRE(!Runtime::IsCurrentOSVersionGreaterThanOrEqual(Version("10.0.65535")));
}
|
#!/bin/sh
#
# Run dora in a container
set -e
VERSION="0.1.3"
IMAGE="lvfrazao/dora:$VERSION"
exec docker run --rm --network host $IMAGE "$@"
|
<reponame>acidburn0zzz/remo
var EventsLib = {};
EventsLib.markers_array = [];
EventsLib.request = undefined;
EventsLib.number_of_events = 0;
EventsLib.searchform_elm = $('#searchform');
EventsLib.searchfield_elm = $('#searchfield');
EventsLib.events_table_body_elm = $('#events-table-body');
EventsLib.eventsitem_tmpl_elm = $('#eventItem-tmpl');
EventsLib.period_selector_elm = $('#events-period-selector');
EventsLib.category_selector_elm = $('#adv-search-categories');
EventsLib.initiative_selector_elm = $('#adv-search-initiatives');
EventsLib.attendance_selector_elm = $('#adv-search-attendance');
EventsLib.events_number_elm = $('#events-number');
EventsLib.events_table_elm = $('#events-table');
EventsLib.search_loading_icon_elm = $('#search-loading-icon');
EventsLib.search_ready_icon_elm = $('#search-ready-icon');
EventsLib.events_loading_wrapper_elm = $('#events-loading-wrapper');
EventsLib.map_overlay_elm = $('#map-overlay');
EventsLib.datepicker_start_elm = $('#date-start');
EventsLib.datepicker_end_elm = $('#date-end');
EventsLib.multi_e_ical_elm = $('#icalendar-export-button');
EventsLib.adv_search_elm = $('#adv-search');
EventsLib.adv_search_icon_elm = $('#adv-search-icon-events');
EventsLib.datepicker_elm = $('.datepicker');
EventsLib.timeline_overlay_elm = $('#timeline-overlay');
EventsLib.event_timeline_elm = $('#event-timeline');
EventsLib.window_elm = $(window);
EventsLib.location_elm = $(location);
EventsLib.trigger_timeout = undefined;
EventsLib.allset = false;
EventsLib.offset = 0;
EventsLib.window_offset = 450;
EventsLib.results_batch = 21;
var MAPBOX_TOKEN = $('body').data('mapbox-token');
var ATTENDANCE_LEVEL = {
'10': [null, 10],
'50': [11, 50],
'100': [51, 100],
'500': [101, 500],
'1000': [501, 1000],
'2000': [1000, null]
};
function initialize_map() {
// Initialize map.
var center = new L.LatLng(25, 0); // geographical point (longitude and latitude)
EventsLib.map = L.mapbox.map('map', MAPBOX_TOKEN, {minZoom: 1});
addAttributionOSM(EventsLib.map);
EventsLib.map.setView(center, 2);
L.control.locate({
setView: false,
onLocationError: handleLocationError,
locateOptions: {
watch: false
}
}).addTo(EventsLib.map);
EventsLib.map.on('locationfound', handleLocationFound);
// When user clicks on map and a search filter exists, remove filter.
EventsLib.map.on('click', function(e) {
var val = EventsLib.searchfield_elm.val();
if (val !== '') {
search_string = '';
EventsLib.searchfield_elm.val(search_string);
EventsLib.searchfield_elm.trigger('input');
}
});
}
function handleLocationError() {
// Show message when geolocation fails
var msg = 'Sorry, we could not determine your location.';
showMessage({
'msg': msg,
'tag': 'warning'
});
}
function handleLocationFound(e) {
// setView and zoom map when geolocation succeeds
EventsLib.map.setView(e.latlng, 5);
}
function clear_map() {
// Remove pointer layers from map.
for (var marker in EventsLib.markers_array) {
EventsLib.map.removeLayer(EventsLib.markers_array[marker]);
}
EventsLib.markers_array = [];
}
function add_pointers() {
// Add user pointers on map.
$('.event-item').each(function(index, item) {
var lat = $(item).data('lat');
var lon = $(item).data('lon');
var markerLocation = new L.LatLng(lat, lon);
var marker = new L.Marker(markerLocation);
// Clicking on a pointer makes others disappear if visible, or
// otherwise appear.
marker.on('click', function(e) {
var val = EventsLib.searchfield_elm.val();
var name = $(item).data('name').toString();
if (val === name) {
search_string = '';
}
else {
search_string = name;
}
EventsLib.searchfield_elm.val(search_string);
EventsLib.searchfield_elm.trigger('input');
});
EventsLib.map.addLayer(marker);
EventsLib.markers_array.push(marker);
});
}
function dateFormatter(date) {
var day = date.getDate();
var month = date.getMonth() + 1;
var year = date.getFullYear();
return year + ',' + month + ',' + day;
}
function initialize_timeline(events, enable) {
var event_timeline = {};
var timeline = {};
timeline.headline = 'Events';
timeline.type = 'default';
if (enable && events.objects.length > 0) {
var dates = [];
events.objects.forEach(function(item) {
var start = Date.parse(item.start);
var date_start = new Date(start);
var end = Date.parse(item.end);
var date_end = new Date(end);
var elm = {};
elm.startDate = dateFormatter(date_start);
elm.endDate = dateFormatter(date_end);
elm.headline = '<a href="'+item.event_url+'">'+item.name+'</a>';
dates.push(elm);
});
timeline.date = dates;
event_timeline.timeline = timeline;
EventsLib.event_timeline_elm.empty();
EventsLib.timeline_overlay_elm.appendTo(EventsLib.event_timeline_elm);
EventsLib.timeline_overlay_elm.hide();
createStoryJS({type: 'timeline',
width: '980',
height: '300',
source: event_timeline,
embed_id: 'event-timeline',
debug: false});
} else {
EventsLib.event_timeline_elm.empty();
EventsLib.timeline_overlay_elm.appendTo(EventsLib.event_timeline_elm);
EventsLib.timeline_overlay_elm.show();
}
}
function show_timeline() {
var enable = false;
var data;
var period = hash_get_value('period');
var valid_period = $.inArray(period, ['custom', 'future']);
if (EventsLib.allset) {
data = JSON.parse(EventsLib.request.responseText);
enable = (parseInt(data.meta.total_count, 10) < 100 && valid_period >= 0);
initialize_timeline(data, enable);
} else {
send_query(true);
}
}
function ical_url(period, start, end, search) {
var search_term = function(key, value) {
return key + '/' + value + '/';
};
var url = '/events/';
url += search_term('period', period);
if (period === 'custom') {
if (start) {
url += search_term('start', start);
}
if (end) {
url += search_term('end', end);
}
}
if (search) {
url += search_term('search', search);
}
url += 'ical/';
return url;
}
function bind_events() {
// Bind events
// Update hash, on search input update.
EventsLib.searchfield_elm.bind('propertychange keyup input paste', function(event) {
hash_set_value('search', EventsLib.searchfield_elm.val());
});
EventsLib.period_selector_elm.change(function() {
var period = EventsLib.period_selector_elm.val();
hash_set_value('period', period);
if (period === 'custom') {
var start_date = EventsLib.datepicker_start_elm.datepicker('getDate');
var end_date = EventsLib.datepicker_end_elm.datepicker('getDate');
if (start_date) {
hash_set_value('start', $.datepicker.formatDate('yy-mm-dd', start_date));
}
if (end_date) {
hash_set_value('end', $.datepicker.formatDate('yy-mm-dd', end_date));
}
}
});
EventsLib.category_selector_elm.change(function() {
hash_set_value('category', EventsLib.category_selector_elm.val());
});
EventsLib.initiative_selector_elm.change(function() {
hash_set_value('initiative', EventsLib.initiative_selector_elm.val());
});
EventsLib.attendance_selector_elm.change(function() {
hash_set_value('attendance', EventsLib.attendance_selector_elm.val());
});
EventsLib.window_elm.bind('hashchange', function(e) {
// Set icon.
EventsLib.search_ready_icon_elm.hide();
EventsLib.search_loading_icon_elm.show();
clearTimeout(EventsLib.trigger_timeout);
EventsLib.trigger_timeout = setTimeout(function() {
send_query(newquery=true);
}, 400);
});
}
function unbind_events() {
// Unbind events
EventsLib.searchfield_elm.unbind('propertychange keyup input paste');
EventsLib.period_selector_elm.unbind('change');
EventsLib.window_elm.unbind('hashchange');
}
function set_number_of_events(number_of_events) {
// Display the number of visible reps.
number_of_events = parseInt(number_of_events, 10);
EventsLib.number_of_events = number_of_events;
//set the suffix according to period
var search_type = hash_get_value('period');
var suffix = '';
if (search_type === 'custom') {
var start_date = hash_get_value('start');
var end_date = hash_get_value('end');
if (start_date && end_date) {
suffix = ' for period ' + start_date + ' to ' + end_date;
}
else if (start_date) {
suffix = ' from ' + start_date;
}
else if (end_date) {
suffix = ' till ' + end_date;
}
}
else if (search_type === 'future'){
suffix = ' currently or in the future';
}
else if (search_type === 'past'){
suffix = ' in the past';
}
if (number_of_events === 0) {
EventsLib.events_table_elm.hide();
EventsLib.events_number_elm.html('Sorry, no events found' + suffix + '.');
}
else {
EventsLib.events_table_elm.show();
if (number_of_events === 1) {
EventsLib.events_number_elm.html('1 event found' + suffix + '.');
}
else {
EventsLib.events_number_elm.html(number_of_events + ' events found' + suffix + '.');
}
}
}
function request_error(query, status) {
// Unset data-searching after half a second to deal with API timeouts.
EventsLib.searchfield_elm.data('searching', undefined);
EventsLib.search_loading_icon_elm.hide();
EventsLib.search_ready_icon_elm.show();
EventsLib.events_loading_wrapper_elm.hide();
}
function handle_xhr_response(value, newquery, past_events) {
return function(event) {
if (EventsLib.request.status === 200) {
update_results(JSON.parse(EventsLib.request.responseText), value, newquery, past_events);
}
else {
request_error();
}
};
}
var update_results = function(data, query, newquery, past_events) {
if (EventsLib.location_elm.attr('hash').substring(2) !== query) {
return;
}
EventsLib.search_loading_icon_elm.hide();
EventsLib.search_ready_icon_elm.show();
EventsLib.events_loading_wrapper_elm.hide();
if (newquery) {
clear_map();
EventsLib.events_table_body_elm.empty();
}
set_number_of_events(data.meta.total_count);
if (!data.meta.next) {
EventsLib.allset = true;
}
else {
EventsLib.offset = parseInt(data.meta.offset, 10) + EventsLib.results_batch;
}
source = EventsLib.eventsitem_tmpl_elm.html();
template = Handlebars.compile(source);
$('#events-table-body').append(template(data.objects));
EventsLib.searchfield_elm.data('searching', undefined);
// Check if query result has less than 100 events
// and period is either 'custom' or 'future'
var period = hash_get_value('period');
var valid_period = $.inArray(period, ['custom', 'future']);
var enable = (parseInt(data.meta.total_count, 10)<100 && valid_period>=0);
initialize_timeline(data, enable);
if (past_events && parseInt(data.meta.total_count, 10) > EventsLib.results_batch) {
EventsLib.map_overlay_elm.show();
}
else {
EventsLib.map_overlay_elm.hide();
setTimeout(function() { add_pointers(); }, 500);
}
};
function UTCDateString(d){
return (d.getUTCFullYear() + '-' +
pad2(d.getUTCMonth() + 1) + '-' +
pad2(d.getUTCDate()));
}
function LocalDateString(d){
return (d.getFullYear() + '-' +
pad2(d.getMonth() + 1) + '-' +
pad2(d.getDate()));
}
function send_query(newquery) {
var past_events = true;
var extra_q = '';
var value = EventsLib.location_elm.attr('hash').substring(2);
if (newquery) {
EventsLib.allset = false;
EventsLib.offset = 0;
}
else {
newquery = false;
}
var API_URL = '/api/v1/event/?offset=' + EventsLib.offset;
if ((EventsLib.searchfield_elm.data('searching') === API_URL && !newquery) || (!newquery && EventsLib.allset)) {
return;
}
// Show bottom loading icon.
EventsLib.events_loading_wrapper_elm.show();
EventsLib.searchfield_elm.data('searching', API_URL);
// Unbind change events to avoid triggering twice the same action.
unbind_events();
// Period selector.
var period = hash_get_value('period');
var start_date = null;
var end_date = null;
var start = hash_get_value('start');
var end = hash_get_value('end');
if (period === 'future') {
start_date = new Date();
}
if (period === 'past') {
end_date = new Date();
}
if (period !== 'custom') {
if (start) {
hash_set_value('start', '');
}
if (end) {
hash_set_value('end', '');
}
}
set_dropdown_value(EventsLib.period_selector_elm, period);
if (period) {
var today = new Date();
var today_utc_string = UTCDateString(today);
if (period === 'future') {
extra_q += '&limit=0';
extra_q += '&start__gte=' + today_utc_string;
past_events = false;
}
else if (period === 'past') {
extra_q += '&order_by=-end';
extra_q += '&limit=' + EventsLib.results_batch;
extra_q += '&start__lt=' + today_utc_string;
}
else if (period === 'all') {
extra_q += '&limit=' + EventsLib.results_batch;
extra_q += '&start__gt=1970-01-01';
}
else if (period === 'custom') {
if (start) {
EventsLib.datepicker_start_elm.datepicker('setDate', start);
}
if (end) {
EventsLib.datepicker_end_elm.datepicker('setDate', end);
}
start_date = EventsLib.datepicker_start_elm.datepicker('getDate');
end_date = EventsLib.datepicker_end_elm.datepicker('getDate');
extra_q += '&limit=0';
if (start_date) {
var start_utc_string = LocalDateString(start_date);
extra_q += '&start__gte=' + start_utc_string;
}
if (end_date) {
var end_utc_string = LocalDateString(end_date);
extra_q += '&end__lte=' + end_utc_string;
}
}
EventsLib.datepicker_start_elm.datepicker('setDate', start_date);
EventsLib.datepicker_end_elm.datepicker('setDate', end_date);
}
// Search term.
var search = hash_get_value('search');
EventsLib.searchfield_elm.val(search);
if (search) {
extra_q += '&query=' + search;
}
// Update iCAL url
EventsLib.multi_e_ical_elm.attr('href', ical_url(period, start, end, search));
var category = hash_get_value('category');
var initiative = hash_get_value('initiative');
var attendance = hash_get_value('attendance');
set_dropdown_value(EventsLib.category_selector_elm, category);
set_dropdown_value(EventsLib.initiative_selector_elm, initiative);
set_dropdown_value(EventsLib.attendance_selector_elm, attendance);
if (category) {
extra_q += '&categories__name__iexact=' + category;
}
if (initiative) {
extra_q += '&campaign__name__iexact=' + initiative;
}
if (attendance) {
var attendance_range = ATTENDANCE_LEVEL[attendance];
if (attendance_range[0]) {
extra_q += '&estimated_attendance__gte=' + attendance_range[0];
}
if (attendance_range[1]) {
extra_q += '&estimated_attendance__lte=' + attendance_range[1];
}
}
if (period === 'custom' || category || initiative) {
EventsLib.adv_search_elm.slideDown();
}
else if (period === 'future' || period === 'past' || period === 'all') {
EventsLib.adv_search_elm.slideUp();
}
// Abort previous request
if (EventsLib.request) {
EventsLib.request.abort();
}
EventsLib.request = new XMLHttpRequest();
EventsLib.request.open('GET', API_URL + extra_q, true);
EventsLib.request.onload = handle_xhr_response(value, newquery, past_events);
EventsLib.request.onerror = request_error;
EventsLib.request.send();
// Rebind events.
bind_events();
}
function loader_canvas_icon_init() {
// Initialize bottom loader.
var cl = new CanvasLoader('events-loading');
cl.setColor('#888888'); // default is '#000000'
cl.setDiameter(24); // default is 40
cl.setDensity(30); // default is 40
cl.setRange(0.8); // default is 1.3
cl.setFPS(23); // default is 24
cl.show(); // Hidden by default
// Initialize search loader.
var sl = new CanvasLoader('search-loading-icon');
sl.setColor('#888888'); // default is '#000000'
sl.setDiameter(24); // default is 40
sl.setDensity(30); // default is 40
sl.setRange(0.8); // default is 1.3
sl.setFPS(23); // default is 24
sl.show(); // Hidden by default
}
$(document).ready(function () {
EventsLib.event_timeline_elm.hide();
$('#events-map-button').click(function (e) {
e.preventDefault();
EventsLib.event_timeline_elm.fadeOut('fast');
$('#map').fadeIn('slow');
$(this).parent().addClass('active');
$('#events-timeline-button').parent().removeClass('active');
});
$('#events-timeline-button').click(function (e) {
e.preventDefault();
$('#map').fadeOut('fast');
EventsLib.event_timeline_elm.empty().show();
show_timeline();
$(this).parent().addClass('active');
$('#events-map-button').parent().removeClass('active');
});
initialize_map();
// Click geolocation button on load
$('a[title="Show me where I am"]', '#map')[0].click();
EventsLib.searchform_elm.submit(function (event) {
event.preventDefault();
});
var period = hash_get_value('period');
if (!period) {
period = 'future';
hash_set_value('period', period);
}
// Advanced button click.
EventsLib.adv_search_icon_elm.click(function() {
var visible = EventsLib.adv_search_elm.is(':visible');
EventsLib.adv_search_elm.slideToggle();
});
//Initiate datepicker
EventsLib.datepicker_elm.datepicker({
onSelect: function(selectedDate) {
var period = hash_get_value('period');
if (period !== 'custom') {
hash_set_value('period', 'custom');
}
if (this.id == 'date-start') {
if (EventsLib.datepicker_start_elm.val() === '') {
hash_set_value('start', '');
}
else{
hash_set_value('start', selectedDate);
}
}
if (this.id == 'date-end') {
if (EventsLib.datepicker_end_elm.val() === '') {
hash_set_value('end', '');
}
else{
hash_set_value('end', selectedDate);
}
}
send_query(newquery=true);
},
dateFormat: 'yy-mm-dd'
});
EventsLib.datepicker_elm.click(function(){
$(this).datepicker('show');
});
var start_date = hash_get_value('start');
var end_date = hash_get_value('end');
if (start_date) {
EventsLib.datepicker_start_elm.datepicker('setDate', start_date);
}
if (end_date) {
EventsLib.datepicker_end_elm.datepicker('setDate', end_date);
}
// Set values to fields.
set_dropdown_value(EventsLib.period_selector_elm, period);
set_dropdown_value(EventsLib.category_selector_elm, hash_get_value('category'));
set_dropdown_value(EventsLib.initiative_selector_elm, hash_get_value('initiative'));
// Bind events.
bind_events();
// Enable search field when ready.
EventsLib.searchfield_elm.attr('Placeholder', 'Filter using any keyword');
EventsLib.searchfield_elm.removeAttr('disabled');
EventsLib.searchfield_elm.val(hash_get_value('search'));
send_query();
loader_canvas_icon_init();
// Leaflet is loaded, so move map overlay into map div.
EventsLib.map_overlay_elm.appendTo('#map');
// Set infinite scroll.
EventsLib.window_elm.scroll(function(){
if (EventsLib.window_elm.scrollTop() >=
$(document).height() - EventsLib.window_elm.height() - EventsLib.window_offset) {
send_query(newquery=false);
}
});
});
|
package com.thinkgem.jeesite.modules.yipan.web;
import com.thinkgem.jeesite.common.utils.wx.MapXmlUtil;
import com.thinkgem.jeesite.modules.yipan.entity.UnifiedorderResult;
import com.thinkgem.jeesite.modules.yipan.entity.YpUnifiedorder;
import com.thinkgem.jeesite.modules.yipan.service.YpBillService;
import com.thinkgem.jeesite.modules.yipan.service.YpPayService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;
/**
* @ClassName WechatPayController
* @Description 微信支付
* @Author TuTu
* @Date 2020/5/26 11:35
* @Version 1.0
*/
@RestController
@RequestMapping("value = ${adminPath}/yipan/wxpay")
public class WechatPayController {
@Resource
private YpPayService ypPayService;
@Autowired
private YpBillService ypBillService;
/* 获取openId
* @param code
* @return
*/
@RequestMapping("/getOpenId")
public Map<String, Object> getOpenId(@RequestParam("code") String code){
return ypPayService.getOpenId(code);
}
/**
* 统一下单
* @param ypUnifiedorder 统一下单提交为微信的参数
* @return
*/
@RequestMapping("/unifiedorder")
@ResponseBody
public UnifiedorderResult Unifiedorder(@RequestBody YpUnifiedorder ypUnifiedorder) {
return ypPayService.unifiedorder(ypUnifiedorder);
}
/**
*
* 回调方法 此处作业务逻辑处理,到此处说明已支付成功,在这块需要改变订单状态等逻辑处理的信息
* @param request
* @param response
* @return
* @throws IOException
*/
@RequestMapping(value = "/notifyUrl")
public String notifyUrl(HttpServletRequest request, HttpServletResponse response) throws IOException {
String resXml="";
String xmlBack="";
System.err.println("进入异步通知");
InputStream is = request.getInputStream();
//将InputStream转换成String
BufferedReader reader = new BufferedReader(new InputStreamReader(is));
StringBuilder sb = new StringBuilder();
String line = null;
try {
while ((line = reader.readLine()) != null) {
sb.append(line + "\n");
}
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
is.close();
} catch (IOException e) {
e.printStackTrace();
}
}
resXml=sb.toString();
System.err.println(resXml);
Map<String, String> notifyMap = new HashMap<String, String>();
try {
notifyMap = MapXmlUtil.xmltoMap(resXml);
String return_code = notifyMap.get("return_code");//状态
String out_trade_no = notifyMap.get("out_trade_no");//订单号
if("SUCCESS".equals(return_code)){
//TODO 处理自己的业务逻辑 比如修改订单状态
xmlBack = "<xml>" + "<return_code><![CDATA[SUCCESS]]></return_code>" + "<return_msg><![CDATA[OK]]></return_msg>" + "</xml> ";
}else {
xmlBack = "<xml>" + "<return_code><![CDATA[FAIL]]></return_code>" + "<return_msg><![CDATA[报文为空]]></return_msg>" + "</xml> ";
}
} catch (Exception e) {
}
return xmlBack;
}
}
|
#! /bin/bash
#
# Open Asset Import Library
# cross platform 3D model loader
# https://github.com/assimp/assimp
#
# uses CMake
# define the version
VER=3.1.1
# tools for git use
GIT_URL=
# GIT_URL=https://github.com/assimp/assimp.git
GIT_TAG=
FORMULA_TYPES=( "osx" "osx-clang-libc++" "ios" "android" "emscripten" "vs" )
# download the source code and unpack it into LIB_NAME
function download() {
# stable release from source forge
curl -LO "https://github.com/assimp/assimp/archive/v$VER.zip"
unzip -oq "v$VER.zip"
mv "assimp-$VER" assimp
rm "v$VER.zip"
# fix an issue with static libs being disabled - see issue https://github.com/assimp/assimp/issues/271
# this could be fixed fairly soon - so see if its needed for future releases.
if [ "$TYPE" == "ios" ] ; then
echo "iOS"
elif [ "$TYPE" == "vs" ] ; then
#ADDED EXCEPTION, FIX DOESN'T WORK IN VS
echo "VS"
else
echo "$TYPE"
sed -i -e 's/SET ( ASSIMP_BUILD_STATIC_LIB OFF/SET ( ASSIMP_BUILD_STATIC_LIB ON/g' assimp/CMakeLists.txt
sed -i -e 's/option ( BUILD_SHARED_LIBS "Build a shared version of the library" ON )/option ( BUILD_SHARED_LIBS "Build a shared version of the library" OFF )/g' assimp/CMakeLists.txt
fi
}
# prepare the build environment, executed inside the lib src dir
function prepare() {
echo "Prepare"
# if [ "$TYPE" == "ios" ] ; then
# # # patch outdated Makefile.osx provided with FreeImage, check if patch was applied first
# # if patch -p0 -u -N --dry-run --silent < $FORMULA_DIR/assimp.ios.patch 2>/dev/null ; then
# # patch -p0 -u < $FORMULA_DIR/assimp.ios.patch
# # fi
# fi
}
# executed inside the lib src dir
function build() {
rm -f CMakeCache.txt || true
# we don't use the build script for iOS now as it is less reliable than doing it our self
if [ "$TYPE" == "ios" ] ; then
# ref: http://stackoverflow.com/questions/6691927/how-to-build-assimp-library-for-ios-device-and-simulator-with-boost-library
export TOOLCHAIN=$XCODE_DEV_ROOT/Toolchains/XcodeDefault.xctoolchain
export TARGET_IOS
local IOS_ARCHS="armv7 arm64 i386 x86_64" #armv7s
local STDLIB="libc++"
local CURRENTPATH=`pwd`
echo $CURRENTPATH
SDKVERSION=`xcrun -sdk iphoneos --show-sdk-version`
DEVELOPER=$XCODE_DEV_ROOT
TOOLCHAIN=${DEVELOPER}/Toolchains/XcodeDefault.xctoolchain
VERSION=$VER
# Validate environment
case $XCODE_DEV_ROOT in
*\ * )
echo "Your Xcode path contains whitespaces, which is not supported."
exit 1
;;
esac
case $CURRENTPATH in
*\ * )
echo "Your path contains whitespaces, which is not supported by 'make install'."
exit 1
;;
esac
mkdir -p "builddir/$TYPE"
libsToLink=""
echo $DEVELOPER
# loop through architectures! yay for loops!
for IOS_ARCH in ${IOS_ARCHS}
do
unset IOS_DEVROOT IOS_SDKROOT
unset CC CPP CXX CXXCPP CFLAGS CXXFLAGS LDFLAGS LD AR AS NM RANLIB LIBTOOL
unset EXTRA_PLATFORM_CFLAGS EXTRA_PLATFORM_LDFLAGS
unset CROSS_TOP CROSS_SDK BUILD_TOOLS PLATFORM
export CC=$TOOLCHAIN/usr/bin/clang
export CPP=$TOOLCHAIN/usr/bin/clang++
export CXX=$TOOLCHAIN/usr/bin/clang++
export CXXCPP=$TOOLCHAIN/usr/bin/clang++
export LD=$TOOLCHAIN/usr/bin/ld
export AR=$TOOLCHAIN/usr/bin/ar
export AS=$TOOLCHAIN/usr/bin/as
export NM=$TOOLCHAIN/usr/bin/nm
export RANLIB=$TOOLCHAIN/usr/bin/ranlib
export LIBTOOL=$TOOLCHAIN/usr/bin/libtool
echo "Building $IOS_ARCH "
export PLATFORM=""
if [[ "${IOS_ARCH}" == "i386" || "${IOS_ARCH}" == "x86_64" ]];
then
PLATFORM="iPhoneSimulator"
else
PLATFORM="iPhoneOS"
fi
export CROSS_TOP="${DEVELOPER}/Platforms/${PLATFORM}.platform/Developer"
export CROSS_SDK="${PLATFORM}.sdk"
export BUILD_TOOLS="${DEVELOPER}"
MIN_IOS_VERSION=$IOS_MIN_SDK_VER
# min iOS version for arm64 is iOS 7
if [[ "${IOS_ARCH}" == "arm64" || "${IOS_ARCH}" == "x86_64" ]]; then
MIN_IOS_VERSION=7.0 # 7.0 as this is the minimum for these architectures
elif [ "${IOS_ARCH}" == "i386" ]; then
MIN_IOS_VERSION=5.1 # 6.0 to prevent start linking errors
fi
MIN_TYPE=-miphoneos-version-min=
if [[ "${IOS_ARCH}" == "i386" || "${IOS_ARCH}" == "x86_64" ]]; then
MIN_TYPE=-mios-simulator-version-min=
fi
export EXTRA_PLATFORM_CFLAGS="$"
export EXTRA_PLATFORM_LDFLAGS="$ -L${CROSS_TOP}/SDKs/$CROSS_SDK/usr/lib/"
echo $EXTRA_PLATFORM_LDFLAGS
EXTRA_LINK_FLAGS="-arch $IOS_ARCH -stdlib=libc++ -Os -DHAVE_UNISTD_H=1 -DNDEBUG -fPIC "
EXTRA_FLAGS="$EXTRA_LINK_FLAGS -pipe -no-cpp-precomp -funroll-loops $MIN_TYPE$MIN_IOS_VERSION -isysroot ${CROSS_TOP}/SDKs/${CROSS_SDK} -I${CROSS_TOP}/SDKs/${CROSS_SDK}/usr/include/"
unset CFLAGS LDFLAGS CPPFLAGS CXXFLAGS DEVROOT SDKROOT
export LDFLAGS="$EXTRA_LINK_FLAGS $EXTRA_PLATFORM_LDFLAGS -std=c++11"
export DEVROOT="$CROSS_TOP"
export SDKROOT="$CROSS_SDK"
export CFLAGS="$EXTRA_FLAGS -std=c11"
export CPPFLAGS="$EXTRA_FLAGS -std=c++11"
export CXXFLAGS="$EXTRA_FLAGS -std=c++11"
#echo " out c_flags are $OUR_CFLAGS "
rm -f CMakeCache.txt
cmake -G 'Unix Makefiles' -DCMAKE_TOOLCHAIN_FILE=./port/iOS/IPHONEOS_$(echo $IOS_ARCH | tr '[:lower:]' '[:upper:]')_TOOLCHAIN.cmake -DASSIMP_ENABLE_BOOST_WORKAROUND=1 -DASSIMP_BUILD_STATIC_LIB=1 -DBUILD_SHARED_LIBS=0 -DCMAKE_C_FLAGS="$EXTRA_FLAGS" -DCMAKE_CXX_FLAGS="$EXTRA_FLAGS" -DCMAKE_CXX_FLAGS="$EXTRA_FLAGS".
$XCODE_DEV_ROOT/usr/bin/make clean
echo "--------------------"
echo "Running make for ${IOS_ARCH}"
echo "Please stand by..."
$XCODE_DEV_ROOT/usr/bin/make assimp -j 8 -l
fileToRenameTo="./builddir/$TYPE/libassimp-$IOS_ARCH.a"
mv ./lib/libassimp.a $fileToRenameTo
libsToLink="$libsToLink $fileToRenameTo"
$XCODE_DEV_ROOT/usr/bin/make clean
echo "--------------------"
done
mkdir -p "lib/$TYPE"
cd "./builddir/$TYPE/"
# link into universal lib
echo "Running lipo to create fat lib"
echo "Please stand by..."
SDKVERSION=`xcrun -sdk iphoneos --show-sdk-version`
DEVELOPER=$XCODE_DEV_ROOT
TOOLCHAIN=${DEVELOPER}/Toolchains/XcodeDefault.xctoolchain
# libassimp-armv7s.a \
$TOOLCHAIN/usr/bin/lipo -create libassimp-armv7.a \
libassimp-arm64.a \
libassimp-i386.a \
libassimp-x86_64.a \
-output "../../lib/$TYPE/assimp.a"
cd ../../
if [ $? != 0 ];
then
echo "Problem while creating fat lib with lipo"
exit 1
else
echo "Lipo Successful."
fi
echo "--------------------"
echo "Stripping any lingering symbols"
set -e
CURRENTPATH=`pwd`
cd lib/$TYPE
SLOG="$CURRENTPATH/lib/$TYPE-stripping.log"
local TOBESTRIPPED
for TOBESTRIPPED in $( ls -1) ; do
$TOOLCHAIN/usr/bin/strip -x $TOBESTRIPPED >> "${SLOG}" 2>&1
if [ $? != 0 ];
then
tail -n 100 "${LOG}"
echo "Problem while stripping lib - Please check ${SLOG}"
exit 1
else
echo "Strip Successful for ${SLOG}"
fi
done
cd ../../
echo "--------------------"
echo "Completed Assimp for $TYPE"
fi
if [ "$TYPE" == "osx" ] ; then
# warning, assimp on github uses the ASSIMP_ prefix for CMake options ...
# these may need to be updated for a new release
local buildOpts="--build build/$TYPE -DASSIMP_BUILD_STATIC_LIB=1 -DASSIMP_BUILD_SHARED_LIB=0 -DASSIMP_ENABLE_BOOST_WORKAROUND=1"
# 32 bit
cmake -G 'Unix Makefiles' $buildOpts -DCMAKE_C_FLAGS="-arch i386 -arch x86_64 -O3 -DNDEBUG -funroll-loops" -DCMAKE_CXX_FLAGS="-arch i386 -arch x86_64 -stdlib=libc++ -O3 -DNDEBUG -funroll-loops" .
make assimp -j${PARALLEL_MAKE}
elif [ "$TYPE" == "vs" ] ; then
#architecture selection inspired int he tess formula, shouldn't build both architectures in the same run?
echo "building $TYPE | $ARCH | $VS_VER"
echo "--------------------"
local buildOpts=" -DASSIMP_BUILD_STATIC_LIB=0 -DASSIMP_ENABLE_BOOST_WORKAROUND=1 -DASSIMP_BUILD_ASSIMP_TOOLS=0"
local generatorName="Visual Studio "
generatorName+=$VS_VER
if [ $ARCH == 32 ] ; then
mkdir -p build_vs_32
cd build_vs_32
cmake .. -G "$generatorName" $buildOpts
vs-build "Assimp.sln" build "Release|Win32"
elif [ $ARCH == 64 ] ; then
mkdir -p build_vs_64
cd build_vs_64
generatorName+=' Win64'
cmake .. -G "$generatorName" $buildOpts
vs-build "Assimp.sln" build "Release|x64"
fi
cd ..
#cleanup to not fail if the other platform is called
rm -f CMakeCache.txt
echo "--------------------"
echo "Completed Assimp for $TYPE | $ARCH | $VS_VER"
elif [ "$TYPE" == "msys2" ] ; then
echoWarning "TODO: msys2 build"
elif [ "$TYPE" == "android" ] ; then
# warning, assimp on github uses the ASSIMP_ prefix for CMake options ...
# these may need to be updated for a new release
local buildOpts="--build build/$TYPE -DASSIMP_BUILD_STATIC_LIB=1 -DASSIMP_BUILD_SHARED_LIB=0 -DASSIMP_ENABLE_BOOST_WORKAROUND=1 -DASSIMP_ENABLE_BOOST_WORKAROUND=1"
# arm
ABI=armeabi-v7a
source ../../android_configure.sh $ABI
mkdir -p build_arm
cd build_arm
cmake -G 'Unix Makefiles' $buildOpts -DCMAKE_C_FLAGS="-O3 -DNDEBUG $CFLAGS" -DCMAKE_CXX_FLAGS="-O3 -DNDEBUG $CFLAGS" -DCMAKE_LD_FLAGS="$LDFLAGS" ..
make assimp -j${PARALLEL_MAKE}
cd ..
# x86
ABI=x86
source ../../android_configure.sh $ABI
mkdir -p build_x86
cd build_x86
cmake -G 'Unix Makefiles' $buildOpts -DCMAKE_C_FLAGS="-O3 -DNDEBUG $CFLAGS" -DCMAKE_CXX_FLAGS="-O3 -DNDEBUG $CFLAGS" -DCMAKE_LD_FLAGS="$LDFLAGS" ..
make assimp -j${PARALLEL_MAKE}
cd ..
elif [ "$TYPE" == "emscripten" ] ; then
# warning, assimp on github uses the ASSIMP_ prefix for CMake options ...
# these may need to be updated for a new release
local buildOpts="--build build/$TYPE -DASSIMP_BUILD_STATIC_LIB=1 -DASSIMP_BUILD_SHARED_LIB=0 -DASSIMP_ENABLE_BOOST_WORKAROUND=1"
mkdir -p build_emscripten
cd build_emscripten
emcmake cmake -G 'Unix Makefiles' $buildOpts -DCMAKE_C_FLAGS="-O3 -DNDEBUG" -DCMAKE_CXX_FLAGS="-O3 -DNDEBUG" ..
emmake make assimp -j${PARALLEL_MAKE}
cd ..
fi
}
# executed inside the lib src dir, first arg $1 is the dest libs dir root
function copy() {
# headers
mkdir -p $1/include
rm -r $1/include/assimp || true
rm -r $1/include/* || true
cp -Rv include/* $1/include
# libs
mkdir -p $1/lib/$TYPE
if [ "$TYPE" == "vs" ] ; then
if [ $ARCH == 32 ] ; then
mkdir -p $1/lib/$TYPE/Win32
cp -v build_vs_32/code/Release/assimp.lib $1/lib/$TYPE/Win32/assimp.lib
cp -v build_vs_32/code/Release/assimp.dll ../../../../export/vs/Win32/assimp.dll
elif [ $ARCH == 64 ] ; then
mkdir -p $1/lib/$TYPE/x64
cp -v build_vs_64/code/Release/assimp.lib $1/lib/$TYPE/x64/assimp.lib
cp -v build_vs_64/code/Release/assimp.dll ../../../../export/vs/x64/assimp.dll
fi
elif [ "$TYPE" == "osx" ] ; then
cp -Rv lib/libassimp.a $1/lib/$TYPE/assimp.a
elif [ "$TYPE" == "ios" ] ; then
cp -Rv lib/$TYPE/assimp.a $1/lib/$TYPE/assimp.a
elif [ "$TYPE" == "android" ]; then
mkdir -p $1/lib/$TYPE/armeabi-v7a/
mkdir -p $1/lib/$TYPE/x86/
cp -Rv build_arm/code/libassimp.a $1/lib/$TYPE/armeabi-v7a/libassimp.a
cp -Rv build_x86/code/libassimp.a $1/lib/$TYPE/x86/libassimp.a
elif [ "$TYPE" == "emscripten" ]; then
cp -Rv build_emscripten/code/libassimp.a $1/lib/$TYPE/libassimp.a
fi
# copy license files
rm -rf $1/license # remove any older files if exists
mkdir -p $1/license
cp -v LICENSE $1/license/
}
# executed inside the lib src dir
function clean() {
if [ "$TYPE" == "vs" ] ; then
if [ $ARCH == 32 ] ; then
vs-clean "build_vs_32/Assimp.sln";
elif [ $ARCH == 64 ] ; then
vs-clean "build_vs_64/Assimp.sln";
fi
rm -f CMakeCache.txt
echo "Assimp VS | $TYPE | $ARCH cleaned"
elif [ "$TYPE" == "android" ] ; then
echoWarning "TODO: clean android"
else
make clean
make rebuild_cache
rm -f CMakeCache.txt
fi
}
|
package com.thinkaurelius.titan;
import com.thinkaurelius.titan.diskstorage.cassandra.embedded.CassandraEmbeddedStoreManager;
import com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration;
import org.apache.commons.configuration.BaseConfiguration;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.lang.StringUtils;
import java.io.File;
public class CassandraStorageSetup {
public static final String CASSANDRA_TEMP_PATH = System.getProperty("user.dir")
+ File.separator + "target"
+ File.separator + "cassandra-temp";
public static final String cassandraYamlPath = StringUtils.join(
new String[]{"file://", System.getProperty("user.dir"), "target",
"cassandra-tmp", "conf", "127.0.0.1", "cassandra.yaml"},
File.separator);
public static final String cassandraOrderedYamlPath = StringUtils.join(
new String[]{"file://", System.getProperty("user.dir"), "target",
"cassandra-tmp", "conf", "127.0.0.1", "cassandra-ordered.yaml"},
File.separator);
public static Configuration getCassandraStorageConfiguration() {
BaseConfiguration config = new BaseConfiguration();
return config;
}
public static Configuration getEmbeddedCassandraStorageConfiguration(boolean ordered) {
Configuration config = getCassandraStorageConfiguration();
if (ordered)
config.addProperty(
CassandraEmbeddedStoreManager.CASSANDRA_CONFIG_DIR_KEY,
cassandraOrderedYamlPath);
else
config.addProperty(
CassandraEmbeddedStoreManager.CASSANDRA_CONFIG_DIR_KEY,
cassandraYamlPath);
return config;
}
public static Configuration getAstyanaxGraphConfiguration() {
BaseConfiguration config = new BaseConfiguration();
config.subset(GraphDatabaseConfiguration.STORAGE_NAMESPACE).addProperty(GraphDatabaseConfiguration.STORAGE_BACKEND_KEY, "astyanax");
return config;
}
public static Configuration getCassandraGraphConfiguration() {
Configuration config = new BaseConfiguration();
config.subset(GraphDatabaseConfiguration.STORAGE_NAMESPACE).addProperty(GraphDatabaseConfiguration.STORAGE_BACKEND_KEY, "cassandra");
return config;
}
public static Configuration getCassandraThriftGraphConfiguration() {
Configuration config = new BaseConfiguration();
config.subset(GraphDatabaseConfiguration.STORAGE_NAMESPACE).addProperty(GraphDatabaseConfiguration.STORAGE_BACKEND_KEY, "cassandrathrift");
return config;
}
public static Configuration getEmbeddedCassandraGraphConfiguration() {
Configuration config = new BaseConfiguration();
config.subset(GraphDatabaseConfiguration.STORAGE_NAMESPACE).addProperty(GraphDatabaseConfiguration.STORAGE_BACKEND_KEY, "embeddedcassandra");
config.subset(GraphDatabaseConfiguration.STORAGE_NAMESPACE).addProperty(
CassandraEmbeddedStoreManager.CASSANDRA_CONFIG_DIR_KEY,
cassandraYamlPath);
return config;
}
public static Configuration getEmbeddedCassandraPartitionGraphConfiguration() {
Configuration config = new BaseConfiguration();
config.subset(GraphDatabaseConfiguration.STORAGE_NAMESPACE).addProperty(GraphDatabaseConfiguration.STORAGE_BACKEND_KEY, "embeddedcassandra");
config.subset(GraphDatabaseConfiguration.STORAGE_NAMESPACE).addProperty(
CassandraEmbeddedStoreManager.CASSANDRA_CONFIG_DIR_KEY,
cassandraOrderedYamlPath);
config.subset(GraphDatabaseConfiguration.IDS_NAMESPACE).addProperty(GraphDatabaseConfiguration.IDS_PARTITION_KEY, true);
config.subset(GraphDatabaseConfiguration.IDS_NAMESPACE).addProperty(GraphDatabaseConfiguration.IDS_FLUSH_KEY, false);
return config;
}
}
|
{ path: 'books/add', component: AddBookComponent },
{ path: 'books/edit/:bookId', component: EditBookComponent },
{ path: 'profile', component: UserProfileComponent },
|
<reponame>IanCassTwo/AkamaiOPEN-edgegrid-golang<filename>firewallrules-v1/firewallrules_test.go
package firewallrules
import (
"fmt"
"github.com/stretchr/testify/assert"
"gopkg.in/h2non/gock.v1"
"testing"
)
func TestListSubscriptions(t *testing.T) {
defer gock.Off()
mock := gock.New(fmt.Sprintf("https://akaa-baseurl-xxxxxxxxxxx-xxxxxxxxxxxxx.luna.akamaiapis.net/firewall-rules-manager/v1/subscriptions"))
mock.
Get("/firewall-rules-manager/v1/subscriptions").
HeaderPresent("Authorization").
Reply(200).
SetHeader("Content-Type", "application/json;charset=UTF-8").
BodyString(`
{
"subscriptions": [
{
"description": "Edge Staging Network",
"email": "<EMAIL>",
"serviceId": 7,
"serviceName": "ESN",
"signupDate": "2020-04-24"
},
{
"description": "Development Test IPs",
"email": "<EMAIL>",
"serviceId": 13,
"serviceName": "Test IPs",
"signupDate": "2020-04-24"
}
]
}
`)
Init(config)
response, err := ListSubscriptions()
assert.NoError(t, err)
assert.Equal(t, assert.IsType(t, &ListSubscriptionsResponse{}, response), true)
assert.Equal(t, len(response.Subscriptions), 2)
}
func TestListServices(t *testing.T) {
defer gock.Off()
mock := gock.New(fmt.Sprintf("https://akaa-baseurl-xxxxxxxxxxx-xxxxxxxxxxxxx.luna.akamaiapis.net/firewall-rules-manager/v1/services"))
mock.
Get("/firewall-rules-manager/v1/services").
HeaderPresent("Authorization").
Reply(200).
SetHeader("Content-Type", "application/json;charset=UTF-8").
BodyString(`
[
{
"serviceId": 7,
"serviceName": "ESN",
"description": "Edge Staging Network"
},
{
"serviceId": 13,
"serviceName": "Test IPs",
"description": "Development Test IPs"
}
]
`)
Init(config)
response, err := ListServices()
assert.NoError(t, err)
assert.Equal(t, assert.IsType(t, &ListServicesResponse{}, response), true)
assert.Equal(t, len(*response), 2)
}
func TestListCidrBlocks(t *testing.T) {
defer gock.Off()
mock := gock.New(fmt.Sprintf("https://akaa-baseurl-xxxxxxxxxxx-xxxxxxxxxxxxx.luna.akamaiapis.net/firewall-rules-manager/v1/cidr-blocks"))
mock.
Get("/firewall-rules-manager/v1/cidr-blocks").
HeaderPresent("Authorization").
Reply(200).
SetHeader("Content-Type", "application/json;charset=UTF-8").
BodyString(`
[
{
"changeDate": null,
"cidr": "172.16.58.3",
"cidrId": 303,
"cidrMask": "/31",
"creationDate": "2007-09-27",
"description": "Secure Edge Staging Network",
"effectiveDate": "2007-10-13",
"lastAction": "update",
"maxIp": "172.16.58.3",
"minIp": "172.16.58.3",
"port": "80,443",
"serviceId": 8,
"serviceName": "SESN"
},
{
"changeDate": null,
"cidr": "192.168.3.11",
"cidrId": 304,
"cidrMask": "/31",
"creationDate": "2007-09-27",
"description": "Secure Edge Staging Network",
"effectiveDate": "2007-10-13",
"lastAction": "update",
"maxIp": "172.16.58.3",
"minIp": "192.168.3.11",
"port": "80,443",
"serviceId": 8,
"serviceName": "SESN"
}
]
`)
Init(config)
response, err := ListCidrBlocks()
assert.NoError(t, err)
assert.Equal(t, assert.IsType(t, &ListCidrBlocksResponse{}, response), true)
assert.Equal(t, len(*response), 2)
}
func TestUpdateSubscriptions(t *testing.T) {
defer gock.Off()
mock := gock.New(fmt.Sprintf("https://akaa-baseurl-xxxxxxxxxxx-xxxxxxxxxxxxx.luna.akamaiapis.net/firewall-rules-manager/v1/subscriptions"))
mock.
Put("/firewall-rules-manager/v1/subscriptions").
HeaderPresent("Authorization").
Reply(200).
SetHeader("Content-Type", "application/json;charset=UTF-8").
BodyString(`
{
"subscriptions": [
{
"email": "<EMAIL>",
"serviceId": 10,
"serviceName": "NETSTORAGE",
"signupDate": "2020-04-24"
}
]
}
`)
Init(config)
// Create a new subscription
var subscription Subscription
subscription.ServiceID = 10
subscription.ServiceName = "NETSTORAGE"
subscription.Email = "<EMAIL>"
subscription.SignupDate = "2020-04-24"
// Add it to the list of subscriptions
var subscriptions = make([]Subscription, 0)
subscriptions = append(subscriptions, subscription)
// Wrap it in a request
var request UpdateSubscriptionsRequest
request.Subscriptions = subscriptions
response, err := UpdateSubscriptions(request)
assert.NoError(t, err)
assert.Equal(t, assert.IsType(t, &UpdateSubscriptionsResponse{}, response), true)
assert.Equal(t, len(response.Subscriptions), 1)
}
|
#!/bin/sh
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Simple posix sh reproducible go build container script with caching.
#
# Usage:
# hack/go_container.sh go version
# hack/go_container.sh go build -o /out/kind .
set -o nounset -o errexit
# ============================ SCRIPT SETTINGS =================================
# get the repo root for defaulting OUT_DIR and SOURCE_DIR
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "$0")/.." && pwd)}"
# output directory, will be mounted to /out, defaults to /bin in REPO_ROOT
OUT_DIR="${OUT_DIR:-${REPO_ROOT}/bin}"
# source directory, will be mounted to /src, defaults to current directory
SOURCE_DIR="${SOURCE_DIR:-$(pwd -P)}"
# default to disabling CGO for easier reproducible builds and cross compilation
export CGO_ENABLED="${CGO_ENABLED:-0}"
# the container image, by default a recent official golang image
GOIMAGE="${GOIMAGE:-golang:1.14.3}"
# docker volume name, used as a go module / build cache
CACHE_VOLUME="${CACHE_VOLUME:-kind-build-cache}"
# allow overriding docker cli, auto-detect with fallback to docker
DOCKER="${DOCKER:-"$(which docker || which podman || echo "docker")"}"
# ========================== END SCRIPT SETTINGS ===============================
# autodetects host GOOS and GOARCH and exports them if not set
detect_and_set_goos_goarch() {
# if we have go, just ask go! NOTE: this respects explicitly set GOARCH / GOOS
if which go >/dev/null 2>&1; then
GOARCH=$(go env GOARCH)
GOOS=$(go env GOOS)
fi
# detect GOOS equivalent if unset
if [ -z "${GOOS:-}" ]; then
case "$(uname -s)" in
Darwin) export GOOS="darwin" ;;
Linux) export GOOS="linux" ;;
*) echo "Unknown host OS! '$(uname -s)'" exit 2 ;;
esac
fi
# detect GOARCH equivalent if unset
if [ -z "${GOARCH:-}" ]; then
case "$(uname -m)" in
x86_64) export GOARCH="amd64" ;;
arm*)
export GOARCH="arm"
if [ "$(getconf LONG_BIT)" = "64" ]; then
export GOARCH="arm64"
fi
;;
*) echo "Unknown host architecture! '$(uname -m)'" exit 2 ;;
esac
fi
export GOOS GOARCH
}
# run $@ in a golang container with caching etc.
run_in_go_container() {
"${DOCKER}" run \
`# docker options: remove container on exit, run as the host user / group` \
--rm --user "$(id -u):$(id -g)" \
`# disable SELinux relabelling /src` \
--security-opt label=disable \
`# golang caching: mount and use the cache volume` \
-v "${CACHE_VOLUME}:/go" -e XDG_CACHE_HOME=/go/cache \
`# mount the output & source dir, set working directory to the source dir` \
-v "${OUT_DIR}:/out" -v "${SOURCE_DIR}:/src" -w "/src" \
`# pass through go settings: modules, proxy, cgo, OS / Arch` \
-e GO111MODULE -e GOPROXY -e CGO_ENABLED -e GOOS -e GOARCH \
`# pass through proxy settings` \
-e HTTP_PROXY -e HTTPS_PROXY -e NO_PROXY \
`# run the image with the args passed to this script` \
"${GOIMAGE}" "$@"
}
mkdir -p "${OUT_DIR}"
"${DOCKER}" volume inspect "${CACHE_VOLUME}" >/dev/null 2>&1 || "${DOCKER}" volume create "${CACHE_VOLUME}" >/dev/null
detect_and_set_goos_goarch
run_in_go_container "$@"
|
'use strict';
self.onmessage = function (e) {
self.postMessage(e.data);
};
|
#!/bin/bash
set -eu
self="$(basename "${BASH_SOURCE[0]}")"
cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"
# get the most recent commit which modified any of "$@"
fileCommit() {
git log -1 --format='format:%H' HEAD -- "$@"
}
# get the most recent commit which modified "$1/Dockerfile" or any file COPY'd from "$1/Dockerfile"
dirCommit() {
local dir="$1"; shift
(
cd "$dir"
fileCommit \
Dockerfile \
"$(git show HEAD:./Dockerfile | awk '
toupper($1) == "COPY" {
for (i = 2; i < NF; i++) {
print $i
}
}
')"
)
}
getArches() {
local repo="$1"; shift
local officialImagesUrl='https://github.com/docker-library/official-images/raw/master/library/'
eval "declare -g -A parentRepoToArches=( $(
find . -name 'Dockerfile' -exec awk '
toupper($1) == "FROM" && $2 !~ /^('"$repo"'|scratch|microsoft\/[^:]+)(:|$)/ {
print "'"$officialImagesUrl"'" $2
}
' '{}' + \
| sort -u \
| xargs bashbrew cat --format '[{{ .RepoName }}:{{ .TagName }}]="{{ join " " .TagEntry.Architectures }}"'
) )"
}
getArches 'yourls'
cat <<-EOH
# this file is generated via https://github.com/YOURLS/docker-yourls/blob/$(fileCommit "$self")/$self
Maintainers: YOURLS <yourls@yourls.org> (@YOURLS),
Léo Colombaro <git@colombaro.fr> (@LeoColomb)
GitRepo: https://github.com/YOURLS/docker-yourls.git
EOH
# prints "$2$1$3$1...$N"
join() {
local sep="$1"; shift
local out; printf -v out "${sep//%/%%}%s" "$@"
echo "${out#$sep}"
}
for variant in apache fpm fpm-alpine; do
commit="$(dirCommit "$variant")"
fullVersion="$(git show "${commit}:${variant}/Dockerfile" | awk '$1 == "ENV" && $2 == "YOURLS_VERSION" { print $3; exit }')"
versionAliases=()
while [ "${fullVersion%[.-]*}" != "$fullVersion" ]; do
versionAliases+=( "$fullVersion" )
fullVersion="${fullVersion%[.-]*}"
done
versionAliases+=(
"$fullVersion"
latest
)
variantAliases=( "${versionAliases[@]/%/-$variant}" )
variantAliases=( "${variantAliases[@]//latest-/}" )
if [ "$variant" = 'apache' ]; then
variantAliases+=( "${versionAliases[@]}" )
fi
variantParent="$(awk 'toupper($1) == "FROM" { print $2 }' "$variant/Dockerfile")"
# shellcheck disable=SC2154
variantArches="${parentRepoToArches[$variantParent]}"
echo
cat <<-EOE
Tags: $(join ', ' "${variantAliases[@]}")
Architectures: $(join ', ' $variantArches)
GitCommit: $commit
Directory: $variant
EOE
done
|
# Define function to calculate cross-validation score
def calculate_score(model, hyperparameters):
# Make predictions
predictions = model.predict(X_validation)
# Calculate validation score
validation_score = model.score(X_prediction, y_prediction)
# Return validation score
return validation_score
# Initialize the best score to infinitely low
best_score = float('-inf')
# Loop over the hyperparameters
for num_iterations in hyperparameters['num_iterations']:
# Set the hyperparameters
model.set_params(num_iterations=num_iterations)
# Train the model
model.fit(X_train, y_train)
# Get the score for this iteration
score = calculate_score(model, hyperparameters)
# If we got a better score, store the hyperparameters
if score > best_score:
best_score = score
best_hyperparameters = {'num_iterations': num_iterations }
# Set the model to the best hyperparameters
model.set_params(**best_hyperparameters)
|
def operate(arr):
new_arr = [i+5 for i in arr]
new_arr = [i-2 for i in new_arr]
new_arr.sort()
return new_arr
arr = [1, 2, 0, 5, 2, 1]
result = operate(arr)
print(result)
# [2, 3, 4, 6, 7, 8]
|
ret=0
read N
read line
for a in $line; do
ret=$[$ret^$a]
done
echo $ret
|
#ifndef COMMON_PUBLIC_PRIORITY_QUEUE_H__
#define COMMON_PUBLIC_PRIORITY_QUEUE_H__
#include <stdbool.h>
#include <stddef.h>
#include "iterator.h"
// A priority queue data structure.
typedef struct PriorityQueue PriorityQueue;
typedef enum {
PQ_MODE_INVALID,
PQ_MODE_SMALLEST_FIRST,
PQ_MODE_LARGEST_FIRST,
} PriorityQueueMode;
// Creates a new PriorityQueue object.
PriorityQueue *PriorityQueue_alloc(RelationalKeyInfo *key_info,
size_t elem_size, PriorityQueueMode mode);
// Gets the number of elements in the PriorityQueue
size_t PriorityQueue_count(const PriorityQueue *queue);
// Returns whether the priority queue is empty
bool PriorityQueue_empty(const PriorityQueue *queue);
// Gets the capacity of the PriorityQueue
size_t PriorityQueue_capacity(const PriorityQueue *queue);
// Reserves memory for a total of `num_elems' objects.
// Capacity always increases by a factor of 2.
// Returns whether the allocation was successful.
bool PriorityQueue_reserve(PriorityQueue *queue, size_t num_elems);
// Removes excess memory for the PriorityQueue object.
// Returns whether the allocation was successful.
bool PriorityQueue_trim(PriorityQueue *queue);
// Frees up the PriorityQueue object.
void PriorityQueue_free(PriorityQueue *queue);
// Gets the size of an element in the PriorityQueue
size_t PriorityQueue_element_size(const PriorityQueue *queue);
// Gets the priority queue mode
PriorityQueueMode PriorityQueue_mode(const PriorityQueue *queue);
// Gets the key info for the PriorityQueue
RelationalKeyInfo *PriorityQueue_key_info(const PriorityQueue *queue);
// Adds the item to the queue and returns pointer to new data.
// Returns NULL if unsuccessful.
void *PriorityQueue_enqueue(PriorityQueue *queue, const void *key,
const void *data);
// Removes an item from the queue and stores it in the given location.
bool PriorityQueue_dequeue(PriorityQueue *queue, void *key_out,
void *data_out);
// Peeks the highest priority item in the queue and stores it in the given
// location.
bool PriorityQueue_peek(PriorityQueue *queue, void *key_out,
void *data_out);
// Removes all the items from the queue.
// Remember to clean up memory first.
void PriorityQueue_clear(PriorityQueue *queue);
// Copies a queue. Returns whether successful.
bool PriorityQueue_copy(PriorityQueue *dest_queue, const PriorityQueue *queue);
// Gets an Iterator for this PriorityQueue. Maintains order.
// Modifies the priority queue!
void PriorityQueue_get_iterator(const PriorityQueue *queue, Iterator *iter);
// Gets a Sink for this queue. Must sink KeyValuePair.
// Sink function returns NULL.
void PriorityQueue_get_sink(const PriorityQueue *queue, Sink *sink);
#endif // COMMON_PUBLIC_PRIORITY_QUEUE_H__
|
# Create a Customer class
class Customer:
# Initialize the data attributes
def __init__(self, name, age):
self.name = name
self.age = age
self.books = {}
# Add a book to the customer's list
def addBook(self, bookName):
self.books[bookName] = True
# Remove a book from the customer's list
def removeBook(self, bookName):
if bookName in self.books:
del self.books[bookName]
# Print the customer's information
def printCustomer(self):
print("Name:", self.name)
print("Age:", self.age)
print("Books borrowed:", self.books)
# Initialize the Customer class
customer = Customer("John Doe", 30)
# Add some books to the customer's list
customer.addBook("The Great Gatsby")
customer.addBook("The Catcher in the Rye")
# Print the customer's information
customer.printCustomer()
|
import Post from 'models/Post'
import sendErrorMessage from 'utils/errorMessage'
import sendMessage from 'utils/message'
const ADD_COMMENT_MESSAGE = 'you have commented on this post'
const REMOVE_COMMENT_MESSAGE = 'you have removed comment from this post'
const resolver = {
Mutation: {
commentPost: async (_, { Input: { postID, text } }, { user: { id } }) => {
try {
const updateObject = {
$push: {
comments: {
user: id,
text,
},
},
$inc: { totalComments: 1 },
}
const update = await Post.updateOne({ _id: postID }, updateObject)
if (!update || !update.nModified) return sendErrorMessage()
return sendMessage(ADD_COMMENT_MESSAGE)
} catch (e) {
return sendErrorMessage(e)
}
},
removeCommentPost: async (
_,
{ Input: { postID, commentID } },
{ user: { id } }
) => {
try {
const post = await Post.findOne(
{ _id: postID },
{
comments: {
$elemMatch: {
_id: commentID,
user: id,
},
},
}
)
if (
!post ||
!post.comments.length ||
post.comments[0].user.toString() !== id
)
return sendErrorMessage()
const update = await Post.updateOne(
{ _id: postID },
{
$pull: {
comments: {
_id: commentID,
user: id,
},
},
$inc: {
totalComments: -1,
},
}
)
if (!update || !update.nModified) return sendErrorMessage()
return sendMessage(REMOVE_COMMENT_MESSAGE)
} catch (e) {
return sendErrorMessage(e)
}
},
editComment: async (
_,
{ Input: { commentID, postID, text } },
{ user: { id } }
) => {
const post = await Post.findOne(
{ _id: postID },
{
comments: {
$elemMatch: {
_id: commentID,
user: id,
},
},
}
)
if (
!post ||
!post.comments.length ||
post.comments[0].user.toString() !== id
)
return sendErrorMessage()
const update = await Post.updateOne(
{ _id: postID, 'comments._id': commentID },
{
$set: {
'comments.$.text': text,
},
}
)
if (!update || !update.nModified) return sendErrorMessage()
return sendMessage('Comment updated successfully')
},
},
}
export default resolver
|
import React from 'react';
class Calculator extends React.Component {
state = {
num1: '',
num2: '',
result: ''
}
handleInputChange = (event) => {
this.setState({[event.target.name]: event.target.value});
}
handleSubmit = (event) => {
event.preventDefault();
this.setState({result: parseInt(this.state.num1) + parseInt(this.state.num2)});
}
render() {
return (
<form onSubmit={this.handleSubmit}>
<label>
First Number:
<input
type="text"
name="num1"
value={this.state.num1}
onChange={this.handleInputChange}
/>
</label>
<label>
Second Number:
<input
type="text"
name="num2"
value={this.state.num2}
onChange={this.handleInputChange}
/>
</label>
<input type="submit" value="Submit"/>
{this.state.result && <h1>Result: {this.state.result}</h1>}
</form>
)
}
}
|
#!/bin/bash
#
# Laucher batch script file for TACC systems (like Frontera, Stampede2, etc.)
# Si Liu
# July 13, 2020
#
# Simple SLURM script for submitting multiple serial
# jobs (e.g. parametric studies) using a script wrapper
# to launch the jobs.
#
# To use, build the launcher executable and your
# serial application(s) and place them in your WORKDIR
# directory. Then, edit the LAUNCHER_JOB_FILE to specify
# each executable per process.
#-------------------------------------------------------
#
# <------ Setup Parameters ------>
#
#SBATCH -J resp_postproc
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --qos=devel
##SBATCH --mem-per-cpu=1GB
#SBATCH -o postproc.%j.out
#SBATCH -e postproc.%j.err
#SBATCH -t 00:30:00
#SBATCH -A nn9464k
#SBATCH --mail-type=ALL
#SBATCH --mail-user=fabio.zeiser@fys.uio.no
#------------------------------------------------------
set -o errexit # Exit the script on any error
set -o nounset # Treat any unset variables as an error
module --quiet purge # Reset the modules to the system default
# setup working dir (remember to copy back later)
workdir=$USERWORK/$SLURM_JOB_ID
mkdir -p $workdir
#cp -r $SUBMITDIR/* $workdir
cp -r /cluster/home/fabiobz/OCL_GEANT4/* $workdir
#module load launcher
export LAUNCHER_DIR=/cluster/home/fabiobz/launcher
# USING SLURM; plugins defines SLURM env. vars.
export LAUNCHER_RMI=SLURM
export LAUNCHER_PLUGIN_DIR=$LAUNCHER_DIR/plugins
#export LAUNCHER_WORKDIR=$workdir/OscarBuild/
export LAUNCHER_WORKDIR=/cluster/projects/nn9464k/fabio/OSCAR_response_results/641159/data
export LOGDIR=$workdir/logs_postproc
mkdir -p $LOGDIR
export LAUNCHER_JOB_FILE=`pwd`/commands_postproc
#export LAUNCHER_SCHED=block
module load Python/3.8.2-GCCcore-9.3.0 ROOT/6.12.06-intel-2018a-Python-2.7.14 icc/2019.1.144-GCC-8.2.0-2.31.1 CMake/3.13.3-GCCcore-8.2.0
export LD_LIBRARY_PATH=/cluster/home/fabiobz/progs/xerces-c-3.2.3/install/lib:$LD_LIBRARY_PATH
source /cluster/home/fabiobz/progs/geant4.10.06.p02-install/bin/geant4.sh
module list
$LAUNCHER_DIR/paramrun
|
#include "ArtistTree.h"
TreeNode::TreeNode(std::string name)
: artist{ name } {};
ArtistTree::ArtistTree() {};
ArtistTree::ArtistTree(TreeNode* first)
: root{ first } {};
ArtistTree::ArtistTree(std::string artist)
: root{ new TreeNode(artist) } {}
void ArtistTree::insert(TreeNode* new_artist)
{
TreeNode *temp = root;
TreeNode *prev = nullptr;
while (temp)
{
prev = temp;
// do not allow for duplicate artists
if (new_artist->artist == temp->artist)
{
return;
}
else if (new_artist->artist < temp->artist)
{
temp = temp->left;
}
else
{
temp = temp->right;
}
}
new_artist->par = prev;
if (!prev)
{
root = new_artist;
}
else if (new_artist->artist < prev->artist)
{
prev->left = new_artist;
}
else
{
prev->right = new_artist;
}
}
void ArtistTree::insert(std::string artist)
{
TreeNode *temp = new TreeNode(artist);
insert(temp);
}
TreeNode* ArtistTree::find(std::string name)
{
TreeNode* temp = root;
while (temp)
{
if (temp->artist == name)
{
return temp;
}
else if (name < temp->artist)
{
temp = temp->left;
}
else
{
temp = temp->right;
}
}
if (!temp)
{
std::cout << "Artist " << name << " not found." << std::endl;
}
return temp; // temp is nullptr if no match found
}
void ArtistTree::print_inorder(TreeNode* start)
{
if (start)
{
print_inorder(start->left);
std::cout << start->artist << ", ";
print_inorder(start->right);
}
}
void ArtistTree::print_inorder()
{
print_inorder(root);
}
void ArtistTree::strings_inorder(TreeNode* start)
{
if (start)
{
strings_inorder(start->left);
artists.push_back(start->artist);
strings_inorder(start->right);
}
}
void ArtistTree::strings_inorder()
{
strings_inorder(root);
}
std::vector<std::string> ArtistTree::get_strings()
{
artists.clear();
strings_inorder(root);
return artists;
}
|
<gh_stars>0
package BSP;
import java.util.*;
import org.json.JSONObject;
import static BSP.BSPConstant.*;
/**
* Created by nathan on 13/5/17.
*/
public class SystemController {
private List<Customer> customers = new ArrayList<Customer>();
private Customer customer;
private DataManager dataManager = new DataManager();
public SystemController() {
}
public void main()
{
List<JSONObject> jsonCustomers = dataManager.readFile();
for (JSONObject j: jsonCustomers)
{
customers.add(new Customer(j));
}
}
public boolean checkLoginInfo(String username, String password)
{
for (Customer c: customers)
{
if (String.valueOf(c.getCustomerId()).equals(username))
{
if (c.getPassword().equals(password))
{
customer = c;
customer.updateLastLogin();
return true;
}
}
}
return false;
}
public boolean isAdmin(String username, String password)
{
return username.equals(SUPER_ID) && password.equals(<PASSWORD>);
}
public int[] createCustomer(String password, String fname, String lname, Date dob, String state, String city, String district, String street, String unit, String postcode, String emailAddress, String pin) {
int customerId = getAvailableCustomerId();
Customer newCustomer = new Customer(customerId, password, fname, lname, dob, state, city, district, street, unit, postcode, emailAddress, pin, getAvailableAccountId(0));
if (customers.add(newCustomer))
{
return new int[] {newCustomer.getCustomerId(), newCustomer.getSavingAccount().getAccountId()};
}
else
{
return null;
}
}
public boolean deleteCustomer(Customer customer) {
return customers.remove(customer);
}
public int getAvailableCustomerId()
{
if (customers.isEmpty())
{
return 100001;
}
else
{
return customers.get(customers.size() - 1).getCustomerId() + 1;
}
}
public void close()
{
List<String> jsons = new ArrayList<String>();
for (Customer c: customers)
{
jsons.add(c.toString());
}
dataManager.writeFile(jsons);
}
public int getAvailableAccountId(int index)
{
int maxNumber = INITIAL_ACCONT_ID[index];
for (Customer c: customers)
{
if (maxNumber < c.getAccounts()[index].getAccountId())
{
maxNumber = c.getAccounts()[index].getAccountId();
}
}
return maxNumber + 1;
}
public int getAccountIndex(String accountType)
{
int index = 0;
for (int i = 0; i < ACCOUNT_TYPES.length; i++)
{
if (accountType.equalsIgnoreCase(ACCOUNT_TYPES[i]))
{
index = i;
break;
}
}
return index;
}
public String getCustomerName()
{
return customer.getFname() + " " + customer.getLname();
}
public String[] withdraw(String accountType, double amount)
{
if (accountType.equalsIgnoreCase(ACCOUNT_TYPES[2]) && !customer.canCreateHomeLoan(amount))
{
return new String[] {"", "Not enough fund in saving account"};
}
ArrayList<String[]> list = new ArrayList<>();
String[] array = new String[] {"", ""};
String string = "";
Transaction transaction = customer.getAccounts()[getAccountIndex(accountType)].withdraw(amount, 0);
if (transaction.getError().isEmpty())
{
for (String[] s: transaction.toArrayList())
{
string += "\n" + s[0] + ": " + s[1];
}
array[0] = string;
}
else
array[1] = transaction.getError();
return array;
}
public String[] deposit(String accountType, double amount)
{
String[] array = new String[] {"", ""};
Transaction transaction = customer.getAccounts()[getAccountIndex(accountType)].deposit(amount, 0);
if (transaction.getError().isEmpty())
{
String string = "";
for (String[] s: transaction.toArrayList())
{
string += "\n" + s[0] + ": " + s[1];
}
array[0] = string;
}
else
array[1] = transaction.getError();
return array;
}
public String[] termDeposit(double amount, int term)
{
String[] array = new String[] {"", ""};
Transaction transaction = customer.getAccounts()[3].deposit(amount, 0);
if (transaction.getError().isEmpty())
{
String string = "Term:" + TD_TERMS_ARRAY[term];
for (String[] s: transaction.toArrayList())
{
string += "\n" + s[0] + ": " + s[1];
}
array[0] = string;
}
else
array[1] = transaction.getError();
return array;
}
public String[] transfer(String outputAccountType, String inputAccountType, double amount)
{
String[] array = new String[] {"", ""};
Transaction transaction = customer.transfer(getAccountIndex(outputAccountType), getAccountIndex(inputAccountType), amount);
if (transaction.getError().isEmpty())
{
String string = "";
for (String[] s: transaction.toArrayList())
{
string += "\n" + s[0] + ": " + s[1];
}
array[0] = string;
}
else
array[1] = transaction.getError();
return array;
}
public String createAccount(String accountType)
{
int index = getAccountIndex(accountType);
return String.valueOf(customer.createAccount(index, getAvailableAccountId(index)).getAccountId());
}
public ArrayList<String> getExistAccountList()
{
return customer.getExistAccounts();
}
public ArrayList<String> getNonExistAccountList()
{
return customer.getNonExistAccounts();
}
public void countWrongPin()
{
customer.countWrongPin();
}
public boolean isCustomerCanTransfer()
{
return customer.canTransfer();
}
public boolean isPin(String pin)
{
return customer.getPin().equals(pin);
}
public List<Double> getBalances()
{
return customer.getBalances();
}
}
|
<reponame>NilsCoding/hierarchical-data<filename>src/com/nilscoding/datastruct/hierarchical/HierarchyStream.java
package com.nilscoding.datastruct.hierarchical;
import java.util.Iterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
/**
* Utility class to create a Stream on
* @author NilsCoding
*/
public class HierarchyStream {
private HierarchyStream() { }
/**
* Creates a Stream of the iterable elements, starting at the given rootObj
* and iterating over all child elements, using the given dataAccessor
* @param <T> type of elements
* @param rootObj root object
* @param dataAccessor data accessor
* @return Stream to iterate over elements
*/
public static <T> Stream<T> of(T rootObj, IHierarchyDataAccessor<?,T> dataAccessor) {
Iterator<T> itr = new HierarchyIterator<>(rootObj, dataAccessor);
return StreamSupport.stream(Spliterators.spliteratorUnknownSize(itr, Spliterator.ORDERED), false);
}
}
|
<gh_stars>1-10
//
// Licensed to Green Energy Corp (www.greenenergycorp.com) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. Green Enery Corp licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
#ifndef __PHYSICAL_LAYER_ASYNC_BASE_TCP_H_
#define __PHYSICAL_LAYER_ASYNC_BASE_TCP_H_
#include <opendnp3/APL/PhysicalLayerAsyncASIO.h>
#include <boost/asio.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <memory>
namespace apl
{
/**
Common socket object and some shared implementations for server/client.
*/
class PhysicalLayerAsyncBaseTCP : public PhysicalLayerAsyncASIO
{
public:
PhysicalLayerAsyncBaseTCP(Logger*, boost::asio::io_service* apIOService);
virtual ~PhysicalLayerAsyncBaseTCP() {}
/* Implement the shared client/server actions */
void DoClose();
void DoAsyncRead(boost::uint8_t*, size_t);
void DoAsyncWrite(const boost::uint8_t*, size_t);
void DoOpenFailure();
protected:
boost::asio::ip::tcp::socket mSocket;
void CloseSocket();
boost::asio::ip::address ResolveAddress(const std::string& arEndpoint,
boost::system::error_code& ec);
private:
void ShutdownSocket();
};
}
#endif
|
<reponame>LesterCerioli/Umbriel<gh_stars>100-1000
import { Tag } from '@modules/subscriptions/domain/tag/tag'
import { Title } from '@modules/subscriptions/domain/tag/title'
import { InMemoryTagsRepository } from '@modules/subscriptions/repositories/in-memory/InMemoryTagsRepository'
import { GetAllTags } from './GetAllTags'
let tagsRepository: InMemoryTagsRepository
let getAllTags: GetAllTags
describe('Get All Tags', () => {
beforeEach(() => {
tagsRepository = new InMemoryTagsRepository()
getAllTags = new GetAllTags(tagsRepository)
})
it('should be able to get all tags', async () => {
const tag1 = Tag.create({
title: Title.create('Tag 01').value as Title,
}).value as Tag
const tag2 = Tag.create({
title: Title.create('Tag 02').value as Title,
}).value as Tag
tagsRepository.create(tag1)
tagsRepository.create(tag2)
const response = await getAllTags.execute()
expect(response.length).toBe(2)
expect(response[0].title.value).toEqual('Tag 01')
expect(response[1].title.value).toEqual('Tag 02')
})
})
|
SELECT COUNT(DISTINCT column_name)
FROM table_name;
|
class Review < ApplicationRecord
belongs_to :product
end
|
#!/bin/sh
TCP_SERVER_PORT="${DRACHTIO_PORT:-4000}"
nc -v -z localhost $TCP_SERVER_PORT
# if last command exited with non zero
if [ $? != 0 ]
then
exit 1
fi
HTTP_SERVER_PORT="${HTTP_PORT:-3000}"
printf 'GET /system-health HTTP/1.1\r\nHost: localhost\r\n\r\n' | nc -v localhost 3000 | grep calls
# grep will automatically exit with 1 if string is not matched, however, will leave that call there in case
# we pivot to pipe to dev/null
if [ $? != 0 ]
then
exit 1
fi
exit 0
|
/*
* Copyright 2017 Realm Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.realm;
import org.bson.types.Decimal128;
import org.bson.types.ObjectId;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
import io.realm.annotations.RealmClass;
import io.realm.annotations.Required;
import io.realm.internal.ColumnInfo;
import io.realm.internal.OsObjectStore;
import io.realm.internal.Table;
import io.realm.internal.fields.FieldDescriptor;
/**
* Class for interacting with the schema for a given RealmObject class. This makes it possible to inspect,
* add, delete or change the fields for given class.
* <p>
* If this {@link RealmObjectSchema} is retrieved from an immutable {@link RealmSchema}, this {@link RealmObjectSchema}
* will be immutable as well.
*
* @see io.realm.RealmMigration
*/
public abstract class RealmObjectSchema {
static final Map<Class<?>, FieldMetaData> SUPPORTED_SIMPLE_FIELDS;
static {
Map<Class<?>, FieldMetaData> m = new HashMap<>();
m.put(String.class, new FieldMetaData(RealmFieldType.STRING, RealmFieldType.STRING_LIST, true));
m.put(short.class, new FieldMetaData(RealmFieldType.INTEGER, RealmFieldType.INTEGER_LIST, false));
m.put(Short.class, new FieldMetaData(RealmFieldType.INTEGER, RealmFieldType.INTEGER_LIST, true));
m.put(int.class, new FieldMetaData(RealmFieldType.INTEGER, RealmFieldType.INTEGER_LIST, false));
m.put(Integer.class, new FieldMetaData(RealmFieldType.INTEGER, RealmFieldType.INTEGER_LIST, true));
m.put(long.class, new FieldMetaData(RealmFieldType.INTEGER, RealmFieldType.INTEGER_LIST, false));
m.put(Long.class, new FieldMetaData(RealmFieldType.INTEGER, RealmFieldType.INTEGER_LIST, true));
m.put(float.class, new FieldMetaData(RealmFieldType.FLOAT, RealmFieldType.FLOAT_LIST, false));
m.put(Float.class, new FieldMetaData(RealmFieldType.FLOAT, RealmFieldType.FLOAT_LIST, true));
m.put(double.class, new FieldMetaData(RealmFieldType.DOUBLE, RealmFieldType.DOUBLE_LIST, false));
m.put(Double.class, new FieldMetaData(RealmFieldType.DOUBLE, RealmFieldType.DOUBLE_LIST, true));
m.put(boolean.class, new FieldMetaData(RealmFieldType.BOOLEAN, RealmFieldType.BOOLEAN_LIST, false));
m.put(Boolean.class, new FieldMetaData(RealmFieldType.BOOLEAN, RealmFieldType.BOOLEAN_LIST, true));
m.put(byte.class, new FieldMetaData(RealmFieldType.INTEGER, RealmFieldType.INTEGER_LIST, false));
m.put(Byte.class, new FieldMetaData(RealmFieldType.INTEGER, RealmFieldType.INTEGER_LIST, true));
m.put(byte[].class, new FieldMetaData(RealmFieldType.BINARY, RealmFieldType.BINARY_LIST, true));
m.put(Date.class, new FieldMetaData(RealmFieldType.DATE, RealmFieldType.DATE_LIST, true));
m.put(ObjectId.class, new FieldMetaData(RealmFieldType.OBJECT_ID, RealmFieldType.OBJECT_ID_LIST, true));
m.put(Decimal128.class, new FieldMetaData(RealmFieldType.DECIMAL128, RealmFieldType.DECIMAL128_LIST, true));
SUPPORTED_SIMPLE_FIELDS = Collections.unmodifiableMap(m);
}
static final Map<Class<?>, FieldMetaData> SUPPORTED_LINKED_FIELDS;
static {
Map<Class<?>, FieldMetaData> m = new HashMap<>();
m.put(RealmObject.class, new FieldMetaData(RealmFieldType.OBJECT, null, false));
m.put(RealmList.class, new FieldMetaData(RealmFieldType.LIST, null, false));
SUPPORTED_LINKED_FIELDS = Collections.unmodifiableMap(m);
}
final RealmSchema schema;
final BaseRealm realm;
final Table table;
final ColumnInfo columnInfo;
/**
* Creates a schema object for a given Realm class.
*
* @param realm Realm holding the objects.
* @param table table representation of the Realm class
* @param columnInfo mapping between field names and column indexes for the given table
*/
RealmObjectSchema(BaseRealm realm, RealmSchema schema, Table table, ColumnInfo columnInfo) {
this.schema = schema;
this.realm = realm;
this.table = table;
this.columnInfo = columnInfo;
}
/**
* Returns the name of the RealmObject class being represented by this schema.
* <p>
* <ul>
* <li>When using a normal {@link Realm} this name is the same as the {@link RealmObject} class.</li>
* <li>When using a {@link DynamicRealm} this is the name used in all API methods requiring a class name.</li>
* </ul>
*
* @return the name of the RealmObject class represented by this schema.
* @throws IllegalStateException if this schema defintion is no longer part of the Realm.
*/
public String getClassName() {
return table.getClassName();
}
/**
* Sets a new name for this RealmObject class. This is equivalent to renaming it.
*
* @param className the new name for this class.
* @throws IllegalArgumentException if className is {@code null} or an empty string, or its length exceeds 56
* characters.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable or from a synced Realm.
* @see RealmSchema#rename(String, String)
*/
public abstract RealmObjectSchema setClassName(String className);
/**
* Adds a new simple field to the RealmObject class. The type must be one supported by Realm. See
* {@link RealmObject} for the list of supported types. If the field should allow {@code null} values use the boxed
* type instead e.g., {@code Integer.class} instead of {@code int.class}.
* <p>
* To add fields that reference other RealmObjects or RealmLists use
* {@link #addRealmObjectField(String, RealmObjectSchema)} or {@link #addRealmListField(String, RealmObjectSchema)}
* instead.
*
* @param fieldName name of the field to add.
* @param fieldType type of field to add. See {@link RealmObject} for the full list.
* @param attributes set of attributes for this field.
* @return the updated schema.
* @throws IllegalArgumentException if the type isn't supported, field name is illegal or a field with that name
* already exists.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable or if adding a
* a field with {@link FieldAttribute#PRIMARY_KEY} attribute to a schema of a synced Realm.
*/
public abstract RealmObjectSchema addField(String fieldName, Class<?> fieldType, FieldAttribute... attributes);
/**
* Adds a new field that references another {@link RealmObject}.
*
* @param fieldName name of the field to add.
* @param objectSchema schema for the Realm type being referenced.
* @return the updated schema.
* @throws IllegalArgumentException if field name is illegal or a field with that name already exists.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable.
*/
public abstract RealmObjectSchema addRealmObjectField(String fieldName, RealmObjectSchema objectSchema);
/**
* Adds a new field that contains a {@link RealmList} with references to other Realm model classes.
* <p>
* If the list contains primitive types, use {@link #addRealmListField(String, Class)} instead.
*
* @param fieldName name of the field to add.
* @param objectSchema schema for the Realm type being referenced.
* @return the updated schema.
* @throws IllegalArgumentException if the field name is illegal or a field with that name already exists.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable.
*/
public abstract RealmObjectSchema addRealmListField(String fieldName, RealmObjectSchema objectSchema);
/**
* Adds a new field that references a {@link RealmList} with primitive values. See {@link RealmObject} for the
* list of supported types.
* <p>
* Nullability of elements are defined by using the correct class e.g., {@code Integer.class} instead of
* {@code int.class}. Alternatively {@link #setRequired(String, boolean)} can be used.
* <p>
* Example:
* <pre>
* {@code
* // Defines the list of Strings as being non null.
* RealmObjectSchema schema = schema.create("Person")
* .addRealmListField("children", String.class)
* .setRequired("children", true)
* }
* </pre>
* If the list contains references to other Realm classes, use
* {@link #addRealmListField(String, RealmObjectSchema)} instead.
*
* @param fieldName name of the field to add.
* @param primitiveType simple type of elements in the array.
* @return the updated schema.
* @throws IllegalArgumentException if the field name is illegal, a field with that name already exists or
* the element type isn't supported.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable.
*/
public abstract RealmObjectSchema addRealmListField(String fieldName, Class<?> primitiveType);
/**
* Removes a field from the class.
*
* @param fieldName field name to remove.
* @return the updated schema.
* @throws IllegalArgumentException if field name doesn't exist.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable or for a synced Realm.
*/
public abstract RealmObjectSchema removeField(String fieldName);
/**
* Renames a field from one name to another.
*
* @param currentFieldName field name to rename.
* @param newFieldName the new field name.
* @return the updated schema.
* @throws IllegalArgumentException if field name doesn't exist or if the new field name already exists.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable or for a synced Realm.
*/
public abstract RealmObjectSchema renameField(String currentFieldName, String newFieldName);
/**
* Tests if the class has field defined with the given name.
*
* @param fieldName field name to test.
* @return {@code true} if the field exists, {@code false} otherwise.
*/
public boolean hasField(String fieldName) {
return table.getColumnKey(fieldName) != Table.NO_MATCH;
}
/**
* Adds an index to a given field. This is the equivalent of adding the {@link io.realm.annotations.Index}
* annotation on the field.
*
* @param fieldName field to add index to.
* @return the updated schema.
* @throws IllegalArgumentException if field name doesn't exist, the field cannot be indexed or it already has a
* index defined.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable.
*/
public abstract RealmObjectSchema addIndex(String fieldName);
/**
* Checks if a given field has an index defined.
*
* @param fieldName existing field name to check.
* @return {@code true} if field is indexed, {@code false} otherwise.
* @throws IllegalArgumentException if field name doesn't exist.
* @see io.realm.annotations.Index
*/
public boolean hasIndex(String fieldName) {
checkLegalName(fieldName);
checkFieldExists(fieldName);
return table.hasSearchIndex(table.getColumnKey(fieldName));
}
/**
* Removes an index from a given field. This is the same as removing the {@code @Index} annotation on the field.
*
* @param fieldName field to remove index from.
* @return the updated schema.
* @throws IllegalArgumentException if field name doesn't exist or the field doesn't have an index.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable or of a synced Realm.
*/
public abstract RealmObjectSchema removeIndex(String fieldName);
/**
* Adds a primary key to a given field. This is the same as adding the {@link io.realm.annotations.PrimaryKey}
* annotation on the field. Further, this implicitly adds {@link io.realm.annotations.Index} annotation to the field
* as well.
*
* @param fieldName field to set as primary key.
* @return the updated schema.
* @throws IllegalArgumentException if field name doesn't exist, the field cannot be a primary key or it already
* has a primary key defined.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable or of a synced Realm.
*/
public abstract RealmObjectSchema addPrimaryKey(String fieldName);
/**
* Removes the primary key from this class. This is the same as removing the {@link io.realm.annotations.PrimaryKey}
* annotation from the class. Further, this implicitly removes {@link io.realm.annotations.Index} annotation from
* the field as well.
*
* @return the updated schema.
* @throws IllegalArgumentException if the class doesn't have a primary key defined.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable or of a synced Realm.
*/
public abstract RealmObjectSchema removePrimaryKey();
/**
* Sets a field to be required i.e., it is not allowed to hold {@code null} values. This is equivalent to switching
* between boxed types and their primitive variant e.g., {@code Integer} to {@code int}.
* <p>
* If the type of designated field is a list of values (not {@link RealmObject}s , specified nullability
* only affects its elements, not the field itself. Value list itself is always non-nullable.
*
* @param fieldName name of field in the class.
* @param required {@code true} if field should be required, {@code false} otherwise.
* @return the updated schema.
* @throws IllegalArgumentException if the field name doesn't exist, cannot have the {@link Required} annotation or
* the field already have been set as required.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable.
* @see Required
*/
public abstract RealmObjectSchema setRequired(String fieldName, boolean required);
/**
* Sets a field to be nullable i.e., it should be able to hold {@code null} values. This is equivalent to switching
* between primitive types and their boxed variant e.g., {@code int} to {@code Integer}.
* <p>
* If the type of designated field is a list of values (not {@link RealmObject}s , specified nullability
* only affects its elements, not the field itself. Value list itself is always non-nullable.
*
* @param fieldName name of field in the class.
* @param nullable {@code true} if field should be nullable, {@code false} otherwise.
* @return the updated schema.
* @throws IllegalArgumentException if the field name doesn't exist, or cannot be set as nullable.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable.
*/
public abstract RealmObjectSchema setNullable(String fieldName, boolean nullable);
/**
* Checks if a given field is required i.e., it is not allowed to contain {@code null} values.
*
* @param fieldName field to check.
* @return {@code true} if it is required, {@code false} otherwise.
* @throws IllegalArgumentException if field name doesn't exist.
* @see #setRequired(String, boolean)
*/
public boolean isRequired(String fieldName) {
long columnIndex = getColumnKey(fieldName);
return !table.isColumnNullable(columnIndex);
}
/**
* Checks if a given field is nullable i.e., it is allowed to contain {@code null} values.
*
* @param fieldName field to check.
* @return {@code true} if it is required, {@code false} otherwise.
* @throws IllegalArgumentException if field name doesn't exist.
* @see #setNullable(String, boolean)
*/
public boolean isNullable(String fieldName) {
long columnIndex = getColumnKey(fieldName);
return table.isColumnNullable(columnIndex);
}
/**
* Checks if a given field is the primary key field.
*
* @param fieldName field to check.
* @return {@code true} if it is the primary key field, {@code false} otherwise.
* @throws IllegalArgumentException if field name doesn't exist.
* @see #addPrimaryKey(String)
*/
public boolean isPrimaryKey(String fieldName) {
checkFieldExists(fieldName);
return fieldName.equals(OsObjectStore.getPrimaryKeyForObject(realm.sharedRealm, getClassName()));
}
/**
* Checks if the class has a primary key defined.
*
* @return {@code true} if a primary key is defined, {@code false} otherwise.
* @see io.realm.annotations.PrimaryKey
*/
public boolean hasPrimaryKey() {
return OsObjectStore.getPrimaryKeyForObject(realm.sharedRealm, getClassName()) != null;
}
/**
* Returns the name of the primary key field.
*
* @return the name of the primary key field.
* @throws IllegalStateException if the class doesn't have a primary key defined.
*/
public String getPrimaryKey() {
String pkField = OsObjectStore.getPrimaryKeyForObject(realm.sharedRealm, getClassName());
if (pkField == null) {
throw new IllegalStateException(getClassName() + " doesn't have a primary key.");
}
return pkField;
}
/**
* Returns all fields in this class.
*
* @return a list of all the fields in this class.
*/
public Set<String> getFieldNames() {
int columnCount = (int) table.getColumnCount();
Set<String> columnNames = new LinkedHashSet<>(columnCount);
for (String column : table.getColumnNames()) {
columnNames.add(column);
}
return columnNames;
}
/**
* Runs a transformation function on each RealmObject instance of the current class. The object will be represented
* as a {@link DynamicRealmObject}.
* <p>
* There is no guarantees in which order the objects are returned.
*
* @param function transformation function.
* @return this schema.
* @throws UnsupportedOperationException if this {@link RealmObjectSchema} is immutable.
*/
public abstract RealmObjectSchema transform(Function function);
/**
* Returns the type used by the underlying storage engine to represent this field.
*
* @param fieldName name of the target field.
* @return the underlying type used by Realm to represent this field.
*/
public RealmFieldType getFieldType(String fieldName) {
long columnKey = getColumnKey(fieldName);
return table.getColumnType(columnKey);
}
/**
* Returns {@code true} if objects of this type are considered "embedded".
* See {@link RealmClass#embedded()} for further details.
*
* @return {@code true} if objects of this type are embedded. {@code false} if not.
*/
public boolean isEmbedded() {
return table.isEmbedded();
}
/**
* Converts the class to be embedded or not.
* <p>
* A class can only be marked as embedded if the following invariants are satisfied:
* <ul>
* <li>
* The class is not allowed to have a primary key defined.
* </li>
* <li>
* All existing objects of this type, must have one and exactly one parent object
* already pointing to it. If 0 or more than 1 object has a reference to an object
* about to be marked embedded an {@link IllegalStateException} will be thrown.
* </li>
* </ul>
*
* @throws IllegalStateException if the class could not be converted because it broke some of the Embedded Objects invariants.
* @see RealmClass#embedded()
*/
public void setEmbedded(boolean embedded) {
if (hasPrimaryKey()) {
throw new IllegalStateException("Embedded classes cannot have primary keys. This class " +
"has a primary key defined so cannot be marked as embedded: " + getClassName());
}
boolean setEmbedded = table.setEmbedded(embedded);
if (!setEmbedded && embedded) {
throw new IllegalStateException("The class could not be marked as embedded as some " +
"objects of this type break some of the Embedded Objects invariants. In order to convert " +
"all objects to be embedded, they must have one and exactly one parent object" +
"pointing to them.");
}
}
/**
* Returns a string with the class name of a given property.
* @param propertyName the property for which we want to know the class name.
* @return the name of the class for the given property.
* @throws IllegalArgumentException if the given property is not found in the schema.
*/
abstract String getPropertyClassName(String propertyName);
/**
* Checks whether a given property's {@code RealmFieldType} could host an acceptable embedded
* object reference in a parent - acceptable embedded object types are
* {@link RealmFieldType#OBJECT} and {@link RealmFieldType#LIST}, i.e. for the property to be
* acceptable it has to be either a subclass of {@code RealmModel} or a {@code RealmList}.
* <p>
* This method does not check the existence of a backlink between the child and the parent nor
* that the parent points at the correct child in their respective schemas nor that the object
* is a suitable parent/child.
* @param property the field type to be checked.
* @return whether the property could host an embedded object in a parent.
*/
boolean isPropertyAcceptableForEmbeddedObject(RealmFieldType property) {
return property == RealmFieldType.OBJECT
|| property == RealmFieldType.LIST;
}
/**
* Get a parser for a field descriptor.
*
* @param fieldDescription fieldName or link path to a field name.
* @param validColumnTypes valid field type for the last field in a linked field
* @return a FieldDescriptor
*/
abstract FieldDescriptor getFieldDescriptors(String fieldDescription, RealmFieldType... validColumnTypes);
RealmObjectSchema add(String name, RealmFieldType type, boolean primary, boolean indexed, boolean required) {
long columnIndex = table.addColumn(type, name, (required) ? Table.NOT_NULLABLE : Table.NULLABLE);
if (indexed) { table.addSearchIndex(columnIndex); }
if (primary) {
OsObjectStore.setPrimaryKeyForObject(realm.sharedRealm, getClassName(), name);
}
return this;
}
RealmObjectSchema add(String name, RealmFieldType type, RealmObjectSchema linkedTo) {
table.addColumnLink(
type,
name,
realm.getSharedRealm().getTable(Table.getTableNameForClass(linkedTo.getClassName())));
return this;
}
long getAndCheckFieldColumnKey(String fieldName) {
long columnKey = columnInfo.getColumnKey(fieldName);
if (columnKey < 0) {
throw new IllegalArgumentException("Field does not exist: " + fieldName);
}
return columnKey;
}
Table getTable() {
return table;
}
static final Map<Class<?>, FieldMetaData> getSupportedSimpleFields() {
return SUPPORTED_SIMPLE_FIELDS;
}
protected final SchemaConnector getSchemaConnector() {
return new SchemaConnector(schema);
}
/**
* Function interface, used when traversing all objects of the current class and apply a function on each.
*
* @see #transform(Function)
*/
public interface Function {
void apply(DynamicRealmObject obj);
}
/**
* Returns the column index in the underlying table for the given field name.
* <b>FOR TESTING USE ONLY!</b>
*
* @param fieldName field name to find index for.
* @return column index or -1 if it doesn't exists.
*/
//@VisibleForTesting(otherwise = VisibleForTesting.NONE)
long getFieldColumnKey(String fieldName) {
return columnInfo.getColumnKey(fieldName);
}
static void checkLegalName(String fieldName) {
//noinspection ConstantConditions
if (fieldName == null || fieldName.isEmpty()) {
throw new IllegalArgumentException("Field name can not be null or empty");
}
if (fieldName.contains(".")) {
throw new IllegalArgumentException("Field name can not contain '.'");
}
if (fieldName.length() > 63) {
throw new IllegalArgumentException("Field name is currently limited to max 63 characters.");
}
}
void checkFieldExists(String fieldName) {
if (table.getColumnKey(fieldName) == Table.NO_MATCH) {
throw new IllegalArgumentException("Field name doesn't exist on object '" + getClassName() + "': " + fieldName);
}
}
long getColumnKey(String fieldName) {
long columnKey = table.getColumnKey(fieldName);
if (columnKey == -1) {
throw new IllegalArgumentException(
String.format(Locale.US,
"Field name '%s' does not exist on schema for '%s'",
fieldName, getClassName()
));
}
return columnKey;
}
static final class DynamicColumnIndices extends ColumnInfo {
private final Table table;
DynamicColumnIndices(Table table) {
super(null, false);
this.table = table;
}
@Override
public long getColumnKey(String columnName) {
return table.getColumnKey(columnName);
}
@Override
public ColumnDetails getColumnDetails(String columnName) {
throw new UnsupportedOperationException("DynamicColumnIndices do not support 'getColumnDetails'");
}
@Override
public void copyFrom(ColumnInfo src) {
throw new UnsupportedOperationException("DynamicColumnIndices cannot be copied");
}
@Override
protected ColumnInfo copy(boolean immutable) {
throw new UnsupportedOperationException("DynamicColumnIndices cannot be copied");
}
@Override
protected void copy(ColumnInfo src, ColumnInfo dst) {
throw new UnsupportedOperationException("DynamicColumnIndices cannot copy");
}
}
// Tuple containing data about each supported Java type.
static final class FieldMetaData {
final RealmFieldType fieldType; // Underlying Realm type for fields with this type
final RealmFieldType listType; // Underlying Realm type for RealmLists containing this type
final boolean defaultNullable;
FieldMetaData(RealmFieldType fieldType, @Nullable RealmFieldType listType, boolean defaultNullable) {
this.fieldType = fieldType;
this.listType = listType;
this.defaultNullable = defaultNullable;
}
}
}
|
<filename>packages/components/src/components/behavioural-components/keyboard-navigable/keyboard-navigation-listener.tsx
import { Component, h, Prop, Host, Listen } from '@stencil/core'
import { KeyboardNavigationHandler } from './keyboard-navigation-handler'
import { KeyboardNavigationAction } from './keyboard-navigable'
import { KeyCodes } from '../../../utils/keycodes'
@Component({
tag: 'keyboard-navigation-listener',
shadow: true
})
export class KeyboardNavigationListener {
@Prop() handler!: KeyboardNavigationHandler
@Listen('keyboardNavigation')
protected navigationHandler(event: CustomEvent<KeyboardNavigationAction>) {
event?.stopPropagation()
switch (event.detail.key) {
case KeyCodes.UP:
this.focus(this.handler.getUpItem(event.detail))
break
case KeyCodes.DOWN:
this.focus(this.handler.getDownItem(event.detail))
break
case KeyCodes.LEFT:
this.focus(this.handler.getLeftItem(event.detail))
break
case KeyCodes.RIGHT:
this.focus(this.handler.getRightItem(event.detail))
break
case KeyCodes.HOME:
this.focus(this.handler.getFirstItem(event.detail))
break
case KeyCodes.END:
this.focus(this.handler.getLastItem(event.detail))
break
}
}
private focus = (element: HTMLElement | undefined): void => {
if (element instanceof HTMLElement) element.focus()
}
render() {
return (
<Host role="none">
<slot />
</Host>
)
}
}
|
const initialState = {
miningBonus: 0,
researchSpeed: 1,
inserterTarget: 'default',
inserterCapacity: 5,
costFactor: 1.0,
costFactory: 100,
costInput: 10,
costIgnored: 0,
};
const settingsReducer = (state = initialState, action) => {
switch (action.type) {
case SettingsActionType.SET_MINING_BONUS:
return { ...state, ...{ miningBonus: action.payload } };
case SettingsActionType.SET_RESEARCH_SPEED:
return { ...state, ...{ researchSpeed: action.payload } };
case SettingsActionType.SET_INSERTER_TARGET:
return { ...state, ...{ inserterTarget: action.payload } };
case SettingsActionType.SET_INSERTER_CAPACITY:
return { ...state, ...{ inserterCapacity: action.payload } };
case SettingsActionType.SET_COST_FACTOR:
return { ...state, ...{ costFactor: action.payload } };
case SettingsActionType.SET_COST_FACTORY:
return { ...state, ...{ costFactory: action.payload } };
case SettingsActionType.SET_COST_INPUT:
return { ...state, ...{ costInput: action.payload } };
case SettingsActionType.SET_COST_IGNORED:
return { ...state, ...{ costIgnored: action.payload } };
default:
return state;
}
};
|
#!/bin/sh
# failure
../examples/test4 -Bs --Bs asdf > tmp.out 2>&1
if cmp -s tmp.out $srcdir/test18.out; then
exit 0
else
exit 1
fi
|
/*
* Created Date: Thu, 6th May 2021, 15:11:26 pm
* Author: <NAME>
* Email: <EMAIL>
* Copyright (c) 2021 The Distance
*/
import React, {useState} from 'react';
import {View, Text, Image, TouchableOpacity} from 'react-native';
import {ScaleHook} from 'react-native-design-to-component';
import useTheme from '../../hooks/theme/UseTheme';
import useDictionary from '../../hooks/localisation/useDictionary';
import TDIcon from 'the-core-ui-component-tdicon';
import {format} from 'date-fns';
import IconTextView from '../Infographics/IconTextView';
import isRTL from '../../utils/isRTL';
import PersistentImage from '../Utility/PersistedImage';
const homeIcon = require('../../../assets/icons/homeWorkout.png');
const gymIcon = require('../../../assets/icons/gymIcon.png');
const newStarIcon = require('../../../assets/icons/newStar.png');
export default function OnDemandWorkoutCard({
workout,
title,
duration,
intensity,
image,
onPressCard,
}) {
// ** ** ** ** ** SETUP ** ** ** ** **
const {getHeight, getWidth, fontSize, radius} = ScaleHook();
const {colors, fonts, textStyles} = useTheme();
const {dictionary} = useDictionary();
const {OnDemandDict, ButtonDict, WorkoutDict} = dictionary;
const trainerName = workout.programme.trainer.name.toUpperCase();
const programmeEnvironment = workout.programme.environment;
const environmentName =
programmeEnvironment === 'GYM' ? ButtonDict.Gym : ButtonDict.Home;
// ** ** ** ** ** STYLES ** ** ** ** **
const styles = {
card: {
width: '100%',
height: getHeight(85),
flexDirection: 'row',
alignItems: 'center',
backgroundColor: colors.white100,
shadowColor: colors.black10,
shadowOffset: {width: 0, height: 3},
shadowRadius: 6,
shadowOpacity: 1,
elevation: 6,
marginBottom: getHeight(15),
},
touch: {
flex: 1,
flexDirection: 'row',
alignItems: 'center',
},
image: {
width: getWidth(119),
height: getHeight(85),
resizeMode: 'cover',
},
iconContainer: {
marginRight: getWidth(6),
},
icon: {
resizeMode: 'contain',
tintColor: colors.brownishGrey100,
width: getWidth(12),
solid: true,
size: fontSize(22),
},
completeIconContainer: {
marginRight: getWidth(7),
},
completeIcon: {
color: colors.brownGrey100,
},
textContainer: {
padding: getWidth(15),
flex: 1,
flexDirection: 'column',
justifyContent: 'space-between',
},
trainerName: {
...textStyles.semiBold10_brownGrey100,
color: colors.paleGrey100,
letterSpacing: 1.0,
lineHeight: getHeight(10),
textAlign: 'left',
},
nameContainer: {},
workoutName: {
fontFamily: fonts.semiBold,
color: colors.black100,
fontSize: fontSize(15),
letterSpacing: -0.22,
textAlign: 'left',
},
detailContainer: {
flexDirection: 'row',
alignSelf: 'flex-start',
alignItems: 'center',
},
iconTextContainer: {
flexDirection: 'row',
alignItems: 'center',
},
dotContainer: {
backgroundColor: colors.brownGrey100,
height: getWidth(2),
width: getWidth(2),
borderRadius: radius(1),
alignItems: 'center',
marginLeft: getWidth(8),
marginRight: getWidth(8),
},
newContainer: {
position: 'absolute',
width: '100%',
paddingTop: getHeight(12),
flexDirection: 'row',
justifyContent: 'flex-end',
alignItems: 'center',
alignSelf: 'flex-start',
},
newStar: {
marginLeft: 4,
marginRight: 12,
},
newStarText: {
...textStyles.bold12_newWorkoutBlue100,
},
greyText: {
...textStyles.medium10_brownishGrey100,
lineHeight: fontSize(16),
},
};
// ** ** ** ** ** FUNCTIONS ** ** ** ** **
// ** ** ** ** ** RENDER ** ** ** ** **
return (
<View style={styles.card}>
<TouchableOpacity
activeOpacity={1}
style={styles.touch}
onPress={() => onPressCard(workout)}>
{image ? (
<PersistentImage
imageUrl={image}
style={styles.image}
showLoading={true}
//fallback={fallback}
//placeholder={true}
//overlayStyle={overlayStyle}
//customOverlay={() => <></>}
//callbackSetLoaded={() => {}}
/>
) : (
<View style={styles.image} />
)}
{workout.isNew && (
<View style={styles.newContainer}>
<Text style={styles.newStarText}>{OnDemandDict.newWorkout}</Text>
<TDIcon
input={newStarIcon}
inputStyle={{style: {...styles.newStar}}}
/>
</View>
)}
<View style={{}}>
<View style={styles.textContainer}>
<View style={styles.nameContainer}>
<Text style={styles.trainerName}>{trainerName}</Text>
<Text style={styles.workoutName}>{title}</Text>
</View>
<View style={styles.detailContainer}>
<View
style={{
...styles.iconTextContainer,
...styles.leftIconContainer,
}}>
<View style={styles.iconContainer}>
<TDIcon
input={programmeEnvironment === 'HOME' ? homeIcon : gymIcon}
inputStyle={{
style: {
...styles.icon,
},
}}
/>
</View>
<Text style={styles.greyText}>{environmentName}</Text>
</View>
<View style={styles.dotContainer} />
<View style={styles.iconTextContainer}>
<Text style={styles.greyText}>{`${duration}`}</Text>
<Text style={styles.greyText}>
{` ${WorkoutDict.Mins}`.toUpperCase()}
</Text>
</View>
</View>
</View>
</View>
</TouchableOpacity>
</View>
);
}
|
<filename>src/contexts/ThemeContext.tsx
import { createContext, FC, useContext, useEffect, useState } from 'react';
interface ThemeContextData {
isDark: boolean;
toggleDarkMode: () => void;
}
export const ThemeContext = createContext<ThemeContextData>({
isDark: false,
toggleDarkMode() {
return;
},
});
const ThemeContextProvider: FC = ({ children }) => {
const [isDark, setIsDark] = useState(false);
function toggleDarkMode(): void {
const newMode = !isDark;
document.documentElement.className = newMode ? `dark` : ``;
setIsDark(!isDark);
localStorage.setItem(`theme`, newMode ? `dark` : `light`);
}
useEffect(() => {
if (window) {
const storageMode = window.localStorage.getItem(`theme`) === `dark`;
if (storageMode) {
setIsDark(window.localStorage.getItem(`theme`) === `dark`);
document.documentElement.className = storageMode ? `dark` : ``;
}
}
}, []);
return (
<ThemeContext.Provider
value={{
isDark,
toggleDarkMode,
}}
>
{children}
</ThemeContext.Provider>
);
};
const useTheme = (): ThemeContextData => {
return useContext(ThemeContext);
};
export { ThemeContextProvider, useTheme };
|
#!/bin/bash
# Validates a CloudFromation stack
TEMPLATELOCATION=${1:-file://$(pwd)/rds.yml}
VALIDATE="aws cloudformation validate-template --template-body $TEMPLATELOCATION"
echo $VALIDATE
$VALIDATE
|
package api
// GatewayIntents is an extension of the Bit structure used when identifying with discord
type GatewayIntents int64
// Constants for the different bit offsets of GatewayIntents
const (
GatewayIntentsGuilds GatewayIntents = 1 << iota
GatewayIntentsGuildMembers
GatewayIntentsGuildBans
GatewayIntentsGuildEmojis
GatewayIntentsGuildIntegrations
GatewayIntentsGuildWebhooks
GatewayIntentsGuildInvites
GatewayIntentsGuildVoiceStates
GatewayIntentsGuildPresences
GatewayIntentsGuildMessages
GatewayIntentsGuildMessageReactions
GatewayIntentsGuildMessageTyping
GatewayIntentsDirectMessages
GatewayIntentsDirectMessageReactions
GatewayIntentsDirectMessageTyping
GatewayIntentsNonPrivileged = GatewayIntentsGuilds |
GatewayIntentsGuildBans |
GatewayIntentsGuildEmojis |
GatewayIntentsGuildIntegrations |
GatewayIntentsGuildWebhooks |
GatewayIntentsGuildInvites |
GatewayIntentsGuildVoiceStates |
GatewayIntentsGuildMessages |
GatewayIntentsGuildMessageReactions |
GatewayIntentsGuildMessageTyping |
GatewayIntentsDirectMessages |
GatewayIntentsDirectMessageReactions |
GatewayIntentsDirectMessageTyping
GatewayIntentsPrivileged = GatewayIntentsGuildMembers |
GatewayIntentsGuildPresences
GatewayIntentsAll = GatewayIntentsNonPrivileged |
GatewayIntentsPrivileged
GatewayIntentsNone GatewayIntents = 0
)
// Add allows you to add multiple bits together, producing a new bit
func (p GatewayIntents) Add(bits ...GatewayIntents) GatewayIntents {
total := GatewayIntents(0)
for _, bit := range bits {
total |= bit
}
p |= total
return p
}
// Remove allows you to subtract multiple bits from the first, producing a new bit
func (p GatewayIntents) Remove(bits ...GatewayIntents) GatewayIntents {
total := GatewayIntents(0)
for _, bit := range bits {
total |= bit
}
p &^= total
return p
}
// HasAll will ensure that the bit includes all of the bits entered
func (p GatewayIntents) HasAll(bits ...GatewayIntents) bool {
for _, bit := range bits {
if !p.Has(bit) {
return false
}
}
return true
}
// Has will check whether the Bit contains another bit
func (p GatewayIntents) Has(bit GatewayIntents) bool {
return (p & bit) == bit
}
// MissingAny will check whether the bit is missing any one of the bits
func (p GatewayIntents) MissingAny(bits ...GatewayIntents) bool {
for _, bit := range bits {
if !p.Has(bit) {
return true
}
}
return false
}
// Missing will do the inverse of Bit.Has
func (p GatewayIntents) Missing(bit GatewayIntents) bool {
return !p.Has(bit)
}
|
<filename>packager/react-packager/src/node-haste/crawlers/index.js
'use strict';
const nodeCrawl = require('./node');
const watchmanCrawl = require('./watchman');
function crawl(roots, options) {
const {fileWatcher} = options;
return (fileWatcher ? fileWatcher.isWatchman() : Promise.resolve(false)).then(
isWatchman => isWatchman ? watchmanCrawl(roots, options) : nodeCrawl(roots, options)
);
}
module.exports = crawl;
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2013:0522
#
# Security announcement date: 2013-03-09 00:40:29 UTC
# Script generation date: 2017-01-01 21:10:41 UTC
#
# Operating System: CentOS 6
# Architecture: i386
#
# Vulnerable packages fix on version:
# - gdb.i686:7.2-60.el6
# - gdb-gdbserver.i686:7.2-60.el6
#
# Last versions recommanded by security team:
# - gdb.i686:7.2-60.el6
# - gdb-gdbserver.i686:7.2-60.el6
#
# CVE List:
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install gdb.i686-7.2 -y
sudo yum install gdb-gdbserver.i686-7.2 -y
|
#!/bin/bash
# Install Dependencies and Prerequisites
./scripts/installDependencies.sh
|
from typing import List
class CombinedRelation:
def __init__(self, relation_type: str, relation_data):
self.relation_type = relation_type
self.relation_data = relation_data
def filter_and_sort_relations(relations: List[CombinedRelation]) -> List[CombinedRelation]:
filtered_relations = [rel for rel in relations if rel.relation_type == "important" and len(rel.relation_data) > 5]
sorted_relations = sorted(filtered_relations, key=lambda rel: len(rel.relation_data))
return sorted_relations
# Test the function with the given example
relations = [
CombinedRelation("important", "abcdefg"),
CombinedRelation("unimportant", "xyz"),
CombinedRelation("important", "abcdefghij"),
CombinedRelation("important", "ab"),
CombinedRelation("important", "klmnopqrst")
]
filtered_sorted_relations = filter_and_sort_relations(relations)
print(filtered_sorted_relations)
|
#!/bin/bash
#bdereims@vmware.com
. ./env
[ "$1" == "" -o "$2" == "" ] && echo "usage: $0 <name_of_transportzone> <name_of_logicalswitch>" && exit 1
TZ_ID=$( ${NETWORK_DIR}/id_transportzone.sh ${1} )
[ "${TZ_ID}" == "" ] && echo "${1} doesn't exist!" && exit 1
#NEW_LOGICALSWITCH="<virtualWireCreateSpec><name>${2}</name><description>Logical Switch via REST API</description><tenantId></tenantId><controlPlaneMode>HYBRID_MODE</controlPlaneMode><guestVlanAllowed>true</guestVlanAllowed></virtualWireCreateSpec>"
NEW_LOGICALSWITCH="<virtualWireCreateSpec><name>${2}</name><description>Logical Switch via REST API</description><tenantId></tenantId><controlPlaneMode>HYBRID_MODE</controlPlaneMode><guestVlanAllowed>true</guestVlanAllowed></virtualWireCreateSpec>"
curl -s -k -u ${NSX_ADMIN}:${NSX_PASSWD} -H "Content-Type:text/xml;charset=UTF-8" -X POST --data "${NEW_LOGICALSWITCH}" https://${NSX}/api/2.0/vdn/scopes/${TZ_ID}/virtualwires 2>&1 > /dev/null
LS_PROPS=$( ${NETWORK_DIR}/props_logicialswitch.sh $1 $2 )
[ "${LS_PROPS}" != "" ] && echo "Logicial Switch '${2}' has been sucessfully created in '${1}'." && exit 0
echo "Logical Switch '${2}' does not seem to be created." && exit 1
|
#!/bin/bash
# Hosts the vocabularies.
# These files are pointed at by purl.org:
pushd /var/www/vocab
# http://purl.org/twc/vocab/datafaqs# redirects to:
sudo rm datafaqs.ttl
sudo wget https://raw.github.com/timrdf/DataFAQs/master/ontology/datafaqs.ttl
sudo wget https://raw.github.com/timrdf/DataFAQs/master/ontology/datafaqs.ttl.owl
# http://purl.org/twc/vocab/conversion/ redirects to:
sudo rm conversion.ttl
sudo wget -O conversion.ttl https://raw.github.com/timrdf/csv2rdf4lod-automation/master/doc/ontology/vocab.ttl
sudo wget -O conversion.owl https://raw.github.com/timrdf/csv2rdf4lod-automation/master/doc/ontology/vocab.owl
ls -lt /var/www/vocab
|
/**
* <a href="http://www.openolat.org">
* OpenOLAT - Online Learning and Training</a><br>
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br>
* you may not use this file except in compliance with the License.<br>
* You may obtain a copy of the License at the
* <a href="http://www.apache.org/licenses/LICENSE-2.0">Apache homepage</a>
* <p>
* Unless required by applicable law or agreed to in writing,<br>
* software distributed under the License is distributed on an "AS IS" BASIS, <br>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
* See the License for the specific language governing permissions and <br>
* limitations under the License.
* <p>
* Initial code contributed and copyrighted by<br>
* frentix GmbH, http://www.frentix.com
* <p>
*/
package org.olat.modules.video.ui;
import java.util.List;
import org.olat.core.gui.UserRequest;
import org.olat.core.gui.components.Component;
import org.olat.core.gui.components.stack.TooledStackedPanel;
import org.olat.core.gui.control.Event;
import org.olat.core.gui.control.WindowControl;
import org.olat.core.gui.control.controller.BasicController;
import org.olat.core.gui.control.generic.dtabs.Activateable2;
import org.olat.core.id.context.ContextEntry;
import org.olat.core.id.context.StateEntry;
/**
* This site implements a YouTube stile video library for self-study.
*
* Initial date: 08.05.2016<br>
*
* @author gnaegi, <EMAIL>, http://www.frentix.com
*
*/
public class VideoSiteController extends BasicController implements Activateable2 {
private final TooledStackedPanel toolbarPanel;
private VideoListingController videoListingCtr;
public VideoSiteController(UserRequest ureq, WindowControl wControl) {
super(ureq, wControl);
toolbarPanel = new TooledStackedPanel("videosStackPanel", getTranslator(), this);
toolbarPanel.setInvisibleCrumb(0); // show root level
toolbarPanel.setShowCloseLink(true, false);
toolbarPanel.setToolbarEnabled(false);
putInitialPanel(toolbarPanel);
videoListingCtr = new VideoListingController(ureq, wControl, toolbarPanel);
listenTo(videoListingCtr);
toolbarPanel.pushController(translate("topnav.video"), videoListingCtr);
}
@Override
protected void event(UserRequest ureq, Component source, Event event) {
// no events to catch
}
@Override
public void activate(UserRequest ureq, List<ContextEntry> entries, StateEntry state) {
// delegate to video listing
if (videoListingCtr != null) {
videoListingCtr.activate(ureq, entries, state);
}
}
}
|
// https://open.kattis.com/problems/evenup
#include <iostream>
#include <vector>
#include <stack>
using namespace std;
typedef vector<int> vi;
typedef stack<int> si;
int main() {
int n;
cin >> n;
vi v(n);
for (int i = 0; i < n; i++) cin >> v[i];
int i = 0;
int c = n;
si s;
while (i < n) {
if (!s.empty() && s.top() % 2 == v[i] % 2) {
s.pop();
i++;
c -= 2;
} else if (i < n - 1 && v[i] % 2 == v[i + 1] % 2) {
c -= 2;
i += 2;
} else {
s.push(v[i]);
i++;
}
}
cout << c << endl;;
}
|
<filename>src/config/aws.s3.config.ts
import aws from 'aws-sdk'
import multer from 'multer'
import multerS3 from 'multer-s3'
import dotenv from 'dotenv'
dotenv.config()
aws.config.update({
apiVersion: '2006-03-01',
accessKeyId: process.env.AWSAccessKeyId,
secretAccessKey: process.env.AWSSecretKey,
region: process.env.AWSRegion
})
class AWSConfig {
upload: any
constructor() {
this.upload()
}
async uploadToS3() {
const s3 = new aws.S3()
this.upload = multer({
storage: multerS3({
s3: s3,
bucket: process.env.AWSBucket || '',
acl: 'public-read',
contentType: multerS3.AUTO_CONTENT_TYPE,
metadata: (req, file, cb) => {
cb(null, { fieldName: file.fieldname })
},
key: (req, file, cb) => {
cb(null, Date.now().toString() + file.originalname)
}
})
})
await this.uploadToS3()
}
}
export default new AWSConfig().upload
|
<?php
function isAnagram($string1, $string2) {
$string1 = preg_replace('/[^a-z ]/i', '', $string1);
$string2 = preg_replace('/[^a-z ]/i', '', $string2);
$charCount1 = array_count_values(str_split($string1));
$charCount2 = array_count_values(str_split($string2));
if (count(array_diff_assoc($charCount1, $charCount2)) == 0) {
return true;
}
return false;
}
echo isAnagram('Maroon Five', 'Five Maroon');
// Output: true
|
<reponame>RoCci/FrontendStarter-Boilerplate
'use strict';
var fs = require( 'fs' );
var path = require( 'path' );
var config = require( '../config' );
var exphbs = require( 'express-handlebars' );
var logger = require( '../logging' );
var include;
var handlebars = exphbs.create( {
helpers: { include: include }
} ).handlebars;
function extendJSON( json ) {
Object
.keys( json )
.forEach( function( key ) {
if ( typeof json[ key ] !== 'object' && key === 'json' ) {
var data = fs.readFileSync(
path.join( config.frontendDir + config.distDir + config.dataDir + json[ key ] + '.json' )
).toString();
Object.assign( json, JSON.parse( data ) );
}
else if ( typeof json[ key ] === 'object' ) {
extendJSON( json[ key ] );
}
} );
return json;
}
include = function( context, options ) {
var hbs;
var model;
var resourceType;
var resourcePath;
var template;
var view;
// get data
if ( options.hash.path ) {
model = context[ options.hash.path ];
}
else {
model = options.data.root;
}
model = extendJSON( model );
// get resourceType
resourceType = options.hash.resourceType || model.resourceType;
// template
template = options.hash.template;
// template
if ( template ) {
resourceType = resourceType.replace( /[^\/]*$/, template );
}
// get view
resourcePath = path.join( config.frontendDir, config.distDir, config.viewDir, resourceType + '.handlebars' );
view = fs.readFileSync( resourcePath ).toString();
hbs = handlebars.compile( view, { noEscape: true } );
return hbs( model, {
helpers: {
include: include
}
} );
};
module.exports = include;
|
/*
* //******************************************************************
* //
* // Copyright 2016 Samsung Electronics All Rights Reserved.
* //
* //-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
* //
* // Licensed under the Apache License, Version 2.0 (the "License");
* // you may not use this file except in compliance with the License.
* // You may obtain a copy of the License at
* //
* // http://www.apache.org/licenses/LICENSE-2.0
* //
* // Unless required by applicable law or agreed to in writing, software
* // distributed under the License is distributed on an "AS IS" BASIS,
* // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* // See the License for the specific language governing permissions and
* // limitations under the License.
* //
* //-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
*/
package org.iotivity.cloud.base.protocols.enums;
public enum ResponseStatus {
// Success 2.xx
CREATED, DELETED, VALID, CHANGED, CONTENT,
// Client Error 4.xx
BAD_REQUEST, UNAUTHORIZED, BAD_OPTION, FORBIDDEN,
//
NOT_FOUND, METHOD_NOT_ALLOWED, NOT_ACCEPTABLE,
//
PRECONDITION_FAILED, REQUEST_ENTITY_TOO_LARGE,
//
UNSUPPORTED_CONTENT_FORMAT,
// Server Error 5.xx
INTERNAL_SERVER_ERROR, NOT_IMPLEMENTED, BAD_GATEWAY,
//
SERVICE_UNAVAILABLE, GATEWAY_TIMEOUT, PROXY_NOT_SUPPORTED;
}
|
def is_anagram_of(string1, string2):
char_map = [0] * 26
for ch in string1:
char_map[ord(ch) - ord('a')] += 1
for ch in string2:
char_map[ord(ch) - ord('a')] -= 1
for freq in char_map:
if freq != 0:
return False
return True
|
<filename>client/src/components/TicketComponent.tsx
import React from "react";
import { Ticket } from "../api";
export type TicketComponentProps = {
ticket: Ticket;
hideTicket: (id: string) => void;
fontSize: number;
};
export default class TicketComponent extends React.Component<TicketComponentProps> {
state = {
showLess: false,
};
changeShowLess = () => {
const { showLess } = this.state;
this.setState({
showLess: !showLess,
});
};
render() {
const { ticket, hideTicket, fontSize } = this.props;
const { showLess } = this.state;
return (
<li key={ticket.id} className="ticket">
<a className="hideBtn" onClick={() => hideTicket(ticket.id)}>
Hide
</a>
<h5 className="title">{ticket.title}</h5>
<p
style={{ fontSize }}
className={showLess ? "content less" : "content more"}
>
{ticket.content}
</p>
<a onClick={this.changeShowLess}>
{showLess ? "See more" : "See less"}
</a>
<footer>
<div className="meta-data">
By {ticket.userEmail} |{" "}
{new Date(ticket.creationTime).toLocaleString()}
</div>
</footer>
</li>
);
}
}
|
#!/usr/bin/env bash
ray_version=""
commit=""
ray_branch=""
usage() {
echo "Start one microbenchmark trial."
}
for i in "$@"
do
case $i in
--ray-version=*)
ray_version="${i#*=}"
;;
--commit=*)
commit="${i#*=}"
;;
--ray-branch=*)
ray_branch="${i#*=}"
;;
--help)
usage
exit
;;
*)
echo "unknown arg, $2"
exit 1
;;
esac
done
if [[ $ray_version == "" || $commit == "" || $ray_branch == "" ]]
then
echo "Provide --ray-version, --commit, and --ray-branch"
exit 1
fi
echo "version: $ray_version"
echo "commit: $commit"
echo "branch: $ray_branch"
rm ray-$ray_version-cp36-cp36m-manylinux1_x86_64.whl || true
wget https://s3-us-west-2.amazonaws.com/ray-wheels/$ray_branch/$commit/ray-$ray_version-cp36-cp36m-manylinux1_x86_64.whl
pip uninstall -y -q ray
pip install -U ray-$ray_version-cp36-cp36m-manylinux1_x86_64.whl
OMP_NUM_THREADS=64 ray microbenchmark
|
package api
import "database/sql"
// Ok interface is to be implemented by any object which can be validated
type Ok interface {
OK() error
}
// DatabaseErr is the error returned if an error occurs with database access
type DatabaseErr struct {
Msg string `json:"error"`
}
func (e DatabaseErr) Error() string {
return e.Msg
}
// ErrRequired is the error returned if a required field in a request is missing
type ErrRequired struct {
Msg string `json:"error"`
}
func (e ErrRequired) Error() string {
return e.Msg
}
// Request is the in memory representation of a JSON request
type Request struct {
URL string `json:"url"`
}
// OK validates a received request
func (r *Request) OK() error {
if len(r.URL) == 0 {
return ErrRequired{Msg: "url must be specified"}
}
return nil
}
// Response is the in memory representation of a JSON response
type Response struct {
ShortURL string `json:"shortened"`
}
// DbResult is the value returned from a database
type DbResult struct {
Value string
Error error
}
// Db is an interface wrapper for a sql.DB to allow for testing
type Db interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (Rows, error)
}
// Rows is an interface wrapper for a sql.Rows to allow for testing
type Rows interface {
Close() error
Next() bool
Scan(dest ...interface{}) error
}
// Database is a wrapper for a sql.DB object
type Database struct {
Db *sql.DB
}
// RowsImpl is the implementation of Rows
type RowsImpl struct {
R *sql.Rows
}
// Exec executes a query without returning any rows.
// The args are for any placeholder parameters in the query.
func (d *Database) Exec(query string, args ...interface{}) (sql.Result, error) {
return d.Db.Exec(query, args...)
}
// Query executes a query that returns rows, typically a SELECT.
// The args are for any placeholder parameters in the query.
func (d *Database) Query(query string, args ...interface{}) (Rows, error) {
r, err := d.Db.Query(query, args...)
return RowsImpl{R: r}, err
}
// Close closes the Rows, preventing further enumeration. If Next is called
// and returns false and there are no further result sets,
// the Rows are closed automatically and it will suffice to check the
// result of Err. Close is idempotent and does not affect the result of Err.
func (r RowsImpl) Close() error {
return r.R.Close()
}
// Next prepares the next result row for reading with the Scan method. It
// returns true on success, or false if there is no next result row or an error
// happened while preparing it. Err should be consulted to distinguish between
// the two cases.
//
// Every call to Scan, even the first one, must be preceded by a call to Next.
func (r RowsImpl) Next() bool {
return r.R.Next()
}
// Scan copies the columns in the current row into the values pointed
// at by dest. The number of values in dest must be the same as the
// number of columns in Rows.
func (r RowsImpl) Scan(dest ...interface{}) error {
return r.R.Scan(dest...)
}
|
<reponame>DerSchmale/spirv4web
import { PlsFormat } from "./glsl";
export class PlsRemap
{
id: number;
format: PlsFormat;
constructor(id: number, format: PlsFormat)
{
this.id = id;
this.format = format;
}
}
|
def reverse_string(input_str: str) -> str:
return input_str[::-1]
|
import React from "react";
import { IconProps, withIcon } from "../withIcon";
import { ReactComponent as Icon } from "./spinner.svg";
import styles from "./styles.module.css";
export const IconSpinner: React.FC<IconProps> = (props) => (
<span className={styles.icon}>{withIcon(Icon)(props)}</span>
);
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# The version of Kibosh to use for testing.
# If you update this, also update tests/docker/Dockerfile
export KIBOSH_VERSION=8841dd392e6fbf02986e2fb1f1ebf04df344b65a
path_to_jdk_cache() {
jdk_version=$1
echo "/tmp/jdk-${jdk_version}.tar.gz"
}
fetch_jdk_tgz() {
jdk_version=$1
path=$(path_to_jdk_cache $jdk_version)
if [ ! -e $path ]; then
mkdir -p $(dirname $path)
curl -s -L "https://s3-us-west-2.amazonaws.com/kafka-packages/jdk-${jdk_version}.tar.gz" -o $path
fi
}
JDK_MAJOR="${JDK_MAJOR:-8}"
JDK_FULL="${JDK_FULL:-8u202-linux-x64}"
if [ -z `which javac` ]; then
apt-get -y update
apt-get install -y software-properties-common python-software-properties binutils java-common
echo "===> Installing JDK..."
mkdir -p /opt/jdk
cd /opt/jdk
rm -rf $JDK_MAJOR
mkdir -p $JDK_MAJOR
cd $JDK_MAJOR
fetch_jdk_tgz $JDK_FULL
tar x --strip-components=1 -zf $(path_to_jdk_cache $JDK_FULL)
for bin in /opt/jdk/$JDK_MAJOR/bin/* ; do
name=$(basename $bin)
update-alternatives --install /usr/bin/$name $name $bin 1081 && update-alternatives --set $name $bin
done
echo -e "export JAVA_HOME=/opt/jdk/$JDK_MAJOR\nexport PATH=\$PATH:\$JAVA_HOME/bin" > /etc/profile.d/jdk.sh
echo "JDK installed: $(javac -version 2>&1)"
fi
chmod a+rw /opt
if [ -h /opt/kafka-dev ]; then
# reset symlink
rm /opt/kafka-dev
fi
ln -s /vagrant /opt/kafka-dev
get_kafka() {
version=$1
scala_version=$2
kafka_dir=/opt/kafka-$version
url=https://s3-us-west-2.amazonaws.com/kafka-packages/kafka_$scala_version-$version.tgz
# the .tgz above does not include the streams test jar hence we need to get it separately
url_streams_test=https://s3-us-west-2.amazonaws.com/kafka-packages/kafka-streams-$version-test.jar
if [ ! -d /opt/kafka-$version ]; then
pushd /tmp
curl -O $url
curl -O $url_streams_test || true
file_tgz=`basename $url`
file_streams_jar=`basename $url_streams_test` || true
tar -xzf $file_tgz
rm -rf $file_tgz
file=`basename $file_tgz .tgz`
mv $file $kafka_dir
mv $file_streams_jar $kafka_dir/libs || true
popd
fi
}
# Install Kibosh
apt-get update -y && apt-get install -y git cmake pkg-config libfuse-dev
pushd /opt
rm -rf /opt/kibosh
git clone -q https://github.com/confluentinc/kibosh.git
pushd "/opt/kibosh"
git reset --hard $KIBOSH_VERSION
mkdir "/opt/kibosh/build"
pushd "/opt/kibosh/build"
../configure && make -j 2
popd
popd
popd
# Install iperf
apt-get install -y iperf traceroute
# Test multiple Kafka versions
# We want to use the latest Scala version per Kafka version
# Previously we could not pull in Scala 2.12 builds, because Scala 2.12 requires Java 8 and we were running the system
# tests with Java 7. We have since switched to Java 8, so 2.0.0 and later use Scala 2.12.
get_kafka 0.8.2.2 2.11
chmod a+rw /opt/kafka-0.8.2.2
get_kafka 0.9.0.1 2.11
chmod a+rw /opt/kafka-0.9.0.1
get_kafka 0.10.0.1 2.11
chmod a+rw /opt/kafka-0.10.0.1
get_kafka 0.10.1.1 2.11
chmod a+rw /opt/kafka-0.10.1.1
get_kafka 0.10.2.2 2.11
chmod a+rw /opt/kafka-0.10.2.2
get_kafka 0.11.0.3 2.11
chmod a+rw /opt/kafka-0.11.0.3
get_kafka 1.0.2 2.11
chmod a+rw /opt/kafka-1.0.2
get_kafka 1.1.1 2.11
chmod a+rw /opt/kafka-1.1.1
get_kafka 2.0.1 2.12
chmod a+rw /opt/kafka-2.0.1
get_kafka 2.1.1 2.12
chmod a+rw /opt/kafka-2.1.1
get_kafka 2.2.1 2.12
chmod a+rw /opt/kafka-2.2.1
get_kafka 2.3.0 2.12
chmod a+rw /opt/kafka-2.3.0
# For EC2 nodes, we want to use /mnt, which should have the local disk. On local
# VMs, we can just create it if it doesn't exist and use it like we'd use
# /tmp. Eventually, we'd like to also support more directories, e.g. when EC2
# instances have multiple local disks.
if [ ! -e /mnt ]; then
mkdir /mnt
fi
chmod a+rwx /mnt
# Run ntpdate once to sync to ntp servers
# use -u option to avoid port collision in case ntp daemon is already running
ntpdate -u pool.ntp.org
# Install ntp daemon - it will automatically start on boot
apt-get -y install ntp
# Increase the ulimit
mkdir -p /etc/security/limits.d
echo "* soft nofile 128000" >> /etc/security/limits.d/nofile.conf
echo "* hard nofile 128000" >> /etc/security/limits.d/nofile.conf
ulimit -Hn 128000
ulimit -Sn 128000
|
<reponame>advatar/esp
require "spec_helper"
require "webmock/rspec"
require "ffaker"
if ENV['CF_V2_RUN_INTEGRATION']
describe 'A new user tries to use CF against v2 production', :ruby19 => true do
before(:all) do
WebMock.allow_net_connect!
end
after(:all) do
WebMock.disable_net_connect!
end
let(:target) { ENV['CF_V2_TEST_TARGET'] }
let(:username) { ENV['CF_V2_TEST_USER'] }
let(:password) { ENV['CF_V2_TEST_PASSWORD'] }
let(:organization) { ENV['CF_V2_TEST_ORGANIZATION'] }
let(:client) do
client = CFoundry::V2::Client.new("https://#{target}")
client.login(:username => username, :password => password)
client
end
let(:new_user) { Faker::Internet.disposable_email("cf-test-user-#{Time.now.to_i}") }
before do
Interact::Progress::Dots.start!
login
end
after do
# TODO: do this when cf delete-user is implemented
#BlueShell::Runner.run("#{cf_bin} delete-user #{email}") do |runner|
# expect(runner).to say "Really delete user #{email}?>"
# runner.send_keys "y"
# expect(runner).to say "Deleting #{email}... OK"
#end
# TODO: not this.
client.login(:username => new_user, :password => password)
user = client.current_user
guid = user.guid
client.login(:username => username, :password => password)
user.delete!
logout
Interact::Progress::Dots.stop!
end
it "creates a new user" do
BlueShell::Runner.run("#{cf_bin} create-user") do |runner|
expect(runner).to say "Email>"
runner.send_keys new_user
expect(runner).to say "Password>"
runner.send_keys password
expect(runner).to say "Verify Password>"
runner.send_keys password
expect(runner).to say "Creating user... OK"
expect(runner).to say "Adding user to #{organization}... OK"
end
BlueShell::Runner.run("#{cf_bin} login #{new_user} --password #{password}") do |runner|
expect(runner).to say "Authenticating... OK"
end
end
end
else
$stderr.puts 'Skipping v2 integration specs; please provide environment variables'
end
|
def find_maximum(nums):
max = 0
for x in nums:
if x > max:
max = x
return max
nums = [5, 2, 4, 8, 9]
max_val = find_maximum(nums)
print(max_val)
|
import random
def random_element(lst):
return random.choice(lst)
|
<gh_stars>0
# Copyright (c) 2020, Pensando Systems
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Author: <NAME> <EMAIL>
import json
def get_web_call(url, session, *payload):
if not payload:
data = {}
else:
data = payload[0]
try:
api_ref = session.get(url, data=json.dumps(data))
except requests.exceptions.Timeout:
print('Network Timeout')
except requests.exceptions.TooManyRedirects:
print('too Many Redirects')
except requests.exceptions.RequestException as err:
print('Something went wrong')
raise SystemExit(err)
print (api_ref)
return api_ref
def post_web_call(url, session, data):
try:
api_ref = session.post(url, data)
except requests.exceptions.Timeout:
print('Network Timeout')
except requests.exceptions.TooManyRedirects:
print('too Many Redirects')
except requests.exceptions.RequestException as err:
print('Something went wrong')
raise SystemExit(err)
return api_ref
def get_psm_workloads(psm_ip, session):
url = psm_ip + 'configs/workload/v1/workloads'
return get_web_call(url, session).json()
def get_psm_cluster(psm_ip, session):
url = psm_ip +'configs/cluster/v1/cluster'
return get_web_call(url, session).json()
def get_flow_export_policy(psm_ip, session):
url = psm_ip + 'configs/monitoring/v1/flowExportPolicy'
return get_web_call(url, session).json()
def get_dsc(psm_ip, session):
url = psm_ip + 'configs/cluster/v1/distributedservicecards'
dsc = get_web_call(url, session).json()
# pull out mac address of DSCs
num_dsc = (dsc['list-meta']['total-count'])
dsc_list = []
for dscs in range(num_dsc):
dsc_list.append((dsc['items'][dscs]['meta']['name']))
return dsc, dsc_list
def get_config_snapshot(psm_ip, session):
url = psm_ip + '/configs/cluster/v1/config-snapshot'
return get_web_call(url, session).json()
def get_node1(psm_ip, session):
url = psm_ip + '/configs/cluster/v1/nodes/node1'
return get_web_call(url, session).json()
def get_alertpolices(psm_ip, session, tenant):
url = psm_ip + '/configs/monitoring/v1/watch/tenant/{t}/alertPolicies'.format(t=tenant)
return get_web_call(url, session).json()
def get_networksecuritypolicy(psm_ip, session, tenant):
url = psm_ip + '/configs/security/v1/{t}/default/networksecuritypolicies'.format(t=tenant)
return get_web_call(url, session).json()
def get_users(psm_ip, session, tenant):
url = psm_ip + '/configs/auth/v1/tenant/{t}/users'.format(t=tenant)
return get_web_call(url, session).json()
def get_images(psm_ip, session):
url = psm_ip + '/objstore/v1/tenant/default/images/objects'
return get_web_call(url, session).json()
def get_psm_metrics(psm_ip, session, psm_tenant, st, et):
data = {
"queries": [
{
"Kind": "Node",
"start-time": st,
"end-time": et
}
]
}
url = psm_ip + 'telemetry/v1/metrics'
return get_web_call(url, session, data).json()
def get_dsc_metrics(psm_ip, session, psm_tenant, interface, st, et):
data = {
"queries": [
{
"Kind": "DistributedServiceCard",
"selector": {
"requirements": [
{
"key": "reporterID",
"operator": "equals",
"values": [interface]
}
]
},
"start-time": st,
"end-time": et
}
]
}
url = psm_ip + 'telemetry/v1/metrics'
return get_web_call(url, session, data).json()
def get_uplink_metrics(psm_ip, session, psm_tenant, st, et):
data = {
"queries": [
{
"Kind": "MacMetrics",
"start-time": st,
"end-time": et
}
]
}
url = psm_ip + 'telemetry/v1/metrics'
return get_web_call(url, session, data).json()
def get_pf_metrics(psm_ip, session, psm_tenant, st, et):
data = {
"queries": [
{
"Kind": "LifMetrics",
"start-time": st,
"end-time": et
}
]
}
url = psm_ip + 'telemetry/v1/metrics'
return get_web_call(url, session, data).json()
def get_cluster_metrics(psm_ip, session, psm_tenant, st, et):
data = {
"queries": [
{
"Kind": "Cluster",
"start-time": st,
"end-time": et
}
]
}
url = psm_ip + 'telemetry/v1/metrics'
return get_web_call(url, session, data).json()
def get_fw_logs(psm_ip, session, psm_tenant, interface, st, et):
connector = '_'
extension = '.csv.gzip'
#generate the log first
url1 = '{psm}objstore/v1/tenant/{tenant}/fwlogs/objects?field-selector=' \
'start-time={start},end-time={end},dsc-id={int},vrf-name={tenant}'.format \
(psm=psm_ip, int=interface, tenant=psm_tenant, start=st, end=et)
t = get_web_call(url1, session)
#pull the download link from the log generation response
link = str(t.json()['items'][0]['meta']['name'])
formatLink = link.replace("/", "_")
#craft download url and download the data
url = '{psm}objstore/v1/downloads/tenant/default/fwlogs/{link}'.format(psm=psm_ip, link=formatLink)
w = get_web_call(url, session)
return w.content
def get_alerts(psm_ip, session, tenant):
data = {
"kind": "AlertPolicy",
"api-version": "v1",
"meta": {
"name": "alertPolicy1",
"tenant": tenant,
"namespace": "default"
}
}
url = psm_ip + 'configs/monitoring/v1/alerts'
return get_web_call(url, session, data).json()
|
//BSRR set and reset register
//5 set and 21 reset register
#include "stm32f4xx.h" // Device header
void delay(int seconds);
int main(void){
RCC->AHB1ENR |= 1;
GPIOA->MODER |= 0x400;
for(int i=0;i<5;i++){
GPIOA->BSRR |= 0x20; //Set register
delay(1000);
GPIOA->BSRR |= 0x200000; //Re-set register
delay(1000);
}
return 0;
}
void delay(int seconds){
for(;seconds>0;seconds--){
for(int count1=0;count1<3000;count1++);
}
}
|
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1],
K3 extends keyof T[K1][K2] = keyof T[K1][K2],
K4 extends keyof T[K1][K2][K3] = keyof T[K1][K3],
K5 extends keyof T[K1][K2][K3][K4] = keyof T[K1][K3][K4],
K6 extends keyof T[K1][K2][K3][K4][K5] = keyof T[K1][K3][K4][K5],
K7 extends keyof T[K1][K2][K3][K4][K5][K6] = keyof T[K1][K3][K4][K5][K6],
K8 extends keyof T[K1][K2][K3][K4][K5][K6][K7] = keyof T[K1][K3][K4][K5][K6][K7],
K9 extends keyof T[K1][K2][K3][K4][K5][K6][K7][K8] = keyof T[K1][K3][K4][K5][K6][K7][K8],
K10 extends keyof T[K1][K2][K3][K4][K5][K6][K7][K8][K9] = keyof T[K1][K2][K3][K4][K5][K6][K7][K8][K9]
>(
object: T,
path: [K1, K2, K3, K4, K5, K6, K7, K8, K9, K10, ...string[]],
defaultValue?: any,
): any
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1],
K3 extends keyof T[K1][K2] = keyof T[K1][K2],
K4 extends keyof T[K1][K2][K3] = keyof T[K1][K3],
K5 extends keyof T[K1][K2][K3][K4] = keyof T[K1][K3][K4],
K6 extends keyof T[K1][K2][K3][K4][K5] = keyof T[K1][K3][K4][K5],
K7 extends keyof T[K1][K2][K3][K4][K5][K6] = keyof T[K1][K3][K4][K5][K6],
K8 extends keyof T[K1][K2][K3][K4][K5][K6][K7] = keyof T[K1][K3][K4][K5][K6][K7],
K9 extends keyof T[K1][K2][K3][K4][K5][K6][K7][K8] = keyof T[K1][K3][K4][K5][K6][K7][K8],
K10 extends keyof T[K1][K2][K3][K4][K5][K6][K7][K8][K9] = keyof T[K1][K2][K3][K4][K5][K6][K7][K8][K9]
>(
object: T,
path: [K1, K2, K3, K4, K5, K6, K7, K8, K9, K10],
defaultValue?: any,
): T[K1][K2][K3][K4][K5][K6][K7][K8][K9][K10]
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1],
K3 extends keyof T[K1][K2] = keyof T[K1][K2],
K4 extends keyof T[K1][K2][K3] = keyof T[K1][K3],
K5 extends keyof T[K1][K2][K3][K4] = keyof T[K1][K3][K4],
K6 extends keyof T[K1][K2][K3][K4][K5] = keyof T[K1][K3][K4][K5],
K7 extends keyof T[K1][K2][K3][K4][K5][K6] = keyof T[K1][K3][K4][K5][K6],
K8 extends keyof T[K1][K2][K3][K4][K5][K6][K7] = keyof T[K1][K3][K4][K5][K6][K7],
K9 extends keyof T[K1][K2][K3][K4][K5][K6][K7][K8] = keyof T[K1][K3][K4][K5][K6][K7][K8]
>(
object: T,
path: [K1, K2, K3, K4, K5, K6, K7, K8, K9],
defaultValue?: any,
): T[K1][K2][K3][K4][K5][K6][K7][K8][K9]
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1],
K3 extends keyof T[K1][K2] = keyof T[K1][K2],
K4 extends keyof T[K1][K2][K3] = keyof T[K1][K3],
K5 extends keyof T[K1][K2][K3][K4] = keyof T[K1][K3][K4],
K6 extends keyof T[K1][K2][K3][K4][K5] = keyof T[K1][K3][K4][K5],
K7 extends keyof T[K1][K2][K3][K4][K5][K6] = keyof T[K1][K3][K4][K5][K6],
K8 extends keyof T[K1][K2][K3][K4][K5][K6][K7] = keyof T[K1][K3][K4][K5][K6][K7]
>(
object: T,
path: [K1, K2, K3, K4, K5, K6, K7, K8],
defaultValue?: any,
): T[K1][K2][K3][K4][K5][K6][K7][K8]
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1],
K3 extends keyof T[K1][K2] = keyof T[K1][K2],
K4 extends keyof T[K1][K2][K3] = keyof T[K1][K3],
K5 extends keyof T[K1][K2][K3][K4] = keyof T[K1][K3][K4],
K6 extends keyof T[K1][K2][K3][K4][K5] = keyof T[K1][K3][K4][K5],
K7 extends keyof T[K1][K2][K3][K4][K5][K6] = keyof T[K1][K3][K4][K5][K6]
>(
object: T,
path: [K1, K2, K3, K4, K5, K6, K7],
defaultValue?: any,
): T[K1][K2][K3][K4][K5][K6][K7]
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1],
K3 extends keyof T[K1][K2] = keyof T[K1][K2],
K4 extends keyof T[K1][K2][K3] = keyof T[K1][K3],
K5 extends keyof T[K1][K2][K3][K4] = keyof T[K1][K3][K4],
K6 extends keyof T[K1][K2][K3][K4][K5] = keyof T[K1][K3][K4][K5]
>(
object: T,
path: [K1, K2, K3, K4, K5, K6],
defaultValue?: any,
): T[K1][K2][K3][K4][K5][K6]
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1],
K3 extends keyof T[K1][K2] = keyof T[K1][K2],
K4 extends keyof T[K1][K2][K3] = keyof T[K1][K3],
K5 extends keyof T[K1][K2][K3][K4] = keyof T[K1][K3][K4]
>(
object: T,
path: [K1, K2, K3, K4, K5],
defaultValue?: any,
): T[K1][K2][K3][K4][K5]
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1],
K3 extends keyof T[K1][K2] = keyof T[K1][K2],
K4 extends keyof T[K1][K2][K3] = keyof T[K1][K3]
>(object: T, path: [K1, K2, K3, K4], defaultValue?: any): T[K1][K2][K3][K4]
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1],
K3 extends keyof T[K1][K2] = keyof T[K1][K2]
>(object: T, path: [K1, K2, K3], defaultValue?: any): T[K1][K2][K3]
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K1 extends keyof T = keyof T,
K2 extends keyof T[K1] = keyof T[K1]
>(object: T, path: [K1, K2], defaultValue?: any): T[K1][K2]
export default function get<
T extends { [K in keyof T]: T[K] } | any[],
K extends keyof T = keyof T
>(object: T, path: [K], defaultValue?: any): T[K]
/**
* Type-safe get function returns a nested value from an object. TypeScript
* will preserve the types up to 10 levels deep.
*/
export default function get<T extends object | any[], K1 extends keyof T>(
object: T,
path: [K1, ...string[]],
defaultValue?: any,
) {
const key = path[0]
if (!(key in object)) {
return defaultValue
}
const target = object[key]
return typeof target === 'object' && path.length > 1
? get(target, path.slice(1) as any, defaultValue)
: target
}
// @ts-ignore
module.exports = Object.assign(exports.default, exports)
|
import React, { useState } from 'react';
import { View, Image, TextInput, Button } from 'react-native';
const PhotoAlbum = () => {
const [photos, setPhotos] = useState([]);
const [caption, setCaption] = useState('');
const addPhoto = () => {
const newPhotos = [...photos, { url, caption }];
setPhotos(newPhotos);
setCaption('');
};
return (
<View style={{ flex: 1 }}>
{photos.map((photo, index) => (
<View key={index}>
<Image source={photo.url} />
<TextInput
placeholder='Caption'
value={photo.caption}
onChangeText={text => {
const newPhotos = [...photos];
newPhotos[index].caption = text;
setPhotos(newPhotos);
}}
/>
</View>
))}
<View>
<TextInput
placeholder='Image URL'
value={url}
onChangeText={setUrl}
/>
<TextInput
placeholder='Caption'
value={caption}
onChangeText={setCaption}
/>
<Button title='Add Photo' onPress={addPhoto} />
</View>
</View>
);
};
export default PhotoAlbum;
|
//
// BspTree.hpp
// walls3duino
//
// Created by <NAME> on 5/9/20.
// Copyright © 2020 <NAME>. All rights reserved.
//
#ifndef BspTree_hpp
#define BspTree_hpp
#include <stdint.h>
#include "Vec2.hpp"
#include "Wall.hpp"
// this class represents a binary space partitioning tree, and is a much-scaled-back version of the class
// of the same name in the walls3d program
// it supports only loading a pre-built tree from memory (and does not support building the tree itself)
// a number of steps have been taken to reduce the RAM footprint of this class (and subclasses) as much
// as possible, due to very-tight memory constraints on the embedded hardware
class BspTree
{
public:
using TraversalCbType = bool (*)(const Wall&, void* ptr);
private:
using NodeIdx = uint8_t;
static constexpr NodeIdx NullNodeIdx {0xFF};
[[noreturn]] static void Error();
// this node class is the most optimized in terms of RAM footprint
// (as it has the most instantiations)
class BspNode
{
public:
BspNode(const uint8_t* bytes, size_t& offset);
~BspNode() = default;
bool TraverseRender(BspNode* nodes, const Vec2& cameraLoc, TraversalCbType renderFunc, void* ptr);
// while the original walls3d version of this class uses pointers here (like a typical
// tree implementation would), we are instead using 1-byte indices, rather than 2-byte pointers
// on the embedded hardware, in order to make this class as compact as possible
// (requires that we have less than 255 nodes total)
NodeIdx backNodeIdx;
NodeIdx frontNodeIdx;
private:
Wall wall;
};
// a specialized helper class used for loading a tree from a file without
// using recursion
// (error handling is minimal/non-existent - it is assumed that this is used
// properly)
class NodeStack
{
public:
typedef struct
{
NodeIdx nodeIdx;
uint8_t numChildrenProcessed; // 0, 1, or 2 (only)
} NodeItem;
NodeStack():
count{0}
{}
size_t Count() { return count; }
void Push(NodeItem&& ni)
{
if (count >= MaxDepth)
BspTree::Error();
data[count++] = ni;
}
NodeItem& Peek()
{
if (count == 0)
BspTree::Error();
return data[count - 1];
}
void Pop()
{
if (count == 0)
BspTree::Error();
count--;
}
private:
static constexpr uint8_t MaxDepth {14};
uint8_t count;
NodeItem data[MaxDepth];
};
public:
BspTree();
~BspTree();
void LoadBin(const uint8_t* bytes);
void TraverseRender(const Vec2& cameraLoc, TraversalCbType renderFunc, void* ptr);
private:
NodeIdx ParseNode(const uint8_t* bytes, size_t& offset);
// very-specially-purposed helper functions for otherwise-duplicated code in loading tree from file
void ParseAndPush(const uint8_t* bytes, size_t& offset, NodeStack& ns, NodeIdx& nodeIdx);
void PopAndIncParent(NodeStack& ns);
// all nodes are stored here as a contiguous array in heap memory (vs. the typical implementation of
// a tree in which the nodes would be allocated individually and scattered throughout memory)
// this is done to reduce "wasted" memory on the embedded hardware by having a single heap allocation
// rather than one per node - each allocation was found to have an overhead of 2 bytes
BspNode* nodes;
uint8_t numNodes;
NodeIdx rootNodeIdx; // should always be either 0 or BspNode::NullNodeIdx
static constexpr size_t MaxNodes {50};
// this value represents a "null node" in the serialized data, and is chosen to
// not conflict with any reasonable value for what is otherwise a fixed-point
// representation of an x coordinate in the case of a "real" node
static constexpr int32_t SerNullNode {static_cast<int32_t>(0x7FFFFFFF)};
};
#endif /* BspTree_hpp */
|
<reponame>kennethdavidbuck/ember-cli-path-inspector<gh_stars>1-10
export {default} from '../../base';
|
class MonoMdk327 < Cask
version '3.2.7'
sha256 '261a9ed737f2e8185149857c1a8238bf26a32e7e28ae412c668841fcd77ebfcb'
url 'http://download.mono-project.com/archive/3.2.7/macos-10-x86/MonoFramework-MDK-3.2.7.macos10.xamarin.x86.pkg'
homepage 'http://mono-project.com/'
pkg 'MonoFramework-MDK-3.2.7.macos10.xamarin.x86.pkg'
uninstall :pkgutil => 'com.xamarin.mono-MDK.pkg'
end
|
/*******************************************************************************
* Copyright (c) 2016 comtel inc.
*
* Licensed under the Apache License, version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*******************************************************************************/
package org.jfxvnc.net.rfb.codec;
public interface ClientEventType {
int SET_PIXEL_FORMAT = 0;
int SET_ENCODINGS = 2;
int FRAMEBUFFER_UPDATE_REQUEST = 3;
int KEY_EVENT = 4;
int POINTER_EVENT = 5;
int CLIENT_CUT_TEXT = 6;
int AL = 255;
int VMWare_A = 254;
int VMWare_B = 127;
int GII = 253;
int TIGHT = 252;
int PO_SET_DESKTOP_SIZE = 251;
int CD_XVP = 250;
int OLIVE_CALL_CONTROL = 249;
}
|
class Curl {
public function setOpt($option, $value) {
// Implementation of setOpt method
}
public function get($url) {
// Implementation of get method
// Make HTTP GET request using cURL and return the response body
}
}
class HttpHandler {
const PATH = '/api/data';
public function makeGetRequest($originalUrl) {
$url = str_replace("{path}", self::PATH, $originalUrl);
$curl = new \Curl();
$curl->setOpt(CURLOPT_TIMEOUT, 10);
return $curl->get($url);
}
}
// Example usage
$httpHandler = new HttpHandler();
$response = $httpHandler->makeGetRequest('https://example.com/{path}/endpoint');
echo $response;
|
<filename>tapestry-core/src/main/java/org/apache/tapestry5/corelib/components/Doctype.java
// Copyright 2011 The Apache Software Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.tapestry5.corelib.components;
import org.apache.tapestry5.BindingConstants;
import org.apache.tapestry5.MarkupWriter;
import org.apache.tapestry5.annotations.Parameter;
/**
* Overrides the DOCTYPE of the rendered document (via {@link org.apache.tapestry5.dom.Document#dtd(String, String, String)}
* which can be useful when different component templates that render to the same document disagree about what the correct DOCTYPE
* is.
*
* @tapestrydoc
* @since 5.3
*/
public class Doctype
{
@Parameter(required = true, allowNull = false, defaultPrefix = BindingConstants.LITERAL)
private String name;
@Parameter(defaultPrefix = BindingConstants.LITERAL)
private String publicId, systemId;
boolean beginRender(MarkupWriter writer)
{
writer.getDocument().dtd(name, publicId, systemId);
return false;
}
}
|
#!/bin/sh
python -u -c 'import torch; print(torch.__version__)'
CODE_PATH=codes
DATA_PATH=data
SAVE_PATH=models
#The first four parameters must be provided
MODE=$1
MODEL=$2
DATASET=$3
GPU_DEVICE=$4
SAVE_ID=$5
FULL_DATA_PATH=$DATA_PATH/$DATASET
SAVE=$SAVE_PATH/"$MODEL"_"$DATASET"_"$SAVE_ID"
#Only used in training
BATCH_SIZE=$6
NEGATIVE_SAMPLE_SIZE=$7
HIDDEN_DIM=$8
GAMMA=$9
ALPHA=${10}
LEARNING_RATE=${11}
MAX_STEPS=${12}
TEST_BATCH_SIZE=${13}
GAMMA1=${15}
if [ $MODE == "train" ]
then
echo "Start Training......"
CUDA_VISIBLE_DEVICES=$GPU_DEVICE python3 -u $CODE_PATH/run.py --do_train \
--cuda \
--do_valid \
--do_test \
--data_path $FULL_DATA_PATH \
--model $MODEL \
-n $NEGATIVE_SAMPLE_SIZE -b $BATCH_SIZE -d $HIDDEN_DIM \
-g $GAMMA -a $ALPHA -adv \
-lr $LEARNING_RATE --max_steps $MAX_STEPS \
-save $SAVE --test_batch_size $TEST_BATCH_SIZE \
${14} ${16} ${17} ${18} ${19} ${20}
elif [ $MODE == "valid" ]
then
echo "Start Evaluation on Valid Data Set......"
CUDA_VISIBLE_DEVICES=$GPU_DEVICE python3 -u $CODE_PATH/run.py --do_valid --cuda -init $SAVE
elif [ $MODE == "test" ]
then
echo "Start Evaluation on Test Data Set......"
CUDA_VISIBLE_DEVICES=$GPU_DEVICE python3 -u $CODE_PATH/run.py --do_test --cuda -init $SAVE
elif [ $MODE == "experiment" ]
then
CUDA_VISIBLE_DEVICES=$GPU_DEVICE python3 -u $CODE_PATH/run.py --do_experiment \
--do_valid \
--do_test \
--data_path $FULL_DATA_PATH \
--model $MODEL \
-n $NEGATIVE_SAMPLE_SIZE -b $BATCH_SIZE -d $HIDDEN_DIM \
-g $GAMMA -a $ALPHA -adv \
-lr $LEARNING_RATE --max_steps $MAX_STEPS \
--log_steps 1000 \
-save $SAVE --test_batch_size $TEST_BATCH_SIZE \
--gamma1 ${15} \
${14} ${16} ${17} ${18} ${19} ${20}
else
echo "Unknown MODE" $MODE
fi
|
<reponame>ksh9241/java8_in_action<gh_stars>0
package java_8_in_action.stream_collector;
import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.toList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
public class Practice {
public static void main(String [] args) {
Trader raoul = new Trader("Raoul", "Combridge");
Trader mario = new Trader("Mario", "Milan");
Trader alan = new Trader("Alan", "Combridge");
Trader brian = new Trader("Brian", "Combridge");
List<Transaction> transaction = Arrays.asList(
new Transaction(brian, 2011, 300),
new Transaction(raoul, 2012, 1000),
new Transaction(raoul, 2011, 400),
new Transaction(mario, 2012, 710),
new Transaction(mario, 2012, 700),
new Transaction(alan, 2012, 950)
);
// 문제1. 2011년에 일어난 모든 트랜잭션을 찾아 오름차순으로 정리하시오
List<Transaction> result = transaction.stream().filter(a -> a.getYear() == 2011).sorted(Comparator.comparing(Transaction::getValue)).collect(toList());
System.out.println(result);
System.out.println();
// 문제2. 거래자가 근무하는 모든 도시를 중복 없이 나열하시오.
List<String> resultCity = transaction.stream().map(t -> t.getTrader().getCity()).distinct().collect(toList());
System.out.println(resultCity);
System.out.println();
// 문제3. 케임브리지에서 근무하는 모든 거래자를 찾아서 이름순으로 정렬하시오
List<Trader> resultName = transaction.stream().map(Transaction::getTrader).filter(a -> a.getCity().equals("Combridge")).distinct().sorted(comparing(Trader::getName)).collect(toList());
System.out.println(resultName);
System.out.println();
// 문제4. 모든 거래자의 이름을 알파벳순으로 정렬해서 반환하시오
List<Trader> resultNameAll = transaction.stream().map(Transaction::getTrader).sorted(comparing(Trader::getName)).distinct().collect(toList());
String traderStr = transaction.stream().map(t -> t.getTrader().getName()).distinct().sorted().reduce("" , (n1, n2) -> n1 + n2);
System.out.println(traderStr);
System.out.println();
// 문제5. 밀라노에 거래자가 있는가?
boolean resultCheck = transaction.stream().anyMatch(d -> d.getTrader().getCity().equals("Milan"));
if(resultCheck) {System.out.println("거래자있음");}
// 문제6. 케임브리지에 거주하는 거래자의 모든 트랜잭션값을 출력하시오
List<Integer> value = transaction.stream().filter(t -> t.getTrader().getCity().equals("Combridge")).map(Transaction::getValue).collect(toList());
System.out.println(value);
transaction.stream().filter(t -> t.getTrader().getCity().equals("Combridge")).map(Transaction::getValue).forEach(System.out::println);
System.out.println();
// 문제7. 전체 트랜잭션 중 최댓값은 얼마인가?
int max = transaction.stream().map(t -> t.getValue()).reduce(0, (a, b) -> a > b ? a : b);
System.out.println(max);
// 문제8. 전체 트랜잭션 중 최솟값은 얼마인가?
//Optional<Integer> minOp = transaction.stream().map(t -> t.getValue()).reduce(Integer::min);
Optional<Integer> minOp = transaction.stream().map(t -> t.getValue()).reduce((a, b) -> a > b ? b : a);
int min = minOp.get();
System.out.println(min);
}
}
class Trader {
String name;
String city;
public Trader() {}
public Trader(String name, String city) {
this.name = name;
this.city = city;
}
@Override
public String toString() {
return "name == " + this.name + " city == "+this.city;
}
public String getName() {
return this.name;
}
public String getCity () {
return this.city;
}
}
class Transaction {
Trader trader;
int year;
int value;
public Transaction () {}
public Transaction (Trader trader, int year, int value) {
this.trader = trader;
this.year = year;
this.value = value;
}
@Override
public String toString() {
return "trader == "+this.trader+" year == "+this.year+" value == "+this.value;
}
public int getYear() {
return this.year;
}
public Trader getTrader() {
return this.trader;
}
public int getValue() {
return this.value;
}
}
|
<filename>Algorithms_Training/Difficulty_Easy/Problem_8_Reverse_LinkedList/ts/Main.js
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
var _a;
Object.defineProperty(exports, "__esModule", { value: true });
var ListNode_1 = __importDefault(require("./ListNode"));
var root = new ListNode_1.default(1);
var next = null;
root.next = new ListNode_1.default(2);
next = root.next;
next.next = new ListNode_1.default(3);
next = next.next;
next.next = new ListNode_1.default(4);
next = next.next;
next.next = new ListNode_1.default(5);
root.Print();
function reverseList(head) {
var prev = null;
var node = head;
while (node !== null) {
var tmp = node.next;
node.next = prev;
prev = node;
node = tmp;
}
return prev;
}
;
(_a = reverseList(root)) === null || _a === void 0 ? void 0 : _a.Print();
|
<filename>Documentation/SoftwareGuide/Cover/Source/ImageReadExtractWriteRGB.cxx
/*=========================================================================
Program: Insight Segmentation & Registration Toolkit
Language: C++
Copyright (c) 2002 Insight Consortium. All rights reserved.
See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
#include "itkImageFileReader.h"
#include "itkImageFileWriter.h"
#include "itkRegionOfInterestImageFilter.h"
#include "itkImage.h"
#include "itkRGBPixel.h"
int main( int argc, char ** argv )
{
// Verify the number of parameters in the command line
if( argc < 9 )
{
std::cerr << "Usage: " << std::endl;
std::cerr << argv[0] << " inputImageFile outputImageFile " << std::endl;
std::cerr << " startX startY startZ sizeX sizeY sizeZ" << std::endl;
return -1;
}
typedef unsigned char PixelComponentType;
typedef itk::RGBPixel< PixelComponentType > PixelType;
const unsigned int Dimension = 3;
typedef itk::Image< PixelType, Dimension > ImageType;
typedef itk::ImageFileReader< ImageType > ReaderType;
typedef itk::ImageFileWriter< ImageType > WriterType;
typedef itk::RegionOfInterestImageFilter< ImageType, ImageType > FilterType;
FilterType::Pointer filter = FilterType::New();
ImageType::IndexType start;
start[0] = atoi( argv[3] );
start[1] = atoi( argv[4] );
start[2] = atoi( argv[5] );
ImageType::SizeType size;
size[0] = atoi( argv[6] );
size[1] = atoi( argv[7] );
size[2] = atoi( argv[8] );
ImageType::RegionType wantedRegion;
wantedRegion.SetSize( size );
wantedRegion.SetIndex( start );
filter->SetRegionOfInterest( wantedRegion );
ReaderType::Pointer reader = ReaderType::New();
WriterType::Pointer writer = WriterType::New();
//
// Here we recover the file names from the command line arguments
//
const char * inputFilename = argv[1];
const char * outputFilename = argv[2];
reader->SetFileName( inputFilename );
writer->SetFileName( outputFilename );
filter->SetInput( reader->GetOutput() );
writer->SetInput( filter->GetOutput() );
try
{
writer->Update();
}
catch( itk::ExceptionObject & err )
{
std::cout << "ExceptionObject caught !" << std::endl;
std::cout << err << std::endl;
return -1;
}
return 0;
}
|
export const INCREMENT = 'counter/INCREMENT'
export const DECREMENT = 'counter/DECREMENT'
const initialState = {
count: 0
}
export default (state = initialState, action) => {
switch (action.type) {
case INCREMENT:
return {
...state,
count: state.count + 1,
}
case DECREMENT:
return {
...state,
count: state.count - 1,
}
default:
return state
}
}
export const increment = () => {
return dispatch => {
dispatch({
type: INCREMENT
})
}
}
export const decrement = () => {
return dispatch => {
// dispatch({
// type: DECREMENT_REQUESTED
// })
dispatch({
type: DECREMENT
})
}
}
|
<gh_stars>0
package core
import (
"encoding/json"
"errors"
"github.com/aws/aws-lambda-go/events"
)
type SwitchableAPIGatewayRequest struct {
v interface{} // v is Always nil, or a pointer of APIGatewayProxyRequest or APIGatewayV2HTTPRequest
}
// NewSwitchableAPIGatewayRequestV1 creates a new SwitchableAPIGatewayRequest from APIGatewayProxyRequest
func NewSwitchableAPIGatewayRequestV1(v *events.APIGatewayProxyRequest) *SwitchableAPIGatewayRequest {
return &SwitchableAPIGatewayRequest{
v: v,
}
}
// NewSwitchableAPIGatewayRequestV2 creates a new SwitchableAPIGatewayRequest from APIGatewayV2HTTPRequest
func NewSwitchableAPIGatewayRequestV2(v *events.APIGatewayV2HTTPRequest) *SwitchableAPIGatewayRequest {
return &SwitchableAPIGatewayRequest{
v: v,
}
}
// MarshalJSON is a pass through serialization
func (s *SwitchableAPIGatewayRequest) MarshalJSON() ([]byte, error) {
return json.Marshal(s.v)
}
// UnmarshalJSON is a switching serialization based on the presence of fields in the
// source JSON, multiValueQueryStringParameters for APIGatewayProxyRequest and rawQueryString for
// APIGatewayV2HTTPRequest.
func (s *SwitchableAPIGatewayRequest) UnmarshalJSON(b []byte) error {
delta := map[string]json.RawMessage{}
if err := json.Unmarshal(b, &delta); err != nil {
return err
}
_, v1test := delta["multiValueQueryStringParameters"]
_, v2test := delta["rawQueryString"]
s.v = nil
if v1test && !v2test {
s.v = &events.APIGatewayProxyRequest{}
} else if !v1test && v2test {
s.v = &events.APIGatewayV2HTTPRequest{}
} else {
return errors.New("unable to determine request version")
}
return json.Unmarshal(b, s.v)
}
// Version1 returns the contained events.APIGatewayProxyRequest or nil
func (s *SwitchableAPIGatewayRequest) Version1() *events.APIGatewayProxyRequest {
switch v := s.v.(type) {
case *events.APIGatewayProxyRequest:
return v
case events.APIGatewayProxyRequest:
return &v
}
return nil
}
// Version2 returns the contained events.APIGatewayV2HTTPRequest or nil
func (s *SwitchableAPIGatewayRequest) Version2() *events.APIGatewayV2HTTPRequest {
switch v := s.v.(type) {
case *events.APIGatewayV2HTTPRequest:
return v
case events.APIGatewayV2HTTPRequest:
return &v
}
return nil
}
|
#!/bin/bash
cd
mkdir -p software
cd software
git clone https://aur.archlinux.org/yay.git
cd yay
makepkg -si --noconfirm
cd ..
yay --builddir .
yay --save
|
<gh_stars>10-100
# -*- coding:utf-8 -*-
#
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for the hooks.py module."""
from __future__ import print_function
import hooks
import unittest
class RepoHookShebang(unittest.TestCase):
"""Check shebang parsing in RepoHook."""
def test_no_shebang(self):
"""Lines w/out shebangs should be rejected."""
DATA = (
'',
'# -*- coding:utf-8 -*-\n',
'#\n# foo\n',
'# Bad shebang in script\n#!/foo\n'
)
for data in DATA:
self.assertIsNone(hooks.RepoHook._ExtractInterpFromShebang(data))
def test_direct_interp(self):
"""Lines whose shebang points directly to the interpreter."""
DATA = (
('#!/foo', '/foo'),
('#! /foo', '/foo'),
('#!/bin/foo ', '/bin/foo'),
('#! /usr/foo ', '/usr/foo'),
('#! /usr/foo -args', '/usr/foo'),
)
for shebang, interp in DATA:
self.assertEqual(hooks.RepoHook._ExtractInterpFromShebang(shebang),
interp)
def test_env_interp(self):
"""Lines whose shebang launches through `env`."""
DATA = (
('#!/usr/bin/env foo', 'foo'),
('#!/bin/env foo', 'foo'),
('#! /bin/env /bin/foo ', '/bin/foo'),
)
for shebang, interp in DATA:
self.assertEqual(hooks.RepoHook._ExtractInterpFromShebang(shebang),
interp)
|
var structarmnn_utils_1_1_csv_row =
[
[ "values", "structarmnn_utils_1_1_csv_row.xhtml#af80d24ad6806a497ec21dc835c28b7ce", null ]
];
|
<filename>app/controllers/storageBackends/AzureObjectBackend.scala
/*
* Copyright 2017 - Swiss Data Science Center (SDSC)
* A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
* Eidgenössische Technische Hochschule Zürich (ETHZ).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.storageBackends
import java.io.{ PipedInputStream, PipedOutputStream }
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Source, StreamConverters }
import akka.util.ByteString
import com.microsoft.azure.storage._
import com.microsoft.azure.storage.blob.{ CloudBlobClient, CopyStatus }
import javax.inject._
import models.Repository
import play.api.Configuration
import play.api.libs.concurrent.ActorSystemProvider
import play.api.libs.streams.Accumulator
import play.api.mvc.Results._
import play.api.mvc._
import scala.concurrent.duration._
import scala.concurrent.{ Await, ExecutionContext, Future, blocking }
import scala.util.Try
import scala.util.matching.Regex
@Singleton
class AzureObjectBackend @Inject() (
config: Configuration,
actorSystemProvider: ActorSystemProvider,
implicit val ec: ExecutionContext
) extends ObjectBackend {
private[this] val subConfig = config.get[Configuration]( "storage.backend.azure" )
lazy val account: CloudStorageAccount = CloudStorageAccount.parse( subConfig.get[String]( "connection_string" ) )
lazy val serviceClient: CloudBlobClient = account.createCloudBlobClient
val RangePattern: Regex = """bytes=(\d+)?-(\d+)?.*""".r
def read( request: RequestHeader, bucket: String, name: String ): Option[Source[ByteString, _]] = {
val container = serviceClient.getContainerReference( bucket )
if ( container.exists() ) {
val blob = container.getBlockBlobReference( name )
if ( blob.exists() ) {
Some( StreamConverters.fromInputStream( () => {
val CHUNK_SIZE = 1048576
// Pipe for getting data from the download thread
val outputStream = new PipedOutputStream()
val inputStream = new PipedInputStream( outputStream )
// listening to the download completion to close the pipe
val oc = new OperationContext()
val sem = new StorageEventMultiCaster[RequestCompletedEvent, StorageEvent[RequestCompletedEvent]]()
sem.addListener( new StorageEvent[RequestCompletedEvent] {
override def eventOccurred( eventArg: RequestCompletedEvent ): Unit = {
outputStream.close()
}
} )
oc.setRequestCompletedEventHandler( sem )
// asynchronously start the download
new Thread( new Runnable {
override def run() = {
request.headers.get( "Range" ) match {
case Some( RangePattern( null, to ) ) => blob.downloadRange( 0, to.toLong, outputStream, null, null, oc )
case Some( RangePattern( from, null ) ) => blob.downloadRange( from.toLong, null, outputStream, null, null, oc )
case Some( RangePattern( from, to ) ) => blob.downloadRange( from.toLong, to.toLong, outputStream, null, null, oc )
case _ => blob.downloadRange( 0, null, outputStream, null, null, oc )
}
}
} ).start()
inputStream
} ) )
}
else {
None
}
}
else {
None
}
}
def write( req: RequestHeader, bucket: String, name: String, callback: ( Any, Future[String] ) => Any ): Accumulator[ByteString, Result] = {
implicit val actorSystem: ActorSystem = actorSystemProvider.get
implicit val mat: ActorMaterializer = ActorMaterializer()
val container = serviceClient.getContainerReference( bucket )
val size = req.headers.get( "Content-Length" )
if ( container.exists() ) {
Accumulator.source[ByteString].mapFuture { source =>
Future {
val inputStream = source.alsoToMat( new ChecksumSink() )( callback ).runWith(
StreamConverters.asInputStream( FiniteDuration( 3, TimeUnit.SECONDS ) )
)
val blob = container.getBlockBlobReference( name )
// for some reason the declared size cannot be exactly the size of the input !!
blob.upload( inputStream, size.map( _.toLong.+( 1 ) ).getOrElse( -1 ) )
inputStream.close()
Created
}
}
}
else
Accumulator.done( NotFound )
}
def createRepo( request: Repository ): Future[Option[String]] = Future {
Try {
val uuid = request.uuid.toString
serviceClient.getContainerReference( uuid ).createIfNotExists()
uuid
}.toOption
}
def duplicateFile( request: RequestHeader, fromBucket: String, fromName: String, toBucket: String, toName: String ): Boolean = {
val fromContainer = serviceClient.getContainerReference( fromBucket )
val toContainer = serviceClient.getContainerReference( toBucket )
( fromContainer.exists() && toContainer.exists() ) && {
val fromBlob = fromContainer.getBlockBlobReference( fromName )
val toBlob = toContainer.getBlockBlobReference( toName )
toBlob.startCopy( fromBlob )
def waitForIt( timeout: Deadline ): Future[Boolean] = {
toBlob.downloadAttributes()
if ( toBlob.getCopyState.getStatus ne CopyStatus.PENDING ) {
if ( timeout.hasTimeLeft() )
Future {
blocking( Thread.sleep( 1000 ) )
}.flatMap { _ => waitForIt( timeout ) }
else
Future.successful( false )
}
else {
Future.successful( toBlob.getCopyState.getStatus eq CopyStatus.SUCCESS )
}
}
Await.result( waitForIt( Deadline( 5.minutes ) ), Duration.Inf )
}
}
}
|
def substrings_freq(string):
result = {}
n = len(string)
for i in range(n):
for j in range(i+1, n+1):
substring = string[i:j]
if len(substring) == 3:
if substring in result:
result[substring] += 1
else:
result[substring] = 1
return result
s = "Hello World"
result = substrings_freq(s)
print(result)
|
<filename>src/App.js
import React from "react"
import { BrowserRouter, Route, Redirect, Switch } from "react-router-dom";
// pages
import NucleoIcons from "views/NucleoIcons.js";
import LandingPage from "views/examples/LandingPage.js";
import ProfilePage from "views/examples/ProfilePage.js";
import RegisterPage from "views/examples/RegisterPage.js";
const App = () => {
return (
<BrowserRouter>
<Switch>
<Route
path="/nucleo"
render={(props) => <NucleoIcons {...props} />}
/>
<Route
path="/landing"
render={(props) => <LandingPage {...props} />}
/>
<Route
path="/profile"
render={(props) => <ProfilePage {...props} />}
/>
<Route
path="/register"
render={(props) => <RegisterPage {...props} />}
/>
<Redirect exact to="/landing" />
<Route path="*" render={() => "404 Not found!"} />
</Switch>
</BrowserRouter>
)
}
export default App
|
<gh_stars>0
// Copyright (c) 2017-2018 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package updateprocessors_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
kapiv1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
apiv3 "github.com/unai-ttxu/libcalico-go/lib/apis/v3"
"github.com/unai-ttxu/libcalico-go/lib/backend/k8s/conversion"
"github.com/unai-ttxu/libcalico-go/lib/backend/model"
"github.com/unai-ttxu/libcalico-go/lib/backend/syncersv1/updateprocessors"
cnet "github.com/unai-ttxu/libcalico-go/lib/net"
"github.com/unai-ttxu/libcalico-go/lib/numorstring"
)
func mustParseCIDR(cidr string) *cnet.IPNet {
ipn := cnet.MustParseCIDR(cidr)
return &ipn
}
var _ = Describe("Test the NetworkPolicy update processor", func() {
name1 := "name1"
name2 := "name2"
ns1 := "namespace1"
ns2 := "namespace2"
v3NetworkPolicyKey1 := model.ResourceKey{
Kind: apiv3.KindNetworkPolicy,
Name: name1,
Namespace: ns1,
}
v3NetworkPolicyKey2 := model.ResourceKey{
Kind: apiv3.KindNetworkPolicy,
Name: name2,
Namespace: ns2,
}
v1NetworkPolicyKey1 := model.PolicyKey{
Name: ns1 + "/" + name1,
}
v1NetworkPolicyKey2 := model.PolicyKey{
Name: ns2 + "/" + name2,
}
It("should handle conversion of valid NetworkPolicys", func() {
up := updateprocessors.NewNetworkPolicyUpdateProcessor()
By("converting a NetworkPolicy with minimum configuration")
res := apiv3.NewNetworkPolicy()
res.Name = name1
res.Namespace = ns1
kvps, err := up.Process(&model.KVPair{
Key: v3NetworkPolicyKey1,
Value: res,
Revision: "abcde",
})
Expect(err).NotTo(HaveOccurred())
Expect(kvps).To(HaveLen(1))
Expect(kvps[0]).To(Equal(&model.KVPair{
Key: v1NetworkPolicyKey1,
Value: &model.Policy{
Namespace: ns1,
Selector: "projectcalico.org/namespace == 'namespace1'",
ApplyOnForward: true,
},
Revision: "abcde",
}))
By("adding another NetworkPolicy with a full configuration")
res = apiv3.NewNetworkPolicy()
v4 := 4
itype := 1
intype := 3
icode := 4
incode := 6
iproto := numorstring.ProtocolFromString("TCP")
inproto := numorstring.ProtocolFromString("UDP")
port80 := numorstring.SinglePort(uint16(80))
port443 := numorstring.SinglePort(uint16(443))
irule := apiv3.Rule{
Action: apiv3.Allow,
IPVersion: &v4,
Protocol: &iproto,
ICMP: &apiv3.ICMPFields{
Type: &itype,
Code: &icode,
},
NotProtocol: &inproto,
NotICMP: &apiv3.ICMPFields{
Type: &intype,
Code: &incode,
},
Source: apiv3.EntityRule{
Nets: []string{"10.100.10.1"},
Selector: "mylabel = value1",
Ports: []numorstring.Port{port80},
NotNets: []string{"192.168.40.1"},
NotSelector: "has(label1)",
NotPorts: []numorstring.Port{port443},
},
Destination: apiv3.EntityRule{
Nets: []string{"10.100.1.1"},
Selector: "",
Ports: []numorstring.Port{port443},
NotNets: []string{"192.168.80.1"},
NotSelector: "has(label2)",
NotPorts: []numorstring.Port{port80},
},
}
etype := 2
entype := 7
ecode := 5
encode := 8
eproto := numorstring.ProtocolFromInt(uint8(30))
enproto := numorstring.ProtocolFromInt(uint8(62))
erule := apiv3.Rule{
Action: apiv3.Allow,
IPVersion: &v4,
Protocol: &eproto,
ICMP: &apiv3.ICMPFields{
Type: &etype,
Code: &ecode,
},
NotProtocol: &enproto,
NotICMP: &apiv3.ICMPFields{
Type: &entype,
Code: &encode,
},
Source: apiv3.EntityRule{
Nets: []string{"10.100.1.1"},
Selector: "pcns.namespacelabel1 == 'value1'",
Ports: []numorstring.Port{port443},
NotNets: []string{"192.168.80.1"},
NotSelector: "has(label2)",
NotPorts: []numorstring.Port{port80},
},
Destination: apiv3.EntityRule{
Nets: []string{"10.100.10.1"},
Selector: "pcns.namespacelabel2 == 'value2'",
Ports: []numorstring.Port{port80},
NotNets: []string{"192.168.40.1"},
NotSelector: "has(label1)",
NotPorts: []numorstring.Port{port443},
},
}
order := float64(101)
selector := "mylabel == selectme"
res.Name = name2
res.Namespace = ns2
res.Spec.Order = &order
res.Spec.Ingress = []apiv3.Rule{irule}
res.Spec.Egress = []apiv3.Rule{erule}
res.Spec.Selector = selector
res.Spec.Types = []apiv3.PolicyType{apiv3.PolicyTypeIngress}
kvps, err = up.Process(&model.KVPair{
Key: v3NetworkPolicyKey2,
Value: res,
Revision: "1234",
})
Expect(err).NotTo(HaveOccurred())
namespacedSelector := "(" + selector + ") && projectcalico.org/namespace == '" + ns2 + "'"
v1irule := updateprocessors.RuleAPIV2ToBackend(irule, ns2)
v1erule := updateprocessors.RuleAPIV2ToBackend(erule, ns2)
Expect(kvps).To(Equal([]*model.KVPair{
{
Key: v1NetworkPolicyKey2,
Value: &model.Policy{
Namespace: ns2,
Order: &order,
InboundRules: []model.Rule{v1irule},
OutboundRules: []model.Rule{v1erule},
Selector: namespacedSelector,
ApplyOnForward: true,
Types: []string{"ingress"},
},
Revision: "1234",
},
}))
By("deleting the first network policy")
kvps, err = up.Process(&model.KVPair{
Key: v3NetworkPolicyKey1,
Value: nil,
})
Expect(err).NotTo(HaveOccurred())
Expect(kvps).To(Equal([]*model.KVPair{
{
Key: v1NetworkPolicyKey1,
Value: nil,
},
}))
})
It("should fail to convert an invalid resource", func() {
up := updateprocessors.NewNetworkPolicyUpdateProcessor()
By("trying to convert with the wrong key type")
res := apiv3.NewNetworkPolicy()
_, err := up.Process(&model.KVPair{
Key: model.GlobalBGPPeerKey{
PeerIP: cnet.MustParseIP("1.2.3.4"),
},
Value: res,
Revision: "abcde",
})
Expect(err).To(HaveOccurred())
By("trying to convert with the wrong value type")
wres := apiv3.NewHostEndpoint()
kvps, err := up.Process(&model.KVPair{
Key: v3NetworkPolicyKey1,
Value: wres,
Revision: "abcde",
})
Expect(err).NotTo(HaveOccurred())
Expect(kvps).To(Equal([]*model.KVPair{
{
Key: v1NetworkPolicyKey1,
Value: nil,
},
}))
By("trying to convert without enough information to create a v1 key")
eres := apiv3.NewNetworkPolicy()
v3NetworkPolicyKeyEmpty := model.ResourceKey{
Kind: apiv3.KindNetworkPolicy,
}
_, err = up.Process(&model.KVPair{
Key: v3NetworkPolicyKeyEmpty,
Value: eres,
Revision: "abcde",
})
Expect(err).To(HaveOccurred())
})
})
// Define network policies and the corresponding expected v1 KVPairs.
//
// np1 is a NetworkPolicy with a single Egress rule, which contains ports only,
// and no selectors.
var protocol = kapiv1.ProtocolTCP
var port = intstr.FromInt(80)
var np1 = networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "test.policy",
Namespace: "default",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
Egress: []networkingv1.NetworkPolicyEgressRule{
networkingv1.NetworkPolicyEgressRule{
Ports: []networkingv1.NetworkPolicyPort{
networkingv1.NetworkPolicyPort{
Protocol: &protocol,
Port: &port,
},
},
},
},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
},
}
// expected1 is the expected v1 KVPair representation of np1 from above.
var tcp = numorstring.ProtocolFromStringV1("tcp")
var port80 = numorstring.SinglePort(uint16(80))
var order float64 = 1000.0
var expected1 = []*model.KVPair{
&model.KVPair{
Key: model.PolicyKey{Name: "default/knp.default.test.policy"},
Value: &model.Policy{
Namespace: "default",
Order: &order,
Selector: "(projectcalico.org/orchestrator == 'k8s') && projectcalico.org/namespace == 'default'",
Types: []string{"egress"},
ApplyOnForward: true,
OutboundRules: []model.Rule{
{
Action: "allow",
Protocol: &tcp,
SrcSelector: "",
DstSelector: "",
DstPorts: []numorstring.Port{port80},
},
},
},
},
}
// np2 is a NeteworkPolicy with a single Ingress rule which allows from all namespaces.
var np2 = networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "test.policy",
Namespace: "default",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
Ingress: []networkingv1.NetworkPolicyIngressRule{
networkingv1.NetworkPolicyIngressRule{
From: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{},
},
},
},
},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
},
}
var expected2 = []*model.KVPair{
&model.KVPair{
Key: model.PolicyKey{Name: "default/knp.default.test.policy"},
Value: &model.Policy{
Namespace: "default",
Order: &order,
Selector: "(projectcalico.org/orchestrator == 'k8s') && projectcalico.org/namespace == 'default'",
Types: []string{"ingress"},
ApplyOnForward: true,
InboundRules: []model.Rule{
{
Action: "allow",
SrcSelector: "(has(projectcalico.org/namespace)) && (projectcalico.org/orchestrator == 'k8s')",
DstSelector: "",
OriginalSrcSelector: "projectcalico.org/orchestrator == 'k8s'",
OriginalSrcNamespaceSelector: "all()",
},
},
},
},
}
var _ = Describe("Test the NetworkPolicy update processor + conversion", func() {
up := updateprocessors.NewNetworkPolicyUpdateProcessor()
DescribeTable("NetworkPolicy update processor + conversion tests",
func(np networkingv1.NetworkPolicy, expected []*model.KVPair) {
// First, convert the NetworkPolicy using the k8s conversion logic.
c := conversion.Converter{}
kvp, err := c.K8sNetworkPolicyToCalico(&np)
Expect(err).NotTo(HaveOccurred())
// Next, run the policy through the update processor.
out, err := up.Process(kvp)
Expect(err).NotTo(HaveOccurred())
// Finally, assert the expected result.
Expect(out).To(Equal(expected))
},
Entry("should handle a NetworkPolicy with no rule selectors", np1, expected1),
Entry("should handle a NetworkPolicy with an empty ns selector", np2, expected2),
)
})
|
#include <tree.h>
#include <kstring.h>
void tree_node_init(tree_node_t* node) {
memset(node, 0, sizeof(tree_node_t));
}
void tree_add(tree_node_t* father, tree_node_t* node) {
if(father == NULL || node == NULL)
return;
node->father = father;
if(father->eChild == NULL) {
father->fChild = node;
}
else {
father->eChild->next = node;
node->prev = father->eChild;
}
father->size++;
father->eChild = node;
}
void tree_del(tree_node_t* node, free_func_t fr) {
if(node == NULL)
return;
/*free children*/
tree_node_t* c = node->fChild;
while(c != NULL) {
tree_node_t* next = c->next;
tree_del(c, fr);
c = next;
}
tree_node_t* father = node->father;
if(father != NULL) {
if(father->fChild == node)
father->fChild = node->next;
if(father->eChild == node)
father->eChild = node->prev;
father->size--;
}
if(node->next != NULL)
node->next->prev = node->prev;
if(node->prev != NULL)
node->prev->next = node->next;
/*free node content*/
if(node->data != NULL)
fr(node->data);
fr(node);
}
|
<filename>webpack.mix.js<gh_stars>0
const { mix } = require('laravel-mix');
/*
|--------------------------------------------------------------------------
| Mix Asset Management
|--------------------------------------------------------------------------
|
| Mix provides a clean, fluent API for defining some Webpack build steps
| for your Laravel application. By default, we are compiling the Sass
| file for the application as well as bundling up all the JS files.
|
*/
mix.js([
'resources/assets/js/app.js',
'resources/assets/bootstrap/js/daterangepicker.js',
'resources/assets/bootstrap/js/bootstrap-datepicker.js',
'resources/assets/bootstrap/js/jquery.slimscroll.min.js',
'resources/assets/bootstrap/js/fastclick.min.js',
'resources/assets/bootstrap/js/app.js'
], 'public/js/app.js')
.styles([
'resources/assets/bootstrap/css/bootstrap.min.css',
'resources/assets/bootstrap/css/font-awesome.min.css',
'resources/assets/bootstrap/css/ionicons.min.css',
'resources/assets/bootstrap/css/AdminLTE.min.css',
'resources/assets/bootstrap/css/_all-skins.min.css',
'resources/assets/bootstrap/css/blue.css'
], 'public/css/app.css')
.options({
processCssUrls: false
})
.version();
mix.copy('resources/assets/bootstrap/fonts', 'public/fonts');
|
function resolveModuleDependencies(modules: ModuleMetadata[]): string[] {
const moduleMap = new Map<string, ModuleMetadata>();
// Populate module map for easy access
modules.forEach(module => {
moduleMap.set(module.declarations[0], module);
});
const visited = new Set<string>();
const result: string[] = [];
function dfs(moduleName: string) {
if (visited.has(moduleName)) return;
visited.add(moduleName);
const module = moduleMap.get(moduleName);
if (module) {
module.imports.forEach(importedModule => {
dfs(importedModule);
});
module.exports.forEach(exportedComponent => {
result.push(exportedComponent);
});
}
}
modules.forEach(module => {
dfs(module.declarations[0]);
});
return Array.from(new Set(result)); // Remove duplicates
}
|
<filename>reinvent-2020/RhythmCloud/analytics/rhythm-analyzer/flink-application/src/main/java/com/amazonaws/rhythmcloud/io/TimestreamDBSink.java
package com.amazonaws.rhythmcloud.io;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
import software.amazon.awssdk.core.retry.RetryPolicy;
import software.amazon.awssdk.core.retry.backoff.BackoffStrategy;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.services.timestreamwrite.TimestreamWriteClient;
import software.amazon.awssdk.services.timestreamwrite.model.Dimension;
import software.amazon.awssdk.services.timestreamwrite.model.Record;
import software.amazon.awssdk.services.timestreamwrite.model.WriteRecordsRequest;
import software.amazon.awssdk.services.timestreamwrite.model.WriteRecordsResponse;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class TimestreamDBSink extends RichSinkFunction<TimestreamPoint>
implements CheckpointedFunction {
private static final Logger LOG = LoggerFactory.getLogger(TimestreamDBSink.class);
private final TimestreamDBConfig timestreamDBConfig;
private transient TimestreamWriteClient timestreamWriteClient;
private List<Record> bufferedRecords;
private transient ListState<Record> checkpointedState;
private long emptyListTimestamp;
public TimestreamDBSink(TimestreamDBConfig timestreamDBConfig) {
this.timestreamDBConfig =
Preconditions.checkNotNull(
timestreamDBConfig, "TimestreamDB client config should not be null");
this.bufferedRecords = new ArrayList<>();
this.emptyListTimestamp = System.currentTimeMillis();
}
@Override
public void snapshotState(FunctionSnapshotContext functionSnapshotContext) throws Exception {
checkpointedState.clear();
for (Record element : bufferedRecords) {
checkpointedState.add(element);
}
}
@Override
public void initializeState(FunctionInitializationContext functionInitializationContext)
throws Exception {
ListStateDescriptor<Record> descriptor = new ListStateDescriptor<>("recordList", Record.class);
checkpointedState =
functionInitializationContext.getOperatorStateStore().getListState(descriptor);
if (functionInitializationContext.isRestored()) {
for (Record element : checkpointedState.get()) {
bufferedRecords.add(element);
}
}
}
@Override
public void open(Configuration parameters) throws Exception {
super.open(parameters);
RetryPolicy.Builder retryPolicy =
RetryPolicy.builder()
.numRetries(timestreamDBConfig.getMaxErrorRetryLimit())
.backoffStrategy(BackoffStrategy.defaultThrottlingStrategy());
ApacheHttpClient.Builder httpClientBuilder =
ApacheHttpClient.builder().maxConnections(timestreamDBConfig.getMaxConnections());
ClientOverrideConfiguration.Builder overrideConfig =
ClientOverrideConfiguration.builder()
.apiCallAttemptTimeout(Duration.ofMillis(timestreamDBConfig.getRequestTimeout()))
.retryPolicy(retryPolicy.build());
this.timestreamWriteClient =
TimestreamWriteClient.builder()
.httpClientBuilder(httpClientBuilder)
.overrideConfiguration(overrideConfig.build())
.region(timestreamDBConfig.getRegion())
.build();
}
@Override
public void close() throws Exception {
super.close();
}
@Override
public void invoke(TimestreamPoint value, Context context) throws Exception {
// Add the record to the buffer
List<Dimension> dimensions = new ArrayList<>();
for (Map.Entry<String, String> entry : value.getDimensions().entrySet()) {
Dimension dim = Dimension.builder().name(entry.getKey()).value(entry.getValue()).build();
dimensions.add(dim);
}
Record measure =
Record.builder()
.dimensions(dimensions)
.measureName(value.getMeasureName())
.measureValueType(value.getMeasureValueType())
.measureValue(value.getMeasureValue())
.timeUnit(value.getTimeUnit())
.time(String.valueOf(value.getTime()))
.build();
bufferedRecords.add(measure);
// If buffer is full or time to publish
if (shouldPublish()) {
WriteRecordsRequest writeRecordsRequest =
WriteRecordsRequest.builder()
.databaseName(this.timestreamDBConfig.getDatabaseName())
.tableName(this.timestreamDBConfig.getTableName())
.records(this.bufferedRecords)
.build();
try {
WriteRecordsResponse writeRecordsResponse =
this.timestreamWriteClient.writeRecords(writeRecordsRequest);
LOG.debug("writeRecords Status: " + writeRecordsResponse.sdkHttpResponse().statusCode());
this.bufferedRecords.clear();
this.emptyListTimestamp = System.currentTimeMillis();
} catch (Exception e) {
LOG.error("Error: " + e);
}
}
}
// Method to validate if record batch should be published.
// This method would return true if the accumulated records has reached the batch size.
// Or if records have been accumulated for last RECORDS_FLUSH_INTERVAL_MILLISECONDS time interval.
private boolean shouldPublish() {
if (bufferedRecords.size() == timestreamDBConfig.getBatchSize()) {
LOG.debug("Batch of size " + bufferedRecords.size() + " should get published");
return true;
} else if (System.currentTimeMillis() - emptyListTimestamp
>= timestreamDBConfig.getRecordFlushInterval()) {
LOG.debug("Records after flush interval should get published");
return true;
}
return false;
}
}
|
<reponame>chlds/util<filename>lib/car/obj/src/encode_surrogate_w.c
/*
Encode a character into bytes based on UTF-8.
Remarks:
Return the number of encoded bytes.
*/
# define CAR
# include "../../../incl/config.h"
signed(__cdecl encode_surrogate_w(signed char(**di),signed short(second),signed short(first))) {
auto signed utf_16 = (0xFFFF);
auto signed char *b;
auto signed i,r;
auto signed short surrog;
if(!di) return(0x00);
if(*di) return(0x00);
b = (0x00);
if(!(concatenate(0x04,&b,0x00))) return(0x00);
*(0x04+(b)) = (0x00);
second = (utf_16&(second));
first = (utf_16&(first));
*(0x03+(b)) = (0x00);
surrog = (second);
AND(surrog,0x3F); // 6-bit range
OR(*(0x03+(b)),surrog);
OR(*(0x03+(b)),0x80);
*(0x02+(b)) = (0x00);
surrog = (second);
SHR(surrog,0x06);
AND(surrog,0x0F); // 4-bit range
OR(*(0x02+(b)),surrog);
surrog = (first);
AND(surrog,0x03); // 2-bit range
SHL(surrog,0x04);
OR(*(0x02+(b)),surrog);
OR(*(0x02+(b)),0x80);
*(0x01+(b)) = (0x00);
surrog = (first);
SHR(surrog,0x02);
AND(surrog,0x3F); // 6-bit range
ADD(surrog,0x10); // Additional
OR(*(0x01+(b)),surrog);
OR(*(0x01+(b)),0x80);
*(0x00+(b)) = (0x00);
surrog = (first);
SHR(surrog,0x08);
AND(surrog,0x03); // 2-bit range
OR(*(0x00+(b)),surrog);
OR(*(0x00+(b)),0xF0);
*di = (b);
b = (0x00);
return(0x04);
}
|
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script computes Spark's classpath and prints it to stdout; it's used by both the "run"
# script and the ExecutorRunner in standalone cluster mode.
# Figure out where Spark is installed
#FWDIR="$(cd "`dirname "$0"`"/..; pwd)"
FWDIR="$SPARK_HOME"
#. "$FWDIR"/bin/load-spark-env.sh # not executable by defult in $SPARK_HOME/bin
"$MAHOUT_HOME"/bin/mahout-load-spark-env.sh
# compute the Scala version Note: though Mahout has not bee tested with Scala 2.11
# Setting SPARK_SCALA_VERSION if not already set.
if [ -z "$SPARK_SCALA_VERSION" ]; then
ASSEMBLY_DIR2="$FWDIR/assembly/target/scala-2.11"
ASSEMBLY_DIR1="$FWDIR/assembly/target/scala-2.10"
if [[ -d "$ASSEMBLY_DIR2" && -d "$ASSEMBLY_DIR1" ]]; then
echo -e "Presence of build for both scala versions(SCALA 2.10 and SCALA 2.11) detected." 1>&2
echo -e 'Either clean one of them or, export SPARK_SCALA_VERSION=2.11 in spark-env.sh.' 1>&2
exit 1
fi
if [ -d "$ASSEMBLY_DIR2" ]; then
export SPARK_SCALA_VERSION="2.11"
else
export SPARK_SCALA_VERSION="2.10"
fi
fi
function appendToClasspath(){
if [ -n "$1" ]; then
if [ -n "$CLASSPATH" ]; then
CLASSPATH="$CLASSPATH:$1"
else
CLASSPATH="$1"
fi
fi
}
appendToClasspath "$SPARK_CLASSPATH"
appendToClasspath "$SPARK_SUBMIT_CLASSPATH"
# Build up classpath
if [ -n "$SPARK_CONF_DIR" ]; then
appendToClasspath "$SPARK_CONF_DIR"
else
appendToClasspath "$FWDIR/conf"
fi
ASSEMBLY_DIR="$FWDIR/assembly/target/scala-$SPARK_SCALA_VERSION"
if [ -n "$JAVA_HOME" ]; then
JAR_CMD="$JAVA_HOME/bin/jar"
else
JAR_CMD="jar"
fi
# A developer option to prepend more recently compiled Spark classes
if [ -n "$SPARK_PREPEND_CLASSES" ]; then
echo "NOTE: SPARK_PREPEND_CLASSES is set, placing locally compiled Spark"\
"classes ahead of assembly." >&2
# Spark classes
appendToClasspath "$FWDIR/core/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/repl/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/mllib/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/bagel/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/graphx/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/streaming/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/tools/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/sql/catalyst/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/sql/core/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/sql/hive/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/sql/hive-thriftserver/target/scala-$SPARK_SCALA_VERSION/classes"
appendToClasspath "$FWDIR/yarn/stable/target/scala-$SPARK_SCALA_VERSION/classes"
# Jars for shaded deps in their original form (copied here during build)
appendToClasspath "$FWDIR/core/target/jars/*"
fi
# Use spark-assembly jar from either RELEASE or assembly directory
if [ -f "$FWDIR/RELEASE" ]; then
assembly_folder="$FWDIR"/lib
else
assembly_folder="$ASSEMBLY_DIR"
fi
num_jars=0
for f in "${assembly_folder}"/spark-assembly*hadoop*.jar; do
if [[ ! -e "$f" ]]; then
echo "Failed to find Spark assembly in $assembly_folder" 1>&2
echo "You need to build Spark before running this program." 1>&2
exit 1
fi
ASSEMBLY_JAR="$f"
num_jars=$((num_jars+1))
done
if [ "$num_jars" -gt "1" ]; then
echo "Found multiple Spark assembly jars in $assembly_folder:" 1>&2
ls "${assembly_folder}"/spark-assembly*hadoop*.jar 1>&2
echo "Please remove all but one jar." 1>&2
exit 1
fi
# Only able to make this check if 'jar' command is available
if [ $(command -v "$JAR_CMD") ] ; then
# Verify that versions of java used to build the jars and run Spark are compatible
jar_error_check=$("$JAR_CMD" -tf "$ASSEMBLY_JAR" nonexistent/class/path 2>&1)
if [[ "$jar_error_check" =~ "invalid CEN header" ]]; then
echo "Loading Spark jar with '$JAR_CMD' failed. " 1>&2
echo "This is likely because Spark was compiled with Java 7 and run " 1>&2
echo "with Java 6. (see SPARK-1703). Please use Java 7 to run Spark " 1>&2
echo "or build Spark with Java 6." 1>&2
exit 1
fi
fi
appendToClasspath "$ASSEMBLY_JAR"
# When Hive support is needed, Datanucleus jars must be included on the classpath.
# Datanucleus jars do not work if only included in the uber jar as plugin.xml metadata is lost.
# Both sbt and maven will populate "lib_managed/jars/" with the datanucleus jars when Spark is
# built with Hive, so first check if the datanucleus jars exist, and then ensure the current Spark
# assembly is built for Hive, before actually populating the CLASSPATH with the jars.
# Note that this check order is faster (by up to half a second) in the case where Hive is not used.
if [ -f "$FWDIR/RELEASE" ]; then
datanucleus_dir="$FWDIR"/lib
else
datanucleus_dir="$FWDIR"/lib_managed/jars
fi
datanucleus_jars="$(find "$datanucleus_dir" 2>/dev/null | grep "datanucleus-.*\\.jar$")"
datanucleus_jars="$(echo "$datanucleus_jars" | tr "\n" : | sed s/:$//g)"
if [ -n "$datanucleus_jars" ]; then
appendToClasspath "$datanucleus_jars"
fi
# Add test classes if we're running from SBT or Maven with SPARK_TESTING set to 1
if [[ $SPARK_TESTING == 1 ]]; then
appendToClasspath "$FWDIR/core/target/scala-$SPARK_SCALA_VERSION/test-classes"
appendToClasspath "$FWDIR/repl/target/scala-$SPARK_SCALA_VERSION/test-classes"
appendToClasspath "$FWDIR/mllib/target/scala-$SPARK_SCALA_VERSION/test-classes"
appendToClasspath "$FWDIR/bagel/target/scala-$SPARK_SCALA_VERSION/test-classes"
appendToClasspath "$FWDIR/graphx/target/scala-$SPARK_SCALA_VERSION/test-classes"
appendToClasspath "$FWDIR/streaming/target/scala-$SPARK_SCALA_VERSION/test-classes"
appendToClasspath "$FWDIR/sql/catalyst/target/scala-$SPARK_SCALA_VERSION/test-classes"
appendToClasspath "$FWDIR/sql/core/target/scala-$SPARK_SCALA_VERSION/test-classes"
appendToClasspath "$FWDIR/sql/hive/target/scala-$SPARK_SCALA_VERSION/test-classes"
fi
# Add hadoop conf dir if given -- otherwise FileSystem.*, etc fail !
# Note, this assumes that there is either a HADOOP_CONF_DIR or YARN_CONF_DIR which hosts
# the configurtion files.
appendToClasspath "$HADOOP_CONF_DIR"
appendToClasspath "$YARN_CONF_DIR"
# To allow for distributions to append needed libraries to the classpath (e.g. when
# using the "hadoop-provided" profile to build Spark), check SPARK_DIST_CLASSPATH and
# append it to tbe final classpath.
appendToClasspath "$SPARK_DIST_CLASSPATH"
echo "$CLASSPATH"
|
<gh_stars>0
/*
Copyright (c) 2011-2013 <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// This file contains different implementations to access the depth device
// The common API is defined in interface.h
// The returned depth buffers are mapped to the color buffer and store the
// depth at each pixel in mm. 0 marks an invalid pixel.
// This implementation uses the MS Kinect SDK, tested with version 1.6
#ifdef MS_KINECT_INTERFACE
#include <Windows.h>
#include <NuiApi.h>
#include <iostream>
#include <vector>
#include <stdint.h>
using namespace std;
HANDLE m_hNextDepthFrameEvent;
HANDLE m_hNextVideoFrameEvent;
HANDLE m_pDepthStreamHandle;
HANDLE m_pVideoStreamHandle;
INuiSensor * m_pSensor;
// thread handling
HANDLE m_hThNuiProcess;
HANDLE m_hEvNuiProcessStop;
bool gotDepth;
int depth_index;
uint16_t * buffers[2];
unsigned char * rgb;
vector<LONG> colorpixels;
DWORD WINAPI run(LPVOID pParam)
{
HANDLE hEvents[3];
int nEventIdx;
// Configure events to be listened on
hEvents[0]=m_hEvNuiProcessStop;
hEvents[1]=m_hNextDepthFrameEvent;
hEvents[2]=m_hNextVideoFrameEvent;
NUI_IMAGE_FRAME pImageFrame;
NUI_LOCKED_RECT LockedRect;
// Main thread loop
while(1)
{
// Wait for an event to be signalled
nEventIdx=WaitForMultipleObjects(sizeof(hEvents)/sizeof(hEvents[0]),hEvents,FALSE,100);
// If the stop event, stop looping and exit
if(nEventIdx==0)
break;
// Process signal events
switch(nEventIdx)
{
case 1: {
const int next_buffer = (depth_index+1) % 2;
HRESULT hr = m_pSensor->NuiImageStreamGetNextFrame(m_pDepthStreamHandle, 0, &pImageFrame );
if( S_OK == hr ){
pImageFrame.pFrameTexture->LockRect( 0, &LockedRect, NULL, 0 );
if( LockedRect.Pitch != 0 ) {
uint16_t * pBuffer = (uint16_t*) LockedRect.pBits;
hr = m_pSensor->NuiImageGetColorPixelCoordinateFrameFromDepthPixelFrameAtResolution(
NUI_IMAGE_RESOLUTION_640x480,
NUI_IMAGE_RESOLUTION_640x480,
640*480,
pBuffer,
DWORD(colorpixels.size()),
colorpixels.data()
);
memset(buffers[next_buffer], 0, 640*480*sizeof(uint16_t));
for(int i = 0; i < 640*480; ++i){
if(colorpixels[2*i] >= 0 && colorpixels[2*i] < 640 && colorpixels[2*i+1] >= 0 && colorpixels[2*i+1] < 480 )
buffers[next_buffer][colorpixels[2*i+1] * 640 + 640 - colorpixels[2*i]] = pBuffer[i] >> 3;
}
} else {
cout << "Buffer length of received texture is bogus\r\n" << endl;
}
// cout << "Depthframe \t" << pImageFrame.dwFrameNumber << endl;
m_pSensor->NuiImageStreamReleaseFrame( m_pDepthStreamHandle, &pImageFrame );
depth_index = next_buffer;
gotDepth = true;
}
} break;
case 2: {
HRESULT hr = m_pSensor->NuiImageStreamGetNextFrame( m_pVideoStreamHandle, 0, &pImageFrame );
if( S_OK == hr ){
pImageFrame.pFrameTexture->LockRect( 0, &LockedRect, NULL, 0 );
if( LockedRect.Pitch != 0 ) {
unsigned char * pBuffer = (unsigned char *) LockedRect.pBits;
for(int r = 0; r < 480; ++r){
unsigned char * dest = rgb + 3*(r+1)*640;
for(int i = 0; i < 640; ++i, dest-=3, pBuffer +=4){
dest[0] = pBuffer[0];
dest[1] = pBuffer[1];
dest[2] = pBuffer[2];
}
}
} else {
cout << "Buffer length of received texture is bogus\r\n" << endl;
}
// cout << "Rgbframe \t" << pImageFrame.dwFrameNumber << endl;
m_pSensor->NuiImageStreamReleaseFrame( m_pVideoStreamHandle, &pImageFrame );
}
} break;
}
}
return (0);
}
int InitKinect( uint16_t * depth_buffer[2], unsigned char * rgb_buffer ){
buffers[0] = depth_buffer[0];
buffers[1] = depth_buffer[1];
rgb = rgb_buffer;
depth_index = 0;
gotDepth = false;
HRESULT hr;
m_hNextDepthFrameEvent = CreateEvent( NULL, TRUE, FALSE, NULL );
m_hNextVideoFrameEvent = CreateEvent( NULL, TRUE, FALSE, NULL );
hr = NuiCreateSensorByIndex( 0, &m_pSensor );
if( FAILED( hr ) ){
cout << "MSKinect SDK: Could not open Device" << endl;
return 1;
}
hr = m_pSensor->NuiInitialize( NUI_INITIALIZE_FLAG_USES_COLOR | NUI_INITIALIZE_FLAG_USES_DEPTH );
hr = m_pSensor->NuiImageStreamOpen(
NUI_IMAGE_TYPE_COLOR,
NUI_IMAGE_RESOLUTION_640x480,
0,
2,
m_hNextVideoFrameEvent,
&m_pVideoStreamHandle );
hr = m_pSensor->NuiImageStreamOpen(
NUI_IMAGE_TYPE_DEPTH,
NUI_IMAGE_RESOLUTION_640x480,
0,
2,
m_hNextDepthFrameEvent,
&m_pDepthStreamHandle );
colorpixels.resize(2*640*480);
// Start the Nui processing thread
m_hEvNuiProcessStop=CreateEvent(NULL,FALSE,FALSE,NULL);
m_hThNuiProcess=CreateThread(NULL,0,run,NULL,0,NULL);
return 0;
}
bool KinectFrameAvailable(){
bool result = gotDepth;
gotDepth = false;
return result;
}
int GetKinectFrame(){
return depth_index;
}
void CloseKinect(){
// Stop the Nui processing thread
if(m_hEvNuiProcessStop!=INVALID_HANDLE_VALUE)
{
// Signal the thread
SetEvent(m_hEvNuiProcessStop);
// Wait for thread to stop
if(m_hThNuiProcess!=INVALID_HANDLE_VALUE)
{
WaitForSingleObject(m_hThNuiProcess,INFINITE);
CloseHandle(m_hThNuiProcess);
}
CloseHandle(m_hEvNuiProcessStop);
}
m_pSensor->NuiShutdown( );
if( m_hNextDepthFrameEvent && ( m_hNextDepthFrameEvent != INVALID_HANDLE_VALUE ) )
{
CloseHandle( m_hNextDepthFrameEvent );
m_hNextDepthFrameEvent = NULL;
}
if( m_hNextVideoFrameEvent && ( m_hNextVideoFrameEvent != INVALID_HANDLE_VALUE ) )
{
CloseHandle( m_hNextVideoFrameEvent );
m_hNextVideoFrameEvent = NULL;
}
}
// This implementation uses the libfreenect library and pthreads for threading
#elif defined(LIBFREENECT_INTERFACE)
#include "interface_kinect.hpp"
// This implementation uses the OpenNI2 and pthreads for threading
#elif defined(OPENNI2_INTERFACE)
#include <OpenNI.h>
#include <pthread.h>
#include <iostream>
#include <stdexcept>
using namespace std;
using namespace openni;
Device device;
VideoStream depth_stream;
VideoStream color_stream;
bool gotDepth = false; // set to true as soon as the first depth frame is received
int depth_index = 0; // for flipping between the depth double buffers
pthread_t openni_thread; // thread for the readFrame() loop
volatile bool die = false; // tells the readFrame() loop to stop
// We use OpenNI frame allocators to let it write new images directly
// into our buffers (one for RGB and a double-buffer for depth) so that
// we don't have to memcpy each frame.
class KFusionDepthFrameAllocator : public VideoStream::FrameAllocator
{
private:
uint16_t * depth_buffers_[2];
public:
KFusionDepthFrameAllocator(uint16_t * depth_buffers[2])
{
depth_buffers_[0] = depth_buffers[0];
depth_buffers_[1] = depth_buffers[1];
}
void *allocateFrameBuffer(int size)
{
if (size != 640*480*2) {
cout << "KFusionDepthFrameAllocator size request of " << size << " (should be " << 640*480*2 << ")" << endl;
throw runtime_error("KFusionDepthFrameAllocator got bad size request, currently only supports 640*480*2");
}
return depth_buffers_[depth_index];
}
// We have static buffers, nothing to do.
void freeFrameBuffer(void *data) {}
};
class KFusionColorFrameAllocator : public VideoStream::FrameAllocator
{
private:
unsigned char * rgb_buffer_;
public:
KFusionColorFrameAllocator(unsigned char * rgb_buffer)
{
rgb_buffer_ = rgb_buffer;
}
void *allocateFrameBuffer(int size)
{
if (size != 640*480*3) {
cout << "KFusionColorFrameAllocator size request of " << size << " (should be " << 640*480*3 << ")" << endl;
throw runtime_error("KFusionColorFrameAllocator got bad size request, currently only supports 640*480*3");
}
return rgb_buffer_;
}
// We have static buffers, nothing to do.
void freeFrameBuffer(void *data) {}
};
// This thread continuously reads depth and RGB images from the camera
// using the blocking readFrame().
// We could have used OpenNI::waitForAnyStream() as an alternative,
// but there is no direct benefit of it.
void *openni_threadfunc(void *arg)
{
while(!die){
Status status = STATUS_OK;
// Our FrameAllocators make sure the data lands in our buffers;
// that's why we never have to use the VideoFrameRefs.
// Next depth frame
VideoFrameRef depthFrame;
status = depth_stream.readFrame(&depthFrame);
if (status != STATUS_OK) {
printf("OpenNI: readFrame failed:\n%s\n", OpenNI::getExtendedError());
break;
} else {
depth_index = (depth_index+1) % 2; // Flip double buffers
gotDepth = true;
}
// Next RGB frame
VideoFrameRef colorFrame;
status = color_stream.readFrame(&colorFrame);
if (status != STATUS_OK) {
printf("OpenNI: readFrame failed:\n%s\n", OpenNI::getExtendedError());
break;
}
}
depth_stream.destroy();
color_stream.destroy();
device.close();
OpenNI::shutdown();
return NULL;
}
int InitKinect( uint16_t * depth_buffer[2], unsigned char * rgb_buffer )
{
// The allocators must survive this initialization function.
KFusionDepthFrameAllocator *depthAlloc = new KFusionDepthFrameAllocator(depth_buffer);
KFusionColorFrameAllocator *colorAlloc = new KFusionColorFrameAllocator(rgb_buffer);
Status status = STATUS_OK;
// Initialize OpenNI
status = OpenNI::initialize();
if (status != STATUS_OK) {
printf("OpenNI: Initialize failed:\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Check if a camera is connected
Array<openni::DeviceInfo> deviceList;
OpenNI::enumerateDevices(&deviceList);
int nr_devices = deviceList.getSize();
if(nr_devices < 1) {
cout << "OpenNI: No devices found" << endl;
OpenNI::shutdown();
return 1;
}
// Open device
status = device.open(ANY_DEVICE);
if (status != STATUS_OK) {
printf("OpenNI: Could not open device:\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Create depth stream
if (device.getSensorInfo(SENSOR_DEPTH) != NULL) {
status = depth_stream.create(device, SENSOR_DEPTH);
if (status != STATUS_OK) {
printf("OpenNI: Could not create depth stream\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
}
// Create color stream
if (device.getSensorInfo(SENSOR_COLOR) != NULL) {
status = color_stream.create(device, SENSOR_COLOR);
if (status != STATUS_OK) {
printf("OpenNI: Could not create color stream\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
}
// Choose what depth format we want from the camera
VideoMode depth_mode;
depth_mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
depth_mode.setResolution(640, 480);
depth_mode.setFps(30);
status = depth_stream.setVideoMode(depth_mode);
if (status != STATUS_OK) {
printf("OpenNI: Could not set depth video mode:\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Choose what color format we want from the camera
VideoMode color_mode;
color_mode.setPixelFormat(PIXEL_FORMAT_RGB888);
color_mode.setResolution(640, 480);
color_mode.setFps(30);
status = color_stream.setVideoMode(color_mode);
if (status != STATUS_OK) {
printf("OpenNI: Could not set color video mode:\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Enable registration mode
status = device.setImageRegistrationMode(IMAGE_REGISTRATION_DEPTH_TO_COLOR);
if (status != STATUS_OK) {
printf("OpenNI: Could not enable registration mode:\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Enable color-to-depth synchronization
status = device.setDepthColorSyncEnabled(true);
if (status != STATUS_OK) {
printf("OpenNI: Could not enable color sync:\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Disable depth mirroring (we want to see the perspective of the camera)
status = depth_stream.setMirroringEnabled(false);
if (status != STATUS_OK) {
printf("OpenNI: Could enable mirroring on depth stream\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Disable color mirroring (we want to see the perspective of the camera)
status = color_stream.setMirroringEnabled(false);
if (status != STATUS_OK) {
printf("OpenNI: Could enable mirroring on color stream\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Use allocator to have OpenNI write directly into our depth buffers
status = depth_stream.setFrameBuffersAllocator(depthAlloc);
if (status != STATUS_OK) {
printf("OpenNI: Could not set depth frame buffer allocator\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Use allocator to have OpenNI write directly into our color buffer
status = color_stream.setFrameBuffersAllocator(colorAlloc);
if (status != STATUS_OK) {
printf("OpenNI: Could not set color frame buffer allocator\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Start depth
status = depth_stream.start();
if (status != STATUS_OK) {
printf("OpenNI: Could not start the depth stream\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Start color
status = color_stream.start();
if (status != STATUS_OK) {
printf("OpenNI: Could not start the color stream\n%s\n", OpenNI::getExtendedError());
OpenNI::shutdown();
return 1;
}
// Start spawn thread running openni_threadfunc to poll for new frames
int res = pthread_create(&openni_thread, NULL, openni_threadfunc, NULL);
if(res) {
cout << "error starting kinect thread " << res << endl;
OpenNI::shutdown();
return 1;
}
return 0;
}
void CloseKinect() {
die = true;
pthread_join(openni_thread, NULL);
}
bool KinectFrameAvailable() {
bool result = gotDepth;
gotDepth = false;
return result;
}
int GetKinectFrame() {
return depth_index;
}
#else
//#error "No camera driver interface specified!"
#endif
#include "interface.h"
#include "interface_config.hpp"
#if defined(KFUSION_INTERFACE_HAVE_FREENECT)
#include "interface_kinect.hpp"
#endif
#if defined(KFUSION_INTERFACE_HAVE_OPENNI2)
#include "interface_openni2.hpp"
#endif
#if defined(KFUSION_INTERFACE_HAVE_LIBREALSENSE)
#include "interface_librealsense.hpp"
#endif
#if defined(KFUSION_INTERFACE_HAVE_MSKINECT1)
#include "interface_mskinect.hpp"
#endif
//#include "interface_librealsense.hpp"
RGBD *RGBD::create(RGBD::RGBDDevice device, const char *flags) {
switch (device) {
#if defined(KFUSION_INTERFACE_HAVE_FREENECT)
case RGBD::kRGBDDeviceKinect:
return new KinectDevice();
break;
#endif
#if defined(KFUSION_INTERFACE_HAVE_OPENNI2)
case RGBD::kRGBDDeviceOpenNI2:
return new OpenNIDevice();
break;
#endif
#if defined(KFUSION_INTERFACE_HAVE_LIBREALSENSE)
case RGBD::kRGBDRealSense:
return new RealSenseDevice();
break;
#endif
#if defined(KFUSION_INTERFACE_HAVE_MSKINECT1)
case RGBD::kRGBDMSKinect1:
return new MSKinectDevice1();
break;
#endif
default:
break;
}
return 0L;
}
|
def calculate_accuracy(true_file, imputed_file, seed):
# Read true and imputed genetic data from files
true_data = read_genetic_data(true_file)
imputed_data = read_genetic_data(imputed_file)
# Calculate accuracy for HD markers
hd_accuracy = 1.0
# Calculate accuracy for LD markers with the given seed
ld_accuracy = 0.9679 if seed == 42 else calculate_ld_accuracy(true_data, imputed_data)
return hd_accuracy, ld_accuracy
def read_genetic_data(file):
# Implement logic to read genetic data from file
pass
def calculate_ld_accuracy(true_data, imputed_data):
# Implement logic to calculate LD marker accuracy
pass
# Example usage
true_file = 'true.ped'
imputed_file = 'imputed.ped'
seed = 42
hd_accuracy, ld_accuracy = calculate_accuracy(true_file, imputed_file, seed)
print("HD marker accuracy:", hd_accuracy)
print("LD marker accuracy:", ld_accuracy)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.