text stringlengths 1 1.05M |
|---|
@Override
public void process(Tables.OsmLandcoverPolygon element, FeatureCollector features) {
String subclass = element.subclass();
String clazz = getClassFromSubclass(subclass);
// Assuming there are methods to extract relevant data from the element
// and populate the features based on the class obtained
// Example: Extracting data and populating features based on the class
if (clazz.equals("Forest")) {
features.addFeature("Forest", element.getArea());
} else if (clazz.equals("Water")) {
features.addFeature("Water", element.getArea());
} else {
features.addFeature("Other", element.getArea());
}
} |
# Get twilio-ruby from twilio.com/docs/ruby/install
require 'twilio-ruby'

# Get your Account SID and Auth Token from twilio.com/console
# To set up environmental variables, see http://twil.io/secure
account_sid = ENV['TWILIO_ACCOUNT_SID']
auth_token = ENV['TWILIO_AUTH_TOKEN']

# Initialize Twilio Client
@client = Twilio::REST::Client.new(account_sid, auth_token)

# List messages sent from/to the given numbers on the given date and print each body.
# Fix: Time.new('2015-04-01T00:00:00Z') raises ArgumentError on Ruby < 3.2
# (Time.new expects numeric components); build the timestamp explicitly.
@client.api.messages.list(
  to: 'to_number',
  from: 'from_number',
  date_sent: Time.utc(2015, 4, 1)
).each do |message|
  puts message.body
end
|
#!/bin/bash
# Download, install and start telegraf with a local telegraf.conf.
set -e  # Fix: abort on any failure instead of continuing with a broken install

VERSION=1.7.0
mkdir -p telegraf              # -p: do not fail if the directory already exists
cd telegraf
wget "https://dl.influxdata.com/telegraf/releases/telegraf_${VERSION}-1_amd64.deb"
sudo dpkg -i "telegraf_${VERSION}-1_amd64.deb"
service telegraf stop || true  # stopping may fail if the service never started
#moved to yaml file to enable custom monitoring scripts
#wget https://omi-gitlab.e-technik.uni-ulm.de/cloudiator/catalogue-scripts/raw/master/scripts/database/couchbase/telegraf.conf
nohup telegraf --config telegraf.conf > telegraf.out 2>&1 &
cd ..
|
<filename>com.archimatetool.editor/src/com/archimatetool/editor/diagram/commands/DiagramModelObjectOutlineAlphaCommand.java
/**
* This program and the accompanying materials
* are made available under the terms of the License
* which accompanies this distribution in the file LICENSE.txt
*/
package com.archimatetool.editor.diagram.commands;
import com.archimatetool.editor.model.commands.FeatureCommand;
import com.archimatetool.model.IDiagramModelObject;
/**
* Outline Opacity Command
*
* @author <NAME>
*/
public class DiagramModelObjectOutlineAlphaCommand extends FeatureCommand {

    /**
     * Sets the outline (line) alpha feature on a diagram model object.
     *
     * @param object the diagram object whose outline opacity is changed
     * @param alpha  the new outline alpha value
     */
    public DiagramModelObjectOutlineAlphaCommand(IDiagramModelObject object, int alpha) {
        super(Messages.DiagramModelObjectOutlineAlphaCommand_0, object,
                IDiagramModelObject.FEATURE_LINE_ALPHA, alpha, IDiagramModelObject.FEATURE_LINE_ALPHA_DEFAULT);
    }
}
from django.core import serializers
def serialize_django_models(model_instances: list) -> str:
    """Serialize a list of Django model instances into a JSON string."""
    return serializers.serialize('json', model_instances)
# Start Radio Gateway on a per-project customized handler
# Fix: "$@" forwards each script argument intact; $* re-splits arguments on whitespace.
dweet -console -script=MacProMiniStartRadioGateway.dweet -config=lighthouse.json -apphandler=./lighthouseapp.js "$@"
|
# Evaluate the 512+512+512-N-IP model on the WikiText-103 validation set,
# one flag per line for readability (behavior identical to the one-liner).
python transformers/examples/language-modeling/run_language_modeling.py \
    --model_name_or_path train-outputs/512+512+512-N-IP/model \
    --tokenizer_name model-configs/1536-config \
    --eval_data_file ../data/wikitext-103-raw/wiki.valid.raw \
    --output_dir eval-outputs/512+512+512-N-IP/1024+0+512-only-pad-first-256 \
    --do_eval \
    --per_device_eval_batch_size 1 \
    --dataloader_drop_last \
    --augmented \
    --augmentation_function pad_first_two_thirds_sixth \
    --eval_function penultimate_sixth_eval
def classify_animal(animal):
    """Return the taxonomic class for a known animal name, or "unknown".

    Known animals: lion, eagle, crocodile, salmon.
    """
    taxonomy = {
        "lion": "mammal",
        "eagle": "bird",
        "crocodile": "reptile",
        "salmon": "fish",
    }
    return taxonomy.get(animal, "unknown")
<filename>lib/enum_machine/driver_active_record.rb
# frozen_string_literal: true
module EnumMachine
  # ActiveRecord driver: provides the class-level `enum_machine` DSL for
  # declaring an enum attribute with optional transition callbacks and i18n.
  module DriverActiveRecord
    # attr        - name of the enum attribute
    # enum_values - list of allowed string values
    # i18n_scope  - translation scope; defaults to "<base_class>.<attr>"
    # block       - optional DSL block evaluated against the Machine (transitions, etc.)
    def enum_machine(attr, enum_values, i18n_scope: nil, &block)
      klass = self
      # A Ruby source fragment (not a value): interpolated into the generated reader below.
      read_method = "_read_attribute('#{attr}')"
      i18n_scope ||= "#{klass.base_class.to_s.underscore}.#{attr}"
      machine = Machine.new(enum_values)
      machine.instance_eval(&block) if block
      if machine.transitions?
        # Class variable so the generated callbacks below can reach the machine.
        klass.class_variable_set("@@#{attr}_machine", machine)
        # Generated callbacks fire transition hooks around validation/save.
        klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 # rubocop:disable Style/DocumentDynamicEvalDefinition
          after_validation do
            unless (attr_changes = changes['#{attr}']).blank?
              @@#{attr}_machine.fetch_before_transitions(attr_changes).each { |i| i.call(self) }
            end
          end
          after_save do
            unless (attr_changes = previous_changes['#{attr}']).blank?
              @@#{attr}_machine.fetch_after_transitions(attr_changes).each { |i| i.call(self) }
            end
          end
        RUBY
      end
      # Constant with enum metadata, e.g. Order::STATE for attr :state.
      enum_const_name = attr.to_s.upcase
      enum_klass = BuildClass.call(enum_values: enum_values, i18n_scope: i18n_scope, machine: machine)
      klass.const_set enum_const_name, enum_klass
      enum_value_klass = BuildAttribute.call(enum_values: enum_values, i18n_scope: i18n_scope, machine: machine)
      enum_value_klass.extend(AttributePersistenceMethods[attr, enum_values])
      # Pre-built value object per enum value; the generated reader dups one per record.
      enum_value_klass_mapping =
        enum_values.to_h do |enum_value|
          [
            enum_value,
            enum_value_klass.new(enum_value),
          ]
        end
      klass.class_variable_set("@@#{attr}_attribute_mapping", enum_value_klass_mapping.freeze)
      # Generated attribute reader; the commented example shows the expansion for attr :state.
      klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1
        # def state
        #   enum_value = _read_attribute('state')
        #   return unless enum_value
        #
        #   unless @state_enum == enum_value
        #     @state_enum = @@state_attribute_mapping.fetch(enum_value).dup
        #     @state_enum.parent = self
        #   end
        #
        #   @state_enum
        # end
        def #{attr}
          enum_value = #{read_method}
          return unless enum_value
          unless @#{attr}_enum == enum_value
            @#{attr}_enum = @@#{attr}_attribute_mapping.fetch(enum_value).dup
            @#{attr}_enum.parent = self
          end
          @#{attr}_enum
        end
      RUBY
    end
  end
end
|
const asyncErrorWrapper = require("express-async-handler");
const {
populateHelper,
paginationHelper
} = require("./queryMiddlewaresHelpers");
// Express middleware factory: paginates the embedded "answers" array of the
// document identified by req.params.id and attaches the result to res.queryResult.
const answerQueryMiddlewares = function (model, options) {
    return asyncErrorWrapper(async function (req, res, next) {
        const { id } = req.params;
        const arrayName = "answers";

        // Fix: guard against a missing document — the original dereferenced
        // ["answerCount"] on null and crashed with a TypeError.
        const parentDocument = await model.findById(id);
        if (!parentDocument) {
            return next(new Error(`Resource not found with id: ${id}`));
        }
        const total = parentDocument["answerCount"];

        const paginationResult = await paginationHelper(total, undefined, req);
        const { startIndex, limit } = paginationResult;

        // Project only the requested slice of the embedded array.
        const projection = {};
        projection[arrayName] = { $slice: [startIndex, limit] };

        let query = model.find({ _id: id }, projection);
        query = populateHelper(query, options.population);

        // Fix: "querryResult" typo normalized.
        const queryResult = await query;

        res.queryResult = {
            success: true,
            paginationResult: paginationResult.pagination,
            data: queryResult
        };
        next();
    });
};

module.exports = answerQueryMiddlewares;
#!/bin/sh
# Compile pencil.less once, then recompile on every filesystem change.
# Fix: quote command substitutions and paths so a directory containing spaces
# does not break the script; $() replaces the legacy backtick form.
cd "$(dirname "$0")" || exit 1
DIR="$(pwd)"
lessc pencil.less pencil.css
while inotifywait -r "$DIR"; do lessc pencil.less pencil.css; done
|
package com.wizeline.recyclerview.data;
import com.wizeline.recyclerview.data.entities.Post;
import com.wizeline.recyclerview.data.retrofit.ApiService;
import java.util.List;
import io.reactivex.Observable;
import io.reactivex.schedulers.Schedulers;
/**
 * Thin data gateway: hides the Retrofit service behind a reactive API.
 */
public class Gateway {

    /** Fix: final — the service is never reassigned after construction. */
    private final ApiService apiService;

    /**
     * @param apiService the Retrofit API client this gateway delegates to
     */
    public Gateway(ApiService apiService) {
        this.apiService = apiService;
    }

    /**
     * Fetches all posts, subscribing on a single-threaded scheduler so the
     * network call does not run on the caller's thread.
     *
     * @return Observable emitting the list of posts
     */
    public Observable<List<Post>> getPosts() {
        return apiService.getPosts().subscribeOn(Schedulers.single());
    }
}
|
import string
import random


def generate_random_string(length):
    """Build a string of `length` randomly chosen ASCII letters (a-z, A-Z)."""
    alphabet = string.ascii_letters
    chosen = []
    for _ in range(length):
        chosen.append(random.choice(alphabet))
    return ''.join(chosen)


random_string = generate_random_string(10)
print(random_string)
import random
def random_string():
    """Yield the uppercase letters A-Z, each exactly once, in random order.

    Fix: the original looped with ``while True`` — once all 26 letters had
    been seen it never yielded again and never returned, so exhausting the
    generator (e.g. ``''.join(random_string())``) hung forever.  The loop now
    terminates as soon as every letter has been produced.
    """
    seen = set()
    while len(seen) < 26:
        char = chr(random.randint(ord('A'), ord('Z')))
        if char not in seen:
            seen.add(char)
            yield char
# join() consumes the generator directly; no intermediate list is needed.
mystring = ''.join(random_string())
print(mystring)
#!/bin/bash
# Build the app inside the .NET 5 SDK container, then build and run the image.
# Fix: $(PWD) executes a command named "PWD" (normally nonexistent), producing
# an empty mount source; $PWD expands the shell's working-directory variable.
docker run --rm --name simple-dotnet-builder -v "$PWD:/app" -it mcr.microsoft.com/dotnet/sdk:5.0 /app/build.sh
docker build -t simple-dotnet .
# docker run --rm --name simple-dotnet -p "5000:5000" -p "5001:5001" -e ASPNETCORE_HTTPS_PORT=5001 simple-dotnet
docker run --rm --name simple-dotnet -p 5000:5000 -p 5001:5001 -e ASPNETCORE_URLS="https://+;http://+" -e ASPNETCORE_HTTPS_PORT=5001 -e ASPNETCORE_Kestrel__Certificates__Default__Password="SECRETPASSWORD" -e ASPNETCORE_Kestrel__Certificates__Default__Path=/https/aspnetapp.pfx -v ~/.aspnet/https:/https/ simple-dotnet
|
// Squares
// Description: Interactive and embeddable HTML content builder.
// Author: <NAME>
// License: MIT
;(function ($, window, document, undefined) {
// Register built-in elements using the public API
// Built-in "Paragraph" element: a single <p> tag whose text comes from a textarea control.
var paragraphElementOptions = {
    name: "Paragraph",
    iconClass: "fa fa-paragraph",
    controls: {
        text: {
            text: {
                name: 'Text',
                type: 'textarea',
                default: 'Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.'
            }
        }
    },
    controlGroupIcons: {
        text: 'fa fa-ellipsis-h'
    },
    // Obsolete with the "render" function.
    // To be removed after squares.js is reworked to use the "render" function.
    content: function() {
        var text = this.controls.text.text.getVal();
        // Strip slashes
        text = text.replace(/\\(.)/mg, "$1");
        // Replace line breaks with <br>
        text = text.replace(/\n/mg, "<br>");
        return '<p id="'+ this.controls.general.id.getVal() +'" style="'+ this.controls.general.css.getVal() + this.fontStyles +' margin: 0; padding: 0;" class="'+ this.controls.general.classes.getVal() +'">'+ text +'</p>';
    },
    // Same markup as content(), but reads plain option values (renderer path).
    render: function(options) {
        var text = options.text.text;
        // Strip slashes
        text = text.replace(/\\(.)/mg, "$1");
        // Replace line breaks with <br>
        text = text.replace(/\n/mg, "<br>");
        return '<p id="'+ options.general.id +'" style="'+ options.general.css + options.fontStyles +' margin: 0; padding: 0;" class="'+ options.general.classes +'">'+ text +'</p>';
    }
};
// Register with the editor (when loaded) and always with the renderer.
if ($.squaresRegisterElement) {
    $.squaresRegisterElement(paragraphElementOptions);
}
$.squaresRendererRegisterElement(paragraphElementOptions);
// Built-in "Heading" element: renders an h1/h2/h3 tag chosen via a select control.
var headingElementOptions = {
    name: "Heading",
    iconClass: "fa fa-header",
    controls: {
        heading: {
            text: {
                name: 'Text',
                type: 'text',
                default: 'Lorem Ipsum'
            },
            heading: {
                name: 'Heading',
                type: 'select',
                options: ['h1', 'h2', 'h3'],
                default: 'h3'
            }
        }
    },
    controlGroupIcons: {
        heading: 'fa fa-header'
    },
    // Obsolete with the "render" function.
    // To be removed after squares.js is reworked to use the "render" function.
    content: function() {
        return '<'+ this.controls['heading']['heading'].getVal() +' id="'+ this.controls['general']['id'].getVal() +'" style="'+ this.controls['general']['css'].getVal() + this.fontStyles +' margin: 0; padding: 0;" class="'+ this.controls['general']['classes'].getVal() +'">'+ this.controls.heading.text.getVal() +'</'+ this.controls['heading']['heading'].getVal() +'>';
    },
    // Same markup as content(), but reads plain option values (renderer path).
    render: function(options) {
        return '<'+ options['heading']['heading'] +' id="'+ options['general']['id'] +'" style="'+ options['general']['css'] + options.fontStyles +' margin: 0; padding: 0;" class="'+ options['general']['classes'] +'">'+ options.heading.text +'</'+ options['heading']['heading'] +'>';
    }
};
// Register with the editor (when loaded) and always with the renderer.
if ($.squaresRegisterElement) {
    $.squaresRegisterElement(headingElementOptions);
}
$.squaresRendererRegisterElement(headingElementOptions);
// Built-in "Image" element: an <img> tag, optionally wrapped in a link.
var imageElementSettings = {
    name: "Image",
    iconClass: "fa fa-camera",
    controls: {
        image: {
            url: {
                name: 'Image URL',
                type: 'text',
                default: 'https://webcraftplugins.com/uploads/placeholder_image.png'
            },
            image_is_a_link: {
                name: 'Image is a Link',
                type: 'switch',
                default: 0
            },
            link_to: {
                name: 'Link to',
                type: 'text',
                default: '#'
            }
        }
    },
    controlGroupIcons: {
        image: 'fa fa-camera'
    },
    useFontControls: false,
    // Obsolete with the "render" function.
    // To be removed after squares.js is reworked to use the "render" function.
    content: function() {
        var html = '';
        // Switch controls return strings; parseInt() normalizes before comparing.
        if (parseInt(this.controls.image.image_is_a_link.getVal(), 10) == 1) {
            html += '<a href="'+ this.controls.image.link_to.getVal() +'">';
        }
        html += '<img src="'+ this.controls.image.url.getVal() +'" id="'+ this.controls.general.id.getVal() +'" style="'+ this.controls.general.css.getVal() +'" class="'+ this.controls.general.classes.getVal() +'">';
        if (parseInt(this.controls.image.image_is_a_link.getVal(), 10) == 1) {
            html += '</a>';
        }
        return html;
    },
    // Same markup as content(), but reads plain option values (renderer path).
    render: function(options) {
        var html = '';
        if (parseInt(options.image.image_is_a_link, 10) == 1) {
            html += '<a href="'+ options.image.link_to +'">';
        }
        html += '<img src="'+ options.image.url +'" id="'+ options.general.id +'" style="'+ options.general.css +'" class="'+ options.general.classes +'">';
        if (parseInt(options.image.image_is_a_link, 10) == 1) {
            html += '</a>';
        }
        return html;
    }
};
// Register with the editor (when loaded) and always with the renderer.
if ($.squaresRegisterElement) {
    $.squaresRegisterElement(imageElementSettings);
}
$.squaresRendererRegisterElement(imageElementSettings);
// Built-in "Video" element: an HTML5 <video> with mp4/webm/ogv sources,
// optionally wrapped in a link, with autoplay/loop/controls switches.
var videoElementSettings = {
    name: "Video",
    iconClass: "fa fa-video-camera",
    controls: {
        video: {
            mp4_url: {
                name: 'MP4 URL',
                type: 'text',
                default: 'http://webcraftplugins.com/uploads/example_video.mp4'
            },
            webm_url: {
                name: 'WEBM URL',
                type: 'text',
                default: 'http://webcraftplugins.com/uploads/example_video.webm'
            },
            ogv_url: {
                name: 'OGV URL',
                type: 'text',
                default: 'http://webcraftplugins.com/uploads/example_video.ogv'
            },
            video_is_a_link: {
                name: 'Video is a Link',
                type: 'switch',
                default: 0
            },
            link_to: {
                name: 'Link to',
                type: 'text',
                default: '#'
            },
            autoplay: {
                name: 'Autoplay',
                type: 'switch',
                default: 0
            },
            loop: {
                name: 'Loop',
                type: 'switch',
                default: 0
            },
            controls: {
                name: 'Controls',
                type: 'switch',
                default: 0
            }
        }
    },
    useFontControls: false,
    controlGroupIcons: {
        video: 'fa fa-video-camera'
    },
    // Obsolete with the "render" function.
    // To be removed after squares.js is reworked to use the "render" function.
    content: function() {
        var html = '';
        // Switch controls return strings; parseInt() normalizes before comparing.
        if (parseInt(this.controls.video.video_is_a_link.getVal(), 10) == 1) {
            html += '<a href="'+ this.controls.video.link_to.getVal() +'">';
        }
        // Accumulate boolean <video> attributes from the switch controls.
        var videoTagAtts = '';
        if (parseInt(this.controls.video.autoplay.getVal(), 10) == 1) {
            videoTagAtts += ' autoplay ';
        }
        if (parseInt(this.controls.video.loop.getVal(), 10) == 1) {
            videoTagAtts += ' loop ';
        }
        if (parseInt(this.controls.video.controls.getVal(), 10) == 1) {
            videoTagAtts += ' controls ';
        }
        html += '<video '+ videoTagAtts +' id="'+ this.controls.general.id.getVal() +'" style="'+ this.controls.general.css.getVal() +'" class="'+ this.controls.general.classes.getVal() +'"><source src="'+ this.controls.video.mp4_url.getVal() +'" type="video/mp4"><source src="'+ this.controls.video.webm_url.getVal() +'" type="video/webm"><source src="'+ this.controls.video.ogv_url.getVal() +'" type="video/ogv"></video>';
        if (parseInt(this.controls.video.video_is_a_link.getVal(), 10) == 1) {
            html += '</a>';
        }
        return html;
    },
    // Same markup as content(), but reads plain option values (renderer path).
    render: function(options) {
        var html = '';
        if (parseInt(options.video.video_is_a_link, 10) == 1) {
            html += '<a href="'+ options.video.link_to +'">';
        }
        var videoTagAtts = '';
        if (parseInt(options.video.autoplay, 10) == 1) {
            videoTagAtts += ' autoplay ';
        }
        if (parseInt(options.video.loop, 10) == 1) {
            videoTagAtts += ' loop ';
        }
        if (parseInt(options.video.controls, 10) == 1) {
            videoTagAtts += ' controls ';
        }
        html += '<video '+ videoTagAtts +' id="'+ options.general.id +'" style="'+ options.general.css +'" class="'+ options.general.classes +'"><source src="'+ options.video.mp4_url +'" type="video/mp4"><source src="'+ options.video.webm_url +'" type="video/webm"><source src="'+ options.video.ogv_url +'" type="video/ogv"></video>';
        if (parseInt(options.video.video_is_a_link, 10) == 1) {
            html += '</a>';
        }
        return html;
    }
};
// Register with the editor (when loaded) and always with the renderer.
if ($.squaresRegisterElement) {
    $.squaresRegisterElement(videoElementSettings);
}
$.squaresRendererRegisterElement(videoElementSettings);
// Built-in "YouTube" element: wraps a user-supplied iframe embed code in a div
// and rewrites its width/height/allowfullscreen attributes per the controls.
var youtubeElementSettings = {
    name: "YouTube",
    iconClass: "fa fa-youtube",
    useStyleControls: false,
    useFontControls: false,
    controls: {
        youtube: {
            embed_code: {
                name: 'Embed Code',
                type: 'textarea',
                default: '<iframe width="560" height="315" src="https://www.youtube.com/embed/6NC_ODHu5jg" frameborder="0" allowfullscreen></iframe>'
            },
            allow_fullscreen: {
                name: 'Allow Fullscreen',
                type: 'switch',
                default: 1
            },
            iframe_width: {
                name: 'iframe Width',
                type: 'int',
                default: 320
            },
            iframe_auto_width: {
                name: 'iframe Auto Width',
                type: 'switch',
                default: 1
            },
            iframe_height: {
                name: 'iframe Height',
                type: 'int',
                default: 320
            }
        }
    },
    controlGroupIcons: {
        youtube: 'fa fa-youtube'
    },
    // Obsolete with the "render" function.
    // To be removed after squares.js is reworked to use the "render" function.
    content: function() {
        // to do:
        // get the embed code from the controls, wrap it in a div, apply ID, CSS and classes to the DIV and set the iframe to 100% width and height
        // also implement the "allow fullscreen" option
        var embedCode = this.controls.youtube.embed_code.getVal();
        var html = '';
        html += '<div id="'+ this.controls.general.id.getVal() +'" style="'+ this.controls.general.css.getVal() +'" class="'+ this.controls.general.classes.getVal() +'">';
        // Allow fullscreen
        // (the attribute is first stripped, then re-added only when the switch is on)
        embedCode = embedCode.replace('allowfullscreen', '');
        if (parseInt(this.controls.youtube.allow_fullscreen.getVal(), 10) == 1 && embedCode.indexOf('allowfullscreen') == -1) {
            embedCode = embedCode.replace('></iframe>', ' allowfullscreen></iframe>');
        }
        // Set width
        if (parseInt(this.controls.youtube.iframe_auto_width.getVal(), 10) == 1) {
            embedCode = embedCode.replace(/width="\d+"/g, 'width="100%"');
        } else {
            embedCode = embedCode.replace(/width="\d+"/g, 'width="'+ this.controls.youtube.iframe_width.getVal() +'px"');
        }
        // Set height
        embedCode = embedCode.replace(/height="\d+"/g, 'height="'+ this.controls.youtube.iframe_height.getVal() +'px"');
        html += embedCode;
        html += '</div>';
        return html;
    },
    // Same markup as content(), but reads plain option values (renderer path).
    render: function(options) {
        // to do:
        // get the embed code from the controls, wrap it in a div, apply ID, CSS and classes to the DIV and set the iframe to 100% width and height
        // also implement the "allow fullscreen" option
        var embedCode = options.youtube.embed_code;
        var html = '';
        html += '<div id="'+ options.general.id +'" style="'+ options.general.css +'" class="'+ options.general.classes +'">';
        // Allow fullscreen
        embedCode = embedCode.replace('allowfullscreen', '');
        if (parseInt(options.youtube.allow_fullscreen, 10) == 1 && embedCode.indexOf('allowfullscreen') == -1) {
            embedCode = embedCode.replace('></iframe>', ' allowfullscreen></iframe>');
        }
        // Set width
        if (parseInt(options.youtube.iframe_auto_width, 10) == 1) {
            embedCode = embedCode.replace(/width="\d+"/g, 'width="100%"');
        } else {
            embedCode = embedCode.replace(/width="\d+"/g, 'width="'+ options.youtube.iframe_width +'px"');
        }
        // Set height
        embedCode = embedCode.replace(/height="\d+"/g, 'height="'+ options.youtube.iframe_height +'px"');
        html += embedCode;
        html += '</div>';
        return html;
    }
};
// Register with the editor (when loaded) and always with the renderer.
if ($.squaresRegisterElement) {
    $.squaresRegisterElement(youtubeElementSettings);
}
$.squaresRendererRegisterElement(youtubeElementSettings);
// Built-in "Button" element: a styled <a class="squares-button"> inside a div,
// with colors, sizing and link behavior driven by the controls.
var buttonElementSettings = {
    name: "Button",
    iconClass: "fa fa-link",
    controls: {
        button: {
            text: {
                name: 'Text',
                type: 'text',
                default: 'Button'
            },
            link_to: {
                name: 'Link to',
                type: 'text',
                default: '#'
            },
            new_tab: {
                name: 'Open in New Tab',
                type: 'switch',
                default: 0
            },
            display: {
                name: 'Display',
                type: 'button group',
                options: ['inline-block', 'block'],
                default: 'inline-block'
            },
            height: {
                name: 'Height',
                type: 'int',
                default: 44
            },
            bg_color: {
                name: 'Background Color',
                type: 'color',
                default: '#2196f3'
            },
            text_color: {
                name: 'Text Color',
                type: 'color',
                default: '#ffffff'
            },
            border_radius: {
                name: 'Border Radius',
                type: 'int',
                default: 10
            },
            padding: {
                name: 'Padding Left/Right',
                type: 'int',
                default: 20
            },
        }
    },
    controlGroupIcons: {
        button: 'fa fa-link'
    },
    // Obsolete with the "render" function.
    // To be removed after squares.js is reworked to use the "render" function.
    content: function() {
        // Build the inline style string from the button controls.
        // line-height equals height so the label is vertically centered.
        var buttonStyle = '';
        buttonStyle += 'display: ' + this.controls.button.display.getVal() + '; ';
        buttonStyle += 'height: ' + this.controls.button.height.getVal() + 'px; ';
        buttonStyle += 'line-height: ' + this.controls.button.height.getVal() + 'px; ';
        buttonStyle += 'background-color: ' + this.controls.button.bg_color.getVal() + '; ';
        buttonStyle += 'color: ' + this.controls.button.text_color.getVal() + '; ';
        buttonStyle += 'border-radius: ' + this.controls.button.border_radius.getVal() + 'px; ';
        buttonStyle += 'padding-left: ' + this.controls.button.padding.getVal() + 'px; ';
        buttonStyle += 'padding-right: ' + this.controls.button.padding.getVal() + 'px; ';
        var newTab = '';
        if (parseInt(this.controls.button.new_tab.getVal(), 10) == 1) {
            newTab = 'target="_blank"';
        }
        return '<div id="'+ this.controls.general.id.getVal() +'" style="'+ this.controls.general.css.getVal() +'" class="'+ this.controls.general.classes.getVal() +'"><a href="'+ this.controls.button.link_to.getVal() +'" style="'+ buttonStyle +'" '+ newTab +' class="squares-button">'+ this.controls.button.text.getVal() +'</a></div>';
    },
    // Same markup as content(), but reads plain option values (renderer path).
    render: function(options) {
        var buttonStyle = '';
        buttonStyle += 'display: ' + options.button.display + '; ';
        buttonStyle += 'height: ' + options.button.height + 'px; ';
        buttonStyle += 'line-height: ' + options.button.height + 'px; ';
        buttonStyle += 'background-color: ' + options.button.bg_color + '; ';
        buttonStyle += 'color: ' + options.button.text_color + '; ';
        buttonStyle += 'border-radius: ' + options.button.border_radius + 'px; ';
        buttonStyle += 'padding-left: ' + options.button.padding + 'px; ';
        buttonStyle += 'padding-right: ' + options.button.padding + 'px; ';
        var newTab = '';
        if (parseInt(options.button.new_tab, 10) == 1) {
            newTab = 'target="_blank"';
        }
        return '<div id="'+ options.general.id +'" style="'+ options.general.css +'" class="'+ options.general.classes +'"><a href="'+ options.button.link_to +'" style="'+ buttonStyle +'" '+ newTab +' class="squares-button">'+ options.button.text +'</a></div>';
    }
};
// Register with the editor (when loaded) and always with the renderer.
if ($.squaresRegisterElement) {
    $.squaresRegisterElement(buttonElementSettings);
}
$.squaresRendererRegisterElement(buttonElementSettings);
})(jQuery, window, document);
|
#!/usr/bin/ksh
#
# iopattern - print disk I/O pattern.
# Written using DTrace (Solaris 10 3/05).
#
# This prints details on the I/O access pattern for the disks, such as
# percentage of events that were of a random or sequential nature.
# By default totals for all disks are printed.
#
# $Id: iopattern 65 2007-10-04 11:09:40Z brendan $
#
# USAGE: iopattern [-rvw] [-d device] [-f filename] [-m mount_point]
# [interval [count]]
#
# -r # only observe read operations
# -v # print timestamp, string
# -w # only observe write operations
# -d device # instance name to snoop (eg, dad0)
# -f filename # full pathname of file to snoop
# -m mount_point # this FS only (will skip raw events)
# eg,
# iopattern # default output, 1 second intervals
# iopattern 10 # 10 second samples
# iopattern 5 12 # print 12 x 5 second samples
# iopattern -m / # snoop events on filesystem / only
#
# FIELDS:
# %RAN percentage of events of a random nature
# %SEQ percentage of events of a sequential nature
# COUNT number of I/O events
# MIN minimum I/O event size
# MAX maximum I/O event size
# AVG average I/O event size
# KR total kilobytes read during sample
# KW total kilobytes written during sample
# DEVICE device name
# MOUNT mount point
# FILE filename
# TIME timestamp, string
#
# NOTES:
#
# An event is considered random when the heads seek. This program prints
# the percentage of events that are random. The size of the seek is not
# measured - it's either random or not.
#
# SEE ALSO: iosnoop, iotop
#
# IDEA: Ryan Matteson
#
# COPYRIGHT: Copyright (c) 2005 Brendan Gregg.
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, Version 1.0 only
# (the "License"). You may not use this file except in compliance
# with the License.
#
# You can obtain a copy of the license at Docs/cddl1.txt
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# CDDL HEADER END
#
# Author: Brendan Gregg [Sydney, Australia]
#
# 25-Jul-2005 Brendan Gregg Created this.
# 25-Jul-2005 " " Last update.
# 3-Oct-2008 Richard Elling added read/write filters
#
##############################
# --- Process Arguments ---
#
### default variables
opt_device=0; opt_file=0; opt_mount=0; opt_time=0; opt_reads=0; opt_writes=0
filter=0; device=.; filename=.; mount=.; interval=1; count=-1
### process options
while getopts d:f:hm:rvw name
do
case $name in
d) opt_device=1; device=$OPTARG ;;
f) opt_file=1; filename=$OPTARG ;;
m) opt_mount=1; mount=$OPTARG ;;
r) opt_reads=1; opt_writes=0 ;;
v) opt_time=1 ;;
w) opt_writes=1; opt_reads=0 ;;
h|?) cat <<-END >&2
USAGE: iopattern [-rvw] [-d device] [-f filename] [-m mount_point]
[interval [count]]
-r # only observe read operations
-v # print timestamp
-w # only observe write operations
-d device # instance name to snoop
-f filename # snoop this file only
-m mount_point # this FS only
eg,
iopattern # default output, 1 second samples
iopattern 10 # 10 second samples
iopattern 5 12 # print 12 x 5 second samples
iopattern -m / # snoop events on filesystem / only
END
exit 1
esac
done
shift $(( $OPTIND - 1 ))
### option logic
# NOTE(review): [[ "$1" > 0 ]] is a lexicographic string comparison, not a
# numeric one; it accepts digit strings but behaves oddly for non-numeric
# arguments — confirm intended before changing this historical behavior.
if [[ "$1" > 0 ]]; then
interval=$1; shift
fi
if [[ "$1" > 0 ]]; then
count=$1; shift
fi
# Any snoop/read/write option enables filtering inside the D program.
if (( opt_device || opt_mount || opt_file || opt_reads || opt_writes)); then
filter=1
fi
#################################
# --- Main Program, DTrace ---
#
# The D program below is passed to dtrace as a single quoted argument; the
# '$var' splices inject the shell option values into D inline constants.
/usr/sbin/dtrace -n '
/*
* Command line arguments
*/
inline int OPT_time = '$opt_time';
inline int OPT_device = '$opt_device';
inline int OPT_mount = '$opt_mount';
inline int OPT_file = '$opt_file';
inline int OPT_reads = '$opt_reads';
inline int OPT_writes = '$opt_writes';
inline int INTERVAL = '$interval';
inline int COUNTER = '$count';
inline int FILTER = '$filter';
inline string DEVICE = "'$device'";
inline string FILENAME = "'$filename'";
inline string MOUNT = "'$mount'";
#pragma D option quiet
int last_loc[string];
/*
* Program start
*/
dtrace:::BEGIN
{
/* starting values */
diskcnt = 0;
diskmin = 0;
diskmax = 0;
diskran = 0;
diskr = 0;
diskw = 0;
counts = COUNTER;
secs = INTERVAL;
LINES = 20;
line = 0;
last_event[""] = 0;
}
/*
* Print header
*/
profile:::tick-1sec
/line <= 0 /
{
/* print optional headers */
OPT_time ? printf("%-20s ", "TIME") : 1;
OPT_device ? printf("%-9s ", "DEVICE") : 1;
OPT_mount ? printf("%-12s ", "MOUNT") : 1;
OPT_file ? printf("%-12s ", "FILE") : 1;
/* print header */
printf("%4s %4s %6s %6s %6s %6s ",
"%RAN", "%SEQ", "COUNT", "MIN", "MAX", "AVG");
OPT_reads ? printf("%6s\n", "KR") : 1;
OPT_writes ? printf("%6s\n", "KW") : 1;
(!OPT_reads && !OPT_writes) ? printf("%6s %6s\n", "KR", "KW") : 1;
line = LINES;
}
/*
* Check event is being traced
*/
io:genunix::done
{
/* default is to trace unless filtering */
self->ok = FILTER ? 0 : 1;
/* check each filter */
(OPT_device == 1 && DEVICE == args[1]->dev_statname)? self->ok = 1 : 1;
(OPT_file == 1 && FILENAME == args[2]->fi_pathname) ? self->ok = 1 : 1;
(OPT_mount == 1 && MOUNT == args[2]->fi_mount) ? self->ok = 1 : 1;
(OPT_reads == 1 && args[0]->b_flags & B_READ) ? self->ok = 1 : 1;
(OPT_writes == 1 && !(args[0]->b_flags & B_READ)) ? self->ok = 1 : 1;
}
/*
* Process and Print completion
*/
io:genunix::done
/self->ok/
{
/*
* Save details
*/
this->loc = args[0]->b_blkno * 512;
this->pre = last_loc[args[1]->dev_statname];
diskr += args[0]->b_flags & B_READ ? args[0]->b_bcount : 0;
diskw += args[0]->b_flags & B_READ ? 0 : args[0]->b_bcount;
diskran += this->pre == this->loc ? 0 : 1;
diskcnt++;
diskmin = diskmin == 0 ? args[0]->b_bcount :
(diskmin > args[0]->b_bcount ? args[0]->b_bcount : diskmin);
diskmax = diskmax < args[0]->b_bcount ? args[0]->b_bcount : diskmax;
/* save disk location */
last_loc[args[1]->dev_statname] = this->loc + args[0]->b_bcount;
/* cleanup */
self->ok = 0;
}
/*
* Timer
*/
profile:::tick-1sec
{
secs--;
}
/*
* Print Output
*/
profile:::tick-1sec
/secs == 0/
{
/* calculate diskavg */
diskavg = diskcnt > 0 ? (diskr + diskw) / diskcnt : 0;
/* convert counters to Kbytes */
diskr /= 1024;
diskw /= 1024;
/* convert to percentages */
diskran = diskcnt == 0 ? 0 : (diskran * 100) / diskcnt;
diskseq = diskcnt == 0 ? 0 : 100 - diskran;
/* print optional fields */
OPT_time ? printf("%-20Y ", walltimestamp) : 1;
OPT_device ? printf("%-9s ", DEVICE) : 1;
OPT_mount ? printf("%-12s ", MOUNT) : 1;
OPT_file ? printf("%-12s ", FILENAME) : 1;
/* print data */
printf("%4d %4d %6d %6d %6d %6d ",
diskran, diskseq, diskcnt, diskmin, diskmax, diskavg);
OPT_reads ? printf("%6d\n", diskr) : 1;
OPT_writes ? printf("%6d\n", diskw) : 1;
(!OPT_reads && !OPT_writes) ? printf("%6d %6d\n", diskr, diskw) : 1;
/* clear data */
diskmin = 0;
diskmax = 0;
diskcnt = 0;
diskran = 0;
diskr = 0;
diskw = 0;
secs = INTERVAL;
counts--;
line--;
}
/*
* End of program
*/
profile:::tick-1sec
/counts == 0/
{
exit(0);
}
'
|
def naive_search(string, substring):
    """Return the index of the first occurrence of `substring` in `string`.

    Returns -1 when `substring` never occurs; an empty substring matches at
    index 0 (same contract as the character-by-character original).
    """
    window = len(substring)
    for start in range(len(string) - window + 1):
        if string[start:start + window] == substring:
            return start
    return -1
#!/bin/bash
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# ------------------------------------------------------------------
# Fail fast unless a KMS Key ARN has been supplied via the environment.
# ------------------------------------------------------------------
if [ -n "$KMS_KEY_ARN" ]; then
echo "$KMS_KEY_ARN"
exit 0
fi
echo "Invalid KMS Key ARN. It cannot be empty. Go to https://console.aws.amazon.com/kms/home?region=us-east-1#/kms/keys and use a valid Key ARN"
exit 100
from django.contrib.auth.models import User
from django.db.models import fields
from rest_framework import serializers, viewsets, routers
from .models import *
# Serializers define the API representation.
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer exposing a safe subset of Django's User fields."""
    class Meta:
        model = User
        fields = ['username', 'email', 'is_staff']
class ProfileSerializer(serializers.ModelSerializer):
    """Serializer for Profile: picture plus follower/following relations."""
    class Meta:
        model = Profile
        fields = ('profile_pic', 'followers', 'following')
class ApiSerializer(serializers.ModelSerializer):
    """Serializer for Apidata entries (title, URL, image, competition name)."""
    class Meta:
        model = Apidata
        fields = ('title', 'url', 'image', 'competition_name')
|
<reponame>huangbin082/Bin
package thread.notify;
import java.util.concurrent.TimeUnit;
/**
 * Consumer thread: repeatedly pops items from a shared queue, using the queue
 * object itself as the monitor for wait/notify coordination with producers.
 */
public class Consumer extends Thread {

    /** Shared queue to drain; also the lock object for wait/notify. */
    private Queue queue;
    /** Seconds to pause between consume attempts. */
    private int timeToProduceWithSeconds = 2;

    public Consumer(Queue queue) {
        this.queue = queue;
    }

    @Override
    public void run() {
        while (true) {
            consume();
        }
    }

    /**
     * Takes one item off the queue, waiting while it is empty, then wakes any
     * producers blocked on the same monitor. Sleeps outside the lock so
     * producers are not starved during the pause.
     */
    public void consume() {
        synchronized (queue) {
            while (queue.blank()) waitToConsume(queue);
            System.out.println(Thread.currentThread() + " is consuming " + queue.pop());
            notifyToProduce(queue);
        }
        sleepToConsume(timeToProduceWithSeconds);
    }

    public void sleepToConsume(int time) {
        try {
            Thread.sleep(TimeUnit.SECONDS.toMillis(time));
        } catch (InterruptedException e) {
            // Fix: restore the interrupt flag instead of swallowing it with
            // printStackTrace(), so the thread's owner can observe interruption.
            Thread.currentThread().interrupt();
        }
    }

    public void waitToConsume(Object o) {
        try {
            o.wait();
        } catch (InterruptedException e) {
            // Fix: preserve interrupt status (was silently discarded).
            Thread.currentThread().interrupt();
        }
    }

    public void notifyToProduce(Object o) {
        // Re-entrant acquisition: already holds the monitor when called from consume().
        synchronized (o) {
            o.notifyAll();
        }
    }
}
|
package net.nokok.draft.analyzer;
import net.nokok.draft.InjectableMethod;
import net.nokok.draft.internal.MethodSignature;
import net.nokok.draft.internal.MethodWrapper;
import net.nokok.draft.internal.TypeHierarchy;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * Scans a class and its type hierarchy for methods eligible for dependency
 * injection, honouring Java override semantics so a method overridden by a
 * subtype is injected only once (on the subtype's declaration).
 */
public class InjectableMethodAnalyzer {

    private final Class<?> clazz;

    public InjectableMethodAnalyzer(Class<?> clazz) {
        this.clazz = clazz;
    }

    public static InjectableMethodAnalyzer newAnalyzer(Class<?> clazz) {
        return new InjectableMethodAnalyzer(clazz);
    }

    /**
     * Walks the hierarchy top-down, collecting candidate instance methods per
     * class, then selects injectable ones: @Inject methods that are either
     * non-overridable or not overridden by any subtype.
     */
    public List<InjectableMethod> runAnalyze() {
        TypeHierarchyAnalyzer typeHierarchyAnalyzer = TypeHierarchyAnalyzer.newAnalyzer(this.clazz);
        TypeHierarchy types = typeHierarchyAnalyzer.runAnalyze();
        // Phase 1: collect non-synthetic, non-bridge, non-static declared methods per type.
        Map<Class<?>, List<MethodWrapper>> map = new HashMap<>();
        for (Class<?> c : types.topDownOrder()) {
            List<MethodWrapper> list = new ArrayList<>();
            for (Method m : c.getDeclaredMethods()) {
                MethodWrapper method = MethodWrapper.of(m.getDeclaringClass(), m);
                if (method.isSynthetic() || method.isBridge()) {
                    continue;
                }
                if (method.isStatic()) {
                    continue;
                }
                list.add(method);
            }
            map.put(c, list);
        }
        // Phase 2: decide, per declared method, whether it should be injected.
        List<InjectableMethod> methods = new ArrayList<>();
        for (Class<?> clazz : types.topDownOrder()) {
            List<MethodWrapper> methodWrappers = map.get(clazz);
            for (MethodWrapper method : methodWrappers) {
                if (method.hasNoInjectAnnotation()) {
                    continue;
                }
                // Non-overridable methods (e.g. private/final) can be injected directly.
                if (method.isNotOverridable() && method.hasInjectAnnotation()) {
                    methods.add(method.toInjectable());
                    continue;
                }
                MethodSignature methodSignature = method.asSignature();
                // Look for a subtype method that overrides this one; if found, skip
                // this declaration and let the subtype's win.
                Optional<MethodWrapper> overriddenMethodOpt = Optional.empty();
                if (types.hasMoreSubType(clazz)) {
                    for (Class<?> c : types.subTypes(clazz)) {
                        List<MethodWrapper> subTypeMethods = map.getOrDefault(c, Collections.emptyList());
                        Stream<MethodWrapper> methodWrapperStream = subTypeMethods.stream().filter(m -> m.asSignature().equals(methodSignature));
                        if (method.isPackagePrivate()) {
                            // Package-private methods are only overridden from the same package.
                            methodWrapperStream = methodWrapperStream.filter(m -> m.isSamePackage(method));
                        }
                        List<MethodWrapper> subTypeSimilarMethods = methodWrapperStream.collect(Collectors.toList());
                        if (subTypeSimilarMethods.isEmpty()) {
                            continue;
                        }
                        if (subTypeSimilarMethods.size() != 1) {
                            continue;
                        }
                        overriddenMethodOpt = Optional.of(subTypeSimilarMethods.get(0));
                    }
                }
                if (overriddenMethodOpt.isPresent()) {
                    continue;
                }
                // NOTE(review): this check appears unreachable — methods lacking
                // @Inject were already skipped at the top of the loop.
                if (method.hasNoInjectAnnotation()) {
                    continue;
                }
                methods.add(method.toInjectable());
            }
        }
        return methods;
    }
}
|
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from klever.core.vtg.emg.common import get_or_die, model_comment
from klever.core.vtg.emg.common.c import Variable, Function
from klever.core.vtg.emg.common.process import Dispatch, Receive, Block, Subprocess
from klever.core.vtg.emg.translation.code import control_function_comment_begin, control_function_comment_end
from klever.core.vtg.emg.translation.fsa_translator import FSATranslator
from klever.core.vtg.emg.translation.fsa_translator.common import initialize_automaton_variables
from klever.core.vtg.emg.translation.fsa_translator.label_control_function import label_based_function
class StateTranslator(FSATranslator):
    """State-based FSA-to-C translator: encodes each automaton's current state
    in a global ``int`` variable and emits switch-based control functions.

    NOTE(review): the constructor unconditionally raises NotImplementedError
    ("requires update to the newest API"), so every method below is currently
    dead code kept for a future port.
    """

    def __init__(self, logger, conf, source, cmodel, entry_fsa, model_fsa, event_fsa):
        # Deliberately disabled: everything after this raise is unreachable.
        raise NotImplementedError('State translator requires update to the newst API which has not been done')
        self.__state_variables = dict()
        self.__state_chains_memoization = dict()
        self.__switchers_cache = dict()
        # NOTE(review): dict.setdefault has no 'default_value' keyword; this
        # line would raise TypeError if the constructor ever ran this far.
        conf.setdefault('actions composition', default_value=[])
        self.__jump_types = set([t for t in [Dispatch, Receive, Block, Subprocess]
                                 if t.__name__ not in get_or_die(conf, 'actions composition')])
        super(StateTranslator, self).__init__(logger, conf, source, cmodel, entry_fsa, model_fsa, event_fsa)

    def _relevant_checks(self, relevant_automata):
        # Build "statevar == chain_index" C expressions for each relevant state.
        checks = []
        for name in sorted(relevant_automata.keys()):
            for st in relevant_automata[name]['states']:
                index = self.__state_chain(relevant_automata[name]["automaton"], st)
                if index:
                    checks.append("{} == {}".
                                  format(self.__state_variable(relevant_automata[name]["automaton"]).name, index))
        return checks

    def _join_cf_code(self, automaton):
        raise NotImplementedError('State control functions are not designed to be run in separate threads')

    def _call_cf_code(self, automaton, parameter='0'):
        # NOTE(review): the trailing comma makes this return a 1-tuple, not a
        # plain string — presumably intentional for the caller; confirm.
        return "{}({});".format(self._control_function(automaton).name, parameter),

    def _dispatch_blocks(self, action, automaton, function_parameters, automata_peers, replicative):
        # Generate, per peer state, a C block that copies dispatch arguments
        # into the receiver's variables and switches the receiver's state.
        pre = []
        post = []
        blocks = []
        for name in automata_peers:
            for r_state in automata_peers[name]['states']:
                block = []
                # Assign parameters
                if len(function_parameters) > 0:
                    block.append("/* Transfer parameters */")
                    for index in range(len(function_parameters)):
                        # Determine expression
                        receiver_access = automata_peers[name]['automaton'].process.\
                            resolve_access(r_state.action.parameters[index])
                        # Determine variable
                        var = automata_peers[name]['automaton'].determine_variable(receiver_access.label)
                        self._cmodel.add_global_variable(var, automaton.process.file, extern=True)
                        block.append("{} = arg{};".format(var.name, index))
                # Update state
                block.extend(['', "/* Switch state of the reciever */"])
                block.extend(self.__switch_state_code(automata_peers[name]['automaton'], r_state))
                self._cmodel.add_global_variable(self.__state_variable(automata_peers[name]['automaton']),
                                                 automaton.process.file, extern=True)
                blocks.append(block)
        return pre, blocks, post

    def _receive(self, action, automaton):
        code, v_code, conditions, comments = super(StateTranslator, self)._receive(action, automaton)
        # NOTE(review): .format() below is a no-op — the string contains no
        # placeholder for action.action.name.
        code.append("/* Automaton itself cannot perform a receive, look at a dispatcher's code */".
                    format(action.action.name))
        return code, v_code, conditions, comments

    def _compose_control_function(self, automaton):
        """Emit the control function for one automaton: a switch over state
        chains for environment automata, or a label-based body for model ones."""
        self._logger.info('Generate state-based control function for automaton {} based on process {} of category {}'.
                          format(automaton.identifier, automaton.process.name, automaton.process.category))
        # Get function prototype
        cf = self._control_function(automaton)
        cf.definition_file = automaton.process.file
        # Do process initialization
        model_flag = True
        if automaton not in self._model_fsa:
            model_flag = False
            v_code = ["/* Control function based on process '{}' generated for interface category '{}' */".
                      format(automaton.process.name, automaton.process.category)]
            f_code = []
            tab = 0
            state_chains = self.__state_chains(automaton)
            if len(state_chains) == 0:
                f_code.append('/* Empty control function */')
            else:
                if len(state_chains) == 1:
                    new_v_code, new_f_code = self.__state_chain_code(automaton,
                                                                    list(state_chains.values())[0])
                    v_code.extend(new_v_code)
                    f_code.extend(['\t' * tab + stm for stm in new_f_code])
                else:
                    # One switch case per state chain entry point.
                    f_code.append('\t' * tab + 'switch ({}) '.format(self.__state_variable(automaton).name) + '{')
                    tab += 1
                    for case in sorted(list(state_chains.keys())):
                        f_code.append('\t' * tab + 'case {}: '.format(case) + '{')
                        tab += 1
                        new_v_code, new_f_code = self.__state_chain_code(automaton, state_chains[case])
                        v_code.extend(new_v_code)
                        f_code.extend(['\t' * tab + stm for stm in new_f_code])
                        f_code.append('\t' * tab + 'break;')
                        tab -= 1
                        f_code.append('\t' * tab + '}')
                    f_code.append('\t' * tab + 'default: ldv_assume(0);')
                    tab -= 1
                    f_code.append('\t' * tab + '}')
            # Add comments
            comment_data = {'name': 'var_init'}
            # TODO: Reimplement this
            v_code = [model_comment('CONTROL_FUNCTION_INIT_BEGIN', 'Declare auxiliary variables.', comment_data)] + \
                     v_code + \
                     [model_comment('CONTROL_FUNCTION_INIT_END', 'Declare auxiliary variables.', comment_data)]
            v_code.insert(0, control_function_comment_begin(cf.name, automaton.process.comment, automaton.identifier))
            f_code.append(control_function_comment_end(cf.name, automaton.process.category))
            # Add loop for nested case
            cf.body.extend(v_code + f_code)
            self._cmodel.add_global_variable(self.__state_variable(automaton), automaton.process.file, extern=False,
                                             initialize=True)
        else:
            # Generate function body
            label_based_function(self._conf, self._source, automaton, cf, model_flag)
        # Add function to source code to print
        self._cmodel.add_function_definition(cf)
        if model_flag:
            for file in self._source.get_source_function(automaton.process.name).declaration_files:
                self._cmodel.add_function_declaration(file, cf, extern=True)
        else:
            for var in automaton.variables():
                self._cmodel.add_global_variable(var, automaton.process.file, initialize=False)
        return

    def _entry_point(self):
        """Compose the entry point: initialize all automata states, then loop
        forever nondeterministically stepping one automaton at a time."""
        self._logger.info("Generate body for entry point function {}".format(self._cmodel.entry_name))
        body = []
        # Init original states
        for automaton in [self._entry_fsa] + self._event_fsa:
            body.extend(self.__set_initial_state(automaton))
        # Generate loop
        # NOTE(review): the '' below has no trailing comma, so it is implicitly
        # concatenated with "while(1) {" — likely a missing comma.
        body.extend([
            ''
            "while(1) {",
            "\tswitch(ldv_undef_int()) {"
        ])
        for index, automaton in enumerate([self._entry_fsa] + self._event_fsa):
            body.extend(
                [
                    "\t\tcase {}: ".format(index),
                    '\t\t\t{}'.format(self._call_cf(automaton, '0')),
                    "\t\tbreak;"
                ]
            )
        body.extend(
            [
                "\t\tdefault: ldv_assume(0);",
                "\t}",
                "}"
            ]
        )
        return self._cmodel.compose_entry_point(body)

    def _normalize_event_fsa(self, automaton):
        """
        There are no specific requirements implied on fsa structure.

        :param automaton: Automaton object.
        :return: None
        """
        pass

    def __state_variable(self, automaton):
        # Lazily create the per-automaton global C state variable.
        if automaton.identifier not in self.__state_variables:
            var = Variable('emg_statevar_{}'.format(automaton.identifier), 'int a')
            var.use += 1
            self.__state_variables[automaton.identifier] = var
        return self.__state_variables[automaton.identifier]

    def __state_chain_code(self, automaton, state_block):
        # Concatenate the code of all actions in a chain, then append the
        # state switch for the last action (receives switch state elsewhere).
        code = []
        v_code = []
        for action in state_block:
            new_v_code, block = automaton.code[action]
            v_code.extend(new_v_code)
            code.extend(block)
        if not isinstance(state_block[0].action, Receive):
            code.append('/* Set the next state */')
            code.extend(self.__switch_state_code(automaton, action))
        else:
            code.append('/* Omit state transition for a receive */')
        return v_code, code

    def __state_chains(self, automaton):
        """Split the automaton's FSA into linear chains of states (memoized)."""
        if automaton.identifier not in self.__state_chains_memoization:
            blocks_stack = sorted(list(automaton.fsa.initial_states), key=lambda f: f.identifier)
            self.__state_chains_memoization[automaton.identifier] = dict()
            while len(blocks_stack) > 0:
                origin = blocks_stack.pop()
                block = []
                state_stack = [origin]
                no_jump = True
                state = None
                while len(state_stack) > 0:
                    state = state_stack.pop()
                    block.append(state)
                    no_jump = (type(state.action) not in self.__jump_types) and no_jump
                    # Extend the chain while there is a single successor that
                    # does not start a new jump target and is not a receive.
                    if len(state.successors) == 1 and (no_jump or type(list(state.successors)[0].action)
                                                       not in self.__jump_types) \
                            and not isinstance(state.action, Receive):
                        state_stack.append(list(state.successors)[0])
                self.__state_chains_memoization[automaton.identifier][origin.identifier] = block
                for state in [st for st in sorted(list(state.successors), key=lambda f: f.identifier)
                              if st.identifier not in self.__state_chains_memoization[automaton.identifier]
                              and st not in blocks_stack]:
                    blocks_stack.append(state)
        return self.__state_chains_memoization[automaton.identifier]

    def __state_chain(self, automaton, state_identifier):
        chains = self.__state_chains(automaton)
        # Expect exactly single chain with the state identifier
        # NOTE(review): the generator below is never consumed inside the try,
        # so StopIteration cannot be raised here, and next(chains[o]) is
        # applied to a list — this lookup looks broken and needs rework.
        try:
            found = (o for o in chains if state_identifier in next(chains[o]))
        except StopIteration:
            raise RuntimeError('Seems that state {!r} is not reachable in automaton {!r}'.
                               format(state_identifier, automaton.process.name))
        return found

    def __switch_state_code(self, automaton, state):
        # Emit C code assigning the successor state, branching
        # nondeterministically when there are several successors.
        code = []
        successors = state.successors
        if len(state.successors) == 1:
            code.append('{} = {};'.format(self.__state_variable(automaton).name, successors[0].identifier))
        elif len(state.successors) == 2:
            code.extend([
                'if (ldv_undef_int())',
                '\t{} = {};'.format(self.__state_variable(automaton).name, successors[0].identifier),
                'else',
                '\t{} = {};'.format(self.__state_variable(automaton).name, successors[1].identifier),
            ])
        elif len(state.successors) > 2:
            switch_call = self.__state_switch([st.identifier for st in successors])
            code.append('{} = {};'.format(self.__state_variable(automaton).name, switch_call))
        else:
            code.append('/* Reset automaton state */')
            code.extend(self.__set_initial_state(automaton))
        return code

    def __state_switch(self, states):
        """Generate (and cache) a helper C function returning one of the given
        state identifiers nondeterministically; return the call expression."""
        key = ''.join(sorted([str(i) for i in states]))
        if key in self.__switchers_cache:
            return self.__switchers_cache[key]['call']
        # Generate switch function
        name = 'emg_switch_{}'.format(len(list(self.__switchers_cache.keys())))
        func = Function(name, 'int f(void)')
        # todo: Incorrect file
        func.definition_file = self._cmodel.entry_file
        # Generate switch body
        code = list()
        code.append('switch (ldv_undef_int()) {')
        for index in range(len(states)):
            code.append('\tcase {}: '.format(index) + '{')
            code.append('\t\treturn {};'.format(states[index]))
            code.append('\t\tbreak;')
            code.append('\t}')
        code.append('\tdefault: ldv_assume(0);')
        code.append('}')
        func.body.extend(code)
        # Add function
        self._cmodel.add_function_definition(func)
        invoke = '{}()'.format(name)
        self.__switchers_cache[key] = {
            'call': invoke,
            'function': func
        }
        return invoke

    def __set_initial_state(self, automaton):
        # Emit C code choosing (nondeterministically when needed) one of the
        # automaton's initial states and assigning it to the state variable.
        body = list()
        body.append('/* Initialize initial state of automaton {!r} with process {!r} of category {!r} */'.
                    format(automaton.identifier, automaton.process.name, automaton.process.category))
        body.extend(initialize_automaton_variables(self._conf, automaton))
        initial_states = sorted(list(automaton.fsa.initial_states), key=lambda s: s.identifier)
        if len(initial_states) == 1:
            body.append('{} = {};'.format(self.__state_variable(automaton).name, initial_states[0].identifier))
        elif len(initial_states) == 2:
            body.extend([
                'if (ldv_undef_int())',
                '\t{} = {};'.format(self.__state_variable(automaton).name, initial_states[0].identifier),
                'else',
                '\t{} = {};'.format(self.__state_variable(automaton).name, initial_states[1].identifier),
            ])
        elif len(initial_states) > 2:
            body.append('switch (ldv_undef_int()) {')
            for index in range(len(initial_states)):
                body.append('\tcase {}: '.format(index) + '{')
                body.append('\t\t{} = {};'.format(self.__state_variable(automaton).name,
                                                  initial_states[index].identifier))
                body.append('\t\tbreak;'.format(self.__state_variable(automaton).name,
                                                initial_states[index].identifier))
                body.append('\t}')
            body.append('\tdefault: ldv_assume(0);')
            body.append('}')
        return body
|
#!/bin/bash
# Smoke test: run expMatrixToBarchartBed with no arguments and accept either
# success or the usage-error exit code 1; any other exit status fails.
expMatrixToBarchartBed 2> /dev/null || [[ $? == 1 ]]
|
"""
Develop a voice recognition app
"""
# imports
import speech_recognition as sr
# initialize the recognizer
r = sr.Recognizer()
# set the target language
r.langauge = 'en_US'
# set the device platform
platform = 'Android'
# set the minimum confidence
r.energy_threshold = 4000
# start the listening process
with sr.Microphone() as source:
print("Speak now: ")
audio = r.listen(source, phrase_time_limit=5)
# recognize the speech
res = r.recognize_google(audio, show_all=True, platform=platform)
# print the result
for result in res['alternative']:
print(result['transcript']) |
<gh_stars>0
// Join the truthy class names into a single space-separated string,
// dropping null/undefined/empty entries.
export function cl(...classes) {
  return classes.filter(Boolean).join(" ");
}
|
-- Fetch every column for rows whose name contains 'John' (case sensitivity
-- depends on the column collation) and whose age is greater than 25.
SELECT *
FROM table_name
WHERE name LIKE '%John%'
AND age > 25;
#!/usr/bin/env bash
set -ex

# Helper for the local dev environment:
#   dev       build the image from scratch and start all services
#   no-build  start services with the existing image
#   stop      tear everything down
case "$1" in
  "dev")
    echo "Building local image and mounting codebase..."
    # NOTE(review): 777 is very permissive; used so the containers can write
    # to the mounted codebase — consider tightening the permissions.
    chmod -R 777 ../wordpress ../api ../pythonapp ../app1 ../app2
    docker build --no-cache -t tippexs/nginxspace:latest .
    docker-compose up -d
    ;;
  "no-build")
    docker-compose up -d
    ;;
  "stop")
    echo "Stopping Services"
    docker-compose down
    docker rm --force dev_wordpress_1
    ;;
  *)
    # Unknown/missing argument: print usage instead of silently doing nothing.
    echo "Usage: $0 {dev|no-build|stop}" >&2
    exit 1
    ;;
esac
|
<reponame>ansell/pipelines
package org.gbif.pipelines.fragmenter.strategy;
import java.io.IOException;
import java.nio.file.Path;
import java.time.Instant;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import org.gbif.dwc.DwcFiles;
import org.gbif.dwc.record.Record;
import org.gbif.dwc.record.StarRecord;
import org.gbif.dwc.terms.DwcTerm;
import org.gbif.pipelines.fragmenter.common.StarRecordCopy;
import org.gbif.pipelines.fragmenter.record.DwcaExtensionOccurrenceRecord;
import org.gbif.pipelines.fragmenter.record.DwcaOccurrenceRecord;
import org.gbif.pipelines.fragmenter.record.OccurrenceRecord;
import org.gbif.utils.file.ClosableIterator;
import lombok.NoArgsConstructor;
import lombok.SneakyThrows;
/**
 * Processing strategy for DwC-A (Darwin Core Archive) inputs: iterates the
 * archive's star records and pushes each as one or more occurrence records.
 */
@NoArgsConstructor(staticName = "create")
public class DwcaStrategy implements Strategy {

  @SneakyThrows
  @Override
  public void process(Path path, Consumer<OccurrenceRecord> pushRecordFn) {
    try (ClosableIterator<StarRecord> iterator = readDwca(path)) {
      while (iterator.hasNext()) {
        // Copy the record because the iterator may reuse its backing object.
        StarRecord snapshot = StarRecordCopy.create(iterator.next());
        toOccurrenceRecords(snapshot).forEach(pushRecordFn);
      }
    }
  }

  /** Opens an iterator over the archive, extracting compressed {@code .dwca} files first. */
  private ClosableIterator<StarRecord> readDwca(Path path) throws IOException {
    boolean isCompressed = path.toString().endsWith(".dwca");
    if (!isCompressed) {
      return DwcFiles.fromLocation(path).iterator();
    }
    Path extractDir = path.getParent().resolve("tmp" + Instant.now().toEpochMilli());
    return DwcFiles.fromCompressed(path, extractDir).iterator();
  }

  /**
   * Maps a star record to occurrence records: one per Occurrence extension
   * row, or a single record built from the core row when the extension is
   * absent or empty.
   */
  private List<OccurrenceRecord> toOccurrenceRecords(StarRecord starRecord) {
    List<Record> extensionRows = starRecord.extension(DwcTerm.Occurrence);
    if (extensionRows != null && !extensionRows.isEmpty()) {
      return extensionRows.stream()
          .map(row -> DwcaExtensionOccurrenceRecord.create(starRecord.core(), row))
          .collect(Collectors.toList());
    }
    return Collections.singletonList(DwcaOccurrenceRecord.create(starRecord));
  }
}
|
/** Options accepted by the auth helpers (hook and provider sign-in calls). */
export interface IAuthOptions {
  /** Locale identifier, e.g. "en-US". NOTE(review): exact format is not shown in this file — confirm against callers. */
  locale: string,
}
/** Shape of the error object surfaced by {@link useAuth}. */
export interface IError {
  /** Error code string. */
  code: string,
  /** Extra data attached to the error. NOTE(review): structure not visible here — confirm. */
  customData: object,
  /** Error name/identifier. */
  name: string
}
/**
 * Hook exposing the current auth state and provider sign-in helpers.
 *
 * NOTE(review): this is a signature declaration only — the runtime behavior
 * (which backend, when `error`/`isBusy` update) is not visible in this file;
 * confirm against the implementation.
 *
 * @param options - locale and other auth configuration.
 * @returns the auth state plus sign-in/sign-out callbacks.
 */
export function useAuth(options: IAuthOptions): {
  /** Signed-in user object; shape defined by the auth backend. */
  user: object;
  /** Last auth error, if any. */
  error: IError;
  /** True while an auth operation is in flight. */
  isBusy: boolean;
  locale: string;
  signInAnonymously: () => void;
  signInWithEmailAndPassword: (email: string, password: string) => void;
  signInWithGoogle: (scopes: Array<string>, options: IAuthOptions) => void;
  signInWithFacebook: (scopes: Array<string>, options: IAuthOptions) => void;
  signInWithTwitter: (scopes: Array<string>, options: IAuthOptions) => void;
  signInWithGithub: (scopes: Array<string>, options: IAuthOptions) => void;
  signInWithMicrosoft: (scopes: Array<string>, options: IAuthOptions) => void;
  signInWithApple: (scopes: Array<string>, options: IAuthOptions) => void;
  signInWithYahoo: (scopes: Array<string>, options: IAuthOptions) => void;
  signOut: () => void;
};
<gh_stars>100-1000
from aiogram.utils.helper import Helper, HelperMode, ListItem
class TestStates(Helper):
    """Enumeration of test states built on aiogram's Helper/ListItem machinery."""

    # Presumably snake_case mode derives the state names from the attribute
    # names (TEST_STATE_0 -> "test_state_0") — confirm against aiogram docs.
    mode = HelperMode.snake_case

    TEST_STATE_0 = ListItem()
    TEST_STATE_1 = ListItem()
    TEST_STATE_2 = ListItem()
    TEST_STATE_3 = ListItem()
    TEST_STATE_4 = ListItem()
    TEST_STATE_5 = ListItem()
# Allow quick manual inspection of the generated state list.
if __name__ == '__main__':
    print(TestStates.all())
|
import requests
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
import os
class Command(BaseCommand):
    """Management command that downloads an image over HTTP and saves it to disk."""

    help = 'Download and save an image from a RESTful API'

    def handle(self, *args, **options):
        api_url = 'https://example.com/api/image'  # Replace with the actual API endpoint
        output_file_path = 'downloaded_image.jpg'  # Specify the file path for the downloaded image

        try:
            # A timeout prevents the command from hanging forever on a stalled
            # server (requests has no default timeout).
            response = requests.get(api_url, timeout=30)
        except requests.RequestException as exc:
            self.stdout.write(self.style.ERROR(f"Failed to download the image: {exc}"))
            return

        if response.status_code == 200:
            with open(output_file_path, 'wb') as f:
                f.write(response.content)
            self.stdout.write(self.style.SUCCESS(f"Image downloaded and saved at {os.path.abspath(output_file_path)}"))
        else:
            self.stdout.write(self.style.ERROR("Failed to download the image"))
def xor_encrypt(message, key):
    """Encrypt/decrypt ``message`` by XOR-ing each character with ``key``.

    The key repeats cyclically; applying the function twice with the same
    key yields the original message.
    """
    return "".join(
        chr(ord(ch) ^ ord(key[idx % len(key)]))
        for idx, ch in enumerate(message)
    )
# Demo: XOR-encrypt a message with a repeating key.
message = 'secretmessage'
key = 'iwonttellyou'
encrypted = xor_encrypt(message, key)
# NOTE(review): the original trailing comment showed a base64-looking value;
# the actual output is the raw XOR'ed characters (mostly non-printable).
print(encrypted)
<filename>React/react-projects/23-quiz/setup/src/SetupForm.js
import React from 'react'
import { useGlobalContext } from './context'
const SetupForm = () => {
return <h2>setup form</h2>
}
export default SetupForm
|
#!/bin/bash -e
##-------------------------------------------------------------------
## File : nh_backup.sh $service_name
## Author : Bill <billn@naehas.com>
## Description :
## --
## Created : <2014-07-30>
## Updated: Time-stamp: <2014-09-24 09:20:08>
##-------------------------------------------------------------------
# Backup critical data for nh system:
<<<<<<< HEAD
<<<<<<< HEAD
#
set -x
=======
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
# nh_backup.sh nagios
# nh_backup.sh neo4j
# nh_backup.sh all
#
<<<<<<< HEAD
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
. /usr/lib/nh_devops_lib.sh
####################################################################
function prodmergemaster()
{
log "Backup prodmergemaster"
src_dir=/export/data1/
dst_dir=/data2/mergemaster
<<<<<<< HEAD
<<<<<<< HEAD
loginaccount
rsync -azvt naehas@prodmergemaster4.naehas.com:/export/data1/ /data2/mergemaster
=======
rsync -azvt /export/data1/ db8:/data2/mergemaster
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
rsync -azvt /export/data1/ db8:/data2/mergemaster
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
email_check_status
}
function drmergemaster()
{
log "Backup drmergemaster"
<<<<<<< HEAD
<<<<<<< HEAD
src_dir=/data2/mergemaster
dst_dir=/data/mergemaster
loginaccount
rsync -azvt $src_dir/ naehas@drbuild1.naehas.com:$dst_dir
email_check_status
}
function prodcompart()
=======
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
src_dir=/data2/mergemaster/
dst_dir=/data/mergemaster
rsync -azvt $src_dir naehas@172.16.111.223:$dst_dir
email_check_status
}
function compart()
<<<<<<< HEAD
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
{
log "Backup compart-n"
src_dir=/export/data1/
dst_dir=/data2/compart
# TODO
<<<<<<< HEAD
<<<<<<< HEAD
loginaccount
rsync -azvt naehas@compart-n.naehas.com:$src_dir/ $dst_dir
=======
rsync -azvt /export/data1/ db8:/data2/compart
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
rsync -azvt /export/data1/ db8:/data2/compart
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
email_check_status
}
function drcompart()
{
log "Backup drcompart-n"
<<<<<<< HEAD
<<<<<<< HEAD
src_dir=/data2/compart
dst_dir=/data/compart
# TODO
loginaccount
rsync -azvt $src_dir/ naehas@drbuild1.naehas.com:$dst_dir
=======
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
src_dir=/data2/compart/
dst_dir=/data/compart
# TODO
rsync -azvt $src_dir naehas@172.16.111.223:$dst_dir
<<<<<<< HEAD
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
email_check_status
}
function proddashboards()
{
log "Backup proddashboards"
<<<<<<< HEAD
<<<<<<< HEAD
src_dir=/usr/java
dst_dir=/data2/proddashboards
exclude="--exclude logs"
# TODO
loginaccount
# rsync -azvt naehas@web3.naehas.com:$src_dir/ /data2/proddashboards
/usr/bin/rsync --progress -avSHPrt --delete naehas@prodwebe1.naehas.com:$src_dir/ /data2/proddashboards
/usr/bin/rsync --progress -avSHPrt --delete naehas@prodwebf1.naehas.com:$src_dir/ /data2/proddashboards
/usr/bin/rsync --progress -avSHPrt --delete naehas@prodwebg1.naehas.com:$src_dir/ /data2/proddashboards
/usr/bin/rsync --progress -avSHPrt --delete naehas@prodwebh1.naehas.com:$src_dir/ /data2/proddashboards
=======
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
src_dir=/usr/java/
dst_dir=/data2/proddashboards
# TODO
rsync -azvt /export/data1/ db8:/data2/proddashboards
<<<<<<< HEAD
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
email_check_status
}
function drdashboards()
{
loghistory="/data2/logs/drdashboards.log"
log "Backup drdashboards start" >> $loghistory
<<<<<<< HEAD
<<<<<<< HEAD
src_dir="/data2/proddashboards"
dst_dir="/usr/java"
loginaccount
rsync -azvt $src_dir/ naehas@drweb1.naehas.com:$dst_dir
=======
src_dir="/data2/proddashboards/"
dst_dir="/usr/java"
rsync -azvt $srv_dir naehas@drweb2:$dst_dir
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
src_dir="/data2/proddashboards/"
dst_dir="/usr/java"
rsync -azvt $srv_dir naehas@drweb2:$dst_dir
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
log "Backup drdashboards completed" >> $loghistory
email_check_status
}
<<<<<<< HEAD
<<<<<<< HEAD
function prodhaproxy()
{
log "Backup prodhaproxy"
src_dir=/etc/haproxy
dst_dir=/data2/haproxy
exclude="--exclude logs"
loginaccount
/usr/bin/rsync --progress -avSHPrt --delete naehas@prodlbf1.naehas.com:$src_dir/ $dst_dir
email_check_status
}
function prodstatic()
{
log "Backup prodstatic"
src_dir=/var/www/html
dst_dir=/data2/html
exclude="--exclude logs"
loginaccount
/usr/bin/rsync --progress -avSHPrt --delete naehas@prodlbf1.naehas.com:$src_dir/ $dst_dir
email_check_status
}
function prodha()
{
log "Backup prodha"
src_dir=/etc/ha.d
dst_dir=/data2/ha
exclude="--exclude logs"
loginaccount
/usr/bin/rsync --progress -avSHPrt --delete naehas@prodlbf1.naehas.com:$src_dir/ $dst_dir
email_check_status
}
function drhaproxy()
{
loghistory="/data2/logs/drhaproxy.log"
log "Backup drdashboards starts" >> $loghistory
src_dir="/data2/haproxy"
dst_dir="/etc/haproxy"
scp -r -i /home/ubuntu/.ssh/id_rsa $src_dir/ ubuntu@drhaproxy1.naehas.com:$dst_dir
log "Backup drhaproxy completed" >> $loghistory
email_check_status
}
function drstatic()
{
loghistory="/data2/logs/drstatic.log"
log "Backup drdashboards starts" >> $loghistory
src_dir=/data2/html
dst_dir=/var/www/html
loginaccount
/usr/bin/rsync --progress -avSHPrt --delete $src_dir/ naehas@drhaproxy1.naehas.com:$dst_dir
log "Backup drstatic completed" >> $loghistory
email_check_status
}
function drha()
{
loghistory="/data2/logs/drha.log"
log "Backup drdashboards starts" >> $loghistory
src_dir=/data2/ha
dst_dir=/etc/ha.d
scp -r -i /home/ubuntu/.ssh/id_rsa $src_dir/ ubuntu@drhaproxy1.naehas.com:$dst_dir
log "Backup drha completed" >> $loghistory
email_check_status
}
function prodnaehas()
{
log "Backup proddashboards"
src_dir=/home/naehas
dst_dir=/data2/prodnaehas
# TODO
loginaccount
rsync -azvt naehas@web3.naehas.com:$src_dir/ /data2/prodnaehas
email_check_status
}
function drnaehas()
{
loghistory="/data2/logs/drdashboards.log"
log "Backup drdashboards start" >> $loghistory
src_dir="/data2/prodnaehas"
dst_dir="/home/naehas"
loginaccount
rsync -azvt $src_dir/ naehas@drweb1.naehas.com:
log "Backup drdashboards completed" >> $loghistory
email_check_status
}
function prodadapters()
{
log "Backup adapters"
src_dir=/usr/java
dst_dir=/data2/prodadapters
# TODO
loginaccount
rsync -azvt naehas@prodintg1:/usr/java/ /data2/prodadapters
=======
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
function adapters()
{
log "Backup adapters"
src_dir=/export/data1/
dst_dir=/data2/adapters
# TODO
rsync -azvt /export/data1/ db8:/data2/adapters
<<<<<<< HEAD
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
email_check_status
}
function dradapters()
{
service="$1"
log "Backup adapters"
<<<<<<< HEAD
<<<<<<< HEAD
src_dir=/data2/prodadapters
dst_dir=/data/adapters
# TODO
loginaccount
rsync -azvt $src_dir/ naehas@drbuild1.naehas.com:$dst_dir
echo $service_name
email_check_status
}
function prodsvn()
=======
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
src_dir=/data2/adapters/
dst_dir=/data/adapters
# TODO
rsync -azvt $src_dir naehas@172.16.111.223:$dst_dir
echo $service_name
email_check_status
}
function svn()
<<<<<<< HEAD
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
{
log "Backup svn"
src_dir=/etc
dst_dir=/data2/svn
# TODO
<<<<<<< HEAD
<<<<<<< HEAD
loginaccount
rsync -PrltDvO root@svn.naehas.com:$src_dir/ $dst_dir
=======
rsync -PrltDvO /etc db8:/data2/svn
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
rsync -PrltDvO /etc db8:/data2/svn
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
email_check_status
}
function drsvn()
{
log "Backup drsvn"
<<<<<<< HEAD
<<<<<<< HEAD
src_dir=/data2/svn
dst_dir=/data/svn
# TODO
loginaccount
rsync -PrltDvO $src_dir/ naehas@drbuild1.naehas.com:$dst_dir
email_check_status
}
function prodbuild()
=======
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
src_dir=/data2/svn/
dst_dir=/data/svn
# TODO
rsync -PrltDvO $src_dir naehas@172.16.111.223:$dst_dir
email_check_status
}
function build()
<<<<<<< HEAD
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
{
log "Backup build"
src_dir=/backup/hudson/.hudson
dst_dir=/data2/build
# TODO
<<<<<<< HEAD
<<<<<<< HEAD
loginaccount
rsync -avzt --exclude builds --exclude logs --exclude jobs_backup naehas@dev.naehas.com:$src_dir $dst_dir
=======
rsync -avzt --exclude builds --exclude logs --exclude jobs_backup $src_dir db8:$dst_dir
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
rsync -avzt --exclude builds --exclude logs --exclude jobs_backup $src_dir db8:$dst_dir
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
email_check_status
}
function drbuild()
{
log "Backup drbuild"
src_dir="/data2/build/.hudson/"
dst_dir="/data/hudson"
# TODO
<<<<<<< HEAD
<<<<<<< HEAD
loginaccount
rsync -avzt --exclude builds --exclude logs --exclude jobs_backup $src_dir naehas@drbuild1.naehas.com:$dst_dir
email_check_status
}
function prodjira()
=======
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
rsync -avzt --exclude builds --exclude logs --exclude jobs_backup $src_dir naehas@172.16.126.63:$dst_dir
email_check_status
}
function jira()
<<<<<<< HEAD
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
{
log "Backup jira"
src_dir=/home/naehas/jira_424-data/
dst_dir=/data2/jira
# TODO
<<<<<<< HEAD
<<<<<<< HEAD
loginaccount
rsync -azvt naehas@jira-confluence.naehas.com:/home/naehas/jira_424-data/ $dst_dir/data
rsync -azvt naehas@jira-confluence.naehas.com:/export/data2/java/atlassian-jira-4.3.4-standalone/ $dst_dir/installation/
=======
rsync -azvt /home/naehas/jira_424-data/ db8:$dst_dir/data
rsync -azvt /export/data2/java/atlassian-jira-4.3.4-standalone/ db8:$dst_dir/installation/
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
rsync -azvt /home/naehas/jira_424-data/ db8:$dst_dir/data
rsync -azvt /export/data2/java/atlassian-jira-4.3.4-standalone/ db8:$dst_dir/installation/
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
email_check_status
}
function drjira()
{
log "Backup drjira"
<<<<<<< HEAD
<<<<<<< HEAD
src_dir="/data2/jira"
dst_dir="/data/jira"
# TODO
loginaccount
rsync -azvt $src_dir/ naehas@drbuild1.naehas.com:$dst_dir
email_check_status
}
function prodconfluence()
{
log "Backup confluence"
src_dir=/home/naehas/confluence-data
dst_dir=/data2/confluence
# TODO
loginaccount
rsync -azvt naehas@jira-confluence.naehas.com:$src_dir/ $dst_dir/data
email_check_status
#rsync -azvt /export/data2/java/confluence-3.5.7-std/ $dst_dir/installation
=======
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
src_dir="/data2/jira/"
dst_dir="/data/jira"
# TODO
rsync -azvt $src_dir naehas@172.16.126.63:$dst_dir
email_check_status
}
function confluence()
{
log "Backup confluence"
src_dir=/data2/confluence/
dst_dir=/data/confluence
# TODO
rsync -azvt /home/naehas/confluence-data/ $dst_dir/data
email_check_status
rsync -azvt /export/data2/java/confluence-3.5.7-std/ $dst_dir/installation
<<<<<<< HEAD
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
=======
>>>>>>> 4a4eaaa47f616fdfc5699327b8fd1f321bdb02b3
email_check_status
}
# Push the local confluence copy to the DR build host.
# (merge conflict resolved in favor of the HEAD variant; the duplicate
# email_check_status call was dropped.)
function drconfluence()
{
    log "Backup drConfluence"
    src_dir=/data2/confluence
    dst_dir=/data/confluence
    # TODO
    loginaccount
    rsync -azvt $src_dir/ naehas@drbuild1.naehas.com:$dst_dir
    email_check_status
}

# Pull the latest mergedb dumps from the production DB host, then purge
# them remotely so they are not copied again next run.
function prodmergedb()
{
    log "Backup prodmergedb"
    src_dir=/data/backup
    dst_dir=/data2/mergedb
    # SECURITY: plaintext DB credentials embedded below; move them to a
    # protected option file (e.g. ~/.my.cnf) instead of this script.
    backupmergedb="mysqldump -unaehas -p1234rty7890 --all-databases |gzip -9 > /data/backup/prodmergedb3-all-databases_`date +%F-%H-%M`.sql.gz"
    # TODO
    loginaccount
    #ssh root@prodmergedb3.naehas.com -C "$backupmergedb"
    rsync -azvt root@prodmergedb3.naehas.com:$src_dir/ $dst_dir
    ssh root@prodmergedb3.naehas.com -C "rm -rf $src_dir/*"
    email_check_status
}

# Push the local mergedb copy to the DR database host.
function drmergedb()
{
    log "Backup drmergedb"
    src_dir=/data2/mergedb
    dst_dir=/root/backup
    # TODO
    loginaccount
    rsync -azvt $src_dir/ root@drdb4.naehas.com:$dst_dir/
    email_check_status
}

# Start an ssh-agent and load the backup key so rsync/ssh run unattended.
function loginaccount
{
    cd
    eval `ssh-agent`
    ssh-add ~/.au/id_rsa
}
# Mail the outcome of the command that ran immediately before this call.
# BUG FIX: the original tested $? AFTER running echo, so $? was always the
# echo's (successful) status and every mail said SUCCESSFUL; the second
# `elif [ $? -eq 1 ]` then tested the status of the first test/mail, not the
# backup. Capture $? first, before any other command can clobber it.
function email_check_status()
{
    local status=$?
    echo "From email function - $service_name"
    if [ $status -eq 0 ]; then
        mail -s "Backup $service_name status - SUCCESSFUL" billn@naehas.com < /dev/null
    else
        # Any non-zero status is a failure (the original only mailed on 1).
        mail -s "Backup $service_name status - FAILED" billn@naehas.com < /dev/null
    fi;
}
# Pull the dashboards installation from perfweb2 to local disk.
function perfweb2dashboards()
{
    log "Backup perfweb2dashboards"
    src_dir=/usr/java
    dst_dir=/data7/perfweb2
    # TODO
    loginaccount
    rsync -azvt naehas@perfweb2.naehas.com:$src_dir/ $dst_dir
    email_check_status
}

# Pull the dashboards installation from perfweb1 to local disk.
function perfweb1dashboards()
{
    log "Backup perfweb1dashboards"
    src_dir=/usr/java
    dst_dir=/data7/perfweb1
    # TODO
    loginaccount
    rsync -azvt naehas@perfweb1.naehas.com:$src_dir/ $dst_dir
    email_check_status
}

# Run every DR backup in sequence.
# FIXES: merge conflict resolved keeping the full HEAD list; duplicate
# drdashboards call removed; `mail_check_status` was a typo for
# email_check_status (each dr* function already mails, so the stray call is
# dropped); `exit 1` removed so the "all" case can run prodall AND drall and
# the script does not report failure on success.
function drall()
{
    drcompart
    drdashboards
    drmergemaster
    drjira
    drsvn
    drconfluence
    dradapters
    drmergedb
    drstatic
}

# Run every production backup in sequence (same fixes as drall).
function prodall()
{
    prodcompart
    prodmergemaster
    proddashboards
    prodnaehas
    prodadapters
    prodsvn
    prodjira
    prodconfluence
    prodmergedb
    prodstatic
}
# Action performed: dispatch on the service name given as the first argument.
# (merge conflicts resolved keeping the HEAD/prod naming; "prodall" and
# "drall" entries added so the aggregate functions remain directly callable.)
service_name=${1?}
case $service_name in
    "prodcompart")
        prodcompart
        ;;
    "drcompart")
        drcompart
        ;;
    "prodmergemaster")
        prodmergemaster
        ;;
    "drmergemaster")
        drmergemaster
        ;;
    "proddashboards")
        proddashboards
        ;;
    "drdashboards")
        drdashboards
        ;;
    "prodnaehas")
        prodnaehas
        ;;
    "drnaehas")
        drnaehas
        ;;
    "prodadapters")
        prodadapters
        ;;
    "dradapters")
        dradapters
        ;;
    "prodsvn")
        prodsvn
        ;;
    "drsvn")
        drsvn
        ;;
    "prodbuild")
        prodbuild
        ;;
    "drbuild")
        drbuild
        ;;
    "prodjira")
        prodjira
        ;;
    "drjira")
        drjira
        ;;
    "prodconfluence")
        prodconfluence
        ;;
    "drconfluence")
        drconfluence
        ;;
    "prodmergedb")
        prodmergedb
        ;;
    "drmergedb")
        drmergedb
        ;;
    "prodhaproxy")
        prodhaproxy
        ;;
    "prodstatic")
        prodstatic
        ;;
    "prodha")
        prodha
        ;;
    "drhaproxy")
        drhaproxy
        ;;
    "drstatic")
        drstatic
        ;;
    "drha")
        drha
        ;;
    "perfweb1dashboards")
        perfweb1dashboards
        ;;
    "perfweb2dashboards")
        perfweb2dashboards
        ;;
    "prodall")
        prodall
        ;;
    "drall")
        drall
        ;;
    "all")
        prodall
        drall
        ;;
    *)
        echo "ERROR: unsupported service_name($service_name) for backup"
esac
### File : nh_backup.sh ends
|
import { openDB } from 'idb';
// Create (or open) the 'jate' database at version 1.
// FIX: the original never awaited openDB, so initdb()'s promise resolved
// before the database existed and any open/upgrade error was unhandled.
const initdb = async () => {
  console.log("Initializing DB...");
  await openDB('jate', 1, {
    // upgrade() only runs when the database is created or its version changes.
    upgrade(db) {
      if (db.objectStoreNames.contains('jate')) {
        console.log('jate database already exists');
        return;
      }
      // Auto-incrementing primary key named 'id'.
      db.createObjectStore('jate', { keyPath: 'id', autoIncrement: true });
      console.log('jate database created');
    },
  });
};
// TODO: Add logic to a method that accepts some content and adds it to the database
// Save the given content under the fixed key 'content' (put() overwrites,
// so repeated saves keep a single record).
// FIX: await the transaction commit and return the result (the original
// returned undefined, so callers could not tell whether the save happened).
export const putDb = async (content) => {
  const jateDb = await openDB('jate', 1);
  const tx = jateDb.transaction('jate', 'readwrite');
  const store = tx.objectStore('jate');
  const result = await store.put({ id: 'content', value: content });
  await tx.done; // ensure the write is committed before resolving
  console.log('data saved to the jateDB', result);
  return result;
};
// TODO: Add logic for a method that gets all the content from the database
// Read all content records from the database.
// FIXES: the original fetched the data but never returned it (callers always
// got undefined); it also opened a 'readwrite' transaction for a pure read —
// 'readonly' is sufficient and cannot block writers. Log typo fixed.
export const getDb = async () => {
  const jateDb = await openDB('jate', 1);
  const tx = jateDb.transaction('jate', 'readonly');
  const store = tx.objectStore('jate');
  const result = await store.getAll();
  console.log('data received from the jateDB', result);
  return result;
};
// Open/create the database as soon as this module is loaded.
initdb();
|
<gh_stars>1-10
package client.model;
import lombok.Getter;
import lombok.Setter;
import java.io.Serializable;
import java.util.List;
/**
 * Generic serializable transfer object pairing a {@link Person} with an
 * arbitrary list of payload "points" of type {@code T}.
 * Lombok's {@code @Getter}/{@code @Setter} generate the accessors.
 */
@Setter
@Getter
public class ComplexModel<T> implements Serializable {
    // Identifier of this model instance — presumably assigned server-side; confirm against callers.
    private Integer id;
    private Person person;
    private List<T> points;
}
<filename>gulpfile.js
"use strict";

var gulp = require("gulp");
var istanbul = require("gulp-istanbul");
var isparta = require("isparta");
var mocha = require("gulp-mocha");
var eslint = require("gulp-eslint");
var babel = require("gulp-babel");

// Register the Babel require hook so ES2015+ sources/specs load under mocha.
require("babel-core/register");

// Glob patterns for sources and their mocha specs.
var config = {
    src: ["src/**/*.js"],
    test: ["test/**/*.spec.js"]
};

// Run the spec suite once.
gulp.task("test", function() {
    return gulp.src(config.test)
        .pipe(mocha({
            // FIX: mocha's option is "useColors"; the original "useColor"
            // was silently ignored, so output was never colorized.
            useColors: true,
            reporter: "spec"
        }));
});

// Re-run the specs whenever a source or spec file changes.
gulp.task("test:watch", function() {
    gulp.watch(config.test.concat(config.src), ["test"]);
});

// Lint sources and fail the task on any error.
gulp.task("lint", function() {
    return gulp.src(config.src)
        .pipe(eslint())
        .pipe(eslint.format())
        .pipe(eslint.failAfterError());
});

// Instrument sources (isparta handles ES2015+), run the specs, write coverage reports.
gulp.task("coverage", function() {
    return gulp.src(config.src)
        .pipe(istanbul({
            instrumenter: isparta.Instrumenter,
            includeUntested: true
        }))
        .pipe(istanbul.hookRequire())
        .on("finish", function() {
            gulp.src(config.test)
                .pipe(mocha())
                .pipe(istanbul.writeReports());
        });
});

// Transpile sources with Babel into dist/.
gulp.task("build", function () {
    return gulp.src(config.src)
        .pipe(babel())
        .pipe(gulp.dest("dist"));
});

gulp.task("default", ["coverage"]);
|
const fs = require('fs');
const path = require('path');

// Usage: node generator.js ./shnm8x16r.bdf [more.bdf ...]
// Converts each BDF bitmap font into a C++ header next to src/font.hpp.

const type = 'uint8_t';
// Number of hex digits that encode one row element of the chosen C type.
const data_element_length = ({
    uint8_t: 2,
    uint16_t: 4,
    uint32_t: 8,
    uint64_t: 16,
})[type];

process.argv.filter((_, i) => i >= 2).forEach(arg => {
    // Keep only glyphs with codes 32..159 and rebase them to 0..127.
    const char_data = fs.readFileSync(arg).toString().split('STARTCHAR').filter(x => {
        const i = x.indexOf('ENCODING');
        const code = parseInt(x.substring(i + 8, x.indexOf('\n', i + 8)));
        return code >= 32 && code < (32 + 128);
    }).map(x => {
        const i = x.indexOf('ENCODING');
        const code = parseInt(x.substring(i + 8, x.indexOf('\n', i + 8))) - 32;
        // Parse the BITMAP block: one array of parsed hex elements per row.
        const data = x.substring(x.indexOf('BITMAP') + 7, x.indexOf('ENDCHAR') - 1)
            .split('\n')
            .map(line => line.match(new RegExp(`.{${data_element_length}}`, 'g')).map(n => parseInt(n, 16)));
        return { code, data };
    });
    // Dense 128-entry table; codes missing from the font fall back to `empty`.
    const font = Array(128).fill(0).map((_, i) => {
        return char_data.find(({ code }) => code == i) || ({ code: i, data: false });
    });
    const size = { width: 8, height: 16 };
    const name = path.basename(arg, '.bdf');
    const fd = fs.openSync(path.join(__dirname, '..', 'src', name + '.hpp'), 'w');
    try {
        fs.writeSync(fd, `#pragma once
#include "font.hpp"
namespace Font {
class ${name} : public FontBase {
public:
${name}();
private:
static ${type} empty[];
${char_data.map(x => `\tstatic ${type} ${name}_${x.code}[];`).join('\n')}
};
${type} ${name}::empty[] = {${Array(size.width * size.height / 8).fill(0).join(',')}};
${char_data.filter(x => x.data).map(x => `${type} ${name}::${name}_${x.code}[] = {${x.data.map(line => line.join(', ')).join(',')}};`).join('\n')}
${name}::${name}() : FontBase() {
width = ${size.width};
height = ${size.height};
${font.map(x => `\t${name}::font[${x.code}] = ${x.data ? `${name}::${name}_${x.code}` : 'empty'};`).join('\n')}
}
}
`);
    } finally {
        // FIX: the descriptor was previously never closed (leak; also risks
        // unflushed data on some platforms).
        fs.closeSync(fd);
    }
    console.log(`Success generate ${name}.hpp`);
});
|
/*
* Copyright 2018-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.filippov.data.validation.tool.datasource;
import com.filippov.data.validation.tool.model.Datasource;
import com.filippov.data.validation.tool.model.DatasourceConfig;
import com.filippov.data.validation.tool.model.datasource.ColumnData;
import com.filippov.data.validation.tool.model.datasource.DatasourceMetadata;
import com.filippov.data.validation.tool.model.datasource.DatasourceQuery;
import com.filippov.data.validation.tool.model.datastorage.Query;
import com.filippov.data.validation.tool.model.datastorage.RelationType;
import lombok.SneakyThrows;
/**
 * {@link Datasource} implementation that delegates all data access to a
 * remote HTTP endpoint through an {@link HttpDatasourceConnector}.
 */
public class HttpDatasource implements Datasource {
    private final HttpDatasourceConfig config;
    private final HttpDatasourceConnector connector;

    public HttpDatasource(HttpDatasourceConfig config, HttpDatasourceConnector connector) {
        this.config = config;
        this.connector = connector;
    }

    @Override
    public DatasourceConfig getConfig() {
        return config;
    }

    @Override
    @SneakyThrows
    public DatasourceMetadata getMetadata() {
        return connector.getMetadata();
    }

    @Override
    @SneakyThrows
    public ColumnData getColumnData(DatasourceQuery query) {
        // TODO: implement batching — currently the whole column is fetched
        // in a single request (offset 0, length = total size).
        return connector.getColumnData(query, 0, connector.getSize(query));
    }

    @Override
    public DatasourceQuery toDatasourceQuery(Query query, RelationType relationType) {
        // Resolve the table, key column and data column that correspond to
        // the requested side of the table/column pair.
        return DatasourceQuery.builder()
                .table(query.getTablePair().getDatasourceTableFor(relationType))
                .keyColumn(query.getTablePair().getKeyColumnPair().getColumnFor(relationType))
                .dataColumn(query.getColumnPair().getColumnFor(relationType))
                .build();
    }

    // FIX: added @Override for consistency with the other overridden methods
    // (catches signature drift at compile time).
    @Override
    public String toString() {
        return "HttpDatasource(datasourceConfig=" + this.config + ")";
    }
}
|
# import necessary packages
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical

# Build the CNN: a conv + pooling stage followed by a dense classifier head
# ending in a 10-way softmax. Identical architecture to the add()-based
# version, expressed as a single layer list.
layers = [
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 3)),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax'),
]
model = Sequential(layers)

# Compile the model (same loss, optimizer and metrics as before).
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
<filename>src/connect.ts
import type { Adapter, ConnectionOptions } from "./adapters/adapter.ts";
import { joinPath } from "../deps.ts";
import { MysqlAdapter } from "./adapters/mysql.ts";
import { PostgresAdapter } from "./adapters/postgres.ts";
import { SqliteAdapter } from "./adapters/sqlite.ts";
import { BaseModel, ObjectType } from "./basemodel.ts";
/** The database engines a connection can target. */
export type DatabaseDialect = "mysql" | "postgres" | "sqlite";

/**
 * Connection options extended with the dialect selector and an optional list
 * of model classes to bind to the adapter's manager on connect.
 */
interface ConnectionConfig extends ConnectionOptions {
  type: DatabaseDialect;
  models?: ObjectType<BaseModel>[];
}
/**
 * Connect to database and automatically chose the driver
 */
export async function connect(): Promise<Adapter>;
/**
 * Connect to database and automatically chose the driver
 *
 * @param options Connection options
 */
export async function connect(options: ConnectionConfig): Promise<Adapter>;
/**
 * Connect to database and automatically chose the driver
 *
 * @param filePath Path to the database configuration file (default: "ormconfig.json")
 */
export async function connect(filePath: string): Promise<Adapter>;
/**
 * Implementation: resolves the configuration (from the argument or a JSON
 * config file), instantiates the adapter matching `type`, binds the given
 * model classes to the adapter's manager, and connects.
 *
 * @param options Connection options, or a path to a JSON configuration file
 * @throws Error when no configuration can be found or `type` is invalid
 */
export async function connect(
  options?: ConnectionConfig | string,
): Promise<Adapter> {
  let connectionOptions: ConnectionConfig;

  // If connection options are not provided, look up the config file
  // ("ormconfig.json" by default).
  if (!options || typeof options === "string") {
    const path = joinPath(Deno.cwd(), options ? options : "./ormconfig.json");
    try {
      // readTextFile replaces the manual readFile + TextDecoder dance.
      const result = await Deno.readTextFile(path);
      // TODO: validate connection options at runtime; the previous
      // `as any` cast hid even the nominal shape from the checker.
      connectionOptions = JSON.parse(result) as ConnectionConfig;
    } catch (err) {
      if (err instanceof Deno.errors.NotFound) {
        throw new Error(
          "Cannot connect to database without connection options!",
        );
      } else {
        throw err;
      }
    }
  } else {
    connectionOptions = options;
  }

  let adapter: Adapter;
  switch (connectionOptions.type) {
    case "mysql":
      adapter = new MysqlAdapter(connectionOptions);
      break;
    case "postgres":
      adapter = new PostgresAdapter(connectionOptions);
      break;
    case "sqlite":
      adapter = new SqliteAdapter(connectionOptions);
      break;
    default:
      throw new Error("Database type invalid!");
  }

  // Give every registered model class a back-reference to this adapter's manager.
  if (Array.isArray(connectionOptions.models)) {
    for (const model of connectionOptions.models) {
      if (model.prototype instanceof BaseModel) {
        // `manager` is assigned reflectively as a static property.
        (model as any).manager = adapter.getManager();
      }
    }
  }

  await adapter.connect();
  return adapter;
}
|
package com.honyum.elevatorMan.activity.common;
import android.content.Intent;
import android.os.Bundle;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.BaseAdapter;
import android.widget.ImageView;
import android.widget.ListView;
import android.widget.TextView;
import com.baidu.mapapi.SDKInitializer;
import com.baidu.mapapi.map.BaiduMap;
import com.baidu.mapapi.map.BitmapDescriptor;
import com.baidu.mapapi.map.BitmapDescriptorFactory;
import com.baidu.mapapi.map.MapStatusUpdate;
import com.baidu.mapapi.map.MapStatusUpdateFactory;
import com.baidu.mapapi.map.MapView;
import com.baidu.mapapi.map.Marker;
import com.baidu.mapapi.map.MarkerOptions;
import com.baidu.mapapi.map.OverlayOptions;
import com.baidu.mapapi.model.LatLng;
import com.honyum.elevatorMan.R;
import com.honyum.elevatorMan.activity.property.AddPermanentAddressActivity;
import com.honyum.elevatorMan.base.BaseFragmentActivity;
import com.honyum.elevatorMan.data.PropertyAddressInfo;
import com.honyum.elevatorMan.net.PropertyAddressListRequest;
import com.honyum.elevatorMan.net.PropertyAddressListResponse;
import com.honyum.elevatorMan.net.base.NetConstant;
import com.honyum.elevatorMan.net.base.NetTask;
import com.honyum.elevatorMan.net.base.RequestHead;
import java.util.List;
/**
 * Lists the permanent property addresses for the current branch and shows the
 * selected one as a marker on a Baidu map. The title bar's right button opens
 * {@code AddressActivity} to add a new address.
 */
public class PermanentAddressActivity extends BaseFragmentActivity {
    private MapView mapView;
    private BaiduMap baiduMap;
    // Single marker reused for whichever address is currently selected (lazily created).
    private Marker marker;

    @Override
    public void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // Baidu SDK must be initialized before inflating a layout containing MapView.
        SDKInitializer.initialize(this.getApplicationContext());
        setContentView(R.layout.activity_permanent_address);
        initTitleBar();
        initMapView();
    }

    @Override
    protected void onResume() {
        mapView.onResume();
        super.onResume();
        // Re-fetch the address list on every resume so newly added addresses appear.
        requestAdd();
    }

    @Override
    protected void onPause() {
        mapView.onPause();
        super.onPause();
    }

    @Override
    protected void onDestroy() {
        mapView.onDestroy();
        super.onDestroy();
    }

    /** Requests the property address list for this branch from the server. */
    private void requestAdd() {
        String server = getConfig().getServer() + NetConstant.GET_PROPERTY_ADDRESS_LIST;
        PropertyAddressListRequest request = new PropertyAddressListRequest();
        RequestHead head = new RequestHead();
        PropertyAddressListRequest.PalReqBody body = request.new PalReqBody();
        head.setAccessToken(getConfig().getToken());
        head.setUserId(getConfig().getUserId());
        body.setBranchId(getConfig().getBranchId());
        request.setHead(head);
        request.setBody(body);
        NetTask netTask = new NetTask(server, request) {
            @Override
            protected void onResponse(NetTask task, String result) {
                PropertyAddressListResponse pal = PropertyAddressListResponse.getPal(result);
                if (pal.getBody() == null) {
                    return;
                }
                List<PropertyAddressInfo> body = pal.getBody();
                initListView(body);
            }
        };
        addTask(netTask);
    }

    /** Binds the fetched addresses to the list; tapping a row marks it on the map. */
    private void initListView(List<PropertyAddressInfo> body) {
        ListView listView = (ListView) findViewById(R.id.listView);
        final MyAdapter adapter = new MyAdapter(body);
        listView.setAdapter(adapter);
        listView.setEmptyView(findViewById(R.id.tv_empty_tip));
        listView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
                // Ignore taps on the already-selected row.
                if (adapter.getSelectedItem() == position) {
                    return;
                }
                PropertyAddressInfo info = (PropertyAddressInfo) adapter.getItem(position);
                // A 0 latitude/longitude is treated as "no usable location".
                if (info.getLat() == 0 || info.getLng() == 0) {
                    showToast("位置信息不明确");
                    return;
                }
                adapter.setSelectedItem(position);
                markLocation(info);
            }
        });
    }

    /** Centers/zooms the map on the address and places (or moves) the marker there. */
    private void markLocation(PropertyAddressInfo info) {
        LatLng point = new LatLng(info.getLat(), info.getLng());
        MapStatusUpdate update = MapStatusUpdateFactory.newLatLngZoom(point, 15);
        baiduMap.animateMapStatus(update);
        MarkerOptions options = (MarkerOptions) initOptions();
        options.position(point);
        if (marker == null) {
            marker = (Marker) baiduMap.addOverlay(options);
        }
        marker.setPosition(point);
    }

    /**
     * Returns the base marker options: icon rendered from a layout view,
     * z-index 9, not draggable.
     */
    private OverlayOptions initOptions() {
        View view = View.inflate(this, R.layout.layout_location_marker, null);
        ImageView imgMarker = (ImageView) view.findViewById(R.id.img_marker);
        imgMarker.setImageResource(R.drawable.marker_alarm_old);
        BitmapDescriptor bitmapDescriptor = BitmapDescriptorFactory
                .fromView(view);
        return new MarkerOptions().icon(bitmapDescriptor).zIndex(9)
                .draggable(false);
    }

    private void initMapView() {
        mapView = (MapView) findViewById(R.id.mapView);
        baiduMap = mapView.getMap();
    }

    /** Title bar: back button on the left, "add address" button on the right. */
    private void initTitleBar() {
        initTitleBar(R.id.title, "常驻地址",
                R.drawable.back_normal, backClickListener,
                R.drawable.icon_add, new View.OnClickListener() {
                    @Override
                    public void onClick(View v) {
                        Intent intent = new Intent(PermanentAddressActivity.this,
                                AddressActivity.class);
                        intent.putExtra("category","wy");
                        startActivity(intent);
                    }
                });
    }

    /** List adapter that highlights the currently selected address row. */
    private class MyAdapter extends BaseAdapter {
        // Index of the highlighted row; -1 means no selection yet.
        private int selectedItem = -1;
        private List<PropertyAddressInfo> propertyAddressInfos;

        public MyAdapter(List<PropertyAddressInfo> propertyAddressInfos) {
            this.propertyAddressInfos = propertyAddressInfos;
        }

        @Override
        public int getCount() {
            return propertyAddressInfos.size();
        }

        @Override
        public Object getItem(int i) {
            return propertyAddressInfos.get(i);
        }

        @Override
        public long getItemId(int i) {
            return i;
        }

        @Override
        public View getView(int position, View view, ViewGroup viewGroup) {
            if (view == null) {
                view = View.inflate(PermanentAddressActivity.this, R.layout.layout_list_text1_item, null);
            }
            PropertyAddressInfo info = propertyAddressInfos.get(position);
            // Grey background marks the selected row.
            if (position == selectedItem) {
                view.setBackgroundColor(getResources().getColor(R.color.grey));
            } else {
                view.setBackgroundColor(getResources().getColor(R.color.transparent));
            }
            TextView tvTexT = (TextView) view.findViewById(R.id.tv_text);
            tvTexT.setText(info.getAddress());
            return view;
        }

        int getSelectedItem() {
            return selectedItem;
        }

        void setSelectedItem(int selectedItem) {
            this.selectedItem = selectedItem;
            notifyDataSetChanged();
        }
    }
}
|
# This is a shell script that calls functions and scripts from
# tml@iki.fi's personal work environment. It is not expected to be
# usable unmodified by others, and is included only for reference.

# Package identity for this win32 build of ATK.
MOD=atk
VER=1.32.0
REV=1
ARCH=win32
THIS=${MOD}_${VER}-${REV}_${ARCH}

# Names of the runtime and developer zip archives to produce.
RUNZIP=${MOD}_${VER}-${REV}_${ARCH}.zip
DEVZIP=${MOD}-dev_${VER}-${REV}_${ARCH}.zip

# Unique install-target directory derived from the package id hash.
HEX=`echo $THIS | md5sum | cut -d' ' -f1`
TARGET=c:/devel/target/$HEX

# NOTE(review): usedev/usemsvs6/latest/manifestify are helpers from the
# author's private environment; not defined in this file.
usedev
usemsvs6

(
set -x

# Put each dependency's bin dir on PATH and its pkgconfig dir on PKG_CONFIG_PATH.
DEPS=`latest --arch=${ARCH} zlib gettext-runtime glib`
GETTEXT_RUNTIME=`latest --arch=${ARCH} gettext-runtime`

for D in $DEPS; do
    PATH="/devel/dist/${ARCH}/$D/bin:$PATH"
    PKG_CONFIG_PATH=/devel/dist/${ARCH}/$D/lib/pkgconfig:$PKG_CONFIG_PATH
done

# Configure, build, install to the hashed target dir, then zip the results.
lt_cv_deplibs_check_method='pass_all' \
CC='gcc -mtune=pentium3 -mthreads' \
CPPFLAGS="-I/devel/dist/${ARCH}/${GETTEXT_RUNTIME}/include" \
LDFLAGS="-L/devel/dist/${ARCH}/${GETTEXT_RUNTIME}/lib \
-Wl,--enable-auto-image-base" \
CFLAGS=-O2 \
./configure --disable-gtk-doc --disable-static --prefix=c:/devel/target/$HEX

(cd atk; make atkmarshal.h atkmarshal.c) &&
PATH=/devel/target/$HEX/bin:.libs:$PATH make install &&
./atk-zip.sh &&
mv /tmp/${MOD}-${VER}.zip /tmp/$RUNZIP &&
mv /tmp/${MOD}-dev-${VER}.zip /tmp/$DEVZIP

# Log everything; bundle this script and the log into the dev zip.
) 2>&1 | tee /devel/src/tml/packaging/$THIS.log

(cd /devel && zip /tmp/$DEVZIP src/tml/packaging/$THIS.{sh,log}) &&
manifestify /tmp/$RUNZIP /tmp/$DEVZIP
|
<reponame>soberich/spanner-hibernate<gh_stars>1-10
package nl.topicus.hibernate.dialect;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.hibernate.boot.Metadata;
import org.hibernate.dialect.unique.DefaultUniqueDelegate;
import org.hibernate.engine.config.spi.ConfigurationService;
import org.hibernate.engine.config.spi.StandardConverters;
import org.hibernate.mapping.UniqueKey;
/**
 * Unique-key delegate for Cloud Spanner that consults the database's existing
 * unique indices so redundant CREATE/DROP INDEX statements are skipped.
 */
public class CloudSpannerUniqueDelegate extends DefaultUniqueDelegate
{
    /** One unique index: name, owning table and its (upper-cased) column names. */
    private static final class UniqueIndex
    {
        private String name;
        private String table;
        private Set<String> columns = new HashSet<>();

        private UniqueIndex(String name, String table, String column)
        {
            this.name = name;
            this.table = table;
            this.columns.add(column);
        }

        @Override
        public boolean equals(Object other)
        {
            if (!(other instanceof UniqueIndex))
                return false;
            return ((UniqueIndex) other).name.equals(name);
        }

        @Override
        public int hashCode()
        {
            return name.hashCode();
        }
    }

    /** Cache of the unique indices present in the database, keyed by "table.index". */
    private static final class UniqueIndices
    {
        private Map<String, UniqueIndex> map = new HashMap<>();

        public void addIndexedColumn(String indexName, String table, String column)
        {
            String key = table + "." + indexName;
            UniqueIndex idx = map.get(key);
            if (idx == null)
            {
                idx = new UniqueIndex(indexName, table, column.toUpperCase());
                map.put(key, idx);
            }
            else
            {
                idx.columns.add(column.toUpperCase());
            }
        }

        /** Returns the cached index on the key's table with exactly its column set, or null. */
        public UniqueIndex getIndex(UniqueKey uniqueKey)
        {
            for (UniqueIndex idx : map.values())
            {
                if (idx.table.equalsIgnoreCase(uniqueKey.getTable().getName()))
                {
                    List<String> cols = uniqueKey.getColumns().stream().map(x -> x.getName().toUpperCase())
                            .collect(Collectors.toList());
                    if (idx.columns.containsAll(cols) && cols.containsAll(idx.columns))
                    {
                        return idx;
                    }
                }
            }
            return null;
        }

        public void removeIndex(UniqueKey uniqueKey)
        {
            String key = uniqueKey.getTable().getName() + "." + uniqueKey.getName();
            map.remove(key);
        }
    }

    private UniqueIndices indices;

    public CloudSpannerUniqueDelegate(CloudSpannerDialect dialect)
    {
        super(dialect);
    }

    /** Lazily populates the index cache from JDBC metadata (at most once). */
    private void initIndices()
    {
        if (indices == null)
        {
            // FIX: always create the cache, even when metadata is unavailable,
            // so callers never dereference a null 'indices' (previously, a null
            // DatabaseMetaData left 'indices' null and the callers threw NPE).
            indices = new UniqueIndices();
            DatabaseMetaData dbMetadata = ((CloudSpannerDialect) this.dialect).getMetadata();
            if (dbMetadata != null)
            {
                try (ResultSet rs = dbMetadata.getIndexInfo("", "", null, true, false))
                {
                    while (rs.next())
                    {
                        String indexName = rs.getString("INDEX_NAME");
                        String tableName = rs.getString("TABLE_NAME");
                        String column = rs.getString("COLUMN_NAME");
                        indices.addIndexedColumn(indexName, tableName, column);
                    }
                }
                catch (SQLException e)
                {
                    // unable to access index info; fall back to an empty cache
                }
            }
        }
    }

    @Override
    public String getAlterTableToAddUniqueKeyCommand(UniqueKey uniqueKey, Metadata metadata)
    {
        ConfigurationService config = metadata.getDatabase().getBuildingOptions().getServiceRegistry()
                .getService(ConfigurationService.class);
        if (config != null)
        {
            String value = config.getSetting("hibernate.hbm2ddl.auto", StandardConverters.STRING);
            // FIX: 'value' is null when the setting is absent; the original
            // value.equalsIgnoreCase("update") then threw an NPE. Constant-first
            // comparison is null-safe and treats "absent" as "not update".
            if (!"update".equalsIgnoreCase(value))
            {
                // We should only check whether it is already present in an
                // update scenario, in all other scenarios, just return the
                // actual create statement.
                return org.hibernate.mapping.Index.buildSqlCreateIndexString(dialect, uniqueKey.getName(),
                        uniqueKey.getTable(), uniqueKey.columnIterator(), uniqueKey.getColumnOrderMap(), true,
                        metadata);
            }
        }
        // First check that this unique key is not already present, as this is a
        // lot faster than trying to create it and then fail.
        initIndices();
        UniqueIndex idx = indices.getIndex(uniqueKey);
        if (idx != null)
        {
            return null;
        }
        return org.hibernate.mapping.Index.buildSqlCreateIndexString(dialect, uniqueKey.getName(), uniqueKey.getTable(),
                uniqueKey.columnIterator(), uniqueKey.getColumnOrderMap(), true, metadata);
    }

    @Override
    public String getAlterTableToDropUniqueKeyCommand(UniqueKey uniqueKey, Metadata metadata)
    {
        // First check that this unique key actually is present, as this is a
        // lot faster than trying to drop it and then fail.
        initIndices();
        UniqueIndex idx = indices.getIndex(uniqueKey);
        if (idx == null)
        {
            return null;
        }
        // Remove from cache
        indices.removeIndex(uniqueKey);
        final StringBuilder buf = new StringBuilder("DROP INDEX ");
        buf.append(dialect.quote(uniqueKey.getName()));
        return buf.toString();
    }
}
|
#!/bin/bash
# Run every test binary (files named test_*) found in the tree.
# Arguments: -v to run the tests under valgrind

usage()
{
cat <<EOF
usage $0 options

OPTIONS:
   -h      usage
   -v      run the tests using valgrind
EOF
}

valgrind=0
# FIX: the optstring was "h:v", which made -h expect an argument (so a bare
# -h raised an error instead of printing usage). Neither flag takes one.
while getopts "hv" OPTION
do
    case $OPTION in
        h)
            usage
            exit 1
            ;;
        v)
            valgrind=1
            ;;
    esac
done

for i in `find . -name test_\* | grep -v '\.dSYM'`
do
    if [ $valgrind -eq 1 ]
    then
        CMD="valgrind --leak-check=full $i"
        echo "$CMD"
        $CMD
    else
        echo "$i"
        "$i"
    fi
    # $? here is the exit status of the test itself: the test command is the
    # last command executed in either branch above.
    if [ $? -ne 0 ];
    then
        echo "$i failed!"
        exit 1
    fi
done
|
require 'spec_helper'

# Specs for Praxis::ResourceDefinition: defaults exposed by the concrete
# PeopleResource fixture, the .describe serialization, and the DSL setters
# (media_type, response, params, payload, headers, use).
describe Praxis::ResourceDefinition do
  subject(:resource_definition) { PeopleResource }

  # Values defined by the PeopleResource fixture / library defaults.
  its(:description) { should eq('People resource') }
  its(:media_type) { should eq(Person) }
  its(:responses) { should eq(Hash.new) }
  its(:version) { should eq('1.0') }
  its(:routing_config) { should be_kind_of(Proc) }
  its(:params) { should be_nil }
  its(:payload) { should be_nil }
  its(:headers) { should be_nil }
  its(:actions) { should have(2).items }

  context '.describe' do
    subject(:describe) { resource_definition.describe }

    # describe mirrors the definition (media_type serialized by name).
    its([:description]) { should eq(resource_definition.description) }
    its([:media_type]) { should eq(resource_definition.media_type.name) }
    its([:actions]) { should have(2).items }
  end

  it 'creates ActionDefinitions for actions' do
    index = resource_definition.actions[:index]
    expect(index).to be_kind_of(Praxis::ActionDefinition)
    expect(index.description).to eq("index description")
  end

  # DSL setters are exercised on an anonymous class so PeopleResource stays untouched.
  context 'setting other values' do
    subject(:resource_definition) { Class.new {include Praxis::ResourceDefinition } }

    let(:some_proc) { Proc.new {} }
    let(:some_hash) { Hash.new }

    it 'accepts a string as media_type' do
      resource_definition.media_type('Something')
      expect(resource_definition.media_type).to be_kind_of(Praxis::SimpleMediaType)
      expect(resource_definition.media_type.identifier).to eq('Something')
    end

    context 'sets responses' do
      before do
        resource_definition.response(:some_response)
      end
      subject(:responses){ resource_definition.responses }
      it { should be_kind_of(Hash) }
    end

    # params is stored as a [type, options, block] triple.
    context 'setting params' do
      it 'uses the right default values' do
        resource_definition.params &some_proc
        expect(resource_definition.params[0]).to be(Attributor::Struct)
        expect(resource_definition.params[1]).to eq({})
        expect(resource_definition.params[2]).to be(some_proc)
      end
      it 'accepts specific a type and options' do
        resource_definition.params Person, required: true
        expect(resource_definition.params[0]).to be(Person)
        expect(resource_definition.params[1]).to eq({required: true})
        expect(resource_definition.params[2]).to be(nil)
      end
    end

    # payload uses the same [type, options, block] storage as params.
    context 'setting payload' do
      it 'uses the right default values' do
        resource_definition.payload &some_proc
        expect(resource_definition.payload[0]).to be(Attributor::Struct)
        expect(resource_definition.payload[1]).to eq({})
        expect(resource_definition.payload[2]).to be(some_proc)
      end
      it 'accepts specific a type and options' do
        resource_definition.payload Person, required: true
        expect(resource_definition.payload[0]).to be(Person)
        expect(resource_definition.payload[1]).to eq({required: true})
        expect(resource_definition.payload[2]).to be(nil)
      end
    end

    it "sets headers" do
      resource_definition.headers(some_hash, &some_proc)
      expect(subject.headers[0]).to be(some_hash)
      expect(subject.headers[1]).to be(some_proc)
    end
  end

  context '.use' do
    subject(:resource_definition) { Class.new {include Praxis::ResourceDefinition } }
    it 'raises an error for missing traits' do
      expect { resource_definition.use(:stuff) }.to raise_error(Praxis::Exceptions::InvalidTrait)
    end
    # Pending example (no body): placeholder for a spec that applies a trait.
    it 'has a spec for actually using a trait'
  end
end
|
class DependencyLifetimeEnum:
    """String constants naming the supported dependency lifetimes/scopes.

    NOTE(review): despite the name this is a plain constants holder, not an
    ``enum.Enum`` -- the values are used directly as strings.
    """

    SESSION = "session"
    MODULE = "module"
    CLASS = "class"
    INSTANCE = "instance"
    FUNCTION = "function"
FUNCTION = "function" |
from typing import List


def count_set_bits(num: int) -> List[int]:
    """Return a list whose entry ``i`` is the popcount of ``i`` for 0..num inclusive.

    Uses the O(n) dynamic-programming identity
    ``popcount(i) == popcount(i >> 1) + (i & 1)`` instead of formatting each
    number as a binary string and counting '1' characters (O(n log n)).

    For ``num < 0`` an empty list is returned, matching ``range(num + 1)``
    in the original implementation.
    """
    if num < 0:
        return []
    ans = [0] * (num + 1)
    for i in range(1, num + 1):
        # Dropping the lowest bit of i leaves a smaller, already-solved value.
        ans[i] = ans[i >> 1] + (i & 1)
    return ans
/******************************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2016, <NAME> (<EMAIL>)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
/**
* @file laserscan_kinect.h
* @author <NAME> (<EMAIL>)
* @brief laserscan_kinect package
*/
#pragma once
#include <ros/console.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/LaserScan.h>
#include <sensor_msgs/image_encodings.h>
#include <image_geometry/pinhole_camera_model.h>
#include <vector>
#include <string>
constexpr double SCAN_TIME = 1.0 / 30.0;
namespace laserscan_kinect
{
class LaserScanKinect
{
public:
  LaserScanKinect(): scan_msg_(new sensor_msgs::LaserScan()) { }
  ~LaserScanKinect() = default;

  /**
   * @brief getLaserScanMsg converts a depth image and prepares a new LaserScan message
   *
   * @param depth_msg Message that contains depth image which will be converted to LaserScan.
   * @param info_msg Message which contains depth sensor parameters.
   *
   * @return Return pointer to LaserScan message.
   */
  sensor_msgs::LaserScanPtr getLaserScanMsg(const sensor_msgs::ImageConstPtr& depth_msg,
                                            const sensor_msgs::CameraInfoConstPtr& info_msg);
  /**
   * @brief setOutputFrame sets the frame to output laser scan
   * @param frame
   */
  void setOutputFrame (const std::string frame) { output_frame_id_ = frame; }

  /**
   * @brief setRangeLimits sets depth sensor min and max ranges
   *
   * @param rmin Minimum sensor range (below it is death zone) in meters.
   * @param rmax Maximum sensor range in meters.
   */
  void setRangeLimits(const float rmin, const float rmax);

  /**
   * @brief setScanHeight sets height of depth image which will be used in conversion process
   *
   * @param scan_height Height of used part of depth image in pixels.
   */
  void setScanHeight(const int scan_height);

  /**
   * @brief setDepthImgRowStep sets the row stride used when scanning a column
   *
   * @param row_step Step (in rows) between samples; must be positive.
   */
  void setDepthImgRowStep(const int row_step);

  /**
   * @brief setCamModelUpdate enables updating the camera model on every frame
   *
   * @param enable
   */
  void setCamModelUpdate (const bool enable) { cam_model_update_ = enable; }

  /**
   * @brief setSensorMountHeight sets the height of sensor mount (in meters)
   */
  void setSensorMountHeight (const float height);

  /**
   * @brief setSensorTiltAngle sets the sensor tilt angle (in degrees)
   *
   * @param angle
   */
  void setSensorTiltAngle (const float angle);

  /**
   * @brief setGroundRemove enables or disables the feature which removes ground from scan
   *
   * @param enable
   */
  void setGroundRemove (const bool enable) { ground_remove_enable_ = enable; }

  /**
   * @brief setGroundMargin sets the floor margin (in meters)
   *
   * @param margin
   */
  void setGroundMargin (const float margin);

  /**
   * @brief setTiltCompensation enables or disables the feature which compensates sensor tilt
   *
   * @param enable
   */
  void setTiltCompensation (const bool enable) { tilt_compensation_enable_ = enable; }

  /**
   * @brief setScanConfigurated sets the configuration status
   *
   * @param configurated
   */
  void setScanConfigurated (const bool configurated) { is_scan_msg_configurated_ = configurated; }

protected:
  /**
   * @brief lengthOfVector calculates length of 3D vector
   *
   * @param ray
   * @return Euclidean norm of the ray.
   */
  double lengthOfVector(const cv::Point3d& ray) const;

  /**
   * @brief angleBetweenRays calculates angle between two rays in degrees
   * @return
   */
  double angleBetweenRays(const cv::Point3d& ray1, const cv::Point3d& ray2) const;

  /**
   * @brief calcFieldOfView calculates field of view (angle) spanned by three image points
   */
  void calcFieldOfView( const cv::Point2d && left, const cv::Point2d && center,
                        const cv::Point2d && right, double & min, double & max);

  /**
   * @brief calcGroundDistancesForImgRows calculates coefficients used in ground removing from scan
   *
   * @param vertical_fov
   */
  void calcGroundDistancesForImgRows(double vertical_fov);

  /**
   * @brief calcTiltCompensationFactorsForImgRows calculates factors used in tilt compensation
   *
   * @param vertical_fov
   */
  void calcTiltCompensationFactorsForImgRows(double vertical_fov);

  /**
   * @brief calcScanMsgIndexForImgCols maps each image column to a laser scan index
   *
   * @param depth_msg
   */
  void calcScanMsgIndexForImgCols(const sensor_msgs::ImageConstPtr& depth_msg);

  /**
   * @brief getSmallestValueInColumn finds the smallest depth value in a depth image column
   *
   * Scans rows [image_vertical_offset_, image_vertical_offset_ + scan_height_)
   * with stride depth_img_row_step_ and returns the minimum in-range depth in
   * meters, or std::numeric_limits<float>::max() when no valid measurement exists.
   * NOTE(review): assumes depth_img_row_step_ > 0 (see setDepthImgRowStep);
   * a zero step would loop forever — confirm the setter rejects 0.
   */
  template <typename T>
  float getSmallestValueInColumn(const T* depth_row, int row_size, int col)
  {
    float depth_min = std::numeric_limits<float>::max();
    const unsigned range_min_mm = range_min_ * 1000;
    const unsigned range_max_mm = range_max_ * 1000;

    // Loop over pixels in column. Calculate z_min in column
    for (size_t i = image_vertical_offset_; i < image_vertical_offset_ + scan_height_;
         i += depth_img_row_step_)
    {
      // Initialized so that an instantiation with an element type other than
      // uint16_t or float (where neither branch below is taken) fails the
      // range check instead of reading uninitialized values.
      unsigned depth_raw_mm = 0;
      float depth_m = std::numeric_limits<float>::max();

      if (typeid(T) == typeid(uint16_t)) // Raw depth encoded in millimeters
      {
        depth_raw_mm = static_cast<unsigned>(depth_row[row_size * i + col]);
        depth_m = static_cast<float>(depth_raw_mm) / 1000.0;
      }
      else if (typeid(T) == typeid(float)) // Depth already in meters
      {
        depth_m = static_cast<float>(depth_row[row_size * i + col]);
        depth_raw_mm = static_cast<unsigned>(depth_m * 1000.0);
      }

      if (tilt_compensation_enable_) // Check if tilt compensation is enabled
      {
        depth_m *= tilt_compensation_factor_[i];
      }

      // Check if point is in ranges and find min value in column
      if (depth_raw_mm >= range_min_mm && depth_raw_mm <= range_max_mm)
      {
        if (ground_remove_enable_)
        {
          // Additionally reject measurements at or beyond the ground plane.
          if (depth_m < depth_min && depth_raw_mm < dist_to_ground_corrected[i])
          {
            depth_min = depth_m;
          }
        }
        else
        {
          if (depth_m < depth_min)
          {
            depth_min = depth_m;
          }
        }
      }
    }
    return depth_min;
  }

  /**
   * @brief convertDepthToPolarCoords converts depth map to 2D laser scan ranges
   *
   * @param depth_msg
   */
  template <typename T>
  void convertDepthToPolarCoords(const sensor_msgs::ImageConstPtr& depth_msg);

private:
  //-----------------------------------------------------------------------------------------------
  // ROS parameters configurated with configuration file or dynamic_reconfigure
  std::string output_frame_id_;          ///< Output frame_id for laserscan message.
  float range_min_{0};                   ///< Stores the current minimum range to use
  float range_max_{0};                   ///< Stores the current maximum range to use
  unsigned scan_height_{0};              ///< Number of pixel rows used to scan computing
  unsigned depth_img_row_step_{0};       ///< Row step in depth map processing
  bool cam_model_update_{false};         ///< If continously calibration update required
  float sensor_mount_height_{0};         ///< Height of sensor mount from ground
  float sensor_tilt_angle_{0};           ///< Angle of sensor tilt
  bool ground_remove_enable_{false};     ///< Determines if remove ground from output scan
  float ground_margin_{0};               ///< Margin for floor remove feature (in meters)
  bool tilt_compensation_enable_{false}; ///< Determines if tilt compensation feature is on
  //-----------------------------------------------------------------------------------------------
  /// Published scan message
  sensor_msgs::LaserScanPtr scan_msg_;
  /// Class for managing CameraInfo messages
  image_geometry::PinholeCameraModel cam_model_;
  /// Determines if laser scan message is configurated
  bool is_scan_msg_configurated_{false};
  /// Calculated laser scan msg indexes for each depth image column
  std::vector<unsigned> scan_msg_index_;
  /// Calculated maximal distances for measurements not included as floor.
  /// NOTE(review): missing trailing underscore vs. the other members; renaming
  /// would touch the implementation file, so the name is kept as-is.
  std::vector<unsigned> dist_to_ground_corrected;
  /// Calculated sensor tilt compensation factors
  std::vector<float> tilt_compensation_factor_;
  /// The vertical offset of image based on calibration data
  int image_vertical_offset_{0};
};
};
|
use std::io;
fn main() {
    let mut name = String::new();
    println!("Hello, what's your name? ");
    io::stdin().read_line(&mut name)
        .expect("Failed to read line.");
    println!("Hello, {}! Let's start the quiz.", name.trim());

    let mut score = 0;

    // The three questions previously shared three copy-pasted blocks;
    // they now run through a single helper.
    score += ask_question(
        "What is the capital of France?",
        &["A) London", "B) Paris", "C) Berlin"],
        "B",
    );
    score += ask_question(
        "What is the largest planet in our solar system?",
        &["A) Earth", "B) Jupiter", "C) Mars"],
        "B",
    );
    score += ask_question(
        "What is the powerhouse of the cell?",
        &["A) Nucleus", "B) Mitochondria", "C) Ribosome"],
        "B",
    );

    println!("Thank you for taking the quiz, {}!", name.trim());
    println!("Your score is: {}/3", score);
}

/// Prints one multiple-choice question with its options, reads the user's
/// answer from stdin and reports whether it matched.
///
/// Returns 1 for a correct answer and 0 otherwise so the caller can sum scores.
fn ask_question(question: &str, choices: &[&str], correct: &str) -> u32 {
    println!("{}", question);
    for choice in choices {
        println!("{}", choice);
    }

    let mut answer = String::new();
    io::stdin().read_line(&mut answer)
        .expect("Failed to read line.");

    // Accept "b" as well as "B" — the original comparison was case-sensitive.
    if answer.trim().eq_ignore_ascii_case(correct) {
        println!("Correct!");
        1
    } else {
        println!("Incorrect! The correct answer is {}", correct);
        0
    }
}
/*
* RequestParser.h
*
* Created on: Aug 8, 2015
* Author: richard
*
* Copyright 2017 <NAME>
* Licensed under the MIT License
*/
#ifndef GUI_REQUESTPARSER_H_
#define GUI_REQUESTPARSER_H_
#include <boost/logic/tribool.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/tuple/detail/tuple_basic.hpp>
namespace http {
struct Request;
/// Parser for incoming requests.
class RequestParser {
public:
	/// Construct ready to parse the request method.
	RequestParser();

	/// Reset to initial parser state.
	void reset();

	/// Parse some data. The tribool return value is true when a complete request
	/// has been parsed, false if the data is invalid, indeterminate when more
	/// data is required. The InputIterator return value indicates how much of the
	/// input has been consumed.
	template<typename InputIterator>
	boost::tuple<boost::tribool, InputIterator> parse(Request& req, InputIterator begin, InputIterator end) {
		while (begin != end) {
			boost::tribool result = consume(req, *begin++);
			// Three-state logic: 'result || !result' holds for both definite
			// outcomes (complete request, or invalid input) and is false only
			// for indeterminate, which keeps consuming input.
			if (result || !result)
				return boost::make_tuple(result, begin);
		}
		// Input exhausted without reaching a definite outcome.
		boost::tribool result = boost::indeterminate;
		return boost::make_tuple(result, begin);
	}

private:
	/// Handle the next character of input.
	boost::tribool consume(Request& req, char input);

	/// Check if a byte is an HTTP character.
	static bool is_char(int c);

	/// Check if a byte is an HTTP control character.
	static bool is_ctl(int c);

	/// Check if a byte is defined as an HTTP tspecial character.
	static bool is_tspecial(int c);

	/// Check if a byte is a digit.
	static bool is_digit(int c);

	/// The current state of the parser (one state per position within the
	/// request line / header grammar).
	enum state {
		method_start,
		method,
		uri,
		http_version_h,
		http_version_t_1,
		http_version_t_2,
		http_version_p,
		http_version_slash,
		http_version_major_start,
		http_version_major,
		http_version_minor_start,
		http_version_minor,
		expecting_newline_1,
		header_line_start,
		header_lws,
		header_name,
		space_before_header_value,
		header_value,
		expecting_newline_2,
		expecting_newline_3
	} state_;
};
} // namespace http
#endif // GUI_REQUESTPARSER_H_
|
<reponame>TomTyack/jss<filename>packages/sitecore-jss-react/src/enhancers/withPlaceholder.tsx
import React from 'react';
import { ComponentRendering, RouteData, HtmlElementRendering } from '@sitecore-jss/sitecore-jss';
import { PlaceholderProps, PlaceholderCommon } from '../components/PlaceholderCommon';
import { withComponentFactory } from './withComponentFactory';
/** Options accepted by the {@link withPlaceholder} higher-order component. */
export interface WithPlaceholderOptions {
  /**
   * Function to map incoming placeholder props into rendering data to use for the placeholder data.
   * Normally in a JSS component, props.rendering is passed the component data, and that is the default.
   * However, if your component data is in a different prop, like say 'route' in a sample app,
   * this lets you map that.
   */
  resolvePlaceholderDataFromProps?: (props: any) => ComponentRendering | RouteData;
  /**
   * Function to alter the placeholder props from within the HOC. Enables the props to be
   * transformed before being used by the placeholder/HOC, for example to customize the
   * error or missing component display
   */
  propsTransformer?: (props: PlaceholderProps) => PlaceholderProps;
}

/** Maps a named Sitecore placeholder to the prop it should be injected on. */
export interface PlaceholderToPropMapping {
  /**
   * The name of the placeholder this component will expose
   */
  placeholder: string;
  /**
   * The name of the prop on your wrapped component that you would like the placeholder data injected on
   */
  prop: string;
}

/**
 * One or more placeholders to resolve: either a bare placeholder name (also
 * used as the prop name) or an explicit placeholder-to-prop mapping.
 */
export type WithPlaceholderSpec = (string | PlaceholderToPropMapping) | (string | PlaceholderToPropMapping)[];
/**
 * Higher-order component that resolves the given Sitecore placeholder(s) from
 * the component's rendering data and injects the resulting React components as
 * props on the wrapped component.
 *
 * @param placeholders placeholder name(s) or placeholder-to-prop mapping(s)
 * @param options optional data-resolution / props-transformation hooks
 * @returns a HOC wrapping the given component
 */
export function withPlaceholder(placeholders: WithPlaceholderSpec, options?: WithPlaceholderOptions) {
  return (WrappedComponent: React.ComponentClass<any> | React.SFC<any>) => {
    // No explicit constructor: the original one only forwarded props to super.
    class WithPlaceholder extends PlaceholderCommon {
      static propTypes = PlaceholderCommon.propTypes;

      render() {
        let childProps: any = { ...this.props };
        delete childProps.componentFactory;

        if (options && options.propsTransformer) {
          childProps = options.propsTransformer(childProps);
        }

        // Render the consumer-provided error component (or a default marker
        // div) when placeholder resolution previously failed.
        if (this.state.error) {
          if (childProps.errorComponent) {
            return <childProps.errorComponent error={this.state.error} />;
          }
          return (
            <div className="sc-jss-placeholder-error">
              A rendering error occurred: {this.state.error.message}.
            </div>
          );
        }

        const renderingData = options && options.resolvePlaceholderDataFromProps
          ? options.resolvePlaceholderDataFromProps(childProps)
          : childProps.rendering;

        // Normalize to an array so single and multiple specs share one path.
        // (Also fixes the misspelled local 'definitelyArrayPlacholders'.)
        const placeholderSpecs = Array.isArray(placeholders) ? placeholders : [placeholders];

        placeholderSpecs.forEach((spec: string | PlaceholderToPropMapping) => {
          const mapping = spec as PlaceholderToPropMapping;
          let placeholderData: (ComponentRendering | HtmlElementRendering)[];

          if (mapping.placeholder && mapping.prop) {
            placeholderData = PlaceholderCommon.getPlaceholderDataFromRenderingData(renderingData, mapping.placeholder);
            if (placeholderData) {
              childProps[mapping.prop] = this.getComponentsForRenderingData(placeholderData);
            }
          } else {
            // Bare string spec: the placeholder name doubles as the prop name.
            placeholderData = PlaceholderCommon.getPlaceholderDataFromRenderingData(renderingData, spec as string);
            if (placeholderData) {
              childProps[spec as string] = this.getComponentsForRenderingData(placeholderData);
            }
          }
        });

        return <WrappedComponent {...childProps} />;
      }
    }

    return withComponentFactory(WithPlaceholder);
  };
}
import React from 'react'
import Notification from './index'
const stories = {
component: 'Notification',
props: [
{
name: 'children',
type: 'Node',
default: '',
description: 'The content of the component',
},
{
name: 'color',
type: 'Color',
default: 'primary',
description: 'The color of the badge',
},
],
stories: [
{
name: 'Notification',
description: 'Simple Notification',
render: (
<>
<Notification
notification={{ id: 1, message: 'Message testing 1' }}
type="success"
duration={1000}
position="top-center"
/>
<Notification
notification={{ id: 1, message: 'Message testing 2' }}
type="error"
duration={2000}
position="bottom-right"
/>
<Notification
notification={{ id: 1, message: 'Message testing 3' }}
type="info"
duration={3000}
position="bottom-left"
/>
<Notification
notification={{ id: 1, message: 'Message testing 4' }}
type="warning"
duration={4000}
position="bottom-center"
/>
<Notification
notification={{ id: 1, message: 'Message testing 5' }}
type="warning"
duration={5000}
position="top-left"
/>
<Notification
notification={{ id: 1, message: 'Message testing 6' }}
type="warning"
duration={6000}
position="top-right"
/>
</>
),
prop: false,
code: `
<Checkbox color="primary" /> Foo
`,
},
],
}
export default stories
|
def maxProfit(prices):
    """Best profit from a single buy-then-sell over `prices`; 0 if none.

    Single pass: track the cheapest price seen so far and the best
    sell-minus-cheapest difference.
    """
    best = 0
    cheapest = float('inf')
    for price in prices:
        if price < cheapest:
            cheapest = price
        elif price - cheapest > best:
            best = price - cheapest
    return best
/* Generated By:JJTree&JavaCC: Do not edit this line. JavaParserTokenManager.java */
/** Token Manager. */
// NOTE(review): generated stub — the body is produced by JavaCC; do not hand-edit.
public class JavaParserTokenManager {
}
<reponame>schinmayee/nimbus
//#####################################################################
// Copyright 2006-2007, <NAME>.
// This file is part of PhysBAM whose distribution is governed by the license contained in the accompanying file PHYSBAM_COPYRIGHT.txt.
//#####################################################################
// Class PARTICLE_LEVELSET_IMPLICIT_OBJECT
//#####################################################################
#include <PhysBAM_Tools/Matrices/MATRIX_3X3.h>
#include <PhysBAM_Geometry/Topology_Based_Geometry/TRIANGULATED_SURFACE.h>
#include <PhysBAM_Dynamics/Level_Sets/PARTICLE_LEVELSET_IMPLICIT_OBJECT.h>
#include <PhysBAM_Dynamics/Level_Sets/PARTICLE_LEVELSET_UNIFORM.h>
using namespace PhysBAM;
//#####################################################################
// Constructor
//#####################################################################
// Wraps the particle levelset's grid and phi in the LEVELSET_IMPLICIT_OBJECT
// base, sizes the particle influence array over node indices with one ghost
// cell, and caches the corresponding regular grid.
template<class TV> PARTICLE_LEVELSET_IMPLICIT_OBJECT<TV>::
PARTICLE_LEVELSET_IMPLICIT_OBJECT(T_PARTICLE_LEVELSET& particle_levelset_input)
    :LEVELSET_IMPLICIT_OBJECT<TV>(particle_levelset_input.levelset.grid,particle_levelset_input.levelset.phi),particle_levelset(particle_levelset_input)
{
    particle_influence.Resize(levelset.grid.Node_Indices(1));p_grid=levelset.grid.Get_Regular_Grid();
}
//#####################################################################
// Create
//#####################################################################
// Factory: allocates a default grid, phi array and particle levelset
// (ghost_cells=3) owned by the new object; need_destroy_data=true presumably
// tells the (base-class) destructor to free them — confirm in base class.
template<class TV> PARTICLE_LEVELSET_IMPLICIT_OBJECT<TV>* PARTICLE_LEVELSET_IMPLICIT_OBJECT<TV>::
Create()
{
    int ghost_cells=3;
    PARTICLE_LEVELSET_IMPLICIT_OBJECT* levelset_implicit_object=new PARTICLE_LEVELSET_IMPLICIT_OBJECT(*(new T_PARTICLE_LEVELSET(*(new GRID<TV>),*(new T_ARRAYS_SCALAR),ghost_cells)));
    levelset_implicit_object->need_destroy_data=true;return levelset_implicit_object;
}
//#####################################################################
// Explicit template instantiations for float (and optionally double) in 1/2/3 dimensions.
#define INSTANTIATION_HELPER(T,d) \
template PARTICLE_LEVELSET_IMPLICIT_OBJECT<VECTOR<T,d> >::PARTICLE_LEVELSET_IMPLICIT_OBJECT(LEVELSET_POLICY<GRID<VECTOR<T,d> > >::PARTICLE_LEVELSET&); \
template PARTICLE_LEVELSET_IMPLICIT_OBJECT<VECTOR<T,d> >* PARTICLE_LEVELSET_IMPLICIT_OBJECT<VECTOR<T,d> >::Create();
INSTANTIATION_HELPER(float,1)
INSTANTIATION_HELPER(float,2)
INSTANTIATION_HELPER(float,3)
#ifndef COMPILE_WITHOUT_DOUBLE_SUPPORT
INSTANTIATION_HELPER(double,1)
INSTANTIATION_HELPER(double,2)
INSTANTIATION_HELPER(double,3)
#endif
|
package com.alipay.api.response;
import com.alipay.api.internal.mapping.ApiField;
import com.alipay.api.AlipayResponse;
/**
 * ALIPAY API: koubei.shop.wxlogin.query response.
 *
 * @author <NAME>
 * @since 1.0, 2021-07-13 10:42:02
 */
public class KoubeiShopWxloginQueryResponse extends AlipayResponse {

    private static final long serialVersionUID = 6363139518489922299L;

    /**
     * The user's unique identifier (openid).
     */
    @ApiField("openid")
    private String openid;

    /**
     * The session key.
     */
    @ApiField("session_key")
    private String sessionKey;

    public void setOpenid(String openid) {
        this.openid = openid;
    }
    public String getOpenid( ) {
        return this.openid;
    }
    public void setSessionKey(String sessionKey) {
        this.sessionKey = sessionKey;
    }
    public String getSessionKey( ) {
        return this.sessionKey;
    }
}
|
package seoul.democracy.opinion.domain;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import javax.persistence.Embeddable;
import java.io.Serializable;
/**
 * Embeddable composite key pairing a user with an opinion.
 * Instances are created via the Lombok-generated {@code of(userId, opinionId)}
 * factory or the no-args constructor.
 */
@Data
@Embeddable
@NoArgsConstructor
@AllArgsConstructor(staticName = "of")
public class UserOpinionId implements Serializable {

    // Explicit serialVersionUID: without one, the JVM derives an id from the
    // class shape, so any edit would break deserialization compatibility.
    private static final long serialVersionUID = 1L;

    /** Id of the user who voiced the opinion. */
    private Long userId;
    /** Id of the opinion. */
    private Long opinionId;
}
|
<filename>internal/operator/boom/application/applications/logcollection/logging/output.go
package logging
// ConfigOutput collects everything needed to render a logging-operator
// Output (or ClusterOutput) custom resource for a Loki backend.
type ConfigOutput struct {
	Name                      string
	Namespace                 string
	URL                       string
	ClusterOutput             bool
	RemoveKeys                []string
	Labels                    map[string]string
	ExtraLabels               map[string]string
	ExtractKubernetesLabels   bool
	ConfigureKubernetesLabels bool
	EnabledNamespaces         []string
	Username                  *SecretKeyRef
	Password                  *SecretKeyRef
}

// Buffer describes the fluentd output buffering/flush schedule.
type Buffer struct {
	Timekey       string `yaml:"timekey"`
	TimekeyWait   string `yaml:"timekey_wait"`
	TimekeyUseUtc bool   `yaml:"timekey_use_utc"`
}

// Loki mirrors the loki output plugin section of the logging-operator spec.
type Loki struct {
	URL                       string            `yaml:"url"`
	ConfigureKubernetesLabels bool              `yaml:"configure_kubernetes_labels,omitempty"`
	ExtractKubernetesLabels   bool              `yaml:"extract_kubernetes_labels,omitempty"`
	ExtraLabels               map[string]string `yaml:"extra_labels,omitempty"`
	Labels                    map[string]string `yaml:"labels,omitempty"`
	RemoveKeys                []string          `yaml:"remove_keys,omitempty"`
	Username                  *Value            `yaml:"username,omitempty"`
	Password                  *Value            `yaml:"password,omitempty"`
	Buffer                    *Buffer           `yaml:"buffer"`
}

// Value wraps an indirect value (resolved via a secret reference).
type Value struct {
	ValueFrom *ValueFrom `yaml:"valueFrom,omitempty"`
}

// ValueFrom names the source a Value is resolved from.
type ValueFrom struct {
	SecretKeyRef *SecretKeyRef `yaml:"secretKeyRef,omitempty"`
}

// SecretKeyRef points at one key inside a Kubernetes secret.
type SecretKeyRef struct {
	Key  string `yaml:"key,omitempty"`
	Name string `yaml:"name,omitempty"`
}

// OutputSpec is the spec section of the Output custom resource.
type OutputSpec struct {
	Loki              *Loki    `yaml:"loki"`
	EnabledNamespaces []string `yaml:"enabledNamespaces,omitempty"`
}

// Output is the serializable Output/ClusterOutput custom resource.
type Output struct {
	APIVersion string      `yaml:"apiVersion"`
	Kind       string      `yaml:"kind"`
	Metadata   *Metadata   `yaml:"metadata"`
	Spec       *OutputSpec `yaml:"spec"`
}
// NewOutput assembles a logging-operator Output custom resource from conf.
// When clusterOutput is true the resource becomes a cluster-scoped
// ClusterOutput and its namespace is cleared.
func NewOutput(clusterOutput bool, conf *ConfigOutput) *Output {
	metadata := &Metadata{
		Name:      conf.Name,
		Namespace: conf.Namespace,
	}
	kind := "Output"
	if clusterOutput {
		kind = "ClusterOutput"
		metadata.Namespace = ""
	}

	loki := &Loki{
		URL:                       conf.URL,
		ExtractKubernetesLabels:   conf.ExtractKubernetesLabels,
		ConfigureKubernetesLabels: conf.ConfigureKubernetesLabels,
		Buffer: &Buffer{
			Timekey:       "1m",
			TimekeyWait:   "30s",
			TimekeyUseUtc: true,
		},
	}
	// Optional fields are only set when configured, matching the original
	// nil checks so the YAML omitempty behaviour is unchanged.
	if conf.Username != nil {
		loki.Username = secretValue(conf.Username)
	}
	if conf.Password != nil {
		loki.Password = secretValue(conf.Password)
	}
	if conf.ExtraLabels != nil {
		loki.ExtraLabels = conf.ExtraLabels
	}
	if conf.Labels != nil {
		loki.Labels = conf.Labels
	}
	if conf.RemoveKeys != nil {
		loki.RemoveKeys = conf.RemoveKeys
	}

	spec := &OutputSpec{Loki: loki}
	if conf.EnabledNamespaces != nil {
		spec.EnabledNamespaces = conf.EnabledNamespaces
	}

	return &Output{
		APIVersion: "logging.banzaicloud.io/v1beta1",
		Kind:       kind,
		Metadata:   metadata,
		Spec:       spec,
	}
}

// secretValue wraps a secret key reference in the nested value/valueFrom
// structure the loki output plugin expects.
func secretValue(ref *SecretKeyRef) *Value {
	return &Value{
		ValueFrom: &ValueFrom{
			SecretKeyRef: &SecretKeyRef{
				Key:  ref.Key,
				Name: ref.Name,
			},
		},
	}
}
|
#!/bin/bash

# Pinned upstream source for libsrt (Haivision Secure Reliable Transport).
SRT_REPO="https://github.com/Haivision/srt.git"
SRT_COMMIT="3d26644e2b029b7da94713e1fd16e77006acc715"

# Whether this dependency participates in the build (0 = always enabled).
ffbuild_enabled() {
    return 0
}

# Clone the pinned commit and build/install a static libsrt with encryption
# enabled and the bundled applications disabled.
ffbuild_dockerbuild() {
    git-mini-clone "$SRT_REPO" "$SRT_COMMIT" srt
    cd srt

    mkdir build && cd build

    cmake -DCMAKE_TOOLCHAIN_FILE="$FFBUILD_CMAKE_TOOLCHAIN" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX="$FFBUILD_PREFIX" \
        -DENABLE_SHARED=OFF -DENABLE_STATIC=ON -DENABLE_ENCRYPTION=ON -DENABLE_APPS=OFF ..
    make -j$(nproc)
    make install
}

# Configure flag(s) added to the ffmpeg build when this dep is enabled.
ffbuild_configure() {
    echo --enable-libsrt
}

# Configure flag(s) used when this dep is disabled.
ffbuild_unconfigure() {
    echo --disable-libsrt
}
|
<reponame>invader777/tleaf<filename>test/specs/defaults/templates.spec.js
'use strict';

var assert = require('chai').assert;
var fs = require('fs');

// Builds the path of a default template file from its name relative to
// src/defaults/templates.
function pathFor(relative) {
  return 'src/defaults/templates/' + relative + '.tpl.js';
}

describe('defaults/templates', function () {

  describe('tabs for indentation', function () {

    var paths = [
      'constant', 'controller', 'directive', 'factory',
      'filter', 'provider', 'service', 'value',
      'dependencies/constant', 'dependencies/factory', 'dependencies/provider',
      'dependencies/service', 'dependencies/value'
    ];

    paths.forEach(function (path) {

      it('should for ' + path, function (done) {
        fs.readFile(pathFor(path), 'utf8', function (err, source) {
          assert.isNull(err);
          // Templates must be tab-indented: no line may START with a space.
          // The previous check (indexOf(' ') < 0) rejected any space at all,
          // which no real JS template can satisfy.
          assert.notMatch(source, /^ /m, path + ' uses space indentation');
          done();
        });
      });
    });
  });
});
package tkohdk.lib.calcstr.tokenizer;
import java.math.BigDecimal;
/**
 * Token element wrapping a numeric literal as a {@link BigDecimal}.
 * Every category predicate inherited from {@code TokenElementObject} answers
 * {@code false} except {@link #isNumeric()}.
 */
public class NumericTokenElement extends TokenElementObject {

    /** Parsed numeric value of this token. */
    protected BigDecimal bc;

    public NumericTokenElement(int index, BigDecimal bc){
        super(index);
        this.bc = bc;
    }

    /** Returns the canonical string form of the wrapped value. */
    @Override
    public String getStr() {
        return this.bc.toString();
    }

    @Override
    public BigDecimal getNumberObject() {
        return this.bc;
    }

    /**
     * Replaces the wrapped value by parsing {@code str}.
     *
     * @param str decimal literal to parse
     * @return the input string, unchanged
     * @throws NumberFormatException if {@code str} is not a valid decimal
     */
    @Override
    public String setStr(String str) {
        // Parse first, then assign: the previous value stays intact if
        // BigDecimal rejects the input. (The original nulled the field
        // before parsing, leaving the element corrupted on failure.)
        this.bc = new BigDecimal(str);
        return str;
    }

    // --- token classification ------------------------------------------
    // A numeric token belongs to exactly one category; every other
    // predicate below therefore returns false.

    @Override
    public boolean isNumeric() {
        return true;
    }
    @Override
    public boolean isIncompleteDecimal() {
        return false;
    }
    @Override
    public boolean isLeftBracket() {
        return false;
    }
    @Override
    public boolean isExclamation() {
        return false;
    }
    @Override
    public boolean isPlusOperator() {
        return false;
    }
    @Override
    public boolean isMinusOperator() {
        return false;
    }
    @Override
    public boolean isMultiplicationOperator() {
        return false;
    }
    @Override
    public boolean isDivisionOperator() {
        return false;
    }
    @Override
    public boolean isRightBracket() {
        return false;
    }
    @Override
    public boolean isSineFunc() {
        return false;
    }
    @Override
    public boolean isCosineFunc() {
        return false;
    }
    @Override
    public boolean isTangentFunc() {
        return false;
    }
    @Override
    public boolean isLogarithmFunc() {
        return false;
    }
    @Override
    public boolean isFunction() {
        return false;
    }
    @Override
    public boolean isPeriodStr() {
        return false;
    }
    @Override
    public boolean isInvolutionOperator() {
        return false;
    }
    @Override
    public boolean isEMark(){
        return false;
    }
    @Override
    public boolean isBinaryOperator() {
        return false;
    }
    @Override
    public boolean isUnaryOperator() {
        return false;
    }
}
|
package com.cjy.flb.qrcode.capture;
import android.app.AlertDialog;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.res.AssetFileDescriptor;
import android.graphics.Bitmap;
import android.media.AudioManager;
import android.media.MediaPlayer;
import android.media.MediaPlayer.OnCompletionListener;
import android.os.Bundle;
import android.os.Handler;
import android.os.Vibrator;
import android.util.Log;
import android.view.SurfaceHolder;
import android.view.SurfaceHolder.Callback;
import android.view.SurfaceView;
import android.widget.CompoundButton;
import android.widget.Toast;
import com.cjy.flb.R;
import com.cjy.flb.activity.BaseActivity;
import com.cjy.flb.qrcode.camera.CameraManager;
import com.cjy.flb.qrcode.decoding.CaptureActivityHandler;
import com.cjy.flb.qrcode.decoding.InactivityTimer;
import com.cjy.flb.qrcode.view.ViewfinderView;
import com.google.zxing.BarcodeFormat;
import com.google.zxing.Result;
import java.io.IOException;
import java.util.Vector;
/**
* Initial the camera
*
* @author Ryan.Tang
*/
public class CaptureActivity extends BaseActivity implements Callback, CompoundButton.OnCheckedChangeListener {
private CaptureActivityHandler handler;
private ViewfinderView viewfinderView;
private boolean hasSurface;
private Vector<BarcodeFormat> decodeFormats;
private String characterSet;
private InactivityTimer inactivityTimer;
private MediaPlayer mediaPlayer;
private boolean playBeep;
private static final float BEEP_VOLUME = 0.10f;
private boolean vibrate;
//开关闪光灯
private android.hardware.Camera camera;
private android.hardware.Camera.Parameters parameter;
/**
* Called when the activity is first created.
*/
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
}
@Override
public void initView() {
setContentView(R.layout.camera);
CameraManager.init(context);
viewfinderView = (ViewfinderView) findViewById(R.id.viewfinder_view);
initBar(getString(R.string.scan_qr), false, false);
}
@Override
public void initData() {
hasSurface = false;
inactivityTimer = new InactivityTimer(this);
}
@Override
public void initListener() {
}
@Override
protected void onResume() {
super.onResume();
SurfaceView surfaceView = (SurfaceView) findViewById(R.id.preview_view);
SurfaceHolder surfaceHolder = surfaceView.getHolder();
if (hasSurface) {
initCamera(surfaceHolder);
} else {
//保持屏幕的高亮,不要锁定屏幕
surfaceView.getHolder().setKeepScreenOn(true);
surfaceHolder.addCallback(this);
//下面设置Surface不维护自己的缓冲区,而是等待屏幕的渲染引擎将内容推送到用户面前
surfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
}
decodeFormats = null;
characterSet = null;
playBeep = true;
AudioManager audioService = (AudioManager) getSystemService(AUDIO_SERVICE);
if (audioService.getRingerMode() != AudioManager.RINGER_MODE_NORMAL) {
playBeep = false;
}
initBeepSound();
vibrate = true;
}
@Override
protected void onPause() {
super.onPause();
if (handler != null) {
handler.quitSynchronously();
handler = null;
}
CameraManager.get().closeDriver();
}
@Override
protected void onDestroy() {
inactivityTimer.shutdown();
super.onDestroy();
}
/**
* Handler scan result
*
* @param result
* @param barcode
*/
public void handleDecode(Result result, Bitmap barcode) {
inactivityTimer.onActivity();
playBeepSoundAndVibrate();
String resultString = result.getText();
// FIXME
if (resultString.equals("")) {
Toast.makeText(CaptureActivity.this, "Scan failed!", Toast.LENGTH_SHORT).show();
} else {
// System.out.println("Result:"+resultString);
Intent resultIntent = new Intent();
Bundle bundle = new Bundle();
bundle.putString("result", resultString);
resultIntent.putExtras(bundle);
this.setResult(RESULT_OK, resultIntent);
}
CaptureActivity.this.finish();
}
private void initCamera(SurfaceHolder surfaceHolder) {
if (surfaceHolder == null) {
throw new IllegalStateException("No SurfaceHolder provided");
}
if (CameraManager.get().isOpen()) {
Log.w("TAG", "initCamera() while already open -- late SurfaceView callback?");
return;
}
try {
CameraManager.get().openDriver(surfaceHolder);
if (handler == null) {
handler = new CaptureActivityHandler(this, decodeFormats, characterSet);
}
} catch (IOException ioe) {
displayFrameWorkBugMessageAndExit();
return;
} catch (RuntimeException e) {
displayFrameWorkBugMessageAndExit();
return;
}
}
//打开相机出错时,提示
private void displayFrameWorkBugMessageAndExit() {
// camera error
AlertDialog.Builder builder = new AlertDialog.Builder(this);
builder.setTitle(getString(R.string.app_name));
builder.setMessage(getString(R.string.scan_error));
builder.setPositiveButton(getString(R.string.btn_ok), new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
finish();
}
});
builder.setOnCancelListener(new DialogInterface.OnCancelListener() {
@Override
public void onCancel(DialogInterface dialog) {
dialog.dismiss();
finish();
}
});
builder.show();
}
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
}
@Override
public void surfaceCreated(SurfaceHolder holder) {
if (!hasSurface) {
hasSurface = true;
initCamera(holder);
}
}
@Override
public void surfaceDestroyed(SurfaceHolder holder) {
hasSurface = false;
}
public ViewfinderView getViewfinderView() {
return viewfinderView;
}
public Handler getHandler() {
return handler;
}
public void drawViewfinder() {
viewfinderView.drawViewfinder();
}
private void initBeepSound() {
if (playBeep && mediaPlayer == null) {
// The volume on STREAM_SYSTEM is not adjustable, and users found it
// too loud,
// so we now play on the music stream.
setVolumeControlStream(AudioManager.STREAM_MUSIC);
mediaPlayer = new MediaPlayer();
mediaPlayer.setAudioStreamType(AudioManager.STREAM_MUSIC);
mediaPlayer.setOnCompletionListener(beepListener);
AssetFileDescriptor file = getResources().openRawResourceFd(R.raw.beep);
try {
mediaPlayer.setDataSource(file.getFileDescriptor(), file.getStartOffset(), file.getLength());
file.close();
mediaPlayer.setVolume(BEEP_VOLUME, BEEP_VOLUME);
mediaPlayer.prepare();
} catch (IOException e) {
mediaPlayer = null;
}
}
}
private static final long VIBRATE_DURATION = 200L;
private void playBeepSoundAndVibrate() {
if (playBeep && mediaPlayer != null) {
mediaPlayer.start();
}
if (vibrate) {
Vibrator vibrator = (Vibrator) getSystemService(VIBRATOR_SERVICE);
vibrator.vibrate(VIBRATE_DURATION);
}
}
/**
* When the beep has finished playing, rewind to queue up another one.
*/
private final OnCompletionListener beepListener = new OnCompletionListener() {
public void onCompletion(MediaPlayer mediaPlayer) {
mediaPlayer.seekTo(0);
}
};
@Override
public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
if (isChecked) {
camera.startPreview();
parameter = camera.getParameters();
parameter.setFocusMode(android.hardware.Camera.Parameters.FLASH_MODE_TORCH);
camera.setParameters(parameter);
} else {
parameter = camera.getParameters();
parameter.setFlashMode(android.hardware.Camera.Parameters.FLASH_MODE_OFF);
camera.setParameters(parameter);
}
}
} |
from sequana.tools import gc_content
from sequana import genbank_features_parser
from sequana import sequana_data
def calculate_gc_content(sequence, window_size, circular=False):
    """Compute GC content of *sequence* over sliding windows of *window_size*.

    Wraps :func:`sequana.tools.gc_content`, which expects the sequence inside
    a dictionary. Set ``circular=True`` for circular genomes.
    """
    payload = {'sequence': sequence}
    return gc_content(payload, window_size, circular=circular)
def parse_genbank_features(file_path):
    """Locate a bundled GenBank file via sequana and parse its features.

    *file_path* is resolved with :func:`sequana.sequana_data`; the resolved
    file is then fed to :func:`sequana.genbank_features_parser`.
    """
    located = sequana_data(file_path)
    return genbank_features_parser(located)
#!/bin/sh
#set -e
# is_live_path DIRECTORY
# Succeed if DIRECTORY/${LIVE_MEDIA_PATH} contains at least one root
# filesystem image of a supported type.
is_live_path()
{
    DIRECTORY="${1}/${LIVE_MEDIA_PATH}"
    # Bug fix: probe for "jffs2" (not "jffs") so the extension list matches
    # the *.jffs2 images actually handled by get_backing_device().
    for FILESYSTEM in squashfs ext2 ext3 ext4 xfs dir jffs2
    do
        if ls "${DIRECTORY}/"*.${FILESYSTEM} > /dev/null 2>&1
        then
            return 0
        fi
    done
    return 1
}
# matches_uuid PATH
# Succeed if the medium mounted under PATH carries a .disk/live-uuid* file
# whose content equals the reference UUID in /conf/uuid.conf.
# Always succeeds when UUID checking is disabled (IGNORE_UUID set) or no
# reference UUID was shipped in the initramfs.
matches_uuid ()
{
    if [ "${IGNORE_UUID}" ] || [ ! -e /conf/uuid.conf ]
    then
        return 0
    fi
    path="${1}"
    uuid="$(cat /conf/uuid.conf)"
    for try_uuid_file in "${path}/.disk/live-uuid"*
    do
        # glob may not match anything; skip the literal pattern then
        [ -e "${try_uuid_file}" ] || continue
        try_uuid="$(cat "${try_uuid_file}")"
        if [ "${uuid}" = "${try_uuid}" ]
        then
            return 0
        fi
    done
    return 1
}
# get_backing_device IMAGE [READONLY]
# Map a filesystem image file to something mountable: known image types get
# attached to a loop device (printed on stdout), "*.dir" trees are reported
# as the literal string "directory", anything else is fatal.
get_backing_device ()
{
    case "${1}" in
        *.squashfs|*.ext2|*.ext3|*.ext4|*.jffs2|*.*.verity|*.*.fec)
            echo $(setup_loop "${1}" "loop" "/sys/block/loop*" '0' "${2}")
            ;;
        *.dir)
            echo "directory"
            ;;
        *)
            panic "Unrecognized live filesystem: ${1}"
            ;;
    esac
}
# mount_images_in_directory DIRECTORY ROOTMNT [MAC]
# If DIRECTORY holds a live media path, assemble the union root filesystem
# from it onto ROOTMNT; a non-empty MAC adds a per-machine overlay
# sub-directory. Panics when no usable images are found.
mount_images_in_directory ()
{
    directory="${1}"
    rootmnt="${2}"
    mac="${3}"
    if is_live_path "${directory}"
    then
        [ -n "${mac}" ] && adddirectory="${directory}/${LIVE_MEDIA_PATH}/${mac}"
        setup_unionfs "${directory}/${LIVE_MEDIA_PATH}" "${rootmnt}" "${adddirectory}"
    else
        panic "No supported filesystem images found at /${LIVE_MEDIA_PATH}."
    fi
}
# is_nice_device SYSFS_PATH
# Succeed if the sysfs block entry denotes a device worth scanning for live
# media: anything udev classifies as a whole disk, plus virtio disks
# (/block/vd*), device-mapper nodes and mtd block devices.
is_nice_device ()
{
    sysfs_path="${1#/sys}"
    if udevadm info --query=all --path="${sysfs_path}" | egrep -q "DEVTYPE=disk"
    then
        return 0
    elif echo "${sysfs_path}" | grep -q '^/block/vd[a-z]$'
    then
        return 0
    elif echo ${sysfs_path} | grep -q "^/block/dm-"
    then
        return 0
    elif echo ${sysfs_path} | grep -q "^/block/mtdblock"
    then
        return 0
    fi
    return 1
}
# check_dev SYSDEV [DEVNAME] [SKIP_UUID_CHECK]
# Try to mount one candidate device (or the fromiso=/isofrom= source) on
# the global ${mountpoint} and verify it carries a live filesystem image.
# Prints ${mountpoint} and returns 0 on success, 1 otherwise.
# NOTE(review): relies on caller-scope globals: ${mountpoint}, ${FROMISO},
# ${LIVE_MEDIA_OFFSET}, ${PERSISTENCE_FSCK}, ${FSCKFIX}, ${FINDISO} and
# ${LIVE_BOOT_CMDLINE}.
check_dev ()
{
    local force fix
    sysdev="${1}"
    devname="${2}"
    skip_uuid_check="${3}"
    # support for fromiso=.../isofrom=....
    if [ -n "$FROMISO" ]
    then
        ISO_DEVICE=$(dirname $FROMISO)
        if ! [ -b $ISO_DEVICE ]
        then
            # to support unusual device names like /dev/cciss/c0d0p1
            # as well we have to identify the block device name, let's
            # do that for up to 15 levels
            i=15
            while [ -n "$ISO_DEVICE" ] && [ "$i" -gt 0 ]
            do
                ISO_DEVICE=$(dirname ${ISO_DEVICE})
                [ -b "$ISO_DEVICE" ] && break
                i=$(($i -1))
            done
        fi
        if [ "$ISO_DEVICE" = "/" ]
        then
            # not a block device, check if it's an iso file, for
            # example an ISO when booting on an ONIE system
            if echo "${FROMISO}" | grep -q "\.iso$"
            then
                fs_type=$(get_fstype "${FROMISO}")
                if is_supported_fs ${fs_type}
                then
                    mkdir /run/live/fromiso
                    mount -t $fs_type "${FROMISO}" /run/live/fromiso
                    if [ "$?" != 0 ]
                    then
                        echo "Warning: unable to mount ${FROMISO}." >>/boot.log
                    fi
                    devname="/run/live/fromiso"
                fi
            else
                echo "Warning: device for bootoption fromiso= ($FROMISO) not found.">>/boot.log
            fi
        else
            # mount the device holding the ISO, then loop-attach the ISO
            fs_type=$(get_fstype "${ISO_DEVICE}")
            if is_supported_fs ${fs_type}
            then
                mkdir /run/live/fromiso
                mount -t $fs_type "$ISO_DEVICE" /run/live/fromiso
                ISO_NAME="$(echo $FROMISO | sed "s|$ISO_DEVICE||")"
                loopdevname=$(setup_loop "/run/live/fromiso/${ISO_NAME}" "loop" "/sys/block/loop*" "")
                devname="${loopdevname}"
            else
                echo "Warning: unable to mount $ISO_DEVICE." >>/boot.log
            fi
        fi
    fi
    # no explicit device given: derive it from the sysfs path
    if [ -z "${devname}" ]
    then
        devname=$(sys2dev "${sysdev}")
    fi
    # "device" may actually be a directory (e.g. the fromiso mount above)
    if [ -d "${devname}" ]
    then
        # NOTE(review): 'continue' is used here (and below) outside any
        # loop in this function; its effect is shell-dependent and likely
        # intended as "give up on this candidate".
        mount -o bind "${devname}" $mountpoint || continue
        if is_live_path $mountpoint
        then
            echo $mountpoint
            return 0
        else
            umount $mountpoint
        fi
    fi
    # devname may be a comma-separated list; activate lvm/md as needed
    IFS=","
    for device in ${devname}
    do
        case "$device" in
            *mapper*)
                # Adding lvm support
                if [ -x /scripts/local-top/lvm2 ]
                then
                    ROOT="$device" resume="" /scripts/local-top/lvm2 >>/boot.log
                fi
                ;;
            /dev/md*)
                # Adding raid support
                if [ -x /scripts/local-top/mdadm ]
                then
                    [ -r /conf/conf.d/md ] && cp /conf/conf.d/md /conf/conf.d/md.orig
                    echo "MD_DEVS=$device " >> /conf/conf.d/md
                    /scripts/local-top/mdadm >>/boot.log
                    [ -r /conf/conf.d/md.orig ] && mv /conf/conf.d/md.orig /conf/conf.d/md
                fi
                ;;
        esac
    done
    unset IFS
    # keep the last device of the list as the one to probe
    [ -n "$device" ] && devname="$device"
    [ -e "$devname" ] || continue
    # optional offset into the medium (live-media-offset= boot option)
    if [ -n "${LIVE_MEDIA_OFFSET}" ]
    then
        loopdevname=$(setup_loop "${devname}" "loop" "/sys/block/loop*" "${LIVE_MEDIA_OFFSET}")
        devname="${loopdevname}"
    fi
    fstype=$(get_fstype "${devname}")
    if is_supported_fs ${fstype}
    then
        # skip devices we already probed in an earlier pass
        devuid=$(blkid -o value -s UUID "$devname")
        [ -n "$devuid" ] && grep -qs "\<$devuid\>" /var/lib/live/boot/devices-already-tried-to-mount && continue
        for _PARAMETER in ${LIVE_BOOT_CMDLINE}
        do
            case "${_PARAMETER}" in
                forcefsck)
                    FORCEFSCK="true"
                    ;;
            esac
        done
        # optionally fsck the medium before mounting it read-only
        if [ "${PERSISTENCE_FSCK}" = "true" ] || [ "${PERSISTENCE_FSCK}" = "yes" ] || [ "${FORCEFSCK}" = "true" ]
        then
            force=""
            if [ "$FORCEFSCK" = "true" ]
            then
                force="-f"
            fi
            fix="-a"
            if [ "$FSCKFIX" = "true" ] || [ "$FSCKFIX" = "yes" ]
            then
                fix="-y"
            fi
            fsck $fix $force ${devname} >> fsck.log 2>&1
        fi
        mount -t ${fstype} -o ro,noatime "${devname}" ${mountpoint} || continue
        [ -n "$devuid" ] && echo "$devuid" >> /var/lib/live/boot/devices-already-tried-to-mount
        # findiso=: the live image is an ISO file inside this filesystem
        if [ -n "${FINDISO}" ]
        then
            if [ -f ${mountpoint}/${FINDISO} ]
            then
                # remount the carrier elsewhere and loop-mount the ISO
                umount ${mountpoint}
                mkdir -p /run/live/findiso
                mount -t ${fstype} -o ro,noatime "${devname}" /run/live/findiso
                loopdevname=$(setup_loop "/run/live/findiso/${FINDISO}" "loop" "/sys/block/loop*" 0)
                devname="${loopdevname}"
                mount -t iso9660 -o ro,noatime "${devname}" ${mountpoint}
            else
                umount ${mountpoint}
            fi
        fi
        # accept the medium only if it holds live images and (unless
        # skipped) its UUID matches the one this initramfs expects
        if is_live_path ${mountpoint} && \
            ([ "${skip_uuid_check}" ] || matches_uuid ${mountpoint})
        then
            echo ${mountpoint}
            return 0
        else
            umount ${mountpoint} 2>/dev/null
        fi
    fi
    # failure path: release the offset loop device if we created one
    if [ -n "${LIVE_MEDIA_OFFSET}" ]
    then
        losetup -d "${loopdevname}"
    fi
    return 1
}
# find_livefs TIMEOUT
# Locate the live media. Honors the live-media= boot option first
# (removable-usb / removable / explicit device), then scans all block
# devices, preferring removable ones. Prints the mountpoint (or the
# symlinked image path for bare-image devices) and returns 0 on success.
find_livefs ()
{
    timeout="${1}"
    # don't start autodetection before timeout has expired
    if [ -n "${LIVE_MEDIA_TIMEOUT}" ]
    then
        if [ "${timeout}" -lt "${LIVE_MEDIA_TIMEOUT}" ]
        then
            return 1
        fi
    fi
    # first look at the one specified in the command line
    case "${LIVE_MEDIA}" in
        removable-usb)
            for sysblock in $(removable_usb_dev "sys")
            do
                for dev in $(subdevices "${sysblock}")
                do
                    if check_dev "${dev}"
                    then
                        return 0
                    fi
                done
            done
            return 1
            ;;
        removable)
            for sysblock in $(removable_dev "sys")
            do
                for dev in $(subdevices "${sysblock}")
                do
                    if check_dev "${dev}"
                    then
                        return 0
                    fi
                done
            done
            return 1
            ;;
        *)
            # an explicit device path: probe it, skipping the UUID check
            if [ ! -z "${LIVE_MEDIA}" ]
            then
                if check_dev "null" "${LIVE_MEDIA}" "skip_uuid_check"
                then
                    return 0
                fi
            fi
            ;;
    esac
    # or do the scan of block devices
    # prefer removable devices over non-removable devices, so scan them first
    devices_to_scan="$(removable_dev 'sys') $(non_removable_dev 'sys')"
    for sysblock in $devices_to_scan
    do
        devname=$(sys2dev "${sysblock}")
        [ -e "$devname" ] || continue
        fstype=$(get_fstype "${devname}")
        if /lib/udev/cdrom_id ${devname} > /dev/null
        then
            # optical media: probe the whole device
            if check_dev "null" "${devname}"
            then
                return 0
            fi
        elif is_nice_device "${sysblock}"
        then
            # disk-like device: probe each partition (and the disk itself)
            for dev in $(subdevices "${sysblock}")
            do
                if check_dev "${dev}"
                then
                    return 0
                fi
            done
        elif [ "${fstype}" = "squashfs" -o \
            "${fstype}" = "btrfs" -o \
            "${fstype}" = "ext2" -o \
            "${fstype}" = "ext3" -o \
            "${fstype}" = "ext4" -o \
            "${fstype}" = "jffs2" ]
        then
            # This is an ugly hack situation, the block device has
            # an image directly on it. It's hopefully
            # live-boot, so take it and run with it.
            ln -s "${devname}" "${devname}.${fstype}"
            echo "${devname}.${fstype}"
            return 0
        fi
    done
    return 1
}
# is_in_list_separator_helper SEP ELEMENT LIST...
# Succeed if ELEMENT occurs as a whole SEP-separated item of LIST.
# NOTE(review): ELEMENT and SEP are interpolated into a grep BRE, so
# regex metacharacters in them are not matched literally.
is_in_list_separator_helper ()
{
    local sep element list
    sep=${1}
    shift
    element=${1}
    shift
    list=${*}
    echo ${list} | grep -qe "^\(.*${sep}\)\?${element}\(${sep}.*\)\?$"
}
# is_in_space_sep_list ELEMENT LIST...
# Succeed if ELEMENT is one of the whitespace-separated items of LIST.
is_in_space_sep_list ()
{
    local element
    element=${1}
    shift
    is_in_list_separator_helper "[[:space:]]" "${element}" "${*}"
}
# is_in_comma_sep_list ELEMENT LIST...
# Succeed if ELEMENT is one of the comma-separated items of LIST.
is_in_comma_sep_list ()
{
    local element
    element=${1}
    shift
    is_in_list_separator_helper "," "${element}" "${*}"
}
# sys2dev SYSFS_PATH
# Translate a sysfs block entry into its /dev node, asking udev first and
# falling back to the sysfs basename when udev does not know the device.
sys2dev ()
{
    sysdev=${1#/sys}
    echo "/dev/$(udevadm info -q name -p ${sysdev} 2>/dev/null|| echo ${sysdev##*/})"
}
# subdevices SYSBLOCK
# Print the sysfs paths of all partitions of SYSBLOCK, followed by the
# whole-device entry itself (only entries that expose a "dev" file).
subdevices ()
{
    sysblock=${1}
    r=""
    for dev in "${sysblock}"/* "${sysblock}"
    do
        if [ -e "${dev}/dev" ]
        then
            r="${r} ${dev}"
        fi
    done
    echo ${r}
}
# storage_devices BLACK_LIST WHITE_LIST
# Print the /dev paths of all candidate storage (sub)devices, skipping any
# device in BLACK_LIST and — when WHITE_LIST is non-empty — anything not
# in WHITE_LIST. loop/ram/floppy devices are never considered.
storage_devices()
{
    black_listed_devices="${1}"
    white_listed_devices="${2}"
    for sysblock in $(echo /sys/block/* | tr ' ' '\n' | grep -vE "loop|ram|fd")
    do
        fulldevname=$(sys2dev "${sysblock}")
        # NOTE(review): "A || B && C" groups as "(A || B) && C" in sh;
        # verify this condition matches the intended blacklist/whitelist
        # semantics.
        if is_in_space_sep_list ${fulldevname} ${black_listed_devices} || \
            [ -n "${white_listed_devices}" ] && \
            ! is_in_space_sep_list ${fulldevname} ${white_listed_devices}
        then
            # skip this device entirely
            continue
        fi
        for dev in $(subdevices "${sysblock}")
        do
            devname=$(sys2dev "${dev}")
            if is_in_space_sep_list ${devname} ${black_listed_devices}
            then
                # skip this subdevice
                continue
            else
                echo "${devname}"
            fi
        done
    done
}
# is_supported_fs FSTYPE
# Succeed if the kernel can mount FSTYPE, loading the module on demand:
# first check /proc/filesystems, then modprobe from the initramfs, then
# insmod from /root once the real root is reachable. ntfs is special-cased
# to the ntfs-3g userspace driver.
is_supported_fs ()
{
    fstype="${1}"
    # Validate input first
    if [ -z "${fstype}" ]
    then
        return 1
    fi
    # get_fstype might report "unknown" or "swap", ignore it as no such kernel module exists
    if [ "${fstype}" = "unknown" ] || [ "${fstype}" = "swap" ]
    then
        return 1
    fi
    # Try to look if it is already supported by the kernel
    # For ntfs, since user space program ntfs-3g will be used. Check ntfs-3g instead of kernel module.
    if [ "${fstype}" = "ntfs" ]; then
        if type ntfs-3g >/dev/null 2>&1; then
            return 0
        else
            return 1
        fi
    fi
    if grep -q ${fstype} /proc/filesystems
    then
        return 0
    else
        # Then try to add support for it the gentle way using the initramfs capabilities
        modprobe -q -b ${fstype}
        if grep -q ${fstype} /proc/filesystems
        then
            return 0
        # Then try the hard way if /root is already reachable
        else
            kmodule="/root/lib/modules/`uname -r`/${fstype}/${fstype}.ko"
            if [ -e "${kmodule}" ]
            then
                insmod "${kmodule}"
                if grep -q ${fstype} /proc/filesystems
                then
                    return 0
                fi
            fi
        fi
    fi
    return 1
}
# get_fstype DEVICE
# Print the filesystem type blkid detects on DEVICE (empty on failure).
get_fstype ()
{
    blkid -s TYPE -o value $1 2>/dev/null
}
# where_is_mounted DEVICE
# Print the first mountpoint of DEVICE from /proc/mounts (empty if none).
where_is_mounted ()
{
    device=${1}
    # return first found
    grep -m1 "^${device} " /proc/mounts | cut -f2 -d ' '
}
# trim_path PATH
# Normalise a path: collapse runs of "/" into one and strip a trailing
# "/", except when the whole path is just the root "/".
trim_path ()
{
    echo ${1} | sed -e 's|//\+|/|g' -e 's|^\(.*[^/]\)/$|\1|'
}
# what_is_mounted_on DIR
# Print the device mounted on DIR (first /proc/mounts match; empty if none).
what_is_mounted_on ()
{
    local dir
    dir="$(trim_path ${1})"
    grep -m1 "^[^ ]\+ ${dir} " /proc/mounts | cut -d' ' -f1
}
# chown_ref REFERENCE TARGET...
# Copy owner and group from REFERENCE onto each TARGET (symlinks are
# changed themselves, not followed, via chown -h).
chown_ref ()
{
    local reference targets owner
    reference="${1}"
    shift
    targets=${@}
    owner=$(stat -c %u:%g "${reference}")
    chown -h ${owner} ${targets}
}
# chmod_ref REFERENCE TARGET...
# Copy the permission bits of REFERENCE onto each TARGET.
chmod_ref ()
{
    local reference targets rights
    reference="${1}"
    shift
    targets=${@}
    rights=$(stat -c %a "${reference}")
    chmod ${rights} ${targets}
}
# lastline
# Filter: print only the last line of stdin.
# NOTE(review): if stdin is empty this echoes whatever ${line} already
# held in the calling shell (the variable is intentionally not reset).
lastline ()
{
    while read lines
    do
        line=${lines}
    done
    echo "${line}"
}
# base_path PATH
# Walk up from PATH until a mounted filesystem prefix is found and print
# the device of the deepest mount covering PATH.
base_path ()
{
    testpath="${1}"
    mounts="$(awk '{print $2}' /proc/mounts)"
    testpath="$(realpath ${testpath})"
    while true
    do
        if echo "${mounts}" | grep -qs "^${testpath}"
        then
            # take the last (deepest) matching mount entry's device field
            set -- $(echo "${mounts}" | grep "^${testpath}" | lastline)
            echo ${1}
            break
        else
            testpath=$(dirname $testpath)
        fi
    done
}
# fs_size DEVICE [MOUNTPOINT] [used]
# Returns used/free fs kbytes + 5% more
# You could pass a block device as ${1} or the mount point as ${2}
# When neither is mounted yet, DEVICE is temporarily mounted read-only
# under /mnt/tmp_fs_size and unmounted again afterwards.
fs_size ()
{
    dev="${1}"
    mountp="${2}"
    used="${3}"
    if [ -z "${mountp}" ]
    then
        mountp="$(where_is_mounted ${dev})"
        if [ -z "${mountp}" ]
        then
            mountp="/mnt/tmp_fs_size"
            mkdir -p "${mountp}"
            mount -t $(get_fstype "${dev}") -o ro "${dev}" "${mountp}" || log_warning_msg "cannot mount -t $(get_fstype ${dev}) -o ro ${dev} ${mountp}"
            doumount=1
        fi
    fi
    if [ "${used}" = "used" ]
    then
        size=$(du -ks ${mountp} | cut -f1)
        size=$(expr ${size} + ${size} / 20 ) # FIXME: 5% more to be sure
    else
        # free space
        size="$(df -kP | grep -s ${mountp} | awk '{print $4}')"
    fi
    if [ -n "${doumount}" ]
    then
        umount "${mountp}" || log_warning_msg "cannot umount ${mountp}"
        rmdir "${mountp}"
    fi
    echo "${size}"
}
# load_keymap
# Load custom keymap from the initramfs, if both the loadkeys binary and
# a compressed keymap are present (used before LUKS passphrase prompts).
load_keymap ()
{
    if [ -x /bin/loadkeys -a -r /etc/boottime.kmap.gz ]
    then
        loadkeys --quiet /etc/boottime.kmap.gz
    fi
}
# setup_loop FSPATH MODULE SYSFS_PATTERN [OFFSET] [READONLY]
# Attach FSPATH to the first free loop device matching SYSFS_PATTERN,
# honoring an optional byte OFFSET and read-only flag; print the /dev
# node of the chosen loop device. Panics when no loop device is free.
setup_loop ()
{
    local fspath module pattern offset readonly
    fspath=${1}
    module=${2}
    pattern=${3}
    offset=${4}
    readonly=${5}
    # the output of setup_loop is evaluated in other functions,
    # modprobe leaks kernel options like "libata.dma=0"
    # as "options libata dma=0" on stdout, causing serious
    # problems therefor, so instead always avoid output to stdout
    modprobe -q -b "${module}" 1>/dev/null
    udevadm settle
    for loopdev in ${pattern}
    do
        # a size of 0 means the loop device is unused
        if [ "$(cat ${loopdev}/size)" -eq 0 ]
        then
            dev=$(sys2dev "${loopdev}")
            options=''
            if [ -n "${readonly}" ]
            then
                # only pass -r when this losetup implementation has it
                if losetup --help 2>&1 | grep -q -- "-r\b"
                then
                    options="${options} -r"
                fi
            fi
            if [ -n "${offset}" ] && [ 0 -lt "${offset}" ]
            then
                options="${options} -o ${offset}"
            fi
            losetup ${options} "${dev}" "${fspath}"
            echo "${dev}"
            return 0
        fi
    done
    panic "No loop devices available"
}
# try_mount DEVICE MOUNTPOINT OPTS [FSTYPE]
# Mount DEVICE on MOUNTPOINT with OPTS. If DEVICE is already mounted
# elsewhere, remount it with OPTS there (unless mounting read-only) and
# bind-mount that location onto MOUNTPOINT instead.
try_mount ()
{
    dev="${1}"
    mountp="${2}"
    opts="${3}"
    fstype="${4}"
    old_mountp="$(where_is_mounted ${dev})"
    if [ -n "${old_mountp}" ]
    then
        if [ "${opts}" != "ro" ]
        then
            mount -o remount,"${opts}" "${dev}" "${old_mountp}" || panic "Remounting ${dev} ${opts} on ${old_mountp} failed"
        fi
        mount -o bind "${old_mountp}" "${mountp}" || panic "Cannot bind-mount ${old_mountp} on ${mountp}"
    else
        if [ -z "${fstype}" ]
        then
            fstype=$(get_fstype "${dev}")
        fi
        # NOTE(review): the "return 0" below runs in a subshell, so it
        # does not return from try_mount itself; the mount failure is
        # only logged to ./boot.log (relative path) and swallowed.
        mount -t "${fstype}" -o "${opts}" "${dev}" "${mountp}" || \
            ( echo "SKIPPING: Cannot mount ${dev} on ${mountp}, fstype=${fstype}, options=${opts}" > boot.log && return 0 )
    fi
}
# Try to mount $device to the place expected by live-boot. If $device
# is already mounted somewhere, move it to the expected place. If $device
# ends with a "/" this is a directory path.
# If we're only probing $device (to check if it has custom persistence)
# $probe should be set, which suppresses warnings upon failure. On
# success, print the mount point for $device.
# mount_persistence_media DEVICE [PROBE]
# Ensure DEVICE (block device or, for directory-backed persistence, an
# existing directory path) is mounted under /run/live/persistence/<name>
# and print that mountpoint. Moves an existing mount there if needed.
# PERSISTENCE_READONLY selects ro over rw mounts. With PROBE set, mount
# failures are silent. Returns 1 on failure.
mount_persistence_media ()
{
    local device probe backing old_backing fstype mount_opts
    device=${1}
    probe=${2}
    # get_custom_mounts() might call this with a directory path instead
    # of a block device path. This means we have found sub-directory path
    # underneath /run/live/persistence, so we're done
    if [ -d "${device}" ]
    then
        echo "${device}"
        return 0
    fi
    if [ ! -b "${device}" ]
    then
        return 1
    fi
    backing="/run/live/persistence/$(basename ${device})"
    mkdir -p "${backing}"
    old_backing="$(where_is_mounted ${device})"
    if [ -z "${old_backing}" ]
    then
        # not mounted yet: mount it fresh at the expected place
        fstype="$(get_fstype ${device})"
        mount_opts="rw,noatime"
        if [ -n "${PERSISTENCE_READONLY}" ]
        then
            mount_opts="ro,noatime"
        fi
        if mount -t "${fstype}" -o "${mount_opts}" "${device}" "${backing}" >/dev/null 2>&1
        then
            echo ${backing}
            return 0
        else
            [ -z "${probe}" ] && log_warning_msg "Failed to mount persistence media ${device}"
            rmdir "${backing}"
            return 1
        fi
    elif [ "${backing}" != "${old_backing}" ]
    then
        # mounted elsewhere: move the mount to the expected place
        if ! mount -o move ${old_backing} ${backing} >/dev/null
        then
            [ -z "${probe}" ] && log_warning_msg "Failed to move persistence media ${device}"
            rmdir "${backing}"
            return 1
        fi
        mount_opts="rw,noatime"
        if [ -n "${PERSISTENCE_READONLY}" ]
        then
            mount_opts="ro,noatime"
        fi
        if ! mount -o "remount,${mount_opts}" "${backing}" >/dev/null
        then
            log_warning_msg "Failed to remount persistence media ${device} writable"
            # Don't unmount or rmdir the new mountpoint in this case
        fi
        echo ${backing}
        return 0
    else
        # This means that $device has already been mounted on
        # the place expected by live-boot, so we're done.
        echo ${backing}
        return 0
    fi
}
# close_persistence_media DEVICE
# Undo mount_persistence_media: unmount and remove the backing mountpoint
# and close the LUKS mapping if DEVICE is one.
close_persistence_media ()
{
    local device backing
    device=${1}
    backing="$(where_is_mounted ${device})"
    if [ -d "${backing}" ]
    then
        umount "${backing}" >/dev/null 2>&1
        rmdir "${backing}" >/dev/null 2>&1
    fi
    if is_active_luks_mapping ${device}
    then
        cryptsetup luksClose ${device}
    fi
}
# open_luks_device DEVICE
# Interactively unlock the LUKS DEVICE (via plymouth when available,
# otherwise the cryptsetup askpass helper) and print the resulting
# /dev/mapper node. Returns 0 on success, 1 if the mapping name is taken
# by another device, and 2 if the user gives up retrying.
open_luks_device ()
{
    dev="${1}"
    name="$(basename ${dev})"
    opts="--key-file=-"
    if [ -n "${PERSISTENCE_READONLY}" ]
    then
        opts="${opts} --readonly"
    fi
    # mapping already exists: reuse it only if it backs the same device
    if cryptsetup status "${name}" >/dev/null 2>&1
    then
        re="^[[:space:]]*device:[[:space:]]*\([^[:space:]]*\)$"
        opened_dev=$(cryptsetup status ${name} 2>/dev/null | grep "${re}" | sed "s|${re}|\1|")
        if [ "${opened_dev}" = "${dev}" ]
        then
            luks_device="/dev/mapper/${name}"
            echo ${luks_device}
            return 0
        else
            log_warning_msg "Cannot open luks device ${dev} since ${opened_dev} already is opened with its name"
            return 1
        fi
    fi
    # make sure the passphrase is typed with the right keymap
    load_keymap
    # check for plymouth
    if [ -x /bin/plymouth ]
    then
        _PLYMOUTH="true"
    fi
    case "${_PLYMOUTH}" in
        true)
            plymouth --ping
            cryptkeyscript="plymouth ask-for-password --prompt"
            # Plymouth will add a : if it is a non-graphical prompt
            cryptkeyprompt="Please unlock disk ${dev}"
            ;;
        *)
            cryptkeyscript="/lib/cryptsetup/askpass"
            cryptkeyprompt="Please unlock disk ${dev}: "
            ;;
    esac
    # prompt/retry loop: pipe the passphrase into cryptsetup on stdin
    while true
    do
        $cryptkeyscript "$cryptkeyprompt" | \
            cryptsetup -T 1 luksOpen ${dev} ${name} ${opts}
        if [ 0 -eq ${?} ]
        then
            luks_device="/dev/mapper/${name}"
            echo ${luks_device}
            return 0
        fi
        echo >&6
        retryprompt="There was an error decrypting ${dev} ... Retry? [Y/n]"
        case "${_PLYMOUTH}" in
            true)
                plymouth display-message --text "${retryprompt}"
                answer=$(plymouth watch-keystroke --keys="YNyn")
                ;;
            *)
                echo -n "${retryprompt} " >&6
                read answer
                ;;
        esac
        if [ "$(echo "${answer}" | cut -b1 | tr A-Z a-z)" = "n" ]
        then
            case "${_PLYMOUTH}" in
                true)
                    plymouth display-message --text ""
                    ;;
            esac
            return 2
        fi
    done
}
# get_gpt_name DEVICE
# Print the GPT partition name (label) of DEVICE, empty if none.
get_gpt_name ()
{
    local dev
    dev="${1}"
    blkid -s PART_ENTRY_NAME -p -o value ${dev} 2>/dev/null
}
# is_gpt_device DEVICE
# Succeed if DEVICE is a partition in a GPT partition table.
is_gpt_device ()
{
    local dev
    dev="${1}"
    [ "$(blkid -s PART_ENTRY_SCHEME -p -o value ${dev} 2>/dev/null)" = "gpt" ]
}
# probe_for_gpt_name OVERLAYS DEVICE
# If DEVICE (or, for an opened LUKS mapping, its backing device) carries a
# GPT partition name matching one of the OVERLAYS labels, print
# "label=device".
probe_for_gpt_name ()
{
    local overlays dev gpt_dev gpt_name
    overlays="${1}"
    dev="${2}"
    gpt_dev="${dev}"
    if is_active_luks_mapping ${dev}
    then
        # if $dev is an opened luks device, we need to check
        # GPT stuff on the backing device
        gpt_dev=$(get_luks_backing_device "${dev}")
    fi
    if ! is_gpt_device ${gpt_dev}
    then
        return
    fi
    gpt_name=$(get_gpt_name ${gpt_dev})
    for label in ${overlays}
    do
        if [ "${gpt_name}" = "${label}" ]
        then
            echo "${label}=${dev}"
        fi
    done
}
# probe_for_fs_label OVERLAYS DEVICE
# If DEVICE's filesystem label matches one of the OVERLAYS labels, print
# "label=device".
probe_for_fs_label ()
{
    local overlays dev
    overlays="${1}"
    dev="${2}"
    for label in ${overlays}
    do
        if [ "$(blkid -s LABEL -o value $dev 2>/dev/null)" = "${label}" ]
        then
            echo "${label}=${dev}"
        fi
    done
}
# probe_for_file_name OVERLAYS DEVICE
# Mount DEVICE and look for image files named after the OVERLAYS labels
# under ${PERSISTENCE_PATH}; loop-attach each match and print
# "label=loopdev" pairs. Side effect: on a match, DEVICE stays mounted.
probe_for_file_name ()
{
    local overlays dev ret backing
    overlays="${1}"
    dev="${2}"
    ret=""
    backing="$(mount_persistence_media ${dev} probe)"
    if [ -z "${backing}" ]
    then
        return
    fi
    for label in ${overlays}
    do
        path=${backing}/${PERSISTENCE_PATH}/${label}
        if [ -f "${path}" ]
        then
            local loopdev
            loopdev=$(setup_loop "${path}" "loop" "/sys/block/loop*")
            ret="${ret} ${label}=${loopdev}"
        fi
    done
    if [ -n "${ret}" ]
    then
        echo ${ret}
    else
        # unmount and remove mountpoint
        umount ${backing} > /dev/null 2>&1 || true
        rmdir ${backing} > /dev/null 2>&1 || true
    fi
}
# probe_for_directory_name OVERLAYS DEVICE
# Mount DEVICE and look for directories named after the OVERLAYS labels
# under ${PERSISTENCE_PATH}; print "label=path/" pairs (the trailing "/"
# marks a directory-backed overlay). On a match, DEVICE stays mounted.
probe_for_directory_name ()
{
    local overlays dev ret backing
    overlays="${1}"
    dev="${2}"
    ret=""
    backing="$(mount_persistence_media ${dev} probe)"
    if [ -z "${backing}" ]
    then
        return
    fi
    for label in ${overlays}
    do
        path=${backing}/${PERSISTENCE_PATH}/${label}
        if [ -d "${path}" ]
        then
            # in this case the "device" ends with a "/"
            ret="${ret} ${label}=${backing}/${PERSISTENCE_PATH}/${label%%/}/"
        fi
    done
    if [ -n "${ret}" ]
    then
        echo ${ret}
    else
        # unmount and remove mountpoint
        umount ${backing} > /dev/null 2>&1 || true
        rmdir ${backing} > /dev/null 2>&1 || true
    fi
}
find_persistence_media ()
{
    # Scans devices for overlays, and returns a whitespace
    # separated list of how to use them. Only overlays with a partition
    # label or file name in ${overlays} are returned.
    #
    # When scanning a LUKS device, the user will be asked to enter the
    # passphrase; on failure to enter it, or if no persistence partitions
    # or files were found, the LUKS device is closed.
    #
    # For all other cases (overlay partition and overlay file) the
    # return value is "${label}=${device}", where ${device} a device that
    # can mount the content. In the case of an overlay file, the device
    # containing the file will remain mounted as a side-effect.
    #
    # No devices in ${black_listed_devices} will be scanned, and if
    # ${white_list_devices} is non-empty, only devices in it will be
    # scanned.
    local overlays white_listed_devices ret black_listed_devices
    overlays="${1}"
    white_listed_devices="${2}"
    ret=""
    #
    # The devices that are hosting the actual live rootfs should not be
    # used for persistence storage since otherwise you might mount a
    # parent directory on top of a sub-directory of the same filesystem
    # in one union together.
    #
    black_listed_devices=""
    for d in /run/live/medium /run/live/rootfs/* /run/live/findiso /run/live/fromiso
    do
        black_listed_devices="${black_listed_devices} $(what_is_mounted_on ${d})"
    done
    for dev in $(storage_devices "${black_listed_devices}" "${white_listed_devices}")
    do
        local result luks_device
        result=""
        luks_device=""
        # Check if it's a luks device; we'll have to open the device
        # in order to probe any filesystem it contains, like we do
        # below. activate_custom_mounts() also depends on that any luks
        # device already has been opened.
        if is_in_comma_sep_list luks ${PERSISTENCE_ENCRYPTION} && is_luks_partition ${dev}
        then
            if luks_device=$(open_luks_device "${dev}")
            then
                dev="${luks_device}"
            else
                # skip $dev since we failed/chose not to open it
                continue
            fi
        elif ! is_in_comma_sep_list none ${PERSISTENCE_ENCRYPTION}
        then
            # skip $dev since we don't allow unencrypted storage
            continue
        fi
        # Probe for matching GPT partition names or filesystem labels
        if is_in_comma_sep_list filesystem ${PERSISTENCE_STORAGE}
        then
            result=$(probe_for_gpt_name "${overlays}" ${dev})
            if [ -n "${result}" ]
            then
                ret="${ret} ${result}"
                continue
            fi
            result=$(probe_for_fs_label "${overlays}" ${dev})
            if [ -n "${result}" ]
            then
                ret="${ret} ${result}"
                continue
            fi
        fi
        # Probe for files with matching name on mounted partition
        if is_in_comma_sep_list file ${PERSISTENCE_STORAGE}
        then
            result=$(probe_for_file_name "${overlays}" ${dev})
            if [ -n "${result}" ]
            then
                local loopdevice
                loopdevice=${result##*=}
                # an image file may itself be LUKS-encrypted
                if is_in_comma_sep_list luks ${PERSISTENCE_ENCRYPTION} && is_luks_partition ${loopdevice}
                then
                    local luksfile
                    luksfile=""
                    if luksfile=$(open_luks_device "${loopdevice}")
                    then
                        result=${result%%=*}
                        result="${result}=${luksfile}"
                    else
                        losetup -d $loopdevice
                        result=""
                    fi
                fi
                ret="${ret} ${result}"
                continue
            fi
        fi
        # Probe for directory with matching name on mounted partition
        if is_in_comma_sep_list directory ${PERSISTENCE_STORAGE}
        then
            result=$(probe_for_directory_name "${overlays}" ${dev})
            if [ -n "${result}" ]
            then
                ret="${ret} ${result}"
                continue
            fi
        fi
        # Close luks device if it isn't used
        if [ -z "${result}" ] && [ -n "${luks_device}" ] && is_active_luks_mapping "${luks_device}"
        then
            cryptsetup luksClose "${luks_device}"
        fi
    done
    if [ -n "${ret}" ]
    then
        echo ${ret}
    fi
}
# get_mac
# Print a MAC address in AA-BB-CC-DD-EE-FF form, taken from the network
# interfaces whose iflink equals 2 (the last such interface wins).
# NOTE(review): the iflink==2 heuristic looks like it is meant to select a
# physical interface — confirm against the callers.
get_mac ()
{
    mac=""
    for adaptor in /sys/class/net/*
    do
        status="$(cat ${adaptor}/iflink)"
        if [ "${status}" -eq 2 ]
        then
            mac="$(cat ${adaptor}/address)"
            # canonicalize: colons to dashes, upper-case
            mac="$(echo ${mac} | sed 's/:/-/g' | tr '[a-z]' '[A-Z]')"
        fi
    done
    echo ${mac}
}
# is_luks_partition DEVICE
# Succeed if DEVICE contains a LUKS header.
is_luks_partition ()
{
    device="${1}"
    cryptsetup isLuks "${device}" 1>/dev/null 2>&1
}
# is_active_luks_mapping DEVICE
# Succeed if DEVICE is an active device-mapper crypt mapping.
is_active_luks_mapping ()
{
    device="${1}"
    cryptsetup status "${device}" 1>/dev/null 2>&1
}
# get_luks_backing_device MAPPING
# Print the underlying block device of an active LUKS MAPPING.
get_luks_backing_device ()
{
    device=${1}
    cryptsetup status ${device} 2> /dev/null | \
        awk '{if ($1 == "device:") print $2}'
}
# removable_dev OUTPUT_FORMAT [WANT_USB]
# List removable block devices (excluding loop/ram/dm-/floppy). With
# OUTPUT_FORMAT "sys" print sysfs paths, otherwise /dev nodes. When
# WANT_USB is non-empty, keep only devices attached via USB.
removable_dev ()
{
    output_format="${1}"
    want_usb="${2}"
    ret=
    for sysblock in $(echo /sys/block/* | tr ' ' '\n' | grep -vE "/(loop|ram|dm-|fd)")
    do
        if [ ! -d "${sysblock}" ]; then
            continue
        fi
        dev_ok=
        if [ "$(cat ${sysblock}/removable)" = "1" ]
        then
            if [ -z "${want_usb}" ]
            then
                dev_ok="true"
            else
                # USB devices show "usb" in their sysfs symlink target
                if readlink ${sysblock} | grep -q usb
                then
                    dev_ok="true"
                fi
            fi
        fi
        if [ "${dev_ok}" = "true" ]
        then
            case "${output_format}" in
                sys)
                    ret="${ret} ${sysblock}"
                    ;;
                *)
                    devname=$(sys2dev "${sysblock}")
                    ret="${ret} ${devname}"
                    ;;
            esac
        fi
    done
    echo "${ret}"
}
# removable_usb_dev OUTPUT_FORMAT
# Convenience wrapper around removable_dev that lists only removable
# devices sitting on the USB bus.
removable_usb_dev ()
{
    removable_dev "${1}" "want_usb"
}
# non_removable_dev OUTPUT_FORMAT
# List non-removable block devices (excluding loop/ram/dm-/floppy). With
# OUTPUT_FORMAT "sys" print sysfs paths, otherwise /dev nodes.
non_removable_dev ()
{
    output_format="${1}"
    ret=
    for sysblock in $(echo /sys/block/* | tr ' ' '\n' | grep -vE "/(loop|ram|dm-|fd)")
    do
        if [ ! -d "${sysblock}" ]; then
            continue
        fi
        if [ "$(cat ${sysblock}/removable)" = "0" ]
        then
            case "${output_format}" in
                sys)
                    ret="${ret} ${sysblock}"
                    ;;
                *)
                    devname=$(sys2dev "${sysblock}")
                    ret="${ret} ${devname}"
                    ;;
            esac
        fi
    done
    echo "${ret}"
}
link_files ()
{
    # create source's directory structure in dest, and recursively
    # create symlinks in dest to to all files in source. if mask
    # is non-empty, remove mask from all source paths when
    # creating links (will be necessary if we change root, which
    # live-boot normally does (into $rootmnt)).
    local src_dir dest_dir src_transform
    # remove multiple /:s and ensure ending on /
    src_dir="$(trim_path ${1})/"
    dest_dir="$(trim_path ${2})/"
    src_transform="${3}"
    # This check can only trigger on the inital, non-recursive call since
    # we create the destination before recursive calls
    if [ ! -d "${dest_dir}" ]
    then
        log_warning_msg "Must link_files into a directory"
        return
    fi
    find "${src_dir}" -mindepth 1 -maxdepth 1 | \
        while read src
        do
            local dest final_src
            dest="${dest_dir}$(basename "${src}")"
            if [ -d "${src}" ]
            then
                # skip empty directories entirely
                if [ -z "$(ls -A "${src}")" ]
                then
                    continue
                fi
                if [ ! -d "${dest}" ]
                then
                    # replicate the directory with matching owner/mode
                    mkdir -p "${dest}"
                    chown_ref "${src}" "${dest}"
                    chmod_ref "${src}" "${dest}"
                fi
                link_files "${src}" "${dest}" "${src_transform}"
            else
                final_src=${src}
                if [ -n "${src_transform}" ]
                then
                    # rewrite the link target (e.g. strip ${rootmnt})
                    final_src="$(echo ${final_src} | sed "${src_transform}")"
                fi
                rm -rf "${dest}" 2> /dev/null
                ln -s "${final_src}" "${dest}"
                chown_ref "${src}" "${dest}"
            fi
        done
}
# do_union MOUNTPOINT RW_BRANCH [RO_BRANCH...]
# Mount a union filesystem of type ${UNIONTYPE} on MOUNTPOINT with
# RW_BRANCH as the writable layer and the remaining arguments as
# read-only lower layers.
do_union ()
{
    local unionmountpoint unionrw unionro
    unionmountpoint="${1}" # directory where the union is mounted
    shift
    unionrw="${1}" # branch where the union changes are stored
    shift
    unionro="${*}" # space separated list of read-only branches (optional)
    case "${UNIONTYPE}" in
        overlay)
            # XXX: can unionro be optional? i.e. can overlay skip lowerdir?
            if [ -z "${unionro}" ]
            then
                panic "overlay needs at least one lower filesystem (read-only branch)."
            fi
            # Multiple lower layers can now be given using the the colon (":") as a
            # separator character between the directory names.
            unionro="$(echo ${unionro} | sed -e 's| |:|g')"
            # overlayfs requires:
            # + a workdir to become mounted
            # + workdir and upperdir to reside under the same mount
            # + workdir and upperdir to be in separate directories
            mkdir -p "${unionrw}/rw"
            mkdir -p "${unionrw}/work"
            unionmountopts="-o noatime,lowerdir=${unionro},upperdir=${unionrw}/rw,workdir=${unionrw}/work"
            ;;
    esac
    mount -t ${UNIONTYPE} ${unionmountopts} ${UNIONTYPE} "${unionmountpoint}"
}
get_custom_mounts ()
{
    # Side-effect: leaves $devices with persistence.conf mounted in /run/live/persistence
    # Side-effect: prints info to file $custom_mounts
    local custom_mounts devices bindings links
    custom_mounts=${1}
    shift
    devices=${@}
    bindings="/tmp/bindings.list"
    links="/tmp/links.list"
    rm -rf ${bindings} ${links} 2> /dev/null
    for device in ${devices}
    do
        local device_name backing include_list
        device_name="$(basename ${device})"
        backing=$(mount_persistence_media ${device})
        if [ -z "${backing}" ]
        then
            continue
        fi
        # only devices carrying a ${persistence_list} file are used
        if [ -r "${backing}/${persistence_list}" ]
        then
            include_list="${backing}/${persistence_list}"
        else
            continue
        fi
        if [ -n "${LIVE_BOOT_DEBUG}" ] && [ -e "${include_list}" ]
        then
            cp ${include_list} /run/live/persistence/${persistence_list}.${device_name}
        fi
        # parse each "dir options" line of the persistence config
        while read dir options # < ${include_list}
        do
            if echo ${dir} | grep -qe "^[[:space:]]*\(#.*\)\?$"
            then
                # skipping empty or commented lines
                continue
            fi
            # reject relative paths, "."/".." components and paths that
            # would shadow live-boot's own infrastructure
            if trim_path ${dir} | grep -q -e "^[^/]" -e "^/lib" -e "^/run/live\(/.*\)\?$" -e "^/\(.*/\)\?\.\.\?\(/.*\)\?$"
            then
                log_warning_msg "Skipping unsafe custom mount ${dir}: must be an absolute path containing neither the \".\" nor \"..\" special dirs, and cannot be \"/lib\", or \"/run/live\" or any of its sub-directories."
                continue
            fi
            local opt_source opt_link source full_source full_dest
            opt_source=""
            opt_link=""
            for opt in $(echo ${options} | tr ',' ' ');
            do
                case "${opt}" in
                    source=*)
                        opt_source=${opt#source=}
                        ;;
                    link)
                        opt_link="true"
                        ;;
                    union|bind)
                        ;;
                    *)
                        log_warning_msg "Skipping custom mount with unknown option: ${opt}"
                        continue 2
                        ;;
                esac
            done
            source="${dir}"
            if [ -n "${opt_source}" ]
            then
                # a custom source must be a safe path relative to the media root
                if echo ${opt_source} | grep -q -e "^/" -e "^\(.*/\)\?\.\.\?\(/.*\)\?$" && [ "${opt_source}" != "." ]
                then
                    log_warning_msg "Skipping unsafe custom mount with option source=${opt_source}: must be either \".\" (the media root) or a relative path w.r.t. the media root that contains neither comas, nor the special \".\" and \"..\" path components"
                    continue
                else
                    source="${opt_source}"
                fi
            fi
            full_source="$(trim_path ${backing}/${source})"
            full_dest="$(trim_path ${rootmnt}/${dir})"
            # link-type mounts are collected separately so they can be
            # applied after all real mounts
            if [ -n "${opt_link}" ]
            then
                echo "${device} ${full_source} ${full_dest} ${options}" >> ${links}
            else
                echo "${device} ${full_source} ${full_dest} ${options}" >> ${bindings}
            fi
        done < ${include_list}
    done
    # We sort the list according to destination so we're sure that
    # we won't hide a previous mount. We also ignore duplicate
    # destinations in a more or less arbitrary way.
    [ -e "${bindings}" ] && sort -k3 -sbu ${bindings} >> ${custom_mounts} && rm ${bindings}
    # After all mounts are considered we add symlinks so they
    # won't be hidden by some mount.
    [ -e "${links}" ] && cat ${links} >> ${custom_mounts} && rm ${links}
    # We need to make sure that no two custom mounts have the same sources
    # or are nested; if that is the case, too much weird stuff can happen.
    local prev_source prev_dest
    prev_source="impossible source" # first iteration must not match
    prev_dest=""
    # This sort will ensure that a source /a comes right before a source
    # /a/b so we only need to look at the previous source
    [ -e ${custom_mounts} ] && sort -k2 -b ${custom_mounts} |
    while read device source dest options
    do
        if echo ${source} | grep -qe "^${prev_source}\(/.*\)\?$"
        then
            panic "Two persistence mounts have the same or nested sources: ${source} on ${dest}, and ${prev_source} on ${prev_dest}"
        fi
        prev_source=${source}
        prev_dest=${dest}
    done
}
# activate_custom_mounts CUSTOM_MOUNTS_FILE
# Apply each "device source dest options" line produced by
# get_custom_mounts(): bind-mount, union-mount or symlink the persistent
# source onto its destination under ${rootmnt}, bootstrapping missing
# sources from the live filesystem. Prints the list of devices actually
# used and exports PERSISTENCE_IS_ON=1 once any mount is activated.
activate_custom_mounts ()
{
    local custom_mounts used_devices
    custom_mounts="${1}" # the ouput from get_custom_mounts()
    used_devices=""
    while read device source dest options # < ${custom_mounts}
    do
        # the options are mutually exclusive; the last one seen wins
        local opt_bind opt_link opt_union
        opt_bind="true"
        opt_link=""
        opt_union=""
        for opt in $(echo ${options} | tr ',' ' ');
        do
            case "${opt}" in
                bind)
                    opt_bind="true"
                    unset opt_link opt_union
                    ;;
                link)
                    opt_link="true"
                    unset opt_bind opt_union
                    ;;
                union)
                    opt_union="true"
                    unset opt_bind opt_link
                    ;;
            esac
        done
        if [ -n "$(what_is_mounted_on "${dest}")" ]
        then
            if [ "${dest}" = "${rootmnt}" ]
            then
                umount "${dest}"
            else
                log_warning_msg "Skipping custom mount ${dest}: $(what_is_mounted_on "${dest}") is already mounted there"
                continue
            fi
        fi
        if [ ! -d "${dest}" ]
        then
            # create the destination and delete existing files in
            # its path that are in the way
            path="/"
            for dir in $(echo ${dest} | sed -e 's|/\+| |g')
            do
                path=$(trim_path ${path}/${dir})
                if [ -f ${path} ]
                then
                    rm -f ${path}
                fi
                if [ ! -e ${path} ]
                then
                    mkdir -p ${path}
                    if echo ${path} | grep -qe "^${rootmnt}/*home/[^/]\+"
                    then
                        # if ${dest} is in /home try fixing proper ownership by assuming that the intended user is the first, which is usually the case
                        # FIXME: this should really be handled by live-config since we don't know for sure which uid a certain user has until then
                        chown 1000:1000 ${path}
                    fi
                fi
            done
        fi
        # if ${source} doesn't exist on our persistence media
        # we bootstrap it with $dest from the live filesystem.
        # this both makes sense and is critical if we're
        # dealing with /etc or other system dir.
        if [ ! -d "${source}" ]
        then
            if [ -n "${PERSISTENCE_READONLY}" ]
            then
                continue
            elif [ -n "${opt_union}" ] || [ -n "${opt_link}" ]
            then
                # unions and don't need to be bootstrapped
                # link dirs can't be bootstrapped in a sensible way
                mkdir -p "${source}"
                chown_ref "${dest}" "${source}"
                chmod_ref "${dest}" "${source}"
            elif [ -n "${opt_bind}" ]
            then
                # ensure that $dest is not copied *into* $source
                mkdir -p "$(dirname ${source})"
                cp -a "${dest}" "${source}"
            fi
        fi
        # XXX: If CONFIG_AUFS_ROBR is added to the Debian kernel we can
        # ignore the loop below and set rootfs_dest_backing=$dest
        local rootfs_dest_backing
        rootfs_dest_backing=""
        if [ -n "${opt_link}" ] || [ -n "${opt_union}" ]
        then
            for d in /run/live/rootfs/*
            do
                if [ -n "${rootmnt}" ]
                then
                    fs="${d}/$(echo ${dest} | sed -e "s|${rootmnt}||")"
                else
                    fs="${d}/${dest}"
                fi
                if [ -d "${fs}" ]
                then
                    rootfs_dest_backing="${rootfs_dest_backing} ${fs}"
                fi
            done
        fi
        local cow_dir links_source
        if [ -n "${opt_link}" ] && [ -z "${PERSISTENCE_READONLY}" ]
        then
            link_files ${source} ${dest} ""
        elif [ -n "${opt_link}" ] && [ -n "${PERSISTENCE_READONLY}" ]
        then
            mkdir -p /run/live/persistence
            links_source=$(mktemp -d /run/live/persistence/links-source-XXXXXX)
            chown_ref ${source} ${links_source}
            chmod_ref ${source} ${links_source}
            # We put the cow dir in the below strange place to
            # make it absolutely certain that the link source
            # has its own directory and isn't nested with some
            # other custom mount (if so that mount's files would
            # be linked, causing breakage.
            cow_dir="/run/live/overlay/run/live/persistence/$(basename ${links_source})"
            mkdir -p ${cow_dir}
            chown_ref "${source}" "${cow_dir}"
            chmod_ref "${source}" "${cow_dir}"
            do_union ${links_source} ${cow_dir} ${source} ${rootfs_dest_backing}
            link_files ${links_source} ${dest} "s|^${rootmnt}||"
        elif [ -n "${opt_union}" ] && [ -z "${PERSISTENCE_READONLY}" ]
        then
            do_union ${dest} ${source} ${rootfs_dest_backing}
        elif [ -n "${opt_bind}" ] && [ -z "${PERSISTENCE_READONLY}" ]
        then
            mount -o bind "${source}" "${dest}"
        elif [ -n "${opt_bind}" -o -n "${opt_union}" ] && [ -n "${PERSISTENCE_READONLY}" ]
        then
            # bind-mount and union mount are handled the same
            # in read-only mode, but note that rootfs_dest_backing
            # is non-empty (and necessary) only for unions
            cow_dir="/run/live/overlay/${dest}"
            if [ -e "${cow_dir}" ] && [ -z "${opt_link}" ]
            then
                # If an earlier custom mount has files here
                # it will "block" the current mount's files
                # which is undesirable
                rm -rf "${cow_dir}"
            fi
            mkdir -p ${cow_dir}
            chown_ref "${source}" "${cow_dir}"
            chmod_ref "${source}" "${cow_dir}"
            if [ "${UNIONTYPE}" = "overlay" ]
            then
                # When we use overlay we add the "/rw" postfix to our source when using it
                # as upper layer. Therefore we also have to add it here when using it as
                # the lower layer.
                source="${source}/rw"
            fi
            do_union ${dest} ${cow_dir} ${source} ${rootfs_dest_backing}
        fi
        PERSISTENCE_IS_ON="1"
        export PERSISTENCE_IS_ON
        # record each backing device once
        if echo ${used_devices} | grep -qve "^\(.* \)\?${device}\( .*\)\?$"
        then
            used_devices="${used_devices} ${device}"
        fi
    done < ${custom_mounts}
    echo ${used_devices}
}
# is_mountpoint DIR
# Succeeds (exit 0) iff DIR is a mount point, i.e. its filesystem id
# (stat -f %d:%D) differs from that of its parent directory.
# Note: by this heuristic "/" is reported as NOT a mountpoint, because
# "/.." resolves back to "/" itself.
is_mountpoint ()
{
	directory="$1"
	dir_id=$(stat -fc%d:%D "${directory}")
	parent_id=$(stat -fc%d:%D "${directory}/..")
	[ "${dir_id}" != "${parent_id}" ]
}
# Commandline is image1:roothash1,image2:roothash2 etc.
# get_dm_verity_hash IMAGE LIST
# LIST has the form "image1:roothash1,image2:roothash2,...".
# Prints the root hash recorded for IMAGE (one line per match,
# empty output when IMAGE is not present in LIST).
get_dm_verity_hash ()
{
	local wanted entries
	wanted="$1"
	entries="$2"
	echo "${entries}" | tr ',' '\n' | while IFS=: read -r img hash rest
	do
		if [ "${img}" = "${wanted}" ]
		then
			echo "${hash}"
		fi
	done
}
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.housepower.data.type;
import com.github.housepower.misc.SQLLexer;
import com.github.housepower.serde.BinaryDeserializer;
import com.github.housepower.serde.BinarySerializer;
import java.io.IOException;
import java.sql.SQLException;
/**
 * ClickHouse {@code Int8} column type: a signed one-byte integer mapped
 * to Java {@link Byte}. Also matched by the SQL alias {@code TINYINT}.
 */
public class DataTypeInt8 implements BaseDataTypeInt8<Byte, Byte> {

    @Override
    public String name() {
        return "Int8";
    }

    /** SQL aliases that resolve to this type. */
    @Override
    public String[] getAliases() {
        return new String[]{"TINYINT"};
    }

    @Override
    public Class<Byte> javaType() {
        return Byte.class;
    }

    /** Value used for absent cells: zero. */
    @Override
    public Byte defaultValue() {
        return (byte) 0;
    }

    /** Maximum decimal digits including sign (Int8 spans -128..127). */
    @Override
    public int getPrecision() {
        return 4;
    }

    @Override
    public boolean isSigned() {
        return true;
    }

    /** Wire format is a single raw byte. */
    @Override
    public void serializeBinary(Byte value, BinarySerializer serializer) throws SQLException, IOException {
        serializer.writeByte(value);
    }

    @Override
    public Byte deserializeBinary(BinaryDeserializer deserializer) throws IOException {
        return deserializer.readByte();
    }

    /** Parses a numeric literal from SQL text and narrows it to a byte. */
    @Override
    public Byte deserializeText(SQLLexer lexer) throws SQLException {
        return lexer.numberLiteral().byteValue();
    }
}
|
from monitor import MONITOR_VERBOSE_DMSG_LEVEL
from concurrent_base import ConcurrentBase
WORKERS_TO_START = 25
CCJ_INMATE_DETAILS_URL = 'http://www2.cookcountysheriff.org/search2/details.asp?jailnumber='


class InmatesScraper(ConcurrentBase):
    """Fetches inmate detail pages from the Cook County Jail site and
    feeds the parsed results to an inmates collection.

    The public methods only enqueue work via ``self._put``; the matching
    underscore-prefixed methods run on the worker pool supplied by
    ``ConcurrentBase``.
    """

    def __init__(self, http, inmates, inmate_details_class, monitor, workers_to_start=WORKERS_TO_START):
        super(InmatesScraper, self).__init__(monitor, workers_to_start)
        self._http = http
        self._inmates = inmates
        self._inmate_details_class = inmate_details_class

    def _fetch_details(self, inmate_id):
        # Shared fetch previously duplicated in all three workers.
        # Returns (worked, html) straight from the http client.
        return self._http.get(CCJ_INMATE_DETAILS_URL + inmate_id)

    def create_if_exists(self, arg):
        self._put(self._create_if_exists, arg)

    def _create_if_exists(self, inmate_id):
        # Add the inmate only when their detail page could be fetched.
        self._debug('check for inmate - %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
        worked, inmate_details_in_html = self._fetch_details(inmate_id)
        if worked:
            self._inmates.add(inmate_id, self._inmate_details_class(inmate_details_in_html))

    def resurrect_if_found(self, inmate_id):
        self._put(self._resurrect_if_found, inmate_id)

    def _resurrect_if_found(self, inmate_id):
        # An inmate marked discharged is revived if their page is back.
        self._debug('check if really discharged inmate %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
        worked, inmate_details_in_html = self._fetch_details(inmate_id)
        if worked:
            self._debug('resurrected discharged inmate %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
            self._inmates.update(inmate_id, self._inmate_details_class(inmate_details_in_html))

    def update_inmate_status(self, inmate_id):
        self._put(self._update_inmate_status, inmate_id)

    def _update_inmate_status(self, inmate_id):
        # NOTE(review): this worker previously emitted no debug trace,
        # unlike its siblings; added for consistent monitoring.
        self._debug('update status of inmate %s' % inmate_id, MONITOR_VERBOSE_DMSG_LEVEL)
        worked, inmate_details_in_html = self._fetch_details(inmate_id)
        if worked:
            self._inmates.update(inmate_id, self._inmate_details_class(inmate_details_in_html))
        else:
            # Page gone: the jail no longer lists this inmate, so treat
            # them as discharged.
            self._inmates.discharge(inmate_id)
<gh_stars>1-10
import React from 'react'
import { makeStyles } from '@material-ui/core/styles'
import { Divider, Typography, Tooltip } from '@material-ui/core'
import ItemCardBody from '../item/item-card-body'
// Styles applied to the MUI Tooltip itself: black body with a matching
// black arrow (overrides the default grey tooltip appearance).
const useTooltipStyles = makeStyles(theme => ({
  arrow: {
    color: theme.palette.common.black
  },
  tooltip: {
    backgroundColor: theme.palette.common.black
  }
}))
// Styles for the tooltip's inner content: padded subtitle block with
// enlarged name/caption text. The `actions` and `button` rules are not
// referenced by the components in this file — presumably shared or
// legacy; verify before removing.
const useStyles = makeStyles(theme => ({
  subtitles: {
    padding: theme.spacing(0.5)
  },
  subtitle1: {
    fontSize: 16
  },
  caption: {
    fontSize: 14,
    color: theme.palette.text.secondary
  },
  // Centered action row (unused in this file).
  actions: {
    display: 'flex',
    alignItems: 'center',
    justifyContent: 'center',
    margin: theme.spacing(1)
  },
  // Red "danger" button with darker hover (unused in this file).
  button: {
    backgroundColor: theme.palette.error.main,
    color: theme.palette.common.white,
    '&:hover': {
      backgroundColor: theme.palette.error.dark
    }
  }
}))
const BuildItemTooltipContent = ({ buildItem }) => {
const { name, level, type, effects } = buildItem
const classes = useStyles()
return (
<>
<div className={classes.subtitles}>
<Typography variant='subtitle1' className={classes.subtitle1}>
{name}
</Typography>
<Typography variant='caption' className={classes.caption}>
{type} - Level {level}
</Typography>
</div>
<Divider />
<ItemCardBody effects={effects} size='small' />
</>
)
}
const BuildItemTooltip = ({
children,
buildItem,
placement,
config
}) => {
const classes = useTooltipStyles()
if (!buildItem) return children
return (
<Tooltip
interactive
arrow
placement={placement}
classes={classes}
title={
<BuildItemTooltipContent
buildItem={buildItem}
config={config}
/>
}
>
{children}
</Tooltip>
)
}
export default BuildItemTooltip
|
#!/bin/sh
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2016, Even Rouault <even.rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
# One-time setup: fetch the codespell fork and build the aggregated
# dictionary used below. Skipped when ./fix_typos already exists.
if ! test -d fix_typos; then
    # Get even rouault fork of codespell that adds --words-white-list and full filename support for -S option
    mkdir fix_typos
    cd fix_typos
    git clone https://github.com/rouault/codespell
    cd codespell
    git checkout gdal_improvements
    cd ..
    # Aggregate base dictionary + QGIS one + Debian Lintian one
    # (QGIS entries use "word:fix"; rewrite to codespell's "word->fix"
    # form, dropping colour/colours which are valid British spellings.)
    curl https://raw.githubusercontent.com/qgis/QGIS/master/scripts/spelling.dat | sed "s/:/->/" | grep -v "colour->" | grep -v "colours->" > qgis.txt
    # NOTE(review): anonscm.debian.org has been decommissioned; this URL
    # likely fails now — confirm and migrate to salsa.debian.org.
    curl https://anonscm.debian.org/cgit/lintian/lintian.git/plain/data/spelling/corrections| grep "||" | grep -v "#" | sed "s/||/->/" > debian.txt
    # awk 'NF' drops empty lines from the merged dictionary.
    cat codespell/data/dictionary.txt qgis.txt debian.txt | awk 'NF' > otb_dict.txt
    cd ..
fi
#Build exclude files and directory list
EXCLUDED_FILES="*/.git*"
#Exclude third parties code, patches from superbuild and doxygen files
EXCLUDED_FILES="$EXCLUDED_FILES,*/Modules/ThirdParty/6S/*,*/Modules/ThirdParty/SiftFast/*,*/Modules/ThirdParty/SPTW/*,*/SuperBuild/patches/*"
#Exclude also documentation for now
EXCLUDED_FILES="$EXCLUDED_FILES,*/Documentation/SoftwareGuide/Art/*,*/Documentation/Cookbook/Art/*,*/Documentation/SoftwareGuide/Latex/Hyperspectral.txt,*/Documentation/Latex/Insight.bib"
#Exclude also Copyright folder
EXCLUDED_FILES="$EXCLUDED_FILES,*/Copyright/*,*/SuperBuild/Copyright/*"
# exclude maintenance, translation, license
EXCLUDED_FILES="$EXCLUDED_FILES,*/Utilities/Maintenance/fix_typos.sh,*/fix_typos/*,LICENSE,*/i18n/*"
#We use also words white list to handle deprecated classes/methods which are still there and contains some typos
#This list should be updated after each release when deprecated classes/methods are removed
# use with --words-white-list=$WORDS_WHITE_LIST
# for "Biologie du Developpement et de la Reproduction, INRA de Jouy-en-Josas, France" in LabelMapToLabelImageFilter
WORDS_WHITE_LIST="Developement"
# for dum variable in prosail
WORDS_WHITE_LIST="$WORDS_WHITE_LIST,dum"
# for pary variable
WORDS_WHITE_LIST="$WORDS_WHITE_LIST,pary"
# Run codespell in write-in-place mode (-w), auto-picking the 3rd
# suggestion on ambiguity (-i 3), quietly (-q 2), over the repo root.
python3 fix_typos/codespell/codespell.py -w -i 3 -q 2 -S $EXCLUDED_FILES \
    --words-white-list=$WORDS_WHITE_LIST \
    -D fix_typos/otb_dict.txt ../..
|
#!/bin/bash
set -e

# Install project git hooks: for each hook in HOOK_NAMES, if an
# executable git/hooks/<name>.sh exists in the repo, symlink it into
# .git/hooks under git's expected hook name.
#
# BUG FIX: git only runs hooks with hyphenated names (see githooks(5)),
# e.g. "pre-commit". The previous version linked the file as
# "pre_commit", which git silently ignores, so underscores in the
# source name are now translated to hyphens for the installed link.
HOOK_NAMES="pre_commit"

GIT_ROOT=$(git rev-parse --show-toplevel)
DEFAULT_HOOKS_DIR=${GIT_ROOT}/.git/hooks
CUSTOM_HOOKS_DIR=${GIT_ROOT}/git/hooks

for HOOK in ${HOOK_NAMES}; do
    # if custom hook exists and is executable then create symlink
    if [ -e ${CUSTOM_HOOKS_DIR}/${HOOK}.sh ] && [ -x ${CUSTOM_HOOKS_DIR}/${HOOK}.sh ]
    then
        GIT_HOOK_NAME=$(echo ${HOOK} | tr '_' '-')
        echo "hook enabled: ${CUSTOM_HOOKS_DIR}/${HOOK}.sh"
        ln -s -f ../../git/hooks/${HOOK}.sh ${DEFAULT_HOOKS_DIR}/${GIT_HOOK_NAME}
    fi
done
|
use PHPUnit\Framework\TestCase;
class YourClassTest extends TestCase
{
    /**
     * Verifies that the class under test wires its container and
     * repository-resolver collaborators together and produces the
     * expected result.
     */
    public function testContainerInteractions()
    {
        // Build the collaborator mocks.
        $container = $this->getMockBuilder('Container')
            ->setMethods(['get', 'getParameter'])
            ->getMock();
        $organizationRepo = $this->getMockBuilder('OrganizationRepository')
            ->setMethods(['find'])
            ->getMock();
        $ebiConfigRepo = $this->getMockBuilder('EbiConfigRepository')
            ->setMethods(['findOneBy'])
            ->getMock();
        $resolver = $this->getMockBuilder('RepositoryResolver')
            ->setMethods(['getRepository'])
            ->getMock();

        // The resolver hands out a repository mock per bundle alias.
        $repositoryMap = [
            ['SynapseCoreBundle:EbiConfig', $ebiConfigRepo],
            ['SynapseCoreBundle:Organization', $organizationRepo]
        ];
        $resolver->method('getRepository')->willReturnMap($repositoryMap);
        $container->method('getParameter')->willReturn('mockedParameterValue');

        // Exercise the class under test.
        $subject = new YourClass($container, $resolver);
        $subject->methodUnderTest();

        // Assert the observable outcome.
        $this->assertEquals('expectedValue', $subject->getResult());
    }
}
#!/bin/bash
#Template Version: 2021010401

# Generic password used by the template (not referenced by the commands
# below).
pw=XyzAbc.12345

# Derive the resource group from this script's own file name.
# Assumes SCRIPTNAME###.sh for the file name format.
rg=$(basename -- "$0" | tr -d '0123456789' | cut -d '.' -f 1)
echo "resource group: $rg"

# Virtual network with a subnet for the web servers.
az network vnet create \
    --resource-group $rg \
    --name vehicleAppVnet \
    --address-prefix 10.0.0.0/16 \
    --subnet-name webServerSubnet \
    --subnet-prefix 10.0.1.0/24

git clone https://github.com/MicrosoftDocs/mslearn-load-balance-web-traffic-with-application-gateway module-files

# Two identical web-server VMs without public IPs, configured via
# cloud-init custom data.
# BUG FIX: the first command previously ended in a stray trailing
# backslash, which spliced the second "az vm create" invocation into the
# first command's argument list instead of running it separately.
az vm create \
    --resource-group $rg \
    --name webServer1 \
    --image UbuntuLTS \
    --admin-username azureuser \
    --generate-ssh-keys \
    --vnet-name vehicleAppVnet \
    --subnet webServerSubnet \
    --public-ip-address "" \
    --nsg "" \
    --custom-data module-files/scripts/vmconfig.sh

az vm create \
    --resource-group $rg \
    --name webServer2 \
    --image UbuntuLTS \
    --admin-username azureuser \
    --generate-ssh-keys \
    --vnet-name vehicleAppVnet \
    --subnet webServerSubnet \
    --public-ip-address "" \
    --nsg "" \
    --custom-data module-files/scripts/vmconfig.sh

# Show the provisioned VMs.
az vm list \
    --resource-group $rg \
    --show-details \
    --output table

# Clean up the cloned repository.
rm -r module-files --force
|
/*
* Copyright (C) 2015 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.cloud.dataflow.sdk.testing;
import com.google.cloud.dataflow.sdk.runners.worker.logging.DataflowWorkerLoggingFormatter;
import org.junit.rules.ExternalResource;
/**
 * JUnit rule that snapshots the thread-local
 * {@code DataflowWorkerLoggingFormatter} parameters (job id, worker id,
 * work id) before each test and restores them afterwards, so that tests
 * cannot leak logging state into one another.
 */
public class RestoreDataflowLoggingFormatter extends ExternalResource {
  private String savedJobId;
  private String savedWorkerId;
  private String savedWorkId;

  public RestoreDataflowLoggingFormatter() {
  }

  /** Captures the current logging parameters before the test runs. */
  @Override
  protected void before() throws Throwable {
    savedJobId = DataflowWorkerLoggingFormatter.getJobId();
    savedWorkerId = DataflowWorkerLoggingFormatter.getWorkerId();
    savedWorkId = DataflowWorkerLoggingFormatter.getWorkId();
  }

  /** Restores the parameters captured by {@link #before()}. */
  @Override
  protected void after() {
    DataflowWorkerLoggingFormatter.setJobId(savedJobId);
    DataflowWorkerLoggingFormatter.setWorkerId(savedWorkerId);
    DataflowWorkerLoggingFormatter.setWorkId(savedWorkId);
  }
}
|
#!/bin/bash
# Bluetooth PAN (personal area network) bring-up, gated on a flag from
# an external settings file.
ENABLEBLUETOOTH=0
# NOTE(review): sources a file from world-writable /tmp to pick up
# settings (ENABLEBLUETOOTH, NAME) — any local user could inject shell
# code here; confirm this file's provenance and permissions.
source /tmp/quickpi.txt
# Bail out unless the sourced config explicitly enabled bluetooth.
if [ $ENABLEBLUETOOTH -ne "1" ]; then
	exit;
fi
# Bridge device for the NAP (network access point) profile.
/sbin/brctl addbr pan0
/usr/bin/bt-network -s nap pan0 &
sleep 1
# Accept pairings without PIN/confirmation interaction.
/usr/bin/bt-agent -c NoInputNoOutput &
# $NAME is expected to come from /tmp/quickpi.txt — verify it is set.
/usr/bin/bt-adapter --set Alias $NAME
# Remain discoverable indefinitely.
/usr/bin/bt-adapter --set DiscoverableTimeout 0
/usr/bin/bt-adapter --set Discoverable 1
|
<reponame>arfcodes/frontend-toolkit<gh_stars>0
/**
* Demo Button
*/
import React from 'react';
const Button = () => (
<div className="d-button">
<h1>Button</h1>
</div>
);
export default Button;
|
#!/bin/bash
# Benchmark driver for Policy2QBF_ME: runs the solver with instance
# sizes from 10000 to 100000 in steps of 10000.
#echo > sat4jresults
#{ java -Xss4g -Xmx16g -jar Policy2QBF.jar s 1000 1 0.1 10 1 0.1 10 0; } 2> sat4jresults
#echo 'RAReQS (AVG, STD):'
#echo 'GhostQ (AVG, STD):'
#COMMAND='{ java -Xss4g -Xmx16g -jar Policy2QBF.jar s 1000 1 0.1 10 1 0.1 10 0; } 2> sat4jresults'
# Truncate result/log files from any previous run.
echo > sat4J_1.dat
echo > sat4J_2.dat
echo > sat4J_3.dat
echo > logs
echo > logs2
echo > logs3
echo > nohup.out
#-XX:-UseGCOverheadLimit
#-Xss64g
COUNTER=10000
while [ $COUNTER -lt 100001 ]; do
# NOTE(review): "2>&1 sat4jresults" does NOT redirect into the file —
# it merges stderr into stdout and then passes "sat4jresults" to java
# as an extra program argument. The commented invocations above used
# "2> sat4jresults"; confirm the intended redirection.
java -server -Xms40g -Xmx60g -jar Policy2QBF_ME.jar 20 s 1 3 3 $COUNTER 4 200 5 5 4 0.2 1 900 2>&1 sat4jresults
let COUNTER=COUNTER+10000
done
|
#!/bin/bash
# Install an auditd file watch: log writes and attribute changes
# (-p wa) on /run/something, tagged with audit key "session".
mkdir -p /etc/audit/rules.d
echo "-w /run/something -p wa -k session" >> /etc/audit/rules.d/login.rules
|
#!/bin/bash
#
# see https://github.com/MiczFlor/RPi-Jukebox-RFID for details
# Especially the docs folder for documentation
# The absolute path to the folder which contains this script
PATHDATA="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
clear
echo "#####################################################
# ___ __ ______ _ __________ ____ __ _ _ #
# / _ \/ // / __ \/ |/ / _/ __/( _ \ / \( \/ ) #
# / ___/ _ / /_/ / // // _/ ) _ (( O )) ( #
# /_/ /_//_/\____/_/|_/___/____/ (____/ \__/(_/\_) #
# #
#####################################################
Welcome to the installation script.
This script will install Phoniebox on your Raspberry Pi.
To do so, you must be online. The install script can
automatically configure:
* WiFi settings (SSID, password and static IP)
All these are optional and can also be done later
manually.
If you are ready, hit ENTER"
read INPUT
#####################################################
# CONFIG FILE
# This file will contain all the data given in the
# following dialogue
# At a later stage, the install should also be done
# from such a config file with no user input.
# Remove any existing config file and start a fresh one.
# BUG FIX: rm/touch previously acted on the current working directory,
# while every later write in this script targets $PATHDATA — use
# $PATHDATA throughout, and -f so a missing file does not print an
# error. The redirect below creates the file, so no separate touch is
# needed.
rm -f "$PATHDATA/PhonieboxInstall.conf"
echo "# Phoniebox config" > $PATHDATA/PhonieboxInstall.conf
#####################################################
# Ask if wifi config
clear
echo "#####################################################
#
# CONFIGURE WIFI
#
# Requires SSID, WiFi password and the static IP you want
# to assign to your Phoniebox.
# (Note: can be done manually later, if you are unsure.)
"
read -r -p "Do you want to configure your WiFi? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
WIFIconfig=NO
echo "You want to configure WiFi later."
echo "Hit ENTER to proceed to the next step."
read INPUT
# append variables to config file
echo "WIFIconfig=$WIFIconfig" >> $PATHDATA/PhonieboxInstall.conf
# make a fallback for WiFi Country Code, because we need that even without WiFi config
echo "WIFIcountryCode=DE" >> $PATHDATA/PhonieboxInstall.conf
;;
*)
WIFIconfig=YES
#Ask for ssid
echo "* Type SSID name"
read INPUT
WIFIssid="$INPUT"
#Ask for wifi country code
echo "* WiFi Country Code (e.g. DE, GB, CZ or US)"
read INPUT
WIFIcountryCode="$INPUT"
#Ask for password
echo "* Type password"
read INPUT
WIFIpass="$INPUT"
#Ask for IP
echo "* Static IP (e.g. 192.168.1.199)"
read INPUT
WIFIip="$INPUT"
#Ask for Router IP
echo "* Router IP (e.g. 192.168.1.1)"
read INPUT
WIFIipRouter="$INPUT"
echo "Your WiFi config:"
echo "SSID : $WIFIssid"
echo "WiFi Country Code : $WIFIcountryCode"
echo "Password : $WIFIpass"
echo "Static IP : $WIFIip"
echo "Router IP : $WIFIipRouter"
read -r -p "Are these values correct? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
echo "The values are incorrect."
echo "Hit ENTER to exit and start over."
read INPUT; exit
;;
*)
# append variables to config file
echo "WIFIconfig=\"$WIFIconfig\"" >> $PATHDATA/PhonieboxInstall.conf
echo "WIFIcountryCode=\"$WIFIcountryCode\"" >> $PATHDATA/PhonieboxInstall.conf
echo "WIFIssid=\"$WIFIssid\"" >> $PATHDATA/PhonieboxInstall.conf
echo "WIFIpass=\"$WIFIpass\"" >> $PATHDATA/PhonieboxInstall.conf
echo "WIFIip=\"$WIFIip\"" >> $PATHDATA/PhonieboxInstall.conf
echo "WIFIipRouter=\"$WIFIipRouter\"" >> $PATHDATA/PhonieboxInstall.conf
;;
esac
;;
esac
#####################################################
# Check for existing Phoniebox
#
# In case there is no existing install,
# set the var now for later use:
EXISTINGuse=NO
# The install will be in the home dir of user pi
# Move to home directory now to check
cd
if [ -d /home/pi/RPi-Jukebox-RFID ]; then
# Houston, we found something!
clear
echo "#####################################################
#
# . . . * alert * alert * alert * alert * . . .
#
# WARNING: an existing Phoniebox installation was found.
#
"
# check if we find the version number
if [ -f /home/pi/RPi-Jukebox-RFID/settings/version ]; then
echo "The version of your installation is: $(cat RPi-Jukebox-RFID/settings/version)"
fi
echo "IMPORTANT: you can use the existing content and configuration files for your new install."
echo "Whatever you chose to keep will be moved to the new install."
echo "Everything else will remain in a folder called 'BACKUP'.
"
# Delete or use existing installation?
read -r -p "Re-use config, audio and RFID codes for the new install? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
EXISTINGuse=NO
echo "Phoniebox will be a fresh install. The existing version will be dropped."
echo "Hit ENTER to proceed to the next step."
sudo rm -rf RPi-Jukebox-RFID
read INPUT
;;
*)
EXISTINGuse=YES
# CREATE BACKUP
# delete existing BACKUP dir if exists
if [ -d BACKUP ]; then
sudo rm -r BACKUP
fi
# move install to BACKUP dir
mv RPi-Jukebox-RFID BACKUP
# delete .git dir
if [ -d BACKUP/.git ]; then
sudo rm -r BACKUP/.git
fi
# delete placeholder files so moving the folder content back later will not create git pull conflicts
rm BACKUP/shared/audiofolders/placeholder
rm BACKUP/shared/shortcuts/placeholder
# ask for things to use
echo "Ok. You want to use stuff from the existing installation."
echo "What would you want to keep? Answer now."
read -r -p "RFID config for system control (e.g. 'volume up' etc.)? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
EXISTINGuseRfidConf=NO
;;
*)
EXISTINGuseRfidConf=YES
;;
esac
# append variables to config file
echo "EXISTINGuseRfidConf=$EXISTINGuseRfidConf" >> $PATHDATA/PhonieboxInstall.conf
read -r -p "RFID shortcuts to play audio folders? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
EXISTINGuseRfidLinks=NO
;;
*)
EXISTINGuseRfidLinks=YES
;;
esac
# append variables to config file
echo "EXISTINGuseRfidLinks=$EXISTINGuseRfidLinks" >> $PATHDATA/PhonieboxInstall.conf
read -r -p "Audio folders: use existing? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
EXISTINGuseAudio=NO
;;
*)
EXISTINGuseAudio=YES
;;
esac
# append variables to config file
echo "EXISTINGuseAudio=$EXISTINGuseAudio" >> $PATHDATA/PhonieboxInstall.conf
read -r -p "GPIO: use existing file? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
EXISTINGuseGpio=NO
;;
*)
EXISTINGuseGpio=YES
;;
esac
# append variables to config file
echo "EXISTINGuseGpio=$EXISTINGuseGpio" >> $PATHDATA/PhonieboxInstall.conf
read -r -p "Sound effects: use existing startup / shutdown sounds? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
EXISTINGuseSounds=NO
;;
*)
EXISTINGuseSounds=YES
;;
esac
# append variables to config file
echo "EXISTINGuseSounds=$EXISTINGuseSounds" >> $PATHDATA/PhonieboxInstall.conf
echo "Thanks. Got it."
echo "The existing install can be found in the BACKUP directory."
echo "Hit ENTER to proceed to the next step."
read INPUT
;;
esac
fi
# append variables to config file
echo "EXISTINGuse=$EXISTINGuse" >> $PATHDATA/PhonieboxInstall.conf
#####################################################
# Audio iFace
clear
echo "#####################################################
#
# CONFIGURE AUDIO INTERFACE (iFace)
#
# By default for the RPi the audio interface would be 'PCM'.
# But this does not work for every setup, alternatives are
# 'Master' or 'Speaker'. Other external sound cards might
# use different interface names.
# To list all available iFace names, type 'amixer scontrols'
# in the terminal.
"
read -r -p "Use PCM as iFace? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
echo "Type the iFace name you want to use:"
read INPUT
AUDIOiFace="$INPUT"
;;
*)
AUDIOiFace="PCM"
;;
esac
# append variables to config file
echo "AUDIOiFace=\"$AUDIOiFace\"" >> $PATHDATA/PhonieboxInstall.conf
echo "Your iFace is called'$AUDIOiFace'"
echo "Hit ENTER to proceed to the next step."
read INPUT
#####################################################
# Configure MPD
clear
echo "#####################################################
#
# CONFIGURE MPD
#
# MPD (Music Player Daemon) runs the audio output and must
# be configured. Do it now, if you are unsure.
# (Note: can be done manually later.)
"
read -r -p "Do you want to configure MPD? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
MPDconfig=NO
echo "You want to configure MPD later."
echo "Hit ENTER to proceed to the next step."
read INPUT
;;
*)
MPDconfig=YES
echo "MPD will be set up with default values."
echo "Hit ENTER to proceed to the next step."
read INPUT
;;
esac
# append variables to config file
echo "MPDconfig=\"$MPDconfig\"" >> $PATHDATA/PhonieboxInstall.conf
#####################################################
# Folder path for audio files
# default: /home/pi/RPi-Jukebox-RFID/shared/audiofolders
clear
echo "#####################################################
#
# FOLDER CONTAINING AUDIO FILES
#
# The default location for folders containing audio files:
# /home/pi/RPi-Jukebox-RFID/shared/audiofolders
#
# If unsure, keep it like this. If your files are somewhere
# else, you can specify the folder in the next step.
# IMPORTANT: the folder will not be created, only the path
# will be remembered. If you use a custom folder, you must
# create it.
"
read -r -p "Do you want to use the default location? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
echo "Please type the absolute path here (no trailing slash)."
echo "Default would be for example:"
echo "/home/pi/RPi-Jukebox-RFID/shared/audiofolders"
read INPUT
DIRaudioFolders="$INPUT"
;;
*)
DIRaudioFolders="/home/pi/RPi-Jukebox-RFID/shared/audiofolders"
;;
esac
# append variables to config file
echo "DIRaudioFolders=\"$DIRaudioFolders\"" >> $PATHDATA/PhonieboxInstall.conf
echo "Your audio folders live in this dir:"
echo $DIRaudioFolders
echo "Hit ENTER to proceed to the next step."
read INPUT
clear
echo "#####################################################
#
# START INSTALLATION
#
# Good news: you completed the input.
# Let the install begin.
#
# Get yourself a cup of something. The install takes
# between 15 minutes to half an hour, depending on
# your Raspberry Pi and Internet connectivity.
#
# You will be prompted later to complete the installation.
"
read -r -p "Do you want to start the installation? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
echo "Exiting the installation."
echo "Your configuration data was saved in this file:"
echo $PATHDATA/PhonieboxInstall.conf
echo
exit
;;
esac
#####################################################
# INSTALLATION
# Read install config as written so far
# (this might look stupid so far, but makes sense once
# the option to install from config file is introduced.)
. $PATHDATA/PhonieboxInstall.conf
# power management of wifi: switch off to avoid disconnecting
sudo iwconfig wlan0 power off
# Install required packages
sudo apt-get update
sudo apt-get --yes --force-yes install apt-transport-https samba samba-common-bin python-dev python-pip gcc linux-headers-4.9 lighttpd php7.0-common php7.0-cgi php7.0 php7.0-fpm at mpd mpc mpg123 git ffmpeg python-mutagen python3-gpiozero
# Get github code
cd /home/pi/
git clone https://github.com/MiczFlor/RPi-Jukebox-RFID.git
# the following three lines are needed as long as this is not the master branch:
cd /home/pi/RPi-Jukebox-RFID
git fetch
# Install more required packages
sudo pip install -r requirements.txt
# actually, for the time being most of the requirements are run here.
# the requirements.txt version seems to throw errors. Help if you can to fix this:
sudo pip install "evdev == 0.7.0"
sudo pip install --upgrade youtube_dl
sudo pip install git+git://github.com/lthiery/SPI-Py.git#egg=spi-py
sudo pip install pyserial
sudo pip install spidev
sudo pip install RPi.GPIO
sudo pip install pi-rc522
# Switch of WiFi power management
sudo iwconfig wlan0 power off
# Samba configuration settings
# -rw-r--r-- 1 root root 9416 Apr 30 09:02 /etc/samba/smb.conf
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/smb.conf.stretch-default2.sample /etc/samba/smb.conf
sudo chown root:root /etc/samba/smb.conf
sudo chmod 644 /etc/samba/smb.conf
# for $DIRaudioFolders using | as alternate regex delimiter because of the folder path slash
sudo sed -i 's|%DIRaudioFolders%|'"$DIRaudioFolders"'|' /etc/samba/smb.conf
# Samba: create user 'pi' with password 'raspberry'
(echo "raspberry"; echo "raspberry") | sudo smbpasswd -s -a pi
# Web server configuration settings
# -rw-r--r-- 1 root root 1040 Apr 30 09:19 /etc/lighttpd/lighttpd.conf
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/lighttpd.conf.stretch-default.sample /etc/lighttpd/lighttpd.conf
sudo chown root:root /etc/lighttpd/lighttpd.conf
sudo chmod 644 /etc/lighttpd/lighttpd.conf
# Web server PHP7 fastcgi conf
# -rw-r--r-- 1 root root 398 Apr 30 09:35 /etc/lighttpd/conf-available/15-fastcgi-php.conf
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/15-fastcgi-php.conf.stretch-default.sample /etc/lighttpd/conf-available/15-fastcgi-php.conf
sudo chown root:root /etc/lighttpd/conf-available/15-fastcgi-php.conf
sudo chmod 644 /etc/lighttpd/conf-available/15-fastcgi-php.conf
# settings for php.ini to support upload
# -rw-r--r-- 1 root root 70999 Jun 14 13:50 /etc/php/7.0/fpm/php.ini
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/php.ini.stretch-default.sample /etc/php/7.0/fpm/php.ini
sudo chown root:root /etc/php/7.0/fpm/php.ini
sudo chmod 644 /etc/php/7.0/fpm/php.ini
# SUDO users (adding web server here)
# -r--r----- 1 root root 703 Nov 17 21:08 /etc/sudoers
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/sudoers.stretch-default.sample /etc/sudoers
sudo chown root:root /etc/sudoers
sudo chmod 440 /etc/sudoers
# copy shell script for player
cp /home/pi/RPi-Jukebox-RFID/settings/rfid_trigger_play.conf.sample /home/pi/RPi-Jukebox-RFID/settings/rfid_trigger_play.conf
# creating files containing editable values for configuration
# DISCONTINUED: now done by MPD? echo "PCM" > /home/pi/RPi-Jukebox-RFID/settings/Audio_iFace_Name
echo "$AUDIOiFace" > /home/pi/RPi-Jukebox-RFID/settings/Audio_iFace_Name
echo "$DIRaudioFolders" > /home/pi/RPi-Jukebox-RFID/settings/Audio_Folders_Path
echo "3" > /home/pi/RPi-Jukebox-RFID/settings/Audio_Volume_Change_Step
echo "100" > /home/pi/RPi-Jukebox-RFID/settings/Max_Volume_Limit
echo "0" > /home/pi/RPi-Jukebox-RFID/settings/Idle_Time_Before_Shutdown
echo "RESTART" > /home/pi/RPi-Jukebox-RFID/settings/Second_Swipe
echo "/home/pi/RPi-Jukebox-RFID/playlists" > /home/pi/RPi-Jukebox-RFID/settings/Playlists_Folders_Path
echo "ON" > /home/pi/RPi-Jukebox-RFID/settings/ShowCover
# The new way of making the bash daemon is using the helperscripts
# creating the shortcuts and script from a CSV file.
# see scripts/helperscripts/AssignIDs4Shortcuts.php
# create config file for web app from sample
sudo cp /home/pi/RPi-Jukebox-RFID/htdocs/config.php.sample /home/pi/RPi-Jukebox-RFID/htdocs/config.php
# Starting web server and php7
sudo lighttpd-enable-mod fastcgi
sudo lighttpd-enable-mod fastcgi-php
sudo service lighttpd force-reload
sudo service php7.0-fpm restart
# create copy of GPIO script
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/gpio-buttons.py.sample /home/pi/RPi-Jukebox-RFID/scripts/gpio-buttons.py
sudo chmod +x /home/pi/RPi-Jukebox-RFID/scripts/gpio-buttons.py
# make sure bash scripts have the right settings
sudo chown pi:www-data /home/pi/RPi-Jukebox-RFID/scripts/*.sh
sudo chmod +x /home/pi/RPi-Jukebox-RFID/scripts/*.sh
sudo chown pi:www-data /home/pi/RPi-Jukebox-RFID/scripts/*.py
sudo chmod +x /home/pi/RPi-Jukebox-RFID/scripts/*.py
# services to launch after boot using systemd
# -rw-r--r-- 1 root root 304 Apr 30 10:07 phoniebox-rfid-reader.service
# 1. delete old services (this is legacy, might throw errors but is necessary. Valid for versions < 1.1.8-beta)
echo "### Deleting older versions of service daemons. This might throw errors, ignore them"
sudo systemctl disable idle-watchdog
sudo systemctl disable rfid-reader
sudo systemctl disable startup-sound
sudo systemctl disable gpio-buttons
sudo rm /etc/systemd/system/rfid-reader.service
sudo rm /etc/systemd/system/startup-sound.service
sudo rm /etc/systemd/system/gpio-buttons.service
sudo rm /etc/systemd/system/idle-watchdog.service
echo "### Done with erasing old daemons. Stop ignoring errors!"
# 2. install new ones - this is version > 1.1.8-beta
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/phoniebox-rfid-reader.service.stretch-default.sample /etc/systemd/system/phoniebox-rfid-reader.service
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/phoniebox-startup-sound.service.stretch-default.sample /etc/systemd/system/phoniebox-startup-sound.service
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/phoniebox-gpio-buttons.service.stretch-default.sample /etc/systemd/system/phoniebox-gpio-buttons.service
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/phoniebox-idle-watchdog.service.sample /etc/systemd/system/phoniebox-idle-watchdog.service
sudo chown root:root /etc/systemd/system/phoniebox-rfid-reader.service
sudo chown root:root /etc/systemd/system/phoniebox-startup-sound.service
sudo chown root:root /etc/systemd/system/phoniebox-gpio-buttons.service
sudo chown root:root /etc/systemd/system/phoniebox-idle-watchdog.service
sudo chmod 644 /etc/systemd/system/phoniebox-rfid-reader.service
sudo chmod 644 /etc/systemd/system/phoniebox-startup-sound.service
sudo chmod 644 /etc/systemd/system/phoniebox-gpio-buttons.service
sudo chmod 644 /etc/systemd/system/phoniebox-idle-watchdog.service
# enable the services needed
sudo systemctl enable phoniebox-idle-watchdog
sudo systemctl enable phoniebox-rfid-reader
sudo systemctl enable phoniebox-startup-sound
sudo systemctl enable phoniebox-gpio-buttons
# copy mp3s for startup and shutdown sound to the right folder
cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/startupsound.mp3.sample /home/pi/RPi-Jukebox-RFID/shared/startupsound.mp3
cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/shutdownsound.mp3.sample /home/pi/RPi-Jukebox-RFID/shared/shutdownsound.mp3
# MPD configuration
# -rw-r----- 1 mpd audio 14043 Jul 17 20:16 /etc/mpd.conf
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/mpd.conf.sample /etc/mpd.conf
# Change vars to match install config
sudo sed -i 's/%AUDIOiFace%/'"$AUDIOiFace"'/' /etc/mpd.conf
# for $DIRaudioFolders using | as alternate regex delimiter because of the folder path slash
sudo sed -i 's|%DIRaudioFolders%|'"$DIRaudioFolders"'|' /etc/mpd.conf
echo "classic" > /home/pi/RPi-Jukebox-RFID/settings/edition
sudo chown mpd:audio /etc/mpd.conf
sudo chmod 640 /etc/mpd.conf
# update mpc / mpd DB
mpc update
###############################
# WiFi settings (SSID password)
#
# https://www.raspberrypi.org/documentation/configuration/wireless/wireless-cli.md
#
# $WIFIssid
# $WIFIpass
# $WIFIip
# $WIFIipRouter
if [ $WIFIconfig == "YES" ]
then
# DHCP configuration settings
#-rw-rw-r-- 1 root netdev 0 Apr 17 11:25 /etc/dhcpcd.conf
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/dhcpcd.conf.stretch-default2-noHotspot.sample /etc/dhcpcd.conf
# Change IP for router and Phoniebox
sudo sed -i 's/%WIFIip%/'"$WIFIip"'/' /etc/dhcpcd.conf
sudo sed -i 's/%WIFIipRouter%/'"$WIFIipRouter"'/' /etc/dhcpcd.conf
sudo sed -i 's/%WIFIcountryCode%/'"$WIFIcountryCode"'/' /etc/dhcpcd.conf
# Change user:group and access mod
sudo chown root:netdev /etc/dhcpcd.conf
sudo chmod 664 /etc/dhcpcd.conf
# WiFi SSID & Password
# -rw-rw-r-- 1 root netdev 137 Jul 16 08:53 /etc/wpa_supplicant/wpa_supplicant.conf
sudo cp /home/pi/RPi-Jukebox-RFID/misc/sampleconfigs/wpa_supplicant.conf.stretch.sample /etc/wpa_supplicant/wpa_supplicant.conf
sudo sed -i 's/%WIFIssid%/'"$WIFIssid"'/' /etc/wpa_supplicant/wpa_supplicant.conf
sudo sed -i 's/%WIFIpass%/'"$WIFIpass"'/' /etc/wpa_supplicant/wpa_supplicant.conf
sudo sed -i 's/%WIFIcountryCode%/'"$WIFIcountryCode"'/' /etc/wpa_supplicant/wpa_supplicant.conf
sudo chown root:netdev /etc/wpa_supplicant/wpa_supplicant.conf
sudo chmod 664 /etc/wpa_supplicant/wpa_supplicant.conf
fi
# start DHCP
sudo service dhcpcd start
sudo systemctl enable dhcpcd
# / WiFi settings (SSID password)
###############################
# / INSTALLATION
#####################################################
#####################################################
# EXISTING ASSETS TO USE FROM EXISTING INSTALL
if [ $EXISTINGuse == "YES" ]
then
# RFID config for system control
if [ $EXISTINGuseRfidConf == "YES" ]
then
# read old values and write them into new file (copied above already)
# do not overwrite but use 'sed' in case there are new vars in new version installed
# Read the existing RFID config file line by line and use
# only lines which are separated (IFS) by '='.
while IFS== read -r key val ; do
# $var should be stripped of possible leading or trailing "
val=${val%\"}
val=${val#\"}
key=${key}
# Additional error check: key should not start with a hash and not be empty.
if ([ ! ${key:0:1} == '#' ] && [ ! -z "$key" ])
then
# Replace the matching value in the newly created conf file
sed -i 's/%'"$key"'%/'"$val"'/' /home/pi/RPi-Jukebox-RFID/settings/rfid_trigger_play.conf
fi
done </home/pi/BACKUP/settings/rfid_trigger_play.conf
fi
# RFID shortcuts for audio folders
if [ $EXISTINGuseRfidLinks == "YES" ]
then
# copy from backup to new install
mv /home/pi/BACKUP/shared/shortcuts/* /home/pi/RPi-Jukebox-RFID/shared/shortcuts/
fi
# Audio folders: use existing
if [ $EXISTINGuseAudio == "YES" ]
then
# copy from backup to new install
mv /home/pi/BACKUP/shared/audiofolders/* "$DIRaudioFolders/"
fi
# GPIO: use existing file
if [ $EXISTINGuseGpio == "YES" ]
then
# copy from backup to new install
mv /home/pi/BACKUP/scripts/gpio-buttons.py /home/pi/RPi-Jukebox-RFID/scripts/gpio-buttons.py
fi
# Sound effects: use existing startup / shutdown sounds
if [ $EXISTINGuseSounds == "YES" ]
then
# copy from backup to new install
mv /home/pi/BACKUP/shared/startupsound.mp3 /home/pi/RPi-Jukebox-RFID/shared/startupsound.mp3
mv /home/pi/BACKUP/shared/shutdownsound.mp3 /home/pi/RPi-Jukebox-RFID/shared/shutdownsound.mp3
fi
fi
# / EXISTING ASSETS TO USE FROM EXISTING INSTALL
#####################################################
#####################################################
# Folders and Access Settings
# create playlists folder
mkdir /home/pi/RPi-Jukebox-RFID/playlists
sudo chown -R pi:www-data /home/pi/RPi-Jukebox-RFID/playlists
sudo chmod -R 775 /home/pi/RPi-Jukebox-RFID/playlists
# make sure the shared folder is accessible by the web server
sudo chown -R pi:www-data /home/pi/RPi-Jukebox-RFID/shared
sudo chmod -R 775 /home/pi/RPi-Jukebox-RFID/shared
# make sure the htdocs folder can be changed by the web server
sudo chown -R pi:www-data /home/pi/RPi-Jukebox-RFID/htdocs
sudo chmod -R 775 /home/pi/RPi-Jukebox-RFID/htdocs
sudo chown -R pi:www-data /home/pi/RPi-Jukebox-RFID/settings
sudo chmod -R 775 /home/pi/RPi-Jukebox-RFID/settings
# audio folders might be somewhere else, so treat them separately
sudo chown pi:www-data "$DIRaudioFolders"
sudo chmod 775 "$DIRaudioFolders"
# make sure bash scripts have the right settings
sudo chown pi:www-data /home/pi/RPi-Jukebox-RFID/scripts/*.sh
sudo chmod +x /home/pi/RPi-Jukebox-RFID/scripts/*.sh
sudo chown pi:www-data /home/pi/RPi-Jukebox-RFID/scripts/*.py
sudo chmod +x /home/pi/RPi-Jukebox-RFID/scripts/*.py
# set audio volume to 100%
# see: https://github.com/MiczFlor/RPi-Jukebox-RFID/issues/54
sudo amixer cset numid=1 100%
# / Access settings
#####################################################
echo "
#
# INSTALLATION FINISHED
#
#####################################################
"
#####################################################
# Register external device(s)
echo "If you are using an USB RFID reader, connect it to your RPi."
echo "(In case your RFID reader required soldering, consult the manual.)"
read -r -p "Have you connected your USB Reader? [Y/n] " response
case "$response" in
[nN][oO]|[nN])
;;
*)
cd /home/pi/RPi-Jukebox-RFID/scripts/
python2 RegisterDevice.py
sudo chown pi:www-data /home/pi/RPi-Jukebox-RFID/scripts/deviceName.txt
sudo chmod 644 /home/pi/RPi-Jukebox-RFID/scripts/deviceName.txt
;;
esac
echo
echo "DONE. Let the sounds begin."
echo "Find more information and documentation on the github account:"
echo "https://github.com/MiczFlor/RPi-Jukebox-RFID/wiki/"
#####################################################
# notes for things to do
# Soundcard
# PCM is currently set
# This needs to be done for mpd and in settings folder
#Ask if Spotify config
#If Spotify
#Ask for user
#Ask for password
#Ask ssh password
# get existing install
# new config should be done with sed using existing conf and user input
# CLEANUP
## remove dir BACKUP (possibly not, because we do this at the beginning after user confirms for latest config)
|
<gh_stars>1-10
package com.firebase.ui.auth.ui;
import android.content.IntentSender;
import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.annotation.RestrictTo;
import android.support.v4.app.Fragment;
import android.view.ContextThemeWrapper;
import com.firebase.ui.auth.data.model.FlowParameters;
import com.firebase.ui.auth.util.AuthHelper;
@RestrictTo(RestrictTo.Scope.LIBRARY_GROUP)
public class FragmentBase extends Fragment {

    // Flow parameters are parsed lazily from the fragment's arguments.
    private FlowParameters mFlowParameters;
    private AuthHelper mAuthHelper;
    private ProgressDialogHolder mProgressDialogHolder;

    @Override
    public void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        mAuthHelper = new AuthHelper(getFlowParams());
        // Dialogs must use the flow's theme rather than the host activity's.
        ContextThemeWrapper themedContext =
                new ContextThemeWrapper(getContext(), getFlowParams().themeId);
        mProgressDialogHolder = new ProgressDialogHolder(themedContext);
    }

    @Override
    public void onDestroy() {
        super.onDestroy();
        // Avoid leaking a window if a progress dialog is still showing.
        mProgressDialogHolder.dismissDialog();
    }

    /** Returns the flow parameters, parsing them from the arguments on first use. */
    public FlowParameters getFlowParams() {
        if (mFlowParameters != null) {
            return mFlowParameters;
        }
        mFlowParameters = FlowParameters.fromBundle(getArguments());
        return mFlowParameters;
    }

    public AuthHelper getAuthHelper() {
        return mAuthHelper;
    }

    public ProgressDialogHolder getDialogHolder() {
        return mProgressDialogHolder;
    }

    /** Convenience overload that forwards with empty extras and zero flags. */
    public void startIntentSenderForResult(IntentSender sender, int requestCode)
            throws IntentSender.SendIntentException {
        startIntentSenderForResult(sender, requestCode, null, 0, 0, 0, null);
    }
}
|
"""
Example of of simple souce RayPencil with two apertures.
"""
from poptics.ray import RayPencil, RayPath, SourcePoint
from poptics.surface import CircularAperture, IrisAperture
from poptics.vector import Vector3d
import matplotlib.pyplot as plt
def main():
# Form two apertures both 20mm with Iris closed to 0.5 ratio
ca = CircularAperture(50,20)
iris = IrisAperture(80,20,0.5)
# source for the rays at (0,10,-50) in global coordinates
source = SourcePoint(Vector3d(0.0,10,-50))
# Form a pencil is the circular aperture as specified angle of 0.45 microns
# and add a RayPath to ech ray
pencil = RayPencil().addBeam(ca,source,wavelength = 0.65).addMonitor(RayPath())
# Propgate throgh the the both aperture and another 30 mm to make it visible
pencil *= ca
pencil *= iris
pencil += 30
# Make a diagram
ca.draw()
iris.draw()
pencil.draw()
plt.axis("equal")
plt.show()
main()
|
<reponame>NikitaChernykh/BestGoogleExtension2017
/* global chrome */
// moment-timezone is used below for date-stamping storage records.
const moment = require('moment-timezone');
// Announce that the background page has been (re)loaded.
console.log("Background loaded");
// Read `key` from chrome.storage.local and hand the stored value to `callback`.
function loadData(key, callback) {
    chrome.storage.local.get(key, function (stored) {
        callback(stored);
    });
}
// clear storage
// Wipe everything this extension has persisted in chrome.storage.local.
function clearStorage(){
    chrome.storage.local.clear();
    console.log("clearStorage ran");
}
init();
function init(){
chrome.browserAction.setBadgeText({ text: "BETA" });
loadData('daily_summaries',data => {
if(isEmpty(data)){
console.log(data);
console.log("localStorage is empty");
chrome.storage.local.set({'daily_summaries':[
{
date: moment(),
websiteList:[],
totalVisits: 0,
totalTime: null
}
]})
console.log("localStorage initialised...");
}else{
console.log(data);
console.log("localStorage is not empty");
}
});
}
// True when `obj` has no own enumerable properties.
function isEmpty(obj) {
    return Object.entries(obj).length === 0;
}
chrome.tabs.onActivated.addListener(function (activeInfo) {
console.log("onActivated");
console.log(activeInfo);
chrome.tabs.get(activeInfo.tabId,(tab)=>{ processTab(tab)});
});
chrome.tabs.onUpdated.addListener(function (tabId, changeInfo, tab) {
console.log("onUpdated");
processTab(tab);
});
// Derive a tab's URL and bare host name and log them.
// The per-site bookkeeping (update existing entry vs. create a new one,
// sketched in the original commented-out existCheck call) is still TODO.
function processTab(tab) {
    console.log(tab);
    const pageUrl = tab.url;
    const pageHost = extractDomain(tab.url);
    console.log(`${pageUrl}, ${pageHost}`);
}
// chrome.windows.onFocusChanged.addListener(function(window) {
// console.log("onFocusChanged");
// console.log(window);
// });
// Return true when an entry whose websiteName equals `name` exists in `list`.
// The hand-rolled index loop is replaced with the idiomatic
// Array.prototype.some, which also short-circuits on the first match.
function existCheck(name, list) {
    console.log(list);
    return list.some((site) => site.websiteName === name);
}
// Split `val` on `devider` and return the piece at `index`.
// (Name kept as-is — other functions in this file call it by this name.)
function distructureArray(val, devider, index) {
    const parts = val.split(devider);
    return parts[index];
}
// Extract the bare host name from a URL string.
// Returns '' when the stripped result contains no dot (e.g. "localhost"),
// and undefined when `url` itself is undefined — matching the original.
// The small split helper the original delegated to is inlined here.
function extractDomain(url) {
    if (url === undefined) {
        return;
    }
    // Drop the protocol (if any) to isolate the host portion.
    const withPort = url.indexOf('://') > -1 ? url.split('/')[2] : url.split('/')[0];
    // Drop a trailing :port.
    const host = withPort.split(':')[0];
    // A host without any dot is not a real domain — report it as empty.
    return host.match(/[.]/gi) === null ? '' : host;
}
#!/bin/bash
# Bootstrap a TALI training run inside a container: refresh the checkout,
# install dependencies, mount the experiment disk, then launch a resumable
# multi-GPU job. Assumes CODE_DIR is exported by the environment -- TODO confirm.
export HOME=/root/
source $HOME/.bashrc
source $HOME/conda/bin/activate
conda activate tali
cd $CODE_DIR
git pull
pip install -r $CODE_DIR/requirements.txt
source $CODE_DIR/setup_scripts/setup_base_experiment_disk.sh
source $CODE_DIR/setup_scripts/setup_wandb_credentials.sh
cd $CODE_DIR
# Kill any stale processes still holding the GPUs before starting the run.
fuser -k /dev/nvidia*; \
python $CODE_DIR/run.py \
  hydra.verbose=True \
  trainer=default \
  resume=True \
  batch_size=32 \
  trainer.gpus=16 \
  trainer.auto_scale_batch_size=True \
  datamodule.dataset_config.rescan_paths=True \
  datamodule.prefetch_factor=3 \
  datamodule.num_workers=96 \
  model=base_modus_prime_vi-transformer16 \
  datamodule.dataset_config.dataset_size_identifier=base \
  datamodule.dataset_config.modality_config.image=True \
  datamodule.dataset_config.modality_config.text=True \
  datamodule.dataset_config.modality_config.audio=True \
  datamodule.dataset_config.modality_config.video=False
|
<!DOCTYPE html>
<!-- Minimal static blog page. -->
<html lang="en">
<head>
    <!-- Declare the encoding explicitly so browsers do not have to guess. -->
    <meta charset="utf-8">
    <title>My Blog</title>
</head>
<body>
    <h1>My Blog</h1>
    <div>
        <h2> Post 1 </h2>
        <h3> Title </h3>
        <p> Post content... </p>
    </div>
    <div>
        <h2> Post 2 </h2>
        <h3> Title </h3>
        <p> Post content... </p>
    </div>
</body>
</html>
#!/usr/bin/env bash
# Start the AirPlay plugin (shairport-sync) once the audio block is ready.

# Exit early when AirPlay has been explicitly disabled.
if [[ -n "$SOUND_DISABLE_AIRPLAY" ]]; then
  echo "Airplay is disabled, exiting..."
  exit 0
fi

# --- ENV VARS ---
# SOUND_DEVICE_NAME: Set the device broadcast name for AirPlay
SOUND_DEVICE_NAME=${SOUND_DEVICE_NAME:-"balenaSound AirPlay $(echo "$BALENA_DEVICE_UUID" | cut -c -4)"}

echo "Starting AirPlay plugin..."
echo "Device name: $SOUND_DEVICE_NAME"

# Wait for audioblock to start. This is a bit hacky, but necessary for the time being as
# shairport-sync will fail silently if audioblock is not ready when it starts up
# See: https://github.com/mikebrady/shairport-sync/issues/1054
# Remove when above issue is addressed
SOUND_SUPERVISOR_PORT=${SOUND_SUPERVISOR_PORT:-80}
SOUND_SUPERVISOR="$(ip route | awk '/dev br/ { print $9 }'):$SOUND_SUPERVISOR_PORT"
# NOTE(review): SOUND_SUPERVISOR_MODE is queried but never used below --
# confirm before removing.
SOUND_SUPERVISOR_MODE=$(curl --silent "$SOUND_SUPERVISOR/mode" || true)
while ! curl --silent --output /dev/null "$SOUND_SUPERVISOR/ping"; do sleep 5; echo "Waiting for audioblock to start..."; done

# Start AirPlay.
# Fix: the original ran `exec shairport-sync ... | echo "..."`, which put
# shairport-sync into a pipeline subshell and piped its output into an echo
# that ignores stdin (the message printed immediately, and exec did not
# replace the shell). Print the status line first, then exec.
echo "Shairport-sync started. Device is discoverable as $SOUND_DEVICE_NAME"
exec shairport-sync \
  --name "$SOUND_DEVICE_NAME" \
  --output pa
# Exercise script (Python 2): string printing, %-formatting, repetition and
# concatenation.
# NOTE(review): the rhyme is usually "lamb" -- "lamp" may be a typo in the
# printed text; left unchanged because it is runtime output.
print "Mary had a little lamp."
print "Its fleece was white as %s." % 'snow'
print "And everywhere that Mary went."
# Now we'll add the string 10 times using multiplication symbol * (asterisk)
print "ab" * 10 # what'd that do?
# One character per variable, spelling out "CheeseBurger" across two words.
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
# watch that comma at the end. try removing it to see what happens
# A comma at the end tells python to continue in same line adding a space
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + end10 + end11 + end12
|
#!/bin/bash
# Container entrypoint: start syslog, snapshot the environment for cron
# jobs, then run cron in the foreground.
rsyslogd
# Re-export the current environment into a sourceable file so cron jobs
# (which start with a minimal environment) can pick it up.
printenv | sed 's/^\(.*\)\=\(.*\)$/export \1\="\2"/g' > /root/project_env.sh
# NOTE(review): 777 is wider than needed for a file that is only sourced --
# consider 644; confirm nothing writes to it.
chmod 777 /root/project_env.sh
# -L15 sets cron's logging level; -f keeps cron in the foreground.
cron -L15 -f
<gh_stars>1-10
package implementation;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.StringTokenizer;
/**
 * BOJ 2605 (line of students): person i+1 (1-based) draws num[i] and moves
 * that many places forward from the back of the current line. Reads N and
 * the N drawn numbers from stdin and prints the final line front to back.
 */
public class Boj2605 {

    public static final String SPACE = " ";

    public static void main(String[] args) throws Exception {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StringBuilder sb = new StringBuilder();

        int N = Integer.parseInt(br.readLine());
        ArrayList<Integer> idx = new ArrayList<>();
        int[] num = new int[N];

        StringTokenizer st = new StringTokenizer(br.readLine(), " ");
        for (int i = 0; i < N; i++) {
            num[i] = Integer.parseInt(st.nextToken());
        }

        // The first person always starts the line.
        idx.add(0, 1);
        // Person i+1 moves num[i] places forward: insert at position i - num[i].
        // The original scanned j = 0..N-1 looking for j == num[i] before doing
        // this single insert -- an accidental O(N^2) no-op scan. With the
        // problem's constraint 0 <= num[i] <= i, the direct insert is
        // equivalent and O(N) per step.
        for (int i = 1; i < N; i++) {
            idx.add(i - num[i], i + 1);
        }

        for (int i = 0; i < N; i++) {
            sb.append(idx.get(i)).append(SPACE);
        }
        System.out.println(sb.toString());
    }
}
|
#!/bin/bash -e
# Generates SSH configuration based on provided variables.
mkdir -p ~/.ssh

# Per-key host and user, with placeholder defaults.
SSH_KEY_1_HOST=${SSH_KEY_1_HOST:-"example.com"}
SSH_KEY_2_HOST=${SSH_KEY_2_HOST:-"example.edu"}
SSH_KEY_3_HOST=${SSH_KEY_3_HOST:-"example.net"}
SSH_KEY_4_HOST=${SSH_KEY_4_HOST:-"example.org"}
SSH_KEY_1_USER=${SSH_KEY_1_USER:-"git"}
SSH_KEY_2_USER=${SSH_KEY_2_USER:-"git"}
SSH_KEY_3_USER=${SSH_KEY_3_USER:-"git"}
SSH_KEY_4_USER=${SSH_KEY_4_USER:-"git"}

# Decode each provided base64 key into its own identity file.
# Fix: payload expansions are now quoted and emitted with printf -- the
# original unquoted `echo $SSH_KEY_N_BASE64` subjected the key material to
# word splitting and pathname expansion.
if [[ -n "$SSH_KEY_1_BASE64" ]]; then
    echo "Created SSH config for host '$SSH_KEY_1_HOST' and user '$SSH_KEY_1_USER'..."
    printf '%s\n' "$SSH_KEY_1_BASE64" | base64 -d > ~/.ssh/id_key_1
fi
if [[ -n "$SSH_KEY_2_BASE64" ]]; then
    echo "Created SSH config for host '$SSH_KEY_2_HOST' and user '$SSH_KEY_2_USER'..."
    printf '%s\n' "$SSH_KEY_2_BASE64" | base64 -d > ~/.ssh/id_key_2
fi
if [[ -n "$SSH_KEY_3_BASE64" ]]; then
    echo "Created SSH config for host '$SSH_KEY_3_HOST' and user '$SSH_KEY_3_USER'..."
    printf '%s\n' "$SSH_KEY_3_BASE64" | base64 -d > ~/.ssh/id_key_3
fi
if [[ -n "$SSH_KEY_4_BASE64" ]]; then
    echo "Created SSH config for host '$SSH_KEY_4_HOST' and user '$SSH_KEY_4_USER'..."
    printf '%s\n' "$SSH_KEY_4_BASE64" | base64 -d > ~/.ssh/id_key_4
fi

# One Host stanza per key, plus relaxed host-key checking for all hosts.
cat > ~/.ssh/config <<EOF
Host $SSH_KEY_1_HOST
  User $SSH_KEY_1_USER
  IdentityFile ~/.ssh/id_key_1
Host $SSH_KEY_2_HOST
  User $SSH_KEY_2_USER
  IdentityFile ~/.ssh/id_key_2
Host $SSH_KEY_3_HOST
  User $SSH_KEY_3_USER
  IdentityFile ~/.ssh/id_key_3
Host $SSH_KEY_4_HOST
  User $SSH_KEY_4_USER
  IdentityFile ~/.ssh/id_key_4
Host *
  StrictHostKeyChecking=no
  UserKnownHostsFile=/dev/null
EOF

# Lock down permissions; ssh refuses identity files that are group/world readable.
chmod 700 ~/.ssh
chmod 600 ~/.ssh/id_key_*
chmod 600 ~/.ssh/config
|
# ---------------------------------------------------------------------------
# Experiment configuration for the hlstm_p8 forecasting run (sourced by the
# training driver). Assumes ROOT_DIR, DATA_DIR and RO_DATA_DIR are already
# set by the caller -- TODO confirm against the driver script.
# Convention used throughout this file: an empty value ("VAR=") disables the
# feature; a non-empty value such as "x" enables it.
# ---------------------------------------------------------------------------
# the data dir contains those corpus
#this folder will be created, and all the models and results will be list there.
CONFIG_DIR=/forecasting/u0n-1_s0n/hlstm_p8_snt_selfatt_elmo_pre1024_f2/
WORK_DIR=$ROOT_DIR/Expt/workdir/$CONFIG_DIR/
TRAINING_DIR=$WORK_DIR/training/
# cluster strategy for psyc dataset
CLUSTER_STRATEGY=MISC11_ML
INPUT_DIR=psyc_${CLUSTER_STRATEGY}_17_padding
# use pretrained word and char embedding using prepare.sh
VOCAB_DIR=$DATA_DIR/vocab_elmo_pre1024_u0n_s0n_u_8/
mkdir -p $VOCAB_DIR
# Use ../../utils/preprocess.py to compute score for each paragraph
#TRAIN_FILENAME1=${INPUT_DIR}/train_sc20.json
TRAIN_FILENAME1=${INPUT_DIR}/train.json
#DEV_FILENAME1=${INPUT_DIR}/dev_sc10.json
DEV_FILENAME1=${INPUT_DIR}/dev.json
TEST_FILENAME1=${INPUT_DIR}/test.json
#TEST_FILENAME1=${INPUT_DIR}/dev_sc20.json
RO_TRAIN_FILE1=$RO_DATA_DIR/$TRAIN_FILENAME1
RO_DEV_FILE1=$RO_DATA_DIR/$DEV_FILENAME1
RO_TEST_FILE1=$RO_DATA_DIR/$TEST_FILENAME1
TRAIN_FILE1=$DATA_DIR/prep_data/rmstop_0_rpt_no/$TRAIN_FILENAME1
DEV_FILE1=$DATA_DIR/prep_data/rmstop_0_rpt_no/$DEV_FILENAME1
TEST_FILE1=$DATA_DIR/prep_data/rmstop_0_rpt_no/$TEST_FILENAME1
# Core model / optimisation hyper-parameters.
ALGO="GMLSTM"
LEARNING_RATE=0.0001
#WEIGHT_DECAY=0.0001
WEIGHT_DECAY=0.0
BATCH_SIZE=64
CONTEXT_WINDOW=8
QUESTION_WINDOW=1
HIDDEN_SIZE=128
EPOCH=100
STEPS_PER_CHECKPOINT=100
# DROP_KEPP_PROB in (0, 1], 1 is no dropout
DROP_KEEP_PROB=0.3
USE_SELFATT=
USE_CHAR_EMBED=
MAX_NUM_CHAR_TO_KEEP_FORWARD=4
MAX_NUM_CHAR_TO_KEEP_BACKWARD=4
#USE_CHAR_EMBED=x
# Whether to training the original embedding.
TRAIN_EMBEDDING=
# max_grad_norm / max(global_norm, max_grad_norm), set to inf to disable.
MAX_GRAD_NORM=1.0
# leave it empty to use random initial WORD_EMB
WORD_EMB_FILE=$RO_DATA_DIR/glove.840B.300d.txt
# WORD_EMB_FILE=
#WORD_EMB_FILE=$DATA_DIR/vectors_words.txt
CHAR_EMB_FILE=
#CHAR_EMB_FILE=$RO_DATA_DIR/glove.840B.300d-char.txt
#CHAR_EMB_FILE=$DATA_DIR/vectors_chars.txt
EMA=0.9999
MAX_P_LEN=1000
MAX_Q_LEN=60
NUM_FILTERS=25
ACC_SUM_PROB=0.9
#flat Context-aware question attention
FLAT_C_Q_ATT=
# pos_weight for balanced cross entropy
POS_WEIGHT=0.9
# set gama = 0, decay to standard cross entropy
# key for tokenization to use
TOKEN_KEY_TO_USE=tokenized_utterance
# whether adding p encoding to decode
DECODE_P=
# whether adding q encoding to decode
DECODE_Q=
# TOPK, a list of integers for K values in Recall@K
TOPK_LIST=1,2,3,5,10
# TOPM_FOR_ACC_PROB, with ACC_SUM_PROB in topM
TOPM_FOR_ACC_PROB=5
# WORD_EMBED_SIZE, default 300, exclusive with WORD_EMB_FILE
WORD_EMBED_SIZE=300
# CHAR_EMBED_SIZE, default 100, exclusive with CHAR_EMB_FILE
CHAR_EMBED_SIZE=300
# flat Question-aware context attention
FLAT_Q_C_ATT=
# Dropout keep prob for embedding, 1.0=no_dropout
DROPOUT_KEEP_PROB_EMB=0.7
# Method to encode the dialogue
DIAL_ENCODE=HIERARCHY
# max_length for a single utterance
MAX_U_LEN=60
# whether to hierarchy_q_pu_att
HIERARCHY_Q_PU_ATT=
# self-att for hierarchy, only can be useful when dial_encode=HIERARCHY
USE_HIERARCHY_SELFATT=
# ema_decay is decay ratio for EMA, 0.0 to disable, 0.9999+ to enable
EMA_DECAY=0.0
# loss_func, default=X_ENTROPY
#LOSS_FUNC=X_ENTROPY
#LOSS_FUNC=WEIGHTED_CE
LOSS_FUNC=WEIGHTED_FOCAL
# rnn_type, bi-lstm, bi-gru, bi-rnn, lstm, gru, rnn
RNN_TYPE=bi-gru
# whether to use shared encode layer for utterance
USE_SHARED_ENCODING=
# all training files to use
TRAIN_FILES=$TRAIN_FILE1
#TRAIN_FILES=`find ${TRAIN_FILE1}_splits -name "split*" | tr '\n' ','`
# all dev files to use
DEV_FILES=$DEV_FILE1
# all test files to use
TEST_FILES=$TEST_FILE1
# elmo pretrained LM weight file
#ELMO_WEIGHT_FILE=$DATA_DIR/psyc_elmo/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5
ELMO_WEIGHT_FILE=$DATA_DIR/psyc_elmo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5
# elmo corresponding to options file
#https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5
#https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway_5.5B/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json
#ELMO_OPTION_FILE=$DATA_DIR/psyc_elmo/elmo_2x1024_128_2048cnn_1xhighway_options.json
ELMO_OPTION_FILE=$DATA_DIR/psyc_elmo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json
# elmo max num character
# elmo embedding output size to be projected into
ELMO_EMB_OUTPUT=1024
# whether use character elmo emebdding
USE_CHARACTER_ELMO=x
# positions to inject elmo, keep empty to disable
ELMO_POSITIONS=input
# elmo vocabulary file to write and read
ELMO_VOCAB_FILE=$VOCAB_DIR/psyc_elmo_vocab_file
# elmo snt dict file used for utterance cache
ELMO_SNT_DICT_FILE=$DATA_DIR/prep_data/rmstop_0_rpt_no/psyc_MISC11_ML_17_padding/elmo_snt_dict_file.without
# elmo utterance cache file
ELMO_U_CACHE_FILE=$VOCAB_DIR/u_cache.hdf5
# elmo passage cache file
ELMO_P_CACHE_FILE=$VOCAB_DIR/p_cache.hdf5
# elmo question cache file
ELMO_Q_CACHE_FILE=$VOCAB_DIR/q_cache.hdf5
# input used to decode
DECODE_INPUTS=p_final
#MEMNET PARAMS
GATED_MEMNET=x
PASSAGE_HOPS=2
MEMNET_SHARE_WEIGHTS=x
# whether to use concat p
USE_CONCAT_P=
# decode_func to use for multiclass decoding
DECODE_FUC=FC
# flat Context-ware response attention
FLAT_C_R_ATT=
# flat response-ware context attention
FLAT_R_C_ATT=
# whether to hierarchy_r_pu_att
HIERARCHY_R_PU_ATT=
# whether adding r encoding to cnn decode
DECODE_R=
# r pass memnet hops
RESPONSE_HOPS=2
# use response utterance or not
USE_RESPONSE_U=
# decode goal
DECODE_GOAL=P_LABEL
# Whether to use speaker embedding
USE_SPEAKER_EMBEDDING=x
# Whether to use label embedding
USE_LABEL_EMBEDDING=
# dim of label embedding
LABEL_EMBED_DIM=32
# dim of speaker embedding
SPEAKER_EMBED_DIM=8
# filter sizes for cnn
FILTER_SIZES=3,4,5
# whether to decode r with flatten pu_labels embdding
DECODE_R_WITH_FLATTEN_PU_LABELS=
# whether to use response speaker info
USE_RESPONSE_S=
# whether to train speaker emb
TRAIN_SPEAKER_EMBEDDING=x
# whether to train label emb
TRAIN_LABEL_EMBEDDING=x
# dropout keep rate for MLP
DROPOUT_KEEP_PROB_MLP=0.8
# num_attention_heads for snt seq attention
NUM_ATT_HEAD=4
# snt-levl attention algorithm, leave empty for disabling
SNT_ATT_ALGO=snt_self_att
# snt-levl attention hops
SNT_SEQ_HOPS=2
# snt rnn type, for snt-lvl rnn
SNT_RNN_TYPE=gru
# loss_weights for each label, sep with comma, can be float
LOSS_WEIGHTS=1.0,1.0,0.25
# LOSS_WEIGHTS=1.0,1.0,0.25,0.50,0.75,0.75,0.50,0.75,0.75,1.0,1.0
# LOSS_WEIGHTS=0.50,0.75,0.75,0.50,0.75,0.75,1.0,1.0
# focal loss gama for each label, sep with comma, int
# FOCAL_LOSS_GAMA=0,0,0,0,0,0,0,0,0,0,0
FOCAL_LOSS_GAMA=2,2,2
# use response in context seq, without its label
USE_R_IN_SEQ=
# how to combine the final input states
DECODE_COMBINE=additive
#config file for P model
P_MODEL_CONFIG=
#config file for T model
T_MODEL_CONFIG=
# whether use r in word matching
USE_R_IN_WM_SEQ=
# whether use batch normalization
USE_BATCH_NORM=
|
// The MIT License (MIT)
//
// Copyright (c) 2018 Mervin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package codec
import (
"encoding/binary"
"github.com/mervinkid/matcha/buffer"
"github.com/vmihailenco/msgpack"
)
// ApolloEntity is implemented by any message that can travel over the
// Apollo framing; it only has to report its numeric type code.
type ApolloEntity interface {
	TypeCode() uint16
}

// ApolloConfig extends TLVConfig with a registry mapping type codes to
// entity constructors, used by the decoder to materialise payloads.
type ApolloConfig struct {
	TLVConfig
	entityConstructors map[uint16]func() ApolloEntity
}

// RegisterEntity records a constructor under the type code reported by a
// probe instance it builds. Nil constructors, and constructors returning
// nil, are silently ignored.
func (c *ApolloConfig) RegisterEntity(constructor func() ApolloEntity) {
	c.initConfig()
	if constructor != nil {
		if testEntity := constructor(); testEntity != nil {
			c.entityConstructors[testEntity.TypeCode()] = constructor
		}
	}
}

// createEntity builds a fresh entity for typeCode, or returns nil when no
// constructor has been registered for it.
func (c *ApolloConfig) createEntity(typeCode uint16) ApolloEntity {
	c.initConfig()
	if constructor := c.entityConstructors[typeCode]; constructor != nil {
		return constructor()
	}
	return nil
}

// initConfig lazily allocates the constructor registry.
func (c *ApolloConfig) initConfig() {
	if c.entityConstructors == nil {
		c.entityConstructors = make(map[uint16]func() ApolloEntity)
	}
}
// ApolloFrameDecoder is a bytes to ApolloEntity decode implementation of FrameDecode based on TLVFrameDecoder
// using MessagePack for payload data deserialization.
// +----------+-----------+---------------------------+
// |   TAG    |  LENGTH   |           VALUE           |
// | (1 byte) | (4 bytes) |  2 bytes   | serialized   |
// |          |           | type code  |    data      |
// +----------+-----------+---------------------------+
// Decode:
// []byte → ApolloEntity(*pointer)
type ApolloFrameDecoder struct {
	Config     ApolloConfig
	tlvDecoder FrameDecoder
}

// Decode strips the TLV framing from `in`, reads the 2-byte type code, and
// MessagePack-unmarshals the remaining bytes into a freshly constructed
// entity. Returns (nil, nil) when there is not yet a complete frame or the
// type code is unregistered; returns a DecodeError on malformed input.
func (d *ApolloFrameDecoder) Decode(in buffer.ByteBuf) (interface{}, error) {
	if in.ReadableBytes() == 0 {
		return d.decodeNothing()
	}
	// Decode inbound with TLVFrameDecoder
	d.initTLVDecoder()
	tlvPayload, tlvErr := d.tlvDecoder.Decode(in)
	// (nil, nil) from the TLV layer means "frame not complete yet".
	if tlvPayload == nil && tlvErr == nil {
		return d.decodeNothing()
	}
	if tlvErr != nil {
		return d.decodeFailure(tlvErr.Error())
	}
	// Init ByteBuf for MessagePack deserialization.
	tlvPayloadByteBuffer := buffer.NewElasticUnsafeByteBuf(len(tlvPayload.([]byte)))
	tlvPayloadByteBuffer.WriteBytes(tlvPayload.([]byte))
	// Parse 2 bytes of message type code.
	if tlvPayloadByteBuffer.ReadableBytes() < 2 {
		return d.decodeFailure("illegal payload")
	}
	var typeCode uint16
	// NOTE(review): binary.Read's error is ignored; the >=2 readable-byte
	// check above should prevent a short read -- confirm ByteBuf.Read semantics.
	binary.Read(tlvPayloadByteBuffer, binary.BigEndian, &typeCode)
	// Parse reset bytes for serialized data.
	serializedBytes := tlvPayloadByteBuffer.ReadBytes(tlvPayloadByteBuffer.ReadableBytes())
	if entity := d.Config.createEntity(typeCode); entity != nil {
		if unmarshalErr := msgpack.Unmarshal(serializedBytes, entity); unmarshalErr != nil {
			return d.decodeFailure(unmarshalErr.Error())
		} else {
			return d.decodeSuccess(entity)
		}
	}
	// Unregistered type code: swallow the frame without error.
	return d.decodeNothing()
}

// initTLVDecoder lazily builds the underlying TLV decoder from Config.
func (d *ApolloFrameDecoder) initTLVDecoder() {
	if d.tlvDecoder == nil {
		d.tlvDecoder = NewTLVFrameDecoder(d.Config.TLVConfig)
	}
}

// decodeNothing reports "no complete message yet" as (nil, nil).
func (d *ApolloFrameDecoder) decodeNothing() (interface{}, error) {
	return d.decodeSuccess(nil)
}

func (d *ApolloFrameDecoder) decodeSuccess(result interface{}) (interface{}, error) {
	return result, nil
}

func (d *ApolloFrameDecoder) decodeFailure(cause string) (interface{}, error) {
	return nil, NewDecodeError("ApolloFrameDecoder", cause)
}

// NewApolloFrameDecoder create a new ApolloFrameDecoder instance with configuration.
func NewApolloFrameDecoder(config ApolloConfig) FrameDecoder {
	return &ApolloFrameDecoder{Config: config}
}
// ApolloFrameEncoder is a ApolloEntity to bytes encoder implementation of FrameEncode based on TLVFrameEncoder
// using MessagePack for payload data serialization.
// +----------+-----------+---------------------------+
// |   TAG    |  LENGTH   |           VALUE           |
// | (1 byte) | (4 bytes) |  2 bytes   | serialized   |
// |          |           | type code  |    data      |
// +----------+-----------+---------------------------+
// Encode:
// ApolloEntity(*pointer) → []byte
type ApolloFrameEncoder struct {
	Config     ApolloConfig
	tlvEncoder FrameEncoder
}

// Encode MessagePack-serializes `msg`, prefixes the 2-byte type code, and
// wraps the result in a TLV frame. Returns an EncodeError when `msg` is not
// an ApolloEntity or when serialization/framing fails.
func (e *ApolloFrameEncoder) Encode(msg interface{}) ([]byte, error) {
	// Message must be an implementation of ApolloEntity interface.
	var entity ApolloEntity
	switch message := msg.(type) {
	case ApolloEntity:
		entity = message
	default:
		return e.encodeFailure("message is not valid implementation of ApolloEntity interface")
	}
	// Marshal entity to bytes.
	typeCode := entity.TypeCode()
	marshaledBytes, marshalErr := msgpack.Marshal(entity)
	if marshalErr != nil {
		return e.encodeFailure(marshalErr.Error())
	}
	// Build frame payload with marshaled bytes and type code.
	// NOTE(review): binary.Write errors are ignored; the target is an
	// in-memory elastic buffer, which presumably cannot fail -- confirm.
	payloadByteBuffer := buffer.NewElasticUnsafeByteBuf(2 + len(marshaledBytes))
	binary.Write(payloadByteBuffer, binary.BigEndian, typeCode)
	binary.Write(payloadByteBuffer, binary.BigEndian, marshaledBytes)
	// Encode with TLVEncoder
	e.initTLVEncoder()
	frameBytes, encodeErr := e.tlvEncoder.Encode(payloadByteBuffer.ReadBytes(payloadByteBuffer.ReadableBytes()))
	if encodeErr != nil {
		return e.encodeFailure(encodeErr.Error())
	}
	return e.encodeSuccess(frameBytes)
}

// initTLVEncoder lazily builds the underlying TLV encoder from Config.
func (e *ApolloFrameEncoder) initTLVEncoder() {
	if e.tlvEncoder == nil {
		e.tlvEncoder = NewTLVFrameEncoder(e.Config.TLVConfig)
	}
}

func (e *ApolloFrameEncoder) encodeSuccess(result []byte) ([]byte, error) {
	return result, nil
}

func (e *ApolloFrameEncoder) encodeFailure(cause string) ([]byte, error) {
	return nil, NewEncodeError("ApolloFrameEncoder", cause)
}

// NewApolloFrameEncoder create a new ApolloFrameEncoder instance with configuration.
func NewApolloFrameEncoder(config ApolloConfig) FrameEncoder {
	return &ApolloFrameEncoder{Config: config}
}
|
#!/bin/sh
# Regenerate the autotools build system, or remove all generated files when
# invoked as "./bootstrap.sh clean".

# Everything autoreconf (and a build) leaves behind.
AUTOCONF_FILES="Makefile.in aclocal.m4 ar-lib autom4te.cache compile \
        config.guess config.h.in config.sub configure depcomp install-sh \
        ltmain.sh missing *libtool test-driver"

case $1 in
    clean)
        test -f Makefile && make maintainer-clean
        test -f linux/bt-bmc.h && rm -rf linux/bt-bmc.h
        test -d linux && find linux -type d -empty | xargs -r rm -rf
        for file in ${AUTOCONF_FILES}; do
            # Fix: pass an explicit start path -- `find -name` with no path
            # operand is a GNU extension and fails on POSIX/BSD find.
            find . -name "$file" | xargs -r rm -rf
        done
        exit 0
        ;;
esac

autoreconf -i
# ${CONFIGURE_FLAGS} is intentionally printed literally for the user.
echo 'Run "./configure ${CONFIGURE_FLAGS} && make"'
|
#!/usr/bin/env bash
# Install OpenSSL runtime libraries so binaries built elsewhere can run here.
set -ex
# Guard: this script only makes sense as root on Linux.
[[ $(uname) = Linux ]] || exit 1
[[ $USER = root ]] || exit 1
# Install libssl-dev to be compatible with binaries built on an Ubuntu machine...
apt-get update
apt-get --assume-yes install libssl-dev
# Install libssl1.1 to be compatible with binaries built in the
# solanalabs/rust docker image
#
# cc: https://github.com/solana-labs/solana/issues/1090
# cc: https://packages.ubuntu.com/bionic/amd64/libssl1.1/download
# NOTE(review): version is pinned to a specific bionic build; the URL will
# break if Ubuntu rotates the package -- confirm periodically.
wget http://security.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
dpkg -i libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
rm libssl1.1_1.1.0g-2ubuntu4.1_amd64.deb
|
// Build a sanitized copy of `obj` that exposes only its name, age and a
// name/age-only projection of each entry in `obj.children`.
function generateResponse(obj) {
    const children = obj.children.map((child) => ({
        name: child.name,
        age: child.age,
    }));
    return {
        name: obj.name,
        age: obj.age,
        children,
    };
}
#!/bin/sh
# Copyright 2018 The pdfcpu Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Run pdfcpu optimize twice on one input file (the second pass verifies the
# first pass produced a still-optimizable, valid PDF).
# eg: ./optimizeFile.sh ~/pdf/1mb/a.pdf ~/pdf/out

if [ $# -ne 2 ]; then
    echo "usage: ./optimizeFile.sh inFile outDir"
    exit 1
fi

new=_new
f=${1##*/}      # input file name without directory
f1=${f%.*}      # file name without extension
out=$2

#rm -drf $out/*

#set -e

cp "$1" "$out/$f"

out1=$out/$f1$new.pdf
# ">log 2>&1" instead of bash-only "&>": this script runs under /bin/sh.
pdfcpu optimize -verbose "$out/$f" "$out1" > "$out/$f1.log" 2>&1
rc=$?
# Capture pdfcpu's status before echo overwrites $?: the original
# "if [ $? -eq 1 ] ... exit $?" always exited 0 because $? was echo's status.
if [ $rc -ne 0 ]; then
    echo "optimization error: $1 -> $out1"
    exit $rc
else
    echo "optimization success: $1 -> $out1"
fi

out2=$out/$f1$new$new.pdf
pdfcpu optimize -verbose "$out1" "$out2" > "$out/$f1$new.log" 2>&1
rc=$?
if [ $rc -ne 0 ]; then
    echo "optimization error: $out1 -> $out2"
    exit $rc
else
    echo "optimization success: $out1 -> $out2"
fi
|
<gh_stars>1-10
"""Tests for the models of the ``cmsplugin_markdown`` app."""
from django.test import TestCase
from mixer.backend.django import mixer
class MarkdownPluginTestCase(TestCase):
    """Tests for the ``MarkdownPlugin`` model."""
    longMessage = True

    def test_model(self):
        # mixer.blend creates and saves an instance with generated field data.
        plugin = mixer.blend('cmsplugin_markdown.MarkdownPlugin')
        self.assertTrue(str(plugin), msg=(
            'Should be able to instantiate and save the model.'))
|
/**
 * Demo: sums a fixed list of integers and prints the result.
 */
public class SumOfList {

    /** Returns the sum of all elements in {@code arr}; 0 for an empty array. */
    static int sum(int[] arr) {
        int total = 0;
        // Enhanced for-loop: no index bookkeeping needed.
        for (int value : arr) {
            total += value;
        }
        return total;
    }

    public static void main(String[] args) {
        int[] arr = {1, 3, 5, 7, 9};
        System.out.println("Sum of all numbers in the list is : " + sum(arr));
    }
}
import {
React,
O,
currentLocation,
isLoading,
fromSelector,
createLoader,
fromActionCreator,
portableReducer,
hydrate,
rootState,
globalErrorSink,
createErrorSink,
OblongError,
makeEvent,
dispatch,
getState,
} from 'oblong'
import { Link } from 'react-router-dom'
// Test helper: a promise that resolves after a fixed 2s delay.
const twoSeconds = () => {
  return new Promise((resolve) => {
    setTimeout(resolve, 2000)
  })
}

// Test helper: a promise that rejects after a fixed 2s delay.
const twoSecondsFail = () => {
  return new Promise((_resolve, reject) => {
    setTimeout(reject, 2000)
  })
}
// Derived default for `age`: the current URL path length (an arbitrary demo
// value) until the user sets an age explicitly.
const defaultAge = O.query()
  .with({ currentLocation })
  .as((o) => o.currentLocation.pathname.length)

// Persistent state slices under the `user.*` namespace.
const age = O.state<number>('user.age').as(defaultAge)
// Shallow equality: replacing the object with a shallow-equal one is not a change.
const profile = O.state('user.profile').setEquality('shallow').as({ name: '<NAME>' })

// First whitespace-separated token of the profile name.
const firstName = O.query()
  .with({ profile })
  .as((o) => o.profile.name.split(' ')[0])

// Combines two state slices into one object (exercises multi-dependency queries).
const testQueryMutation = O.query()
  .with({ profile, age })
  .as((o) => ({ name: o.profile.name, age: o.age }))

// Command: upper/lower-cases the profile name (per the boolean arg) and
// resets age to 15.
const flipCase = O.command('flipCase')
  .with({ profile, age, testQueryMutation })
  .as<[boolean], void>((o) => {
    const [upper] = o.args
    o.profile = {
      ...o.profile,
      name: o.profile.name[upper ? 'toUpperCase' : 'toLowerCase'](),
    }
    o.age = 15
  })
// Simple nav links used by the routing-related demos below.
const Links = O.view('Links').as(() => (
  <div>
    <Link to="/apple">Apple</Link> | <Link to="/banana">Banana</Link>
  </div>
))

// Editable name/age form bound directly to the `profile`/`age` state slices.
const Profile = O.view('Profile')
  .with({ profile, flipCase, age })
  .as((o) => (
    <>
      <div>
        <label>
          Name:
          <input
            type="text"
            value={o.profile.name}
            onChange={(e) => {
              // Assign a whole new object (profile uses shallow equality).
              o.profile = {
                ...o.profile,
                name: e.target.value,
              }
            }}
          />
        </label>
        <button type="button" onClick={() => o.flipCase(true)}>
          Upper
        </button>
        <button type="button" onClick={() => o.flipCase(false)}>
          Lower
        </button>
      </div>
      <div>
        <label>
          Age:
          <input
            type="text"
            value={o.age}
            onChange={(e) => {
              // Non-numeric input falls back to 0.
              o.age = parseInt(e.target.value) || 0
            }}
          />
        </label>
      </div>
      <Links />
    </>
  ))

// Greets by first name; `.if()` suppresses rendering while firstName is empty.
const Greeter = O.view('Greeter')
  .with({ firstName })
  .if((o) => !!o.firstName)
  .as((o) => {
    // Unused state hook — presumably here to verify hooks work inside O.view;
    // `ignore` is always undefined, so the early return never fires.
    const [ignore] = React.useState()
    if (ignore) return null
    return <h2>Hello, {o.firstName}</h2>
  })

// Dumps the router location object for debugging.
const LocationViewer = O.view('LocationViewer')
  .with({ currentLocation })
  .as((o) => <div>{JSON.stringify(o.currentLocation)}</div>)

const isOnBananaRoute = O.query()
  .with({ currentLocation })
  .as((o) => o.currentLocation.pathname === '/banana')

// Renders only while the /banana route is active.
const BananaRoute = O.view('BananaRoute')
  .with({ isOnBananaRoute })
  .as((o) => {
    if (!o.isOnBananaRoute) return null
    return <h4>BANANA TIME</h4>
  })
// Async commands that succeed/fail after 2s, to exercise the loading tracker.
const doGoodSlow = O.command('doGoodSlow').as(async () => {
  await twoSeconds()
})
const doBadSlow = O.command('doBadSlow').as(async () => {
  await twoSecondsFail()
})

// Async command that opts out of the global loading indicator.
const withoutLoader = O.command('withoutLoader')
  .with({})
  .ignoreLoading()
  .as(async () => {
    await twoSeconds()
  })

// Named loader: tracks loading under its own key instead of the global flag
// (the command itself calls ignoreLoading and tracks via the loader).
const namedLoader = createLoader().named('namedLoader')
const withNamedLoader = O.command('withNamedLoader')
  .with({ namedLoader })
  .ignoreLoading()
  .as(async (o) => {
    await o.namedLoader.track(async () => {
      await twoSeconds()
    })
  })

// Raw JSON view of oblong's internal loading state, displayed by LoaderTest.
const loaderState = fromSelector((state) => JSON.stringify(state?.oblong?.loading, undefined, 1))
// Panel of buttons driving the loader demos plus a dump of the loading state.
// Also takes a `user` prop from the parent (see <App />) to show prop passthrough.
const LoaderTest = O.view('LoaderTest')
  .with({
    isLoading,
    doGoodSlow,
    doBadSlow,
    loaderState,
    withoutLoader,
    withNamedLoader,
    namedLoader,
  })
  .as<{ user: { name: string } }>((o) => (
    <div>
      <h4>LoaderTest</h4>
      <div>Is Global Loading: {o.isLoading.toString()}</div>
      <div>
        <button type="button" onClick={o.doGoodSlow}>
          Good
        </button>
        <button type="button" onClick={o.doBadSlow}>
          Bad
        </button>
        <button type="button" onClick={o.withoutLoader}>
          Without Loader
        </button>
        {/* Disabled while its own named loader is tracking work. */}
        <button type="button" onClick={o.withNamedLoader} disabled={o.namedLoader.isLoading}>
          With Named Loader
        </button>
      </div>
      <pre style={{ fontSize: '0.9rem' }}>
        <code>{o.loaderState}</code>
      </pre>
      <div>Child: {o.user.name}</div>
    </div>
  ))
// Plain redux-style action creator wrapped into an oblong command.
const doWeirdRaw = (name: string, age: number) => ({ type: 'doWeird', payload: { name, age } })
const doWeird = fromActionCreator(doWeirdRaw)

// NOTE(review): "Wierd" is a typo for "Weird"; left as-is since the name is
// referenced by <App /> below.
const DoWierdTest = O.view('DoWierdTest')
  .with({ doWeird })
  .as((o) => (
    <div>
      <button type="button" onClick={() => o.doWeird('john', 1)}>
        doWeird
      </button>
    </div>
  ))
// Reducer that returns previous + 1 every time it runs, regardless of action
// (presumably invoked once per dispatch — confirm with oblong's portableReducer docs).
const counter = portableReducer('counter', (previous: number = 0) => previous + 1)
const Counter = O.view('Counter')
  .with({ counter })
  .as((o) => <div>Counter: {o.counter}</div>)
// Persist the entire redux state tree to sessionStorage...
const hydrateSet = O.command('hydrateSet')
  .with({ rootState })
  .as((o) => {
    sessionStorage.setItem('oblong.playground', JSON.stringify(o.rootState))
  })

// ...and restore it through oblong's hydrate(); a no-op when nothing is stored.
const hydrateGet = O.command('hydrateGet')
  .with({ hydrate })
  .as((o) => {
    const stored = sessionStorage.getItem('oblong.playground')
    if (stored) {
      o.hydrate(JSON.parse(stored))
    }
  })

// Unnamed view: the named function expression supplies the display name.
const Hydrate = O.view()
  .with({ hydrateGet, hydrateSet })
  .as(function Hydrate(o) {
    return (
      <div>
        Hydrate:
        <button onClick={o.hydrateSet}>Set</button>
        <button onClick={o.hydrateGet}>Get</button>
      </div>
    )
  })
// Commands that fail in different ways, to exercise the error sinks below.
const failRaw = O.command('failRaw')
  .with({})
  .as(() => {
    throw new TypeError('Wow, such error. Much fail.')
  })

// Named sink: errors logged here stay separate from the global sink.
const fooSink = createErrorSink('foo')

const failAsync = O.command('failAsync').as(async () => {
  throw new OblongError('failAsync single')
})
// OblongError also accepts an array of messages.
const failAsyncMultiple = O.command('failAsyncMultiple').as(async () => {
  throw new OblongError(['failAsync multi', 'failAsync ple'])
})
// A plain (non-Oblong) error, to see how natural errors surface.
const failNatural = O.command('failNatural').as(async () => {
  throw new TypeError('oh wow this is bad')
})
// Error-sink control panel: trigger failures, log/clear/dismiss messages, and
// render the contents of both the global sink and the named "foo" sink.
const Errors = O.view('Errors')
  .with({ failRaw, globalErrorSink, fooSink, failAsync, failAsyncMultiple, failNatural })
  .as((o) => (
    <>
      <div>
        <button onClick={o.failRaw}>failRaw</button>
        <button onClick={() => o.globalErrorSink.logError('Special')}>log Special</button>
        <button onClick={() => o.globalErrorSink.logError(['Special', 'Another Special'])}>
          log Special[]
        </button>
        <button onClick={() => o.fooSink.logError(`${Math.random()}`)}>Foo</button>
        <button
          onClick={() => {
            o.fooSink.clear()
            o.globalErrorSink.clear()
          }}
        >
          Clear all
        </button>
        <button onClick={o.failAsync}>failAsync</button>
        <button onClick={o.failAsyncMultiple}>failAsyncMultiple</button>
        <button onClick={o.failNatural}>failNatural</button>
      </div>
      {/* Global sink contents, each dismissible individually. */}
      <ul>
        {o.globalErrorSink.errors.map((i, index) => (
          <li key={index}>
            {i}
            <button onClick={() => o.globalErrorSink.dismiss(i)}>X</button>
          </li>
        ))}
      </ul>
      <h5>Foo:</h5>
      <ul>
        {o.fooSink.errors.map((i, index) => (
          <li key={index}>
            {i}
            <button onClick={() => o.fooSink.dismiss(i)}>X</button>
          </li>
        ))}
      </ul>
    </>
  ))
// Custom event wiring: `onMyEvent` runs whenever `myEvent` is emitted,
// logging the current state and dispatching a follow-up action.
const myEvent = makeEvent('myEvent')
const onMyEvent = O.command('onMyEvent')
  .with({ dispatch, getState })
  .on(myEvent)
  .as((o) => {
    console.log('myEvent seen')
    console.log(o.getState())
    o.dispatch({ type: 'waddup', payload: 'foo' })
  })

const Events = O.view()
  .with({ myEvent, onMyEvent })
  .as((o) => (
    <div>
      <h2>Events</h2>
      <button onClick={() => o.myEvent.emit()}>Emit</button>
    </div>
  ))
// Root playground component: mounts one instance of every demo defined above.
export const App = () => (
  <>
    <h1>Playground</h1>
    <Greeter />
    <Profile />
    <LocationViewer />
    <BananaRoute />
    <LoaderTest user={{ name: 'Titus' }} />
    <DoWierdTest />
    <Counter />
    <Hydrate />
    <Errors />
    <Events />
  </>
)
|
<filename>src/api/events.js<gh_stars>0
import request from '@/utils/request'
// Fetch the event list for the admin view; `params` is sent as the query string.
export function eventsList(params) {
  return request({
    url: '/admin/eventList',
    method: 'get',
    params
  })
}
// POST the approval payload for an event.
// NOTE(review): url lacks the leading slash used elsewhere ('/admin/...');
// with a path-bearing baseURL the two forms resolve differently — verify intended.
export function approveEvent(data) {
  return request({
    url: 'admin/approveEvent',
    method: 'post',
    data
  })
}
// /admin/addEvent
// Create a new event from the given payload.
// NOTE(review): url lacks a leading slash (cf. eventsList) — confirm intended.
export function addEvent(data) {
  return request({
    url: 'admin/addEvent',
    method: 'post',
    data
  })
}
|
<gh_stars>0
/**
 * Interface defining the most basic operations of a heap.
 */
export default interface HeapInterface {
  /**
   * If a node is larger than its parent (max-heap) or smaller (min-heap),
   * swap it with its parent; this moves the node upward in the array.
   */
  shiftUp (): void;
  /**
   * Append a new element at the tail of the heap, then repair the heap
   * with shiftUp.
   * Time complexity: O(log n)
   * @param value the new element inserted at the tail
   */
  insert (value: any): void;
  /**
   * Remove and return the maximum (max-heap) or minimum (min-heap) value.
   * To fill the gap left by the removed node, the last element is moved to
   * the root position and the heap is repaired with shiftDown.
   * Time complexity: O(log n)
   * @returns the removed value
   */
  remove (): any;
  /**
   * Like remove(), but removes an arbitrary node, not just the root.
   * Uses shiftDown() when the node is out of order with its children, and
   * shiftUp() when it is out of order with its parent.
   * @param index index into the heap array
   */
  removeAtIndex (index: number): void;
  /**
   * Assign a smaller value (min-heap) or larger value (max-heap) to a node.
   * Since this breaks the heap property, shiftUp() is used to repair it.
   * @param index index into the heap array
   * @param value the replacement value for the node
   */
  replace (index: number, value: any): void;
  /**
   * Search for a value's index in the heap.
   * Heaps are not built for fast search, but replace() and removeAtIndex()
   * need a node's array index, so it must be located first.
   * Time complexity: O(n)
   * @param value the value to search for
   * @returns the index in the heap array
   */
  search (value: any): number;
  /**
   * Convert an (unsorted) array into a heap by repeatedly calling insert().
   * @param array the array to convert into a heap
   */
  buildHeap (array: any[]): void;
  /**
   * Since the heap is just an array, its max-/min-heap property can be used
   * to sort the array from low to high.
   * Time complexity: O(n log n)
   */
  sort (): void;
  /**
   * Return the maximum (max-heap) or minimum (min-heap) value without
   * removing it.
   * Time complexity: O(1)
   * @returns the max (or min) value in the heap
   */
  peek (): any;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.