text
stringlengths 1
1.05M
|
|---|
<reponame>digirati-co-uk/taxonomy-manager<gh_stars>1-10
package com.digirati.taxman.common.taxonomy;
import com.digirati.taxman.common.rdf.RdfModel;
import com.digirati.taxman.common.rdf.RdfModelContext;
import com.digirati.taxman.common.rdf.annotation.RdfConstructor;
import com.digirati.taxman.common.rdf.annotation.RdfContext;
import com.digirati.taxman.common.rdf.annotation.RdfType;
import com.google.common.collect.Multimap;
import com.google.common.collect.Streams;
import org.apache.jena.rdf.model.Resource;
import org.apache.jena.rdf.model.Statement;
import org.apache.jena.vocabulary.DCTerms;
import org.apache.jena.vocabulary.RDFS;
import java.util.Map;
import java.util.stream.Stream;
/**
 * Models the RDF representation of a project.
 *
 * <p>A project resource exposes an identifier ({@code dcterms:identifier}),
 * a set of titles ({@code dcterms:title}), and links to the concept schemes
 * it contains ({@code dcterms:hasPart}).
 */
@RdfType("http://www.w3.org/2000/01/rdf-schema#Dataset")
@RdfContext({"dcterms=" + DCTerms.NS, "rdfs=" + RDFS.uri})
public class ProjectModel implements RdfModel {

    /** Context wrapping the underlying RDF resource for this model. */
    private final RdfModelContext context;

    /**
     * Creates a project model from a resolved RDF context.
     *
     * @param context the RDF context backing this model
     */
    @RdfConstructor
    public ProjectModel(RdfModelContext context) {
        this.context = context;
    }

    /** Returns the RDF context backing this model. */
    public RdfModelContext getContext() {
        return context;
    }

    /**
     * Gets the identifier of this project.
     *
     * @return the {@code dcterms:identifier} literal of the project resource
     */
    public String getSlug() {
        return getStringProperty(DCTerms.identifier);
    }

    /**
     * Gets the titles of this project.
     *
     * @return the {@code dcterms:title} values; presumably keyed by language
     *     tag — confirm against {@code RdfModel#getPlainLiteral}
     */
    public Multimap<String, String> getTitle() {
        return getPlainLiteral(DCTerms.title);
    }

    /**
     * Gets a stream of all {@link ConceptSchemeModel}s associated with this project.
     *
     * @return the concept schemes associated with this project
     */
    public Stream<ConceptSchemeModel> getConceptSchemes() {
        return getResources(ConceptSchemeModel.class, DCTerms.hasPart);
    }
}
|
/**
* @license
* Copyright 2016 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @suppress {duplicate} */
var shakaDemo = shakaDemo || {};
/**
 * Wires up the debug-info refresh timer, player event hooks, and the
 * change handlers on the three track <select> lists.
 * @private
 */
shakaDemo.setupInfo_ = function() {
  window.setInterval(shakaDemo.updateDebugInfo_, 125);
  shakaDemo.player_.addEventListener(
      'trackschanged', shakaDemo.onTracksChanged_);
  shakaDemo.player_.addEventListener(
      'adaptation', shakaDemo.onAdaptation_);
  ['videoTracks', 'audioTracks', 'textTracks'].forEach(function(id) {
    document.getElementById(id).addEventListener(
        'change', shakaDemo.onTrackSelected_);
  });
};
/**
 * Rebuilds the video/audio/text track lists from the player's current
 * tracks, sorted by language then bandwidth.
 * @param {!Event} event
 * @private
 */
shakaDemo.onTracksChanged_ = function(event) {
  var selectors = {
    video: document.getElementById('videoTracks'),
    audio: document.getElementById('audioTracks'),
    text: document.getElementById('textTracks')
  };
  // Per-type human-readable labels for each track <option>.
  var describe = {
    video: function(track) {
      return track.width + 'x' + track.height + ', ' +
          track.bandwidth + ' bits/s';
    },
    audio: function(track) {
      return 'language: ' + track.language + ', ' +
          track.bandwidth + ' bits/s';
    },
    text: function(track) {
      return 'language: ' + track.language + ' ' +
          '(' + track.kind + ')';
    }
  };

  // Drop every previously listed track.
  Object.keys(selectors).forEach(function(type) {
    var list = selectors[type];
    while (list.firstChild) {
      list.removeChild(list.firstChild);
    }
  });

  // Sort by language first, then by bandwidth, and repopulate the lists.
  var tracks = shakaDemo.player_.getTracks();
  tracks.sort(function(t1, t2) {
    if (t1.language) {
      var byLanguage = t1.language.localeCompare(t2.language);
      if (byLanguage) return byLanguage;
    }
    return t1.bandwidth - t2.bandwidth;
  });
  tracks.forEach(function(track) {
    var list = selectors[track.type];
    if (!list) return;
    var option = document.createElement('option');
    option.textContent = describe[track.type](track);
    option.track = track;
    option.value = track.id;
    option.selected = track.active;
    list.appendChild(option);
  });
};
/**
 * Marks the currently-active tracks as selected in the track lists.
 * @param {!Event} event
 * @private
 */
shakaDemo.onAdaptation_ = function(event) {
  var selectors = {
    video: document.getElementById('videoTracks'),
    audio: document.getElementById('audioTracks'),
    text: document.getElementById('textTracks')
  };
  // Find the rows for the active tracks and select them.
  shakaDemo.player_.getTracks().forEach(function(track) {
    if (!track.active) return;
    var list = selectors[track.type];
    for (var i = 0; i < list.options.length; ++i) {
      var option = list.options[i];
      if (option.value == track.id) {
        option.selected = true;
        break;
      }
    }
  });
};
/**
 * Applies the track chosen in one of the <select> lists to the player.
 * @param {!Event} event
 * @private
 */
shakaDemo.onTrackSelected_ = function(event) {
  var list = event.target;
  var chosen = list.options[list.selectedIndex].track;
  var player = shakaDemo.player_;
  player.selectTrack(chosen, /* clearBuffer */ true);
  // selectTrack() may change the adaptation setting; mirror the new
  // state in the adaptation checkbox.
  document.getElementById('enableAdaptation').checked =
      player.getConfiguration().abr.enabled;
};
/**
 * Refreshes the on-page debug readouts: current video resolution and how
 * many seconds of media are buffered behind/ahead of the playhead.
 * @private
 */
shakaDemo.updateDebugInfo_ = function() {
  var video = shakaDemo.video_;
  document.getElementById('videoResDebug').textContent =
      video.videoWidth + ' x ' + video.videoHeight;

  var behind = 0;
  var ahead = 0;
  var now = video.currentTime;
  var buffered = video.buffered;
  // Only the buffered range containing the playhead matters.
  for (var i = 0; i < buffered.length; ++i) {
    if (buffered.start(i) <= now && buffered.end(i) >= now) {
      ahead = buffered.end(i) - now;
      behind = now - buffered.start(i);
      break;
    }
  }
  document.getElementById('bufferedDebug').textContent =
      '- ' + behind.toFixed(0) + 's / ' + '+ ' + ahead.toFixed(0) + 's';
};
|
<filename>app.js
'use strict';
const fs = require('fs');
const Router = require('router');
const router = Router();
const fh = require('finalhandler');
const dotenv = require('dotenv'); // environment variable access instance
dotenv.config(); // loading env variables
const server = require('http').createServer(); // http server handler
// Map of routes to the static templates served for them.
const template = {
  '/': 'app.html'
};

// Hand every request to the router; finalhandler responds to misses.
server.on('request', (request, response) => {
  router(request, response, fh(request, response));
});
server.listen(process.env.PORT);
console.log(process.env.PORT);

// Routing logic
router.get('/', (request, response) => {
  response.writeHead(200, {'Content-Type': 'text/html'});
  fs.createReadStream(template['/']).pipe(response);
});
// Serve a single static file with a fixed content type.
const serveStatic = (path, type) => (request, response) => {
  response.writeHead(200, {'Content-Type': type});
  fs.createReadStream(path).pipe(response);
};

// manifest.json
router.get('/manifest.json', serveStatic('manifest.json', 'application/json'));
// service-worker.js
router.get('/service-worker.js', serveStatic('service-worker.js', 'text/javascript'));
// css: serve a stylesheet only if it actually exists in ./css
// (the directory listing doubles as a whitelist against path tricks).
router.get(/css\/\w+\.css/, (request, response) => {
  fs.readdir('css', (error, files) => {
    if (error) return console.error(error);
    const found = files.some((file) => `/css/${file}` === request.url);
    if (found) {
      response.writeHead(200, {'Content-type': 'text/css'});
      fs.createReadStream(request.url.slice(1)).pipe(response);
      return;
    }
    response.writeHead(404);
    response.end();
  });
});
// js: serve a script from ./js if present.
// FIX: the content type was 'text/js', which is not a registered MIME type;
// browsers honoring X-Content-Type-Options: nosniff refuse to execute it.
router.get(/js\/[\w.-]+\.js/, (request, response) => {
  fs.readdir('js', (error, files) => {
    if (error) return console.error(error);
    for (let i = 0; i < files.length; i++) {
      if (`/js/${files[i]}` === request.url) {
        response.writeHead(200, {'Content-type': 'text/javascript'});
        fs.createReadStream(request.url.slice(1)).pipe(response);
        return true;
      }
    }
    // No matching file in ./js — report not found.
    response.writeHead(404);
    response.end();
  });
});
// images: serve an image from ./img, choosing the Content-Type from the
// file extension.
router.get(/img\/[\w\.\-]+/, (request, response) => {
  let dir = 'img';
  let url = request.url;
  fs.readdir(dir, (error, files) => {
    if (error) return console.error(error);
    let ext = url.slice(url.lastIndexOf('.') + 1);
    for (let i = 0; i < files.length; i++) {
      if (`/${dir}/${files[i]}` === request.url) {
        if (ext === 'svg') {
          response.writeHead(200, {'Content-type': 'image/svg+xml'});
        } else if (['jpg', 'jpeg'].includes(ext)) {
          // BUG FIX: the original `ext in ['jpg, jpeg']` tested `ext`
          // against the array's *indices* ('0') — and the literal was a
          // single 'jpg, jpeg' string — so this branch never fired and
          // JPEGs were served without a Content-Type header.
          response.writeHead(200, {'Content-type': 'image/jpeg'});
        } else if (ext === 'png') {
          response.writeHead(200, {'Content-type': 'image/png'});
        }
        fs.createReadStream(url.slice(1)).pipe(response);
        return true;
      }
    }
    response.writeHead(404);
    response.end();
  });
});
// sound: serve an mp3 from ./sound if it exists.
router.get(/sound\/.*/, (request, response) => {
  fs.readdir('sound', (error, files) => {
    if (error) return console.error(error);
    const match = files.find((file) => `/sound/${file}` === request.url);
    if (match) {
      response.writeHead(200, {'Content-type': 'audio/mpeg'});
      fs.createReadStream(request.url.slice(1)).pipe(response);
      return;
    }
    response.writeHead(404);
    response.end();
  });
});
|
<gh_stars>0
package edu.cmu.lti.nlp.amr.FastFeatureVector
import edu.cmu.lti.nlp.amr._
import edu.cmu.lti.nlp.amr.Train.AbstractFeatureVector
import scala.math.sqrt
//import scala.collection.mutable.Map
import scala.collection.concurrent.{TrieMap => Map}
import scala.collection.immutable
import scala.io.Source
// fastmul / fastmul2: lazy "scalar * vector" pairs produced by the implicit
// FastMulAssoc / FastMul2Assoc wrappers, so `d * v` can be consumed by
// += / -= below without materializing an intermediate scaled vector.
case class fastmul(scale: Double, v: List[(String, ValuesList)])
case class fastmul2(scale: Double, v: FeatureVector)
// Trickyness below: see p.452 Programming Scala 2nd Edition (21.5 Implicit conversions)
case class FastMulAssoc(x: Double) { def * (v: List[(String, ValuesList)]) = fastmul(x, v) }
case class FastMul2Assoc(x: Double) { def * (v: FeatureVector) = fastmul2(x, v) }
// def * (v: FeatureVector) = fastmul(x, v.fmap.view.toList.map(x => (x._1, ValuesList(x._2.unconjoined, x._2.conjoined.view.toList.map(y => Conjoined(y._1, y._2)).toList))).toList) // TODO: change this to be faster
// In package.scala:
//implicit def doubleToFastMulAssoc(x: Double) = new FastMulAssoc(x)
//implicit def doubleToFastMul2Assoc(x: Double) = new FastMul2Assoc(x)

// A single (labelIndex, weight) entry for a label-conjoined feature.
case class Conjoined(labelIndex: Int, value: Double)
// List form of a feature's weights: one unconjoined weight plus a list of
// label-conjoined weights.
case class ValuesList(var unconjoined: Double, var conjoined: List[Conjoined])
// Map form of a feature's weights, keyed by label index.
case class ValuesMap(var unconjoined: Double, var conjoined: Map[Int, Double]) {
    override def clone : ValuesMap = { ValuesMap(unconjoined, conjoined.clone) }
}
object ValuesMap {
    // Empty weights: zero unconjoined and no conjoined entries.
    def apply() : ValuesMap = {
        return ValuesMap(0.0, Map())
    }
}
// A feature's input value for scoring: unconjoined and conjoined parts.
case class Value(unconjoined: Double, conjoined: Double)
// A sparse feature vector keyed by feature-name string. Every feature has
// one "unconjoined" weight plus optional weights conjoined with output
// labels (stored by index into labelset). Backed by a concurrent TrieMap
// (aliased to Map in this file's imports).
case class FeatureVector(labelset : Array[String],
                         fmap : Map[String, ValuesMap] = Map()) extends AbstractFeatureVector(labelset) {
    // Reverse lookup from label name to its index in labelset.
    val labelToIndex : Map[String, Int] = Map()
    labelToIndex ++= labelset.zipWithIndex

    // Scores every label against the feature list v and passes each
    // (labelIndex, score) to f. The unconjoined products are shared by all
    // labels; the conjoined products are label-specific.
    def iterateOverLabels(v: List[(String, Value)], f: (Conjoined) => Unit) {
        var unconjoinedTotal : Double = 0.0
        val conjoinedTotal : Array[Double] = labelset.map(x => 0.0)
        for ((feature, value) <- v if fmap.contains(feature)) {
            val myValues : ValuesMap = fmap(feature)
            unconjoinedTotal += myValues.unconjoined * value.unconjoined
            for (myValue <- myValues.conjoined) {
                conjoinedTotal(myValue._1) += myValue._2 * value.conjoined
            }
        }
        for (i <- 0 until labelset.size) {
            f(Conjoined(i, unconjoinedTotal + conjoinedTotal(i)))
        }
    }

    // Like iterateOverLabels, but each input feature also carries an explicit
    // per-label weight map multiplied against this vector's unconjoined
    // weight for that feature.
    def iterateOverLabels2(v: List[(String, Value, immutable.Map[Int, Double])], f: (Conjoined) => Unit) {
        var unconjoinedTotal : Double = 0.0
        val conjoinedTotal : Array[Double] = labelset.map(x => 0.0)
        for ((feature, value, conjoinedMap) <- v if fmap.contains(feature)) {
            val myValues : ValuesMap = fmap(feature)
            unconjoinedTotal += myValues.unconjoined * value.unconjoined
            for ((labelIndex, value) <- conjoinedMap) { // reusing value name, but it's ok
                conjoinedTotal(labelIndex) += myValues.unconjoined * value
            }
            for (myValue <- myValues.conjoined) {
                conjoinedTotal(myValue._1) += myValue._2 * value.conjoined
            }
        }
        for (i <- 0 until labelset.size) {
            f(Conjoined(i, unconjoinedTotal + conjoinedTotal(i)))
        }
    }

    // Looks up a feature weight: unconjoined when label == None, otherwise
    // conjoined with the given label index. Returns 0.0 when absent.
    def apply(feature: String, label: Option[Int]) : Double = {
        if (fmap.contains(feature)) {
            if (label == None) {
                fmap(feature).unconjoined
            } else {
                fmap(feature).conjoined.getOrElse(label.get, 0.0)
            }
        } else {
            0.0
        }
    }

    // Folds the list v into this vector entry-by-entry. f receives
    // (feature, label, currentWeight, incomingWeight) and returns the new
    // weight; missing entries are created on demand.
    def updateList(v: List[(String, ValuesList)], f: (String, Option[Int], Double, Double) => Double) {
        for ((feature, value) <- v) {
            val myValues : ValuesMap = fmap.getOrElseUpdate(feature, ValuesMap(0.0, Map()))
            myValues.unconjoined = f(feature, None, myValues.unconjoined, value.unconjoined)
            for (conjoined <- value.conjoined) {
                myValues.conjoined(conjoined.labelIndex) = f(feature,
                                                             Some(conjoined.labelIndex),
                                                             myValues.conjoined.getOrElse(conjoined.labelIndex, 0.0),
                                                             conjoined.value)
            }
        }
    }

    // AbstractFeatureVector overloads just downcast to FeatureVector.
    def += (v: AbstractFeatureVector) = { this.+=(v.asInstanceOf[FeatureVector]) }
    def -= (v: AbstractFeatureVector) = { this.-=(v.asInstanceOf[FeatureVector]) }
    def += (v: List[(String, ValuesList)]) = updateList(v, (feat, label, x, y) => x + y)
    def -= (v: List[(String, ValuesList)]) = updateList(v, (feat, label, x, y) => x - y)

    // Applies f(feature, label, weight) to every stored weight in place.
    // NOTE(review): mutates values.conjoined while iterating it; this relies
    // on the concurrent TrieMap's iteration semantics — confirm if the Map
    // alias ever changes.
    def updateAll(f: (String, Option[Int], Double) => Double) {
        for ((feature, values) <- fmap) {
            values.unconjoined = f(feature, None, values.unconjoined)
            for (conjoined <- values.conjoined) {
                values.conjoined(conjoined._1) = f(feature,
                                                   Some(conjoined._1),
                                                   conjoined._2)
            }
        }
    }
    def *= (scalar : Double) = updateAll((feat, label, x) => scalar * x)

    // Folds another vector into this one. Iterates over v's entries, so
    // entries present only in this vector are left untouched.
    def update(v: FeatureVector, f: (String, Option[Int], Double, Double) => Double) {
        for ((feature, values) <- v.fmap) {
            val myValues : ValuesMap = fmap.getOrElseUpdate(feature, ValuesMap(0.0, Map()))
            myValues.unconjoined = f(feature, None, myValues.unconjoined, values.unconjoined)
            for (conjoined <- values.conjoined) {
                myValues.conjoined(conjoined._1) = f(feature,
                                                     Some(conjoined._1),
                                                     myValues.conjoined.getOrElse(conjoined._1, 0.0),
                                                     conjoined._2)
            }
        }
    }
    def += (v: FeatureVector) = update(v, (feat, label, x, y) => x + y)
    def -= (v: FeatureVector) = update(v, (feat, label, x, y) => x - y)

    // Like update, but iterates over THIS vector's entries (v supplies 0.0
    // defaults), so entries absent here are never created.
    def updateAll(v: FeatureVector, f: (String, Option[Int], Double, Double) => Double) { // TODO: maybe this should be f: (String, ValuesMap, ValuesMap) => ValuesMap. And also have + and - for ValuesMap objects
        for ((feature, myValues) <- fmap) {
            val values = v.fmap.getOrElse(feature, ValuesMap())
            myValues.unconjoined = f(feature, None, myValues.unconjoined, values.unconjoined)
            for (conjoined <- myValues.conjoined) {
                myValues.conjoined(conjoined._1) = f(feature,
                                                     Some(conjoined._1),
                                                     conjoined._2,
                                                     values.conjoined.getOrElse(conjoined._1, 0.0))
            }
        }
    }
    // Element-wise division by v; entries where v is 0.0 are left unchanged.
    def dotDivide(v: FeatureVector) = updateAll(v, (feat, label, x, y) => { if ( y==0.0 ) { x } else { x / y } } )

    // Folds in only the features named in featNames (created here if
    // missing), reading the incoming weights from v.
    def updateWithFilter(v: FeatureVector, featNames: Iterator[String], f: (String, Option[Int], Double, Double) => Double) {
        for (feature <- featNames) {
            val values = v.fmap.getOrElse(feature, ValuesMap())
            val myValues : ValuesMap = fmap.getOrElseUpdate(feature, ValuesMap())
            myValues.unconjoined = f(feature, None, myValues.unconjoined, values.unconjoined)
            for (conjoined <- values.conjoined) {
                myValues.conjoined(conjoined._1) = f(feature,
                                                     Some(conjoined._1),
                                                     myValues.conjoined.getOrElse(conjoined._1, 0.0),
                                                     conjoined._2)
            }
        }
    }
    def plusEqFilter(v: FeatureVector, featNames: Iterator[String]) = updateWithFilter(v, featNames, (feat, label, x, y) => x + y)
    def minusEqFilter(v: FeatureVector, featNames: Iterator[String]) = updateWithFilter(v, featNames, (feat, label, x, y) => x - y)

    // Dot product against a list-form vector; only features (and labels)
    // present in both sides contribute.
    def dot(v: List[(String, ValuesList)]) : Double = {
        var total : Double = 0.0
        for ((feature, value) <- v if fmap.contains(feature)) {
            val myValues : ValuesMap = fmap(feature)
            total += myValues.unconjoined * value.unconjoined
            for (conjoined <- value.conjoined if myValues.conjoined.contains(conjoined.labelIndex)) {
                total += myValues.conjoined(conjoined.labelIndex) * conjoined.value
            }
        }
        return total
    }

    // Dot product against another FeatureVector.
    def dot(v: FeatureVector) : Double = {
        //logger(1, "Computing dot product")
        var total : Double = 0.0
        for ((feature, value) <- v.fmap if fmap.contains(feature)) {
            val myValues : ValuesMap = fmap(feature)
            total += myValues.unconjoined * value.unconjoined
            //logger(1, feature + " "+myValues.unconjoined.toString+" * "+value.unconjoined.toString)
            for (conjoined <- value.conjoined if myValues.conjoined.contains(conjoined._1)) {
                total += myValues.conjoined(conjoined._1) * conjoined._2
                //logger(1, feature + "+L="+labelset(conjoined._1)+" "+myValues.conjoined(conjoined._1).toString+" * "+conjoined._2.toString)
            }
        }
        return total
    }
    //def l2norm : Double = sqrt(this.dot(this))

    // Scaled accumulation: this += scale * v, without building an
    // intermediate scaled vector (see fastmul / fastmul2 above).
    def += (m: fastmul) = updateList(m.v, (feat, label, x, y) => x + m.scale * y)
    def -= (m: fastmul) = updateList(m.v, (feat, label, x, y) => x - m.scale * y)
    def += (m: fastmul2) = update(m.v, (feat, label, x, y) => x + m.scale * y)
    def -= (m: fastmul2) = update(m.v, (feat, label, x, y) => x - m.scale * y)
    def plusEqFilter(m: fastmul2, featNames: Iterator[String]) = updateWithFilter(m.v, featNames, (feat, label, x, y) => x + m.scale * y)
    def minusEqFilter(m: fastmul2, featNames: Iterator[String]) = updateWithFilter(m.v, featNames, (feat, label, x, y) => x - m.scale * y)

    /*def nonzero : Boolean = {
        var result = false
        for ((feat, value) <- fmap) {
            result = result || (value != 0.0)
        }
        return result
    }
    def slice(v: FeatureVector) : FeatureVector = {
        val f = new FeatureVector()
        for ((feat, _) <- v.fmap) {
            f.fmap(feat) = fmap.getOrElse(feat,0.0)
        }
        return f
    } */

    // Replaces this vector's contents with entries parsed from iterator.
    // Each line is either "featurename 1.0" or "featurename+L=label 1.0".
    def read(iterator: Iterator[String]) {
        val regex = ("""(.*?)(\+L=("""+labelset.mkString("|")+"""))?[ \t]([^ \t]*)""").r // .*? is non-greedy
        // (feature, _, label, value)
        // matches featurename+L=label 1.0
        fmap.clear()
        for (line <- iterator) {
            val regex(feature, _, label, value) = line
            if (!fmap.contains(feature)) {
                fmap(feature) = ValuesMap(0.0, Map())
            }
            if (label == null) {
                fmap(feature).unconjoined = value.toDouble
            } else {
                fmap(feature).conjoined(labelToIndex(label)) = value.toDouble // TODO: catch invalid label errors and print labels
            }
        }
    }

    // Loads the vector from a file in the format read() understands.
    def fromFile(filename: String) {
        val iterator = Source.fromFile(filename).getLines()
        read(iterator)
    }

    // Saves the vector to a UTF-8 file in the format read() understands.
    def toFile(filename: String) {
        val file = new java.io.PrintWriter(new java.io.File(filename), "UTF-8")
        try { file.print(this.toString) }
        finally { file.close }
    }

    // Serializes all non-zero weights, one per line, sorted for stable output.
    override def toString() : String = {
        var strings : List[String] = List()
        for ((feature, values) <- fmap) {
            if (values.unconjoined != 0.0) {
                strings = feature + " " + values.unconjoined.toString + "\n" :: strings
            }
            for ((labelIndex, value) <- values.conjoined if value != 0.0) {
                strings = feature + "+L=" + labelset(labelIndex) + " " + value.toString + "\n" :: strings
            }
        }
        return strings.sorted.mkString
    }

    // Same as toString but without sorting (cheaper for large vectors).
    def unsorted() : String = {
        val string = new StringBuilder
        for ((feature, values) <- fmap) {
            if (values.unconjoined != 0.0) {
                string.append(feature + " " + values.unconjoined.toString + "\n")
            }
            for ((labelIndex, value) <- values.conjoined if value != 0.0) {
                string.append(feature + "+L=" + labelset(labelIndex) + " " + value.toString + "\n")
            }
        }
        return string.toString
    }

    // Sets one (feature, optional label) weight, creating the entry if needed.
    def set(input: ((String, Option[Int]), Double)) {
        val ((feature, label), value) = input
        if (!fmap.contains(feature)) {
            fmap(feature) = ValuesMap(0.0, Map())
        }
        if (label == None) {
            fmap(feature).unconjoined = value
        } else {
            fmap(feature).conjoined(label.get) = value
        }
    }

    // Returns a deep-copied vector keeping only features for which f holds.
    def filter(f: (String) => Boolean) : FeatureVector = {
        val newvec = FeatureVector(labelset)
        for ((feature, value) <- fmap if f(feature)) {
            newvec.fmap(feature) = value.clone
        }
        return newvec
    }
}
object FeatureVector {
    // Builds a vector over labelset pre-loaded with the entries in v.
    def apply(labelset: Array[String], v: List[(String, ValuesList)]) : FeatureVector = {
        val vec = FeatureVector(labelset)
        vec += v
        vec
    }
}
|
// Source : https://leetcode.com/problems/longest-increasing-subsequence/
// Author : <NAME>
/**
* @param {number[]} nums
* @return {number}
*/
// Runtime: 152 ms
// Your runtime beats 100.00% of javascript submissions.
function binarySearch(a, target) {
var start = 0
, end = a.length - 1;
while(start <= end) {
var mid = ~~((start + end) >> 1);
if (a[mid] >= target)
end = mid - 1;
else
start = mid + 1;
}
return end;
}
var lengthOfLIS = function(nums) {
var a = [];
nums.forEach(function(item) {
var index = binarySearch(a, item) + 1;
if (a[index] === undefined)
a[index] = item;
else
a[index] = Math.min(a[index], item);
});
return a.length;
};
|
#!/usr/bin/env bats
load '../lib/helper'
load '../bats/extensions/bats-support/load'
load '../bats/extensions/bats-assert/load'
load '../bats/extensions/bats-file/load'
# Running `enc` with no file argument must fail with a usage error.
@test "enc: helm enc" {
    run helm secrets enc
    assert_failure
    assert_output --partial 'Error: secrets file required.'
}

# `enc --help` prints the subcommand's usage text.
@test "enc: helm enc --help" {
    run helm secrets enc --help
    assert_success
    assert_output --partial 'Encrypt secrets'
}

# Encrypting a non-existent file must fail with a clear message.
@test "enc: File not exits" {
    run helm secrets enc nonexists
    assert_failure
    assert_output --partial 'File does not exist: nonexists'
}
# Round-trip: encrypt the sops fixture in place, then `view` must still show
# the decrypted content.
@test "enc: Encrypt secrets.yaml" {
    if ! is_driver "sops"; then
        skip
    fi
    FILE="${TEST_TEMP_DIR}/assets/values/${HELM_SECRETS_DRIVER}/secrets.dec.yaml"
    run helm secrets enc "${FILE}"
    assert_output --partial "Encrypting ${FILE}"
    assert_output --partial "Encrypted secrets.dec.yaml"
    run helm secrets view "${FILE}"
    assert_success
    assert_output --partial 'global_secret: '
    assert_output --partial 'global_bar'
}

# Same round-trip against a differently named fixture.
@test "enc: Encrypt some-secrets.yaml" {
    if ! is_driver "sops"; then
        skip
    fi
    FILE="${TEST_TEMP_DIR}/assets/values/${HELM_SECRETS_DRIVER}/some-secrets.dec.yaml"
    run helm secrets enc "${FILE}"
    assert_output --partial "Encrypting ${FILE}"
    assert_output --partial "Encrypted some-secrets.dec.yaml"
    run helm secrets view "${FILE}"
    assert_success
    assert_output --partial 'global_secret: '
    assert_output --partial 'global_bar'
}

# When a sibling "<file>.dec" exists, enc should consume it and report the
# "<src> to <dst>" form of the message.
@test "enc: Encrypt secrets.yaml.dec" {
    if ! is_driver "sops"; then
        skip
    fi
    FILE="${TEST_TEMP_DIR}/assets/values/${HELM_SECRETS_DRIVER}/secrets.dec.yaml"
    cp "${FILE}" "${FILE}.dec"
    run helm secrets enc "${FILE}"
    assert_output --partial "Encrypting ${FILE}"
    assert_output --partial "Encrypted ./secrets.dec.yaml.dec to secrets.dec.yaml"
    run helm secrets view "${FILE}"
    assert_success
    assert_output --partial 'global_secret: '
    assert_output --partial 'global_bar'
}

# Paths containing special characters must work too (not supported on
# Windows, hence the skip).
@test "enc: Encrypt secrets.yaml + special char directory name" {
    if ! is_driver "sops"; then
        skip
    fi
    if on_windows; then
        skip "Skip on Windows"
    fi
    FILE="${SPECIAL_CHAR_DIR}/assets/values/${HELM_SECRETS_DRIVER}/secrets.dec.yaml"
    run helm secrets enc "${FILE}"
    assert_output --partial "Encrypting ${FILE}"
    assert_output --partial "Encrypted secrets.dec.yaml"
    run helm secrets view "${FILE}"
    assert_success
    assert_output --partial 'global_secret: '
    assert_output --partial 'global_bar'
}

# A custom decrypted-file suffix set via HELM_SECRETS_DEC_SUFFIX must be
# honored when locating the file to encrypt.
@test "enc: Encrypt secrets.yaml with HELM_SECRETS_DEC_SUFFIX" {
    if ! is_driver "sops"; then
        skip
    fi
    FILE="${TEST_TEMP_DIR}/assets/values/${HELM_SECRETS_DRIVER}/secrets.dec.yaml"
    cp "${FILE}" "${FILE}.test"
    HELM_SECRETS_DEC_SUFFIX=.yaml.test
    export HELM_SECRETS_DEC_SUFFIX
    run helm secrets enc "${FILE}"
    assert_success
    assert_output --partial "Encrypting ${FILE}"
    assert_output --partial "Encrypted ./secrets.dec.yaml.test to secrets.dec.yaml"
    run helm secrets view "${FILE}"
    assert_success
    assert_output --partial 'global_secret: '
    assert_output --partial 'global_bar'
}

# A freshly created plaintext file can be encrypted and then decrypted back
# to a ".dec" file with the original content.
@test "enc: Encrypt secrets.tmp.yaml" {
    if ! is_driver "sops"; then
        skip
    fi
    FILE="${TEST_TEMP_DIR}/assets/values/${HELM_SECRETS_DRIVER}/secrets.tmp.yaml"
    YAML="hello: world"
    echo "${YAML}" > "${FILE}"
    run helm secrets enc "${FILE}"
    assert_success
    assert_output --partial "Encrypting ${FILE}"
    run helm secrets dec "${FILE}"
    assert_success
    assert_file_exist "${FILE}.dec"
    assert_file_contains "${FILE}.dec" 'hello: world'
}
|
// On load, wire the admin form: every click inside it POSTs the clicked
// element's name to /admin as JSON.
// NOTE(review): this listens for "click" rather than "submit" — presumably
// intentional (per-button actions); confirm against the server handler.
window.onload = () => {
  const form = document.querySelector(".form");
  form.addEventListener("click", (a) => {
    const payload = { action: a.target.name };
    fetch("/admin", {
      method: "POST",
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload)
    });
  });
};
|
import multiprocessing as mp
def sum_of_squares(numbers):
    """Return the sum of the squares of the values in ``numbers``."""
    total = 0
    for value in numbers:
        total += value ** 2
    return total
def main():
    """Split a list of numbers across worker processes and print the sum of squares.

    Each worker receives one chunk and computes a partial sum; the partial
    sums are added together at the end.
    """
    numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]  # Input list of numbers
    num_processes = mp.cpu_count()  # Number of available CPU cores
    with mp.Pool(num_processes) as pool:
        # BUG FIX: the original chunk_size = len(numbers) // num_processes is 0
        # whenever there are more cores than numbers, and range(..., step=0)
        # raises ValueError. Clamp to at least 1 element per chunk.
        chunk_size = max(1, len(numbers) // num_processes)
        chunks = [numbers[i:i + chunk_size] for i in range(0, len(numbers), chunk_size)]
        results = pool.map(sum_of_squares, chunks)
        final_result = sum(results)
        print(f"The sum of squares of the numbers is: {final_result}")

if __name__ == '__main__':
    mp.freeze_support()
    main()
|
<gh_stars>100-1000
/**
* Created by Administrator on 2017/3/16.
*/
module.exports={
name:'环银电影城(世贸广场店)',
code:'k1045',
pol:[15,68],
adress:'江汉区解放大道686号世贸广场8楼',
movies:
[
{
title:'金刚狼3:殊死一战',
id:1,
rating:8.8,
jiamjie:'101分钟|动作|休.杰克曼',
cover:'http://localhost:8888/public/img/movie-cover/4.png',
sessions:[
[
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
],
[ {start:'10:20',
end:'12:19',
effects:'英语IMAX3D',
room:'1号厅(大)',
price:43,
discount:30},
{start:'10:40',
end:'12:39',
effects:'英语3D',
room:'7号厅',
price:35,
discount:26},
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
]
]
},
{
title:'生化危机:终章',
id:2,
rating:7.6,
jiamjie:'101分钟|动作|米拉.乔沃维奇',
cover:'http://localhost:8888/public/img/movie-cover/1.png'
,
sessions:[
[
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
],
[ {start:'10:20',
end:'12:19',
effects:'英语IMAX3D',
room:'1号厅(大)',
price:43,
discount:30},
{start:'10:40',
end:'12:39',
effects:'英语3D',
room:'7号厅',
price:35,
discount:26},
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
]
]
},
{
title:'刺客信条',
id:3,
rating:6.9,
jiamjie:'101分钟|动作|迈克尔.法斯宾德',
cover:'http://localhost:8888/public/img/movie-cover/2.png'
,
sessions:[
[
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
],
[ {start:'10:20',
end:'12:19',
effects:'英语IMAX3D',
room:'1号厅(大)',
price:43,
discount:30},
{start:'10:40',
end:'12:39',
effects:'英语3D',
room:'7号厅',
price:35,
discount:26},
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
]
]
},
{
title:'爱乐之城',
id:4,
rating:9.1,
jiamjie:'101分钟|动作|爱玛.斯通',
cover:'http://localhost:8888/public/img/movie-cover/3.png'
,
sessions:[
[
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
],
[ {start:'10:20',
end:'12:19',
effects:'英语IMAX3D',
room:'1号厅(大)',
price:43,
discount:30},
{start:'10:40',
end:'12:39',
effects:'英语3D',
room:'7号厅',
price:35,
discount:26},
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
]
]
},
{
title:'欢乐好声音',
id:5,
rating:6.7,
jiamjie:'101分钟|动作|马修.麦康纳',
cover:'http://localhost:8888/public/img/movie-cover/4.png'
,
sessions:[
[
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
],
[ {start:'10:20',
end:'12:19',
effects:'英语IMAX3D',
room:'1号厅(大)',
price:43,
discount:30},
{start:'10:40',
end:'12:39',
effects:'英语3D',
room:'7号厅',
price:35,
discount:26},
{start:'11:00',
end:'12:59',
effects:'英语3D',
room:'9号厅(大)',
price:35,
discount:26},
{start:'11:20',
end:'13:19',
effects:'英语3D',
room:'3号厅-4DX',
price:48,
discount:39}
]
]
}
]
}
|
function is_unique_string(str){
let set = new Set();
for(let i=0; i<str.length; i++){
let current_char = str[i];
if(set.has(current_char)){
return false;
}
set.add(current_char);
}
return true;
}
let result = is_unique_string("abcd");
console.log(result); // Outputs: true
|
<filename>infra-sk/modules/multi-input-sk/index.ts
import './multi-input-sk';
import './multi-input-sk.scss';
|
<filename>src/com/bgi/suggester/Suggester.java
package com.bgi.suggester;
import java.util.List;
import com.bgi.bktree.trie.Trie;
import com.bgi.suggester.trie.PTrie;
import com.bgi.util.SystemConfig;
/**
 * Provides prefix-based auto-completion suggestions backed by a {@link PTrie}.
 */
public class Suggester {

    /**
     * Trie used to answer prefix queries. Made {@code final}: the original
     * field was eagerly initialized to {@code SystemConfig.PTRIE} and then
     * overwritten by the second constructor, leaving it mutable for no reason.
     */
    private final PTrie trie;

    /** Creates a suggester over the application-wide trie from {@link SystemConfig}. */
    public Suggester() {
        this.trie = SystemConfig.PTRIE;
    }

    /**
     * Creates a suggester over a dedicated trie.
     *
     * @param trie the trie to wrap in a {@link PTrie}
     */
    public Suggester(Trie trie) {
        this.trie = new PTrie(trie);
    }

    /**
     * Looks up all known completions of the given prefix.
     *
     * @param prefix the prefix to complete
     * @return the keys in the trie starting with {@code prefix}
     */
    public List<String> autoCompletion(String prefix) {
        return trie.queryKey(prefix);
    }
}
|
<gh_stars>0
import tokenService from "./tokenService";
const BASE_URL = "/api/items/";
// POST a new item (as multipart form data) to the API, authorized with the
// stored JWT; resolves to the parsed JSON response.
export function create(createItem) {
  const options = {
    method: "POST",
    // We are sending over a picture
    // multipart/form-data < - is the content type
    body: createItem, // <- postInfoFromTheForm has to be formData
    headers: {
      Authorization: "Bearer " + tokenService.getToken(),
    },
  };
  return fetch(BASE_URL, options).then((res) => {
    // Valid login if we have a status of 2xx (res.ok)
    if (res.ok) return res.json();
    throw new Error(
      "Error submitting the Form! Hey Check the Express Terminal"
    );
  });
}
// Fetch every item from the API with the stored JWT; resolves to JSON.
export function getAll() {
  const options = {
    headers: {
      Authorization: "Bearer " + tokenService.getToken(),
    },
  };
  return fetch(BASE_URL, options).then((res) => {
    if (res.ok) return res.json();
    throw new Error("Problem Fetching Get All");
  });
}
|
<filename>pkg/core/block/block_base.go
package block
import (
"encoding/json"
"errors"
"github.com/nspcc-dev/neo-go/pkg/config/netmode"
"github.com/nspcc-dev/neo-go/pkg/core/transaction"
"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
"github.com/nspcc-dev/neo-go/pkg/encoding/address"
"github.com/nspcc-dev/neo-go/pkg/io"
"github.com/nspcc-dev/neo-go/pkg/util"
)
// Base holds the base info of a block: the hashable header fields, the
// witness script that validates it, and the lazily computed hash caches.
type Base struct {
	// Version of the block.
	Version uint32

	// PrevHash is the hash of the previous block.
	PrevHash util.Uint256

	// MerkleRoot is the root hash of the block's transaction list.
	MerkleRoot util.Uint256

	// Timestamp is a millisecond-precision timestamp.
	// The time stamp of each block must be later than previous block's time stamp.
	// Generally the difference of two block's time stamp is about 15 seconds and imprecision is allowed.
	// The height of the block must be exactly equal to the height of the previous block plus 1.
	Timestamp uint64

	// Index is the height of the block.
	Index uint32

	// NextConsensus is the contract address of the next miner.
	NextConsensus util.Uint160

	// Script is the witness used to validate the block.
	Script transaction.Witness

	// Network magic number this block belongs to. This one actually is not
	// a part of the wire-representation of Block, but it's absolutely
	// necessary for correct signing/verification.
	Network netmode.Magic

	// hash caches the hash of this block, created when binary encoded (double SHA256).
	hash util.Uint256

	// verificationHash caches the hash of the block used to verify it (single SHA256).
	verificationHash util.Uint256
}
// baseAux is used to marshal/unmarshal to/from JSON. It's almost the same
// as the original Base, but with the Nonce and NextConsensus fields
// differing (NextConsensus is a string address) and Hash added.
type baseAux struct {
	Hash          util.Uint256          `json:"hash"`
	Version       uint32                `json:"version"`
	PrevHash      util.Uint256          `json:"previousblockhash"`
	MerkleRoot    util.Uint256          `json:"merkleroot"`
	Timestamp     uint64                `json:"time"`
	Index         uint32                `json:"index"`
	NextConsensus string                `json:"nextconsensus"`
	Witnesses     []transaction.Witness `json:"witnesses"`
}
// Verify verifies the integrity of the Base.
// NOTE(review): currently a stub — it unconditionally returns true; real
// verification requires chain state (see TODO below).
func (b *Base) Verify() bool {
	// TODO: Need a persisted blockchain for this.
	return true
}
// Hash returns the block's identifying hash (double SHA256 of the signed
// part), computing and caching it on first use.
func (b *Base) Hash() util.Uint256 {
	var empty util.Uint256
	if b.hash.Equals(empty) {
		b.createHash()
	}
	return b.hash
}
// VerificationHash returns the hash of the block used to verify it (single
// SHA256 of the signed part), computing and caching it on first use.
func (b *Base) VerificationHash() util.Uint256 {
	var empty util.Uint256
	if b.verificationHash.Equals(empty) {
		b.createHash()
	}
	return b.verificationHash
}
// DecodeBinary implements the Serializable interface: it reads the hashable
// header fields followed by exactly one witness script.
func (b *Base) DecodeBinary(br *io.BinReader) {
	b.decodeHashableFields(br)
	n := br.ReadVarUint()
	if br.Err == nil && n != 1 {
		br.Err = errors.New("wrong witness count")
		return
	}
	b.Script.DecodeBinary(br)
}
// EncodeBinary implements Serializable interface. It writes the hashable
// header fields, a witness count of exactly 1, then the witness itself —
// mirroring DecodeBinary.
func (b *Base) EncodeBinary(bw *io.BinWriter) {
	b.encodeHashableFields(bw)
	bw.WriteVarUint(1)
	b.Script.EncodeBinary(bw)
}
// GetSignedPart returns serialized hashable data of the block: the network
// magic followed by the hashable header fields. This is the exact byte
// sequence that gets signed and hashed.
func (b *Base) GetSignedPart() []byte {
	buf := io.NewBufBinWriter()
	buf.WriteU32LE(uint32(b.Network))
	// No error can occur while encoding hashable fields.
	b.encodeHashableFields(buf.BinWriter)
	return buf.Bytes()
}
// createHash creates the hash of the block.
// When calculating the hash value of the block, instead of hashing the
// entire block, only the header fields are used (see encodeHashableFields):
// Version, PrevHash, MerkleRoot, Timestamp, Index and NextConsensus,
// prefixed with the network magic (see GetSignedPart).
// Since MerkleRoot already contains the hash value of all transactions,
// the modification of any transaction will influence the hash value of the block.
func (b *Base) createHash() {
	bb := b.GetSignedPart()
	// Verification hash is a single SHA256 of the signed data; the block
	// hash is the double SHA256 (SHA256 of the verification hash bytes).
	b.verificationHash = hash.Sha256(bb)
	b.hash = hash.Sha256(b.verificationHash.BytesBE())
}
// encodeHashableFields will only encode the fields used for hashing.
// see Hash() for more information about the fields.
// The field order defines the wire format and must not change.
func (b *Base) encodeHashableFields(bw *io.BinWriter) {
	bw.WriteU32LE(b.Version)
	bw.WriteBytes(b.PrevHash[:])
	bw.WriteBytes(b.MerkleRoot[:])
	bw.WriteU64LE(b.Timestamp)
	bw.WriteU32LE(b.Index)
	bw.WriteBytes(b.NextConsensus[:])
}
// decodeHashableFields decodes the fields used for hashing.
// see Hash() for more information about the fields.
// The read order mirrors encodeHashableFields and must not change.
func (b *Base) decodeHashableFields(br *io.BinReader) {
	b.Version = br.ReadU32LE()
	br.ReadBytes(b.PrevHash[:])
	br.ReadBytes(b.MerkleRoot[:])
	b.Timestamp = br.ReadU64LE()
	b.Index = br.ReadU32LE()
	br.ReadBytes(b.NextConsensus[:])
	// Make the hash of the block here so we don't need to do this
	// again.
	if br.Err == nil {
		b.createHash()
	}
}
// MarshalJSON implements json.Marshaler interface. The block is converted
// into its auxiliary JSON form (baseAux) with the hash included and the
// next-consensus script hash rendered as an address.
func (b Base) MarshalJSON() ([]byte, error) {
	return json.Marshal(baseAux{
		Hash:          b.Hash(),
		Version:       b.Version,
		PrevHash:      b.PrevHash,
		MerkleRoot:    b.MerkleRoot,
		Timestamp:     b.Timestamp,
		Index:         b.Index,
		NextConsensus: address.Uint160ToString(b.NextConsensus),
		Witnesses:     []transaction.Witness{b.Script},
	})
}
// UnmarshalJSON implements json.Unmarshaler interface. It parses the
// auxiliary JSON form, converts the next-consensus address back into a
// script hash, requires exactly one witness and finally cross-checks the
// supplied "hash" field against the hash recomputed from the decoded data.
func (b *Base) UnmarshalJSON(data []byte) error {
	var aux baseAux
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	nextC, err := address.StringToUint160(aux.NextConsensus)
	if err != nil {
		return err
	}
	if len(aux.Witnesses) != 1 {
		return errors.New("wrong number of witnesses")
	}
	b.Version = aux.Version
	b.PrevHash = aux.PrevHash
	b.MerkleRoot = aux.MerkleRoot
	b.Timestamp = aux.Timestamp
	b.Index = aux.Index
	b.NextConsensus = nextC
	b.Script = aux.Witnesses[0]
	// The fields above must be assigned before this check, since Hash()
	// recomputes the hash from them.
	if !aux.Hash.Equals(b.Hash()) {
		return errors.New("json 'hash' doesn't match block hash")
	}
	return nil
}
|
<reponame>vharsh/cattle2
package io.cattle.platform.object.meta;
import io.cattle.platform.eventing.model.Event;
import io.github.ibuildthecloud.gdapi.factory.SchemaFactory;
import io.github.ibuildthecloud.gdapi.model.Schema;
import io.github.ibuildthecloud.gdapi.util.TypeUtils;
import java.util.Map;
/**
 * Central registry of object metadata: well-known field name constants,
 * schema link/relationship lookup and state-transition information.
 */
public interface ObjectMetaDataManager {

    /** Transitioning state markers, mirrored from {@link Event}. */
    String TRANSITIONING_YES = Event.TRANSITIONING_YES;
    String TRANSITIONING_NO = Event.TRANSITIONING_NO;
    String TRANSITIONING_ERROR = Event.TRANSITIONING_ERROR;
    String TRANSITIONING_ERROR_OVERRIDE = Event.TRANSITIONING_ERROR + "Override";

    /** Well-known field names shared across resource types. */
    String TRANSITIONING_FIELD = "transitioning";
    String TRANSITIONING_MESSAGE_FIELD = "transitioningMessage";
    String STATE_FIELD = "state";
    String ACCOUNT_FIELD = "accountId";
    String CAPABILITIES_FIELD = "capabilities";
    String CLUSTER_FIELD = "clusterId";
    String CREATED_FIELD = "created";
    String CREATOR_FIELD = "creatorId";
    String DATA_FIELD = "data";
    String ID_FIELD = TypeUtils.ID_FIELD;
    String KIND_FIELD = "kind";
    String NAME_FIELD = "name";
    String REMOVED_FIELD = "removed";
    String REMOVE_TIME_FIELD = "removeTime";
    String TYPE_FIELD = "type";
    String UUID_FIELD = "uuid";

    /** Converts a schema field key into the record class property name. */
    String convertToPropertyNameString(Class<?> recordClass, Object key);

    /** Converts a schema field key into the field name used for the given type. */
    Object convertFieldNameFor(String type, Object key);

    /** Returns the link-name to link-target map for the given type. */
    Map<String, String> getLinks(SchemaFactory schemaFactory, String type);

    /** Returns the link-name to relationship map for the given type. */
    Map<String, Relationship> getLinkRelationships(SchemaFactory schemaFactory, String type);

    /** Looks up a relationship by type name and link name. */
    Relationship getRelationship(String type, String linkName);

    /** Looks up a relationship by record class and link name. */
    Relationship getRelationship(Class<?> clz, String linkName);

    /** Returns the transition-related field values for the given object. */
    Map<String, Object> getTransitionFields(Schema schema, Object obj);

    /** Returns the action definitions available for the given object. */
    Map<String, ActionDefinition> getActionDefinitions(Object obj);

    /** Whether the given state is a transitioning state for the resource type. */
    boolean isTransitioningState(Class<?> resourceType, String state);

}
|
<reponame>devilry/devilry-django<filename>devilry/devilry_admin/tests/dashboard/test_student_feedbackfeed_wizard/test_assignment_listview.py
from django import test
from django.conf import settings
from django.template import defaultfilters
from django.utils import timezone
from model_bakery import baker
from cradmin_legacy import cradmin_testhelpers
from devilry.devilry_account.models import PermissionGroup
from devilry.devilry_admin.views.dashboard.student_feedbackfeed_wizard import assignment_list
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_group.devilry_group_baker_factories import make_first_feedbackset_in_group
class TestStudentAssignmentGroupListView(test.TestCase, cradmin_testhelpers.TestCaseMixin):
    """
    Tests for the admin dashboard list of assignment groups for a single
    student: access control (subject/period admins), rendered group info,
    search, ordering and semester filtering.
    """
    viewclass = assignment_list.StudentAssignmentGroupListView

    def setUp(self):
        # The view depends on AssignmentGroup cached data; install the
        # custom SQL triggers that maintain it.
        AssignmentGroupDbCacheCustomSql().initialize()

    def test_title(self):
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        mockresponse = self.mock_http200_getrequest_htmls(
            viewkwargs={'user_id': testuser.id}
        )
        self.assertEqual(mockresponse.selector.one('title').alltext_normalized, 'Assignments for <EMAIL>')

    def test_user_can_only_see_assignments_where_user_is_admin_on_subject(self):
        # adminuser is subject admin on testsubject1 only, so only the
        # group under testsubject1 should be listed.
        adminuser = baker.make(settings.AUTH_USER_MODEL)
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testsubject1 = baker.make('core.Subject')
        testsubject2 = baker.make('core.Subject')
        baker.make('devilry_account.PermissionGroupUser', user=adminuser,
                   permissiongroup=baker.make('devilry_account.SubjectPermissionGroup',
                                              permissiongroup__grouptype=PermissionGroup.GROUPTYPE_SUBJECTADMIN,
                                              subject=testsubject1).permissiongroup)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode__long_name='Accessible Assignment',
                                parentnode__parentnode__parentnode=testsubject1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode__long_name='Inaccessible Assignment',
                                parentnode__parentnode__parentnode=testsubject2)
        baker.make('core.Candidate', assignment_group=testgroup1, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup2, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=adminuser,
            viewkwargs={'user_id': testuser.id}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)
        self.assertEqual(
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized,
            'Accessible Assignment')

    def test_user_can_only_see_assignments_where_user_is_admin_on_period(self):
        # Same as above, but the admin permission is scoped to a period.
        adminuser = baker.make(settings.AUTH_USER_MODEL)
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testsubject = baker.make('core.Subject')
        testperiod1 = baker.make('core.Period', parentnode=testsubject)
        testperiod2 = baker.make('core.Period', parentnode=testsubject)
        baker.make('devilry_account.PermissionGroupUser', user=adminuser,
                   permissiongroup=baker.make('devilry_account.PeriodPermissionGroup',
                                              permissiongroup__grouptype=PermissionGroup.GROUPTYPE_PERIODADMIN,
                                              period=testperiod1).permissiongroup)
        testgroup1 = baker.make('core.AssignmentGroup', parentnode__long_name='Accessible Assignment',
                                parentnode__parentnode=testperiod1)
        testgroup2 = baker.make('core.AssignmentGroup', parentnode__long_name='Inaccessible Assignment',
                                parentnode__parentnode=testperiod2)
        baker.make('core.Candidate', assignment_group=testgroup1, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup2, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=adminuser,
            viewkwargs={'user_id': testuser.id}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)
        self.assertEqual(
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized,
            'Accessible Assignment')

    def __make_simple_assignment_group(self, assignment_long_name='Assignment AAA'):
        """
        Build one AssignmentGroup with a full subject/period/assignment
        hierarchy (Subject AAA / Period AAA / assingmentaaa) where the
        period is currently active.
        """
        group = baker.make('core.AssignmentGroup',
                           parentnode__short_name='assingmentaaa',
                           parentnode__long_name=assignment_long_name,
                           parentnode__parentnode__short_name='periodaaa',
                           parentnode__parentnode__long_name='Period AAA',
                           parentnode__parentnode__start_time=timezone.now() - timezone.timedelta(days=100),
                           parentnode__parentnode__end_time=timezone.now() + timezone.timedelta(days=100),
                           parentnode__parentnode__parentnode__short_name='subjectaaa',
                           parentnode__parentnode__parentnode__long_name='Subject AAA')
        return group

    def test_default_ordering_by_deadline_datetime_ascending(self):
        # testgroup3 has no explicit deadline (earliest), then B (+30d),
        # then A (+40d) - expected order C, B, A.
        now = timezone.now()
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode__long_name='Assignment A')
        make_first_feedbackset_in_group(group=testgroup1, deadline_datetime=now + timezone.timedelta(days=40))
        testgroup2 = baker.make('core.AssignmentGroup', parentnode__long_name='Assignment B')
        make_first_feedbackset_in_group(group=testgroup2, deadline_datetime=now + timezone.timedelta(days=30))
        testgroup3 = baker.make('core.AssignmentGroup', parentnode__long_name='Assignment C')
        baker.make('core.Candidate', assignment_group=testgroup1, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup2, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup3, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id}
        ).selector
        assignment_longname_list = [element.alltext_normalized for element in
                                    selector.list('.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
        self.assertListEqual(
            [
                'Assignment C',
                'Assignment B',
                'Assignment A'
            ],
            assignment_longname_list
        )

    def test_assignment_info_assignment_name(self):
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id}
        ).selector
        self.assertEqual(
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized,
            testgroup.parentnode.long_name
        )

    def test_assignment_info_deadline_datetime(self):
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        # Pin the deadline on the cached last feedbackset so the rendered
        # value can be compared against the formatted datetime below.
        testgroup.cached_data.last_feedbackset.deadline_datetime = timezone.now()
        testgroup.cached_data.last_feedbackset.save()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id}
        ).selector
        self.assertEqual(
            selector.one('.devilry-admin-groupitem-deadline').alltext_normalized,
            'Deadline: {}'.format(
                defaultfilters.date(timezone.localtime(testgroup.cached_data.last_feedbackset.deadline_datetime),
                                    'DATETIME_FORMAT'))
        )

    def test_assignment_info_period_name(self):
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id}
        ).selector
        self.assertEqual(
            selector.one('.devilry-admin-groupitem-by-assignment-periodname').alltext_normalized,
            'Semester: {}'.format(testgroup.parentnode.parentnode.long_name)
        )

    def test_assignment_info_subject_name(self):
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id}
        ).selector
        self.assertEqual(
            selector.one('.devilry-admin-groupitem-by-assignment-subjectname').alltext_normalized,
            'Subject: {}'.format(testgroup.parentnode.parentnode.parentnode.long_name)
        )

    def test_assignment_info_delivery_status(self):
        # A fresh group with no feedback should render as waiting for feedback.
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id}
        ).selector
        self.assertEqual(
            selector.one('.devilry-cradmin-groupitemvalue-status').alltext_normalized,
            'Status: {}'.format('waiting for feedback')
        )

    def test_search_candidate_in_group_nomatch(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': teststudent.id, 'filters_string': 'search-NotInGroups'}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 0)

    def test_search_other_candidate_in_group_fullname(self):
        # Searching on the full name of another candidate in the same group
        # should match the group.
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        other_student = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=other_student)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': teststudent.id, 'filters_string': 'search-Other User'}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_search_other_candidate_in_group_shortname(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        other_student = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='Other User')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=other_student)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': teststudent.id, 'filters_string': '<EMAIL>'}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_search_examiner_nomatch(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        examiner_user = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='Examiner')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=examiner_user)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': teststudent.id, 'filters_string': 'search-NoExaminerMatch'}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 0)

    def test_search_examiner_fullname(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        examiner_user = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='Examiner')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=examiner_user)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': teststudent.id, 'filters_string': 'search-Examiner'}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_search_examiner_shortname(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        examiner_user = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='Examiner')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        baker.make('core.Examiner', assignmentgroup=testgroup, relatedexaminer__user=examiner_user)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': teststudent.id, 'filters_string': 'search-examiner@<EMAIL>'}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_search_assignment_long_name(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': teststudent.id, 'filters_string': 'search-{}'.format(testgroup.parentnode.long_name)}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_search_assignment_short_name(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': teststudent.id, 'filters_string': 'search-{}'.format(testgroup.parentnode.short_name)}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_search_period_long_name(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={
                'user_id': teststudent.id,
                'filters_string': 'search-{}'.format(testgroup.parentnode.parentnode.long_name)
            }
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_search_period_short_name(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={
                'user_id': teststudent.id,
                'filters_string': 'search-{}'.format(testgroup.parentnode.parentnode.short_name)
            }
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_search_subject_long_name(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={
                'user_id': teststudent.id,
                'filters_string': 'search-{}'.format(testgroup.parentnode.parentnode.parentnode.long_name)
            }
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_search_subject_short_name(self):
        teststudent = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup = self.__make_simple_assignment_group()
        baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=teststudent)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={
                'user_id': teststudent.id,
                'filters_string': 'search-{}'.format(testgroup.parentnode.parentnode.parentnode.short_name)
            }
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)

    def test_order_deadline_ascending(self):
        # Explicit 'orderby_deadline-' filter; same expected order as the
        # default ordering test above.
        now = timezone.now()
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode__long_name='Assignment A')
        make_first_feedbackset_in_group(group=testgroup1, deadline_datetime=now + timezone.timedelta(days=40))
        testgroup2 = baker.make('core.AssignmentGroup', parentnode__long_name='Assignment B')
        make_first_feedbackset_in_group(group=testgroup2, deadline_datetime=now + timezone.timedelta(days=30))
        testgroup3 = baker.make('core.AssignmentGroup', parentnode__long_name='Assignment C')
        baker.make('core.Candidate', assignment_group=testgroup1, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup2, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup3, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id, 'filters_string': 'orderby_deadline-'}
        ).selector
        assignment_longname_list = [element.alltext_normalized for element in
                                    selector.list('.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
        self.assertListEqual(
            [
                'Assignment C',
                'Assignment B',
                'Assignment A'
            ],
            assignment_longname_list
        )

    def test_order_deadline_descending(self):
        now = timezone.now()
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup1 = baker.make('core.AssignmentGroup', parentnode__long_name='Assignment A')
        make_first_feedbackset_in_group(group=testgroup1, deadline_datetime=now + timezone.timedelta(days=40))
        testgroup2 = baker.make('core.AssignmentGroup', parentnode__long_name='Assignment B')
        make_first_feedbackset_in_group(group=testgroup2, deadline_datetime=now + timezone.timedelta(days=30))
        testgroup3 = baker.make('core.AssignmentGroup', parentnode__long_name='Assignment C')
        baker.make('core.Candidate', assignment_group=testgroup1, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup2, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup3, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id, 'filters_string': 'orderby_deadline-deadline_descending'}
        ).selector
        assignment_longname_list = [element.alltext_normalized for element in
                                    selector.list('.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
        self.assertListEqual(
            [
                'Assignment A',
                'Assignment B',
                'Assignment C'
            ],
            assignment_longname_list
        )

    def test_filter_semesters_blank_shows_all_periods(self):
        testperiod_old = baker.make_recipe('devilry.apps.core.period_old')
        testperiod_active = baker.make_recipe('devilry.apps.core.period_active')
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup_old_period = baker.make('core.AssignmentGroup',
                                          parentnode__long_name='Assignment Old',
                                          parentnode__parentnode=testperiod_old)
        testgroup_active_period = baker.make('core.AssignmentGroup',
                                             parentnode__long_name='Assignment Active',
                                             parentnode__parentnode=testperiod_active)
        baker.make('core.Candidate', assignment_group=testgroup_old_period, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup_active_period, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id, 'filters_string': 'semester_is_active-'}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 2)

    def test_filter_active_periods(self):
        testperiod_old = baker.make_recipe('devilry.apps.core.period_old')
        testperiod_active = baker.make_recipe('devilry.apps.core.period_active')
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup_old_period = baker.make('core.AssignmentGroup',
                                          parentnode__long_name='Assignment Old',
                                          parentnode__parentnode=testperiod_old)
        testgroup_active_period = baker.make('core.AssignmentGroup',
                                             parentnode__long_name='Assignment Active',
                                             parentnode__parentnode=testperiod_active)
        baker.make('core.Candidate', assignment_group=testgroup_old_period, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup_active_period, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id, 'filters_string': 'semester_is_active-true'}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)
        self.assertEqual(
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized,
            'Assignment Active'
        )

    def test_filter_inactive_periods(self):
        testperiod_old = baker.make_recipe('devilry.apps.core.period_old')
        testperiod_active = baker.make_recipe('devilry.apps.core.period_active')
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testgroup_old_period = baker.make('core.AssignmentGroup',
                                          parentnode__long_name='Assignment Old',
                                          parentnode__parentnode=testperiod_old)
        testgroup_active_period = baker.make('core.AssignmentGroup',
                                             parentnode__long_name='Assignment Active',
                                             parentnode__parentnode=testperiod_active)
        baker.make('core.Candidate', assignment_group=testgroup_old_period, relatedstudent__user=testuser)
        baker.make('core.Candidate', assignment_group=testgroup_active_period, relatedstudent__user=testuser)
        selector = self.mock_http200_getrequest_htmls(
            requestuser=baker.make(settings.AUTH_USER_MODEL, is_superuser=True),
            viewkwargs={'user_id': testuser.id, 'filters_string': 'semester_is_active-false'}
        ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 1)
        self.assertEqual(
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized,
            'Assignment Old'
        )

    def test_query_count(self):
        # Guard against N+1 queries: 100 groups across 10 subjects must
        # render (first page of 15 items) in a fixed number of queries.
        adminuser = baker.make(settings.AUTH_USER_MODEL)
        testuser = baker.make(settings.AUTH_USER_MODEL, shortname='<EMAIL>', fullname='<NAME>')
        testsubjects = baker.make('core.Subject', _quantity=10)
        for testsubject in testsubjects:
            baker.make('devilry_account.PermissionGroupUser', user=adminuser,
                       permissiongroup=baker.make('devilry_account.SubjectPermissionGroup',
                                                  permissiongroup__grouptype=PermissionGroup.GROUPTYPE_SUBJECTADMIN,
                                                  subject=testsubject).permissiongroup)
            # Create ten assignments for each subject
            for num in range(10):
                testgroup = baker.make('core.AssignmentGroup', parentnode__long_name='Accessible Assignment',
                                       parentnode__parentnode__parentnode=testsubject)
                baker.make('core.Candidate', assignment_group=testgroup, relatedstudent__user=testuser)
        with self.assertNumQueries(8):
            selector = self.mock_http200_getrequest_htmls(
                requestuser=adminuser,
                viewkwargs={'user_id': testuser.id}
            ).selector
        self.assertEqual(selector.count('.cradmin-legacy-listbuilder-itemvalue'), 15)
|
<filename>src/indicator/volume/moneyFlowIndex.test.ts
// Copyright (c) 2022 <NAME>. All Rights Reserved.
// https://github.com/cinar/indicatorts
import { roundDigitsAll } from '../../helper/numArray';
import { moneyFlowIndex } from './moneyFlowIndex';
// Unit test for the Money Flow Index (MFI) volume indicator.
describe('Money Flow Index (MFI)', () => {
  it('should be able to compute MFI', () => {
    // Per-period candle data: highs, lows, closings and traded volumes.
    const highs = [10, 9, 12, 14, 12];
    const lows = [6, 7, 9, 12, 10];
    const closings = [9, 11, 7, 10, 8];
    const volumes = [100, 110, 80, 120, 90];
    // Expected MFI values, rounded to 2 digits for a stable comparison.
    const expected = [100, 100, 57.01, 65.85, 61.54];
    const period = 2;
    const actual = moneyFlowIndex(period, highs, lows, closings, volumes);
    expect(roundDigitsAll(2, actual)).toStrictEqual(expected);
  });
});
|
#!/bin/bash
# Install an hourly crontab entry that runs git_fetch.sh from the current
# directory, logging its output to ~/.logs/git_fetch.log.
# NOTE: running this script twice appends a duplicate entry.
echo "appending entry to your crontab"
# Create the log directory up front so the first cron run can write to it.
mkdir -p "$HOME/.logs"
# Read the current crontab; suppress the "no crontab for <user>" error
# printed when none exists yet.
tmp="$(crontab -l 2>/dev/null)"
if [ "$tmp" != "" ]
then
	# If there is already an entry we need to add a new line.
	tmp="$tmp
"
fi
# printf avoids echo -e expanding backslash sequences that may appear in
# existing entries or in the working-directory path.
printf '%s@hourly %s/git_fetch.sh > %s/.logs/git_fetch.log 2>&1\n' "$tmp" "$(pwd)" "$HOME" | crontab -
echo "crontab now contains the following entries"
crontab -l
echo
echo "logs will be written to $HOME/.logs/git_fetch.log"
|
/*
* MIT License
*
* Copyright (c) 2021 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package net.jamsimulator.jams.configuration;
import net.jamsimulator.jams.event.Event;
import net.jamsimulator.jams.event.EventBroadcast;
import net.jamsimulator.jams.event.SimpleEventBroadcast;
import org.json.JSONObject;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.io.StringWriter;
import java.lang.reflect.Method;
import java.nio.file.Files;
import java.util.HashMap;
import java.util.Map;
/**
* Represents the root of a configuration. This instance should be created using a JSON string or
* a file that contains it.
*/
public class RootConfiguration extends Configuration implements EventBroadcast {
private final SimpleEventBroadcast broadcast;
private File file;
    /**
     * Creates an empty root configuration.
     * <p>
     * The configuration has no backing file; use {@link #setFile(File)} before
     * calling {@link #save(boolean)} if it should be persisted.
     */
    public RootConfiguration() {
        super(null, new HashMap<>(), null);
        // A root configuration is its own root.
        root = this;
        file = null;
        broadcast = new SimpleEventBroadcast();
    }
    /**
     * Creates a root configuration using a file that contains a JSON string.
     * The file is remembered as the default save target for {@link #save(boolean)}.
     *
     * @param json the json file to parse.
     * @throws IOException when the file cannot be read.
     */
    public RootConfiguration(File json) throws IOException {
        super(null, loadJSON(json), null);
        root = this;
        file = json;
        broadcast = new SimpleEventBroadcast();
    }
/**
* Creates a root configuration using a JSON string.
*
* @param json the json.
*/
public RootConfiguration(String json) {
super(null, new JSONObject(json).toMap(), null);
root = this;
file = null;
broadcast = new SimpleEventBroadcast();
}
/**
* Creates a root configuration using a {@link Reader} that contains a JSON string.
*
* @param reader the reader.
* @throws IOException when the file cannot be readed.
*/
public RootConfiguration(Reader reader) throws IOException {
super(null, loadJSON(reader), null);
root = this;
file = null;
broadcast = new SimpleEventBroadcast();
}
private static Map<String, Object> loadJSON(File file) throws IOException {
if (!file.isFile()) return new HashMap<>();
return new JSONObject(Files.readString(file.toPath())).toMap();
}
private static Map<String, Object> loadJSON(Reader r) throws IOException {
BufferedReader reader = new BufferedReader(r);
//Loads the string first. This allows us to check if the file is empty.
StringBuilder builder = new StringBuilder();
boolean first = true;
String line;
while ((line = reader.readLine()) != null) {
if (!first) {
builder.append('\n');
} else first = false;
builder.append(line);
}
String string = builder.toString();
//If empty, return a new HashMap.
if (string.isEmpty()) return new HashMap<>();
return new JSONObject(string).toMap();
}
/**
* Sets the default save file.
*
* @param file the default save file.
*/
public void setFile(File file) {
this.file = file;
}
/**
* Saves the {@link RootConfiguration} into the file that loaded it, if present.
*
* @param useFormat whether the output text should be formatted.
* @throws IOException writer IOException.
*/
public void save(boolean useFormat) throws IOException {
if (file != null)
save(file, useFormat);
}
//region broadcast methods
@Override
public boolean registerListener(Object instance, Method method, boolean useWeakReferences) {
return broadcast.registerListener(instance, method, useWeakReferences);
}
@Override
public int registerListeners(Object instance, boolean useWeakReferences) {
return broadcast.registerListeners(instance, useWeakReferences);
}
@Override
public boolean unregisterListener(Object instance, Method method) {
return broadcast.unregisterListener(instance, method);
}
@Override
public int unregisterListeners(Object instance) {
return broadcast.unregisterListeners(instance);
}
@Override
public <T extends Event> T callEvent(T event) {
return broadcast.callEvent(event, this);
}
@Override
public void transferListenersTo(EventBroadcast broadcast) {
this.broadcast.transferListenersTo(broadcast);
}
//endregion
}
|
#!/bin/bash -u
# This is an example of how to use double quotes to escape the shell.
# Things to remember:
# - when you surround a part of your command line with double quotes, that part
#   of the command line is going to be passed as ONE argument to the program activated.
# - environment and shell variables which are mentioned inside the double quotes
#   ARE going to be interpolated.
# - if you want these variables not to be interpolated, put a backslash before the '$'
#   sign
#
# References:
# - https://www.shellscript.sh/escape.html

# This is passed as one argument
echo "Hello, World!"

# Interpolation
echo "HOME is $HOME"
echo "HOME is ${HOME}"

# No interpolation
echo "HOME is \$HOME"
echo "HOME is \${HOME}"
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Container entrypoint: start the ticket issuer, the Kerberos KDC and the
# admin server in the background, pausing between them so each daemon is up
# before the next one needs it.
set -e
/root/issuer &
krb5kdc -n &
sleep 4
kadmind -nofork &
sleep 2

# Keep the container in the foreground by tailing the daemon logs
# (the final tail is NOT backgrounded on purpose).
tail -f /var/log/krb5kdc.log &
tail -f /var/log/kadmind.log
|
/** An object with arbitrary string keys whose values are of unknown type. */
export type UnknownObject = Record<string, unknown>;

/** An object type that permits no properties at all. */
export type EmptyObject = Record<string, never>;
|
<reponame>achouman/IKVM.NET
/*
Copyright (C) 2006, 2007 <NAME>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
<NAME>
<EMAIL>
*/
package ikvm.internal;
import cli.System.GC;
import cli.System.WeakReference;
@ikvm.lang.Internal
public final class WeakIdentityMap
{
    // Fixed pool of CLR weak references used as key slots. A slot is "empty"
    // when its Target is null. Keys are compared by reference identity, not
    // equals(). Capacity grows by doubling in putImpl when no slot is free.
    private WeakReference[] keys = new WeakReference[16];

    // values[i] belongs to the key held in keys[i]; null values are allowed.
    private Object[] values = new Object[keys.length];

    public WeakIdentityMap()
    {
        for (int i = 0; i < keys.length; i++)
        {
            // The 'true' argument tracks resurrection (long weak reference).
            keys[i] = new WeakReference(null, true);
            // NOTE we suppress finalization, to make sure the WeakReference continues to work
            // while the AppDomain is finalizing for unload (note that for this to work,
            // the code that instantiates us also has to call SuppressFinalize on us.)
            GC.SuppressFinalize(keys[i]);
        }
    }

    // Re-register the slot WeakReferences for finalization when the map
    // itself is finalized, undoing the SuppressFinalize from the constructor.
    protected void finalize()
    {
        for (int i = 0; i < keys.length; i++)
        {
            if (keys[i] != null)
            {
                GC.ReRegisterForFinalize(keys[i]);
            }
        }
    }

    // Removes the entry whose key is identical (==) to the argument and
    // returns its value, or null when the key is absent.
    public synchronized Object remove(Object key)
    {
        for (int i = 0; i < keys.length; i++)
        {
            if (keys[i].get_Target() == key)
            {
                Object value = values[i];
                keys[i].set_Target(null);
                values[i] = null;
                return value;
            }
        }
        return null;
    }

    // Note that null values are supported, null keys are not
    public synchronized void put(Object key, Object value)
    {
        if (key == null)
            throw new NullPointerException();
        putImpl(key, value, true);
    }

    // Inserts or replaces an entry. One pass finds both a reusable empty slot
    // and an existing slot for this key (clearing values of collected keys on
    // the way). When the table is full, a gen-0 GC is attempted once (tryGC)
    // to free slots before the backing arrays are doubled.
    private void putImpl(Object key, Object value, boolean tryGC)
    {
        int emptySlot = -1;
        int keySlot = -1;
        for (int i = 0; i < keys.length; i++)
        {
            Object k = keys[i].get_Target();
            if (k == null)
            {
                emptySlot = i;
                values[i] = null;
            }
            else if (k == key)
            {
                keySlot = i;
            }
        }
        if (keySlot != -1)
        {
            values[keySlot] = value;
        }
        else if (emptySlot != -1)
        {
            keys[emptySlot].set_Target(key);
            values[emptySlot] = value;
        }
        else
        {
            if (tryGC)
            {
                GC.Collect(0);
                putImpl(key, value, false);
                return;
            }
            // Grow: double the arrays and initialize the new slots exactly
            // like the constructor does (long weak refs, finalization
            // suppressed), then place the new entry in the first new slot.
            int len = keys.length;
            WeakReference[] newkeys = new WeakReference[len * 2];
            Object[] newvalues = new Object[newkeys.length];
            cli.System.Array.Copy((cli.System.Array)(Object)keys, (cli.System.Array)(Object)newkeys, len);
            cli.System.Array.Copy((cli.System.Array)(Object)values, (cli.System.Array)(Object)newvalues, len);
            keys = newkeys;
            values = newvalues;
            for (int i = len; i < keys.length; i++)
            {
                keys[i] = new WeakReference(null, true);
                GC.SuppressFinalize(keys[i]);
            }
            keys[len].set_Target(key);
            values[len] = value;
        }
    }

    // Returns the value mapped to the identical (==) key, or null when the
    // key is absent. Callers cannot distinguish "absent" from a null value.
    public synchronized Object get(Object key)
    {
        for (int i = 0; i < keys.length; i++)
        {
            if (keys[i].get_Target() == key)
            {
                return values[i];
            }
        }
        return null;
    }

    // True when an identical (==) key is currently present and not collected.
    public synchronized boolean containsKey(Object key)
    {
        for (int i = 0; i < keys.length; i++)
        {
            if (keys[i].get_Target() == key)
            {
                return true;
            }
        }
        return false;
    }
}
|
import { ForeignKey, Model, Table } from 'sequelize-typescript';
import { Filling } from '../../fillings/models';
import { DishOrder } from './dish_order.model';
// Join table linking fillings to dish orders (many-to-many).
// Rows carry only the two foreign keys, so timestamps are disabled.
@Table({
  tableName: 'filling_orders',
  createdAt: false,
  updatedAt: false,
})
export class FillingOrder extends Model {
  // FK into the fillings table.
  @ForeignKey(() => Filling)
  filling_id: number;

  // FK into the dish_orders table.
  @ForeignKey(() => DishOrder)
  dish_order_id: number;
}
|
#!/usr/bin/env bash
# Copyright The Dragonfly Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Resolve the directory containing this script and the supernode source tree.
curDir=$(cd "$(dirname "$0")" && pwd)
SUPERNODE_SOURCE_HOME="${curDir}/../../src/supernode"

# First CLI argument selects what to build: source | image | all.
ACTION=$1

# Shared logging helpers (provides `info`).
. "${curDir}/../log.sh"

# Print usage and exit with status 2 (bad usage).
usage() {
    echo "Usage: $0 {source|image|all}"
    echo "  source    compile supernode's source"
    echo "  image     build docker image of supernode"
    echo "  all       compile source and build image"
    exit 2; # bad usage
}

# An action argument is mandatory.
if [ $# -lt 1 ]; then
    usage
fi
# Compile the supernode source (tests included) with maven.
compileSupernode() {
    echo "====================================================================="
    info "supernode:source" "compiling source..."
    mvn clean package
}

# Build the supernode docker image via the maven docker plugin (tests skipped).
buildDockerImage() {
    echo "====================================================================="
    info "supernode:image" "building image..."
    mvn clean package -DskipTests docker:build
}
# Verify that docker is installed and the daemon is reachable; exit 3 otherwise.
# FIX: the original ran the probe as a bare statement and then tested `$?`,
# a fragile idiom; `which` is also less portable than `command -v`.
check() {
    if ! command -v docker > /dev/null || ! docker ps > /dev/null 2>&1; then
        echo "Please install docker and start docker daemon first." && exit 3
    fi
}
# Dispatch the requested action from inside the supernode source tree.
# `image` and `all` require a working docker daemon (check).
main() {
    cd "${SUPERNODE_SOURCE_HOME}"
    case "${ACTION}" in
        source)
            compileSupernode
        ;;
        image)
            check && buildDockerImage
        ;;
        all)
            check && compileSupernode && buildDockerImage
        ;;
        *)
            usage
        ;;
    esac
}

main
|
import React, { Component } from "react";
import { NavLink } from "react-router-dom";
import PropTypes from "prop-types";
import { connect } from "react-redux";
import { logoutUser } from "../../actions/authAction";
import { clearCurrentProfile } from "../../actions/profileAction";
// Top navigation bar. Renders auth-aware links: post feed, dashboard and a
// logout entry (with the user's avatar) when authenticated; sign-up/login
// links for guests.
class Navbar extends Component {
  // Logout handler: clears the cached profile from the store first, then
  // dispatches the logout action (both injected via connect()).
  onLogoutClick = e => {
    e.preventDefault();
    this.props.clearCurrentProfile();
    this.props.logoutUser();
  };

  render() {
    const { isAuthenticated, user } = this.props.auth;

    // Links shown to an authenticated user.
    const authLinks = (
      <ul className="right valign-wrapper">
        <li>
          <NavLink to="/post">Post Feed</NavLink>
        </li>
        <li>
          <NavLink to="/dashboard">Dashboard</NavLink>
        </li>
        <li>
          <a onClick={this.onLogoutClick}>
            <img
              src={user.avatar}
              alt={user.name}
              className="circle responsive-img"
              style={{ width: "35px", marginRight: "10px" }}
            />
            Logout
          </a>
        </li>
      </ul>
    );

    // Links shown to guests.
    const guestLinks = (
      <ul className="right">
        <li>
          <NavLink to="/signup">Sign Up</NavLink>
        </li>
        <li>
          <NavLink to="/login">Log in</NavLink>
        </li>
      </ul>
    );

    return (
      <nav className="indigo darken-4">
        <div className="nav-wrapper container">
          <ul className="left">
            <li>
              <NavLink
                to="/"
                className="logo mg-70 scale-transition transparent"
              >
                DevNet
              </NavLink>
            </li>
            <li>
              <NavLink to="/profile">Developers</NavLink>
            </li>
          </ul>
          {isAuthenticated ? authLinks : guestLinks}
        </div>
      </nav>
    );
  }
}
Navbar.propTypes = {
  logoutUser: PropTypes.func.isRequired,
  // FIX: clearCurrentProfile is injected by connect() and called in
  // onLogoutClick, so it must be declared here as well.
  clearCurrentProfile: PropTypes.func.isRequired,
  auth: PropTypes.object.isRequired
};
// Expose only the auth slice of the redux store to this component.
const mapStateToProps = state => ({
  auth: state.auth
});

// Bind the two auth actions as props and connect the component.
export default connect(
  mapStateToProps,
  { logoutUser, clearCurrentProfile }
)(Navbar);
|
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2011-2018 Mike Shal <marfey@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Same as t4041, but in a subdirectory.
. ./tup.sh

# Build, inside a subdirectory, a script that creates two temp files, lists
# them, and deletes them again. The tup rule captures only the `ls` output
# into output.dat.
tmkdir sub
cd sub
cat > ok.sh << HERE
echo hey > foo.txt
echo yo > bar.txt
ls *.txt
rm foo.txt bar.txt
HERE
chmod +x ok.sh
cat > Tupfile << HERE
: |> ./ok.sh > %o |> output.dat
HERE
cd ..
update

# Both temporary files must appear in the captured listing, even though they
# no longer exist after the rule ran.
if ! grep 'foo.txt' sub/output.dat > /dev/null; then
    echo "Error: 'foo.txt' should be in the output file" 1>&2
    exit 1
fi
if ! grep 'bar.txt' sub/output.dat > /dev/null; then
    echo "Error: 'bar.txt' should be in the output file" 1>&2
    exit 1
fi

eotup
|
<filename>mobile-app/app/screens/AppNavigator/screens/Settings/components/RowLanguageItem.tsx
import { ThemedIcon, ThemedText, ThemedTouchableOpacity } from '@components/themed'
import { WalletAlert } from '@components/WalletAlert'
import { useLanguageContext } from '@shared-contexts/LanguageProvider'
import { NavigationProp, useNavigation } from '@react-navigation/native'
import { tailwind } from '@tailwind'
import { AppLanguageItem, translate } from '@translations'
import * as React from 'react'
import { View } from 'react-native'
import { SettingsParamList } from '../SettingsNavigator'
/**
 * Settings row for a single selectable app language. Tapping a row other
 * than the active language shows a confirmation alert; on confirmation the
 * language is switched and the screen is popped. The active language is
 * marked with a check icon.
 */
export function RowLanguageItem ({ languageItem }: { languageItem: AppLanguageItem }): JSX.Element {
  const navigation = useNavigation<NavigationProp<SettingsParamList>>()
  const {
    language,
    setLanguage
  } = useLanguageContext()

  // Confirm-and-switch handler; no-op when the row is already active.
  const onPress = async (): Promise<void> => {
    if (languageItem.locale === language) {
      return
    }
    WalletAlert({
      title: translate('screens/Settings', 'Switch Language'),
      message: translate(
        'screens/Settings', 'You are about to change your language to {{language}}. Do you want to proceed?', { language: translate('screens/Settings', languageItem.language) }),
      buttons: [
        {
          text: translate('screens/Settings', 'No'),
          style: 'cancel'
        },
        {
          text: translate('screens/Settings', 'Yes'),
          style: 'destructive',
          onPress: async () => {
            await setLanguage(languageItem.locale)
            navigation.goBack()
          }
        }
      ]
    })
  }

  return (
    <ThemedTouchableOpacity
      onPress={onPress}
      style={tailwind('flex flex-row p-4 pr-2 items-center justify-between')}
      testID={`button_language_${languageItem.language}`}
    >
      <View>
        <ThemedText testID='language_option' style={tailwind('font-medium')}>
          {languageItem.displayName}
        </ThemedText>
        <ThemedText
          testID='language_option_description'
          dark={tailwind('text-gray-400')}
          light={tailwind('text-gray-500')}
          style={tailwind('text-sm')}
        >
          {translate('screens/Settings', languageItem.language)}
        </ThemedText>
      </View>
      {
        language.startsWith(languageItem.locale) &&
        (
          <ThemedIcon
            dark={tailwind('text-darkprimary-500')}
            iconType='MaterialIcons'
            light={tailwind('text-primary-500')}
            name='check'
            size={24}
            testID={`button_network_${languageItem.language}_check`}
          />
        )
      }
    </ThemedTouchableOpacity>
  )
}
|
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
import json
# BUG FIX: was `Flask(name)` — `name` is undefined at module scope and raises
# NameError on import; the Flask convention is `Flask(__name__)`.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)


@app.route("/sql", methods=["POST"])
def run_sql():
    """Execute the SQL string from the JSON request body and return the rows
    as a JSON array of objects.

    WARNING(review): this endpoint executes arbitrary client-supplied SQL —
    it is a full SQL-injection surface by construction and must never be
    exposed without strict authentication/authorization.
    """
    query = request.json['query']
    result = db.session.execute(query)
    # NOTE(review): dict(row) assumes mapping-like rows; on SQLAlchemy 1.4+
    # this may need dict(row._mapping) — verify the installed version.
    return json.dumps([dict(row) for row in result])


# BUG FIX: was `if name == 'main'` — must use the dunder spellings, otherwise
# the dev server never starts (and `name` is undefined).
if __name__ == '__main__':
    app.run(debug=True)
|
<reponame>HelloPb/jss-templatea<filename>packages/sitecore-jss-vue/src/components/RichText.test.ts
/* eslint-disable no-unused-expressions */
import { mount } from '@vue/test-utils';
import { richTextField as eeRichTextData } from '../test/data/field-data-EE-on';
import { RichText } from './RichText';
// Unit tests for the RichText renderer: empty-field handling, the
// editable-vs-value precedence rules, raw HTML pass-through, Experience
// Editor markup, custom tags and attribute forwarding.
describe('<RichText />', () => {
  it('should render nothing with missing field', () => {
    // Need to mock console.error as Vue will log an error for the missing "field" prop
    // that is marked as required.
    const errorSpy = jest.spyOn(console, 'error');
    errorSpy.mockImplementation(() => {});

    const rendered = mount(RichText);
    expect(rendered.isEmpty()).toBe(true);
    errorSpy.mockRestore();
  });

  it('should render nothing with missing editable and value', () => {
    const props = {
      field: null,
    };
    // Need to mock console.error as Vue will log an error for the null "field" prop
    // that is marked as an Object.
    const errorSpy = jest.spyOn(console, 'error');
    errorSpy.mockImplementation(() => {});

    const rendered = mount(RichText, { context: { props } });
    expect(rendered.isEmpty()).toBe(true);
    errorSpy.mockRestore();
  });

  // When both are present, `editable` wins over `value` by default.
  it('should render editable with editable value', () => {
    const props = {
      field: {
        value: 'value',
        editable: 'editable',
      },
    };
    const rendered = mount(RichText, { context: { props } }).find('div');
    expect(rendered.element.innerHTML).toBe(props.field.editable);
  });

  // Explicit `editable: false` forces the plain value to render instead.
  it('should render value with editing explicitly disabled', () => {
    const props = {
      field: {
        value: 'value',
        editable: 'editable',
      },
      editable: false,
    };
    const rendered = mount(RichText, { context: { props } }).find('div');
    expect(rendered.element.innerHTML).toBe(props.field.value);
  });

  it('should render value with with just a value', () => {
    const props = {
      field: {
        value: 'value',
      },
    };
    const rendered = mount(RichText, { context: { props } }).find('div');
    expect(rendered.element.innerHTML).toBe(props.field.value);
  });

  // RichText does NOT sanitize: markup in the field value is emitted verbatim.
  it('should render embedded html as-is', () => {
    const props = {
      field: {
        value: '<input type="text">some crazy stuff<script code="whaaaat">uh oh</script>',
      },
    };
    const rendered = mount(RichText, { context: { props } }).find('div');
    expect(rendered.element.innerHTML).toBe(props.field.value);
  });

  // Experience Editor chrome markup (from the EE-on fixture) survives intact.
  it('should render ee HTML', () => {
    const props = {
      field: {
        editable: eeRichTextData,
      },
    };
    const rendered = mount(RichText, { context: { props } }).find('div');
    expect(rendered.html().indexOf('<input')).toBeGreaterThan(-1);
    expect(rendered.html().indexOf('<span class="scChromeData">')).toBeGreaterThan(-1);
  });

  // The wrapper element defaults to <div> but can be overridden via `tag`.
  it('should render tag with a tag provided', () => {
    const props = {
      field: {
        value: 'value',
      },
      tag: 'p',
    };
    const rendered = mount(RichText, { context: { props } }).find('p');
    expect(rendered.exists()).toBe(true);
    expect(rendered.element.innerHTML).toBe(props.field.value);
  });

  // Extra attrs (class, id, ...) are forwarded onto the wrapper element.
  it('should render other attributes with other props provided', () => {
    const props = {
      field: {
        value: 'value',
      },
      tag: 'h1',
    };
    const attrs = {
      class: 'cssClass',
      id: 'lorem',
    };
    const rendered = mount(RichText, { context: { props, attrs } }).find('h1');
    expect(rendered.exists()).toBe(true);
    expect(rendered.element.tagName).toBe(props.tag.toUpperCase());
    expect(rendered.attributes()).toMatchObject(attrs);
    expect(rendered.element.innerHTML).toBe(props.field.value);
  });
});
|
#include <iostream>
#include <string>
#include <vector>
// Container that reads whitespace-separated integers from a stream and
// writes them back out space-separated on a single line.
class CustomData {
private:
    std::vector<int> data; // parsed integers, in input order

public:
    // Consume integers from `in` until extraction fails (EOF or a
    // non-numeric token); always reports success.
    bool read(std::istream& in) {
        for (int value; in >> value; ) {
            data.push_back(value);
        }
        return true;
    }

    // Emit every stored integer followed by a single space, then a newline
    // (flushed via endl); always reports success.
    bool write(std::ostream& out) const {
        for (std::size_t i = 0; i < data.size(); ++i) {
            out << data[i] << " ";
        }
        out << std::endl;
        return true;
    }
};
// Demo driver: read integers from stdin until EOF or an invalid token,
// then echo them back space-separated.
int main() {
    CustomData customObj;
    std::cout << "Enter integers separated by spaces: ";
    customObj.read(std::cin);
    std::cout << "Data read from input: ";
    customObj.write(std::cout);
    return 0;
}
|
# Obtain/renew Let's Encrypt certificates for the tradrec.com domains.
# certbot's standalone verifier binds port 80 itself, so nginx is stopped
# for the duration of the run and restarted afterwards.
systemctl stop nginx.service # Stop the nginx server as it is listening on port 80 which is used by standalone verification.
certbot certonly --expand --renew-by-default --standalone \
    -d tradrec.com \
    -d www.tradrec.com \
    -d dev.tradrec.com
systemctl start nginx.service # Restart the nginx server.
|
#!/bin/bash
# Substitute CI-built image references into the operator's CSV manifests.
# FIX: was `set -eou pipefail` — conventionally (and unambiguously) spelled
# `-euo pipefail` so that `-o` clearly takes "pipefail" as its argument.
set -euo pipefail

echo -e "Dumping IMAGE env vars\n"
# FIX: grep exits 1 when no IMAGE_* variable is exported yet; under -e with
# pipefail that aborted the script before the defaults below were applied.
env | grep IMAGE || true
echo -e "\n\n"

# Default every image to the upstream origin build unless CI overrides it.
IMAGE_CLUSTER_LOGGING_OPERATOR=${IMAGE_CLUSTER_LOGGING_OPERATOR:-quay.io/openshift/origin-cluster-logging-operator:latest}
IMAGE_OAUTH_PROXY=${IMAGE_OAUTH_PROXY:-quay.io/openshift/origin-oauth-proxy:latest}
IMAGE_LOGGING_CURATOR5=${IMAGE_LOGGING_CURATOR5:-quay.io/openshift/origin-logging-curator5:latest}
IMAGE_LOGGING_FLUENTD=${IMAGE_LOGGING_FLUENTD:-quay.io/openshift/origin-logging-fluentd:latest}
IMAGE_ELASTICSEARCH6=${IMAGE_ELASTICSEARCH6:-quay.io/openshift/origin-logging-elasticsearch6:latest}
IMAGE_LOGGING_KIBANA6=${IMAGE_LOGGING_KIBANA6:-quay.io/openshift/origin-logging-kibana6:latest}

# update the manifest with the image built by ci
sed -i "s,quay.io/openshift/origin-cluster-logging-operator:latest,${IMAGE_CLUSTER_LOGGING_OPERATOR}," /manifests/*/*clusterserviceversion.yaml
sed -i "s,quay.io/openshift/origin-oauth-proxy:latest,${IMAGE_OAUTH_PROXY}," /manifests/*/*clusterserviceversion.yaml
sed -i "s,quay.io/openshift/origin-logging-curator5:latest,${IMAGE_LOGGING_CURATOR5}," /manifests/*/*clusterserviceversion.yaml
sed -i "s,quay.io/openshift/origin-logging-fluentd:latest,${IMAGE_LOGGING_FLUENTD}," /manifests/*/*clusterserviceversion.yaml
sed -i "s,quay.io/openshift/origin-logging-elasticsearch6:latest,${IMAGE_ELASTICSEARCH6}," /manifests/*/*clusterserviceversion.yaml
sed -i "s,quay.io/openshift/origin-logging-kibana6:latest,${IMAGE_LOGGING_KIBANA6}," /manifests/*/*clusterserviceversion.yaml

# update the manifest to pull always the operator image for non-CI environments
if [ "${OPENSHIFT_CI:-false}" == "false" ] ; then
    echo -e "Set operator deployment's imagePullPolicy to 'Always'\n\n"
    sed -i 's,imagePullPolicy:\ IfNotPresent,imagePullPolicy:\ Always,' /manifests/*/*clusterserviceversion.yaml
fi

echo -e "substitution complete, dumping new csv\n\n"
cat /manifests/*/*clusterserviceversion.yaml

echo "generating sqlite database"
/usr/bin/initializer --manifests=/manifests --output=/bundle/bundles.db --permissive=true
|
#!/usr/bin/env bash
set -e

tkg_plus_kind="TKG+"
input_kind="{cluster_kind}"
kubeadm_config_path=/root/kubeadm-defaults.conf

# Wait for the docker daemon before attempting kubeadm init.
while [ `systemctl is-active docker` != 'active' ]; do echo 'waiting for docker'; sleep 5; done

# use kubeadm config if TKG plus cluster
if [ "$input_kind" == "$tkg_plus_kind" ]; then
    kubeadm init --config=$kubeadm_config_path > /root/kubeadm-init.out
else
    # BUG FIX: the original ran `lsb_release -a | grep -q 20.04` as a bare
    # statement and then tested `$?` — under `set -e` a non-match (grep exit
    # status 1) aborted the whole script before the test ever ran. Running
    # the pipeline inside the `if` keeps -e from intervening.
    if lsb_release -a | grep -q 20.04; then
        # if os is ubuntu 20.04, then use containerd cri-socket
        kubeadm init --kubernetes-version=v{k8s_version} --cri-socket=/run/containerd/containerd.sock > /root/kubeadm-init.out
    else
        kubeadm init --kubernetes-version=v{k8s_version} > /root/kubeadm-init.out
    fi
fi

# Install kubeconfig for root.
mkdir -p /root/.kube
cp -f /etc/kubernetes/admin.conf /root/.kube/config
chown $(id -u):$(id -g) /root/.kube/config

# Apply the pinned Weave CNI manifest.
export kubever=$(kubectl version --client | base64 | tr -d '\n')
WEAVE_VERSIONED_FILE="/root/weave_v$(echo {cni_version} | sed -r 's/\./\-/g').yml"
kubectl apply -f $WEAVE_VERSIONED_FILE

systemctl restart kubelet
while [ `systemctl is-active kubelet` != 'active' ]; do echo 'waiting for kubelet'; sleep 5; done
|
def sum_even(arr):
    """Return the sum of the even numbers in ``arr``.

    Parameters
    ----------
    arr : list of int
        Integers to inspect; may be empty.

    Returns
    -------
    int
        Sum of the even elements (0 for an empty list or no evens).
    """
    return sum(num for num in arr if num % 2 == 0)
|
<gh_stars>0
/*
* CPAchecker is a tool for configurable software verification.
* This file is part of CPAchecker.
*
* Copyright (C) 2007-2017 <NAME>
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
* CPAchecker web page:
* http://cpachecker.sosy-lab.org
*/
package org.sosy_lab.cpachecker.util.cwriter;
import static com.google.common.collect.FluentIterable.from;
import static com.google.common.truth.Truth.assertThat;
import java.math.BigInteger;
import org.junit.Test;
import org.sosy_lab.cpachecker.cfa.ast.FileLocation;
import org.sosy_lab.cpachecker.cfa.ast.c.CFunctionDeclaration;
import org.sosy_lab.cpachecker.cfa.ast.c.CIntegerLiteralExpression;
import org.sosy_lab.cpachecker.cfa.model.CFANode;
import org.sosy_lab.cpachecker.cfa.model.c.CAssumeEdge;
import org.sosy_lab.cpachecker.cfa.types.c.CNumericTypes;
public class FunctionBodyTest {

    // Builds a dummy assume edge between two fresh CFA nodes with a constant
    // zero condition; only needed to satisfy enterBlock's signature.
    private static CAssumeEdge createAssumeEdge() {
        return new CAssumeEdge(
            "",
            FileLocation.DUMMY,
            new CFANode(CFunctionDeclaration.DUMMY),
            new CFANode(CFunctionDeclaration.DUMMY),
            new CIntegerLiteralExpression(FileLocation.DUMMY, CNumericTypes.INT, BigInteger.ZERO),
            true);
    }

    @Test
    public void test_enter_leave_iterationOrder() {
        // Entering a block pushes it; iterating the body yields the chain of
        // currently open blocks from the root downward.
        FunctionBody body = new FunctionBody(0, "dummy");
        body.enterBlock(1, createAssumeEdge(), "if (0)");
        body.enterBlock(2, createAssumeEdge(), "if (0)");
        assertThat(body.getCurrentBlock().getStateId()).isEqualTo(2);
        assertThat(from(body).transform(BasicBlock::getStateId)).containsExactly(0, 1, 2).inOrder();

        // Leaving block 2 and entering 3 replaces only the tail of the chain.
        body.leaveBlock();
        body.enterBlock(3, createAssumeEdge(), "if (0");
        assertThat(body.getCurrentBlock().getStateId()).isEqualTo(3);
        assertThat(from(body).transform(BasicBlock::getStateId)).containsExactly(0, 1, 3).inOrder();

        // Leaving the remaining blocks (3, 1 and root 0) empties the body.
        body.leaveBlock();
        body.leaveBlock();
        body.leaveBlock();
        assertThat(body).isEmpty();
    }
}
|
@app.route('/api/profile', methods=['GET'])
def get_user_profile():
    """Return the username, name and email of the requested user as JSON.

    Responds 404 when no user with the given id exists (first_or_404).

    NOTE(review): ``user_id`` is taken straight from the query string with no
    authentication/authorization check, and the response includes the email
    address — any caller can enumerate profiles. Confirm this is intended.
    """
    user_id = request.args.get('user_id')
    user = User.query.filter_by(id=user_id).first_or_404()
    return jsonify({
        'username': user.username,
        'name': user.name,
        'email': user.email
    })
|
<filename>Kamek/src/playerCurrentMultiplierMgr.cpp
#include <common.h>
#include <game.h>
#include <profile.h>
// Stage actor whose only job is to publish the sprite's settings value as
// two global multiplier floats on creation; it does nothing per-frame.
class dPlCrMgr_c : public dStageActor_c {
public:
    int onCreate();
    int onExecute();

    static dActor_c *build();
};

// This actor needs no extra resource files.
const char *PlCrMgrFileList[] = {0};

// Sprite/profile registration glue for the engine's actor factory.
const SpriteData PlCrMgrSpriteData = { ProfileId::PlCrMgr, 8, -8 , 0 , 0, 0x100, 0x100, 0, 0, 0, 0, 0 };
Profile PlCrMgrProfile(&dPlCrMgr_c::build, SpriteId::PlCrMgr, &PlCrMgrSpriteData, ProfileId::PlCrMgr, ProfileId::PlCrMgr, "PlCrMgr", PlCrMgrFileList);

// Allocate the actor from the game heap and placement-construct it
// (the standard build pattern for this engine).
dActor_c *dPlCrMgr_c::build() {
    void *buffer = AllocFromGameHeap1(sizeof(dPlCrMgr_c));
    dPlCrMgr_c *c = new(buffer) dPlCrMgr_c;
    return c;
}

// Globals consumed elsewhere; set once in onCreate from the sprite settings.
float currentMultiplier;
float currentMultiplier2;

// True when a PlCrMgr actor currently exists in the scene.
bool doesPlayerCurrentMultiplierMgrExist() {
    return fBase_c::search(PlCrMgr) != NULL;
}

// Derive both multipliers from the sprite's settings field.
// NOTE(review): `settings & 0xFFFFFFFF` is a no-op if settings is 32 bits
// wide — presumably defensive masking; confirm the declared width.
int dPlCrMgr_c::onCreate() {
    currentMultiplier = 0.01f * (this->settings & 0xFFFFFFFF);
    currentMultiplier2 = 0.001f * (this->settings & 0xFFFFFFFF);
    return true;
}

int dPlCrMgr_c::onExecute() {
    return true;
}
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

# Preconditions: a JVM on PATH and HADOOP_HOME exported.
# (command -v output suppressed — we only care about the exit status.)
if ! command -v java > /dev/null; then
    echo "Please check java in PATH"
    exit 1
fi
if [[ -z $HADOOP_HOME ]]; then
    echo "Please export HADOOP_HOME"
    exit 1
fi

# Configure a single-node HDFS: namenode on localhost:8020, storage in /tmp.
tee $HADOOP_HOME/etc/hadoop/core-site.xml << EOF_core_site
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://localhost:8020</value>
    </property>
</configuration>
EOF_core_site
tee $HADOOP_HOME/etc/hadoop/hdfs-site.xml << EOF_hdfs_site
<configuration>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:///tmp/db_data/hdfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:///tmp/db_data/hdfs/data</value>
    </property>
</configuration>
EOF_hdfs_site

# Point hadoop-env at the JVM actually on PATH.
tee -a $HADOOP_HOME/etc/hadoop/hadoop-env.sh << EOF_hadoop_env
export JAVA_HOME=$(java -XshowSettings:properties -version 2>&1 | sed -nE 's|.*java.home = (.*)|\1|p')
EOF_hadoop_env

# Clean. BUG FIX: stop-dfs.sh fails when no daemons are running, and under
# `set -e` that aborted the whole script — tolerate its failure.
$HADOOP_HOME/sbin/stop-dfs.sh || true
rm -rf /tmp/db_data/hdfs/name /tmp/db_data/hdfs/data

# Initialize
install -d /tmp/db_data/hdfs/name
install -d /tmp/db_data/hdfs/data
hdfs namenode -format

# Start
$HADOOP_HOME/sbin/start-dfs.sh

# Smoke-test the cluster.
hdfs dfsadmin -report
hdfs dfs -ls /
|
#!/bin/bash
# Install pylint and register the repo's pre-commit hook.
# FIX: quote the path expansions in cp so the script works when the checkout
# path contains spaces.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
python3 -m pip install "pylint<2.6"
cp "$DIR/pre-commit" "$DIR/../../.git/hooks/pre-commit"
echo "Linter installed as pre-commit hook."
|
// Doxygen-generated navigation data for struct Catch::IRunner:
// [ display name, HTML anchor, child entries ] per member. Do not edit by hand.
var structCatch_1_1IRunner =
[
    [ "~IRunner", "structCatch_1_1IRunner.html#a5f539a88a7772d68de8a2e4028774209", null ],
    [ "aborting", "structCatch_1_1IRunner.html#a03713202dd2e041e30b8030088ab0116", null ]
];
|
# Prune a trained CIFAR-100 checkpoint to 90% sparsity.
# $1 = architecture name, $2 = depth.
prune(){
    python cifar_prune.py \
        --arch $1 \
        --depth $2 \
        --dataset cifar100 \
        --percent 0.9 \
        --resume ../../../iclr2021_checkpoints/norm_pruning/weights/cifar/cifar100/$1_$2.pt \
        --save_dir ../../../result/norm_pruning/weights/$1$2/onecycle
}

# Fine-tune the pruned checkpoint with a one-cycle LR schedule.
# $1 = architecture, $2 = depth, $3 = learning rate, $4 = epoch budget.
retrain(){
    python cifar_finetune.py \
        --arch $1 \
        --depth $2 \
        --dataset cifar100 \
        --resume ../../../result/norm_pruning/weights/$1$2/onecycle/pruned.pth.tar \
        --save_dir ../../../result/norm_pruning/weights/$1$2/onecycle/$4epochs \
        --lr $3 \
        --epochs $4 \
        --use_onecycle \
        --wandb_name $1_$2_90_onecycle_$4epochs
}
# Prune once, then retrain at lr 0.1 for each epoch budget in the sweep.
# $1 = architecture name, $2 = depth.
# (FIX: the five per-architecture functions below were verbatim copies of the
# same eight-step sequence; deduplicated into this loop. Call order and
# arguments are unchanged.)
run_sweep(){
    prune $1 $2
    for epochs in 40 56 72 88 104 120 136 152; do
        retrain $1 $2 0.1 $epochs
    done
}

densenet40(){
    run_sweep densenet 40
}
preresnet110(){
    run_sweep preresnet 110
}
resnet110(){
    run_sweep resnet 110
}
resnet56(){
    run_sweep resnet 56
}
vgg19_bn(){
    run_sweep vgg19_bn 19
}

resnet56
resnet110
preresnet110
densenet40
vgg19_bn
|
#!/bin/bash
# Periodically downloads the world sunlight map and installs it as wallpaper.
#
# Fixes over the original:
#  * `comments` wrapped in backticks were actually COMMAND SUBSTITUTIONS the
#    shell executed; replaced with real # comments.
#  * [[ $x > $y ]] / [[ $x < $y ]] compare strings LEXICOGRAPHICALLY, so the
#    size check ("200" > "1000" is true) and the minute-window check
#    ("07" > "5" is false, so the sync branch never ran) were both broken;
#    replaced with arithmetic comparisons. 10#$var forces base 10 so the
#    zero-padded minutes "08"/"09" are not parsed as octal.
#  * The download/install sequence was duplicated verbatim; extracted into
#    fetch_and_update(). Extra arguments are passed through to wget (the
#    first run used -nv, the loop did not — preserved).
cd ~/Images/Wallpapers

# Try up to 60 times (5 s apart) to fetch the map; on success run the pin
# locator and swap in the freshly generated wallpaper.
fetch_and_update() {
    local counter=0
    while [ $counter -lt 60 ]; do
        wget http://www.opentopia.com/images/cams/world_sunlight_map_rectangular.jpg -O world.jpg "$@"
        local size
        size=$(stat -c%s world.jpg)
        # A tiny file means the download failed; retry.
        if [ "$size" -gt 1000 ]; then
            echo "Moving world.jpg to Walpapers folder."
            #mv world.jpg ~/Images/Wallpapers/world.jpg
            echo "Running python script:"
            python ~/Images/Wallpapers/pin_locator.py
            echo "Removing unused images (world.jpg, world_pinned.jpg)."
            rm ~/Images/Wallpapers/world.jpg
            rm ~/Images/Wallpapers/world_pinned.jpg
            echo "Removing old wallpaper."
            rm ~/Images/Wallpapers/world_sunlight_Wallpaper.jpg
            echo "Setting world_time as new wallpaper"
            mv ~/Images/Wallpapers/world_time.jpg ~/Images/Wallpapers/world_sunlight_Wallpaper.jpg
            return 0
        fi
        sleep 5
        counter=$((counter + 1))
    done
    return 1
}

echo "Primeira Execução"
fetch_and_update -nv

# Main loop: only refresh during minutes 06-14 of the hour, then sleep until
# roughly the same window of the next hour.
while true; do
    actual_min=$(date +%M)
    echo $(date)
    if (( 10#$actual_min > 5 && 10#$actual_min < 15 )); then
        echo "Sincronizado"
        fetch_and_update
        if (( 10#$actual_min < 10 )); then
            echo "Waiting 61:40 Minutes..."
            sleep 3700
        else
            echo "Waiting 58:40 Minutes..."
            sleep 3500
        fi
    else
        echo "Ainda nao sincronizado..."
        sleep 600
    fi
done
|
#!/usr/bin/env bash
# Regenerate the Javadoc for the RxJava2.x branch and stage it on gh-pages.
# Abort on the first failing command so a failed checkout or build can never
# lead to committing a stale or broken javadoc tree.
set -e
git checkout RxJava2.x
./gradlew clean androidJavadocs
git checkout gh-pages
rm -rf javadoc/RxJava2.x/*
cp -avr library/build/docs/javadoc/* ./javadoc/RxJava2.x
git add -A
git commit -m "updating JavaDoc for RxJava2.x"
rm -rf library/build/docs
# The original printed two near-duplicate messages (the second garbled as
# "for nd RxJava2.x"); merged into one correct status line.
echo "javadocs for RxJava2.x updated - now you can push your changes"
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2011:1371
#
# Security announcement date: 2011-10-14 18:48:12 UTC
# Script generation date: 2017-01-01 21:10:14 UTC
#
# Operating System: CentOS 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
#    - finch.i386:2.6.6-5.el5_7.1
#    - finch-devel.i386:2.6.6-5.el5_7.1
#    - libpurple.i386:2.6.6-5.el5_7.1
#    - libpurple-devel.i386:2.6.6-5.el5_7.1
#    - pidgin.i386:2.6.6-5.el5_7.1
#    - pidgin-devel.i386:2.6.6-5.el5_7.1
#    - finch.x86_64:2.6.6-5.el5_7.1
#    - finch-devel.x86_64:2.6.6-5.el5_7.1
#    - libpurple.x86_64:2.6.6-5.el5_7.1
#    - libpurple-devel.x86_64:2.6.6-5.el5_7.1
#    - libpurple-perl.x86_64:2.6.6-5.el5_7.1
#    - libpurple-tcl.x86_64:2.6.6-5.el5_7.1
#    - pidgin.x86_64:2.6.6-5.el5_7.1
#    - pidgin-devel.x86_64:2.6.6-5.el5_7.1
#    - pidgin-perl.x86_64:2.6.6-5.el5_7.1
#
# Last versions recommanded by security team:
#    - finch.i386:2.6.6-32.el5
#    - finch-devel.i386:2.6.6-32.el5
#    - libpurple.i386:2.6.6-32.el5
#    - libpurple-devel.i386:2.6.6-32.el5
#    - pidgin.i386:2.6.6-32.el5
#    - pidgin-devel.i386:2.6.6-32.el5
#    - finch.x86_64:2.6.6-32.el5
#    - finch-devel.x86_64:2.6.6-32.el5
#    - libpurple.x86_64:2.6.6-32.el5
#    - libpurple-devel.x86_64:2.6.6-32.el5
#    - libpurple-perl.x86_64:2.6.6-32.el5
#    - libpurple-tcl.x86_64:2.6.6-32.el5
#    - pidgin.x86_64:2.6.6-32.el5
#    - pidgin-devel.x86_64:2.6.6-32.el5
#    - pidgin-perl.x86_64:2.6.6-32.el5
#
# CVE List:
#    - CVE-2011-1091
#    - CVE-2011-3594
#
# More details:
#    - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Install every affected package at the fixed version; one yum invocation
# per package, exactly as the generated script did.
for package in \
    finch.i386 finch-devel.i386 libpurple.i386 libpurple-devel.i386 \
    pidgin.i386 pidgin-devel.i386 \
    finch.x86_64 finch-devel.x86_64 libpurple.x86_64 libpurple-devel.x86_64 \
    libpurple-perl.x86_64 libpurple-tcl.x86_64 pidgin.x86_64 \
    pidgin-devel.x86_64 pidgin-perl.x86_64
do
    sudo yum install "${package}-2.6.6" -y
done
|
#!/usr/bin/env bash
# Deploy: recursively copy the current directory tree to the web host.
# NOTE(review): `scp -r *` skips dotfiles; confirm no hidden files are needed.
scp -r * jamcp@oatmealforbreakfast.com:oatmealforbreakfast.com/xsacqikktkunni/
|
#ifndef TRIUMF_NMR_HEBEL_SLICHTER_HPP
#define TRIUMF_NMR_HEBEL_SLICHTER_HPP
#include <cmath>
#include <complex>
#include <boost/math/quadrature/exp_sinh.hpp>
#include <boost/math/quadrature/gauss_kronrod.hpp>
#include <triumf/constants/codata_2018.hpp>
#include <triumf/statistical_mechanics/fermi_dirac.hpp>
#include <triumf/superconductivity/bcs.hpp>
#include <triumf/superconductivity/dynes.hpp>
// TRIUMF: Canada's particle accelerator centre
namespace triumf {
// Nuclear Magnetic Resonance (NMR)
namespace nmr {
// Hebel-Slichter
namespace hebel_slichter {
// Integrand of the Hebel-Slichter spin-lattice-relaxation rate integral.
//
//   energy               - quasiparticle energy E (meV).
//   temperature          - sample temperature.
//   critical_temperature - superconducting transition temperature.
//   gap_meV              - gap parameter passed to the BCS gap model (meV).
//   alpha                - dimensionless offset: the second energy is
//                          E' = E + alpha * gap_meV (NOTE(review): presumably
//                          an energy-splitting term -- confirm with the model).
//   Gamma                - Dynes broadening, expressed as a fraction of gap_meV.
template <typename T = double>
T integrand(T energy, T temperature, T critical_temperature, T gap_meV, T alpha,
            T Gamma) {
  // alias some values
  // Temperature-dependent gap Delta(T) from the BCS model.
  T Delta = triumf::superconductivity::bcs::gap<T>(
      temperature, critical_temperature, gap_meV);
  // Reference energies fed to the Fermi-Dirac distribution (both zero here).
  constexpr T E_0 = 0.0;
  constexpr T E_F = 0.0;
  //
  T E = energy;
  T E_p = E + alpha * gap_meV;
  // calculate the Fermi factors
  // Note: 1e-3 used to convert energies from meV to eV
  T f_E = triumf::statistical_mechanics::fermi_dirac::distribution<T>(
      temperature, E * 1e-3, E_0, E_F);
  T f_E_p = triumf::statistical_mechanics::fermi_dirac::distribution<T>(
      temperature, E_p * 1e-3, E_0, E_F);
  //
  // Dynes coherence-factor combination (N*N' + M*M'), weighted by the
  // occupation of the initial state and the vacancy of the final state.
  return (triumf::superconductivity::dynes::N(E, Gamma * gap_meV, Delta) *
              triumf::superconductivity::dynes::N(E_p, Gamma * gap_meV, Delta) +
          triumf::superconductivity::dynes::M(E, Gamma * gap_meV, Delta) *
              triumf::superconductivity::dynes::M(E_p, Gamma * gap_meV,
                                                  Delta)) *
         f_E * (1.0 - f_E_p);
}
// ratio of SLR rates in the superconducting and normal states
//
//   temperature          - sample temperature.
//   critical_temperature - superconducting transition temperature.
//   gap_meV, alpha, Gamma - forwarded to integrand() (see above).
//
// Returns 0 for non-positive temperatures; otherwise evaluates the
// semi-infinite Hebel-Slichter integral numerically with exp_sinh quadrature.
template <typename T = double>
T slr_ratio(T temperature, T critical_temperature, T gap_meV, T alpha,
            T Gamma) {
  // define some convenience values
  T reduced_temperature = temperature / critical_temperature;
  // Boltzmann constant converted to meV/K (1e3 factor) to match the
  // meV energy scale of the integrand.
  constexpr T k_B =
      1e3 *
      triumf::constants::codata_2018::Boltzmann_constant_in_eV_K<T>::value();
  T beta = 1.0 / (k_B * temperature);
  // return limiting values...
  // if (temperature >= critical_temperature) {
  //   return 1.0;
  // } else if (temperature <= 0.0) {
  //   return 0.0;
  if (temperature <= 0.0) {
    return 0.0;
    // ...before attempting to evaluate the intergral!
  } else {
    // define the integrand
    auto hs_integrand = [&](T E) -> T {
      return integrand<T>(E, temperature, critical_temperature, gap_meV, alpha,
                          Gamma);
    };
    // setup values for numeric integration
    // const T tolerance = std::numeric_limits<T>::epsilon();
    // const T tolerance = std::pow(std::numeric_limits<T>::epsilon(), 2.0
    // / 3.0);
    // sqrt(epsilon) is the conventional tolerance for double-exponential rules.
    const T tolerance = std::sqrt(std::numeric_limits<T>::epsilon());
    const std::size_t max_refinements = 15;
    // static: the integrator's internal abscissa tables are reused across calls.
    static boost::math::quadrature::exp_sinh<T> hs_integrator(max_refinements);
    return 2.0 * beta *
           hs_integrator.integrate(hs_integrand, tolerance, nullptr, nullptr,
                                   nullptr);
    /*
    const unsigned max_depth = 15;
    return 2.0 * beta *
           boost::math::quadrature::gauss_kronrod<T, 61>::integrate(
               hs_integrand, 0.0, std::numeric_limits<T>::infinity(), max_depth,
               tolerance, nullptr, nullptr);
    */
  }
}
} // namespace hebel_slichter
} // namespace nmr
} // namespace triumf
#endif // TRIUMF_NMR_HEBEL_SLICHTER_HPP
|
<gh_stars>1-10
# Join model linking a User to a Retrospective (many-to-many membership).
class RetrospectivesUser < ApplicationRecord
  belongs_to :user
  belongs_to :retrospective
  # A user may appear at most once per retrospective.
  validates :retrospective_id, uniqueness: {scope: :user_id}
end
|
/* Routine optimized for shuffling a buffer for a type size of 16 bytes. */
/*
 * Transposes `vectorizable_elements` 16-byte elements from `src` into `dest`
 * so that byte i of every element is stored contiguously (stride is
 * `total_elements` bytes between byte lanes). Processes 8 elements
 * (128 bytes) per iteration with three rounds of NEON zips.
 *
 * BUGFIX: the original definition had no return type (implicit int is
 * invalid in C99+ and the function returns nothing); declared `void`.
 */
void shuffle16_neon(uint8_t* const dest, const uint8_t* const src,
                    const size_t vectorizable_elements, const size_t total_elements)
{
  size_t i, j, k;
  static const size_t bytesoftype = 16;
  uint8x8x2_t r0[8];
  uint16x4x2_t r1[8];
  uint32x2x2_t r2[8];
  for(i = 0, k = 0; i < vectorizable_elements*bytesoftype; i += 128, k++) {
    /* Load and interleave groups of 16 bytes (128 bytes) to the structure r0 */
    r0[0] = vzip_u8(vld1_u8(src + i + 0*8), vld1_u8(src + i + 2*8));
    r0[1] = vzip_u8(vld1_u8(src + i + 1*8), vld1_u8(src + i + 3*8));
    r0[2] = vzip_u8(vld1_u8(src + i + 4*8), vld1_u8(src + i + 6*8));
    r0[3] = vzip_u8(vld1_u8(src + i + 5*8), vld1_u8(src + i + 7*8));
    r0[4] = vzip_u8(vld1_u8(src + i + 8*8), vld1_u8(src + i + 10*8));
    r0[5] = vzip_u8(vld1_u8(src + i + 9*8), vld1_u8(src + i + 11*8));
    r0[6] = vzip_u8(vld1_u8(src + i + 12*8), vld1_u8(src + i + 14*8));
    r0[7] = vzip_u8(vld1_u8(src + i + 13*8), vld1_u8(src + i + 15*8));
    /* Interleave 16 bytes */
    r1[0] = vzip_u16(vreinterpret_u16_u8(r0[0].val[0]), vreinterpret_u16_u8(r0[2].val[0]));
    r1[1] = vzip_u16(vreinterpret_u16_u8(r0[0].val[1]), vreinterpret_u16_u8(r0[2].val[1]));
    r1[2] = vzip_u16(vreinterpret_u16_u8(r0[1].val[0]), vreinterpret_u16_u8(r0[3].val[0]));
    r1[3] = vzip_u16(vreinterpret_u16_u8(r0[1].val[1]), vreinterpret_u16_u8(r0[3].val[1]));
    r1[4] = vzip_u16(vreinterpret_u16_u8(r0[4].val[0]), vreinterpret_u16_u8(r0[6].val[0]));
    r1[5] = vzip_u16(vreinterpret_u16_u8(r0[4].val[1]), vreinterpret_u16_u8(r0[6].val[1]));
    r1[6] = vzip_u16(vreinterpret_u16_u8(r0[5].val[0]), vreinterpret_u16_u8(r0[7].val[0]));
    r1[7] = vzip_u16(vreinterpret_u16_u8(r0[5].val[1]), vreinterpret_u16_u8(r0[7].val[1]));
    /* Interleave 32 bytes */
    r2[0] = vzip_u32(vreinterpret_u32_u16(r1[0].val[0]), vreinterpret_u32_u16(r1[4].val[0]));
    r2[1] = vzip_u32(vreinterpret_u32_u16(r1[0].val[1]), vreinterpret_u32_u16(r1[4].val[1]));
    r2[2] = vzip_u32(vreinterpret_u32_u16(r1[1].val[0]), vreinterpret_u32_u16(r1[5].val[0]));
    r2[3] = vzip_u32(vreinterpret_u32_u16(r1[1].val[1]), vreinterpret_u32_u16(r1[5].val[1]));
    r2[4] = vzip_u32(vreinterpret_u32_u16(r1[2].val[0]), vreinterpret_u32_u16(r1[6].val[0]));
    r2[5] = vzip_u32(vreinterpret_u32_u16(r1[2].val[1]), vreinterpret_u32_u16(r1[6].val[1]));
    r2[6] = vzip_u32(vreinterpret_u32_u16(r1[3].val[0]), vreinterpret_u32_u16(r1[7].val[0]));
    r2[7] = vzip_u32(vreinterpret_u32_u16(r1[3].val[1]), vreinterpret_u32_u16(r1[7].val[1]));
    /* Store the results to the destination vector */
    vst1_u8(dest + k*8 + 0*total_elements, vreinterpret_u8_u32(r2[0].val[0]));
    vst1_u8(dest + k*8 + 1*total_elements, vreinterpret_u8_u32(r2[0].val[1]));
    vst1_u8(dest + k*8 + 2*total_elements, vreinterpret_u8_u32(r2[1].val[0]));
    vst1_u8(dest + k*8 + 3*total_elements, vreinterpret_u8_u32(r2[1].val[1]));
    vst1_u8(dest + k*8 + 4*total_elements, vreinterpret_u8_u32(r2[2].val[0]));
    vst1_u8(dest + k*8 + 5*total_elements, vreinterpret_u8_u32(r2[2].val[1]));
    vst1_u8(dest + k*8 + 6*total_elements, vreinterpret_u8_u32(r2[3].val[0]));
    vst1_u8(dest + k*8 + 7*total_elements, vreinterpret_u8_u32(r2[3].val[1]));
    vst1_u8(dest + k*8 + 8*total_elements, vreinterpret_u8_u32(r2[4].val[0]));
    vst1_u8(dest + k*8 + 9*total_elements, vreinterpret_u8_u32(r2[4].val[1]));
    vst1_u8(dest + k*8 + 10*total_elements, vreinterpret_u8_u32(r2[5].val[0]));
    vst1_u8(dest + k*8 + 11*total_elements, vreinterpret_u8_u32(r2[5].val[1]));
    vst1_u8(dest + k*8 + 12*total_elements, vreinterpret_u8_u32(r2[6].val[0]));
    vst1_u8(dest + k*8 + 13*total_elements, vreinterpret_u8_u32(r2[6].val[1]));
    vst1_u8(dest + k*8 + 14*total_elements, vreinterpret_u8_u32(r2[7].val[0]));
    vst1_u8(dest + k*8 + 15*total_elements, vreinterpret_u8_u32(r2[7].val[1]));
  }
}
|
'use strict';
// Browser entry point: load IE polyfills first (order matters), then
// initialise each page module only if its root element is present.
require( 'babel-polyfill' );
// Adds html <picture> element to IE
require( 'picturefill' );
// Adds element.classList object to IE
require( 'classlist-polyfill' );
// Adds promises to IE
require( 'promise-polyfill' );
// Side-effect module: sets up the overlay behaviour on load.
require( './modules/overlay' );
// modules
const MainFlag = require( './modules/mainFlag' );
let mainFlagElement = document.querySelector( '.js-mainFlagToggle' );
if ( mainFlagElement ) {
    new MainFlag( mainFlagElement );
}
const NavigationActiveClass = require( './modules/navigationActiveClass' );
let navigationActiveClassElement = document.querySelector( '.js-navigationActiveClass' );
if ( navigationActiveClassElement ) {
    new NavigationActiveClass( navigationActiveClassElement );
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.redis
import java.util
import scala.collection.JavaConverters._
import redis.clients.jedis._
import redis.clients.util.JedisClusterCRC16
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
import org.apache.spark.sql.xsql.manager.RedisManager._
/**
 * RDD that scans a Redis-cluster-backed table. Each partition covers a
 * contiguous range of hash slots; rows are produced by enumerating the keys
 * in that range and reading their values per owning node.
 */
class RedisRDD(
    sc: SparkContext,
    val relation: RedisRelation,
    val filters: Map[String, Any],
    val requiredColumns: Array[String],
    val partitionNum: Int = 1)
  extends RDD[Row](sc, Seq.empty)
  with RedisTableScanTrait {
  // Filled in by analyzeFilter() below (start out as null via `_`).
  override var key: Option[Any] = _
  override var range: Option[(Long, Long)] = _
  override var score: Option[(Double, Double)] = _
  analyzeFilter()
  // Prefer scheduling a partition near the Redis node that owns its slots.
  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    Seq(split.asInstanceOf[RedisPartition].redisConfig.initialAddr)
  }
  /**
   * hosts(ip:String, port:Int, startSlot:Int, endSlot:Int) are generated by the redis-cluster's
   * hash-slot layout and partitionNum to divide the cluster into partitionNum slot ranges
   *
   * @return hosts
   */
  private def scaleHostsWithPartitionNum(): Seq[(String, Int, Int, Int)] = {
    // Split one node's slot range into `cnt` contiguous sub-ranges.
    def split(host: RedisNode, cnt: Int) = {
      val endpoint = host.endpoint
      val start = host.startSlot
      val end = host.endSlot
      val range = (end - start) / cnt
      (0 until cnt).map(i => {
        (
          endpoint.host,
          endpoint.port,
          if (i == 0) start else (start + range * i + 1),
          if (i != cnt - 1) (start + range * (i + 1)) else end)
      })
    }
    val hosts = relation.redisConfig.hosts.sortBy(_.startSlot)
    if (hosts.size == partitionNum) {
      // Exactly one partition per node.
      hosts.map(x => (x.endpoint.host, x.endpoint.port, x.startSlot, x.endSlot))
    } else if (hosts.size < partitionNum) {
      // More partitions than nodes: split each node's slot range; the last
      // node absorbs the remainder so all partitionNum slices are produced.
      val presExtCnt = partitionNum / hosts.size
      val lastExtCnt = if (presExtCnt * hosts.size < partitionNum) {
        (presExtCnt + (partitionNum - presExtCnt * hosts.size))
      } else presExtCnt
      hosts.zipWithIndex.flatMap {
        case (host, idx) =>
          split(host, if (idx == hosts.size - 1) lastExtCnt else presExtCnt)
      }
    } else {
      // Fewer partitions than nodes: each partition spans the slot ranges of
      // several consecutive nodes (the last partition takes the tail).
      val presExtCnt = hosts.size / partitionNum
      (0 until partitionNum).map { idx =>
        {
          val ip = hosts(idx * presExtCnt).endpoint.host
          val port = hosts(idx * presExtCnt).endpoint.port
          val start = hosts(idx * presExtCnt).startSlot
          val end = hosts(if (idx == partitionNum - 1) {
            (hosts.size - 1)
          } else {
            ((idx + 1) * presExtCnt - 1)
          }).endSlot
          (ip, port, start, end)
        }
      }
    }
  }
  override protected def getPartitions: Array[Partition] = {
    val hosts = scaleHostsWithPartitionNum()
    (0 until partitionNum)
      .map(i => {
        new RedisPartition(i, relation.redisConfig, (hosts(i)._3, hosts(i)._4))
      })
      .toArray
  }
  // Keys for slots [sPos, ePos]: either the pushed-down key filter restricted
  // to this slot range, or a pattern scan over the cluster.
  override def getRowkeys(sPos: Int, ePos: Int): Seq[String] = {
    if (key.isDefined) {
      val keys = if (key.get.isInstanceOf[Seq[String]]) {
        key.get.asInstanceOf[Seq[String]].map(_.toString)
      } else {
        Array(key.get.toString).toSeq
      }
      // Keep only the keys whose hash slot falls inside this partition.
      keys.filter(key => {
        val slot = JedisClusterCRC16.getSlot(key)
        slot >= sPos && slot <= ePos
      })
    } else {
      getKeys(sPos, ePos, getKeyPattern).asScala.toSeq
    }
  }
  override def compute(split: Partition, context: TaskContext): Iterator[Row] = {
    val partition: RedisPartition = split.asInstanceOf[RedisPartition]
    val rowkeys = getRowkeys(partition.slots._1, partition.slots._2)
    // Group the keys by their owning master node, then read each group
    // through a single connection.
    groupKeysByNode(relation.redisConfig.hosts, rowkeys.iterator)
      .flatMap { x =>
        val conn = x._1.endpoint.connect()
        conn.select(dbNum)
        getKeyValue(x._2.toSeq, conn)
      }
      .toIterator
      .map(ite => Row.fromSeq(ite))
  }
}
/** Standalone (non-RDD) scan helper that reuses the shared filter analysis. */
class RedisTableScanUtil(
    val relation: RedisRelationTrait,
    val requiredColumns: Array[String],
    val filters: Map[String, Any])
  extends RedisTableScanTrait {
  // Populated by analyzeFilter() (start out as null via `_`).
  override var key: Option[Any] = _
  override var range: Option[(Long, Long)] = _
  override var score: Option[(Double, Double)] = _
  analyzeFilter()
}
/**
 * Shared scan logic for Redis-backed relations: pushed-down filter analysis,
 * key enumeration, and pipelined value retrieval for the supported Redis
 * value types (string, hash, list, set, zset).
 */
trait RedisTableScanTrait {
  val relation: RedisRelationTrait
  val requiredColumns: Array[String]
  val filters: Map[String, Any]
  // Outputs of analyzeFilter(): explicit key(s), index range, score range.
  var key: Option[Any]
  var range: Option[(Long, Long)]
  var score: Option[(Double, Double)]
  // Table-level options read from the relation's parameters.
  lazy val redisType: String = relation.parameters.getOrElse(REDIS_TYPE, "string")
  lazy val tableName: String = relation.parameters.getOrElse(TABLE, "")
  lazy val delimiter: String = relation.parameters.getOrElse(SUFFIX_DELIMITER, ":")
  lazy val dbNum: Int = relation.parameters.getOrElse(DATABASE, "0").toInt
  // Glob pattern matching every key of this table ("<table><delim>*"),
  // or every key in the database when no table name is configured.
  def getKeyPattern(): String = {
    if (tableName.equals("")) {
      "*"
    } else {
      tableName + delimiter + "*"
    }
  }
  // INLINE: call analyze manual
  // Validates the pushed-down filters and splits them into key/range/score;
  // throws SparkException for unsupported filter/type combinations.
  def analyzeFilter(): Unit = {
    if (filters.forall(exp => Seq(KEY, SUFFIX, SCORE, RANGE).contains(exp._1))) {
      val keySet = filters.keySet
      key = if (keySet.contains(KEY)) {
        // Full key filter: a single key or a sequence of keys, each of which
        // must begin with the configured table name.
        val ink = filters.get(KEY).get.asInstanceOf[Seq[Any]]
        assert(ink.size == 1)
        if (ink.head.isInstanceOf[String]) {
          assert(
            ink.head.asInstanceOf[String].startsWith(tableName),
            "key must start with table name")
        } else {
          assert(
            ink.head.asInstanceOf[Seq[String]].forall(_.startsWith(tableName)),
            "key must start with table name")
        }
        Option(ink.head)
      } else if (keySet.contains(SUFFIX)) {
        // Suffix filter: full keys are built as "<table><delim><suffix>".
        val ink = filters.get(SUFFIX).get.asInstanceOf[Seq[Any]]
        assert(ink.size == 1)
        val tmp = ink.head
        Option(if (tmp.isInstanceOf[Seq[String]]) {
          tmp.asInstanceOf[Seq[String]].map(tableName + delimiter + _)
        } else {
          tableName + delimiter + tmp.toString
        })
      } else {
        Option.empty
      }
      // Index range filter (list/zset): two bounds, inclusive.
      range = if (keySet.contains(RANGE)) {
        val ranges = filters.get(RANGE).get.asInstanceOf[Seq[Any]]
        assert(ranges.size == 2)
        Option((ranges(0).toString.toLong, ranges(1).toString.toLong))
      } else {
        Option.empty
      }
      // Score range filter (zset only): two bounds, inclusive.
      score = if (keySet.contains(SCORE)) {
        val scores = filters.get(SCORE).get.asInstanceOf[Seq[Any]]
        assert(scores.size == 2)
        Option((scores(0).toString.toDouble, scores(1).toString.toDouble))
      } else {
        Option.empty
      }
      // Reject filter/type combinations Redis cannot serve.
      if (range.isDefined && Seq("string", "hash", "set").contains(redisType)) {
        throw new SparkException(s"cann't apply range to ${redisType} record")
      }
      if (score.isDefined) {
        if (range.isDefined) {
          throw new SparkException(s"cann't apply range and score at same time")
        }
        if (Seq("string", "hash", "set", "list").contains(redisType)) {
          throw new SparkException(s"cann't apply score to ${redisType} record")
        }
      }
    } else {
      throw new SparkException("only support key, suffix, value, range, score for now")
    }
  }
  // Fetches the values for a batch of keys over a single pipelined
  // connection; the result shape depends on redisType (one inner Seq per key).
  def getValuesByBatch(rowkeys: Seq[String], conn: Jedis): Seq[Seq[Any]] = {
    val pipeline = conn.pipelined
    redisType match {
      case "hash" =>
        // HMGET the required (non-key) fields and cast each to its schema type.
        val noKeyColumns = (requiredColumns.toBuffer - "key").toArray
        rowkeys.foreach(pipeline.hmget(_, noKeyColumns: _*))
        val rows = pipeline
          .syncAndReturnAll()
          .asScala
          .asInstanceOf[Seq[java.util.List[String]]]
          .map(_.asScala)
        val requiredColumnsType = noKeyColumns.map(getDataType(relation.schema, _))
        rows.map(
          row =>
            row
              .zip(requiredColumnsType)
              .map {
                case (col, targetType) =>
                  castToTarget(col, targetType)
              })
      case "string" =>
        rowkeys.foreach(pipeline.get)
        pipeline.syncAndReturnAll().asScala.map(Seq(_))
      case "list" =>
        // LRANGE over the requested index range (defaults to the whole list).
        val (start: Long, end: Long) = if (range.isEmpty) (0L, -1L) else range.get
        rowkeys.foreach(pipeline.lrange(_, start, end))
        pipeline
          .syncAndReturnAll()
          .asScala
          .asInstanceOf[Seq[java.util.List[String]]]
          .map(list => Seq(list.asScala))
      case "set" =>
        rowkeys.foreach(pipeline.smembers)
        pipeline
          .syncAndReturnAll()
          .asScala
          .asInstanceOf[Seq[java.util.Set[String]]]
          .map(set => Seq(set.asScala.toSeq))
      case "zset" =>
        // Score filter takes precedence over index range; results come back
        // as (element -> score) maps.
        if (score.isDefined) {
          val (min: Double, max: Double) = if (score.isEmpty) {
            (Double.MinValue, Double.MaxValue)
          } else score.get
          rowkeys.foreach(pipeline.zrangeByScoreWithScores(_, min, max))
        } else {
          val (start: Long, end: Long) = if (range.isEmpty) (0L, -1L) else range.get
          rowkeys.foreach(pipeline.zrangeWithScores(_, start, end))
        }
        pipeline
          .syncAndReturnAll()
          .asScala
          .asInstanceOf[Seq[java.util.Set[Tuple]]]
          .map { tups =>
            Seq(
              tups.asScala
                .map {
                  case tup =>
                    (tup.getElement, tup.getScore)
                }
                .toMap[String, Double])
          }
    }
  }
  // Builds the projected rows for `rowkeys`: prepends the key column, batches
  // the value reads (10000 keys per pipeline), drops keys with null values,
  // and reorders columns to match requiredColumns. Closes `conn` when done.
  def getKeyValue(rowkeys: Seq[String], conn: Jedis): Seq[Seq[Any]] = {
    val default_col_name = Seq("key", "value")
    val noKeyColumns = (requiredColumns.toBuffer - "key")
    val toReturn = if (noKeyColumns.size == 0) {
      // Only the key column is requested: no value reads needed.
      rowkeys.map(Seq(_))
    } else {
      val col_names = redisType match {
        case "hash" =>
          Seq("key") ++ noKeyColumns
        case "string" | "list" | "set" | "zset" =>
          default_col_name
      }
      val res = rowkeys
        .grouped(10000)
        .flatMap(keys => {
          keys.zip(getValuesByBatch(keys, conn)).map(tup => Seq(tup._1) ++ tup._2)
        })
      val indexs = requiredColumns.map(col_names.indexOf)
      res.filter(_.tail.head != null).map(ite => indexs.map(ite).toSeq).toSeq
    }
    conn.close
    toReturn
  }
  // Looks up the Spark SQL data type of a column in the relation schema.
  private def getDataType(schema: StructType, attr: String) = {
    schema.fields(schema.fieldIndex(attr)).dataType
  }
  // Casts a raw Redis string value to the target Spark SQL type
  // (falls back to the string itself for unsupported types).
  private def castToTarget(value: String, dataType: DataType): Any = {
    dataType match {
      case IntegerType => value.toString.toInt
      case DoubleType => value.toString.toDouble
      case StringType => value.toString
      case _ => value.toString
    }
  }
  /**
   * @param key candidate key string
   * @return true if the key contains unescaped Redis glob metacharacters
   *         (*, ?, [) and therefore must be resolved with SCAN
   */
  private def isRedisRegex(key: String) = {
    // Recursive scan; `escape` is true when the previous char was a backslash.
    def judge(key: String, escape: Boolean): Boolean = {
      if (key.length == 0) {
        false
      } else {
        escape match {
          case true => judge(key.substring(1), false)
          case false =>
            key.charAt(0) match {
              case '*' => true
              case '?' => true
              case '[' => true
              case '\\' => judge(key.substring(1), true)
              case _ => judge(key.substring(1), false)
            }
        }
      }
    }
    judge(key, false)
  }
  // Keys to read for slots [sPos, ePos]: the explicit key filter if present
  // (NOTE(review): unlike RedisRDD.getRowkeys, this variant does not restrict
  // explicit keys to the slot range), otherwise a pattern scan.
  def getRowkeys(sPos: Int, ePos: Int): Seq[String] = {
    if (key.isDefined) {
      if (key.get.isInstanceOf[Seq[String]]) {
        key.get.asInstanceOf[Seq[String]].map(_.toString)
      } else {
        Array(key.get.toString).toSeq
      }
    } else {
      getKeys(sPos, ePos, getKeyPattern).asScala.toSeq
    }
  }
  /**
   * @param sPos start position of slots
   * @param ePos end position of slots
   * @param keyPattern glob pattern, or a literal key when it has no metacharacters
   * @return keys whose slot is in [sPos, ePos]
   */
  def getKeys(sPos: Int, ePos: Int, keyPattern: String): util.HashSet[String] = {
    val nodes = relation.redisConfig.getNodesBySlots(sPos, ePos)
    val keys = new util.HashSet[String]()
    if (isRedisRegex(keyPattern)) {
      // Pattern: SCAN every node owning slots in range, keep in-range keys.
      nodes.foreach(node => {
        val conn = node.endpoint.connect()
        conn.select(dbNum)
        val params = new ScanParams().`match`(keyPattern)
        val res = keys.addAll(
          scanKeys(conn, params).asScala
            .filter(key => {
              val slot = JedisClusterCRC16.getSlot(key)
              slot >= sPos && slot <= ePos
            })
            .asJava)
        conn.close
        res
      })
    } else {
      // Literal key: no scan needed, just check its slot.
      val slot = JedisClusterCRC16.getSlot(keyPattern)
      if (slot >= sPos && slot <= ePos) keys.add(keyPattern)
    }
    keys
  }
  /**
   * @param nodes list of RedisNode
   * @param keys list of keys
   * @return (node1: (key1, key2, ...), node2: (key3, key4, ...), ...)
   *         where each key is grouped under the master node owning its slot
   */
  def groupKeysByNode(
      nodes: Array[RedisNode],
      keys: Iterator[String]): Array[(RedisNode, Array[String])] = {
    def getNode(key: String): RedisNode = {
      val slot = JedisClusterCRC16.getSlot(key)
      /* Master only */
      nodes
        .filter(node => { node.startSlot <= slot && node.endSlot >= slot })
        .filter(_.idx == 0)(0)
    }
    keys
      .map(key => (getNode(key), key))
      .toArray
      .groupBy(_._1)
      .map(x => (x._1, x._2.map(_._2)))
      .toArray
  }
  /**
   * @param conn connection to the node owning all of `keys`
   * @param keys keys guaranteed to belong to the connected server
   * @param t Redis type name to keep (e.g. "hash")
   * @return the subset of keys whose TYPE equals `t`
   */
  def filterKeysByType(conn: Jedis, keys: Array[String], t: String): Array[String] = {
    val pipeline = conn.pipelined
    keys.foreach(pipeline.`type`)
    val types = pipeline.syncAndReturnAll
    (keys).zip(types.asScala).filter(x => (x._2 == t)).map(x => x._1)
  }
}
|
<reponame>alemesa1991/School-Projects
/*
 * Tom Duff's device: copy `count` 16-bit values from `from` to the single
 * location `*to`. The destination pointer is deliberately NOT incremented --
 * the original use case streamed words into a memory-mapped output register.
 *
 * BUGFIX: with count == 0 the original fell straight into the unrolled loop
 * (n computed as 0, then `--n` ran the 8-way body once), copying 8 values
 * from past-the-end memory. Guard against non-positive counts.
 */
void send( short *to, short *from, int count )
{
    int n;
    if (count <= 0)
        return;
    n = (count+7)/8;                /* number of 8-way loop passes */
    switch(count%8){                /* jump into the loop for the remainder */
    case 0: do { *to = *from++;
    case 7:      *to = *from++;
    case 6:      *to = *from++;
    case 5:      *to = *from++;
    case 4:      *to = *from++;
    case 3:      *to = *from++;
    case 2:      *to = *from++;
    case 1:      *to = *from++;
            } while(--n>0);
    }
}
/*
 * GB Tom Duff's device applied to memcpy: copy `count` bytes from `src`
 * to `dst` and return `dst`.
 *
 * NOTE(review): this shadows the standard library's memcpy with an
 * incompatible prototype (non-const src, int count) -- confirm that is
 * intentional for this educational file.
 *
 * BUGFIX: with count == 0 the original executed the unrolled body once
 * (copying 8 bytes that were never requested); guard non-positive counts.
 */
void *memcpy(void *dst, void *src, int count)
{
    char *d = (char*) dst;
    char *s = (char*) src;
    int n;
    if (count <= 0)
        return dst;
    n = (count+7)/8;                /* number of 8-way loop passes */
    switch(count%8){                /* jump into the loop for the remainder */
    case 0: do { *d++ = *s++;
    case 7:      *d++ = *s++;
    case 6:      *d++ = *s++;
    case 5:      *d++ = *s++;
    case 4:      *d++ = *s++;
    case 3:      *d++ = *s++;
    case 2:      *d++ = *s++;
    case 1:      *d++ = *s++;
            } while(--n>0);
    }
    return dst;
}
// GB see /usr/include/boost/numeric/ublas/detail/duff.hpp
//
// Copyright (c) 2000-2002
// <NAME>, <NAME>
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// The authors gratefully acknowledge the support of
// GeNeSys mbH & Co. KG in producing this work.
//
#ifndef _BOOST_UBLAS_DUFF_
#define _BOOST_UBLAS_DUFF_
// Opens a Duff's device that executes `expr` n times, unrolled d-fold.
// r becomes the pass counter; the switch jumps into the loop to handle n % d.
#define DD_SWITCH(n, d, r, expr) \
{ \
unsigned r = ((n) + (d) - 1) / (d); \
switch ((n) % (d)) { \
case 0: do { expr;
// One fall-through case label of the unrolled body.
#define DD_CASE_I(i, expr) \
case (i): expr;
// Closes the do/while loop and the enclosing switch/block.
#define DD_WHILE(r) \
} while (-- (r) > 0); \
} \
}
// DD_<k>T chains: emit k-1 fall-through cases, then close the loop.
#define DD_1T(n, d, r, expr) \
DD_WHILE(r)
#define DD_2T(n, d, r, expr) \
DD_CASE_I(1, expr) \
DD_1T(n, d, r, expr)
#define DD_3T(n, d, r, expr) \
DD_CASE_I(2, expr) \
DD_2T(n, d, r, expr)
#define DD_4T(n, d, r, expr) \
DD_CASE_I(3, expr) \
DD_3T(n, d, r, expr)
#define DD_5T(n, d, r, expr) \
DD_CASE_I(4, expr) \
DD_4T(n, d, r, expr)
#define DD_6T(n, d, r, expr) \
DD_CASE_I(5, expr) \
DD_5T(n, d, r, expr)
#define DD_7T(n, d, r, expr) \
DD_CASE_I(6, expr) \
DD_6T(n, d, r, expr)
#define DD_8T(n, d, r, expr) \
DD_CASE_I(7, expr) \
DD_7T(n, d, r, expr)
// Entry point: expand a complete d-fold-unrolled Duff's device for `expr`.
// `d` must be a literal 1..8 so DD_##d##T resolves to a chain above.
#define DD(n, d, r, expr) \
DD_SWITCH(n, d, r, expr) \
DD_##d##T(n, d, r, expr)
#endif
// Empty entry point: nothing to exercise at runtime, the file exists for
// the definitions and macros above.
int main()
{
}
|
<gh_stars>1-10
import { modifiers } from '../data/course-modifiers.js'
const stringSimilarity = require('string-similarity');
/**
 * Count how many special modifier filters (e.g. 'spring') from the query
 * apply to a class.
 * @param {object} clazz Class record carrying a `modifiers` array
 * @param {string} query Must be lowercase
 * @return {number} matches
 */
function specialModifierMatches(clazz, query) {
  let count = 0;
  Object.entries(modifiers).forEach(([name, config]) => {
    const mentioned = config.search.some(term => query.includes(term.toLowerCase()));
    if (mentioned && clazz.modifiers.includes(name)) {
      count += 1;
    }
  });
  return count;
}
/**
 * Default ordering for two classes: ascending by ID.
 * @param {object} a Class A
 * @param {object} b Class B
 * @return {number} Negative, zero or positive comparison value
 */
function defaultSort(a, b) {
  const byId = a.ID - b.ID;
  return byId;
}
/**
 * Rank how well a class matches a certain query. Higher is better.
 * Weights: exact prefix/ID match (10000) >> name similarity (1000) >>
 * description similarity / modifier matches (100).
 * @param {object} clazz
 * @param {Array{string}} processedQuery Query.lower().split(' ') with hyphen removed
 * @return {number} Score
 */
function rankClassSearch(clazz, processedQuery) {
  let score = 0;
  // Exact match of PREFIX or ID
  if (processedQuery.includes(clazz.prefix.toLowerCase()))
    score += 10000;
  if (processedQuery.includes(clazz.ID))
    score += 10000;
  // General closeness score (Dice-coefficient string similarity)
  let query = processedQuery.join(' ');
  score += 1000 * stringSimilarity.compareTwoStrings(query, clazz.name);
  score += 100 * stringSimilarity.compareTwoStrings(query, clazz.description);
  // Check special filters
  score += 100 * specialModifierMatches(clazz, query);
  return score;
}
/**
 * Search a list of classes, best matches first.
 * @param {Array{Object}} classes
 * @param {string} query
 * @return {Array{Object}} filtered classes (by query)
 */
export default function search(classes, query) {
  if (!query) return classes.sort(defaultSort);
  let words = query
    .toLowerCase()
    .replace(/([A-Za-z]{4})-(\d{4})/, '$1 $2') // Remove hyphens between PREFIX-ID
    .split(' ')
  // BUGFIX: specialModifierMatches requires a lowercase query, but the filter
  // below previously received the raw query, so capitalised modifier words
  // (e.g. "Spring") never matched. Use the normalized form everywhere.
  let normalizedQuery = words.join(' ');
  let filtered = classes.filter(clazz => {
    if (specialModifierMatches(clazz, normalizedQuery) > 0)
      return true;
    let passingWords = words.filter(word =>
      (`${clazz.name} ${clazz.ID} ${clazz.prefix} ${clazz.description}`)
        .toLowerCase()
        .includes(word));
    // At least 80% of search query must be present
    return passingWords.length > words.length * 0.8;
  });
  // Sort by descending relevance; break ties with the default ID order.
  return filtered.sort((a, b) => {
    let a_ = rankClassSearch(a, words);
    let b_ = rankClassSearch(b, words);
    if (a_ === b_) return defaultSort(a, b);
    return b_ - a_;
  });
}
|
# Activate the project's virtual environment, then run the scraper.
source env/bin/activate;
# Quote "$@" so arguments containing whitespace are forwarded intact
# (the original unquoted $@ re-split them into separate words).
python3 scraper_data.py "$@";
|
#!/bin/sh
# Run the TODO-diff checker between two git refs and surface its findings as
# GitHub Actions annotations via a problem matcher.
#
# Positional arguments:
#   $1 - label to filter TODOs by
#   $2 - base ref (old state)
#   $3 - head ref (new state)
label=$1
base_ref=$2
head_ref=$3
# copy the problem matcher to the workspace, so it is accessible outside of the
# container
# See: https://github.com/actions/toolkit/issues/205#issuecomment-557647948
cp /git-diff-todos-problem-matcher.json "${HOME}/"
echo "::add-matcher::${HOME}/git-diff-todos-problem-matcher.json"
echo "Parse diff ${base_ref}..${head_ref} for ${label}."
echo "-----------------------------------------------------------"
# Quote the variables so empty values or whitespace do not break the
# argument list (the original passed them unquoted).
python3 /git-diff-todos.py --parsable-output -o "${base_ref}" -n "${head_ref}" -l "${label}"
# Propagate the checker's exit status so the CI step fails when it does.
result=$?
exit ${result}
|
To estimate the performance of the JavaScript operation relative to the Python operation, we can use benchmarking or algorithmic analysis. Benchmarking measures actual running times on representative inputs, while algorithmic analysis compares the time complexity of the two implementations: if one algorithm belongs to a higher complexity class, it will generally incur greater overhead as the input grows. For a more accurate comparison, we should also account for implementation details, such as the data structures used and language-specific optimization techniques (for example, JIT compilation in JavaScript or C-backed built-ins in Python).
|
<filename>src/main/java/net/anatolich/subscriptions/currency/infrastructure/rest/ExchangeRatePayload.java
package net.anatolich.subscriptions.currency.infrastructure.rest;
import io.swagger.v3.oas.annotations.media.Schema;
import javax.validation.constraints.NotEmpty;
import javax.validation.constraints.NotNull;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import net.anatolich.subscriptions.currency.domain.model.ExchangeRate;
import org.hibernate.validator.constraints.Length;
/**
 * REST request/response body describing the exchange rate between two
 * currencies. Lombok generates the accessors and constructors; Bean
 * Validation constraints guard inbound payloads.
 */
@Schema(name = "ExchangeRate", description = "exchange rate of two currencies")
@Data
@NoArgsConstructor
@AllArgsConstructor
public class ExchangeRatePayload {

    /** Source currency code; exactly 3 characters (presumably ISO 4217 -- confirm). */
    @Schema(description = "source currency of exchange rate")
    @NotNull
    @NotEmpty
    @Length(min = 3, max = 3)
    private String from;

    /** Target currency code; exactly 3 characters (presumably ISO 4217 -- confirm). */
    @Schema(description = "target currency of exchange rate")
    @NotNull
    @NotEmpty
    @Length(min = 3, max = 3)
    private String to;

    /**
     * Conversion rate from {@code from} to {@code to}.
     * NOTE(review): {@code @NotNull} on a primitive {@code double} is a no-op
     * (it can never be null); consider {@code @Positive} or a boxed type if
     * absence must be detectable. A {@code double} may also lose precision
     * for monetary rates -- confirm whether {@code BigDecimal} is required.
     */
    @Schema(description = "conversion rate between source and target currency")
    @NotNull
    private double rate;

    /** Maps this payload onto the domain {@link ExchangeRate} value object. */
    public ExchangeRate toExchangeRate() {
        return ExchangeRate.of(from, to, rate);
    }
}
|
<filename>packages/recipe-mail/src/example/templates/index.js
import DefaultLayout from '../../core/templates/layouts/default.js';
// Wraps the layout's default Head component. The fragment currently renders
// the default head unchanged; extend it to inject extra tags for this template.
const overrideHead = (DefaultHead) => {
  const Head = (props) => (
    <>
      <DefaultHead {...props}/>
    </>
  );
  return Head;
};
// Example "index" mail template: the default layout with the head override
// above and no body content of its own.
const Index = (props) => (
  <DefaultLayout {...props} head={overrideHead}>
  </DefaultLayout>
);
export default Index;
|
docker exec -i cassandra bash -c 'cqlsh -u cassandra -p cassandra' < assets/scripts/cassandra/tables.cql
|
#!/bin/bash
# Scaffolding migration: wires the generated CurrentCountryOfRegistration page
# into routes, messages and the test generators.

echo ""
echo "Applying migration CurrentCountryOfRegistration"

# Append GET/POST routes for both Normal and Check modes.
echo "Adding routes to conf/app.routes"
echo "" >> ../conf/app.routes
echo "GET        /currentCountryOfRegistration                        controllers.CurrentCountryOfRegistrationController.onPageLoad(mode: Mode = NormalMode)" >> ../conf/app.routes
echo "POST       /currentCountryOfRegistration                        controllers.CurrentCountryOfRegistrationController.onSubmit(mode: Mode = NormalMode)" >> ../conf/app.routes
echo "GET        /changeCurrentCountryOfRegistration                  controllers.CurrentCountryOfRegistrationController.onPageLoad(mode: Mode = CheckMode)" >> ../conf/app.routes
echo "POST       /changeCurrentCountryOfRegistration                  controllers.CurrentCountryOfRegistrationController.onSubmit(mode: Mode = CheckMode)" >> ../conf/app.routes

# Append the page's i18n keys (placeholder values to be edited by hand).
echo "Adding messages to conf.messages"
echo "" >> ../conf/messages.en
echo "currentCountryOfRegistration.title = currentCountryOfRegistration" >> ../conf/messages.en
echo "currentCountryOfRegistration.heading = currentCountryOfRegistration" >> ../conf/messages.en
echo "currentCountryOfRegistration.option1 = Option 1" >> ../conf/messages.en
echo "currentCountryOfRegistration.option2 = Option 2" >> ../conf/messages.en
echo "currentCountryOfRegistration.checkYourAnswersLabel = currentCountryOfRegistration" >> ../conf/messages.en
echo "currentCountryOfRegistration.error.required = Select currentCountryOfRegistration" >> ../conf/messages.en
echo "currentCountryOfRegistration.change.hidden = CurrentCountryOfRegistration" >> ../conf/messages.en

# Each awk below inserts a ScalaCheck Arbitrary instance right after the
# trait/val declaration it matches, writing via a temp file.
echo "Adding to UserAnswersEntryGenerators"
awk '/trait UserAnswersEntryGenerators/ {\
    print;\
    print "";\
    print "  implicit lazy val arbitraryCurrentCountryOfRegistrationUserAnswersEntry: Arbitrary[(CurrentCountryOfRegistrationPage.type, JsValue)] =";\
    print "    Arbitrary {";\
    print "      for {";\
    print "        page  <- arbitrary[CurrentCountryOfRegistrationPage.type]";\
    print "        value <- arbitrary[CurrentCountryOfRegistration].map(Json.toJson(_))";\
    print "      } yield (page, value)";\
    print "    }";\
    next }1' ../test/generators/UserAnswersEntryGenerators.scala > tmp && mv tmp ../test/generators/UserAnswersEntryGenerators.scala

echo "Adding to PageGenerators"
awk '/trait PageGenerators/ {\
    print;\
    print "";\
    print "  implicit lazy val arbitraryCurrentCountryOfRegistrationPage: Arbitrary[CurrentCountryOfRegistrationPage.type] =";\
    print "    Arbitrary(CurrentCountryOfRegistrationPage)";\
    next }1' ../test/generators/PageGenerators.scala > tmp && mv tmp ../test/generators/PageGenerators.scala

echo "Adding to ModelGenerators"
awk '/trait ModelGenerators/ {\
    print;\
    print "";\
    print "  implicit lazy val arbitraryCurrentCountryOfRegistration: Arbitrary[CurrentCountryOfRegistration] =";\
    print "    Arbitrary {";\
    print "      Gen.oneOf(CurrentCountryOfRegistration.values.toSeq)";\
    print "    }";\
    next }1' ../test/generators/ModelGenerators.scala > tmp && mv tmp ../test/generators/ModelGenerators.scala

echo "Adding to UserAnswersGenerator"
awk '/val generators/ {\
    print;\
    print "    arbitrary[(CurrentCountryOfRegistrationPage.type, JsValue)] ::";\
    next }1' ../test/generators/UserAnswersGenerator.scala > tmp && mv tmp ../test/generators/UserAnswersGenerator.scala

echo "Migration CurrentCountryOfRegistration completed"
|
<filename>mlir/include/mlir/Dialect/Bufferization/Transforms/BufferUtils.h
//===- BufferUtils.h - Buffer optimization utilities ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities for passes optimizing code that has already
// been converted to buffers.
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_DIALECT_BUFFERIZATION_TRANSFORMS_BUFFERUTILS_H
#define MLIR_DIALECT_BUFFERIZATION_TRANSFORMS_BUFFERUTILS_H
#include "mlir/Analysis/BufferViewFlowAnalysis.h"
#include "mlir/Analysis/Liveness.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/Operation.h"
#include "mlir/Transforms/DialectConversion.h"
namespace mlir {
namespace memref {
class GlobalOp;
} // namespace memref
namespace bufferization {
/// A simple analysis that detects allocation operations.
class BufferPlacementAllocs {
public:
  /// Represents a tuple of allocValue and deallocOperation (the dealloc
  /// may be absent for an alloc — confirm in the implementation).
  using AllocEntry = std::tuple<Value, Operation *>;

  /// Represents a list containing all alloc entries.
  using AllocEntryList = SmallVector<AllocEntry, 8>;

  /// Get the start operation to place the given alloc value within the
  /// specified placement block.
  static Operation *getStartOperation(Value allocValue, Block *placementBlock,
                                      const Liveness &liveness);

public:
  /// Initializes the internal list by discovering all supported allocation
  /// nodes.
  BufferPlacementAllocs(Operation *op);

  /// Returns the begin iterator to iterate over all allocations.
  AllocEntryList::const_iterator begin() const { return allocs.begin(); }

  /// Returns the end iterator that can be used in combination with begin.
  AllocEntryList::const_iterator end() const { return allocs.end(); }

  /// Returns the begin iterator to iterate over all allocations.
  AllocEntryList::iterator begin() { return allocs.begin(); }

  /// Returns the end iterator that can be used in combination with begin.
  AllocEntryList::iterator end() { return allocs.end(); }

  /// Registers a new allocation entry.
  void registerAlloc(const AllocEntry &entry) { allocs.push_back(entry); }

private:
  /// Searches for and registers all supported allocation entries.
  void build(Operation *op);

private:
  /// Maps allocation nodes to their associated blocks.
  AllocEntryList allocs;
};
/// The base class for all BufferPlacement transformations.
class BufferPlacementTransformationBase {
public:
  /// Set of aliasing values, as computed by BufferViewFlowAnalysis.
  using ValueSetT = BufferViewFlowAnalysis::ValueSetT;

  /// Finds a common dominator for the given value while taking the positions
  /// of the values in the value set into account. It supports dominator and
  /// post-dominator analyses via template arguments.
  template <typename DominatorT>
  static Block *findCommonDominator(Value value, const ValueSetT &values,
                                    const DominatorT &doms) {
    // Start with the current block the value is defined in.
    Block *dom = value.getParentBlock();
    // Iterate over all aliases and their uses to find a safe placement block
    // according to the given dominator information.
    for (Value childValue : values) {
      for (Operation *user : childValue.getUsers()) {
        // Move upwards in the dominator tree to find an appropriate
        // dominator block that takes the current use into account.
        dom = doms.findNearestCommonDominator(dom, user->getBlock());
      }
      // Take values without any users into account.
      dom = doms.findNearestCommonDominator(dom, childValue.getParentBlock());
    }
    return dom;
  }

  /// Returns true if the given operation represents a loop by testing whether
  /// it implements the `LoopLikeOpInterface` or the `RegionBranchOpInterface`.
  /// In the case of a `RegionBranchOpInterface`, it checks all region-based
  /// control-flow edges for cycles.
  static bool isLoop(Operation *op);

  /// Constructs a new operation base using the given root operation.
  BufferPlacementTransformationBase(Operation *op);

protected:
  /// Alias information that can be updated during the insertion of copies.
  BufferViewFlowAnalysis aliases;

  /// Stores all internally managed allocations.
  BufferPlacementAllocs allocs;

  /// The underlying liveness analysis to compute fine grained information
  /// about alloc and dealloc positions.
  Liveness liveness;
};
// Support class to create global ops for tensor-valued constants in the
// program. Globals are created lazily at the top of the `moduleOp` with pretty
// names. Duplicates are avoided.
class GlobalCreator {
public:
  // `alignment` is stored for the created globals; 0 presumably means "no
  // explicit alignment" — confirm against the memref::GlobalOp docs.
  GlobalCreator(ModuleOp module, unsigned alignment = 0)
      : moduleOp(module), alignment(alignment) {}

  // Returns the global backing `constantOp`'s value, creating it on first use
  // (per the lazy-creation/dedup contract described on the class).
  memref::GlobalOp getGlobalFor(arith::ConstantOp constantOp);

private:
  ModuleOp moduleOp;
  unsigned alignment;
  // This could use memref::GlobalOp key but we avoid introducing a new
  // dependence to the memref dialect for this.
  DenseMap<Attribute, Operation *> globals;
};
} // namespace bufferization
} // namespace mlir
#endif // MLIR_DIALECT_BUFFERIZATION_TRANSFORMS_BUFFERUTILS_H
|
<gh_stars>0
const AddressArray = artifacts.require("AddressArray")
const getBlockNumber = require('./blockNumber')(web3)
const InitIssueAndLockFactory = artifacts.require("InitIssueAndLockFactory")
const InitIssueAndLock = artifacts.require('InitIssueAndLock')
const GTTokenFactory = artifacts.require("GTTokenFactory");
const GTToken = artifacts.require('GTToken')
const MultiSigFactory = artifacts.require("MultiSigFactory");
const MultiSig = artifacts.require("MultiSig");
const TrustListFactory = artifacts.require("TrustListFactory");
const TrustList = artifacts.require("TrustList");
const FundAndDistributeFactory = artifacts.require("FundAndDistributeFactory");
const FundAndDistribute = artifacts.require("FundAndDistribute");
const USDT = artifacts.require("USDT")
// Deploys the GoToken stack (multisig, trust lists, token, initial
// issue-and-lock, funding contract) for the given network and writes the
// resulting contract addresses to "<network>.json".
// Fix: the original assigned every working variable without declaring it,
// leaking them all as implicit globals; they are now proper locals.
async function performMigration(deployer, network, accounts) {
  let funders = [];
  const funder_amount = [30000000000, 10000000000, 10000000000];
  let usdt_address = '';
  let block_until = (await getBlockNumber()) + 100;
  const results = {};
  if (network.includes("ropsten")) {
    console.log("reset for ropsten");
    funders = ['0xC7f3e458A4EcFa84b37a2D00e6bA414bd57fDAa4', '0xD272Be26d62c0C2988Cfbd5Cb04EBcBe85bB5263', '0x7e2E17A940da45eF568410B1323bED161084455F'];
    // Testnet: deploy a fake USDT and fund two of the signers.
    const usdt = await deployer.deploy(USDT);
    await usdt.issue("0xC7f3e458A4EcFa84b37a2D00e6bA414bd57fDAa4", 100000000);
    await usdt.issue("0xD272Be26d62c0C2988Cfbd5Cb04EBcBe85bB5263", 100000000);
    console.log("USDT: ", usdt.address);
    usdt_address = usdt.address;
    block_until = (await getBlockNumber()) + 100;
  } else if (network.includes("main")) {
    console.log("reset for main");
    funders = ['0xe855B4cb17eA22CAE1be5FeB7fCDC0Ef67DCa84D', '0x3e6F107Fd4A95AF86108c1F44E502A6136AD386e', '0x57955d7AA271DbDDE92D67e0EF52D90c6E4089cA'];
    // Canonical mainnet USDT contract address.
    usdt_address = '0xdAC17F958D2ee523a2206206994597C13D831ec7';
    block_until = (await getBlockNumber()) + 6 * 185000;
  }
  results['usdt'] = usdt_address;
  console.log("network: ", network);
  console.log("signers: ", funders);
  // Multisig wallet owned by the funders.
  const multisig_factory = await MultiSigFactory.deployed();
  console.log("MultiSigFactory: ", multisig_factory.address);
  let tokentx = await multisig_factory.createMultiSig(funders);
  const multisig = await MultiSig.at(tokentx.logs[0].args.addr);
  console.log("MultiSig: ", multisig.address);
  results['multisig'] = multisig.address;
  // Trust list of accounts allowed to issue GTToken (empty initially).
  const trustlist_factory = await TrustListFactory.deployed();
  console.log("TrustListFactory: ", trustlist_factory.address);
  tokentx = await trustlist_factory.createTrustList([], multisig.address);
  const token_trustlist = await TrustList.at(tokentx.logs[0].args.addr);
  console.log("GTToken TrustList: ", token_trustlist.address);
  results['gotoken_issuers'] = token_trustlist.address;
  // Trust list of accounts allowed to fund (the funders themselves).
  tokentx = await trustlist_factory.createTrustList(funders, multisig.address);
  const fund_trustlist = await TrustList.at(tokentx.logs[0].args.addr);
  console.log("FundAndDistribute TrustList: ", fund_trustlist.address);
  results["funders"] = fund_trustlist.address;
  // The GoToken itself (6 decimals, symbol GOO).
  const token_factory = await GTTokenFactory.deployed();
  console.log("GTTokenFactory: ", token_factory.address);
  tokentx = await token_factory.createCloneToken('0x0000000000000000000000000000000000000000', 0, "GoToken", 6, "GOO", true, multisig.address, token_trustlist.address);
  const gttoken = await GTToken.at(tokentx.logs[0].args._cloneToken);
  console.log("GTToken: ", gttoken.address);
  results["gotoken"] = gttoken.address;
  // Initial issuance, locked until block_until.
  const init_lock_factory = await InitIssueAndLockFactory.deployed();
  console.log("InitIssueAndLockFactory: ", init_lock_factory.address);
  const tx = await init_lock_factory.createIssueAndLock(gttoken.address,
                                                        block_until,
                                                        funders,
                                                        funder_amount,
                                                        multisig.address);
  const init_lock = await InitIssueAndLock.at(tx.logs[0].args.addr);
  console.log("InitIssueAndLock: ", init_lock.address);
  results["init_lock"] = init_lock.address;
  console.log('InitIssueAndLock lock until: ', block_until);
  // USDT-funded issuer restricted to the funders trust list.
  const fund_factory = await FundAndDistributeFactory.deployed();
  console.log("FundAndDistributeFactory: ", fund_factory.address);
  const ctx = await fund_factory.createStdERC20TokenIssuer(gttoken.address,
    "USDT for GOO", "Only for Funders", usdt_address, multisig.address, fund_trustlist.address);
  const fund = await FundAndDistribute.at(ctx.logs[0].args.addr);
  console.log("FundAndDistribute: ", fund.address);
  results["fund"] = fund.address;
  // Persist the deployed addresses for later tooling.
  require('fs').writeFile(network + ".json", JSON.stringify(results), function (err) {
    if (err) throw err;
    console.log('complete');
  });
}
module.exports = function(deployer, network, accounts){
deployer
.then(function() {
return performMigration(deployer, network, accounts)
})
.catch(error => {
console.log(error)
process.exit(1)
})
};
|
<reponame>nedphae/contact-center-client<gh_stars>1-10
import React, { useState } from 'react';
import { useQuery } from '@apollo/client';
import Viewer from 'react-viewer';
import clsx from 'clsx';
import Typography from '@material-ui/core/Typography';
import Grid from '@material-ui/core/Grid';
import Paper from '@material-ui/core/Paper';
import List from '@material-ui/core/List';
import ListItem from '@material-ui/core/ListItem';
import ListItemAvatar from '@material-ui/core/ListItemAvatar';
import ListItemText from '@material-ui/core/ListItemText';
import Avatar from '@material-ui/core/Avatar';
import { getDownloadOssStaffImgPath } from 'app/config/clientConfig';
import { Conversation } from 'app/domain/Conversation';
import { CreatorType } from 'app/domain/constant/Message';
import javaInstant2DateStr from 'app/utils/timeUtils';
import { ImageDecorator } from 'react-viewer/lib/ViewerProps';
import { StaffGraphql, QUERY_STAFF_BY_ID } from 'app/domain/graphql/Staff';
import {
createContent,
useMessageListStyles,
} from '../Chat/ChatBox/MessageList';
// Props for the read-only conversation message list.
interface MessageListProps {
  // Conversation whose chat messages (plus user/staff names) are rendered.
  conversation: Conversation;
}
/**
 * Renders a conversation's chat messages as a scrollable list.
 *
 * Customer messages are left-aligned with a leading avatar; staff messages
 * are right-aligned with a trailing avatar (loaded via QUERY_STAFF_BY_ID).
 * Image content opens in a full-screen Viewer dialog.
 */
export default function MessageList(props: MessageListProps) {
  const { conversation } = props;
  const classes = useMessageListStyles();
  // Controls visibility of the full-screen image viewer and the image shown.
  const [showImageViewerDialog, toggleShowImageViewerDialog] = useState(false);
  const [imageViewer, setImageViewer] = useState<ImageDecorator>({
    src: '',
    alt: undefined,
  });
  // Fetch the staff member for this conversation (used for the avatar).
  const { data } = useQuery<StaffGraphql>(QUERY_STAFF_BY_ID, {
    variables: { staffId: conversation.staffId },
  });
  const staff = data?.getStaffById;
  // Sort a copy of the messages by seqId ascending.
  const messages = [...(conversation.chatMessages ?? [])].sort(
    (a, b) =>
      // A missing seqId defaults to the maximum (sorts last).
      (a.seqId ?? Number.MAX_SAFE_INTEGER) -
      (b.seqId ?? Number.MAX_SAFE_INTEGER)
  );
  // Show the clicked image in the viewer dialog.
  function openImageViewer(src: string, alt: string) {
    setImageViewer({ src, alt });
    toggleShowImageViewerDialog(true);
  }
  function closeImageViewerDialog() {
    toggleShowImageViewerDialog(false);
  }
  return (
    <Paper
      square
      className={classes.paper}
      style={{ overflowY: 'auto', maxHeight: '80vh' }}
    >
      <List className={classes.list}>
        {messages.map(({ uuid, createdAt, content, creatorType }) => (
          <React.Fragment key={uuid}>
            <ListItem alignItems="flex-start">
              {/* Avatar for customer messages */}
              {creatorType === CreatorType.CUSTOMER && (
                <ListItemAvatar className={classes.listItemAvatar}>
                  <Avatar alt="Profile Picture" />
                </ListItemAvatar>
              )}
              {/* justifyContent="flex-end" only for outgoing (staff) messages */}
              <Grid
                container
                justifyContent={
                  creatorType === CreatorType.CUSTOMER
                    ? 'flex-start'
                    : 'flex-end'
                }
              >
                <Grid item xs={12}>
                  <ListItemText
                    primary={
                      <Grid
                        container
                        alignItems="center"
                        justifyContent={
                          creatorType === CreatorType.CUSTOMER
                            ? 'flex-start'
                            : 'flex-end'
                        }
                      >
                        {/* Sender name followed by the message timestamp */}
                        <Typography
                          variant="subtitle1"
                          gutterBottom
                          className={classes.inline}
                        >
                          {creatorType === CreatorType.CUSTOMER
                            ? conversation.userName
                            : conversation.nickName}
                        </Typography>
                        <Typography
                          variant="body2"
                          gutterBottom
                          className={classes.inline}
                        >
                          {createdAt && javaInstant2DateStr(createdAt)}
                        </Typography>
                      </Grid>
                    }
                  />
                </Grid>
                <Paper
                  elevation={4}
                  className={clsx(
                    creatorType === CreatorType.CUSTOMER
                      ? classes.fromMessagePaper
                      : classes.toMessagePaper,
                    classes.baseMessagePaper
                  )}
                >
                  {createContent(content, classes, openImageViewer)}
                </Paper>
              </Grid>
              {/* Avatar for staff-sent messages */}
              {creatorType === CreatorType.STAFF && (
                <ListItemAvatar className={classes.listItemAvatar}>
                  <Avatar
                    src={
                      staff &&
                      staff.avatar &&
                      `${getDownloadOssStaffImgPath()}${staff.avatar}`
                    }
                  />
                </ListItemAvatar>
              )}
            </ListItem>
          </React.Fragment>
        ))}
      </List>
      <Viewer
        visible={showImageViewerDialog}
        onClose={closeImageViewerDialog}
        images={[imageViewer]}
        zIndex={2000}
      />
    </Paper>
  );
}
|
<!-- Demo page: collects a visitor's name and POSTs it to process.php. -->
<!DOCTYPE html>
<html>
<head>
    <title>Form Submission</title>
</head>
<body>
    <!-- The "name" field is read by the handler via $_POST["name"]. -->
    <form method="post" action="process.php">
        <label for="name">Enter your name:</label>
        <input type="text" id="name" name="name">
        <input type="submit" value="Submit">
    </form>
</body>
</html>
<?php
// POST handler for the form above: greet the submitter on POST, redirect
// direct (GET) visits back to the form page.
if ($_SERVER["REQUEST_METHOD"] == "POST") {
    // Form submitted, process the input.
    // Fix: escape the user-supplied value before echoing it back — the
    // original emitted $_POST["name"] verbatim, a reflected-XSS vector.
    // The null-coalescing default also avoids an undefined-index notice
    // when the field is missing from the request.
    $name = htmlspecialchars($_POST["name"] ?? "", ENT_QUOTES, "UTF-8");
    echo "Hello, $name! Thank you for submitting the form.";
    // Additional processing logic can be added here
    exit(); // Prevents rest of page from being loaded
} else {
    // Prevent direct access to processing logic
    header("Location: index.php");
    exit();
}
?>
|
<filename>Visual Studio 2010/Projects/bjarneStroustrupC++PartIV/bjarneStroustrupC++PartIV/Chapter26Exercise14.cpp
/*
TITLE Random length string sorting Chapter26Exercise14.cpp
COMMENT
Objective: Write a program that generates random length [0,100) strings
and store them in std::map (or std::set?) to avoid explicit sorting.
Input: -
Output: Size: 50 Time: 0.002 [sec]
Size: 500 Time: 0.011
Size: 5000 Time: 0.11
Size: 50000 Time: 1.29
Size: 500000 Time: 19.556
				Size: 5000000  Time: (not measured)
Author: <NAME>
Date: 08.06.2017
*/
#include <iostream>
#include <algorithm> // std::generate, std::copy
#include <string>
#include <vector>
#include <set>
#include <ctime> // std::time
#include <time.h> // std::clock
// Returns a pseudo-random int in the half-open range [min, max).
// Fixes two defects in the original `min + rand() / (RAND_MAX / (max - min))`:
//  - division by zero when max == min;
//  - integer truncation of RAND_MAX / (max - min) inflated the quotient, so
//    the result could equal (or exceed) max.
int random(int min, int max)
{
	if (max <= min)                  // empty/degenerate range: pin to min
		return min;
	return min + rand() % (max - min);
}
//---------------------------------------------------------------------------------------
// Produces a pseudo-random alphanumeric string whose length is drawn
// uniformly-ish from [0, 100) via random().
std::string get_random_string()
{
	static const char alphanum[] =
		"0123456789"
		"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
		"abcdefghijklmnopqrstuvwxyz";
	const int length = random(0, 100);
	std::string result(length, '0');
	for (std::string::size_type pos = 0; pos != result.size(); ++pos)
	{
		// sizeof(alphanum) - 1 keeps the terminating NUL out of the pool.
		result[pos] = alphanum[rand() % (sizeof(alphanum) - 1)];
	}
	return result;
}
//---------------------------------------------------------------------------------------
// Overwrites every element of v with a freshly generated random string.
void populate_vector(std::vector<std::string>& v)
{
	srand((unsigned int)time(0)); // reseed so each invocation differs per run
	std::generate(v.begin(), v.end(), get_random_string);
}
//---------------------------------------------------------------------------------------
// Benchmark: for sizes 50, 500, ... up to 5,000,000, fill a vector with
// random strings and time the construction of a std::set from it (the set's
// ordered insertion replaces an explicit sort). Prints one size/time line
// per iteration. Note: the set collapses duplicates, so s.size() may be
// smaller than v.size().
void test_sort()
{
	int max = 5000000;
	for (auto i = 50; i <= max; i *= 10)
	{
		std::vector<std::string> v(i);
		populate_vector(v);
		std::clock_t t = clock();                     // start timing
		std::set<std::string> s(v.begin(), v.end());  // sorted insertion
		t = clock() - t;
		double time = double(t) / CLOCKS_PER_SEC;
		std::cout <<"Size: "<< v.size() <<"\tTime: "<< time <<'\n';
	}
}
//---------------------------------------------------------------------------------------
// Entry point: run the benchmark, report any exception to stderr, then wait
// for a key press so the console window stays open when run from an IDE.
int main()
{
	try
	{
		test_sort();
	}
	catch (std::exception& e)
	{
		std::cerr << e.what();
	}
	getchar();
}
|
#!/bin/sh
# Checks for trailing whitespace.
# Scans all tracked text files; any non-markdown offender makes the script
# exit non-zero (markdown files only produce a warning).
git grep --cached -I -n --no-color -P '[ \t]+$' -- ':/' |
awk '
BEGIN {
    FS = ":"
    OFS = ":"
    ret = 0
}
{
    # Only warn for markdown files (*.md) to accommodate text editors
    # which do not properly handle trailing whitespace.
    # (e.g. GitHub web editor)
    if ($1 ~ /\.md$/) {
        severity = "WARN"
    } else {
        severity = "ERROR"
        ret = 1
    }
    print severity, $1, $2, " trailing whitespace."
}
END {
    exit ret
}
'
|
// import React from "react";
// import ReactDOM from "react-dom";
// import App from "../../src/App";
// ReactDOM.render(<App />, document.body);
|
<filename>altimeter/qj/lambdas/executor.py<gh_stars>10-100
"""Execute all known QJs"""
import hashlib
import json
from typing import Any, Dict, List
import uuid
import boto3
from altimeter.core.log import Logger
from altimeter.qj import schemas
from altimeter.qj.client import QJAPIClient
from altimeter.qj.config import ExecutorConfig
from altimeter.qj.log import QJLogEvents
def executor(event: Dict[str, Any]) -> None:
    """Execute all known QJs.

    If this was triggered by an SNS message, use that message as part of
    the deduplication id for each sqs message. Otherwise generate a unique
    id so that repeated manual runs of executor will not be dedupe'd.

    Args:
        event: Lambda invocation event; an SNS trigger carries
            Records[0].Sns.Message.
    """
    # Guard both a missing "Records" key and a present-but-empty list; the
    # original indexed [0] on event.get("Records", [{}]) and raised
    # IndexError when Records was [].
    records = event.get("Records") or [{}]
    sns_message = records[0].get("Sns", {}).get("Message")
    if sns_message:
        execution_hash = hashlib.sha256(sns_message.encode()).hexdigest()
    else:
        # Manual invocation: a random id keeps repeated runs from deduping.
        execution_hash = hashlib.sha256(str(uuid.uuid4()).encode()).hexdigest()
    exec_config = ExecutorConfig()
    logger = Logger()
    logger.info(
        event=QJLogEvents.InitConfig,
        sns_triggered=bool(sns_message),
        execution_hash=execution_hash,
    )
    qj_client = QJAPIClient(host=exec_config.api_host, port=exec_config.api_port)
    jobs = qj_client.get_jobs(active_only=True)
    logger.info(event=QJLogEvents.GetJobs, num_jobs=len(jobs))
    enqueue_queries(
        jobs=jobs,
        queue_url=exec_config.query_queue_url,
        execution_hash=execution_hash,
        region=exec_config.region,
    )
def enqueue_queries(
    jobs: List[schemas.Job], queue_url: str, execution_hash: str, region: str
) -> None:
    """Enqueue queries by sending one SQS message per job to queue_url.

    The message group id depends only on the job definition (same job ->
    same FIFO group); the dedupe id additionally folds in the per-run
    execution_hash so distinct runs are not deduplicated against each other.
    """
    sqs_client = boto3.client("sqs", region_name=region)
    logger = Logger()
    with logger.bind(queue_url=queue_url, execution_hash=execution_hash):
        for job in jobs:
            job_hash = hashlib.sha256()
            # NOTE(review): job.json() already returns a JSON string, so
            # json.dumps() double-encodes it here; harmless for hashing but
            # confirm it is intentional before changing (ids would shift).
            job_hash.update(json.dumps(job.json()).encode())
            message_group_id = job_hash.hexdigest()
            # Continue hashing to derive the run-specific dedupe id.
            job_hash.update(execution_hash.encode())
            message_dedupe_id = job_hash.hexdigest()
            logger.info(
                QJLogEvents.ScheduleJob,
                job=job,
                message_group_id=message_group_id,
                message_dedupe_id=message_dedupe_id,
            )
            sqs_client.send_message(
                QueueUrl=queue_url,
                MessageBody=job.json(),
                MessageGroupId=message_group_id,
                MessageDeduplicationId=message_dedupe_id,
            )
|
<reponame>alunny/globalize-tools<filename>globalize-webpack-plugin/index.js
var ProductionModePlugin = require("./ProductionModePlugin");
var DevelopmentModePlugin = require("./DevelopmentModePlugin");
/**
* Development Mode:
* - Automatically loads CLDR data (i.e., injects `Globalize.load(<necessary CLDR data>)`).
* - Automatically define default locale (i.e., injects `Globalize.locale(<defaultLocale>)`).
*
* Production Mode:
* - Have Globalize modules replaced with their runtime modules.
* - Statically extracts formatters and parsers from user code and pre-compile
* them into respective XXXX.
*/
function GlobalizePlugin(attributes) {
this.attributes = attributes || {};
}
GlobalizePlugin.prototype.apply = function(compiler) {
compiler.apply(
this.attributes.production ?
new ProductionModePlugin(this.attributes) :
new DevelopmentModePlugin(this.attributes)
);
};
module.exports = GlobalizePlugin;
|
#!/bin/bash
# Copyright (c) 2020, Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

# Creates an FMW/OIM WebLogic domain via WLST, runs the offline config
# manager, then patches config.xml and restores connector default files.

export DOMAIN_HOME=${DOMAIN_HOME_DIR}
export namespace=${NAMESPACE}
export domainName=${CUSTOM_DOMAIN_NAME}

# Fall back to the default JDK location when JAVA_HOME is not provided.
if [ -z "${JAVA_HOME}" ]; then
  JAVA_HOME=/usr/java/latest
fi

echo 'namespace:' $namespace
echo 'frontEndHost ,frontEndHttpPort :' ${FRONTENDHOST} ${FRONTENDHTTPPORT}
echo 'domain name:' $domainName

# Create the domain.
# Admin/RCU credentials are read from mounted operator secret files.
wlst.sh -skipWLSModuleScanning \
  ${CREATE_DOMAIN_SCRIPT_DIR}/createFMWDomain.py \
  -oh /u01/oracle \
  -jh ${JAVA_HOME} \
  -parent ${DOMAIN_HOME}/.. \
  -name ${CUSTOM_DOMAIN_NAME} \
  -user `cat /weblogic-operator/secrets/username` \
  -password `cat /weblogic-operator/secrets/password` \
  -rcuDb ${CUSTOM_CONNECTION_STRING} \
  -rcuPrefix ${CUSTOM_RCUPREFIX} \
  -rcuSchemaPwd `cat /weblogic-operator/rcu-secrets/password` \
  -adminListenPort ${CUSTOM_ADMIN_LISTEN_PORT} \
  -adminName ${CUSTOM_ADMIN_NAME} \
  -managedNameBase ${CUSTOM_MANAGED_BASE_NAME} \
  -managedServerPort ${CUSTOM_MANAGEDSERVER_PORT} \
  -prodMode ${CUSTOM_PRODUCTION_MODE} \
  -managedServerCount ${CUSTOM_MANAGED_SERVER_COUNT} \
  -clusterName ${CUSTOM_CLUSTER_NAME} \
  -exposeAdminT3Channel ${EXPOSE_T3_CHANNEL_PREFIX} \
  -t3ChannelPublicAddress ${T3_PUBLIC_ADDRESS} \
  -t3ChannelPort ${T3_CHANNEL_PORT} \
  -frontEndHost ${FRONTENDHOST} \
  -frontEndHttpPort ${FRONTENDHTTPPORT}

if [ $? -ne 0 ]
then
  # die with unsuccessful shell script termination exit status 2
  echo "An error occurred while Creating Domain".
  exit 2
fi

# invoke offline config manager
export DOMAIN_HOME=${DOMAIN_HOME_DIR}
export JAVA_HOME=/usr/java/latest
chmod a+rx /u01/oracle/idm/server/bin/offlineConfigManager.sh
cd /u01/oracle/idm/server/bin/
offlineCmd="./offlineConfigManager.sh"
${offlineCmd}
retval=$?
if [ $retval -ne 0 ];
then
  echo "ERROR: Offline config command failed. Please check the logs"
  exit 4
fi

# invoke the command to remove the unnecessary templates in the domain config:
# strip server-template blocks, comment out <ssl> sections, and substitute the
# namespace placeholder / em.ear path for this domain.
sed -i 's/<server-template>//g' $DOMAIN_HOME/config/config.xml
sed -i 's/<listen-port>7100<\/listen-port>//g' $DOMAIN_HOME/config/config.xml
sed -i 's/<\/server-template>//g' $DOMAIN_HOME/config/config.xml
sed -i 's/<name>soa-server-template<\/name>//g' $DOMAIN_HOME/config/config.xml
sed -i 's/<name>oim-server-template<\/name>//g' $DOMAIN_HOME/config/config.xml
sed -i 's/<name>wsm-cache-server-template<\/name>//g' $DOMAIN_HOME/config/config.xml
sed -i 's/<name>wsmpm-server-template<\/name>//g' $DOMAIN_HOME/config/config.xml
sed -i 's/<ssl>/<!--ssl>/g' $DOMAIN_HOME/config/config.xml
sed -i 's/<\/ssl>/<\/ssl-->/g' $DOMAIN_HOME/config/config.xml
sed -i "s/oimk8namespace/$domainName/g" $DOMAIN_HOME/config/config.xml
sed -i "s/applications\/$domainName\/em.ear/domains\/applications\/$domainName\/em.ear/g" $DOMAIN_HOME/config/config.xml

# Restore connector default files from the *_orig backup when missing.
if [ ! -f /u01/oracle/idm/server/ConnectorDefaultDirectory/ConnectorConfigTemplate.xml ] && [ -d /u01/oracle/idm/server/ConnectorDefaultDirectory_orig ]; then
  cp /u01/oracle/idm/server/ConnectorDefaultDirectory_orig/ConnectorConfigTemplate.xml /u01/oracle/idm/server/ConnectorDefaultDirectory
fi
if [ ! -f /u01/oracle/idm/server/ConnectorDefaultDirectory/ConnectorSchema.xsd ] && [ -d /u01/oracle/idm/server/ConnectorDefaultDirectory_orig ]; then
  cp /u01/oracle/idm/server/ConnectorDefaultDirectory_orig/ConnectorSchema.xsd /u01/oracle/idm/server/ConnectorDefaultDirectory
fi
if [ ! -f /u01/oracle/idm/server/ConnectorDefaultDirectory/readme.txt ] && [ -d /u01/oracle/idm/server/ConnectorDefaultDirectory_orig ]; then
  cp /u01/oracle/idm/server/ConnectorDefaultDirectory_orig/readme.txt /u01/oracle/idm/server/ConnectorDefaultDirectory
fi
if [ ! -d /u01/oracle/idm/server/ConnectorDefaultDirectory/targetsystems-lib ] && [ -d /u01/oracle/idm/server/ConnectorDefaultDirectory_orig ]; then
  cp -rf /u01/oracle/idm/server/ConnectorDefaultDirectory_orig/targetsystems-lib /u01/oracle/idm/server/ConnectorDefaultDirectory
fi
|
#!/bin/sh
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2019 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
# Read the SNMP community string from the first line of the generated
# snmpd.conf (4th whitespace-separated field), if the file exists.
snmpCommunityVal=""
if [ -f /tmp/snmpd.conf ]; then
    snmpCommunityVal=`head -n 1 /tmp/snmpd.conf | awk '{print $4}'`
fi
# Export MIB and library search paths so the net-snmp client tools
# (snmpget/snmpwalk) resolve from the NFS-mounted target-snmp install.
setSNMPEnv()
{
    #Set env for SNMP client queries..."
    export MIBS=ALL
    export MIBDIRS=/mnt/nfs/bin/target-snmp/share/snmp/mibs:/usr/share/snmp/mibs
    export PATH=$PATH:/mnt/nfs/bin/target-snmp/bin:
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/mnt/nfs/bin/target-snmp/lib:/mnt/nfs/usr/lib
}
## get Model No of the box
# Prints the value after '=' on MODEL-matching lines of /etc/device.properties.
getModel()
{
    grep 'MODEL' /etc/device.properties | cut -d '=' -f2
}
# Query sysDescr.0 from host $2 using community string $1 and carve the
# firmware version field out of the response; prints "" on failure.
# NOTE(review): uses [[ ]] under a #!/bin/sh shebang — only valid where sh
# is bash/ash with [[ support; confirm the target platform shell. Also, $?
# here reflects the last command of the pipeline (cut), not snmpget itself.
getFirmwareVersion()
{
    setSNMPEnv
    ret=`snmpget -OQ -v 2c -c $1 $2 sysDescr.0 | cut -d "=" -f2 | cut -d ":" -f5 | cut -d " " -f2 | cut -d ";" -f1`
    if [[ $? -eq 0 ]] ; then
        echo $ret
    else
        echo ""
    fi
}
# Prints the eCM MAC address by walking IF-MIB::ifPhysAddress.2 on the
# cable-modem management address 192.168.100.1; prints "" on failure.
# (Re-reads the community string so a freshly written snmpd.conf is honored.)
getECMMac()
{
    setSNMPEnv
    snmpCommunityVal=`head -n 1 /tmp/snmpd.conf | awk '{print $4}'`
    ret=`snmpwalk -OQ -v 2c -c "$snmpCommunityVal" 192.168.100.1 IF-MIB::ifPhysAddress.2 | cut -d "=" -f2`
    if [[ $? -eq 0 ]] ; then
        echo $ret
    else
        echo ""
    fi
}
|
#!/bin/bash
# Run a single dieharder test (-d 201) against generator id 52 with a fixed
# seed (-S 401548227) so the run is reproducible; see dieharder(1) for the
# test/generator id tables.
dieharder -d 201 -g 52 -S 401548227
|
#!/bin/bash -e

# Default the broker port when the caller did not specify one.
if [[ -z "$KAFKA_PORT" ]]; then
    export KAFKA_PORT=9092
fi

# Create configured topics in the background while the broker starts.
create-topics.sh &

# If no listener/advertised-port configuration was given and the Docker
# socket is mounted, derive the advertised port from the container's
# published port mapping.
if [[ -z "$KAFKA_ADVERTISED_PORT" && \
  -z "$KAFKA_LISTENERS" && \
  -z "$KAFKA_ADVERTISED_LISTENERS" && \
  -S /var/run/docker.sock ]]; then
    KAFKA_ADVERTISED_PORT=$(docker port "$(hostname)" $KAFKA_PORT | sed -r 's/.*:(.*)/\1/g')
    export KAFKA_ADVERTISED_PORT
fi

# Broker id: explicit env wins, else BROKER_ID_COMMAND output, else -1.
if [[ -z "$KAFKA_BROKER_ID" ]]; then
    if [[ -n "$BROKER_ID_COMMAND" ]]; then
        KAFKA_BROKER_ID=$(eval "$BROKER_ID_COMMAND")
        export KAFKA_BROKER_ID
    else
        # By default auto allocate broker ID
        export KAFKA_BROKER_ID=-1
    fi
fi

# Per-host log directory default.
if [[ -z "$KAFKA_LOG_DIRS" ]]; then
    export KAFKA_LOG_DIRS="/kafka/kafka-logs-$HOSTNAME"
fi

# Derive the zookeeper connect string from linked-container env vars
# (ZK*_PORT_2181_TCP=tcp://host:port), joined with commas.
if [[ -z "$KAFKA_ZOOKEEPER_CONNECT" ]]; then
    KAFKA_ZOOKEEPER_CONNECT=$(env | grep 'ZK.*PORT_2181_TCP=' | sed -e 's|.*tcp://||' | paste -sd ,)
    export KAFKA_ZOOKEEPER_CONNECT
fi

# Inject custom heap options into the start script, then unset the variable
# so the generic KAFKA_* -> server.properties loop below skips it.
if [[ -n "$KAFKA_HEAP_OPTS" ]]; then
    sed -r -i 's/(export KAFKA_HEAP_OPTS)="(.*)"/\1="'"$KAFKA_HEAP_OPTS"'"/g' "$KAFKA_HOME/bin/kafka-server-start.sh"
    unset KAFKA_HEAP_OPTS
fi

# Optionally compute the advertised host name via a user-supplied command.
if [[ -z "$KAFKA_ADVERTISED_HOST_NAME" && -n "$HOSTNAME_COMMAND" ]]; then
    KAFKA_ADVERTISED_HOST_NAME=$(eval "$HOSTNAME_COMMAND")
    export KAFKA_ADVERTISED_HOST_NAME
fi
# Listener wiring. With an explicit security protocol map, build bound and
# advertised listeners from the *_PROTOCOL_NAME/port variables; otherwise
# fall back to a single PLAINTEXT listener.
if [[ -n "$KAFKA_LISTENER_SECURITY_PROTOCOL_MAP" ]]; then
    if [[ -n "$KAFKA_ADVERTISED_PORT" && -n "$KAFKA_ADVERTISED_PROTOCOL_NAME" ]]; then
        export KAFKA_ADVERTISED_LISTENERS="${KAFKA_ADVERTISED_PROTOCOL_NAME}://${KAFKA_ADVERTISED_HOST_NAME-}:${KAFKA_ADVERTISED_PORT}"
        export KAFKA_LISTENERS="$KAFKA_ADVERTISED_PROTOCOL_NAME://:$KAFKA_ADVERTISED_PORT"
    fi
    # The bound protocol defaults to the advertised one.
    if [[ -z "$KAFKA_PROTOCOL_NAME" ]]; then
        export KAFKA_PROTOCOL_NAME="${KAFKA_ADVERTISED_PROTOCOL_NAME}"
    fi
    # Extra listener on the plain broker port, merged in below.
    if [[ -n "$KAFKA_PORT" && -n "$KAFKA_PROTOCOL_NAME" ]]; then
        export ADD_LISTENER="${KAFKA_PROTOCOL_NAME}://${KAFKA_HOST_NAME-}:${KAFKA_PORT}"
    fi
    if [[ -z "$KAFKA_INTER_BROKER_LISTENER_NAME" ]]; then
        export KAFKA_INTER_BROKER_LISTENER_NAME=$KAFKA_PROTOCOL_NAME
    fi
else
    #DEFAULT LISTENERS
    export KAFKA_ADVERTISED_LISTENERS="PLAINTEXT://${KAFKA_ADVERTISED_HOST_NAME-}:${KAFKA_ADVERTISED_PORT-$KAFKA_PORT}"
    export KAFKA_LISTENERS="PLAINTEXT://${KAFKA_HOST_NAME-}:${KAFKA_PORT-9092}"
fi

# Merge ADD_LISTENER into both listener lists, creating them when absent.
if [[ -n "$ADD_LISTENER" && -n "$KAFKA_LISTENERS" ]]; then
    export KAFKA_LISTENERS="${KAFKA_LISTENERS},${ADD_LISTENER}"
fi
if [[ -n "$ADD_LISTENER" && -z "$KAFKA_LISTENERS" ]]; then
    export KAFKA_LISTENERS="${ADD_LISTENER}"
fi
if [[ -n "$ADD_LISTENER" && -n "$KAFKA_ADVERTISED_LISTENERS" ]]; then
    export KAFKA_ADVERTISED_LISTENERS="${KAFKA_ADVERTISED_LISTENERS},${ADD_LISTENER}"
fi
if [[ -n "$ADD_LISTENER" && -z "$KAFKA_ADVERTISED_LISTENERS" ]]; then
    export KAFKA_ADVERTISED_LISTENERS="${ADD_LISTENER}"
fi
# When a dedicated inter-broker listener (different from the client protocol
# name) is configured, bind it on its own port and advertise it.
if [[ -n "$KAFKA_INTER_BROKER_LISTENER_NAME" && ! "$KAFKA_INTER_BROKER_LISTENER_NAME"X = "$KAFKA_PROTOCOL_NAME"X ]]; then
    # BUGFIX: default the inter-broker port only when it is NOT already set.
    # The original tested -n, which overwrote a user-supplied
    # KAFKA_INTER_BROKER_PORT with KAFKA_PORT+1 and, when the variable was
    # unset, left the listener below without any port ("NAME://:").
    if [[ -z "$KAFKA_INTER_BROKER_PORT" ]]; then
        export KAFKA_INTER_BROKER_PORT=$(( KAFKA_PORT + 1 ))
    fi
    export INTER_BROKER_LISTENER="${KAFKA_INTER_BROKER_LISTENER_NAME}://:${KAFKA_INTER_BROKER_PORT}"
    export KAFKA_LISTENERS="${KAFKA_LISTENERS},${INTER_BROKER_LISTENER}"
    export KAFKA_ADVERTISED_LISTENERS="${KAFKA_ADVERTISED_LISTENERS},${INTER_BROKER_LISTENER}"
    # Scrub helper variables so the KAFKA_* config loop does not emit them.
    unset KAFKA_INTER_BROKER_PORT
    unset KAFKA_SECURITY_INTER_BROKER_PROTOCOL
    unset INTER_BROKER_LISTENER
fi
# Optional broker rack awareness via a user-supplied command.
if [[ -n "$RACK_COMMAND" && -z "$KAFKA_BROKER_RACK" ]]; then
    KAFKA_BROKER_RACK=$(eval "$RACK_COMMAND")
    export KAFKA_BROKER_RACK
fi

#Issue newline to config file in case there is not one already
echo "" >> "$KAFKA_HOME/config/server.properties"

# Drop helper variables so the KAFKA_* loop below does not write them into
# server.properties as (invalid) broker settings.
unset KAFKA_CREATE_TOPICS
unset KAFKA_ADVERTISED_PROTOCOL_NAME
unset KAFKA_PROTOCOL_NAME
if [[ -n "$KAFKA_ADVERTISED_LISTENERS" ]]; then
    unset KAFKA_ADVERTISED_PORT
    unset KAFKA_ADVERTISED_HOST_NAME
fi
if [[ -n "$KAFKA_LISTENERS" ]]; then
    unset KAFKA_PORT
    unset KAFKA_HOST_NAME
fi

# Map every remaining KAFKA_FOO_BAR env var to foo.bar in server.properties
# and every LOG4J_* var to its log4j.properties key: update the line in place
# when the key already exists (even commented out), otherwise append it.
for VAR in $(env)
do
    if [[ $VAR =~ ^KAFKA_ && ! $VAR =~ ^KAFKA_HOME ]]; then
        kafka_name=$(echo "$VAR" | sed -r 's/KAFKA_(.*)=.*/\1/g' | tr '[:upper:]' '[:lower:]' | tr _ .)
        env_var=$(echo "$VAR" | sed -r 's/(.*)=.*/\1/g')
        if grep -E -q '(^|^#)'"$kafka_name=" "$KAFKA_HOME/config/server.properties"; then
            sed -r -i 's@(^|^#)('"$kafka_name"')=(.*)@\2='"${!env_var}"'@g' "$KAFKA_HOME/config/server.properties" #note that no config values may contain an '@' char
        else
            echo "$kafka_name=${!env_var}" >> "$KAFKA_HOME/config/server.properties"
        fi
    fi
    if [[ $VAR =~ ^LOG4J_ ]]; then
        log4j_name=$(echo "$VAR" | sed -r 's/(LOG4J_.*)=.*/\1/g' | tr '[:upper:]' '[:lower:]' | tr _ .)
        log4j_env=$(echo "$VAR" | sed -r 's/(.*)=.*/\1/g')
        if grep -E -q '(^|^#)'"$log4j_name=" "$KAFKA_HOME/config/log4j.properties"; then
            sed -r -i 's@(^|^#)('"$log4j_name"')=(.*)@\2='"${!log4j_env}"'@g' "$KAFKA_HOME/config/log4j.properties" #note that no config values may contain an '@' char
        else
            echo "$log4j_name=${!log4j_env}" >> "$KAFKA_HOME/config/log4j.properties"
        fi
    fi
done

# Optional user hook, then hand off to the real Kafka start script (exec so
# the broker replaces this shell and receives signals directly).
if [[ -n "$CUSTOM_INIT_SCRIPT" ]] ; then
    eval "$CUSTOM_INIT_SCRIPT"
fi

exec "$KAFKA_HOME/bin/kafka-server-start.sh" "$KAFKA_HOME/config/server.properties"
|
#!/bin/bash
set -e
# Build the Alpine-based builder image from inside the docker/ subdirectory
# so the build context ('.') is that directory.
pushd docker
docker build -t bpftrace-builder -f Dockerfile.alpine .
popd
|
<reponame>josesentis/react-graphql-boilerplate
import React from 'react';
import TextWrapper from './styles';
class movingText extends React.PureComponent {
render() {
const { children } = this.props;
return (
<TextWrapper>
<span>{children}</span>
</TextWrapper>
);
}
}
export default movingText;
|
package game.backend;
/**
 * Relative board displacements used as movement checkpoints.
 *
 * Each constant carries a row offset ({@code i}), a column offset
 * ({@code j}), and a unique power-of-two {@code value} usable as a bit flag
 * (U=1 ... LL=128; all eight OR together to 255).
 */
public enum Checkpoint {
    U(-1,0, 1),
    UU(-2,0, 2),
    D(1,0, 4),
    DD(2,0, 8),
    R(0,1, 16),
    RR(0,2, 32),
    L(0,-1, 64),
    LL(0,-2, 128);

    // Made final: enum constants are immutable singletons and these fields
    // are only assigned in the constructor.
    private final int i;
    private final int j;
    private final int value;

    Checkpoint(int i, int j, int value) {
        this.i = i;
        this.j = j;
        this.value = value;
    }

    /** Row offset of this checkpoint. */
    public int getI() {
        return i;
    }

    /** Column offset of this checkpoint. */
    public int getJ() {
        return j;
    }

    /** Unique power-of-two bit-flag value of this checkpoint. */
    public int getValue() {
        return value;
    }
}
|
def findAncestor(root, node1, node2):
    """Return the lowest common ancestor of node1 and node2 in the tree at root.

    Follows the standard LCA contract: a node counts as its own ancestor,
    and if only one of the two targets is found under root, that target is
    returned. Returns None when neither is present (or root is empty).
    """
    # Empty subtree: nothing to find here.
    if not root:
        return None
    # The current node matching either target is the answer for this subtree.
    if root == node1 or root == node2:
        return root
    # Recurse into both children.
    in_left = findAncestor(root.left, node1, node2)
    in_right = findAncestor(root.right, node1, node2)
    # Targets split across the subtrees -> this node is the LCA; otherwise
    # propagate whichever side (if any) located a target.
    return root if (in_left and in_right) else (in_left or in_right)
|
<reponame>nkrios/lemongrenade<gh_stars>100-1000
package lemongrenade.core.database.mongo;
import com.mongodb.MongoClient;
import com.mongodb.WriteConcern;
import lemongrenade.core.util.LGProperties;
import org.mongodb.morphia.Datastore;
import org.mongodb.morphia.Morphia;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Manages a single shared MongoDB connection (MongoClient + Morphia
 * Datastore) for the process. The connection is opened eagerly in the static
 * initializer and lazily re-opened by {@link #open()} after {@link #close()}.
 *
 * Fixes over the original: the check-then-act lazy init and close now
 * synchronize on the class (the state is static and was racy), and the
 * setters no longer assign static fields through {@code this}.
 */
public class MorphiaService {
    static private final Logger log = LoggerFactory.getLogger(MorphiaService.class);
    static private Morphia morphia;
    static private Datastore datastore;
    static private MongoClient mongoClient;

    static {
        // Init all private variables
        morphia = new Morphia();
        open();
    }

    /** Closes the shared client and clears the cached handles (idempotent). */
    public void close() {
        synchronized (MorphiaService.class) {
            if (mongoClient != null) {
                mongoClient.close();
                mongoClient = null;
                datastore = null;
            }
        }
    }

    /** Opens the shared connection if the client or datastore is missing. */
    public static synchronized void open() {
        if (mongoClient == null || datastore == null) {
            String connectString = LGProperties.get("database.mongo.hostname") + ":" + LGProperties.get("database.mongo.port");
            log.info("Mongo connection :" + connectString);
            mongoClient = new MongoClient(connectString);
            mongoClient.setWriteConcern(WriteConcern.JOURNALED);
            String databaseName = LGProperties.get("database.mongo.databasename");
            if (databaseName == null) {
                databaseName = "lemongrenade_develop";
                log.error("Invalid database name is in properties file. Defaulting to " + databaseName);
            }
            datastore = morphia.createDatastore(mongoClient, databaseName);
            datastore.setDefaultWriteConcern(WriteConcern.JOURNALED);
        }
    }

    /** Ensures the shared connection is open. */
    public MorphiaService() {
        open();
    }

    public Morphia getMorphia() { return morphia; }

    // These fields are static; assign via the class, not `this`.
    public void setMorphia(Morphia morphia) { MorphiaService.morphia = morphia; }

    public Datastore getDatastore() { return datastore; }

    public void setDatastore(Datastore datastore) { MorphiaService.datastore = datastore; }
}
|
#!/bin/sh
# Installs the Airflow operator and an Airflow base deployment via kustomize.
# All settings are overridable through environment variables; the values
# below are the defaults.
AIRGAP_REGISTRY__DEFAULT_VALUE=""
AIRFLOW_OPERATOR_NAMESPACE__DEFAULT_VALUE="airflowop-system"
AIRFLOW_OPERATOR_IMAGE_TAG__DEFAULT_VALUE="ecp-5.3.0-rc3"
AIRFLOW_BASE_NAMESPACE__DEFAULT_VALUE="airflow-base"
AIRFLOW_BASE_IS_POSTGRES__DEFAULT_VALUE="true"

# Normalize a non-empty registry so it ends with exactly one trailing slash.
if [ -n "$AIRGAP_REGISTRY" ] && ! expr "$AIRGAP_REGISTRY" : '^.*\/$' 1>/dev/null ; then
    AIRGAP_REGISTRY="${AIRGAP_REGISTRY}/"
fi

# Apply defaults for any variable the caller did not set.
AIRGAP_REGISTRY="${AIRGAP_REGISTRY:-$AIRGAP_REGISTRY__DEFAULT_VALUE}"
AIRFLOW_OPERATOR_NAMESPACE="${AIRFLOW_OPERATOR_NAMESPACE:-$AIRFLOW_OPERATOR_NAMESPACE__DEFAULT_VALUE}"
AIRFLOW_OPERATOR_IMAGE_TAG="${AIRFLOW_OPERATOR_IMAGE_TAG:-$AIRFLOW_OPERATOR_IMAGE_TAG__DEFAULT_VALUE}"
AIRFLOW_BASE_NAMESPACE="${AIRFLOW_BASE_NAMESPACE:-$AIRFLOW_BASE_NAMESPACE__DEFAULT_VALUE}"
AIRFLOW_BASE_IS_POSTGRES="${AIRFLOW_BASE_IS_POSTGRES:-$AIRFLOW_BASE_IS_POSTGRES__DEFAULT_VALUE}"

# Exported for consumption by the kustomize manifests / child processes.
export AIRGAP_REGISTRY AIRFLOW_OPERATOR_NAMESPACE AIRFLOW_OPERATOR_IMAGE_TAG AIRFLOW_BASE_NAMESPACE

SCRIPTPATH=$(dirname "${0}")

kubectl apply -k "${SCRIPTPATH}/../airflow-operator"

# Deploy the requested database flavour. Quoting the variable keeps the
# test syntactically valid even when the caller exports an empty value
# (the unquoted original produced a "[: =: unexpected operator" error).
if [ "$AIRFLOW_BASE_IS_POSTGRES" = "true" ]; then
    kubectl apply -k "${SCRIPTPATH}/../airflow-base/overlays/postgres"
else
    kubectl apply -k "${SCRIPTPATH}/../airflow-base/overlays/mysql"
fi
|
#!/usr/bin/env bash
# Prints bazel workspace status variables ("KEY value" lines) on stdout;
# intended for use as a --workspace_status_command script.
set -o errexit
set -o nounset
set -o pipefail
#`p_` takes two arguments to define a bazel workspace status variable:
#
# * the name of the variable
# * a default value
#
# If an environment variable with the corresponding name is set, its value is
# used. Otherwise, the provided default value is used.
p_() {
    if (( $# == 2 )); then
        # ${!1} is indirect expansion: the value of the variable named by $1.
        echo "$1" "${!1:-$2}"
    else
        return 1
    fi
}

# Gather git metadata for the build stamp.
git_commit="$(git rev-parse HEAD)"
git_desc="$(git describe --always --dirty --long)"

# NOTE(review): despite the name, this is "YYYY-MM-DD HH:MM:SS+0000" --
# not strict RFC 3339 (no 'T' separator, no colon in the offset).
timestamp_utc_rfc3339=$(date -u +"%Y-%m-%d %H:%M:%S%z")
# Date portion only (strip everything after the space), then drop the
# dashes to get YYYYMMDD for use in image tags.
timestamp_utc_date_dashes="${timestamp_utc_rfc3339% *}"
timestamp_utc_date_no_dashes="${timestamp_utc_date_dashes//-/}"

image_tag="$git_desc"

p_ STABLE_TEST_STAGING_IMG_REPOSITORY gcr.io/k8s-staging-cip-test
p_ STABLE_IMG_REGISTRY gcr.io
p_ STABLE_IMG_REPOSITORY k8s-staging-artifact-promoter
p_ STABLE_IMG_NAME cip
p_ STABLE_GIT_COMMIT "${git_commit}"
p_ STABLE_GIT_DESC "${git_desc}"
p_ TIMESTAMP_UTC_RFC3339 "${timestamp_utc_rfc3339}"
p_ IMG_TAG "${timestamp_utc_date_no_dashes}-${image_tag}"
|
<reponame>Nickersoft/glide
import Rtl from './mutators/rtl'
import Gap from './mutators/gap'
import Grow from './mutators/grow'
import Peeking from './mutators/peeking'
import Focusing from './mutators/focusing'

/**
 * Collection of transformers.
 *
 * @type {Array}
 */
const MUTATORS = [
  Gap,
  Grow,
  Peeking,
  Focusing,
  // It's important that the Rtl component
  // be last on the list, so it reflects
  // all previous transformations.
  Rtl
]

/**
 * Applies different transformers on translate value.
 *
 * @param  {Object} Glide
 * @param  {Object} Components
 * @return {Object}
 */
export default function (Glide, Components) {
  return {
    /**
     * Pipelines translate value with registered transformers.
     *
     * @param  {Number} translate
     * @return {Number}
     */
    mutate (translate) {
      // Fold the translate value through every mutator in order
      // (`let` instead of the original `var`, matching the module's
      // const/ES-module style and keeping `i` block-scoped).
      for (let i = 0; i < MUTATORS.length; i++) {
        translate = MUTATORS[i](Glide, Components).modify(translate)
      }

      return translate
    }
  }
}
|
import { Component, Value, Autowired } from '@malagu/core';
import { SecurityContextStore, SecurityContext, SecurityContextStrategy } from './context-protocol';
import { Context } from '@malagu/core/lib/node';
// Stores and loads the SecurityContext in the current HTTP session.
@Component(SecurityContextStore)
export class SessionSecurityContextStore implements SecurityContextStore {

    // Security configuration subtree ('malagu.security'); only
    // `contextKey` -- the session attribute name -- is read here.
    @Value('malagu.security')
    protected readonly options: any;

    @Autowired(SecurityContextStrategy)
    protected readonly securityContextStrategy: SecurityContextStrategy;

    /**
     * Loads the security context from the current session. When none has
     * been stored yet, asks the strategy to create a fresh one; note the
     * freshly created context is NOT written back to the session here.
     */
    async load(): Promise<SecurityContext> {
        const context = Context.getSession()[this.options.contextKey];
        if (!context) {
            return await this.securityContextStrategy.create();
        }
        return context;
    }

    /** Persists the given security context into the current session. */
    async save(context: SecurityContext): Promise<void> {
        Context.getSession()[this.options.contextKey] = context;
    }
}
|
def text_summarizer(text, n=5):
    """
    Summarizes a long text using Natural Language Processing techniques.

    Args:
        text (str): The text to be summarized.
        n (int): Number of top-scoring sentences to include in the
            summary. Defaults to 5. (The original read an undefined
            global ``N``, which raised NameError at runtime; the new
            keyword parameter is backward-compatible for callers that
            pass only ``text``.)

    Returns:
        str: A summary of the text.

    Note:
        Relies on module-level helpers ``tokenizer``, ``calculate_tfidf``,
        ``calculate_sentence_scores`` and ``find_top_sentences`` that must
        be defined elsewhere.
    """
    # Clean and tokenize text
    tokens = tokenizer(text)
    # Calculate term frequency-inverse document frequency
    tfidf = calculate_tfidf(tokens)
    # Calculate sentence scores
    sentence_scores = calculate_sentence_scores(tfidf)
    # Find top n sentences
    top_sentences = find_top_sentences(sentence_scores, n)
    # Join top sentences into summary
    summary = ' '.join(top_sentences)
    return summary
|
package de.schwedt.weightlifting.app.news;
import android.util.Log;
import org.json.JSONArray;
import org.json.JSONObject;
import java.util.ArrayList;
import java.util.Date;
import de.schwedt.weightlifting.app.MainActivity;
import de.schwedt.weightlifting.app.UpdateableItem;
import de.schwedt.weightlifting.app.UpdateableWrapper;
import de.schwedt.weightlifting.app.WeightliftingApp;
import de.schwedt.weightlifting.app.helper.JsonParser;
/**
 * Holds and refreshes the list of news articles shown in the app.
 *
 * <p>Articles are downloaded as JSON, parsed into {@link NewsItem}s, and
 * compared against the previously cached list so that newly published
 * articles can be highlighted in the UI.
 */
public class News extends UpdateableWrapper {

    public static final String FILE_NAME = "news.json";
    public static final int navigationPosition = MainActivity.FRAGMENT_NEWS;
    public static final int subPosition = 0;

    /** Articles detected as new during the last refresh; read by the UI for highlighting. */
    public static ArrayList<NewsItem> itemsToMark = new ArrayList<>();

    /**
     * Downcasts a list of generic {@link UpdateableItem}s to news items.
     *
     * @param array items that are all expected to be {@link NewsItem}s
     * @return a new list with the same elements, typed as news items
     */
    public static ArrayList<NewsItem> casteArray(ArrayList<UpdateableItem> array) {
        ArrayList<NewsItem> convertedItems = new ArrayList<>();
        for (int i = 0; i < array.size(); i++) {
            convertedItems.add((NewsItem) array.get(i));
        }
        return convertedItems;
    }

    /**
     * Adds to {@link #itemsToMark} every item of {@code newItems} that has no
     * field-for-field match (content, date, heading, image URL, link URL) in
     * {@code oldItems}.
     */
    public static void markNewItems(ArrayList<UpdateableItem> oldItems, ArrayList<UpdateableItem> newItems) {
        ArrayList<NewsItem> oldNewsItems = casteArray(oldItems);
        ArrayList<NewsItem> newNewsItems = casteArray(newItems);
        for (int i = 0; i < newNewsItems.size(); i++) {
            boolean isNew = true;
            for (int j = 0; j < oldNewsItems.size(); j++) {
                if (newNewsItems.get(i).getContent().equals(oldNewsItems.get(j).getContent()) && newNewsItems.get(i).getDate().equals(oldNewsItems.get(j).getDate()) && newNewsItems.get(i).getHeading().equals(oldNewsItems.get(j).getHeading()) && newNewsItems.get(i).getImageURL().equals(oldNewsItems.get(j).getImageURL()) && newNewsItems.get(i).getURL().equals(oldNewsItems.get(j).getURL())) {
                    isNew = false;
                    break;
                }
            }
            if (isNew) {
                itemsToMark.add(newNewsItems.get(i));
            }
        }
    }

    /** Triggers a download of the latest news JSON and a subsequent re-parse. */
    public void refreshItems() {
        String TAG = "News";
        String UPDATE_URL = "https://raw.githubusercontent.com/WGierke/weightlifting_schwedt/updates/production/news.json";
        super.update(UPDATE_URL, FILE_NAME, TAG);
    }

    /**
     * Replaces the current items with the freshly downloaded ones, keeping
     * object identity for unchanged articles and marking brand-new ones.
     */
    protected void updateWrapper(String result) {
        News newItems = new News();
        newItems.parseFromString(result);
        if (items.size() > 0) {
            keepOldReferences(items, newItems.getItems());
            markNewItems(items, newItems.getItems());
        }
        items = newItems.getItems();
    }

    /**
     * Parses the news feed JSON ({@code {"articles": [...]}}) into items.
     * Individual malformed articles are skipped with a log entry; a broken
     * overall document leaves the current items untouched.
     */
    public void parseFromString(String jsonString) {
        try {
            ArrayList<UpdateableItem> newItems = new ArrayList<>();
            JsonParser jsonParser = new JsonParser();
            jsonParser.getJsonFromString(jsonString);
            JSONArray articles = jsonParser.getJsonArray("articles");
            for (int i = 0; i < articles.length(); i++) {
                try {
                    JSONObject article = articles.getJSONObject(i);
                    NewsItem item = new NewsItem();
                    item.setHeading(article.getString("heading"));
                    item.setContent(article.getString("content"));
                    item.setDate(article.getString("date"));
                    item.setURL(article.getString("url"));
                    item.setImageURL(article.getString("image"));
                    newItems.add(item);
                } catch (Exception ex) {
                    Log.e(WeightliftingApp.TAG, "Error while parsing feed item #" + i);
                    // String.valueOf guards against ex.getMessage() == null,
                    // which would make Log.e throw and abort the whole parse.
                    Log.e(WeightliftingApp.TAG, String.valueOf(ex.getMessage()));
                }
            }
            setItems(newItems);
            setLastUpdate((new Date()).getTime());
            Log.i(WeightliftingApp.TAG, "News parsed, " + newItems.size() + " items found");
        } catch (Exception ex) {
            Log.e(WeightliftingApp.TAG, "News parsing failed");
            ex.printStackTrace();
        }
    }

    /**
     * Swaps freshly parsed items for their previously cached instances when
     * they are equal, so references held elsewhere stay valid.
     */
    private void keepOldReferences(ArrayList<UpdateableItem> oldItems, ArrayList<UpdateableItem> newItems) {
        ArrayList<NewsItem> oldNewsItems = casteArray(oldItems);
        ArrayList<NewsItem> newNewsItems = casteArray(newItems);
        for (int i = 0; i < newNewsItems.size(); i++) {
            for (int j = 0; j < oldNewsItems.size(); j++) {
                if ((newNewsItems.get(i)).equals(oldNewsItems.get(j))) {
                    newNewsItems.set(i, oldNewsItems.get(j));
                }
            }
        }
    }

    /**
     * Returns the first {@code n} news items, or all of them when fewer exist.
     *
     * @param n maximum number of items to return
     */
    public ArrayList<NewsItem> getFirstElements(int n) {
        if (n <= items.size())
            // Route through casteArray instead of the original raw-typed
            // `new ArrayList(...)`, which compiled unchecked and leaked
            // UpdateableItem-typed elements into a NewsItem-typed list.
            return casteArray(new ArrayList<>(items.subList(0, n)));
        else
            return casteArray(items);
    }
}
|
/**
* This file is part of the Java Machine Learning Library
*
* The Java Machine Learning Library is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The Java Machine Learning Library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with the Java Machine Learning Library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Copyright (c) 2006-2012, <NAME>
*
* Project: http://java-ml.sourceforge.net/
*
*/
package net.sf.javaml.clustering;
import java.util.Random;
import net.sf.javaml.core.Dataset;
import net.sf.javaml.core.DefaultDataset;
import net.sf.javaml.core.Instance;
import net.sf.javaml.distance.DistanceMeasure;
import net.sf.javaml.distance.EuclideanDistance;
import net.sf.javaml.tools.DatasetTools;
/**
* Implementation of the K-medoids algorithm. K-medoids is a clustering
* algorithm that is very much like k-means. The main difference between the two
* algorithms is the cluster center they use. K-means uses the average of all
* instances in a cluster, while k-medoids uses the instance that is the closest
* to the mean, i.e. the most 'central' point of the cluster.
*
* Using an actual point of the data set to cluster makes the k-medoids
* algorithm more robust to outliers than the k-means algorithm.
*
*
* @author <NAME>
*
*/
public class KMedoids implements Clusterer {
    /* Distance measure to measure the distance between instances */
    private DistanceMeasure dm;

    /* Number of clusters to generate */
    private int numberOfClusters;

    /* Random generator for selection of candidate medoids */
    private Random rg;

    /* The maximum number of iterations the algorithm is allowed to run. */
    private int maxIterations;

    /**
     * Default constructor: 4 clusters, at most 100 iterations, Euclidean
     * distance.
     */
    public KMedoids() {
        this(4, 100, new EuclideanDistance());
    }

    /**
     * Creates a new instance of the k-medoids algorithm with the specified
     * parameters.
     *
     * @param numberOfClusters
     *            the number of clusters to generate
     * @param maxIterations
     *            the maximum number of iteration the algorithm is allowed to
     *            run
     * @param dm
     *            the distance metric to use for measuring the distance
     *            between instances
     *
     */
    public KMedoids(int numberOfClusters, int maxIterations, DistanceMeasure dm) {
        super();
        this.numberOfClusters = numberOfClusters;
        this.maxIterations = maxIterations;
        this.dm = dm;
        rg = new Random(System.currentTimeMillis());
    }

    @Override
    public Dataset[] cluster(Dataset data) {
        Instance[] medoids = new Instance[numberOfClusters];
        Dataset[] output = new DefaultDataset[numberOfClusters];
        // Seed: pick random instances from the data set as initial medoids.
        // NOTE(review): sampling is with replacement, so two clusters may
        // start from the same instance.
        for (int i = 0; i < numberOfClusters; i++) {
            int random = rg.nextInt(data.size());
            medoids[i] = data.instance(random);
        }
        // Iterate assign/recalculate until the medoids stop moving or the
        // iteration budget is exhausted.
        boolean changed = true;
        int count = 0;
        while (changed && count < maxIterations) {
            changed = false;
            count++;
            int[] assignment = assign(medoids, data);
            changed = recalculateMedoids(assignment, medoids, output, data);
        }
        return output;
    }

    /**
     * Assign all instances from the data set to the medoids.
     *
     * @param medoids candidate medoids
     * @param data the data to assign to the medoids
     * @return best cluster indices for each instance in the data set
     */
    private int[] assign(Instance[] medoids, Dataset data) {
        int[] out = new int[data.size()];
        for (int i = 0; i < data.size(); i++) {
            double bestDistance = dm.measure(data.instance(i), medoids[0]);
            int bestIndex = 0;
            for (int j = 1; j < medoids.length; j++) {
                double tmpDistance = dm.measure(data.instance(i), medoids[j]);
                // dm.compare decides which distance is "better", so this also
                // works for similarity-style measures where larger is better.
                if (dm.compare(tmpDistance, bestDistance)) {
                    bestDistance = tmpDistance;
                    bestIndex = j;
                }
            }
            out[i] = bestIndex;
        }
        return out;
    }

    /**
     * Rebuilds the cluster contents from the given assignment and picks a new
     * medoid for every cluster: the data-set instance nearest to the cluster's
     * centroid. A cluster that ends up empty is re-seeded with a random
     * instance.
     *
     * @param assignment
     *            for each instance, the index of the cluster it belongs to
     * @param medoids
     *            the current set of cluster medoids, will be modified to fit
     *            the new assignment
     * @param output
     *            the cluster output, this will be modified at the end of the
     *            method
     * @param data
     *            the full data set being clustered
     * @return true when at least one medoid changed, i.e. clustering has not
     *         yet stabilized
     */
    private boolean recalculateMedoids(int[] assignment, Instance[] medoids,
            Dataset[] output, Dataset data) {
        boolean changed = false;
        for (int i = 0; i < numberOfClusters; i++) {
            output[i] = new DefaultDataset();
            for (int j = 0; j < assignment.length; j++) {
                if (assignment[j] == i) {
                    output[i].add(data.instance(j));
                }
            }
            if (output[i].size() == 0) { // new random, empty medoid
                medoids[i] = data.instance(rg.nextInt(data.size()));
                changed = true;
            } else {
                Instance centroid = DatasetTools.average(output[i]);
                Instance oldMedoid = medoids[i];
                medoids[i] = data.kNearest(1, centroid, dm).iterator().next();
                if (!medoids[i].equals(oldMedoid))
                    changed = true;
            }
        }
        return changed;
    }
}
|
<gh_stars>0
package com.piglin.optimization;
/**
 * Placeholder for an ant colony optimization implementation.
 * Created by swyna on 2/16/15; contains no behavior yet.
 */
public class AntColony {
}
|
import React from 'react';
import './index.scss';
import '../../img/logo.svg';
import '../../img/react.svg';
// Static landing page for the algae-ui docs site: introduces the library,
// its React version requirement (>=16.8, because Hooks are used),
// TypeScript authorship, and its runtime dependencies. The logo symbols
// referenced via <use xlinkHref=...> come from the SVG sprite imports above.
const Introduction: React.FunctionComponent = () => (
  <div className="introduction-page">
    <section className="introduction">
      <h1>Algae-UI of React</h1>
      <p>
        <code>algae-ui</code>是为 React 制作的一套 UI 组件库,主要用来学习。
      </p>
      <div className="logo">
        <svg>
          <use xlinkHref="#logo" />
        </svg>
        <span>&</span>
        <svg>
          <use xlinkHref="#react" />
        </svg>
      </div>
    </section>
    <section>
      <h2>支持环境</h2>
      <p>
        由于本库采用了 React 的新特性
        <a
          href="https://reactjs.org/docs/hooks-intro.html"
          target="_Blank"
          rel="noopener noreferrer"
        >
          Hook
        </a>
        , 所以请保证 React 的版本为 16.8 及以上。
      </p>
    </section>
    <section>
      <h2>使用 TypeScript</h2>
      <p>
        <code>algae-ui</code>使用
        <a
          href="https://www.typescriptlang.org/"
          target="_Blank"
          rel="noopener noreferrer"
        >
          TypeScript
        </a>
        编写。
        <a
          href="https://www.typescriptlang.org/"
          target="_Blank"
          rel="noopener noreferrer"
        >
          TypeScript
        </a>
        带来了可选的静态类型检查以及最新的 ECMAScript
        特性,有效增强代码的健壮性。
      </p>
    </section>
    <section>
      <h2>依赖</h2>
      <p>
        <code>algae-ui</code>只依赖 React、ReactDOM 两个核心库。
      </p>
    </section>
  </div>
);

export default Introduction;
|
// WeChat mini-program page: an endless-scrolling video list with inline
// playback and a per-item "more" toggle animation. (Minified source:
// !1 === false, !0 === true, 1e3 === 1000.)
var app = getApp(), api = app.api, is_loading_more = !1, is_no_more = !1;

Page({
    data: {
        page: 1,           // next page index to request
        video_list: [],    // videos accumulated across all loaded pages
        url: "",
        hide: "hide",
        show: !1,          // whether a video is currently playing
        animationData: {}
    },
    onLoad: function(o) {
        app.page.onLoad(this, o);
        // Load the first page and reset the pagination flags.
        this.loadMoreGoodsList(), is_no_more = is_loading_more = !1;
    },
    onReady: function() {},
    onShow: function() {
        app.page.onShow(this);
    },
    onHide: function() {
        app.page.onHide(this);
    },
    onUnload: function() {
        app.page.onUnload(this);
    },
    onPullDownRefresh: function() {},
    // Fetch the next page of videos unless a request is already in flight.
    loadMoreGoodsList: function() {
        var t = this;
        if (!is_loading_more) {
            t.setData({
                show_loading_bar: !0
            }), is_loading_more = !0;
            var i = t.data.page;
            app.request({
                url: api.default.video_list,
                data: {
                    page: i
                },
                success: function(o) {
                    // An empty page means the end of the list was reached.
                    0 == o.data.list.length && (is_no_more = !0);
                    var a = t.data.video_list.concat(o.data.list);
                    t.setData({
                        video_list: a,
                        page: i + 1
                    });
                },
                complete: function() {
                    is_loading_more = !1, t.setData({
                        show_loading_bar: !1
                    });
                }
            });
        }
    },
    // Pause the previously playing video, then switch playback to the
    // tapped item (index comes from the tapped element's dataset).
    play: function(o) {
        var a = o.currentTarget.dataset.index;
        getApp().core.createVideoContext("video_" + this.data.show_video).pause(), this.setData({
            show_video: a,
            show: !0
        });
    },
    // Load the next page when the user scrolls to the bottom.
    onReachBottom: function() {
        is_no_more || this.loadMoreGoodsList();
    },
    // Toggle the tapped item's "more" panel (show: -1 open / 0 closed)
    // with a 1s ease animation.
    more: function(o) {
        var a = this, t = o.target.dataset.index, i = a.data.video_list, e = getApp().core.createAnimation({
            duration: 1e3,
            timingFunction: "ease"
        });
        this.animation = e, -1 != i[t].show ? (e.rotate(0).step(), i[t].show = -1) : (e.rotate(0).step(),
        i[t].show = 0), a.setData({
            video_list: i,
            animationData: this.animation.export()
        });
    }
});
|
#!/bin/bash
# Creates (or recreates) a KVM controller domain with two qcow2 disks,
# instantiating its libvirt XML from the controller1.xml template.
set -e

name=$1

# Require exactly one argument: the domain name.
if [ $# != 1 ] ; then
    echo "USAGE: $0 <controller1 name>"
    echo " e.g.: $0 r5-multi-controller-1"
    exit 1;
fi

echo "KVM: creating $name"

# Tear down any existing domain of the same name; ignore failures when
# the domain does not exist yet.
sudo virsh destroy "$name" || true
sudo virsh undefine "$name" || true

# Recreate the backing disks (300G system disk + 250G data disk).
# All $name expansions are quoted so names with unusual characters
# cannot word-split or glob.
sudo rm -rf "/var/lib/libvirt/images/$name-0.img"
sudo qemu-img create -f qcow2 "/var/lib/libvirt/images/$name-0.img" 300G
sudo rm -rf "/var/lib/libvirt/images/$name-1.img"
sudo qemu-img create -f qcow2 "/var/lib/libvirt/images/$name-1.img" 250G

# Fill the NAME/DISK0/DISK1 placeholders in the XML template.
cp controller1.xml "vms/$name.xml"
sed -i -e "s,NAME,$name," \
    -e "s,DISK0,/var/lib/libvirt/images/$name-0.img," \
    -e "s,DISK1,/var/lib/libvirt/images/$name-1.img," \
    "vms/$name.xml"
echo "KVM: $name xml ready"
# Show the substituted lines for a quick sanity check (was `cat | grep`).
grep "$name" "vms/$name.xml"
sudo virsh define "vms/$name.xml"
echo "KVM: domain $name defined"
|
#!/bin/sh
# https://github.com/dxcSithLord/RHEL-Bench-Security/archive/master.zip
# https://github.com/dxcSithLord/RHEL-Bench-Security.git
# ------------------------------------------------------------------------------
# Docker Bench for Security
#
# Docker, Inc. (c) 2015-
#
# Checks for dozens of common best-practices around deploying Docker containers in production.
# ------------------------------------------------------------------------------
version='2.2.1'
# Load dependencies
. ./includes/helper_lib.sh
. ./includes/functions_lib.sh
# Setup the paths
this_path=$(abspath "$0") ## Path of this file including filename
myname=$(basename "${this_path}") ## file name of this script.
readonly version
readonly this_path
readonly myname
export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/local/bin:/usr/sbin/"
# Check for required program(s)
req_progs='awk grep ss stat'
for p in $req_progs; do
command -v "$p" >/dev/null 2>&1 || { printf "%s command not found.\n" "$p"; exit 1; }
done
usage () {
cat <<EOF
usage: ${myname} [options]
-b optional Do not print colors
-h optional Print this help message
-l FILE optional Log output in FILE
-c CHECK optional Comma delimited list of specific check(s)
-e CHECK optional Comma delimited list of specific check(s) to exclude
-i INCLUDE optional Comma delimited list of patterns within a container or image name to check
-x EXCLUDE optional Comma delimited list of patterns within a container or image name to exclude from check
EOF
}
# Get the flags
# If you add an option here, please
# remember to update usage() above.
while getopts bhl:c:e:i:x:t: args
do
case $args in
b) export nocolor="nocolor";;
h) usage; exit 0 ;;
l) logger="$OPTARG" ;;
c) check="$OPTARG" ;;
e) checkexclude="$OPTARG" ;;
i) include="$OPTARG" ;;
x) exclude="$OPTARG" ;;
*) usage; exit 1 ;;
esac
done
if [ -z "$logger" ]; then
logger="${myname}.log"
fi
# Load output formatting
. ./includes/output_lib.sh
yell_info
# Warn if not root
ID=$(id -u)
if [ "x$ID" != "x0" ]; then
warn "Some tests might require root to run"
sleep 3
fi
# Total Score
# Warn Scored -1, Pass Scored +1, Not Score -0
totalChecks=0
currentScore=0
logit "Initializing $(date)\n"
beginjson "$version" "$(date +%s)"
#######################################
# Description: Load all the tests from tests/ and run them.
# Globals:
# Arguments:
#   $@
#######################################
main () {
  # Get configuration location
  # If there is a container with label centos_bench_security, memorize it:
  benchcont="nil"
  for c in $(docker ps | sed '1d' | awk '{print $NF}'); do
    if docker inspect --format '{{ .Config.Labels }}' "$c" | \
     grep -e 'centos.bench.security' >/dev/null 2>&1; then
      benchcont="$c"
    fi
  done

  # get the image id of the docker_bench_security_image, memorize it:
  benchimagecont="nil"
  for c in $(docker images | sed '1d' | awk '{print $3}'); do
    if docker inspect --format '{{ .Config.Labels }}' "$c" | \
     grep -e 'centos.bench.security' >/dev/null 2>&1; then
      benchimagecont="$c"
    fi
  done

  # Build the container/image lists, honoring -i (include) / -x (exclude)
  # patterns and always filtering the benchmark's own container and image out.
  if [ -n "$include" ]; then
    pattern=$(echo "$include" | sed 's/,/|/g')
    containers=$(docker ps | sed '1d' | awk '{print $NF}' | grep -v "$benchcont" | grep -E "$pattern")
    images=$(docker images | sed '1d' | grep -E "$pattern" | awk '{print $3}' | grep -v "$benchimagecont")
  elif [ -n "$exclude" ]; then
    pattern=$(echo "$exclude" | sed 's/,/|/g')
    containers=$(docker ps | sed '1d' | awk '{print $NF}' | grep -v "$benchcont" | grep -Ev "$pattern")
    images=$(docker images | sed '1d' | grep -Ev "$pattern" | awk '{print $3}' | grep -v "$benchimagecont")
  else
    containers=$(docker ps | sed '1d' | awk '{print $NF}' | grep -v "$benchcont")
    # Fix: filter the benchmark IMAGE id out of the image list. The original
    # grepped against "$benchcont" (a container NAME), which never matches
    # `docker images -q` output, so the benchmark image was never excluded --
    # inconsistent with the include/exclude branches above.
    images=$(docker images -q | grep -v "$benchimagecont")
  fi

  # images unused?
  if [ -z "$containers" ]; then
    running_containers=0
  else
    running_containers=1
  fi

  # Source every test definition so its check_* functions become available.
  for test in tests/*.sh; do
    . ./"$test"
  done

  # Run either the full CIS suite, the suite minus excluded checks, or only
  # the explicitly requested checks.
  if [ -z "$check" ] && [ ! "$checkexclude" ]; then
    cis
  elif [ -z "$check" ] && [ "$checkexclude" ]; then
    checkexcluded="$(echo ",$checkexclude" | sed -e 's/^/\^/g' -e 's/,/\$|/g' -e 's/$/\$/g')"
    for c in $(grep -E 'check_[0-9]|check_[a-z]' includes/functions_lib.sh | grep -vE "$checkexcluded"); do
      "$c"
    done
  else
    for i in $(echo "$check" | sed "s/,/ /g"); do
      if command -v "$i" 2>/dev/null 1>&2; then
        "$i"
      else
        echo "Check \"$i\" doesn't seem to exist."
        continue
      fi
    done
  fi

  printf "\n"
  info "Checks: $totalChecks"
  info "Score: $currentScore"
  endjson "$totalChecks" "$currentScore" "$(date +%s)"
}
main "$@"
|
/**
 * Basic behaviors every bird implementation must provide.
 *
 * <p>The redundant {@code public} modifiers were dropped: interface
 * methods are implicitly public abstract, and repeating the modifier
 * is discouraged noise.
 */
public interface Bird {
    /** Performs the bird's flying behavior. */
    void fly();

    /** Performs the bird's eating behavior. */
    void eat();

    /** Performs the bird's sleeping behavior. */
    void sleep();

    /** Emits the bird's characteristic sound. */
    void makeNoise();
}
|
from flask import Flask, request, jsonify

app = Flask(__name__)

# In-memory message store; reset on every process restart.
messages = []


@app.route('/message', methods=['GET', 'POST'])
def message():
    """GET: return all stored messages. POST: store one JSON message."""
    if request.method == 'GET':
        return jsonify({'messages': messages})
    # silent=True returns None (instead of raising) for a missing or
    # malformed JSON body, so we can answer with a clean 400. The
    # original crashed with AttributeError on non-JSON POSTs and
    # silently appended None when the "message" key was absent.
    payload = request.get_json(silent=True) or {}
    message = payload.get('message')
    if message is None:
        return jsonify({'error': 'missing "message" field'}), 400
    messages.append(message)
    return jsonify({'message': message})


if __name__ == '__main__':
    app.run(debug=True)
|
<filename>src/components/Login/Login.js
import { useContext } from 'react'
import { useHistory } from 'react-router-dom'
import { ToastContainer } from 'react-toastify'
import UserContext from '../../contexts/UserContext'
import userService from '../../services/userService'
import errorHandler from '../../utils/errorHandler'
// Login form page: authenticates through userService, stores the logged-in
// user in the shared UserContext, and redirects to the home page on success.
// Errors surface as toasts via the shared errorHandler/ToastContainer.
const Login = () => {
    const history = useHistory()
    const { setUserData } = useContext(UserContext)

    // Reads email/password straight from the uncontrolled form inputs
    // (by input name), delegates to userService.login, and redirects
    // only when login returns a truthy result.
    const onLoginFormSubmitHandler = async (e) => {
        e.preventDefault()

        try {
            const email = e.target.email.value
            const password = e.target.password.value
            let loginRes = await userService.login(email, password, setUserData)
            if (loginRes) history.push('/')
        } catch (err) { errorHandler(err) }
    }

    return (
        <section>
            <header>
                <h1>Login</h1>
            </header>
            <article>
                <form onSubmit={onLoginFormSubmitHandler}>
                    <fieldset>
                        <label>Email</label>
                        <input type="email" name="email" placeholder="Email" autoComplete="email" />
                    </fieldset>
                    <fieldset>
                        <label>Password</label>
                        <input type="password" name="password" placeholder="<PASSWORD>" autoComplete="current-password" />
                    </fieldset>
                    <button>Login</button>
                </form>
            </article>
            <ToastContainer />
        </section >
    )
}
|
<gh_stars>0
//
// handlers for misc routes
//

// Route handlers keyed by name; each takes the request data and a
// callback that receives the HTTP status code to respond with.
const handlers = {
  // Health-check route: always answers with HTTP 200.
  ping(data, cb) {
    cb(200);
  },

  // Fallback for unmatched routes: answers with HTTP 404.
  notFound(data, cb) {
    cb(404);
  },
};

export default handlers;
|
class EnvironmentMetrics:
    """A pair of ambient readings: relative humidity and temperature."""

    def __init__(self, humidity, temperature):
        # Relative humidity in percent; temperature units are not enforced
        # here (presumably Celsius -- confirm with callers).
        self.humidity = humidity
        self.temperature = temperature

    def is_humidity_optimal(self):
        """Return True when humidity lies in the optimal 40-60 band (inclusive)."""
        low, high = 40, 60
        return low <= self.humidity <= high

    def is_temperature_optimal(self):
        """Return True when temperature lies in the optimal 20-25 band (inclusive)."""
        low, high = 20, 25
        return low <= self.temperature <= high
|
export const version = "transactions/5.0.4";
//# sourceMappingURL=_version.js.map
|
// Generated by script, don't edit it please.
import createSvgIcon from '../../createSvgIcon';
import GitlabSvg from '@rsuite/icon-font/lib/legacy/Gitlab';
const Gitlab = createSvgIcon({
as: GitlabSvg,
ariaLabel: 'gitlab',
category: 'legacy',
displayName: 'Gitlab'
});
export default Gitlab;
|
// Shared infrastructure setup: multer upload middleware, cloudinary
// client, and the mongoose connection, exported for use by routes.
const mongoose = require('mongoose');
const cloudinary = require('cloudinary');
const multer = require('multer');

// set up multer
// variable to limit image file size (1 MiB per uploaded file)
const limits = { fileSize: 1024 * 1024 };
const upload = multer({
  storage: multer.diskStorage({}),
  limits,
  fileFilter: (req, file, cb) => {
    // Deliberately accept every file (cb(null, true)) but flag
    // non-JPEG/PNG uploads on the request so the route handler can
    // decide how to reject them.
    if (file.mimetype !== 'image/jpeg' && file.mimetype !== 'image/png') {
      req.invalidFile = true;
    }
    cb(null, true);
  },
});

// set up cloudinary (credentials come from the environment)
cloudinary.config({
  cloud_name: process.env.CLOUD_NAME,
  api_key: process.env.API_KEY,
  api_secret: process.env.API_SECRET,
});

// set up mongoose connection; falls back to a local database when
// MONGODB_URI is not provided.
mongoose.connect(
  process.env.MONGODB_URI || 'mongodb://localhost:27017/bashhub'
);

module.exports = { db: mongoose.connection, upload, cloudinary };
|
#!/bin/zsh
sock.ativo () # succeed (0) when loadedx reports a loaded playlist, fail (1) otherwise
{
[[ -z $(loadedx) ]] && return 1 || return 0
#[[ -e /tmp/mpid ]] && return 1 || return 0
} 2>/dev/null
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
get.socks () # list all mpv IPC sockets under /tmp, one per line
{
print -l /tmp/mpvsock*
} 2>/dev/null
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
get.title () # print the title(s) of the given URL(s) via youtube-dl
{
youtube-dl --get-title "$@"
}
search ()
{
main () #
{
base="$(youtube-dl --get-title --get-id --get-duration "ytsearch$qt_return:"$@"" |xargs -0)"
control=1
controllerT=1
controllerI=1
controllerD=1
typeset -A Titles IDs Durations
for element in ${${(f)base}[@]}
{
case $control in
1 )
control=$(($control+1))
Titles+=([$controllerT]=$element)
controllerT=$(($controllerT+1)) ;;
2 )
control=$(($control+1))
IDs+=([$controllerI]="https://www.youtube.com/watch?v=$element")
controllerI=$(($controllerI+1)) ;;
3 )
control=1
Durations+=([$controllerD]=$element)
controllerD=$(($controllerD+1)) ;;
esac
}
if [[ $#Titles[@] -gt 1 ]]; then
for (( i = 1; i <= $#Titles[@]; i++ )); do
[[ $(($i%2)) -eq 0 ]] && \
{ echo "$(tput sgr0; tput setaf 7; tput setab 8; tput bold;) $(printf '%-'$(($(($(($COLUMNS-8))/4))*2))'s' "${Titles[$i][1,$(($(($(($COLUMNS-8))/4))*2))]}") $(printf '%8s' "$Durations[$i]") $(tput sgr0; tput setaf 12; tput setab 8;) $(printf '%'$(($(($COLUMNS-8))/4))'s' "$IDs[$i]") $(tput sgr0;)" } || \
{ echo "$(tput sgr0; tput setaf 7; tput setab 0; tput bold;) $(printf '%-'$(($(($(($COLUMNS-8))/4))*2))'s' "${Titles[$i][1,$(($(($(($COLUMNS-8))/4))*2))]}") $(printf '%8s' "$Durations[$i]") $(tput sgr0; tput setaf 12; tput setab 0;) $(printf '%'$(($(($COLUMNS-8))/4))'s' "$IDs[$i]") $(tput sgr0;)" }
done
else
echo "$(tput sgr0; tput setaf 7; tput setab 0; tput bold;) $(printf '%-'$(($(($(($COLUMNS-8))/4))*2))'s' "${Titles[1][1,$(($(($(($COLUMNS-8))/4))*2))]}") $(printf '%8s' "$Durations") $(tput sgr0; tput setaf 12; tput setab 0;) $(printf '%'$(($(($COLUMNS-8))/4))'s' "${IDs}") $(tput sgr0;)"
fi
}
_select () #
{
select sel in ${(f)"$(main $parms)"}; do
[[ -n $sel ]] && { add "${${(s: :)sel}[-2]}"; mpd start & } || break
done
printf '%s\n' "continuar com a mesma pesquisa? [s/n]:"
read select_continue
while true; do
if [[ $select_continue == "s" || $select_continue == "S" ]]; then
clear
_select
elif [[ $select_continue == "n" || $select_continue == "N" ]]; then
break
return 0
else
printf '%s\n' "E necessario informar s|S para continuar ou n|N para sair da pesquisa atual"
printf '%s\n' "continuar com a mesma pesquisa? [s/n]:"
read select_continue
continue
fi
done
}
unset qt_return
custom_return="false"
typeset -a parms
print "Search -> "
read parms
print "Return -> "
read qt_return
_select
exit 0
}
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
indice.selected () # store the index (first space-separated field) of the selected entry in $tmpcod
{
echo $@|cut -d" " -f1 > $tmpcod
}
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
format.url () #Argumentos
{
if [[ "$url" =~ "https://www.youtube.com/playlist?|'https://www.youtube.com.*start_radio'" ]]; then
declare -x format="best"
declare -x new_class="youtube"
elif [[ "$url" =~ "painelcode.me" ]]; then
declare -x format="best"
declare -x new_class="iptv"
else
case "$url" in;
*www.xvideos.com* )
base=$(\
youtube-dl --list-formats "$url"\
|grep -Ev "[XV]ideos|format|info|Downloading"\
|awk '{print $1,$2,$3}'\
|sed -E 's/ |$/\|/g');;
*spankbang.com* )
base=$(\
youtube-dl --list-formats "$url"\
|grep -Ev "Spank[b|B]ang|format|info"\
|awk '{print $1,$2,$3}'\
|sed -E 's/ |$/\|/g');;
*youtube.com* )
base=$(\
youtube-dl --list-formats "$url"\
|grep -Ev "Spank[b|B]ang|format|info|youtube|video only"\
|awk '{print $1,$2,$3}'\
|sed -E 's/ |$/\|/g');;
*youtu.be* )
base=$(\
youtube-dl --list-formats "$url"\
|grep -Ev "Spank[b|B]ang|format|info|youtube|video only"\
|awk '{print $1,$2,$3}'\
|sed -E 's/ |$/\|/g');;
esac
format=$(yad --list \
--columns=3 --column "Format Code" --column "Extension" --column "Resolution/Audio Only" \
--button="BEST":"mp play.best" \
--title "Opções :" --selectable-labels --search-column=2 --search-column=3 --regex-search \
--text-align=center --geometry 400x320 --borders=5 ${(s:|:)base} |cut -d'|' -f1|grep -Ev '^$')
[[ -z $format ]] && exit 0
fi
}
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------#
loadedx () # no args: print "index|title" for every playlist entry; with an index: print that entry
{
# Strip the playlist's JSON down to one bare title per array element.
base=(${(f)"$(playlist |sed 's/'}','{'/\n/g;s/\]\|\[\|{\|}//g;s/,\"request_id".*//g;s/,\"current\":true\|,\"playing\":true//g;s/\"data\":\|\"filename\":\|\"title\":\|\"id\"://g;s/\",/\"|/g;s/\"//g'| sed 's/|[[:digit:]].*//g')"})
if [[ -z $@ ]];then
unset control
# Number every entry starting at 1.
for media in $base[@]
{
(( control = control + 1 ))
print "$control|$media"
}
else
print "$@|$base[$@]"
fi
}
source.file () # append every line of the given file to the playlist, then start mpd in the background
{
add ${(f)"$(<$@)"}
mpd start &
}
|
// Barrel module: re-exports the mailing and password helpers so callers
// can require them from a single path.
const sendEmail = require('./email')
const isPasswordCorrect = require('./isPasswordCorrect')

module.exports = {
  sendEmail,
  isPasswordCorrect,
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.