text stringlengths 1 1.05M |
|---|
package org.vincibean.akka.websocket.playground
import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage}
import akka.stream.scaladsl.{Flow, Keep, RunnableGraph, Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.{Done, NotUsed}
import org.reactivestreams.Publisher
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random
/**
 * Provides a WebSocket handler Flow that ignores incoming messages and pushes
 * a random 6-character alphanumeric string to the client once per second.
 */
object WebSocketDataProvider {
// NOTE(review): creating the ActorSystem/materializer eagerly at object init is
// a module-level side effect; webSocketFlow also takes its own implicit
// as/am/ec which shadow these inside the method body — confirm which pair is
// actually intended to be used.
implicit val as: ActorSystem = ActorSystem("example")
implicit val am: ActorMaterializer = ActorMaterializer()
// Builds the WebSocket Flow: the sink drains client messages, the source emits
// one random string per second. NOTE(review): the implicit `ec` parameter is
// never used in this body.
def webSocketFlow()(implicit as: ActorSystem, am: ActorMaterializer, ec: ExecutionContext): Flow[Message, TextMessage.Strict, NotUsed] = {
// Materializes the throttled random-string stream into a single-subscriber
// publisher (fanout = false) that backs the outgoing side of the flow.
val publisher = Source.repeat(() => Random.alphanumeric.take(6).mkString)
.map(f => f())
.throttle(1, 1.second)
.runWith(Sink.asPublisher(fanout = false))
Flow.fromSinkAndSource(sink, source(publisher))
}
// Drains and discards every incoming message; the streamed variants must be
// consumed (runWith Sink.ignore) to avoid stalling the connection.
private def sink(implicit am: ActorMaterializer): Sink[Message, Future[Done]] = Sink.foreach[Message] {
case tm: TextMessage =>
tm.textStream.runWith(Sink.ignore)
()
case bm: BinaryMessage =>
bm.dataStream.runWith(Sink.ignore)
()
}
// Wraps the materialized publisher as the outgoing message source.
private def source(publisher: Publisher[String]): Source[TextMessage.Strict, NotUsed] =
Source.fromPublisher(publisher).map(TextMessage(_))
}
# White-box membership-inference attack on DP-CGAN (eps=100, MNIST), run once
# per generator-accuracy checkpoint, each followed by ROC evaluation of the
# attack results. Replaces seven copy-pasted invocation pairs with one loop so
# adding a checkpoint is a one-token change.
for acc in 10 20 30 40 50 60 70; do
  base="./data/CGAN/mnist/eps100/diff_acc/${acc}"
  python attack_models/wb_dpcgan.py -name "dpcgan_eps100_${acc}_mnist" -posdir "${base}/train/" -negdir "${base}/test" -gdir "${base}" --data_num 64
  python attack_models/tools/eval_roc.py --attack_type wb -ldir "./attack_models/results/wb/dpcgan_eps100_${acc}_mnist"
done
|
const curry = require('lodash/curry');
// FIXME: export public API or somehow improve this
const { run } = require('bottender/dist/bot/Bot');
/**
 * Build a curried bottender Action that runs `Action` sequentially `times`
 * times with the same context/props, then resolves.
 *
 * @param {number} times  how many times to run the action
 * @param {Function} Action  the bottender action to repeat
 * @returns {Function} an async action with a descriptive display name
 */
const repeat = (times, Action) => {
  const Fn = async (context, props) => {
    let iteration = 0;
    // Sequential on purpose: each run must finish before the next one starts.
    while (iteration < times) {
      await run(Action)(context, props); // eslint-disable-line no-await-in-loop
      iteration += 1;
    }
  };
  // Surface a readable name (e.g. "Repeat(3, Greet)") for debugging/tracing.
  Object.defineProperty(Fn, 'name', {
    value: `Repeat(${times}, ${Action.name || 'Anonymous'})`,
  });
  return Fn;
};
module.exports = curry(repeat);
|
#!/bin/bash
# Bootstrap the front-end build: install node and bower dependencies, run the
# default grunt build, then stay in watch mode for incremental rebuilds.
# Fail fast: previously a failed `npm install` or `bower install` did not stop
# the script, so grunt would build against missing/stale dependencies.
set -e
npm install
bower install
grunt
grunt watch
#!/bin/bash
# This approver is to be used during the frozen period in the
# second half of every sprint, where only pull requests that
# fix bugs are allowed to merge.
#
# Exit codes: 0 = approved, 1 = rejected, 127 = usage/unknown severity.

# Guard clause: exactly one severity argument is required.
if [[ $# -ne 1 ]]; then
  echo "[ERROR] Usage: $0 SEVERITY"
  exit 127
fi

# Lowercase the argument (bash 4+ case conversion) before matching.
severity="${1,,}"

case "${severity}" in
  "none")
    echo "[ERROR] Only bugs and blocker bugs are allowed to merge during dev-cut."
    exit 1
    ;;
  # Substring matches: any severity string mentioning one of these labels
  # is allowed to merge.
  *"lowrisk"* | *"blocker"* | *"bug"*)
    exit 0
    ;;
  *)
    echo "[ERROR] Unknown severity '${severity}': only one of 'none', 'bug', 'blocker', or 'lowrisk' allowed."
    exit 127
    ;;
esac
|
<filename>src/data/ParameterEnums.js
// Valid Parliament numbers and session numbers accepted by the data source.
const Parliament = {
Number: [36, 37, 38, 39, 40, 41, 42, 43],
Session: [1, 2, 3]
}
// Query-parameter values for bill searches. The strings are pre-encoded for
// URLs: '+' stands for a space and %xx sequences are percent-escapes
// (e.g. %e2%80%99 is a right single quotation mark, %2f a slash).
const Bill = {
Chambers: {
senate: 'Senate',
commons: 'House+of+Commons'
},
// Party/caucus affiliation labels, URL-encoded.
Affiliation: {
BlocQuebecois: 'Bloc+Québécois',
CanadianAlliance: 'Canadian+Alliance',
CanadianSenatorsGroup: 'Canadian+Senators+Group',
Conservative: 'Conservative',
ConservativeIndependent: 'Conservative+Independent',
ForcesEtDemocratie: 'Forces+et+Démocratie',
GreenParty: 'Green+Party',
Independent: 'Independent',
IndependentConservative: 'Independent+Conservative',
IndependentSenatorsGroup: 'Independent+Senators+Group',
Liberal: 'Liberal',
NDP: 'NDP',
NonAffiliated: 'Non-affiliated',
PC: 'PC',
PCDR: 'PC%2fDR'
},
// Bill type, grouped by originating chamber/sponsor.
Type: {
senate: {
government: 'Senate+Government+Bill',
public: 'Senate+Public+Bill',
private: 'Senate+Private+Bill'
},
house: {
government: 'House+Government+Bill'
},
member: {
private: 'Private+Member%e2%80%99s+Bill'
}
},
// Legislative-stage status codes, grouped by chamber.
Status: {
general: {
assented: 'RoyalAssentGiven',
defeated: 'BillDefeated',
tabled: 'WillNotBeProceededWith'
},
house: {
readings: {
first: 'HouseAt1stReading',
second: 'HouseAt2ndReading',
third: 'HouseAt3rdReading'
},
reports: {
report: 'HouseAtReportStage',
beforeReading2: 'HouseAtReportStageAndSecondReading'
},
committee: {
beforeReading2: 'HouseAtReferralToCommitteeBeforeSecondReading',
current: 'HouseInCommittee'
},
amendment: {
consideration: 'HouseConsiderationOfAmendments'
}
},
senate: {
readings: {
first: 'SenateAt1stReading',
second: 'SenateAt2ndReading',
third: 'SenateAt3rdReading'
},
amendment: {
consideration: 'SenateConsiderationOfAmendments'
},
committee: {
consideration: 'SenateConsiderationOfCommitteeReport',
current: 'SenateInCommittee'
}
}
}
}
const Politician = {
// Caucus identifiers — numeric ids internal to the data source.
Caucus: {
unknown: 0,
reform: 7,
people: 24357,
progressiveConservative: 5,
newDemocratic: 3,
liberal: 4,
independentConservative: 1796,
independentBlocQuebec: 3638,
independentCanadianAlliance: 8,
green: 14130,
forceEtDemocratie: 20915,
CooperativeCommonwealthFederation: 24046,
conservativeIndependent: 20159,
conservative: 8781,
canadianAlliance: 103,
blocQuebecois: 6
},
// Province/territory -> two-letter postal abbreviation.
// NOTE(review): 'NorthWesTerritories' is misspelled, but the key is part of
// the exported API surface — renaming it would break consumers; flagged only.
Province: {
Alberta: 'AB',
BritishColumbia: 'BC',
Manitoba: 'MB',
NewBrunswick: 'NB',
Newfoundland: 'NL',
NorthWesTerritories: 'NT',
NovaScotia: 'NS',
Nunavut: 'NU',
Ontario: 'ON',
PrinceEdwardIsland: 'PE',
Quebec: 'QC',
Saskatchewan: 'SK',
Yukon: 'YT'
}
}
const Expenditure = {
// Year -> member-expenditure report identifier used by the data source.
// 2019 points at a Q2 year-to-date report rather than a full-year one.
Year: {
// Q1:'MER2020Q1-1019'
2019: 'MER2020Q2-1023', // Q2 YTD
2018: 'MER2019Q4',
2017: 'MER2018Q4',
2016: 'MER2017Q4B',
2015: 'MER2016Q4',
2014: 'MER2015FY',
2013: 'MER2014FY',
2012: 'MER2013FY'
},
// Report identifier -> Parliament number the report belongs to.
YearToParliament: {
'MER2020Q2-1023': 43,
MER2019Q4: 42,
MER2018Q4: 42,
MER2017Q4B: 42,
MER2016Q4: 42,
MER2015FY: 41,
MER2014FY: 41,
MER2013FY: 40
}
}
// Vote-record enumerations. Numeric values are internal ids used by the data
// source; the single letters under Topic are its decision-type codes.
const Vote = {
Outcome: {
passed: 15,
failed: 16,
tied: 17
},
// Parliament number -> session number -> internal "parliament session" id.
Parliament: {
43: {
1: 153
},
42: {
1: 152
},
41: {
2: 151,
1: 150
},
40: {
3: 147,
2: 145,
1: 143
},
39: {
1: 142
},
38: {
1: 140
}
},
// Which parliament/session combinations have vote data available.
ParliamentExists: {
43: {
1: true
},
42: {
1: true
},
41: {
2: true,
1: true
},
40: {
3: true,
2: true,
1: true
},
39: {
1: true
},
38: {
1: true
}
},
// Reverse lookup: internal "parliament session" id -> parliament number.
ParliamentfromParlSession: {
153: 43,
152: 42,
151: 41,
150: 41,
147: 40,
145: 40,
143: 40,
142: 39,
140: 38
},
// Vote (bill) type ids; senate government votes are further subdivided.
Type: {
government: {
house: 3,
senate: {
public: 80760,
private: 80761,
other: 80759
}
},
private: 4
},
// Single-letter decision/topic codes used by the data source.
Topic: {
budget: {
policy: 'E',
appropriations: 'W'
},
committee: {
report: 'K'
},
motion: {
government: 'G',
routine: 'R',
opposition: 'O',
private: 'M'
},
statutory: 'L',
other: 'X'
}
}
// Node names used by the update subsystem. NOTE(review): the mapping of keys
// to collection names is taken at face value from the literals below —
// confirm semantics (e.g. TfIdf -> 'raw', None -> 'leaf') against the consumer.
const Updates = {
Bill: 'bills',
Politician: 'politicians',
Vote: 'vote_records',
Voter: 'voters',
Role: 'roles',
TfIdf: 'raw',
Category: 'classifications',
All: 'root',
None: 'leaf',
Finance: 'finances'
}
module.exports = {
BillParameters: Object.freeze(Bill),
PoliticianParameters: Object.freeze(Politician),
VoteParameters: Object.freeze(Vote),
ExpenditureParameters: Object.freeze(Expenditure),
Parliament: Object.freeze(Parliament),
UpdateNode: Object.freeze(Updates)
}
|
// Licensed to Preferred Networks, Inc. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Preferred Networks, Inc. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package iface
import (
"context"
"github.com/pfnet-research/pftaskqueue/pkg/apis/worker"
"github.com/google/uuid"
"github.com/pfnet-research/pftaskqueue/pkg/apis/task"
"github.com/pfnet-research/pftaskqueue/pkg/apis/taskqueue"
"github.com/pkg/errors"
)
// Sentinel errors returned by Backend implementations.
//
// NOTE(review): several names diverge from their messages and from Go's
// ErrXxx naming convention: WorkerExitedError carries "Worker already exists"
// (name suggests "exited") and TaskSuspendedError carries "Queue is
// suspended" (name says Task). These identifiers are exported API, so they
// are only flagged here — renaming them would break callers; confirm the
// intended semantics before changing either the name or the message.
var (
TaskQueueNotFound = errors.New("Queue not found")
TaskQueueExisted = errors.New("Queue already exists")
TaskQueueEmptyError = errors.New("Queue is empty")
TaskSuspendedError = errors.New("Queue is suspended")
WorkerNotFound = errors.New("Worker not found")
WorkerExitedError = errors.New("Worker already exists")
WorkerSalvationNotAllowed = errors.New("Worker salvation not allowed")
)
// Backend abstracts the storage layer for task queues, their workers and
// their tasks. All methods take a context for cancellation; lookups return
// the sentinel errors declared above where applicable. Queues are addressed
// either by name or by UID depending on the method — note the mixed keying
// below (NOTE(review): worth unifying, but that would change the interface).
type Backend interface {
// Queue lifecycle (keyed by name or spec).
CreateQueue(ctx context.Context, queueSpec taskqueue.TaskQueueSpec) (*taskqueue.TaskQueue, error)
GetAllQueues(ctx context.Context) ([]*taskqueue.TaskQueue, error)
GetQueueByName(ctx context.Context, queueName string) (*taskqueue.TaskQueue, error)
UpdateQueue(ctx context.Context, queueSpec taskqueue.TaskQueueSpec) (*taskqueue.TaskQueue, error)
DeleteQueue(ctx context.Context, queueName string) error
// Worker registration, lookup and state transitions (keyed by queue/worker UID).
RegisterWorker(ctx context.Context, queueUID uuid.UUID, workerSpec worker.WorkerSpec) (*worker.Worker, error)
GetAllWorkers(ctx context.Context, queueUID uuid.UUID) ([]*worker.Worker, error)
GetWorker(ctx context.Context, queueUID, workerUID uuid.UUID) (*worker.Worker, error)
GetRunningWorkers(ctx context.Context, queueUID uuid.UUID) ([]*worker.Worker, error)
GetSucceededWorkers(ctx context.Context, queueUID uuid.UUID) ([]*worker.Worker, error)
GetFailedWorkers(ctx context.Context, queueUID uuid.UUID) ([]*worker.Worker, error)
GetLostWorker(ctx context.Context, queueUID uuid.UUID) ([]*worker.Worker, error)
GetWorkersToSalvage(ctx context.Context, queueUID uuid.UUID) ([]*worker.Worker, error)
SendWorkerHeartBeat(ctx context.Context, queueUID, workerUID uuid.UUID) (*worker.Worker, error)
SetWorkerSucceeded(ctx context.Context, queueUID, workerUID uuid.UUID) (*worker.Worker, error)
SetWorkerFailed(ctx context.Context, queueUID, workerUID uuid.UUID) (*worker.Worker, error)
SalvageWorker(ctx context.Context, queueUID, salvagingWorkerUID, salvageTargetWorkerUID uuid.UUID) (*worker.Worker, []*task.Task, error)
// Task enqueue/dequeue and queries (add/list by queue name, claim by UID).
AddTask(ctx context.Context, queueName string, spec task.TaskSpec) (*task.Task, error)
NextTask(ctx context.Context, queueUID, workerUID uuid.UUID) (*task.Task, error)
GetAllTasks(ctx context.Context, queueName string) ([]*task.Task, error)
GetProcessingTasks(ctx context.Context, queueName string) ([]*task.Task, error)
GetPendingTasks(ctx context.Context, queueName string) ([]*task.Task, error)
GetReceivedTasks(ctx context.Context, queueName string) ([]*task.Task, error)
GetCompletedTasks(ctx context.Context, queueName string) ([]*task.Task, error)
GetSucceededTasks(ctx context.Context, queueName string) ([]*task.Task, error)
GetFailedTasks(ctx context.Context, queueName string) ([]*task.Task, error)
// Task result recording, including follow-up task specs on success/failure.
SetProcessing(ctx context.Context, queueUID, workerUID uuid.UUID, task *task.Task) error
SetSucceeded(ctx context.Context, queueUID, workerUID uuid.UUID, task *task.Task, resultPayload *string, message *string, onSuccessSpecs []task.TaskSpec) error
RecordFailure(ctx context.Context, queueUID, workerUID uuid.UUID, task *task.Task, resultPayload *string, message *string, reason task.TaskResultReason, onFailureSpecs []task.TaskSpec) error
GetDeadLetter(ctx context.Context, queueName string) ([]taskqueue.TaskToDeadletterError, error)
}
|
#!/bin/sh
# Dump the configured PostgreSQL database and compress it to dump-db.gz.
# Connection parameters come from the environment: POSTGRES_URL, POSTGRES_DB,
# POSTGRES_PORT, POSTGRES_USER (password via PGPASSWORD/.pgpass as usual).
# Fix: quote every variable expansion so empty or space-containing values do
# not silently become separate words / dropped options.
pg_dump --host="$POSTGRES_URL" --dbname="$POSTGRES_DB" --port="$POSTGRES_PORT" --username="$POSTGRES_USER" | gzip -c > "dump-db.gz"
|
package de.unibi.agbi.biodwh2.core.model;
import com.fasterxml.jackson.annotation.JsonIgnore;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.List;
/**
 * Metadata for a data source: its version, when it was last updated, the
 * source files it consists of, and which pipeline steps succeeded.
 *
 * NOTE(review): the "Successfull" spelling in several field names is kept —
 * these public fields are presumably part of the serialized JSON schema
 * (Jackson is on the classpath); renaming them would break existing metadata
 * files. Confirm before fixing the typos.
 */
public final class DataSourceMetadata {
public Version version;
// ISO-8601 local date-time string; written by setUpdateDateTimeNow().
public String updateDateTime;
public List<String> sourceFileNames;
public Boolean parseSuccessfull;
public Boolean exportRDFSuccessfull;
public Boolean exportGraphMLSuccessfull;
public Boolean updateSuccessful;
public DataSourceMetadata() {
sourceFileNames = new ArrayList<>();
}
/**
 * Parses {@link #updateDateTime} back into a LocalDateTime.
 * NOTE(review): throws NullPointerException if updateDateTime was never set,
 * and DateTimeParseException on malformed input — callers must guard.
 */
@JsonIgnore
public LocalDateTime getLocalUpdateDateTime() {
return LocalDateTime.parse(updateDateTime, DateTimeFormatter.ISO_LOCAL_DATE_TIME);
}
/** Stamps {@link #updateDateTime} with the current time in ISO-8601 format. */
public void setUpdateDateTimeNow() {
updateDateTime = LocalDateTime.now().format(DateTimeFormatter.ISO_LOCAL_DATE_TIME);
}
}
|
# Source download arguments: fetch the Plasma 5.8.3 release tarballs
# (*.tar.xz only) from the KDE download mirror.
WGET_ARGS=( http://download.kde.org/stable/plasma/5.8.3/ -A '*.tar.xz' )
|
package info.u250.c2d.box2d.model.fixture;
import info.u250.c2d.box2d.model.b2FixtureDefModel;
/**
 * Fixture-definition model for a Box2D segment shape.
 * NOTE(review): currently adds no fields or behavior beyond the base
 * b2FixtureDefModel — it only specializes the type. Confirm whether
 * segment-specific data (e.g. endpoints) is intended to live here.
 */
public class b2SegmentFixtureDefModel extends b2FixtureDefModel{
// Serialization version id (inherited Serializable contract from the base model).
private static final long serialVersionUID = 1L;
}
|
<reponame>SR-SAYED/Online-Shopping-App
package com.example.shoppingapp;
import androidx.annotation.NonNull;
import androidx.appcompat.app.AlertDialog;
import androidx.appcompat.app.AppCompatActivity;
import androidx.recyclerview.widget.LinearLayoutManager;
import androidx.recyclerview.widget.RecyclerView;
import android.content.DialogInterface;
import android.content.Intent;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.TextView;
import android.widget.Toast;
import com.example.shoppingapp.Model.Cart;
import com.example.shoppingapp.Model.Favorites;
import com.example.shoppingapp.Prevalent.Prevalent;
import com.example.shoppingapp.ViewHolder.CartViewHolder;
import com.example.shoppingapp.ViewHolder.WishViewHolder;
import com.firebase.ui.database.FirebaseRecyclerAdapter;
import com.firebase.ui.database.FirebaseRecyclerOptions;
import com.google.android.gms.tasks.OnCompleteListener;
import com.google.android.gms.tasks.Task;
import com.google.firebase.database.DatabaseReference;
import com.google.firebase.database.FirebaseDatabase;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.HashMap;
/**
 * Shows the signed-in user's wish list, backed by the Firebase path
 * "Wish List/User View/&lt;phone&gt;/Products", and lets the user remove items
 * via a Yes/No confirmation dialog.
 */
public class WishListActivity extends AppCompatActivity {
// NOTE(review): wish_name, wish_price and productId are never read in this
// class — presumably leftovers from an earlier revision; confirm before removal.
private String wish_name, wish_price;
private RecyclerView recyclerView;
private RecyclerView.LayoutManager layoutManager;
//edited recent
private String productId="";
//end
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_wish_list);
// Wire up the wish-list RecyclerView with a vertical linear layout.
recyclerView = findViewById((R.id.wish_list));
recyclerView.setHasFixedSize(true);
layoutManager = new LinearLayoutManager(this);
recyclerView.setLayoutManager(layoutManager);
}
@Override
protected void onStart() {
super.onStart();
// Query this user's wish-list products and bind them through a
// FirebaseRecyclerAdapter (one WishViewHolder per Favorites entry).
final DatabaseReference wishListRef = FirebaseDatabase.getInstance().getReference().child("Wish List");
FirebaseRecyclerOptions<Favorites> options =
new FirebaseRecyclerOptions.Builder<Favorites>()
.setQuery(wishListRef.child("User View")
.child(Prevalent.currentUser.getPhone()).child("Products"), Favorites.class).build();
FirebaseRecyclerAdapter<Favorites, WishViewHolder> adapter
= new FirebaseRecyclerAdapter<Favorites, WishViewHolder>(options) {
@Override
protected void onBindViewHolder(@NonNull WishViewHolder holder, int position, @NonNull final Favorites model)
{
holder.wish_pr_price.setText("Price = " + model.getPrice() + "tk");
holder.wish_pr_name.setText(model.getPname());
// Tapping a card asks whether to remove the product from the wish list.
holder.itemView.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view)
{
CharSequence options[] = new CharSequence[]
{
"Yes",
"No"
};
AlertDialog.Builder builder = new AlertDialog.Builder(WishListActivity.this);
builder.setTitle("Want to Remove the Product from Wish List? ");
builder.setItems(options, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialogInterface, int i)
{
// i == 0 -> "Yes": delete this product node from Firebase.
if (i == 0)
{
wishListRef.child("User View")
.child(Prevalent.currentUser.getPhone())
.child("Products")
.child(model.getPid())
.removeValue()
.addOnCompleteListener(new OnCompleteListener<Void>() {
@Override
public void onComplete(@NonNull Task<Void> task)
{
if (task.isSuccessful())
{
Toast.makeText(WishListActivity.this, "Item removed from wish list.", Toast.LENGTH_SHORT).show();
// NOTE(review): relaunches the activity to refresh the list even though
// the Firebase adapter updates itself on data changes — confirm intent.
Intent intent = new Intent(WishListActivity.this, WishListActivity.class);
startActivity(intent);
}
}
});
}
else
{
// NOTE(review): choosing "No" calls finish(), closing the whole wish-list
// screen rather than just dismissing the dialog — likely unintended; confirm.
finish();
}
}
});
builder.show();
}
});
}
@NonNull
@Override
public WishViewHolder onCreateViewHolder(@NonNull ViewGroup parent, int viewType) {
//View view1 = LayoutInflater.from(parent.getContext()).inflate(R.layout.product_items_layout, parent, false);
View view = LayoutInflater.from(parent.getContext()).inflate(R.layout.wishlist_items_layout, parent, false);
WishViewHolder holder = new WishViewHolder(view);
return holder;
}
};
recyclerView.setAdapter(adapter);
// Start listening in onStart; NOTE(review): there is no matching
// adapter.stopListening() in onStop — verify lifecycle handling.
adapter.startListening();
}
}
|
/*
* Document : lists.js
* Author : andrestntx
* Description: Custom javascript code used in Forms Wizard page
*/
var VoterLists = function() {
var newVoter = function (voter) {
    // Render one voter as a clickable widget card. The whole voter record is
    // serialized into the data-voter attribute so the detail modal can read it.
    var pieces = [
        "<div class='col-sm-6 col-md-4 col-lg-3'>",
        "<a href='#modal-voter' data-voter='" + JSON.stringify(voter) + "' data-toggle='modal' data-id='" + voter.id + "' id='voter-" + voter.id + "' class='widget'>",
        "<div class='widget-content text-right clearfix' style='height: 108px;'>",
        "<img src='/images/placeholders/avatars/avatar9.jpg' alt='avatar' class='img-circle img-thumbnail img-thumbnail-avatar pull-left'>",
        "<h3 class='widget-heading h4'><strong>" + voter.name + "</strong></h3>",
        "<span class='text-muted'>" + voter.doc + "</span>",
        "</div>",
        "</a>",
        "</div>"
    ];
    return pieces.join('');
};
var votersHtml = function(voters) {
    // Concatenate the widget markup for every voter in the collection.
    var parts = [];
    $.each(voters, function(index, voter) {
        parts.push(newVoter(voter));
    });
    return parts.join('');
}
var getVoters = function(letter) {
    // Fetch the voters whose ordering key matches `letter` and render them
    // into the #show-voters grid with a fade-in animation.
    var onSuccess = function(data) {
        var parsed = jQuery.parseJSON(data);
        $('#show-voters').html(votersHtml(parsed.data));
        $('#show-voters div').addClass('animation-fadeInQuick');
    };
    var onError = function() {
        alert('falló la conexión');
    };
    $.ajax({
        url: '/database/voters?order=' + letter,
        method: 'GET',
        success: onSuccess,
        error: onError
    });
};
// Fade the voter's card out of the grid (400 ms), then remove its DOM node.
var deleteVoter = function (voterId) {
$("#voter-" + voterId).parent().fadeOut(400, function() {
$(this).remove();
});
};
// Ask the server to delete the voter; only remove the card from the page once
// the server confirms success. Uses the global CSRF `token`.
var postDeleteVoter = function (voterId) {
    $.ajax({
        url: '/database/voters/delete/' + voterId,
        data: {'_token': token},
        dataType:'json',
        method:'DELETE',
        success:function(data) {
            if(data['success']) {
                // BUG FIX: this previously called deleteVoter(productId);
                // `productId` is undefined in this file, so the handler threw a
                // ReferenceError and the card was never removed from the page.
                deleteVoter(voterId);
                //AppServices.notification('info', data['message']);
            }
            else{
                console.log('no se pudo');
                //AppServices.notification('danger', data['message']);
            }
        },
        error:function(){
            alert('fallo la conexion');
        }
    });
};
// Public API: init() wires the voter-detail modal and the letter filter.
return {
init: function() {
// Populate the detail modal from the clicked card's data-voter payload
// each time it is shown.
$('#modal-voter').on('show.bs.modal', function (event) {
var button = $(event.relatedTarget); // Button that triggered the modal
var voter = button.data('voter'); // Extract info from data-* attributes
var modal = $(this);
modal.find('.modal-title').text(voter.doc + ', ' + voter.name);
modal.find('#modal-place-address').text(voter.address + ', ' + voter.location.name);
modal.find('#modal-telephone').text(voter.telephone);
modal.find('#modal-a-telephone').attr("href", "tel:" + voter.telephone);
modal.find('#modal-email').text(voter.email);
modal.find('#modal-birhtday').text(voter.date_of_birth);
modal.find('#modal-description').text(voter.description);
modal.find('#modal-refers').text(voter.voters.length);
modal.find('#modal-diaries').text('Organizadas: ' + voter.organized_diaries.length + ', Delegado: ' + voter.delegated_diaries.length + ', Asistencias: ' + voter.diaries.length);
// Link to the diaries page only when the voter has at least one diary.
var sum_diaries = voter.delegated_diaries.length + voter.organized_diaries.length + voter.diaries.length;
if( sum_diaries > 0 )
{
$("#modal-diaries").attr("href", "/database/voters/diaries/"+voter.doc);
}
// Optional fields: only filled in when present on the record.
if(voter.occupations)
{
modal.find('#modal-occupation').text(voter.occupations.name);
}
if(voter.superior_voter)
{
modal.find('#modal-superior').text(voter.superior_voter.name + ', cel: ' + voter.superior_voter.telephone);
}
if(voter.polling_station)
{
modal.find('#modal-polling-station').text(voter.polling_station.description + ' - Mesa: ' + voter.table_number);
}
if(voter.polling_station_day)
{
modal.find('#modal-polling-station-day-d').text(voter.polling_station_day.description);
}
var communities = [];
$.each( voter.communities, function( key, community ) {
communities.push(community.name);
});
modal.find('#modal-communities').text(communities.join(', '));
var roles = [];
$.each( voter.roles, function( key, community ) {
roles.push(community.name);
});
// Role label depends on collaborator/delegate flags (1 = true).
if(voter.colaborator == 1 && voter.delegate == 1)
{
modal.find('#modal-roles').text('Delegado, ' + roles.join(', '));
}
else if(voter.colaborator == 1)
{
modal.find('#modal-roles').text(roles.join(', '));
}
else
{
modal.find('#modal-roles').text('Votante');
}
// Edit/team links carry the diary id when the page is diary-scoped.
var diary_id = $('#diary').data('diary');
if(diary_id)
{
$("#modal-edit").attr("href", "/database/voters/"+voter.doc+"/"+diary_id);
$("#modal-in").attr("href", "/database/voters/"+voter.id+"/add-to-team/"+diary_id);
$("#modal-out").attr("href", "/database/team/"+voter.doc+"/remove/"+diary_id);
}
else
{
$("#modal-edit").attr("href", "/database/voters/"+voter.doc);
$("#modal-in").attr("href", "/database/voters/"+voter.id+"/add-to-team");
$("#modal-out").attr("href", "/database/team/"+voter.doc+"/remove");
}
// Collaborators can only be removed from the team; others only added.
if(voter.colaborator == 1)
{
$("#modal-in").addClass('disabled');
$("#modal-out").removeClass('disabled');
}
else
{
$("#modal-out").addClass('disabled');
$("#modal-in").removeClass('disabled');
}
var buttonDelete = $('#modal-delete');
if(buttonDelete.length)
{
// NOTE(review): bind() runs on EVERY modal open, stacking an extra click
// handler each time — one click may fire several delete requests. Consider
// .off('click').on('click', ...) or .one(); confirm against observed behavior.
buttonDelete.bind("click", function() {
postDeleteVoter(voter.id);
//deleteModel('voter-' + voter.id);
});
}
var buttonDiaryRemove = $('#modal-diary-remove');
if(buttonDiaryRemove.length)
{
// NOTE(review): same handler-stacking issue as above, and deleteModel is not
// defined in this file — presumably a global helper; verify it exists.
buttonDiaryRemove.bind("click", function() {
deleteModel('voter-' + voter.id, 'form-diary-remove');
});
}
});
// Letter filter: clicking a letter reloads the voter grid for that letter.
$('#filter-voters a').on('click', function(){
getVoters($(this).text());
});
}
};
}();
|
import { namespaceActions } from './utils';
import {
pickBy,
} from 'lodash';
import {
DEFAULT_BIN_TYPE,
DEFAULT_INTERVAL,
DEFAULT_RANGES,
} from '@ncigdc/modern_components/ClinicalAnalysis/ClinicalVariableCard/helpers';
// Action-type table for this reducer — presumably each entry is prefixed with
// the 'sets' namespace by namespaceActions (implementation not visible here;
// confirm). Typed `any` because the helper's return type is not exported.
const sets: any = namespaceActions('sets', [
'ADD_ANALYSIS',
'REMOVE_ANALYSIS',
'REMOVE_ALL_ANALYSIS',
'ADD_CLINICAL_ANALYSIS_VARIABLE',
'REMOVE_CLINICAL_ANALYSIS_VARIABLE',
'UPDATE_CLINICAL_ANALYSIS_VARIABLE',
'UPDATE_CLINICAL_ANALYSIS_PROPERTY',
'UPDATE_CLINICAL_ANALYSIS_SET',
]);
// One saved clinical analysis: its variable cards live in displayVariables,
// keyed by field name.
interface IAnalysis {
created: string;
displayVariables?: any;
id: string;
message?: string;
sets: any;
type: string;
}
// Redux slice shape: the list of saved analyses.
interface IAnalysisState {
saved: IAnalysis[];
}
type TClinicalAnalysisProperty = 'name'; // only type mutable properties
// Payload for single-field actions (add/remove variable, property/set updates).
export interface IAnalysisPayload extends IAnalysis {
analysis?: IAnalysis;
continuousBinType?: 'default' | 'interval' | 'range';
customInterval?: any;
customRanges?: any;
fieldName?: string;
fieldType?: string;
id: string;
plotTypes?: 'categorical' | 'continuous';
property?: TClinicalAnalysisProperty;
scrollToCard?: boolean;
setId?: string;
setName?: string;
value?: string;
variable?: any;
variableKey?: string;
}
// Payload for UPDATE_CLINICAL_ANALYSIS_VARIABLE: merges newState into one card.
interface IAnalysisMultiPayload extends IAnalysis {
id: string;
fieldName: string;
newState: object;
}
interface IAnalysisAction {
type: string;
payload: IAnalysisPayload;
}
/**
 * Action creators for the clinical-analysis slice. Each one wraps its payload
 * with the matching namespaced action type from `sets`.
 */
function addAnalysis(payload: IAnalysis) {
  return { type: sets.ADD_ANALYSIS, payload };
}
function removeAnalysis(payload: IAnalysis) {
  return { type: sets.REMOVE_ANALYSIS, payload };
}
function removeAllAnalysis() {
  return { type: sets.REMOVE_ALL_ANALYSIS };
}
function addClinicalAnalysisVariable(payload: IAnalysisPayload) {
  return { type: sets.ADD_CLINICAL_ANALYSIS_VARIABLE, payload };
}
function removeClinicalAnalysisVariable(payload: IAnalysisPayload) {
  return { type: sets.REMOVE_CLINICAL_ANALYSIS_VARIABLE, payload };
}
function updateClinicalAnalysisVariable(payload: IAnalysisMultiPayload) {
  return { type: sets.UPDATE_CLINICAL_ANALYSIS_VARIABLE, payload };
}
function updateClinicalAnalysisProperty(payload: IAnalysisPayload) {
  return { type: sets.UPDATE_CLINICAL_ANALYSIS_PROPERTY, payload };
}
function updateClinicalAnalysisSet(payload: IAnalysisPayload) {
  return { type: sets.UPDATE_CLINICAL_ANALYSIS_SET, payload };
}
// Initial slice state: no saved analyses.
const initialState: IAnalysisState = {
saved: [],
};
// Card settings shared by every variable type.
const defaultVariableConfig = {
active_calculation: 'number',
active_chart: 'histogram',
active_survival: 'overall',
bins: {},
customSurvivalPlots: [],
isSurvivalCustom: false,
showOverallSurvival: false,
};
// Extra settings applied only to categorical variables.
const defaultCategoricalVariableConfig = {
customBinsId: '',
customBinsSetId: '',
};
// Extra settings applied only to continuous variables (defaults come from the
// ClinicalVariableCard helpers).
const defaultContinuousVariableConfig = {
continuousBinType: DEFAULT_BIN_TYPE,
customInterval: DEFAULT_INTERVAL,
customRanges: DEFAULT_RANGES,
};
interface ICurrentAnalysis {
  currentAnalysisIndex: number;
  currentAnalysis: IAnalysis;
}
type TGetCurrentAnalysis = (
  currentState: IAnalysisState,
  analysisId: string
) => ICurrentAnalysis;
// Look up a saved analysis by id. When absent, index is -1 and the analysis
// itself is undefined — callers guard on the index.
const getCurrentAnalysis: TGetCurrentAnalysis = (currentState, analysisId) => {
  const currentAnalysisIndex = currentState.saved.findIndex(
    analysis => analysis.id === analysisId
  );
  const currentAnalysis = currentState.saved[currentAnalysisIndex];
  return { currentAnalysisIndex, currentAnalysis };
};
// Reducer for saved clinical analyses. Every branch returns a fresh state
// object; per-analysis updates rebuild `saved` immutably as
// slice-before-index + updated-analysis + slice-after-index.
const reducer = (
state: IAnalysisState = initialState,
action: IAnalysisAction
) => {
switch (action.type) {
case sets.ADD_ANALYSIS: {
return Object.assign(
{},
state,
{ saved: state.saved.concat(action.payload) },
);
}
case sets.REMOVE_ANALYSIS: {
return Object.assign(
{},
state,
{ saved: state.saved.filter(
analysis => analysis.id !== action.payload.id
) },
);
}
case sets.REMOVE_ALL_ANALYSIS: {
return Object.assign(
{},
state,
{ saved: [] },
);
}
// adds new card to analysis
case sets.ADD_CLINICAL_ANALYSIS_VARIABLE: {
const { currentAnalysisIndex, currentAnalysis } = getCurrentAnalysis(
state,
action.payload.id
);
// New card config = shared defaults + per-plot-type defaults + payload type info.
return currentAnalysisIndex < 0
? state
: Object.assign(
{},
state,
{
saved: state.saved.slice(0, currentAnalysisIndex)
.concat(Object.assign(
{},
currentAnalysis,
{
displayVariables: Object.assign(
{},
currentAnalysis.displayVariables,
{
[action.payload.fieldName as string]: Object.assign(
{},
defaultVariableConfig,
action.payload.plotTypes === 'continuous'
? defaultContinuousVariableConfig
: defaultCategoricalVariableConfig,
{
type: action.payload.fieldType,
plotTypes: action.payload.plotTypes,
scrollToCard: action.payload.scrollToCard,
}
),
},
),
},
))
.concat(state.saved.slice(currentAnalysisIndex + 1, Infinity)),
}
);
}; // NOTE(review): stray ";" after this case block — a harmless empty statement.
// removes card from analysis
case sets.REMOVE_CLINICAL_ANALYSIS_VARIABLE: {
const { currentAnalysisIndex, currentAnalysis } = getCurrentAnalysis(
state,
action.payload.id
);
return currentAnalysisIndex < 0
? state
: Object.assign(
{},
state,
{
saved: state.saved.slice(0, currentAnalysisIndex)
.concat(Object.assign(
{},
currentAnalysis,
{
displayVariables: pickBy(
currentAnalysis.displayVariables,
(value, key) => key !== action.payload.fieldName
),
},
))
.concat(state.saved.slice(currentAnalysisIndex + 1, Infinity)),
},
);
}
// updates multiple values in displayVariables
case sets.UPDATE_CLINICAL_ANALYSIS_VARIABLE: {
const { currentAnalysisIndex, currentAnalysis } = getCurrentAnalysis(
state,
action.payload.id,
);
return currentAnalysisIndex < 0
? state
: Object.assign(
{},
state,
{
saved: state.saved.slice(0, currentAnalysisIndex)
.concat(Object.assign(
{},
currentAnalysis,
{
displayVariables: Object.assign(
{},
currentAnalysis.displayVariables,
{
[action.payload.fieldName as string]: Object.assign(
{},
currentAnalysis.displayVariables[action.payload.fieldName as string],
action.payload.variable,
),
},
),
},
))
.concat(state.saved.slice(currentAnalysisIndex + 1, Infinity)),
},
);
}
// updates non-variable key in analysis
case sets.UPDATE_CLINICAL_ANALYSIS_PROPERTY: {
const { currentAnalysisIndex, currentAnalysis } = getCurrentAnalysis(
state,
action.payload.id
);
return currentAnalysisIndex < 0
? state
: Object.assign(
{},
state,
{
saved: state.saved.slice(0, currentAnalysisIndex)
.concat(Object.assign(
{},
currentAnalysis,
{
[action.payload.property as TClinicalAnalysisProperty]: action.payload.value,
},
))
.concat(state.saved.slice(currentAnalysisIndex + 1, Infinity)),
}
);
}
case sets.UPDATE_CLINICAL_ANALYSIS_SET: {
const { currentAnalysisIndex, currentAnalysis } = getCurrentAnalysis(
state,
action.payload.id
);
// NOTE(review): this REPLACES the whole `sets` object with a single
// { case: { [setId]: setName } } entry rather than merging — confirm that
// dropping any other existing set entries is intended.
return currentAnalysisIndex < 0
? state
: Object.assign(
{},
state,
{
saved: state.saved.slice(0, currentAnalysisIndex)
.concat(Object.assign(
{},
currentAnalysis,
{
sets: {
case: {
[action.payload.setId as TClinicalAnalysisProperty]: action.payload.setName,
},
},
},
))
.concat(state.saved.slice(currentAnalysisIndex + 1, Infinity)),
},
);
}
default:
return state;
}
};
/*----------------------------------------------------------------------------*/
export {
addAnalysis,
addClinicalAnalysisVariable,
removeAllAnalysis,
removeAnalysis,
removeClinicalAnalysisVariable,
updateClinicalAnalysisProperty,
updateClinicalAnalysisSet,
updateClinicalAnalysisVariable,
};
export default reducer;
|
//===========================================
// Lumina-DE source code
// Copyright (c) 2013-2016, <NAME>
// Available under the 3-clause BSD license
// See the LICENSE file for full details
//===========================================
#include "LUtils.h"
#include "LuminaOS.h"
#include "LuminaXDG.h"
#include <QApplication>
#include <QtConcurrent>
#include <unistd.h>
/*inline QStringList ProcessRun(QString cmd, QStringList args){
//Assemble outputs
QStringList out; out << "1" << ""; //error code, string output
QProcess proc;
QProcessEnvironment env = QProcessEnvironment::systemEnvironment();
env.insert("LANG", "C");
env.insert("LC_MESSAGES", "C");
proc.setProcessEnvironment(env);
proc.setProcessChannelMode(QProcess::MergedChannels);
if(args.isEmpty()){
proc.start(cmd, QIODevice::ReadOnly);
}else{
proc.start(cmd,args ,QIODevice::ReadOnly);
}
QString info;
while(!proc.waitForFinished(1000)){
if(proc.state() == QProcess::NotRunning){ break; } //somehow missed the finished signal
QString tmp = proc.readAllStandardOutput();
if(tmp.isEmpty()){ proc.terminate(); }
else{ info.append(tmp); }
}
out[0] = QString::number(proc.exitCode());
out[1] = info+QString(proc.readAllStandardOutput());
return out;
}*/
//=============
// LUtils Functions
//=============
// Run `command` (optionally with arguments, a working directory and extra
// KEY=VALUE environment entries) and return its merged stdout+stderr output.
// `success` is set to true exactly when the process exit code is 0.
// NOTE(review): the wait loop below terminate()s the process whenever a
// 1-second window passes with no new output — long-running but silent
// commands will be killed. Confirm this is intended behavior.
QString LUtils::runCommand(bool &success, QString command, QStringList arguments, QString workdir, QStringList env){
QProcess proc;
proc.setProcessChannelMode(QProcess::MergedChannels); //need output
//First setup the process environment as necessary
QProcessEnvironment PE = QProcessEnvironment::systemEnvironment();
if(!env.isEmpty()){
for(int i=0; i<env.length(); i++){
// Only well-formed KEY=VALUE entries are applied; others are skipped.
if(!env[i].contains("=")){ continue; }
PE.insert(env[i].section("=",0,0), env[i].section("=",1,100));
}
}
proc.setProcessEnvironment(PE);
//if a working directory is specified, check it and use it
if(!workdir.isEmpty()){
proc.setWorkingDirectory(workdir);
}
//Now run the command (with any optional arguments)
if(arguments.isEmpty()){ proc.start(command); }
else{ proc.start(command, arguments); }
//Wait for the process to finish (but don't block the event loop)
QString info;
while(!proc.waitForFinished(1000)){
if(proc.state() == QProcess::NotRunning){ break; } //somehow missed the finished signal
QString tmp = proc.readAllStandardOutput();
if(tmp.isEmpty()){ proc.terminate(); }
else{ info.append(tmp); }
}
info.append(proc.readAllStandardOutput()); //make sure we don't miss anything in the output
success = (proc.exitCode()==0); //return success/failure
return info;
}
// Convenience wrapper around runCommand() that discards the output.
// NOTE(review): this returns the boolean 'success' cast to int — 1 on success,
// 0 on failure — which is the INVERSE of exit-code convention (0 = success).
// Callers must treat the return as a boolean; changing it here would break them.
int LUtils::runCmd(QString cmd, QStringList args){
  bool success;
  LUtils::runCommand(success, cmd, args);
  return success;
  /*QFuture<QStringList> future = QtConcurrent::run(ProcessRun, cmd, args);
  return future.result()[0].toInt(); //turn it back into an integer return code*/
}
// Run a command and return its merged stdout/stderr split into lines.
// The process exit status is checked internally but not reported to the caller.
QStringList LUtils::getCmdOutput(QString cmd, QStringList args){
  bool ok = false;
  const QString output = LUtils::runCommand(ok, cmd, args);
  return output.split("\n");
}
// Read a text file and return its contents as one string per line.
// Returns an empty list when the file cannot be opened.
QStringList LUtils::readFile(QString filepath){
  QStringList lines;
  QFile file(filepath);
  if( !file.open(QIODevice::Text | QIODevice::ReadOnly) ){ return lines; }
  QTextStream stream(&file);
  while( !stream.atEnd() ){
    lines << stream.readLine();
  }
  file.close();
  return lines;
}
// Write 'contents' (one entry per line) to 'filepath', truncating any previous
// contents. Refuses to replace an existing file unless 'overwrite' is set.
// Returns true on success.
bool LUtils::writeFile(QString filepath, QStringList contents, bool overwrite){
  QFile file(filepath);
  if(file.exists() && !overwrite){ return false; }
  if(contents.isEmpty()){ contents << "\n"; } //never write a zero-length file
  if( !file.open(QIODevice::WriteOnly | QIODevice::Truncate) ){ return false; }
  QTextStream out(&file);
  out << contents.join("\n");
  //always end with a new line
  if(!contents.last().isEmpty()){ out << "\n"; }
  file.close();
  return true;
}
// Check whether 'bin' names an executable file.
// NOTE: modifies 'bin' in place — quotes are stripped, relative names are
// resolved against $PATH, and on success 'bin' becomes the absolute path.
bool LUtils::isValidBinary(QString& bin){
  //Trim off any quotes
  if(bin.startsWith("\"") && bin.endsWith("\"")){ bin.chop(1); bin = bin.remove(0,1); }
  if(bin.startsWith("\'") && bin.endsWith("\'")){ bin.chop(1); bin = bin.remove(0,1); }
  //Now look for relative/absolute path
  if(!bin.startsWith("/")){
    //Relative path: search for it on the current "PATH" settings
    QStringList paths = QString(qgetenv("PATH")).split(":");
    for(int i=0; i<paths.length(); i++){
      if(QFile::exists(paths[i]+"/"+bin)){ bin = paths[i]+"/"+bin; break;}
    }
  }
  //bin should be the full path by now
  if(!bin.startsWith("/")){ return false; }
  QFileInfo info(bin);
  bool good = (info.exists() && info.isExecutable());
  if(good){ bin = info.absoluteFilePath(); }
  return good;
}
// Open (creating if needed) the INI settings file for 'org'/'name' under
// $XDG_CONFIG_HOME (or ~/.config). Caller owns the returned QSettings unless
// a 'parent' QObject is supplied.
// When running as root a separate "<name>_root.conf" file is used, seeded from
// the user's file on first use, so root never clobbers the user's settings.
QSettings* LUtils::openSettings(QString org, QString name, QObject *parent){
  //Start with the base configuration directory
  QString path = QString(getenv("XDG_CONFIG_HOME")).simplified();
  if(path.isEmpty()){ path = QDir::homePath()+"/.config"; }
  //Now add the organization directory
  path = path+"/"+org;
  QDir dir(path);
  if(!dir.exists()){ dir.mkpath(path); }
  //Now generate/check the name of the file
  unsigned int user = getuid();
  QString filepath = dir.absoluteFilePath(name+".conf");
  if(user==0){
    //special case - make sure we don't clobber the user-permissioned file
    QString rootfilepath = dir.absoluteFilePath(name+"_root.conf");
    if(!QFileInfo::exists(rootfilepath) && QFileInfo::exists(filepath)){
      QFile::copy(filepath, rootfilepath); //make a copy of the user settings before they start to diverge
    }
    return (new QSettings(rootfilepath, QSettings::IniFormat, parent));
  }else{
    return (new QSettings(filepath, QSettings::IniFormat, parent));
  }
}
// Return every "applications" directory (and its subdirectories) where
// *.desktop entries may live, based on $XDG_DATA_HOME / $XDG_DATA_DIRS with
// standard fallbacks.
QStringList LUtils::systemApplicationDirs(){
  //Returns a list of all the directories where *.desktop files can be found
  QStringList appDirs = QString(getenv("XDG_DATA_HOME")).split(":");
  appDirs << QString(getenv("XDG_DATA_DIRS")).split(":");
  //BUGFIX: splitting an unset/empty variable yields a single empty string, so
  // the list was never empty and the fallback below could never trigger.
  // Strip empty entries first so the standard locations get used.
  appDirs.removeAll("");
  if(appDirs.isEmpty()){ appDirs << "/usr/local/share" << "/usr/share" << LOS::AppPrefix()+"/share" << LOS::SysPrefix()+"/share" << L_SHAREDIR; }
  appDirs.removeDuplicates();
  //Now create a valid list
  QStringList out;
  for(int i=0; i<appDirs.length(); i++){
    if( QFile::exists(appDirs[i]+"/applications") ){
      out << appDirs[i]+"/applications";
      //Also check any subdirs within this directory
      // (looking at you KDE - stick to the standards!!)
      out << LUtils::listSubDirectories(appDirs[i]+"/applications");
    }
  }
  //qDebug() << "System Application Dirs:" << out;
  return out;
}
// Build the shell command line that opens terminal 'term' in directory
// 'dirpath', working around terminals whose "-e" handling is non-standard.
// NOTE(review): because the isValidBinary() check below is commented out, any
// 'term' that is not a .desktop file is silently replaced by "xterm".
QString LUtils::GenerateOpenTerminalExec(QString term, QString dirpath){
  //Check the input terminal application (default/fallback - determined by calling application)
  //if(!LUtils::isValidBinary(term)){
  if(term.endsWith(".desktop")){
    //Pull the binary name out of the shortcut
    XDGDesktop DF(term);
    if(DF.type == XDGDesktop::BAD){ term = "xterm"; }
    else{ term= DF.exec.section(" ",0,0); } //only take the binary name - not any other flags
  }else{
    term = "xterm"; //fallback
  }
  //}
  //Now create the calling command for the designated terminal
  // NOTE: While the "-e" routine is supposed to be universal, many terminals do not properly use it
  //   so add some special/known terminals here as necessary
  QString exec;
  // NOTE(review): stray debug output left in; consider removing once stable.
  qWarning() << " - Reached terminal initialization" << term;
  if(term=="mate-terminal" || term=="lxterminal" || term=="gnome-terminal"){
    exec = term+" --working-directory=\""+dirpath+"\"";
  }else if(term=="xfce4-terminal"){
    exec = term+" --default-working-directory=\""+dirpath+"\"";
  }else if(term=="konsole" || term == "qterminal"){
    exec = term+" --workdir \""+dirpath+"\"";
  }else{
    //-e is the parameter for most of the terminal appliction to execute an external command.
    //In this case we start a shell in the selected directory
    //Need the user's shell first
    QString shell = QString(getenv("SHELL"));
    if(!LUtils::isValidBinary(shell)){ shell = "/bin/sh"; } //universal fallback for a shell
    exec = term + " -e \"cd " + dirpath + " && " + shell + " \" ";
  }
  qDebug() << exec;
  return exec;
}
// Return the absolute paths of all subdirectories of 'dir', descending
// recursively when 'recursive' is true.
QStringList LUtils::listSubDirectories(QString dir, bool recursive){
  QStringList found;
  QDir base(dir);
  const QStringList children = base.entryList(QDir::NoDotAndDotDot | QDir::Dirs, QDir::Name);
  for(int i=0; i<children.length(); i++){
    const QString fullpath = base.absoluteFilePath(children[i]);
    found << fullpath;
    if(recursive){
      found << LUtils::listSubDirectories(fullpath, recursive);
    }
  }
  return found;
}
// Convert an input path to an absolute path (this does not check existence of anything).
// Handles "~" home expansion and "./" relative markers.
QString LUtils::PathToAbsolute(QString path){
  if(path.startsWith("/")){ return path; } //already an absolute path
  if(path.startsWith("~")){ path.replace(0,1,QDir::homePath()); }
  if(!path.startsWith("/")){
    //Must be a relative path
    //BUGFIX: strip the leading "./" marker with remove(0,2); the old call
    // path.remove(2) used the wrong QString::remove overload and never
    // stripped those two characters.
    if(path.startsWith("./")){ path = path.remove(0,2); }
    path.prepend( QDir::currentPath()+"/");
  }
  return path;
}
// Resolve an application reference to an absolute path:
// "~/" is expanded, existing/absolute paths are returned as-is, "*.desktop"
// names are searched in the XDG application dirs, and anything else is
// searched on $PATH. Returns the input unchanged when nothing matches.
QString LUtils::AppToAbsolute(QString path){
  if(path.startsWith("~/")){ path = path.replace("~/", QDir::homePath()+"/" ); }
  if(path.startsWith("/") || QFile::exists(path)){ return path; }
  if(path.endsWith(".desktop")){
    //Look in the XDG dirs
    QStringList dirs = systemApplicationDirs();
    for(int i=0; i<dirs.length(); i++){
      if(QFile::exists(dirs[i]+"/"+path)){ return (dirs[i]+"/"+path); }
    }
  }else{
    //Look on $PATH for the binary
    QStringList paths = QString(getenv("PATH")).split(":");
    for(int i=0; i<paths.length(); i++){
      if(QFile::exists(paths[i]+"/"+path)){ return (paths[i]+"/"+path); }
    }
  }
  return path;
}
// Return the list of known video file extensions (lowercase, no leading dot).
QStringList LUtils::videoExtensions() {
  static QStringList vidExtensions;
  //BUGFIX: populate the static list only once; previously every call appended
  // another copy of the extensions, so the list grew on each invocation.
  if(vidExtensions.isEmpty()){
    vidExtensions << "avi" << "mkv" << "mp4" << "mov" << "webm" << "wmv";
  }
  return vidExtensions;
}
// Return the image extensions supported by the installed Qt image plugins,
// either plain ("png") or as wildcard patterns ("*.png") when 'wildcards' is
// set. Both static lists are filled together on first use.
QStringList LUtils::imageExtensions(bool wildcards){
  //Note that all the image extensions are lowercase!!
  static QStringList imgExtensions;
  static QStringList imgExtensionsWC;
  if(imgExtensions.isEmpty()){
    QList<QByteArray> fmt = QImageReader::supportedImageFormats();
    for(int i=0; i<fmt.length(); i++){
      imgExtensionsWC << "*."+QString::fromLocal8Bit(fmt[i]);
      imgExtensions << QString::fromLocal8Bit(fmt[i]);
    }
  }
  if(wildcards){ return imgExtensionsWC; }
  return imgExtensions;
}
// Install the translation for 'appname' in the given (or system-detected)
// locale onto 'app', replacing the previous translator 'cTrans' if supplied.
// Also sets the Qt text codec for the locale's encoding.
// Returns the installed translator, or 0 when no translation was loaded.
QTranslator* LUtils::LoadTranslation(QApplication *app, QString appname, QString locale, QTranslator *cTrans){
   //Get the current localization
    QString langEnc = "UTF-8"; //default value
    QString langCode = locale; //provided locale
    if(langCode.isEmpty()){ langCode = getenv("LC_ALL"); }
    if(langCode.isEmpty()){ langCode = getenv("LANG"); }
    if(langCode.isEmpty()){ langCode = "en_US.UTF-8"; } //default to US english
    //See if the encoding is included and strip it out as necessary
    if(langCode.contains(".")){
      langEnc = langCode.section(".",-1);
      langCode = langCode.section(".",0,0);
    }
    //Now verify the encoding for the locale
    if(langCode =="C" || langCode=="POSIX" || langCode.isEmpty()){
      langEnc = "System"; //use the Qt system encoding
    }
    if(app !=0){
      qDebug() << "Loading Locale:" << appname << langCode << langEnc;
      //If an existing translator was provided, remove it first (will be replaced)
      if(cTrans!=0){ app->removeTranslator(cTrans); }
      //Setup the translator
      cTrans = new QTranslator();
      //Use the shortened locale code if specific code does not have a corresponding file
      if(!QFile::exists(LOS::LuminaShare()+"i18n/"+appname+"_" + langCode + ".qm") && langCode!="en_US" ){
        langCode.truncate( langCode.indexOf("_") );
      }
      QString filename = appname+"_"+langCode+".qm";
      //qDebug() << "FileName:" << filename << "Dir:" << LOS::LuminaShare()+"i18n/";
      if( cTrans->load( filename, LOS::LuminaShare()+"i18n/" ) ){
        app->installTranslator( cTrans );
      }else{
        //Translator could not be loaded for some reason
        cTrans = 0;
        if(langCode!="en_US"){
          qWarning() << " - Could not load Locale:" << langCode;
        }
      }
    }else{
      //Only going to set the encoding since no application given
      qDebug() << "Loading System Encoding:" << langEnc;
    }
    //Load current encoding for this locale
    QTextCodec::setCodecForLocale( QTextCodec::codecForName(langEnc.toUtf8()) );
    return cTrans;
}
// List the locale codes for which a lumina-desktop translation file exists in
// the shared i18n directory. Always includes the "en_US" default; sorted.
QStringList LUtils::knownLocales(){
  QDir i18n = QDir(LOS::LuminaShare()+"i18n");
  if( !i18n.exists() ){ return QStringList(); }
  QStringList files = i18n.entryList(QStringList() << "lumina-desktop_*.qm", QDir::Files, QDir::Name);
  if(files.isEmpty()){ return QStringList(); }
  //Strip the "lumina-desktop_" prefix and ".qm" suffix, leaving the locale tag
  for(int i=0; i<files.length(); i++){
    QString tag = files[i];
    tag.chop(3); //remove the ".qm" on the end
    files[i] = tag.section("_",1,50).simplified();
  }
  files << "en_US"; //default locale
  files.sort();
  return files;
}
// Adjust the locale environment variables for this process.
// An empty argument unsets the corresponding LC_* variable; if ONLY 'lang' is
// provided, every category is forced to it via LC_ALL. Values without an
// encoding get ".UTF-8" appended.
void LUtils::setLocaleEnv(QString lang, QString msg, QString time, QString num,QString money,QString collate, QString ctype){
  bool all = false;
  if(msg.isEmpty() && time.isEmpty() && num.isEmpty() && money.isEmpty() && collate.isEmpty() && ctype.isEmpty() ){
    if(lang.isEmpty()){ return; } //nothing to do - no changes requested
    all = true; //set everything to the "lang" value
  }
  //If no lang given, but others are given, then use the current setting
  if(lang.isEmpty()){ lang = getenv("LC_ALL"); }
  if(lang.isEmpty()){ lang = getenv("LANG"); }
  if(lang.isEmpty()){ lang = "en_US"; }
  // - LANG & LC_ALL
  if(!lang.contains(".")){ lang.append(".UTF-8"); }
  setenv("LANG",lang.toUtf8() ,1); //overwrite setting (this is always required as the fallback)
  if(all){ setenv("LC_ALL",lang.toUtf8() ,1); }
  else{ unsetenv("LC_ALL"); } //make sure the custom settings are used
  //Helper: set or unset a single LC_* category, appending ".UTF-8" as needed.
  // (Factors out the identical set/unset pattern previously repeated 6 times.)
  auto applyCategory = [](const char *var, QString val){
    if(val.isEmpty()){ unsetenv(var); }
    else{
      if(!val.contains(".")){ val.append(".UTF-8"); }
      setenv(var, val.toUtf8(), 1);
    }
  };
  applyCategory("LC_MESSAGES", msg);
  applyCategory("LC_TIME", time);
  applyCategory("LC_NUMERIC", num);
  applyCategory("LC_MONETARY", money);
  applyCategory("LC_COLLATE", collate);
  applyCategory("LC_CTYPE", ctype);
}
// Return the current locale code (e.g. "en_US") without any encoding suffix.
// Checks LC_ALL first, then LANG, and falls back to "en_US".
QString LUtils::currentLocale(){
  QString locale = getenv("LC_ALL");
  if(locale.isEmpty()){ locale = getenv("LANG"); }
  if(locale.isEmpty()){ locale = "en_US"; }
  //remove any encoding off the end (e.g. ".UTF-8")
  return locale.section(".",0,0);
}
// Convert a human-readable size string (e.g. "3.2M", "10 GB") into bytes.
// Units are 1024-based, up to petabytes. Returns 0.0 for empty input.
double LUtils::DisplaySizeToBytes(QString num){
  num = num.toLower().simplified();
  num = num.remove(" ");
  if(num.isEmpty()){ return 0.0; }
  if(num.endsWith("b")){ num.chop(1); } //remove the "bytes" marker (if there is one)
  //BUGFIX: the input may have been just "b" - avoid indexing an empty string below
  if(num.isEmpty()){ return 0.0; }
  QString lab = "b";
  if(!num[num.size()-1].isNumber()){
    lab = num.right(1); num.chop(1);
  }
  double N = num.toDouble();
  QStringList labs; labs <<"b"<<"k"<<"m"<<"g"<<"t"<<"p"; //go up to petabytes for now
  for(int i=0; i<labs.length(); i++){
    if(lab==labs[i]){ break; }//already at the right units - break out
    N = N*1024.0; //Move to the next unit of measurement
  }
  return N;
}
// Format a byte count as a short human-readable string (e.g. "3.5M", "120G").
// Units step by 1024; the numeric part keeps at most 3 significant digits.
QString LUtils::BytesToDisplaySize(qint64 ibytes){
  static QStringList labs = QStringList();
  if(labs.isEmpty()){ labs << "B" << "K" << "M" << "G" << "T" << "P"; }
  //Now get the dominant unit
  int c=0;
  double bytes = ibytes; //need to keep decimal places for calculations
  //BUGFIX: stop at the last label (c<labs.length()-1); the old bound let 'c'
  // reach labs.length() and index past the end of the list for huge inputs.
  while(bytes>=1000 && c<labs.length()-1 ){
    bytes = bytes/1024;
    c++;
  } //labs[c] is the unit
  //Now format the number (up to 3 digits, not including decimal places)
  QString num;
  if(bytes>=100){
    //No decimal places
    num = QString::number(qRound(bytes));
  }else if(bytes>=10){
    //need 1 decimal place
    num = QString::number( (qRound(bytes*10)/10.0) );
  }else if(bytes>=1){
    //need 2 decimal places
    num = QString::number( (qRound(bytes*100)/100.0) );
  }else{
    //Fully decimal (3 places)
    num = "0."+QString::number(qRound(bytes*1000));
  }
  return (num+labs[c]);
}
// Format a duration in seconds as "Xh Ym Zs" style text ("??" for negatives).
QString LUtils::SecondsToDisplay(int secs){
  if(secs < 0){ return "??"; }
  QString rem; //remaining
  //BUGFIX: use >= so exact boundaries roll over to the larger unit
  // (3600s now shows "1h 0s" instead of "60m 0s"; 60s shows "1m 0s").
  if(secs >= 3600){
    int hours = secs/3600;
    rem.append( QString::number(hours)+"h ");
    secs = secs - (hours*3600);
  }
  if(secs >= 60){
    int min = secs/60;
    rem.append( QString::number(min)+"m ");
    secs = secs - (min*60);
  }
  if(secs > 0){
    rem.append( QString::number(secs)+"s");
  }else{
    rem.append( "0s" );
  }
  return rem;
}
|
<!-- Basic credential form; empty action submits back to the current URL.
     NOTE(review): no method attribute means the browser default (GET) is used,
     which would put the password in the URL — confirm and consider method="post". -->
<form action="">
    <label for="username">Username:</label>
    <input type="text" name="username" id="username" required>
    <label for="password">Password:</label>
    <input type="password" name="password" id="password" required>
    <input type="submit" value="Submit">
</form>
#!/bin/bash
# Install the Zoa Max-for-Live device and its helper files into Ableton's
# default Max MIDI Effect preset folder on macOS.
set -e  # abort immediately if any copy fails, so a partial install is noticed
# Default max for live device path on mac
root=~/"Music/Ableton/User Library/Presets/MIDI Effects/Max MIDI Effect/"
# Copy zoa and js
# TODO embed js in patch
cp ./Zoa.amxd "$root"
cp ./stringFormat.js "$root"
cp ./MockZoaHardware.amxd "$root"
import { Either, right } from '@core/logic/Either'
import { ISendersRepository } from '@modules/senders/repositories/ISendersRepository'
/** Input for RemoveSender.execute: id of the sender to delete. */
type RemoveSenderRequest = {
  senderId: string
}
/** Result type; execute() below always produces right(null). */
type RemoveSenderResponse = Either<Error, null>
/**
 * Use case: permanently remove a sender.
 * Delegates deletion to the injected senders repository and resolves to
 * right(null); no failure path is produced here.
 */
export class RemoveSender {
  constructor(private sendersRepository: ISendersRepository) {}
  async execute({
    senderId,
  }: RemoveSenderRequest): Promise<RemoveSenderResponse> {
    await this.sendersRepository.delete(senderId)
    return right(null)
  }
}
|
#!/bin/bash
# ========== Experiment Seq. Idx. 171 / 8.6.2 / N. 27/0/0 - _S=8.6.2 D1_N=27 a=1 b=1 c=-1 d=1 e=1 f=-1 D3_N=0 g=-1 h=-1 i=-1 D4_N=0 j=0 ==========
# Generated experiment driver. Exit codes signal the scheduler:
#   160 success, 161 generic error, 162 dirty git tree, 163 already finished,
#   164 missing inputs, 165 lock held elsewhere.
set -u
# Prints header
echo -e '\n\n========== Experiment Seq. Idx. 171 / 8.6.2 / N. 27/0/0 - _S=8.6.2 D1_N=27 a=1 b=1 c=-1 d=1 e=1 f=-1 D3_N=0 g=-1 h=-1 i=-1 D4_N=0 j=0 ==========\n\n'
# Template sanity check: this treatment was generated without an SVM layer.
if [[ "No" == "Yes" ]]; then
    echo 'FATAL: This treatment included an SVM layer.'>&2
    echo ' Something very wrong happened!'>&2
    exit 161
fi
# Prepares all environment variables
JBHI_DIR="$HOME/jbhi-special-issue"
DATASET_DIR="$JBHI_DIR/data/fulltrain.299.tfr"
MODEL_DIR="$JBHI_DIR/models/deep.27"
RESULTS_DIR="$JBHI_DIR/results"
RESULTS_PREFIX="$RESULTS_DIR/deep.27.layer.0.test.0.index.614.nosvm"
RESULTS_PATH="$RESULTS_PREFIX.results.txt"
# ...variables expected by jbhi-checks.include.sh and jbhi-footer.include.sh
SOURCES_GIT_DIR="$JBHI_DIR/jbhi-special-issue"
LIST_OF_INPUTS="$DATASET_DIR/finish.txt:$MODEL_DIR/finish.txt"
START_PATH="$RESULTS_PREFIX.start.txt"
FINISH_PATH="$RESULTS_PREFIX.finish.txt"
LOCK_PATH="$RESULTS_PREFIX.running.lock"
LAST_OUTPUT="$RESULTS_PATH"
# EXPERIMENT_STATUS=1
# STARTED_BEFORE=No
mkdir -p "$RESULTS_DIR"
#
# Assumes that the following environment variables where initialized
# SOURCES_GIT_DIR="$JBHI_DIR/jbhi-special-issue"
# LIST_OF_INPUTS="$DATASET_DIR/finish.txt:$MODELS_DIR/finish.txt:"
# START_PATH="$OUTPUT_DIR/start.txt"
# FINISH_PATH="$OUTPUT_DIR/finish.txt"
# LOCK_PATH="$OUTPUT_DIR/running.lock"
# LAST_OUTPUT="$MODEL_DIR/[[[:D1_MAX_NUMBER_OF_STEPS:]]].meta"
EXPERIMENT_STATUS=1
STARTED_BEFORE=No
# Checks if code is stable, otherwise alerts scheduler
# (experiments only run against a committed tree so results are reproducible)
pushd "$SOURCES_GIT_DIR" >/dev/null
GIT_STATUS=$(git status --porcelain)
GIT_COMMIT=$(git log | head -n 1)
popd >/dev/null
if [ "$GIT_STATUS" != "" ]; then
    echo 'FATAL: there are uncommitted changes in your git sources file' >&2
    echo ' for reproducibility, experiments only run on committed changes' >&2
    echo >&2
    echo ' Git status returned:'>&2
    echo "$GIT_STATUS" >&2
    exit 162
fi
# The experiment is already finished - exits with special code so scheduler won't retry
if [[ "$FINISH_PATH" != "-" ]]; then
    if [[ -e "$FINISH_PATH" ]]; then
        echo 'INFO: this experiment has already finished' >&2
        exit 163
    fi
fi
# The experiment is not ready to run due to dependencies - alerts scheduler
if [[ "$LIST_OF_INPUTS" != "" ]]; then
    # BUGFIX: split on ':' with a prefixed-IFS `read` so IFS changes only for
    # that one command; the old `IFS=':' tokens_of_input=( $LIST_OF_INPUTS )`
    # left IFS=':' in effect for the remainder of the script, silently changing
    # all later unquoted word splitting.
    IFS=':' read -r -a tokens_of_input <<< "$LIST_OF_INPUTS"
    input_missing=No
    for input_to_check in "${tokens_of_input[@]}"; do
        if [[ ! -e "$input_to_check" ]]; then
            echo "ERROR: input $input_to_check missing for this experiment" >&2
            input_missing=Yes
        fi
    done
    if [[ "$input_missing" != No ]]; then
        exit 164
    fi
fi
# Sets trap to return error code if script is interrupted before successful finish
LOCK_SUCCESS=No
FINISH_STATUS=161
# Runs on every exit: releases the lock (if held), reports the outcome, and on
# failure removes the finish marker so the scheduler can retry.
function finish_trap {
    if [[ "$LOCK_SUCCESS" == "Yes" ]]; then
        rmdir "$LOCK_PATH" &> /dev/null
    fi
    if [[ "$FINISH_STATUS" == "165" ]]; then
        echo 'WARNING: experiment discontinued because other process holds its lock' >&2
    else
        if [[ "$FINISH_STATUS" == "160" ]]; then
            echo 'INFO: experiment finished successfully' >&2
        else
            [[ "$FINISH_PATH" != "-" ]] && rm -f "$FINISH_PATH"
            echo 'ERROR: an error occurred while executing the experiment' >&2
        fi
    fi
    exit "$FINISH_STATUS"
}
trap finish_trap EXIT
# While running, locks experiment so other parallel threads won't attempt to run it too
# (mkdir is atomic, so it doubles as a portable mutex)
if mkdir "$LOCK_PATH" --mode=u=rwx,g=rx,o=rx &>/dev/null; then
    LOCK_SUCCESS=Yes
else
    echo 'WARNING: this experiment is already being executed elsewhere' >&2
    FINISH_STATUS="165"
    exit
fi
# If the experiment was started before, do any cleanup necessary
if [[ "$START_PATH" != "-" ]]; then
    if [[ -e "$START_PATH" ]]; then
        echo 'WARNING: this experiment is being restarted' >&2
        STARTED_BEFORE=Yes
    fi
    #...marks start
    date -u >> "$START_PATH"
    echo GIT "$GIT_COMMIT" >> "$START_PATH"
fi
# If the experiment was started before, do any cleanup necessary
if [[ "$STARTED_BEFORE" == "Yes" ]]; then
    echo -n
fi
#...gets closest checkpoint file
# (picks the checkpoint whose step number is numerically closest to t=20000)
MODEL_CHECKPOINT=$(ls "$MODEL_DIR/"model.ckpt-*.index | \
    sed 's/.*ckpt-\([0-9]*\)\..*/\1/' | \
    sort -n | \
    awk -v c=1 -v t=20000 \
    'NR==1{d=$c-t;d=d<0?-d:d;v=$c;next}{m=$c-t;m=m<0?-m:m}m<d{d=m;v=$c}END{print v}')
MODEL_PATH="$MODEL_DIR/model.ckpt-$MODEL_CHECKPOINT"
echo "$MODEL_PATH" >> "$START_PATH"
#...performs prediction
echo Testing on "$MODEL_PATH"
python \
    "$SOURCES_GIT_DIR/predict_image_classifier.py" \
    --model_name="inception_v4" \
    --checkpoint_path="$MODEL_PATH" \
    --dataset_name=skin_lesions \
    --task_name=label \
    --dataset_split_name=test \
    --preprocessing_name=dermatologic \
    --aggressive_augmentation="True" \
    --add_rotations="True" \
    --minimum_area_to_crop="0.20" \
    --normalize_per_image="1" \
    --batch_size=1 \
    --id_field_name=id \
    --pool_scores=avg \
    --eval_replicas="1" \
    --output_file="$RESULTS_PATH" \
    --dataset_dir="$DATASET_DIR"
# Tip: leave last the arguments that make the command fail if they're absent,
# so if there's a typo or forgotten \ the entire thing fails
EXPERIMENT_STATUS="$?"
#
#...starts training
# Marks success (exit code 160 via the trap) only when the prediction succeeded
# AND its expected output file exists.
if [[ "$EXPERIMENT_STATUS" == "0" ]]; then
    if [[ "$LAST_OUTPUT" == "" || -e "$LAST_OUTPUT" ]]; then
        if [[ "$FINISH_PATH" != "-" ]]; then
            date -u >> "$FINISH_PATH"
            echo GIT "$GIT_COMMIT" >> "$FINISH_PATH"
        fi
        FINISH_STATUS="160"
    fi
fi
|
/**
 * Base class for anything placed on the scene: owns a DOM node, a 2D
 * position, and a Rect wrapper around that node.
 */
class Actor {
  constructor() {
    this.DOM = document.createElement('div');
    this.position = new Vector2(0, 0);
    this.rect = new Rect(this.DOM);
  }
  /** Sync the DOM node's CSS top/left with the current position. */
  render() {
    const { x, y } = this.position;
    this.DOM.style.top = `${y}px`;
    this.DOM.style.left = `${x}px`;
  }
  /** Detach this actor from the global scene. */
  remove() {
    SCENE.removeElement(this);
  }
}
|
#!/bin/bash
# Make the cheetah sources importable by Python.
export PYTHONPATH=~/vshare/cheetah/:$PYTHONPATH
# Point the dynamic loader at the Spack-built ADIOS2 libraries.
export LD_LIBRARY_PATH=/home/kmehta/spack/opt/spack/linux-debian9-x86_64/gcc-6.3.0/adios2-develop-x4npn6vhoxlzt37b6bgxw46vbxpeieu4/lib/:$LD_LIBRARY_PATH
|
<reponame>Flytrex/mavlink
package io.dronefleet.mavlink.common;
import io.dronefleet.mavlink.annotations.MavlinkFieldInfo;
import io.dronefleet.mavlink.annotations.MavlinkMessageBuilder;
import io.dronefleet.mavlink.annotations.MavlinkMessageInfo;
import java.lang.Object;
import java.lang.Override;
import java.lang.String;
import java.util.Objects;
/**
* The filtered local position (e.g. fused computer vision and accelerometers). Coordinate
* frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention)
*/
@MavlinkMessageInfo(
        id = 32,
        crc = 185,
        description = "The filtered local position (e.g. fused computer vision and accelerometers). Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention)"
)
public final class LocalPositionNed {
    private final long timeBootMs;

    private final float x;

    private final float y;

    private final float z;

    private final float vx;

    private final float vy;

    private final float vz;

    // Instances are immutable; construct via builder().
    private LocalPositionNed(long timeBootMs, float x, float y, float z, float vx, float vy,
            float vz) {
        this.timeBootMs = timeBootMs;
        this.x = x;
        this.y = y;
        this.z = z;
        this.vx = vx;
        this.vy = vy;
        this.vz = vz;
    }

    /**
     * Returns a builder instance for this message.
     */
    @MavlinkMessageBuilder
    public static Builder builder() {
        return new Builder();
    }

    /**
     * Timestamp (time since system boot).
     */
    @MavlinkFieldInfo(
            position = 1,
            unitSize = 4,
            description = "Timestamp (time since system boot)."
    )
    public final long timeBootMs() {
        return this.timeBootMs;
    }

    /**
     * X Position
     */
    @MavlinkFieldInfo(
            position = 2,
            unitSize = 4,
            description = "X Position"
    )
    public final float x() {
        return this.x;
    }

    /**
     * Y Position
     */
    @MavlinkFieldInfo(
            position = 3,
            unitSize = 4,
            description = "Y Position"
    )
    public final float y() {
        return this.y;
    }

    /**
     * Z Position
     */
    @MavlinkFieldInfo(
            position = 4,
            unitSize = 4,
            description = "Z Position"
    )
    public final float z() {
        return this.z;
    }

    /**
     * X Speed
     */
    @MavlinkFieldInfo(
            position = 5,
            unitSize = 4,
            description = "X Speed"
    )
    public final float vx() {
        return this.vx;
    }

    /**
     * Y Speed
     */
    @MavlinkFieldInfo(
            position = 6,
            unitSize = 4,
            description = "Y Speed"
    )
    public final float vy() {
        return this.vy;
    }

    /**
     * Z Speed
     */
    @MavlinkFieldInfo(
            position = 7,
            unitSize = 4,
            description = "Z Speed"
    )
    public final float vz() {
        return this.vz;
    }

    // NOTE: Objects.deepEquals autoboxes the primitive fields, so float
    // comparison follows Float.equals semantics (NaN equals NaN, 0.0 != -0.0),
    // not the == operator.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || !getClass().equals(o.getClass())) return false;
        LocalPositionNed other = (LocalPositionNed)o;
        if (!Objects.deepEquals(timeBootMs, other.timeBootMs)) return false;
        if (!Objects.deepEquals(x, other.x)) return false;
        if (!Objects.deepEquals(y, other.y)) return false;
        if (!Objects.deepEquals(z, other.z)) return false;
        if (!Objects.deepEquals(vx, other.vx)) return false;
        if (!Objects.deepEquals(vy, other.vy)) return false;
        if (!Objects.deepEquals(vz, other.vz)) return false;
        return true;
    }

    // Consistent with equals(): combines the boxed field hashes.
    @Override
    public int hashCode() {
        int result = 0;
        result = 31 * result + Objects.hashCode(timeBootMs);
        result = 31 * result + Objects.hashCode(x);
        result = 31 * result + Objects.hashCode(y);
        result = 31 * result + Objects.hashCode(z);
        result = 31 * result + Objects.hashCode(vx);
        result = 31 * result + Objects.hashCode(vy);
        result = 31 * result + Objects.hashCode(vz);
        return result;
    }

    @Override
    public String toString() {
        return "LocalPositionNed{timeBootMs=" + timeBootMs
                 + ", x=" + x
                 + ", y=" + y
                 + ", z=" + z
                 + ", vx=" + vx
                 + ", vy=" + vy
                 + ", vz=" + vz + "}";
    }

    // Mutable builder for LocalPositionNed; all fields default to 0.
    public static final class Builder {
        private long timeBootMs;

        private float x;

        private float y;

        private float z;

        private float vx;

        private float vy;

        private float vz;

        /**
         * Timestamp (time since system boot).
         */
        @MavlinkFieldInfo(
                position = 1,
                unitSize = 4,
                description = "Timestamp (time since system boot)."
        )
        public final Builder timeBootMs(long timeBootMs) {
            this.timeBootMs = timeBootMs;
            return this;
        }

        /**
         * X Position
         */
        @MavlinkFieldInfo(
                position = 2,
                unitSize = 4,
                description = "X Position"
        )
        public final Builder x(float x) {
            this.x = x;
            return this;
        }

        /**
         * Y Position
         */
        @MavlinkFieldInfo(
                position = 3,
                unitSize = 4,
                description = "Y Position"
        )
        public final Builder y(float y) {
            this.y = y;
            return this;
        }

        /**
         * Z Position
         */
        @MavlinkFieldInfo(
                position = 4,
                unitSize = 4,
                description = "Z Position"
        )
        public final Builder z(float z) {
            this.z = z;
            return this;
        }

        /**
         * X Speed
         */
        @MavlinkFieldInfo(
                position = 5,
                unitSize = 4,
                description = "X Speed"
        )
        public final Builder vx(float vx) {
            this.vx = vx;
            return this;
        }

        /**
         * Y Speed
         */
        @MavlinkFieldInfo(
                position = 6,
                unitSize = 4,
                description = "Y Speed"
        )
        public final Builder vy(float vy) {
            this.vy = vy;
            return this;
        }

        /**
         * Z Speed
         */
        @MavlinkFieldInfo(
                position = 7,
                unitSize = 4,
                description = "Z Speed"
        )
        public final Builder vz(float vz) {
            this.vz = vz;
            return this;
        }

        public final LocalPositionNed build() {
            return new LocalPositionNed(timeBootMs, x, y, z, vx, vy, vz);
        }
    }
}
|
<reponame>STShenZhaoliang/java-day-by-day<gh_stars>0
package com.st.treeset;
import java.util.Comparator;
import java.util.TreeSet;
/*
    Requirement:
        Store several students (name, Chinese score, math score) in a TreeSet
        and iterate over the collection.
        Ordering requirement: by total score, from highest to lowest.
    Approach:
        1: Define the Student class
        2: Create the TreeSet, ordered by a supplied comparator
        3: Create the student objects
        4: Add the student objects to the set
        5: Iterate over the set
 */
public class TreeSetDemo {
    public static void main(String[] args) {
        //Create the TreeSet; ordering is defined by the comparator below
        TreeSet<Student> ts = new TreeSet<Student>(new Comparator<Student>() {
            @Override
            public int compare(Student s1, Student s2) {
//                int num = (s2.getChinese()+s2.getMath())-(s1.getChinese()+s1.getMath());
                //primary key: total score, descending
                int num = s2.getSum() - s1.getSum();
                //tie-breakers: Chinese score ascending, then name — without them
                //TreeSet would treat equal totals as duplicates and drop entries
                int num2 = num == 0 ? s1.getChinese() - s2.getChinese() : num;
                int num3 = num2 == 0 ? s1.getName().compareTo(s2.getName()) : num2;
                return num3;
            }
        });
        //Create the student objects
        Student s1 = new Student("林青霞", 98, 100);
        Student s2 = new Student("张曼玉", 95, 95);
        Student s3 = new Student("王祖贤", 100, 93);
        Student s4 = new Student("柳岩", 100, 97);
        Student s5 = new Student("风清扬", 98, 98);
        Student s6 = new Student("左冷禅", 97, 99);
//        Student s7 = new Student("左冷禅", 97, 99);
        Student s7 = new Student("赵云", 97, 99);
        //Add the student objects to the set
        ts.add(s1);
        ts.add(s2);
        ts.add(s3);
        ts.add(s4);
        ts.add(s5);
        ts.add(s6);
        ts.add(s7);
        //Iterate over the set
        for (Student s : ts) {
            System.out.println(s.getName() + "," + s.getChinese() + "," + s.getMath() + "," + s.getSum());
        }
    }
}
|
# Rails integration point for the date_time_fields gem.
module DateTimeFields
  class Railtie < Rails::Railtie
    # On boot, mix the gem's form helpers and validations into the
    # ActiveRecord / ActionView / ActiveModel base classes.
    initializer "gems.date_time_fields" do
      ::ActiveRecord::Base.send :extend, DateTimeFields::ActiveRecord::ClassMethods
      ::ActionView::Base.send :include, DateTimeFields::ActionView::FormOptionsHelper
      ::ActionView::Base.send :include, DateTimeFields::ActionView::FormTagHelper
      ::ActionView::Helpers::FormBuilder.send :include, DateTimeFields::ActionView::FormBuilder
      ::ActionView::Helpers::InstanceTag.send :include, DateTimeFields::ActionView::InstanceTag
      ::ActiveModel::Validations.send :include, DateTimeFields::ActiveModel::Validations
      ::ActiveRecord::Base.send :include, DateTimeFields::ActiveModel::Validations
    end
  end
end
|
#!/bin/bash
# Build the Hugo site with the wxt theme and push the generated output to the
# GitHub Pages repository (checked out as the ./public submodule).
# Usage: deploy.sh [commit message]
THEME_NAME=wxt
git remote set-url origin "https://${GH_TOKEN}@github.com/intalgart/intalgart.github.io.git"
git submodule add -f "https://${GH_TOKEN}@github.com/intalgart/intalgart.github.io.git" public
echo -e "\033[0;32mDeploying updates to Github...\033[0m"
# Build the project; abort the deploy rather than pushing a stale/partial build.
hugo --theme="$THEME_NAME" --buildDrafts || exit 1
# Go to the generated output folder; abort rather than committing the wrong dir.
cd public || exit 1
# Add changes to git.
git add -A
# Commit changes, using the first script argument as the message when given.
msg="rebuilding site $(date)"
if [ $# -eq 1 ]
  then msg="$1"
fi
git commit -m "$msg"
# Push the built site.
git push origin master
# Come back to the source root.
cd ..
|
package istu.bacs.web.problem;
/**
 * Thrown when a requested problem cannot be found.
 */
public class ProblemNotFoundException extends RuntimeException {
    /**
     * @param message detail describing which problem was not found
     */
    public ProblemNotFoundException(String message) {
        super(message);
    }
}
package de.plushnikov.lombok.tests;
import de.plushnikov.lombok.LombokParsingTestCase;
import java.io.IOException;
/**
 * Unit tests for IntelliJPlugin for Lombok, based on lombok test classes
 * For this to work, the correct system property idea.home.path needs to be passed to the test runner.
 *
 * Each test delegates to doTest(); per the base class the test data is
 * presumably resolved from the method name (TODO confirm in
 * LombokParsingTestCase).
 */
public class GetterTestCase extends LombokParsingTestCase {
  public void testGetterAccessLevel() throws IOException {
    doTest();
  }

  public void testGetterAlreadyExists() throws IOException {
    doTest();
  }

  public void testGetterBoolean() throws IOException {
    doTest();
  }

  public void testGetterDeprecated() throws IOException {
    doTest();
  }

  public void testGetterEnum() throws IOException {
    doTest();
  }

  public void testGetterLazy() throws IOException {
    //TODO known problem, try to fix later
    doTest();
  }

  public void testGetterLazyBoolean() throws IOException {
    //TODO known problem, try to fix later
    doTest();
  }

  public void testGetterLazyEahcToString() throws IOException {
    //TODO known problem, try to fix later
    doTest();
  }

  public void testGetterLazyInvalid() throws IOException {
    doTest();
  }

  public void testGetterLazyNative() throws IOException {
    //TODO known problem, try to fix later
    doTest();
  }

  public void testGetterNone() throws IOException {
    doTest();
  }

  public void testGetterOnClass() throws IOException {
    doTest();
  }

  public void testGetterOnMethod() throws IOException {
    doTest();
  }

  public void testGetterOnMethodErrors() throws IOException {
    doTest();
  }

  public void testGetterOnMethodErrors2() throws IOException {
    doTest();
  }

  public void testGetterOnStatic() throws IOException {
    doTest();
  }

  public void testGetterPlain() throws IOException {
    doTest();
  }

  public void testGetterWithDollar() throws IOException {
    doTest();
  }

  public void testMultiFieldGetter() throws IOException {
    doTest();
  }

  public void testTrickyTypeResolution() throws IOException {
    doTest();
  }

  public void testClassNamedAfterGetter() throws IOException {
    doTest();
  }

  public void testCommentsInterspersed() throws IOException {
    doTest();
  }
}
#include "vtpch.h"
#include "Platform/OpenGL/OpenGLContext.h"
#include "GLFW/glfw3.h"
#include <glad/glad.h>
#include <gl/GL.h>
namespace Vortex
{
OpenGLContext::OpenGLContext(GLFWwindow* windowHandle)
:m_WindowHandle(windowHandle)
{
VT_CORE_ASSERT(windowHandle, "Window handle is null!");
}
// Makes the window's GL context current, loads GL entry points through glad,
// logs driver info, and (assert builds only) enforces a minimum GL version.
void OpenGLContext::Init()
{
    // A current context is required before glad can resolve GL functions.
    glfwMakeContextCurrent(m_WindowHandle);
    int status = gladLoadGLLoader((GLADloadproc)glfwGetProcAddress);
    VT_CORE_ASSERT(status, "Failed to initialize Glad!");

    VT_CORE_INFO("OpenGL Info:");
    VT_CORE_INFO(" Vendor: {0}", glGetString(GL_VENDOR));
    VT_CORE_INFO(" Renderer: {0}", glGetString(GL_RENDERER));
    VT_CORE_INFO(" Version: {0}", glGetString(GL_VERSION));

#ifdef VT_ENABLE_ASSERTS
    int versionMajor;
    int versionMinor;
    glGetIntegerv(GL_MAJOR_VERSION, &versionMajor);
    glGetIntegerv(GL_MINOR_VERSION, &versionMinor);

    // Engine requires GL 4.5+ (e.g. for DSA-style APIs).
    VT_CORE_ASSERT(versionMajor > 4 ||
        (versionMajor == 4 && versionMinor >= 5),
        "Vortex requires at least OpenGL version 4.5!");
#endif
}
// Presents the back buffer of the managed window.
void OpenGLContext::SwapBuffers()
{
    glfwSwapBuffers(m_WindowHandle);
}
}
|
#!/bin/bash
# Release helper: bumps a crate's version (and its dependents), commits, tags,
# and publishes the affected crates to crates.io.
# Usage: <script> CRATE_NAME {major|minor|patch} GIT_EMAIL
set -eux

CRATE_NAME="$1"
VERSION_PART="$2"
GIT_EMAIL="$3"

# Filled by bump_version: tags to create, and crates to publish in order.
declare -a GIT_TAGS
declare -a CRATES
# Commits the version bumps as the "robot" user and creates one git tag per
# entry accumulated in GIT_TAGS by bump_version.
function git_set_tags(){
    git config user.name "robot"
    git config user.email "$GIT_EMAIL"
    git commit -am "bump version for $CRATE_NAME, $VERSION_PART"
    local GIT_TAG
    for GIT_TAG in "${GIT_TAGS[@]}"; do
        git tag "$GIT_TAG"
    done
}
# publish_crate CRATE_NAME
# Runs `cargo publish` from the crate directory, retrying up to 10 extra times
# with a 60s pause between attempts (a just-published dependency may not be
# visible on crates.io immediately). Returns 1 if every attempt fails.
# The body runs in a subshell so the `cd` does not leak to the caller.
function publish_crate() {
    local CRATE_NAME="$1"
    (
        cd "$CRATE_NAME"
        local SUCCESS=0
        for TRY_COUNTER in $(seq 0 10); do
            [ "$TRY_COUNTER" != "0" ] && echo "retry count: $TRY_COUNTER" && sleep 60
            if cargo publish; then
                SUCCESS=1
                break
            fi
        done
        if [ "$SUCCESS" == "0" ]; then
            echo "Publish crate '$CRATE_NAME' failed."
            return 1
        fi
    )
}
# version_get CRATE_NAME
# Prints the crate version: the quoted value of the first `version = "..."`
# line in CRATE_NAME/Cargo.toml.
function version_get() {
    local CRATE_NAME="$1"
    local MANIFEST="$CRATE_NAME/Cargo.toml"
    grep "^version\\s*=" "$MANIFEST" | cut -d '"' -f 2
}
# version_increment VERSION PART
# Prints VERSION bumped according to semver rules:
#   major -> X+1.0.0, minor -> X.Y+1.0, patch -> X.Y.Z+1.
# An unrecognized PART falls through and reprints the version unchanged.
function version_increment()
{
    local VERSION UP_PART VERSION_MAJOR VERSION_MINOR VERSION_PATCH
    VERSION="$1"
    UP_PART="$2"
    VERSION_MAJOR=$(echo "$VERSION" | cut -d '.' -f 1)
    VERSION_MINOR=$(echo "$VERSION" | cut -d '.' -f 2)
    VERSION_PATCH=$(echo "$VERSION" | cut -d '.' -f 3)
    case "$UP_PART" in
        major)
            VERSION_MAJOR=$((VERSION_MAJOR+1))
            VERSION_MINOR=0
            # BUGFIX: this branch previously set VERSION_MINOR=0 twice and
            # never reset the patch component (1.2.3 -> 2.0.3 instead of 2.0.0).
            VERSION_PATCH=0
            ;;
        minor)
            VERSION_MINOR=$((VERSION_MINOR+1))
            VERSION_PATCH=0
            ;;
        patch)
            VERSION_PATCH=$((VERSION_PATCH+1))
            ;;
    esac
    echo "$VERSION_MAJOR.$VERSION_MINOR.$VERSION_PATCH"
}
# version_set CRATE_NAME VERSION
# Rewrites the `version = ...` line of CRATE_NAME/Cargo.toml in place
# (a .bak copy is left behind by sed -i.bak).
function version_set() {
    local CRATE_NAME="$1"
    local VERSION="$2"
    sed -i.bak -e "s/^version *=.*/version = \"$VERSION\"/" "$CRATE_NAME/Cargo.toml"
}
# version_dep_set CRATE_NAME DEP_NAME VERSION
# Pins CRATE_NAME's dependency DEP_NAME to the given VERSION with a sibling
# path, e.g.: dep = { version = "1.2.3", path="../dep"}.
function version_dep_set() {
    local CRATE_NAME="$1"
    local DEP_NAME="$2"
    local VERSION="$3"
    sed -i.bak -e "s|^$DEP_NAME *=.*|$DEP_NAME = \\{ version = \"$VERSION\", path=\"../$DEP_NAME\"\\}|" "$CRATE_NAME/Cargo.toml"
}
# bump_version CRATE_NAME {major|minor|patch}
# Bumps the crate's version, records its tag and publish entry, then walks the
# dependency chain (ydb-grpc-helpers -> ydb-grpc -> ydb), pinning the new
# version in each dependent and patch-bumping it recursively.
# Exits the script with status 1 on an unknown crate name.
function bump_version() {
    local CRATE_NAME="$1"
    local VERSION_PART="$2"
    local VERSION

    VERSION=$(version_get "$CRATE_NAME")
    VERSION=$(version_increment "$VERSION" "$VERSION_PART")
    version_set "$CRATE_NAME" "$VERSION"

    GIT_TAGS+=("$CRATE_NAME-$VERSION")
    # CRATES accumulates in dependency order: leaves first, so publishing can
    # proceed bottom-up.
    CRATES+=("$CRATE_NAME")

    case "$CRATE_NAME" in
    ydb)
        # Top of the chain: nothing depends on ydb.
        ;;
    ydb-grpc)
        version_dep_set "ydb" "ydb-grpc" "$VERSION"
        bump_version "ydb" patch
        ;;
    ydb-grpc-helpers)
        version_dep_set "ydb-grpc" "ydb-grpc-helpers" "$VERSION"
        bump_version "ydb-grpc" patch
        ;;
    *)
        echo "Unexpected crate name '$CRATE_NAME'"
        exit 1
    esac
}
# Main sequence: bump versions, refresh the lockfile, commit+tag, push tags
# first (so the repo state is recoverable if publishing fails midway), publish
# each crate in dependency order, then push the commit itself.
bump_version "$CRATE_NAME" "$VERSION_PART"

# Refresh Cargo.lock so it matches the bumped versions before committing.
cargo check

git_set_tags

# push tags before publish - for fix repository state if failed in middle of publish crates
git push --tags

for CRATE in "${CRATES[@]}"; do
    publish_crate "$CRATE"
done

# git push after publish crate - for run CI build check after all changed crates will published in crates repo
git push
|
<filename>Foundation/org.egovframe.rte.fdl.security/src/main/java/org/egovframe/rte/fdl/security/userdetails/jdbc/EgovUsersByUsernameMapping.java
/*
* Copyright 2008-2009 MOPAS(Ministry of Public Administration and Security).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.egovframe.rte.fdl.security.userdetails.jdbc;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import org.egovframe.rte.fdl.security.userdetails.EgovUserDetails;
import javax.sql.DataSource;
import org.springframework.jdbc.core.SqlParameter;
import org.springframework.jdbc.object.MappingSqlQuery;
/**
* 사용자 계정 정보를 DB에서 관리할 수 있도록 구현한 클래스
*
* <p><b>NOTE:</b>org.springframework.jdbc.object.MappingSqlQuery 를 확장하여 사용자 계정 정보를 DB에서 관리할 수 있도록 구현한 클래스이다.</p>
*
* @author 실행환경 개발팀 윤성종
* @since 2009.06.01
* @version 1.0
* <pre>
* 개정이력(Modification Information)
*
* 수정일 수정자 수정내용
* ----------------------------------------------
* 2009.06.01 윤성종 최초 생성
* 2014.01.22 한성곤 Spring Security 3.2.X 업그레이드 적용
* </pre>
*/
public abstract class EgovUsersByUsernameMapping extends MappingSqlQuery<EgovUserDetails> {

    /**
     * Builds the compiled mapping query that loads a user account row from
     * the database by username.
     *
     * @param ds the data source to query
     * @param usersByUsernameQuery SQL with a single VARCHAR placeholder for the username
     */
    public EgovUsersByUsernameMapping(DataSource ds, String usersByUsernameQuery) {
        super(ds, usersByUsernameQuery);
        declareParameter(new SqlParameter(Types.VARCHAR));
        compile();
    }

    /**
     * Maps one result-set row to an {@link EgovUserDetails}; implemented by
     * subclasses that know the concrete column layout.
     */
    @Override
    protected abstract EgovUserDetails mapRow(ResultSet rs, int rownum) throws SQLException;
}
|
<filename>src/icon/IconExternalLink.tsx
import React from 'react';
export interface IconExternalLinkProps extends React.SVGAttributes<SVGElement> {
  /** Stroke color; defaults to 'currentColor' (see defaultProps below). */
  color?: string;
  /** Square icon width/height; defaults to '1em'. */
  size?: string | number;
  /** Extra class names, spread onto the <svg> via the rest props. */
  className?: string;
  /** Inline styles merged over the default vertical-align. */
  style?: React.CSSProperties;
}
/**
 * Feather "external link" icon as a React SVG component.
 *
 * color/size/style are handled explicitly; every other SVG attribute is
 * spread onto the <svg> element after the defaults, so caller-supplied
 * attributes (including className) take precedence.
 *
 * Fix: React.SFC is deprecated (removed in newer @types/react) — use React.FC.
 */
export const IconExternalLink: React.FC<IconExternalLinkProps> = (
  props: IconExternalLinkProps
): React.ReactElement => {
  const { color, size, style, ...restProps } = props;

  return (
    <svg
      xmlns="http://www.w3.org/2000/svg"
      width={size}
      height={size}
      viewBox="0 0 24 24"
      fill="none"
      stroke={color}
      className="feather feather-external-link"
      strokeWidth="2"
      strokeLinecap="round"
      strokeLinejoin="round"
      style={{ verticalAlign: 'middle', ...style }}
      {...restProps}
    >
      <path d="M18 13v6a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V8a2 2 0 0 1 2-2h6" />
      <polyline points="15 3 21 3 21 9" />
      <line x1="10" y1="14" x2="21" y2="3" />
    </svg>
  );
};
// Fallbacks applied when the caller omits color/size.
// NOTE(review): defaultProps on function components is deprecated in newer
// React versions — fold into default parameter values when upgrading.
IconExternalLink.defaultProps = {
  color: 'currentColor',
  size: '1em',
};

export default IconExternalLink;
|
from subprocess import PIPE, Popen
# Best-effort capture of git metadata at import time. Each value is the empty
# string when the command produces no output.
from subprocess import DEVNULL  # local import: only this block needs it

# Current branch name (e.g. "main"; "HEAD" when detached).
with Popen(["git", "rev-parse", "--abbrev-ref", "HEAD"], stdout=PIPE) as branch_command:
    GIT_BRANCH = branch_command.communicate()[0].decode().strip()

# Abbreviated commit hash of HEAD.
with Popen(["git", "rev-parse", "--short", "HEAD"], stdout=PIPE) as commit_command:
    GIT_COMMIT = commit_command.communicate()[0].decode().strip()

# Exact tag pointing at HEAD, if any. Fix: git prints "fatal: no tag exactly
# matches ..." on stderr when HEAD is untagged — an expected case here — so
# silence stderr instead of leaking the message to the console.
with Popen(
    ["git", "describe", "--tags", "--exact-match"], stdout=PIPE, stderr=DEVNULL
) as tag_command:
    GIT_TAG = tag_command.communicate()[0].decode().strip()

if not GIT_TAG:
    GIT_TAG = "UNRELEASED"
|
//
// CFWeakProxy.h
// CFFoundation
//
// Created by refraincc on 2019/3/26.
//
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
@interface CFWeakProxy : NSProxy

/**
 Uses NSProxy to hold a weak reference to the target, so the proxy can be
 handed to APIs that would otherwise retain it (e.g. timer targets).

 @param target the object to reference weakly
 @return a CFWeakProxy instance forwarding to target
 */
+ (instancetype)weakProxyForObject:(id)target;

@end
NS_ASSUME_NONNULL_END
|
<filename>transport/consumer/handler/setup.go<gh_stars>0
package consumerHandler
import (
"context"
"github.com/alhamsya/boilerplate-go/lib/helpers/custom_log"
"github.com/alhamsya/boilerplate-go/middleware/consumer"
"github.com/alhamsya/boilerplate-go/transport/consumer/routers"
)
// Run registers the consumer routers and, for every active Pub/Sub entry in
// the config, attaches each registered handler function to its subscription
// through the consumer interceptor middleware.
//
// Entries with no matching subscription in h.subscription are skipped
// silently; inactive entries are only logged. Always returns nil.
// NOTE(review): InterceptorPubSub is invoked synchronously per handler —
// confirm it does not block, or that blocking per handler is intended.
func (h *Handler) Run(ctx context.Context) error {
	// consumerList presumably maps config key -> handler funcs (built by
	// consumerRouters.Register, outside this file).
	consumerList := consumerRouters.New(&consumerRouters.ConsumerServer{
		Cfg:                h.Cfg,
		ConsumerInteractor: h.Interactor,
	}).Register()

	for name, val := range h.Cfg.PubSub {
		if val.IsActive {
			for _, fun := range consumerList[name] {
				subscription, ok := h.subscription[name]
				if !ok {
					continue
				}
				consumerMiddleware.InterceptorPubSub(ctx, subscription, fun)
			}
		} else {
			customLog.InfoF("[CONSUMER] %s: is inactive", name)
		}
	}

	return nil
}
|
/*
* MIT License
*
* Copyright (c) 2021 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package net.jamsimulator.jams.event;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* This annotation tags methods to be registered by the method {@link EventBroadcast#registerListeners(Object, boolean)}.
* These methods must have only one parameter with a type extending {@link Event}. They may be private or static.
 * Static listeners will still be held by an instance of their class, and they're not recommended.
*/
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface Listener {

    /**
     * The priority of the listener.
     * Listeners with higher priorities will be called first.
     *
     * @return the priority.
     */
    int priority() default 0;

    /**
     * Returns whether this listener should run events even
     * if they were cancelled by another listener with a higher priority.
     *
     * @return whether this listener should run cancelled events.
     */
    boolean ignoreCancelled() default false;
}
|
from .node import SupervisorNode
from ..core.node.manager import NodeManager
from ..core.node.base_node import Node
from ..core.action.executor import action_executor, executor_forwarder
from ..core.common.coro import NoneCoroManager
class SupervisorManager(NodeManager):
    """Node manager that tracks supervisor nodes, forwards actions to them,
    and drives their coroutines."""

    def __init__(self) -> None:
        super().__init__()
        # Registered supervisor nodes, in insertion order. Duplicates are not
        # prevented here.
        self.supervisor_nodes = []

    def add_supervisor_node(self, node: SupervisorNode) -> None:
        """Register *node* to receive forwarded actions and coroutine time."""
        self.supervisor_nodes.append(node)

    def remove_supervisor_node(self, node: SupervisorNode) -> None:
        """Unregister *node*; unknown nodes are ignored silently."""
        if node in self.supervisor_nodes:
            self.supervisor_nodes.remove(node)

    @action_executor
    def execute_action(self, action, node: SupervisorNode):
        """Forward *action* to every registered supervisor node.

        NOTE(review): runtime behavior also depends on the @action_executor
        decorator, defined outside this file — confirm what it wraps.
        """
        # Forward the action to other nodes in the system
        executor_forwarder(action, self.supervisor_nodes)

    def manage_coroutines(self) -> None:
        """Collect each node's coroutine into a NoneCoroManager and run them."""
        # Manage coroutines for supervisor nodes
        coro_manager = NoneCoroManager()
        for node in self.supervisor_nodes:
            coro_manager.add_coroutine(node.coroutine)
        coro_manager.run_coroutines()
// The user's games list starts empty until SET_MY_GAMES arrives.
const initialState = []

/**
 * Reducer for the current user's games.
 *  - SET_MY_GAMES: replace the whole list with action.games.
 *  - UPDATE_GAME: swap the game whose id matches action.game.id.
 *  - anything else: return state unchanged.
 */
export default (state = initialState, action) => {
  if (action.type === "SET_MY_GAMES") {
    return action.games
  }
  if (action.type === "UPDATE_GAME") {
    return state.map((game) => (game.id === action.game.id ? action.game : game))
  }
  return state
}
|
#!/usr/bin/env -S bash -euET -o pipefail -O inherit_errexit
# Generated ktools run script: computes insured losses over 10 parallel
# processes and aggregates them with leccalc. Runs from the script's own dir.
SCRIPT=$(readlink -f "$0") && cd $(dirname "$SCRIPT")

# --- Script Init ---

mkdir -p log
rm -R -f log/*

# --- Setup run dirs ---
# Keep prior summary-info and json outputs; clear everything else.
find output -type f -not -name '*summary-info*' -not -name '*.json' -exec rm -R -f {} +

rm -R -f fifo/*
rm -R -f work/*
mkdir work/kat/
# Staging area for per-process summary streams consumed by leccalc at the end.
mkdir work/il_S1_summaryleccalc
# --- Create named pipes ---
# One fifo per parallel process P1..P10 for each pipeline stage:
#   il_P*             fmcalc output  -> summarycalc input
#   il_S1_summary_P*  summarycalc output (+ .idx) -> tee into work/
#   gul_lb_P*         gulcalc output -> load_balancer input
#   lb_il_P*          load_balancer output -> fmcalc input
# (Loops replace 50 generated mkfifo lines; the same fifos are created.)
for p in $(seq 1 10); do
    mkfifo "fifo/il_P${p}"
done
for p in $(seq 1 10); do
    mkfifo "fifo/il_S1_summary_P${p}"
    mkfifo "fifo/il_S1_summary_P${p}.idx"
done
for p in $(seq 1 10); do
    mkfifo "fifo/gul_lb_P${p}"
done
for p in $(seq 1 10); do
    mkfifo "fifo/lb_il_P${p}"
done
# --- Do insured loss computes ---

# Persist each summary stream (and its index) to work/ for leccalc, collecting
# the tee pids so we can wait for all writers before aggregating.
TEE_PIDS=()
for p in $(seq 1 10); do
    tee < "fifo/il_S1_summary_P${p}" "work/il_S1_summaryleccalc/P${p}.bin" > /dev/null & TEE_PIDS+=("$!")
    tee < "fifo/il_S1_summary_P${p}.idx" "work/il_S1_summaryleccalc/P${p}.idx" > /dev/null & TEE_PIDS+=("$!")
done

# Summarize insured losses per process.
for p in $(seq 1 10); do
    summarycalc -m -f -1 "fifo/il_S1_summary_P${p}" < "fifo/il_P${p}" &
done

# Ground-up loss pipeline: events -> model -> gulcalc, feeding the balancers.
for p in $(seq 1 10); do
    eve -R "${p}" 10 | getmodel | gulcalc -S100 -L100 -r -a0 -i - > "fifo/gul_lb_P${p}" &
done

# Balance gul streams onto il streams in pairs: (1,2), (3,4), ...
for p in 1 3 5 7 9; do
    load_balancer -i "fifo/gul_lb_P${p}" "fifo/gul_lb_P$((p + 1))" -o "fifo/lb_il_P${p}" "fifo/lb_il_P$((p + 1))" &
done

# Apply the financial module to produce insured losses.
for p in $(seq 1 10); do
    fmcalc -a2 < "fifo/lb_il_P${p}" > "fifo/il_P${p}" &
done

# Wait until every summary stream has been fully persisted to work/.
wait "${TEE_PIDS[@]}"

# --- Do insured loss kats ---


leccalc -r -Kil_S1_summaryleccalc -w output/il_S1_leccalc_wheatsheaf_oep.csv & lpid1=$!
wait $lpid1

rm -R -f work/*
rm -R -f fifo/*
|
<reponame>kunmi02/Question-catalogue
import React from 'react';
import { render } from '@testing-library/react';
import { Provider } from 'react-redux';
import { BrowserRouter } from 'react-router-dom';
import store from '../store';
import Navbar from '../components/Navbar';
describe('test the Navbar component', () => {
const mockClick = jest.fn();
const title = 'Questions Catalogue';
const { getByText } = render(
<>
<Provider store={store}>
<BrowserRouter>
<Navbar handleCategoryChange={mockClick} />
</BrowserRouter>
</Provider>
,
</>,
);
test('should render the main header on the navbar', () => {
expect(getByText(title)).not.toBeNull();
expect(getByText(title)).toBeTruthy();
});
});
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: <EMAIL>
<EMAIL>
*/
#include "onnx2tengine.hpp"
/*
* SELF DEFINE VARIABLE
* FOR ONNX SERIALIZER
*/
const int OP_VERSION=1; // IR op version used for every node this serializer creates
/*
* ASSIST FUNCTIONS FOR ONNX SERIALIZER START
*/
/*
 * Returns true when a loader has been registered for the given ONNX op type
 * (see register_op_load).
 */
bool onnx_serializer::find_op_load_method(const std::string& op_name)
{
    return op_load_map.count(op_name) != 0;
}
/*
 * Linear search for a tensor by name over the whole IR graph.
 * Returns nullptr when no tensor matches.
 */
ir_tensor_t* find_tensor(ir_graph_t* graph, const std::string& tensor_name)
{
    for (uint16_t i = 0; i < graph->tensor_num; i++)
    {
        ir_tensor_t* tensor = get_ir_graph_tensor(graph, i);
        if (tensor->name == tensor_name)
            return tensor;
    }

    return nullptr;
}
/*
 * Replaces a node's operator in place: frees the old op params, switches the
 * type, and runs the new op's init (which sets up fresh params).
 * Returns 0 on success, -1 if the new op's init fails.
 * NOTE(review): param_mem is not reset to NULL after sys_free — if no init
 * runs for the new op, the stale pointer dangles; confirm callers tolerate this.
 */
int change_node_op(ir_node_t* node, int new_op_type)
{
    sys_free(node->op.param_mem);
    node->op.type = new_op_type;
    ir_method_t* ir_method = find_op_method(new_op_type, OP_VERSION);
    // A missing method table or init hook is tolerated; only a failing init is an error.
    if ((NULL != ir_method) && (NULL != ir_method->init) && (ir_method->init(&node->op) < 0))
    {
        return -1;
    }

    return 0;
}
/*
 * Returns (by value) the tensor attribute named `key` of an ONNX node, or a
 * default-constructed empty TensorProto when the attribute is absent.
 */
onnx::TensorProto get_node_attr_tensor(const onnx::NodeProto& node, const char* key)
{
    for (int i = 0; i < node.attribute_size(); i++)
    {
        const onnx::AttributeProto& attr = node.attribute(i);
        if (attr.name() == key)
        {
            return attr.t();
        }
    }

    return onnx::TensorProto();
}
/*
* ASSIST FUNCTIONS FOR ONNX SERIALIZER END
*/
/*
 * Parses the ONNX protobuf at `model_file` into `model`.
 * Raises protobuf's default message-size limit so large models parse
 * (API differs before/after protobuf 3.11).
 * Returns 0 on success, -1 on open or parse failure.
 */
int onnx_serializer::load_model_file(std::string model_file, onnx::ModelProto &model)
{
    std::ifstream is(model_file, std::ios::in | std::ios::binary);

    if(!is.is_open())
    {
        TLOG_ERR("cannot open file: %s \n", model_file.c_str());
        return -1;
    }

    google::protobuf::io::IstreamInputStream input_stream(&is);
    google::protobuf::io::CodedInputStream coded_input(&input_stream);
#if GOOGLE_PROTOBUF_VERSION >= 3011000
    coded_input.SetTotalBytesLimit(INT_MAX);
#else
    coded_input.SetTotalBytesLimit(INT_MAX, INT_MAX / 2);
#endif

    bool ret = model.ParseFromCodedStream(&coded_input);

    is.close();

    if(!ret)
    {
        TLOG_ERR("onnx serializer: parse file: %s \n", model_file.c_str());
        return -1;
    }

    return 0;
}
/*
 * Materializes the outputs of ONNX "Constant" nodes that feed shape-like
 * second inputs (Reshape / Gather / Div / Resize) as const IR tensors.
 * Returns 0 on success, -1 on allocation failure.
 *
 * NOTE(review): `dims` (new int[dim_num]) is never delete[]d — this leaks if
 * set_ir_tensor_shape copies the array; confirm its ownership semantics.
 */
int onnx_serializer::load_constant_tensor(ir_graph_t* graph, const onnx::GraphProto& onnx_graph)
{
    // Pass 1: collect every Constant node's value, keyed by its output name.
    std::map<std::string, onnx::TensorProto> node_tensor;
    int node_count = onnx_graph.node_size();
    for (int i = 0; i < node_count; i++)
    {
        const onnx::NodeProto& node = onnx_graph.node(i);
        const std::string& op = node.op_type();
        if (op == "Constant")
        {
            onnx::TensorProto node_attr = get_node_attr_tensor(node, "value");
            node_tensor.insert(std::pair<std::string, onnx::TensorProto>(node.output(0), node_attr));
        }
    }
    if (node_tensor.size() == 0)
    {
        return 0;
    }
    // Pass 2: for consumers whose second input is one of those constants,
    // create a const IR tensor + OP_CONST node carrying the data.
    for (int i = 0; i < node_count; i++)
    {
        const onnx::NodeProto& node = onnx_graph.node(i);
        const std::string& op = node.op_type();
        if ((op == "Reshape" || op == "Gather" || op == "Div" || op == "Resize") )
        {
            const onnx::TensorProto& onnx_tensor = node_tensor[node.input(1)];
            // Record the tensor name so load_graph_node can detect reuse.
            std::pair<std::string, bool> t(node.input(1), 0);
            tensor_check.insert(t);
            // ONNX data_type 1 is float32; everything else is mapped to int32 here.
            int tensor_date_type = onnx_tensor.data_type() == 1 ? TENGINE_DT_FP32 : TENGINE_DT_INT32;
            const char* name = node.input(1).c_str();
            int dim_num = onnx_tensor.dims_size();
            int *dims = new int[dim_num];
            for (int j = 0; j < dim_num; j++)
            {
                dims[j] = onnx_tensor.dims(j);
            }

            // create ir tensor
            ir_tensor_t* ir_tensor = create_ir_tensor(graph, name, tensor_date_type);
            if (ir_tensor == NULL)
            {
                fprintf(stderr, "create ir tensor failed!\n");
                return -1;
            }
            set_ir_tensor_shape(ir_tensor, dims, dim_num);
            ir_tensor->tensor_type = TENSOR_TYPE_CONST;
            // set tensor data
            // ONNX data_type 7 is int64: copy element-wise from either the raw
            // byte buffer or the typed int64_data field.
            if ( 7 == onnx_tensor.data_type())
            {
                int tensor_size = ir_tensor->elem_num * sizeof(int64_t);
                ir_tensor->data = sys_malloc(tensor_size);
                int64_t* mem_buf = (int64_t*)ir_tensor->data;
                if(onnx_tensor.has_raw_data())
                {
                    int64_t* raw_data = (int64_t*)onnx_tensor.raw_data().data();
                    for (int j = 0; j < ir_tensor->elem_num; j++)
                    {
                        mem_buf[j] = raw_data[j];
                    }
                }
                else
                {
                    int64_t* raw_data = (int64_t*)onnx_tensor.int64_data().data();
                    for (int j = 0; j < ir_tensor->elem_num; j++)
                    {
                        mem_buf[j] = raw_data[j];
                    }
                }
            }
            else
            {
                // NOTE(review): non-int64 data is copied byte-wise as uint8
                // even for fp32/int32 constants — only 1 byte per element is
                // stored; confirm this is intentional for these shape inputs.
                int tensor_size = ir_tensor->elem_num * sizeof(uint8_t);
                ir_tensor->data = sys_malloc(tensor_size);
                uint8_t* mem_buf = (uint8_t*)ir_tensor->data;
                if(onnx_tensor.has_raw_data())
                {
                    uint8_t* raw_data = (uint8_t*)onnx_tensor.raw_data().data();
                    for (int j = 0; j < ir_tensor->elem_num; j++)
                    {
                        mem_buf[j] = raw_data[j];
                    }
                }
                else
                {
                    uint8_t* raw_data = (uint8_t*)onnx_tensor.int32_data().data();
                    for (int j = 0; j < ir_tensor->elem_num; j++)
                    {
                        mem_buf[j] = raw_data[j];
                    }
                }
            }
            ir_node_t* ir_node = create_ir_node(graph, name, OP_CONST, OP_VERSION);
            set_ir_node_output_tensor(ir_node, 0, ir_tensor);
        }
    }

    return 0;
}
/*
 * Creates one const IR tensor + OP_CONST node per graph initializer.
 * Only fp32 (1), int32 (6) and int64 (7) initializers are accepted.
 * Returns 0 on success, -1 on unsupported dtype or allocation failure.
 *
 * NOTE(review): `dims` (new int[dim_num]) is never delete[]d — leaks if
 * set_ir_tensor_shape copies; confirm ownership.
 */
int onnx_serializer::load_initializer_tensor(ir_graph_t* graph, const onnx::GraphProto& onnx_graph)
{
    int const_tensor_num = onnx_graph.initializer_size();
    for (int i = 0; i < const_tensor_num; i++)
    {
        const onnx::TensorProto& onnx_tensor = onnx_graph.initializer(i);

        if (onnx_tensor.data_type() != 1 && onnx_tensor.data_type() != 6 && onnx_tensor.data_type() != 7) // fp32 int32 int64
        {
            fprintf(stderr, "const tensor data type is not fp32 or int32 or int64. \n");
            fprintf(stderr, "onnx_tensor.data_type: %d \n", onnx_tensor.data_type());
            return -1;
        }
        // Record the tensor name so load_graph_node can detect reuse.
        std::pair<std::string, int> t(onnx_tensor.name(), 0);
        tensor_check.insert(t);
        // fp32 stays fp32; both int32 and int64 are declared TENGINE_DT_INT32.
        int tensor_date_type = onnx_tensor.data_type() == 1 ? TENGINE_DT_FP32 : TENGINE_DT_INT32;
        const char* name = onnx_tensor.name().c_str();
        int dim_num = onnx_tensor.dims_size();
        int *dims = new int[dim_num];
        for (int j = 0; j < dim_num; j++)
        {
            dims[j] = onnx_tensor.dims(j);
        }

        // create ir tensor
        ir_tensor_t* ir_tensor = create_ir_tensor(graph, name, tensor_date_type);
        if (ir_tensor == NULL)
        {
            fprintf(stderr, "create ir tensor failed!\n");
            return -1;
        }
        set_ir_tensor_shape(ir_tensor, dims, dim_num);
        ir_tensor->tensor_type = TENSOR_TYPE_CONST;
        if (onnx_tensor.has_raw_data())
        {
            if (onnx_tensor.data_type() == 1) //fp32
            {
                int tensor_size = ir_tensor->elem_num * sizeof(float);
                ir_tensor->data = sys_malloc(tensor_size);
                float* mem_buf = (float*)ir_tensor->data;
                float* raw_data = (float*)onnx_tensor.raw_data().c_str();
                for (int j = 0; j < ir_tensor->elem_num; j++)
                {
                    mem_buf[j] = raw_data[j];
                }
            }
            else // int32
            {
                // NOTE(review): this branch copies raw bytes as int64 for BOTH
                // data_type 6 (int32) and 7 (int64); for true int32 raw data the
                // 8-byte stride would mis-read — confirm data_type 6 never
                // arrives with raw_data, or split the cases.
                int tensor_size = ir_tensor->elem_num * sizeof(int64_t);
                ir_tensor->data = sys_malloc(tensor_size);
                int64_t* mem_buf = (int64_t*)ir_tensor->data;
                int64_t* raw_data = (int64_t*)onnx_tensor.raw_data().data();
                for (int j = 0; j < ir_tensor->elem_num; j++)
                {
                    mem_buf[j] = raw_data[j];
                }
            }
        }
        else
        {
            if (onnx_tensor.data_type() == 1) //fp32
            {
                int tensor_size = ir_tensor->elem_num * sizeof(float);
                ir_tensor->data = sys_malloc(tensor_size);
                float* mem_buf = (float*)ir_tensor->data;
                float* raw_data = (float*)onnx_tensor.float_data().data();
                for (int j = 0; j < ir_tensor->elem_num; j++)
                {
                    mem_buf[j] = raw_data[j];
                }
            }
            else // int32
            {
                int tensor_size = ir_tensor->elem_num * sizeof(int32_t);
                ir_tensor->data = sys_malloc(tensor_size);
                int32_t* mem_buf = (int32_t*)ir_tensor->data;
                int32_t* raw_data = (int32_t*)onnx_tensor.int32_data().data();
                for (int j = 0; j < ir_tensor->elem_num; j++)
                {
                    mem_buf[j] = raw_data[j];
                }
            }
        }

        ir_node_t* ir_node = create_ir_node(graph, name, OP_CONST, OP_VERSION);
        set_ir_node_output_tensor(ir_node, 0, ir_tensor);
    }
    return 0;
}
/*
 * Creates an OP_INPUT node + fp32 tensor for every ONNX graph input that is
 * not already covered by an initializer/constant, then registers the set of
 * input nodes on the graph. A symbolic dimension (dim_param) leaves the
 * tensor shapeless. Always returns 0.
 *
 * NOTE(review): `dims` (new int[]) is never delete[]d — leaks if
 * set_ir_tensor_shape copies; confirm ownership.
 */
int onnx_serializer::set_graph_input(ir_graph_t* graph, const onnx::GraphProto& onnx_graph)
{
    std::vector<int16_t> input_nodes;
    for (int i = 0; i < onnx_graph.input_size(); i++)
    {
        const onnx::ValueInfoProto& val = onnx_graph.input(i);
        // Skip inputs that already exist as tensors (initializers/constants).
        if(get_ir_tensor_index_from_name(graph, val.name().c_str()) != -1)
            continue;

        // now, catch an input tensor
        const onnx::TypeProto& type = val.type();
        const onnx::TypeProto::Tensor& tensor_type = type.tensor_type();
        const onnx::TensorShapeProto& shape = tensor_type.shape();
        int has_shape = 1;
        int *dims = new int[shape.dim_size()];
        for(int j = 0; j < shape.dim_size(); j++)
        {
            const onnx::TensorShapeProto::Dimension& dim = shape.dim(j);
            // A named (symbolic) dimension means the static shape is unknown.
            if(dim.has_dim_param())
            {
                has_shape = 0;
                break;
            }
            dims[j] = dim.dim_value();
        }

        ir_tensor_t* tensor = create_ir_tensor(graph, val.name().c_str(), TENGINE_DT_FP32);
        if (has_shape)
            set_ir_tensor_shape(tensor, dims, shape.dim_size());

        ir_node_t* node = create_ir_node(graph, val.name().c_str(), OP_INPUT, OP_VERSION);

        set_ir_node_output_tensor(node, 0, tensor);
        input_nodes.push_back(node->index);
    }

    // Hand the collected node indices to the graph (sys_malloc'd buffer is
    // owned by the graph after this call).
    int16_t* node_idx = (int16_t*)sys_malloc(sizeof(int16_t) * input_nodes.size());
    for (int i = 0; i < input_nodes.size(); i++)
    {
        node_idx[i] = input_nodes[i];
    }
    set_ir_graph_input_node(graph, node_idx, input_nodes.size());
    return 0;
}
/*
 * Creates one IR node per ONNX node (Constants excluded), wires input/output
 * tensors, and dispatches to the registered per-op loader.
 * First verifies every op type has a loader and reports all unsupported ops
 * at once. Returns 0 on success, -1 on unsupported op or loader failure.
 */
int onnx_serializer::load_graph_node(ir_graph_t* graph, const onnx::GraphProto& onnx_graph)
{
    int i;
    std::vector<std::string> no_supported_op;
    // Pass 1: collect every op type with no registered loader.
    for(i = 0; i < onnx_graph.node_size(); i++)
    {
        const onnx::NodeProto& onnx_node = onnx_graph.node(i);
        const std::string& onnx_op_name = onnx_node.op_type();

        if(!find_op_load_method(onnx_op_name))
        {
            auto it = find(no_supported_op.begin(),no_supported_op.end(),onnx_op_name);
            if(it == no_supported_op.end())
            {
                // Constant nodes are materialized earlier; not an error here.
                if(onnx_op_name == "Constant")
                    continue;
                no_supported_op.push_back(onnx_op_name);
            }
        }
    }
    if(no_supported_op.size())
    {
        TLOG_ERR("These %d op are not supported\n{ ", no_supported_op.size());
        for(int j = 0; j < (int) no_supported_op.size(); j++)
        {
            TLOG_ERR("%s ", no_supported_op[j].c_str());
        }
        TLOG_ERR("}\n");
        return -1;
    }

    // Pass 2: build IR nodes and wire tensors.
    for(i = 0; i < onnx_graph.node_size(); i++)
    {
        /* create ir node*/
        const onnx::NodeProto& onnx_node = onnx_graph.node(i);
        const std::string& op_name = onnx_node.op_type();
        if (op_name == "Constant")
            continue;
        const std::string node_name = onnx_node.name();
        ir_node_t* ir_node = create_ir_node(graph, node_name.c_str(), op_load_map[op_name].first, OP_VERSION);
        if (ir_node == NULL)
            return -1;

        /* set ir node io */
        for (int j = 0; j < onnx_node.input_size(); j++)
        {
            const std::string& input_name = onnx_node.input(j);
            // Optional ONNX inputs appear as empty names; skip them.
            if (input_name == "")
            {
                continue;
            }
            int tensor_id = get_ir_tensor_index_from_name(graph, input_name.c_str());
            ir_tensor_t* tensor = get_ir_graph_tensor(graph, tensor_id);
            // tensor_check counts prior uses of a const tensor; on reuse a
            // duplicate tensor is created so each consumer gets its own copy.
            if(tensor_check[tensor->name] != 0) // Already in tensor list
            {
                if(tensor->dim_num == 1){
                    if(tensor->data == NULL){
                        continue;
                    }
                }
                if(tensor->dim_num == 0){
                    set_ir_node_input_tensor(ir_node, j, tensor);
                    continue;
                }
                std::string new_tensor_name = input_name + "_" + std::to_string(tensor_check[input_name]);
                ir_tensor_t* new_ir_tensor = create_ir_tensor(graph, new_tensor_name.c_str(), TENGINE_DT_FP32);
                int* dims = tensor->dims;
                int dim_num = tensor->dim_num;
                set_ir_tensor_shape(new_ir_tensor, dims, dim_num);
                int ct = 1;
                for (int n = 0; n < dim_num; n++)
                {
                    ct *= dims[n];
                }
                // NOTE(review): new_buf is reassigned to a fresh malloc AFTER
                // being read from new_ir_tensor->data, and is never stored back
                // into new_ir_tensor->data — the duplicate tensor keeps no data
                // and the malloc'd buffer leaks. Also only `ct` bytes are copied
                // (uint8 stride) although the tensor is declared fp32, and this
                // inner `j` shadows the input-index `j` of the outer loop.
                // Looks like a bug; confirm before relying on duplicated tensors.
                uint8_t* mem_buf = (uint8_t*)tensor->data;
                uint8_t* new_buf = (uint8_t*)new_ir_tensor->data;
                new_buf = (uint8_t*)malloc(sizeof(uint8_t)*ct);
                for (int j = 0; j < ct; j++)
                    new_buf[j] = mem_buf[j];
                set_ir_node_input_tensor(ir_node, j, new_ir_tensor);
            }
            else
            {
                tensor_check[tensor->name] = tensor_check[tensor->name] + 1;
                set_ir_node_input_tensor(ir_node, j, tensor);
            }
        }
        for (int j = 0; j < onnx_node.output_size(); j++)
        {
            // Dropout's secondary (mask) output is intentionally dropped.
            if (op_name == "Dropout" && j > 0)
                continue;
            const std::string& output_name = onnx_node.output(j);
            ir_tensor_t* tensor = create_ir_tensor(graph, output_name.c_str(), TENGINE_DT_FP32);
            set_ir_node_output_tensor(ir_node, j, tensor);
        }

        /* exec op load func */
        op_load_t loader = op_load_map[op_name].second;
        if (loader(graph, ir_node, onnx_node) < 0)
        {
            TLOG_ERR("load op %s func failed in node %s .\n", op_name.c_str(), node_name.c_str());
            return -1;
        }
    }
    return 0;
}
/*
 * Registers the graph's output nodes: resolves each ONNX graph output to the
 * IR node that produces it, optionally applying the declared static shape.
 * A symbolic dimension (dim_param) leaves the tensor shape untouched.
 * Always returns 0.
 *
 * NOTE(review): `dims` (new int[]) is never delete[]d — same potential leak
 * as in set_graph_input; confirm set_ir_tensor_shape ownership.
 */
int onnx_serializer::set_graph_output(ir_graph_t* graph, const onnx::GraphProto& onnx_graph)
{
    std::vector<int16_t> output_nodes;
    for (int i = 0; i < onnx_graph.output_size(); i++)
    {
        const onnx::ValueInfoProto& val = onnx_graph.output(i);
        int tensor_id = get_ir_tensor_index_from_name(graph, val.name().c_str());

        const onnx::TypeProto& type = val.type();
        const onnx::TypeProto::Tensor& tensor_type = type.tensor_type();
        const onnx::TensorShapeProto& shape = tensor_type.shape();
        int has_shape = 1;
        int *dims = new int[shape.dim_size()];
        for(int j = 0; j < shape.dim_size(); j++)
        {
            const onnx::TensorShapeProto::Dimension& dim = shape.dim(j);
            if(dim.has_dim_param())
            {
                has_shape = 0;
                break;
            }
            dims[j] = dim.dim_value();
        }
        ir_tensor_t* tensor = get_ir_graph_tensor(graph, tensor_id);
        if (has_shape)
            set_ir_tensor_shape(tensor, dims, shape.dim_size());
        // The output node is the producer of the output tensor.
        ir_node_t* node = get_ir_graph_node(graph, tensor->producer);
        output_nodes.push_back(node->index);
    }

    // Hand the collected node indices to the graph (buffer owned by graph).
    int16_t* node_idx = (int16_t*)sys_malloc(sizeof(int16_t) * output_nodes.size());
    for (int i = 0; i < output_nodes.size(); i++)
    {
        node_idx[i] = output_nodes[i];
    }
    set_ir_graph_output_node(graph, node_idx, output_nodes.size());
    return 0;
}
/*
 * Full load sequence for an ONNX model file. Order matters: initializers and
 * Constant-fed tensors must exist before nodes reference them, and graph
 * outputs are resolved last. Returns 0 on success, -1 on any stage failure.
 */
int onnx_serializer::load_model(ir_graph_t* graph, std::string model_file)
{
    register_op_load();
    onnx::ModelProto model;
    if (load_model_file(model_file, model) < 0)
        return -1;
    const onnx::GraphProto& onnx_graph = model.graph();
    if (load_initializer_tensor(graph, onnx_graph) < 0)
        return -1;
    if (load_constant_tensor(graph, onnx_graph) < 0)
        return -1;
    if (set_graph_input(graph, onnx_graph) < 0)
        return -1;
    if (load_graph_node(graph, onnx_graph) < 0)
        return -1;
    if (set_graph_output(graph, onnx_graph) < 0)
        return -1;

    return 0;
}
/*
 * Converts an ONNX model file into a Tengine IR graph.
 * Returns NULL on failure; the partially built graph is destroyed.
 * NOTE(review): on load failure only destroy_graph is called — presumably it
 * also releases the private context (private_context=1); confirm, otherwise
 * the context created here leaks on that path.
 */
graph_t onnx_serializer::onnx2tengine(std::string model_file)
{
    fprintf(stderr, "----------onnx2tengine begin----------\n");

    context_t context = create_context(NULL, 1);
    ir_graph_t* ir_graph = create_ir_graph((struct context*)context);
    if (ir_graph == NULL)
    {
        destroy_context(context);
        return NULL;
    }
    ir_graph->attribute->private_context = 1; // new context

    int ret = load_model(ir_graph, model_file);
    if (0 != ret)
    {
        destroy_graph(ir_graph);
        return NULL;
    }

    ir_graph->device = find_default_device();

    fprintf(stderr, "----------onnx2tengine done.----------\n");
    return ir_graph;
}
int load_conv(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Conv loader: map ONNX Conv attributes onto the tengine conv_param.
     * ONNX pads order is [h_begin, w_begin, h_end, w_end]; kernel, stride
     * and dilation attributes are (h, w) pairs. */
    struct conv_param* conv_param = ( struct conv_param* )node->op.param_mem;
    for (int k = 0; k < onnx_node.attribute_size(); k++)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(k);
        if (attr.name() == "kernel_shape")
        {
            conv_param->kernel_h = attr.ints(0);
            conv_param->kernel_w = attr.ints(1);
        }
        else if (attr.name() == "strides")
        {
            conv_param->stride_h = attr.ints(0);
            conv_param->stride_w = attr.ints(1);
        }
        else if (attr.name() == "pads")
        {
            conv_param->pad_h0 = attr.ints(0);
            conv_param->pad_h1 = attr.ints(2);
            conv_param->pad_w0 = attr.ints(1);
            conv_param->pad_w1 = attr.ints(3);
        }
        else if (attr.name() == "group")
        {
            conv_param->group = attr.i();
        }
        else if (attr.name() == "dilations")
        {
            conv_param->dilation_h = attr.ints(0);
            /* bugfix: width dilation must come from ints(1); it previously
             * reused ints(0), breaking convs with asymmetric dilation */
            conv_param->dilation_w = attr.ints(1);
        }
        else if (attr.name() == "auto_pad")
        {
            const std::string& auto_pad = attr.s();
            if (auto_pad == "NOTSET")
            {
                continue;
            }
            else if (auto_pad == "SAME_UPPER")
            {
                // ToDo
                TLOG_ERR("%s attr.name: %s :SAME_UPPER todo implement.\n", node->name, attr.name().c_str());
            }
            else if (auto_pad == "SAME_LOWER" || auto_pad == "VALID")
            {
                // ToDo
                TLOG_ERR("%s attr.name: %s :SAME_LOWER todo implement.\n", node->name, attr.name().c_str());
            }
            else
                TLOG_ERR("%s attr.name: %s : %s not support.\n", node->name, attr.name().c_str(), auto_pad.c_str());
        }
        else
            TLOG_ERR("%s attr.name: %s \n", node->name, attr.name().c_str());
    }
    struct tensor* weight = get_ir_graph_tensor(graph, node->input_tensors[1]);
    conv_param->output_channel = weight->dims[0]; /* onnx hide the output channel in weight .. */
    return 0;
}
int load_relu(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Plain ReLU: the slope of the negative half is fixed at zero. */
    struct relu_param* param = (struct relu_param*)node->op.param_mem;
    param->negative_slope = 0.f;
    return 0;
}
int load_pool(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    // Pooling loader: GlobalAveragePool maps to a global AVG pool; MaxPool and
    // AveragePool additionally read kernel/stride/pad attributes.
    // Returns -1 for any other pooling op name.
    struct pool_param* pool_param = ( struct pool_param* )node->op.param_mem;
    const std::string& onnx_op = onnx_node.op_type();
    if(onnx_op == "GlobalAveragePool")
    {
        pool_param->global = 1;
        pool_param->pool_method = POOL_AVG;
    }
    else if(onnx_op == "MaxPool" || onnx_op == "AveragePool")
    {
        pool_param->global = 0;
        if(onnx_op == "AveragePool")
            pool_param->pool_method = POOL_AVG;
        else
            pool_param->pool_method = POOL_MAX;
        for(int k = 0; k < onnx_node.attribute_size(); k++)
        {
            const onnx::AttributeProto& attr = onnx_node.attribute(k);
            if(attr.name() == "kernel_shape")
            {
                pool_param->kernel_h = attr.ints(0);
                pool_param->kernel_w = attr.ints(1);
            }
            else if(attr.name() == "strides")
            {
                pool_param->stride_h = attr.ints(0);
                pool_param->stride_w = attr.ints(1);
            }
            else if(attr.name() == "pads") /* onnx pads: x0_begin, x1_begin, ... , x0_end, x1_end, ... */
            {
                pool_param->pad_h0 = attr.ints(0);
                pool_param->pad_h1 = attr.ints(2);
                pool_param->pad_w0 = attr.ints(1);
                pool_param->pad_w1 = attr.ints(3);
                // NOTE(review): asymmetric (0,1) padding on both axes is taken as
                // caffe-style rounding — heuristic, confirm against exporters.
                if (pool_param->pad_h0 == 0 && pool_param->pad_h1 == 1 && pool_param->pad_w0 == 0 && pool_param->pad_w1 == 1)
                    pool_param->caffe_flavor = 1;
            }
        }
    }
    else
    {
        TLOG_ERR("UKNOWN POOLING: %s \n", onnx_op.c_str());
        return -1;
    }
    return 0;
}
int load_flatten(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Flatten: ONNX's default axis is 1; honour an explicit "axis" attribute
     * when it is the node's single attribute. */
    struct flatten_param* param = (struct flatten_param*)node->op.param_mem;
    param->axis = 1;
    if (onnx_node.attribute_size() == 1)
        param->axis = onnx_node.attribute(0).i();
    return 0;
}
int load_gemm(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    // Gemm loader: reads alpha/beta/transA/transB, then (when transA == 0)
    // rewrites the node into a fully-connected op, transposing the weight in
    // place when transB == 0 and folding alpha into the weights and beta into
    // the bias. When transA != 0 the node is left as a generic gemm.
    struct gemm_param* gemm_param = ( struct gemm_param* )node->op.param_mem;
    for (int k = 0; k < onnx_node.attribute_size(); k++)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(k);
        if (attr.name() == "alpha")
            gemm_param->alpha = attr.f();
        else if (attr.name() == "beta")
            gemm_param->beta = attr.f();
        else if (attr.name() == "transA")
            gemm_param->transA = attr.i();
        else if (attr.name() == "transB")
            gemm_param->transB = attr.i();
    }
    // NOTE(review): input_tensors[2] assumes the bias input C is always
    // present; ONNX Gemm allows C to be omitted — confirm upstream guarantees.
    ir_tensor_t* weight_tensor = get_ir_graph_tensor(graph, node->input_tensors[1]);
    ir_tensor_t* bias_tensor = get_ir_graph_tensor(graph, node->input_tensors[2]);
    if (gemm_param->transA)
    {
        // transA set: FC rewrite does not apply, keep the gemm op unchanged
        return 0;
    }
    // create fc instead
    if (!gemm_param->transB)
    {
        // swap shape: (k, n) -> (n, k), then transpose the data to match
        int k = weight_tensor->dims[0];
        int n = weight_tensor->dims[1];
        weight_tensor->dims[0] = n;
        weight_tensor->dims[1] = k;
        float* tmp = ( float* )sys_malloc(k * n * sizeof(float));
        float* data = ( float* )weight_tensor->data;
        for (int i = 0; i < n; i++)
            for (int j = 0; j < k; j++)
            {
                tmp[i * k + j] = data[j * n + i];
            }
        memcpy(data, tmp, n * k * sizeof(float));
        sys_free(tmp);
    }
    if (gemm_param->alpha != 1)
    {
        // fold alpha into the (already transposed) weights
        float* data = ( float* )weight_tensor->data;
        int tensor_size = weight_tensor->dims[0] * weight_tensor->dims[1];
        for (int i = 0; i < tensor_size; i++)
            data[i] *= gemm_param->alpha;
    }
    if (gemm_param->beta != 1)
    {
        // fold beta into the bias (length = output channel count)
        float* data = ( float* )bias_tensor->data;
        int tensor_size = weight_tensor->dims[0];
        for (int i = 0; i < tensor_size; i++)
            data[i] *= gemm_param->beta;
    }
    if (change_node_op(node, OP_FC) < 0)
        return -1;
    struct fc_param* fc_param = (struct fc_param*)node->op.param_mem;
    fc_param->num_output = weight_tensor->dims[0];
    return 0;
}
int load_concat(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Concat: pick up the "axis" attribute. */
    struct concat_param* param = (struct concat_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("axis" == attr.name())
            param->axis = attr.i();
    }
    return 0;
}
int load_bn(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* BatchNormalization: only the epsilon attribute maps onto tengine. */
    struct batchnorm_param* param = (struct batchnorm_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("epsilon" == attr.name())
            param->eps = attr.f();
    }
    return 0;
}
int load_eltwise(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Map an ONNX elementwise op name onto the tengine eltwise type enum.
     * Mul and Div share a fix-up: 0-d (scalar) const inputs are promoted to
     * rank-1 so downstream shape inference has a dimension to work with. */
    struct eltwise_param* param = (struct eltwise_param*)node->op.param_mem;
    const std::string& op_name = onnx_node.op_type();
    if (op_name == "Add")
        param->type = ELT_SUM;
    else if (op_name == "Mul" || op_name == "Div")
    {
        param->type = (op_name == "Mul") ? ELT_PROD : ELT_DIV;
        for (int i = 0; i < onnx_node.input().size(); ++i)
        {
            ir_tensor_t* tensor = find_tensor(graph, onnx_node.input(i));
            if (tensor->dim_num == 0)
            {
                /* scalar input: give it an explicit rank of 1 */
                tensor->dim_num = 1;
                tensor->dims[0] = 1;
            }
        }
    }
    else if (op_name == "Floor")
        param->type = ELT_FLOOR;
    else if (op_name == "Exp")
        param->type = ELT_EXP;
    else if (op_name == "Sub")
        param->type = ELT_SUB;
    else if (op_name == "Pow")
        param->type = ELT_POW;
    else if (op_name == "Sqrt")
        param->type = ELT_SQRT;
    return 0;
}
int load_transpose(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Transpose: copy the permutation list (the node's first attribute)
     * into tr_shape; the sys_malloc'ed buffer is owned by the op. */
    struct transpose_param* param = (struct transpose_param*)node->op.param_mem;
    const onnx::AttributeProto& perm = onnx_node.attribute(0);
    const int count = perm.ints_size();
    param->tr_shape_size = count;
    param->tr_shape = (int*)sys_malloc(sizeof(int) * count);
    for (int idx = 0; idx < count; ++idx)
        param->tr_shape[idx] = perm.ints(idx);
    return 0;
}
int load_clip(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Clip: read the optional "min"/"max" attributes (pre-opset-11 form). */
    struct clip_param* param = (struct clip_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if (attr.name() == "min")
            param->min = attr.f();
        else if (attr.name() == "max")
            param->max = attr.f();
    }
    return 0;
}
int load_reshape(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Reshape: the target shape is the second (const) input tensor,
     * stored as int64 and narrowed to int for the tengine param. */
    struct reshape_param* param = (struct reshape_param*)node->op.param_mem;
    ir_tensor_t* shape_tensor = find_tensor(graph, onnx_node.input(1));
    if (nullptr == shape_tensor)
    {
        fprintf(stderr, "find shape tensor of reshape node failed.\n");
        return -1;
    }
    param->is_onnx = 1;
    const int count = shape_tensor->elem_num;
    param->dim_size = count;
    param->re_shape = (int*)sys_malloc(sizeof(int) * count);
    const int64_t* shape_data = (const int64_t*)shape_tensor->data;
    for (int i = 0; i < count; ++i)
        param->re_shape[i] = shape_data[i];
    return 0;
}
int load_no_param(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    // Shared loader for ops that carry no parameters (e.g. Dropout):
    // nothing to parse, always succeeds.
    // no param
    return 0;
}
int load_softmax(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Softmax: take the optional "axis" attribute. */
    struct softmax_param* param = (struct softmax_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("axis" == attr.name())
            param->axis = attr.i();
    }
    return 0;
}
int load_elu(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Elu: take the optional "alpha" attribute. */
    struct elu_param* param = (struct elu_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("alpha" == attr.name())
            param->alpha = attr.f();
    }
    return 0;
}
int load_interp(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Upsample loader.
     *   mode == "nearest" -> the node is converted into an OP_RESIZE op;
     *   any other mode    -> the node stays an interp op; scales come from
     *                        the "scales" attribute (single-input form) or
     *                        from the const input(1) tensor. */
    std::string mode = "nearest";
    for (int k = 0; k < onnx_node.attribute_size(); k++)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(k);
        if (attr.name() == "mode")
        {
            mode = attr.s();
        }
    }
    if (mode != "nearest")
    {
        struct interp_param* interp_param = ( struct interp_param* )node->op.param_mem;
        if (onnx_node.input_size() == 1)
        {
            for (int k = 0; k < onnx_node.attribute_size(); k++)
            {
                const onnx::AttributeProto& attr = onnx_node.attribute(k);
                if (attr.name() == "scales")
                {
                    if (attr.floats_size() == 4)
                    {
                        /* scales are (n, c, h, w): normalise the spatial
                         * factors by the batch/channel factors */
                        float num0 = attr.floats(0);
                        float num1 = attr.floats(1);
                        float num2 = attr.floats(2);
                        float num3 = attr.floats(3);
                        interp_param->height_scale = num2 / num0;
                        interp_param->width_scale = num3 / num1;
                    }
                    else
                    {
                        interp_param->height_scale = attr.f();
                        interp_param->width_scale = attr.f();
                    }
                }
            }
        }
        else
        {
            /* scales supplied as a const input tensor: (n, c, h, w) floats */
            const std::string& input_name = onnx_node.input(1);
            ir_tensor_t* tensor = find_tensor(graph, input_name);
            float* data = ( float* )tensor->data;
            interp_param->height_scale = data[2];
            interp_param->width_scale = data[3];
        }
        /* bugfix: removed an unreachable `if (mode == "nearest")` branch here;
         * this whole path only runs when mode != "nearest" */
        if (mode == "bilinear" || mode == "linear")
        {
            interp_param->resize_type = 2;
        }
    }
    else
    {
        /* nearest mode: swap the op for OP_RESIZE and re-init its param block */
        sys_free(node->op.param_mem);
        int new_op_type = OP_RESIZE;
        node->op.type = new_op_type;
        ir_method_t* ir_method = find_op_method(new_op_type, OP_VERSION);
        if ((NULL != ir_method) && (NULL != ir_method->init) && (ir_method->init(&node->op) < 0))
        {
            return -1;
        }
        struct resize_param* resize_param = (struct resize_param*)node->op.param_mem;
        if (onnx_node.input_size() == 2)
        {
            const std::string& input_name = onnx_node.input(1);
            ir_tensor_t* tensor = find_tensor(graph, input_name);
            float* data = ( float* )tensor->data;
            resize_param->scale_h = data[2];
            resize_param->scale_w = data[3];
        }
        else
        {
            /* no scales input: default to identity scaling */
            resize_param->scale_w = 1.f;
            resize_param->scale_h = 1.f;
        }
    }
    return 0;
}
int load_leaky_relu(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* LeakyRelu: the first attribute ("alpha") is the negative-half slope. */
    struct relu_param* param = (struct relu_param*)node->op.param_mem;
    param->negative_slope = onnx_node.attribute(0).f();
    return 0;
}
int load_slice(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    // Slice loader. Two wire formats:
    //   * opset < 10: starts/ends/axes arrive as attributes (single input);
    //   * opset >= 10: they arrive as extra const input tensors.
    // Only the first element of each list is used, i.e. single-axis slices.
    struct slice_param* slice_param = ( struct slice_param* )node->op.param_mem;
    slice_param->step = 1;
    slice_param->axis = 0;
    slice_param->begin = 0;
    slice_param->end = -1;
    if (onnx_node.input_size() == 1)
    {
        for (int k = 0; k < onnx_node.attribute_size(); k++)
        {
            const onnx::AttributeProto& attr = onnx_node.attribute(k);
            if (attr.name() == "axes")
            {
                slice_param->axis = attr.ints(0);
            }
            else if (attr.name() == "ends")
            {
                // clamp to INT_MAX: exporters use huge sentinels for "to the end"
                long long end = attr.ints(0);
                if (end > INT_MAX)
                    end = INT_MAX;
                slice_param->end = ( int )end;
            }
            else if (attr.name() == "starts")
            {
                slice_param->begin = attr.ints(0);
            }
        }
    }
    else
    {
        // opset >= 10: input(1)=starts, input(2)=ends, input(3)=axes, input(4)=steps
        // NOTE(review): the tensors are assumed const with int64 payload — confirm.
        ir_tensor_t* node_tensor = nullptr;
        node_tensor = find_tensor(graph, onnx_node.input(1));
        slice_param->begin = (int)(*(int64_t*)(node_tensor->data));
        node_tensor = find_tensor(graph, onnx_node.input(2));
        slice_param->end = (int)(*(int64_t*)(node_tensor->data));
        if (onnx_node.input_size() >= 4)
        {
            node_tensor = find_tensor(graph, onnx_node.input(3));
            slice_param->axis = (int)(*(int64_t*)(node_tensor->data));
        }
        if (onnx_node.input_size() >= 5)
        {
            node_tensor = find_tensor(graph, onnx_node.input(4));
            slice_param->step = (int)(*(int64_t*)(node_tensor->data));
        }
    }
    slice_param->iscaffe = 0;
    slice_param->ismxnet = 0;
    slice_param->isonnx = 1;
    return 0;
}
int load_split(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Split: read "axis" and the optional per-output "split" sizes. */
    struct split_param* param = (struct split_param*)node->op.param_mem;
    param->is_onnx = true;
    param->is_caffe = false;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if (attr.name() == "axis")
        {
            param->axis = attr.i();
        }
        else if (attr.name() == "split")
        {
            const int count = attr.ints_size();
            /* split_sizes_ is a tengine vector owned by the op */
            struct vector* sizes = create_vector(sizeof(int), NULL);
            param->split_dim = count;
            for (int j = 0; j < count; ++j)
            {
                int piece = attr.ints(j);
                push_vector_data(sizes, &piece);
            }
            param->split_sizes_ = sizes;
        }
    }
    return 0;
}
int load_unsqueeze(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Unsqueeze: collect the "axes" attribute, sorted ascending, into the
     * param's sys_malloc'ed axis array (owned by the op). */
    struct unsqueeze_param* param = (struct unsqueeze_param*)node->op.param_mem;
    std::vector<int> axes;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if (attr.name() != "axes")
            continue;
        for (int j = 0; j < attr.ints_size(); ++j)
            axes.push_back(attr.ints(j));
    }
    std::sort(axes.begin(), axes.end());
    param->axises_size = axes.size();
    param->axises = (int*)sys_malloc(sizeof(int) * param->axises_size);
    for (size_t j = 0; j < axes.size(); ++j)
        param->axises[j] = axes[j];
    return 0;
}
int load_squeeze(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Squeeze: flag each axis (0..3) named in the "axes" attribute. */
    struct squeeze_param* param = (struct squeeze_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if (attr.name() != "axes")
            continue;
        for (int j = 0; j < attr.ints_size(); ++j)
        {
            switch (attr.ints(j))
            {
                case 0:
                    param->dim_0 = 1;
                    break;
                case 1:
                    param->dim_1 = 1;
                    break;
                case 2:
                    param->dim_2 = 1;
                    break;
                case 3:
                    param->dim_3 = 1;
                    break;
                default:
                    /* axes outside 0..3 are ignored, as before */
                    break;
            }
        }
    }
    return 0;
}
int load_matmul(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* MatMul with a 2-D constant weight is rewritten into a fully-connected
     * (OP_FC) node; the weight is transposed in place to FC's (n, k) layout.
     * Non-const or higher-rank weights keep the generic matmul op.
     * (An unused lookup of input(0) was removed.) */
    ir_tensor_t* weight_tensor = find_tensor(graph, onnx_node.input(1));
    if(2 == weight_tensor->dim_num && weight_tensor->tensor_type == TENSOR_TYPE_CONST)
    {
        // swap shape: (k, n) -> (n, k), then transpose the data to match
        int k = weight_tensor->dims[0];
        int n = weight_tensor->dims[1];
        weight_tensor->dims[0] = n;
        weight_tensor->dims[1] = k;
        float* tmp = ( float* )sys_malloc(k * n * sizeof(float));
        float* data = ( float* )weight_tensor->data;
        for (int i = 0; i < n; i++)
        {
            for (int j = 0; j < k; j++)
            {
                tmp[i * k + j] = data[j * n + i];
            }
        }
        memcpy(data, tmp, n * k * sizeof(float));
        /* bugfix: the buffer came from sys_malloc, so release it with
         * sys_free (was free(), mismatched with the project allocator) */
        sys_free(tmp);
        if (change_node_op(node, OP_FC) < 0)
            return -1;
        struct fc_param* fc_param = ( struct fc_param* )node->op.param_mem;
        fc_param->num_output = weight_tensor->dims[0];
    }
    return 0;
}
int load_reducel2(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* ReduceL2: one reduction axis plus the keepdims flag. Only the first
     * entry of "axes" is honoured (multi-axis support is an upstream TODO). */
    struct reducel2_param* param = (struct reducel2_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if (attr.name() == "axes")
            param->axis = attr.ints(0);
        else if (attr.name() == "keepdims")
            param->keepdim = attr.i();
    }
    return 0;
}
int load_gather(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    // Gather loader: reads "axis" and derives indices_num from the const
    // indices input tensor.
    struct gather_param* gather_param = ( struct gather_param* )node->op.param_mem;
    ir_tensor_t* indices_tensor = find_tensor(graph, onnx_node.input(1));
    for (int k = 0; k < onnx_node.attribute_size(); k++)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(k);
        if (attr.name() == "axis")
        {
            gather_param->axis = attr.i();
        }
    }
    // NOTE(review): only the first int64 of the indices tensor is stored, so
    // indices_num is effectively "the index" for scalar gathers — confirm
    // this is the intended contract for multi-element indices.
    int64_t* data = ( int64_t* )indices_tensor->data;
    gather_param->indices_num = *data;
    gather_param->is_onnx = 1;
    return 0;
}
int load_comparison(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Map the ONNX comparison op name onto the tengine comparison type. */
    struct comparison_param* param = (struct comparison_param*)node->op.param_mem;
    const std::string& op = onnx_node.op_type();
    if ("Greater" == op)
        param->type = COMP_GREATER;
    else if ("Equal" == op)
        param->type = COMP_EQUAL;
    else if ("Less" == op)
        param->type = COMP_LESS;
    return 0;
}
int load_LRN(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* LRN: alpha / beta / bias(k) / size map one-to-one onto the param. */
    struct lrn_param* param = (struct lrn_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        const std::string& key = attr.name();
        if (key == "alpha")
            param->alpha = attr.f();
        else if (key == "beta")
            param->beta = attr.f();
        else if (key == "bias")
            param->k = attr.f();
        else if (key == "size")
            param->local_size = attr.i();
    }
    return 0;
}
int load_unary(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Map an ONNX unary op name onto the tengine unary type code via a
     * small lookup table (same name -> code pairs as before). */
    struct unary_param* param = (struct unary_param*)node->op.param_mem;
    const std::string& op_name = onnx_node.op_type();
    static const char* const names[] = {"Abs", "Neg", "Ceil", "Log", "Cos", "Asin", "Acos", "Atan"};
    static const int codes[] = {0, 1, 3, 8, 10, 12, 13, 14};
    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); ++i)
    {
        if (op_name == names[i])
        {
            param->type = codes[i];
            break;
        }
    }
    return 0;
}
int load_logical(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Logical ops: And -> 0, Or -> 1 (tengine logical type codes). */
    struct logical_param* param = (struct logical_param*)node->op.param_mem;
    const std::string& op = onnx_node.op_type();
    if (op == "And")
        param->type = 0;
    else if (op == "Or")
        param->type = 1;
    return 0;
}
int load_pad(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    // Pad loader. Pads arrive either as attributes (opset < 11) or as const
    // input tensors (opset >= 11). ONNX pad layout for 4-d input is
    // [x0_begin, x1_begin, x2_begin, x3_begin, x0_end, x1_end, x2_end, x3_end].
    struct pad_param* pad_param = ( struct pad_param* )node->op.param_mem;
    if (onnx_node.attribute_size() == 1){ // since opset 11, 'pads' and 'value' have been moved from attributes to inputs
        const std::string& input_name_pad = onnx_node.input(1);
        ir_tensor_t* tensor_pad = find_tensor(graph, input_name_pad);
        int64_t* data_pad = ( int64_t * )tensor_pad->data;
        pad_param->pad_0_h = data_pad[0];
        pad_param->pad_0_w = data_pad[4];
        pad_param->pad_1_h = data_pad[1];
        pad_param->pad_1_w = data_pad[5];
        pad_param->pad_2_h = data_pad[2];
        pad_param->pad_2_w = data_pad[6];
        pad_param->pad_3_h = data_pad[3];
        pad_param->pad_3_w = data_pad[7];
        if (onnx_node.input_size() > 2)
        {
            // optional constant_value input
            const std::string& input_name_value = onnx_node.input(2);
            ir_tensor_t* tensor_value = find_tensor(graph, input_name_value);
            float* data_value = ( float * )tensor_value->data;
            pad_param->value = data_value[0];
        }
    }
    // attribute form (opset < 11): mode / pads / value
    for (int k = 0; k < onnx_node.attribute_size(); k++)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(k);
        if (attr.name() == "mode")
        {
            // 0 = constant, 1 = reflect, 2 = edge (any other string)
            if (attr.s() == "constant")
            {
                pad_param->mode = 0;
            }
            else if (attr.s() == "reflect")
            {
                pad_param->mode = 1;
            }
            else
            {
                pad_param->mode = 2;
            }
        }
        if (attr.name() == "pads")
        {
            pad_param->pad_0_h = attr.ints(0);
            pad_param->pad_0_w = attr.ints(4);
            pad_param->pad_1_h = attr.ints(1);
            pad_param->pad_1_w = attr.ints(5);
            pad_param->pad_2_h = attr.ints(2);
            pad_param->pad_2_w = attr.ints(6);
            pad_param->pad_3_h = attr.ints(3);
            pad_param->pad_3_w = attr.ints(7);
        }
        if (attr.name() == "value")
        {
            pad_param->value = attr.f();
        }
    }
    // NOTE(review): this block re-reads pads from input(1) whenever a second
    // input exists, overriding any attribute values above. The loop body does
    // not depend on i, so it repeats identical assignments `size` times and
    // does nothing at all when dims[0] == 0 — looks redundant; confirm intent
    // before simplifying.
    if(onnx_node.input_size() > 1){
        ir_tensor_t* shape_tensor = find_tensor(graph, onnx_node.input(1));
        int size = shape_tensor->dims[0];
        int64_t* data = ( int64_t* )shape_tensor->data;
        for (int i = 0; i < size; i++)
        {
            pad_param->pad_0_h = data[0];
            pad_param->pad_0_w = data[4];
            pad_param->pad_1_h = data[1];
            pad_param->pad_1_w = data[5];
            pad_param->pad_2_h = data[2];
            pad_param->pad_2_w = data[6];
            pad_param->pad_3_h = data[3];
            pad_param->pad_3_w = data[7];
        }
    }
    return 0;
}
/* Map a possibly-negative ONNX reduction axis onto a 4-d tengine axis. */
static int reduce_axis_to_dim4(int64_t axis)
{
    return axis < 0 ? (int)(4 + axis) : (int)axis;
}

int load_reduce(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Reduce* loader: translate the op name into the tengine reduction type,
     * then read "keepdims" and up to four "axes" entries (negative axes wrap
     * to 4 + axis; -2 marks an unused slot).
     * bugfix: negative values in axes[1..3] used to be normalised into dim_0
     * (attr_0) instead of their own slot, corrupting multi-axis reductions. */
    struct reduction_param* reduction_param = ( struct reduction_param* )node->op.param_mem;
    const std::string& op_name = onnx_node.op_type();
    if (op_name == "ReduceSum")
        reduction_param->type = 0;
    else if (op_name == "ReduceMean")
        reduction_param->type = 1;
    else if (op_name == "ReduceSumSquare")
        reduction_param->type = 3;
    else if (op_name == "ReduceMax")
        reduction_param->type = 4;
    else if (op_name == "ReduceMin")
        reduction_param->type = 5;
    else if (op_name == "ReduceProd")
        reduction_param->type = 6;
    else if (op_name == "ReduceLogSum")
        reduction_param->type = 9;
    else if (op_name == "ReduceLogSumExp")
        reduction_param->type = 10;
    reduction_param->dim_0 = -2;
    reduction_param->dim_1 = -2;
    reduction_param->dim_2 = -2;
    reduction_param->dim_3 = -2;
    reduction_param->keepdim = 1;
    int size = onnx_node.attribute_size();
    for (int i = 0; i < size; i++)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if (attr.name() == "keepdims")
        {
            reduction_param->keepdim = attr.i();
        }
        else if (attr.name() == "axes")
        {
            int axis_size = attr.ints_size();
            /* more than 4 axes is not representable; leave dims untouched
             * (matches the previous behaviour for that case) */
            if (axis_size >= 1 && axis_size <= 4)
            {
                int* dims[4] = {&reduction_param->dim_0, &reduction_param->dim_1,
                                &reduction_param->dim_2, &reduction_param->dim_3};
                for (int j = 0; j < axis_size; j++)
                    *dims[j] = reduce_axis_to_dim4(attr.ints(j));
            }
        }
    }
    return 0;
}
int load_argmax(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* ArgMax: optional "axis" (default 0) and "keepdims" attributes. */
    struct argmax_param* param = (struct argmax_param*)node->op.param_mem;
    param->axis = 0;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if (attr.name() == "axis")
            param->axis = attr.i();
        if (attr.name() == "keepdims")
            param->keepdims = attr.i();
    }
    return 0;
}
int load_argmin(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* ArgMin: optional "axis" (default 0) and "keepdims" attributes. */
    struct argmin_param* param = (struct argmin_param*)node->op.param_mem;
    param->axis = 0;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if (attr.name() == "axis")
            param->axis = attr.i();
        if (attr.name() == "keepdims")
            param->keepdims = attr.i();
    }
    return 0;
}
int load_log_softmax(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* LogSoftmax: optional "axis" attribute, ONNX default 1. */
    struct logsoftmax_param* param = (struct logsoftmax_param*)node->op.param_mem;
    param->axis = 1;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("axis" == attr.name())
            param->axis = attr.i();
    }
    return 0;
}
int load_deconv(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* ConvTranspose loader: parse attributes first, then take kernel size and
     * output-channel count from the weight tensor (dims override kernel_shape). */
    struct deconv_param* deconv_param = ( struct deconv_param* )node->op.param_mem;
    for (int k = 0; k < onnx_node.attribute_size(); k++)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(k);
        if (attr.name() == "kernel_shape")
        {
            deconv_param->kernel_h = attr.ints(0);
            deconv_param->kernel_w = attr.ints(1);
        }
        else if (attr.name() == "strides")
        {
            deconv_param->stride_h = attr.ints(0);
            deconv_param->stride_w = attr.ints(1);
        }
        else if (attr.name() == "output_padding")
        {
            deconv_param->output_pad_h0 = attr.ints(0);
            deconv_param->output_pad_w0 = attr.ints(1);
        }
        else if (attr.name() == "pads")
        {
            /* onnx pads order: h_begin, w_begin, h_end, w_end */
            deconv_param->pad_h0 = attr.ints(0);
            deconv_param->pad_h1 = attr.ints(2);
            deconv_param->pad_w0 = attr.ints(1);
            deconv_param->pad_w1 = attr.ints(3);
        }
        else if (attr.name() == "group")
        {
            deconv_param->group = attr.i();
        }
        else if (attr.name() == "dilations")
        {
            deconv_param->dilation_h = attr.ints(0);
            /* bugfix: width dilation must come from ints(1); it previously
             * reused ints(0), breaking asymmetric dilations */
            deconv_param->dilation_w = attr.ints(1);
        }
        else
            TLOG_ERR("attr.name: %s \n", attr.name().c_str());
    }
    /* update the input tensor data layout */
    for (int k = 0; k < onnx_node.input_size(); k++)
    {
        const std::string& input_name = onnx_node.input(k);
        ir_tensor_t* tensor = find_tensor(graph, input_name);
        if (k == 1) // weight
        {
            int* dim = tensor->dims;
            /* onnx hide the output channel in weight ..*/
            deconv_param->num_output = dim[1];
            deconv_param->kernel_h = dim[2];
            deconv_param->kernel_w = dim[3];
        }
    }
    return 0;
}
int load_scatter(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Scatter: optional "axis" attribute (default 0); mark the onnx flavour. */
    struct scatter_param* param = (struct scatter_param*)node->op.param_mem;
    param->axis = 0;
    param->is_onnx = 1;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("axis" == attr.name())
            param->axis = attr.i();
    }
    return 0;
}
int load_selu(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Selu: "alpha" and "gamma" (stored as lambda) attributes. */
    struct selu_param* param = (struct selu_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("alpha" == attr.name())
            param->alpha = attr.f();
        else if ("gamma" == attr.name())
            param->lambda = attr.f();
    }
    return 0;
}
int load_hard_sigmoid(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* HardSigmoid: "alpha" and "beta" attributes. */
    struct hard_sigmoid_param* param = (struct hard_sigmoid_param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("alpha" == attr.name())
            param->alpha = attr.f();
        else if ("beta" == attr.name())
            param->beta = attr.f();
    }
    return 0;
}
int load_tile(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    // Tile: only the onnx (frame) flavour flag is set here.
    // NOTE(review): the "repeats" input is not parsed in this loader —
    // presumably consumed during shape inference; confirm.
    struct tile_param* tile_param = ( struct tile_param* )node->op.param_mem;
    tile_param->frame_flag = 1;
    return 0;
}
int load_cast(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Cast: record the target type from "to"; the source type is fixed to 1. */
    struct cast_param* param = (struct cast_param*)node->op.param_mem;
    param->type_from = 1;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("to" == attr.name())
            param->type_to = attr.i();
    }
    return 0;
}
int load_depth_to_space(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* DepthToSpace: read the block size.
     * bugfix: the ONNX spec names this attribute "blocksize"; the original
     * code matched only "block_size" and therefore never picked it up for
     * spec-conforming models. Both spellings are accepted now. */
    struct depthtospace_param* depthtospace_param = ( struct depthtospace_param* )node->op.param_mem;
    for(int k = 0; k < onnx_node.attribute_size(); k++){
        const onnx::AttributeProto& attr = onnx_node.attribute(k);
        if(attr.name() == "blocksize" || attr.name() == "block_size"){
            depthtospace_param->block_size = attr.i();
        }
    }
    return 0;
}
int load_instance_norm(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* InstanceNormalization: only the epsilon attribute is consumed. */
    struct instancenorm_Param* param = (struct instancenorm_Param*)node->op.param_mem;
    for (int i = 0; i < onnx_node.attribute_size(); ++i)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(i);
        if ("epsilon" == attr.name())
            param->eps = attr.f();
    }
    return 0;
}
int load_resize(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Resize loader: scales come from the "scales" attribute (single input)
     * or from a const input tensor whose position depends on the opset:
     * input(1) for opset 10, input(2) for opset 11, input(3) when a sizes
     * input is also present. Const scales layout is (n, c, h, w) floats. */
    struct interp_param* interp_param = ( struct interp_param* )node->op.param_mem;
    if(onnx_node.input_size() == 1)
    {
        for(int k = 0; k < onnx_node.attribute_size(); k++)
        {
            const onnx::AttributeProto& attr = onnx_node.attribute(k);
            if(attr.name() == "scales")
            {
                interp_param->height_scale = attr.f();
                interp_param->width_scale = attr.f();
            }
        }
    }
    else if(onnx_node.input_size() == 2) // opset 10
    {
        const std::string& input_name = onnx_node.input(1);
        ir_tensor_t* tensor = find_tensor(graph, input_name);
        float* data = ( float* )tensor->data;
        interp_param->height_scale = data[2];
        interp_param->width_scale = data[3];
    }
    else if(onnx_node.input_size() == 3) // opset 11
    {
        const std::string& input_name = onnx_node.input(2);
        ir_tensor_t* tensor = find_tensor(graph, input_name);
        float* data = ( float* )tensor->data;
        interp_param->height_scale = data[2];
        interp_param->width_scale = data[3];
    }
    else if (onnx_node.input_size() == 4)
    {
        const std::string& input_name = onnx_node.input(3);
        ir_tensor_t* tensor = find_tensor(graph, input_name);
        float* data = ( float* )tensor->data;
        interp_param->height_scale = data[2];
        interp_param->width_scale = data[3];
    }
    else
    {
        /* bugfix: the old message said "> 3" although 4 inputs are handled */
        fprintf(stderr, "Not support the num of inputs > 4, please check the onnx model or update the codes of convert tool\n");
        return -1;
    }
    std::string mode = "nearest";
    for(int k = 0; k < onnx_node.attribute_size(); k++)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(k);
        if(attr.name() == "mode")
            mode = attr.s();
    }
    /* resize_type: 1 = nearest, 2 = (bi)linear */
    if (mode == "nearest")
        interp_param->resize_type = 1;
    else if (mode == "bilinear" || mode == "linear")
        interp_param->resize_type = 2;
    return 0;
}
int load_LSTM(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* LSTM loader: only "hidden_size" is consumed (used for both hidden and
     * cell size). bugfix: the size variable was read uninitialized when the
     * attribute was absent; it now defaults to 0. */
    struct lstm_param* lstm_param = ( struct lstm_param* )node->op.param_mem;
    int hidden_size = 0;
    for(int k = 0; k < onnx_node.attribute_size(); k++)
    {
        const onnx::AttributeProto& attr = onnx_node.attribute(k);
        if(attr.name() == "hidden_size")
            hidden_size = attr.i();
        /* NOTE(review): the "direction" attribute was parsed into a local that
         * was never used, so bidirectional models load as forward-only.
         * The dead local was removed; confirm direction handling upstream. */
    }
    lstm_param->mxnet_flag = 0;
    lstm_param->hidden_size = hidden_size;
    lstm_param->cell_size = hidden_size;
    return 0;
}
int load_expand(ir_graph_t* graph, ir_node_t* node, const onnx::NodeProto& onnx_node)
{
    /* Expand: the target shape is the second (const) input tensor,
     * stored as int64 and narrowed to int for the tengine param. */
    struct expand_param* param = (struct expand_param*)node->op.param_mem;
    ir_tensor_t* shape_tensor = find_tensor(graph, onnx_node.input(1));
    if (nullptr == shape_tensor)
    {
        fprintf(stderr, "find shape tensor of expand node failed.\n");
        return -1;
    }
    const int count = shape_tensor->elem_num;
    param->dim_num = count;
    param->ex_shape = (int*)sys_malloc(sizeof(int) * count);
    const int64_t* shape_data = (const int64_t*)shape_tensor->data;
    for (int i = 0; i < count; ++i)
        param->ex_shape[i] = shape_data[i];
    return 0;
}
/*
 * OPERATOR REGISTER FUNCTION DEFINE FOR ONNX SERIALIZER START
 */
void onnx_serializer::register_op_load()
{
    // Populate the ONNX-op-name -> (internal op type, loader function) table.
    // Several ONNX ops share one internal op type and loader (e.g. OP_UNARY,
    // OP_ELTWISE, OP_REDUCTION); the loader disambiguates by op name/attrs.
    op_load_map["Abs"] = std::pair<int, op_load_t>(OP_UNARY, load_unary);
    op_load_map["Acos"] = std::pair<int, op_load_t>(OP_UNARY, load_unary);
    op_load_map["And"] = std::pair<int, op_load_t>(OP_LOGICAL, load_logical);
    op_load_map["ArgMax"] = std::pair<int, op_load_t>(OP_ARGMAX, load_argmax);
    op_load_map["ArgMin"] = std::pair<int, op_load_t>(OP_ARGMIN, load_argmin);
    op_load_map["Asin"] = std::pair<int, op_load_t>(OP_UNARY, load_unary);
    op_load_map["Atan"] = std::pair<int, op_load_t>(OP_UNARY, load_unary);
    op_load_map["AveragePool"] = std::pair<int, op_load_t>(OP_POOL, load_pool);
    op_load_map["Add"] = std::pair<int, op_load_t>(OP_ELTWISE, load_eltwise);
    op_load_map["BatchNormalization"] = std::pair<int, op_load_t>(OP_BATCHNORM, load_bn);
    op_load_map["Conv"] = std::pair<int, op_load_t>(OP_CONV, load_conv);
    op_load_map["ConvTranspose"] = std::pair<int, op_load_t>(OP_DECONV, load_deconv);
    op_load_map["Concat"] = std::pair<int, op_load_t>(OP_CONCAT, load_concat);
    op_load_map["Clip"] = std::pair<int, op_load_t>(OP_CLIP, load_clip);
    op_load_map["Ceil"] = std::pair<int, op_load_t>(OP_UNARY, load_unary);
    op_load_map["Cos"] = std::pair<int, op_load_t>(OP_UNARY, load_unary);
    op_load_map["Cast"] = std::pair<int, op_load_t>(OP_CAST, load_cast);
    // Dropout is a no-op at inference time, hence load_no_param.
    op_load_map["Dropout"] = std::pair<int, op_load_t>(OP_DROPOUT, load_no_param);
    op_load_map["DepthToSpace"] = std::pair<int, op_load_t>(OP_DEPTHTOSPACE, load_depth_to_space);
    op_load_map["Div"] = std::pair<int, op_load_t>(OP_ELTWISE, load_eltwise);
    op_load_map["Elu"] = std::pair<int, op_load_t>(OP_ELU, load_elu);
    op_load_map["Exp"] = std::pair<int, op_load_t>(OP_ELTWISE, load_eltwise);
    op_load_map["Equal"] = std::pair<int, op_load_t>(OP_COMPARISON, load_comparison);
    op_load_map["Flatten"] = std::pair<int, op_load_t>(OP_FLATTEN, load_flatten);
    op_load_map["Floor"] = std::pair<int, op_load_t>(OP_ELTWISE, load_eltwise);
    op_load_map["Gemm"] = std::pair<int, op_load_t>(OP_GEMM, load_gemm);
    op_load_map["Gather"] = std::pair<int, op_load_t>(OP_GATHER, load_gather);
    op_load_map["Greater"] = std::pair<int, op_load_t>(OP_COMPARISON, load_comparison);
    op_load_map["GlobalAveragePool"] = std::pair<int, op_load_t>(OP_POOL, load_pool);
    op_load_map["HardSwish"] = std::pair<int, op_load_t>(OP_HARDSWISH, load_no_param);
    op_load_map["HardSigmoid"] = std::pair<int, op_load_t>(OP_HARDSIGMOID, load_hard_sigmoid);
    op_load_map["InstanceNormalization"] = std::pair<int, op_load_t>(OP_INSTANCENORM, load_instance_norm);
    op_load_map["Log"] = std::pair<int, op_load_t>(OP_UNARY, load_unary);
    op_load_map["LRN"] = std::pair<int, op_load_t>(OP_LRN, load_LRN);
    op_load_map["Less"] = std::pair<int, op_load_t>(OP_COMPARISON, load_comparison);
    op_load_map["LSTM"] = std::pair<int, op_load_t>(OP_LSTM, load_LSTM);
    op_load_map["LeakyRelu"] = std::pair<int, op_load_t>(OP_RELU, load_leaky_relu);
    op_load_map["LogSoftmax"] = std::pair<int, op_load_t>(OP_LOGSOFTMAX, load_log_softmax);
    op_load_map["Mul"] = std::pair<int, op_load_t>(OP_ELTWISE, load_eltwise);
    op_load_map["Max"] = std::pair<int, op_load_t>(OP_MAXIMUM, load_no_param);
    op_load_map["Min"] = std::pair<int, op_load_t>(OP_MINIMUM, load_no_param);
    op_load_map["Mean"] = std::pair<int, op_load_t>(OP_MEAN, load_no_param);
    op_load_map["MatMul"] = std::pair<int, op_load_t>(OP_MATMUL, load_matmul);
    op_load_map["MaxPool"] = std::pair<int, op_load_t>(OP_POOL, load_pool);
    op_load_map["Neg"] = std::pair<int, op_load_t>(OP_UNARY, load_unary);
    op_load_map["Or"] = std::pair<int, op_load_t>(OP_LOGICAL, load_logical);
    op_load_map["Pad"] = std::pair<int, op_load_t>(OP_PAD, load_pad);
    op_load_map["Pow"] = std::pair<int, op_load_t>(OP_ELTWISE, load_eltwise);
    op_load_map["PRelu"] = std::pair<int, op_load_t>(OP_PRELU, load_no_param);
    op_load_map["Relu"] = std::pair<int, op_load_t>(OP_RELU, load_relu);
    // Resize and Upsample both map to OP_INTERP, via different loaders.
    op_load_map["Resize"] = std::pair<int, op_load_t>(OP_INTERP, load_resize);
    op_load_map["Reshape"] = std::pair<int, op_load_t>(OP_RESHAPE, load_reshape);
    op_load_map["ReduceL2"] = std::pair<int, op_load_t>(OP_REDUCEL2, load_reducel2);
    op_load_map["ReduceMean"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduce);
    op_load_map["ReduceLogSumExp"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduce);
    op_load_map["ReduceLogSum"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduce);
    op_load_map["ReduceMax"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduce);
    op_load_map["ReduceMin"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduce);
    op_load_map["ReduceProd"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduce);
    op_load_map["ReduceSumSquare"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduce);
    op_load_map["ReduceSum"] = std::pair<int, op_load_t>(OP_REDUCTION, load_reduce);
    op_load_map["Reciprocal"] = std::pair<int, op_load_t>(OP_RECIPROCAL, load_no_param);
    op_load_map["Sub"] = std::pair<int, op_load_t>(OP_ELTWISE, load_eltwise);
    op_load_map["Selu"] = std::pair<int, op_load_t>(OP_SELU, load_selu);
    op_load_map["Sqrt"] = std::pair<int, op_load_t>(OP_ELTWISE, load_eltwise);
    op_load_map["Slice"] = std::pair<int, op_load_t>(OP_SLICE, load_slice);
    op_load_map["Split"] = std::pair<int, op_load_t>(OP_SPLIT, load_split);
    op_load_map["Shape"] = std::pair<int, op_load_t>(OP_SHAPE, load_no_param);
    op_load_map["Squeeze"] = std::pair<int, op_load_t>(OP_SQUEEZE, load_squeeze);
    op_load_map["Scatter"] = std::pair<int, op_load_t>(OP_SCATTER, load_scatter);
    op_load_map["Sigmoid"] = std::pair<int, op_load_t>(OP_SIGMOID, load_no_param);
    op_load_map["Softmax"] = std::pair<int, op_load_t>(OP_SOFTMAX, load_softmax);
    op_load_map["Softplus"] = std::pair<int, op_load_t>(OP_SOFTPLUS, load_no_param);
    op_load_map["Tanh"] = std::pair<int, op_load_t>(OP_TANH, load_no_param);
    op_load_map["Tile"] = std::pair<int, op_load_t>(OP_TILE, load_tile);
    op_load_map["Transpose"] = std::pair<int, op_load_t>(OP_TRANSPOSE, load_transpose);
    op_load_map["Upsample"] = std::pair<int, op_load_t>(OP_INTERP, load_interp);
    op_load_map["Unsqueeze"] = std::pair<int, op_load_t>(OP_UNSQUEEZE, load_unsqueeze);
    op_load_map["Where"] = std::pair<int, op_load_t>(OP_WHERE, load_no_param);
    op_load_map["Expand"] = std::pair<int, op_load_t>(OP_EXPAND, load_expand);
}
/*
* OPERAOTR REGISTER FUNCTION DEFINE FOR ONNX SERIALIZER END
*/ |
from igrill import IGrillHandler
from tokencube import TokenCubeHandler
class IoTDeviceManager:
    """Track configured BLE sensor devices and their live connections.

    ``device_settings`` maps a device address to a dict whose ``"device"``
    key names the device type (``"iGrill Mini"`` or ``"TokenCube"``).
    """

    def __init__(self, device_settings):
        # address -> settings dict (static configuration)
        self.device_settings = device_settings
        # address -> live handler instance (populated by connect_device)
        self.connected_devices = {}

    def connect_device(self, device_addr):
        """Look up device_addr in the settings and open a connection to it."""
        settings = self.device_settings.get(device_addr)
        if settings is None:
            print("Device not found in settings")
            return
        kind = settings["device"]
        if kind == "iGrill Mini":
            handler = IGrillHandler(device_addr)
        elif kind == "TokenCube":
            handler = TokenCubeHandler(device_addr)
        else:
            print("Unsupported device type")
            return
        handler.connect()
        self.connected_devices[device_addr] = handler

    def get_device_data(self, device_addr):
        """Return the latest reading from a connected device, or None."""
        handler = self.connected_devices.get(device_addr)
        if handler is None:
            print("Device not connected")
            return None
        return handler.get_data()

    def disconnect_device(self, device_addr):
        """Close and forget the connection for device_addr, if present."""
        if device_addr not in self.connected_devices:
            print("Device not connected")
            return
        self.connected_devices[device_addr].disconnect()
        del self.connected_devices[device_addr]
package edu.miracosta.cs113.test;
import static org.junit.Assert.*;
import java.util.function.BiPredicate;
import static org.hamcrest.core.Is.*;
import org.junit.Test;
import static edu.miracosta.cs113.Palindrome.*;
public class PalindromeTest
{
    /**
     * Exercises both isPalindrome overloads: the plain-string form and the
     * form taking a custom equality predicate plus a "skip this char" predicate.
     */
    @Test
    public void testIsPalindrome ()
    {
        // Compare underlying char values. The original lambda used `c1 == c2`
        // on boxed Characters, which compares references and is only reliable
        // for values in the Character cache (code points 0-127).
        BiPredicate<Character, Character> charEquals = (c1, c2) -> c1.charValue () == c2.charValue ();

        // Plain form: empty string and even/odd-length palindromes.
        assertThat (isPalindrome (""), is (true));
        assertThat (isPalindrome ("aa"), is (true));
        assertThat (isPalindrome ("aba"), is (true));
        assertThat (isPalindrome ("abba"), is (true));
        assertThat (isPalindrome ("ab"), is (false));
        assertThat (isPalindrome ("aab"), is (false));

        // Predicate form: whitespace is skipped before comparing.
        assertThat (isPalindrome ("a a", charEquals, Character::isWhitespace), is (true));
        assertThat (isPalindrome ("a b a", charEquals, Character::isWhitespace), is (true));
        assertThat (isPalindrome ("a ba", charEquals, Character::isWhitespace), is (true));
    }
}
|
<filename>src/test/java/org/thymeleaf/templateengine/processors/dialects/remove/RemoveDialect.java
/*
* =============================================================================
*
* Copyright (c) 2011-2016, The THYMELEAF team (http://www.thymeleaf.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* =============================================================================
*/
package org.thymeleaf.templateengine.processors.dialects.remove;
import java.util.HashSet;
import java.util.Set;
import org.thymeleaf.dialect.AbstractProcessorDialect;
import org.thymeleaf.processor.IProcessor;
public class RemoveDialect extends AbstractProcessorDialect {

    public static final String PREFIX = "precedence";

    public RemoveDialect() {
        // Dialect name is the simple class name; precedence is fixed at 1000.
        super(RemoveDialect.class.getSimpleName(), PREFIX, 1000);
    }

    public Set<IProcessor> getProcessors(final String dialectPrefix) {
        // One removal processor per template event kind; set order is irrelevant.
        final Set<IProcessor> result = new HashSet<IProcessor>();
        result.add(new RemoveTextProcessor());
        result.add(new RemoveCommentProcessor());
        result.add(new RemoveCDATASectionProcessor());
        result.add(new RemoveDocTypeProcessor());
        result.add(new RemoveXMLDeclarationProcessor());
        result.add(new RemoveProcessingInstructionProcessor());
        return result;
    }
}
|
<filename>timegloves-openstm32-project/inc/common.h
#ifndef COMMON_H_
#define COMMON_H_

/* Clear the SELECT bits of REG and install VAL in their place.
 * Wrapped in do { } while (0) so the macro expands to a single statement:
 * the original `{ ... };` form left a stray semicolon that breaks an
 * unbraced if/else around a SET_REG(...); call. */
#define SET_REG(REG,SELECT,VAL) do { (REG) = ((REG) & (~(SELECT))) | (VAL); } while (0)

/* Busy-wait for roughly n microseconds (implementation elsewhere). */
void delay_us(int n);

#endif
|
class BankAccount:
    """A minimal account wrapping one mutable balance value.

    No overdraft or negative-amount checks are performed; the balance may
    go negative.
    """

    def __init__(self, initial_balance):
        # Running balance, updated in place by deposit/withdraw.
        self.balance = initial_balance

    def deposit(self, amount):
        """Increase the balance by ``amount``."""
        self.balance = self.balance + amount

    def withdraw(self, amount):
        """Decrease the balance by ``amount`` (no sufficiency check)."""
        self.balance = self.balance - amount

    def get_balance(self):
        """Return the current balance."""
        return self.balance
def perform_transactions():
    """Run a fixed demo sequence of deposits/withdrawals and print the result."""
    account = BankAccount(1000)
    # (method name, amount) pairs applied in order.
    for op, amount in (("deposit", 500), ("withdraw", 200),
                       ("deposit", 1000), ("withdraw", 700)):
        getattr(account, op)(amount)
    print("Final balance:", account.get_balance())

perform_transactions()
<gh_stars>0
#include "systems/ParticleSystem.hh"
//#include <glow/objects/Program.hh>
//#include <glow/objects/ArrayBuffer.hh>
//#include <glow/objects/VertexArray.hh>
#include <glow/objects.hh>
#include "typed-geometry/types/vec.hh"
#include "typed-geometry/types/pos.hh"
#include "typed-geometry/types/mat.hh"
#include "utility/Random.hh"
#include "Mesh3D.hh"
void gamedev::ParticleSystem::AddEntity(InstanceHandle& handle, Signature entitySignature)
{
    // Only track entities that carry a Render component. Non-rendered
    // entities are removed and NOT re-inserted. (The original fell through
    // and inserted the handle unconditionally, which made the erase a no-op
    // and contradicted the "only include rendered systems" intent.)
    if (!mECS->TestSignature<Render>(handle))
    {
        mEntities.erase(handle);
        return;
    }
    mEntities.insert(handle);
}
void gamedev::ParticleSystem::Init(SharedEngineECS& ecs)
{
    // One-time setup: load the cube mesh that every particle instances,
    // create the per-particle attribute buffer, build the particle
    // templates, and subscribe to every event that spawns a burst.
    Mesh3D cube;
    cube.loadFromFile("../data/meshes/cube.obj", true);
    mECS = ecs;
    mVaoCube = cube.createBasicVertexArray();
    mVboParticles = glow::ArrayBuffer::create();
    SetupParticleCount(MAX_PARTICLES);
    // Templates consumed by ParticleListener below.
    Init_MonsterDeathParticles();
    Init_PioneerDeathParticles();
    Init_BuildingDestroyedParticles();
    mECS->AddFunctionalListener(FUNCTIONAL_LISTENER(EventType::MonsterDeath, ParticleSystem::ParticleListener));
    mECS->AddFunctionalListener(FUNCTIONAL_LISTENER(EventType::PioneerDeath, ParticleSystem::ParticleListener));
    mECS->AddFunctionalListener(FUNCTIONAL_LISTENER(EventType::BuildingDestroyed, ParticleSystem::ParticleListener));
    mECS->AddFunctionalListener(FUNCTIONAL_LISTENER(EventType::BuildingHit, ParticleSystem::ParticleListener));
    mECS->AddFunctionalListener(FUNCTIONAL_LISTENER(EventType::PioneerHit, ParticleSystem::ParticleListener));
    mECS->AddFunctionalListener(FUNCTIONAL_LISTENER(EventType::MonsterHit, ParticleSystem::ParticleListener));
}
void gamedev::ParticleSystem::SetupParticleCount(int particleCount)
{
    // (Re)size the CPU-side particle pool and (re)declare the per-particle
    // vertex attributes on the instancing VBO.
    MAX_PARTICLES = particleCount;
    mParticlePool.resize(MAX_PARTICLES);
    //mShadowParticlePool.resize(MAX_PARTICLES);
    auto vao = mVaoCube->bind();
    auto vbo = mVboParticles->bind();
    // NOTE(review): the trailing `1` is presumably the instancing divisor
    // (one attribute value per instance) -- confirm against glow's API.
    mVboParticles->defineAttribute(&ParticleAttributes::pos, "aTranslation", glow::AttributeMode::Float, 1);
    mVboParticles->defineAttribute(&ParticleAttributes::color, "aColor", glow::AttributeMode::Float, 1);
    mVboParticles->defineAttribute(&ParticleAttributes::rotation, "aRotation", glow::AttributeMode::Float, 1);
    mVboParticles->defineAttribute(&ParticleAttributes::scale, "aScale", glow::AttributeMode::Float, 1);
    mVboParticles->defineAttribute(&ParticleAttributes::blend, "aBlend", glow::AttributeMode::Float, 1);
    vao.attach(mVboParticles);
}
void gamedev::ParticleSystem::UploadParticleData()
{
    // Stream this frame's packed attribute array (built in Update) to the GPU.
    auto vbo = mVboParticles->bind();
    vbo.setData(mParticleAttributes, GL_STREAM_DRAW);
}
int gamedev::ParticleSystem::Update(float elapsed_time)
{
    // Advances all emitters and live particles; returns the wall-clock cost
    // of this update in microseconds (profiling aid).
    auto t0 = std::chrono::steady_clock::now();

    // Rebuilt from scratch every frame and uploaded by UploadParticleData().
    mParticleAttributes.clear();

    // Create new particles for all emitters.
    for (const auto& handle : mEntities)
    {
        auto& xform = mECS->GetInstance(handle).xform;
        auto* particleEmitter = mECS->GetComponent<ParticleEmitter>(handle);
        for (auto& pp : particleEmitter->pp)
        {
            // Accumulate fractional emissions across frames.
            // (Removed an unused alias of particleEmitter->pp here.)
            pp.emitNew += elapsed_time * pp.particlesPerSecond;
            for (auto i = 0; i < tg::floor(pp.emitNew); i++)
            {
                EmitParticle(pp, xform);
            }
            // NOTE(review): resetting to 0 discards the fractional remainder
            // instead of subtracting the emitted count, slightly
            // under-emitting -- confirm this is intended.
            if (pp.emitNew > 1.0)
                pp.emitNew = 0.0;
        }
    }

    // Update all living particles; the pool is packed so [0, mAlive) is live.
    for (auto id = 0; id < mAlive; id++)
    {
        auto& p = mParticlePool[id];
        if (p.lifeRemaining <= 0.0f)
        {
            // KillParticle swaps the last live particle into this slot, so
            // rewind the index to process the swapped-in particle this frame.
            // (The original `continue` without rewinding skipped it.)
            KillParticle(id);
            id--;
            continue;
        }
        p.lifeRemaining -= elapsed_time;
        p.position += p.velocity * elapsed_time;
        p.rotation_y += 0.01f * elapsed_time;
        float life = p.lifeRemaining / p.lifeTime;
        float scale = p.size_t0;
        // (Removed an unused per-particle model matrix computation here.)
        mParticleAttributes.push_back({p.position, 0.0, p.color, 0.0, p.rotation_y, scale, life});
    }
    auto tn = std::chrono::steady_clock::now();
    return std::chrono::duration_cast<std::chrono::microseconds>(tn - t0).count();
}
void gamedev::ParticleSystem::RenderInstanced(const glow::UsedProgram& shader, const glow::SharedFramebuffer& framebuffer)
{
    // Upload this frame's particle attributes and draw all live particles
    // with one instanced call into the given framebuffer.
    // NOTE(review): `shader` is not referenced here -- presumably it only
    // needs to be bound by the caller; confirm.
    auto fb = framebuffer->bind();
    UploadParticleData();
    mVaoCube->bind().draw(mAlive);
}
// Number of currently live particles.
long int gamedev::ParticleSystem::GetParticleCount() { return mAlive; }
// Shared vertex array (cube mesh) used for instanced particle rendering.
glow::SharedVertexArray& gamedev::ParticleSystem::GetParticleVAO() { return mVaoCube; }
void gamedev::ParticleSystem::EmitParticle(const ParticleProperties& particleProperties, transform& xform)
{
    // Activate the next free pool slot and initialize it from the template,
    // randomizing position/color/velocity/lifetime/size within the "vary"
    // ranges. (RandomFloat() - 0.5f gives a symmetric offset.)
    Particle& particle = mParticlePool[mFreeIndex];
    particle.active = true;
    //particle.position = affine_to_mat4(xform.transform_mat()) * particleProperties.basePosition;
    //auto vary = affine_to_mat4(xform.transform_mat()) * particleProperties.varyPosition;
    particle.position = affine_to_mat4(xform.transform_mat()) * particleProperties.basePosition;
    // Positional jitter is scaled by the emitter's scale only (not rotated).
    auto vary = xform.scale_mat() * particleProperties.varyPosition;
    particle.position.x += vary.x * (RandomFloat() - 0.5f);
    particle.position.y += vary.y * (RandomFloat() - 0.5f);
    particle.position.z += vary.z * (RandomFloat() - 0.5f);
    particle.color = tg::vec3(particleProperties.baseColor);
    particle.color = tg::saturate(particle.color + tg::vec3(particleProperties.varyColor) * (RandomFloat() - 0.5f));
    particle.rotation_y = RandomFloat() * 2 * tg::pi_scalar<float>;
    particle.velocity = particleProperties.baseVelocity;
    particle.velocity.x += particleProperties.varyVelocity.x * (RandomFloat() - 0.5f);
    particle.velocity.y += particleProperties.varyVelocity.y * (RandomFloat() - 0.5f);
    particle.velocity.z += particleProperties.varyVelocity.z * (RandomFloat() - 0.5f);
    particle.velocity *= xform.scaling;
    particle.lifeTime = particleProperties.baseLife;
    particle.lifeTime += particleProperties.varyLife * (RandomFloat() - 0.5f);
    particle.lifeRemaining = particle.lifeTime;
    // Particle size follows the emitter's largest axis scale.
    float scale = tg::max(xform.scaling.depth, xform.scaling.height, xform.scaling.width);
    particle.size_t0 = scale * particleProperties.baseSize;
    particle.size_t0 -= particleProperties.varySize * RandomFloat();
    mFreeIndex++;
    mAlive++;
    // Grow the pool by 20% once 80% full. `particle` must not be touched
    // after this point: SetupParticleCount resizes mParticlePool, which
    // would invalidate the reference.
    if (mAlive >= MAX_PARTICLES * 0.8)
    {
        glow::error() << "Too many particles, need to increase storage!";
        MAX_PARTICLES += MAX_PARTICLES * 0.2;
        SetupParticleCount(MAX_PARTICLES);
    }
}
void gamedev::ParticleSystem::KillParticle(int id)
{
    // Swap-and-pop: move the last live particle into the freed slot so the
    // live range [0, mAlive) stays contiguous, then shrink the counters.
    mParticlePool[id].active = false;
    std::swap(mParticlePool[id], mParticlePool[mAlive - 1]);
    mAlive--;
    mFreeIndex--;
}
void gamedev::ParticleSystem::ParticleListener(Event& e)
{
    // Map each event type to a particle template and a burst size, then
    // emit that many particles at the event subject's transform.
    auto& instance = mECS->GetInstance(e.mSubject);

    const ParticleProperties* props = nullptr;
    int burst = 0;
    if (e.mType == EventType::MonsterDeath)           { props = &mMonsterDeath;      burst = 20; }
    else if (e.mType == EventType::PioneerDeath)      { props = &mPioneerDeath;      burst = 40; }
    else if (e.mType == EventType::BuildingDestroyed) { props = &mBuildingDestroyed; burst = 20; }
    else if (e.mType == EventType::MonsterHit)        { props = &mMonsterDeath;      burst = 3; }
    else if (e.mType == EventType::PioneerHit)        { props = &mPioneerDeath;      burst = 10; }
    else if (e.mType == EventType::BuildingHit)       { props = &mBuildingDestroyed; burst = 2; }

    if (props != nullptr)
    {
        for (int i = 0; i < burst; i++)
            EmitParticle(*props, instance.xform);
    }
}
void gamedev::ParticleSystem::Init_MonsterDeathParticles()
{
    // Template for monster death/hit bursts: dark, upward-drifting cubes.
    mMonsterDeath.baseColor = tg::color3::black;
    mMonsterDeath.baseLife = 2.f;
    mMonsterDeath.basePosition = tg::pos3::zero;
    mMonsterDeath.baseSize = 0.3f;
    mMonsterDeath.baseVelocity = tg::vec3::unit_y * 0.6f;
    // "vary" fields are symmetric jitter ranges applied in EmitParticle.
    mMonsterDeath.varyColor = tg::color3::white * 0.2f;
    mMonsterDeath.varyLife = 0.2f;
    mMonsterDeath.varyPosition = tg::vec3(0.5, 0.5, 0.5);
    mMonsterDeath.varySize = 0.1f;
    mMonsterDeath.varyVelocity = tg::vec3(0.3f, 0.1f, 0.3f);
}
void gamedev::ParticleSystem::Init_PioneerDeathParticles()
{
    // Template for pioneer death/hit bursts: small red, slowly falling cubes.
    mPioneerDeath.baseColor = tg::color3::red;
    mPioneerDeath.baseLife = 1.3f;
    mPioneerDeath.basePosition = tg::pos3(0, 0.3f, 0);
    mPioneerDeath.baseSize = 0.1f;
    mPioneerDeath.baseVelocity = tg::vec3(0.0f, -0.1f, 0.0f);
    // "vary" fields are symmetric jitter ranges applied in EmitParticle.
    mPioneerDeath.varyColor = tg::color3::red * 0.3f;
    mPioneerDeath.varyLife = 0.2f;
    mPioneerDeath.varyPosition = tg::vec3(0.5, 0.5, 0.5);
    mPioneerDeath.varySize = 0.02f;
    mPioneerDeath.varyVelocity = tg::vec3(1.0f, -0.1f, 1.0f);
}
void gamedev::ParticleSystem::Init_BuildingDestroyedParticles()
{
    // Template for building destroyed/hit bursts: grey "dust" drifting up.
    mBuildingDestroyed.baseColor = tg::color3::white * 0.5;
    mBuildingDestroyed.baseLife = 2.f;
    mBuildingDestroyed.basePosition = tg::pos3::zero;
    mBuildingDestroyed.baseSize = 0.1f;
    mBuildingDestroyed.baseVelocity = tg::vec3::unit_y * 0.6f;
    // Warm sandy tint for the color jitter (RGB 255/211/155 normalized).
    mBuildingDestroyed.varyColor = tg::color3(255 / 255.f, 211 / 255.f, 155 / 255.f);
    mBuildingDestroyed.varyLife = 0.2f;
    mBuildingDestroyed.varyPosition = tg::vec3(0.5, 0.5, 0.5);
    mBuildingDestroyed.varySize = 0.05f;
    mBuildingDestroyed.varyVelocity = tg::vec3(0.3f, 0.1f, 0.3f);
}
// =========================================================================================
// =========================================================================================
/*
void gamedev::GPUParticleSystem::AddEntity(InstanceHandle& handle, Signature entitySignature)
{
// Only include rendered systems
if (!mECS->TestSignature<Render>(handle))
{
mEntities.erase(handle);
}
mEntities.insert(handle);
}
void gamedev::GPUParticleSystem::Init(SharedEngineECS& ecs)
{
Mesh3D cube;
cube.loadFromFile("../data/meshes/cube.obj", true);
mECS = ecs;
mVaoParticle = cube.createBasicVertexArray();
mVboParticleData = glow::ArrayBuffer::create();
mSsboParticleData = glow::ShaderStorageBuffer::createAliased(mVboParticleData);
mSsboDeadParticles = glow::ShaderStorageBuffer::create();
mSsboAliveParticles1 = glow::ShaderStorageBuffer::create();
mSsboAliveParticles2 = glow::ShaderStorageBuffer::create();
mProgramComputeParticles = glow::Program::createFromFile("../data/shaders/compute_particles");
AllocateParticleBuffer(mMaxGenParticles);
}
void gamedev::GPUParticleSystem::AllocateParticleBuffer(std::uint64_t particles)
{
mVboParticleData->bind().setData(particles * PARTICLE_DATA_LENGTH, nullptr, GL_STREAM_DRAW);
mSsboDeadParticles->bind().setData(particles * sizeof(std::uint32_t), 0, GL_STREAM_DRAW);
mSsboAliveParticles1->bind().setData(particles * sizeof(std::uint32_t), 0, GL_STREAM_DRAW);
mSsboAliveParticles2->bind().setData(particles * sizeof(std::uint32_t), 0, GL_STREAM_DRAW);
mParticleBufferSize = particles;
}
void gamedev::GPUParticleSystem::UpdateParticleBuffer()
{
auto vbo = mVboParticles->bind();
glBufferSubData(GL_SHADER_STORAGE_BUFFER);
vbo.setData(particles * PARTICLE_DATA_LENGTH, nullptr, GL_STREAM_DRAW);
mParticleBufferSize = particles;
}
void gamedev::GPUParticleSystem::SetupParticleCount(int particleCount)
{
MAX_PARTICLES = particleCount;
mParticlePool.resize(MAX_PARTICLES);
// mShadowParticlePool.resize(MAX_PARTICLES);
auto vao = mVaoCube->bind();
auto vbo = mVboParticles->bind();
mVboParticles->defineAttribute(&ParticleAttributes::pos, "aTranslation", glow::AttributeMode::Float, 1);
mVboParticles->defineAttribute(&ParticleAttributes::color, "aColor", glow::AttributeMode::Float, 1);
mVboParticles->defineAttribute(&ParticleAttributes::rotation, "aRotation", glow::AttributeMode::Float, 1);
mVboParticles->defineAttribute(&ParticleAttributes::scale, "aScale", glow::AttributeMode::Float, 1);
mVboParticles->defineAttribute(&ParticleAttributes::blend, "aBlend", glow::AttributeMode::Float, 1);
vao.attach(mVboParticles);
}
void gamedev::GPUParticleSystem::UploadParticleData()
{
auto vbo = mVboParticles->bind();
vbo.setData(mParticleAttributes, GL_STREAM_DRAW);
}
void gamedev::GPUParticleSystem::Update(float elapsed_time)
{
mParticleAttributes.clear();
std::uint64_t upperBound = 0;
// Create new particles for all emitters
for (const auto& handle : mEntities)
{
auto& xform = mECS->GetInstance(handle).xform;
auto* particleEmitter = mECS->GetComponent<ParticleEmitter>(handle);
for (auto& pp : particleEmitter->pp)
{
auto& particleProperties = particleEmitter->pp;
pp.emitNew += elapsed_time * pp.particlesPerSecond;
upperBound += pp.particlesPerSecond * (pp.baseLife + pp.varyLife + 1)
for (auto i = 0; i < tg::floor(pp.emitNew); i++)
{
EmitParticle(pp, xform);
}
if (pp.emitNew > 1.0)
pp.emitNew = 0.0;
}
}
if (upperBound > mMaxGenParticles)
{
// First check if a new buffer needs to be allocated
if (upperBound > mParticleBufferSize)
{
AllocateParticleBuffer(upperBound * 1.5);
}
else if (upperBound > 0.9 * mParticleBufferSize)
{
AllocateParticleBuffer(mParticleBufferSize * 1.5);
}
// Then update the number of max generated particles
mMaxGenParticles = upperBound;
}
// Update all living particles
for (auto id = 0; id < mAlive; id++)
{
auto& p = mParticlePool[id];
if (p.lifeRemaining <= 0.0f)
{
KillParticle(id);
continue;
}
p.lifeRemaining -= elapsed_time;
p.position += p.velocity * elapsed_time;
p.rotation_y += 0.01f * elapsed_time;
float life = p.lifeRemaining / p.lifeTime;
float scale = p.size_t0;
tg::mat4 modelMatrix = tg::translation(p.position) * tg::rotation_y(tg::degree(p.rotation_y)) * tg::scaling(scale, scale, scale);
mParticleAttributes.push_back({p.position, p.color, p.rotation_y, scale, life});
}
}
void gamedev::GPUParticleSystem::RenderParticles(glow::UsedProgram& shader)
{
mVaoCube->bind().draw(mAlive);
}
void gamedev::GPUParticleSystem::EmitParticle(const ParticleProperties& particleProperties, transform& xform)
{
Particle& particle = mParticlePool[mFreeIndex];
particle.active = true;
particle.position = tg::mat4(xform.transform_mat()) * particleProperties.basePosition;
auto vary = tg::mat4(xform.transform_mat()) * particleProperties.varyPosition;
particle.position.x += vary.x * (RandomFloat() - 0.5f);
particle.position.y += vary.y * (RandomFloat() - 0.5f);
particle.position.z += vary.z * (RandomFloat() - 0.5f);
particle.color = tg::vec3(particleProperties.baseColor);
particle.color = tg::saturate(particle.color + tg::vec3(particleProperties.varyColor) * (RandomFloat() - 0.5f));
particle.rotation_y = RandomFloat() * 2 * tg::pi_scalar<float>;
particle.velocity = particleProperties.baseVelocity;
particle.velocity += particleProperties.varyVelocity * (RandomFloat() - 0.5f);
particle.lifeTime = particleProperties.baseLife;
particle.lifeTime += particleProperties.varyLife * (RandomFloat() - 0.5f);
particle.lifeRemaining = particle.lifeTime;
float scale = tg::max(xform.scaling.depth, xform.scaling.height, xform.scaling.width);
particle.size_t0 = scale * particleProperties.baseSize;
particle.size_t0 -= particleProperties.varySize * RandomFloat();
mFreeIndex++;
mAlive++;
if (mAlive >= MAX_PARTICLES * 0.8)
{
glow::error() << "Too many particles, need to increase storage!";
MAX_PARTICLES += MAX_PARTICLES * 0.2;
SetupParticleCount(MAX_PARTICLES);
}
}
void gamedev::GPUParticleSystem::KillParticle(int id)
{
mParticlePool[id].active = false;
std::swap(mParticlePool[id], mParticlePool[mAlive - 1]);
mAlive--;
mFreeIndex--;
}
*/ |
package com.shiliu.game.service;
import com.shiliu.game.domain.Poll;
public interface IPollService {

    /** Inserts a poll record, persisting only its non-null fields; returns the affected row count. */
    int insertSelective(Poll record);

    /** Looks up a poll record by phone number; presumably returns null when none exists -- confirm with the mapper. */
    Poll selectPhone(String phone);

    /** Updates the record identified by its primary key, writing only non-null fields; returns the affected row count. */
    int updateByPrimaryKeySelective(Poll record);

    /** NOTE(review): returns an int for an app-name lookup -- presumably a count or id; confirm with the mapper. */
    int selectApp(String appName);
}
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# shellcheck source=scripts/in_container/_in_container_script_init.sh
. "$( dirname "${BASH_SOURCE[0]}" )/_in_container_script_init.sh"
function import_all_provider_classes() {
    # Smoke-test that every class under airflow/providers is importable.
    group_start "Importing all classes"
    python3 "${AIRFLOW_SOURCES}/dev/import_all_classes.py" --path "airflow/providers"
    group_end
}
function verify_provider_packages_named_properly() {
    # Delegate naming verification to the provider-packages tool; the
    # optional backport flag array may be empty (regular providers).
    python3 "${PROVIDER_PACKAGES_DIR}/prepare_provider_packages.py" \
        verify-provider-classes \
        "${OPTIONAL_BACKPORT_FLAG[@]}"
}
function run_prepare_documentation() {
    # Prepare documentation for every package in PROVIDER_PACKAGES and print
    # a colored summary. Tool exit codes: 0 = prepared, 64 = intentionally
    # skipped, anything else = error (errors make the function exit 1).
    local prepared_documentation=()
    local skipped_documentation=()
    local error_documentation=()
    # Delete the remote, so that we fetch it and update it once, not once per package we build!
    git remote rm apache-https-for-providers 2>/dev/null || :
    local provider_package
    for provider_package in "${PROVIDER_PACKAGES[@]}"
    do
        # Allow the python call to fail without aborting the loop.
        set +e
        local res
        # There is a separate group created in logs for each provider package
        python3 "${PROVIDER_PACKAGES_DIR}/prepare_provider_packages.py" \
            update-package-documentation \
            --version-suffix "${TARGET_VERSION_SUFFIX}" \
            --no-git-update \
            "${OPTIONAL_BACKPORT_FLAG[@]}" \
            "${OPTIONAL_VERBOSE_FLAG[@]}" \
            "${OPTIONAL_RELEASE_VERSION_ARGUMENT[@]}" \
            "${provider_package}"
        res=$?
        if [[ ${res} == "64" ]]; then
            # Fixed: this message used to sit AFTER `continue` and was unreachable.
            echo "${COLOR_YELLOW}Skipping provider package '${provider_package}'${COLOR_RESET}"
            skipped_documentation+=("${provider_package}")
            continue
        fi
        if [[ ${res} != "0" ]]; then
            echo "${COLOR_RED}Error when generating provider package '${provider_package}'${COLOR_RESET}"
            error_documentation+=("${provider_package}")
            continue
        fi
        prepared_documentation+=("${provider_package}")
        # NOTE(review): `set -e` is only restored on the success path, so it
        # stays disabled after a skip/error iteration -- confirm intended.
        set -e
    done
    echo "${COLOR_BLUE}===================================================================================${COLOR_RESET}"
    echo
    echo "Summary of prepared documentations:"
    echo
    if [[ "${#prepared_documentation[@]}" != "0" ]]; then
        echo "${COLOR_GREEN} Success:${COLOR_RESET}"
        echo "${prepared_documentation[@]}" | fold -w 100
    fi
    if [[ "${#skipped_documentation[@]}" != "0" ]]; then
        echo "${COLOR_YELLOW} Skipped:${COLOR_RESET}"
        echo "${skipped_documentation[@]}" | fold -w 100
    fi
    if [[ "${#error_documentation[@]}" != "0" ]]; then
        echo "${COLOR_RED} Errors:${COLOR_RESET}"
        echo "${error_documentation[@]}" | fold -w 100
    fi
    echo
    echo "${COLOR_BLUE}===================================================================================${COLOR_RESET}"
    if [[ ${#error_documentation[@]} != "0" ]]; then
        echo
        echo "${COLOR_RED}There were errors when preparing documentation. Exiting! ${COLOR_RESET}"
        exit 1
    fi
}
# ---- main -----------------------------------------------------------------
setup_provider_packages
cd "${AIRFLOW_SOURCES}" || exit 1
export PYTHONPATH="${AIRFLOW_SOURCES}"
verify_suffix_versions_for_package_preparation
install_supported_pip_version
# install extra packages missing in devel_ci
# TODO: remove it when devel_all == devel_ci
install_remaining_dependencies
if [[ ${BACKPORT_PACKAGES} != "true" ]]; then
    import_all_provider_classes
    verify_provider_packages_named_properly
fi
# We will be able to remove it when we get rid of BACKPORT_PACKAGES
# An optional leading CalVer release version (YYYY.MM.DD) is consumed here
# and forwarded to the tool as --release-version.
OPTIONAL_RELEASE_VERSION_ARGUMENT=()
if [[ $# != "0" && ${1} =~ ^[0-9][0-9][0-9][0-9]\.[0-9][0-9]\.[0-9][0-9]$ ]]; then
    OPTIONAL_RELEASE_VERSION_ARGUMENT+=("--release-version" "${1}")
    shift
fi
# Remaining positional arguments name the provider packages to document.
PROVIDER_PACKAGES=("${@}")
get_providers_to_act_on "${@}"
run_prepare_documentation
echo
echo "${COLOR_GREEN}All good! Airflow Provider's documentation generated!${COLOR_RESET}"
echo
|
#!/bin/sh
# https://github.com/shyiko/ktlint pre-commit hook
# Lint only the staged Kotlin files (.kt / .kts; git may quote names,
# hence the `"` in the character class).
# NOTE(review): when no Kotlin files are staged, xargs still runs ktlint
# with no file arguments -- confirm that is the intended behavior.
git diff --name-only --cached --relative | grep '\.kt[s"]\?$' | xargs ktlint --relative .
if [ $? -ne 0 ]; then exit 1; fi
-- Count how many rows exist for each distinct Item value.
SELECT Item, COUNT(*)
FROM TableName
GROUP BY Item;
#!/bin/sh
# This is the cordova-resource-generators install script!
# Are you looking at this in your web browser, and would like to install?
# Just open up your terminal and type:
#
# curl https://raw.githubusercontent.com/kdmny/cordova-resource-generators/master/install.sh | sh
#
# Currently supports Mac OS X
set -e
set -u

# Display everything on stderr.
exec 1>&2

# Abort on anything that is not macOS.
UNAME=$(uname)
if [ "$UNAME" != "Darwin" ] ; then
    echo "Sorry, this OS is not supported yet."
    exit 1
fi

INSTALL_URL="https://raw.githubusercontent.com/kdmny/cordova-resource-generators/master"
PREFIX="/usr/local"
INSTALL_BIN="$PREFIX/bin"

# New macs (10.9+) don't ship with /usr/local, however it is still in
# the default PATH. We still install there, we just need to create the
# directory first.
if [ ! -d "$INSTALL_BIN" ] ; then
  sudo mkdir -m 755 "$PREFIX" || true
  sudo mkdir -m 755 "$INSTALL_BIN" || true
fi

GEN_ICONS="/cordova-generate-icons"
GEN_SPLASHES="/cordova-generate-splash-screens"

# Quote every expansion so paths cannot word-split or glob.
# NOTE(review): curl writes into $INSTALL_BIN without sudo; if
# /usr/local/bin is root-owned the download fails before chmod runs --
# consider downloading to a temp file and `sudo mv`-ing it into place.
curl -# -o "$INSTALL_BIN$GEN_ICONS" "$INSTALL_URL$GEN_ICONS"
sudo chmod +x "$INSTALL_BIN$GEN_ICONS"

curl -# -o "$INSTALL_BIN$GEN_SPLASHES" "$INSTALL_URL$GEN_SPLASHES"
sudo chmod +x "$INSTALL_BIN$GEN_SPLASHES"

cat <<"EOF"
# Installation Complete
To generate icons:
$ cd path/to/your/app
$ cordova-generate-icons path/to/icon.png
To generate splash screens:
$ cd path/to/your/app
$ cordova-generate-splash-screens path/to/icon.png
EOF
|
<filename>FHSPS-2019/Round1/gyms.java<gh_stars>0
import java.util.*;
public class gyms {
    // superEffective[a][b] == true when type a is super effective against type b.
    static boolean[][] superEffective;
    // Lexicographically smallest valid ordering found so far ("" = none yet).
    static String ans = "";

    /**
     * For each test case, reads n types and an n*n effectiveness matrix, then
     * searches all permutations for an ordering where each type is super
     * effective against its predecessor, printing the smallest one (or
     * "Impossible").
     */
    public static void main(String[] args) {
        Scanner scan = new Scanner(System.in);
        int t = scan.nextInt();
        for(int q = 1; q <= t; q++) {
            ans = "";
            int n = scan.nextInt();
            String[] name = new String[n];
            superEffective = new boolean[n][n];
            for(int i = 0; i < n; i++) {
                name[i] = scan.next();
                for(int j = 0; j < n; j++) {
                    superEffective[i][j] = scan.nextInt() == 1;
                }
            }
            perm(0, new int[n], name, new boolean[n]);
            if(ans.isEmpty()) System.out.println("Type Chart #"+q+": Impossible");
            else System.out.println("Type Chart #"+q+": "+ans);
        }
    }

    /**
     * Backtracking permutation search. arr[0..u) holds the indices chosen so
     * far; at a full permutation, validates the chain and keeps the
     * lexicographically smallest rendering in ans.
     */
    static void perm(int u, int[] arr, String[] name, boolean[] used) {
        if(u == arr.length) {
            // Each type must be super effective against the one before it.
            for(int i = 0; i < arr.length-1; i++)
                if(!superEffective[arr[i+1]][arr[i]]) return;
            String res = name[arr[0]];
            for(int i = 1; i < arr.length; i++) res += " "+name[arr[i]];
            if(ans.isEmpty() || res.compareTo(ans) < 0)
                ans = res;
            // Fixed: return at the leaf. The original fell through into the
            // placement loop, doing a useless O(n) scan on every leaf.
            return;
        }
        for(int i = 0; i < arr.length; i++) {
            if(!used[i]) {
                used[i] = true;
                arr[u] = i;
                perm(u+1, arr, name, used);
                used[i] = false;
            }
        }
    }
}
/*
2
8
ROCK 0 0 0 0 0 0 1 0
WATER 1 0 0 0 0 0 1 0
ELECTRIC 0 1 0 0 0 0 0 0
GRASS 1 1 0 0 0 0 0 1
POISON 0 0 0 1 0 0 0 0
PSYCHIC 0 0 0 0 1 0 0 0
FIRE 0 0 0 1 0 0 0 0
GROUND 1 0 1 0 1 0 1 0
8
FLYING 0 1 0 0 1 0 0 0
BUG 0 0 0 0 0 0 0 0
NORMAL 0 0 0 0 0 0 0 0
GHOST 0 0 0 0 0 0 0 0
FIGHTING 0 0 1 0 0 1 1 0
STEEL 0 0 0 0 0 0 1 0
ICE 1 0 0 0 0 0 0 1
DRAGON 0 0 0 0 0 0 0 1
*/ |
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Toy sentiment-classification data.
sentences = [
    'I really like this movie',
    'I did not enjoy this movie',
    'This was an excellent movie'
]
labels = [1, 0, 1]  # 0: Negative, 1: Positive

# Fit the tokenizer on the corpus to build the word index.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(sentences)

# Encode each sentence as a sequence of word indices, padded to equal length.
sequences = tokenizer.texts_to_sequences(sentences)
max_sequence_length = max(len(seq) for seq in sequences)
padded_sequences = pad_sequences(sequences, maxlen=max_sequence_length, padding='post')

# Vocabulary size for the embedding: Tokenizer() created without num_words
# leaves tokenizer.num_words as None, which is not a valid Embedding
# input_dim. Word indices start at 1, so we need len(word_index) + 1 rows.
vocab_size = len(tokenizer.word_index) + 1

# Build model: embedding -> bidirectional LSTM -> sigmoid output.
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, 5),  # embedding layer
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(20)),  # bidirectional LSTM layer
    tf.keras.layers.Dense(1, activation='sigmoid')  # dense output layer
])

# Compile model for binary classification.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Train model.
model.fit(padded_sequences, labels, batch_size=1, epochs=20, verbose=2)
/**
* Copyright (c) 2017, salesforce.com, inc.
* All rights reserved.
* Licensed under the BSD 3-Clause license.
* For full license text, see LICENSE.txt file in the repo root or
* https://opensource.org/licenses/BSD-3-Clause
*/
/**
* src/heartbeat/utils.js
*/
'use strict'; // eslint-disable-line strict
const debug = require('debug')('refocus-collector:heartbeat');
const configModule = require('../config/config');
const repeater = require('../repeater/repeater');
const logger = require('winston');
const commonUtils = require('../utils/commonUtils');
const sanitize = commonUtils.sanitize;
const collectorStatus = require('../constants').collectorStatus;
const { collectBulk, collectBySubject } = require('../remoteCollection/collect');
const { handleCollectResponseBulk, handleCollectResponseBySubject } =
require('../remoteCollection/handleCollectResponse');
/**
* Pauses, resumes or stops the collector based on the status of the collector.
*
* @param {String} currentStatus - The current status of the collector.
* @param {String} newStatus - The new status of the collector. This is the
* state the collector will be in, once this function has been executed.
*/
function changeCollectorStatus(currentStatus, newStatus) {
  debug('changeCollectorStatus from %s to %s', currentStatus, newStatus);

  // No-op when either status is missing or there is no actual transition.
  if (!currentStatus || !newStatus || currentStatus === newStatus) {
    return;
  }

  switch (newStatus) {
    case collectorStatus.PAUSED:
      // Pausing: stop the generator repeaters but keep the process alive.
      repeater.stopGenerators();
      break;
    case collectorStatus.STOPPED:
      // Stopping: tear down every repeater and exit the process.
      repeater.stopAllRepeaters();
      process.exit(0);
      break;
    default:
      break;
  }
} // changeCollectorStatus
/**
* Update the collector config with any changes from the heartbeat response.
*
* @param {Object} cc - The collectorConfig from the Heartbeat Response object
*/
function updateCollectorConfig(cc) {
  const config = configModule.getConfig();

  Object.keys(cc).forEach((key) => {
    const oldValue = config.refocus[key];
    const newValue = cc[key];
    config.refocus[key] = newValue;

    // Restart the heartbeat repeater only when its interval actually changed.
    if (oldValue && newValue !== oldValue && key === 'heartbeatIntervalMillis') {
      repeater.updateHeartbeatRepeater(newValue);
    }
  });

  const sanitized = sanitize(config.refocus, configModule.attributesToSanitize);
  debug('exiting updateCollectorConfig %O', sanitized);
} // updateCollectorConfig
/**
* Assign any default values from the template into the generator context if
* no value was provided in the generator context.
*
* @param {Object} ctx - The context from the generator
* @param {Object} def - The contextDefinition from the generator template
* @param {Object} collectorToken - The token for this collector
* @param {Object} res - The heartbeat response object
* @returns {Object} the context object with default values populated
*/
function assignContext(ctx, def, collectorToken, res) {
  ctx = ctx || {};
  def = def || {};

  // Secret used to decrypt encrypted context values: the collector token
  // concatenated with the heartbeat timestamp.
  const secret = collectorToken + res.timestamp;

  Object.keys(def).forEach((key) => {
    const definition = def[key];

    // Fill in the template default when the generator provided no value.
    if (!ctx.hasOwnProperty(key) && definition.hasOwnProperty('default')) {
      ctx[key] = definition.default;
    }

    // Decrypt any present value whose definition marks it as encrypted.
    if (ctx.hasOwnProperty(key) && definition.encrypted) {
      ctx[key] = commonUtils.decrypt(ctx[key], secret, res.encryptionAlgorithm);
    }
  });

  debug('assignContext returning %O', ctx);
  return ctx;
} // assignContext
/**
* Set up generator repeaters for each generator and add them to the collector
* config.
*
* @param {Object} res - The start response or heartbeat response
*/
function addGenerators(res) {
  const generators = res.generatorsAdded;
  const config = configModule.getConfig(); // Get a fresh copy
  const cr = config.refocus;
  if (generators && Array.isArray(generators)) {
    // Create a new repeater for each generator and add to config.
    generators.forEach((g) => {
      // Resolve template defaults and decrypt encrypted context values.
      if (g.generatorTemplate.contextDefinition) {
        g.context = assignContext(g.context,
          g.generatorTemplate.contextDefinition, cr.collectorToken, res);
      }
      // Add dataSourceProxy to connection, if specified
      if (config.dataSourceProxy) {
        g.generatorTemplate.connection.dataSourceProxy = config.dataSourceProxy;
      }
      // Add Refocus url/proxy to generator
      g.refocus = { url: cr.url };
      if (cr.proxy) g.refocus.proxy = cr.proxy;
      // Note: the generator is stored in config BEFORE the repeater is
      // created, so a repeater failure below still leaves it registered.
      config.generators[g.name] = g;
      try {
        repeater.createGeneratorRepeater(g);
      } catch (err) {
        // Log and continue: one bad generator must not block the others.
        debug('addGenerators error for generator "%s":\n%s', g.name,
          err.message);
        logger.error(`addGenerators error for generator "${g.name}":\n`,
          err.message);
      }
      debug('Generator added: %O (%s v%s)',
        sanitize(g, ['token', 'context']), g.generatorTemplate.name,
        g.generatorTemplate.version);
    });
  } else {
    debug('No generators to add.');
  }
} // addGenerators
/**
* Stop generator repeaters and delete generators from collector config.
*
* @param {Object} res - The Heartbeat Response object
*/
function deleteGenerators(res) {
  const config = configModule.getConfig(); // Get a fresh copy
  const generators = res.generatorsDeleted;

  if (!Array.isArray(generators)) {
    debug('No generators to delete.');
    return;
  }

  // Stop the repeater for each generator, then drop it from the config.
  generators.forEach((g) => {
    debug('deleteGenerators: generator "%s"...', g.name);
    repeater.stop(g.name);
    delete config.generators[g.name];
    debug('Generator "%s" deleted', g.name);
  });
} // deleteGenerators
/**
* Update generator repeaters and collector config.
*
* @param {Object} res - The Heartbeat Response object
*/
function updateGenerators(res) {
  const generators = res.generatorsUpdated;
  const config = configModule.getConfig(); // Get a fresh copy
  const cr = config.refocus;
  if (generators && Array.isArray(generators)) {
    // Update the repeater for each generator and update in config.
    generators.forEach((g) => {
      debug('updateGenerators: generator "%s"...', g.name);
      // Resolve template defaults and decrypt encrypted context values.
      if (g.generatorTemplate.contextDefinition) {
        g.context = assignContext(g.context,
          g.generatorTemplate.contextDefinition, cr.collectorToken, res);
      }
      // Add dataSourceProxy to connection, if specified
      if (config.dataSourceProxy) {
        g.generatorTemplate.connection.dataSourceProxy = config.dataSourceProxy;
      }
      // Add Refocus url/proxy to generator
      g.refocus = { url: cr.url };
      if (cr.proxy) g.refocus.proxy = cr.proxy;
      // Merge key-by-key into the existing config entry (shallow merge:
      // keys absent from the update are left untouched).
      Object.keys(g).forEach((key) =>
        config.generators[g.name][key] = g[key]);
      // Repeaters cannot be updated--stop old ones and create new ones.
      try {
        repeater.stop(g.name);
        repeater.createGeneratorRepeater(g);
      } catch (err) {
        // Log and continue: one bad generator must not block the others.
        debug('updateGenerators error for generator "%s":\n%s', g.name,
          err.message);
        logger.error(`updateGenerators error for generator "${g.name}":\n`,
          err.message);
      }
      debug('Generator updated: %O (%s v%s)',
        sanitize(g, ['token', 'context']), g.generatorTemplate.name,
        g.generatorTemplate.version);
    });
  } else {
    debug('No generators to update.');
  }
} // updateGenerators
// Public API of the heartbeat utils module.
module.exports = {
  addGenerators,
  assignContext, // exporting for testing purposes only
  changeCollectorStatus,
  deleteGenerators,
  updateGenerators,
  updateCollectorConfig,
};
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "utils.h"
#include "matrix.h"
/*
 * Print "<function>: <message>" to stdout when log_level is at or above
 * the compile-time LOG_LEVEL threshold. EXCEPTION-level messages are
 * wrapped in the RED_COLOR/RESET_COLOR terminal-color macros.
 */
void logger(int log_level, const char *function_name, const char *message)
{
    if (log_level >= LOG_LEVEL)
    {
        if (log_level == EXCEPTION)
        {
            RED_COLOR;
        }
        printf("%s: %s\n", function_name, message);
        /* Always reset, so a colored message never bleeds into later output. */
        RESET_COLOR;
    }
}
/*
 * Allocate a Dataset wrapping the given train/validation matrix arrays.
 * The caller retains ownership semantics established by delete_dataset().
 * input_size and output_size are currently unused (kept for interface
 * compatibility).
 *
 * When no validation data is supplied, the validation pointers alias the
 * training arrays. Returns NULL (without leaking) when the required
 * training arrays are missing or allocation fails.
 */
Dataset* create_dataset(
    int train_size,
    int input_size,
    int output_size,
    int val_size,
    Matrix **train_inputs,
    Matrix **train_labels,
    Matrix **val_inputs,
    Matrix **val_labels)
{
    /* Validate inputs BEFORE allocating, so the error path cannot leak. */
    if (train_inputs == NULL || train_labels == NULL)
    {
        return NULL;
    }

    Dataset *dataset = (Dataset *) malloc (sizeof (Dataset));
    if (dataset == NULL)
    {
        return NULL;
    }

    dataset->train_size = train_size;
    dataset->val_size = val_size;
    dataset->train_inputs = train_inputs;
    dataset->train_labels = train_labels;
    dataset->val_inputs = val_inputs;
    dataset->val_labels = val_labels;

    /* No validation split supplied: fall back to the training data. */
    if (val_inputs == NULL)
    {
        dataset->val_inputs = train_inputs;
        dataset->val_labels = train_labels;
    }

    return dataset;
}
/*
 * Free every matrix owned by the dataset, then the dataset struct itself.
 * Returns 0 on success, -1 when dataset is NULL.
 *
 * create_dataset() aliases the validation arrays to the training arrays
 * when no validation data was supplied; in that case the validation loop
 * must be skipped or the same matrices would be freed twice.
 */
int delete_dataset(Dataset *dataset)
{
    if (dataset == NULL) {
        return -1;
    }

    /* Detect the aliasing case before freeing anything. */
    int val_is_aliased = (dataset->val_inputs == dataset->train_inputs);

    for (int i = 0; i < dataset->train_size; i++)
    {
        delete_matrix(dataset->train_inputs[i]);
        delete_matrix(dataset->train_labels[i]);
    }

    if (!val_is_aliased)
    {
        for (int i = 0; i < dataset->val_size; i++)
        {
            delete_matrix(dataset->val_inputs[i]);
            delete_matrix(dataset->val_labels[i]);
        }
    }

    free(dataset);
    return 0;
}
/*
 * Load a CSV file into an array of column-vector matrices: each row of
 * the file becomes one (line_length x 1) Matrix.
 * Returns NULL when the file cannot be opened; otherwise a malloc'd
 * array of `lines` Matrix pointers owned by the caller.
 *
 * NOTE(review): assumes the file has at most `lines` rows and each row
 * at most `line_length` values — extra rows would overflow `result` and
 * extra values would overflow `mat`. Confirm against callers.
 */
Matrix** load_csv(char *filename, int lines, int line_length)
{
    FILE* fp = fopen(filename, "r");
    if (!fp)
    {
        logger(EXCEPTION, __func__, "Failed to open csv file");
        return NULL;
    }
    Matrix **result = (Matrix**) malloc (sizeof (Matrix*) * lines);
    /* Line buffer sized at 4 chars per value (value + comma). */
    int buffer_length = line_length*4;
    char buffer[buffer_length];
    int line_idx = 0;
    while(fgets(buffer, buffer_length, fp)) {
        char *token = strtok(buffer, ",");
        double mat[line_length][1];
        int i = 0;
        /* Parse each comma-separated value into the column vector. */
        while( token != NULL ) {
            mat[i++][0] = strtod(token, NULL);
            token = strtok(NULL, ",");
        }
        result[line_idx++] = create_matrix(line_length, 1, mat);
    }
    fclose(fp);
    return result;
}
/*
 * Convert each (1 x 1) label matrix in `a` into a one-hot column vector
 * of length num_classes, in place (old matrices are freed).
 * Returns 0 on success, -1 when a label is outside [0, num_classes).
 */
int vectorize(Matrix **a, int length, int num_classes)
{
    for (int i = 0; i < length; i++)
    {
        int index = (int) a[i]->matrix[0][0];

        /* Reject labels outside the class range; a negative value would
         * otherwise index mat[] out of bounds below. */
        if (index < 0 || index >= num_classes)
        {
            return -1;
        }

        double mat[num_classes][1];
        for (int j = 0; j < num_classes; j++)
        {
            mat[j][0] = 0;
        }
        mat[index][0] = 1;

        delete_matrix(a[i]);
        a[i] = create_matrix(num_classes, 1, mat);
    }
    return 0;
}
/*
 * Divide every element of each matrix in `a` by max_num, in place.
 * Returns 0 on success, -1 as soon as any matrix fails is_null().
 */
int normalize(Matrix **a, int length, int max_num)
{
    for (int i = 0; i < length; i++)
    {
        Matrix *m = a[i];
        if (is_null(m))
        {
            return -1;
        }
        for (int r = 0; r < m->rows; r++)
        {
            for (int c = 0; c < m->cols; c++)
            {
                m->matrix[r][c] /= max_num;
            }
        }
    }
    return 0;
}
/*
 * Allocate a TrainingOptions struct with zeroed hyper-parameters and
 * CROSS_ENTROPY as the default cost type. The caller owns the returned
 * pointer (free with delete_training_options).
 * Returns NULL when allocation fails.
 */
TrainingOptions* init_training_options()
{
    TrainingOptions *training_options = (TrainingOptions *) malloc (sizeof (TrainingOptions));
    if (training_options == NULL)
    {
        return NULL;
    }
    training_options->cost_type = CROSS_ENTROPY;
    training_options->epochs = 0;
    training_options->batch_size = 0;
    training_options->learning_rate = 0;
    training_options->momentum = 0;
    training_options->regularization_lambda = 0;
    return training_options;
}
/*
 * Free a TrainingOptions struct previously allocated by
 * init_training_options().
 * Returns 0 on success, -1 when training_options is NULL. (The previous
 * version fell off the end of a non-void function: undefined behavior.)
 */
int delete_training_options(TrainingOptions *training_options)
{
    if (training_options == NULL)
    {
        return -1;
    }
    free(training_options);
    return 0;
}
import { Client, Collection } from "discord.js";
import Slashcmds from "./utilities/slash";
import { ActionRow } from "./utilities/buttons/ActionRow";
import { MessageButton } from "./utilities/buttons/MessageButton";
import { Command } from "./utilities/command";
import { awaitButtons } from "./utilities/buttons/awaitButtons";
/** Configuration options accepted by the Slashcord constructor. */
declare type SlashcordOptions = {
    useComponents?: boolean | undefined;
    testServers?: string[] | undefined;
    botOwners?: string[] | undefined;
    cooldownError?: string | undefined;
    permissionError?: string | undefined;
    devError?: string | undefined;
};
/**
 * Main entry point of the framework: wires a discord.js Client to a
 * directory of slash commands. Button/component helpers are exposed as
 * static members so consumers need only this one import.
 */
declare class Slashcord {
    static Command: typeof Command;
    static ActionRow: typeof ActionRow;
    static MessageButton: typeof MessageButton;
    static awaitButtons: typeof awaitButtons;
    private _client;
    private _commandsDir;
    private _featuresDir;
    private _testServers;
    private _botOwners;
    private _useComponents;
    /** Registered commands, keyed by command name. */
    commands: Collection<string, Command>;
    /** Cooldown bookkeeping shared across commands. */
    cooldowns: Collection<string, any>;
    private _slash;
    private _command;
    cooldownMsg: string | undefined;
    permissionMsg: string | undefined;
    devOnlyMsg: string | undefined;
    constructor(client: Client, commandsDir: string, options: SlashcordOptions);
    get client(): Client;
    get testServers(): string[] | undefined;
    get botOwners(): string[] | undefined;
    get slashCmds(): Slashcmds;
}
export default Slashcord;
export { Command, MessageButton, ActionRow, awaitButtons };
|
<filename>src/test/java/seedu/address/model/person/AgeTest.java
package seedu.address.model.person;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.junit.Test;
import seedu.address.model.person.patient.Age;
import seedu.address.testutil.Assert;
public class AgeTest {

    @Test
    public void constructor_null_throwsNullPointerException() {
        Assert.assertThrows(NullPointerException.class, () -> new Age(null));
    }

    @Test
    public void constructor_invalidAge_throwsIllegalArgumentException() {
        Assert.assertThrows(IllegalArgumentException.class, () -> new Age(""));
    }

    @Test
    public void isValidAge() {
        // null age throws rather than returning false
        Assert.assertThrows(NullPointerException.class, () -> Age.isValidAge(null));

        // invalid ages
        assertFalse(Age.isValidAge(""));    // empty string
        assertFalse(Age.isValidAge(" "));   // spaces only
        assertFalse(Age.isValidAge("151")); // just above the valid range

        // valid ages
        assertTrue(Age.isValidAge("0"));
        assertTrue(Age.isValidAge("2"));    // one digit
        assertTrue(Age.isValidAge("55"));
        assertTrue(Age.isValidAge("150")); // three digits
    }
}
|
<reponame>chcbaram/arm_seminar_fw
/*
* bsp.c
*
* boart support package
*
* Created on: 2017. 3. 16.
* Author: Baram
*/
#include "bsp.h"
#include "hw.h"
void SystemClock_Config(void);
/*
 * Board support package init: HAL, system clocks, GPIO port clocks, and
 * the USB pull-up control pin (PC13, driven high here).
 */
void bspInit()
{
  GPIO_InitTypeDef GPIO_InitStruct;

  // STM32Cube HAL Init
  HAL_Init();

  // Clock Setup
  // SYSCLK(Hz) = 72000000
  // HCLK(Hz) = 72000000
  // HSE(Hz) = 8000000
  SystemClock_Config();

  __HAL_RCC_GPIOA_CLK_ENABLE();
  __HAL_RCC_GPIOB_CLK_ENABLE();
  __HAL_RCC_GPIOC_CLK_ENABLE();

  // USB_DISCONNECT used as USB pull-up
  //
  GPIO_InitStruct.Pin = GPIO_PIN_13;
  GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP;
  GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH;
  HAL_GPIO_Init(GPIOC, &GPIO_InitStruct);
  HAL_GPIO_WritePin(GPIOC, GPIO_PIN_13, GPIO_PIN_SET);
}
/*
 * Board support package de-init: masks the peripheral interrupts used by
 * the firmware, re-asserts the USB pull-up pin (PC13), and blinks LED 0
 * five times before leaving it off.
 */
void bspDeinit()
{
  uint32_t i;
  GPIO_InitTypeDef GPIO_InitStruct;

  // Disable every IRQ the application uses so no handler fires after deinit.
  HAL_NVIC_DisableIRQ(USB_LP_CAN1_RX0_IRQn);
  HAL_NVIC_DisableIRQ(TIM3_IRQn);
  HAL_NVIC_DisableIRQ(TIM4_IRQn);
  HAL_NVIC_DisableIRQ(TIM2_IRQn);
  HAL_NVIC_DisableIRQ(DMA1_Channel6_IRQn);
  HAL_NVIC_DisableIRQ(USART2_IRQn);
  HAL_NVIC_DisableIRQ(DMA1_Channel5_IRQn);
  HAL_NVIC_DisableIRQ(USART1_IRQn);

  // USB_DISCONNECT used as USB pull-up
  //
  GPIO_InitStruct.Pin = GPIO_PIN_13;
  GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP;
  GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_HIGH;
  HAL_GPIO_Init(GPIOC, &GPIO_InitStruct);
  HAL_GPIO_WritePin(GPIOC, GPIO_PIN_13, GPIO_PIN_SET);

  // Visual shutdown indication: toggle LED 0 five times (~500 ms total).
  for (i=0; i<5; i++)
  {
    ledToggle(0);
    delay(50);
  }
  ledOff(0);
}
/**
* @brief System Clock Configuration
* The system Clock is configured as follow :
* System Clock source = PLL (HSE)
* SYSCLK(Hz) = 72000000
* HCLK(Hz) = 72000000
* AHB Prescaler = 1
* APB1 Prescaler = 2
* APB2 Prescaler = 1
* HSE Frequency(Hz) = 8000000
* HSE PREDIV1 = 1
* PLLMUL = 9
* Flash Latency(WS) = 2
* @param None
* @retval None
*/
void SystemClock_Config(void)
{
  RCC_ClkInitTypeDef clkinitstruct = {0};
  RCC_OscInitTypeDef oscinitstruct = {0};

  /* Enable HSE Oscillator and activate PLL with HSE as source
   * (8 MHz HSE / PREDIV1 * PLLMUL9 = 72 MHz SYSCLK). */
  oscinitstruct.OscillatorType = RCC_OSCILLATORTYPE_HSE;
  oscinitstruct.HSEState = RCC_HSE_ON;
  oscinitstruct.HSEPredivValue = RCC_HSE_PREDIV_DIV1;
  oscinitstruct.PLL.PLLState = RCC_PLL_ON;
  oscinitstruct.PLL.PLLSource = RCC_PLLSOURCE_HSE;
  oscinitstruct.PLL.PLLMUL = RCC_PLL_MUL9;
  if (HAL_RCC_OscConfig(&oscinitstruct)!= HAL_OK)
  {
    /* Initialization Error: hang so the fault is observable on a debugger. */
    while(1);
  }

  /* Select PLL as system clock source and configure the HCLK, PCLK1 and PCLK2
     clocks dividers (APB1 capped at 36 MHz via DIV2). */
  clkinitstruct.ClockType = (RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2);
  clkinitstruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK;
  clkinitstruct.AHBCLKDivider = RCC_SYSCLK_DIV1;
  clkinitstruct.APB2CLKDivider = RCC_HCLK_DIV1;
  clkinitstruct.APB1CLKDivider = RCC_HCLK_DIV2;
  if (HAL_RCC_ClockConfig(&clkinitstruct, FLASH_LATENCY_2)!= HAL_OK)
  {
    /* Initialization Error */
    while(1);
  }
}
|
<filename>src/app/@core/data/electricity.ts
import { Observable } from 'rxjs';
/** One month's electricity consumption entry. */
export interface Month {
  month: string;
  delta: string;
  down: boolean;
  kWatts: string;
  cost: string;
}
/** A titled group of monthly electricity entries. */
export interface Electricity {
  title: string;
  active?: boolean;
  months: Month[];
}
/** A single labeled data point for the electricity chart. */
export interface ElectricityChart {
  label: string;
  value: number;
}
/** Abstract data source contract implemented by concrete services. */
export abstract class ElectricityData {
  abstract getListData(): Observable<Electricity[]>;
  abstract getChartData(): Observable<ElectricityChart[]>;
}
|
#!/usr/bin/env bash

# Return non-zero exit code if any commands fail. Set BEFORE the cd so a
# failed directory change also aborts the script instead of building from
# the wrong location.
set -e

# Set working directory to the repository root.
cd ../

# Extract list of services from the script arguments.
SERVICES=("$@")

# Build and Test all services
for SERVICE in "${SERVICES[@]}"; do
    # Run `docker build` with:
    #   -t (tagName): name of the service
    #   -f (fileName): path to the service's Dockerfile
    echo "TESTING ${SERVICE} FROM [${SERVICES[*]}]"
    docker build -t "${SERVICE}" -f "services/${SERVICE}/Dockerfile" --build-arg "SERVICE_NAME=${SERVICE}" .
done

# Test lib folder
cd lib && go test ./...
cd ../
#include <iostream>
// Singly linked list node.
struct Node {
    int data;
    Node* next;
};

// Append a new node holding `data` at the tail of the list.
// Handles the empty list uniformly: *head_ref itself is the first link.
void insert_end(Node** head_ref, int data) {
    // Allocate and initialise the node to append.
    Node* node = new Node;
    node->data = data;
    node->next = NULL;

    // Walk the chain of `next` links until the terminating NULL link,
    // then splice the new node in through that link.
    Node** link = head_ref;
    while (*link != NULL) {
        link = &(*link)->next;
    }
    *link = node;
}
import math

# Read a number from the user.
num1 = float(input('Enter a number: '))

# Square root, rounded to the nearest integer with built-in round().
rounded_square_root = round(math.sqrt(num1))

# Report the original number alongside the rounded square root.
print('The number {} has a square root of {}'.format(num1, rounded_square_root))
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2606-1
#
# Security announcement date: 2013-01-13 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:29 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - proftpd-dfsg:1.3.3a-6squeeze5
#
# Last versions recommended by security team:
# - proftpd-dfsg:1.3.3a-6squeeze5
#
# CVE List:
# - CVE-2012-6095
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

# Upgrade (only, never install fresh) proftpd-dfsg to the fixed version,
# non-interactively.
sudo apt-get install --only-upgrade proftpd-dfsg=1.3.3a-6squeeze5 -y
|
-- Create table for Courses: the top-level unit of content.
CREATE TABLE Courses (
  Id INT PRIMARY KEY,
  Name VARCHAR(100)
);

-- Create table for Chapters: each chapter references its parent course.
CREATE TABLE Chapters (
  Id INT PRIMARY KEY,
  CourseId INT,
  Name VARCHAR(100),
  FOREIGN KEY (CourseId) REFERENCES Courses(Id)
);

-- Create table for Sections: each section holds the actual content text
-- and references its parent chapter.
CREATE TABLE Sections (
  Id INT PRIMARY KEY,
  ChapterId INT,
  Title VARCHAR(100),
  Content TEXT,
  FOREIGN KEY (ChapterId) REFERENCES Chapters(Id)
);
#!/bin/bash
source ./vars.sh

# Delete application 9 on behalf of the platform account.
# -o writes the transaction to a file instead of broadcasting it;
# NOTE(review): -s presumably signs the written transaction — confirm
# against the CLI's flag documentation.
$GCMD app delete --app-id 9 \
    --from $PLATFORM_ACCT \
    -o app-delete.txn \
    -s

# Broadcast the previously written, signed transaction.
$GCMD clerk rawsend -f app-delete.txn
|
def insert_value(lst, index, value):
    """Insert ``value`` into ``lst`` at position ``index``.

    The list is modified in place and also returned for convenience.
    Index semantics match ``list.insert``: out-of-range indices clamp to
    the ends and negative indices count from the back.
    """
    lst[index:index] = [value]
    return lst
import sympy

# The symbols must exist at module scope: the equation expression below
# references x and y directly (previously they only existed inside solve(),
# so building `eqn` raised NameError, and sympy was never imported).
x, y = sympy.symbols('x y')


# function to solve equation
def solve(eqn):
    """Solve ``eqn`` (interpreted as ``eqn == 0``) for x and y.

    Returns a mapping from solved symbols to expressions, e.g.
    ``{x: 5 - y}`` for a single underdetermined linear equation.
    """
    return sympy.solve((eqn,), (x, y), dict=True)[0]


# equation
eqn = x + y - 5

# solve equation
result = solve(eqn)

# A symbol absent from the solution mapping is a free variable; report it
# as itself instead of raising KeyError.
x_sol = result.get(x, x)
y_sol = result.get(y, y)
print("x =", x_sol)
print("y =", y_sol)
requirejs.config({
  //By default load any module IDs from js/lib
  /* development */
  /*
  baseUrl: 'graphingwikiapp/js/',
  paths: {
  components: "components/",
  configuration: "configuration/",
  lib: "lib/",
  utils: "utils/"
  }
  */
  /* production */
  // Production path mapping: modules resolve from the bundled app tree.
  paths: {
    components: "graphingwikiapp/js/components/",
    configuration: "graphingwikiapp/js/configuration/",
    lib: "graphingwikiapp/js/lib/",
    utils: "graphingwikiapp/js/utils/",
  }
});

// Application entry point: load dependencies, register cytoscape
// extensions, then start the app.
require([
  "lib/jquery",
  "graphingwiki",
  "utils/cyInitUtils"
], function ($, graphingwiki, cyInitUtils) {
  'use strict';
  console.info("Starting the graphingwikiBrowser!");
  // jQuery is loadeed in index.html
  console.log('built');
  console.info(requirejs.config({}));
  cyInitUtils.registerExtensions();
  graphingwiki.start();
});
<gh_stars>100-1000
import java.util.Scanner;
public class FindDigits
{
    /**
     * For each test case, counts how many digits of N divide N evenly.
     * Zero digits are skipped to avoid division by zero.
     */
    public static void main(String[] args)
    {
        Scanner in = new Scanner(System.in);
        int cases = Integer.parseInt(in.nextLine());
        for (int t = 0; t < cases; t++)
        {
            String number = in.nextLine();
            int value = Integer.parseInt(number);
            int count = 0;
            for (int i = 0; i < number.length(); i++)
            {
                int digit = number.charAt(i) - '0';
                if (digit != 0 && value % digit == 0)
                {
                    count++;
                }
            }
            System.out.println(count);
        }
        in.close();
    }
}
|
import * as React from "react";
import { observer } from "mobx-react";
import { FormField } from ".";
import { Form } from "..";
import { SubFormFieldModel } from "../../models";
/** Form field that renders a nested sub-form inline. */
@observer
export class SubFormField extends FormField<SubFormFieldModel> {
  // Ref to the wrapper div so focus() can scroll this field into view.
  private divRef: React.RefObject<HTMLDivElement> = React.createRef();

  // Scrolls the nested form's container into the viewport.
  focus() {
    this.divRef.current?.scrollIntoView();
  }

  render() {
    return (
      <div ref={this.divRef}>
        <Form hooks={this.props.hooks} hideTitle form={this.props.field.value.value!} />
      </div>
    );
  }
}
|
#!/bin/bash
source "../helpers/colorprint.sh"

# NOTE(review): aliases are not expanded in non-interactive bash scripts
# unless `shopt -s expand_aliases` is set — confirm these are only used
# from an interactive shell that sources this file.
alias YUM_UPDATE="sudo yum update"
alias YUM_UPGRADE="sudo yum upgrade -y"
# Install each given yum package group (one groupinstall per argument).
function yum-groupinstall-packages() {
    local package
    for package in "$@"; do
        # Use sudo and quoting for consistency with the other helpers
        # in this file (plain `yum groupinstall` fails for non-root users).
        sudo yum groupinstall "$package" -y
    done
}
# Install a single yum package, logging progress in green.
function install-yum-package() {
    local pack="$1"
    colorprint -green "[*] Installing $pack..."
    # Quote the package name so it is passed as a single argument.
    sudo yum install "$pack" -y
}
# Install every package given as an argument, one at a time.
function install-yum-packages() {
    local package
    for package in "$@"; do
        # Quote to preserve each argument exactly as passed.
        install-yum-package "$package"
    done
}
# Remove a single yum package, logging progress in red.
function remove-yum-package() {
    local pack="$1"
    colorprint -red "[!] Removing $pack..."
    # Quote the package name so it is passed as a single argument.
    sudo yum remove -y "$pack"
}
# Remove every package given as an argument, one at a time.
function remove-yum-packages() {
    local package
    for package in "$@"; do
        # Quote to preserve each argument exactly as passed.
        remove-yum-package "$package"
    done
}
#ifndef INLIMIT_HPP_
#define INLIMIT_HPP_
//============================================================================
// Name :
// Author : Avi
// Revision : $Revision: #61 $
//
// Copyright 2009-2020 ECMWF.
// This software is licensed under the terms of the Apache Licence version 2.0
// which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
// In applying this licence, ECMWF does not waive the privileges and immunities
// granted to it by virtue of its status as an intergovernmental organisation
// nor does it submit to any jurisdiction.
//
// Description :
//============================================================================
#include "LimitFwd.hpp"
namespace cereal { class access; }
// Inlimit. Multiple inlimits on same Node are logically ANDED
// inlimit limitName // This will consume one token in the limit <limitName>
// inlimit limitName 10 // This will consume 10 tokens in the limit <limitName>
// inlimit -s limitName // Limit submission, consume one token in the limit <limitName>
// inlimit -s limitName 10 // Limit submission, consume consume 10 tokens in the limit <limitName>
// inlimit -n limitName // Only applicable to a Suite/family, does not matter how many tasks
// // the family has, will only consume one token in the family
// // Can control number of active families.
//
// Inlimit of the same name specified on a task take priority over the family
// Value object describing one "inlimit" reference from a node to a Limit.
// The resolved Limit pointer is cached as a weak_ptr and recomputed on
// the fly (it is not persisted).
class InLimit {
public:
   explicit InLimit(const std::string& limit_name,                                          // referenced limit
                    const std::string& path_to_node_with_referenced_limit = std::string(),  // if empty, search for limit up parent hierarchy
                    int tokens = 1,                       // tokens to consume in the Limit
                    bool limit_this_node_only = false,    // if true limit this node only
                    bool limit_submission = false,        // limit submission only
                    bool check = true                     // disable name checking
                    );
   InLimit()= default;

   void print(std::string&) const;
   bool operator==(const InLimit& rhs) const;
   // Ordering considers the limit name only, not path or token count.
   bool operator<(const InLimit& rhs) const { return n_ < rhs.name();}

   const std::string& name() const { return n_;}             // must be defined
   const std::string& pathToNode() const { return path_;}    // can be empty,the node referenced by the In-Limit, this should hold the Limit.
   int tokens() const { return tokens_;}
   bool limit_submission() const { return limit_submission_;}
   bool limit_this_node_only() const { return limit_this_node_only_;}
   bool incremented() const { return incremented_;}          // only used with limit_this_node_only
   void set_incremented(bool f) { incremented_ = f;}
   std::string toString() const;

private:
   void write(std::string&) const;
   // Cache/retrieve the resolved Limit; lock() may yield NULL when the
   // referenced Limit has gone away.
   void limit( limit_ptr l) { limit_ = std::weak_ptr<Limit>(l);}
   Limit* limit() const { return limit_.lock().get();}       // can return NULL
   friend class InLimitMgr;

private:
   std::weak_ptr<Limit> limit_;            // NOT persisted since computed on the fly
   std::string n_;
   std::string path_;
   int tokens_{1};
   bool limit_this_node_only_{false};      // default is false,if True, will consume one token(s) only, regardless of number of children
   bool limit_submission_{false};          // limit submission only
   bool incremented_{false};               // state

   // Serialized via cereal (limit_ excluded, see above).
   friend class cereal::access;
   template<class Archive>
   void serialize(Archive & ar);
};
#endif
|
#!/bin/bash
# Minimal Ansible dynamic-inventory wrapper around a static JSON file.
if [[ "$1" == '--list' ]]; then
    cat ./inventory.json
elif [[ "$1" == '--host' ]]; then
    # Dynamic-inventory contract: scripts invoked with `--host <name>`
    # must emit a JSON mapping of host variables; everything already
    # lives in the --list output, so return an empty mapping.
    echo '{}'
fi
|
import pytest
@pytest.mark.run(order=4)
@pytest.mark.asyncio
async def test_present(hub, ctx, availability_set, resource_group):
    """Creating an availability set reports the expected 'new' changes."""
    expected = {
        "changes": {
            "new": {
                "name": availability_set,
                "sku": {"name": "Classic"},
                "type": "Microsoft.Compute/availabilitySets",
                "location": "eastus",
                "platform_fault_domain_count": 3,
                "platform_update_domain_count": 5,
            },
            "old": {},
        },
        "comment": f"Availability set {availability_set} has been created.",
        "name": availability_set,
        "result": True,
    }
    ret = await hub.states.azurerm.compute.availability_set.present(
        ctx, name=availability_set, resource_group=resource_group
    )
    # The resource id is server-generated, so drop it before comparing.
    ret["changes"]["new"].pop("id")
    assert ret == expected
@pytest.mark.run(order=4, after="test_present", before="test_absent")
@pytest.mark.asyncio
async def test_changes(hub, ctx, availability_set, resource_group, tags):
    """Re-running present with tags reports only the tags as changed."""
    expected = {
        "changes": {"tags": {"new": tags}},
        "comment": f"Availability set {availability_set} has been updated.",
        "name": availability_set,
        "result": True,
    }
    ret = await hub.states.azurerm.compute.availability_set.present(
        ctx, name=availability_set, resource_group=resource_group, tags=tags
    )
    assert ret == expected
@pytest.mark.run(order=-4)
@pytest.mark.asyncio
async def test_absent(hub, ctx, availability_set, resource_group):
    """Deleting the availability set empties 'new' and names it in 'old'."""
    expected = {
        "changes": {"new": {}, "old": {"name": availability_set,},},
        "comment": f"Availability set {availability_set} has been deleted.",
        "name": availability_set,
        "result": True,
    }
    ret = await hub.states.azurerm.compute.availability_set.absent(
        ctx, availability_set, resource_group
    )
    # Compare field-by-field: 'old' carries extra server-side attributes
    # beyond the name, so a full-dict equality check would be brittle.
    assert ret["changes"]["new"] == expected["changes"]["new"]
    assert ret["changes"]["old"]["name"] == expected["changes"]["old"]["name"]
    assert ret["result"] == expected["result"]
|
<filename>src/com/yarsnowing/toolssss/AlarmToastActivity.java
package com.yarsnowing.toolssss;
import android.support.v4.app.Fragment;
public class AlarmToastActivity extends SingleFragmentActivity {

    /** Supplies the fragment hosted by this single-fragment activity. */
    @Override
    protected Fragment createFragment() {
        return new AlarmToastFragment();
    }
}
|
import matplotlib.pyplot as plt


def custom_scatter_plot(x_data, y_data, title):
    """Scatter-plot ``y_data`` against ``x_data`` with fixed tick layout.

    X ticks are pinned at 0.01, 0.1 and 1; y ticks at -0.1..0.05. Shows
    the figure and returns the Axes for further customisation.
    """
    fig, ax = plt.subplots()
    ax.scatter(x_data, y_data)
    # Fix the tick POSITIONS before relabeling: calling set_xticklabels
    # without set_xticks attaches the labels to whatever ticks matplotlib
    # chose automatically, mislabeling the axis.
    ax.set_xticks([0.01, 0.1, 1])
    ax.set_xticklabels(['0.01', '0.1', '1'])
    ax.set_yticks([-0.1, -0.05, 0, 0.05])
    ax.tick_params(axis='both', which='major', labelsize=10)
    ax.set_title(title)
    plt.show()
    return ax
<reponame>matheusdoedev/southern-cars<filename>web/src/components/MenuMobileToggle/MenuMobileToggle.js
import React from "react";
import useMenuMobile from "../../hooks/useMenuMobile";
import "./MenuMobileToggle.styles.scss";
export default function MenuMobileToggle() {
const { handleActive } = useMenuMobile();
return (
<div
className="menu-mobile-toggle"
onClick={handleActive}
data-mobile="toggle"
>
<span />
</div>
);
}
|
import app from './app'

// Port comes from the environment when provided, otherwise 3000.
const port = process.env.SERVER_PORT || 3000

app.listen(port, () => console.log('Server OK - 200'))
|
<gh_stars>0
/*
* Copyright 2016 California Institute of Technology ("Caltech").
* U.S. Government sponsorship acknowledged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* License Terms
*/
package gov.nasa.jpl.imce.oti.magicdraw.dynamicScripts
//import java.awt.event.ActionEvent
//import com.nomagic.magicdraw.core.{Application, Project}
//import com.nomagic.magicdraw.uml.symbols.{DiagramPresentationElement, PresentationElement}
//import com.nomagic.magicdraw.uml.symbols.shapes.PackageView
//import com.nomagic.uml2.ext.magicdraw.classes.mdkernel.Package
//import com.nomagic.uml2.ext.magicdraw.mdprofiles.Profile
//import gov.nasa.jpl.dynamicScripts.DynamicScriptsTypes
//import gov.nasa.jpl.dynamicScripts.magicdraw.validation.MagicDrawValidationDataResults
//import gov.nasa.jpl.imce.oti.magicdraw.dynamicScripts.validation.OTIMagicDrawValidation
//import org.omg.oti.uml.read.api._
//import org.omg.oti.magicdraw.uml.read.{MagicDrawUML, MagicDrawUMLUtil}
//import org.omg.oti.uml.validation._
//import scala.collection.JavaConversions._
//import scala.language.{implicitConversions, postfixOps}
//import scala.util.{Success, Try}
/**
* @author Nicolas.F.R<EMAIL>
*/
object checkEachSelectedPackageReferencesOnlyAccessibleMembers {

  // NOTE(review): the entire body of this object is commented out -- the four
  // dynamic-script entry points ("doit...") and the two validation helpers.
  // It is preserved verbatim below, presumably pending an API/dependency
  // migration (the file's imports are commented out as well). Confirm whether
  // this code should be restored or removed.

  //  def doitExceptNestingPackagesAndAppliedProfiles
  //  ( p: Project,
  //    ev: ActionEvent,
  //    script: DynamicScriptsTypes.DiagramContextMenuAction,
  //    dpe: DiagramPresentationElement,
  //    triggerView: PackageView,
  //    triggerElement: Profile,
  //    selection: java.util.Collection[PresentationElement] )
  //  : Try[Option[MagicDrawValidationDataResults]] = {
  //
  //    implicit val umlUtil = MagicDrawUMLUtil( p )
  //    import umlUtil._
  //
  //    checkEachSelectedPackageReferencesOnlyAccessibleMembersExceptNestingPackagesAndAppliedProfiles(
  //      p,
  //      selection.toSet selectByKindOf { case pv: PackageView => umlPackage( pv.getPackage ) } )
  //  }

  //  def doitIncludingNestingPackagesAndAppliedProfiles
  //  ( p: Project,
  //    ev: ActionEvent,
  //    script: DynamicScriptsTypes.DiagramContextMenuAction,
  //    dpe: DiagramPresentationElement,
  //    triggerView: PackageView,
  //    triggerElement: Profile,
  //    selection: java.util.Collection[PresentationElement] )
  //  : Try[Option[MagicDrawValidationDataResults]] = {
  //
  //    implicit val umlUtil = MagicDrawUMLUtil( p )
  //    import umlUtil._
  //
  //    checkEachSelectedPackageReferencesOnlyAccessibleMembersIncludingNestingPackagesAndAppliedProfiles(
  //      p,
  //      selection.toSet selectByKindOf { case pv: PackageView => umlPackage( pv.getPackage ) } )
  //  }

  //  def doitExceptNestingPackagesAndAppliedProfiles
  //  ( p: Project,
  //    ev: ActionEvent,
  //    script: DynamicScriptsTypes.DiagramContextMenuAction,
  //    dpe: DiagramPresentationElement,
  //    triggerView: PackageView,
  //    triggerElement: Package,
  //    selection: java.util.Collection[PresentationElement] )
  //  : Try[Option[MagicDrawValidationDataResults]] = {
  //
  //    implicit val umlUtil = MagicDrawUMLUtil( p )
  //    import umlUtil._
  //
  //    checkEachSelectedPackageReferencesOnlyAccessibleMembersExceptNestingPackagesAndAppliedProfiles(
  //      p,
  //      selection.toSet selectByKindOf { case pv: PackageView => umlPackage( pv.getPackage ) } )
  //  }

  //  def doitIncludingNestingPackagesAndAppliedProfiles
  //  ( p: Project,
  //    ev: ActionEvent,
  //    script: DynamicScriptsTypes.DiagramContextMenuAction,
  //    dpe: DiagramPresentationElement,
  //    triggerView: PackageView,
  //    triggerElement: Package,
  //    selection: java.util.Collection[PresentationElement] )
  //  : Try[Option[MagicDrawValidationDataResults]] = {
  //
  //    implicit val umlUtil = MagicDrawUMLUtil( p )
  //    import umlUtil._
  //
  //    checkEachSelectedPackageReferencesOnlyAccessibleMembersIncludingNestingPackagesAndAppliedProfiles(
  //      p,
  //      selection.toSet selectByKindOf { case pv: PackageView => umlPackage( pv.getPackage ) } )
  //  }

  //  def checkEachSelectedPackageReferencesOnlyAccessibleMembersExceptNestingPackagesAndAppliedProfiles
  //  ( p: Project,
  //    pkgs: Iterable[UMLPackage[MagicDrawUML]] )
  //  ( implicit _umlUtil: MagicDrawUMLUtil )
  //  : Try[Option[MagicDrawValidationDataResults]] = {
  //
  //    import _umlUtil._
  //    val app = Application.getInstance
  //    val guiLog = app.getGUILog
  //    guiLog.clearLog()
  //
  //    val otiV = OTIMagicDrawValidation(p)
  //
  //    val rules = new UMLPackageableElementRules[Uml, MagicDrawUMLUtil] {
  //      implicit val umlOps = _umlUtil
  //    }
  //
  //    implicit val referencedButNotAccessibleValidationConstructor =
  //      rules.defaultReferencedButNotAccessibleConstructor _
  //
  //    val elementMessages = for {
  //      pkg <- pkgs
  //      _ = guiLog.log( s"Analyzing ${pkg.qualifiedName.get}" )
  //      mdPkg = umlMagicDrawUMLPackage(pkg).getMagicDrawPackage
  //      as = List( actions.SelectInContainmentTreeAction( mdPkg ) )
  //      violation <- rules.
  //        findNonAccessibleButReferencedImportablePackabeableElementsExceptNestingPackagesAndAppliedProfiles( pkg )
  //      vInfo = otiV.constructValidationInfo(
  //        otiV.MD_OTI_ValidationConstraint_UnresolvedCrossReference,
  //        Some(s"unaccessible cross-reference from ${pkg.qualifiedName.get}"),
  //        Nil).get
  //    } yield
  //      umlMagicDrawUMLPackageableElement(violation.referencedButNotAccessible).getMagicDrawElement -> List(vInfo)
  //
  //    otiV.makeMDIllegalArgumentExceptionValidation(
  //      "Validate each package references only accessible members (excluding nesting packages & applied profiles)",
  //      elementMessages.toMap)
  //  }

  //  def checkEachSelectedPackageReferencesOnlyAccessibleMembersIncludingNestingPackagesAndAppliedProfiles
  //  ( p: Project,
  //    pkgs: Iterable[UMLPackage[MagicDrawUML]] )
  //  ( implicit _umlUtil: MagicDrawUMLUtil )
  //  : Try[Option[MagicDrawValidationDataResults]] = {
  //
  //    import _umlUtil._
  //    val app = Application.getInstance
  //    val guiLog = app.getGUILog
  //    guiLog.clearLog()
  //
  //    val otiV = OTIMagicDrawValidation(p)
  //
  //    val rules = new UMLPackageableElementRules[Uml, MagicDrawUMLUtil] {
  //      implicit val umlOps = _umlUtil
  //    }
  //
  //    implicit val referencedButNotAccessibleValidationConstructor =
  //      rules.defaultReferencedButNotAccessibleConstructor _
  //
  //    val elementMessages = for {
  //      pkg <- pkgs
  //      _ = guiLog.log( s"Analyzing ${pkg.qualifiedName.get}" )
  //      as = List( actions.SelectInContainmentTreeAction( umlMagicDrawUMLPackage(pkg).getMagicDrawPackage ) )
  //      violation <- rules.
  //        findNonAccessibleButReferencedImportablePackabeableElementsIncludingNestingPackagesAndAppliedProfiles( pkg )
  //      vInfo = otiV.constructValidationInfo(
  //        otiV.MD_OTI_ValidationConstraint_UnresolvedCrossReference,
  //        Some(s"unaccessible cross-reference from ${pkg.qualifiedName.get}"),
  //        Nil).get
  //    } yield
  //      umlMagicDrawUMLPackageableElement(violation.referencedButNotAccessible).getMagicDrawElement -> List(vInfo)
  //
  //    otiV.makeMDIllegalArgumentExceptionValidation(
  //      "Validate each package references only accessible members (including nesting packages & applied profiles)",
  //      elementMessages.toMap)
  //  }
}
package interfaces
import (
"fmt"
)
// Shared fixture: one unsorted int slice converted to each of the five
// named types under test. One..Five are declared elsewhere in this package;
// judging by the example outputs below, they differ in whether String()
// copies before sorting -- verify against their definitions.
var (
	ints  = []int{3, 3, 1, 2, 6}
	one   = One(ints)
	two   = Two(ints)
	three = Three(ints)
	four  = Four(ints)
	five  = Five(ints)
)
// ExampleOne: fmt.Println invokes String() automatically, so the first two
// lines print sorted, while the raw []int conversion keeps the input order.
func ExampleOne() {
	fmt.Println(one) // String() is called automatically, so this prints sorted
	fmt.Println(one.String())
	fmt.Println([]int(one)) // raw conversion: original (unsorted) order
	// output:
	//[1 2 3 3 6]
	//[1 2 3 3 6]
	//[3 3 1 2 6]
}
// ExampleTwo behaves like ExampleOne: String() output is sorted, the raw
// conversion preserves the original order.
func ExampleTwo() {
	fmt.Println(two) // String() is called automatically, so this prints sorted
	fmt.Println(two.String())
	fmt.Println([]int(two))
	// output:
	//[1 2 3 3 6]
	//[1 2 3 3 6]
	//[3 3 1 2 6]
}
// ExampleThree behaves like ExampleOne: String() output is sorted, the raw
// conversion preserves the original order.
func ExampleThree() {
	fmt.Println(three) // String() is called automatically, so this prints sorted
	fmt.Println(three.String())
	fmt.Println([]int(three))
	// output:
	//[1 2 3 3 6]
	//[1 2 3 3 6]
	//[3 3 1 2 6]
}
// ExampleFour: Four has no String method, so both printed forms show the
// original unsorted order.
func ExampleFour() {
	// no String method, so both outputs are identical
	fmt.Println(four)
	//fmt.Println(four.String())
	fmt.Println([]int(four))
	// output:
	//[3 3 1 2 6]
	//[3 3 1 2 6]
}
// ExampleFive: Five's String() sorts in place without copying, so the
// caller's slice is mutated and every printed form comes out sorted.
func ExampleFive() {
	// no copy is made, so the caller's slice is mutated too
	// hence all three outputs are sorted
	fmt.Println(five) // String() is called automatically, so this prints sorted
	fmt.Println(five.String())
	fmt.Println([]int(five))
	// output:
	//[1 2 3 3 6]
	//[1 2 3 3 6]
	//[1 2 3 3 6]
}
|
// Example: list the PDP policies attached to one Domo dataset and print
// how many there are.
const { DomoClient } = require('../dist');
const { API_SCOPE } = require('../dist/common/Constants');

// OAuth client credentials come from the environment.
const clientId = process.env.DOMO_CLIENT_ID;
const clientSecret = process.env.DOMO_CLIENT_SECRET;

const datasetId = 'e10348d6-b0ab-4471-9195-4f862ac3c56c';
const scopes = [API_SCOPE.DATA];
const host = 'api.domo.com';

const domo = new DomoClient(clientId, clientSecret, scopes, host);

domo.policies.list(datasetId)
  .then(function (res) {
    console.log(`\nPolicies: ${res.length}`);
  })
  .catch(console.error);
|
#!/bin/sh
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2015 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
#######################################################################
# Copyright [2014] [Cisco Systems, Inc.]
#
# Licensed under the Apache License, Version 2.0 (the \"License\");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an \"AS IS\" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################
#------------------------------------------------------------------
# This script is used to start the routing daemons (zebra and ripd)
# $1 is the calling event (current_wan_state current_lan_state ipv6_prefix)
#------------------------------------------------------------------
# Pull in platform properties (e.g. BOX_TYPE) when present.
# POSIX fix: use "." rather than the bash-only "source" builtin, since this
# script declares #!/bin/sh.
if [ -f /etc/device.properties ]
then
    . /etc/device.properties
fi

SERVICE_NAME="routed"

case "$1" in
  ${SERVICE_NAME}-start)
      service_routed start
      ;;
  ${SERVICE_NAME}-stop)
      service_routed stop
      ;;
  ${SERVICE_NAME}-restart)
      service_routed restart
      ;;

  #----------------------------------------------------------------------------------
  # Add other event entry points here
  #----------------------------------------------------------------------------------
  wan-status)
      status=$(sysevent get wan-status)
      # POSIX fix: "=" instead of the bash-only "==" inside [ ].
      if [ "$status" = "started" ]; then
          service_routed start
      elif [ "$status" = "stopped" ]; then
          service_routed stop
      fi
      ;;
  lan-status)
      status=$(sysevent get lan-status)
      if [ "$status" = "started" ]; then
          service_routed start
      elif [ "$status" = "stopped" ]; then
          # As per Sky requirement, radvd should run with ULA prefix though the wan-status is down
          if [ "x$BOX_TYPE" != "xHUB4" ] && [ "x$BOX_TYPE" != "xSR300" ]; then
              service_routed stop
          fi
      fi
      ;;
  ripd-restart)
      service_routed rip-restart
      ;;
  zebra-restart)
      service_routed radv-restart
      ;;
  staticroute-restart)
      service_routed radv-restart
      ;;
  ipv6_prefix|ipv6_nameserver)
      service_routed radv-restart
      ;;
  *)
      echo "Usage: $SERVICE_NAME [ ${SERVICE_NAME}-start | ${SERVICE_NAME}-stop | ${SERVICE_NAME}-restart]" > /dev/console
      exit 3
      ;;
esac

exit 0
|
<reponame>openvinotoolkit/model_preparation_algorithm
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init, constant_init, build_activation_layer
from mmcls.models.builder import HEADS
from mmcls.models.heads.cls_head import ClsHead
@HEADS.register_module()
class NonLinearClsHead(ClsHead):
    """Non-linear classifier head (single hidden layer MLP).

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        hid_channels (int): Number of channels of hidden layer.
        act_cfg (dict): Config of activation layer.
        loss (dict): Config of classification loss.
        topk (int | tuple): Top-k accuracy.
        dropout (bool): If True, a Dropout(p=0.2) layer is inserted between
            the hidden activation and the final linear layer.
    """  # noqa: W605

    def __init__(self,
                 num_classes,
                 in_channels,
                 hid_channels=1280,
                 act_cfg=dict(type='ReLU'),
                 loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
                 topk=(1, ),
                 dropout=False):
        # NOTE(review): the caller-supplied ``topk`` is discarded and always
        # recomputed from ``num_classes`` -- confirm this is intentional.
        topk = (1, ) if num_classes < 5 else (1, 5)
        super(NonLinearClsHead, self).__init__(loss=loss, topk=topk)
        self.in_channels = in_channels
        self.hid_channels = hid_channels
        self.num_classes = num_classes
        self.act = build_activation_layer(act_cfg)
        self.dropout = dropout

        if self.num_classes <= 0:
            raise ValueError(
                f'num_classes={num_classes} must be a positive integer')

        self._init_layers()

    def _init_layers(self):
        # Linear -> BatchNorm1d -> activation [-> Dropout(0.2)] -> Linear.
        if self.dropout:
            self.classifier = nn.Sequential(
                nn.Linear(self.in_channels, self.hid_channels),
                nn.BatchNorm1d(self.hid_channels),
                self.act,
                nn.Dropout(p=0.2),
                nn.Linear(self.hid_channels, self.num_classes)
            )
        else:
            self.classifier = nn.Sequential(
                nn.Linear(self.in_channels, self.hid_channels),
                nn.BatchNorm1d(self.hid_channels),
                self.act,
                nn.Linear(self.hid_channels, self.num_classes)
            )

    def init_weights(self):
        # Normal(0, 0.01) init for linear layers; BN weights set to 1.
        for m in self.classifier:
            if isinstance(m, nn.Linear):
                normal_init(m, mean=0, std=0.01, bias=0)
            elif isinstance(m, nn.BatchNorm1d):
                constant_init(m, 1)

    def simple_test(self, img):
        """Test without augmentation."""
        cls_score = self.classifier(img)
        # Averaging supports heads that return one score per augmentation.
        if isinstance(cls_score, list):
            cls_score = sum(cls_score) / float(len(cls_score))
        pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
        if torch.onnx.is_in_onnx_export():
            # Keep the tensor form so it can be traced into the ONNX graph.
            return pred
        pred = list(pred.detach().cpu().numpy())
        return pred

    def forward_train(self, x, gt_label):
        # Loss computation is delegated to the ClsHead base class.
        cls_score = self.classifier(x)
        losses = self.loss(cls_score, gt_label)
        return losses
|
<filename>hermes-management/src/main/java/pl/allegro/tech/hermes/management/domain/oauth/commands/RemoveOAuthProviderRepositoryCommand.java
package pl.allegro.tech.hermes.management.domain.oauth.commands;
import pl.allegro.tech.hermes.api.OAuthProvider;
import pl.allegro.tech.hermes.domain.oauth.OAuthProviderRepository;
import pl.allegro.tech.hermes.management.domain.dc.DatacenterBoundRepositoryHolder;
import pl.allegro.tech.hermes.management.domain.dc.RepositoryCommand;
/**
 * Repository command that removes an OAuth provider by name, keeping a
 * snapshot of the provider so the removal can be rolled back.
 */
public class RemoveOAuthProviderRepositoryCommand extends RepositoryCommand<OAuthProviderRepository> {

    /** Name of the provider to remove. */
    private final String providerName;

    /** Provider definition captured by {@link #backup}, used by rollback. */
    private OAuthProvider backup;

    public RemoveOAuthProviderRepositoryCommand(String providerName) {
        this.providerName = providerName;
    }

    @Override
    public void backup(DatacenterBoundRepositoryHolder<OAuthProviderRepository> holder) {
        // Capture the full provider definition before it is deleted.
        this.backup = holder.getRepository().getOAuthProviderDetails(this.providerName);
    }

    @Override
    public void execute(DatacenterBoundRepositoryHolder<OAuthProviderRepository> holder) {
        OAuthProviderRepository repository = holder.getRepository();
        repository.removeOAuthProvider(this.providerName);
    }

    @Override
    public void rollback(DatacenterBoundRepositoryHolder<OAuthProviderRepository> holder) {
        // Recreate the snapshot taken in backup(); assumes backup() ran first.
        holder.getRepository().createOAuthProvider(this.backup);
    }

    @Override
    public Class<OAuthProviderRepository> getRepositoryType() {
        return OAuthProviderRepository.class;
    }

    @Override
    public String toString() {
        return String.format("RemoveOAuthProvider(%s)", this.providerName);
    }
}
|
package main
import (
_ "github.com/kernelschmelze/porkpie/plugin/filter"
_ "github.com/kernelschmelze/porkpie/plugin/geoip"
_ "github.com/kernelschmelze/porkpie/plugin/logger"
_ "github.com/kernelschmelze/porkpie/plugin/mail"
_ "github.com/kernelschmelze/porkpie/plugin/pushover"
_ "github.com/kernelschmelze/porkpie/plugin/reader"
_ "github.com/kernelschmelze/porkpie/plugin/sidmap"
_ "github.com/kernelschmelze/porkpie/plugin/slack"
"fmt"
"os"
"os/signal"
"time"
"github.com/kernelschmelze/pkg/plugin/config"
"github.com/kernelschmelze/pkg/plugin/manager"
log "github.com/kernelschmelze/pkg/logger"
)
// main wires the porkpie service together: load the TOML config, start the
// plugin manager, block until an interrupt arrives, then shut everything
// down. A 10-second watchdog force-exits if the deferred cleanup hangs.
func main() {

	path := "./config.toml"

	// Config errors are logged but not fatal.
	if err := config.Read(path); err != nil {
		log.Errorf("read config '%s' failed, err=%s", path, err)
	}

	// NOTE(review): GetManager is resolved from the ".../plugin/manager"
	// import above, referenced here as package "plugin" -- confirm the
	// declared package name.
	manager := plugin.GetManager()

	defer func() {
		config.Close() // close the file watcher, it is safe to call config.Write
		manager.Stop()
	}()

	manager.Start()

	// Blocks until an interrupt signal is received.
	signalHandler()

	// Watchdog: if the deferred shutdown below takes longer than 10s,
	// force-exit with a failure status.
	go func() {
		select {
		case <-time.After(10 * time.Second):
			os.Exit(1)
		}
	}()
}
// signalHandler blocks until the process receives an interrupt (SIGINT),
// then prints an empty line so the shell prompt is not glued to ^C output.
func signalHandler() {
	// Buffered channel: signal.Notify does not block when delivering, so an
	// unbuffered channel can drop a signal that arrives before the receive
	// below is ready (flagged by go vet).
	gracefulStop := make(chan os.Signal, 1)
	signal.Notify(gracefulStop, os.Interrupt)

	// A plain receive replaces the original single-case select.
	<-gracefulStop
	fmt.Println("")
}
|
<reponame>bio-boris/KBaseSearchEngine
package kbasesearchengine.test;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.regex.Pattern;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import kbasesearchengine.search.ElasticIndexingStorage;
import kbasesearchengine.search.IndexingConflictException;
import us.kbase.common.service.UObject;
/**
 * Manual/exploratory performance test: stores sparse binary vectors in
 * Elasticsearch as "pos|weight" payload strings (delimited_payload_filter)
 * and scores them with a native "payload_vector_score" script.
 *
 * NOTE(review): requires a live Elasticsearch on localhost:9200 plus the
 * native payload-scoring plugin -- not a self-contained unit test.
 */
public class ElasticPayloadAnanlyzerTester {

    // NOTE(review): this flag is never read anywhere in this class.
    public static final boolean cleanup = true;

    private static String indexName;
    private static ElasticIndexingStorage indexStorage;

    /** Creates a uniquely prefixed test index with the payload mapping. */
    @BeforeClass
    public static void prepare() throws Exception {
        String indexNamePrefix = "test_" + System.currentTimeMillis() + ".";
        indexStorage = new ElasticIndexingStorage(
                new HttpHost("localhost", 9200), null);
        indexStorage.setIndexNamePrefix(indexNamePrefix);
        cleanup();
        indexName = indexNamePrefix + "sparse";
        createTables(indexName);
    }

    /**
     * Creates the index with a custom "payload_analyzer" (whitespace
     * tokenizer + delimited_payload_filter) and a "sparse" mapping whose
     * "@profile" field stores term payloads.
     */
    @SuppressWarnings("serial")
    private static void createTables(String indexName)
            throws IOException, IndexingConflictException {
        // Index settings
        Map<String, Object> payloadAnalyzer = new LinkedHashMap<String, Object>() {{
            put("type", "custom");
            put("tokenizer", "whitespace");
            put("filter", "delimited_payload_filter");
        }};
        Map<String, Object> analyzer = new LinkedHashMap<String, Object>() {{
            put("payload_analyzer", payloadAnalyzer);
        }};
        Map<String, Object> analysis = new LinkedHashMap<String, Object>() {{
            put("analyzer", analyzer);
        }};
        Map<String, Object> settings = new LinkedHashMap<String, Object>() {{
            put("analysis", analysis);
        }};
        // Table
        Map<String, Object> mappings = new LinkedHashMap<>();
        // Now data (child)
        String tableName = "sparse";
        Map<String, Object> table = new LinkedHashMap<>();
        mappings.put(tableName, table);
        Map<String, Object> props = new LinkedHashMap<>();
        table.put("properties", props);
        props.put("guid", new LinkedHashMap<String, Object>() {{
            put("type", "integer");
        }});
        props.put("@profile", new LinkedHashMap<String, Object>() {{
            put("type", "text");
            put("term_vector", "with_positions_offsets_payloads");
            put("analyzer", "payload_analyzer");
        }});
        Map<String, Object> doc = new LinkedHashMap<String, Object>() {{
            put("settings", settings);
            put("mappings", mappings);
        }};
        indexStorage.makeRequest("PUT", "/" + indexName, doc);
    }

    // Intentionally empty: test indexes are only wiped in prepare().
    @AfterClass
    public static void teardown() throws Exception {
    }

    /** Deletes every Elasticsearch index (the test_ filter is disabled). */
    private static void cleanup() throws Exception {
        Set<String> indNames = indexStorage.listIndeces();
        for (String index : indNames) {
            /*if (!index.startsWith("test_")) {
                System.out.println("Skipping Elasticsearch index: " + index);
                continue;
            }*/
            System.out.println("Deleting Elasticsearch index: " + index);
            indexStorage.deleteIndex(index);
        }
    }

    /**
     * Generates a sparse dataset, bulk-indexes it, then runs one
     * payload-vector-score query built from the first row and prints the
     * scored hits plus rough timings.
     */
    @SuppressWarnings({ "serial", "unchecked" })
    @Test
    public void testMain() throws Exception {
        // Fixed seed for reproducibility.
        Random rand = new Random(1234567890);
        int datasetSize = 1000;
        int dimensions = 1000000;
        int fulfillCount = 1000;
        List<List<Integer>> generated = generatePackedData(rand,
                datasetSize, dimensions, fulfillCount);
        try {
            File tempFile = new File("test_local/temp_files/esbulk/bulk.json");
            Map<String, Object> index = new LinkedHashMap<String, Object>() {{
                put("_index", indexName);
                put("_type", "sparse");
            }};
            Map<String, Object> header = new LinkedHashMap<String, Object>() {{
                put("index", index);
            }};
            List<Integer> firstRow = null;
            long t1 = System.currentTimeMillis();
            // Single block of up to 10000 docs (loop bound is 1).
            for (int blockPos = 0; blockPos < 1; blockPos++) {
                PrintWriter pw = new PrintWriter(tempFile);
                List<List<Integer>> callback = new ArrayList<>();
                //int saved = loadData(new File("test_local/sparse/data/sparse.csv"), 10000, blockPos,
                //        header, pw, callback);
                int saved = saveGeneratedBlock(generated, 10000, blockPos, header, pw, callback);
                pw.close();
                if (firstRow == null) {
                    firstRow = callback.get(0);
                }
                System.out.println("Block " + blockPos + ": " + saved);
                if (saved == 0) {
                    break;
                }
                indexStorage.makeRequestBulk("POST", "/" + indexName, tempFile);
            }
            indexStorage.makeRequest("POST", "/" + indexName + "/_refresh", null);
            System.out.println("Indexing is done in " + (System.currentTimeMillis() - t1) + " ms");
            // Build the query vector from the first generated row.
            List<Double> queryVec = new ArrayList<>();
            StringBuilder queryText = new StringBuilder();
            for (int i = 0; i < dimensions; i++) {
                queryVec.add(0.0);
            }
            for (int item : firstRow) {
                queryVec.set(item, 1.0);
                queryText.append(item).append(' ');
            }
            System.out.println("Query text: " + queryText.toString());
            Map<String, Object> queryString = new LinkedHashMap<String, Object>() {{
                put("query", queryText.toString()); // "*");
            }};
            Map<String, Object> subQuery = new LinkedHashMap<String, Object>() {{
                put("query_string", queryString);
            }};
            Map<String, Object> params = new LinkedHashMap<String, Object>() {{
                put("field", "@profile");
                put("vector", queryVec); //Arrays.asList(0.1,2.3,-1.6,0.7,-1.3));
                put("cosine", true);
            }};
            // Native script scoring: requires the payload_vector_score plugin.
            Map<String, Object> script = new LinkedHashMap<String, Object>() {{
                put("inline", "payload_vector_score");
                put("lang", "native");
                put("params", params);
            }};
            Map<String, Object> scriptScore = new LinkedHashMap<String, Object>() {{
                put("script", script);
            }};
            Map<String, Object> functionScore = new LinkedHashMap<String, Object>() {{
                put("query", subQuery);
                put("script_score", scriptScore);
                put("boost_mode", "replace");
            }};
            Map<String, Object> query = new LinkedHashMap<String, Object>() {{
                put("function_score", functionScore);
            }};
            Map<String, Object> doc = new LinkedHashMap<String, Object>() {{
                put("query", query);
                put("_source", Arrays.asList("guid"));
            }};
            long t2 = System.currentTimeMillis();
            Response resp = indexStorage.makeRequest("GET", "/" + indexName + "/sparse/_search", doc);
            System.out.println("Search is done in " + (System.currentTimeMillis() - t2) + " ms");
            Map<String, Object> data = UObject.getMapper().readValue(
                    resp.getEntity().getContent(), Map.class);
            Map<String, Object> hits = (Map<String, Object>)data.get("hits");
            List<Map<String, Object>> hitList = (List<Map<String, Object>>)hits.get("hits");
            for (Map<String, Object> hit : hitList) {
                double score = (Double)hit.get("_score");
                Map<String, Object> source = (Map<String, Object>)hit.get("_source");
                int guid = (Integer)source.get("guid");
                System.out.println("Found: " + guid + " -> " + score);
            }
        } catch (Exception e) {
            throw new IllegalStateException(e);
        }
        // Timings observed with larger parameters, kept for reference:
        /*
        int datasetSize = 10000;
        int dimensions = 100000;
        int fulfillCount = 1000;
        Indexing is done in 10552 ms
        Search is done in 61158 ms
        */
    }

    /**
     * Writes one bulk-file block (header line + doc line per row) for the
     * given block position; records the first generated row in callback.
     */
    private static int saveGeneratedBlock(List<List<Integer>> generated,
            int blockSize, int blockPos, Map<String, Object> header,
            PrintWriter pw, List<List<Integer>> callback) throws Exception {
        int ret = 0;
        for (int i = blockPos * blockSize; i < Math.min((blockPos + 1) * blockSize, generated.size()); i++, ret++) {
            pw.println(UObject.transformObjectToString(header));
            pw.println(UObject.transformObjectToString(createDoc(i + 1, toProfile(generated.get(i)))));
        }
        callback.add(generated.get(0));
        return ret;
    }

    /**
     * Generates datasetSize sparse binary rows of the given dimensionality.
     * The first row has fulfillCount random 1-bits; each following row is
     * the previous row with one 0-bit set and one 1-bit cleared, so all
     * rows stay highly similar. Rows are packed as lists of 1-bit indices.
     */
    private static List<List<Integer>> generatePackedData(Random rand,
            int datasetSize, int dimensions, int fulfillCount) {
        List<List<Integer>> ret = new ArrayList<>();
        byte[] v1 = new byte[dimensions];
        {
            for (int i = 0; i < fulfillCount; i++) {
                // Retry until an unset position is found.
                while (true) {
                    int pos = rand.nextInt(dimensions);
                    if (v1[pos] == 0) {
                        v1[pos] = 1;
                        break;
                    }
                }
            }
        }
        ret.add(pack(v1));
        for (int n = 1; n < datasetSize; n++) {
            byte[] v2 = new byte[dimensions];
            System.arraycopy(v1, 0, v2, 0, dimensions);
            int pos0 = findRandomPos(rand, v1, false);
            int pos1 = findRandomPos(rand, v1, true);
            if (v1[pos0] > 0 || v1[pos1] == 0) {
                throw new IllegalStateException();
            }
            v2[pos0] = 1;
            v2[pos1] = 0;
            ret.add(pack(v2));
            v1 = v2;
        }
        return ret;
    }

    /**
     * Returns a random position whose bit matches nonZero, using linear
     * probing from a random start; throws after a full wrap-around.
     */
    private static int findRandomPos(Random rand, byte[] vec, boolean nonZero) {
        int dimensions = vec.length;
        int ret = rand.nextInt(dimensions);
        for (int iter = 0; (vec[ret] > 0) != nonZero; iter++) {
            ret = (ret + 1) % dimensions;
            if (iter >= dimensions) {
                throw new IllegalStateException("Too many iterations");
            }
        }
        return ret;
    }

    /** Packs a dense 0/1 vector into the list of its non-zero indices. */
    private static List<Integer> pack(byte[] array) {
        List<Integer> ret = new ArrayList<>();
        for (int i = 0; i < array.length; i++) {
            if (array[i] != 0) {
                ret.add(i);
            }
        }
        return ret;
    }

    /** Renders packed indices as "pos|1" tokens for the payload analyzer. */
    private static String toProfile(List<Integer> nonZeroPack) {
        StringBuilder ret = new StringBuilder();
        for (int pos : nonZeroPack) {
            if (ret.length() > 0) {
                ret.append(" ");
            }
            ret.append(pos).append("|").append(1);
        }
        return ret.toString();
    }

    /**
     * Alternative loader: reads "id<TAB>profId" rows from a TSV file and
     * bulk-writes one block of docs. Currently unused (see commented call
     * in testMain).
     * NOTE(review): rows are grouped by consecutive equal ids -- presumably
     * the input is sorted by id; verify against the data file.
     */
    @SuppressWarnings("unused")
    private static int loadData(File input, int blockSize, int blockPos,
            Map<String, Object> header, PrintWriter pw, List<List<Integer>> firstRow) throws Exception {
        int blockStart = blockPos * blockSize;
        int itemsStoredInBlock = 0;
        int rowPos = 0;
        BufferedReader br = new BufferedReader(new FileReader(input));
        int currentId = -1;
        StringBuilder temp = null;
        List<Integer> row = new ArrayList<>();
        while (true) {
            String l = br.readLine();
            if (l == null || l.trim().length() == 0) {
                break;
            }
            String[] parts = l.split(Pattern.quote("\t"));
            int id = (int)Math.round(Double.parseDouble(parts[0]));
            if (id != currentId) {
                // Flush the previous row once its id run ends.
                if (currentId > 0) {
                    if (rowPos >= blockStart) {
                        pw.println(UObject.transformObjectToString(header));
                        pw.println(UObject.transformObjectToString(createDoc(currentId, temp.toString())));
                        if (firstRow != null && firstRow.isEmpty()) {
                            firstRow.add(row);
                        }
                        currentId = -1;
                        itemsStoredInBlock++;
                        if (itemsStoredInBlock >= blockSize) {
                            break;
                        }
                    }
                    rowPos++;
                }
                currentId = id;
                temp = new StringBuilder();
                row = new ArrayList<>();
            }
            int profId = Integer.parseInt(parts[1]);
            if (temp.length() > 0) {
                temp.append(" ");
            }
            temp.append(profId).append("|").append(1);
            row.add(profId);
        }
        // Flush the trailing row, if any.
        if (currentId > 0 && rowPos >= blockStart) {
            pw.println(UObject.transformObjectToString(header));
            pw.println(UObject.transformObjectToString(createDoc(currentId, temp.toString())));
            itemsStoredInBlock++;
        }
        br.close();
        return itemsStoredInBlock;
    }

    /** Builds one indexable document: {"guid": ..., "@profile": ...}. */
    @SuppressWarnings("serial")
    private static Map<String, Object> createDoc(int guid, String profile) {
        return new LinkedHashMap<String, Object>() {{
            put("guid", guid);
            put("@profile", profile);
        }};
    }
}
|
#!/bin/bash
# Wrapper: source the environment prepared by ttcp_activate.sh (located next
# to this script), then run the actual action.

# Get the directory of the current script, regardless of the caller's CWD.
current_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Source the activation script and bail out if it cannot be loaded.
# Fix: test the command directly instead of inspecting $? afterwards
# (ShellCheck SC2181), and send the error to stderr.
if ! source "$current_dir/ttcp_activate.sh"; then
    echo "Error: Failed to source ttcp_activate.sh" >&2
    exit 1
fi

# Perform the specific action using the environment variables and functions from ttcp_activate.sh
# Replace the following line with the actual action to be performed
echo "Executing specific action using environment variables and functions from ttcp_activate.sh"
#!/bin/bash
# Runs the "event types per login" SQL report for a set of companies over a
# date range, against the local GHA database.
#
# Required environment: PG_PASS (Postgres password).
# Arguments: $1 companies list, $2 start date, $3 end date.
if [ -z "$PG_PASS" ]
then
  echo "$0: you need to specify PG_PASS env variable"
  exit 1
fi
if ( [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ] )
then
  # Fix: "starty" typo and the unbalanced quotes in the example dates.
  echo "$0: you need to provide companies, start date, end date arguments"
  echo "Example \"'Google', 'VMware'\" '2014-01-01' '2019-01-01'"
  echo "Use GHA2DB_CSVOUT=filename.csv to save as CSV"
  exit 1
fi
GHA2DB_LOCAL=1 GHA2DB_SKIPTIME=1 GHA2DB_SKIPLOG=1 runq util_sql/event_types_per_login.sql {{companies}} "$1" {{from}} "$2" {{to}} "$3"
|
#!/usr/bin/env bash
# Base16 Atelier Estuary Light - Gnome Terminal color scheme install script
# Bram de Haan (http://atelierbramdehaan.nl)

# Allow the caller to override the profile identity and helper binaries.
[[ -z "$PROFILE_NAME" ]] && PROFILE_NAME="Base 16 Atelier Estuary Light 256"
[[ -z "$PROFILE_SLUG" ]] && PROFILE_SLUG="base-16-atelier-estuary-light-256"
[[ -z "$DCONF" ]] && DCONF=dconf
[[ -z "$UUIDGEN" ]] && UUIDGEN=uuidgen
# Write a single key/value under the new dconf profile.
# NOTE(review): "$type" is never assigned anywhere in this script, so the
# string-quoting branch below never fires; callers pre-quote string values
# themselves (e.g. "'$PROFILE_NAME'"). Confirm before changing.
dset() {
    local key="$1"; shift
    local val="$1"; shift

    if [[ "$type" == "string" ]]; then
        val="'$val'"
    fi

    "$DCONF" write "$PROFILE_KEY/$key" "$val"
}
# Because dconf still doesn't have "append"
# Appends the quoted $val to the dconf list at $key, removing any existing
# occurrence first so the entry is not duplicated.
dlist_append() {
    local key="$1"; shift
    local val="$1"; shift

    # Strip [ ], split on commas, drop existing copies of $val, re-add it,
    # then re-join with commas (head -c-1 trims the trailing newline).
    local entries="$(
        {
            "$DCONF" read "$key" | tr -d '[]' | tr , "\n" | fgrep -v "$val"
            echo "'$val'"
        } | head -c-1 | tr "\n" ,
    )"

    "$DCONF" write "$key" "[$entries]"
}
# Newest versions of gnome-terminal use dconf
if which "$DCONF" > /dev/null 2>&1; then
    # Check that uuidgen is available
    type $UUIDGEN >/dev/null 2>&1 || { echo >&2 "Requires uuidgen but it's not installed. Aborting!"; exit 1; }

    [[ -z "$BASE_KEY_NEW" ]] && BASE_KEY_NEW=/org/gnome/terminal/legacy/profiles:

    # Only proceed when at least one dconf profile already exists.
    if [[ -n "`$DCONF list $BASE_KEY_NEW/`" ]]; then
        # New-style profiles are keyed by UUID rather than by slug.
        if which "$UUIDGEN" > /dev/null 2>&1; then
            PROFILE_SLUG=`uuidgen`
        fi

        # Use the configured default profile; otherwise the first one listed.
        if [[ -n "`$DCONF read $BASE_KEY_NEW/default`" ]]; then
            DEFAULT_SLUG=`$DCONF read $BASE_KEY_NEW/default | tr -d \'`
        else
            DEFAULT_SLUG=`$DCONF list $BASE_KEY_NEW/ | grep '^:' | head -n1 | tr -d :/`
        fi

        DEFAULT_KEY="$BASE_KEY_NEW/:$DEFAULT_SLUG"
        PROFILE_KEY="$BASE_KEY_NEW/:$PROFILE_SLUG"

        # Copy existing settings from default profile
        $DCONF dump "$DEFAULT_KEY/" | $DCONF load "$PROFILE_KEY/"

        # Add new copy to list of profiles
        dlist_append $BASE_KEY_NEW/list "$PROFILE_SLUG"

        # Update profile values with theme options
        dset visible-name "'$PROFILE_NAME'"
        dset palette "['#f4f3ec', '#ba6236', '#7d9726', '#a5980d', '#36a166', '#5f9182', '#5b9d48', '#5f5e4e', '#878573', '#ba6236', '#7d9726', '#a5980d', '#36a166', '#5f9182', '#5b9d48', '#22221b']"
        dset background-color "'#f4f3ec'"
        dset foreground-color "'#5f5e4e'"
        dset bold-color "'#5f5e4e'"
        dset bold-color-same-as-fg "true"
        dset cursor-colors-set "true"
        dset cursor-background-color "'#5f5e4e'"
        dset cursor-foreground-color "'#f4f3ec'"
        dset use-theme-colors "false"
        dset use-theme-background "false"

        unset PROFILE_NAME
        unset PROFILE_SLUG
        unset DCONF
        unset UUIDGEN

        # Done via dconf; skip the gconf fallback below entirely.
        exit 0
    fi
fi
# Fallback for Gnome 2 and early Gnome 3
[[ -z "$GCONFTOOL" ]] && GCONFTOOL=gconftool
[[ -z "$BASE_KEY" ]] && BASE_KEY=/apps/gnome-terminal/profiles

# gconf profiles are keyed by slug, not by UUID.
PROFILE_KEY="$BASE_KEY/$PROFILE_SLUG"
# Write one typed key under the gconf profile.
# Usage: gset <type> <key> <value>
gset() {
    "$GCONFTOOL" --set --type "$1" "$PROFILE_KEY/$2" -- "$3"
}
# Because gconftool doesn't have "append"
# Appends $val to the gconf list at $key, dropping any existing occurrence
# first so the entry is not duplicated.
glist_append() {
    local type="$1"; shift
    local key="$1"; shift
    local val="$1"; shift

    # Strip [ ], split on commas, drop existing copies of $val, re-add it,
    # then re-join with commas (head -c-1 trims the trailing newline).
    local entries="$(
        {
            "$GCONFTOOL" --get "$key" | tr -d '[]' | tr , "\n" | fgrep -v "$val"
            echo "$val"
        } | head -c-1 | tr "\n" ,
    )"

    "$GCONFTOOL" --set --type list --list-type $type "$key" "[$entries]"
}
# Append the Base16 profile to the profile list
glist_append string /apps/gnome-terminal/global/profile_list "$PROFILE_SLUG"

# Write the theme values into the gconf profile.
gset string visible_name "$PROFILE_NAME"
gset string palette "#f4f3ec:#ba6236:#7d9726:#a5980d:#36a166:#5f9182:#5b9d48:#5f5e4e:#878573:#ba6236:#7d9726:#a5980d:#36a166:#5f9182:#5b9d48:#22221b"
gset string background_color "#f4f3ec"
gset string foreground_color "#5f5e4e"
gset string bold_color "#5f5e4e"
gset bool bold_color_same_as_fg "true"
# Fix: these three keys were copy-pasted from the dconf section above --
# gconf key names use underscores, and the values must not carry the
# embedded dconf-style quotes (cf. background_color above).
gset bool cursor_colors_set "true"
gset string cursor_background_color "#5f5e4e"
gset string cursor_foreground_color "#f4f3ec"
gset bool use_theme_colors "false"
gset bool use_theme_background "false"

unset PROFILE_NAME
unset PROFILE_SLUG
unset DCONF
unset UUIDGEN
|
#!/bin/busybox ash
# Smoke test for backup.sh: build a tiny source tree, run a full-tree backup,
# then a single-subdir backup, printing inode numbers so hardlink behaviour
# can be inspected by eye between the two pauses.
rm -rf test
mkdir -p test/src/ignore test/src/use/me

export BACKUP_ROOT="$PWD/test"
export BACKUP_TIME_FORMAT="%F %T"
export BACKUP_CONFIG="/dev/null"
./init.sh

# Fallback "tree": print inode + path via find when tree(1) is absent
# ($2 skips the --inodes flag used at the call sites below).
which tree >/dev/null 2>&1 || tree() { `which find` $2 -printf '%i\t%p\n'; }

# test 1: sync whole tree
echo a>test/src/ignore/file
echo b>test/src/use/me/file
echo c>test/src/use/file
rsync -a "$PWD/test/src/" "$PWD/test/current"
./backup.sh
tree --inodes test
sqlite3 -column -header test/backup.db 'select * from history;'
echo "looks good?"
read

# test 2: sync only one subdir
echo d>test/src/ignore/file
echo e>test/src/use/me/file
echo f>test/src/use/file
# Override run_this so the sourced backup.sh only syncs use/me.
run_this()
{
    run_rsync always use/me "$PWD/test/src/use/me/"
}
. ./backup.sh
tree --inodes test
sqlite3 -column -header test/backup.db 'select * from history;'
echo "looks good?"
# read
|
/*
============================================================================
This source file is part of the Ogre-Maya Tools.
Distributed as part of Ogre (Object-oriented Graphics Rendering Engine).
Copyright (C) 2003 Fifty1 Software Inc., Bytelords
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
or go to http://www.gnu.org/licenses/gpl.txt
============================================================================
*/
#ifndef _OGREMAYA_COMMON_H_
#define _OGREMAYA_COMMON_H_
#if defined( _MSC_VER )
// Turn off warnings generated by long std templates
// This warns about truncation to 255 characters in debug/browse info
# pragma warning (disable : 4786)
#endif
namespace OgreMaya {

    // Scalar type used throughout the exporter.
    typedef float Real;

    /** Deletes every element in the iterator range [it, end). */
    template <typename Iterator>
    void deleteAll(Iterator it, Iterator end) {
        while (it != end) {
            delete *it;
            ++it;
        }
    }

    /**
     * Returns true when both ranges have the same length and pairwise
     * equal elements.
     */
    template <typename Iterator>
    bool listEqual(Iterator it1, Iterator it2, Iterator end1, Iterator end2) {
        while (it1 != end1 && it2 != end2) {
            if (!(*it1 == *it2)) {
                return false;
            }
            ++it1;
            ++it2;
        }
        // Equal only if both ranges were exhausted together.
        return it1 == end1 && it2 == end2;
    }

    /** Simple 3-component vector, zero-initialized by default. */
    struct Vector3 {
        Real x,y,z;
        Vector3(): x(0), y(0), z(0) {}
        bool operator ==(const Vector3& other) const {
            return x==other.x && y==other.y && z==other.z;
        }
        // Uniform scale in place.
        Vector3 &operator *=(float mult) {
            x = (Real)mult * x;
            y = (Real)mult * y;
            z = (Real)mult * z;
            return *this;
        }
    };

    /** RGBA colour; defaults to opaque black (0,0,0,1). */
    struct ColourValue {
        Real r,g,b,a;
        ColourValue(): r(0), g(0), b(0), a(1) {}
        bool operator ==(const ColourValue& other) const {
            return r==other.r && g==other.g && b==other.b && a==other.a;
        }
    };
}
#endif
|
#!/bin/bash

# Stream the salt minion log; if it does not exist yet, announce it,
# create it, and retry (recursing once the touch succeeds).
salt_minion_check() {
    if [ -f /var/log/salt/minion ]; then
        tail -f /var/log/salt/minion
    else
        echo "File not found!" &&
            touch /var/log/salt/minion &&
            salt_minion_check
    fi
}

# Point the minion at its master and give it a hostname-derived id.
echo "master: salt-master" >> /etc/salt/minion &&
    echo "id: salt-minion-$(hostname)" >> /etc/salt/minion

# Start the minion, then keep the container alive by following its log.
service salt-minion start &&
    salt_minion_check
|
<gh_stars>1-10
import { Injectable } from '@angular/core';
/** Numeric status/error code to message lookup table. */
interface MessageDict {
  [key: number]: string;
}

/**
 * Maps API status and application error codes to user-facing messages.
 * Unknown codes fall back to the generic 500 message.
 */
@Injectable({
  providedIn: 'root',
})
export class ErrorMessageService {
  // HTTP status codes (401-500) and application codes (1000-1503).
  messages: MessageDict = {
    401: 'ユーザが認証されていません',
    403: 'その動作は許可されていません',
    404: '存在しないURLへはアクセスできません',
    500: '予期せぬエラーが発生しました',
    1000: '予期せぬエラーが発生しました',
    1101: '存在しないユーザです',
    1102: '存在しないロールです',
    1103: '存在しない予約です',
    1104: 'APIが見つかりません',
    1200: 'メールアドレスは既に登録済みです',
    1201: 'その時刻は既に予約済みです',
    1300: 'その動作は許可されていません',
    1400: '無効なリクエストです',
    1401: 'パスワードの長さは8~32文字にしてください',
    1402: 'パスワードが単純すぎます',
    1403: '無効な予約時間です',
    1404: '予約時間が長すぎます',
    1405: '過去の時間は予約できません',
    1406: '過去の予約は編集できません',
    1407: '過去の予約は削除できません',
    1408: '9:00~20:00の範囲で予約してください',
    1500: 'ユーザはログインしていません',
    1501: 'パスワードが間違っています',
    1502: '無効なアクセストークンです',
    1503: '期限切れのアクセストークンです',
  };

  constructor() {}

  /**
   * Look up the message for a status code.
   *
   * @param statusCode HTTP status or application error code
   * @returns the mapped message, or the generic 500 message when unknown
   */
  getErrorMessage(statusCode: number): string {
    const message = this.messages[statusCode];
    return message !== undefined ? message : this.messages[500];
  }
}
|
package cloud.nativ.jakarta;
import lombok.extern.java.Log;
import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.Map;
import static java.util.Collections.singletonMap;
/**
 * JAX-RS resource exposing GET /demo.
 * Produces a single-entry JSON object: {"name": <message from DemoService>}.
 */
@Log
@ApplicationScoped
@Path("demo")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public class DemoResource {
// CDI-injected backing service that supplies the response message.
@Inject
private DemoService service;
/**
 * Handles GET /demo.
 *
 * @param name optional "name" query parameter, forwarded to the service
 * @return 200 OK with a JSON body of the form {"name": message}
 */
@GET
public Response demo(@QueryParam("name") String name) {
Map<String, Object> payload = singletonMap("name", service.getMessage(name));
return Response.ok(payload).build();
}
}
|
#!/bin/bash -e
# taken from https://github.com/bioconda/bioconda-recipes/blob/25ee21573c577aa1ae899e0f7fbbc848d8a63865/recipes/longshot/build.sh
# this build script is taken from the rust-bio-tools recipe
# https://github.com/bioconda/bioconda-recipes/blob/master/recipes/rust-bio-tools/build.sh
# taken from yacrd recipe, see: https://github.com/bioconda/bioconda-recipes/blob/2b02c3db6400499d910bc5f297d23cb20c9db4f8/recipes/yacrd/build.sh
# On macOS CI, HOME may be unset/wrong; pin it so cargo can write its
# registry and cache.
if [ "$(uname)" == "Darwin" ]; then
    # FIX: the hard-coded CircleCI path `export HOME="/Users/distiller"` was
    # dead code — it was unconditionally overwritten by the next line.
    # Keep only the effective assignment (build directory as HOME).
    export HOME=`pwd`
    # according to https://github.com/rust-lang/cargo/issues/2422#issuecomment-198458960 removing circle ci default configuration solves cargo trouble downloading crates
    #git config --global --unset url.ssh://git@github.com.insteadOf
fi
# build statically linked binary with Rust; quote $PREFIX so paths with
# spaces don't split.
C_INCLUDE_PATH="$PREFIX/include" LIBRARY_PATH="$PREFIX/lib" cargo install --path . --root "$PREFIX"
|
#!/bin/bash
set -e
set -o pipefail
# Generate $KEEPALIVED_CONF from KEEPALIVED_* environment variables.
# Returns 1 (nothing written) when no KEEPALIVED_VIRTUAL_IPADDRESS_<n>
# variables are present; returns 0 after writing a full config otherwise.
config_keepalived() {
    # At least one virtual IP is mandatory; bail out early otherwise.
    if ! compgen -A variable | grep -q -E 'KEEPALIVED_VIRTUAL_IPADDRESS_[0-9]{1,3}'; then
        # FIX: corrected "varibles" -> "variables" in the log message.
        echo "[$(date)][KEEPALIVED] No KEEPALIVED_VIRTUAL_IPADDRESS_ variables detected."
        return 1
    fi
    # MASTER gets a higher default priority than BACKUP.
    KEEPALIVED_STATE=${KEEPALIVED_STATE:-MASTER}
    if [[ "${KEEPALIVED_STATE^^}" == 'MASTER' ]]; then
        KEEPALIVED_PRIORITY=${KEEPALIVED_PRIORITY:-200}
    elif [[ "${KEEPALIVED_STATE^^}" == 'BACKUP' ]]; then
        KEEPALIVED_PRIORITY=${KEEPALIVED_PRIORITY:-100}
    fi
    KEEPALIVED_INTERFACE=${KEEPALIVED_INTERFACE:-eth0}
    KEEPALIVED_VIRTUAL_ROUTER_ID=${KEEPALIVED_VIRTUAL_ROUTER_ID:-1}
    KEEPALIVED_ADVERT_INT=${KEEPALIVED_ADVERT_INT:-1}
    KEEPALIVED_AUTH_PASS=${KEEPALIVED_AUTH_PASS:-"pwd$KEEPALIVED_VIRTUAL_ROUTER_ID"}
    # Default the unicast source IP to the first IPv4 on the interface.
    if [[ ! $KEEPALIVED_UNICAST_SRC_IP ]]; then
        bind_target="$(ip addr show "$KEEPALIVED_INTERFACE" | \
            grep -m 1 -E -o 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | awk '{print $2}')"
        KEEPALIVED_UNICAST_SRC_IP="$bind_target"
    fi
    # Start the config file (truncating any previous content).
    {
        echo 'global_defs {'
        echo 'router_id LVS_MAIN'
        echo '}'
    } > "$KEEPALIVED_CONF"
    # Optional kube-apiserver health-check script block.
    if [[ ${KEEPALIVED_KUBE_APISERVER_CHECK,,} == 'true' ]]; then
        # if no address supplied, assume its the first (or only) VIP
        if [[ ! $KUBE_APISERVER_ADDRESS ]]; then
            kube_api_vip="$(compgen -A variable | grep -E 'KEEPALIVED_VIRTUAL_IPADDRESS_[0-9]{1,3}' | head -1)"
            KUBE_APISERVER_ADDRESS="$(echo "${!kube_api_vip}" | grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')"
        fi
        KUBE_APISERVER_PORT=${KUBE_APISERVER_PORT:-6443}
        KUBE_APISERVER_CHK_INTERVAL=${KUBE_APISERVER_CHK_INTERVAL:-'3'}
        KUBE_APISERVER_CHK_WEIGHT=${KUBE_APISERVER_CHK_WEIGHT:-'-50'}
        KUBE_APISERVER_CHK_FALL=${KUBE_APISERVER_CHK_FALL:-'10'}
        KUBE_APISERVER_CHK_RISE=${KUBE_APISERVER_CHK_RISE:-'2'}
        {
            echo 'vrrp_script chk_kube_apiserver {'
            echo "    script \"/usr/lib/keepalived/scripts/chk_kube_apiserver.sh $KUBE_APISERVER_ADDRESS $KUBE_APISERVER_PORT\""
            echo "    interval $KUBE_APISERVER_CHK_INTERVAL"
            echo "    fall $KUBE_APISERVER_CHK_FALL"
            echo "    rise $KUBE_APISERVER_CHK_RISE"
            echo "    weight $KUBE_APISERVER_CHK_WEIGHT"
            echo '}'
        } >> "$KEEPALIVED_CONF"
    fi
    # Main VRRP instance header.
    {
        echo 'vrrp_instance MAIN {'
        echo "    state $KEEPALIVED_STATE"
        echo "    interface $KEEPALIVED_INTERFACE"
        echo "    virtual_router_id $KEEPALIVED_VIRTUAL_ROUTER_ID"
        echo "    priority $KEEPALIVED_PRIORITY"
        echo "    advert_int $KEEPALIVED_ADVERT_INT"
        echo "    unicast_src_ip $KEEPALIVED_UNICAST_SRC_IP"
        echo '    unicast_peer {'
    } >> "$KEEPALIVED_CONF"
    # One line per KEEPALIVED_UNICAST_PEER_<n> variable (indirect expansion).
    for peer in $(compgen -A variable | grep -E "KEEPALIVED_UNICAST_PEER_[0-9]{1,3}"); do
        echo "        ${!peer}" >> "$KEEPALIVED_CONF"
    done
    {
        echo '    }'
        echo '    authentication {'
        echo '        auth_type PASS'
        echo "        auth_pass $KEEPALIVED_AUTH_PASS"
        echo '    }'
        echo '    virtual_ipaddress {'
    } >> "$KEEPALIVED_CONF"
    # One line per KEEPALIVED_VIRTUAL_IPADDRESS_<n> variable.
    for vip in $(compgen -A variable | grep -E 'KEEPALIVED_VIRTUAL_IPADDRESS_[0-9]{1,3}'); do
        echo "        ${!vip}" >> "$KEEPALIVED_CONF"
    done
    echo '    }' >> "$KEEPALIVED_CONF"
    # Optional excluded VIPs (not advertised in VRRP packets).
    if compgen -A variable | grep -q -E 'KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_[0-9]{1,3}'; then
        echo '    virtual_ipaddress_excluded {' >> "$KEEPALIVED_CONF"
        for evip in $(compgen -A variable | grep -E 'KEEPALIVED_VIRTUAL_IPADDRESS_EXCLUDED_[0-9]{1,3}'); do
            echo "        ${!evip}" >> "$KEEPALIVED_CONF"
        done
        echo '    }' >> "$KEEPALIVED_CONF"
    fi
    # Track the explicitly listed interfaces, else the main interface.
    if compgen -A variable | grep -q -E 'KEEPALIVED_TRACK_INTERFACE_[0-9]{1,3}'; then
        echo '    track_interface {' >> "$KEEPALIVED_CONF"
        for interface in $(compgen -A variable | grep -E 'KEEPALIVED_TRACK_INTERFACE_[0-9]{1,3}'); do
            echo "        ${!interface}" >> "$KEEPALIVED_CONF"
        done
        echo '    }' >> "$KEEPALIVED_CONF"
    else
        {
            echo '    track_interface {'
            echo "        $KEEPALIVED_INTERFACE"
            echo '}'
        } >> "$KEEPALIVED_CONF"
    fi
    # Wire the apiserver check script into the instance when enabled.
    if [[ ${KEEPALIVED_KUBE_APISERVER_CHECK,,} == 'true' ]]; then
        {
            echo '    track_script {'
            echo '        chk_kube_apiserver'
            echo '    }'
        } >> "$KEEPALIVED_CONF"
    fi
    echo '}' >> "$KEEPALIVED_CONF"
    return 0
}
# Resolve every tunable to its default and assemble the keepalived command
# line (the whole command remains overridable via KEEPALIVED_CMD).
init_vars() {
    KEEPALIVED_AUTOCONF="${KEEPALIVED_AUTOCONF:-true}"
    KEEPALIVED_DEBUG="${KEEPALIVED_DEBUG:-false}"
    KEEPALIVED_KUBE_APISERVER_CHECK="${KEEPALIVED_KUBE_APISERVER_CHECK:-false}"
    KEEPALIVED_CONF="${KEEPALIVED_CONF:-/etc/keepalived/keepalived.conf}"
    KEEPALIVED_VAR_RUN="${KEEPALIVED_VAR_RUN:-/var/run/keepalived}"

    # -n: don't daemonize, -l: log to console; -D adds detail in debug mode.
    local kd_flags='-n -l'
    if [[ ${KEEPALIVED_DEBUG,,} == 'true' ]]; then
        kd_flags='-n -l -D'
    fi
    KEEPALIVED_CMD=${KEEPALIVED_CMD:-"/usr/sbin/keepalived $kd_flags -f $KEEPALIVED_CONF"}
}
# Entry point: resolve defaults, optionally auto-generate the keepalived
# config, clear stale run state, then replace this shell with keepalived.
main() {
init_vars
if [[ ${KEEPALIVED_AUTOCONF,,} == 'true' ]]; then
config_keepalived
fi
# Remove stale pid/run state from a previous container run.
rm -fr "$KEEPALIVED_VAR_RUN"
# KEEPALIVED_CMD is intentionally unquoted so its words split into argv.
# shellcheck disable=SC2086
exec $KEEPALIVED_CMD
}
main
|
require 'matrix'
require 'libopencv_rice'
module CVRice
  # Ruby-side sugar for the Rice-wrapped OpenCV Mat class: overloaded
  # construction, arithmetic against Ruby numerics/containers, Enumerable
  # support and conversions to/from the stdlib Matrix/Vector types.
  class Mat
    include Enumerable

    # TODO: Think of a better way to do this
    # OpenCV element-depth constants (mirror the cv::Mat type codes).
    CV_8U  = 0
    CV_8S  = 1
    CV_16U = 2
    CV_16S = 3
    CV_32S = 4
    CV_32F = 5
    CV_64F = 6

    # Emulate an overloaded constructor in Ruby
    class << self
      # Mat.new             -> empty Mat
      # Mat.new(obj)        -> converted from Array/Matrix/Vector/Vec3d/Matx*/Mat
      # Mat.new(r, c [, t]) -> zero-filled r x c Mat of type t
      def new( *args )
        case args.length
        when 0
          a = allocate
          a.send( :initialize )
          a
        when 1
          arg = args.pop
          case arg
          when Array, Matrix
            Mat::from_ruby arg
          when Vector, Vec3d
            Mat::columns [ arg.to_a ]
          when Matx22f, Matx22d, Matx33f, Matx33d
            arg.to_mat
          when CVRice::Mat
            Mat::copy_constructor arg
          else
            # BUG FIX: args was already emptied by #pop above, so
            # args.first.class was always NilClass; report the class of
            # the actual argument instead.
            raise "Don't know how to make a Mat from a #{arg.class}"
          end
        when 2..3
          # NOTE: silently returns nil when the first two args aren't
          # Numeric (kept for backward compatibility).
          if Numeric === args[0] and Numeric === args[1]
            Mat::zeros( *(args.first(3)) )
          end
        else
          raise "Don't know how to make a Mat from: %s" % (args.map {|x| x.inspect}.join(', '))
        end
      end

      # Build a Mat whose rows are the sub-arrays of +arr+.
      def rows( arr )
        Mat.new arr
      end

      # Build a Mat whose columns are the sub-arrays of +arr+.
      def columns( arr )
        Mat.rows(arr.transpose)
      end

      # Square Mat with +elems+ on the diagonal and zeros elsewhere.
      def diagonal( *elems )
        Mat.new Matrix.diagonal(*elems)
      end

      # Identity matrix; defaults to square and CV_64F element type.
      def identity( r, c = r, type = CV_64F )
        Mat::eye( r, c, type )
      end
    end

    # For compatibility with Matrix
    alias_method :row_count, :rows
    alias_method :column_count, :cols

    def inspect
      "Mat: %d x %d" % [rows,cols]
    end

    alias_method :svd_c, :svd

    # Singular value decomposition; returns [u, w, vt].
    # opts flags: :modify_a, :no_uv, :full_uv (map onto OpenCV SVD flags).
    def svd( opts = {} )
      flags = 0
      flags += 1 if opts[:modify_a]
      flags += 2 if opts[:no_uv]
      flags += 4 if opts[:full_uv]
      w = Mat.new
      u = Mat.new
      vt = Mat.new
      svd_c( w,u,vt, flags )
      [u,w,vt]
    end

    # Define an arithmetic operator (+, -, *) that dispatches to the
    # matrix-argument or constant-argument native function depending on
    # the class of the right-hand operand.  +msg+ is a format string that
    # receives the unsupported operand's class.
    # (Method name kept as "arithmatic" [sic] for backward compatibility.)
    def self.arithmatic_operator( operator, mat_function, const_function, msg = nil )
      # BUG FIX: the old default embedded '#{b.class}' inside a
      # single-quoted string, so raised messages printed that literal text
      # instead of the operand's class.  '%%s' survives the operator
      # substitution below and is filled with b.class at raise time.
      msg ||= '"Don\'t know how to %s a %%s with a CVRice::Mat"' % operator
      self.send :define_method, operator, Proc.new { |b|
        case b
        when Vector,Matrix,Mat
          send( mat_function, b )
        when Matx22f
          ## Smells are bad...
          send( mat_function, Mat.rows(b.to_a) )
        when Array
          case b.first
          when Array
            send( mat_function, Mat.rows(b) )
          when Numeric
            send( mat_function, Mat.columns([b]) )
          else
            raise msg % [b.class]
          end
        when Vec3d
          send( mat_function, b.to_mat )
        when Numeric
          send( const_function, b )
        else
          raise msg % [b.class]
        end
      }
    end

    # FIX: corrected "Dont't" -> "Don't" in the two messages below.
    arithmatic_operator :+, :add_mat, :add_const, '"Don\'t know how to add a %s to a CVRice::Mat"'
    arithmatic_operator :-, :subtract_mat, :subtract_const, '"Don\'t know how to subtract a %s from a CVRice::Mat"'
    arithmatic_operator :*, :mult_mat, :mult_const, '"Don\'t know how to multiply a CVRice::Mat by a %s"'

    # Near-zero check on the matrix norm.
    def zero?
      norm.abs < 1e-12
    end

    # Yields each element in row-major order.  Without a block, returns an
    # Enumerator — only allowed for single-row or single-column matrices.
    def each
      if block_given?
        rows.times { |r|
          cols.times { |c|
            yield at_d(r,c)
          }
        }
      else
        raise "Enumerator form of Mat::each only makes sense if Matrix has 1 row or 1 column" unless rows == 1 or cols == 1
        # Assumed to be less efficient because the temporary Array is created...
        (to_a.flatten(1)).each
      end
    end

    # Yields element, row, col for every element in row-major order.
    def each_with_index
      if block_given?
        rows.times { |r|
          cols.times { |c|
            yield at_d(r,c),r,c
          }
        }
      else
        raise "Enumerator form of Mat::each_with_index doesn't exist (yet)..."
      end
    end

    # Element-wise map returning a new Mat of the same shape and type.
    def map
      if block_given?
        out = Mat.new rows, cols, type
        rows.times {|r| cols.times {|c|
          out.set_d( r, c, (yield at_d(r,c)) )
        } }
        out
      else
        # FIX: corrected "for of" -> "form of" in the message.
        raise "Enumerator form of Mat::map doesn't exist (yet)..."
      end
    end

    # Each row as a stdlib Vector.
    def row_vectors
      to_a.map! { |arr|
        Vector[ *arr ]
      }
    end

    # TODO: Need this now, could be done many ways, each
    # better than this...
    def to_matx33d
      raise "This mat isn't 3x3, can't convert to matx33d" unless rows ==3 and cols == 3
      Matx33d::from_ruby to_a
    end

    def to_matx33f
      raise "This mat isn't 3x3, can't convert to matx33f" unless rows ==3 and cols == 3
      Matx33f::from_ruby to_a
    end

    def to_matx22d
      raise "This mat isn't 2x2, can't convert to matx22d" unless rows ==2 and cols == 2
      Matx22d::from_ruby to_a
    end

    def to_matx22f
      raise "This mat isn't 2x2, can't convert to matx22f" unless rows ==2 and cols == 2
      Matx22f::from_ruby to_a
    end

    # Convert to a stdlib Matrix.
    def to_Matrix
      Matrix.rows to_a
    end

    # Convert a 1xN or Nx1 Mat to a stdlib Vector.
    def to_Vector
      raise "Mat is not a vector" unless rows == 1 or cols == 1
      Vector[ *(to_a.flatten(1)) ]
    end

    # Element read; column defaults to 0 so vectors index naturally.
    def [](r,c=0)
      at_d(r,c)
    end

    # Element write.
    def []=(r,c,v)
      set_d( r,c, v)
    end

    def to_CvMat
      CVRice::mat_to_cvmat( self )
    end

    # Pretty-print the matrix to stdout.  NOTE: shadows Kernel#print on
    # Mat instances.  +opts+ may be a caption String or a Hash with
    # :caption and :format (:exp, or a printf format string; default "%.5f").
    def print( opts = {} )
      caption = case opts
                when ::String
                  st = opts
                  opts = {}
                  st
                when Hash
                  opts[:caption]
                end
      puts "#{caption} (%dx%d)= " % [rows,cols] if caption
      rows.times { |r|
        puts cols.times.map { |c|
          format = case opts[:format]
                   when :exp
                     "%-5.2e"
                   when nil
                     "%.5f"
                   else
                     opts[:format]
                   end
          format % at_d(r,c)
        }.join(' ')
      }
    end
  end
end
|
#!/bin/bash
source ${ENTRYPOINT_HOME}/global_env.sh
# $CDM can contain multiple configs of the form
# <directory>|<processing_mechanism>|<threads>|<readlock>,<directory>|....
# Parse $CDM — a comma-separated list of configs of the form
# <directory>|<processing_mechanism>|<threads>|<readlock> — and invoke
# create-cdm once per entry.  Only non-empty fields become CLI flags.
function main {
    IFS=',' read -r -a _cdm_configs <<< "${CDM}"
    for index in "${!_cdm_configs[@]}"
    do
        IFS='|' read -r -a _config <<< "${_cdm_configs[$index]}"
        _cdm_directory=${_config[0]}
        _cdm_mechanism=${_config[1]}
        _cdm_threads=${_config[2]}
        _cdm_readlock=${_config[3]}
        _cdm_args="${_cdm_directory}"
        if [ -n "${_cdm_mechanism}" ]; then
            _cdm_args="${_cdm_args} --processing-mechanism ${_cdm_mechanism}"
        fi
        if [ -n "${_cdm_threads}" ]; then
            # BUG FIX: was ${_cdm_threas} (typo), which always expanded
            # empty and left "--threads" with no value.
            _cdm_args="${_cdm_args} --threads ${_cdm_threads}"
        fi
        if [ -n "${_cdm_readlock}" ]; then
            _cdm_args="${_cdm_args} --readlock-interval ${_cdm_readlock}"
        fi
        echo "Creating CDM configuration with arguments: ${_cdm_args}"
        # _cdm_args intentionally unquoted: it carries multiple words.
        # shellcheck disable=SC2086
        create-cdm --ddf-directory ${APP_HOME} ${_cdm_args}
    done
}
main
|
'use strict';
// Jasmine spec for AuthService (bolt.services): login/logout, token-based
// authentication checks, role-based authorization, and session access.
// $http, USER_ROLES and Session are replaced with spies so every test is
// isolated from the back-end and from real session state.
describe('auth-service-spec:', function () {
//prepare module for testing
beforeEach(angular.mock.module('bolt.services'));
describe('AuthService-spec:', function () {
var mockHttp, mockUserRoles, mockSession;
var authService;
/* Deferred response of http service */
var deferredHttp, $rootScope;
//prepare session for testing
beforeEach(angular.mock.module(function ($provide) {
//mock dependencies
mockHttp = jasmine.createSpyObj('$http', ['post']);
$provide.value('$http', mockHttp);
mockUserRoles = jasmine.createSpyObj('USER_ROLES', ['GUEST', 'USER']);
$provide.value('USER_ROLES', mockUserRoles);
mockSession = jasmine.createSpyObj('Session', ['create', 'destroy', 'getToken', 'getUserRole']);
$provide.value('Session', mockSession);
}));
beforeEach(inject(function ($injector, $q, _$rootScope_) {
$rootScope = _$rootScope_;
//initialize deferred http response
// Every $http.post returns the same deferred promise, so each test
// controls the back-end outcome via resolve()/reject().
deferredHttp = $q.defer();
mockHttp.post = function (url, args) {
return deferredHttp.promise;
};
//get service
authService = $injector.get('AuthService');
}));
it('should login user with valid credentials', function () {
//given user is defined in service
expect(authService).toBeDefined();
var user = {
token: 'token-123',
id: 'user-345',
role: 'USER'
};
//when trying to log in with user's credentials
authService.login({login: '<EMAIL>', password: '<PASSWORD>'});
//and response for authentication is positive
var response = {data: user};
deferredHttp.resolve(response);
// $apply flushes the $q promise queue so the login callback runs.
$rootScope.$apply();
//then session is created for validated user
expect(mockSession.create).toHaveBeenCalledWith(user);
});
it('should not login user with wrong credentials', function () {
//given user is defined in service
expect(authService).toBeDefined();
var user = {
token: 'token-123',
id: 'user-345',
role: 'USER'
};
//when trying to log in with wrong credentials
authService.login({login: '<EMAIL>', password: '<PASSWORD>'});
//and response for authentication is not positive
deferredHttp.reject();
$rootScope.$apply();
//then session is not created for user
expect(mockSession.create).not.toHaveBeenCalled();
});
it('should log out current user', function () {
//given auth service is initialized
expect(authService).toBeDefined();
//when user is logging out
authService.logout();
//and back-end has responded
deferredHttp.resolve();
$rootScope.$apply();
//then session of current user is destroyed
expect(mockSession.destroy).toHaveBeenCalled();
});
it('should authenticate user with valid token', function () {
//given token of user is valid
expect(authService).toBeDefined();
mockSession.getToken = function () {
return 'token-123';
};
//when checking user whether user is authenticated
//then check is positive
expect(authService.isAuthenticated()).toBe(true);
});
it('should not authenticate user without valid token', function () {
//given token of user is invalid
expect(authService).toBeDefined();
mockSession.getToken = function () {
return null;
};
//when checking user whether user is authenticated
//then check is negative
expect(authService.isAuthenticated()).toBe(false);
});
it('should authorize user with correct access rights', function () {
//given user has required access rights
expect(authService).toBeDefined();
mockSession.getUserRole = function () {
return 'ADMIN';
};
//when checking whether user is authorized
//then check is positive
expect(authService.isAuthorized(['USER', 'ADMIN'])).toBe(true);
});
it('should not authorize user with insufficient access rights', function () {
//given user hasn't required access rights
expect(authService).toBeDefined();
mockSession.getUserRole = function () {
return 'GUEST';
};
//when checking whether user is authorized
//then check is negative
expect(authService.isAuthorized(['USER', 'ADMIN'])).toBe(false);
});
it('should return current session', function () {
//given auth service is initialized
expect(authService).toBeDefined();
//when checking user's current session
//then singleton instance is returned
expect(authService.getCurrentSession()).toBe(mockSession);
});
});
});
|
type Query {
allBooks: [Book]
}
type Book {
id: ID!
title: String!
author: String!
published_date: String
}
type Mutation {
addBook(title: String!, author: String!, published_date: String): Book
}
type BookPayload {
msg: String!
book: Book
}
schema {
query: Query
mutation: Mutation
}
`
// GraphQL resolver map for the Book schema.
const resolvers = {
  Query: {
    // Return every book in the collection.
    allBooks: async (_, args, { books }) => await books.find().toArray(),
  },
  Mutation: {
    // BUG FIX: the key was written as `add book:`, which is a JavaScript
    // syntax error and did not match the schema's `addBook` mutation field.
    addBook: async (_, { title, author, published_date }, { books }) => {
      // NOTE(review): `insert` is the legacy MongoDB driver API; newer
      // drivers use insertOne — confirm against the driver version in use.
      let bookInserted = await books.insert({
        title,
        author,
        published_date,
      });
      return {
        msg: 'Book successfully created',
        book: bookInserted,
      };
    },
  },
};
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.