text
stringlengths
1
1.05M
# Lazy-loading registrations for the admissionregistration.k8s.io/v1beta1
# API group. Each constant is resolved from its generated DSL file only on
# first reference, keeping startup cost low.
module KubeDSL::DSL::Admissionregistration::V1beta1
  autoload :MutatingWebhook, 'kube-dsl/dsl/admissionregistration/v1beta1/mutating_webhook'
  autoload :MutatingWebhookConfiguration, 'kube-dsl/dsl/admissionregistration/v1beta1/mutating_webhook_configuration'
  autoload :MutatingWebhookConfigurationList, 'kube-dsl/dsl/admissionregistration/v1beta1/mutating_webhook_configuration_list'
  autoload :RuleWithOperations, 'kube-dsl/dsl/admissionregistration/v1beta1/rule_with_operations'
  autoload :ServiceReference, 'kube-dsl/dsl/admissionregistration/v1beta1/service_reference'
  autoload :ValidatingWebhook, 'kube-dsl/dsl/admissionregistration/v1beta1/validating_webhook'
  autoload :ValidatingWebhookConfiguration, 'kube-dsl/dsl/admissionregistration/v1beta1/validating_webhook_configuration'
  autoload :ValidatingWebhookConfigurationList, 'kube-dsl/dsl/admissionregistration/v1beta1/validating_webhook_configuration_list'
  autoload :WebhookClientConfig, 'kube-dsl/dsl/admissionregistration/v1beta1/webhook_client_config'
end
"use strict"; /** * This class contains methods to build common {@link https://cs.cmu.edu/~kmcrane/Projects/DDG/paper.pdf discrete exterior calculus} operators. * @memberof module:Core */ class DEC { /** * Builds a sparse diagonal matrix encoding the Hodge operator on 0-forms. * By convention, the area of a vertex is 1. * @static * @param {module:Core.Geometry} geometry The geometry of a mesh. * @param {Object} vertexIndex A dictionary mapping each vertex of a mesh to a unique index. * @returns {module:LinearAlgebra.SparseMatrix} */ static buildHodgeStar0Form(geometry, vertexIndex) { let T = new Triplet(geometry.mesh.vertices.length, geometry.mesh.vertices.length); for (let vertex of geometry.mesh.vertices) { let v = vertexIndex[vertex]; let dual = geometry.barycentricDualArea(vertex); T.addEntry(dual, v, v); } return SparseMatrix.fromTriplet(T); } /** * Builds a sparse diagonal matrix encoding the Hodge operator on 1-forms. * @static * @param {module:Core.Geometry} geometry The geometry of a mesh. * @param {Object} edgeIndex A dictionary mapping each edge of a mesh to a unique index. * @returns {module:LinearAlgebra.SparseMatrix} */ static buildHodgeStar1Form(geometry, edgeIndex) { let T = new Triplet(geometry.mesh.edges.length, geometry.mesh.edges.length); for (let edge of geometry.mesh.edges) { let e = edgeIndex[edge]; let dual = (geometry.cotan(edge.halfedge) + geometry.cotan(edge.halfedge.twin)) / 2; T.addEntry(dual, e, e); } return SparseMatrix.fromTriplet(T); } /** * Builds a sparse diagonal matrix encoding the Hodge operator on 2-forms. * By convention, the area of a vertex is 1. * @static * @param {module:Core.Geometry} geometry The geometry of a mesh. * @param {Object} faceIndex A dictionary mapping each face of a mesh to a unique index. 
* @returns {module:LinearAlgebra.SparseMatrix} */ static buildHodgeStar2Form(geometry, faceIndex) { let T = new Triplet(geometry.mesh.faces.length, geometry.mesh.faces.length); for (let face of geometry.mesh.faces) { let f = faceIndex[face]; let edges = [...face.adjacentEdges()]; let s = (geometry.length(edges[0]) + geometry.length(edges[1]) + geometry.length(edges[2])) / 2; let dual = 1 / Math.sqrt(s * (s - geometry.length(edges[0])) * (s - geometry.length(edges[1])) * (s - geometry.length(edges[2]))); T.addEntry(dual, f, f); } return SparseMatrix.fromTriplet(T); } /** * Builds a sparse matrix encoding the exterior derivative on 0-forms. * @static * @param {module:Core.Geometry} geometry The geometry of a mesh. * @param {Object} edgeIndex A dictionary mapping each edge of a mesh to a unique index. * @param {Object} vertexIndex A dictionary mapping each vertex of a mesh to a unique index. * @returns {module:LinearAlgebra.SparseMatrix} */ static buildExteriorDerivative0Form(geometry, edgeIndex, vertexIndex) { let mesh = geometry.mesh; let T = new Triplet(mesh.edges.length, mesh.vertices.length); for (let i = 0; i < mesh.edges.length; i++) { let e = edgeIndex[i]; let v1 = vertexIndex[mesh.edges[e].halfedge.vertex.index]; let v2 = vertexIndex[mesh.edges[e].halfedge.twin.vertex.index]; //vert -> edge (oriented) T.addEntry(-1, e, v1); // orientation The vertex at the base of this halfedge. T.addEntry(1, e, v2); } return SparseMatrix.fromTriplet(T); } /** * Builds a sparse matrix encoding the exterior derivative on 1-forms. * @static * @param {module:Core.Geometry} geometry The geometry of a mesh. * @param {Object} faceIndex A dictionary mapping each face of a mesh to a unique index. * @param {Object} edgeIndex A dictionary mapping each edge of a mesh to a unique index. 
* @returns {module:LinearAlgebra.SparseMatrix} */ static buildExteriorDerivative1Form(geometry, faceIndex, edgeIndex) { //I copied this from a fork, I don't understand this either const edges = geometry.mesh.edges; const faces = geometry.mesh.faces; let T = new Triplet(faces.length, edges.length); for (let f of faces) { const row = faceIndex[f]; for (let e of f.adjacentEdges()) { // get edge orientation here. Not sure if I understand this fully. // see http://brickisland.net/DDGSpring2019/2019/02/13/1669/#comment-107 const entry = e.halfedge.face === f ? 1 : -1; T.addEntry(entry, row, edgeIndex[e]); } } return SparseMatrix.fromTriplet(T); } }
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# Read the full dataset from disk.
df = pd.read_csv("dataset.csv")

# Split columns into the feature matrix and the target vector.
X = df.drop(['label'], axis=1)
y = df["label"]

# Hold out a quarter of the rows for evaluation, with a fixed seed
# so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)

# Fit a logistic-regression classifier on the training split.
classifier = LogisticRegression()
classifier.fit(X_train, y_train)

# Evaluate on the held-out split and report accuracy as a percentage.
predictions = classifier.predict(X_test)
accuracy = accuracy_score(y_test, predictions)
print('Accuracy of the model is {}%'.format(accuracy * 100))
import os


def configure_proxy(proxy_host, proxy_port):
    """Route HTTPS traffic through the given proxy for this process.

    Sets the ``https_proxy`` environment variable, which is seen by this
    process and inherited by any child processes spawned afterwards.

    Args:
        proxy_host: Hostname or IP address of the proxy server.
        proxy_port: Port the proxy listens on.

    Returns:
        The proxy URL that was configured (``http://host:port``).
    """
    https_proxy = f"http://{proxy_host}:{proxy_port}"
    # Assigning to os.environ is sufficient. The previous
    # `os.system(f'export https_proxy=...')` call ran `export` in a
    # throw-away subshell, which affected no other process and was a no-op.
    os.environ['https_proxy'] = https_proxy
    return https_proxy
/**
 * Verification benchmark exercising Java's boolean operators.
 * Both asserts hold, so the program is safe when run with -ea.
 * NOTE(review): this is a checker test case — the exact sequence of
 * operations is the point; do not simplify it.
 */
public class BooleanOperators_true_assert {

    public static void main(String[] args) {
        boolean a = true;
        a = !a;             // a = false
        assert a == false;  // holds
        a |= true;          // a = false | true = true
        a = a & true;       // a = true & true = true
        a = a ^ false;      // a = true ^ false = true
        assert a == true;   // holds
    }
}
<gh_stars>10-100 //============================================================================ // Copyright 2009-2020 ECMWF. // This software is licensed under the terms of the Apache Licence version 2.0 // which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. // In applying this licence, ECMWF does not waive the privileges and immunities // granted to it by virtue of its status as an intergovernmental organisation // nor does it submit to any jurisdiction. //============================================================================ #include <stdexcept> #include "InfoProvider.hpp" #include "VNode.hpp" #include "VReply.hpp" #include "ServerHandler.hpp" #include "Submittable.hpp" #include "EcfFile.hpp" #include <QDateTime> #include <fstream> #include <boost/algorithm/string/predicate.hpp> InfoProvider::InfoProvider(InfoPresenter* owner,VTask::Type taskType) : owner_(owner), taskType_(taskType), active_(false), autoUpdate_(false), inAutoUpdate_(false) { reply_=new VReply(this); if(owner_) owner_->registerInfoProvider(this); } InfoProvider::~InfoProvider() { delete reply_; clear(); } void InfoProvider::clear() { if(task_) task_->status(VTask::CANCELLED); reply_->reset(); info_.reset(); } void InfoProvider::setActive(bool b) { active_=b; if(!active_) clear(); } void InfoProvider::setAutoUpdate(bool b) { autoUpdate_=b; } void InfoProvider::info(VInfo_ptr info) { //We keep it alive info_=info; if(task_) { task_->status(VTask::CANCELLED); task_.reset(); } if(owner_ && info_) info_->accept(this); } //Server void InfoProvider::visit(VInfoServer* info) { reply_->reset(); if(!info->server()) { owner_->infoFailed(reply_); } //Define a task for getting the info from the server. task_=VTask::create(taskType_,this); //Run the task in the server. When it completes taskFinished() is called. The text returned //in the reply will be prepended to the string we generated above. 
info->server()->run(task_); } //Node void InfoProvider::visit(VInfoNode* info) { reply_->reset(); if(!info->node() || !info->node()->node()) { owner_->infoFailed(reply_); } //Check if we have a server if(!info->server()) { owner_->infoFailed(reply_); } VNode *n=info->node(); std::string fileName; if(!fileVarName_.empty()) { //Get the fileName fileName=n->genVariable(fileVarName_) + fileSuffix_; } //We try to read the file directly from the disk if(info->server()->readFromDisk()) { //There is a variable defined for the filename if(!fileName.empty()) { if(reply_->textFromFile(fileName)) { reply_->fileReadMode(VReply::LocalReadMode); reply_->fileName(fileName); owner_->infoReady(reply_); return; } /*else if(handleFileMissing(fileName,reply_)) { return; }*/ } } //We try to get the file contents from the server //(this will go through the threaded communication) //Define a task for getting the info from the server. task_=VTask::create(taskType_,n,this); task_->reply()->fileName(fileName); task_->reply()->fileReadMode(VReply::ServerReadMode); //Run the task in the server. When it finish taskFinished() is called. The text returned //in the reply will be prepended to the string we generated above. info->server()->run(task_); } void InfoProvider::handleFileNotDefined(VReply *reply) { reply->setInfoText(fileNotDefinedText_); owner_->infoReady(reply_); } bool InfoProvider::handleFileMissing(const std::string& fileName,VReply *reply) { return false; //reply->setWarningText(fileMissingText_); //owner_->infoReady(reply_); } void InfoProvider::taskChanged(VTask_ptr task) { if(task_ != task) return; //temporary hack! 
task_->reply()->setSender(this); switch(task->status()) { case VTask::FINISHED: { task->reply()->addLog("TRY>fetch file from ecflow server: OK"); //The file should have a copy of the reply log VFile_ptr f=task_->reply()->tmpFile(); if(f) { f->setFetchDate(QDateTime::currentDateTime()); f->setFetchMode(VFile::ServerFetchMode); f->setLog(task_->reply()->log()); } task->reply()->status(VReply::TaskDone); owner_->infoReady(task->reply()); task_.reset(); } break; case VTask::ABORTED: case VTask::REJECTED: task->reply()->addLog("TRY>fetch file from ecflow server: FAILED"); task->reply()->status(VReply::TaskFailed); owner_->infoFailed(task->reply()); task_.reset(); break; case VTask::CANCELLED: if(!task->reply()->errorText().empty()) { task->reply()->addLog("TRY>fetch file from ecflow server: FAILED"); task->reply()->status(VReply::TaskCancelled); owner_->infoFailed(task->reply()); } //We do not need the task anymore. task_.reset(); break; default: break; } } JobProvider::JobProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::JobTask) { fileVarName_="ECF_JOB"; fileNotDefinedText_="Job is <b>not</b> defined"; fileMissingText_="Job <b>not</b> found! <br> Check <b>ECF_HOME</b> directory \ for read/write access. Check for file presence and read access below. \ The file may have been deleted or this may be a '<i>dummy</i>' task"; } bool JobProvider::handleFileMissing(const std::string& fileName,VReply *reply) { if(fileName.find(".job0") != std::string::npos) { reply->setInfoText("No job to be expected when <b>TRYNO</b> is 0!"); owner_->infoReady(reply_); return true; } return false; } JobStatusFileProvider::JobStatusFileProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::JobStatusFileTask) { fileVarName_ ="ECF_JOB"; fileSuffix_ = ".stat"; fileNotDefinedText_ = "Job status is <b>not</b> defined"; fileMissingText_ = "Job status file <b>not</b> found! <br> Check <b>ECF_HOME</b> directory \ for read/write access. Check for file presence and read access below. 
\ The file may have been deleted or this may be a '<i>dummy</i>' task"; } bool JobStatusFileProvider::handleFileMissing(const std::string& fileName,VReply *reply) { if(fileName.find(".job0") != std::string::npos) { reply->setInfoText("No job to be expected when <b>TRYNO</b> is 0!"); owner_->infoReady(reply_); return true; } return false; } JobStatusProvider::JobStatusProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::JobStatusTask) { } ManualProvider::ManualProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::ManualTask) { fileVarName_="ECF_MANUAL"; fileNotDefinedText_="Manual is <b>not</b> available"; fileMissingText_="Manual is <b>not</b> available"; } MessageProvider::MessageProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::MessageTask) { } ScriptProvider::ScriptProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::ScriptTask) { fileVarName_="ECF_SCRIPT"; fileNotDefinedText_="Script is <b>not</b> defined"; fileMissingText_="Script <b>not/b> found! <br> Check <b>ECF_FILES</b> or <b>ECF_HOME</b> directories, \ for read access. Check for file presence and read access below files directory \ or this may be a '<i>dummy</i>' task"; } void ScriptProvider::visit(VInfoNode* info) { reply_->reset(); if(!info->node() || !info->node()->node()) { owner_->infoFailed(reply_); } //Check if we have a server if(!info->server()) { owner_->infoFailed(reply_); } VNode *n=info->node(); std::string fileName; // We try to read the file directly from the disk // Try client first. // *THIS will minimize calls to the server. 
when .ecf is not in ECF_SCRIPT but still accessible from the client try { if(Submittable* sb=info->node()->node()->isSubmittable()) { EcfFile ecf_file = sb->locatedEcfFile(); // will throw std::runtime_error for errors std::string fileContents, fileName, fileMethod; ecf_file.script(fileContents); //Check extra info in the first line std::string pattern("# ecf_script_origin :"); if(fileContents.size() > pattern.size() && fileContents.substr(0,pattern.size()) == pattern) { //strip off the first line std::string::size_type pos=fileContents.find('\n'); if(pos != std::string::npos && fileContents.size() > pos) { QString s=QString::fromStdString(fileContents.substr(0,pos)); QStringList sLst=s.split(":"); if(sLst.count() == 3) { fileMethod=sLst[1].simplified().toStdString(); fileName=sLst[2].simplified().toStdString(); } fileContents=fileContents.substr(pos+1); } } else { fileName=ecf_file.script_path_or_cmd(); } reply_->text(fileContents); reply_->fileReadMode(VReply::LocalReadMode); reply_->fileName(fileName); reply_->fileReadMethod(fileMethod); owner_->infoReady(reply_); return; } } catch (std::exception& e) { // Try the server } //We try to get the file contents from the server //(this will go through the threaded communication) if(!fileVarName_.empty()) { //Get the fileName fileName=n->genVariable(fileVarName_) + fileSuffix_; } //Define a task for getting the info from the server. task_=VTask::create(taskType_,n,this); task_->reply()->fileName(fileName); task_->reply()->fileReadMode(VReply::ServerReadMode); //Run the task in the server. When it finish taskFinished() is called. The text returned //in the reply will be prepended to the string we generated above. 
info->server()->run(task_); } HistoryProvider::HistoryProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::HistoryTask) { } SuiteProvider::SuiteProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::SuiteListTask) { } ZombieProvider::ZombieProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::ZombieListTask) { } WhyProvider::WhyProvider(InfoPresenter* owner) : InfoProvider(owner,VTask::WhySyncTask) { }
package wearable.hotelbeds.shared.price;

import java.io.Serializable;
import java.math.BigDecimal;
import java.util.List;

import wearable.hotelbeds.shared.event.EventInfoBean;
import wearable.hotelbeds.shared.hotel.HotelInfo;
import wearable.hotelbeds.shared.visa.VisaBean;

/**
 * Serializable value object bundling everything priced for a trip offer:
 * the hotel, the outbound and return flights, the event, and any visas,
 * together with the combined total amount.
 * <p>
 * Plain bean: no validation or computation is done here; {@code totalAmount}
 * is whatever the caller supplies, it is not derived from the parts.
 * <p>
 * Created by Zavierazo on 08/10/2015.
 */
public class PriceInfoBean implements Serializable {
    // Identifier of this price offer.
    private int id;

    // Combined price of the whole package (currency is not modelled here).
    private BigDecimal totalAmount;

    private HotelInfo hotelInfo;

    // Outbound flight segments.
    private List<FlyBean> flyDeparture;

    // Return flight segments.
    private List<FlyBean> flyArrival;

    private EventInfoBean event;

    private List<VisaBean> visas;

    /** No-arg constructor required for serialization frameworks. */
    public PriceInfoBean() {
    }

    public PriceInfoBean(int id, BigDecimal totalAmount, List<FlyBean> flyDeparture, List<FlyBean> flyArrival, EventInfoBean event, HotelInfo hotelInfo, List<VisaBean> visas) {
        this.id = id;
        this.totalAmount = totalAmount;
        this.flyDeparture = flyDeparture;
        this.flyArrival = flyArrival;
        this.event = event;
        this.hotelInfo = hotelInfo;
        this.visas = visas;
    }

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public BigDecimal getTotalAmount() {
        return totalAmount;
    }

    public void setTotalAmount(BigDecimal totalAmount) {
        this.totalAmount = totalAmount;
    }

    public HotelInfo getHotelInfo() {
        return hotelInfo;
    }

    public void setHotelInfo(HotelInfo hotelInfo) {
        this.hotelInfo = hotelInfo;
    }

    public List<FlyBean> getFlyDeparture() {
        return flyDeparture;
    }

    public void setFlyDeparture(List<FlyBean> flyDeparture) {
        this.flyDeparture = flyDeparture;
    }

    public List<FlyBean> getFlyArrival() {
        return flyArrival;
    }

    public void setFlyArrival(List<FlyBean> flyArrival) {
        this.flyArrival = flyArrival;
    }

    public EventInfoBean getEvent() {
        return event;
    }

    public void setEvent(EventInfoBean event) {
        this.event = event;
    }

    public List<VisaBean> getVisas() {
        return visas;
    }

    public void setVisas(List<VisaBean> visas) {
        this.visas = visas;
    }
}
"""This example is about domain adaptation for action recognition, using PyTorch Lightning.

Reference: https://github.com/thuml/CDAN/blob/master/pytorch/train_image.py
"""

import argparse
import logging

import pytorch_lightning as pl
from config import get_cfg_defaults
from model import get_model
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint, TQDMProgressBar

from kale.loaddata.video_access import VideoDataset
from kale.loaddata.video_multi_domain import VideoMultiDomainDatasets
from kale.utils.seed import set_seed

# from pytorch_lightning.callbacks.early_stopping import EarlyStopping


def arg_parse():
    """Parsing arguments"""
    parser = argparse.ArgumentParser(description="Domain Adversarial Networks on Action Datasets")
    parser.add_argument("--cfg", required=True, help="path to config file", type=str)
    parser.add_argument(
        "--gpus",
        default=1,
        help="gpu id(s) to use. None/int(0) for cpu. list[x,y] for xth, yth GPU."
        "str(x) for the first x GPUs. str(-1)/int(-1) for all available GPUs",
    )
    # NOTE(review): --resume is parsed but never used in main() below; kept
    # for forward compatibility with checkpoint resuming.
    parser.add_argument("--resume", default="", type=str)
    args = parser.parse_args()
    return args


def main():
    """The main for this domain adaptation example, showing the workflow.

    Workflow: load + freeze config, build the source/target video datasets,
    then train and test once per repeat with a different derived seed so a
    std over runs can be reported.
    """
    args = arg_parse()

    # ---- setup configs ----
    cfg = get_cfg_defaults()
    cfg.merge_from_file(args.cfg)
    cfg.freeze()
    print(cfg)

    # ---- setup output ----
    format_str = "@%(asctime)s %(name)s [%(levelname)s] - (%(message)s)"
    logging.basicConfig(format=format_str)

    # ---- setup dataset ----
    seed = cfg.SOLVER.SEED
    source, target, num_classes = VideoDataset.get_source_target(
        VideoDataset(cfg.DATASET.SOURCE.upper()), VideoDataset(cfg.DATASET.TARGET.upper()), seed, cfg
    )
    dataset = VideoMultiDomainDatasets(
        source,
        target,
        image_modality=cfg.DATASET.IMAGE_MODALITY,
        seed=seed,
        config_weight_type=cfg.DATASET.WEIGHT_TYPE,
        config_size_type=cfg.DATASET.SIZE_TYPE,
    )

    # ---- training/test process ----
    ### Repeat multiple times to get std
    for i in range(0, cfg.DATASET.NUM_REPEAT):
        # Each repeat gets its own deterministic seed, offset by 10.
        seed = seed + i * 10
        set_seed(seed)  # seed_everything in pytorch_lightning did not set torch.backends.cudnn
        print(f"==> Building model for seed {seed} ......")
        # ---- setup model and logger ----
        model, train_params = get_model(cfg, dataset, num_classes)
        tb_logger = pl_loggers.TensorBoardLogger(cfg.OUTPUT.TB_DIR, name="seed{}".format(seed))
        checkpoint_callback = ModelCheckpoint(
            # dirpath=full_checkpoint_dir,
            filename="{epoch}-{step}-{val_loss:.4f}",
            # save_last=True,
            # save_top_k=1,
            monitor="val_loss",
            mode="min",
        )

        ### Set early stopping
        # early_stop_callback = EarlyStopping(monitor="val_target_acc", min_delta=0.0000, patience=100, mode="max")

        lr_monitor = LearningRateMonitor(logging_interval="epoch")
        progress_bar = TQDMProgressBar(cfg.OUTPUT.PB_FRESH)

        ### Set the lightning trainer. Comment `limit_train_batches`, `limit_val_batches`, `limit_test_batches` when
        # training. Uncomment and change the ratio to test the code on the smallest sub-dataset for efficiency in
        # debugging. Uncomment early_stop_callback to activate early stopping.
        trainer = pl.Trainer(
            min_epochs=cfg.SOLVER.MIN_EPOCHS,
            max_epochs=cfg.SOLVER.MAX_EPOCHS,
            # resume_from_checkpoint=last_checkpoint_file,
            gpus=args.gpus,
            logger=tb_logger,  # logger,
            # weights_summary='full',
            fast_dev_run=cfg.OUTPUT.FAST_DEV_RUN,  # True,
            callbacks=[lr_monitor, checkpoint_callback, progress_bar],
            # callbacks=[early_stop_callback, lr_monitor],
            # limit_train_batches=0.005,
            # limit_val_batches=0.06,
            # limit_test_batches=0.06,
        )

        ### Find learning_rate
        # lr_finder = trainer.tuner.lr_find(model, max_lr=0.1, min_lr=1e-6)
        # fig = lr_finder.plot(suggest=True)
        # fig.show()
        # logging.info(lr_finder.suggestion())

        ### Training/validation process
        trainer.fit(model)

        ### Test process
        # NOTE(review): trainer.test() with no arguments tests the model from
        # the last fit call — confirm whether the best checkpoint is intended.
        trainer.test()


if __name__ == "__main__":
    main()
package server

import (
	. "airviz/core"
	"airviz/datasrc"
	. "airviz/latest"
	"fmt"
	"log"
	"net/http"
)

// ServerDags maps each data topic to the Dag holding its boxes.
type ServerDags map[Topic]*Dag

// ServerState owns the per-topic Dags, the websocket hub, and the channel
// through which data-source events flow into the server.
type ServerState struct {
	hub *Hub

	dags ServerDags

	// Buffered so data sources are not blocked by brief processing stalls.
	events chan datasrc.DataEvent
}

// NewServerState creates a ServerState bound to the given hub, with empty
// dags and a buffered event channel.
func NewServerState(hub *Hub) *ServerState {
	return &ServerState{
		hub:    hub,
		dags:   make(ServerDags),
		events: make(chan datasrc.DataEvent, 100),
	}
}

// GetEventCh exposes the event channel send-side to data sources.
func (s *ServerState) GetEventCh() chan<- datasrc.DataEvent {
	return s.events
}

// SetDag registers (or replaces) the Dag used for a topic.
func (s *ServerState) SetDag(topic Topic, dag *Dag) {
	s.dags[topic] = dag
}

// newDataRequestHandler builds a request handler for a known topic, or
// returns nil (after logging) for a topic with no registered Dag.
func (s *ServerState) newDataRequestHandler(sendMsg func([]byte), topic Topic) *DataRequestHandler {
	if dag, ok := s.dags[topic]; ok {
		return NewDataRequestHandler(sendMsg, dag, topic)
	} else {
		fmt.Printf("cannot create handler, unrecognized topic %d\n", topic)
		return nil
	}
}

// Start serving a new client
func (s *ServerState) ServeWs(w http.ResponseWriter, r *http.Request) {
	// NOTE(review): accepting every origin disables CSWSH protection —
	// confirm this is intended outside local development.
	upgrader.CheckOrigin = func(r *http.Request) bool {
		return true
	}
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		return
	}
	// make a client
	client := &Client{
		id:   s.hub.NewClientId(),
		hub:  s.hub,
		conn: conn,
		send: make(chan []byte, buffedMsgCount),
	}
	// give it a state
	client.clientState = &ClientState{
		blocks: s.newDataRequestHandler(client.sendMsg, TopicBlocks),
	}
	// register it
	client.hub.register <- client
	// start processing routines for the client
	go client.writePump()
	go client.readPump()
	go client.clientState.HandleEvents()
}

// PipeEvents forwards incoming data events into the matching Dag and then
// notifies the hub so connected clients get triggered. Runs forever; start
// it in its own goroutine.
func (s *ServerState) PipeEvents() {
	for {
		ev := <-s.events
		if dag, ok := s.dags[ev.Topic]; ok {
			dag.AddBox(ev.Box)
		} else {
			fmt.Printf("cannot pipe event into dag, unrecognized topic %d\n", ev.Topic)
			continue
		}
		s.hub.Triggers <- Trigger{Topic: ev.Topic, Index: ev.Box.Index}
	}
}
package com.java.study.basic;

import java.lang.reflect.Array;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Stack;

/**
 * Micro-benchmark comparing insertion cost of ArrayList vs LinkedList at
 * the head, tail and middle of the list, at sizes 10^5, 10^6 and 10^7.
 * Timings are printed with System.currentTimeMillis(); results are
 * indicative only (no JIT warm-up is performed).
 *
 * @author hushiye
 * @since 10/24/21 15:32
 */
public class ArrayListAndLinkedList {

    public static void main(String[] args) {
//        testHeadInsert();
//        System.out.println("-------------------");
//        testTailInsert();
//        System.out.println("-------------------");
        testMiddleInsert();
    }

    /** Runs the middle-insert benchmark at 10^5, 10^6 and 10^7 elements. */
    private static void testMiddleInsert() {
        testMiddleInsert((long) Math.pow(10, 5));
        testMiddleInsert((long) Math.pow(10, 6));
        testMiddleInsert((long) Math.pow(10, 7));
    }

    /**
     * Times inserting {@code longIndex} elements at the midpoint of an
     * ArrayList and of a LinkedList.
     */
    private static void testMiddleInsert(long longIndex) {
        long time = System.currentTimeMillis();
        List<Long> list = new ArrayList<Long>();
        for (long i = 0; i < longIndex; i++) {
            // FIX: size() >> 1 is the true midpoint; the previous size() >> 2
            // inserted at the quarter mark despite the method's name.
            list.add(list.size() >> 1, i);
        }

        System.out.println(longIndex + ":ArrayList 中间插法耗时:" + (System.currentTimeMillis() - time));

        time = System.currentTimeMillis();
        LinkedList<Long> linkedList = new LinkedList<>();
        for (long i = 0; i < longIndex; i++) {
            linkedList.add(linkedList.size() >> 1, i);
        }

        System.out.println(longIndex + ":LinkedList 中间插法耗时:" + (System.currentTimeMillis() - time));
    }

    /** Runs the tail-insert benchmark at 10^5, 10^6 and 10^7 elements. */
    private static void testTailInsert() {
        testTailInsert((long) Math.pow(10, 5));
        testTailInsert((long) Math.pow(10, 6));
        testTailInsert((long) Math.pow(10, 7));
    }

    /**
     * Times appending {@code longIndex} elements to an ArrayList and a
     * LinkedList. Appends are O(1) amortized for both.
     */
    private static void testTailInsert(long longIndex) {
        long time = System.currentTimeMillis();
        List<Long> list = new ArrayList<Long>();
        for (long i = 0; i < longIndex; i++) {
            list.add(i);
        }

        System.out.println(longIndex + ":ArrayList 尾插法耗时:" + (System.currentTimeMillis() - time));

        time = System.currentTimeMillis();
        LinkedList<Long> linkedList = new LinkedList<>();
        for (long i = 0; i < longIndex; i++) {
            linkedList.addLast(i);
        }

        System.out.println(longIndex + ":LinkedList 尾插法耗时:" + (System.currentTimeMillis() - time));
    }

    /** Runs the head-insert benchmark at 10^5, 10^6 and 10^7 elements. */
    private static void testHeadInsert() {
        testHeadInsert((long) Math.pow(10, 5));
        testHeadInsert((long) Math.pow(10, 6));
        testHeadInsert((long) Math.pow(10, 7));
    }

    /**
     * Times head insertion into a LinkedList. The ArrayList variant is kept
     * commented out: head insertion into an ArrayList is O(n) per add and
     * takes impractically long at 10^7 elements.
     */
    private static void testHeadInsert(long longIndex) {
//        long time = System.currentTimeMillis();
//        List<Long> list = new ArrayList<Long>();
//        for (long i = 0; i < longIndex; i++) {
//            list.add(0, i);
//        }
//
//        System.out.println(longIndex + ":ArrayList 头插法耗时:" + (System.currentTimeMillis() - time));

        long time = System.currentTimeMillis();
        LinkedList<Long> linkedList = new LinkedList<>();
        for (long i = 0; i < longIndex; i++) {
            linkedList.addFirst(i);
        }

        System.out.println(longIndex + ":LinkedList 头插法耗时:" + (System.currentTimeMillis() - time));
    }
}
import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { HttpModule } from '@angular/http';
import { RouterModule } from '@angular/router';

import { AppComponent } from './app.component';
import { NavBarComponent } from './nav-bar/nav-bar.component';
import { HomeComponent } from './home/home.component';
import { TrendingComponent } from './trending/trending.component';
import { SearchComponent } from './search/search.component';
import { PageNotFoundComponent } from './page-not-found/page-not-found.component';
import { ROUTES } from './app.routes';
import { GiphyService } from './giphy.service';
import { GifViewerComponent } from './gif-viewer/gif-viewer.component';

/**
 * Root module of the Giphy app: declares all components, wires up the
 * router with the ROUTES table, and provides the GiphyService app-wide.
 * NOTE(review): HttpModule (@angular/http) is deprecated in modern Angular
 * in favour of HttpClientModule — worth migrating when upgrading.
 */
@NgModule({
  declarations: [
    AppComponent,
    NavBarComponent,
    HomeComponent,
    TrendingComponent,
    SearchComponent,
    PageNotFoundComponent,
    GifViewerComponent
  ],
  imports: [
    BrowserModule,
    FormsModule,
    HttpModule,
    RouterModule.forRoot(ROUTES)
  ],
  providers: [
    GiphyService
  ],
  bootstrap: [AppComponent]
})
export class AppModule { }
# Conda build script: copy the recipe's parent directory (the project
# checkout) into the build work directory, then install the package with
# the build environment's Python.
# Variables are quoted so paths containing spaces do not word-split (SC2086).
cp -r "$RECIPE_DIR/.." "$SRC_DIR"
"$PYTHON" setup.py install
// Barrel file for the accordion atom: re-exports the public pieces so
// consumers can import everything from 'atoms/accordion' directly.
export { default as Accordion } from './Accordion';
export { default as AccordionHeader } from './bones/AccordionHeader';
export { default as AccordionPanel } from './bones/AccordionPanel';
import { connect } from 'react-redux';
import Movies from './Movies';

// Returns the movies that pass the current filters, ordered by the current
// sorting mode. Loose equality (==) is intentional: filter values may be
// strings while movie fields are numbers.
function getVisibleMovies(year, genre, rating, sorting, movies) {
  const passesFilters = m =>
    (year == 'all' || year == m.year) &&
    (genre == 'all' || genre == m.genre) &&
    (rating == 'all' || rating == m.rating);

  const byCurrentSorting = (a, b) => {
    if (sorting == 'year') {
      return b.year - a.year;
    }
    if (sorting == 'rating') {
      return b.rating - a.rating;
    }
    if (sorting == 'alphabetically') {
      return a.title > b.title ? 1 : a.title < b.title ? -1 : 0;
    }
  };

  return movies.filter(passesFilters).sort(byCurrentSorting);
}

// Map the redux store slice to the Movies component's props.
function mapStateToProps(state) {
  const { year, genre, rating, sorting, movies } = state;
  return {
    movies: getVisibleMovies(year, genre, rating, sorting, movies),
  };
}

export default connect(mapStateToProps)(Movies);
<filename>app/src/main/java/com/telenav/osv/common/adapter/GeneralSettingsAdapter.java<gh_stars>10-100 package com.telenav.osv.common.adapter; import android.annotation.SuppressLint; import android.content.Context; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import android.widget.ProgressBar; import android.widget.TextView; import androidx.annotation.NonNull; import androidx.cardview.widget.CardView; import androidx.recyclerview.widget.RecyclerView; import com.telenav.osv.R; import com.telenav.osv.common.adapter.model.GeneralItemBase; import com.telenav.osv.obd.model.HeaderSettingItem; import com.telenav.osv.obd.model.IconTitleBtDeviceItem; import com.telenav.osv.obd.model.LeftIconTitleSettingItem; import com.telenav.osv.obd.model.LeftIconTitleSubtitleItem; import com.telenav.osv.recorder.tagging.RecordingTaggingItem; import com.telenav.osv.utils.Log; import java.util.ArrayList; import java.util.List; /** * General adapter for all settings item. This will create the item types available in {@code GeneralItemTypes} interface. * @author horatiuf * @see com.telenav.osv.common.adapter.model.GeneralItemBase.GeneralItemTypes */ public class GeneralSettingsAdapter extends RecyclerView.Adapter<GeneralSettingsAdapter.GeneralSettingsViewHolder> { /** * The {@code String} representing the TAG of the current class. */ public static final String TAG = GeneralSettingsAdapter.class.getSimpleName(); /** * The collection of {@code String} items which will be display by the current adapter. */ private List<GeneralItemBase> items; /** * The tagging height used for all tagging items. */ private int taggingWidth = 0; /** * Default constructor for the current class. * @param items the collection of {@code String} to be used for view binding/creation. 
*/ public GeneralSettingsAdapter(List<GeneralItemBase> items) { this.items = items; } /** * Default constructor for the current class for setting the list of items on going. */ public GeneralSettingsAdapter() { items = new ArrayList<>(); } @Override public GeneralSettingsViewHolder onCreateViewHolder(@NonNull ViewGroup parent, int viewType) { if (viewType == GeneralItemBase.GENERAL_TYPE_TAGGING) { View taggingView = LayoutInflater.from(parent.getContext()).inflate(R.layout.partial_tagging_subitem, parent, false); if (taggingWidth == 0) { Log.d(TAG, "getParentWidth " + parent.getMeasuredWidth()); Log.d(TAG, "getParentWidth width " + parent.getWidth()); taggingWidth = ((parent.getMeasuredWidth() - 70) / 3); } taggingView.setLayoutParams(new CardView.LayoutParams(taggingWidth, CardView.LayoutParams.WRAP_CONTENT)); return new TaggingViewHolder(taggingView); } return new ItemViewHolder(LayoutInflater.from(parent.getContext()).inflate(R.layout.partial_kv_setting_item, parent, false)); } @Override public void onBindViewHolder(@NonNull GeneralSettingsViewHolder holder, int position) { final GeneralItemBase currentItem = items.get(holder.getAdapterPosition()); if (holder instanceof ItemViewHolder) { bindItemViewHolder((ItemViewHolder) holder, currentItem); } else if (holder instanceof TaggingViewHolder) { bindTaggingViewHolder((TaggingViewHolder) holder, (RecordingTaggingItem) currentItem); } } @Override public int getItemViewType(int position) { return items.get(position).getType(); } @Override public int getItemCount() { return items.size(); } /** * Updates a {@link HeaderSettingItem} and notifies the adapter about the change. * @param headerSettingItem the new item for display. 
*/ public void updateItem(HeaderSettingItem headerSettingItem) { for (int i = 0; i < items.size(); i++) { if (items.get(i).getType() == GeneralItemBase.GENERAL_TYPE_OBD_HEADER) { if (items.get(i).equals(headerSettingItem)) { HeaderSettingItem currentHeaderSettingItem = (HeaderSettingItem) items.get(i); if (currentHeaderSettingItem.isProgressVisible() != headerSettingItem.isProgressVisible()) { currentHeaderSettingItem.setProgressVisible(headerSettingItem.isProgressVisible()); notifyItemChanged(i); notifyItemChanged(i); } return; } } } } /** * Adds a {@link GeneralItemBase} element to be displayed. * @param settingsItemBase the item that will be added and displayed. */ public void addItem(GeneralItemBase settingsItemBase) { if (!items.contains(settingsItemBase)) { items.add(settingsItemBase); notifyItemInserted(items.size() - 1); } } /** * Clear the list of items from the given index to the end of list. * @param index the index from where should start deleting the items. */ public void clearFromIndex(int index) { if (index <= items.size()) { int numberOfItemsRemoved = items.size() - index; items = items.subList(0, index); notifyItemRangeRemoved(index, numberOfItemsRemoved); } } /** * Binds view of type {@code ItemViewHolder}. 
*/ @SuppressLint("SwitchIntDef") private void bindItemViewHolder(ItemViewHolder holder, GeneralItemBase currentItem) { resetViewHolderVisibilities(holder); resetViewHolderLayout(holder); holder.layout.setOnClickListener(currentItem.getClickListener()); switch (currentItem.getType()) { case GeneralItemBase.GENERAL_TYPE_OBD_LEFT_ICON_TITLE: bindLeftIconTitle((LeftIconTitleSettingItem) currentItem, holder); break; case GeneralItemBase.GENERAL_TYPE_OBD_LEFT_ICON_TITLE_SUBTITLE: bindLeftIconTitleSubtitle((LeftIconTitleSubtitleItem) currentItem, holder); break; case GeneralItemBase.GENERAL_TYPE_OBD_BT_DEVICE_ITEM: bindBtDeviceItem((IconTitleBtDeviceItem) currentItem, holder); holder.layout.setOnClickListener(v -> ((IconTitleBtDeviceItem) currentItem).getOnDeviceClickListener().onDeviceSelected(((IconTitleBtDeviceItem) currentItem).getAddress())); break; case GeneralItemBase.GENERAL_TYPE_OBD_HEADER: bindHeader((HeaderSettingItem) currentItem, holder); break; default: // empty since will not handle any types which are not bt devices } } /** * Binds view of type {@code ItemViewHolder}. */ private void bindTaggingViewHolder(TaggingViewHolder holder, RecordingTaggingItem currentItem) { holder.subtitle.setText(currentItem.getSubtitle()); holder.subtitle.setCompoundDrawablesRelativeWithIntrinsicBounds(currentItem.getIconResId(), 0, 0, 0); holder.layout.setOnClickListener(currentItem.getClickListener()); } /** * Bind the item view holder for {@code IconTitleBtDeviceItem}. * @param currentItem the item to be displayed. 
* @param holder the current view holder to be bound */ private void bindBtDeviceItem(IconTitleBtDeviceItem currentItem, ItemViewHolder holder) { holder.leftIcon.setVisibility(View.VISIBLE); holder.leftIcon.setImageResource(currentItem.getIconResId()); holder.title.setVisibility(View.VISIBLE); holder.title.setText(currentItem.getName()); if (currentItem.isItemDividerVisible()) { holder.divider.setVisibility(View.VISIBLE); } } /** * Bind the header view holder for {@code HeaderSettingItem}. * @param currentItem the item to be displayed. * @param holder the current view holder to be bound. */ private void bindHeader(HeaderSettingItem currentItem, ItemViewHolder holder) { ViewGroup.LayoutParams params = holder.layout.getLayoutParams(); params.height = ViewGroup.LayoutParams.WRAP_CONTENT; params.width = ViewGroup.LayoutParams.MATCH_PARENT; holder.layout.setLayoutParams(params); holder.title.setVisibility(View.VISIBLE); holder.title.setText(currentItem.getTitleResId()); holder.title.setTextAppearance(holder.layout.getContext(), R.style.obd_settings_header); holder.progressBar.setVisibility(currentItem.isProgressVisible() ? View.VISIBLE : View.GONE); } /** * Bind the view holder for {@code LeftIconTitleSettingItem} item type. * @param leftIconTitleSettingItem the {@code LeftIconTitleSettingItem}. * @param holder the current view holder to be bound. */ private void bindLeftIconTitle(LeftIconTitleSettingItem leftIconTitleSettingItem, ItemViewHolder holder) { holder.leftIcon.setVisibility(View.VISIBLE); holder.leftIcon.setImageResource(leftIconTitleSettingItem.getIconResId()); holder.title.setVisibility(View.VISIBLE); holder.title.setText(leftIconTitleSettingItem.getTitleResId()); } /** * Bind the view holder for {@code LeftIconTitleSubtitleItem} item type. * @param leftIconTitleSubtitleItem the item to be displayed. * @param holder the current view holder to be bound. 
*/ private void bindLeftIconTitleSubtitle(LeftIconTitleSubtitleItem leftIconTitleSubtitleItem, ItemViewHolder holder) { holder.leftIcon.setVisibility(View.VISIBLE); holder.leftIcon.setImageResource(leftIconTitleSubtitleItem.getIconResId()); holder.title.setVisibility(View.VISIBLE); holder.title.setText(leftIconTitleSubtitleItem.getTitleResId()); holder.subtitle.setVisibility(View.VISIBLE); holder.subtitle.setText(leftIconTitleSubtitleItem.getSubtitleResId()); } /** * @param holder the current view holder. * Sets the visibilities to the all the view holder view to gone. */ private void resetViewHolderVisibilities(ItemViewHolder holder) { holder.title.setVisibility(View.GONE); holder.subtitle.setVisibility(View.GONE); holder.leftIcon.setVisibility(View.GONE); holder.rightIcon.setVisibility(View.GONE); holder.divider.setVisibility(View.GONE); holder.progressBar.setVisibility(View.GONE); } /** * Sets the default layout for the view holder item. * @param holder the current view holder. */ private void resetViewHolderLayout(ItemViewHolder holder) { Context context = holder.layout.getContext(); holder.title.setTextAppearance(context, R.style.obd_faq_text_style); ViewGroup.LayoutParams params = holder.layout.getLayoutParams(); params.height = (int) context.getResources().getDimension(R.dimen.partial_kv_setting_item); params.width = ViewGroup.LayoutParams.MATCH_PARENT; holder.layout.setLayoutParams(params); } /** * Default view holder for the recycler view. */ class GeneralSettingsViewHolder extends RecyclerView.ViewHolder { /** * Default constructor for the current class. * @param itemView the view which will be passed. */ public GeneralSettingsViewHolder(@NonNull View itemView) { super(itemView); } } /** * Item View holder class in order to hold all references to the view's elements. */ class ItemViewHolder extends GeneralSettingsViewHolder { /** * {@code ImageView} representing the right icon. 
*/ private ImageView rightIcon; /** * {@code ImageView} representing the left icon. */ private ImageView leftIcon; /** * {@code TextView} representing the title. */ private TextView title; /** * {@code TextView} representing the subtitle. */ private TextView subtitle; /** * {@code View} representing the parent layout for all the views. */ private View layout; /** * {@code View} representing a divider decorator between the items. */ private View divider; /** * {@code ProgressBar} representing the progress for the current header items. */ private ProgressBar progressBar; /** * Default constructor for the current class. * @param itemView the item view to be set for the current holder. */ ItemViewHolder(View itemView) { super(itemView); title = itemView.findViewById(R.id.text_view_kv_setting_item_title); subtitle = itemView.findViewById(R.id.text_view_kv_setting_item_subtitle); leftIcon = itemView.findViewById(R.id.image_view_kv_setting_item_left_icon); rightIcon = itemView.findViewById(R.id.image_view_kv_setting_item_right_icon); layout = itemView.findViewById(R.id.layout_partial_kv_setting_item); divider = itemView.findViewById(R.id.view_kv_setting_item_divider); progressBar = itemView.findViewById(R.id.progress_bar_kv_setting_item_loader); } } /** * Tagging view holder class which will hold all references related to the view's elements. */ class TaggingViewHolder extends GeneralSettingsViewHolder { /** * {@code TextView} representing the subtitle. */ private TextView subtitle; /** * {@code View} representing the parent layout for all the views. */ private View layout; /** * Default constructor for the current class. * @param itemView the view which will be passed. */ public TaggingViewHolder(@NonNull View itemView) { super(itemView); subtitle = itemView.findViewById(R.id.text_tagging_subitem_subtitle); layout = itemView.findViewById(R.id.card_view_partial_tagging_subitem_parent); } } }
#!/bin/bash
shopt -s nullglob

# TODO: Transform to input variable.
out_dir="./brick_sentinel2"

band="$1"
if [ "$band" == "" ] ; then
    echo "Expected 1 parameter. The name of a band."
    exit 1
fi

# Collect the per-date VRTs for the requested band. With nullglob set, an
# unmatched pattern expands to an empty array rather than a literal string.
# Fixed: the glob was previously evaluated twice (once quoted into `files`,
# once unquoted into `vrt_files`, where an unquoted ${out_dir} word-splits).
vrt_files=("${out_dir}"/vrt/*"${band}".vrt)
if [ ${#vrt_files[@]} -eq 0 ] ; then
    echo "Band not found!"
    exit 1
fi

# Stack all per-date VRTs of this band into one multi-band VRT at 10m resolution.
/usr/bin/gdalbuildvrt -separate -overwrite -resolution user -tr 10 10 -r bilinear -q "${out_dir}"/S2A_MSIL2A_R096_T20LKP_20180812T143751_"${band%_*}"_10m.vrt "${vrt_files[@]}"
<!DOCTYPE html>
<html lang="en">
<head>
    <!-- Declare the encoding explicitly so non-ASCII content renders correctly. -->
    <meta charset="utf-8">
    <title>Employee Data Table</title>
</head>
<body>
    <table>
        <thead>
            <tr>
                <!-- scope="col" ties each header to its column for screen readers. -->
                <th scope="col">Name</th>
                <th scope="col">Age</th>
                <th scope="col">Team</th>
            </tr>
        </thead>
        <tbody>
            <tr>
                <td>John</td>
                <td>25</td>
                <td>A</td>
            </tr>
            <tr>
                <td>Mark</td>
                <td>30</td>
                <td>B</td>
            </tr>
        </tbody>
    </table>
</body>
</html>
<filename>java/org/tinyb/BluetoothAddressType.java
/**
 * Author: <NAME> <<EMAIL>>
 * Copyright (c) 2020 Gothel Software e.K.
 * Copyright (c) 2020 ZAFENA AB
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
package org.tinyb;

/**
 * Bluetooth address type constants.
 * <p>
 * See {@link #get(String)} for its string mapping.
 * </p>
 * <p>
 * See {@link #get(int)} for its native integer mapping.
 * </p>
 * <p>
 * BT Core Spec v5.2: Vol 3, Part C Generic Access Profile (GAP): 15.1.1.1 Public Bluetooth address
 * </p>
 * <pre>
 * 1) BT public address used as BD_ADDR for BR/EDR physical channel is defined in Vol 2, Part B 1.2
 * - EUI-48 or MAC (6 octets)
 *
 * 2) BT public address used as BD_ADDR for the LE physical channel is defined in Vol 6, Part B 1.3
 * </pre>
 * <p>
 * BT Core Spec v5.2: Vol 3, Part C Generic Access Profile (GAP): 15.1.1.2 Random Bluetooth address
 * </p>
 * <pre>
 * 3) BT random address used as BD_ADDR on the LE physical channel is defined in Vol 3, Part C 10.8
 * </pre>
 *
 * @since 2.0.0
 */
public enum BluetoothAddressType {
    /** Bluetooth BREDR address */
    BDADDR_BREDR      (0x00),
    /** Bluetooth LE public address */
    BDADDR_LE_PUBLIC  (0x01),
    /** Bluetooth LE random address, see {@link BLERandomAddressType} */
    BDADDR_LE_RANDOM  (0x02),
    /** Undefined */
    BDADDR_UNDEFINED  (0xff);

    /** The native integer value of this address type. */
    public final int value;

    // Fixed: lookup keys are constants; previously mutable (non-final) statics.
    private static final String _public = "public";
    private static final String _random = "random";

    /**
     * Maps the specified name to a constant of {@link BluetoothAddressType}.
     * <p>
     * According to BlueZ's D-Bus protocol, which is also followed by TinyB,
     * the following mappings are valid:
     * <ul>
     *   <li>"{@code public}" -&gt; {@link #BDADDR_LE_PUBLIC}</li>
     *   <li>"{@code random}" -&gt; {@link #BDADDR_LE_RANDOM}</li>
     *   <li>{@code null or empty} -&gt; {@link #BDADDR_BREDR}</li>
     * </ul>
     * </p>
     * <p>
     * If the above mappings are not resolving,
     * {@link #valueOf(String)} is being returned.
     * This maps the constant names itself to their respective constant.
     * </p>
     * @param name the string name to be mapped to a constant of this enum type.
     * @return the corresponding constant of this enum type.
     * @throws IllegalArgumentException if the specified name can't be mapped to a constant of this enum type
     *                                  as described above.
     */
    public static BluetoothAddressType get(final String name) throws IllegalArgumentException {
        if( null == name || name.length() == 0 ) {
            return BDADDR_BREDR;
        }
        if( _public.equals(name) ) {
            return BDADDR_LE_PUBLIC;
        }
        if( _random.equals(name) ) {
            return BDADDR_LE_RANDOM;
        }
        return valueOf(name);
    }

    /**
     * Maps the specified integer value to a constant of {@link BluetoothAddressType}.
     * @param value the integer value to be mapped to a constant of this enum type.
     * @return the corresponding constant of this enum type, using {@link #BDADDR_UNDEFINED} if not supported.
     */
    public static BluetoothAddressType get(final int value) {
        switch(value) {
            case 0x00: return BDADDR_BREDR;
            case 0x01: return BDADDR_LE_PUBLIC;
            case 0x02: return BDADDR_LE_RANDOM;
            default: return BDADDR_UNDEFINED;
        }
    }

    BluetoothAddressType(final int v) {
        value = v;
    }
}
#! /bin/sh # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Test to make sure texinfo.tex is correctly installed and disted by # -a when we're using AC_CONFIG_AUX_DIR. Bug report by by Per # Cederqvist. . test-init.sh cat > configure.ac << END AC_INIT([$me], [1.0]) AC_CONFIG_AUX_DIR([auxdir]) AM_INIT_AUTOMAKE AC_CONFIG_FILES([Makefile]) AC_OUTPUT END cat > Makefile.am << 'END' info_TEXINFOS = textutils.texi .PHONY: test1 test2 test1: @echo DISTFILES = $(DISTFILES) echo ' ' $(DISTFILES) ' ' | grep '[ /]auxdir/texinfo\.tex ' test2: distdir ls -l $(distdir)/* test -f $(distdir)/auxdir/texinfo.tex END echo '@setfilename textutils.info' > textutils.texi mkdir auxdir $ACLOCAL $AUTOCONF $AUTOMAKE -a test -f auxdir/texinfo.tex ./configure # Create textutils.info by hand, so that we don't have to require # makeinfo. Also ensure it's really newer than textutils.texi, so # that make won't try to re-create it. $sleep : > textutils.info $MAKE test1 test2 :
package com.leetcode.offer;

public class Solution_27 {

    /**
     * Mirrors a binary tree in place: every node's left and right children
     * are swapped, all the way down.
     *
     * @param root the root of the tree, possibly null
     * @return the same root reference, with the tree mirrored
     */
    public TreeNode mirrorTree(TreeNode root) {
        if (root == null) {
            return null;
        }
        // Mirror the subtrees while swapping them across the root.
        TreeNode originalLeft = root.left;
        root.left = mirrorTree(root.right);
        root.right = mirrorTree(originalLeft);
        return root;
    }
}
import React, { Component } from "react"; import { Menu } from 'semantic-ui-react'; import { injectIntl } from 'gatsby-plugin-intl'; class FuelControlsMenu extends Component { render() { const { intl, menuItem, onMenuChange, } = this.props; return ( <React.Fragment> <Menu pointing secondary style={{ marginBottom: '2em' }} vertical > <Menu.Item as="a" name={intl.formatMessage({id: 'fuel_menu_overview' })} active={menuItem === "overview"} onClick={onMenuChange} /> <Menu.Item as="a" name={intl.formatMessage({id: 'fuel_menu_transactions' })} active={menuItem === "transactions"} onClick={onMenuChange} style={{ display: 'none' }} /> <Menu.Item as="a" name={intl.formatMessage({id: 'fuel_menu_settings' })} active={menuItem === "settings"} onClick={onMenuChange} style={{ display: 'none' }} /> </Menu> </React.Fragment> ) } } export default injectIntl(FuelControlsMenu);
# Train R-FCN (ResNet-50 backbone, OHEM config) on the kitti_train imdb,
# logging stdout/stderr to separate files.
./tools/train_net.py \
  --gpu 1 \
  --solver models/pascal_voc/ResNet_50/rfcn_end2end/solver_ohem.prototxt \
  --weights data/imagenet_models/ResNet_50_model.caffemodel \
  --imdb kitti_train \
  --iters 100000 \
  --cfg experiments/cfgs/rfcn_end2end_ohem.yml > my_train_res50.log 2> my_train_res50.err

# if you want to see it you can use another screen terminal and try
# tail -f my_train_res50.log
# or .err for following the error output
# also things like this are useful
# cat my_train_res50.err | grep Accuracy
# that might show you how accuracy is changing and so on
<reponame>AymanSaad7777/sdk-rest
package com.bullhornsdk.data.model.response.single.customobject;

import com.bullhornsdk.data.model.entity.core.customobject.ClientCorporationCustomObjectInstance26;
import com.bullhornsdk.data.model.response.single.StandardWrapper;

/**
 * REST response wrapper for a single {@link ClientCorporationCustomObjectInstance26}.
 * All behavior is inherited from {@link StandardWrapper}; this subclass exists
 * only to bind the generic type for deserialization.
 */
public class ClientCorporationCustomObjectInstance26Wrapper extends StandardWrapper<ClientCorporationCustomObjectInstance26> {

}
#!/bin/bash # Demonstrate three static ring colours on Wraith Prism. # # The ring only supports one static colour, # but we fake two more using breath and morse channels # with background colours and blending modes. wraith \ "ring-map static static static static static breath breath breath breath breath morse morse morse morse morse" \ "ring-effect static 1 0xff 0xff 0x00 0x00" \ "ring-effect breath 1 0xff 0x00 0xff 0x00 0x00 0xff 0x00 0x40" \ "ring-effect morse 1 0xff 0x00 0x00 0xff 0x00 0x00 0xff"
package io.casperlabs.client

import java.math.BigInteger

import io.casperlabs.crypto.codec.Base16
import io.circe.Json
import scalapb.GeneratedMessage
import scalapb.descriptors._
import scalapb.textformat.TextGenerator

/** Renders protobuf messages either as JSON or as proto text format,
  * with byte fields encoded as Base16 unless the standard encoding is requested.
  */
object Printer {

  /** Render `m` as JSON (when `json`) or proto text, honouring `bytesStandard`. */
  def print(m: GeneratedMessage, bytesStandard: Boolean, json: Boolean): String =
    if (json) {
      Json.print(m, bytesStandard)
    } else {
      ProtoString.print(m, bytesStandard)
    }

  /** Based on what scalapb-circe `JsonFormat.toJsonString` does but allows to customize Base16/Base64 for bytes. */
  private object Json {
    // Overrides byte-field serialization to emit Base16 instead of the default Base64.
    private val base16Printer = new scalapb_circe.Printer() {
      override def serializeSingleValue(
          fd: FieldDescriptor,
          value: PValue,
          formattingLongAsNumber: Boolean
      ): Json =
        value match {
          case PByteString(value) =>
            io.circe.Json.fromString(Base16.encode(value.toByteArray))
          case v =>
            super.serializeSingleValue(fd, v, formattingLongAsNumber)
        }
    }
    private val base64Printer = new scalapb_circe.Printer()

    private val indent = ' '.toString * 2

    def print(m: GeneratedMessage, bytesStandard: Boolean): String = {
      val json = if (bytesStandard) base64Printer.toJson(m) else base16Printer.toJson(m)
      json.printWith(io.circe.Printer.indented(indent))
    }
  }

  /** Based on what scalapb `toProtoString` does but allows to customize Base16/ASCII-escaped for bytes. */
  private object ProtoString {
    def print(m: GeneratedMessage, bytesStandard: Boolean): String =
      if (bytesStandard) {
        scalapb.TextFormat.printToUnicodeString(m)
      } else {
        this.printToUnicodeString(m)
      }

    private def printToUnicodeString(m: GeneratedMessage) = {
      val out = new TextGenerator(singleLine = false, escapeNonAscii = false)
      print(m.toPMessage, out)
      // Fixed: out.result() was previously invoked twice; the first value was discarded.
      out.result()
    }

    // Fields are emitted in ascending field-number order for stable output.
    private def print(p: PMessage, out: TextGenerator): Unit =
      p.value.toSeq.sortBy(_._1.number).foreach {
        case (fd, value) => printField(fd, value, out)
      }

    private def printField(
        fd: FieldDescriptor,
        value: PValue,
        out: TextGenerator
    ): Unit =
      value match {
        case PRepeated(values) =>
          values.foreach(v => printSingleField(fd, v, out))
        case PEmpty =>
        case _ =>
          printSingleField(fd, value, out)
      }

    private def printSingleField(
        fd: FieldDescriptor,
        value: PValue,
        out: TextGenerator
    ) = {
      out.add(fd.name)
      value match {
        case PMessage(_) =>
          // Nested messages are printed in a braced, indented block.
          out.addNewLine(" {").indent()
          printFieldValue(fd, value, out)
          out.outdent().addNewLine("}")
        case _ =>
          out.add(": ")
          printFieldValue(fd, value, out)
          out.addNewLine("")
      }
    }

    private def printFieldValue(
        fd: FieldDescriptor,
        value: PValue,
        out: TextGenerator
    ): Unit =
      value match {
        case scalapb.descriptors.PInt(v) =>
          // Unsigned 32-bit proto types must not render a Java-negative value.
          if (fd.protoType.isTypeUint32 || fd.protoType.isTypeFixed32)
            out.add(unsignedToString(v))
          else
            out.add(v.toString)
        case scalapb.descriptors.PLong(v) =>
          if (fd.protoType.isTypeUint64 || fd.protoType.isTypeFixed64)
            out.add(unsignedToString(v))
          else
            out.add(v.toString)
        case scalapb.descriptors.PBoolean(v) =>
          out.add(v.toString)
        case scalapb.descriptors.PFloat(v) =>
          out.add(v.toString)
        case scalapb.descriptors.PDouble(v) =>
          out.add(v.toString)
        case scalapb.descriptors.PEnum(v) =>
          if (!v.isUnrecognized) out.add(v.name)
          else out.add(v.number.toString)
        case e: scalapb.descriptors.PMessage =>
          print(e, out)
        case scalapb.descriptors.PString(v) =>
          out
            .add("\"")
            .addMaybeEscape(v)
            .add("\"")
        case scalapb.descriptors.PByteString(v) =>
          out
            .add("\"")
            //.add(textformat.TextFormatUtils.escapeBytes(v))
            .add(Base16.encode(v.toByteArray))
            .add("\"")
        case scalapb.descriptors.PRepeated(_) =>
          throw new RuntimeException("Should not happen.")
        case scalapb.descriptors.PEmpty =>
          throw new RuntimeException("Should not happen.")
      }

    /** Convert an unsigned 32-bit integer to a string. */
    private def unsignedToString(value: Int): String =
      if (value >= 0) java.lang.Integer.toString(value)
      else java.lang.Long.toString(value & 0X00000000FFFFFFFFL)

    /** Convert an unsigned 64-bit integer to a string. */
    private def unsignedToString(value: Long): String =
      if (value >= 0) java.lang.Long.toString(value)
      else BigInteger.valueOf(value & 0X7FFFFFFFFFFFFFFFL).setBit(63).toString
  }
}
#!/bin/sh

export PATH=${PATH}:$5

ota_offset=$1
#dir=/home/cwhaiyi/pcshare/rualxw/AliOS-Things/platform/mcu/rtl8710bn
platform_dir=$2/platform/mcu/rtl8710bn

if [ ! -d "${platform_dir}/Debug/Exe" ]; then
    mkdir -p ${platform_dir}/Debug/Exe
fi

BIN_DIR=${platform_dir}/Debug/Exe

app=`echo $3 | tr '/' '.'`
outputplatform=$app@$4
outputdir=$2/out/${outputplatform}/binary

# The second-boot image (linked at 0x0800B000) gets a distinct output name.
if [ "${ota_offset}" = "0x0800B000" ]; then
    outputname=${outputplatform}.2boot
else
    outputname=${outputplatform}
fi

OS=`uname -s`
PICK=${platform_dir}/tools/pick
PAD=${platform_dir}/tools/padding
CHKSUM=${platform_dir}/tools/checksum
OTA=${platform_dir}/tools/ota

echo ${platform_dir}
echo ""
echo -n "install dependent software packages ..."

if [ "$OS" = "Darwin" ]; then
    if [ "`which gawk`" = "" ];then
        sudo easy_install gawk > /dev/null
    fi
else
    #Some Linux version
    if [ "`which apt-get`" != "" ]; then
        if [ "`which gawk`" = "" ];then
            # Fixed: was "sudo sudo apt-get ..." (duplicated sudo).
            sudo apt-get -y install gawk > /dev/null
        fi
    fi
fi

find ${BIN_DIR}/ -name "*.axf" | xargs rm -rf
find ${BIN_DIR}/ -name "*.map" | xargs rm -rf
rm -f ${outputdir}/${outputname}.bin

cp ${outputdir}/${outputname}.elf ${BIN_DIR}/${outputname}.axf
arm-ali-aoseabi-nm ${BIN_DIR}/${outputname}.axf | sort > ${BIN_DIR}/${outputname}.nmap
arm-ali-aoseabi-objcopy -j .ram_image2.entry -j .ram_image2.data -j .ram_image2.text -j .ram_image2.bss -j .ram_image2.skb.bss -j .ram_heap.data -Obinary ${BIN_DIR}/${outputname}.axf ${BIN_DIR}/ram_2.r.bin
arm-ali-aoseabi-objcopy -j .xip_image2.text -Obinary ${BIN_DIR}/${outputname}.axf ${BIN_DIR}/xip_image2.bin
arm-ali-aoseabi-objcopy -j .ram_rdp.text -Obinary ${BIN_DIR}/${outputname}.axf ${BIN_DIR}/rdp.bin

# Fixed: the existence test previously checked "${BIN_DIR}/bin/boot_all.bin"
# (a path nothing creates) while the copy target is "${BIN_DIR}/boot_all.bin",
# so the guard never fired and the copy always ran. Test the copy target.
if [ ! -f "${BIN_DIR}/boot_all.bin" ]; then
    cp ${platform_dir}/bin/boot_all.bin ${BIN_DIR}/boot_all.bin
fi
chmod 777 ${BIN_DIR}/boot_all.bin
chmod +rx ${PICK} ${CHKSUM} ${PAD} ${OTA}

# Extract the linked address ranges from the symbol map and cut the raw dumps
# down to the exact image spans.
${PICK} 0x`grep __ram_image2_text_start__ ${BIN_DIR}/${outputname}.nmap | gawk '{print $1}'` 0x`grep __ram_image2_text_end__ ${BIN_DIR}/${outputname}.nmap | gawk '{print $1}'` ${BIN_DIR}/ram_2.r.bin ${BIN_DIR}/ram_2.bin raw
${PICK} 0x`grep __ram_image2_text_start__ ${BIN_DIR}/${outputname}.nmap | gawk '{print $1}'` 0x`grep __ram_image2_text_end__ ${BIN_DIR}/${outputname}.nmap | gawk '{print $1}'` ${BIN_DIR}/ram_2.bin ${BIN_DIR}/ram_2.p.bin
${PICK} 0x`grep __xip_image2_start__ ${BIN_DIR}/${outputname}.nmap | gawk '{print $1}'` 0x`grep __xip_image2_start__ ${BIN_DIR}/${outputname}.nmap | gawk '{print $1}'` ${BIN_DIR}/xip_image2.bin ${BIN_DIR}/xip_image2.p.bin

IMAGE2_OTA1=image2_2boot.bin
IMAGE2_OTA2=image2_app.bin
OTA_ALL=ota_all.bin

#2boot bin
if [ "${ota_offset}" = "0x0800B000" ]; then
    cat ${BIN_DIR}/xip_image2.p.bin > ${BIN_DIR}/${IMAGE2_OTA1}
    chmod 777 ${BIN_DIR}/${IMAGE2_OTA1}
    cat ${BIN_DIR}/ram_2.p.bin >> ${BIN_DIR}/${IMAGE2_OTA1}
    ${CHKSUM} ${BIN_DIR}/${IMAGE2_OTA1} || true
    rm ${BIN_DIR}/xip_image2.p.bin ${BIN_DIR}/ram_2.p.bin
    cp ${platform_dir}/bin/boot_all.bin ${outputdir}/boot_all.bin
    cp ${BIN_DIR}/${IMAGE2_OTA1} ${outputdir}/${IMAGE2_OTA1}
else
    cat ${BIN_DIR}/xip_image2.p.bin > ${BIN_DIR}/${IMAGE2_OTA2}
    chmod 777 ${BIN_DIR}/${IMAGE2_OTA2}
    cat ${BIN_DIR}/ram_2.p.bin >> ${BIN_DIR}/${IMAGE2_OTA2}
    ${CHKSUM} ${BIN_DIR}/${IMAGE2_OTA2} || true
    rm ${BIN_DIR}/xip_image2.p.bin ${BIN_DIR}/ram_2.p.bin
    cp ${BIN_DIR}/${IMAGE2_OTA2} ${outputdir}/${IMAGE2_OTA2}
fi

#rm -f ${BIN_DIR}/ram_2.bin ${BIN_DIR}/ram_2.p.bin ${BIN_DIR}/ram_2.r.bin ${BIN_DIR}/xip_image2.bin ${BIN_DIR}/xip_image2.p.bin
package org.folio.rest.persist.ddlgen;

import static org.junit.jupiter.api.Assertions.*;

import org.junit.jupiter.api.Test;

class TableTest {

  /** A table name containing an illegal character must be rejected. */
  @Test
  void invalidTableName() {
    Table tableUnderTest = new Table();
    assertThrows(IllegalArgumentException.class,
        () -> tableUnderTest.setTableName("foo&bar"));
  }
}
<gh_stars>1-10
// Copyright 2021 The Terasology Foundation
// SPDX-License-Identifier: Apache-2.0
package org.terasology.engine.persistence.typeHandling.mathTypes;

import com.google.gson.Gson;
import org.junit.jupiter.api.Test;
import org.terasology.engine.ModuleEnvironmentTest;
import org.terasology.engine.core.module.ModuleContext;
import org.terasology.engine.persistence.typeHandling.TypeHandlerLibraryImpl;
import org.terasology.engine.persistence.typeHandling.gson.GsonPersistedDataReader;
import org.terasology.engine.persistence.typeHandling.gson.GsonPersistedDataSerializer;
import org.terasology.engine.persistence.typeHandling.gson.GsonPersistedDataWriter;
import org.terasology.engine.world.block.BlockArea;
import org.terasology.engine.world.block.BlockAreac;
import org.terasology.gestalt.naming.Name;
import org.terasology.persistence.serializers.Serializer;
import org.terasology.persistence.typeHandling.TypeHandlerLibrary;
import org.terasology.reflection.TypeInfo;

import java.io.IOException;

import static org.junit.jupiter.api.Assertions.assertEquals;

public class BlockAreaTypeHandlerTest extends ModuleEnvironmentTest {

    /** Simple carrier with one concrete and one read-only block-area field. */
    static class TestObject {
        public BlockArea b1;
        public BlockAreac b2;
    }

    private TypeHandlerLibrary typeHandlerLibrary;
    private Serializer<?> gsonSerializer;
    private Gson gson = new Gson();

    @Override
    public void setup() {
        // Bind the unittest module context before building the handler library.
        ModuleContext.setContext(moduleManager.getEnvironment().get(new Name("unittest")));

        typeHandlerLibrary = TypeHandlerLibraryImpl.forModuleEnvironment(moduleManager, typeRegistry);

        gsonSerializer = new Serializer<>(typeHandlerLibrary,
                new GsonPersistedDataSerializer(),
                new GsonPersistedDataWriter(gson),
                new GsonPersistedDataReader(gson)
        );
    }

    @Test
    public void testGsonSerialization() throws IOException {
        // Round-trip a TestObject through the Gson-backed serializer and make
        // sure both block-area fields survive unchanged.
        TestObject source = new TestObject();
        source.b1 = new BlockArea(-1, -1, 0, 0);
        source.b2 = new BlockArea(0, 0, 1, 1);

        byte[] payload = gsonSerializer.serialize(source, new TypeInfo<TestObject>() {
        }).get();
        TestObject restored = gsonSerializer.deserialize(new TypeInfo<TestObject>() {
        }, payload).get();

        assertEquals(new BlockArea(-1, -1, 0, 0), restored.b1);
        assertEquals(new BlockArea(0, 0, 1, 1), restored.b2);
    }
}
from collections import Counter


def char_count(arr):
    """Count character occurrences for each string in ``arr``.

    Args:
        arr: An iterable of strings.

    Returns:
        A dict mapping each string to a dict of ``{character: count}``.
        Duplicate strings in ``arr`` collapse to a single key, matching the
        original behaviour.
    """
    # Counter does the per-character tally; convert back to a plain dict so
    # the return type is unchanged for callers comparing against dict literals.
    return {s: dict(Counter(s)) for s in arr}


def main():
    arr = ["Apple", "Banana"]
    result = char_count(arr)
    print(result)


if __name__ == '__main__':
    main()
<gh_stars>0
// Demonstrates Array.prototype.sort: it sorts IN PLACE and, when no compare
// function is given, compares elements as strings (lexicographic order) —
// which is why plain .sort() misorders numbers.
let stringArray = [ 'Blue', 'Humpback', 'Beluga' ];
let numericStringArray = [ '80', '9', '700' ];
let numberArray = [ 40, 1, 5, 200 ];
let mixedNumericArray = [ '80', '9', '700', 40, 1, 5, 200 ];

// Numeric comparator: negative/zero/positive result orders a before/with/after b.
function compareNumbers( a, b ) {
  return a - b;
}

console.log( 'stringArray:', stringArray.join() );
console.log( 'Sorted:', stringArray.sort() );

console.log( 'numberArray:', numberArray.join() );
console.log( 'Sorted without a compare function:', numberArray.sort() );
console.log( 'Sorted with compareNumbers:', numberArray.sort( compareNumbers ) );

console.log( 'numericStringArray:', numericStringArray.join() );
console.log( 'Sorted without a compare function:', numericStringArray.sort() );
console.log( 'Sorted with compareNumbers:', numericStringArray.sort( compareNumbers ) );

console.log( 'mixedNumericArray:', mixedNumericArray.join() );
console.log( 'Sorted without a compare function:', mixedNumericArray.sort() );
console.log( 'Sorted with compareNumbers:', mixedNumericArray.sort( compareNumbers ) );

/*
stringArray: Blue,Humpback,Beluga
Sorted: [ 'Beluga', 'Blue', 'Humpback' ]

numberArray: 40,1,5,200
Sorted without a compare function: [ 1, 200, 40, 5 ]
Sorted with compareNumbers: [ 1, 5, 40, 200 ]

numericStringArray: 80,9,700
Sorted without a compare function: [ '700', '80', '9' ]
Sorted with compareNumbers: [ '9', '80', '700' ]

mixedNumericArray: 80,9,700,40,1,5,200
Sorted without a compare function: [ 1, 200, 40, 5, '700', '80', '9' ]
Sorted with compareNumbers: [ 1, 5, '9', 40, '80', 200, '700' ]
\___________________________________________________
bryan_dir:general-utils_exitstatus:0 ====>
*/
#! /bin/bash #SBATCH -o /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2015_12_30_scalability_rexi_fd/run_rexi_fd_m0064_t028_n0128_r0014_a1.txt ###SBATCH -e /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2015_12_30_scalability_rexi_fd/run_rexi_fd_m0064_t028_n0128_r0014_a1.err #SBATCH -J rexi_fd_m0064_t028_n0128_r0014_a1 #SBATCH --get-user-env #SBATCH --clusters=mpp2 #SBATCH --ntasks=14 #SBATCH --cpus-per-task=28 #SBATCH --exclusive #SBATCH --export=NONE #SBATCH --time=03:00:00 #declare -x NUMA_BLOCK_ALLOC_VERBOSITY=1 declare -x KMP_AFFINITY="granularity=thread,compact,1,0" declare -x OMP_NUM_THREADS=28 echo "OMP_NUM_THREADS=$OMP_NUM_THREADS" echo . /etc/profile.d/modules.sh module unload gcc module unload fftw module unload python module load python/2.7_anaconda_nompi module unload intel module load intel/16.0 module unload mpi.intel module load mpi.intel/5.1 module load gcc/5 cd /home/hpc/pr63so/di69fol/workspace/SWEET_2015_12_26/benchmarks_performance/rexi_tests_lrz_freq_waves/2015_12_30_scalability_rexi_fd cd ../../../ . local_software/env_vars.sh # force to use FFTW WISDOM data declare -x SWEET_FFTW_LOAD_WISDOM_FROM_FILE="FFTW_WISDOM_nofreq_T28" time -p mpiexec.hydra -genv OMP_NUM_THREADS 28 -envall -ppn 1 -n 14 ./build/rexi_fd_m_tyes_a1 --initial-freq-x-mul=2.0 --initial-freq-y-mul=1.0 -f 1 -g 1 -H 1 -X 1 -Y 1 --compute-error 1 -t 50 -R 4 -C 0.3 -N 128 -U 0 -S 0 --use-specdiff-for-complex-array 0 --rexi-h 0.8 --timestepping-mode 1 --staggering 0 --rexi-m=64 -C -5.0
def search(arr, target):
    """Return the index of the first occurrence of ``target`` in ``arr``.

    Args:
        arr: A sequence supporting ``.index()`` (e.g. a list).
        target: The value to locate.

    Returns:
        The zero-based index of the first match, or -1 if absent.
    """
    # list.index performs the same linear scan as the original loop,
    # but in optimized C and with clearer intent.
    try:
        return arr.index(target)
    except ValueError:
        return -1


print(search([1,2,3,4,5], 5))
<reponame>Thaslim/Splitwise-Lab2
import dotenv from 'dotenv';
import AWS from 'aws-sdk';

// Load AWS credentials and region settings from the local .env file into
// process.env before the client is constructed.
dotenv.config({ path: '.env' });

// S3 configuration: a shared client instance built from environment variables.
export const S3 = new AWS.S3({
  accessKeyId: process.env.AWS_ACCESS_KEY,
  secretAccessKey: process.env.AWS_SECRET_KEY,
  region: process.env.AWS_BUCKET_REGION,
});
/// Runs one round of actions against both chains and bundles the results.
///
/// The Ethereum step runs first; if it fails, the `?` short-circuits and the
/// Cosmos step is never attempted.
///
/// NOTE(review): the concrete semantics of `perform_ethereum_actions` and
/// `perform_cosmos_actions` are defined elsewhere in this crate — this
/// function only sequences them. Confirm whether the two steps are intended
/// to be independent (and could run concurrently) or deliberately ordered.
async fn interact_with_blockchains(
    web3: &Web3Client,
    grpc_client: &mut GrpcClient,
    batch_request_mode: BatchRequestMode,
    ethereum_address: EthereumAddress,
    cosmos_key: CosmosKey,
    cosmos_fee: CosmosFee,
) -> Result<InteractResult, InteractError> {
    // Perform actions on Ethereum blockchain using web3 client and ethereum_address
    let ethereum_result = perform_ethereum_actions(web3, ethereum_address).await?;

    // Perform actions on Cosmos blockchain using gRPC client, cosmos_key, and cosmos_fee
    let cosmos_result = perform_cosmos_actions(grpc_client, batch_request_mode, cosmos_key, cosmos_fee).await?;

    Ok(InteractResult {
        ethereum_result,
        cosmos_result,
    })
}
<filename>src/string_handle/Boj12871.java
package string_handle;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.StringTokenizer;

/**
 *
 * @author minchoba
 * Baekjoon 12871: Infinite String
 *
 * @see https://www.acmicpc.net/problem/12871/
 *
 */
public class Boj12871 {
	private static final String SPACE = " ";

	public static void main(String[] args) throws Exception{
		// Read the two input strings through a buffered reader
		BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
		String s = br.readLine();
		String t = br.readLine();

		int sLeng = s.length();
		int tLeng = t.length();

		// Both repetitions below have length s.length() * t.length(), so the
		// infinite repetitions of s and t agree iff these finite ones do.
		StringBuilder sb = new StringBuilder();
		for(int i = 0; i < sLeng; i++) {		// Append t, s.length() times
			sb.append(t);
		}

		sb.append(SPACE);		// Insert a space between the two halves

		for(int i = 0; i < tLeng; i++) {		// Append s, t.length() times
			sb.append(s);
		}

		StringTokenizer st = new StringTokenizer(sb.toString());		// Split the two halves on the space
		System.out.println(st.nextToken().equals(st.nextToken()) ? 1 : 0);		// Print 1 if they are equal, 0 otherwise
	}
}
#!/bin/bash -e wf_name="sharp" version="0.1.1" files="submit-hashtag.sh submit-citeseq.sh submit-asapseq.sh submit-cellplex.sh Sharp.deps.zip Hashtag.wdl CiteSeq.wdl AsapSeq.wdl Sharp.options.aws.json configs/*.json" dest="$HOME/scing/bin" usage() { cat << EOF USAGE: `basename $0` [options] -d destination (e.g. $HOME/scing/bin/) -s AWS S3 destination; no trailing slash (e.g. s3://dp-lab-home/software) EOF } while getopts "d:s:h" OPTION do case $OPTION in d) dest=$OPTARG ;; s) s3_dest=$OPTARG ;; h) usage; exit 1 ;; *) usage; exit 1 ;; esac done if [ -z "$dest" ] then usage exit 1 fi # make destination directory if necessary mkdir -p ${dest} # create a temporary directory and copy files path_workdir=`mktemp -d` mkdir -p ${path_workdir}/${wf_name}-${version} rsync -Rv ${files} ${path_workdir}/${wf_name}-${version}/ # tar-gzip cd ${path_workdir} tar cvzf ${dest}/${wf_name}-${version}.tar.gz ${wf_name}-${version}/* # deploy to AWS S3 if `s3_dest` is specified if [ -n "$s3_dest" ] then # create installation script cat <<EOF > ${path_workdir}/install.sh #!/bin/bash aws s3 cp --quiet ${s3_dest}/${wf_name}-${version}.tar.gz . tar xzf ${wf_name}-${version}.tar.gz echo "DONE." EOF aws s3 cp ${dest}/${wf_name}-${version}.tar.gz ${s3_dest}/ aws s3 cp ${path_workdir}/install.sh ${s3_dest}/install-${wf_name}-${version}.sh echo "Installation:" echo "aws s3 cp ${s3_dest}/install-${wf_name}-${version}.sh - | bash" fi # remove temporary directory rm -rf ${path_workdir} echo "DONE."
# Use clang with ccache export CC=/usr/lib/ccache/bin/clang export CXX=/usr/lib/ccache/bin/clang++
#ifndef LowPass_h
#define LowPass_h

/*
  simple resonant filter posted to musicdsp.org by <NAME>
  http://www.musicdsp.org/archive.php?classid=3#259

    // set feedback amount given f and q between 0 and 1
    fb = q + q/(1.0 - f);

    // for each sample...
    buf0 = buf0 + f * (in - buf0 + fb * (buf0 - buf1));
    buf1 = buf1 + f * (buf0 - buf1);
    out = buf1;

  Taken from mozzi
*/

// Two-pole resonant low-pass filter with cutoff f and resonance q in [0, 1).
class LowPass {
public:
  // Zero-initialize all state: the original left the filter state (buf0/buf1)
  // and coefficients uninitialized, so the first Process() calls read
  // indeterminate values (undefined behavior).
  LowPass() : q(0), f(0), fb(0), buf0(0), buf1(0) {
  }

  ~LowPass(void) {
  }

  // Set cutoff f and resonance q, both normalized to [0, 1).
  void SetParameters(float f, float q) {
    // f == 1 would make the feedback term divide by zero; clamp just below.
    if (f == float(1)) {
      f = float(0.999);
    }
    this->f = f;
    this->q = q;
    fb = q + q / (float(1) - f);
  }

  // Advance the filter by one sample and return the filtered output.
  float Process(float in) {
    buf0 = buf0 + f * (in - buf0 + fb * (buf0 - buf1));
    buf1 = buf1 + f * (buf0 - buf1);
    return buf1;
  }

protected:
  float q;     // resonance amount
  float f;     // cutoff coefficient
  float fb;    // feedback amount derived from f and q
  float buf0;  // first integrator state
  float buf1;  // second integrator state (also the output)
};

#endif
import { Injectable } from '@angular/core';
import { IOathProvider } from '../oauth.provider.interface';
import { OAuthProfile } from '../models/oauth-profile.model';
import { CordovaOauth } from 'ng2-cordova-oauth/oauth';
import { Google } from 'ng2-cordova-oauth/provider/google';
import { Config } from '../../../config';
import { Http } from '@angular/http';

/** Shape of the token payload returned by the cordova OAuth flow. */
interface ILoginResponse {
    access_token: string;
}

/**
 * OAuth provider backed by Google. Uses the in-app-browser cordova flow to
 * obtain an access token, then the Google userinfo endpoint to build an
 * {@link OAuthProfile}.
 */
@Injectable()
export class GoogleOauthProvider implements IOathProvider {
    private http: Http;
    private config: Config;
    private cordovaOauth: CordovaOauth;
    private google: Google;

    constructor(http: Http, config: Config) {
        this.http = http;
        this.config = config;

        // Google provider configured from app-level settings.
        this.google = new Google({
            clientId: config.google.appId,
            appScope: config.google.scope
        });

        this.cordovaOauth = new CordovaOauth();
    }

    /** Runs the interactive login flow and resolves to the access token. */
    login(): Promise<string> {
        return this.cordovaOauth.login(this.google).then((x: ILoginResponse) => x.access_token);
    }

    /**
     * Fetches the Google userinfo profile for the given access token and
     * maps it into the app's provider-neutral profile shape.
     */
    getProfile(accessToken: string): Promise<OAuthProfile> {
        let query = `access_token=${accessToken}`;
        let url = `${this.config.google.apiUrl}userinfo?${query}`;

        return this.http.get(url)
            .map(x => x.json())
            .map(x => {
                // Split the display name once: the first word is the first
                // name, everything after it is the last name. The previous
                // version kept only the second word, dropping middle names
                // and yielding `undefined` for single-word names.
                let [firstName, ...rest] = (x.name || '').split(' ');

                return {
                    firstName: firstName,
                    lastName: rest.join(' '),
                    email: x.email,
                    provider: 'google'
                };
            })
            .toPromise();
    }
}
import { Injectable } from '@nestjs/common';

// Placeholder user shape; replace with a proper entity/interface when the
// persistence layer is wired up.
export type User = any;

/**
 * In-memory user store used for the authentication sample.
 * Not backed by a database — the two users below are hard-coded.
 */
@Injectable()
export class UsersService {
  // Hard-coded demo users.
  // NOTE(review): the key is spelled `useId`, not `userId` — confirm whether
  // any consumer relies on this spelling before renaming it.
  private readonly users = [
    {
      useId: 1,
      username: 'smith',
      password: '<PASSWORD>',
    },
    {
      useId: 2,
      username: 'john',
      password: '<PASSWORD>',
    },
  ];

  /**
   * Looks up a user by exact username.
   * Resolves to the user record, or undefined when no match exists.
   */
  async findOne(username: string): Promise<User | undefined> {
    return this.users.find((user) => user.username === username);
  }
}
/* ============ Menu Queries ============ */
// Inquirer-style prompt definitions for the employee-tracker CLI.
// Each export is a single "list" question whose answer is stored
// under the key `choice`.

// Top-level menu: view, manage employees, manage departments, or quit.
const startOptions = [
    {
        type: "list",
        message: "What would you like to do?",
        name: "choice",
        choices: [
            {
                name: "View Employees",
                value: "View",
            },
            {
                name: "Manage Employees",
                value: "Manage",
            },
            {
                name: "Manage Departments",
                value: "Department",
            },
            {
                name: "Exit application",
                value: "Exit"
            }
        ]
    }
]

// "View Employees" sub-menu.
const viewOptions = [
    {
        type: "list",
        message: "Please choose from the following:",
        name: "choice",
        choices: [
            {
                name: "View All Employees",
                value: "All",
            },
            {
                name: "View Managers",
                value: "Managers",
            },
            {
                name: "Return to Main Menu",
                value: "Main",
            }
        ]
    }
]

// "Manage Employees" sub-menu.
const manageOptions = [
    {
        type: "list",
        message: "Please choose from the following:",
        name: "choice",
        choices: [
            {
                name: "Add Employee",
                value: "Add",
            },
            {
                name: "Remove Employee",
                value: "Remove",
            },
            {
                name: "Update Employee Information",
                value: "Update",
            },
            {
                name: "Return to Main Menu",
                value: "Main",
            }
        ]
    }
]

// "Manage Departments" sub-menu.
const departmentOptions = [
    {
        type: "list",
        message: "Please choose from the following:",
        name: "choice",
        choices: [
            {
                name: "View Departments",
                value: "Departments",
            },
            {
                name: "View Roles",
                value: "Roles",
            },
            {
                name: "Add/Remove Department",
                value: "manageDepartment",
            },
            {
                name: "Add/Remove Roles",
                value: "manageRoles",
            },
            {
                name: "Return to Main Menu",
                value: "Main",
            }
        ]
    }
]

module.exports = {
    startOptions,
    viewOptions,
    manageOptions,
    departmentOptions,
}
const path = require("path"); const getPath = (p) => path.resolve(__dirname, "../", p); const createClientBuildConfig = (p, watch=undefined) => ({ root: getPath(`src/client/${p}/`), base: "./", // publicDir, build: { outDir: getPath(`out/client/${p}/`), emptyOutDir: true, watch, }, }); module.exports = { getPath, createClientBuildConfig };
package com.cwl.service.part_4.section_27;

import com.cwl.service.part_4.section_19.Future;

import java.util.HashMap;
import java.util.Map;

/**
 * Active-object proxy for {@link OrderService}: each call is captured as a
 * {@code MethodMessage} and placed on the {@link ActiveMessageQueue}, so the
 * caller returns immediately while a worker executes the real service call.
 *
 * @author cwl
 * @date 2020/1/31
 */
public class OrderServiceProxy implements OrderService {

    private final OrderService orderService;

    private final ActiveMessageQueue activeMessageQueue;

    public OrderServiceProxy(OrderService orderService, ActiveMessageQueue activeMessageQueue) {
        this.orderService = orderService;
        this.activeMessageQueue = activeMessageQueue;
    }

    @Override
    public Future<String> findOrderDetails(long orderId) {
        // Create an ActiveFuture so this method can return immediately;
        // the worker thread fills in the result later.
        final ActiveFuture<String> activeFuture = new ActiveFuture<>();

        // Capture the call arguments plus the future into a MethodMessage.
        Map<String, Object> params = new HashMap<>();
        params.put("orderId", orderId);
        params.put("activeFuture", activeFuture);
        MethodMessage message = new FindOrderDetailsMessage(params, orderService);

        // Enqueue the message for asynchronous execution.
        activeMessageQueue.offer(message);

        return activeFuture;
    }

    @Override
    public void order(String account, long orderId) {
        // Fire-and-forget: capture the arguments, wrap them in a
        // MethodMessage and offer it to the queue.
        Map<String, Object> params = new HashMap<>();
        params.put("account", account);
        params.put("orderId", orderId);

        MethodMessage message = new OrderMessage(params, orderService);
        activeMessageQueue.offer(message);
    }
}
import imghdr
import os


def calc_type(path='D:\\tmp\\images'):
    """Walk a two-level image directory and delete non-JPEG/PNG files.

    Expected layout: ``path/<class_name>/<image_file>``.

    Args:
        path (str): Root directory containing one sub-directory per image
            class. Defaults to the original hard-coded location so existing
            callers keep working; paths are now joined with ``os.path.join``
            instead of hard-coded ``'\\\\'`` separators, so this also works
            outside Windows.

    Returns:
        int: Number of files removed.
    """
    removed = 0
    for image_class in os.listdir(path):
        class_dir = os.path.join(path, image_class)
        for image_name in os.listdir(class_dir):
            image_path = os.path.join(class_dir, image_name)
            # imghdr sniffs the file's magic bytes; anything that is not a
            # JPEG or PNG (including unrecognized junk, for which imghdr
            # returns None) is deleted and logged.
            kind = imghdr.what(image_path)
            if kind != 'jpeg' and kind != 'png':
                os.remove(image_path)
                print(image_path)
                removed += 1
    print(removed)
    return removed


if __name__ == '__main__':
    calc_type()
// Entry point: load the application module and start the HTTP server.
require('./server/app').start();
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.carbondata.datamap.bloom;

import java.io.IOException;
import java.util.List;

import org.apache.carbondata.common.annotations.InterfaceAudience;
import org.apache.carbondata.core.datamap.Segment;
import org.apache.carbondata.core.datamap.dev.IndexBuilder;
import org.apache.carbondata.core.datastore.block.SegmentProperties;
import org.apache.carbondata.core.metadata.datatype.DataTypes;
import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
import org.apache.carbondata.core.util.CarbonUtil;
import org.apache.carbondata.core.util.DataTypeUtil;

/**
 * Implementation for BloomFilter Index to rebuild the datamap for main table with existing data.
 * Rows are fed in via {@link #addRow}; filters are flushed to the datamap file once per blocklet
 * and on {@link #finish()}.
 */
@InterfaceAudience.Internal
public class BloomIndexBuilder extends AbstractBloomIndexWriter implements IndexBuilder {

  BloomIndexBuilder(String tablePath, String dataMapName, List<CarbonColumn> indexColumns,
      Segment segment, String shardName, SegmentProperties segmentProperties,
      int bloomFilterSize, double bloomFilterFpp, boolean bloomCompress) throws IOException {
    super(tablePath, dataMapName, indexColumns, segment, shardName, segmentProperties,
        bloomFilterSize, bloomFilterFpp, bloomCompress);
  }

  // Reset the inherited bloom filters before a rebuild starts.
  @Override
  public void initialize() {
    super.resetBloomFilters();
  }

  // Feed one row into the bloom filters. A change in blockletId means the
  // previous blocklet is complete, so its filters are flushed first.
  @Override
  public void addRow(int blockletId, int pageId, int rowId, Object[] values) {
    if (currentBlockletId != blockletId) {
      // new blocklet started, flush bloom filter to datamap file
      super.writeBloomDataMapFile();
      currentBlockletId = blockletId;
    }
    // for each indexed column, add the data to bloom filter
    for (int i = 0; i < indexColumns.size(); i++) {
      Object data = values[i];
      addValue2BloomIndex(i, data);
    }
  }

  // Serialize a no-dictionary value to bytes for the bloom filter.
  @Override
  protected byte[] convertNonDictionaryValue(int indexColIdx, Object value) {
    // no dictionary measure columns will be of original data, so convert it to bytes
    if (DataTypeUtil.isPrimitiveColumn(indexColumns.get(indexColIdx).getDataType())) {
      return CarbonUtil.getValueAsBytes(indexColumns.get(indexColIdx).getDataType(), value);
    }
    return (byte[]) value;
  }

  // Flush any pending filters and release resources exactly once.
  @Override
  public void finish() {
    if (!isWritingFinished()) {
      if (indexBloomFilters.size() > 0) {
        writeBloomDataMapFile();
      }
      releaseResouce();
      setWritingFinished(true);
    }
  }

  @Override
  protected byte[] convertDictionaryValue(int indexColIdx, Object value) {
    // input value from IndexDataMapRebuildRDD is already decoded as surrogate key
    return CarbonUtil.getValueAsBytes(DataTypes.INT, value);
  }

  // NOTE(review): releaseResouce() (sic) is inherited; the typo cannot be
  // fixed here without touching the parent class.
  @Override
  public void close() {
    releaseResouce();
  }

  @Override
  public boolean isIndexForCarbonRawBytes() {
    return true;
  }
}
/*
 * Copyright The Stargate Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.stargate.graphql.schema.graphqlfirst.fetchers.admin;

import graphql.schema.DataFetchingEnvironment;
import io.stargate.auth.Scope;
import io.stargate.auth.SourceAPI;
import io.stargate.auth.entity.ResourceKind;
import io.stargate.db.schema.Keyspace;
import io.stargate.graphql.persistence.graphqlfirst.SchemaSourceDao;
import io.stargate.graphql.schema.CassandraFetcher;
import io.stargate.graphql.web.StargateGraphqlContext;
import java.util.UUID;

/**
 * Fetcher for the admin "undeploy schema" mutation: removes the GraphQL-first
 * schema previously deployed to a keyspace, after validating the keyspace and
 * the caller's authorization.
 */
public class UndeploySchemaFetcher extends CassandraFetcher<Boolean> {

  /**
   * Validates the target keyspace, checks schema-write authorization, then
   * undeploys the schema source at the expected version.
   *
   * @return true on success; failures surface as exceptions
   */
  @Override
  protected Boolean get(DataFetchingEnvironment environment, StargateGraphqlContext context)
      throws Exception {

    // The keyspace must exist before anything else is attempted.
    String keyspaceName = environment.getArgument("keyspace");
    Keyspace keyspace = context.getDataStore().schema().keyspace(keyspaceName);
    if (keyspace == null) {
      throw new IllegalArgumentException(
          String.format("Keyspace '%s' does not exist.", keyspaceName));
    }

    // The caller needs MODIFY rights on the keyspace's schema.
    context
        .getAuthorizationService()
        .authorizeSchemaWrite(
            context.getSubject(),
            keyspaceName,
            null,
            Scope.MODIFY,
            SourceAPI.GRAPHQL,
            ResourceKind.KEYSPACE);

    // `force` lets the DAO undeploy even when the version check would fail.
    UUID expectedVersion = getExpectedVersion(environment);
    boolean force = environment.getArgument("force");
    new SchemaSourceDao(context.getDataStore()).undeploy(keyspaceName, expectedVersion, force);
    return true;
  }

  /**
   * Parses the 'expectedVersion' argument into a UUID.
   * Unlike deploy, the field is mandatory.
   *
   * @throws IllegalArgumentException if the value is missing or not a valid UUID
   */
  private UUID getExpectedVersion(DataFetchingEnvironment environment) {
    try {
      return UUID.fromString(environment.getArgument("expectedVersion"));
    } catch (IllegalArgumentException e) {
      throw new IllegalArgumentException("Invalid 'expectedVersion' value.");
    }
  }
}
import OnboardingComponent from 'react/components/Onboarding';

// Express-style middleware: server-side render the onboarding component via
// the request-scoped Apollo renderer, stash the resulting markup on
// res.locals for the downstream view, and forward any rendering error to the
// error-handling chain via next(err).
export default (req, res, next) => req.apollo.render(OnboardingComponent, {})
  .then((onboardingComponent) => {
    res.locals.onboardingComponent = onboardingComponent;
    next();
  })
  .catch(next);
// Create embedza instance for album's video links. Add templates for item preview (on album page). // // - data.embedza (Embedza) // 'use strict'; const Embedza = require('embedza'); const embedza_pkg = require('embedza/package.json'); const templates = require('embedza/lib/templates'); module.exports = function (N, apiPath) { let rootUrl = (N.config.bind?.default?.mount || 'http://localhost') + '/'; let userAgentEmbedza = `${embedza_pkg.name}/${embedza_pkg.version} (Nodeca; +${rootUrl})`; templates['default_thumb_url'] = result => { let thumbnail = result.snippets.find(snippet => snippet.tags.indexOf('thumbnail') !== -1); return thumbnail.href; }; templates['vimeo.com_thumb_url'] = result => { let thumbnail = result.snippets.find(snippet => snippet.tags.indexOf('thumbnail') !== -1); return thumbnail.href.replace(/_[0-9]+\.jpg$/, '_200.jpg'); }; templates['youtube.com_thumb_url'] = result => { let thumbnail = result.snippets.find(snippet => snippet.tags.indexOf('thumbnail') !== -1); return thumbnail.href.replace('hqdefault.jpg', 'mqdefault.jpg'); }; let instance = new Embedza({ cache: N.models.core.EmbedzaCache, enabledProviders: N.config.album.embed, request: { headers: { 'user-agent': userAgentEmbedza } } }); // Convert embedza async methods to promise // instance.render = instance.render; instance.info = instance.info; N.wire.on(apiPath, function create_embedza_for_albums(data) { data.embedza = instance; }); };
import { Meteor } from 'meteor/meteor';
import { check } from 'meteor/check';
import { Subscriptions } from '../../../models';

Meteor.methods({
	// Block a user within a room.
	// rid: room id; blocked: id of the user to block.
	// Returns true on success; throws Meteor.Error otherwise.
	blockUser({ rid, blocked }) {
		check(rid, String);
		check(blocked, String);

		// Caller must be logged in.
		if (!Meteor.userId()) {
			throw new Meteor.Error('error-invalid-user', 'Invalid user', { method: 'blockUser' });
		}

		// Both the caller and the target must be subscribed to the room.
		const subscription = Subscriptions.findOneByRoomIdAndUserId(rid, Meteor.userId());
		const subscription2 = Subscriptions.findOneByRoomIdAndUserId(rid, blocked);

		if (!subscription || !subscription2) {
			throw new Meteor.Error('error-invalid-room', 'Invalid room', { method: 'blockUser' });
		}

		// Record the block on the room's subscriptions.
		Subscriptions.setBlockedByRoomId(rid, blocked, Meteor.userId());

		return true;
	},
});
#!/usr/bin/env bash

# Copyright 2020 Antrea Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The script runs kind e2e tests with different traffic encapsulation modes.

set -eo pipefail

function echoerr {
    >&2 echo "$@"
}

# Fixed synopsis: it previously advertised [--np] although only --no-np is
# accepted, and omitted --proxy-all, --endpointslice and --skip.
_usage="Usage: $0 [--encap-mode <mode>] [--ip-family <v4|v6>] [--no-proxy] [--proxy-all] [--endpointslice] [--no-np] [--skip <keywords>] [--coverage] [--help|-h]
        --encap-mode                  Traffic encapsulation mode. (default is 'encap').
        --ip-family                   Configures the ipFamily for the KinD cluster.
        --no-proxy                    Disables Antrea proxy.
        --proxy-all                   Enables Antrea proxy with all Service support.
        --endpointslice               Enables Antrea proxy and EndpointSlice support.
        --no-np                       Disables Antrea-native policies.
        --skip                        A comma-separated list of keywords, with which tests should be skipped.
        --coverage                    Enables measure Antrea code coverage when run e2e tests on kind.
        --help, -h                    Print this message and exit.
"

function print_usage {
    echoerr "$_usage"
}

# Helper scripts: cluster lifecycle and manifest generation.
TESTBED_CMD=$(dirname $0)"/kind-setup.sh"
YML_CMD=$(dirname $0)"/../../hack/generate-manifest.sh"
FLOWAGGREGATOR_YML_CMD=$(dirname $0)"/../../hack/generate-manifest-flow-aggregator.sh"

# On abnormal exit, tear the kind testbed down so nothing leaks.
function quit {
  if [[ $? != 0 ]]; then
    echoerr " Test failed cleaning testbed"
    $TESTBED_CMD destroy kind
  fi
}
trap "quit" INT EXIT

# Option defaults.
mode=""
ipfamily="v4"
proxy=true
proxy_all=false
endpointslice=false
np=true
coverage=false
skiplist=""

while [[ $# -gt 0 ]]
do
key="$1"

case $key in
    --no-proxy)
    proxy=false
    shift
    ;;
    --proxy-all)
    proxy_all=true
    shift
    ;;
    --ip-family)
    ipfamily="$2"
    shift 2
    ;;
    --endpointslice)
    endpointslice=true
    shift
    ;;
    --no-np)
    np=false
    shift
    ;;
    --skip)
    skiplist="$2"
    shift 2
    ;;
    --encap-mode)
    mode="$2"
    shift 2
    ;;
    --coverage)
    coverage=true
    shift
    ;;
    -h|--help)
    print_usage
    exit 0
    ;;
    *)    # unknown option
    echoerr "Unknown option $1"
    exit 1
    ;;
esac
done

# Translate the parsed options into manifest-generator flags.
manifest_args=""
if ! $proxy; then
    manifest_args="$manifest_args --no-proxy"
fi
if $proxy_all; then
    if ! $proxy; then
        echoerr "--proxy-all requires AntreaProxy, so it cannot be used with --no-proxy"
        exit 1
    fi
    manifest_args="$manifest_args --proxy-all"
fi
if $endpointslice; then
    manifest_args="$manifest_args --endpointslice"
fi
if ! $np; then
    manifest_args="$manifest_args --no-np"
fi

# Images required by the e2e tests; pulled up-front with retries.
COMMON_IMAGES_LIST=("k8s.gcr.io/e2e-test-images/agnhost:2.29" \
                    "projects.registry.vmware.com/library/busybox" \
                    "projects.registry.vmware.com/antrea/nginx" \
                    "projects.registry.vmware.com/antrea/perftool" \
                    "projects.registry.vmware.com/antrea/ipfix-collector:v0.5.11" \
                    "projects.registry.vmware.com/antrea/wireguard-go:0.0.20210424")
for image in "${COMMON_IMAGES_LIST[@]}"; do
    # Retry each pull up to 3 times to ride out transient registry failures.
    for i in `seq 3`; do
        docker pull $image && break
        sleep 1
    done
done

if $coverage; then
    manifest_args="$manifest_args --coverage"
    COMMON_IMAGES_LIST+=("antrea/antrea-ubuntu-coverage:latest")
    COMMON_IMAGES_LIST+=("antrea/flow-aggregator-coverage:latest")
else
    COMMON_IMAGES_LIST+=("projects.registry.vmware.com/antrea/antrea-ubuntu:latest")
    COMMON_IMAGES_LIST+=("projects.registry.vmware.com/antrea/flow-aggregator:latest")
fi
if $proxy_all; then
    COMMON_IMAGES_LIST+=("k8s.gcr.io/echoserver:1.10")
fi

printf -v COMMON_IMAGES "%s " "${COMMON_IMAGES_LIST[@]}"

# Create a kind cluster for the given encapsulation mode, deploy the Antrea
# and Flow Aggregator manifests into it, run the Go e2e suite, then destroy
# the cluster.
function run_test {
  current_mode=$1
  args=$2

  if [[ "$ipfamily" == "v6" ]]; then
    args="$args --ip-family ipv6 --pod-cidr fd00:10:244::/56"
  elif [[ "$ipfamily" != "v4" ]]; then
    echoerr "invalid value for --ip-family \"$ipfamily\", expected \"v4\" or \"v6\""
    exit 1
  fi
  if $proxy_all; then
      args="$args --no-kube-proxy"
  fi

  echo "creating test bed with args $args"
  eval "timeout 600 $TESTBED_CMD create kind $args"

  # Generate manifests directly inside the control-plane container.
  if $coverage; then
      $YML_CMD --kind --encap-mode $current_mode $manifest_args | docker exec -i kind-control-plane dd of=/root/antrea-coverage.yml
      $YML_CMD --kind --encap-mode $current_mode --wireguard-go $manifest_args | docker exec -i kind-control-plane dd of=/root/antrea-wireguard-go-coverage.yml
      $FLOWAGGREGATOR_YML_CMD --coverage | docker exec -i kind-control-plane dd of=/root/flow-aggregator-coverage.yml
  else
      $YML_CMD --kind --encap-mode $current_mode $manifest_args | docker exec -i kind-control-plane dd of=/root/antrea.yml
      $YML_CMD --kind --encap-mode $current_mode --wireguard-go $manifest_args | docker exec -i kind-control-plane dd of=/root/antrea-wireguard-go.yml
      $FLOWAGGREGATOR_YML_CMD | docker exec -i kind-control-plane dd of=/root/flow-aggregator.yml
  fi

  # Without kube-proxy, Antrea must be pointed straight at the apiserver.
  if $proxy_all; then
      apiserver=$(docker exec -i kind-control-plane kubectl get endpoints kubernetes --no-headers | awk '{print $2}')
      if $coverage; then
          docker exec -i kind-control-plane sed -i.bak -E "s/^[[:space:]]*#kubeAPIServerOverride[[:space:]]*:[[:space:]]*[a-z\"]+[[:space:]]*$/    kubeAPIServerOverride: \"$apiserver\"/" /root/antrea-coverage.yml
          docker exec -i kind-control-plane sed -i.bak -E "s/^[[:space:]]*#kubeAPIServerOverride[[:space:]]*:[[:space:]]*[a-z\"]+[[:space:]]*$/    kubeAPIServerOverride: \"$apiserver\"/" /root/antrea-wireguard-go-coverage.yml
      else
          docker exec -i kind-control-plane sed -i.bak -E "s/^[[:space:]]*#kubeAPIServerOverride[[:space:]]*:[[:space:]]*[a-z\"]+[[:space:]]*$/    kubeAPIServerOverride: \"$apiserver\"/" /root/antrea.yml
          docker exec -i kind-control-plane sed -i.bak -E "s/^[[:space:]]*#kubeAPIServerOverride[[:space:]]*:[[:space:]]*[a-z\"]+[[:space:]]*$/    kubeAPIServerOverride: \"$apiserver\"/" /root/antrea-wireguard-go.yml
      fi
  fi

  sleep 1

  if $coverage; then
      go test -v -timeout=70m antrea.io/antrea/test/e2e -provider=kind --logs-export-dir=$ANTREA_LOG_DIR --coverage --coverage-dir $ANTREA_COV_DIR --skip=$skiplist
  else
      go test -v -timeout=65m antrea.io/antrea/test/e2e -provider=kind --logs-export-dir=$ANTREA_LOG_DIR --skip=$skiplist
  fi

  $TESTBED_CMD destroy kind
}

# Empty --encap-mode means "run all three modes".
if [[ "$mode" == "" ]] || [[ "$mode" == "encap" ]]; then
  echo "======== Test encap mode =========="
  run_test encap "--images \"$COMMON_IMAGES\""
fi
if [[ "$mode" == "" ]] || [[ "$mode" == "noEncap" ]]; then
  echo "======== Test noencap mode =========="
  run_test noEncap "--images \"$COMMON_IMAGES\""
fi
if [[ "$mode" == "" ]] || [[ "$mode" == "hybrid" ]]; then
  echo "======== Test hybrid mode =========="
  run_test hybrid "--subnets \"20.20.20.0/24\" --images \"$COMMON_IMAGES\""
fi

exit 0
# -*- coding: utf-8 -*- """ Assessments This module currently contains 2 types of Assessments * Flexible Impact Assessments (including Mobile access) * Rapid Assessment Tool (from ECB: http://www.ecbproject.org/page/48) @ToDo: Migrate this to a Template in the Survey module @ToDo Validation similar to sitrep_school_report_onvalidation() http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/annotate/head:/models/sitrep.py#L99 It also contains some Baseline Data: * Populations http://eden.sahanafoundation.org/wiki/BluePrintBaselineData """ module = request.controller resourcename = request.function if not settings.has_module(module): raise HTTP(404, body="Module disabled: %s" % module) # ----------------------------------------------------------------------------- # Define the Model # @ToDo: Move to modules/s3db/assess.py # - here it isn't visible to s3db.load_all_models() or Sync # ----------------------------------------------------------------------------- assess_severity_opts = { 0: T("Low"), 1: T("Medium"), 2: T("High"), 3: T("Very High"), } assess_colour_opts = { 0:"green", 1:"yellow", 2:"orange", 3:"red" } def s3_assess_severity_represent(value): if value: return IMG(_src="/%s/static/img/%s_circle_16px.png" % (appname, assess_colour_opts[value]), _alt= value, _align="middle" ) else: return NONE repr_select = lambda l: len(l.name) > 48 and "%s..." 
% l.name[:44] or l.name S3Represent = s3base.S3Represent add_components = s3db.add_components configure = s3db.configure crud_strings = s3.crud_strings define_table = db.define_table location_id = s3db.gis_location_id person_id = s3db.pr_person_id organisation_id = s3db.org_organisation_id organisation_represent = s3db.org_organisation_represent sector_id = s3db.org_sector_id human_resource_id = s3db.hrm_human_resource_id ireport_id = s3db.irs_ireport_id # Impact as component of assessments add_components("assess_assess", impact_impact="assess_id") def assess_tables(): """ Load the Assess Tables when needed """ module = "assess" # ========================================================================= # Flexible Impact Assessments # ========================================================================= # Assessment # resourcename = "assess" tablename = "assess_assess" define_table(tablename, Field("datetime", "datetime", label = T("Date & Time"), default = request.utcnow), location_id(widget = S3LocationAutocompleteWidget(), requires = IS_LOCATION()), organisation_id(widget = S3OrganisationAutocompleteWidget(default_from_profile=True)), person_id("assessor_person_id", label = T("Assessor"), default = s3_logged_in_person()), s3_comments(), ireport_id(), # Assessment can be linked to an Incident Report *s3_meta_fields()) assess_id = S3ReusableField("assess_id", "reference %s" % tablename, requires = IS_NULL_OR( IS_ONE_OF(db, "assess_assess.id", "%(id)s") ), represent = lambda id: id, label = T("Assessment"), ondelete = "RESTRICT") # CRUD strings ADD_ASSESSMENT = T("Add Assessment") crud_strings[tablename] = Storage( label_create = ADD_ASSESSMENT, title_display = T("Assessment Details"), title_list = T("Assessments"), title_update = T("Edit Assessment"), label_list_button = T("List Assessments"), label_delete_button = T("Delete Assessment"), msg_record_created = T("Assessment added"), msg_record_modified = T("Assessment updated"), msg_record_deleted = 
T("Assessment deleted"), msg_list_empty = T("No Assessments currently registered"), name_nice = T("Assessment"), name_nice_plural = T("Assessments")) # assess_assess as component of org_organisation add_components("org_organisation", assess_assess="organisation_id") # Hide Add Assessment functionality. Users should only add assessments # through the Basic Assessment. configure(tablename, insertable=False) # ========================================================================= # Baseline Type # tablename = "assess_baseline_type" define_table(tablename, Field("name", length=128, notnull=True, unique=True), *s3_meta_fields()) # CRUD strings ADD_BASELINE_TYPE = T("Add Baseline Type") crud_strings[tablename] = Storage( label_create = ADD_BASELINE_TYPE, title_display = T("Baseline Type Details"), title_list = T("Baseline Types"), title_update = T("Edit Baseline Type"), label_list_button = T("List Baseline Types"), label_delete_button = T("Delete Baseline Type"), msg_record_created = T("Baseline Type added"), msg_record_modified = T("Baseline Type updated"), msg_record_deleted = T("Baseline Type deleted"), msg_list_empty = T("No Baseline Types currently registered"), name_nice = T("Baseline Type"), name_nice_plural = T("Baseline Types")) def baseline_type_comment(): # ToDo: Is this membership check required? 
if auth.has_membership(auth.id_group("'Administrator'")): return S3AddResourceLink(c="assess", f="baseline_type", label=ADD_BASELINE_TYPE) else: return None represent = S3Represent(tablename) baseline_type_id = S3ReusableField("baseline_type_id", "reference %s" % tablename, sortby="name", requires = IS_NULL_OR(IS_ONE_OF(db, "assess_baseline_type.id", represent, sort=True)), represent = represent, label = T("Baseline Type"), comment = baseline_type_comment(), ondelete = "RESTRICT" ) # ========================================================================= # Baseline # tablename = "assess_baseline" define_table(tablename, # Hide FK fields in forms assess_id(readable = False, writable = False), baseline_type_id(), Field("value", "double"), s3_comments(), *s3_meta_fields()) # CRUD strings ADD_BASELINE = T("Add Baseline") crud_strings[tablename] = Storage( label_create = ADD_BASELINE, title_display = T("Baselines Details"), title_list = T("Baselines"), title_update = T("Edit Baseline"), label_list_button = T("List Baselines"), label_delete_button = T("Delete Baseline"), msg_record_created = T("Baseline added"), msg_record_modified = T("Baseline updated"), msg_record_deleted = T("Baseline deleted"), msg_list_empty = T("No Baselines currently registered"), name_nice = T("Baseline"), name_nice_plural = T("Baselines")) # Baseline as component of assessments add_components("assess_assess", assess_baseline="assess_id") # ========================================================================= # Summary # tablename = "assess_summary" define_table(tablename, assess_id(readable = False, writable = False), sector_id(), #Field("value", "double"), Field("value", "integer", default = 0, label = T("Severity"), requires = IS_EMPTY_OR(IS_IN_SET(assess_severity_opts)), widget = SQLFORM.widgets.radio.widget, represent = s3_assess_severity_represent), s3_comments(), *s3_meta_fields()) # CRUD strings ADD_ASSESS_SUMMARY = T("Add Assessment Summary") crud_strings[tablename] = Storage( 
label_create = ADD_ASSESS_SUMMARY, title_display = T("Assessment Summary Details"), title_list = T("Assessment Summaries"), title_update = T("Edit Assessment Summary"), label_list_button = T("List Assessment Summaries"), label_delete_button = T("Delete Assessment Summary"), msg_record_created = T("Assessment Summary added"), msg_record_modified = T("Assessment Summary updated"), msg_record_deleted = T("Assessment Summary deleted"), msg_list_empty = T("No Assessment Summaries currently registered"), name_nice = T("Assessment"), name_nice_plural = T("Assessments")) # Summary as component of assessments add_components("assess_assess", assess_summary="assess_id") # Pass variables back to global scope (response.s3.*) return dict( assess_id = assess_id ) # ========================================================================= # Rapid Assessment Tool # ========================================================================= def rat_tables(): """ Load the RAT Tables when needed """ module = "assess" # Load the models we depend on if settings.has_module("cr"): shelter_id = s3db.shelter_id if settings.has_module("hrm"): human_resource_id = s3db.hrm_human_resource_id else: human_resource_id = s3db.pr_person_id # Section CRUD strings ADD_SECTION = T("Add Section") rat_section_crud_strings = Storage( label_create = ADD_SECTION, title_display = T("Section Details"), title_list = T("Sections"), title_update = "", label_list_button = T("List Sections"), label_delete_button = T("Delete Section"), msg_record_created = T("Section updated"), msg_record_modified = T("Section updated"), msg_record_deleted = T("Section deleted"), msg_list_empty = T("No Sections currently registered"), name_nice = T("Search"), name_nice_plural = T("Searches")) # ------------------------------------------------------------------------- # Common options rat_walking_time_opts = { 1: T("0-15 minutes"), 2: T("15-30 minutes"), 3: T("30-60 minutes"), 4: T("over one hour"), 999: NOT_APPLICABLE } # 
# -------------------------------------------------------------------------
# Helper functions
def rat_represent_multiple(set, opt):
    """
        Represent an IS_IN_SET with multiple=True as a
        comma-separated list of options

        @param set: the options set as dict
        @param opt: the selected option(s) - a list/tuple of keys,
                    or a single key
        @returns: string representation of the selected option(s),
                  or T("None") if opt is neither list/tuple nor int
    """
    if isinstance(opt, (list, tuple)):
        opts = opt
        vals = [str(set.get(o, o)) for o in opts]
    #elif isinstance(opt, basestring):
    #    opts = opt.split("|")
    #    vals = [str(set.get(int(o), o)) for o in opts if o]
    elif isinstance(opt, int):
        opts = [opt]
        # Bug fix: wrap the representation in a list - the single-value
        # branch below indexes vals[0], which previously returned just
        # the first character of the label string
        vals = [str(set.get(opt, opt))]
    else:
        return T("None")
    if len(opts) > 1:
        vals = ", ".join(vals)
    else:
        vals = len(vals) and vals[0] or ""
    return vals

def rat_tooltip(tooltip, multiple=False):
    """
        Prepare a tooltip DIV for use as a Field comment

        @param tooltip: the tooltip text (will be translated)
        @param multiple: prepend a "Select all that apply" hint
    """
    if multiple:
        comment = DIV("(%s)" % T("Select all that apply"),
                      DIV(_class="tooltipbody",
                          _title="|%s" % T(tooltip)))
    else:
        comment = DIV(DIV(_class="tooltipbody",
                          _title="|%s" % T(tooltip)))
    return comment

def rat_label_and_tooltip(label, tooltip, multiple=False):
    """
        Prepare label + tooltip for a field, suitable for use as
        **kwargs in a Field() constructor

        @param label: the field label (will be translated)
        @param tooltip: the tooltip text (will be translated)
        @param multiple: prepend a "Select all that apply" hint
        @returns: dict with "label" and "comment" keys
    """
    label = T(label)
    if multiple:
        comment = DIV("(%s)" % T("Select all that apply"),
                      DIV(_class="tooltip",
                          _title="%s|%s" % (T(label), T(tooltip))))
    else:
        comment = DIV(DIV(_class="tooltip",
                          _title="%s|%s" % (T(label), T(tooltip))))
    return {"label": label, "comment": comment}

# Where the interview took place (Section 1)
rat_interview_location_opts = {
    1:T("Village"),
    2:T("Urban area"),
    3:T("Collective center"),
    4:T("Informal camp"),
    5:T("Formal camp"),
    6:T("School"),
    7:T("Mosque"),
    8:T("Church"),
    99:T("Other")
}

# Who was interviewed (Section 1)
rat_interviewee_opts = {
    1:T("Male"),
    2:T("Female"),
    3:T("Village Leader"),
    4:T("Informal Leader"),
    5:T("Community Member"),
    6:T("Religious Leader"),
    7:T("Police"),
    8:T("Healthcare Worker"),
    9:T("School Teacher"),
    10:T("Womens Focus Groups"),
    11:T("Child (< 18 yrs)"),
    99:T("Other")
}

# How the affected location can be reached (Section 1)
rat_accessibility_opts = {
    1:T("2x4 Car"),
    2:T("4x4 Car"),
    3:T("Truck"),
    4:T("Motorcycle"),
    5:T("Boat"),
    6:T("Walking Only"),
    7:T("No access at all"),
    99:T("Other")
}

# Main Resource -----------------------------------------------------------
# contains Section 1: Identification Information
#
tablename = "assess_rat"
define_table(tablename,
             Field("date", "date",
                   requires = [IS_DATE(format = settings.get_L10n_date_format()),
                               IS_NOT_EMPTY()],
                   default = datetime.datetime.today()),
             location_id(widget = S3LocationAutocompleteWidget(),
                         requires = IS_LOCATION()),
             human_resource_id("staff_id", label=T("Staff")),
             human_resource_id("staff2_id", label=T("Staff2")),
             Field("interview_location", "list:integer",
                   label = T("Interview taking place at"),
                   requires = IS_NULL_OR(IS_IN_SET(rat_interview_location_opts,
                                                   multiple=True,
                                                   zero=None)),
                   #widget = SQLFORM.widgets.checkboxes.widget,
                   represent = lambda opt, set=rat_interview_location_opts: \
                               rat_represent_multiple(set, opt),
                   comment = "(%s)" % T("Select all that apply")),
             Field("interviewee", "list:integer",
                   label = T("Person interviewed"),
                   requires = IS_NULL_OR(IS_IN_SET(rat_interviewee_opts,
                                                   multiple=True,
                                                   zero=None)),
                   #widget = SQLFORM.widgets.checkboxes.widget,
                   represent = lambda opt, set=rat_interviewee_opts: \
                               rat_represent_multiple(set, opt),
                   comment = "(%s)" % T("Select all that apply")),
             Field("accessibility", "integer",
                   label = T("Accessibility of Affected Location"),
                   requires = IS_NULL_OR(IS_IN_SET(rat_accessibility_opts,
                                                   zero=None)),
                   represent = lambda opt: rat_accessibility_opts.get(opt, opt)),
             s3_comments(),
             #document_id(), # Better to have multiple Documents on a Tab
             s3db.shelter_id(),
             *s3_meta_fields())

# CRUD strings
ADD_ASSESSMENT = T("Add Rapid Assessment")
crud_strings[tablename] = Storage(
    label_create = ADD_ASSESSMENT,
    title_display = T("Rapid Assessment Details"),
    title_list = T("Rapid Assessments"),
    title_update = T("Edit Rapid Assessment"),
    label_list_button = T("List Rapid Assessments"),
    label_delete_button = T("Delete Rapid Assessment"),
    msg_record_created = T("Rapid Assessment added"),
    msg_record_modified = T("Rapid Assessment updated"),
    msg_record_deleted = T("Rapid Assessment deleted"),
    msg_list_empty = T("No Rapid Assessments currently registered"),
    name_nice = T("Rapid Assessment"),
    name_nice_plural = T("Rapid Assessments"))

# -------------------------------------------------------------------------
def rat_assessment_onaccept(form):
    """
        On-accept for assess_rat: ensure that each new assessment has
        exactly one record in each of the section tables 2-9

        @param form: the FORM containing the accepted record's vars
    """
    record_id = form.vars.get("id", None)
    if record_id:
        # Sections 2-9 are subtables keyed on assessment_id
        for x in xrange(2, 10):
            section = "assess_section%s" % x
            rows = db(db[section].assessment_id == record_id)
            record = rows.select(db[section].id, limitby=(0, 1)).first()
            if not record:
                db[section].insert(assessment_id=record_id)

# -------------------------------------------------------------------------
def rat_represent(id):
    """ Represent assessment as string """
    table = db.assess_rat
    row = db(table.id == id).select(table.date,
                                    table.staff_id,
                                    table.staff2_id,
                                    table.location_id,
                                    limitby = (0, 1)).first()
    if row:
        date = row.date and str(row.date) or ""
        location = row.location_id and s3db.gis_LocationRepresent()(row.location_id) or ""
        table = db.org_staff
        org = ["", ""]
        i = 0
        for staff_id in [row.staff_id, row.staff2_id]:
            if staff_id:
                staff = db(table.id == staff_id).select(table.organisation_id,
                                                        limitby=(0, 1)).first()
                if staff:
                    # Bug fix: fill org[i] first, increment afterwards -
                    # previously this pre-incremented, so org[0] stayed
                    # empty and a second match raised IndexError (org[2])
                    org[i] = organisation_represent(staff.organisation_id)
                    i += 1
        assessment_represent = XML("<div>%s %s, %s %s</div>" % (location,
                                                                org[0],
                                                                org[1],
                                                                date))
    else:
        assessment_represent = NONE
    return assessment_represent

# -------------------------------------------------------------------------
# Re-usable field for other tables to reference an assessment
assessment_id = S3ReusableField("assessment_id", "reference %s" % tablename,
                                requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "assess_rat.id",
                                                          rat_represent,
                                                          orderby="assess_rat.id")
                                                ),
                                #represent = rat_represent,
                                readable = False,
                                writable = False,
                                #label = T("Rapid Assessment"),
                                #comment = A(ADD_ASSESSMENT,
                                #            _class="s3_add_resource_link",
                                #            _href=URL(c="assess", f="rat",
                                #                      args="create",
                                #                      vars=dict(format="popup")),
                                #            _target="top",
                                #            _title=ADD_ASSESSMENT),
                                ondelete = "RESTRICT")

# Assessment as component of cr_shelter.
# RAT has components itself, so best not to constrain within the parent resource tabs
# - therefore disable the listadd & jump out of the tabs for Create/Update
add_components("cr_shelter",
               assess_rat="shelter_id")

configure(tablename,
          # We override this in the RAT controller for when not a component
          listadd=False,
          onaccept=rat_assessment_onaccept)

# Section 2: Demographic --------------------------------------------------
#
tablename = "assess_section2"
define_table(tablename,
             assessment_id(),
             Field("population_total", "integer",
                   label = T("Total population of site visited"),
                   comment = T("people")),
             Field("households_total", "integer",
                   label = T("Total # of households of site visited"),
                   comment = T("households")),
             Field("population_affected", "integer",
                   label = T("Estimated # of people who are affected by the emergency"),
                   comment = T("people")),
             Field("households_affected", "integer",
                   label = T("Estimated # of households who are affected by the emergency"),
                   comment = T("households")),
             # Age/gender breakdown (absolute numbers or percentages)
             Field("male_05", "double",
                   label = T("Number/Percentage of affected population that is Male & Aged 0-5")),
             Field("male_612", "double",
                   label = T("Number/Percentage of affected population that is Male & Aged 6-12")),
             Field("male_1317", "double",
                   label = T("Number/Percentage of affected population that is Male & Aged 13-17")),
             Field("male_1825", "double",
                   label = T("Number/Percentage of affected population that is Male & Aged 18-25")),
             Field("male_2660", "double",
                   label = T("Number/Percentage of affected population that is Male & Aged 26-60")),
             Field("male_61", "double",
                   label = T("Number/Percentage of affected population that is Male & Aged 61+")),
             Field("female_05", "double",
                   label = T("Number/Percentage of affected population that is Female & Aged 0-5")),
             Field("female_612", "double",
                   label = T("Number/Percentage of affected population that is Female & Aged 6-12")),
             Field("female_1317", "double",
                   label = T("Number/Percentage of affected population that is Female & Aged 13-17")),
             Field("female_1825", "double",
                   label = T("Number/Percentage of affected population that is Female & Aged 18-25")),
             Field("female_2660", "double",
                   label = T("Number/Percentage of affected population that is Female & Aged 26-60")),
             Field("female_61", "double",
                   label = T("Number/Percentage of affected population that is Female & Aged 61+")),
             # Casualty counts
             Field("dead_women", "integer",
                   label = T("How many Women (18 yrs+) are Dead due to the crisis"),
                   comment = T("people")), # @ToDo: Should this say "Number of people"?
             Field("dead_men", "integer",
                   label = T("How many Men (18 yrs+) are Dead due to the crisis"),
                   comment = T("people")),
             Field("dead_girl", "integer",
                   label = T("How many Girls (0-17 yrs) are Dead due to the crisis"),
                   comment = T("people")),
             Field("dead_boy", "integer",
                   label = T("How many Boys (0-17 yrs) are Dead due to the crisis"),
                   comment = T("people")),
             Field("injured_women", "integer",
                   label = T("How many Women (18 yrs+) are Injured due to the crisis"),
                   comment = T("people")),
             Field("injured_men", "integer",
                   label = T("How many Men (18 yrs+) are Injured due to the crisis"),
                   comment = T("people")),
             Field("injured_girl", "integer",
                   label = T("How many Girls (0-17 yrs) are Injured due to the crisis"),
                   comment = T("people")),
             Field("injured_boy", "integer",
                   label = T("How many Boys (0-17 yrs) are Injured due to the crisis"),
                   comment = T("people")),
             Field("missing_women", "integer",
                   label = T("How many Women (18 yrs+) are Missing due to the crisis"),
                   comment = T("people")),
             Field("missing_men", "integer",
                   label = T("How many Men (18 yrs+) are Missing due to the crisis"),
                   comment = T("people")),
             Field("missing_girl", "integer",
                   label = T("How many Girls (0-17 yrs) are Missing due to the crisis"),
                   comment = T("people")),
             Field("missing_boy", "integer",
                   label = T("How many Boys (0-17 yrs) are Missing due to the crisis"),
                   comment = T("people")),
             # Vulnerable groups
             Field("household_head_elderly", "integer",
                   label = T("Elderly person headed households (>60 yrs)"),
                   comment = T("households")),
             Field("household_head_female", "integer",
                   label = T("Female headed households"),
                   comment = T("households")),
             Field("household_head_child", "integer",
                   label = T("Child headed households (<18 yrs)"),
                   comment = T("households")),
             Field("disabled_physical", "integer",
                   label = T("Persons with disability (physical)"),
                   comment = T("people")),
             Field("disabled_mental", "integer",
                   label = T("Persons with disability (mental)"),
                   comment = T("people")),
             Field("pregnant", "integer",
                   label = T("Pregnant women"),
                   comment = T("people")),
             Field("lactating", "integer",
                   label = T("Lactating women"),
                   comment = T("people")),
             Field("minorities", "integer",
                   label = T("Migrants or ethnic minorities"),
                   comment = T("people")),
             s3_comments(),
             *s3_meta_fields())

# CRUD strings
crud_strings[tablename] = rat_section_crud_strings

configure(tablename,
          deletable=False)

# Section 3: Shelter & Essential NFIs -------------------------------------
#
rat_houses_salvmat_types = {
    1: T("Wooden plank"),
    2: T("Zinc roof"),
    3: T("Bricks"),
    4: T("Wooden poles"),
    5: T("Door frame"),
    6: T("Window frame"),
    7: T("Roof tile"),
    999: NOT_APPLICABLE
}

rat_water_container_types = {
    1: T("Jerry can"),
    2: T("Bucket"),
    3: T("Water gallon"),
    99: T("Other (specify)")
}

tablename = "assess_section3"
define_table(tablename,
             assessment_id(),
             Field("houses_total", "integer",
                   label = T("Total number of houses in the area"),
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
                   ),
             Field("houses_destroyed", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
                   **rat_label_and_tooltip(
                       "Number of houses destroyed/uninhabitable",
                       "How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?")),
             Field("houses_damaged", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 99999999)),
                   **rat_label_and_tooltip(
                       "Number of houses damaged, but usable",
                       "How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?")),
             Field("houses_salvmat", "list:integer",
                   requires = IS_NULL_OR(IS_IN_SET(rat_houses_salvmat_types,
                                                   multiple=True,
                                                   zero=None)),
                   represent = lambda opt, set=rat_houses_salvmat_types: \
                               rat_represent_multiple(set, opt),
                   **rat_label_and_tooltip(
                       "Salvage material usable from destroyed houses",
                       "What type of salvage material can be used from destroyed houses?",
                       multiple=True)),
             Field("water_containers_available", "boolean",
                   **rat_label_and_tooltip(
                       "Water storage containers available for HH",
                       "Do households have household water storage containers?")),
             Field("water_containers_sufficient", "boolean",
                   **rat_label_and_tooltip(
                       "Water storage containers sufficient per HH",
                       "Do households each have at least 2 containers (10-20 litres each) to hold water?")),
             Field("water_containers_types", "list:integer",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_water_container_types,
                                                    zero=None,
                                                    multiple=True)),
                   represent = lambda opt, set=rat_water_container_types: \
                               rat_represent_multiple(set, opt),
                   **rat_label_and_tooltip(
                       "Types of water storage containers available",
                       "What types of household water storage containers are available?",
                       multiple=True)),
             Field("water_containers_types_other",
                   label = T("Other types of water storage containers")),
             Field("cooking_equipment_available", "boolean",
                   **rat_label_and_tooltip(
                       "Appropriate cooking equipment/materials in HH",
                       "Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?")),
             Field("sanitation_items_available", "boolean",
                   **rat_label_and_tooltip(
                       "Reliable access to sanitation/hygiene items",
                       "Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?")),
             Field("sanitation_items_available_women", "boolean",
                   **rat_label_and_tooltip(
                       "Easy access to sanitation items for women/girls",
                       "Do women and girls have easy access to sanitary materials?")),
             Field("bedding_materials_available", "boolean",
                   **rat_label_and_tooltip(
                       "Bedding materials available",
                       "Do households have bedding materials available (tarps, plastic mats, blankets)?")),
             Field("clothing_sets_available", "boolean",
                   **rat_label_and_tooltip(
                       "Appropriate clothing available",
                       "Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?")),
             Field("nfi_assistance_available", "boolean",
                   **rat_label_and_tooltip(
                       "Shelter/NFI assistance received/expected",
                       "Have households received any shelter/NFI assistance or is assistance expected in the coming days?")),
             Field("kits_hygiene_received", "boolean",
                   label = T("Hygiene kits received")),
             Field("kits_hygiene_source",
                   label = T("Hygiene kits, source")),
             Field("kits_household_received", "boolean",
                   label = T("Household kits received")),
             Field("kits_household_source",
                   label = T("Household kits, source")),
             # @ToDo: Better label, perhaps? A tarp isn't a dwelling.
             Field("kits_dwelling_received", "boolean",
                   label = T("Family tarpaulins received")),
             Field("kits_dwelling_source",
                   label = T("Family tarpaulins, source")),
             s3_comments(),
             *s3_meta_fields())

# CRUD strings
crud_strings[tablename] = rat_section_crud_strings

configure(tablename,
          deletable=False)

# Section 4 - Water and Sanitation ----------------------------------------
#
rat_water_source_types = {
    1: T("PDAM"),
    2: T("Dug Well"),
    3: T("Spring"),
    4: T("River"),
    5: T("Other Faucet/Piped Water"),
    99: T("Other (describe)"),
    999: NOT_APPLICABLE
}

rat_water_coll_person_opts = {
    1: T("Child"),
    2: T("Adult male"),
    3: T("Adult female"),
    4: T("Older person (>60 yrs)"),
    999: NOT_APPLICABLE
}

rat_defec_place_types = {
    1: T("open defecation"),
    2: T("pit"),
    3: T("latrines"),
    4: T("river"),
    99: T("other")
}

rat_defec_place_animals_opts = {
    1: T("enclosed area"),
    2: T("within human habitat"),
    999: NOT_APPLICABLE
}

rat_latrine_types = {
    1: T("flush latrine with septic tank"),
    2: T("pit latrine"),
    999: NOT_APPLICABLE
}

tablename = "assess_section4"
define_table(tablename,
             assessment_id(),
             Field("water_source_pre_disaster_type", "integer",
                   label = T("Type of water source before the disaster"),
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_water_source_types,
                                                    zero=None)),
                   represent = lambda opt: rat_water_source_types.get(opt,
                                                                     UNKNOWN_OPT)),
             Field("water_source_pre_disaster_description",
                   label = T("Description of water source before the disaster")),
             Field("dwater_source_type", "integer",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_water_source_types,
                                                    zero=None)),
                   represent = lambda opt: rat_water_source_types.get(opt,
                                                                     UNKNOWN_OPT),
                   **rat_label_and_tooltip(
                       "Current type of source for drinking water",
                       "What is your major source of drinking water?")),
             Field("dwater_source_description",
                   label = T("Description of drinking water source")),
             Field("dwater_reserve",
                   **rat_label_and_tooltip(
                       "How long will this water resource last?",
                       "Specify the minimum sustainability in weeks or days.")),
             Field("swater_source_type", "integer",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_water_source_types,
                                                    zero=None)),
                   represent = lambda opt: rat_water_source_types.get(opt,
                                                                     UNKNOWN_OPT),
                   **rat_label_and_tooltip(
                       "Current type of source for sanitary water",
                       "What is your major source of clean water for daily use (ex: washing, cooking, bathing)?")),
             Field("swater_source_description",
                   label = T("Description of sanitary water source")),
             Field("swater_reserve",
                   **rat_label_and_tooltip(
                       "How long will this water resource last?",
                       "Specify the minimum sustainability in weeks or days.")),
             Field("water_coll_time", "integer",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_walking_time_opts,
                                                    zero=None)),
                   represent = lambda opt: rat_walking_time_opts.get(opt,
                                                                    UNKNOWN_OPT),
                   **rat_label_and_tooltip(
                       "Time needed to collect water",
                       "How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.")),
             Field("water_coll_safe", "boolean",
                   label = T("Is it safe to collect water?"),
                   default = True),
             Field("water_coll_safety_problems",
                   label = T("If no, specify why")),
             Field("water_coll_person", "integer",
                   label = T("Who usually collects water for the family?"),
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_water_coll_person_opts,
                                                    zero=None)),
                   represent = lambda opt: rat_water_coll_person_opts.get(opt,
                                                                         UNKNOWN_OPT)),
             # NOTE(review): no explicit field type although the validator
             # allows multiple selections, and the represent assumes a
             # single key - verify the intended schema for this field
             Field("defec_place_type",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_defec_place_types,
                                                    zero=None,
                                                    multiple=True)),
                   represent = lambda opt: rat_defec_place_types.get(opt,
                                                                    UNKNOWN_OPT),
                   **rat_label_and_tooltip(
                       "Type of place for defecation",
                       "Where do the majority of people defecate?",
                       multiple=True)),
             Field("defec_place_description",
                   label = T("Description of defecation area")),
             Field("defec_place_distance", "integer",
                   label = T("Distance between defecation area and water source"),
                   comment = T("meters")),
             Field("defec_place_animals", "integer",
                   label = T("Defecation area for animals"),
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_defec_place_animals_opts,
                                                    zero = None)),
                   represent = lambda opt: rat_defec_place_animals_opts.get(opt,
                                                                           UNKNOWN_OPT)),
             Field("close_industry", "boolean",
                   **rat_label_and_tooltip(
                       "Industry close to village/camp",
                       "Is there any industrial or agro-chemical production close to the affected area/village?")),
             Field("waste_disposal",
                   **rat_label_and_tooltip(
                       "Place for solid waste disposal",
                       "Where is solid waste disposed in the village/camp?")),
             Field("latrines_number", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Number of latrines",
                       "How many latrines are available in the village/IDP centre/Camp?")),
             Field("latrines_type", "integer",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_latrine_types,
                                                    zero=None)),
                   represent = lambda opt: rat_latrine_types.get(opt,
                                                                UNKNOWN_OPT),
                   **rat_label_and_tooltip(
                       "Type of latrines",
                       "What type of latrines are available in the village/IDP centre/Camp?")),
             Field("latrines_separation", "boolean",
                   **rat_label_and_tooltip(
                       "Separate latrines for women and men",
                       "Are there separate latrines for women and men available?")),
             Field("latrines_distance", "integer",
                   **rat_label_and_tooltip(
                       "Distance between shelter and latrines",
                       "Distance between latrines and temporary shelter in meters")),
             s3_comments(),
             *s3_meta_fields())

# CRUD strings
crud_strings[tablename] = rat_section_crud_strings

configure(tablename,
          deletable=False)

# Section 5 - Health ------------------------------------------------------
#
rat_health_services_types = {
    1: T("Community Health Center"),
    2: T("Hospital")
}

rat_health_problems_opts = {
    1: T("Respiratory Infections"),
    2: T("Diarrhea"),
    3: T("Dehydration"),
    99: T("Other (specify)")
}

rat_infant_nutrition_alternative_opts = {
    1: T("Porridge"),
    2: T("Banana"),
    3: T("Instant Porridge"),
    4: T("Air tajin"),
    99: T("Other (specify)")
}

tablename = "assess_section5"
define_table(tablename,
             assessment_id(),
             Field("health_services_pre_disaster", "boolean",
                   **rat_label_and_tooltip(
                       "Health services functioning prior to disaster",
                       "Were there health services functioning for the community prior to the disaster?")),
             Field("medical_supplies_pre_disaster", "boolean",
                   **rat_label_and_tooltip(
                       "Basic medical supplies available prior to disaster",
                       "Were basic medical supplies available for health services prior to the disaster?")),
             Field("health_services_post_disaster", "boolean",
                   **rat_label_and_tooltip(
                       "Health services functioning since disaster",
                       "Are there health services functioning for the community since the disaster?")),
             Field("medical_supplies_post_disaster", "boolean",
                   **rat_label_and_tooltip(
                       "Basic medical supplies available since disaster",
                       "Are basic medical supplies available for health services since the disaster?")),
             Field("medical_supplies_reserve", "integer",
                   label = T("How many days will the supplies last?")),
             Field("health_services_available_types",
"list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_health_services_types, zero=None, multiple=True)), represent = lambda opt: \ rat_represent_multiple(rat_health_services_types, opt), **rat_label_and_tooltip( "Types of health services available", "What types of health services are still functioning in the affected area?", multiple=True)), Field("staff_number_doctors", "integer", requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)), **rat_label_and_tooltip( "Number of doctors actively working", "How many doctors in the health centers are still actively working?")), Field("staff_number_nurses", "integer", requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)), **rat_label_and_tooltip( "Number of nurses actively working", "How many nurses in the health centers are still actively working?")), Field("staff_number_midwives", "integer", requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)), **rat_label_and_tooltip( "Number of midwives actively working", "How many midwives in the health centers are still actively working?")), Field("health_service_walking_time", "integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_walking_time_opts, zero=None)), represent = lambda opt: rat_walking_time_opts.get(opt, UNKNOWN_OPT), **rat_label_and_tooltip( "Walking time to the health service", "How long does it take you to walk to the health service?")), Field("health_problems_adults", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_health_problems_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_health_problems_opts: \ rat_represent_multiple(set, opt), **rat_label_and_tooltip( "Current type of health problems, adults", "What types of health problems do people currently have?", multiple=True)), Field("health_problems_adults_other", label = T("Other current health problems, adults")), Field("health_problems_children", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_health_problems_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_health_problems_opts: \ 
rat_represent_multiple(set, opt), **rat_label_and_tooltip( "Current type of health problems, children", "What types of health problems do children currently have?", multiple=True)), Field("health_problems_children_other", label = T("Other current health problems, children")), Field("chronical_illness_cases", "boolean", # @ToDo: "chronic illness"? **rat_label_and_tooltip( "People with chronical illnesses", "Are there people with chronical illnesses in your community?")), Field("chronical_illness_children", "boolean", **rat_label_and_tooltip( "Children with chronical illnesses", "Are there children with chronical illnesses in your community?")), Field("chronical_illness_elderly", "boolean", **rat_label_and_tooltip( "Older people with chronical illnesses", "Are there older people with chronical illnesses in your community?")), Field("chronical_care_sufficient", "boolean", **rat_label_and_tooltip( "Sufficient care/assistance for chronically ill", "Are the chronically ill receiving sufficient care and assistance?")), Field("malnutrition_present_pre_disaster", "boolean", **rat_label_and_tooltip( "Malnutrition present prior to disaster", "Were there cases of malnutrition in this area prior to the disaster?")), Field("mmd_present_pre_disaster", "boolean", **rat_label_and_tooltip( "Micronutrient malnutrition prior to disaster", "Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?")), Field("breast_milk_substitutes_pre_disaster", "boolean", **rat_label_and_tooltip( "Breast milk substitutes used prior to disaster", "Were breast milk substitutes used prior to the disaster?")), Field("breast_milk_substitutes_post_disaster", "boolean", **rat_label_and_tooltip( "Breast milk substitutes in use since disaster", "Are breast milk substitutes being used here since the disaster?")), Field("infant_nutrition_alternative", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_infant_nutrition_alternative_opts, zero=None, 
multiple=True)), represent = lambda opt, set=rat_infant_nutrition_alternative_opts: \ rat_represent_multiple(set, opt), **rat_label_and_tooltip( "Alternative infant nutrition in use", "Babies who are not being breastfed, what are they being fed on?", multiple=True)), Field("infant_nutrition_alternative_other", label = T("Other alternative infant nutrition in use")), Field("u5_diarrhea", "boolean", **rat_label_and_tooltip( "Diarrhea among children under 5", "Are there cases of diarrhea among children under the age of 5?")), Field("u5_diarrhea_rate_48h", "integer", **rat_label_and_tooltip( "Approx. number of cases/48h", "Approximately how many children under 5 with diarrhea in the past 48 hours?")), s3_comments(), *s3_meta_fields()) # CRUD strings crud_strings[tablename] = rat_section_crud_strings configure(tablename, deletable=False) # Section 6 - Nutrition/Food Security ------------------------------------- rat_main_dish_types = { 1: T("Rice"), 2: T("Noodles"), 3: T("Biscuits"), 4: T("Corn"), 5: T("Wheat"), 6: T("Cassava"), 7: T("Cooking Oil") } rat_side_dish_types = { 1: T("Salted Fish"), 2: T("Canned Fish"), 3: T("Chicken"), 4: T("Eggs"), 99: T("Other (specify)") } rat_food_stock_reserve_opts = { 1: T("1-3 days"), 2: T("4-7 days"), 3: T("8-14 days") } rat_food_source_types = { 1: "Local market", 2: "Field cultivation", 3: "Food stall", 4: "Animal husbandry", 5: "Raising poultry", 99: "Other (specify)" } tablename = "assess_section6" define_table(tablename, assessment_id(), Field("food_stocks_main_dishes", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_main_dish_types, zero=None, multiple=True)), represent = lambda opt, set=rat_main_dish_types: \ rat_represent_multiple(set, opt), **rat_label_and_tooltip( "Existing food stocks, main dishes", "What food stocks exist? (main dishes)", multiple=True)), # @ToDo: Should there be a field "food_stocks_other_main_dishes"? 
Field("food_stocks_side_dishes", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_side_dish_types, zero=None, multiple=True)), represent = lambda opt, set=rat_side_dish_types: \ rat_represent_multiple(set, opt), **rat_label_and_tooltip( "Existing food stocks, side dishes", "What food stocks exist? (side dishes)", multiple=True)), Field("food_stocks_other_side_dishes", label = T("Other side dishes in stock")), Field("food_stocks_reserve", "integer", label = T("How long will the food last?"), requires = IS_EMPTY_OR(IS_IN_SET(rat_food_stock_reserve_opts, zero=None)), represent = lambda opt: rat_food_stock_reserve_opts.get(opt, UNKNOWN_OPT)), Field("food_sources", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_food_source_types, zero=None, multiple=True)), represent = lambda opt, set=rat_food_source_types: \ rat_represent_multiple(set, opt), **rat_label_and_tooltip( "Usual food sources in the area", "What are the people's normal ways to obtain food in this area?", multiple=True)), Field("food_sources_other", label = T("Other ways to obtain food")), Field("food_sources_disruption", "boolean", **rat_label_and_tooltip( "Normal food sources disrupted", "Have normal food sources been disrupted?")), Field("food_sources_disruption_details", label = T("If yes, which and how")), Field("food_assistance_available", "boolean", **rat_label_and_tooltip( "Food assistance available/expected", "Have the people received or are you expecting any medical or food assistance in the coming days?")), Field("food_assistance_details", "text", label = T("If yes, specify what and by whom")), s3_comments(), *s3_meta_fields()) # CRUD strings crud_strings[tablename] = rat_section_crud_strings configure(tablename, deletable=False) # Section 7 - Livelihood -------------------------------------------------- rat_income_source_opts = { 1: T("Agriculture"), 2: T("Fishing"), 3: T("Poultry"), 4: T("Casual Labor"), 5: T("Small Trade"), 6: T("Other") } rat_expense_types = { 1: T("Education"), 2: 
T("Health"), 3: T("Food"), 4: T("Hygiene"), 5: T("Shelter"), 6: T("Clothing"), 7: T("Funeral"), 8: T("Alcohol"), 99: T("Other (specify)") } rat_cash_source_opts = { 1: T("Family/friends"), 2: T("Government"), 3: T("Bank/micro finance"), 4: T("Humanitarian NGO"), 99: T("Other (specify)") } rat_ranking_opts = xrange(1, 7) tablename = "assess_section7" define_table(tablename, assessment_id(), Field("income_sources_pre_disaster", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_income_source_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_income_source_opts: \ rat_represent_multiple(set, opt), **rat_label_and_tooltip( "Main income sources before disaster", "What were your main sources of income before the disaster?", multiple=True)), Field("income_sources_post_disaster", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_income_source_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_income_source_opts: \ rat_represent_multiple(set, opt), **rat_label_and_tooltip( "Current main income sources", "What are your main sources of income now?", multiple=True)), Field("main_expenses", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_expense_types, zero=None, multiple=True)), represent = lambda opt, set=rat_expense_types: \ rat_represent_multiple(set, opt), **rat_label_and_tooltip( "Current major expenses", "What do you spend most of your income on now?", multiple=True)), Field("main_expenses_other", label = T("Other major expenses")), Field("business_damaged", "boolean", **rat_label_and_tooltip( "Business damaged", "Has your business been damaged in the course of the disaster?")), Field("business_cash_available", "boolean", **rat_label_and_tooltip( "Cash available to restart business", "Do you have access to cash to restart your business?")), Field("business_cash_source", "list:integer", requires = IS_EMPTY_OR(IS_IN_SET(rat_cash_source_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_cash_source_opts: \ 
                               rat_represent_multiple(set, opt),
                   **rat_label_and_tooltip(
                       "Main cash source",
                       "What are your main sources of cash to restart your business?")),
             Field("rank_reconstruction_assistance", "integer",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts,
                                                    zero=None)),
                   **rat_label_and_tooltip(
                       "Immediate reconstruction assistance, Rank",
                       "Assistance for immediate repair/reconstruction of houses")),
             Field("rank_farmland_fishing_assistance", "integer",
                   label = T("Farmland/fishing material assistance, Rank"),
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts,
                                                    zero=None))),
             Field("rank_poultry_restocking", "integer",
                   label = T("Poultry restocking, Rank"),
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts,
                                                    zero=None))),
             Field("rank_health_care_assistance", "integer",
                   label = T("Health care assistance, Rank"),
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts,
                                                    zero=None))),
             Field("rank_transportation_assistance", "integer",
                   label = T("Transportation assistance, Rank"),
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts,
                                                    zero=None))),
             Field("other_assistance_needed",
                   label = T("Other assistance needed")),
             Field("rank_other_assistance", "integer",
                   label = T("Other assistance, Rank"),
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_ranking_opts,
                                                    zero=None))),
             s3_comments(),
             *s3_meta_fields())

# CRUD strings
crud_strings[tablename] = rat_section_crud_strings

configure(tablename,
          deletable=False)

# Section 8 - Education ---------------------------------------------------
#
rat_schools_salvmat_types = {
    1: T("Wooden plank"),
    2: T("Zinc roof"),
    3: T("Bricks"),
    4: T("Wooden poles"),
    5: T("Door frame"),
    6: T("Window frame"),
    7: T("Roof tile"),
    999: NOT_APPLICABLE
}

rat_alternative_study_places = {
    1: T("Community Centre"),
    2: T("Church"),
    3: T("Mosque"),
    4: T("Open area"),
    5: T("Government building"),
    6: T("Other (specify)"),
    999: NOT_APPLICABLE
}

rat_school_attendance_barriers_opts = {
    1: T("School used for other purpose"),
    2: T("School destroyed"),
    3: T("Lack of school uniform"),
    4: T("Lack of transport to school"),
    5: T("Children not enrolled in new school"),
    6: T("School heavily damaged"),
    7: T("Desire to remain with family"),
    8: T("Lack of supplies at school"),
    9: T("Displaced"),
    10: T("Other (specify)"),
    999: NOT_APPLICABLE
}

tablename = "assess_section8"
define_table(tablename,
             assessment_id(),
             Field("schools_total", "integer",
                   label = T("Total number of schools in affected area"),
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
             Field("schools_public", "integer",
                   label = T("Number of public schools"),
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
             Field("schools_private", "integer",
                   label = T("Number of private schools"),
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
             Field("schools_religious", "integer",
                   label = T("Number of religious schools"),
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
             Field("schools_destroyed", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Number of schools destroyed/uninhabitable",
                       "uninhabitable = foundation and structure destroyed")),
             Field("schools_damaged", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Number of schools damaged but usable",
                       "windows broken, cracks in walls, roof slightly damaged")),
             Field("schools_salvmat", "list:integer",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_schools_salvmat_types,
                                                    zero=None,
                                                    multiple=True)),
                   represent = lambda opt, set=rat_schools_salvmat_types: \
                               rat_represent_multiple(set, opt),
                   **rat_label_and_tooltip(
                       "Salvage material usable from destroyed schools",
                       "What type of salvage material can be used from destroyed schools?",
                       multiple=True)),
             Field("alternative_study_places_available", "boolean",
                   **rat_label_and_tooltip(
                       "Alternative places for studying available",
                       "Are there alternative places for studying?")),
             Field("alternative_study_places_number", "integer",
                   label = T("Number of alternative places for studying"),
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999))),
             Field("alternative_study_places", "list:integer",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_alternative_study_places,
                                                    zero=None,
                                                    multiple=True)),
                   represent = lambda opt, set=rat_alternative_study_places: \
                               rat_represent_multiple(set, opt),
                   **rat_label_and_tooltip(
                       "Alternative places for studying",
                       "Where are the alternative places for studying?",
                       multiple=True)),
             Field("alternative_study_places_other",
                   label = T("Other alternative places for study")),
             Field("schools_open_pre_disaster", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Number of schools open before disaster",
                       "How many primary/secondary schools were opening prior to the disaster?")),
             Field("schools_open_post_disaster", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Number of schools open now",
                       "How many of the primary/secondary schools are now open and running a regular schedule of class?")),
             Field("teachers_active_pre_disaster", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Number of teachers before disaster",
                       "How many teachers worked in the schools prior to the disaster?")),
             Field("teachers_affected_by_disaster", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Number of teachers affected by disaster",
                       "How many teachers have been affected by the disaster (affected = unable to work)?")),
             Field("children_0612_female", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Girls 6-12 yrs in affected area",
                       "How many primary school age girls (6-12) are in the affected area?")),
             Field("children_0612_male", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Boys 6-12 yrs in affected area",
                       "How many primary school age boys (6-12) are in the affected area?")),
             Field("children_0612_not_in_school_female", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Girls 6-12 yrs not attending school",
                       "How many of the primary school age girls (6-12) in the area are not attending school?")),
             Field("children_0612_not_in_school_male", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Boys 6-12 yrs not attending school",
                       "How many of the primary school age boys (6-12) in the area are not attending school?")),
             Field("children_1318_female", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Girls 13-18 yrs in affected area",
                       "How many secondary school age girls (13-18) are in the affected area?")),
             Field("children_1318_male", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Boys 13-18 yrs in affected area",
                       "How many secondary school age boys (13-18) are in the affected area?")),
             Field("children_1318_not_in_school_female", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Girls 13-18 yrs not attending school",
                       "How many of the secondary school age girls (13-18) in the area are not attending school?")),
             Field("children_1318_not_in_school_male", "integer",
                   requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, 999999)),
                   **rat_label_and_tooltip(
                       "Boys 13-18 yrs not attending school",
                       "How many of the secondary school age boys (13-18) in the area are not attending school?")),
             Field("school_attendance_barriers", "list:integer",
                   requires = IS_EMPTY_OR(IS_IN_SET(rat_school_attendance_barriers_opts,
                                                    zero=None,
                                                    multiple=True)),
                   represent = lambda opt, set=rat_school_attendance_barriers_opts: \
                               rat_represent_multiple(set, opt),
                   **rat_label_and_tooltip(
                       "Factors affecting school attendance",
                       "What are the factors affecting school attendance?",
                       multiple=True)),
             Field("school_attendance_barriers_other",
                   label = T("Other factors affecting school attendance")),
             Field("school_assistance_available", "boolean",
                   **rat_label_and_tooltip(
                       "School assistance received/expected",
                       "Have
schools received or are expecting to receive any assistance?")), Field("school_assistance_tents_available", "boolean", label = T("School tents received")), Field("school_assistence_tents_source", label = T("School tents, source")), Field("school_assistance_materials_available", "boolean", label = T("Education materials received")), Field("school_assistance_materials_source", label = T("Education materials, source")), Field("school_assistance_other_available", "boolean", label = T("Other school assistance received")), Field("school_assistance_other", label = T("Other school assistance, details")), Field("school_assistance_other_source", label = T("Other school assistance, source")), s3_comments(), *s3_meta_fields()) # @ToDo: onvalidation! # CRUD strings crud_strings[tablename] = rat_section_crud_strings configure(tablename, deletable=False) # Section 9 - Protection -------------------------------------------------- rat_fuzzy_quantity_opts = { 1: T("None"), 2: T("Few"), 3: T("Some"), 4: T("Many") } rat_quantity_opts = { 1: "1-10", 2: "11-50", 3: "51-100", 4: "100+" } rat_child_activity_opts = { 1: T("Playing"), 2: T("Domestic chores"), 3: T("School/studying"), 4: T("Doing nothing (no structured activity)"), 5: T("Working or other to provide money/food"), 99: T("Other (specify)") } rat_child_activity_post_disaster_opts = rat_child_activity_opts.copy() rat_child_activity_post_disaster_opts.update({ 6: T("Disaster clean-up/repairs") }) tablename = "assess_section9" define_table(tablename, assessment_id(), Field("vulnerable_groups_safe_env", "boolean", label = T("Safe environment for vulnerable groups"), comment = rat_tooltip("Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?")), Field("safety_children_women_affected", "boolean", label = T("Safety of children and women affected by disaster?"), comment = rat_tooltip("Has the safety and security of women and children in your community 
changed since the emergency?")), Field("sec_incidents", "boolean", label = T("Known incidents of violence since disaster"), comment = rat_tooltip("Do you know of any incidents of violence?")), Field("sec_incidents_gbv", "boolean", label = T("Known incidents of violence against women/girls"), comment = rat_tooltip("Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?")), Field("sec_current_needs", label = T("Needs to reduce vulnerability to violence"), comment = rat_tooltip("What should be done to reduce women and children's vulnerability to violence?")), Field("children_separated", "integer", label = T("Children separated from their parents/caregivers"), requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None)), represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT), comment = rat_tooltip("Do you know of children separated from their parents or caregivers?")), Field("children_separated_origin", label = T("Origin of the separated children"), comment = rat_tooltip("Where are the separated children originally from?")), Field("children_missing", "integer", label = T("Parents/Caregivers missing children"), requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None)), represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT), comment = rat_tooltip("Do you know of parents/caregivers missing children?")), Field("children_orphaned", "integer", label = T("Children orphaned by the disaster"), requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None)), represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT), comment = rat_tooltip("Do you know of children that have been orphaned by the disaster?")), Field("children_unattended", "integer", label = T("Children living on their own (without adults)"), requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None)), represent = lambda opt: 
rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT), comment = rat_tooltip("Do you know of children living on their own (without adults)?")), Field("children_disappeared", "integer", label = T("Children who have disappeared since the disaster"), requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None)), represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT), comment = rat_tooltip("Do you know of children that have disappeared without explanation in the period since the disaster?")), Field("children_evacuated", "integer", label = T("Children that have been sent to safe places"), requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None)), represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT), comment = rat_tooltip("Do you know of children that have been sent to safe places?")), Field("children_evacuated_to", label = T("Places the children have been sent to"), comment = rat_tooltip("Where have the children been sent?")), Field("children_with_older_caregivers", "integer", label = T("Older people as primary caregivers of children"), requires = IS_EMPTY_OR(IS_IN_SET(rat_fuzzy_quantity_opts, zero=None)), represent = lambda opt: rat_fuzzy_quantity_opts.get(opt, UNKNOWN_OPT), comment = rat_tooltip("Do you know of older people who are primary caregivers of children?")), Field("children_in_disabled_homes", "boolean", label = T("Children in homes for disabled children"), comment = rat_tooltip("Are there children living in homes for disabled children in this area?")), Field("children_in_orphanages", "boolean", label = T("Children in orphanages"), comment = rat_tooltip("Are there children living in orphanages in this area?")), Field("children_in_boarding_schools", "boolean", label = T("Children in boarding schools"), comment = rat_tooltip("Are there children living in boarding schools in this area?")), Field("children_in_juvenile_detention", "boolean", label = T("Children in juvenile detention"), comment = rat_tooltip("Are there 
children living in juvenile detention in this area?")), Field("children_in_adult_prisons", "boolean", label = T("Children in adult prisons"), comment = rat_tooltip("Are there children living in adult prisons in this area?")), Field("people_in_adult_prisons", "boolean", label = T("Adults in prisons"), comment = rat_tooltip("Are there adults living in prisons in this area?")), Field("people_in_care_homes", "boolean", label = T("Older people in care homes"), comment = rat_tooltip("Are there older people living in care homes in this area?")), Field("people_in_institutions_est_total", "integer", label = T("Estimated total number of people in institutions"), requires = IS_EMPTY_OR(IS_IN_SET(rat_quantity_opts, zero=None)), represent = lambda opt: rat_quantity_opts.get(opt, UNKNOWN_OPT), comment = rat_tooltip("What is the estimated total number of people in all of these institutions?")), Field("staff_in_institutions_present", "boolean", label = T("Staff present and caring for residents"), comment = rat_tooltip("Are there staff present and caring for the residents in these institutions?")), Field("adequate_food_water_in_institutions", "boolean", label = T("Adequate food and water available"), comment = rat_tooltip("Is adequate food and water available for these institutions?")), Field("child_activities_u12f_pre_disaster", "list:integer", label = T("Activities of girls <12yrs before disaster"), requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_child_activity_opts: \ rat_represent_multiple(set, opt), comment = rat_tooltip("How did girls <12yrs spend most of their time prior to the disaster?", multiple=True)), Field("child_activities_u12f_pre_disaster_other", label = T("Other activities of girls<12yrs before disaster")), Field("child_activities_u12m_pre_disaster", "list:integer", label = T("Activities of boys <12yrs before disaster"), requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts, zero=None, 
multiple=True)), represent = lambda opt, set=rat_child_activity_opts: \ rat_represent_multiple(set, opt), comment = rat_tooltip("How did boys <12yrs spend most of their time prior to the disaster?", multiple=True)), Field("child_activities_u12m_pre_disaster_other", label = T("Other activities of boys <12yrs before disaster")), Field("child_activities_o12f_pre_disaster", "list:integer", label = T("Activities of girls 13-17yrs before disaster"), requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_child_activity_opts: \ rat_represent_multiple(set, opt), comment = rat_tooltip("How did boys girls 13-17yrs spend most of their time prior to the disaster?", multiple=True)), Field("child_activities_o12f_pre_disaster_other", label = T("Other activities of girls 13-17yrs before disaster")), Field("child_activities_o12m_pre_disaster", "list:integer", label = T("Activities of boys 13-17yrs before disaster"), requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_child_activity_opts: \ rat_represent_multiple(set, opt), comment = rat_tooltip("How did boys 13-17yrs spend most of their time prior to the disaster?", multiple=True)), Field("child_activities_o12m_pre_disaster_other", label = T("Other activities of boys 13-17yrs before disaster")), Field("child_activities_u12f_post_disaster", "list:integer", label = T("Activities of girls <12yrs now"), requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_child_activity_opts: \ rat_represent_multiple(set, opt), comment = rat_tooltip("How do girls <12yrs spend most of their time now?", multiple=True)), Field("child_activities_u12f_post_disaster_other", label = T("Other activities of girls<12yrs")), Field("child_activities_u12m_post_disaster", "list:integer", label = T("Activities of boys <12yrs now"), requires = 
IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_child_activity_opts: \ rat_represent_multiple(set, opt), comment = rat_tooltip("How do boys <12yrs spend most of their time now?", multiple=True)), Field("child_activities_u12m_post_disaster_other", label = T("Other activities of boys <12yrs")), Field("child_activities_o12f_post_disaster", "list:integer", label = T("Activities of girls 13-17yrs now"), requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_child_activity_opts: \ rat_represent_multiple(set, opt), comment = rat_tooltip("How do girls 13-17yrs spend most of their time now?", multiple=True)), Field("child_activities_o12f_post_disaster_other", label = T("Other activities of girls 13-17yrs")), Field("child_activities_o12m_post_disaster", "list:integer", label = T("Activities of boys 13-17yrs now"), requires = IS_EMPTY_OR(IS_IN_SET(rat_child_activity_opts, zero=None, multiple=True)), represent = lambda opt, set=rat_child_activity_opts: \ rat_represent_multiple(set, opt), comment = rat_tooltip("How do boys 13-17yrs spend most of their time now?", multiple=True)), Field("child_activities_o12m_post_disaster_other", label = T("Other activities of boys 13-17yrs")), Field("coping_activities_elderly", "boolean", label = T("Older people participating in coping activities"), comment = rat_tooltip("Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")), Field("coping_activities_women", "boolean", label = T("Women participating in coping activities"), comment = rat_tooltip("Do women in your community participate in activities that help them cope with the disaster? (ex. 
meetings, religious activities, volunteer in the community clean-up, etc)")), Field("coping_activities_disabled", "boolean", label = T("Disabled participating in coping activities"), comment = rat_tooltip("Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")), Field("coping_activities_minorities", "boolean", label = T("Minorities participating in coping activities"), comment = rat_tooltip("Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")), Field("coping_activities_adolescent", "boolean", label = T("Adolescent participating in coping activities"), comment = rat_tooltip("Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)")), Field("current_general_needs", "text", label = T("Current greatest needs of vulnerable groups"), comment = rat_tooltip("In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?")), s3_comments(), *s3_meta_fields()) # CRUD strings crud_strings[tablename] = rat_section_crud_strings configure(tablename, deletable=False) # Sections as components of RAT add_components("assess_rat", assess_section2={"joinby": "assessment_id", "multiple": False, }, assess_section3={"joinby": "assessment_id", "multiple": False, }, assess_section4={"joinby": "assessment_id", "multiple": False, }, assess_section5={"joinby": "assessment_id", "multiple": False, }, assess_section6={"joinby": "assessment_id", "multiple": False, }, assess_section7={"joinby": "assessment_id", "multiple": False, }, assess_section8={"joinby": "assessment_id", "multiple": False, }, assess_section9={"joinby": "assessment_id", 
"multiple": False, }, ) # ----------------------------------------------------------------------------- def assess_rat_summary(r, **attr): """ Aggregate reports """ if r.name == "rat": if r.representation == "html": return dict() elif r.representation == "xls": return None else: # Other formats? raise HTTP(501, body=ERROR.BAD_FORMAT) else: raise HTTP(501, body=ERROR.BAD_METHOD) s3db.set_method("assess", "rat", method="summary", action=assess_rat_summary) # Pass variables back to global scope (response.s3.*) # ========================================================================= # UN Common Operational Datasets # ========================================================================= # Population Statistics tablename = "assess_population" define_table(tablename, location_id(widget = S3LocationAutocompleteWidget(), requires = IS_LOCATION()), Field("population", "integer"), Field("households", "integer"), Field("median_age", "double"), Field("average_family_size", "double"), Field("effective_date", "datetime"), s3_comments(), *(s3_timestamp() + s3_uid() + s3_deletion_status())) # CRUD strings crud_strings[tablename] = Storage( label_create = T("Add Population Statistic"), title_display = T("Population Statistic Details"), title_list = T("Population Statistics"), title_update = T("Edit Population Statistic"), label_list_button = T("List Population Statistics"), label_delete_button = T("Delete Population Statistic"), msg_record_created = T("Population Statistic added"), msg_record_modified = T("Population Statistic updated"), msg_record_deleted = T("Population Statistic deleted"), msg_list_empty = T("No Population Statistics currently registered"), name_nice = T("Population Statistic"), name_nice_plural = T("Population Statistics")) # Impact as component of incident reports #add_components("irs_ireport", impact_impact="ireport_id") # ========================================================================= def impact_tables(): """ Load the Impact tables as-needed 
""" sector_id = s3db.org_sector_id ireport_id = s3db.irs_ireport_id # Load the models we depend on if settings.has_module("assess"): assess_tables() assess_id = s3.assess_id module = "impact" # ------------------------------------------------------------------------- # Impact Type resourcename = "type" tablename = "%s_%s" % (module, resourcename) db.define_table(tablename, Field("name", length=128, notnull=True, unique=True), sector_id(), *s3_meta_fields()) # CRUD strings ADD_IMPACT_TYPE = T("Add Impact Type") s3.crud_strings[tablename] = Storage( label_create = ADD_IMPACT_TYPE, title_display = T("Impact Type Details"), title_list = T("Impact Types"), title_update = T("Edit Impact Type"), label_list_button = T("List Impact Types"), label_delete_button = T("Delete Impact Type"), msg_record_created = T("Impact Type added"), msg_record_modified = T("Impact Type updated"), msg_record_deleted = T("Impact Type deleted"), msg_list_empty = T("No Impact Types currently registered"), name_nice = T("Impact"), name_nice_plural = T("Impacts")) def impact_type_comment(): if auth.has_membership(auth.id_group("'Administrator'")): return S3AddResourceLink(c="assess", f="type", vars=dict(child="impact_type_id")) else: return None represent = S3Represent(tablename) impact_type_id = S3ReusableField("impact_type_id", "reference %s" % tablename, sortby="name", requires = IS_NULL_OR( IS_ONE_OF(db, "impact_type.id", represent, sort=True)), represent = represent, label = T("Impact Type"), comment = impact_type_comment(), ondelete = "RESTRICT") # ===================================================================== # Impact # Load model ireport_id = s3db.irs_ireport_id tablename = "assess_impact" define_table(tablename, ireport_id(readable=False, writable=False), assess_id(readable=False, writable=False), impact_type_id(), Field("value", "double"), Field("severity", "integer", requires = IS_EMPTY_OR(IS_IN_SET(assess_severity_opts)), widget=SQLFORM.widgets.radio.widget, represent = 
s3_assess_severity_represent, default = 0), s3_comments(), *s3_meta_fields()) # CRUD strings ADD_IMPACT = T("Add Impact") crud_strings[tablename] = Storage( label_create = ADD_IMPACT, title_display = T("Impact Details"), title_list = T("Impacts"), title_update = T("Edit Impact"), label_list_button = T("List Impacts"), label_delete_button = T("Delete Impact"), msg_record_created = T("Impact added"), msg_record_modified = T("Impact updated"), msg_record_deleted = T("Impact deleted"), msg_list_empty = T("No Impacts currently registered")) # ============================================================================= def index(): """ Module's Home Page """ module_name = settings.modules[module].name_nice response.title = module_name return dict(module_name=module_name) # ----------------------------------------------------------------------------- def create(): """ Redirect to assess/create """ redirect(URL(f="assess", args="create")) # ============================================================================= # UN Common Operational Datasets # ============================================================================= def population(): """ RESTful controller """ output = s3_rest_controller() return output # ============================================================================= # Rapid Assessments # ============================================================================= def rat(): """ Rapid Assessments, RESTful controller """ # Load Models assess_tables() tablename = "%s_%s" % (module, resourcename) table = db[tablename] # Villages only #table.location_id.requires = IS_NULL_OR(IS_ONE_OF(db(db.gis_location.level == "L5"), # "gis_location.id", # repr_select, sort=True)) # Subheadings in forms: configure("assess_section2", subheadings = { T("Population and number of households"): "population_total", T("Fatalities"): "dead_women", T("Casualties"): "injured_women", T("Missing Persons"): "missing_women", T("General information on demographics"): 
"household_head_elderly", T("Comments"): "comments"}) configure("assess_section3", subheadings = { T("Access to Shelter"): "houses_total", T("Water storage containers in households"): "water_containers_available", T("Other non-food items"): "cooking_equipment_available", T("Shelter/NFI Assistance"): "nfi_assistance_available", T("Comments"): "comments"}) configure("assess_section4", subheadings = { T("Water supply"): "water_source_pre_disaster_type", T("Water collection"): "water_coll_time", T("Places for defecation"): "defec_place_type", T("Environment"): "close_industry", T("Latrines"): "latrines_number", T("Comments"): "comments"}) configure("assess_section5", subheadings = { T("Health services status"): "health_services_pre_disaster", T("Current health problems"): "health_problems_adults", T("Nutrition problems"): "malnutrition_present_pre_disaster", T("Comments"): "comments"}) configure("assess_section6", subheadings = { T("Existing food stocks"): "food_stocks_main_dishes", T("food_sources") : "Food sources", T("Food assistance"): "food_assistance_available", T("Comments"): "comments"}) configure("assess_section7", subheadings = { "%s / %s" % (T("Sources of income"), T("Major expenses")): "income_sources_pre_disaster", T("Business Damaged"): "Access to cash", T("Current community priorities"): "rank_reconstruction_assistance", T("Comments"): "comments"}) configure("assess_section8", subheadings = { T("Access to education services"): "schools_total", T("Alternative places for studying"): "alternative_study_places_available", T("School activities"): "schools_open_pre_disaster", T("School attendance"): "children_0612_female", T("School assistance"): "school_assistance_available", T("Comments"): "comments"}) configure("assess_section9", subheadings = { T("Physical Safety"): "vulnerable_groups_safe_env", T("Separated children, caregiving arrangements"): "children_separated", T("Persons in institutions"): "children_in_disabled_homes", T("Activities of children"): 
"child_activities_u12f_pre_disaster", T("Coping Activities"): "coping_activities_elderly", T("Current general needs"): "current_general_needs", T("Comments"): "comments"}) # @ToDo Generalize this and make it available as a function that other # component prep methods can call to set the default for a join field. def prep(r): if r.interactive: # Pre-populate staff ID staff_id = auth.s3_logged_in_human_resource() if staff_id: r.table.staff_id.default = staff_id if r.method == "create": # If this assessment is being created as a component of a shelter, # it will have the shelter id in its vars. shelter_id = r.get_vars.get("rat.shelter_id", None) if shelter_id: try: shelter_id = int(shelter_id) except ValueError: pass else: r.table.shelter_id.default = shelter_id return True response.s3.prep = prep # Post-processor def postp(r, output): s3_action_buttons(r, deletable=False) # Redirect to update view to open tabs if r.representation == "html" and r.method == "create": r.next = r.url(method="", id=s3base.s3_get_last_record_id("assess_rat")) return output response.s3.postp = postp # Over-ride the listadd since we're not a component here configure(tablename, create_next="", listadd=True) tabs = [(T("Identification"), None), (T("Demographic"), "section2"), (T("Shelter & Essential NFIs"), "section3"), (T("WatSan"), "section4"), (T("Health"), "section5"), (T("Nutrition"), "section6"), (T("Livelihood"), "section7"), (T("Education"), "section8"), (T("Protection"), "section9") ] rheader = lambda r: rat_rheader(r, tabs) output = s3_rest_controller(rheader=rheader, s3ocr_config={"tabs": tabs}) response.s3.stylesheets.append( "S3/rat.css" ) return output # ----------------------------------------------------------------------------- def rat_rheader(r, tabs=[]): """ Resource Headers """ if r.representation == "html": if r.name == "rat": report = r.record if report: htable = db.hrm_human_resource rheader_tabs = s3_rheader_tabs(r, tabs, paging=True) location = report.location_id if 
location: location = r.table.location_id.represent(location) staff = report.staff_id if staff: organisation_represent = htable.organisation_id.represent query = (htable.id == staff) organisation_id = db(query).select(htable.organisation_id, limitby=(0, 1)).first().organisation_id organisation = organisation_represent(organisation_id) else: organisation = None staff = report.staff2_id if staff: query = (htable.id == staff) organisation2_id = db(query).select(htable.organisation_id, limitby=(0, 1)).first().organisation_id if organisation2_id == organisation_id: organisation2 = None else: organisation2 = organisation_represent(organisation_id) else: organisation2 = None if organisation2: orgs = "%s, %s" % (organisation, organisation2) else: orgs = organisation rheader = DIV(TABLE( TR( TH("%s: " % T("Location")), location, TH("%s: " % T("Date")), report.date ), TR( TH("%s: " % T("Organizations")), orgs, ) ), rheader_tabs) return rheader return None # ============================================================================= # Flexible Impact Assessments # ============================================================================= def assess_rheader(r, tabs=[]): """ Resource Headers for Flexible Impact Assessments """ if r.representation == "html": rheader_tabs = s3_rheader_tabs(r, tabs) assess = r.record if assess: table = db.assess_assess rheader = DIV(TABLE(TR( TH("%s: " % T("Date & Time")), table.datetime.represent(assess.datetime), TH("%s: " % T("Location")), table.location_id.represent(assess.location_id), TH("%s: " % T("Assessor")), table.assessor_person_id.represent(assess.assessor_person_id), ), ), rheader_tabs ) return rheader return None # ----------------------------------------------------------------------------- def assess(): """ RESTful CRUD controller """ # Load Models assess_tables() impact_tables() tablename = "%s_%s" % (module, resourcename) table = db[tablename] # Pre-processor def prep(r): if session.s3.mobile and r.method == "create" and 
r.interactive: # redirect to mobile-specific form: redirect(URL(f="assess_short_mobile")) return True response.s3.prep = prep #table.incident_id.comment = DIV(_class="tooltip", # _title="%s|%s" % (T("Incident"), # T("Optional link to an Incident which this Assessment was triggered by."))) tabs = [ (T("Edit Details"), None), (T("Baselines"), "baseline"), (T("Impacts"), "impact"), (T("Summary"), "summary"), #(T("Requested"), "ritem"), ] rheader = lambda r: assess_rheader(r, tabs) return s3_rest_controller(rheader=rheader) # ----------------------------------------------------------------------------- def impact_type(): """ RESTful CRUD controller """ # Load Models impact_tables() module = "impact" resourcename = "type" return s3_rest_controller(module, resourcename) # ----------------------------------------------------------------------------- def baseline_type(): """ RESTful CRUD controller """ # Load Models assess_tables() return s3_rest_controller() # ----------------------------------------------------------------------------- def baseline(): """ RESTful CRUD controller """ # Load Models assess_tables() return s3_rest_controller() # ----------------------------------------------------------------------------- def summary(): """ RESTful CRUD controller """ # Load Models assess_tables() return s3_rest_controller() # ============================================================================= def basic_assess(): """ Custom page to hide the complexity of the Assessments/Impacts/Summary model: PC Browser version """ if not auth.is_logged_in(): session.error = T("Need to be logged-in to be able to submit assessments") redirect(URL(c="default", f="user", args=["login"])) # Load Models assess_tables() impact_tables() # See if we've been created from an Incident ireport_id = request.vars.get("ireport_id") if ireport_id: # Location is the same as the calling Incident table = db.irs_ireport row = db(table.id == ireport_id).select(table.location_id, limitby=(0, 1)).first() 
if row: irs_location_id = row.location_id location = table.location_id.represent(irs_location_id) else: irs_location_id = None location = None custom_assess_fields = ( ("impact", 1), ("impact", 2), ("impact", 3), ("impact", 4), ("impact", 5), ("impact", 6), ("impact", 7), ("assess", "comments"), ) form, form_accepted, assess_id = custom_assess(custom_assess_fields, location_id=irs_location_id) else: location = None custom_assess_fields = ( ("assess", "location_id", "selector"), ("impact", 1), ("impact", 2), ("impact", 3), ("impact", 4), ("impact", 5), ("impact", 6), ("impact", 7), ("assess", "comments"), ) form, form_accepted, assess_id = custom_assess(custom_assess_fields) if form_accepted: session.confirmation = T("Basic Assessment Reported") redirect(URL(f="assess", args=[assess_id, "impact"])) return dict(title = T("Basic Assessment"), location = location, form = form) # ----------------------------------------------------------------------------- def mobile_basic_assess(): """ Custom page to hide the complexity of the Assessments/Impacts/Summary model: Mobile device version """ if not auth.is_logged_in(): redirect(URL(c="default", f="index")) # Load Models assess_tables() impact_tables() custom_assess_fields = ( ("assess", "location_id", "auto"), ("impact", 1), ("impact", 2), ("impact", 3), ("impact", 4), ("impact", 5), ("impact", 6), ("impact", 7), ("assess", "comments"), ) form, form_accepted, assess_id = custom_assess(custom_assess_fields) if form_accepted: form = FORM(H1(settings.get_system_name_short()), H2(T("Short Assessment")), P(T("Assessment Reported")), A(T("Report Another Assessment..."), _href = URL(r=request) ), _class = "mobile", ) return dict(form = form) # ----------------------------------------------------------------------------- def color_code_severity_widget(widget, name): """ Utility function to colour-code Severity options """ for option, color in zip(widget, ["green", "yellow", "orange", "red"]): option[0].__setitem__("_style", 
"background-color:%s;" % color) option[0][0].__setitem__("_name", name) return widget # ----------------------------------------------------------------------------- def custom_assess(custom_assess_fields, location_id=None): """ Build a custom page to hide the complexity of the Assessments/Impacts/Summary model @ToDo: Improved validation - the existing .double JS isn't 100% reliable & this currently crashes the back-end upon submission if bad data slips through """ # Load Models assess_tables() impact_tables() form_rows = [] comment = "" for field in custom_assess_fields: name = "custom_%s_%s" % (field[0], field[1]) if field[0] == "assess": if field[1] == "comments": label = "%s:" % db.assess_assess[ field[1] ].label #widget = db.assess_assess[ field[1] ].widget widget = TEXTAREA(_name = name, _class = "double", _type = "text") elif field[1] == "location_id": if field[2] == "auto": # HTML5 Geolocate label = "%s:" % T("Location") #widget = db.assess_assess[ field[1] ].widget widget = DIV(INPUT(_name = name, _type = "text"), INPUT(_name = "gis_location_lat", _id = "gis_location_lat", _type = "text"), INPUT(_name = "gis_location_lon", _id = "gis_location_lon", _type = "text")) else: # Location Selector label = "%s:" % T("Location") #widget = SELECT(_id = name, # _class = "reference gis_location", # _name = "location_id") #response.s3.gis.location_id = "custom_assess_location_id" widget = db.assess_assess.location_id.widget(field=db.assess_assess.location_id, value="") elif field[0] == "baseline": label = S3Represent(lookup="assess_baseline_type")(field[1]) label = "%s:" % T(label) widget = INPUT(_name = name, _class = "double", _type = "text") elif field[0] == "impact": label = S3Represent(lookup="assess_impact_type")(field[1]) label = "%s:" % T(label) value_widget = INPUT(_name = name, _class = "double", _type = "text") severity_widget = db.assess_summary.value.widget(db.impact_impact.severity, 0, _name = "%s_severity" % name ) severity_widget = 
color_code_severity_widget(severity_widget, "%s_severity" % name) widget = DIV(value_widget, DIV("%s:" % T("Severity")), severity_widget, XML("&nbsp")) elif field[0] == "summary": label = "%s:" % T(org_subsector_represent(field[1])) widget = db.assess_summary.value.widget(db.assess_summary.value, 0, _name = name) widget = color_code_severity_widget(widget) # Add the field components to the form_rows if field[0] == "title": form_rows.append(TR(H3( field[1] ))) else: form_rows = form_rows + list(s3_formstyle("%s__row" % name, label, widget, comment)) form = FORM(TABLE(*form_rows), INPUT(_value = T("Save"), _type = "submit")) assess_id = None form_accepted = form.accepts(request.vars, session) if form_accepted: record_dict = {"organisation_id" : session.s3.organisation_id} for field in custom_assess_fields: if field[0] != "assess" or field[1] == "location": continue name = "custom__assess_%s" % field[1] if name in request.vars: record_dict[field[1]] = request.vars[name] # Add Location (must happen first) if "custom_assess_location_id" in request.vars: # Auto location_dict = {} if "gis_location_lat" in request.vars: location_dict["lat"] = request.vars["gis_location_lat"] if "gis_location_lon" in request.vars: location_dict["lon"] = request.vars["gis_location_lon"] location_dict["name"] = request.vars["custom_assess_location_id"] record_dict["location_id"] = s3db.gis_location.insert(**location_dict) if "location_id" in request.vars: # Location Selector record_dict["location_id"] = request.vars["location_id"] if location_id: # Location_id was passed to function record_dict["location_id"] = location_id # Add Assessment assess_id = db.assess_assess.insert(**record_dict) fk_dict = dict(baseline = "baseline_type_id", impact = "impact_type_id", summary = "subsector_id" ) component_dict = dict(baseline = "assess_baseline", impact = "impact_impact", summary = "assess_summary" ) # Add Assessment Components sector_summary = {} for field in custom_assess_fields: if field[0] == 
"assess": continue record_dict = {} name = "custom_%s_%s" % (field[0], field[1]) if name in request.vars: record_dict["assess_id"] = assess_id record_dict[fk_dict[ field[0] ] ] = field[1] record_dict["value"] = request.vars[name] if field[0] == "impact": severity = int(request.vars[name + "_severity"]) record_dict["severity"] = severity if not record_dict["value"] and not record_dict["severity"]: # Do not record impact if there is no data for it. # Should we still average severity though? Now not doing this continue # Record the Severity per sector table = db.impact_type row = db(table.id == field[1]).select(table.sector_id, limitby=(0, 1) ).first() sector_id = row.sector_id if sector_id in sector_summary.keys(): sector_summary[sector_id].append(severity) elif sector_id: sector_summary[sector_id] = [severity] db[component_dict[ field[0] ] ].insert(**record_dict) # Add Cluster summaries # @ToDo: make sure that this doesn't happen if there are sectors in the assess for sector_id in sector_summary.keys(): severity_values = sector_summary[sector_id] db.assess_summary.insert(assess_id = assess_id, sector_id = sector_id, # Average severity value = sum(severity_values) / len(severity_values) ) # Send Out Notification SMS #message = "Sahana: " + T("New Assessment reported from") + " %s by %s %s" % ( location_dict["name"], # session.auth.user.first_name, # session.auth.user.last_name # ) # Hard coded notification message for Demo #msg.send_by_pe_id(3, # message=message, # contact_method = 2) return form, form_accepted, assess_id # ============================================================================= def type(): """ RESTful CRUD controller """ return s3_rest_controller("impact", "type") # ============================================================================= def impact(): """ RESTful CRUD controller """ return s3_rest_controller("impact", "impact") # END =========================================================================
<gh_stars>10-100 import React from "react"; const Contact = () => ( <div> <h1>Contact Page</h1> <p> Integer cursus bibendum sem non pretium. Vestibulum in aliquet sem, quis molestie urna. Aliquam semper ultrices varius. Aliquam faucibus sit amet magna a ultrices. Aenean pellentesque placerat lacus imperdiet efficitur. In felis nisl, luctus non ante euismod, tincidunt bibendum mi. In a molestie nisl, eu sodales diam. Nam tincidunt lacus quis magna posuere, eget tristique dui dapibus. Maecenas fermentum elementum faucibus. Quisque nec metus vestibulum, egestas massa eu, sollicitudin ipsum. Nulla facilisi. Sed ut erat ligula. Nam tincidunt nunc in nibh dictum ullamcorper. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Etiam ornare rutrum felis at rhoncus. Etiam vel condimentum magna, quis tempor nulla. </p> </div> ); export default Contact;
#!/bin/bash
# Provision a Debian/Ubuntu box with core, forensic and privacy tooling.
# Must run as root (apt-get, writes under /etc and /opt).

## Core utils (deduplicated: "telnet" was listed twice)
apt-get install -y vim tar nano net-tools telnet curl wget

## Classic tools
apt-get install -y lynx

## Workflow tools
apt-get install -y tree

## Image utils
# Fixed: the Debian/Ubuntu package is named "imagemagick" (all lowercase);
# "ImageMagick" does not exist, so this install step always failed.
apt-get install -y imagemagick

## Forensics and penetration tools
apt-get install -y hydra john nmap aircrack-ng

# sqlmap (not packaged; cloned from upstream instead)
git clone https://github.com/sqlmapproject/sqlmap.git /opt/sqlmap

## Privacy tools
apt-get install -y tor gnupg

## proxychains
apt-get install -y proxychains
(
    sed -Ei 's/strict_chain/random_chain/g' /etc/proxychains.conf
    # NOTE(review): hard-coded public proxies; verify these are still alive.
    echo "socks4 58.20.0.246 1080" >> /etc/proxychains.conf
    echo "socks5 109.251.162.7 1080" >> /etc/proxychains.conf
    echo "socks4 125.71.88.121 1080" >> /etc/proxychains.conf
    echo "socks4 14.156.144.115 1080" >> /etc/proxychains.conf
    echo "socks4 181.15.217.116 1080" >> /etc/proxychains.conf
)

# torsocks-2.0.0 — replace the packaged binary with a source build
(
    rm `which torsocks`
    cd /tmp
    wget -O torsocks-2.0.0.tar.gz https://github.com/dgoulet/torsocks/archive/v2.0.0.tar.gz
    tar -xzf torsocks-2.0.0.tar.gz
    (
        cd torsocks-2.0.0
        ./autogen.sh && ./configure && make && make install
    )
)
"use strict"; const base64url = require('rfc4648').base64url; const HeaderMissingException = require('./exception/headermissingexception'); const InvalidMessageException = require('./exception/invalidmessageexception'); const queryString = require('query-string'); const SealingPublicKey = require('./cryptographykeys/sealingpublickey'); const SealingSecretKey = require('./cryptographykeys/sealingsecretkey'); const SharedAuthenticationKey = require('./cryptographykeys/sharedauthenticationkey'); const SharedEncryptionKey = require('./cryptographykeys/sharedencryptionkey'); const SigningPublicKey = require('./cryptographykeys/signingpublickey'); const SigningSecretKey = require('./cryptographykeys/signingsecretkey'); const Simple = require('./simple'); const { SodiumPlus } = require('sodium-plus'); const arrayToBuffer = require('typedarray-to-buffer'); const HEADER_AUTH_NAME = 'Body-HMAC-SHA512256'; const HEADER_SIGNATURE_NAME = 'Body-Signature-Ed25519'; let sodium; module.exports = class SapientCore { /** * @param {Object} request * @param {SharedAuthenticationKey} key */ static async authenticateRequestWithSharedKey(request, key) { if (!sodium) sodium = await SodiumPlus.auto(); let request2 = Object.assign({}, request); if (typeof request2.headers === 'undefined') { request2.headers = {}; } let mac = await sodium.crypto_auth( await SapientCore.serializeBody(request), key ); request2.headers[HEADER_AUTH_NAME] = base64url.stringify(mac); request2.resolveWithFullResponse = true; return request2; } /** * * @param {Object} request * @param {SharedEncryptionKey} key * @return {Object} */ static async decryptRequestWithSharedKey(request, key) { let request2 = Object.assign({}, request); request2.body = await Simple.decrypt( base64url.parse(request.body), key ); request2.resolveWithFullResponse = true; return request2; } /** * * @param {Object} request * @param {SharedEncryptionKey} key * @return {Object} */ static async encryptRequestWithSharedKey(request, key) { let 
request2 = Object.assign({}, request); request2.body = base64url.stringify( await Simple.encrypt( await SapientCore.serializeBody(request), key ) ); request2.resolveWithFullResponse = true; return request2; } /** * @param {Object} request * @param {SealingPublicKey} pk * @return {Object} */ static async sealRequest(request, pk) { let request2 = Object.assign({}, request); request2.body = base64url.stringify( await Simple.seal( await SapientCore.serializeBody(request), pk ) ); request2.resolveWithFullResponse = true; return request2; } /** * @param {Object} request * @param {SealingSecretKey} sk * @return {Object} */ static async unsealRequest(request, sk) { let request2 = Object.assign({}, request); request2.body = await Simple.unseal( arrayToBuffer(base64url.parse(request.body)), sk ); request2.resolveWithFullResponse = true; return request2; } /** * * @param {Object} request * @param {SigningSecretKey} sk * @return {Object} */ static async signRequest(request, sk) { if (!sodium) sodium = await SodiumPlus.auto(); let request2 = Object.assign({}, request); if (typeof (request2.headers) === 'undefined') { request2.headers = {}; } request2.headers[HEADER_SIGNATURE_NAME] = base64url.stringify( await sodium.crypto_sign_detached( await SapientCore.serializeBody(request), sk ) ); request2.resolveWithFullResponse = true; return request2; } /** * Verifies the signature contained in the Body-Signature-Ed25519 header * is valid for the HTTP Request body provided. Will either return the * request given, or throw an InvalidMessageException if the signature * is invalid. Will also throw a HeaderMissingException is there is no * Body-Signature-Ed25519 header. 
* * @param {Object} request * @param {SigningPublicKey} pk * @return {Object} */ static async verifySignedRequest(request, pk) { if (!sodium) sodium = await SodiumPlus.auto(); let request2 = Object.assign({}, request); if (typeof (request2.headers) === 'undefined') { throw new HeaderMissingException('No headers to verify'); } let valid = false; let body = await SapientCore.serializeBody(request2); for (let h in request2.headers) { if (request2.headers.hasOwnProperty(h)) { if (h.toLowerCase() === HEADER_SIGNATURE_NAME.toLowerCase()) { // This header, when cast to lowercase, matches. if (typeof(request2.headers[h]) === 'string') { // Single header check valid = valid || await sodium.crypto_sign_verify_detached( body, pk, base64url.parse(request2.headers[h]) ); } else { // Multiple header checks for (let head of request2.headers[h]) { valid = valid || await sodium.crypto_sign_verify_detached( body, pk, base64url.parse(head) ); if (valid) { break; } } } if (valid) { break; } } } } if (!valid) { throw new InvalidMessageException(`No valid ${HEADER_SIGNATURE_NAME} header found`); } request2.body = body; request2.resolveWithFullResponse = true; return request2; } /** * @param {Object} request * @param {SharedAuthenticationKey} key * @return {Object} */ static async verifySymmetricAuthenticatedRequest(request, key) { if (!sodium) sodium = await SodiumPlus.auto(); let request2 = Object.assign({}, request); if (typeof (request2.headers) === 'undefined') { throw new HeaderMissingException('No headers to verify'); } let valid = false; let body = await SapientCore.serializeBody(request); for (let h in request2.headers) { if (request2.headers.hasOwnProperty(h)) { if (h.toLowerCase() === HEADER_AUTH_NAME.toLowerCase()) { // This header, when cast to lowercase, matches. 
if (typeof(request2.headers[h]) === 'string') { // Single header check valid = valid || await sodium.crypto_auth_verify( body, key, base64url.parse(request2.headers[h]) ); } else { // Multiple header checks for (let head of request2.headers[h]) { valid = valid || await sodium.crypto_auth_verify( body, key, base64url.parse(head) ); if (valid) { break; } } } if (valid) { break; } } } } if (!valid) { throw new InvalidMessageException(`No valid ${HEADER_SIGNATURE_NAME} header found`); } request2.body = body; request2.resolveWithFullResponse = true; return request2; } /** * * @param {Object} obj * @param {string|null} prefix * @return {string} * @link https://stackoverflow.com/a/1714899 */ static objectToRequestParams(obj, prefix = null) { let str = []; for (let p in obj) { if (obj.hasOwnProperty(p)) { let k = prefix ? prefix + "[" + p + "]" : p, v = obj[p]; str.push((v !== null && typeof v === "object") ? SapientCore.objectToRequestParams(v, k) : encodeURIComponent(k) + "=" + encodeURIComponent(v)); } } return str.join("&"); } /** * * @param {Object} httpMessage * @return {Promise<string>} */ static async serializeBody(httpMessage) { if (typeof httpMessage.json !== 'undefined') { if (httpMessage.json) { return JSON.stringify(httpMessage.body); } } if (typeof httpMessage.body === 'string') { if (httpMessage.body) { return httpMessage.body; } } if (typeof httpMessage.form !== 'undefined') { return SapientCore.objectToRequestParams(httpMessage.form); } return ''; } /** * @param {Object} response * @param {SharedAuthenticationKey} key * @return {Object} */ static async authenticateResponseWithSharedKey(response, key) { return await SapientCore.authenticateRequestWithSharedKey(response, key); } /** * @param {Object} response * @param {SharedEncryptionKey} key * @return {Object} */ static async decryptResponseWithSharedKey(response, key) { return await SapientCore.decryptRequestWithSharedKey(response, key); } /** * @param {Object} response * @param {SharedEncryptionKey} key 
* @return {Object} */ static async encryptResponseWithSharedKey(response, key) { return await SapientCore.encryptRequestWithSharedKey(response, key); } /** * * @param {Object} response * @param {SealingPublicKey} pk * @return {Object} */ static async sealResponse(response, pk) { return await SapientCore.sealRequest(response, pk); } /** * * @param {Object} response * @param {SealingSecretKey} sk * @return {Object} */ static async unsealResponse(response, sk) { return await SapientCore.unsealRequest(response, sk); } /** * @param {Object} response * @param {SigningSecretKey} sk * @return {Object} */ static async signResponse(response, sk) { return await SapientCore.signRequest(response, sk); } /** * @param {Object} response * @param {SigningPublicKey} pk * @return {Object} */ static async verifySignedResponse(response, pk) { return await SapientCore.verifySignedRequest(response, pk); } /** * @param {Object} response * @param {SharedAuthenticationKey} key * @return {Object} */ static async verifySymmetricAuthenticatedResponse(response, key) { return await SapientCore.verifySymmetricAuthenticatedRequest(response, key); } };
# Regenerate protobuf bindings for every cashweb sub-package.
for pkg in keyserver relay bip70 auth_wrapper; do
    (
        cd "./src/cashweb/$pkg" && bash ./generate_protobufs.sh
    )
done
#!/bin/bash
# Launch a GPU Jupyter notebook container (with a visdom port) for
# deep-learning work, building the image first if it is missing.
#
# Usage: run.sh [env] [notebook_port] [visdom_port]

# Fixed: "${1:"pytorch"}" is invalid parameter-expansion syntax ("bad
# substitution"); ":-" supplies the default.
DL_ENV=${1:-pytorch}
NOTEBOOK_PORT=${2:-8888}
VISDOM_PORT=${3:-8097}
PYTORCH_IMAGE=pytorch:0.4.1-py3-gpu

# Fixed: `[ ${DL_ENV}=="pytorch" ]` is one non-empty word and is therefore
# always true; use a real string comparison so the else branch can fire.
if [ "${DL_ENV}" = "pytorch" ]; then
    # Build the image only when it is not present locally.
    if [[ ! $(docker images -q ${PYTORCH_IMAGE}) ]]; then
        docker build . -t ${PYTORCH_IMAGE} -f ./docker/Dockerfile.pytorch
    fi
    # this should run a pytorch notebook container
    docker run --runtime=nvidia --shm-size 8G -v `pwd`:/workspace \
        -p ${NOTEBOOK_PORT}:8888 -p ${VISDOM_PORT}:8097 \
        --name pytorch_notebook ${PYTORCH_IMAGE}
    # NOTE(review): `docker run` above is foreground (no -d), so this exec
    # only runs after the container stops — confirm intended behaviour.
    docker exec pytorch_notebook jupyter notebook list
else
    exit 1
fi
#!/bin/bash
#SBATCH --time=3:00:00
#SBATCH --mem-per-cpu=32000
#SBATCH --cpus-per-task=1
#SBATCH --mail-user=__EMAIL__
#SBATCH --mail-type=__EMAIL_TYPE__
#SBATCH --error=__ERR_LOG__
#SBATCH --output=__OUT_LOG__
#SBATCH --workdir=__WORK_DIR__

# All jobs receive a different number, from 1 to 18; that number is
# stored in $SLURM_ARRAY_TASK_ID

module load hisat2/2.1.0

inputFiles=../data/_all_fq.txt
###strTie_Assembly_list="../results/assembly_GTF_list.txt"
###strTie_merged="../results/stringtie_merged.gtf"

# Here I pick line number ${SLURM_ARRAY_TASK_ID}
f1=$(sed -n "${SLURM_ARRAY_TASK_ID}p" $inputFiles)

# and run the rest of the script
f2=${f1/_1.fq.gz/_2.fq.gz}
gunzip -k $f1
gunzip -k $f2
read1=${f1/fq.gz/fq}
read2=${f2/fq.gz/fq}
chmod 750 $read1
chmod 750 $read2

# Fixed: this variable was misspelled "lable" while the checkpoint line at
# the bottom echoes "$label", which therefore always expanded to nothing.
label=${read2/_2.fq/}

out=${read2/_2.fq/_hisat2.sam}
out=${out/data/results}
outsplice=${out/_hisat2.sam/_hisat2_spliced.sam}
log=${out/.sam/.log}
log=${log/results/results\/_logs}
metfile=${out/.sam/_metric.txt}
summary=${metfile/_metric/_summary}

# NOTE(review): hisat2 is given -p 8 but the job only requests
# --cpus-per-task=1 — confirm the intended core count.
time hisat2 --dta -p 8 --trim3 40 --skip 10 --no-mixed \
    --novel-splicesite-outfile $outsplice --no-discordant \
    --downstream-transcriptome-assembly --new-summary \
    --summary-file=$summary --met-file $metfile \
    -x $__GENOME_DIR__/Mus_musculus.GRCm38/Mus_musculus.GRCm38 \
    -1 $read1 -2 $read2 -S $out &> $log

# Remove the uncompressed copies; the .fq.gz originals are kept (-k above).
rm $read1 $read2
echo "hisat2 "$label >> ../results/_RNA-Seq_checkpoint.txt
chmod 750 $outsplice $out $metfile
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2966-1
#
# Security announcement date: 2016-05-09 00:00:00 UTC
# Script generation date:     2017-01-01 21:05:24 UTC
#
# Operating System: Ubuntu 15.10
# Architecture: i686
#
# Vulnerable packages fix on version:
#    - openssh-server:1:6.9p1-2ubuntu0.2
#
# Last versions recommanded by security team:
#    - openssh-server:1:6.9p1-2ubuntu0.2
#
# CVE List:
#    - CVE-2015-8325
#    - CVE-2016-1907
#    - CVE-2016-1908
#    - CVE-2016-3115
#
# More details:
#    - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

# Upgrade openssh-server (only; no new install) to the patched version above.
sudo apt-get install --only-upgrade openssh-server=1:6.9p1-2ubuntu0.2 -y
import { request } from '../http/client'

// GraphQL document fetching every recorded diet-log entry.
const dietQuery = `
query {
  dietLogs{
    id
    date
    fat
    weight
    protein
    carbs
    calories
  }
}
`

/** Fetch all diet logs and normalise each entry for client use. */
export const getDietLogs = async () => {
  const response = await request({ query: dietQuery })
  return response.data.dietLogs.map(fromApi)
}

// The server time is UTC; strip the time portion and reparse with slashes
// so the date is interpreted as a local calendar date (sidesteps timezone
// off-by-one-day issues).
function fromApi(item) {
  const dateOnly = item.date.replace(/-/g, '/').replace(/T.+/, '')
  return { ...item, date: new Date(dateOnly) }
}
import React, { FC } from 'react'
import enUS from './en-us'
import { FormattedMessage, MessageDescriptor, useIntl } from 'react-intl'

// Message ids are constrained to the keys of the English catalogue.
type Id = keyof typeof enUS;

interface Props extends MessageDescriptor {
  id: Id;
}

/** Type-safe wrapper around <FormattedMessage/> (children are stripped). */
export const LocaleFormatter: FC<Props> = (props) => {
  const withoutChildren = { ...props, children: undefined }
  return <FormattedMessage {...withoutChildren} id={props.id} />
}

type FormatMessageProps = (descriptor: Props) => string;

/** useIntl() with formatMessage narrowed to our message ids. */
export const useLocale = () => {
  const { formatMessage: intlFormatMessage, ...rest } = useIntl()
  const formatMessage: FormatMessageProps = intlFormatMessage
  return {
    ...rest,
    formatMessage
  }
}
/**
 * Format a millisecond epoch timestamp as "Y/M/D h:m:s" in local time
 * (components are NOT zero-padded, matching the original behaviour).
 */
function convertTimestamp(timestamp) {
  const d = new Date(timestamp);
  const datePart = [d.getFullYear(), d.getMonth() + 1, d.getDate()].join('/');
  const timePart = [d.getHours(), d.getMinutes(), d.getSeconds()].join(':');
  return `${datePart} ${timePart}`;
}

let timestamp = 1534421521500;
let date = convertTimestamp(timestamp);
console.log(date); // e.g. 2018/8/18 12:25:21 (depends on local timezone)
#!/bin/bash
# Download resources.tar.gz from Google Drive (large-file confirm flow),
# unpack it in place, then remove the archive.

FILE_ID="1riAjV_A-jD-r5tdrJQpc0WCc1kOUExRZ"
BASE_URL="https://drive.google.com/uc?export=download"

# First request stores the session cookie carrying the "can't scan for
# viruses" confirmation token Drive issues for large files.
curl -sc /tmp/cookie "${BASE_URL}&id=${FILE_ID}" > /dev/null
CODE="$(awk '/_warning_/ {print $NF}' /tmp/cookie)"

# Second request replays the cookie plus the confirm code and follows
# redirects to the actual file.
curl -Lb /tmp/cookie "${BASE_URL}&confirm=${CODE}&id=${FILE_ID}" -o resources.tar.gz
tar -zxvf resources.tar.gz
rm resources.tar.gz
echo Download finished.
from django import forms
from crispy_forms.helper import FormHelper


class DelUserForm(forms.Form):
    """Crispy-rendered form used for deleting users.

    Declares no fields of its own; it only configures a crispy
    ``FormHelper`` so the template can lay it out without a wrapping
    ``<form>`` tag.
    """

    def __init__(self, *args, **kwargs):
        super(DelUserForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        # The surrounding template supplies its own <form> element.
        self.helper.form_tag = False

    def delete_user(self, user_id):
        """Delete the ``User`` with primary key ``user_id``.

        Returns ``True`` when a user was found and deleted, ``False``
        when no user with that id exists.
        """
        # Assume User model exists and can be imported
        from myapp.models import User
        try:
            User.objects.get(id=user_id).delete()
            return True  # User deleted successfully
        except User.DoesNotExist:
            return False  # User with given ID does not exist
# run the script with the command:
# sh 22.hicexplorer_mergeStats.sh xenome filtered_hicExplorer.txt
#
# Merges all per-sample HiCExplorer stat tables whose filename contains the
# given PDX type ($1) into one tab-separated file ($2), then deletes the
# per-sample inputs.

PDXtype=$1

# Join the tables side by side and keep columns 1, 7, 8, 2-5 in that order.
# NOTE(review): column semantics come from the upstream stats format —
# confirm against the files produced by the earlier pipeline step.
paste *${PDXtype}*.txt | awk '{print $1,$7,$8,$2,$3,$4,$5}' OFS="\t" > $2

# Inputs are consumed: remove them once the merged table is written.
rm *${PDXtype}*.txt
package com.bustiblelemons.cthulhator.character.viewer.logic;

/**
 * Created by hiv on 08.02.15.
 *
 * Callback interface for components that report their measured height.
 */
public interface HeightSizeListener {
    /**
     * Invoked once a height has been determined.
     *
     * @param reporter the object reporting the measurement
     * @param height   the reported height (presumably pixels — confirm with callers)
     */
    void onHeightSizeReported(Object reporter, int height);
}
# Count words in a text
tr -sc 'A-Za-z' '\n' < ../data/nyt_200811.txt | sort | uniq -c | head -n 10

# Extended counting exercises
# Downcase everything
tr -sc 'A-Za-z' '\n' < ../data/nyt_200811.txt | tr 'A-Z' 'a-z' | sort | uniq -c | head -n 10
tr -sc 'A-Za-z' '\n' < ../data/nyt_200811.txt | tr '[:upper:]' '[:lower:]' | sort | uniq -c | head -n 10

# Common different sequences of vowels
tr -sc 'A-Za-z' '\n' < ../data/nyt_200811.txt | tr '[:upper:]' '[:lower:]' | tr -sc 'aeiou' '\n' | sort | uniq -c | head -n 10

# Counting and sorting exercises
tr -sc 'A-Za-z' '\n' < ../data/nyt_200811.txt | tr '[:upper:]' '[:lower:]' | sort | uniq -c | sort -nr | head -n 50

# Find the words in the NYT that end in "zz"
tr -sc 'A-Za-z' '\n' < ../data/nyt_200811.txt | tr '[:upper:]' '[:lower:]' | rev | sort | uniq -c | rev | tail -n 10
tr -sc 'A-Za-z' '\n' < ../data/nyt_200811.txt | tr '[:upper:]' '[:lower:]' | sort | uniq -c | grep 'zz$'

# Bigrams
tr -sc 'A-Za-z' '\n' < ../data/nyt_200811.txt > ../data/nyt.words
# Fixed: nyt.words/nyt.nextwords are written under ../data, but the next two
# commands referenced them without the path and so failed.
tail -n +2 ../data/nyt.words > ../data/nyt.nextwords
paste ../data/nyt.words ../data/nyt.nextwords > ../data/nyt.bigrams
head -n 5 ../data/nyt.bigrams

# Top 10 most common bigrams
tr '[:upper:]' '[:lower:]' < ../data/nyt.bigrams | sort | uniq -c | sort -nr | head -n 10

# Trigrams
tail -n +3 ../data/nyt.words > ../data/nyt.nextnextwords
paste ../data/nyt.words ../data/nyt.nextwords ../data/nyt.nextnextwords > ../data/nyt.trigrams
cat ../data/nyt.trigrams | tr '[:upper:]' '[:lower:]' | sort | uniq -c | sort -rn | head -n 10

# grep and wc
# (Fixed: these used "data/nyt.words", inconsistent with the "../data" path
# every other command writes to.)
# how many all uppercase words are there in this NYT file?
egrep '^[A-Z]+$' ../data/nyt.words | wc -l
# how many four letter words?
egrep '^\w{4}$' ../data/nyt.words | wc -l
# how many different words are there with no vowels
egrep -v '[AEIOUaeiou]' ../data/nyt.words | sort | uniq | wc -l
# how many one syllable words are there?
# Fixed: egrep/sed were given the file as an operand, which made them read the
# file directly and silently ignore the lowercased stream piped in by tr.
# Also: uniq only collapses adjacent duplicates, so sort first.
tr '[:upper:]' '[:lower:]' < ../data/nyt.words | egrep '^[^aeiouAEIOU]*[aeiouAEIOU]+[^aeiouAEIOU]*$' | sort | uniq | wc -l

# sed exercises
# Count frequency of word initial consonant sequences
tr '[:upper:]' '[:lower:]' < ../data/nyt.words | sed 's/[aeiouAEIOU].*$//' | sort | uniq -c

# Count word final consonant sequences
tr '[:upper:]' '[:lower:]' < ../data/nyt.words | sed 's/^.*[aeiou]//g' | sort | uniq -c | sort -rn | less
<reponame>nileshaggarwal/Resume-Builder import React, { Component } from "react"; import Profile from "./Profile"; import Education from "./Education"; import Projects from "./Projects"; import Experience from "./Experience"; import Extras from "./Extras"; import { isAutheticated } from "../Pages/helper/authhelper"; export class Resume extends Component { state = { step: 1, // Personal Profile Details... firstname: "", lastname: "", email: "", phone: "", github: "", linkedin: "", facebook: "", instagram: "", // Education Information college: "", fromyear1: "", toyear1: "", qualification1: "", description1: "", school: "", fromyear2: "", toyear2: "", qualification2: "", description2: "", // Project Information... title1: "", link1: "", projectDescription1: "", title2: "", link2: "", projectDescription2: "", title3: "", link3: "", projectDescription3: "", // Experience Information institute1: "", position1: "", duration1: "", experienceDescription1: "", institute2: "", position2: "", duration2: "", experienceDescription2: "", // Extra Information skill1: "", skill2: "", skill3: "", skill4: "", skill5: "", skill6: "", interest1: "", interest2: "", interest3: "", interest4: "", interest5: "", interest6: "", }; nextStep = () => { const { step } = this.state; this.setState({ step: step + 1, }); }; prevStep = () => { const { step } = this.state; this.setState({ step: step - 1, }); }; handleChange = ({ target: { value, name } }) => { this.setState({ [name]: value }); }; render() { let { user } = isAutheticated(); const { step } = this.state; const { // Profile-Information firstname, lastname, email, phone, website, github, linkedin, twitter, facebook, instagram, // Education Information college, fromyear1, toyear1, qualification1, description1, school, fromyear2, toyear2, qualification2, description2, // Project Information... 
title1, link1, projectDescription1, title2, link2, projectDescription2, title3, link3, projectDescription3, // Experience Information institute1, position1, duration1, experienceDescription1, institute2, position2, duration2, experienceDescription2, // Extra Information skill1, skill2, skill3, skill4, skill5, skill6, interest1, interest2, interest3, interest4, interest5, interest6, } = this.state; const values = { // Profile-Information firstname, lastname, email, phone, website, github, linkedin, twitter, facebook, instagram, // Education Information college, fromyear1, toyear1, qualification1, description1, school, fromyear2, toyear2, qualification2, description2, // Project Information... title1, link1, projectDescription1, title2, link2, projectDescription2, title3, link3, projectDescription3, // Experience Information institute1, position1, duration1, experienceDescription1, institute2, position2, duration2, experienceDescription2, // Extra Information skill1, skill2, skill3, skill4, skill5, skill6, interest1, interest2, interest3, interest4, interest5, interest6, }; switch (step) { case 1: return ( <div className="App mt-3"> <div className="container col-lg-10 mx-auto text-center"> {isAutheticated() && ( <p style={{ fontSize: "30px", fontWeight: "700" }}> Hi {user.name} </p> )} <Profile nextStep={this.nextStep} handleChange={this.handleChange} values={values} /> </div> </div> ); case 2: return ( <div className="App mt-3"> <div className="container col-lg-10 mx-auto text-center"> <Education nextStep={this.nextStep} prevStep={this.prevStep} handleChange={this.handleChange} values={values} /> </div> </div> ); case 3: return ( <div className="App mt-3"> <div className="container col-lg-8 mx-auto text-center"> <Projects nextStep={this.nextStep} prevStep={this.prevStep} handleChange={this.handleChange} values={values} /> </div> </div> ); case 4: return ( <div className="App mt-3"> <div className="container col-lg-10 mx-auto text-center"> <Experience 
nextStep={this.nextStep} prevStep={this.prevStep} handleChange={this.handleChange} values={values} /> </div> </div> ); case 5: return ( <div className="App mt-3"> <div className="container col-lg-10 mx-auto text-center"> <Extras prevStep={this.prevStep} handleChange={this.handleChange} values={values} /> </div> </div> ); default: return <div />; } } } export default Resume;
package me.fabsi23.worldrollback.utils;

import org.bukkit.Bukkit;

import me.fabsi23.worldrollback.config.WorldRollbackConfig;

/**
 * Thin wrappers around Bukkit's server logger that prefix every message
 * with the plugin's configured console prefix.
 */
public class Logging {

    /** Log {@code message} at WARNING level, prefixed. */
    public static void logWarning(String message) {
        Bukkit.getLogger().warning(WorldRollbackConfig.getConsolePrefix() + " " + message);
    }

    /** Log {@code message} at INFO level, prefixed. */
    public static void logInfo(String message) {
        Bukkit.getLogger().info(WorldRollbackConfig.getConsolePrefix() + " " + message);
    }
}
#! /usr/bin/env bash

# Upgrades Batect in every sibling project checkout listed below to the
# latest released version, committing and pushing the wrapper changes.

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Sibling repositories (checked out next to this repo's parent) to update.
PROJECTS=(
  batect-sample-cypress
  batect-sample-golang
  batect-sample-java
  batect-sample-ruby
  batect-sample-seq
  batect-sample-typescript
  bundle-dev-bundle
  golang-bundle
  hadolint-bundle
  hello-world-bundle
  java-bundle
  node-bundle
  shellcheck-bundle
)

# Resolve the latest release name from the GitHub releases API.
function getLatestVersion {
  curl --fail --silent --show-error https://api.github.com/repos/batect/batect/releases/latest | jq -r '.name'
}

# Upgrade one project's Batect wrapper and push the change.
# $1 = project directory name, $2 = version string (for the commit message).
function updateProject {
  project_name=$1
  commit_message="Update Batect to $2."
  project_dir="$(cd "$SCRIPT_DIR/../../$project_name" && pwd)"

  {
    echo "Updating $project_name..."
    cd "$project_dir"

    # Refuse to touch a checkout with uncommitted changes.
    if output=$(git status --porcelain) && [ ! -z "$output" ]; then
      echo "Error: the working copy in $project_dir is dirty."
      exit 1
    fi

    git pull --ff-only
    ./batect --upgrade
    git add batect
    git add batect.cmd

    # Only commit/push when the upgrade actually changed the wrapper files.
    if output=$(git status --porcelain) && [ ! -z "$output" ]; then
      git commit -m "$commit_message"
      git push
    else
      echo "$project_dir is already up-to-date."
    fi
  }
}

function main {
  echo "Getting latest version info..."
  latestVersion=$(getLatestVersion)
  echo "Latest version is $latestVersion."
  echo

  for project in "${PROJECTS[@]}"; do
    updateProject "$project" "$latestVersion"
    echo
  done
}

main
import Tectonic from './tectonic';

// Boot the game once the page has fully loaded.
window.onload = () => {
  new Tectonic().run();
};
import {IServerResponse} from './iserver-response';
import {IUser} from './user';

/** Shape of a successful login response from the server. */
export interface ILoginResponse extends IServerResponse {
  payload: ILoginPayload;
}

/** Login payload: the authenticated user's data plus their session token. */
interface ILoginPayload {
  userData: IUser;
  token: string;
}
class FormValidator {
    /**
     * Validate a map of field name => submitted value.
     *
     * @param array $fields Submitted form data keyed by field name.
     * @return array|false Map of field name => error message when any field
     *                     fails validation; false when everything is valid.
     */
    public function validateFields($fields) {
        $errors = [];
        foreach ($fields as $fieldName => $fieldValue) {
            // Example validation rules (replace with actual validation rules)
            $invalidEmail = $fieldName === 'email'
                && filter_var($fieldValue, FILTER_VALIDATE_EMAIL) === false;
            if ($invalidEmail) {
                $errors[$fieldName] = 'Invalid email format';
            }
            // Add more validation rules for other fields as needed
        }
        // NOTE: historical contract — false signals "no errors" rather than [].
        return empty($errors) ? false : $errors;
    }
}
import React from 'react'; import PropTypes from 'prop-types'; import Svg, { Path } from 'react-native-svg'; const NAVIGATION_HOME = ({ width, height, color }) => ( <Svg width={width} height={height} viewBox="0 0 18 18"> <Path d="M17 9C17 13.4183 13.4183 17 9 17C4.58172 17 1 13.4183 1 9C1 4.58172 4.58172 1 9 1C13.4183 1 17 4.58172 17 9Z" stroke={color} strokeWidth={2} /> </Svg> ); NAVIGATION_HOME.propTypes = { width: PropTypes.number, height: PropTypes.number, color: PropTypes.string, }; NAVIGATION_HOME.defaultProps = { width: 18, height: 18, color: '#F5F4FF', }; export default NAVIGATION_HOME;
#!/usr/bin/env bash

# Builds the snap package and publishes it to the snap store channel
# (stable/edge/beta) derived from the current Buildkite branch.
# On pull requests or outside CI, publishing commands are echoed instead
# of executed (DRYRUN).

set -e

cd "$(dirname "$0")/.."

if ! ci/version-check.sh stable; then
  # This job doesn't run within a container, try once to upgrade tooling on a
  # version check failure
  rustup install stable
  ci/version-check.sh stable
fi

DRYRUN=
if [[ -z $BUILDKITE_BRANCH ]]; then
  DRYRUN="echo"
fi

if ./ci/is-pr.sh; then
  DRYRUN="echo"
  CHANNEL="none (pullrequest)"
fi

# Defines STABLE_CHANNEL / EDGE_CHANNEL / BETA_CHANNEL branch names.
eval "$(ci/channel-info.sh)"

if [[ $BUILDKITE_BRANCH = "$STABLE_CHANNEL" ]]; then
  CHANNEL=stable
elif [[ $BUILDKITE_BRANCH = "$EDGE_CHANNEL" ]]; then
  CHANNEL=edge
elif [[ $BUILDKITE_BRANCH = "$BETA_CHANNEL" ]]; then
  CHANNEL=beta
fi

if [[ -z $CHANNEL ]]; then
  echo Unable to determine channel to publish into, exiting.
  exit 0
fi

# Real publishes need store credentials, decrypted from the repo.
if [[ -z $DRYRUN ]]; then
  [[ -n $SNAPCRAFT_CREDENTIALS_KEY ]] || {
    echo SNAPCRAFT_CREDENTIALS_KEY not defined
    exit 1;
  }
  (
    openssl aes-256-cbc -d \
      -in ci/snapcraft.credentials.enc \
      -out ci/snapcraft.credentials \
      -k "$SNAPCRAFT_CREDENTIALS_KEY"

    snapcraft login --with ci/snapcraft.credentials
  ) || {
    # Never leave decrypted credentials on disk after a failure.
    rm -f ci/snapcraft.credentials;
    exit 1
  }
fi

set -x

echo --- checking for multilog
if [[ ! -x /usr/bin/multilog ]]; then
  if [[ -z $CI ]]; then
    echo "multilog not found, install with: sudo apt-get install -y daemontools"
    exit 1
  fi
  sudo apt-get install -y daemontools
fi

echo --- build: $CHANNEL channel
snapcraft

source ci/upload_ci_artifact.sh
upload_ci_artifact solana_*.snap

if [[ -z $DO_NOT_PUBLISH_SNAP ]]; then
  echo --- publish: $CHANNEL channel
  $DRYRUN snapcraft push solana_*.snap --release $CHANNEL
fi
const JobAction = require('../JobAction').AbstractJobAction
const HandleConnectionErrorAction = require('./HandleConnectionErrorAction').HandleConnectionErrorAction
const Errors = require('../error/errors')
const ScrapeError = Errors.ScrapeError
const PDFParseError = Errors.PDFParseError

/**
 * Job action that inspects a download failure and decides whether the job
 * should be re-enqueued (connection failures and known data-corruption
 * failures) or returned to the caller untouched.
 */
class HandleDownloadErrorAction extends JobAction {
  /**
   * @param {Function} callback job-queue callback used to re-enqueue work
   * @param {Function} createFn factory producing a new job from (params, callback)
   * @param {Object} params job parameters; `url` and `id` appear in log messages
   */
  constructor (callback, createFn, params) {
    super()
    // Known failure signatures searched for in error messages.
    this.pdfError = 'InvalidPDF'
    this.parseError = 'ParseException'
    this.freeError = 'Cannot read property \'free\' of undefined'
    this.callback = callback
    this.create = createFn
    this.params = params
    this.error = null
    this.handled = false
  }

  /**
   * Handles the given error.
   *
   * @param {Error} e the download error
   * @returns {Error|undefined} the original error when it was not recognized
   *   (so the caller can deal with it); undefined when it was handled by
   *   re-enqueueing the job.
   */
  async perform (e) {
    this.requeueConnectionFailures(e)
    this.requeueDataFailure(e)
    if (!this.handled) {
      return e
    }
  }

  /**
   * Re-enqueues the job when the message matches a known connection failure,
   * recording a ScrapeError that describes the retry.
   */
  requeueConnectionFailures (e) {
    const connectionError = HandleConnectionErrorAction.connectionErrorName(e.message)
    if (connectionError) {
      this.handled = true
      const message = `ERROR: Connection failure ${connectionError}, re-enqueueing job: ${this.params.url}`
      console.debug(message)
      this.error = new ScrapeError(message, this.params.url)
      this.callback([ this.create(this.params, this.callback) ])
      return this.error
    }
  }

  /**
   * Re-enqueues the job when the message matches one of the known
   * data-corruption signatures (invalid PDF, parse exception, or the
   * "free of undefined" crash).
   */
  requeueDataFailure (e) {
    if (this.handled) {
      return
    }
    // FIX: identify which signature actually matched so the log names the
    // real failure. The previous `this.pdfError || (this.parseError || '')`
    // expression always evaluated to 'InvalidPDF' (both operands are
    // constant truthy strings), mislabelling parse and "free" failures.
    const matched = [this.pdfError, this.parseError, this.freeError]
      .find(signature => e.message.includes(signature))
    if (matched) {
      this.handled = true
      this.error = new PDFParseError()
      this.error.message = `ERROR: PDF data for: ${this.params.id} was corrupted: ${matched}, from job: ${this.params.url}. Will attempt to re-parse`
      console.debug(this.error.message)
      this.callback([ this.create(this.params, this.callback) ])
      return this.error
    }
  }
}

module.exports.HandleDownloadErrorAction = HandleDownloadErrorAction
module.exports.PDFParseError = PDFParseError
#!/bin/bash
# SLURM submission driver for the hyq_atac TGS pipeline. For each library in
# $lib_list it dispatches pipeline.sh, either one job per library, one job per
# 96-well cell, or one job per (outer barcode, inner barcode) pair for the
# high-throughput layout. Select the dispatch mode by (un)commenting one of
# the run_* calls at the bottom of the file.

root=/gpfs1/tangfuchou_pkuhpc/tangfuchou_coe/jiangzh/
root_dir=$root/hyq_atac/tgs/
script=$root_dir/pipeline.sh

# NOTE(review): `-f` tests for a regular file, but log/ is created as a
# directory below — presumably `-d` was intended; mkdir -p is harmless either
# way, confirm before changing.
if [ ! -f $root_dir/log ]
then
    mkdir -p $root_dir/log
fi

# 96-cell library
#lib_list="SMA-HG01 SMA-K562 20210222_ATAC_ONT 2104_293T 2104_HePG2 2104_HFF1 \
#  2105_H293T-96 2105_HG01-96 2105_K562-96 2105_HepG2-96 2105_HFF1-96"

#lib_list="2106_X-HG001_12"
#barcode="81 82"
#lib_list="2106_X-HG001_34"
#barcode="83 84"

#lib_list="2105_AL480"
#barcode=`seq 81 90`

#lib_list="2107_AL960_1 2107_AL960_2 2107_ALC1 2107_ALC2 2108_EHF 2110_GK 2110_HH 2110_CD19 2110_CD48 2110_PBMC"
#barcode=`seq 73 92`

# 220111
# quality control redo
lib_list="2107_ALC2"
barcode=`seq 73 92`

# Sets the SLURM job name ($job) and owner tag ($name) for the current $lib.
# Relies on $lib being set by the calling loop.
info(){
    job=hyq_atac_$lib
    name=jzh
}

# One sbatch job per library on the cn-long partition (whole-library run,
# barcode list passed through to pipeline.sh).
run_long_one(){
    threads=10
    for lib in $lib_list
    do
        info
        sbatch \
            -J ${name}_${job} \
            -A tangfuchou_g1 \
            -p cn-long \
            --qos tangfuchoucnl \
            -c $threads \
            -o $root_dir/log/stdout_$job.log \
            -e $root_dir/log/stderr_$job.log \
            $script $lib . $threads $root_dir "$barcode"
    done
}

# One sbatch job per cell (sc1..sc96) of each 96-cell library.
run_long_96(){
    threads=4
    for lib in $lib_list
    do
        for bc in `seq 1 96`
        do
            info
            sbatch \
                -J ${name}_${job} \
                -A tangfuchou_g1 \
                -p cn-long \
                --qos tangfuchoucnl \
                -c $threads \
                -o $root_dir/log/stdout_$job.log \
                -e $root_dir/log/stderr_$job.log \
                $script $lib sc$bc $threads $root_dir
        done
    done
}

# One sbatch job per (outer barcode in $barcode, inner barcode 1..48) pair —
# the high-throughput layout.
run_long_ht(){
    threads=4
    for lib in $lib_list
    do
        for bc1 in $barcode
        do
            for bc2 in `seq 1 48`
            do
                info
                sbatch \
                    -J ${name}_${job} \
                    -A tangfuchou_g1 \
                    -p cn-long \
                    --qos tangfuchoucnl \
                    -c $threads \
                    -o $root_dir/log/stdout_$job.log \
                    -e $root_dir/log/stderr_$job.log \
                    $script $lib ${bc1}_${bc2} $threads $root_dir
            done
        done
    done
}

# One whole-library sbatch job per library on the fat8way partition.
run_fat_one(){
    threads=5
    for lib in $lib_list
    do
        info
        sbatch \
            -J ${name}_${job} \
            -A tangfuchou_g1 \
            -p fat8way \
            --qos tangfuchouf8w \
            -c $threads \
            -o $root_dir/log/out_$job.log \
            -e $root_dir/log/err_$job.log \
            $script $lib . $threads $root_dir
    done
}

# Run locally (no scheduler): one backgrounded process per library, 4 threads.
run_one_directly(){
    for lib in $lib_list
    do
        bash $script $lib . 4 $root_dir &
    done
}

# Run locally: one backgrounded process per cell of each 96-cell library.
run_96_directly(){
    for lib in $lib_list
    do
        for cell in `seq 1 96`
        do
            bash $script $lib sc$cell 1 $root_dir &
        done
    done
}

# Run locally and sequentially over every (barcode1, barcode2) pair.
run_ht_directly(){
    threads=1
    for lib in $lib_list
    do
        for bc1 in $barcode
        do
            for bc2 in `seq 1 48`
            do
                bash $script $lib ${bc1}_${bc2} $threads $root_dir
            done
        done
    done
}

# Dispatch mode selection — exactly one of these should be active.
#run_long_one
run_long_ht
#run_fat_one
#run_one_directly
#run_ht_directly
<filename>migrations/2_book_supply_chain.js var Book = artifacts.require("./BookSupplyChain.sol"); module.exports = function(deployer) { deployer.deploy(Book); };
// Returns the source text of a generated Nodespull model file for `tableName`,
// bound to the database connection selected by `connectionSelector`.
// NOTE: the template literal below is the *content* of the generated file —
// its whitespace and line breaks are significant; do not reformat it.
export default function model(tableName:string, connectionSelector:string):string{
    // NOTE(review): `withTable = "..."` in the generated code is a sloppy-mode
    // assignment used as a pseudo-named argument — confirm this is the calling
    // convention @nodespull/core/database expects before changing it.
    return `const { editor } = require("@nodespull/core/database")("${connectionSelector}")
const { type, onUpload, onRevert, rawQuery } = editor

onUpload(() => {
    editor.defineModel(withTable = "${tableName}").as({

        /* add attributes */
        uuid: {
            type: type.string,
            primaryKey: true,
            defaultValue: type.UUIDV1
        },
    })
})

onRevert(() => {
    rawQuery("DROP TABLE ${tableName}")
})`
}
import { Insomnia4Data } from 'insomnia-importers';
import clone from 'clone';
import { database as db } from './database';
import * as har from './har';
import type { BaseModel } from '../models/index';
import * as models from '../models/index';
import {
  EXPORT_TYPE_API_SPEC,
  EXPORT_TYPE_COOKIE_JAR,
  EXPORT_TYPE_ENVIRONMENT,
  EXPORT_TYPE_GRPC_REQUEST,
  EXPORT_TYPE_PROTO_DIRECTORY,
  EXPORT_TYPE_PROTO_FILE,
  EXPORT_TYPE_REQUEST,
  EXPORT_TYPE_REQUEST_GROUP,
  EXPORT_TYPE_UNIT_TEST,
  EXPORT_TYPE_UNIT_TEST_SUITE,
  EXPORT_TYPE_WORKSPACE,
  getAppVersion,
} from './constants';
import YAML from 'yaml';
import { trackEvent } from './analytics';
import { isGrpcRequest } from '../models/grpc-request';
import { isRequest } from '../models/request';
import { isRequestGroup } from '../models/request-group';
import { isProtoDirectory } from '../models/proto-directory';
import { isProtoFile } from '../models/proto-file';
import { isWorkspace, Workspace } from '../models/workspace';
import { isApiSpec } from '../models/api-spec';
import { isCookieJar } from '../models/cookie-jar';
import { isEnvironment } from '../models/environment';
import { isUnitTestSuite } from '../models/unit-test-suite';
import { isUnitTest } from '../models/unit-test';
import { resetKeys } from '../sync/vcs/ignore-keys';

// Version of the Insomnia export format this module emits (__export_format).
const EXPORT_FORMAT = 4;

// Returns an async mapper that loads a document plus all of its descendants,
// filtering out private docs unless `includePrivateDocs` is set.
const getDocWithDescendants = (includePrivateDocs = false) => async (parentDoc: BaseModel | null) => {
  const docs = await db.withDescendants(parentDoc);
  return docs.filter(
    // Don't include if private, except if we want to
    doc => !doc?.isPrivate || includePrivateDocs,
  );
};

/**
 * Exports the (plain HTTP) requests of the given workspaces as a HAR string.
 * An empty workspace list means "everything" (see comment below).
 */
export async function exportWorkspacesHAR(
  workspaces: Workspace[],
  includePrivateDocs = false,
) {
  // regarding `[null]`, see the comment here in `exportWorkspacesData`
  const rootDocs = workspaces.length === 0 ? [null] : workspaces;
  const promises = rootDocs.map(getDocWithDescendants(includePrivateDocs));
  const docs = (await Promise.all(promises)).flat();
  const requests = docs.filter(isRequest);
  return exportRequestsHAR(requests, includePrivateDocs);
}

/**
 * Exports the given requests as a HAR string. Each request is resolved to its
 * owning workspace (requests without one are skipped) and paired with that
 * workspace's active environment; private environments are replaced by 'n/a'
 * unless `includePrivateDocs` is set.
 */
export async function exportRequestsHAR(
  requests: BaseModel[],
  includePrivateDocs = false,
) {
  const workspaces: BaseModel[] = [];
  const mapRequestIdToWorkspace: Record<string, any> = {};
  const workspaceLookup: Record<string, any> = {};

  for (const request of requests) {
    const ancestors: BaseModel[] = await db.withAncestors(request, [
      models.workspace.type,
      models.requestGroup.type,
    ]);
    const workspace = ancestors.find(isWorkspace);
    mapRequestIdToWorkspace[request._id] = workspace;

    // Collect each owning workspace exactly once.
    if (workspace == null || workspaceLookup.hasOwnProperty(workspace._id)) {
      continue;
    }

    workspaceLookup[workspace._id] = true;
    workspaces.push(workspace);
  }

  const mapWorkspaceIdToEnvironmentId: Record<string, any> = {};

  for (const workspace of workspaces) {
    const workspaceMeta = await models.workspaceMeta.getByParentId(workspace._id);
    let environmentId = workspaceMeta ? workspaceMeta.activeEnvironmentId : null;
    const environment = await models.environment.getById(environmentId || 'n/a');

    // Fall back to 'n/a' when the environment is missing or private.
    if (!environment || (environment.isPrivate && !includePrivateDocs)) {
      environmentId = 'n/a';
    }

    mapWorkspaceIdToEnvironmentId[workspace._id] = environmentId;
  }

  // Stable export order: sort by the UI sort key.
  requests = requests.sort((a: Record<string, any>, b: Record<string, any>) =>
    a.metaSortKey < b.metaSortKey ? -1 : 1,
  );
  const harRequests: har.ExportRequest[] = [];

  for (const request of requests) {
    const workspace = mapRequestIdToWorkspace[request._id];

    if (workspace == null) {
      // Workspace not found for request, so don't export it.
      continue;
    }

    const environmentId = mapWorkspaceIdToEnvironmentId[workspace._id];
    harRequests.push({
      requestId: request._id,
      environmentId: environmentId,
    });
  }

  const data = await har.exportHar(harRequests);
  trackEvent('Data', 'Export', 'HAR');
  // Tab-indented JSON, matching the HAR files Insomnia writes elsewhere.
  return JSON.stringify(data, null, '\t');
}

/**
 * Exports the given workspaces (HTTP and gRPC requests plus related models)
 * in the Insomnia v4 data format as a JSON or YAML string.
 */
export async function exportWorkspacesData(
  workspaces: Workspace[],
  includePrivateDocs: boolean,
  format: 'json' | 'yaml',
) {
  // Semantically, if an empty array is passed, then nothing will be returned. What an empty array really signifies is "no parent", which, at the database layer is the same as "parentId === null", hence we add null in ourselves.
  const rootDocs = workspaces.length === 0 ? [null] : workspaces;
  const promises = rootDocs.map(getDocWithDescendants(includePrivateDocs));
  const docs = (await Promise.all(promises)).flat();
  const requests = docs.filter(doc => isRequest(doc) || isGrpcRequest(doc));
  return exportRequestsData(requests, includePrivateDocs, format);
}

/**
 * Exports the given requests, their ancestors (folders/workspaces), and the
 * supporting documents of each owning workspace (environments, cookie jars,
 * api specs, unit tests, proto files/dirs) in the Insomnia v4 data format.
 * Each resource is tagged with its `_type` export marker and stripped of the
 * internal `type` field before serialization.
 */
export async function exportRequestsData(
  requests: BaseModel[],
  includePrivateDocs: boolean,
  format: 'json' | 'yaml',
) {
  const data: Insomnia4Data = {
    // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
    _type: 'export',
    __export_format: EXPORT_FORMAT,
    __export_date: new Date(),
    __export_source: `insomnia.desktop.app:v${getAppVersion()}`,
    resources: [],
  };
  const docs: BaseModel[] = [];
  const workspaces: Workspace[] = [];
  const mapTypeAndIdToDoc: Record<string, BaseModel> = {};

  for (const request of requests) {
    // Clone the ancestors because the export mutates them below.
    const ancestors = clone<BaseModel[]>(await db.withAncestors(request));

    for (const ancestor of ancestors) {
      const key = ancestor.type + '___' + ancestor._id;

      if (mapTypeAndIdToDoc.hasOwnProperty(key)) {
        continue;
      }

      mapTypeAndIdToDoc[key] = ancestor;
      docs.push(ancestor);

      if (isWorkspace(ancestor)) {
        workspaces.push(ancestor);
      }
    }
  }

  for (const workspace of workspaces) {
    const descendants = (await db.withDescendants(workspace)).filter(d => {
      // Only interested in these additional model types.
      return (
        isCookieJar(d) ||
        isEnvironment(d) ||
        isApiSpec(d) ||
        isUnitTestSuite(d) ||
        isUnitTest(d) ||
        isProtoFile(d) ||
        isProtoDirectory(d)
      );
    });
    docs.push(...descendants);
  }

  data.resources = docs
    .filter(d => {
      // Only export these model types.
      if (
        !(
          isUnitTestSuite(d) ||
          isUnitTest(d) ||
          isRequest(d) ||
          isGrpcRequest(d) ||
          isRequestGroup(d) ||
          isProtoFile(d) ||
          isProtoDirectory(d) ||
          isWorkspace(d) ||
          isCookieJar(d) ||
          isEnvironment(d) ||
          isApiSpec(d)
        )
      ) {
        return false;
      }

      // BaseModel doesn't have isPrivate, so cast it first.
      return !d.isPrivate || includePrivateDocs;
    })
    .map(d => {
      if (isWorkspace(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_WORKSPACE;
        // reset the parentId of a workspace
        resetKeys(d);
      } else if (isCookieJar(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_COOKIE_JAR;
      } else if (isEnvironment(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_ENVIRONMENT;
      } else if (isUnitTestSuite(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_UNIT_TEST_SUITE;
      } else if (isUnitTest(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_UNIT_TEST;
      } else if (isRequestGroup(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_REQUEST_GROUP;
      } else if (isRequest(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_REQUEST;
      } else if (isGrpcRequest(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_GRPC_REQUEST;
      } else if (isProtoFile(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_PROTO_FILE;
      } else if (isProtoDirectory(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_PROTO_DIRECTORY;
      } else if (isApiSpec(d)) {
        // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
        d._type = EXPORT_TYPE_API_SPEC;
      }

      // @ts-expect-error -- TSCONVERSION maybe this needs to be added to the upstream type?
      // Delete the things we don't want to export
      delete d.type;
      return d;
    });
  trackEvent('Data', 'Export', `Insomnia ${format}`);

  if (format.toLowerCase() === 'yaml') {
    return YAML.stringify(data);
  } else if (format.toLowerCase() === 'json') {
    return JSON.stringify(data);
  } else {
    throw new Error(`Invalid export format ${format}. Must be "json" or "yaml"`);
  }
}
# Tests for RaisesVisitor's AssertInExcept check: an `assert` placed inside an
# `except` block of a test function is flagged, while asserting on the result
# of a `pytest.raises` context (outside the handler) is fine.
from flake8_plugin_utils.utils import assert_error, assert_not_error

from flake8_pytest_style.config import DEFAULT_CONFIG
from flake8_pytest_style.errors import AssertInExcept
from flake8_pytest_style.visitors import RaisesVisitor


def test_ok():
    # Asserting on the `pytest.raises` context value after the `with` block
    # must not trigger AssertInExcept.
    code = """
        def test_xxx():
            try:
                something()
            except Exception as e:
                something_else()
            with pytest.raises(ZeroDivisionError) as e:
                1 / 0
            assert e.value.message
    """
    assert_not_error(RaisesVisitor, code, config=DEFAULT_CONFIG)


def test_error():
    # An assert directly inside the except handler is reported, naming the
    # bound exception variable `e`.
    code = """
        def test_xxx():
            try:
                something()
            except Exception as e:
                assert e.message, 'blah blah'
    """
    assert_error(RaisesVisitor, code, AssertInExcept, name='e', config=DEFAULT_CONFIG)
package com.momo.mapper.req.authority;

import com.momo.common.error.BaseReq;
import lombok.*;
import org.springframework.format.annotation.NumberFormat;

import javax.validation.constraints.Max;
import javax.validation.constraints.Min;
import javax.validation.constraints.NotBlank;
import javax.validation.constraints.NotNull;

/**
 * Request object for creating/updating ACL (permission) entries.
 * Bean-validation groups (save/Modify/Status/Detail/Permission, declared on
 * BaseReq) select which constraints apply per operation.
 *
 * Created by MOMO on 2019/4/9.
 */
@Getter
@Setter
@ToString
@Builder
@NoArgsConstructor
@AllArgsConstructor
//@EqualsAndHashCode(of = {"id"})
public class AclReq extends BaseReq {
    /**
     * Menu system type: 1 = system management, 2 = asset management.
     */
    @NotBlank(message = "菜单系统类型 必填", groups = {save.class, Modify.class,Permission.class})
    private String sysAclPermissionCode;
    /**
     * Parent permission id.
     */
    @NotNull(message = "上级权限id 必填", groups = {save.class, Modify.class,Permission.class})
    private Long sysAclParentIdStr;
    /**
     * Remark / free-text note.
     */
    private String remark;
    /**
     * Request URL; a regular expression is allowed.
     */
    // @NotBlank(message = "请求的url 必填", groups = {save.class, Modify.class})
    private String sysAclUrl;
    /**
     * Permission code.
     */
    private String sysAclCode;
    /**
     * Icon CSS class.
     */
    private String sysAclIcon;
    /**
     * Permission display name.
     */
    @NotBlank(message = "权限名称 必填", groups = {save.class, Modify.class,Permission.class})
    private String sysAclName;
    /**
     * Node type: -1 system, 0 directory, 1 menu, 2 button, 3 other.
     * NOTE(review): the Max/Min violation messages below say "1"/"-1" while
     * the actual bounds are 3/-1 — the Max message text looks stale; confirm
     * before changing the user-facing strings.
     */
    @NotNull(message = "类型,类型,-1系统 0:目录 1:菜单,2:按钮,3:其他 必填", groups = {save.class, Modify.class,Permission.class})
    @Max(value = 3,message = "权限类型:最大值为1")
    @Min(value = -1,message = "权限类型:最小值为-1")
    private Integer sysAclType;
    /**
     * Unique 32-character UUID used for lookups.
     */
    @NotBlank(message = "uuid 必填", groups = {Status.class, Modify.class,Detail.class})
    private String uuid;
    /**
     * Enabled flag: 0 = enabled, 1 = disabled.
     */
    @NotNull(message = "状态 0启用 1禁用 必填", groups = {save.class,Status.class, Modify.class,Permission.class})
    @Max(value = 1,message = "状态:最大值为1")
    @Min(value = 0,message = "状态:最小值为0")
    private Integer disabledFlag;
    /**
     * Button action type (handled by the front end).
     */
    // @NotBlank(message = "按钮动作类型 必填", groups = {save.class, Modify.class})
    private String sysAclAction;
    /**
     * Owning page/route (handled by the front end).
     */
    // @NotBlank(message = "所属页面 必填", groups = {save.class, Modify.class})
    private String sysAclRouter;
    /**
     * Sort order within the current module, ascending.
     */
    @NotNull(message = "权限在当前模块下的顺序,由小到大 必填", groups = {save.class, Modify.class,Permission.class})
    @Min(value = 0,message = "排序:最小值为0")
    private Integer sysAclSeq;
}
import { makeReduxFormEntry } from 'shared/helpers/redux';

import * as NS from '../namespace';

// Redux-form entry for the "toggle 2FA" form: the confirmation `code` field
// plus the provider being switched to (`new2FaProviderCode`).
export const toggle2faFormEntry = makeReduxFormEntry<NS.IToggle2faForm>('toggle-2fa', ['code', 'new2FaProviderCode']);
# Distribution metadata for the isimip-publisher package.
# (Author/email are anonymized placeholders in this copy.)
__title__ = 'isimip-publisher'
__version__ = '0.1.0'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'MIT'
__copyright__ = 'Copyright (c) 2019 Potsdam Institute for Climate Impact Research'

# Convenience alias so callers can read `VERSION` as well as `__version__`.
VERSION = __version__
package com.lightbend.hedgehog.generators

import com.lightbend.hedgehog.generators.ShortGenerators._
import com.lightbend.hedgehog.scalamock.{GeneratorSpec, TestRunnerMock}
import hedgehog.runner.Test

// Property-based spec for ShortGenerators.genShort: applies the shared
// linear-numeric-range test suite with origin 0 over the full Short range.
object ShortGeneratorsSpec extends TestRunnerMock with GeneratorSpec {

  override def tests: List[Test] =
    test("genShort", genShort).addLinearNumericRangeTests(0, Short.MinValue, Short.MaxValue).tests
}
#!/bin/bash

# This script is invoked from ../run.sh
# It contains some SGMM-related scripts that I am breaking out of the main run.sh for clarity.
# Structure: two large backgrounded subshells (si84 and si284 recipes) run in
# parallel and are joined by `wait`; scoring/combination examples follow.

. cmd.sh

# Note: you might want to try to give the option --spk-dep-weights=false to train_sgmm2.sh;
# this takes out the "symmetric SGMM" part which is not always helpful.

# SGMM system on si84 data [sgmm5a]. Note: the system we aligned from used the si284 data for
# training, but this shouldn't have much effect.

(
  steps/align_fmllr.sh --nj 30 --cmd "$train_cmd" \
    data/train_si84 data/lang exp/tri4b exp/tri4b_ali_si84 || exit 1;

  steps/train_ubm.sh --cmd "$train_cmd" \
    400 data/train_si84 data/lang exp/tri4b_ali_si84 exp/ubm5a || exit 1;

  steps/train_sgmm2.sh --cmd "$train_cmd" \
    7000 9000 data/train_si84 data/lang exp/tri4b_ali_si84 \
    exp/ubm5a/final.ubm exp/sgmm2_5a || exit 1;

  # Decode the plain (non-discriminative) sgmm2_5a system in the background.
  (
    utils/mkgraph.sh data/lang_test_tgpr exp/sgmm2_5a exp/sgmm2_5a/graph_tgpr
    steps/decode_sgmm2.sh --nj 10 --cmd "$decode_cmd" --transform-dir exp/tri4b/decode_tgpr_dev93 \
      exp/sgmm2_5a/graph_tgpr data/test_dev93 exp/sgmm2_5a/decode_tgpr_dev93
  ) &

  steps/align_sgmm2.sh --nj 30 --cmd "$train_cmd" --transform-dir exp/tri4b_ali_si84 \
    --use-graphs true --use-gselect true data/train_si84 data/lang exp/sgmm2_5a exp/sgmm2_5a_ali_si84 || exit 1;
  steps/make_denlats_sgmm2.sh --nj 30 --sub-split 30 --cmd "$decode_cmd" --transform-dir exp/tri4b_ali_si84 \
    data/train_si84 data/lang exp/sgmm2_5a_ali_si84 exp/sgmm2_5a_denlats_si84

  # Boosted-MMI discriminative training on si84.
  steps/train_mmi_sgmm2.sh --cmd "$decode_cmd" --transform-dir exp/tri4b_ali_si84 --boost 0.1 \
    data/train_si84 data/lang exp/sgmm2_5a_ali_si84 exp/sgmm2_5a_denlats_si84 exp/sgmm2_5a_mmi_b0.1

  for iter in 1 2 3 4; do
    steps/decode_sgmm2_rescore.sh --cmd "$decode_cmd" --iter $iter \
      --transform-dir exp/tri4b/decode_tgpr_dev93 data/lang_test_tgpr data/test_dev93 exp/sgmm2_5a/decode_tgpr_dev93 \
      exp/sgmm2_5a_mmi_b0.1/decode_tgpr_dev93_it$iter &
  done

  # Same MMI training with a covariance floor (--cov-min-value=0.9).
  steps/train_mmi_sgmm2.sh --cmd "$decode_cmd" --transform-dir exp/tri4b_ali_si84 --boost 0.1 \
    --update-opts "--cov-min-value=0.9" data/train_si84 data/lang exp/sgmm2_5a_ali_si84 exp/sgmm2_5a_denlats_si84 exp/sgmm2_5a_mmi_b0.1_m0.9
  for iter in 1 2 3 4; do
    steps/decode_sgmm2_rescore.sh --cmd "$decode_cmd" --iter $iter \
      --transform-dir exp/tri4b/decode_tgpr_dev93 data/lang_test_tgpr data/test_dev93 exp/sgmm2_5a/decode_tgpr_dev93 \
      exp/sgmm2_5a_mmi_b0.1_m0.9/decode_tgpr_dev93_it$iter &
  done

) &

(
  # The next commands are the same thing on all the si284 data.

  # SGMM system on the si284 data [sgmm5b]
  steps/train_ubm.sh --cmd "$train_cmd" \
    600 data/train_si284 data/lang exp/tri4b_ali_si284 exp/ubm5b || exit 1;

  steps/train_sgmm2.sh --cmd "$train_cmd" \
    11000 25000 data/train_si284 data/lang exp/tri4b_ali_si284 \
    exp/ubm5b/final.ubm exp/sgmm2_5b || exit 1;

  (
    utils/mkgraph.sh data/lang_test_tgpr exp/sgmm2_5b exp/sgmm2_5b/graph_tgpr
    steps/decode_sgmm2.sh --nj 10 --cmd "$decode_cmd" --transform-dir exp/tri4b/decode_tgpr_dev93 \
      exp/sgmm2_5b/graph_tgpr data/test_dev93 exp/sgmm2_5b/decode_tgpr_dev93
    steps/decode_sgmm2.sh --nj 8 --cmd "$decode_cmd" --transform-dir exp/tri4b/decode_tgpr_eval92 \
      exp/sgmm2_5b/graph_tgpr data/test_eval92 exp/sgmm2_5b/decode_tgpr_eval92

    # Big-dictionary (bd) language model decodes.
    utils/mkgraph.sh data/lang_test_bd_tgpr exp/sgmm2_5b exp/sgmm2_5b/graph_bd_tgpr || exit 1;
    steps/decode_sgmm2.sh --nj 10 --cmd "$decode_cmd" --transform-dir exp/tri4b/decode_bd_tgpr_dev93 \
      exp/sgmm2_5b/graph_bd_tgpr data/test_dev93 exp/sgmm2_5b/decode_bd_tgpr_dev93
    steps/decode_sgmm2.sh --nj 8 --cmd "$decode_cmd" --transform-dir exp/tri4b/decode_bd_tgpr_eval92 \
      exp/sgmm2_5b/graph_bd_tgpr data/test_eval92 exp/sgmm2_5b/decode_bd_tgpr_eval92
  ) &

  # This shows how you would build and test a quinphone SGMM2 system, but
  (
    steps/train_sgmm2.sh --cmd "$train_cmd" \
      --context-opts "--context-width=5 --central-position=2" \
      11000 25000 data/train_si284 data/lang exp/tri4b_ali_si284 \
      exp/ubm5b/final.ubm exp/sgmm2_5c || exit 1;
    # Decode from lattices in exp/sgmm2_5b
    steps/decode_sgmm2_fromlats.sh --cmd "$decode_cmd" --transform-dir exp/tri4b/decode_tgpr_dev93 \
      data/test_dev93 data/lang_test_tgpr exp/sgmm2_5b/decode_tgpr_dev93 exp/sgmm2_5c/decode_tgpr_dev93
    steps/decode_sgmm2_fromlats.sh --cmd "$decode_cmd" --transform-dir exp/tri4b/decode_tgpr_eval92 \
      data/test_eval92 data/lang_test_tgpr exp/sgmm2_5b/decode_tgpr_eval92 exp/sgmm2_5c/decode_tgpr_eval92
  ) &

  steps/align_sgmm2.sh --nj 30 --cmd "$train_cmd" --transform-dir exp/tri4b_ali_si284 \
    --use-graphs true --use-gselect true data/train_si284 data/lang exp/sgmm2_5b exp/sgmm2_5b_ali_si284

  steps/make_denlats_sgmm2.sh --nj 30 --sub-split 30 --cmd "$decode_cmd" --transform-dir exp/tri4b_ali_si284 \
    data/train_si284 data/lang exp/sgmm2_5b_ali_si284 exp/sgmm2_5b_denlats_si284

  steps/train_mmi_sgmm2.sh --cmd "$decode_cmd" --transform-dir exp/tri4b_ali_si284 --boost 0.1 \
    data/train_si284 data/lang exp/sgmm2_5b_ali_si284 exp/sgmm2_5b_denlats_si284 exp/sgmm2_5b_mmi_b0.1

  for iter in 1 2 3 4; do
    for test in eval92; do # dev93
      steps/decode_sgmm2_rescore.sh --cmd "$decode_cmd" --iter $iter \
        --transform-dir exp/tri4b/decode_bd_tgpr_${test} data/lang_test_bd_fg data/test_${test} exp/sgmm2_5b/decode_bd_tgpr_${test} \
        exp/sgmm2_5b_mmi_b0.1/decode_bd_tgpr_${test}_it$iter &
    done
  done

  # MMI again with frame dropping (--drop-frames true).
  steps/train_mmi_sgmm2.sh --cmd "$decode_cmd" --transform-dir exp/tri4b_ali_si284 --boost 0.1 \
    --drop-frames true data/train_si284 data/lang exp/sgmm2_5b_ali_si284 exp/sgmm2_5b_denlats_si284 exp/sgmm2_5b_mmi_b0.1_z

  for iter in 1 2 3 4; do
    for test in eval92 dev93; do
      steps/decode_sgmm2_rescore.sh --cmd "$decode_cmd" --iter $iter \
        --transform-dir exp/tri4b/decode_bd_tgpr_${test} data/lang_test_bd_fg data/test_${test} exp/sgmm2_5b/decode_bd_tgpr_${test} \
        exp/sgmm2_5b_mmi_b0.1_z/decode_bd_tgpr_${test}_it$iter &
    done
  done
) &

# Join both recipe subshells before running the combination examples below.
wait

# Examples of combining some of the best decodings: SGMM+MMI with
# MMI+fMMI on a conventional system.
local/score_combine.sh data/test_eval92 \
  data/lang_test_bd_tgpr \
  exp/tri4b_fmmi_a/decode_tgpr_eval92_it8 \
  exp/sgmm2_5b_mmi_b0.1/decode_bd_tgpr_eval92_it3 \
  exp/combine_tri4b_fmmi_a_sgmm2_5b_mmi_b0.1/decode_bd_tgpr_eval92_it8_3

# %WER 4.43 [ 250 / 5643, 41 ins, 12 del, 197 sub ] exp/tri4b_fmmi_a/decode_tgpr_eval92_it8/wer_11
# %WER 3.85 [ 217 / 5643, 35 ins, 11 del, 171 sub ] exp/sgmm2_5b_mmi_b0.1/decode_bd_tgpr_eval92_it3/wer_10
# combined to:
# %WER 3.76 [ 212 / 5643, 32 ins, 12 del, 168 sub ] exp/combine_tri4b_fmmi_a_sgmm2_5b_mmi_b0.1/decode_bd_tgpr_eval92_it8_3/wer_12

# Checking MBR decode of baseline:
cp -r -T exp/sgmm2_5b_mmi_b0.1/decode_bd_tgpr_eval92_it3{,.mbr}
local/score_mbr.sh data/test_eval92 data/lang_test_bd_tgpr exp/sgmm2_5b_mmi_b0.1/decode_bd_tgpr_eval92_it3.mbr
# MBR decoding did not seem to help (baseline was 3.85). I think this is normal at such low WERs.
# %WER 3.86 [ 218 / 5643, 35 ins, 11 del, 172 sub ] exp/sgmm2_5b_mmi_b0.1/decode_bd_tgpr_eval92_it3.mbr/wer_10
#!/bin/sh
# NOTE(review): this looks like the standard CocoaPods "[CP] Embed Pods
# Frameworks" build-phase script — presumably regenerated by `pod install`;
# confirm before hand-editing, as local changes would be overwritten.
set -e
set -u
set -o pipefail

function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
BCSYMBOLMAP_DIR="BCSymbolMaps"

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")

# Copies and strips a vendored framework
install_framework()
{
  # Resolve the framework path: prefer the build-products copy, then a
  # basename match there, then the literal path given.
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then
    # Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied
    find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do
      echo "Installing $f"
      install_bcsymbolmap "$f" "$destination"
      rm "$f"
    done
    rmdir "${source}/${BCSYMBOLMAP_DIR}"
  fi

  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"

  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  elif [ -L "${binary}" ]; then
    echo "Destination binary is symlinked..."
    dirname="$(dirname "${binary}")"
    binary="${dirname}/$(readlink "${binary}")"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}

# Copies and strips a vendored dSYM
install_dsym() {
  local source="$1"
  warn_missing_arch=${2:-true}
  if [ -r "$source" ]; then
    # Copy the dSYM into the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"

    local basename
    basename="$(basename -s .dSYM "$source")"
    binary_name="$(ls "$source/Contents/Resources/DWARF")"
    binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"

    # Strip invalid architectures from the dSYM.
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary" "$warn_missing_arch"
    fi
    if [[ $STRIP_BINARY_RETVAL == 0 ]]; then
      # Move the stripped file into its final destination.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
    fi
  fi
}

# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# Strip invalid architectures
strip_invalid_archs() {
  binary="$1"
  warn_missing_arch=${2:-true}
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    if [[ "$warn_missing_arch" == "true" ]]; then
      echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    fi
    STRIP_BINARY_RETVAL=1
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=0
}

# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
    local bcsymbolmap_path="$1"
    local destination="${BUILT_PRODUCTS_DIR}"
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}

# Signs a framework with the provided identity
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"

    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}

if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/XCTest-Gherkin/XCTest_Gherkin.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/XCTest-Gherkin/XCTest_Gherkin.framework"
fi
# When signing in parallel, wait for the backgrounded codesign jobs to finish.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
import subprocess

# Path to the Scrapy spider script to execute.
spider_script = "path_to_spider_script.py"

# Build the command as an argument list.  The old code interpolated the path
# into an f-string and ran it with shell=True, which breaks on paths
# containing spaces and is shell-injection-prone; passing a list avoids both.
command = ["scrapy", "runspider", spider_script]

# Run the spider, capturing stdout and stderr.
process = subprocess.run(command, capture_output=True)

# Check if the spider script execution was successful.
if process.returncode == 0:
    print("done")
else:
    print(f"Error occurred: {process.stderr.decode('utf-8')}")
#!/bin/bash mongo localhost/h_media-dev
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package org.apache.tapestry5; import org.apache.tapestry5.services.RequestExceptionHandler; import java.io.IOException; import java.util.List; /** * A contribution to the default {@link RequestExceptionHandler} service, this is mapped to an exception class, * allowing class specific (based on an inheritance search) handling of an exception. * * @see ContextAwareException */ public interface ExceptionHandlerAssistant { /** * Handles the exception, returning a page class or link to redirect to. * * @param exception * the exception as thrown * @param exceptionContext * a page activation context that is derived from the root-most exception * @return either a page class or a {@link Link}; a page will be redirected to, with the exception context * as the page activation context * @throws IOException */ Object handleRequestException(Throwable exception, List<Object> exceptionContext) throws IOException; }
#!/bin/bash
# Create the HDFS directory for the TPC-DS SF1 text views if it is missing,
# then run the view-creation SQL through sqlline (with or without a
# password, depending on whether $PASSWORD is set).
#
# FIX: the shebang was /bin/sh but the script uses `source`, a bashism that
# fails under POSIX sh (e.g. dash).
source conf/drillTestConfig.properties

# FIX: replaced `if [ `echo $?` -eq 1 ]` — which compared the exit status
# against exactly 1 — with `if ! cmd`, which treats any non-zero status
# (directory absent OR hadoop error) as "needs creation".
if ! hadoop fs -test -d /drill/testdata/tpcds_sf1/text/views; then
  hadoop fs -mkdir /drill/testdata/tpcds_sf1/text/views
fi

if [ -z "$PASSWORD" ]; then
  ${DRILL_HOME}/bin/sqlline -n ${USERNAME} -u "jdbc:drill:schema=dfs.tpcds_sf1_text_views;drillbit=${DRILL_STORAGE_PLUGIN_SERVER}" --run=${DRILL_TEST_DATA_DIR}/Datasources/tpcds/createViewsText.sql
else
  ${DRILL_HOME}/bin/sqlline -n ${USERNAME} -p ${PASSWORD} -u "jdbc:drill:schema=dfs.tpcds_sf1_text_views;drillbit=${DRILL_STORAGE_PLUGIN_SERVER}" --run=${DRILL_TEST_DATA_DIR}/Datasources/tpcds/createViewsText.sql
fi
#!/bin/bash
# Version/metadata environment for the rke2 multiarch build.  Every value
# can be overridden from the environment via ${VAR:-default}.
set -x

PROG=rke2
REGISTRY=${REGISTRY:-docker.io}
REPO=${REPO:-rancher}
K3S_PKG=github.com/rancher/k3s
RKE2_PKG=github.com/rancher/rke2
GO=${GO-go}
GOARCH=${GOARCH:-$("${GO}" env GOARCH)}
ARCH=${GOARCH}
GOOS=${GOOS:-$("${GO}" env GOOS)}
if [ -z "$GOOS" ]; then
    if [ "${OS}" == "Windows_NT" ]; then
        GOOS="windows"
    else
        # FIX: the original "UNAME_S=$(shell uname -s)" is GNU make syntax
        # that leaked into this shell script; it tried to execute a command
        # literally named "shell", leaving UNAME_S empty.
        UNAME_S=$(uname -s)
        if [ "${UNAME_S}" == "Linux" ]; then
            GOOS="linux"
        elif [ "${UNAME_S}" == "Darwin" ]; then
            GOOS="darwin"
        elif [ "${UNAME_S}" == "FreeBSD" ]; then
            GOOS="freebsd"
        fi
    fi
fi

# NOTE(review): the detection above is unconditionally overridden here —
# presumably only linux builds are currently supported; confirm before
# removing the detection block.
GOOS=linux
UNAME_S=Linux

TREE_STATE=clean
COMMIT=$DRONE_COMMIT

# Default the image build version to a dated multiarch tag.
if [ -z "${IMAGE_BUILD_VERSION}" ]; then IMAGE_BUILD_VERSION=multiarch-build$(date +%Y%m%d); fi

BASE_VERSION=${BASE_VERSION:-v1.17.5b7-multiarch}
KUBERNETES_VERSION=${KUBERNETES_VERSION:-v1.23.1}
RKE2_RELEASE=${RKE2_RELEASE:-rke2r1}
RKE2_MULTIARCH_RELEASE=${RKE2_MULTIARCH_RELEASE:-multiarch-alpha1}
VERSION=${VERSION:-$KUBERNETES_VERSION-$RKE2_MULTIARCH_RELEASE+$RKE2_RELEASE}

# Current commit, suffixed with ".dirty" when the tree has uncommitted diffs.
REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .dirty; fi)
PLATFORM=${GOOS}-${GOARCH}
RELEASE=${PROG}.${PLATFORM}

# hardcode versions unless set specifically
KUBERNETES_IMAGE_TAG=${KUBERNETES_IMAGE_TAG:-$KUBERNETES_VERSION-$RKE2_RELEASE}
ETCD_VERSION=${ETCD_VERSION:-v3.5.1-k3s1}
PAUSE_VERSION=${PAUSE_VERSION:-3.6}
CCM_VERSION=${CCM_VERSION:-v0.0.3}

if [ -d .git ]; then
    if [ -z "$GIT_TAG" ]; then
        GIT_TAG=$(git tag -l --contains HEAD | head -n 1)
    fi
    if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
        DIRTY="-dirty"
        TREE_STATE=dirty
    fi
fi

CONTAINERD_VERSION=${CONTAINERD_VERSION:-v1.5.8-k3s2}
CRICTL_VERSION=${CRICTL_VERSION:-v1.22.0}
RUNC_VERSION=${RUNC_VERSION:-v1.0.3}
COREDNS_VERSION=${COREDNS_VERSION:-v1.8.5}
METRICS_SERVER_VERSION=${METRICS_SERVER_VERSION:-v0.5.2}
KLIPPER_HELM_VERSION=${KLIPPER_HELM_VERSION:-v0.6.8}
NGINX_INGRESS_VERSION=${NGINX_INGRESS_VERSION:-nginx-1.0.2-multiarch-hardened2}
NGINX_INGRESS_DEFAULT_BACKEND_VERSION=${NGINX_INGRESS_DEFAULT_BACKEND_VERSION:-1.5-rancher1}
# FIX: the default expansion previously read the misspelled variable
# HARDENEDCALICO_VERSION, so an exported HARDENED_CALICO_VERSION was
# silently ignored.
HARDENED_CALICO_VERSION=${HARDENED_CALICO_VERSION:-v3.20.2}
CALICO_VERSION=${CALICO_VERSION:-v3.20.2}
CALICO_OPERATOR_VERSION=${CALICO_OPERATOR_VERSION:-v1.20.4}
CALICO_CRD_VERSION=${CALICO_CRD_VERSION:-v1.0.202}
FLANNEL_VERSION=${FLANNEL_VERSION:-v0.15.1}
CILIUM_VERSION=${CILIUM_VERSION:-v1.10.4}
CILIUM_STARTUP_SCRIPT_VERSION=${CILIUM_STARTUP_SCRIPT_VERSION:-62bfbe88c17778aad7bef9fa57ff9e2d4a9ba0d8}
MULTUS_VERSION=${MULTUS_VERSION:-v3.8}
CNI_PLUGINS_VERSION=${CNI_PLUGINS_VERSION:-v1.0.1}
SRIOV_VERSION=${SRIOV_VERSION:-v1.0.0-multiarch-build20210908}
SRIOV_DEVICE_PLUGIN_VERSION=${SRIOV_DEVICE_PLUGIN_VERSION:-v3.3.2-multiarch-build20210908}
SRIOV_CNI_VERSION=${SRIOV_CNI_VERSION:-v2.6.1-multiarch-build20210908}
SRIOV_RESOURCES_INJECTOR_VERSION=${SRIOV_RESOURCES_INJECTOR_VERSION:-v1.2-multiarch-build20210908}
VSPHERE_CPI_VERSION=${VSPHERE_CPI_VERSION:-v1.21.0}
VSPHERE_CSI_VERSION=${VSPHERE_CSI_VERSION:-v2.3.0}
# CSI sidecar image versions (k8s.gcr.io/sig-storage images).
K8SCSI_CSI_ATTACHER_VERSION=${K8SCSI_CSI_ATTACHER_VERSION:-v3.2.0}
K8SCSI_CSI_NODE_DRIVER_VERSION=${K8SCSI_CSI_NODE_DRIVER_VERSION:-v2.1.0}
K8SCSI_CSI_PROVISIONER_VERSION=${K8SCSI_CSI_PROVISIONER_VERSION:-v2.2.0}
K8SCSI_CSI_RESIZER_VERSION=${K8SCSI_CSI_RESIZER_VERSION:-v1.1.0}
K8SCSI_CSI_LIVENESSPROBE_VERSION=${K8SCSI_CSI_LIVENESSPROBE_VERSION:-v2.2.0}
CLUSTER_AUTOSCALER_VERSION=${CLUSTER_AUTOSCALER_VERSION:-v1.8.5}
DNS_NODE_CACHE_VERSION=${DNS_NODE_CACHE_VERSION:-1.21.2}
CERTGEN_VERSION=${CERTGEN_VERSION:-v1.0}
HARVESTER_VERSION=${HARVESTER_VERSION:-v0.1.1-multiarch}
LONGHORN_REGISTRAR_VERSION=${LONGHORN_REGISTRAR_VERSION:-v2.3.0}
# FIX: the resizer default previously read LONGHORN_PROVISIONER_VERSION
# (copy-paste slip), so exporting LONGHORN_RESIZER_VERSION had no effect.
LONGHORN_RESIZER_VERSION=${LONGHORN_RESIZER_VERSION:-v1.2.0}
LONGHORN_PROVISIONER_VERSION=${LONGHORN_PROVISIONER_VERSION:-v2.1.2}
LONGHORN_ATTACHER_VERSION=${LONGHORN_ATTACHER_VERSION:-v3.2.1}

# Helm chart versions (distinct from the image versions above).
CILIUM_CHART_VERSION=${CILIUM_CHART_VERSION:-"1.10.404"}
CANAL_CHART_VERSION=${CANAL_CHART_VERSION:-"v3.20.1-build2021111904"}
CALICO_CHART_VERSION=${CALICO_CHART_VERSION:-"v3.20.201"}
CALICO_CRD_CHART_VERSION=${CALICO_CRD_CHART_VERSION:-"v1.0.202"}
COREDNS_CHART_VERSION=${COREDNS_CHART_VERSION:-"1.16.401-build2021111901"}
NGINX_INGRESS_CHART_VERSION=${NGINX_INGRESS_CHART_VERSION:-"4.0.306"}
METRICS_SERVER_CHART_VERSION=${METRICS_SERVER_CHART_VERSION:-"2.11.100-build2021111904"}
MULTUS_CHART_VERSION=${MULTUS_CHART_VERSION:-"v3.7.1-build2021111906"}
VSPHERE_CPI_CHART_VERSION=${VSPHERE_CPI_CHART_VERSION:-"100.1.0+up1.0.100"}
VSPHERE_CSI_CHART_VERSION=${VSPHERE_CSI_CHART_VERSION:-"100.1.0+up2.3.0"}
HARVESTER_CLOUD_PROVIDER_CHART_VERSION=${HARVESTER_CLOUD_PROVIDER_CHART_VERSION:-"0.1.800"}
HARVESTER_CSI_DRIVER_CHART_VERSION=${HARVESTER_CSI_DRIVER_CHART_VERSION:-"0.1.900"}

GIT_TAG="${VERSION}"
# Extract major/minor components from a semver-ish VERSION string.
if [[ "${VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-+].*)?$ ]]; then
    VERSION_MAJOR=${BASH_REMATCH[1]}
    VERSION_MINOR=${BASH_REMATCH[2]}
fi

# Docker tags may not contain "+", so swap it for "-".
DOCKERIZED_VERSION="${VERSION/+/-}" # this mimics what kubernetes builds do
<filename>internal/operator/networking/kinds/networking/legacycf/config/external.go<gh_stars>10-100
package config

import (
	"errors"
	"fmt"

	"github.com/caos/orbos/mntr"
	"github.com/caos/orbos/pkg/secret"

	core2 "github.com/caos/orbos/internal/operator/core"
	"github.com/caos/orbos/internal/operator/networking/kinds/networking/core"
	"github.com/caos/orbos/pkg/labels"
)

// ExternalConfig is the user-facing (YAML) configuration for the legacy
// Cloudflare networking kind.
type ExternalConfig struct {
	Verbose       bool
	Domain        string
	Rules         []*Rule
	Groups        []*Group     `yaml:"groups"`
	Credentials   *Credentials `yaml:"credentials"`
	Prefix        string       `yaml:"prefix"`
	AdditionalDNS []*Subdomain `yaml:"additionalSubdomains,omitempty"`
}

// IsZero reports whether no field of the configuration has been set,
// treating empty credentials the same as absent ones.
func (i *ExternalConfig) IsZero() bool {
	if (i.Credentials == nil || i.Credentials.IsZero()) &&
		!i.Verbose &&
		i.Domain == "" &&
		i.Groups == nil &&
		i.Prefix == "" &&
		i.Rules == nil &&
		i.AdditionalDNS == nil {
		return true
	}
	return false
}

// Internal converts the external configuration into the operator's internal
// representation plus the derived current state for the configured domain.
func (e *ExternalConfig) Internal(namespace string, apiLabels *labels.API) (*InternalConfig, *current) {
	dom, curr := e.internalDomain()
	return &InternalConfig{
		Domains:            []*InternalDomain{dom},
		Groups:             e.Groups,
		Credentials:        e.Credentials,
		Prefix:             e.Prefix,
		Namespace:          namespace,
		OriginCASecretName: curr.tlsCertName,
		Labels:             apiLabels,
	}, curr
}

// Validate checks that the config exists and carries a domain; errors are
// wrapped as user errors for presentation.
func (e *ExternalConfig) Validate() (err error) {
	defer func() {
		err = mntr.ToUserError(err)
	}()

	if e == nil {
		return errors.New("domain not found")
	}
	if e.Domain == "" {
		return errors.New("no domain configured")
	}
	return nil
}

// ValidateSecrets verifies that each credential (API key, user, user service
// key) is provided either inline or as a reference to an existing secret.
func (e *ExternalConfig) ValidateSecrets() (err error) {
	defer func() {
		err = mntr.ToUserError(err)
	}()

	if e.Credentials == nil {
		return errors.New("no credentials specified")
	}
	if err := secret.ValidateSecret(e.Credentials.APIKey, e.Credentials.ExistingAPIKey); err != nil {
		return fmt.Errorf("validating api key failed: %w", err)
	}
	if err := secret.ValidateSecret(e.Credentials.User, e.Credentials.ExistingUser); err != nil {
		return fmt.Errorf("validating user failed: %w", err)
	}
	if err := secret.ValidateSecret(e.Credentials.UserServiceKey, e.Credentials.ExistingUserServiceKey); err != nil {
		return fmt.Errorf("validating userservice key failed: %w", err)
	}
	return nil
}

// internalDomain builds the InternalDomain for the configured domain and the
// matching current state; the TLS certificate name is a fixed wildcard name.
func (e *ExternalConfig) internalDomain() (*InternalDomain, *current) {
	subdomains := make([]*Subdomain, 0)
	for _, sd := range e.AdditionalDNS {
		subdomains = append(subdomains, sd)
	}

	return &InternalDomain{
			Domain:     e.Domain,
			Subdomains: subdomains,
			Rules:      e.Rules,
		}, &current{
			domain:      e.Domain,
			tlsCertName: "tls-cert-wildcard",
		}
}

// Compile-time assertion that *current implements core.NetworkingCurrent.
var _ core.NetworkingCurrent = (*current)(nil)

// current captures the observed/derived state for the configured domain.
type current struct {
	domain           string `yaml:"-"`
	tlsCertName      string `yaml:"-"`
	ReadyCertificate core2.EnsureFunc
}

// GetDomain returns the configured domain.
func (c *current) GetDomain() string {
	return c.domain
}

// GetReadyCertificate returns the ensure-func that readies the certificate.
func (c *current) GetReadyCertificate() core2.EnsureFunc {
	return c.ReadyCertificate
}

// GetTlsCertName returns the name of the wildcard TLS certificate secret.
func (c *current) GetTlsCertName() string {
	return c.tlsCertName
}
package network

import (
	"context"
	"errors"

	"github.com/ava-labs/avalanche-network-runner/network/node"
)

// ErrUndefined is returned when an operation is attempted on a network that
// was never created/defined.
var ErrUndefined = errors.New("undefined network")

// ErrStopped is returned by most methods after Stop() has been called.
var ErrStopped = errors.New("network stopped")

// Network is an abstraction of an Avalanche network
type Network interface {
	// Returns nil if all the nodes in the network are healthy.
	// A stopped network is considered unhealthy.
	// Timeout is given by the context parameter.
	Healthy(context.Context) error
	// Stop all the nodes.
	// Returns ErrStopped if Stop() was previously called.
	Stop(context.Context) error
	// Start a new node with the given config.
	// Returns ErrStopped if Stop() was previously called.
	AddNode(node.Config) (node.Node, error)
	// Stop the node with this name.
	// Returns ErrStopped if Stop() was previously called.
	RemoveNode(name string) error
	// Return the node with this name.
	// Returns ErrStopped if Stop() was previously called.
	GetNode(name string) (node.Node, error)
	// Return all the nodes in this network.
	// Node name --> Node.
	// Returns ErrStopped if Stop() was previously called.
	GetAllNodes() (map[string]node.Node, error)
	// Returns the names of all nodes in this network.
	// Returns ErrStopped if Stop() was previously called.
	GetNodeNames() ([]string, error)
	// Save network snapshot
	// Network is stopped in order to do a safe preservation
	// Returns the full local path to the snapshot dir
	SaveSnapshot(context.Context, string) (string, error)
	// Remove network snapshot
	RemoveSnapshot(string) error
	// Get name of available snapshots
	GetSnapshotNames() ([]string, error)
}
def binary_search(arr, x):
    """Return an index of ``x`` in the sorted sequence ``arr``, or -1.

    Classic iterative binary search; ``arr`` must be sorted ascending.
    With duplicate elements, any matching index may be returned.
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        pivot = arr[mid]
        if pivot < x:
            # Target lies strictly to the right of mid.
            lo = mid + 1
        elif pivot > x:
            # Target lies strictly to the left of mid.
            hi = mid - 1
        else:
            return mid
    return -1
/* * Copyright (C) 2017-2019 Dremio Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { Component } from 'react'; import Radium from 'radium'; import pureRender from 'pure-render-decorator'; import PropTypes from 'prop-types'; import FontIcon from 'components/Icon/FontIcon'; import EllipsedText from 'components/EllipsedText'; import Meter from 'components/Meter'; import { LINE_NOWRAP_ROW_START_CENTER, FLEX_COL_START } from 'uiTheme/radium/flexStyle'; import { formDescription } from 'uiTheme/radium/typography'; import dataFormatUtils from 'utils/dataFormatUtils'; @pureRender @Radium export default class FieldValues extends Component { static propTypes = { options: PropTypes.arrayOf( PropTypes.shape({ percent: PropTypes.number, value: PropTypes.any, type: PropTypes.any }) ), optionsStyle: PropTypes.object }; static defaultProps = { options: [] }; render() { const { options } = this.props; const maxPercent = Math.max(...options.map(option => option.percent)); return <table className='field'> <tbody> { options.map(option => { const correctText = dataFormatUtils.formatValue(option.value); const correctTextStyle = option.value === undefined || option.value === null || option.value === '' ? 
styles.nullwrap : {}; return ( <tr> <td> <FontIcon type={FontIcon.getIconTypeForDataType(option.type)} style={styles.icon}/> </td> <td style={styles.value}> <EllipsedText text={correctText} style={{...correctTextStyle}}/> </td> <td style={styles.progressWrap}> <Meter value={option.percent} max={maxPercent}/> </td> <td style={styles.percent}> {`${option.percent.toPrecision(2)}%`} </td> </tr> ); }) } </tbody> </table>; } } const styles = { options: { ...FLEX_COL_START, height: 250 }, checkbox: { marginRight: -7, marginLeft: 15 }, option: { ...LINE_NOWRAP_ROW_START_CENTER, marginTop: 16 }, icon: { display: 'block', height: 24 }, value: { maxWidth: 200, paddingLeft: 10 }, nullwrap: { color: '#aaa', fontStyle: 'italic', width: '95%' }, progressWrap: { width: 400, paddingLeft: 10 }, percent: { ...formDescription, paddingLeft: 10 } };
var webpack = require('webpack');
var ExtractTextPlugin = require('extract-text-webpack-plugin');
var HtmlWebpackPlugin = require('html-webpack-plugin');
var path = require('path');

// Build version comes from the CLI (--version) and defaults to 'trunk';
// it selects both the output directory and the asset public path.
var version = require('yargs').argv.version || 'trunk';
// NOTE(review): '1172.16.31.10' is not a valid IPv4 address (first octet >
// 255) — looks like a mangled '172.16.31.10'; confirm the intended host.
var publicPath = 'http://1172.16.31.10:20001/' + version + '/';

// Builds the webpack (v1-style) configuration object for the web bundle.
function make() {
    // Two entry chunks: the application itself and a vendor chunk that
    // currently only carries lodash.
    var entry = {
        app: "./src_web/index.js",
        vendor: [
            "lodash"
        ]
    };
    var plugins = [
        // Make `_` available in every module without an explicit import.
        new webpack.ProvidePlugin({
            _: "lodash"
        }),
        // Extract CSS into content-hashed files instead of inlining it.
        new ExtractTextPlugin('css/[contenthash].css', {
            allChunks: true
        }),
        new HtmlWebpackPlugin({
            template: './src_web/index.html',
            favicon: './src_web/favicon.ico',
            faviconPath: publicPath + 'favicon.ico',
        }),
        // Split shared vendor code into its own bundle.
        new webpack.optimize.CommonsChunkPlugin("vendor", "vendor.bundle.js")
    ];
    return {
        entry: entry,
        output: {
            filename: 'module.[hash].js',
            path: path.resolve('./dist_web/' + version + '/'),
            publicPath: publicPath,
            libraryTarget: 'umd'
        },
        module: {
            loaders: [{
                test: /\.js$/,
                loader: 'babel-loader?optional[]=runtime&stage=0',
                exclude: /(node_modules|bower_components|spread|file_saver)/
            }, {
                // Third-party widget styles: plain (non-modular) CSS pipeline.
                test: /(((.*)webix(.*))|((.*)Gcspread(.*))|((.*)ueditor(.*))|((.*)pdfjs(.*)))\.scss$/,
                loader: ExtractTextPlugin.extract('style-loader', 'css-loader!autoprefixer-loader?{browsers:["last 2 version", "ie >= 10"]}!sass-loader', {
                    publicPath: '../'
                })
            }, {
                // Application styles: CSS modules with hashed local class names.
                test: /(.*)web_css(.*)\.scss$/,
                loader: ExtractTextPlugin.extract('style-loader', 'css-loader?modules&importLoaders=2&localIdentName=[name]__[local]___[hash:base64:5]!autoprefixer-loader?{browsers:["last 2 version", "ie >= 10"]}!sass-loader', {
                    publicPath: '../'
                })
            }, {
                test: /\.html$/,
                loader: 'html'
            }, {
                test: /\.jade$/,
                loader: "jade-loader?self"
            }, {
                // Inline small assets as data URIs; emit larger ones as files.
                test: /\.(woff|woff2|eot|ttf|svg|png|gif)$/,
                loader: 'url-loader?limit=10000&name=[path][name]_[hash:base64:5].[ext]'
            }]
        },
        resolve: {
            alias: {
                lodash: "lodash/lodash.min.js"
            }
        },
        plugins: plugins
    };
}

module.exports = make();
/// Returns the mean of the non-negative values in `testScores`.
///
/// Negative entries are ignored. An empty array, or an array containing
/// only negative values, yields 0.0.
func calculateAverageScore(testScores: [Int]) -> Double {
    // Keep only scores that are valid (>= 0).
    let usableScores = testScores.filter { $0 >= 0 }

    // Covers both the empty-input case and the all-negative case.
    if usableScores.isEmpty {
        return 0.0
    }

    let total = usableScores.reduce(0, +)
    return Double(total) / Double(usableScores.count)
}
// Define the subscriber function: a factory returning an isolated
// publish/subscribe event bus with subscribe/unsubscribe/publish.
function subscriber() {
  // Maps an event type to the array of callbacks registered for it.
  const subscriptions = {};

  // Subscribe to an event type
  function subscribe(eventType, callback) {
    if (!subscriptions[eventType]) {
      subscriptions[eventType] = [];
    }
    subscriptions[eventType].push(callback);
  }

  // Unsubscribe from an event type (removes every occurrence of callback;
  // unknown event types are a no-op).
  function unsubscribe(eventType, callback) {
    if (subscriptions[eventType]) {
      subscriptions[eventType] = subscriptions[eventType].filter(
        (cb) => cb !== callback
      );
    }
  }

  // Publish an event to every callback registered for its type.
  function publish(eventType, data) {
    if (subscriptions[eventType]) {
      subscriptions[eventType].forEach((callback) => callback(data));
    }
  }

  return {
    subscribe,
    unsubscribe,
    publish,
  };
}

// Usage of the subscriber function
const subscriber1 = subscriber();
const subscriber2 = subscriber();

// FIX: keep named references to the handlers so they can be unsubscribed.
// The old demo read `subscriberN.callbacks["userLoggedIn"][0]`, a property
// that never existed on the returned object, and threw a TypeError.
const handler1 = (data) => {
  console.log("Subscriber 1: User logged in -", data);
};
const handler2 = (data) => {
  console.log("Subscriber 2: User logged in -", data);
};

// Subscribe to events
subscriber1.subscribe("userLoggedIn", handler1);
subscriber2.subscribe("userLoggedIn", handler2);

// Publish events
subscriber1.publish("userLoggedIn", { username: "example_user" });
// Output:
// Subscriber 1: User logged in - { username: 'example_user' }
// (subscriber2 is a separate bus, so its handler only fires on its own
// publish call)
subscriber2.publish("userLoggedIn", { username: "example_user" });
// Output:
// Subscriber 2: User logged in - { username: 'example_user' }

// Unsubscribe from events
subscriber1.unsubscribe("userLoggedIn", handler1);
subscriber2.unsubscribe("userLoggedIn", handler2);

// Publish events after unsubscribing
subscriber1.publish("userLoggedIn", { username: "another_user" });
// No output as handler1 is unsubscribed
subscriber2.publish("userLoggedIn", { username: "another_user" });
// No output as handler2 is unsubscribed
<filename>app/src/main/java/com/roadmapper/deviceid/Band.java package com.roadmapper.deviceid; public class Band { private String frequency; private Integer band; private String technology; // For GSM public Band(String frequency, String technology) { this.frequency = frequency; this.band = 0; this.technology = technology; } // For UMTS/LTE public Band(String frequency, int band, String technology) { this.frequency = frequency; this.band = band; this.technology = technology; } public String getFrequency() { return frequency; } public void setFrequency(String frequency) { this.frequency = frequency; } public Integer getBand() { return band; } public void setBand(Integer band) { this.band = band; } public String getTechnology() { return technology; } public void setTechnology(String technology) { this.technology = technology; } @Override public boolean equals(Object obj) { if (!(obj instanceof Band)) { return false; } if (obj == this) { return true; } Band b = (Band) obj; if (b.getBand().equals(this.getBand()) && b.getTechnology().equals(this.getTechnology())) return true; return false; } @Override public int hashCode() { return 51 * (this.band.hashCode() + this.technology.hashCode()); } public String toString() { return this.getBand() + ", " + this.getFrequency() + ", " + this.getTechnology(); } }