code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9
values | license stringclasses 15
values | size int32 3 1.05M |
|---|---|---|---|---|---|
/*
* Licensed to Crate under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership. Crate licenses this file
* to you under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial
* agreement.
*/
package io.crate.analyze;
import io.crate.expression.symbol.Symbol;
import io.crate.metadata.doc.DocTableInfo;
import io.crate.sql.tree.Table;
/**
 * Analyzed representation of an {@code ALTER TABLE ... OPEN} or
 * {@code ALTER TABLE ... CLOSE} statement.
 */
public class AnalyzedAlterTableOpenClose implements DDLStatement {

    private final DocTableInfo docTable;
    private final Table<Symbol> tableElement;
    private final boolean open;

    AnalyzedAlterTableOpenClose(DocTableInfo tableInfo,
                                Table<Symbol> table,
                                boolean openTable) {
        this.docTable = tableInfo;
        this.tableElement = table;
        this.open = openTable;
    }

    /** Metadata of the table targeted by the statement. */
    public DocTableInfo tableInfo() {
        return docTable;
    }

    /** The table reference (possibly with partition properties) as written in the statement. */
    public Table<Symbol> table() {
        return tableElement;
    }

    /** {@code true} when the statement opens the table, {@code false} when it closes it. */
    public boolean isOpenTable() {
        return open;
    }

    @Override
    public <C, R> R accept(AnalyzedStatementVisitor<C, R> analyzedStatementVisitor, C context) {
        return analyzedStatementVisitor.visitAnalyzedAlterTableOpenClose(this, context);
    }
}
| EvilMcJerkface/crate | server/src/main/java/io/crate/analyze/AnalyzedAlterTableOpenClose.java | Java | apache-2.0 | 2,013 |
/*
* DBeaver - Universal Database Manager
* Copyright (C) 2010-2019 Serge Rider (serge@jkiss.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jkiss.dbeaver.ext.postgresql.model;
import org.jkiss.code.NotNull;
import org.jkiss.code.Nullable;
import org.jkiss.dbeaver.DBException;
import org.jkiss.dbeaver.Log;
import org.jkiss.dbeaver.ext.postgresql.PostgreConstants;
import org.jkiss.dbeaver.ext.postgresql.PostgreUtils;
import org.jkiss.dbeaver.model.DBUtils;
import org.jkiss.dbeaver.model.data.DBDPseudoAttribute;
import org.jkiss.dbeaver.model.data.DBDPseudoAttributeContainer;
import org.jkiss.dbeaver.model.exec.DBCException;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCPreparedStatement;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCResultSet;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCSession;
import org.jkiss.dbeaver.model.impl.SimpleObjectCache;
import org.jkiss.dbeaver.model.impl.jdbc.JDBCUtils;
import org.jkiss.dbeaver.model.meta.Association;
import org.jkiss.dbeaver.model.meta.Property;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.dbeaver.model.struct.DBSEntityAssociation;
import org.jkiss.dbeaver.model.struct.DBStructUtils;
import org.jkiss.utils.CommonUtils;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.*;
/**
 * PostgreTable
 *
 * Base implementation of a PostgreSQL table. On top of {@code PostgreTableReal} it tracks
 * table-specific metadata read from {@code pg_class}: OID usage, tablespace, table
 * inheritance (parent/child tables) and declarative partitioning info (PostgreSQL 10+).
 */
public abstract class PostgreTable extends PostgreTableReal implements PostgreTableContainer, DBDPseudoAttributeContainer
{
    private static final Log log = Log.getLog(PostgreTable.class);

    // Cache of foreign keys owned by this table; exposed via getForeignKeyCache().
    private SimpleObjectCache<PostgreTable, PostgreTableForeignKey> foreignKeys = new SimpleObjectCache<>();
    //private List<PostgreTablePartition> partitions = null;

    // pg_class.relhasoids - whether rows carry the OID pseudo-column.
    private boolean hasOids;
    // pg_class.reltablespace; 0 means "use the database default tablespace".
    private long tablespaceId;
    // Lazily loaded inheritance links to parent tables; null until first load.
    private List<PostgreTableInheritance> superTables;
    // Lazily loaded inheritance links to child tables/partitions; null until first load.
    private List<PostgreTableInheritance> subTables;
    // pg_class.relhassubclass - true when child tables or partitions exist.
    private boolean hasSubClasses;
    // Derived flag: true when a partition key was read (PostgreSQL 10+ only).
    private boolean hasPartitions;
    // Partition key definition text, or null for non-partitioned tables.
    private String partitionKey;

    public PostgreTable(PostgreTableContainer container)
    {
        super(container);
    }

    /**
     * Creates a table from a metadata result set row (pg_class based query).
     */
    public PostgreTable(
        PostgreTableContainer container,
        ResultSet dbResult)
    {
        super(container, dbResult);
        this.hasOids = JDBCUtils.safeGetBoolean(dbResult, "relhasoids");
        this.tablespaceId = JDBCUtils.safeGetLong(dbResult, "reltablespace");
        this.hasSubClasses = JDBCUtils.safeGetBoolean(dbResult, "relhassubclass");
        // The partition_key column is only produced by the metadata query on PostgreSQL 10+.
        this.partitionKey = getDataSource().isServerVersionAtLeast(10, 0) ? JDBCUtils.safeGetString(dbResult, "partition_key") : null;
        this.hasPartitions = this.partitionKey != null;
    }

    // Copy constructor
    public PostgreTable(DBRProgressMonitor monitor, PostgreTableContainer container, PostgreTable source, boolean persisted) throws DBException {
        super(monitor, container, source, persisted);
        this.hasOids = source.hasOids;
        // Keep the source tablespace only when copying within the same container,
        // otherwise reset to 0 (database default).
        this.tablespaceId = container == source.getContainer() ? source.tablespaceId : 0;
        this.partitionKey = source.partitionKey;
        // NOTE(review): hasSubClasses/hasPartitions are not copied from source - confirm this is intentional.
        /*
        // Copy FKs
        List<PostgreTableForeignKey> fkList = new ArrayList<>();
        for (PostgreTableForeignKey srcFK : CommonUtils.safeCollection(source.getForeignKeys(monitor))) {
            PostgreTableForeignKey fk = new PostgreTableForeignKey(monitor, this, srcFK);
            if (fk.getReferencedConstraint() != null) {
                fk.setName(fk.getName() + "_copy"); // Fix FK name - they are unique within schema
                fkList.add(fk);
            } else {
                log.debug("Can't copy association '" + srcFK.getName() + "' - can't find referenced constraint");
            }
        }
        this.foreignKeys.setCache(fkList);
        */
    }

    /** Returns the (possibly empty) foreign key cache for this table. */
    public SimpleObjectCache<PostgreTable, PostgreTableForeignKey> getForeignKeyCache() {
        return foreignKeys;
    }

    /** True when an explicit tablespace is set (i.e. not the database default). */
    public boolean isTablespaceSpecified() {
        return tablespaceId != 0;
    }

    /**
     * Resolves the table's tablespace; falls back to the database default
     * when none is explicitly assigned.
     */
    @Property(viewable = true, editable = true, updatable = true, order = 20, listProvider = TablespaceListProvider.class)
    public PostgreTablespace getTablespace(DBRProgressMonitor monitor) throws DBException {
        if (tablespaceId == 0) {
            return getDatabase().getDefaultTablespace(monitor);
        }
        return PostgreUtils.getObjectById(monitor, getDatabase().tablespaceCache, getDatabase(), tablespaceId);
    }

    public void setTablespace(PostgreTablespace tablespace) {
        this.tablespaceId = tablespace.getObjectId();
    }

    @Override
    public boolean isView()
    {
        return false;
    }

    @Property(editable = true, updatable = true, order = 40)
    public boolean isHasOids() {
        return hasOids;
    }

    public void setHasOids(boolean hasOids) {
        this.hasOids = hasOids;
    }

    /** True when the table is declaratively partitioned (has a partition key). */
    @Property(viewable = true, order = 42)
    public boolean hasPartitions() {
        return hasPartitions;
    }

    @Property(viewable = true, editable = true, updatable = true, order = 43)
    public String getPartitionKey() {
        return partitionKey;
    }

    public void setPartitionKey(String partitionKey) {
        this.partitionKey = partitionKey;
    }

    @Override
    public Collection<PostgreIndex> getIndexes(DBRProgressMonitor monitor) throws DBException {
        return getSchema().indexCache.getObjects(monitor, getSchema(), this);
    }

    @Override
    public String getObjectDefinitionText(DBRProgressMonitor monitor, Map<String, Object> options) throws DBException {
        return DBStructUtils.generateTableDDL(monitor, this, options, false);
    }

    /**
     * Exposes the OID pseudo-attribute when the table has OIDs and the
     * server type still supports them; null otherwise.
     */
    @Override
    public DBDPseudoAttribute[] getPseudoAttributes() {
        if (this.hasOids && getDataSource().getServerType().supportsOids()) {
            return new DBDPseudoAttribute[]{PostgreConstants.PSEUDO_ATTR_OID};
        } else {
            return null;
        }
    }

    /**
     * Associations = foreign keys plus inheritance links to parent tables.
     */
    @Association
    @Override
    public synchronized Collection<? extends DBSEntityAssociation> getAssociations(@NotNull DBRProgressMonitor monitor)
        throws DBException
    {
        final List<PostgreTableInheritance> superTables = getSuperInheritance(monitor);
        final Collection<PostgreTableForeignKey> foreignKeys = getForeignKeys(monitor);
        // Avoid allocating an aggregate list when one of the two is empty.
        if (CommonUtils.isEmpty(superTables)) {
            return foreignKeys;
        } else if (CommonUtils.isEmpty(foreignKeys)) {
            return superTables;
        }
        List<DBSEntityAssociation> agg = new ArrayList<>(superTables.size() + foreignKeys.size());
        agg.addAll(superTables);
        agg.addAll(foreignKeys);
        return agg;
    }

    @Override
    public Collection<? extends DBSEntityAssociation> getReferences(@NotNull DBRProgressMonitor monitor) throws DBException {
        // Start with inheritance links coming from child tables.
        List<DBSEntityAssociation> refs = new ArrayList<>(
            CommonUtils.safeList(getSubInheritance(monitor)));
        // This is dummy implementation
        // Get references from this schema only
        final Collection<PostgreTableForeignKey> allForeignKeys =
            getContainer().getSchema().constraintCache.getTypedObjects(monitor, getContainer(), PostgreTableForeignKey.class);
        for (PostgreTableForeignKey constraint : allForeignKeys) {
            if (constraint.getAssociatedEntity() == this) {
                refs.add(constraint);
            }
        }
        return refs;
    }

    @Association
    public Collection<PostgreTableForeignKey> getForeignKeys(@NotNull DBRProgressMonitor monitor) throws DBException {
        return getSchema().constraintCache.getTypedObjects(monitor, getSchema(), this, PostgreTableForeignKey.class);
    }

    /**
     * Parent tables this table inherits from, or null when there are none.
     */
    @Nullable
    @Property(viewable = false, optional = true, order = 30)
    public List<PostgreTableBase> getSuperTables(DBRProgressMonitor monitor) throws DBException {
        final List<PostgreTableInheritance> si = getSuperInheritance(monitor);
        if (CommonUtils.isEmpty(si)) {
            return null;
        }
        List<PostgreTableBase> result = new ArrayList<>(si.size());
        for (int i1 = 0; i1 < si.size(); i1++) {
            result.add(si.get(i1).getAssociatedEntity());
        }
        return result;
    }

    /**
     * Sub tables = child tables
     * (partitions are excluded here; see getPartitions()).
     */
    @Nullable
    @Property(viewable = false, optional = true, order = 31)
    public List<PostgreTableBase> getSubTables(DBRProgressMonitor monitor) throws DBException {
        final List<PostgreTableInheritance> si = getSubInheritance(monitor);
        if (CommonUtils.isEmpty(si)) {
            return null;
        }
        List<PostgreTableBase> result = new ArrayList<>(si.size());
        for (PostgreTableInheritance aSi : si) {
            PostgreTableBase table = aSi.getParentObject();
            if (!table.isPartition()) {
                result.add(table);
            }
        }
        return result;
    }

    /**
     * Lazily loads inheritance links to parent tables from pg_inherits.
     * Returns null (not an empty list) when the table inherits from nothing;
     * internally an empty list is cached as a "already loaded" sentinel.
     */
    @Nullable
    public List<PostgreTableInheritance> getSuperInheritance(DBRProgressMonitor monitor) throws DBException {
        if (superTables == null && getDataSource().getServerType().supportsInheritance()) {
            try (JDBCSession session = DBUtils.openMetaSession(monitor, this, "Load table inheritance info")) {
                try (JDBCPreparedStatement dbStat = session.prepareStatement(
                    "SELECT i.*,c.relnamespace " +
                    "FROM pg_catalog.pg_inherits i,pg_catalog.pg_class c " +
                    "WHERE i.inhrelid=? AND c.oid=i.inhparent " +
                    "ORDER BY i.inhseqno")) {
                    dbStat.setLong(1, getObjectId());
                    try (JDBCResultSet dbResult = dbStat.executeQuery()) {
                        while (dbResult.next()) {
                            final long parentSchemaId = JDBCUtils.safeGetLong(dbResult, "relnamespace");
                            final long parentTableId = JDBCUtils.safeGetLong(dbResult, "inhparent");
                            PostgreSchema schema = getDatabase().getSchema(monitor, parentSchemaId);
                            if (schema == null) {
                                // Skip rows whose parent schema cannot be resolved.
                                log.warn("Can't find parent table's schema '" + parentSchemaId + "'");
                                continue;
                            }
                            PostgreTableBase parentTable = schema.getTable(monitor, parentTableId);
                            if (parentTable == null) {
                                log.warn("Can't find parent table '" + parentTableId + "' in '" + schema.getName() + "'");
                                continue;
                            }
                            if (superTables == null) {
                                superTables = new ArrayList<>();
                            }
                            superTables.add(
                                new PostgreTableInheritance(
                                    this,
                                    parentTable,
                                    JDBCUtils.safeGetInt(dbResult, "inhseqno"),
                                    true));
                        }
                    }
                }
            } catch (SQLException e) {
                throw new DBCException(e, getDataSource());
            }
            if (superTables == null) {
                // Cache "no parents" so the query is not repeated on every call.
                superTables = Collections.emptyList();
            }
        }
        return superTables == null || superTables.isEmpty() ? null : superTables;
    }

    public boolean hasSubClasses() {
        return hasSubClasses;
    }

    /**
     * Lazily loads inheritance links to child tables (including partitions)
     * from pg_inherits. Only queried when relhassubclass was set.
     * Returns null when there are no children.
     */
    @Nullable
    public List<PostgreTableInheritance> getSubInheritance(@NotNull DBRProgressMonitor monitor) throws DBException {
        if (subTables == null && hasSubClasses && getDataSource().getServerType().supportsInheritance()) {
            List<PostgreTableInheritance> tables = new ArrayList<>();
            try (JDBCSession session = DBUtils.openMetaSession(monitor, this, "Load table inheritance info")) {
                String sql = "SELECT i.*,c.relnamespace " +
                    "FROM pg_catalog.pg_inherits i,pg_catalog.pg_class c " +
                    "WHERE i.inhparent=? AND c.oid=i.inhrelid";
//                if (getDataSource().isServerVersionAtLeast(10, 0)) {
//                    sql += " AND c.relispartition=false";
//                }
                try (JDBCPreparedStatement dbStat = session.prepareStatement(sql)) {
                    dbStat.setLong(1, getObjectId());
                    try (JDBCResultSet dbResult = dbStat.executeQuery()) {
                        while (dbResult.next()) {
                            final long subSchemaId = JDBCUtils.safeGetLong(dbResult, "relnamespace");
                            final long subTableId = JDBCUtils.safeGetLong(dbResult, "inhrelid");
                            PostgreSchema schema = getDatabase().getSchema(monitor, subSchemaId);
                            if (schema == null) {
                                log.warn("Can't find sub-table's schema '" + subSchemaId + "'");
                                continue;
                            }
                            PostgreTableBase subTable = schema.getTable(monitor, subTableId);
                            if (subTable == null) {
                                log.warn("Can't find sub-table '" + subTableId + "' in '" + schema.getName() + "'");
                                continue;
                            }
                            tables.add(
                                new PostgreTableInheritance(
                                    subTable,
                                    this,
                                    JDBCUtils.safeGetInt(dbResult, "inhseqno"),
                                    true));
                        }
                    }
                }
            } catch (SQLException e) {
                throw new DBCException(e, getDataSource());
            }
            DBUtils.orderObjects(tables);
            this.subTables = tables;
        }
        return subTables == null || subTables.isEmpty() ? null : subTables;
    }

    /**
     * Child tables that are partitions of this table, or null when none exist.
     */
    @Nullable
    @Association
    public Collection<PostgreTableBase> getPartitions(DBRProgressMonitor monitor) throws DBException {
        final List<PostgreTableInheritance> si = getSubInheritance(monitor);
        if (CommonUtils.isEmpty(si)) {
            return null;
        }
        List<PostgreTableBase> result = new ArrayList<>(si.size());
        for (int i1 = 0; i1 < si.size(); i1++) {
            PostgreTableBase table = si.get(i1).getParentObject();
            if (table.isPartition()) {
                result.add(table);
            }
        }
        return result;
    }
}
| liuyuanyuan/dbeaver | plugins/org.jkiss.dbeaver.ext.postgresql/src/org/jkiss/dbeaver/ext/postgresql/model/PostgreTable.java | Java | apache-2.0 | 15,134 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package at.illecker.storm.commons.util.io;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Helpers for Java-native serialization of {@link Serializable} objects to and from files.
 */
public class SerializationUtils {

    private static final Logger LOG = LoggerFactory
        .getLogger(SerializationUtils.class);

    /**
     * Serializes a list to the given file, provided the concrete List
     * implementation is itself Serializable (e.g., ArrayList, LinkedList).
     *
     * @param objects  list to serialize
     * @param fileName target file path
     */
    public static <T extends Serializable> void serializeList(List<T> objects,
        String fileName) {
        if (objects instanceof Serializable) {
            SerializationUtils.serialize((Serializable) objects, fileName);
        } else {
            LOG.error("List is not serializable!");
        }
    }

    /**
     * Serializes a single object to the given file. Errors are logged, not thrown.
     * Does nothing (and logs nothing) when {@code object} is null.
     *
     * @param object   object to serialize; may be null
     * @param fileName target file path
     */
    public static <T extends Serializable> void serialize(T object,
        String fileName) {
        try {
            if (object != null) {
                // try-with-resources guarantees the streams are closed even when
                // writeObject fails (the original code leaked them on exception).
                try (FileOutputStream fos = new FileOutputStream(fileName);
                     ObjectOutputStream oos = new ObjectOutputStream(fos)) {
                    oos.writeObject(object);
                }
                // Only report success when something was actually written.
                LOG.info("Serialized in " + fileName);
            }
        } catch (FileNotFoundException fnfe) {
            LOG.error("FileNotFoundException: " + fnfe.getMessage());
        } catch (IOException ioe) {
            LOG.error("IOException: " + ioe.getMessage());
        }
    }

    /**
     * Deserializes an object from the given file.
     *
     * @param fileName source file path
     * @return the deserialized object, or null on failure
     */
    public static <T extends Serializable> T deserialize(String fileName) {
        return deserialize(IOUtils.getInputStream(fileName));
    }

    /**
     * Deserializes an object from the given stream and closes it.
     * Errors are logged, not thrown.
     *
     * @param is source stream; may be null
     * @return the deserialized object, or null on failure
     */
    @SuppressWarnings("unchecked") // caller-declared T; readObject can only return Object
    public static <T extends Serializable> T deserialize(InputStream is) {
        T object = null;
        try {
            if (is != null) {
                // Closing the ObjectInputStream also closes the wrapped stream.
                try (ObjectInputStream ois = new ObjectInputStream(is)) {
                    object = (T) ois.readObject();
                }
            }
        } catch (FileNotFoundException fnfe) {
            LOG.error("FileNotFoundException: " + fnfe.getMessage());
        } catch (IOException ioe) {
            LOG.error("IOException: " + ioe.getMessage());
        } catch (ClassNotFoundException c) {
            LOG.error("ClassNotFoundException: " + c.getMessage());
        }
        return object;
    }
}
| millecker/storm-apps | commons/src/at/illecker/storm/commons/util/io/SerializationUtils.java | Java | apache-2.0 | 2,971 |
<?php
use DTS\eBaySDK\Trading\Types\MaximumBuyerPolicyViolationsType;
/**
 * Unit tests for the MaximumBuyerPolicyViolationsType trading type.
 */
class MaximumBuyerPolicyViolationsTypeTest extends \PHPUnit_Framework_TestCase
{
    /** Instance under test, rebuilt before every test case. */
    private $instance;

    protected function setUp()
    {
        $this->instance = new MaximumBuyerPolicyViolationsType();
    }

    public function testCanBeCreated()
    {
        $expectedClass = '\DTS\eBaySDK\Trading\Types\MaximumBuyerPolicyViolationsType';
        $this->assertInstanceOf($expectedClass, $this->instance);
    }

    public function testExtendsBaseType()
    {
        $this->assertInstanceOf('\DTS\eBaySDK\Types\BaseType', $this->instance);
    }
}
| spoilie/ebay-sdk-trading | test/DTS/eBaySDK/Trading/Types/MaximumBuyerPolicyViolationsTypeTest.php | PHP | apache-2.0 | 569 |
package org.synyx.urlaubsverwaltung.web;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
/**
 * Entry point controller: maps the application root to the user's overview page.
 *
 * @author Aljona Murygina - murygina@synyx.de
 */
@Controller
public class BaseController {

    private static final String INDEX_URL = "/";

    /**
     * Redirects any request for the root URL to the overview view.
     *
     * @return redirect instruction for Spring MVC
     */
    @RequestMapping(value = INDEX_URL, method = RequestMethod.GET)
    public String index() {
        return "redirect:/web/overview";
    }
}
| pongo710/urlaubsverwaltung | src/main/java/org/synyx/urlaubsverwaltung/web/BaseController.java | Java | apache-2.0 | 512 |
// classe Proposta
package cryptohelper.data;
import cryptohelper.service.QueryResult;
import cryptohelper.interfaces.HtmlVisitable;
import cryptohelper.interfaces.HtmlVisitorInterface;
import cryptohelper.service.DBController;
import java.sql.SQLException;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
 * A proposal of a cipher system ("sistema di cifratura") made by one user
 * (proponente) to another (partner). Persisted in the SDCPARTNERS table with a
 * state of "pending" or "accettata".
 */
public class Proposta implements HtmlVisitable {

    // Fixed: the logger was mistakenly bound to Messaggio.class (copy-paste).
    private static final Log log = LogFactory.getLog(Proposta.class);

    private SistemaCifratura sdc;
    private UserInfo proponente;
    private UserInfo partner;
    private String stato;

    /** Creates a new proposal in the default "pending" state. */
    public Proposta(SistemaCifratura sdc, UserInfo proponente, UserInfo partner) {
        this.sdc = sdc;
        this.proponente = proponente;
        this.partner = partner;
        this.stato = "pending";
    }

    /** Creates a proposal with an explicit state (used when loading from the DB). */
    public Proposta(SistemaCifratura sdc, UserInfo proponente, UserInfo partner, String stato) {
        this.sdc = sdc;
        this.proponente = proponente;
        this.partner = partner;
        this.stato = stato;
    }

    /**
     * Persists this proposal. A new ("pending") proposal is inserted; otherwise
     * the stored row's state is updated. Fails (returns false) when the two
     * users already have an accepted cipher system in either direction.
     *
     * @return true when the insert/update succeeded
     */
    public boolean salva() {
        //se proponente e il partner hanno gia concordato un sistema di cifratura in precedenza non potranno piu farlo.
        log.debug(this.toString());
        boolean result = false;
        DBController dbc = DBController.getInstance();
        // NOTE: the parentheses look unbalanced per pair, but SQL AND/OR precedence
        // makes this match "accettata" rows in either direction, as intended.
        String queryExists = "SELECT * "
                + " FROM SDCPARTNERS"
                + " WHERE ((ID_CREATORE = " + proponente.getId()
                + " AND ID_PARTNER = " + partner.getId()
                + ") OR (ID_CREATORE = " + partner.getId()
                + ") AND ID_PARTNER = " + proponente.getId()
                + ") AND STATO_PROPOSTA = 'accettata'";
        QueryResult qr;
        try {
            qr = DBController.getInstance().executeQuery(queryExists);
            log.debug("QR*************" + qr.toString());
            log.debug("Size*************" + qr.getSize());
            if (qr.getSize() > 0) {
                // An accepted cipher system already exists between the two users.
                return false;
            }
        } catch (SQLException ex) {
            log.fatal(ex.getMessage());
        }
        String queryInsert = "INSERT INTO SDCPARTNERS(ID_CREATORE, ID_PARTNER,ID_SDC,STATO_PROPOSTA)"
                + "VALUES("
                + this.proponente.getId()
                + ","
                + this.partner.getId()
                + ","
                + this.sdc.getId()
                + ",'"
                + this.stato
                + "')";
        String queryUpdate = "UPDATE SDCPARTNERS"
                + " SET STATO_PROPOSTA = '" + this.getStato()
                + "'"
                + " WHERE ID_CREATORE = "
                + this.getProponente().getId()
                + " AND ID_PARTNER = " + this.getPartner().getId()
                + " AND ID_SDC = " + this.getSdc().getId();
        try {
            if (this.stato.equals("pending")) {
                result = dbc.executeUpdate(queryInsert);
                log.debug("INFO DATA:" + this.getClass() + "." + Thread.currentThread().getStackTrace()[1].getMethodName() + "Inserito: " + this.toString());
            } else {
                result = dbc.executeUpdate(queryUpdate);
            }
        } catch (SQLException ex) {
            log.fatal(ex.getMessage());
        }
        return result;
    }

    /**
     *
     * @param stud Studente logato al sistema
     * @return lista con proposte che hanno come stato pending
     */
    public static ArrayList<Proposta> caricaProposteSistemiCifraturaPedding(UserInfo stud) {
        String query = "SELECT * FROM SDCPARTNERS WHERE ID_PARTNER =" + stud.getId();
        QueryResult qr = null;
        ArrayList<Proposta> proposte = new ArrayList<>();
        try {
            qr = DBController.getInstance().executeQuery(query);
            while (qr.next()) {
                // Only keep rows still waiting for the partner's answer.
                if (qr.getString("stato_proposta").equals("pending")) {
                    Proposta temp = new Proposta(SistemaCifratura.getSistemaCifratura(qr.getInt("ID_SDC")), UserInfo.getUserInfo(qr.getInt("ID_CREATORE")), UserInfo.getUserInfo(qr.getInt("ID_PARTNER")));
                    log.debug("Proposta: " + temp.toString());
                    proposte.add(temp);
                }
            }
        } catch (SQLException ex) {
            log.fatal(ex.getMessage());
        } catch (Exception ex) {
            log.fatal(ex.getMessage());
        }
        return proposte;
    }

    /**
     *
     * @param stud Studente logato al sistema
     * @return lista con proposte (qualsiasi stato) dove lo studente e creatore o partner
     */
    public static ArrayList<Proposta> caricaProposteSistemiCifratura(UserInfo stud) {
        String query = "SELECT *"
                + "FROM SDCPARTNERS "
                + "WHERE ID_PARTNER = " + stud.getId() + " OR ID_CREATORE = " + stud.getId() + "";
        QueryResult qr = null;
        ArrayList<Proposta> proposte = new ArrayList<>();
        try {
            qr = DBController.getInstance().executeQuery(query);
            log.debug(qr.toString());
            while (qr.next()) {
                Proposta temp = new Proposta(SistemaCifratura.getSistemaCifratura(qr.getInt("ID_SDC")), UserInfo.getUserInfo(qr.getInt("ID_CREATORE")), UserInfo.getUserInfo(qr.getInt("ID_PARTNER")), qr.getString("STATO_PROPOSTA"));
                log.debug("Proposta: " + temp.toString());
                proposte.add(temp);
            }
        } catch (SQLException ex) {
            log.fatal(ex.getMessage());
        } catch (Exception ex) {
            log.fatal(ex.getMessage());
        }
        return proposte;
    }

    /**
     * Elimina una proposta dalla tabella SDCPARTNERS.
     * Fixed: the original query concatenated values without spaces, producing
     * invalid SQL such as "ID_SDC =5AND ID_CREATORE = 3AND ...".
     *
     * @return true se l'operazione va a buon fine
     */
    public boolean elimina() {
        DBController dbc = DBController.getInstance();
        boolean result = false;
        String query = "DELETE FROM SDCPARTNERS WHERE ID_SDC = " + sdc.getId()
                + " AND ID_CREATORE = " + proponente.getId()
                + " AND ID_PARTNER = " + partner.getId();
        try {
            result = dbc.executeUpdate(query);
        } catch (SQLException ex) {
            log.fatal(ex.getMessage());
        }
        return result;
    }

    @Override
    public String toString() {
        return "Proposta{" + "sdc=" + sdc.getId() + ", proponente=" + proponente.getId() + ", partner=" + partner.getId() + ", stato=" + stato + '}';
    }

    @Override
    public void accept(HtmlVisitorInterface visitor) {
        visitor.visit(this);
    }

    //METODI GETTER
    public UserInfo getProponente() {
        return proponente;
    }

    public UserInfo getPartner() {
        return partner;
    }

    public SistemaCifratura getSdc() {
        return sdc;
    }

    public String getStato() {
        return stato;
    }

    //METODI SETTER
    public void setProponente(UserInfo proponente) {
        this.proponente = proponente;
    }

    public void setPartner(UserInfo partner) {
        this.partner = partner;
    }

    public void setSdc(SistemaCifratura sdc) {
        this.sdc = sdc;
    }

    public void setStato(String stato) {
        this.stato = stato;
    }
}
| sashapodgoreanu/CryptoHelper | src/cryptohelper/data/Proposta.java | Java | apache-2.0 | 7,595 |
package org.drools.example.api.defaultkiesession;
import org.kie.KieServices;
import org.kie.runtime.KieContainer;
import org.kie.runtime.KieSession;
import java.io.PrintStream;
/**
 * Example showing how to obtain the default KieSession from the classpath
 * container, insert a fact and fire all rules.
 */
public class DefaultKieSessionExample {

    public void go(PrintStream out) {
        // Resolve the classpath-based container and open the default session.
        KieServices kieServices = KieServices.Factory.get();
        KieContainer kieContainer = kieServices.getKieClasspathContainer();
        KieSession kieSession = kieContainer.newKieSession();

        // Rules write their output through this global.
        kieSession.setGlobal("out", out);
        kieSession.insert(new Message("Dave", "Hello, HAL. Do you read me, HAL?"));
        kieSession.fireAllRules();
    }

    public static void main(String[] args) {
        new DefaultKieSessionExample().go(System.out);
    }
}
| yurloc/drools | drools-examples-api/default-kiesession/src/main/java/org/drools/example/api/defaultkiesession/DefaultKieSessionExample.java | Java | apache-2.0 | 741 |
/**
* Copyright 2013-2017 the original author or authors from the JHipster project.
*
* This file is part of the JHipster project, see http://www.jhipster.tech/
* for more information.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* eslint-disable consistent-return */
const chalk = require('chalk');
const _ = require('lodash');
const BaseGenerator = require('../generator-base');
const prompts = require('./prompts');
const writeAngularFiles = require('./files-angular').writeFiles;
const writeAngularJsFiles = require('./files-angularjs').writeFiles;
const writeReactFiles = require('./files-react').writeFiles;
const packagejs = require('../../package.json');
const constants = require('../generator-constants');
let useBlueprint;
module.exports = class extends BaseGenerator {
constructor(args, opts) {
super(args, opts);
this.configOptions = this.options.configOptions || {};
// This adds support for a `--protractor` flag
this.option('protractor', {
desc: 'Enable protractor tests',
type: Boolean,
defaults: false
});
// This adds support for a `--uaa-base-name` flag
this.option('uaa-base-name', {
desc: 'Provide the name of UAA server, when using --auth uaa',
type: String
});
// This adds support for a `--build` flag
this.option('build', {
desc: 'Provide build tool for the application',
type: String
});
// This adds support for a `--websocket` flag
this.option('websocket', {
desc: 'Provide websocket option for the application',
type: String
});
// This adds support for a `--auth` flag
this.option('auth', {
desc: 'Provide authentication type for the application',
type: String
});
// This adds support for a `--db` flag
this.option('db', {
desc: 'Provide DB name for the application',
type: String
});
// This adds support for a `--social` flag
this.option('social', {
desc: 'Provide development DB option for the application',
type: Boolean,
default: false
});
// This adds support for a `--search-engine` flag
this.option('search-engine', {
desc: 'Provide development DB option for the application',
type: String
});
// This adds support for a `--search-engine` flag
this.option('hb-cache', {
desc: 'Provide hibernate cache option for the application',
type: String
});
// This adds support for a `--jhi-prefix` flag
this.option('jhi-prefix', {
desc: 'Add prefix before services, controllers and states name',
type: String,
defaults: 'jhi'
});
// This adds support for a `--skip-user-management` flag
this.option('skip-user-management', {
desc: 'Skip the user management module during app generation',
type: Boolean,
defaults: false
});
// This adds support for a `--npm` flag
this.option('npm', {
desc: 'Use npm instead of yarn',
type: Boolean,
defaults: false
});
// This adds support for a `--experimental` flag which can be used to enable experimental features
this.option('experimental', {
desc: 'Enable experimental features. Please note that these features may be unstable and may undergo breaking changes at any time',
type: Boolean,
defaults: false
});
this.setupClientOptions(this);
const blueprint = this.options.blueprint || this.configOptions.blueprint || this.config.get('blueprint');
useBlueprint = this.composeBlueprint(blueprint, 'client'); // use global variable since getters dont have access to instance property
}
get initializing() {
if (useBlueprint) return;
return {
displayLogo() {
if (this.logo) {
this.printJHipsterLogo();
}
},
setupClientconsts() {
// Make constants available in templates
this.MAIN_SRC_DIR = constants.CLIENT_MAIN_SRC_DIR;
this.TEST_SRC_DIR = constants.CLIENT_TEST_SRC_DIR;
this.serverPort = this.config.get('serverPort') || this.configOptions.serverPort || 8080;
this.applicationType = this.config.get('applicationType') || this.configOptions.applicationType;
if (!this.applicationType) {
this.applicationType = 'monolith';
}
this.clientFramework = this.config.get('clientFramework');
if (!this.clientFramework) {
/* for backward compatibility */
this.clientFramework = 'angular1';
}
if (this.clientFramework === 'angular2') {
/* for backward compatibility */
this.clientFramework = 'angularX';
}
this.useSass = this.config.get('useSass');
this.enableTranslation = this.config.get('enableTranslation'); // this is enabled by default to avoid conflicts for existing applications
this.nativeLanguage = this.config.get('nativeLanguage');
this.languages = this.config.get('languages');
this.enableI18nRTL = this.isI18nRTLSupportNecessary(this.languages);
this.messageBroker = this.config.get('messageBroker');
this.packagejs = packagejs;
const baseName = this.config.get('baseName');
if (baseName) {
this.baseName = baseName;
}
const clientConfigFound = this.useSass !== undefined;
if (clientConfigFound) {
// If translation is not defined, it is enabled by default
if (this.enableTranslation === undefined) {
this.enableTranslation = true;
}
if (this.nativeLanguage === undefined) {
this.nativeLanguage = 'en';
}
if (this.languages === undefined) {
this.languages = ['en', 'fr'];
}
this.existingProject = true;
}
if (!this.clientPackageManager) {
if (this.useYarn) {
this.clientPackageManager = 'yarn';
} else {
this.clientPackageManager = 'npm';
}
}
},
validateSkipServer() {
if (this.skipServer && !(this.databaseType && this.devDatabaseType && this.prodDatabaseType && this.authenticationType)) {
this.error(`When using skip-server flag, you must pass a database option and authentication type using ${chalk.yellow('--db')} and ${chalk.yellow('--auth')} flags`);
}
}
};
}
get prompting() {
if (useBlueprint) return;
return {
askForModuleName: prompts.askForModuleName,
askForClient: prompts.askForClient,
askForClientSideOpts: prompts.askForClientSideOpts,
askFori18n: prompts.askFori18n,
setSharedConfigOptions() {
this.configOptions.clientFramework = this.clientFramework;
this.configOptions.useSass = this.useSass;
}
};
}
get configuring() {
    // Blueprint generators provide their own configuring phase.
    if (useBlueprint) return;
    return {
        // Report anonymous usage statistics about the chosen client options.
        insight() {
            const insight = this.insight();
            insight.trackWithEvent('generator', 'client');
            insight.track('app/clientFramework', this.clientFramework);
            insight.track('app/useSass', this.useSass);
            insight.track('app/enableTranslation', this.enableTranslation);
            insight.track('app/nativeLanguage', this.nativeLanguage);
            insight.track('app/languages', this.languages);
        },
        // Derive the name variants used by templates from the base name.
        configureGlobal() {
            // Application name modified, using each technology's conventions
            this.camelizedBaseName = _.camelCase(this.baseName);
            this.angularAppName = this.getAngularAppName();
            this.angularXAppName = this.getAngularXAppName();
            this.capitalizedBaseName = _.upperFirst(this.baseName);
            this.dasherizedBaseName = _.kebabCase(this.baseName);
            this.lowercaseBaseName = this.baseName.toLowerCase();
            if (!this.nativeLanguage) {
                // set to english when translation is set to false
                this.nativeLanguage = 'en';
            }
        },
        // Persist the client-side choices into the generator's config store.
        saveConfig() {
            this.config.set('jhipsterVersion', packagejs.version);
            this.config.set('baseName', this.baseName);
            this.config.set('clientFramework', this.clientFramework);
            this.config.set('useSass', this.useSass);
            this.config.set('enableTranslation', this.enableTranslation);
            // Language settings are only meaningful when translation is on and
            // the i18n question was actually asked.
            if (this.enableTranslation && !this.configOptions.skipI18nQuestion) {
                this.config.set('nativeLanguage', this.nativeLanguage);
                this.config.set('languages', this.languages);
            }
            this.config.set('clientPackageManager', this.clientPackageManager);
            // When the server generator is skipped, the server-side options
            // passed on the CLI must be saved here; each `x && set(...)` only
            // persists options that were actually provided.
            if (this.skipServer) {
                this.authenticationType && this.config.set('authenticationType', this.authenticationType);
                this.uaaBaseName && this.config.set('uaaBaseName', this.uaaBaseName);
                this.hibernateCache && this.config.set('hibernateCache', this.hibernateCache);
                this.websocket && this.config.set('websocket', this.websocket);
                this.databaseType && this.config.set('databaseType', this.databaseType);
                this.devDatabaseType && this.config.set('devDatabaseType', this.devDatabaseType);
                this.prodDatabaseType && this.config.set('prodDatabaseType', this.prodDatabaseType);
                this.searchEngine && this.config.set('searchEngine', this.searchEngine);
                this.buildTool && this.config.set('buildTool', this.buildTool);
            }
        }
    };
}
get default() {
    // Blueprint generators provide their own default phase.
    if (useBlueprint) return;
    return {
        // Copy options published by sibling sub-generators (via configOptions)
        // onto this generator. Note the two guard styles: truthy checks for
        // string-like options, explicit `!== undefined` checks for options
        // whose legitimate value may be falsy (e.g. `false`).
        getSharedConfigOptions() {
            if (this.configOptions.hibernateCache) {
                this.hibernateCache = this.configOptions.hibernateCache;
            }
            if (this.configOptions.websocket !== undefined) {
                this.websocket = this.configOptions.websocket;
            }
            if (this.configOptions.clientFramework) {
                this.clientFramework = this.configOptions.clientFramework;
            }
            if (this.configOptions.databaseType) {
                this.databaseType = this.configOptions.databaseType;
            }
            if (this.configOptions.devDatabaseType) {
                this.devDatabaseType = this.configOptions.devDatabaseType;
            }
            if (this.configOptions.prodDatabaseType) {
                this.prodDatabaseType = this.configOptions.prodDatabaseType;
            }
            if (this.configOptions.messageBroker !== undefined) {
                this.messageBroker = this.configOptions.messageBroker;
            }
            if (this.configOptions.searchEngine !== undefined) {
                this.searchEngine = this.configOptions.searchEngine;
            }
            if (this.configOptions.buildTool) {
                this.buildTool = this.configOptions.buildTool;
            }
            if (this.configOptions.enableSocialSignIn !== undefined) {
                this.enableSocialSignIn = this.configOptions.enableSocialSignIn;
            }
            if (this.configOptions.authenticationType) {
                this.authenticationType = this.configOptions.authenticationType;
            }
            if (this.configOptions.otherModules) {
                this.otherModules = this.configOptions.otherModules;
            }
            if (this.configOptions.testFrameworks) {
                this.testFrameworks = this.configOptions.testFrameworks;
            }
            // NOTE(review): assumes this.testFrameworks is always initialized
            // (here or earlier) — confirm it cannot be undefined at this point.
            this.protractorTests = this.testFrameworks.includes('protractor');
            if (this.configOptions.enableTranslation !== undefined) {
                this.enableTranslation = this.configOptions.enableTranslation;
            }
            if (this.configOptions.nativeLanguage !== undefined) {
                this.nativeLanguage = this.configOptions.nativeLanguage;
            }
            if (this.configOptions.languages !== undefined) {
                this.languages = this.configOptions.languages;
                this.enableI18nRTL = this.isI18nRTLSupportNecessary(this.languages);
            }
            if (this.configOptions.uaaBaseName !== undefined) {
                this.uaaBaseName = this.configOptions.uaaBaseName;
            }
            // Make dist dir available in templates
            if (this.configOptions.buildTool === 'maven') {
                this.BUILD_DIR = 'target/';
            } else {
                this.BUILD_DIR = 'build/';
            }
            this.styleSheetExt = this.useSass ? 'scss' : 'css';
            this.pkType = this.getPkType(this.databaseType);
            // API URL prefixes differ for UAA-based auth: calls are routed
            // through the UAA gateway app instead of SERVER_API_URL.
            this.apiUrlPrefix = `${this.authenticationType === 'uaa' ? `'${this.uaaBaseName.toLowerCase()}/` : 'SERVER_API_URL + \''}`;
            this.apiUaaUrlPrefix = `${this.authenticationType === 'uaa' ? `${this.uaaBaseName.toLowerCase()}/` : ''}`;
            this.apiServerUrlPrefix = `${this.authenticationType !== 'uaa' ? 'SERVER_API_URL + \'' : '\''}`;
            this.DIST_DIR = this.BUILD_DIR + constants.CLIENT_DIST_DIR;
        },
        // Compose the language sub-generator for the selected client languages.
        composeLanguages() {
            if (this.configOptions.skipI18nQuestion) return;
            this.composeLanguagesSub(this, this.configOptions, 'client');
        }
    };
}
writing() {
if (useBlueprint) return;
switch (this.clientFramework) {
case 'angular1':
return writeAngularJsFiles.call(this);
case 'react':
return writeReactFiles.call(this);
default:
return writeAngularFiles.call(this);
}
}
install() {
    // Blueprint generators provide their own install phase.
    if (useBlueprint) return;
    // Message shown when installation is skipped or fails.
    let logMsg =
        `To install your dependencies manually, run: ${chalk.yellow.bold(`${this.clientPackageManager} install`)}`;
    if (this.clientFramework === 'angular1') {
        // AngularJS projects additionally manage front-end deps with bower.
        logMsg =
            `To install your dependencies manually, run: ${chalk.yellow.bold(`${this.clientPackageManager} install & bower install`)}`;
    }
    const installConfig = {
        bower: this.clientFramework === 'angular1',
        npm: this.clientPackageManager !== 'yarn',
        yarn: this.clientPackageManager === 'yarn'
    };
    if (this.options['skip-install']) {
        this.log(logMsg);
    } else {
        // After a successful install, run the framework's build step.
        this.installDependencies(installConfig).then(
            () => {
                if (this.clientFramework === 'angular1') {
                    this.spawnCommandSync('gulp', ['install']);
                } else {
                    this.spawnCommandSync(this.clientPackageManager, ['run', 'webpack:build']);
                }
            },
            // NOTE(review): `err` is intentionally not surfaced; only a
            // generic warning plus the manual-install hint are logged.
            (err) => {
                this.warning('Install of dependencies failed!');
                this.log(logMsg);
            }
        );
    }
}
end() {
    // Blueprint generators provide their own end phase.
    if (useBlueprint) return;
    this.log(chalk.green.bold('\nClient application generated successfully.\n'));
    // Default next-steps hint for webpack-based (Angular 2+/React) clients.
    let logMsg =
        `Start your Webpack development server with:\n ${chalk.yellow.bold(`${this.clientPackageManager} start`)}\n`;
    if (this.clientFramework === 'angular1') {
        // AngularJS clients use gulp; the Sass step is only suggested when
        // Sass was enabled.
        logMsg =
            'Inject your front end dependencies into your source code:\n' +
            ` ${chalk.yellow.bold('gulp inject')}\n\n` +
            'Generate the AngularJS constants:\n' +
            ` ${chalk.yellow.bold('gulp ngconstant:dev')}` +
            `${this.useSass ? '\n\nCompile your Sass style sheets:\n\n' +
            `${chalk.yellow.bold('gulp sass')}` : ''}\n\n` +
            'Or do all of the above:\n' +
            ` ${chalk.yellow.bold('gulp install')}\n`;
    }
    this.log(chalk.green(logMsg));
}
};
| deepu105/generator-jhipster | generators/client/index.js | JavaScript | apache-2.0 | 17,806 |
package com.suscipio_solutions.consecro_mud.Locales;
import java.util.List;
import com.suscipio_solutions.consecro_mud.Common.interfaces.PhyStats;
import com.suscipio_solutions.consecro_mud.Locales.interfaces.Room;
import com.suscipio_solutions.consecro_mud.core.interfaces.Places;
/**
 * A climbable cave-wall locale: dark by default, indoors-cave domain,
 * with normal climbing rules and cave room resources.
 */
public class CaveSurface extends ClimbableSurface
{
    @Override public String ID(){return "CaveSurface";}

    public CaveSurface()
    {
        super();
        // Caves are dark unless lit by something in the room.
        basePhyStats().setDisposition(basePhyStats().disposition()|PhyStats.IS_DARK);
        // Use the accessor consistently rather than touching the field directly.
        basePhyStats().setWeight(4);
        recoverPhyStats();
        climask=Places.CLIMASK_NORMAL;
    }

    @Override public int domainType(){return Room.DOMAIN_INDOORS_CAVE;}
    @Override public int maxRange(){return 5;}
    @Override public List<Integer> resourceChoices(){return CaveRoom.roomResources;}
}
| ConsecroMUD/ConsecroMUD | com/suscipio_solutions/consecro_mud/Locales/CaveSurface.java | Java | apache-2.0 | 790 |
/*
Copyright 2016 Anderson Dorow
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jcurry.util.function;
import java.util.Objects;
import java.util.function.DoubleFunction;
import java.util.function.ToDoubleBiFunction;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntFunction;
/**
 * A {@link DoubleFunction} extended with currying and composition helpers
 * that return the library's currying-aware functional types.
 *
 * @param <R> the type of the result of the function
 */
public interface CurryingDoubleFunction<R> extends DoubleFunction<R> {

    /** Binds the {@code double} argument, yielding a supplier of the result. */
    default CurryingSupplier<R> curry(double value) {
        return () -> apply(value);
    }

    /**
     * Returns a function that first maps its input to a {@code double} via
     * {@code before}, then applies this function.
     */
    default <V> CurryingFunction<V, R> compose(ToDoubleFunction<? super V> before) {
        Objects.requireNonNull(before);
        return input -> apply(before.applyAsDouble(input));
    }

    /**
     * Returns a two-argument function that first maps its inputs to a
     * {@code double} via {@code before}, then applies this function.
     */
    default <V, U> CurryingBiFunction<V, U, R> compose(ToDoubleBiFunction<? super V, ? super U> before) {
        Objects.requireNonNull(before);
        return (first, second) -> apply(before.applyAsDouble(first, second));
    }
}
| adorow/jcurry | src/main/java/jcurry/util/function/CurryingDoubleFunction.java | Java | apache-2.0 | 1,399 |
/*
* Copyright 2014 Philip Cronje
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package net.za.slyfox.minecraft;
import java.io.IOException;
import java.util.List;
/**
* Mojang Profile API client interface. Implementations of this interface are synchronous.
*/
/**
 * Mojang Profile API client interface. Implementations of this interface are synchronous:
 * each method blocks until the remote call completes or fails.
 */
public interface ProfileClient {

    /**
     * Retrieves profile information for each requested profile name.
     *
     * <p>Calling this method may cause multiple API requests to be issued to circumvent the
     * per-request profile limit.</p>
     *
     * @param profileNames the list of profile names to map to UUIDs
     * @return a list of profile data structures. Invalid or non-existent profile names will not have a corresponding
     *         entry in this list
     * @throws IOException when an I/O error occurs
     * @throws RuntimeException when an error occurs that the implementation cannot meaningfully map to an {@code
     *         IOException}
     */
    List<Profile> retrieveProfilesForNames(String... profileNames) throws IOException;

    /**
     * Retrieves profile information associated with the given UUID.
     *
     * @param uuid the UUID to retrieve profile information for
     * @return profile information for the given UUID. If an empty response is received, {@code
     *         null} will be returned
     * @throws IOException when an I/O error occurs
     * @throws RateLimitedException when the remote service indicates that the client has been rate limited
     */
    SessionProfile retrieveProfileForUuid(String uuid) throws IOException, RateLimitedException;
}
| slyfoxza/minecraft | profile/src/main/java/net/za/slyfox/minecraft/ProfileClient.java | Java | apache-2.0 | 2,020 |
namespace MassTransit.Tests.MessageData
{
using System;
using System.IO;
using System.Linq;
using System.Threading.Tasks;
using MassTransit.MessageData;
using NUnit.Framework;
[TestFixture]
public class Storing_message_data_on_the_file_system
{
    // Repository under test, backed by the local file system (see Setup).
    IMessageDataRepository _repository;

    [Test]
    public async Task Should_generate_the_folder_and_file()
    {
        // Storing a string yields an address; print it and the file-system
        // path derived from its segments (':' separators split out).
        var property = await _repository.PutString("Hello, World.");

        Console.WriteLine(property.Address);
        Console.WriteLine("Path: {0}", Path.Combine(property.Address.Segments.SelectMany(x => x.Split(new []{':'})).ToArray()));
    }

    [Test]
    public async Task Should_generate_time_based_folder()
    {
        // Store with a 30-day time-to-live, then read the value back via the
        // address returned by the put.
        var property = await _repository.PutString("Hello, World.", TimeSpan.FromDays(30));

        var loaded = await _repository.GetString(property.Address);

        Console.WriteLine(await loaded.Value);
    }

    [OneTimeSetUp]
    public void Setup()
    {
        // Root the repository in a MessageData folder under the test run's
        // base directory.
        var baseDirectory = AppDomain.CurrentDomain.BaseDirectory;
        var messageDataPath = Path.Combine(baseDirectory, "MessageData");

        var dataDirectory = new DirectoryInfo(messageDataPath);
        Console.WriteLine("Using data directory: {0}", dataDirectory);

        _repository = new FileSystemMessageDataRepository(dataDirectory);
    }
}
| drusellers/MassTransit | src/MassTransit.Tests/MessageData/FileSystem_Specs.cs | C# | apache-2.0 | 1,510 |
package swingbootstrap;
import java.awt.Color;
import java.awt.Cursor;
import java.awt.Insets;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
import java.awt.event.MouseListener;

import javax.swing.BorderFactory;
import javax.swing.Icon;
import javax.swing.JButton;
import javax.swing.JLabel;
import javax.swing.JPanel;

import swingbootstrap.ui.NavlinkUI;
/**
 * A Bootstrap-styled navigation link button. Hovering darkens the text and
 * shows a hand cursor; pressing the link routes to its configured panel
 * (see {@link #route()}).
 */
public class Navlink extends JButton {

    /** Align the link to the left side of the navigation bar. */
    public static final int ALIGN_LEFT = 0;
    /** Align the link to the right side of the navigation bar. */
    public static final int ALIGN_RIGHT = 1;

    private Navbar navbar;
    private boolean isLeftNode = false;
    private int navigationAlignment = ALIGN_LEFT;
    private JLabel titleLabel;
    // Panel whose content is swapped when this link is activated.
    private JPanel containerPanel;
    // Panel shown inside containerPanel when this link routes.
    private JPanel linkPanel;

    public Navbar getNavbar() {
        return navbar;
    }

    public void setNavbar(Navbar navbar) {
        this.navbar = navbar;
    }

    public int getNavigationAlignment() {
        return navigationAlignment;
    }

    public void setNavigationAlignment(int alignment) {
        this.navigationAlignment = alignment;
    }

    public boolean isLeftNode() {
        return isLeftNode;
    }

    public void setLeftNode(boolean isLeftNode) {
        this.isLeftNode = isLeftNode;
    }

    public JPanel getContainerPanel() {
        return containerPanel;
    }

    public void setContainerPanel(JPanel containerPanel) {
        this.containerPanel = containerPanel;
    }

    public JPanel getLinkPanel() {
        return linkPanel;
    }

    public void setLinkPanel(JPanel linkPanel) {
        this.linkPanel = linkPanel;
    }

    public Navlink(String title) {
        this(title, null, null, null);
    }

    public Navlink(Icon icon) {
        this(null, icon, null, null);
    }

    public Navlink(String title, JPanel containerPanel, JPanel linkPanel) {
        this(title, null, containerPanel, linkPanel);
    }

    public Navlink(Icon icon, JPanel containerPanel, JPanel linkPanel) {
        this(null, icon, containerPanel, linkPanel);
    }

    /**
     * Full constructor all other constructors delegate to.
     *
     * @param title          link text (may be null when only an icon is used)
     * @param icon           link icon (may be null)
     * @param containerPanel panel whose content is replaced on routing (may be null)
     * @param linkPanel      panel to show when the link is activated (may be null)
     */
    public Navlink(String title, Icon icon, JPanel containerPanel, JPanel linkPanel) {
        super(title, icon);
        this.containerPanel = containerPanel;
        this.linkPanel = linkPanel;

        setOpaque(false);
        setBackground(Color.decode("#F6F6F6"));
        setFont(getFont().deriveFont(16f));
        setBorder(BorderFactory.createEmptyBorder());
        setMargin(new Insets(0, 8, 0, 8));

        NavlinkUI navUI = new NavlinkUI();
        setUI(navUI);

        // Change text color to darker color and change cursor to hand cursor
        // when hover. MouseAdapter keeps the unused callbacks as no-ops.
        addMouseListener(new MouseAdapter() {
            private Color colorSave;
            private int cursorSave;

            @Override
            public void mouseEntered(MouseEvent e) {
                // Remember the current look so mouseExited can restore it.
                colorSave = Navlink.this.getForeground();
                cursorSave = Navlink.this.getCursor().getType();
                Navlink.this.setCursor(Cursor.getPredefinedCursor(Cursor.HAND_CURSOR));
                Navlink.this.setForeground(Navlink.this.getForeground().darker());
            }

            @Override
            public void mouseExited(MouseEvent e) {
                Navlink.this.setForeground(colorSave);
                Navlink.this.setCursor(Cursor.getPredefinedCursor(cursorSave));
            }

            @Override
            public void mousePressed(MouseEvent e) {
                route();
            }
        });
    }

    @Override
    public void setForeground(Color fg) {
        super.setForeground(fg);
        // Keep the optional title label's color in sync with the button's.
        if (this.titleLabel != null) {
            this.titleLabel.setForeground(fg);
        }
    }

    /** Swaps {@code linkPanel} into {@code containerPanel}, if both are set. */
    public void route() {
        if (containerPanel != null && linkPanel != null) {
            containerPanel.removeAll();
            containerPanel.add(linkPanel, "grow");
            containerPanel.revalidate();
            containerPanel.repaint();
        }
    }
}
| qikh/swingbootstrap | swingbootstrap-core/src/main/java/swingbootstrap/Navlink.java | Java | apache-2.0 | 3,437 |
"""Lists clusters."""
from baseCmd import *
from baseResponse import *
class listClustersCmd (baseCmd):
    # Kept for backward compatibility with code that reads the class
    # attribute; per-instance metadata lives in self.typeInfo (see __init__).
    typeInfo = {}

    def __init__(self):
        # Give each instance its own dict: without this, the item assignments
        # below would mutate the single dict shared through the class-level
        # ``typeInfo`` attribute by every instance.
        self.typeInfo = {}
        self.isAsync = "false"
        """lists clusters by allocation state"""
        self.allocationstate = None
        self.typeInfo['allocationstate'] = 'string'
        """lists clusters by cluster type"""
        self.clustertype = None
        self.typeInfo['clustertype'] = 'string'
        """lists clusters by hypervisor type"""
        self.hypervisor = None
        self.typeInfo['hypervisor'] = 'string'
        """lists clusters by the cluster ID"""
        self.id = None
        self.typeInfo['id'] = 'uuid'
        """List by keyword"""
        self.keyword = None
        self.typeInfo['keyword'] = 'string'
        """whether this cluster is managed by cloudstack"""
        self.managedstate = None
        self.typeInfo['managedstate'] = 'string'
        """lists clusters by the cluster name"""
        self.name = None
        self.typeInfo['name'] = 'string'
        """"""
        self.page = None
        self.typeInfo['page'] = 'integer'
        """"""
        self.pagesize = None
        self.typeInfo['pagesize'] = 'integer'
        """lists clusters by Pod ID"""
        self.podid = None
        self.typeInfo['podid'] = 'uuid'
        """flag to display the capacity of the clusters"""
        self.showcapacities = None
        self.typeInfo['showcapacities'] = 'boolean'
        """lists clusters by Zone ID"""
        self.zoneid = None
        self.typeInfo['zoneid'] = 'uuid'
        # No parameters are mandatory for this API call.
        self.required = []
class listClustersResponse (baseResponse):
    # Kept for backward compatibility with code that reads the class
    # attribute; per-instance metadata lives in self.typeInfo (see __init__).
    typeInfo = {}

    def __init__(self):
        # Give each instance its own dict: without this, the item assignments
        # below would mutate the single dict shared through the class-level
        # ``typeInfo`` attribute by every instance.
        self.typeInfo = {}
        """the cluster ID"""
        self.id = None
        self.typeInfo['id'] = 'string'
        """the allocation state of the cluster"""
        self.allocationstate = None
        self.typeInfo['allocationstate'] = 'string'
        """the type of the cluster"""
        self.clustertype = None
        self.typeInfo['clustertype'] = 'string'
        """The cpu overcommit ratio of the cluster"""
        self.cpuovercommitratio = None
        self.typeInfo['cpuovercommitratio'] = 'string'
        """the hypervisor type of the cluster"""
        self.hypervisortype = None
        self.typeInfo['hypervisortype'] = 'string'
        """whether this cluster is managed by cloudstack"""
        self.managedstate = None
        self.typeInfo['managedstate'] = 'string'
        """The memory overcommit ratio of the cluster"""
        self.memoryovercommitratio = None
        self.typeInfo['memoryovercommitratio'] = 'string'
        """the cluster name"""
        self.name = None
        self.typeInfo['name'] = 'string'
        """the Pod ID of the cluster"""
        self.podid = None
        self.typeInfo['podid'] = 'string'
        """the Pod name of the cluster"""
        self.podname = None
        self.typeInfo['podname'] = 'string'
        """the Zone ID of the cluster"""
        self.zoneid = None
        self.typeInfo['zoneid'] = 'string'
        """the Zone name of the cluster"""
        self.zonename = None
        self.typeInfo['zonename'] = 'string'
        """the capacity of the Cluster"""
        self.capacity = []
class capacity:
    """One capacity entry reported for a cluster.

    All fields are populated from the API response; they start out as None.
    """

    def __init__(self):
        # the total capacity available
        self.capacitytotal = None
        # the capacity currently in use
        self.capacityused = None
        # the Cluster ID
        self.clusterid = None
        # the Cluster name
        self.clustername = None
        # the percentage of capacity currently in use
        self.percentused = None
        # the Pod ID
        self.podid = None
        # the Pod name
        self.podname = None
        # the capacity type
        self.type = None
        # the Zone ID
        self.zoneid = None
        # the Zone name
        self.zonename = None
| MissionCriticalCloud/marvin | marvin/cloudstackAPI/listClusters.py | Python | apache-2.0 | 3,916 |
/*
* Copyright 2015 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.handler.codec.http.cookie;
import static io.netty.handler.codec.http.cookie.CookieUtil.*;
import static io.netty.util.internal.ObjectUtil.checkNotNull;
/**
* The default {@link Cookie} implementation.
*/
/**
 * The default {@link Cookie} implementation.
 */
public class DefaultCookie implements Cookie {

    private final String name;
    private String value;
    private boolean wrap;
    private String domain;
    private String path;
    // Long.MIN_VALUE marks "maxAge was never set" and is distinct from 0.
    private long maxAge = Long.MIN_VALUE;
    private boolean secure;
    private boolean httpOnly;

    /**
     * Creates a new cookie with the specified name and value.
     *
     * @throws NullPointerException if {@code name} or {@code value} is null
     * @throws IllegalArgumentException if the trimmed name is empty
     */
    public DefaultCookie(String name, String value) {
        name = checkNotNull(name, "name").trim();
        if (name.isEmpty()) {
            throw new IllegalArgumentException("empty name");
        }
        this.name = name;
        setValue(value);
    }

    @Override
    public String name() {
        return name;
    }

    @Override
    public String value() {
        return value;
    }

    @Override
    public void setValue(String value) {
        this.value = checkNotNull(value, "value");
    }

    @Override
    public boolean wrap() {
        return wrap;
    }

    @Override
    public void setWrap(boolean wrap) {
        this.wrap = wrap;
    }

    @Override
    public String domain() {
        return domain;
    }

    @Override
    public void setDomain(String domain) {
        // Rejects illegal characters; null clears the attribute.
        this.domain = validateAttributeValue("domain", domain);
    }

    @Override
    public String path() {
        return path;
    }

    @Override
    public void setPath(String path) {
        // Rejects illegal characters; null clears the attribute.
        this.path = validateAttributeValue("path", path);
    }

    @Override
    public long maxAge() {
        return maxAge;
    }

    @Override
    public void setMaxAge(long maxAge) {
        this.maxAge = maxAge;
    }

    @Override
    public boolean isSecure() {
        return secure;
    }

    @Override
    public void setSecure(boolean secure) {
        this.secure = secure;
    }

    @Override
    public boolean isHttpOnly() {
        return httpOnly;
    }

    @Override
    public void setHttpOnly(boolean httpOnly) {
        this.httpOnly = httpOnly;
    }

    // Hashing by name only is consistent with equals(): cookies that are
    // equal necessarily share the same name and therefore the same hash.
    @Override
    public int hashCode() {
        return name().hashCode();
    }

    // Equality is driven by name, then path, then domain; domain comparison
    // is case-insensitive while name and path are case-sensitive.
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }

        if (!(o instanceof Cookie)) {
            return false;
        }

        Cookie that = (Cookie) o;
        if (!name().equals(that.name())) {
            return false;
        }

        // Path: both null is a match; one null is a mismatch.
        if (path() == null) {
            if (that.path() != null) {
                return false;
            }
        } else if (that.path() == null) {
            return false;
        } else if (!path().equals(that.path())) {
            return false;
        }

        // Domain: both null is a match; one null is a mismatch.
        if (domain() == null) {
            if (that.domain() != null) {
                return false;
            }
        } else if (that.domain() == null) {
            return false;
        } else {
            return domain().equalsIgnoreCase(that.domain());
        }

        return true;
    }

    // Ordering mirrors equals(): name, then path, then domain, with null
    // attributes sorting before non-null ones.
    @Override
    public int compareTo(Cookie c) {
        int v = name().compareTo(c.name());
        if (v != 0) {
            return v;
        }

        if (path() == null) {
            if (c.path() != null) {
                return -1;
            }
        } else if (c.path() == null) {
            return 1;
        } else {
            v = path().compareTo(c.path());
            if (v != 0) {
                return v;
            }
        }

        if (domain() == null) {
            if (c.domain() != null) {
                return -1;
            }
        } else if (c.domain() == null) {
            return 1;
        } else {
            v = domain().compareToIgnoreCase(c.domain());
            return v;
        }

        return 0;
    }

    /**
     * Validate a cookie attribute value, throws a {@link IllegalArgumentException} otherwise.
     * Only intended to be used by {@link io.netty.handler.codec.http.DefaultCookie}.
     * @param name attribute name
     * @param value attribute value
     * @return the trimmed, validated attribute value
     * @deprecated CookieUtil is package private, will be removed once old Cookie API is dropped
     */
    @Deprecated
    protected String validateValue(String name, String value) {
        return validateAttributeValue(name, value);
    }

    // Human-readable dump of the cookie; only set attributes are included.
    @Override
    public String toString() {
        StringBuilder buf = stringBuilder()
            .append(name())
            .append('=')
            .append(value());
        if (domain() != null) {
            buf.append(", domain=")
               .append(domain());
        }
        if (path() != null) {
            buf.append(", path=")
               .append(path());
        }
        if (maxAge() >= 0) {
            buf.append(", maxAge=")
               .append(maxAge())
               .append('s');
        }
        if (isSecure()) {
            buf.append(", secure");
        }
        if (isHttpOnly()) {
            buf.append(", HTTPOnly");
        }
        return buf.toString();
    }
}
| ichaki5748/netty | codec-http/src/main/java/io/netty/handler/codec/http/cookie/DefaultCookie.java | Java | apache-2.0 | 5,793 |
package com.vmware.vim25;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="returnval" type="{urn:vim25}HttpNfcLeaseManifestEntry" maxOccurs="unbounded" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "returnval"
})
@XmlRootElement(name = "HttpNfcLeaseGetManifestResponse")
public class HttpNfcLeaseGetManifestResponse {

    // JAXB-populated list of manifest entries; may be null until first access.
    protected List<HttpNfcLeaseManifestEntry> returnval;

    /**
     * Gets the value of the returnval property.
     *
     * <p>
     * This accessor method returns a reference to the live list,
     * not a snapshot. Therefore any modification you make to the
     * returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the returnval property.
     *
     * <p>
     * For example, to add a new item, do as follows:
     * <pre>
     *    getReturnval().add(newItem);
     * </pre>
     *
     *
     * <p>
     * Objects of the following type(s) are allowed in the list
     * {@link HttpNfcLeaseManifestEntry }
     *
     *
     */
    public List<HttpNfcLeaseManifestEntry> getReturnval() {
        // Lazily initialize so JAXB and callers always see a non-null list.
        if (returnval == null) {
            returnval = new ArrayList<HttpNfcLeaseManifestEntry>();
        }
        return this.returnval;
    }
}
| jdgwartney/vsphere-ws | java/JAXWS/samples/com/vmware/vim25/HttpNfcLeaseGetManifestResponse.java | Java | apache-2.0 | 1,999 |
package com.planet_ink.coffee_mud.WebMacros;
import com.planet_ink.coffee_web.interfaces.*;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.CMath.CompiledOperation;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2004-2016 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * Web macro that renders an HTML table charting, per level and per base stat
 * value, the average hit points / mana / movement (averaged across all char
 * classes) together with the recovery amount from the configured recovery
 * formulas, and the ratio between the two.
 */
public class StatRejuvCharts extends StdWebMacro
{
	@Override public String name() {return "StatRejuvCharts";}

	// Returns the named URL parameter, or "" when absent.
	protected String getReq(HTTPRequest httpReq, String tag)
	{
		String s=httpReq.getUrlParameter(tag);
		if(s==null)
			s="";
		return s;
	}

	// Evaluates a compiled char-class formula for the given stat and level,
	// then scales by level and adds the base amount.
	// NOTE(review): the positions in `variables` presumably map to the
	// formula's positional variables (@x1..@x9) — confirm against
	// CMath.parseMathExpression before reordering.
	public final int avgMath(final int stat, final int level, final int add, final LinkedList<CompiledOperation> formula)
	{
		final double[] variables={
			level,
			stat,
			(double)stat+7,
			stat,
			(double)stat+7,
			stat,
			(double)stat+7,
			stat,
			stat
		};
		return add+(level*(int)Math.round(CMath.parseMathExpression(formula, variables, 0.0)));
	}

	@Override
	public String runMacro(HTTPRequest httpReq, String parm, HTTPResponse httpResp)
	{
		final StringBuffer buf=new StringBuffer("");
		// WHICH selects the chart: HP (default), MN (mana) or MV (movement).
		final String which=httpReq.getUrlParameter("WHICH");
		// Scratch MOB used to evaluate stats/state under various dispositions.
		final MOB mob=CMClass.getMOB("StdMOB");
		mob.baseState().setMana(100);
		mob.baseState().setMovement(100);
		mob.baseState().setHitPoints(100);
		mob.recoverMaxState();
		mob.resetToMaxState();
		mob.curState().setHunger(1000);
		mob.curState().setThirst(1000);
		if((which!=null)&&(which.equals("HP")))
			buf.append("<BR>Chart: Hit Points<BR>");
		else
		if((which!=null)&&(which.equals("MN")))
			buf.append("<BR>Chart: Mana<BR>");
		else
		if((which!=null)&&(which.equals("MV")))
			buf.append("<BR>Chart: Movement<BR>");
		else
			buf.append("<BR>Chart: Hit Points<BR>");
		buf.append("Flags: ");
		// Apply any requested disposition flags to the scratch MOB; these
		// feed the recovery formula inputs below.
		int disposition=0;
		if((getReq(httpReq,"SITTING").length()>0))
		{ disposition=PhyStats.IS_SITTING; buf.append("Sitting ");}
		if((getReq(httpReq,"SLEEPING").length()>0))
		{ disposition=PhyStats.IS_SLEEPING; buf.append("Sleeping ");}
		if((getReq(httpReq,"FLYING").length()>0))
		{ disposition=PhyStats.IS_FLYING; buf.append("Flying ");}
		if((getReq(httpReq,"SWIMMING").length()>0))
		{ disposition=PhyStats.IS_SWIMMING; buf.append("Swimming ");}
		if((getReq(httpReq,"RIDING").length()>0))
		{ mob.setRiding((Rideable)CMClass.getMOB("GenRideable")); buf.append("Riding ");}
		final boolean hungry=(httpReq.getUrlParameter("HUNGRY")!=null)&&(httpReq.getUrlParameter("HUNGRY").length()>0);
		if(hungry){ buf.append("Hungry "); mob.curState().setHunger(0);}
		final boolean thirsty=(httpReq.getUrlParameter("THIRSTY")!=null)&&(httpReq.getUrlParameter("THIRSTY").length()>0);
		if(thirsty){ buf.append("Thirsty "); mob.curState().setThirst(0);}
		mob.basePhyStats().setDisposition(disposition);
		mob.recoverPhyStats();
		// Chart dimensions: stats 4..25 step 2, levels 1..90 step 5.
		final int MAX_STAT=25;
		final int MAX_LEVEL=90;
		final int SKIP_STAT=2;
		final int SKIP_LEVEL=5;
		final int[][] hitpointcharts=new int[MAX_LEVEL+1][MAX_STAT+1];
		final int[][] manacharts=new int[MAX_LEVEL+1][MAX_STAT+1];
		final int[][] movementcharts=new int[MAX_LEVEL+1][MAX_STAT+1];
		final int sh=CMProps.getIntVar(CMProps.Int.STARTHP);
		final int sm=CMProps.getIntVar(CMProps.Int.STARTMANA);
		final int sv=CMProps.getIntVar(CMProps.Int.STARTMOVE);
		// Pre-compile each char class's hp/mana/move formulas once.
		final Map<CharClass,LinkedList<CompiledOperation>> hpformulas=new Hashtable<CharClass,LinkedList<CompiledOperation>>();
		final Map<CharClass,LinkedList<CompiledOperation>> mnformulas=new Hashtable<CharClass,LinkedList<CompiledOperation>>();
		final Map<CharClass,LinkedList<CompiledOperation>> mvformulas=new Hashtable<CharClass,LinkedList<CompiledOperation>>();
		for(final Enumeration<CharClass> c=CMClass.charClasses();c.hasMoreElements();)
		{
			final CharClass C1=c.nextElement();
			hpformulas.put(C1, CMath.compileMathExpression(C1.getHitPointsFormula()));
			mnformulas.put(C1, CMath.compileMathExpression(C1.getManaFormula()));
			mvformulas.put(C1, CMath.compileMathExpression(C1.getMovementFormula()));
		}
		// Fill each chart cell with the value averaged across all classes.
		for(int l=1;l<=MAX_LEVEL;l+=SKIP_LEVEL)
			for(int s=4;s<=MAX_STAT;s+=SKIP_STAT)
			{
				int num=0;
				for(final Enumeration<CharClass> c=CMClass.charClasses();c.hasMoreElements();)
				{
					final CharClass C1=c.nextElement();
					num++;
					hitpointcharts[l][s]+=avgMath(s,l,sh,hpformulas.get(C1));
					manacharts[l][s]+=avgMath(s,l,sm,mnformulas.get(C1));
					movementcharts[l][s]+=avgMath(s,l,sv,mvformulas.get(C1));
				}
				hitpointcharts[l][s]/=num;
				manacharts[l][s]/=num;
				movementcharts[l][s]/=num;
			}
		// Render the table: header row of stat values, one row per level.
		buf.append("<P><TABLE WIDTH=100% BORDER=1>");
		buf.append("<TR><TD><B><FONT COLOR=WHITE>STATS:</FONT></B></TD>");
		for(int stats=4;stats<=MAX_STAT;stats+=SKIP_STAT)
			buf.append("<TD><B><FONT COLOR=WHITE>"+stats+"</FONT></B></TD>");
		buf.append("</TR>");
		LinkedList<CMath.CompiledOperation> stateHitPointRecoverFormula = null;
		LinkedList<CMath.CompiledOperation> stateManaRecoverFormula = null;
		LinkedList<CMath.CompiledOperation> stateMovesRecoverFormula = null;
		stateHitPointRecoverFormula = CMath.compileMathExpression(CMProps.getVar(CMProps.Str.FORMULA_HITPOINTRECOVER));
		stateManaRecoverFormula = CMath.compileMathExpression(CMProps.getVar(CMProps.Str.FORMULA_MANARECOVER));
		stateMovesRecoverFormula = CMath.compileMathExpression(CMProps.getVar(CMProps.Str.FORMULA_MOVESRECOVER));
		for(int level=1;level<=MAX_LEVEL;level+=SKIP_LEVEL)
		{
			buf.append("<TR>");
			buf.append("<TD><B><FONT COLOR=WHITE>LVL "+level+"</FONT></B></TD>");
			for(int stats=4;stats<=MAX_STAT;stats+=SKIP_STAT)
			{
				// Configure the scratch MOB for this (level, stat) cell.
				for(final int c: CharStats.CODES.BASECODES())
					mob.baseCharStats().setStat(c,stats);
				mob.recoverCharStats();
				mob.basePhyStats().setLevel(level);
				mob.recoverPhyStats();
				mob.curState().setMana(0);
				mob.curState().setMovement(0);
				mob.curState().setHitPoints(0);
				final CharStats charStats=mob.charStats();
				final CharState curState=mob.curState();
				final boolean isSleeping=(CMLib.flags().isSleeping(mob));
				final boolean isSittingOrRiding=(!isSleeping) && ((CMLib.flags().isSitting(mob))||(mob.riding()!=null));
				final boolean isFlying=(!isSleeping) && (!isSittingOrRiding) && CMLib.flags().isFlying(mob);
				final boolean isSwimming=(!isSleeping) && (!isSittingOrRiding) && (!isFlying) && CMLib.flags().isSwimming(mob);
				// Inputs to the recovery formulas; index 0 is overwritten
				// below with the stat relevant to the selected chart.
				final double[] vals=new double[]{
					charStats.getStat(CharStats.STAT_CONSTITUTION),
					mob.phyStats().level(),
					(curState.getHunger()<1)?1.0:0.0,
					(curState.getThirst()<1)?1.0:0.0,
					(curState.getFatigue()>CharState.FATIGUED_MILLIS)?1.0:0.0,
					isSleeping?1.0:0.0,
					isSittingOrRiding?1.0:0.0,
					isFlying?1.0:0.0,
					isSwimming?1.0:0.0
				};
				// Each cell prints: total/gain=ratio (integer division).
				// NOTE(review): a recovery formula evaluating to 0 would
				// cause a divide-by-zero here — confirm the configured
				// formulas cannot return 0.
				if((which!=null)&&(which.equals("HP")))
				{
					final long hpGain = Math.round(CMath.parseMathExpression(stateHitPointRecoverFormula, vals, 0.0));
					buf.append("<TD><FONT COLOR=CYAN>"+hitpointcharts[level][stats]+"/"+hpGain+"="+(hitpointcharts[level][stats]/hpGain)+"</FONT></TD>");
				}
				else
				if((which!=null)&&(which.equals("MN")))
				{
					vals[0]=((charStats.getStat(CharStats.STAT_INTELLIGENCE)+charStats.getStat(CharStats.STAT_WISDOM)));
					final long manaGain = Math.round(CMath.parseMathExpression(stateManaRecoverFormula, vals, 0.0));
					buf.append("<TD><FONT COLOR=PINK>"+manacharts[level][stats]+"/"+manaGain+"="+(manacharts[level][stats]/manaGain)+"</FONT></TD>");
				}
				else
				if((which!=null)&&(which.equals("MV")))
				{
					vals[0]=charStats.getStat(CharStats.STAT_STRENGTH);
					final long moveGain = Math.round(CMath.parseMathExpression(stateMovesRecoverFormula, vals, 0.0));
					buf.append("<TD><FONT COLOR=YELLOW>"+movementcharts[level][stats]+"/"+moveGain+"="+(movementcharts[level][stats]/moveGain)+"</FONT></TD>");
				}
				else
				{
					final long hpGain = Math.round(CMath.parseMathExpression(stateHitPointRecoverFormula, vals, 0.0));
					buf.append("<TD><FONT COLOR=CYAN>"+hitpointcharts[level][stats]+"/"+hpGain+"="+(hitpointcharts[level][stats]/hpGain)+"</FONT></TD>");
				}
			}
			buf.append("</TR>");
		}
		mob.destroy();
		buf.append("</TABLE>");
		return clearWebMacros(buf);
	}
}
| oriontribunal/CoffeeMud | com/planet_ink/coffee_mud/WebMacros/StatRejuvCharts.java | Java | apache-2.0 | 9,555 |
package com.unidev.polydata.changes.tags;
import static com.unidev.polydata.MongodbStorage.COUNT_KEY;
import com.mongodb.client.MongoCollection;
import com.unidev.changesexecutor.model.ChangeContext;
import com.unidev.polydata.changes.MongodbChange;
import org.bson.Document;
/**
* Change for adding index on tag count field for poly tags.
*/
/**
 * Migration change that creates an index on the tag count field of the poly
 * tags collection, so tag listings can be ordered by usage count efficiently.
 */
public class TagsCountIndex extends MongodbChange {

    /** Registers this change with execution order 100 and a stable id. */
    public TagsCountIndex() {
        super(100L, "tag-count-index");
    }

    /**
     * Creates an ascending index on the count field of the tags collection
     * obtained from the change context.
     */
    @Override
    public void execute(ChangeContext changeContext) {
        @SuppressWarnings("unchecked")
        MongoCollection<Document> tagsCollection =
            (MongoCollection<Document>) changeContext.get(COLLECTION_KEY);
        tagsCollection.createIndex(new Document(COUNT_KEY, 1));
    }
}
| unidev-polydata/polydata-storage-mongodb | polydata-storage-mongodb-core/src/main/java/com/unidev/polydata/changes/tags/TagsCountIndex.java | Java | apache-2.0 | 807 |
/**
* @license Apache-2.0
*
* Copyright (c) 2018 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// MODULES //
var isString = require( '@stdlib/assert/is-string' ).isPrimitive;
var isNonNegativeInteger = require( '@stdlib/assert/is-nonnegative-integer' ).isPrimitive;
var isCollection = require( '@stdlib/assert/is-collection' );
var isTypedArrayLike = require( '@stdlib/assert/is-typed-array-like' );
var isArrayBuffer = require( '@stdlib/assert/is-arraybuffer' );
var isComplex64Array = require( '@stdlib/assert/is-complex64array' );
var isComplex128Array = require( '@stdlib/assert/is-complex128array' );
var setReadOnly = require( '@stdlib/utils/define-nonenumerable-read-only-property' );
var setReadOnlyAccessor = require( '@stdlib/utils/define-nonenumerable-read-only-accessor' );
var ctors = require( '@stdlib/array/typed-ctors' );
var reinterpret64 = require( '@stdlib/strided/base/reinterpret-complex64' );
var reinterpret128 = require( '@stdlib/strided/base/reinterpret-complex128' );
var arraylike2object = require( '@stdlib/array/base/arraylike2object' );
var copy = require( '@stdlib/utils/copy' );
var ArrayBuffer = require( '@stdlib/array/buffer' );
var ceil = require( '@stdlib/math/base/special/ceil' );
var floor = require( '@stdlib/math/base/special/floor' );
var ceil2 = require( '@stdlib/math/base/special/ceil2' );
var log2 = require( '@stdlib/math/base/special/log2' );
var min = require( '@stdlib/math/base/special/min' );
var defaults = require( './defaults.json' );
var validate = require( './validate.js' );
var createPool = require( './pool.js' );
var BYTES_PER_ELEMENT = require( './bytes_per_element.json' );
// VARIABLES //
// Cached typed-array constructors for the two complex dtypes; used below to
// detect complex inputs/outputs without re-resolving the constructor each call:
var Complex64Array = ctors( 'complex64' );
var Complex128Array = ctors( 'complex128' );
// FUNCTIONS //
/**
* Checks whether a provided value is an instance of the single-precision
* complex floating-point number array constructor.
*
* @private
* @param {Collection} arr - value to test
* @returns {boolean} result of the instance check
*/
function isCmplx64Array( arr ) {
	var bool = ( arr instanceof Complex64Array );
	return bool;
}
/**
* Checks whether a provided value is an instance of the double-precision
* complex floating-point number array constructor.
*
* @private
* @param {Collection} arr - value to test
* @returns {boolean} result of the instance check
*/
function isCmplx128Array( arr ) {
	var bool = ( arr instanceof Complex128Array );
	return bool;
}
// MAIN //
/**
* Creates a typed array pool.
*
* @param {Options} [options] - pool options
* @param {NonNegativeInteger} [options.highWaterMark] - maximum total memory which can be allocated
* @throws {TypeError} options argument must be an object
* @throws {TypeError} must provide valid options
* @returns {Function} allocator
*
* @example
* var typedarraypool = factory();
*
* // Allocate an array of doubles:
* var arr = typedarraypool( 5, 'float64' );
* // returns <Float64Array>[ 0.0, 0.0, 0.0, 0.0, 0.0 ]
*
* arr[ 0 ] = 3.14;
* arr[ 1 ] = 3.14;
*
* // ...
*
* // Free the allocated memory to be used in a future allocation:
* typedarraypool.free( arr );
*/
function factory( options ) {
	var nbytes;
	var pool;
	var opts;
	var err;

	// Work on a copy so the module-level defaults are never mutated:
	opts = copy( defaults );
	if ( arguments.length ) {
		err = validate( opts, options );
		if ( err ) {
			throw err;
		}
	}
	// One free-list ("bucket") per power-of-two byte size up to the high water mark:
	pool = createPool( ceil( log2( opts.highWaterMark ) ) );

	// Running total of bytes currently allocated by this pool:
	nbytes = 0;

	setReadOnly( malloc, 'malloc', malloc ); // circular reference
	setReadOnly( malloc, 'calloc', calloc );
	setReadOnly( malloc, 'free', free );
	setReadOnly( malloc, 'clear', clear );
	setReadOnly( malloc, 'highWaterMark', opts.highWaterMark );
	setReadOnlyAccessor( malloc, 'nbytes', getBytes );
	return malloc;

	/**
	* Returns the number of allocated bytes.
	*
	* @private
	* @returns {NonNegativeInteger} number of allocated bytes
	*/
	function getBytes() {
		return nbytes;
	}

	/**
	* Returns an array buffer.
	*
	* @private
	* @param {NonNegativeInteger} n - number of bytes (callers pass a power of two)
	* @returns {(ArrayBuffer|null)} array buffer or null
	*/
	function arraybuffer( n ) {
		var buf;
		var i;
		// Convert the number of bytes to an index in our pool table:
		i = log2( n );
		// If we already have an available array buffer, use it...
		if ( i < pool.length && pool[ i ].length ) {
			return pool[ i ].pop();
		}
		// Before allocating a new array buffer, ensure that we have not exceeded the maximum number of bytes we are allowed to allocate...
		if ( nbytes+n > opts.highWaterMark ) {
			return null;
		}
		buf = new ArrayBuffer( n );
		// Update the running counter of allocated bytes:
		nbytes += n;
		return buf;
	}

	/**
	* Returns a typed array.
	*
	* @private
	* @param {Function} ctor - typed array constructor
	* @param {NonNegativeInteger} len - view length
	* @param {string} dtype - data type
	* @returns {(TypedArray|null)} typed array or null
	*/
	function typedarray( ctor, len, dtype ) {
		var buf;
		if ( len === 0 ) {
			// Zero-length arrays need no backing pool buffer:
			return new ctor( 0 );
		}
		// Round the requested length up to the next power of two so buffers are reusable across requests of similar size:
		buf = arraybuffer( ceil2( len )*BYTES_PER_ELEMENT[ dtype ] );
		if ( buf === null ) {
			return buf;
		}
		return new ctor( buf, 0, len );
	}

	/**
	* Returns an uninitialized typed array.
	*
	* ## Notes
	*
	* -   Memory is **not** initialized.
	* -   Memory is lazily allocated.
	* -   If the function returns `null`, the function was unable to allocate a new typed array from the typed array pool (most likely due to insufficient memory).
	*
	* @private
	* @param {(NonNegativeInteger|Collection)} [arg] - an array length or an array-like object
	* @param {string} [dtype="float64"] - data type
	* @throws {TypeError} must provide a valid array length or an array-like object
	* @throws {TypeError} must provide a recognized data type
	* @returns {(TypedArray|null)} typed array or null
	*/
	function malloc() {
		var nargs;
		var dtype;
		var ctor;
		var arr;
		var out;
		var set;
		var get;
		var len;
		var i;
		nargs = arguments.length;
		// An optional trailing string argument is the dtype:
		if ( nargs && isString( arguments[ nargs-1 ] ) ) {
			nargs -= 1;
			dtype = arguments[ nargs ];
		} else {
			dtype = 'float64';
		}
		ctor = ctors( dtype );
		if ( ctor === null ) {
			throw new TypeError( 'invalid argument. Must provide a recognized data type. Value: `'+dtype+'`.' );
		}
		if ( nargs <= 0 ) {
			return new ctor( 0 );
		}
		// Check if provided a typed array length...
		if ( isNonNegativeInteger( arguments[ 0 ] ) ) {
			return typedarray( ctor, arguments[ 0 ], dtype );
		}
		// Check if provided an array-like object containing data elements...
		if ( isCollection( arguments[ 0 ] ) ) {
			arr = arguments[ 0 ];
			len = arr.length;
			// Complex inputs are viewed as interleaved real/imaginary floats so they can be copied element-wise:
			if ( isComplex128Array( arr ) ) {
				arr = reinterpret128( arr, 0 );
			} else if ( isComplex64Array( arr ) ) {
				arr = reinterpret64( arr, 0 );
			} else if ( /^complex/.test( dtype ) ) {
				// Assume we've been provided an array of interleaved real and imaginary components...
				len /= 2;
			}
			out = typedarray( ctor, len, dtype );
			if ( out === null ) {
				return out;
			}
			// Complex output arrays know how to consume interleaved input directly:
			if ( isCmplx128Array( out ) || isCmplx64Array( out ) ) {
				out.set( arr );
				return out;
			}
			// Wrap the arrays in order to account for the possibility that `arr` is a complex number array. As we don't prohibit other "unsafe" casts (e.g., providing a `Float64Array` and specifying a `dtype` of `uint8`), we don't prohibit providing a complex number array and specifying a real `dtype`. The results will probably be unexpected/gibberish, but I am not sure we should be overly pedantic in ensuring user's don't do ill-advised things...
			get = arraylike2object( arr ).getter;
			set = arraylike2object( out ).setter;
			for ( i = 0; i < len; i++ ) {
				set( out, i, get( arr, i ) );
			}
			return out;
		}
		throw new TypeError( 'invalid argument. First argument must be either an array length or an array-like object. Value: `'+arguments[ 0 ]+'`.' );
	}

	/**
	* Returns a zero-initialized typed array.
	*
	* ## Notes
	*
	* -   If the function returns `null`, the function was unable to allocate a new typed array from the typed array pool (most likely due to insufficient memory).
	*
	* @private
	* @param {NonNegativeInteger} [len=0] - array length
	* @param {string} [dtype="float64"] - data type
	* @throws {TypeError} must provide a valid array length
	* @throws {TypeError} must provide a recognized data type
	* @returns {(TypedArray|null)} typed array or null
	*/
	function calloc() {
		var nargs;
		var out;
		var tmp;
		var i;
		nargs = arguments.length;
		if ( nargs === 0 ) {
			out = malloc();
		} else if ( nargs === 1 ) {
			out = malloc( arguments[ 0 ] );
		} else {
			out = malloc( arguments[ 0 ], arguments[ 1 ] );
		}
		if ( out !== null ) {
			// Initialize the memory (pooled buffers may contain stale data from a prior use)...
			if ( isCmplx128Array( out ) ) {
				tmp = reinterpret128( out, 0 );
			} else if ( isCmplx64Array( out ) ) {
				tmp = reinterpret64( out, 0 );
			} else {
				tmp = out;
			}
			for ( i = 0; i < tmp.length; i++ ) {
				tmp[ i ] = 0.0;
			}
		}
		return out;
	}

	/**
	* Frees a typed array or typed array buffer.
	*
	* ## Notes
	*
	* -   Implicitly, we support providing non-internally allocated arrays and array buffer (e.g., "freeing" a typed array allocated in userland); however, the freed array buffer is likely to have excess capacity when compared to other members in its pool.
	*
	* @private
	* @param {(TypedArray|ArrayBuffer)} buf - typed array or array buffer to free
	* @throws {TypeError} must provide a typed array or typed array buffer
	* @returns {boolean} boolean indicating whether the typed array or array buffer was successfully freed
	*/
	function free( buf ) {
		var n;
		var p;
		var i;
		if ( isTypedArrayLike( buf ) && buf.buffer ) {
			buf = buf.buffer;
		} else if ( !isArrayBuffer( buf ) ) {
			throw new TypeError( 'invalid argument. Must provide a typed array or typed array buffer. Value: `'+buf+'`.' );
		}
		if ( buf.byteLength > 0 ) {
			// `floor` (not `ceil`) so a userland buffer of arbitrary size lands in a bucket it can fully satisfy:
			n = floor( log2( buf.byteLength ) );
			// Prohibit "freeing" array buffers which would potentially allow users to circumvent high water mark limits:
			n = min( pool.length-1, n );
			// Ensure that we do not attempt to free the same buffer more than once...
			p = pool[ n ];
			for ( i = 0; i < p.length; i++ ) {
				if ( p[ i ] === buf ) {
					return false;
				}
			}
			// Add the buffer to our pool of free buffers:
			p.push( buf );
		}
		return true;
	}

	/**
	* Clears the typed array pool allowing garbage collection of previously allocated (and currently free) array buffers.
	*
	* @private
	*/
	function clear() {
		var i;
		for ( i = 0; i < pool.length; i++ ) {
			pool[ i ].length = 0;
		}
		// Reset the byte counter; outstanding (un-freed) arrays are no longer tracked:
		nbytes = 0;
	}
}
// EXPORTS //
module.exports = factory;
| stdlib-js/stdlib | lib/node_modules/@stdlib/array/pool/lib/factory.js | JavaScript | apache-2.0 | 11,150 |
// Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gzip provides a middleware layer that performs
// gzip compression on the response.
package gzip
import (
"compress/gzip"
"io"
"net/http"
"strings"
"github.com/caddyserver/caddy"
"github.com/caddyserver/caddy/caddyhttp/httpserver"
)
// init registers the gzip directive with Caddy's HTTP server type and
// pre-populates the pool of gzip writers that are reused across requests.
func init() {
	caddy.RegisterPlugin("gzip", caddy.Plugin{
		ServerType: "http",
		Action:     setup,
	})
	initWriterPool()
}
// Gzip is a middleware type which gzips HTTP responses. It is
// imperative that any handler which writes to a gzipped response
// specifies the Content-Type, otherwise some clients will assume
// application/x-gzip and try to download a file.
type Gzip struct {
	Next    httpserver.Handler // next handler in the middleware chain
	Configs []Config           // one entry per gzip directive block
}

// Config holds the configuration for Gzip middleware
type Config struct {
	RequestFilters  []RequestFilter  // all must permit compression for a request
	ResponseFilters []ResponseFilter // evaluated against the response before compressing
	Level           int              // Compression level
}
// ServeHTTP serves a gzipped response if the client supports it.
// It applies the first config whose request filters all permit compression;
// remaining configs are only consulted when an earlier one is skipped.
func (g Gzip) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
	// Client must advertise gzip support; otherwise pass through untouched.
	if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
		return g.Next.ServeHTTP(w, r)
	}
outer:
	for _, c := range g.Configs {
		// Check request filters to determine if gzipping is permitted for this request
		for _, filter := range c.RequestFilters {
			if !filter.ShouldCompress(r) {
				continue outer
			}
		}
		// In order to avoid unused memory allocation, gzip.putWriter will only be called when gzip compression happened.
		// see https://github.com/caddyserver/caddy/issues/2395
		gz := &gzipResponseWriter{
			ResponseWriterWrapper: &httpserver.ResponseWriterWrapper{ResponseWriter: w},
			newWriter: func() io.Writer {
				// gzipWriter modifies underlying writer at init,
				// use a discard writer instead to leave ResponseWriter in
				// original form.
				return getWriter(c.Level)
			},
		}
		// Return the pooled writer once the response is fully written.
		defer func() {
			if gzWriter, ok := gz.internalWriter.(*gzip.Writer); ok {
				putWriter(c.Level, gzWriter)
			}
		}()
		var rw http.ResponseWriter
		// if no response filter is used
		if len(c.ResponseFilters) == 0 {
			// replace discard writer with ResponseWriter
			if gzWriter, ok := gz.Writer().(*gzip.Writer); ok {
				gzWriter.Reset(w)
			}
			rw = gz
		} else {
			// wrap gzip writer with ResponseFilterWriter
			rw = NewResponseFilterWriter(c.ResponseFilters, gz)
		}
		// Any response in forward middleware will now be compressed
		status, err := g.Next.ServeHTTP(rw, r)
		// If there was an error that remained unhandled, we need
		// to send something back before gzipWriter gets closed at
		// the return of this method!
		if status >= 400 {
			httpserver.DefaultErrorFunc(w, r, status)
			return 0, err
		}
		return status, err
	}
	// no matching filter
	return g.Next.ServeHTTP(w, r)
}
// gzipResponseWriter wraps the underlying Write method
// with a gzip.Writer to compress the output.
type gzipResponseWriter struct {
	internalWriter io.Writer // lazily created by Writer(); nil until first use
	*httpserver.ResponseWriterWrapper
	statusCodeWritten bool             // guards against writing headers twice
	newWriter         func() io.Writer // factory producing the (pooled) gzip writer
}
// WriteHeader adjusts response headers before delegating to the wrapped
// writer: Content-Length is dropped (it would describe the uncompressed
// body), the gzip encoding is declared, Vary is set for caches, and any
// strong ETag is weakened since the payload bytes change under compression.
func (w *gzipResponseWriter) WriteHeader(code int) {
	h := w.Header()
	h.Del("Content-Length")
	h.Set("Content-Encoding", "gzip")
	h.Add("Vary", "Accept-Encoding")
	if etag := h.Get("ETag"); etag != "" && !strings.HasPrefix(etag, "W/") {
		h.Set("ETag", "W/"+etag)
	}
	w.ResponseWriterWrapper.WriteHeader(code)
	w.statusCodeWritten = true
}
// Write compresses b through the lazily-created gzip writer. A Content-Type
// is sniffed from the first chunk if the handler did not set one, and the
// status line is flushed with 200 OK if WriteHeader was never called.
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
	if w.Header().Get("Content-Type") == "" {
		w.Header().Set("Content-Type", http.DetectContentType(b))
	}
	if !w.statusCodeWritten {
		w.WriteHeader(http.StatusOK)
	}
	return w.Writer().Write(b)
}
// Writer returns the compressed output writer, constructing it on first use
// so that no gzip writer is taken from the pool for requests that never write.
func (w *gzipResponseWriter) Writer() io.Writer {
	if w.internalWriter != nil {
		return w.internalWriter
	}
	w.internalWriter = w.newWriter()
	return w.internalWriter
}
// Interface guards
var _ httpserver.HTTPInterfaces = (*gzipResponseWriter)(nil)
| mholt/caddy | caddyhttp/gzip/gzip.go | GO | apache-2.0 | 4,876 |
// Shows an in-page toast for `msg` and, when the user has granted desktop
// notification permission, also a desktop notification that navigates to
// `link` when clicked.
var notifyDesktop = function(msg, link) {
    $.notify(msg, 'info');
    checkPermission().then(function(granted) {
        if (!granted) {
            return;
        }
        var notification = new Notification(msg, {
            body: 'Notification from knowledge.',
            icon: _CONTEXT + '/favicon.ico'
        });
        notification.onclick = function() {
            window.location.href = link;
        };
    });
};
/**
 * Resolves with true when desktop notifications may be shown, prompting the
 * user for permission if necessary.
 *
 * Bug fixed: the original wrote `return Promise.resolve(...)` inside the
 * `Notification.requestPermission` callback, which only returned from the
 * callback itself — the promise returned by checkPermission resolved with
 * `undefined` whenever permission was not already granted. The user's answer
 * to the permission prompt is now propagated to the caller. The Bluebird-only
 * `Promise.try` is replaced with a standard Promise constructor.
 *
 * @returns {Promise<boolean>} whether notifications are permitted
 */
var checkPermission = function() {
    return new Promise(function(resolve) {
        if (!window.Notification) {
            console.log('Notification is not available');
            resolve(false);
            return;
        }
        if (Notification.permission === 'granted') {
            console.log('Notification.permission is granted');
            resolve(true);
            return;
        }
        // 'denied' or 'default': ask the user and resolve with the outcome.
        // (The original also re-prompted in the 'denied' state; that behavior
        // is preserved.)
        console.log('Notification.permission is ' + Notification.permission);
        Notification.requestPermission(function(result) {
            console.log('requestPermission is ' + result);
            resolve(result === 'granted');
        });
    });
};
// Shared WebSocket connection to the notification endpoint; assigned on load.
var webSocket;
window.onload = function() {
	// Trigger the permission prompt early so later notifications can display.
	checkPermission();
	// Use an <a> element to resolve the context-relative path into an
	// absolute URL, then swap the scheme to ws(s) for the WebSocket.
	var forRtoA = document.createElement('a');
	forRtoA.href = _CONTEXT + '/notify';
	console.log(forRtoA.href.replace("http://", "ws://").replace("https://",
		"wss://"));
	webSocket = new WebSocket(forRtoA.href.replace("http://", "ws://").replace(
		"https://", "wss://"));
	webSocket.onopen = function() {
	}
	webSocket.onclose = function() {
	}
	// Server pushes JSON payloads; surface each one as a notification.
	webSocket.onmessage = function(message) {
		console.log('[RECEIVE] ');
		var result = JSON.parse(message.data);
		console.log(result);
		notifyDesktop(result.message, result.result);
	}
	webSocket.onerror = function(message) {
	}
	// Send a keep-alive ping so an idle connection is not dropped (the
	// original note cites an ALB idle timeout of 300 seconds).
	// NOTE(review): the original (Japanese) comment said "once every 3
	// minutes" but the interval below is 4 minutes — confirm which is intended.
	setInterval(function() {
		console.log('interval connection check');
		webSocket.send('a');
	}, 1000 * 60 * 4);
};
| support-project/knowledge | src/main/webapp/js/notification.js | JavaScript | apache-2.0 | 3,455 |
package br.com.rbg.pedidovenda.model;
/**
 * Legal person type: natural person (fisica) or legal entity (juridica).
 */
public enum TipoPessoa {

    FISICA("Fisica"),
    JURIDICA("Juridica");

    /** Display label for this person type; immutable once constructed. */
    private final String descricao;

    /**
     * @param descricao display label for this person type
     */
    TipoPessoa(String descricao) {
        this.descricao = descricao;
    }

    /**
     * @return the display label for this person type
     */
    public String getDescricao() {
        return descricao;
    }
}
| renatobarata/PedidoVenda | src/main/java/br/com/rbg/pedidovenda/model/TipoPessoa.java | Java | apache-2.0 | 270 |
package com.cms.controller.admin;
import com.cms.service.admin.CategoryService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.propertyeditors.CustomDateEditor;
import org.springframework.web.bind.WebDataBinder;
import org.springframework.web.bind.annotation.InitBinder;
import org.springframework.web.context.request.WebRequest;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;
import java.text.SimpleDateFormat;
/**
* Created by Evan on 2016/3/15.
*/
/**
 * Common functionality shared by the admin controllers: session retrieval
 * and request data binding for {@code yyyy-MM-dd} date parameters.
 */
public class BaseController {

    private static final Logger LOGGER = LoggerFactory.getLogger(BaseController.class);

    @Autowired
    CategoryService categoryService;

    /**
     * Returns the current HTTP session. When no session exists, an error is
     * logged and a fresh session is created so callers always get a non-null
     * session.
     */
    public HttpSession getSession(HttpServletRequest request) {
        HttpSession existing = request.getSession(false);
        if (existing != null) {
            return existing;
        }
        LOGGER.error("会话不存在!");
        return request.getSession(true);
    }

    /**
     * Registers a strict {@code yyyy-MM-dd} editor so request parameters can
     * be bound to {@link java.util.Date} fields.
     */
    @InitBinder
    public void initBinder(WebDataBinder binder, WebRequest request) {
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
        // Reject impossible dates (e.g. 2016-02-30) instead of rolling over.
        dateFormat.setLenient(false);
        binder.registerCustomEditor(java.util.Date.class, new CustomDateEditor(
                dateFormat, true));
    }
}
| EvilGreenArmy/cms | src/main/java/com/cms/controller/admin/BaseController.java | Java | apache-2.0 | 1,384 |
// pathMatcherConfig mirrors the YAML configuration for the "paths" matcher:
// a list of regular-expression strings, any one of which may match.
type pathMatcherConfig struct {
	MatchAny []string `yaml:"match_any"`
}

// pathMatcher holds the compiled form of the configured path patterns.
type pathMatcher struct {
	Paths []*regexp.Regexp
}

// pathMatcherFactory builds pathMatcher values from raw config.
type pathMatcherFactory struct{}

// Type returns the config key under which this matcher type is registered.
func (pmf pathMatcherFactory) Type() string {
	return "paths"
}
func (pmf pathMatcherFactory) Create(config interface{}) (Matcher, error) {
...
| Clever/talks | interfaces-sphinx/plugins/path_matcher.go | GO | apache-2.0 | 307 |
require File.dirname(__FILE__) + '/../test_helper'
require 'job_candidates_controller'
# Re-raise errors caught by the controller.
class JobCandidatesController; def rescue_action(e) raise e end; end
# Functional tests for JobCandidatesController: listing, viewing, creating,
# and destroying job candidates, plus the notification email sent on create.
class JobCandidatesControllerTest < Test::Unit::TestCase
  fixtures :job_candidates, :users

  def setup
    @controller = JobCandidatesController.new
    @request = ActionController::TestRequest.new
    @response = ActionController::TestResponse.new
    login_as(:oleg)
    @candidate = job_candidates(:first_candidate)
    ActionMailer::Base.deliveries.clear
  end

  def test_index
    get :index
    assert_response :success
    assert_not_nil assigns(:job_candidates)
  end

  def test_new
    get :new
    assert_response :success
    assert_template 'new'
    assert_not_nil assigns(:job_candidate)
  end

  def test_show
    get :show, :id => @candidate
    assert_response :success
    assert_template 'show'
    assert_not_nil assigns(:job_candidate)
    assert assigns(:job_candidate).valid?
  end

  def test_create_valid_candidate
    assert_difference JobCandidate, :count do
      post :create, :job_candidate => {
        :first_name => "Cameron", :last_name => 'Booth', :email => 'test@test.com',
        :reports => {"1"=>"no", "2"=>"no"}
      }
      assert_response :redirect
      assert_redirected_to :action => 'index'
    end
  end

  def test_create_invalid_candidate
    assert_no_difference JobCandidate, :count do
      post :create, :job_candidate => {
        :first_name => "", :last_name => '', :email => '',
        # Fixed: this hash previously used the key "1" twice, which silently
        # collapsed to a single entry; use distinct report ids as in the
        # valid-candidate test above.
        :reports => {"1"=>"no", "2"=>"no"}
      }
      assert_response :success
      assert_template 'new'
      assert_select "div#errorExplanation"
    end
  end

  def test_destroy
    assert_nothing_raised { JobCandidate.find(@candidate) }
    assert_difference JobCandidate, :count, -1 do
      delete :destroy, :id => @candidate
      assert_response :redirect
      assert_redirected_to :action => 'index'
    end
    assert_raise(ActiveRecord::RecordNotFound) { JobCandidate.find(@candidate) }
  end

  def test_create_new_agent_notification
    # Job candidate gets this email
    assert_difference ActionMailer::Base.deliveries, :size, 1 do
      assert_difference JobCandidate, :count do
        post :create, :job_candidate => {
          :first_name => "Cameron", :last_name => 'Booth', :email => 'test@test.com',
          :reports => {"1"=>"no", "2"=>"no"}
        }
        assert_equal ActionMailer::Base.deliveries.last.to.join, 'test@test.com'
      end
    end
  end
end
| techwyseintl/Northwood | newapp/test/functional/job_candidates_controller_test.rb | Ruby | apache-2.0 | 2,838 |
package org.gradle.test.performance.mediummonolithicjavaproject.p329;
import org.junit.Test;
import static org.junit.Assert.*;
public class Test6598 {
Production6598 objectUnderTest = new Production6598();
@Test
public void testProperty0() {
Production6589 value = new Production6589();
objectUnderTest.setProperty0(value);
assertEquals(value, objectUnderTest.getProperty0());
}
@Test
public void testProperty1() {
Production6593 value = new Production6593();
objectUnderTest.setProperty1(value);
assertEquals(value, objectUnderTest.getProperty1());
}
@Test
public void testProperty2() {
Production6597 value = new Production6597();
objectUnderTest.setProperty2(value);
assertEquals(value, objectUnderTest.getProperty2());
}
@Test
public void testProperty3() {
String value = "value";
objectUnderTest.setProperty3(value);
assertEquals(value, objectUnderTest.getProperty3());
}
@Test
public void testProperty4() {
String value = "value";
objectUnderTest.setProperty4(value);
assertEquals(value, objectUnderTest.getProperty4());
}
@Test
public void testProperty5() {
String value = "value";
objectUnderTest.setProperty5(value);
assertEquals(value, objectUnderTest.getProperty5());
}
@Test
public void testProperty6() {
String value = "value";
objectUnderTest.setProperty6(value);
assertEquals(value, objectUnderTest.getProperty6());
}
@Test
public void testProperty7() {
String value = "value";
objectUnderTest.setProperty7(value);
assertEquals(value, objectUnderTest.getProperty7());
}
@Test
public void testProperty8() {
String value = "value";
objectUnderTest.setProperty8(value);
assertEquals(value, objectUnderTest.getProperty8());
}
@Test
public void testProperty9() {
String value = "value";
objectUnderTest.setProperty9(value);
assertEquals(value, objectUnderTest.getProperty9());
}
} | oehme/analysing-gradle-performance | my-app/src/test/java/org/gradle/test/performance/mediummonolithicjavaproject/p329/Test6598.java | Java | apache-2.0 | 2,174 |
#
# Cookbook Name:: aws-sdk
# Attributes:: default
#
# Copyright (c) 2014 Quad Learning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Version of the aws-sdk gem to install.
default['aws-sdk']['version'] = '1.42.0'
# Version of the unf gem pinned alongside the SDK.
default['aws-sdk']['unf_version'] = '0.1.4'
| americanhonors-cookbooks/aws-sdk | attributes/default.rb | Ruby | apache-2.0 | 728 |
#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
using namespace std;
int main() {
int n; cin >> n;
string s;
vector<string> v(n);
for (int i = 0; i < n; ++i) {
cin >> v[i];
}
sort(v.begin(), v.end());
char ch; cin >> ch;
for (int i = 0; i < v.size(); ++i) {
if (v[i][0] == ch)
cout << v[i] << endl;
}
return 0;
} | ChameleonTartu/competitive_programming | Timus/C++/Timus1545.cpp | C++ | apache-2.0 | 360 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.calcite.adapter.spark;
import org.apache.calcite.adapter.enumerable.JavaRowFormat;
import org.apache.calcite.adapter.enumerable.PhysType;
import org.apache.calcite.adapter.enumerable.PhysTypeImpl;
import org.apache.calcite.adapter.java.JavaTypeFactory;
import org.apache.calcite.adapter.jdbc.JdbcConvention;
import org.apache.calcite.adapter.jdbc.JdbcImplementor;
import org.apache.calcite.adapter.jdbc.JdbcRel;
import org.apache.calcite.adapter.jdbc.JdbcSchema;
import org.apache.calcite.linq4j.tree.BlockBuilder;
import org.apache.calcite.linq4j.tree.Expression;
import org.apache.calcite.linq4j.tree.Expressions;
import org.apache.calcite.linq4j.tree.Primitive;
import org.apache.calcite.plan.ConventionTraitDef;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptCost;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.prepare.CalcitePrepareImpl;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.convert.ConverterImpl;
import org.apache.calcite.rel.metadata.RelMetadataQuery;
import org.apache.calcite.sql.SqlDialect;
import org.apache.calcite.util.BuiltInMethod;
import java.util.ArrayList;
import java.util.List;
/**
* Relational expression representing a scan of a table in a JDBC data source
* that returns its results as a Spark RDD.
*/
public class JdbcToSparkConverter
    extends ConverterImpl
    implements SparkRel {
  /** Creates a JdbcToSparkConverter over the given JDBC input. */
  protected JdbcToSparkConverter(RelOptCluster cluster, RelTraitSet traits,
      RelNode input) {
    super(cluster, ConventionTraitDef.INSTANCE, traits, input);
  }

  @Override public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
    return new JdbcToSparkConverter(
        getCluster(), traitSet, sole(inputs));
  }

  @Override public RelOptCost computeSelfCost(RelOptPlanner planner,
      RelMetadataQuery mq) {
    // Discount the converter's cost so the planner favors pushing work down
    // into the JDBC source rather than converting earlier.
    return super.computeSelfCost(planner, mq).multiplyBy(.1);
  }

  /**
   * Implements this node for Spark by generating SQL for the JDBC child and
   * emitting linq4j expressions that read the JDBC result set as an
   * enumerable of rows.
   */
  public SparkRel.Result implementSpark(SparkRel.Implementor implementor) {
    // Generate:
    //   ResultSetEnumerable.of(schema.getDataSource(), "select ...")
    final BlockBuilder list = new BlockBuilder();
    final JdbcRel child = (JdbcRel) getInput();
    final PhysType physType =
        PhysTypeImpl.of(
            implementor.getTypeFactory(), getRowType(),
            JavaRowFormat.CUSTOM);
    final JdbcConvention jdbcConvention =
        (JdbcConvention) child.getConvention();
    String sql = generateSql(jdbcConvention.dialect);
    if (CalcitePrepareImpl.DEBUG) {
      System.out.println("[" + sql + "]");
    }
    final Expression sqlLiteral =
        list.append("sql", Expressions.constant(sql));
    // Record each field's primitive type so result-set columns can be read
    // with the correct accessor; non-primitive fields map to Primitive.OTHER.
    final List<Primitive> primitives = new ArrayList<>();
    for (int i = 0; i < getRowType().getFieldCount(); i++) {
      final Primitive primitive = Primitive.ofBoxOr(physType.fieldClass(i));
      primitives.add(primitive != null ? primitive : Primitive.OTHER);
    }
    final Expression primitivesLiteral =
        list.append("primitives",
            Expressions.constant(
                primitives.toArray(new Primitive[0])));
    final Expression enumerable =
        list.append(
            "enumerable",
            Expressions.call(
                BuiltInMethod.RESULT_SET_ENUMERABLE_OF.method,
                Expressions.call(
                    Expressions.convert_(
                        jdbcConvention.expression,
                        JdbcSchema.class),
                    BuiltInMethod.JDBC_SCHEMA_DATA_SOURCE.method),
                sqlLiteral,
                primitivesLiteral));
    list.add(
        Expressions.return_(null, enumerable));
    return implementor.result(physType, list.toBlock());
  }

  /** Renders the JDBC child subtree as a SQL string in the given dialect. */
  private String generateSql(SqlDialect dialect) {
    final JdbcImplementor jdbcImplementor =
        new JdbcImplementor(dialect,
            (JavaTypeFactory) getCluster().getTypeFactory());
    final JdbcImplementor.Result result =
        jdbcImplementor.visitChild(0, getInput());
    return result.asStatement().toSqlString(dialect).getSql();
  }
}
| dindin5258/calcite | spark/src/main/java/org/apache/calcite/adapter/spark/JdbcToSparkConverter.java | Java | apache-2.0 | 4,926 |
package org.jtheque.core.utils;
/*
* Copyright JTheque (Baptiste Wicht)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The image type.
*
* @author Baptiste Wicht
*/
public enum ImageType {
    PNG("png"),
    JPG("jpg"),
    JPEG("jpeg"),
    GIF("gif");

    // File extension associated with this image type.
    private final String extension;

    /**
     * Construct a new ImageType with a specified extension.
     *
     * @param extension The extension of the image.
     */
    ImageType(String extension) {
        this.extension = extension;
    }

    /**
     * Return the extension of the image.
     *
     * @return The extension of the image.
     */
    public String getExtension() {
        return extension;
    }

    /**
     * Resolve the string type to the ImageType value. The comparison is
     * case-insensitive.
     *
     * @param type The string type of the image.
     *
     * @return The corresponding ImageType value.
     */
    public static ImageType resolve(String type) {
        for (ImageType imageType : values()) {
            if (imageType.extension.equalsIgnoreCase(type)) {
                return imageType;
            }
        }
        throw new IllegalArgumentException("The given type is not an image type. ");
    }
}
| wichtounet/jtheque-core | jtheque-core-utils/src/main/java/org/jtheque/core/utils/ImageType.java | Java | apache-2.0 | 1,839 |
package com.notronix.lw.api.model;
import java.time.Instant;
import java.util.UUID;
/**
 * Refund details for a Linnworks order item.
 *
 * <p>Field names intentionally keep the PascalCase spelling of the matching
 * API JSON properties and must not be renamed.</p>
 */
public class RefundInfo
{
    private UUID pkRefundRowId;
    private String SKU;
    private String ItemTitle;
    private Boolean IsItem;
    private Boolean IsService;
    private Double Amount;
    private String Reason;
    private Boolean Actioned;
    private Instant ActionDate;
    private String ReturnReference;
    private Double Cost;
    private PostSaleStatusType RefundStatus;
    private Boolean IgnoredValidation;
    private UUID fkOrderItemRowId;
    private Boolean ShouldSerializeChannelReason;
    private String ChannelReason;
    private Boolean ShouldSerializeChannelReasonSec;
    private String ChannelReasonSec;
    private Boolean IsNew;

    // --- Identifiers ---

    public UUID getPkRefundRowId() {
        return this.pkRefundRowId;
    }

    public void setPkRefundRowId(UUID pkRefundRowId) {
        this.pkRefundRowId = pkRefundRowId;
    }

    public UUID getFkOrderItemRowId() {
        return this.fkOrderItemRowId;
    }

    public void setFkOrderItemRowId(UUID fkOrderItemRowId) {
        this.fkOrderItemRowId = fkOrderItemRowId;
    }

    // --- Item description ---

    public String getSKU() {
        return this.SKU;
    }

    public void setSKU(String SKU) {
        this.SKU = SKU;
    }

    public String getItemTitle() {
        return this.ItemTitle;
    }

    public void setItemTitle(String itemTitle) {
        this.ItemTitle = itemTitle;
    }

    public Boolean getItem() {
        return this.IsItem;
    }

    public void setItem(Boolean item) {
        this.IsItem = item;
    }

    public Boolean getService() {
        return this.IsService;
    }

    public void setService(Boolean service) {
        this.IsService = service;
    }

    // --- Refund amounts and state ---

    public Double getAmount() {
        return this.Amount;
    }

    public void setAmount(Double amount) {
        this.Amount = amount;
    }

    public String getReason() {
        return this.Reason;
    }

    public void setReason(String reason) {
        this.Reason = reason;
    }

    public Boolean getActioned() {
        return this.Actioned;
    }

    public void setActioned(Boolean actioned) {
        this.Actioned = actioned;
    }

    public Instant getActionDate() {
        return this.ActionDate;
    }

    public void setActionDate(Instant actionDate) {
        this.ActionDate = actionDate;
    }

    public String getReturnReference() {
        return this.ReturnReference;
    }

    public void setReturnReference(String returnReference) {
        this.ReturnReference = returnReference;
    }

    public Double getCost() {
        return this.Cost;
    }

    public void setCost(Double cost) {
        this.Cost = cost;
    }

    public PostSaleStatusType getRefundStatus() {
        return this.RefundStatus;
    }

    public void setRefundStatus(PostSaleStatusType refundStatus) {
        this.RefundStatus = refundStatus;
    }

    public Boolean getIgnoredValidation() {
        return this.IgnoredValidation;
    }

    public void setIgnoredValidation(Boolean ignoredValidation) {
        this.IgnoredValidation = ignoredValidation;
    }

    // --- Channel (marketplace) metadata ---

    public Boolean getShouldSerializeChannelReason() {
        return this.ShouldSerializeChannelReason;
    }

    public void setShouldSerializeChannelReason(Boolean shouldSerializeChannelReason) {
        this.ShouldSerializeChannelReason = shouldSerializeChannelReason;
    }

    public String getChannelReason() {
        return this.ChannelReason;
    }

    public void setChannelReason(String channelReason) {
        this.ChannelReason = channelReason;
    }

    public Boolean getShouldSerializeChannelReasonSec() {
        return this.ShouldSerializeChannelReasonSec;
    }

    public void setShouldSerializeChannelReasonSec(Boolean shouldSerializeChannelReasonSec) {
        this.ShouldSerializeChannelReasonSec = shouldSerializeChannelReasonSec;
    }

    public String getChannelReasonSec() {
        return this.ChannelReasonSec;
    }

    public void setChannelReasonSec(String channelReasonSec) {
        this.ChannelReasonSec = channelReasonSec;
    }

    public Boolean getNew() {
        return this.IsNew;
    }

    public void setNew(Boolean aNew) {
        this.IsNew = aNew;
    }
}
| Notronix/JaLAPI | src/main/java/com/notronix/lw/api/model/RefundInfo.java | Java | apache-2.0 | 4,241 |
'use strict';
const chai = require('chai');
const assert = chai.assert;
const extensions = require('../../lib/extensions');
describe('Extensions Tests', () => {
    // Two distinct no-op functions so identity comparisons in the
    // assertions below are meaningful.
    const noop = () => undefined;
    const noop2 = () => undefined;

    // Fresh options object per call, in case the library mutates it.
    const noPromiseOptions = () => ({
        promise: {
            noPromise: true
        }
    });

    describe('add', () => {
        it('undefined', () => {
            assert.isFalse(extensions.add());
        });

        it('missing type', () => {
            assert.isFalse(extensions.add(undefined, 'myfunc', noop));
        });

        it('type not string', () => {
            assert.isFalse(extensions.add(123, 'myfunc', noop));
        });

        it('invalid type', () => {
            assert.isFalse(extensions.add('test', 'myfunc', noop));
        });

        it('missing name', () => {
            assert.isFalse(extensions.add('connection', undefined, noop));
        });

        it('name not string', () => {
            assert.isFalse(extensions.add('connection', 123, noop));
        });

        it('missing function', () => {
            assert.isFalse(extensions.add('connection', 'myfunc'));
        });

        it('invalid function type', () => {
            assert.isFalse(extensions.add('connection', 'myfunc', 123));
        });

        it('valid', () => {
            assert.isTrue(extensions.add('connection', 'myfunc1', noop));
            assert.isTrue(extensions.add('connection', 'myfunc2', noop2));
            assert.isTrue(extensions.add('pool', 'myfunc1', noop));

            assert.isFunction(extensions.extensions.connection.myfunc1);
            assert.isFunction(extensions.extensions.connection.myfunc2);

            // Re-adding an existing name overwrites it and still succeeds.
            assert.isTrue(extensions.add('connection', 'myfunc2', noop));

            assert.isFunction(extensions.extensions.connection.myfunc1);
            assert.isFunction(extensions.extensions.connection.myfunc2);
            assert.isFunction(extensions.get('connection').myfunc1);
            assert.isFunction(extensions.get('connection').myfunc2);
            assert.isFunction(extensions.get('pool').myfunc1);
        });

        it('valid, no promise', () => {
            assert.isTrue(extensions.add('connection', 'myfunc1', noop, noPromiseOptions()));
            assert.isTrue(extensions.add('connection', 'myfunc2', noop2, noPromiseOptions()));
            assert.isTrue(extensions.add('pool', 'myfunc1', noop, noPromiseOptions()));

            // With noPromise the raw functions are stored as-is.
            assert.deepEqual(extensions.extensions.connection, {
                myfunc1: noop,
                myfunc2: noop2
            });

            assert.isTrue(extensions.add('connection', 'myfunc2', noop, noPromiseOptions()));

            assert.deepEqual(extensions.extensions.connection, {
                myfunc1: noop,
                myfunc2: noop
            });
            assert.deepEqual(extensions.get('connection'), {
                myfunc1: noop,
                myfunc2: noop
            });
            assert.deepEqual(extensions.get('pool'), {
                myfunc1: noop
            });
        });
    });

    describe('get', () => {
        it('undefined', () => {
            assert.isUndefined(extensions.get());
        });

        it('null', () => {
            assert.isUndefined(extensions.get(null));
        });

        it('empty', () => {
            extensions.extensions.connection = {};
            assert.deepEqual(extensions.get('connection'), {});
        });

        it('functions exist', () => {
            extensions.extensions.connection = {
                test: noop,
                test2: noop2
            };
            assert.deepEqual(extensions.get('connection'), {
                test: noop,
                test2: noop2
            });
        });
    });
});
| sagiegurari/simple-oracledb | test/spec/extensions-spec.js | JavaScript | apache-2.0 | 4,815 |
package com.chloxen95.RspamdConfiguration.Service;
import java.io.BufferedReader;
import java.io.IOException;
import java.util.Map;
public interface ConfigurationParser {
    /**
     * Parses a single variable assignment line such as
     * {@code check_all_filters = false;} and stores the resulting
     * key/value pair in the target map.
     *
     * @param varStr the source line containing the assignment
     * @param result the target map receiving the parsed value
     */
    public void ReadVariable(String varStr, Map<String, Object> result);
    /**
     * Parses a list declaration such as
     * {@code classify_headers = [ "User-Agent", "X-Mailer", "Content-Type", "X-MimeOLE", ];}
     * and stores it in the target map. Each list entry must be on its own
     * line; otherwise the content is read as a plain string.
     *
     * @param varNameStr the source line containing the list name
     * @param br the reader, positioned at the line holding the list name
     * @param result the target map receiving the parsed list
     * @throws IOException if reading from the stream fails
     */
    public void ReadList(String varNameStr, BufferedReader br, Map<String, Object> result) throws IOException;
    /**
     * Parses a nested block (map) such as
     * {@code dns { timeout = 1s; sockets = 16; retransmits = 5; }} and stores
     * it in the target map. The entries must span multiple lines; otherwise
     * the content is read as a plain string.
     *
     * @param varNameStr the source line containing the block name
     * @param br the reader, positioned at the line holding the block name
     * @param result the target map receiving the parsed block
     * @throws IOException if reading from the stream fails
     */
    public void ReadMap(String varNameStr, BufferedReader br, Map<String, Object> result) throws IOException;
    /**
     * Determines the kind of each line (variable, list or map) and dispatches
     * to the matching parse method, collecting everything into the target map.
     *
     * @param br the reader for the configuration content
     * @param result the target map receiving all parsed entries
     * @throws IOException if reading from the stream fails
     */
    public void ConfParser(BufferedReader br, Map<String, Object> result) throws IOException;
    /**
     * Reads the configuration file at the given path.
     *
     * @param path the file path
     * @return the file content as a map
     * @throws IOException if the file cannot be read
     */
    public Map<String, Object> getFileContent(String path) throws IOException;
}
| chloxen95/RspamdConfiguration | src/main/java/com/chloxen95/RspamdConfiguration/Service/ConfigurationParser.java | Java | apache-2.0 | 1,715 |
# -*- coding: utf-8 -*-
# Minio Python Library for Amazon S3 compatible cloud storage, (C) 2015 Minio, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys

try:  # Python 3
    from urllib.parse import urlparse as compat_urllib_parse
except ImportError:  # Python 2 fallback
    from urlparse import urlparse as compat_urllib_parse

# ``strtype`` is the base string type for isinstance() checks on either
# Python major version.
if sys.version_info < (3, 0):
    strtype = basestring  # noqa: F821 - only evaluated on Python 2
else:
    strtype = str
| krishnasrinivas/minio-py | tests/unit/compat.py | Python | apache-2.0 | 917 |
from azure.servicebus import ServiceBusService, Message, Queue
class azure_service_bus_listener(object):
    """Listens for messages on a single Azure Service Bus queue."""

    def __init__(self, azure_settings):
        """Create the bus service client.

        azure_settings is a dict with the keys 'name_space', 'key_name',
        'key_value' and 'queue_name'.
        """
        self.bus_service = ServiceBusService(
            service_namespace=azure_settings['name_space'],
            shared_access_key_name=azure_settings['key_name'],
            shared_access_key_value=azure_settings['key_value'])
        self.queue_name = azure_settings['queue_name']

    def wait_for_message(self, on_receive_target, on_timeout_target):
        """Block until one message arrives or the receive returns empty.

        Calls on_receive_target(message_string) with the UTF-8 decoded body
        when a message is received, or on_timeout_target() when no message
        was available.
        """
        # just in case it isn't there
        self.create_queue()
        message = self.bus_service.receive_queue_message(self.queue_name, peek_lock=False)
        # Fixed: compare to None with 'is', not '=='.
        if message.body is None:
            print("[ASB_Listener]: No Message Received")
            on_timeout_target()
        else:
            message_string = message.body.decode('utf-8')
            on_receive_target(message_string)

    def create_queue(self):
        """Create the queue if it does not already exist (idempotent)."""
        q_opt = Queue()
        q_opt.max_size_in_megabytes = '1024'
        # Messages expire after one minute (ISO-8601 duration).
        q_opt.default_message_time_to_live = 'PT1M'
        self.bus_service.create_queue(self.queue_name, q_opt)
package allow.simulator.netlogo.agent;
import it.unimi.dsi.fastutil.longs.Long2LongOpenHashMap;
import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.nlogo.agent.Link;
import org.nlogo.agent.Turtle;
import org.nlogo.agent.World;
import org.nlogo.api.AgentException;
import allow.simulator.core.Context;
import allow.simulator.core.EntityManager;
import allow.simulator.core.Simulator;
import allow.simulator.entity.Entity;
import allow.simulator.entity.EntityTypes;
import allow.simulator.util.Coordinate;
import allow.simulator.world.Street;
import allow.simulator.world.StreetMap;
import allow.simulator.world.StreetSegment;
import allow.simulator.world.Transformation;
/**
 * Mirrors the simulator context (street map and entities) into a NetLogo
 * world: streets become links between hidden node turtles, entities become
 * {@link NetLogoAgent} turtles.
 */
public final class NetLogoWrapper implements IContextWrapper {
    // Singleton instance, created by initialize()
    private static NetLogoWrapper instance;

    // NetLogo world instance to populate
    private final World netLogoWorld;

    // Bidirectional mapping between simulator entity ids and NetLogo turtle ids
    private final Map<Long, Long> simToNetLogo;
    private final Map<Long, Long> netLogoToSim;

    // Transformation to convert GIS coordinates to NetLogo world coordinates
    private Transformation transformation;

    public NetLogoWrapper(World netLogoWorld) {
        this.netLogoWorld = netLogoWorld;
        this.simToNetLogo = new Long2LongOpenHashMap();
        this.netLogoToSim = new Long2LongOpenHashMap();
    }

    public Transformation getTransformation() {
        return transformation;
    }

    public World getWorld() {
        return netLogoWorld;
    }

    @Override
    public void wrap(Context context) {
        // Wrap world
        wrapWorld((StreetMap) context.getWorld());

        // Wrap entities
        try {
            wrapEntities(context.getEntityManager());
        } catch (AgentException e) {
            e.printStackTrace();
        }
    }

    private void wrapWorld(StreetMap world) {
        // NetLogo view envelope: { minX, maxX, minY, maxY }.
        // Fixed: the last entry previously used maxPxcor() (copy-paste slip),
        // yielding a wrong Y extent for non-square worlds.
        double worldEnvelope[] = new double[] { netLogoWorld.minPxcor(),
                netLogoWorld.maxPxcor(), netLogoWorld.minPycor(),
                netLogoWorld.maxPycor() };

        // Get envelope of loaded world.
        double gisEnvelope[] = world.getDimensions();

        // Set transformation between NetLogo and loaded world.
        transformation = new Transformation(gisEnvelope, worldEnvelope);

        // Create NetLogo bindings for streets and nodes
        Collection<Street> streets = world.getStreets();
        Long2ObjectOpenHashMap<Turtle> nodes = new Long2ObjectOpenHashMap<Turtle>();

        for (Street street : streets) {
            List<StreetSegment> segs = street.getSubSegments();
            // Blocked streets are drawn red (15.0), normal ones gray (5.0).
            double color = street.isBlocked() ? 15.0 : 5.0;

            for (StreetSegment seg : segs) {
                // Reuse the node turtle if this segment endpoint was seen before.
                Turtle startNode = nodes.get(seg.getStartingNode().getId());

                if (startNode == null) {
                    Coordinate pos = transformation.transform(seg.getStartingPoint());
                    startNode = new Turtle(netLogoWorld, netLogoWorld.getBreed("NODES"), pos.x, pos.y);
                    nodes.put(seg.getStartingNode().getId(), startNode);
                    netLogoWorld.turtles().add(startNode);
                    startNode.hidden(true);
                }
                Turtle endNode = nodes.get(seg.getEndingNode().getId());

                if (endNode == null) {
                    // Fixed: the end node was previously placed at the
                    // segment's *starting* point. Assumes
                    // StreetSegment#getEndingPoint() mirrors getStartingPoint().
                    Coordinate pos = transformation.transform(seg.getEndingPoint());
                    endNode = new Turtle(netLogoWorld, netLogoWorld.getBreed("NODES"), pos.x, pos.y);
                    nodes.put(seg.getEndingNode().getId(), endNode);
                    netLogoWorld.turtles().add(endNode);
                    endNode.hidden(true);
                }
                Link newLink = netLogoWorld.linkManager.createLink(startNode, endNode, netLogoWorld.links());
                netLogoWorld.links().add(newLink);
                newLink.colorDouble(color);
                newLink.lineThickness(0.05);
                newLink.hidden(false);
            }
        }
    }

    private void wrapEntities(EntityManager entityManager) throws AgentException {
        // Rebuild both id mappings from scratch (Map.clear() suffices; the
        // previous casts to Long2LongOpenHashMap were unnecessary).
        simToNetLogo.clear();
        netLogoToSim.clear();

        for (String type : entityManager.getEntityTypes()) {
            // Get all entities of certain type
            Collection<Entity> entities = entityManager.getEntitiesOfType(type);

            if ((entities == null) || (entities.size() == 0))
                continue;

            for (Entity entity : entities) {
                switch (type) {
                case EntityTypes.BUS:
                case EntityTypes.FLEXIBUS:
                case EntityTypes.PERSON:
                case EntityTypes.TAXI:
                case EntityTypes.PUBLIC_TRANSPORT_AGENCY:
                case EntityTypes.FLEXIBUS_AGENCY:
                case EntityTypes.TAXI_AGENCY:
                    NetLogoAgent newAgent = NetLogoAgent.createNetLogoAgent(this, entity);
                    netLogoWorld.turtles().add(newAgent);

                    // Guard against duplicate ids in either direction.
                    if (netLogoToSim.get(newAgent.id) != null)
                        throw new IllegalStateException("Error: NetLogo entity Id" + newAgent.id + " already in use.");
                    netLogoToSim.put(newAgent.id, entity.getId());

                    if (simToNetLogo.get(entity.getId()) != null)
                        throw new IllegalStateException("Error: Simulator entity Id" + entity.getId() + " already in use.");
                    simToNetLogo.put(entity.getId(), newAgent.id);
                    break;

                default:
                    // Other entity types have no NetLogo representation.
                    break;
                }
            }
        }
    }

    /**
     * Creates the singleton wrapper for the given simulator and NetLogo
     * world and populates the world from the simulator context.
     */
    public static NetLogoWrapper initialize(Simulator simulator, World world) {
        instance = new NetLogoWrapper(world);
        instance.wrap(simulator.getContext());
        return instance;
    }

    /**
     * Returns the singleton instance.
     *
     * @throws UnsupportedOperationException if initialize() was never called
     */
    public static NetLogoWrapper Instance() {
        if (instance == null)
            throw new UnsupportedOperationException();
        return instance;
    }
}
| poxrucker/collaborative-learning-simulation | Simulator/src/allow/simulator/netlogo/agent/NetLogoWrapper.java | Java | apache-2.0 | 5,395 |
module ChefAPI
  class Resource::DataBag < Resource::Base
    collection_path '/data'

    schema do
      attribute :name, type: String, primary: true, required: true
    end

    class << self
      #
      # Load the data bag from a collection of JSON files on disk. Just like
      # +knife+, the basename of the folder is assumed to be the name of the
      # data bag and all containing items a proper JSON data bag.
      #
      # This will load **all** items in the data bag, returning an array of
      # those items. To load an individual data bag item, see
      # {DataBagItem.from_file}.
      #
      # **This method does NOT return an instance of a {DataBag}!**
      #
      # @param [String] path
      #   the path to the data bag **folder** on disk
      # @param [String] name
      #   the name of the data bag
      #
      # @return [Array<DataBagItem>]
      #
      def from_file(path, name = File.basename(path))
        path = File.expand_path(path)

        # File.exist? replaces the deprecated File.exists? (removed in
        # Ruby 3.2). The redundant ArgumentError check that duplicated the
        # NotADirectory guard has been dropped.
        raise Error::FileNotFound.new(path: path) unless File.exist?(path)
        raise Error::NotADirectory.new(path: path) unless File.directory?(path)

        bag = new(name: name)

        Util.fast_collect(Dir["#{path}/*.json"]) do |item|
          DataBagItem.from_file(item, bag)
        end
      end

      #
      # Fetch the data bag with the given id, returning +nil+ if it does
      # not exist.
      #
      # @param [String] id
      #   the name of the data bag
      # @param [Hash] prefix
      #   URL prefix options
      #
      # @return [DataBag, nil]
      #
      def fetch(id, prefix = {})
        return nil if id.nil?

        # The GET is only an existence check - it raises HTTPNotFound when
        # the data bag does not exist (the response body is not needed).
        path = resource_path(id, prefix)
        connection.get(path)

        new(name: id)
      rescue Error::HTTPNotFound
        nil
      end

      #
      # Iterate over the data bag collection, yielding a lightweight
      # {DataBag} for each entry.
      #
      def each(&block)
        collection.each do |name, path|
          result = new(name: name)
          block.call(result) if block
        end
      end
    end

    #
    # This is the same as +has_many :items+, but creates a special collection
    # for data bag items, which is mutable and handles some special edge cases
    # that only data bags encounter.
    #
    # @see Base.has_many
    #
    def items
      associations[:items] ||= Resource::DataBagItemCollectionProxy.new(self)
    end
  end
end
module ChefAPI
  #
  # The mutable collection is a special kind of collection proxy that permits
  # Rails-like attribute creation, like:
  #
  #   DataBag.first.items.create(id: 'me', thing: 'bar', zip: 'zap')
  #
  # Each mutating method delegates to the {DataBagItem} class and then calls
  # +reload!+ in an +ensure+ block, so the cached collection is refreshed
  # even when the underlying request raises.
  #
  class Resource::DataBagItemCollectionProxy < Resource::CollectionProxy
    def initialize(bag)
      # Delegate to the superclass; the bag name becomes the URL prefix for
      # all item requests.
      super(bag, Resource::DataBagItem, nil, bag: bag.name)
    end

    # @see klass.new
    def new(data = {})
      klass.new(data, prefix, parent)
    end

    # @see klass.destroy
    def destroy(id)
      klass.destroy(id, prefix)
    ensure
      reload!
    end

    # @see klass.destroy_all
    def destroy_all
      klass.destroy_all(prefix)
    ensure
      reload!
    end

    # @see klass.build
    def build(data = {})
      klass.build(data, prefix)
    end

    # @see klass.create
    def create(data = {})
      klass.create(data, prefix)
    ensure
      reload!
    end

    # @see klass.create!
    def create!(data = {})
      klass.create!(data, prefix)
    ensure
      reload!
    end

    # @see klass.update
    def update(id, data = {})
      klass.update(id, data, prefix)
    end
  end
end
| sethvargo/chef-api | lib/chef-api/resources/data_bag.rb | Ruby | apache-2.0 | 3,310 |
package org.graniteds.tutorial.data.services;
import javax.persistence.EntityManager;
import javax.persistence.TypedQuery;
import javax.persistence.criteria.*;
import org.granite.tide.data.model.Page;
import org.granite.tide.data.model.PageInfo;
import org.graniteds.tutorial.data.entities.Account;
import java.util.*;
public class AccountSearch {

    private final EntityManager entityManager;

    public AccountSearch(EntityManager entityManager) {
        this.entityManager = entityManager;
    }

    // tag::findByFilter[]
    /**
     * Returns one page of accounts whose name or email matches the optional
     * {@code searchText} filter entry, together with the total result count.
     */
    public Page<Account> findByFilter(Map<String, String> filter, PageInfo pageInfo) {
        CriteriaBuilder cb = entityManager.getCriteriaBuilder();

        String searchText = filter == null ? null : filter.get("searchText"); // <1>
        if (searchText != null) {
            // Blank search text means "no filtering"; otherwise wrap it in
            // SQL LIKE wildcards.
            searchText = searchText.trim();
            searchText = searchText.isEmpty() ? null : "%" + searchText + "%";
        }

        // Shared parameter for both the count and the data query.
        ParameterExpression<String> searchTextParam =
                searchText != null ? cb.parameter(String.class) : null;

        // Count query: total number of matching rows.
        CriteriaQuery<Long> cqc = cb.createQuery(Long.class);
        Root<Account> countRoot = cqc.from(Account.class);
        cqc.select(cb.count(countRoot));
        if (searchText != null) {
            // Fixed: the predicate is now built against this query's own
            // root. The previous version reused the count query's predicate
            // (bound to the count query's root) in the data query below,
            // which is invalid JPA criteria usage.
            cqc.where(searchTextPredicate(cb, countRoot, searchTextParam));
        }
        TypedQuery<Long> qc = entityManager.createQuery(cqc);
        if (searchText != null) {
            qc.setParameter(searchTextParam, searchText);
        }
        long resultCount = qc.getSingleResult();

        // Data query: the requested page, optionally sorted.
        CriteriaQuery<Account> cq = cb.createQuery(Account.class);
        Root<Account> accountRoot = cq.from(Account.class);
        if (searchText != null) {
            cq.where(searchTextPredicate(cb, accountRoot, searchTextParam));
        }
        if (pageInfo != null && pageInfo.getSortInfo() != null && pageInfo.getSortInfo().getOrder() != null) { // <2>
            List<Order> orderBy = new ArrayList<Order>();
            for (int idx = 0; idx < pageInfo.getSortInfo().getOrder().length; idx++) {
                Path<String> sortPath = accountRoot.get(pageInfo.getSortInfo().getOrder()[idx]);
                orderBy.add(pageInfo.getSortInfo().getDesc()[idx] ? cb.desc(sortPath) : cb.asc(sortPath));
            }
            cq.orderBy(orderBy);
        }
        TypedQuery<Account> q = entityManager.createQuery(cq);
        if (searchText != null) {
            q.setParameter(searchTextParam, searchText);
        }
        // NOTE(review): pageInfo is dereferenced here without a null check,
        // exactly as before; callers are expected to always supply paging
        // information. (Debug System.out.println statements removed.)
        List<Account> resultList = q
                .setFirstResult(pageInfo.getFirstResult())
                .setMaxResults(pageInfo.getMaxResults())
                .getResultList();
        return new Page<Account>(pageInfo.getFirstResult(), pageInfo.getMaxResults(), (int) resultCount, resultList);
    }
    // end::findByFilter[]

    /** LIKE predicate matching the account's name or email against the parameter. */
    private Predicate searchTextPredicate(CriteriaBuilder cb, Root<Account> root,
                                          ParameterExpression<String> param) {
        return cb.or(
                cb.like(root.get("name").as(String.class), param),
                cb.like(root.get("email").as(String.class), param));
    }
}
| og0815/granditeds.javafx.sample | server-model/src/main/java/org/graniteds/tutorial/data/services/AccountSearch.java | Java | apache-2.0 | 3,331 |
package jim.android.mainFrame;
import android.content.Context;
import android.util.Log;
import android.widget.ImageView;
import com.android.volley.Response;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.Objects;
/**
* Created by Jim Huang on 2015/8/25.
*/
// Stub Volley response listener for the home screen; response handling is
// not implemented yet.
class Home01 implements Response.Listener<String> {
    // NOTE(review): currently unused, but its initializer fetches the
    // application singleton - confirm before removing.
    private MyApplication application=MyApplication.getInstance();
    @Override
    public void onResponse(String o) {
        // TODO: handle the network response (intentionally empty for now).
    }
}
| huangjim/MyViewPager | app/src/main/java/jim/android/mainFrame/Home01.java | Java | apache-2.0 | 503 |
<ul>
<li class="block">
<a href="/ph/egpc/api/sendMessage" id="blocksendMessage">Send a Message to people</a><br/>
<div class="fss">
url : /ph/egpc/api/sendMessage<br/>
method type : POST <br/>
params : <br/>
message : <textarea name="sendMessagemsg" id="sendMessagemsg"></textarea> <br/>
email(s) : <textarea type="text" name="sendMessageemail" id="sendMessageemail">egpc@egpc.com</textarea><br/>
séparé par des virgules<br/>
<a href="javascript:sendMessage()">Send it</a><br/>
<select id="sendMessagePeople">
<option></option>
<?php
// Populate the dropdown with every group name stored in MongoDB
// (the catch-all regex on "type" matches all groups).
$groups = Yii::app()->mongodb->groups->find( array( "type" => new MongoRegex("/.*/") ));
foreach ($groups as $value) {
// NOTE(review): $value["name"] is echoed unescaped; confirm group names
// cannot contain HTML metacharacters (potential XSS otherwise).
echo '<option value="'.$value["name"].'">'.$value["name"].'</option>';
}
?>
</select>
<a href="javascript:setPeople()">Get People </a><br/>
<div id="sendMessageResult" class="result fss"></div>
<script>
function sendMessage(){
    // Collect the recipients and message body and POST them to the
    // sendMessage endpoint; ajaxPost renders the server reply into
    // #sendMessageResult.
    var params = {  // fixed: 'params' was an implicit global
        "email" : $("#sendMessageemail").val() ,
        "msg" : $("#sendMessagemsg").val()
    };
    ajaxPost("sendMessageResult", baseUrl+'/egpc/api/sendMessage',params);
}
function setPeople(){
    // Replace the recipient list with the members of the selected group.
    $("#sendMessageemail").val("");
    $.ajax({
        url:'/ph/egpc/api/getPeopleBy',
        type:"POST",
        data:{ "groupname":$("#sendMessagePeople").val()},
        dataType : "json", // fixed: the jQuery option is 'dataType', not 'datatype'
        success:function(data) {
            // Build a comma separated list of the returned email addresses.
            var list = ""; // fixed: 'list' was an implicit global
            $.each(data,function(k,v){
                list += (list == "") ? v.email : ","+v.email ;
            });
            mylog.log(list);
            $("#sendMessageemail").val(list);
        },
        error:function (xhr, ajaxOptions, thrownError){
            // fixed: the previous handler referenced the undefined variables
            // 'id' and 'data', raising a ReferenceError on any failure.
            mylog.log(thrownError);
            $("#sendMessageResult").html("Error while loading people");
        }
    });
}
</script>
</div>
</li>
</ul> | pixelhumain/communecter | views/api/communications.php | PHP | apache-2.0 | 1,822 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.service;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.MockLogAppender;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.cluster.service.ClusterServiceUtils.setState;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
public class ClusterServiceTests extends ESTestCase {
static ThreadPool threadPool;
TimedClusterService clusterService;
    // Shared thread pool for all tests in this class; created once before
    // the first test and torn down after the last.
    @BeforeClass
    public static void createThreadPool() {
        threadPool = new ThreadPool(ClusterServiceTests.class.getName());
    }

    @AfterClass
    public static void stopThreadPool() {
        if (threadPool != null) {
            threadPool.shutdownNow();
            threadPool = null;
        }
    }
    // Each test gets a fresh cluster service that is elected master by
    // default; tests needing a non-master create their own instance.
    @Before
    public void setUp() throws Exception {
        super.setUp();
        clusterService = createTimedClusterService(true);
    }

    @After
    public void tearDown() throws Exception {
        clusterService.close();
        super.tearDown();
    }
    /**
     * Builds a started {@link TimedClusterService} with a single local node,
     * no-op node connection handling and a no-op state publisher.
     *
     * @param makeMaster whether the local node should be set as master in the
     *                   initial cluster state
     */
    TimedClusterService createTimedClusterService(boolean makeMaster) throws InterruptedException {
        TimedClusterService timedClusterService = new TimedClusterService(Settings.EMPTY, null,
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            threadPool, new ClusterName("ClusterServiceTests"));
        timedClusterService.setLocalNode(new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, emptyMap(),
            emptySet(), Version.CURRENT));
        // Connection management is irrelevant for these tests - stub it out.
        timedClusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) {
            @Override
            public void connectToAddedNodes(ClusterChangedEvent event) {
                // skip
            }
            @Override
            public void disconnectFromRemovedNodes(ClusterChangedEvent event) {
                // skip
            }
        });
        // Publishing is a no-op; state changes are applied locally only.
        timedClusterService.setClusterStatePublisher((event, ackListener) -> {
        });
        timedClusterService.start();
        ClusterState state = timedClusterService.state();
        final DiscoveryNodes nodes = state.nodes();
        final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes)
            .masterNodeId(makeMaster ? nodes.getLocalNodeId() : null);
        state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
            .nodes(nodesBuilder).build();
        setState(timedClusterService, state);
        return timedClusterService;
    }
    /**
     * Verifies that a queued update task whose timeout elapses before it is
     * executed is failed and never run: task1 blocks the (single) update
     * thread, task2 times out while waiting behind it, then task3 confirms
     * the queue drains without task2's execute() ever being called.
     */
    public void testTimeoutUpdateTask() throws Exception {
        // task1: occupies the update thread until we count the latch down.
        final CountDownLatch block = new CountDownLatch(1);
        clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
                try {
                    block.await();
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                return currentState;
            }
            @Override
            public void onFailure(String source, Throwable t) {
                throw new RuntimeException(t);
            }
        });
        // task2: 2ms timeout must expire while it waits behind task1;
        // onFailure (not execute) is expected.
        final CountDownLatch timedOut = new CountDownLatch(1);
        final AtomicBoolean executeCalled = new AtomicBoolean();
        clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
            @Override
            public TimeValue timeout() {
                return TimeValue.timeValueMillis(2);
            }
            @Override
            public void onFailure(String source, Throwable t) {
                timedOut.countDown();
            }
            @Override
            public ClusterState execute(ClusterState currentState) {
                executeCalled.set(true);
                return currentState;
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            }
        });
        timedOut.await();
        // Release task1 so the queue can drain.
        block.countDown();
        final CountDownLatch allProcessed = new CountDownLatch(1);
        clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
            @Override
            public void onFailure(String source, Throwable t) {
                throw new RuntimeException(t);
            }
            @Override
            public ClusterState execute(ClusterState currentState) {
                allProcessed.countDown();
                return currentState;
            }
        });
        allProcessed.await(); // executed another task to double check that execute on the timed out update task is not called...
        assertThat(executeCalled.get(), equalTo(false));
    }
    /**
     * Verifies master-awareness: a default task submitted to a non-master
     * node fails via onFailure, while a task overriding runOnlyOnMaster()
     * to false executes normally.
     */
    public void testMasterAwareExecution() throws Exception {
        ClusterService nonMaster = createTimedClusterService(false);
        // First task uses the default runOnlyOnMaster() == true, so on a
        // non-master it must be routed to onFailure.
        final boolean[] taskFailed = {false};
        final CountDownLatch latch1 = new CountDownLatch(1);
        nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) throws Exception {
                latch1.countDown();
                return currentState;
            }
            @Override
            public void onFailure(String source, Throwable t) {
                taskFailed[0] = true;
                latch1.countDown();
            }
        });
        latch1.await();
        assertTrue("cluster state update task was executed on a non-master", taskFailed[0]);
        // Pre-set to true; only the second task's execute() resets it to
        // false, so the final assertFalse proves execute() actually ran.
        taskFailed[0] = true;
        final CountDownLatch latch2 = new CountDownLatch(1);
        nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
            @Override
            public boolean runOnlyOnMaster() {
                return false;
            }
            @Override
            public ClusterState execute(ClusterState currentState) throws Exception {
                taskFailed[0] = false;
                latch2.countDown();
                return currentState;
            }
            @Override
            public void onFailure(String source, Throwable t) {
                taskFailed[0] = true;
                latch2.countDown();
            }
        });
        latch2.await();
        assertFalse("non-master cluster state update task was not executed", taskFailed[0]);
        nonMaster.close();
    }
    /*
     * Test that a listener throwing an exception while handling a
     * notification (clusterStateProcessed) does not prevent the publication
     * notification (clusterStatePublished) from reaching the executor.
     */
    public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        AtomicBoolean published = new AtomicBoolean();
        clusterService.submitStateUpdateTask(
            "testClusterStateTaskListenerThrowingExceptionIsOkay",
            new Object(),
            ClusterStateTaskConfig.build(Priority.NORMAL),
            new ClusterStateTaskExecutor<Object>() {
                @Override
                public boolean runOnlyOnMaster() {
                    return false;
                }
                @Override
                public BatchResult<Object> execute(ClusterState currentState, List<Object> tasks) throws Exception {
                    // build a new state so that a publication actually happens
                    ClusterState newClusterState = ClusterState.builder(currentState).build();
                    return BatchResult.builder().successes(tasks).build(newClusterState);
                }
                @Override
                public void clusterStatePublished(ClusterState newClusterState) {
                    // must still be invoked even though the listener below throws
                    published.set(true);
                    latch.countDown();
                }
            },
            new ClusterStateTaskListener() {
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    // deliberately misbehaving listener
                    throw new IllegalStateException(source);
                }
                @Override
                public void onFailure(String source, Throwable t) {
                }
            }
        );
        latch.await();
        assertTrue(published.get());
    }
    // test that for a single thread, tasks are executed in the order
    // that they are submitted
    public void testClusterStateUpdateTasksAreExecutedInOrder() throws BrokenBarrierException, InterruptedException {
        // Executor that records every Integer task it receives, in execution order.
        class TaskExecutor implements ClusterStateTaskExecutor<Integer> {
            List<Integer> tasks = new ArrayList<>();
            @Override
            public BatchResult<Integer> execute(ClusterState currentState, List<Integer> tasks) throws Exception {
                this.tasks.addAll(tasks);
                return BatchResult.<Integer>builder().successes(tasks).build(ClusterState.builder(currentState).build());
            }
            @Override
            public boolean runOnlyOnMaster() {
                return false;
            }
        }
        // one dedicated executor per submitting thread
        int numberOfThreads = randomIntBetween(2, 8);
        TaskExecutor[] executors = new TaskExecutor[numberOfThreads];
        for (int i = 0; i < numberOfThreads; i++) {
            executors[i] = new TaskExecutor();
        }
        int tasksSubmittedPerThread = randomIntBetween(2, 1024);
        CopyOnWriteArrayList<Tuple<String, Throwable>> failures = new CopyOnWriteArrayList<>();
        // counts down once per task, whether it succeeded or failed
        CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
        ClusterStateTaskListener listener = new ClusterStateTaskListener() {
            @Override
            public void onFailure(String source, Throwable t) {
                logger.error("unexpected failure: [{}]", t, source);
                failures.add(new Tuple<>(source, t));
                updateLatch.countDown();
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                updateLatch.countDown();
            }
        };
        // +1 party for the test thread itself, used as start/finish gates below
        CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
        for (int i = 0; i < numberOfThreads; i++) {
            final int index = i;
            Thread thread = new Thread(() -> {
                try {
                    barrier.await();
                    for (int j = 0; j < tasksSubmittedPerThread; j++) {
                        // task payload j doubles as the expected execution index
                        clusterService.submitStateUpdateTask("[" + index + "][" + j + "]", j,
                            ClusterStateTaskConfig.build(randomFrom(Priority.values())), executors[index], listener);
                    }
                    barrier.await();
                } catch (InterruptedException | BrokenBarrierException e) {
                    throw new AssertionError(e);
                }
            });
            thread.start();
        }
        // wait for all threads to be ready
        barrier.await();
        // wait for all threads to finish
        barrier.await();
        updateLatch.await();
        assertThat(failures, empty());
        // per-thread submission order must be preserved regardless of priority mixing
        for (int i = 0; i < numberOfThreads; i++) {
            assertEquals(tasksSubmittedPerThread, executors[i].tasks.size());
            for (int j = 0; j < tasksSubmittedPerThread; j++) {
                assertNotNull(executors[i].tasks.get(j));
                assertEquals("cluster state update task executed out of order", j, (int) executors[i].tasks.get(j));
            }
        }
    }
    /**
     * Verifies batched execution of cluster state tasks: each task runs exactly once,
     * each executor sees exactly the tasks assigned to it, the number of publication
     * callbacks matches the number of state-changing batches, and every submission
     * produces exactly one clusterStateProcessed notification.
     */
    public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
        AtomicInteger counter = new AtomicInteger();
        // A task that may be executed at most once; a second execute() throws.
        class Task {
            private AtomicBoolean state = new AtomicBoolean();
            public void execute() {
                if (!state.compareAndSet(false, true)) {
                    throw new IllegalStateException();
                } else {
                    counter.incrementAndGet();
                }
            }
        }
        int numberOfThreads = randomIntBetween(2, 8);
        int tasksSubmittedPerThread = randomIntBetween(1, 1024);
        int numberOfExecutors = Math.max(1, numberOfThreads / 4);
        // one permit per executor; acquired when a batch changes state, released on publish
        final Semaphore semaphore = new Semaphore(numberOfExecutors);
        class TaskExecutor implements ClusterStateTaskExecutor<Task> {
            private AtomicInteger counter = new AtomicInteger();
            private AtomicInteger batches = new AtomicInteger();
            private AtomicInteger published = new AtomicInteger();
            @Override
            public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
                tasks.forEach(task -> task.execute());
                counter.addAndGet(tasks.size());
                ClusterState maybeUpdatedClusterState = currentState;
                if (randomBoolean()) {
                    // randomly produce a new state, which triggers a publication
                    maybeUpdatedClusterState = ClusterState.builder(currentState).build();
                    batches.incrementAndGet();
                    semaphore.acquire();
                }
                return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
            }
            @Override
            public boolean runOnlyOnMaster() {
                return false;
            }
            @Override
            public void clusterStatePublished(ClusterState newClusterState) {
                published.incrementAndGet();
                semaphore.release();
            }
        }
        // per-source counts of clusterStateProcessed callbacks (source == thread name)
        ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>();
        CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
        ClusterStateTaskListener listener = new ClusterStateTaskListener() {
            @Override
            public void onFailure(String source, Throwable t) {
                assert false;
            }
            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                counters.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
                updateLatch.countDown();
            }
        };
        List<TaskExecutor> executors = new ArrayList<>();
        for (int i = 0; i < numberOfExecutors; i++) {
            executors.add(new TaskExecutor());
        }
        // randomly assign tasks to executors
        List<TaskExecutor> assignments = new ArrayList<>();
        for (int i = 0; i < numberOfThreads; i++) {
            for (int j = 0; j < tasksSubmittedPerThread; j++) {
                assignments.add(randomFrom(executors));
            }
        }
        // expected task count per executor, derived from the random assignment above
        Map<TaskExecutor, Integer> counts = new HashMap<>();
        for (TaskExecutor executor : assignments) {
            counts.merge(executor, 1, (previous, one) -> previous + one);
        }
        // +1 party for the test thread itself, used as start/finish gates below
        CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
        for (int i = 0; i < numberOfThreads; i++) {
            final int index = i;
            Thread thread = new Thread(() -> {
                try {
                    barrier.await();
                    for (int j = 0; j < tasksSubmittedPerThread; j++) {
                        ClusterStateTaskExecutor<Task> executor = assignments.get(index * tasksSubmittedPerThread + j);
                        clusterService.submitStateUpdateTask(
                            Thread.currentThread().getName(),
                            new Task(),
                            ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                            executor,
                            listener);
                    }
                    barrier.await();
                } catch (BrokenBarrierException | InterruptedException e) {
                    throw new AssertionError(e);
                }
            });
            thread.start();
        }
        // wait for all threads to be ready
        barrier.await();
        // wait for all threads to finish
        barrier.await();
        // wait until all the cluster state updates have been processed
        updateLatch.await();
        // and until all of the publication callbacks have completed
        semaphore.acquire(numberOfExecutors);
        // assert the number of executed tasks is correct
        assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get());
        // assert each executor executed the correct number of tasks
        for (TaskExecutor executor : executors) {
            if (counts.containsKey(executor)) {
                assertEquals((int) counts.get(executor), executor.counter.get());
                assertEquals(executor.batches.get(), executor.published.get());
            }
        }
        // assert the correct number of clusterStateProcessed events were triggered
        for (Map.Entry<String, AtomicInteger> entry : counters.entrySet()) {
            assertEquals(entry.getValue().get(), tasksSubmittedPerThread);
        }
    }
/**
* Note, this test can only work as long as we have a single thread executor executing the state update tasks!
*/
public void testPrioritizedTasks() throws Exception {
Settings settings = settingsBuilder()
.put("discovery.type", "local")
.build();
BlockingTask block = new BlockingTask(Priority.IMMEDIATE);
clusterService.submitStateUpdateTask("test", block);
int taskCount = randomIntBetween(5, 20);
Priority[] priorities = Priority.values();
// will hold all the tasks in the order in which they were executed
List<PrioritizedTask> tasks = new ArrayList<>(taskCount);
CountDownLatch latch = new CountDownLatch(taskCount);
for (int i = 0; i < taskCount; i++) {
Priority priority = priorities[randomIntBetween(0, priorities.length - 1)];
clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks));
}
block.release();
latch.await();
Priority prevPriority = null;
for (PrioritizedTask task : tasks) {
if (prevPriority == null) {
prevPriority = task.priority();
} else {
assertThat(task.priority().sameOrAfter(prevPriority), is(true));
}
}
}
    /**
     * Verifies that cluster state update processing is logged with the expected
     * messages and durations. Task durations are made deterministic by advancing
     * {@code clusterService.currentTimeOverride} inside each task.
     */
    @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level
    public void testClusterStateUpdateLogging() throws Exception {
        MockLogAppender mockAppender = new MockLogAppender();
        // test1: no state change -> DEBUG "no change" message with a 1s duration
        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG,
            "*processing [test1]: took [1s] no change in cluster_state"));
        // test2: failing task -> TRACE failure message with a 2s duration
        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE,
            "*failed to execute cluster state update in [2s]*"));
        // test3: state change -> DEBUG "done applying" message with a 3s duration
        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG,
            "*processing [test3]: took [3s] done applying updated cluster_state (version: *, uuid: *)"));
        Logger rootLogger = Logger.getRootLogger();
        rootLogger.addAppender(mockAppender);
        try {
            final CountDownLatch latch = new CountDownLatch(4);
            // take control of the clock so task durations are exact
            clusterService.currentTimeOverride = System.nanoTime();
            clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) throws Exception {
                    clusterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos();
                    return currentState;
                }
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    latch.countDown();
                }
                @Override
                public void onFailure(String source, Throwable t) {
                    fail();
                }
            });
            clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    clusterService.currentTimeOverride += TimeValue.timeValueSeconds(2).nanos();
                    throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
                }
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    fail();
                }
                @Override
                public void onFailure(String source, Throwable t) {
                    latch.countDown();
                }
            });
            clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    clusterService.currentTimeOverride += TimeValue.timeValueSeconds(3).nanos();
                    // increment the version so an actual state change is applied and logged
                    return ClusterState.builder(currentState).incrementVersion().build();
                }
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    latch.countDown();
                }
                @Override
                public void onFailure(String source, Throwable t) {
                    fail();
                }
            });
            // Additional update task to make sure all previous logging made it to the logger
            // We don't check logging for this on since there is no guarantee that it will occur before our check
            clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    return currentState;
                }
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    latch.countDown();
                }
                @Override
                public void onFailure(String source, Throwable t) {
                    fail();
                }
            });
            latch.await();
        } finally {
            rootLogger.removeAppender(mockAppender);
        }
        mockAppender.assertAllExpectationsMatched();
    }
    /**
     * Verifies the slow-task WARN logging: tasks exceeding the warn threshold are
     * reported with their (clock-overridden) durations, while a fast task is not.
     */
    @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level
    public void testLongClusterStateUpdateLogging() throws Exception {
        MockLogAppender mockAppender = new MockLogAppender();
        // test1 runs for only 1s and must stay below the warn threshold
        mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low",
            "cluster.service", Level.WARN, "*cluster state update task [test1] took [*] above the warn threshold of *"));
        // test2..test4 each exceed the threshold and must be warned about
        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN,
            "*cluster state update task [test2] took [32s] above the warn threshold of *"));
        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN,
            "*cluster state update task [test3] took [33s] above the warn threshold of *"));
        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN,
            "*cluster state update task [test4] took [34s] above the warn threshold of *"));
        Logger rootLogger = Logger.getRootLogger();
        rootLogger.addAppender(mockAppender);
        try {
            final CountDownLatch latch = new CountDownLatch(5);
            final CountDownLatch processedFirstTask = new CountDownLatch(1);
            // take control of the clock so task durations are exact
            clusterService.currentTimeOverride = System.nanoTime();
            clusterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) throws Exception {
                    clusterService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos();
                    return currentState;
                }
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    latch.countDown();
                    processedFirstTask.countDown();
                }
                @Override
                public void onFailure(String source, Throwable t) {
                    fail();
                }
            });
            // make sure the fast task is fully processed before the slow ones start
            processedFirstTask.await();
            clusterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) throws Exception {
                    clusterService.currentTimeOverride += TimeValue.timeValueSeconds(32).nanos();
                    throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
                }
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    fail();
                }
                @Override
                public void onFailure(String source, Throwable t) {
                    latch.countDown();
                }
            });
            clusterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) throws Exception {
                    clusterService.currentTimeOverride += TimeValue.timeValueSeconds(33).nanos();
                    // increment the version so an actual state change is applied
                    return ClusterState.builder(currentState).incrementVersion().build();
                }
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    latch.countDown();
                }
                @Override
                public void onFailure(String source, Throwable t) {
                    fail();
                }
            });
            clusterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) throws Exception {
                    clusterService.currentTimeOverride += TimeValue.timeValueSeconds(34).nanos();
                    return currentState;
                }
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    latch.countDown();
                }
                @Override
                public void onFailure(String source, Throwable t) {
                    fail();
                }
            });
            // Additional update task to make sure all previous logging made it to the logger
            // We don't check logging for this on since there is no guarantee that it will occur before our check
            clusterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() {
                @Override
                public ClusterState execute(ClusterState currentState) {
                    return currentState;
                }
                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    latch.countDown();
                }
                @Override
                public void onFailure(String source, Throwable t) {
                    fail();
                }
            });
            latch.await();
        } finally {
            rootLogger.removeAppender(mockAppender);
        }
        mockAppender.assertAllExpectationsMatched();
    }
private static class BlockingTask extends ClusterStateUpdateTask {
private final CountDownLatch latch = new CountDownLatch(1);
public BlockingTask(Priority priority) {
super(priority);
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
latch.await();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
}
public void release() {
latch.countDown();
}
}
private static class PrioritizedTask extends ClusterStateUpdateTask {
private final CountDownLatch latch;
private final List<PrioritizedTask> tasks;
private PrioritizedTask(Priority priority, CountDownLatch latch, List<PrioritizedTask> tasks) {
super(priority);
this.latch = latch;
this.tasks = tasks;
}
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
tasks.add(this);
latch.countDown();
return currentState;
}
@Override
public void onFailure(String source, Throwable t) {
latch.countDown();
}
}
static class TimedClusterService extends ClusterService {
public volatile Long currentTimeOverride = null;
public TimedClusterService(Settings settings, OperationRouting operationRouting, ClusterSettings clusterSettings,
ThreadPool threadPool, ClusterName clusterName) {
super(settings, operationRouting, clusterSettings, threadPool, clusterName);
}
@Override
protected long currentTimeInNanos() {
if (currentTimeOverride != null) {
return currentTimeOverride;
}
return super.currentTimeInNanos();
}
}
}
| clintongormley/elasticsearch | core/src/test/java/org/elasticsearch/cluster/service/ClusterServiceTests.java | Java | apache-2.0 | 33,027 |
package example.service;
import example.repo.Customer1112Repository;
import org.springframework.stereotype.Service;
/**
 * Example Spring service that depends on {@link Customer1112Repository} via
 * constructor injection; the repository reference is intentionally not stored.
 * NOTE(review): presumably part of a generated set of services used to exercise
 * deferred repository bootstrapping — confirm against the surrounding example.
 */
@Service
public class Customer1112Service {
	public Customer1112Service(Customer1112Repository repo) {}
}
| spring-projects/spring-data-examples | jpa/deferred/src/main/java/example/service/Customer1112Service.java | Java | apache-2.0 | 225 |
/*
* Copyright 2011 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.devtools.j2objc;
import com.google.devtools.j2objc.util.ErrorUtil;
import com.google.devtools.j2objc.util.JdtParser;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.Arrays;
import java.util.jar.JarEntry;
import java.util.jar.JarInputStream;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Translation tool for generating Objective C source files from Java sources.
* This tool is not intended to be a general purpose converter, but instead is
* focused on what is needed for business logic libraries written in Java to
* run natively on iOS. In particular, no attempt is made to translate Java
* UI framework code to any iOS frameworks.
*
* @author Tom Ball
*/
public class J2ObjC {

  static {
    // Always enable assertions in translator.
    ClassLoader loader = J2ObjC.class.getClassLoader();
    if (loader != null) {
      loader.setPackageAssertionStatus(J2ObjC.class.getPackage().getName(), true);
    }
  }

  private static final Logger logger = Logger.getLogger(J2ObjC.class.getName());

  /** Deletes temporary files and exits with the accumulated error count as status. */
  private static void exit() {
    Options.deleteTemporaryDirectory();
    System.exit(ErrorUtil.errorCount());
  }

  /** Returns the configured file header, formatted with the given source file name. */
  public static String getFileHeader(String sourceFileName) {
    return String.format(Options.getFileHeader(), sourceFileName);
  }

  /** Class loader that can incrementally add plugin jar files to its search path. */
  private static class JarFileLoader extends URLClassLoader {
    public JarFileLoader() {
      super(new URL[]{});
    }

    public void addJarFile(String path) throws MalformedURLException {
      // Build the jar URL from a File URI so characters that are illegal in URLs
      // (e.g. spaces) are escaped; the original "jar:file://" + path concatenation
      // produced malformed URLs for such paths.
      String urlPath = "jar:" + new File(path).toURI().toString() + "!/";
      addURL(new URL(urlPath));
    }
  }

  /**
   * Scans the given plugin jars for {@link Plugin} implementations, instantiates
   * each via its no-arg constructor, initializes it with the plugin option string,
   * and registers it with {@link Options}.
   *
   * @param pluginPaths paths to plugin jar files
   * @param pluginOptionString raw option string forwarded to each plugin
   * @throws IOException if a jar cannot be read or a plugin fails to initialize
   */
  private static void initPlugins(String[] pluginPaths, String pluginOptionString)
      throws IOException {
    @SuppressWarnings("resource")  // loader stays open: plugin classes remain in use
    JarFileLoader classLoader = new JarFileLoader();
    for (String path : pluginPaths) {
      if (path.endsWith(".jar")) {
        // try-with-resources replaces the original manual close() in a finally block
        try (JarInputStream jarStream = new JarInputStream(new FileInputStream(path))) {
          classLoader.addJarFile(new File(path).getAbsolutePath());
          JarEntry entry;
          while ((entry = jarStream.getNextJarEntry()) != null) {
            String entryName = entry.getName();
            if (!entryName.endsWith(".class")) {
              continue;
            }
            // Convert "com/foo/Bar.class" to "com.foo.Bar". The original used
            // replaceAll("/", "\\."), which needlessly routed a fixed-character
            // substitution through the regex engine.
            String className =
                entryName.substring(0, entryName.length() - ".class".length()).replace('/', '.');
            try {
              Class<?> clazz = classLoader.loadClass(className);
              if (Plugin.class.isAssignableFrom(clazz)) {
                Constructor<?> cons = clazz.getDeclaredConstructor();
                Plugin plugin = (Plugin) cons.newInstance();
                plugin.initPlugin(pluginOptionString);
                Options.getPlugins().add(plugin);
              }
            } catch (Exception e) {
              throw new IOException("plugin exception: ", e);
            }
          }
        }
      } else {
        logger.warning("Don't understand plugin path entry: " + path);
      }
    }
  }

  /** Logs the exception and terminates the process with a failure status. */
  public static void error(Exception e) {
    logger.log(Level.SEVERE, "Exiting due to exception", e);
    System.exit(1);
  }

  /** Exits with a failure status if any errors (or, optionally, warnings) occurred. */
  private static void checkErrors() {
    int errors = ErrorUtil.errorCount();
    if (Options.treatWarningsAsErrors()) {
      errors += ErrorUtil.warningCount();
    }
    if (errors > 0) {
      System.exit(1);
    }
  }

  /** Creates a JDT parser configured from the current {@link Options}. */
  private static JdtParser createParser() {
    JdtParser parser = new JdtParser();
    parser.addClasspathEntries(Options.getClassPathEntries());
    parser.addClasspathEntries(Options.getBootClasspath());
    parser.addSourcepathEntries(Options.getSourcePathEntries());
    parser.setIncludeRunningVMBootclasspath(false);
    parser.setEncoding(Options.fileEncoding());
    parser.setIgnoreMissingImports(Options.ignoreMissingImports());
    parser.setEnableDocComments(Options.docCommentsEnabled());
    return parser;
  }

  /**
   * Entry point for tool.
   *
   * @param args command-line arguments: flags and source file names
   */
  public static void main(String[] args) {
    if (args.length == 0) {
      Options.help(true);
    }
    String[] files = null;
    try {
      files = Options.load(args);
      if (files.length == 0) {
        Options.usage("no source files");
      }
    } catch (IOException e) {
      ErrorUtil.error(e.getMessage());
      System.exit(1);
    }
    try {
      initPlugins(Options.getPluginPathEntries(), Options.getPluginOptionString());
    } catch (IOException e) {
      error(e);
    }
    JdtParser parser = createParser();
    // Remove dead-code first, so modified file paths are replaced in the
    // translation list.
    DeadCodeProcessor deadCodeProcessor = DeadCodeProcessor.create(parser);
    if (deadCodeProcessor != null) {
      deadCodeProcessor.processFiles(Arrays.asList(files));
      checkErrors();
      files = deadCodeProcessor.postProcess().toArray(new String[0]);
    }
    TranslationProcessor translationProcessor = new TranslationProcessor(parser);
    translationProcessor.processFiles(Arrays.asList(files));
    translationProcessor.postProcess();
    checkErrors();
    exit();
  }
}
| xuvw/j2objc | translator/src/main/java/com/google/devtools/j2objc/J2ObjC.java | Java | apache-2.0 | 6,094 |
/*
* Copyright (c) 2011-2015, Peter Abeles. All Rights Reserved.
*
* This file is part of BoofCV (http://boofcv.org).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package boofcv.alg.interpolate;
import boofcv.alg.interpolate.array.PolynomialNevilleFixed_F32;
import boofcv.core.image.border.ImageBorder;
import boofcv.struct.image.ImageSingleBand;
/**
* <p>
* Polynomial interpolation using {@link PolynomialNevilleFixed_F32 Neville's} algorithm.
* First interpolation is performed along the horizontal axis, centered at the specified x-coordinate.
* Then a second pass is done along the vertical axis using the output from the first pass.
* </p>
*
* <p>
* The code is unoptimized and the algorithm is relatively expensive.
* </p>
*
* @author Peter Abeles
*/
/**
 * Base class for two-pass polynomial (Neville) interpolation: one pass along the
 * horizontal axis centered at the requested x-coordinate, then a second pass
 * along the vertical axis over the first pass's results.
 *
 * The protected fields form the contract with concrete subclasses and are kept
 * exactly as declared.
 */
public abstract class PolynomialPixel<T extends ImageSingleBand> implements InterpolatePixelS<T> {

    // for reading pixels outside the image border
    protected ImageBorder<T> border;
    // the image that is being interpolated
    protected T image;

    protected int M;
    // if even need to add one to initial coordinate to make sure
    // the point interpolated is bounded inside the interpolation points
    protected int offM;

    // temporary arrays used in the interpolation
    protected float horiz[];
    protected float vert[];

    // the minimum and maximum pixel intensity values allowed
    protected float min;
    protected float max;

    protected PolynomialNevilleFixed_F32 interp1D;

    public PolynomialPixel(int maxDegree, float min, float max) {
        this.M = maxDegree;
        this.min = min;
        this.max = max;
        horiz = new float[maxDegree];
        vert = new float[maxDegree];
        // even-sized kernels are shifted by one so the sample point stays inside
        // the span of interpolation points
        offM = (maxDegree % 2 == 0) ? 1 : 0;
        interp1D = new PolynomialNevilleFixed_F32(maxDegree);
    }

    @Override
    public void setBorder(ImageBorder<T> border) {
        this.border = border;
    }

    @Override
    public void setImage(T image) {
        if (border != null) {
            border.setImage(image);
        }
        this.image = image;
    }

    @Override
    public T getImage() {
        return image;
    }

    @Override
    public boolean isInFastBounds(float x, float y) {
        // the full MxM sample window around (x,y) must lie inside the image
        // (M/2 is intentionally integer division)
        float left = x - M / 2 + offM;
        float top = y - M / 2 + offM;
        float right = left + M;
        float bottom = top + M;
        return left >= 0 && top >= 0 && right <= image.width - 1 && bottom <= image.height - 1;
    }

    @Override
    public int getFastBorderX() {
        return M;
    }

    @Override
    public int getFastBorderY() {
        return M;
    }

    @Override
    public ImageBorder<T> getBorder() {
        return border;
    }
}
| pacozaa/BoofCV | main/ip/src/boofcv/alg/interpolate/PolynomialPixel.java | Java | apache-2.0 | 2,970 |
import os
import logging
import time
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from googleapiclient import discovery, errors
logger = logging.getLogger(__name__)

# Module-level Google API clients, constructed once at import time.
# NOTE(review): discovery.build() may perform credential/discovery work at
# import — confirm this is acceptable for all importers of this module.
crm = discovery.build("cloudresourcemanager", "v1")
iam = discovery.build("iam", "v1")
compute = discovery.build("compute", "v1")

# Version tag embedded into the names of resources created by the autoscaler.
VERSION = "v1"
# Common prefix for all ray-autoscaler-owned resource names.
RAY = "ray-autoscaler"
DEFAULT_SERVICE_ACCOUNT_ID = RAY + "-sa-" + VERSION
SERVICE_ACCOUNT_EMAIL_TEMPLATE = (
    "{account_id}@{project_id}.iam.gserviceaccount.com")
DEFAULT_SERVICE_ACCOUNT_CONFIG = {
    "displayName": "Ray Autoscaler Service Account ({})".format(VERSION),
}
# IAM roles granted to the autoscaler service account; the head node needs
# storage access plus the ability to launch further compute instances.
DEFAULT_SERVICE_ACCOUNT_ROLES = ("roles/storage.objectAdmin",
                                 "roles/compute.admin")
# Polling budget for long-running GCP operations: at most MAX_POLLS attempts,
# POLL_INTERVAL seconds apart.
MAX_POLLS = 12
POLL_INTERVAL = 5
def wait_for_crm_operation(operation):
    """Poll for cloud resource manager operation until finished.

    Polls at most MAX_POLLS times, sleeping POLL_INTERVAL seconds between
    attempts. Raises if the operation reports an error; otherwise returns the
    last operation resource fetched (which may still be unfinished if the
    poll budget was exhausted).
    """
    logger.info("wait_for_crm_operation: "
                "Waiting for operation {} to finish...".format(operation))
    for _ in range(MAX_POLLS):
        result = crm.operations().get(name=operation["name"]).execute()
        if "error" in result:
            raise Exception(result["error"])
        # CRM operations expose completion via a boolean "done" field that is
        # absent while the operation is still running.
        if result.get("done"):
            logger.info("wait_for_crm_operation: Operation done.")
            break
        time.sleep(POLL_INTERVAL)
    return result
def wait_for_compute_global_operation(project_name, operation):
    """Poll for global compute operation until finished.

    Same polling discipline as wait_for_crm_operation, but Compute Engine
    signals completion via status == "DONE" rather than a "done" flag.
    """
    op_name = operation["name"]
    logger.info("wait_for_compute_global_operation: "
                "Waiting for operation {} to finish...".format(
                    op_name))
    for _ in range(MAX_POLLS):
        result = (compute.globalOperations()
                  .get(project=project_name, operation=op_name)
                  .execute())
        if "error" in result:
            raise Exception(result["error"])
        if result["status"] == "DONE":
            logger.info("wait_for_compute_global_operation: "
                        "Operation done.")
            break
        time.sleep(POLL_INTERVAL)
    return result
def key_pair_name(i, region, project_id, ssh_user):
    """Returns the ith default gcp_key_pair_name."""
    # BUG FIX: the original template had only four "{}" placeholders for five
    # arguments, so str.format silently dropped ``i`` and every "ith" name was
    # identical — defeating the retry loop in _configure_key_pair that probes
    # successive key names.
    key_name = "{}_gcp_{}_{}_{}_{}".format(RAY, region, project_id, ssh_user,
                                           i)
    return key_name
def key_pair_paths(key_name):
    """Returns public and private key paths for a given key_name.

    Both paths live under the current user's ~/.ssh directory: the public key
    as ``<key_name>.pub`` and the private key as ``<key_name>.pem``.
    """
    ssh_dir = os.path.expanduser("~/.ssh")
    public_key_path = os.path.join(ssh_dir, "{}.pub".format(key_name))
    private_key_path = os.path.join(ssh_dir, "{}.pem".format(key_name))
    return public_key_path, private_key_path
def generate_rsa_key_pair():
    """Create public and private ssh-keys.

    Returns a (public, private) tuple of strings: the public key in OpenSSH
    format and the private key as unencrypted PEM (traditional OpenSSL).
    """
    rsa_key = rsa.generate_private_key(
        backend=default_backend(), public_exponent=65537, key_size=2048)

    openssh_public = rsa_key.public_key().public_bytes(
        serialization.Encoding.OpenSSH,
        serialization.PublicFormat.OpenSSH).decode("utf-8")

    # Unencrypted on purpose: the caller writes this to a 0600 file.
    pem_private = rsa_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption()).decode("utf-8")

    return openssh_public, pem_private
def bootstrap_gcp(config):
    """Fill in GCP-specific defaults on an autoscaler config.

    Runs the configuration steps in dependency order: project first (everything
    hangs off it), then the IAM role, ssh key pair, and subnet.
    """
    for configure in (_configure_project, _configure_iam_role,
                      _configure_key_pair, _configure_subnet):
        config = configure(config)
    return config
def _configure_project(config):
    """Setup a Google Cloud Platform Project.

    Google Compute Platform organizes all the resources, such as storage
    buckets, users, and instances under projects. This is different from
    aws ec2 where everything is global.

    Creates the project if it does not exist yet, and asserts that it ends up
    ACTIVE. Returns the config with provider.project_id normalized to the
    project's canonical id.
    """
    project_id = config["provider"].get("project_id")
    # BUG FIX: the original asserted on config["provider"]["project_id"] after
    # fetching with .get(), so a missing key raised KeyError instead of this
    # AssertionError with its explanatory message.
    assert project_id is not None, (
        "'project_id' must be set in the 'provider' section of the autoscaler"
        " config. Notice that the project id must be globally unique.")
    project = _get_project(project_id)

    if project is None:
        # Project not found, try creating it
        _create_project(project_id)
        project = _get_project(project_id)

    assert project is not None, "Failed to create project"
    assert project["lifecycleState"] == "ACTIVE", (
        "Project status needs to be ACTIVE, got {}".format(
            project["lifecycleState"]))

    config["provider"]["project_id"] = project["projectId"]

    return config
def _configure_iam_role(config):
    """Setup a gcp service account with IAM roles.

    Creates a gcp service acconut and binds IAM roles which allow it to control
    control storage/compute services. Specifically, the head node needs to have
    an IAM role that allows it to create further gce instances and store items
    in google cloud storage.

    TODO: Allow the name/id of the service account to be configured
    """
    project_id = config["provider"]["project_id"]
    email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(
        account_id=DEFAULT_SERVICE_ACCOUNT_ID, project_id=project_id)

    service_account = _get_service_account(email, config)
    if service_account is None:
        logger.info("_configure_iam_role: "
                    "Creating new service account {}".format(
                        DEFAULT_SERVICE_ACCOUNT_ID))
        service_account = _create_service_account(
            DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config)
    assert service_account is not None, "Failed to create service account"

    _add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)

    # Attach the account to the head node. The effective permissions are the
    # intersection of the OAuth scope below and the IAM roles bound above, so
    # the broad cloud-platform scope is still limited by the roles.
    config["head_node"]["serviceAccounts"] = [{
        "email": service_account["email"],
        "scopes": ["https://www.googleapis.com/auth/cloud-platform"]
    }]

    return config
def _configure_key_pair(config):
    """Configure SSH access, using an existing key pair if possible.

    Creates a project-wide ssh key that can be used to access all the instances
    unless explicitly prohibited by instance config.

    The ssh-keys created by ray are of format:
      [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]
    where:
      [USERNAME] is the user for the SSH key, specified in the config.
      [KEY_VALUE] is the public SSH key value.

    Returns the config with ``config["auth"]["ssh_private_key"]`` filled in.
    """
    # An explicitly configured private key wins -- nothing to do.
    if "ssh_private_key" in config["auth"]:
        return config
    ssh_user = config["auth"]["ssh_user"]
    project = compute.projects().get(
        project=config["provider"]["project_id"]).execute()
    # Key pairs associated with project meta data. The key pairs are general,
    # and not just ssh keys.
    ssh_keys_str = next(
        (item for item in project["commonInstanceMetadata"].get("items", [])
         if item["key"] == "ssh-keys"), {}).get("value", "")
    ssh_keys = ssh_keys_str.split("\n") if ssh_keys_str else []
    # Try a few times to get or create a good key pair.
    key_found = False
    for i in range(10):
        key_name = key_pair_name(i, config["provider"]["region"],
                                 config["provider"]["project_id"], ssh_user)
        public_key_path, private_key_path = key_pair_paths(key_name)
        # Look for an existing project-level key for this user for which we
        # also hold the private half locally.
        # NOTE(review): this matches only on the username (key_parts[2]),
        # not on the key value itself -- confirm that is intended.
        for ssh_key in ssh_keys:
            key_parts = ssh_key.split(" ")
            if len(key_parts) != 3:
                continue
            if key_parts[2] == ssh_user and os.path.exists(private_key_path):
                # Found a key
                key_found = True
                break
        # Create a key since it doesn't exist locally or in GCP
        if not key_found and not os.path.exists(private_key_path):
            logger.info("_configure_key_pair: "
                        "Creating new key pair {}".format(key_name))
            public_key, private_key = generate_rsa_key_pair()
            _create_project_ssh_key_pair(project, public_key, ssh_user)
            with open(private_key_path, "w") as f:
                f.write(private_key)
            # ssh refuses world-readable private keys; restrict to the owner.
            os.chmod(private_key_path, 0o600)
            with open(public_key_path, "w") as f:
                f.write(public_key)
            key_found = True
            break
        if key_found:
            break
    # After the loop, private_key_path refers to the last candidate tried.
    assert key_found, "SSH keypair for user {} not found for {}".format(
        ssh_user, private_key_path)
    assert os.path.exists(private_key_path), (
        "Private key file {} not found for user {}"
        "".format(private_key_path, ssh_user))
    # NOTE(review): the message below is missing a space after "using"
    # (cosmetic only -- runtime string left unchanged here).
    logger.info("_configure_key_pair: "
                "Private key not specified in config, using"
                "{}".format(private_key_path))
    config["auth"]["ssh_private_key"] = private_key_path
    return config
def _configure_subnet(config):
    """Pick a reasonable subnet if not specified by the config."""
    head = config["head_node"]
    workers = config["worker_nodes"]

    # Rationale: avoid subnet lookup if the network is already
    # completely manually configured
    if "networkInterfaces" in head and "networkInterfaces" in workers:
        return config

    subnets = _list_subnets(config)
    if not subnets:
        raise NotImplementedError("Should be able to create subnet.")

    # TODO: make sure that we have usable subnet. Maybe call
    # compute.subnetworks().listUsable? For some reason it didn't
    # work out-of-the-box
    default_subnet = subnets[0]

    def default_interfaces():
        # Build a fresh interface list per node type so the two configs do
        # not share a mutable structure.
        return [{
            "subnetwork": default_subnet["selfLink"],
            "accessConfigs": [{
                "name": "External NAT",
                "type": "ONE_TO_ONE_NAT",
            }],
        }]

    if "networkInterfaces" not in head:
        head["networkInterfaces"] = default_interfaces()
    if "networkInterfaces" not in workers:
        workers["networkInterfaces"] = default_interfaces()

    return config
def _list_subnets(config):
    """Return all subnetworks in the configured project/region.

    Returns an empty list when the region has no subnetworks: the GCP list
    API omits the "items" key entirely in that case, so indexing it directly
    would raise KeyError (the caller, _configure_subnet, explicitly checks
    for an empty result).
    """
    response = compute.subnetworks().list(
        project=config["provider"]["project_id"],
        region=config["provider"]["region"]).execute()
    return response.get("items", [])
def _get_subnet(config, subnet_id):
    """Fetch a single subnetwork by id from the configured project/region."""
    provider = config["provider"]
    return compute.subnetworks().get(
        project=provider["project_id"],
        region=provider["region"],
        subnetwork=subnet_id,
    ).execute()
def _get_project(project_id):
    """Return the project resource, or None if it is not accessible.

    A 403 response is treated as "project not found" (the caller then tries
    to create it); any other HTTP error is re-raised.
    """
    try:
        return crm.projects().get(projectId=project_id).execute()
    except errors.HttpError as e:
        if e.resp.status != 403:
            raise
        return None
def _create_project(project_id):
    """Create a new project and wait for the CRM operation to finish."""
    operation = crm.projects().create(body={
        "projectId": project_id,
        "name": project_id
    }).execute()
    return wait_for_crm_operation(operation)
def _get_service_account(account, config):
    """Look up a service account by email, returning None if absent."""
    full_name = "projects/{}/serviceAccounts/{}".format(
        config["provider"]["project_id"], account)
    try:
        return iam.projects().serviceAccounts().get(
            name=full_name).execute()
    except errors.HttpError as e:
        # 404 simply means the account has not been created yet.
        if e.resp.status != 404:
            raise
        return None
def _create_service_account(account_id, account_config, config):
    """Create a new service account under the configured project."""
    project_name = "projects/{}".format(config["provider"]["project_id"])
    body = {
        "accountId": account_id,
        "serviceAccount": account_config,
    }
    return iam.projects().serviceAccounts().create(
        name=project_name, body=body).execute()
def _add_iam_policy_binding(service_account, roles):
    """Add new IAM roles for the service account.

    Fetches the project's current IAM policy, appends ``service_account`` as
    a member of every role in ``roles`` it is not already bound to, and
    writes the policy back only when something actually changed.
    """
    project_id = service_account["projectId"]
    email = service_account["email"]
    # IAM members are referenced as "serviceAccount:<email>".
    member_id = "serviceAccount:" + email

    policy = crm.projects().getIamPolicy(
        resource=project_id, body={}).execute()

    # Tracks whether the policy already contained every requested binding.
    already_configured = True

    for role in roles:
        role_exists = False
        for binding in policy["bindings"]:
            if binding["role"] == role:
                if member_id not in binding["members"]:
                    binding["members"].append(member_id)
                    already_configured = False
                role_exists = True

        # No binding for this role exists at all yet -- create one.
        if not role_exists:
            already_configured = False
            policy["bindings"].append({
                "members": [member_id],
                "role": role,
            })

    if already_configured:
        # In some managed environments, an admin needs to grant the
        # roles, so only call setIamPolicy if needed.
        return

    result = crm.projects().setIamPolicy(
        resource=project_id, body={
            "policy": policy,
        }).execute()

    return result
def _create_project_ssh_key_pair(project, public_key, ssh_user):
    """Inserts an ssh-key into project commonInstanceMetadata"""
    # Sanity checks to make sure that the generated key matches expectation
    key_parts = public_key.split(" ")
    assert len(key_parts) == 2, key_parts
    assert key_parts[0] == "ssh-rsa", key_parts

    new_ssh_meta = "{ssh_user}:ssh-rsa {key_value} {ssh_user}".format(
        ssh_user=ssh_user, key_value=key_parts[1])

    metadata = project["commonInstanceMetadata"]
    items = metadata.get("items", [])
    for item in items:
        if item["key"] == "ssh-keys":
            # An ssh-keys entry already exists: append to it.
            item["value"] += "\n" + new_ssh_meta
            break
    else:
        # No ssh-keys entry yet: create one.
        items.append({"key": "ssh-keys", "value": new_ssh_meta})
    metadata["items"] = items

    operation = compute.projects().setCommonInstanceMetadata(
        project=project["name"], body=metadata).execute()
    return wait_for_compute_global_operation(project["name"], operation)
| stephanie-wang/ray | python/ray/autoscaler/gcp/config.py | Python | apache-2.0 | 14,384 |
/* Typechecking With PropTypes
As your app grows, you can catch a lot of bugs with typechecking. For some applications, you can use JavaScript extensions like Flow or
TypeScript to typecheck your whole application. But even if you don’t use those, React has some built-in typechecking abilities. To run
typechecking on the props for a component, you can assign the special propTypes property:'PropTypes' allow us to supply a property type
for all of our different properties, so that it will validate to
make sure that we're supplying the right type.
Note: For performance reasons, propTypes is only checked in development mode.
NOTE: React.PropTypes has moved into a different package since React v15.5. Please use the "prop-types" library instead.
primitives:
optionalArray: React.PropTypes.array,
optionalBool: React.PropTypes.bool,
optionalFunc: React.PropTypes.func,
optionalNumber: React.PropTypes.number,
optionalObject: React.PropTypes.object,
optionalString: React.PropTypes.string,
optionalSymbol: React.PropTypes.symbol,
PropTypes.instanceOf(Date) //for date objects
There are many more complex data types: https://facebook.github.io/react/docs/typechecking-with-proptypes.html
*/
/* Type 1: Creating Components using Class */
(SkiDayCount-createClass.js)
import { createClass, PropTypes } from 'react'
//Just after `export const SkiDayCount = createClass({` and before `getDefaultProps() {`
propTypes: {
total: PropTypes.number.isRequired, //make sure a warning is shown if the prop isn't provided. (if defaults are there,this will never give a warning)
powder: PropTypes.number,
backcountry: PropTypes.number
},
//note that we've destructured PropTypes from 'react'. Hadn't we done that, we should use React.propTypes.number etc
(index.js)
import { SkiDayCount } from './components/SkiDayCount-createClass.js'
render(
<SkiDayCount total="lots" />, //we're intentionally giving wrong type(string) to `total` (which should have number type)
document.getElementById('react-container')
)
//Once you run, You'll see that it will still render but in console, you will get a warning!
/* Type-2: Using ES6 Syntax */
(in SkiDayCount-ES6.js)
import { Component, PropTypes } from 'react'
//completely outside the class definition
SkiDayCount.propTypes = {
total: PropTypes.number,
powder: PropTypes.number,
backcountry: PropTypes.number
}
(in index.js)
import { SkiDayCount } from './components/SkiDayCount-ES6.js'
render( //supplied wrong type(boolean) instead of number
<SkiDayCount backcountry={false} />,
document.getElementById('react-container')
)
/* Type-3: Statless Function Types */
(in SkiDay.js)
import { PropTypes } from 'react'
//completely outside the class definition
SkiDayCount.propTypes = {
total: PropTypes.number,
powder: PropTypes.number,
backcountry: PropTypes.number
}
(in index.js)
import { SkiDayCount } from './components/SkiDayCount'
render(
<SkiDayCount backcountry={false} />,
document.getElementById('react-container')
)
| iitjee/SteppinsWebDev | React/08 Prop Types.js | JavaScript | apache-2.0 | 3,584 |
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class SoundManager : MonoBehaviour
{
    // Source used for one-shot sound effects.
    public AudioSource fxSource;
    // Source used for background music.
    public AudioSource musicSource;

    // Scene-persistent singleton instance.
    public static SoundManager instance = null;

    // Pitch randomization bounds used by RandomizeSfx so repeated effects
    // don't sound identical.
    public float lowPitchRange = .95f;
    public float highPitchRange = 1.05f;

    void Awake ()
    {
        // Standard Unity singleton: keep the first instance, destroy duplicates.
        if (instance == null)
        {
            instance = this;
        }
        else if (instance != this)
        {
            // A SoundManager already exists; destroy this duplicate and bail
            // out. (Previously DontDestroyOnLoad was also called on the
            // doomed duplicate, which is pointless and misleading.)
            Destroy(gameObject);
            return;
        }

        // Keep the surviving instance alive across scene loads.
        DontDestroyOnLoad(gameObject);
    }

    // Play a single clip on the effects source.
    public void PlaySingle(AudioClip clip)
    {
        fxSource.clip = clip;
        fxSource.Play();
    }

    // Can send multiple audio clips to this method; plays one at random
    // with a slightly randomized pitch.
    public void RandomizeSfx(params AudioClip[] clips)
    {
        int randomIndex = Random.Range(0, clips.Length);
        float randomPitch = Random.Range(lowPitchRange, highPitchRange);

        fxSource.pitch = randomPitch;
        fxSource.clip = clips[randomIndex];
        fxSource.Play();
    }
}
| CattyClouds/Hunger-Man | Hunger Man/Assets/Scripts/SoundManager.cs | C# | apache-2.0 | 1,065 |
/**
* Copyright 2010 The PlayN Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package playn.html;
import com.google.gwt.dom.client.CanvasElement;
import com.google.gwt.dom.client.Document;
import com.google.gwt.dom.client.Element;
import com.google.gwt.webgl.client.WebGLContextAttributes;
import com.google.gwt.webgl.client.WebGLRenderingContext;
import playn.core.Game;
import playn.core.GroupLayer;
import playn.core.Image;
import playn.core.ImageLayer;
import playn.core.ImmediateLayer;
import playn.core.SurfaceLayer;
import playn.core.gl.GL20;
import playn.core.gl.GroupLayerGL;
import playn.core.gl.ImageLayerGL;
import playn.core.gl.ImmediateLayerGL;
import playn.core.gl.Scale;
import playn.core.gl.SurfaceLayerGL;
/**
 * WebGL-backed implementation of {@link HtmlGraphics}. Creates a canvas
 * element under the platform's root element, obtains a WebGL rendering
 * context for it and renders all layers through {@link HtmlGLContext}.
 * Construction throws {@link RuntimeException} when WebGL is unavailable so
 * that {@code HtmlPlatform} can fall back to the DOM/canvas renderer.
 */
class HtmlGraphicsGL extends HtmlGraphics {

  private final CanvasElement canvas;
  private final HtmlGLContext ctx;
  private final GroupLayerGL rootLayer;

  HtmlGraphicsGL(HtmlPlatform platform, HtmlPlatform.Config config) throws RuntimeException {
    super(config);

    // The GL canvas initially fills the root element sized by HtmlGraphics.
    canvas = Document.get().createCanvasElement();
    canvas.setWidth(rootElement.getOffsetWidth());
    canvas.setHeight(rootElement.getOffsetHeight());
    rootElement.appendChild(canvas);

    try {
      WebGLContextAttributes attrs = WebGLContextAttributes.create();
      attrs.setAlpha(config.transparentCanvas);
      attrs.setAntialias(config.antiAliasing);

      // if this returns null, the browser doesn't support WebGL on this machine
      WebGLRenderingContext gl = WebGLRenderingContext.getContext(canvas, attrs);
      // Some systems seem to have a problem where they return a valid context, but it's in an
      // error state initially. We give up and fall back to Canvas in this case, because nothing
      // seems to work properly.
      if (gl == null || gl.getError() != WebGLRenderingContext.NO_ERROR) {
        throw new RuntimeException("GL context not created [err=" +
                                   (gl == null ? "null" : gl.getError()) + "]");
      }

      ctx = new HtmlGLContext(platform, config.scaleFactor, gl, canvas);
      rootLayer = new GroupLayerGL(ctx);
    } catch (RuntimeException re) {
      // Give up. HtmlPlatform will catch the exception and fall back to dom/canvas.
      rootElement.removeChild(canvas);
      throw re;
    }
  }

  // Keeps the backing canvas and the GL viewport in sync with the new size.
  @Override
  public void setSize(int width, int height) {
    super.setSize(width, height);
    canvas.setWidth(width);
    canvas.setHeight(height);
    ctx.setSize(width, height);
  }

  @Override
  public GroupLayer createGroupLayer() {
    return new GroupLayerGL(ctx);
  }

  @Override
  public GroupLayer.Clipped createGroupLayer(float width, float height) {
    return new GroupLayerGL.Clipped(ctx, width, height);
  }

  @Override
  public ImageLayer createImageLayer() {
    return new ImageLayerGL(ctx);
  }

  @Override
  public ImageLayer createImageLayer(Image img) {
    return new ImageLayerGL(ctx, img);
  }

  @Override
  public SurfaceLayer createSurfaceLayer(float width, float height) {
    return new SurfaceLayerGL(ctx, width, height);
  }

  @Override
  public ImmediateLayer.Clipped createImmediateLayer(
      int width, int height, ImmediateLayer.Renderer renderer) {
    return new ImmediateLayerGL.Clipped(ctx, width, height, renderer);
  }

  @Override
  public ImmediateLayer createImmediateLayer(ImmediateLayer.Renderer renderer) {
    return new ImmediateLayerGL(ctx, renderer);
  }

  @Override
  public GroupLayer rootLayer() {
    return rootLayer;
  }

  // View dimensions come from the GL context, which tracks setSize().
  @Override
  public int width() {
    return ctx.viewWidth;
  }

  @Override
  public int height() {
    return ctx.viewHeight;
  }

  @Override
  public GL20 gl20() {
    return ctx.gl;
  }

  @Override
  public HtmlGLContext ctx() {
    return ctx;
  }

  @Override
  Scale scale() {
    return ctx.scale;
  }

  // Runs the game's paint callback; the GL prepare/flush pass is skipped
  // entirely when the root layer has nothing to draw.
  @Override
  void paint(Game game, float paintAlpha) {
    if (rootLayer.size() > 0) {
      ctx.preparePaint(rootLayer);
      game.paint(paintAlpha);
      ctx.paintLayers(rootLayer);
    } else {
      game.paint(paintAlpha);
    }
  }

  @Override
  Element rootElement() {
    return canvas;
  }
}
| KoriSamui/PlayN | html/src/playn/html/HtmlGraphicsGL.java | Java | apache-2.0 | 4,612 |
const { spawn } = require('child_process')
module.exports = function myExec(cmd, ...args) {
return new Promise((resolve, reject) => {
const run = spawn(cmd, args)
let out = ''
run.stdout.on('data', (data) => {
console.log(`[stdout]: ${data.toString().trimEnd()}`)
out += data.toString()
})
run.stderr.on('data', (data) => {
console.log(`[stderr]: ${data.toString().trimEnd()}`)
})
run.on('exit', function (code) {
console.log('child process exited with code ' + code.toString())
if (code === 0) {
resolve(out)
} else {
reject(code)
}
})
})
}
| oldj/SwitchHosts | scripts/libs/my_exec.js | JavaScript | apache-2.0 | 641 |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class V1ServiceAccount(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        V1ServiceAccount - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps python attribute name -> swagger type string (used by to_dict).
        self.swagger_types = {
            'kind': 'str',
            'api_version': 'str',
            'metadata': 'V1ObjectMeta',
            'secrets': 'list[V1ObjectReference]',
            'image_pull_secrets': 'list[V1LocalObjectReference]'
        }

        # Maps python attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'kind': 'kind',
            'api_version': 'apiVersion',
            'metadata': 'metadata',
            'secrets': 'secrets',
            'image_pull_secrets': 'imagePullSecrets'
        }

        self._kind = None
        self._api_version = None
        self._metadata = None
        self._secrets = None
        self._image_pull_secrets = None

    @property
    def kind(self):
        """
        Gets the kind of this V1ServiceAccount.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds

        :return: The kind of this V1ServiceAccount.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1ServiceAccount.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds

        :param kind: The kind of this V1ServiceAccount.
        :type: str
        """
        self._kind = kind

    @property
    def api_version(self):
        """
        Gets the api_version of this V1ServiceAccount.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources

        :return: The api_version of this V1ServiceAccount.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1ServiceAccount.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources

        :param api_version: The api_version of this V1ServiceAccount.
        :type: str
        """
        self._api_version = api_version

    @property
    def metadata(self):
        """
        Gets the metadata of this V1ServiceAccount.
        Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata

        :return: The metadata of this V1ServiceAccount.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1ServiceAccount.
        Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata

        :param metadata: The metadata of this V1ServiceAccount.
        :type: V1ObjectMeta
        """
        self._metadata = metadata

    @property
    def secrets(self):
        """
        Gets the secrets of this V1ServiceAccount.
        Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md

        :return: The secrets of this V1ServiceAccount.
        :rtype: list[V1ObjectReference]
        """
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        """
        Sets the secrets of this V1ServiceAccount.
        Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md

        :param secrets: The secrets of this V1ServiceAccount.
        :type: list[V1ObjectReference]
        """
        self._secrets = secrets

    @property
    def image_pull_secrets(self):
        """
        Gets the image_pull_secrets of this V1ServiceAccount.
        ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret

        :return: The image_pull_secrets of this V1ServiceAccount.
        :rtype: list[V1LocalObjectReference]
        """
        return self._image_pull_secrets

    @image_pull_secrets.setter
    def image_pull_secrets(self, image_pull_secrets):
        """
        Sets the image_pull_secrets of this V1ServiceAccount.
        ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret

        :param image_pull_secrets: The image_pull_secrets of this V1ServiceAccount.
        :type: list[V1LocalObjectReference]
        """
        self._image_pull_secrets = image_pull_secrets

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works on both Python 2 and 3, removing the need for
        # six.iteritems in this class.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Previously this accessed ``other.__dict__`` unconditionally, which
        raised AttributeError when comparing against None or any object
        without a ``__dict__`` (e.g. ``account == None``).
        """
        if not isinstance(other, V1ServiceAccount):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
package com.xiaoleilu.ucloud.test;
import org.slf4j.Logger;
import com.xiaoleilu.hutool.Log;
import com.xiaoleilu.ucloud.core.Param;
import com.xiaoleilu.ucloud.core.Response;
import com.xiaoleilu.ucloud.core.enums.Region;
import com.xiaoleilu.ucloud.udb.ClassType;
import com.xiaoleilu.ucloud.udb.UDB;
import com.xiaoleilu.ucloud.udb.UDBName;
/**
* UDB样例
* @author Looly
*
*/
/**
 * Sample usage of the UDB API: queries UDB instance information.
 */
public class UDBTest {

    private final static Logger log = Log.get();

    public static void main(String[] args) {
        // Build the query: first 10 SQL-class UDB instances in cn-north-03.
        Param param = Param.create()
                .setRegion(Region.CN_NORTH_03)
                .set(UDBName.ClassType, ClassType.SQL)
                .setOffset(0)
                .setLimit(10);

        final UDB udb = new UDB();
        Response response = udb.describeUDBInstance(param);
        log.debug("UDB Instance: {}", response.toPretty());
    }
}
| looly/ucloud-java-sdk | src/test/java/com/xiaoleilu/ucloud/test/UDBTest.java | Java | apache-2.0 | 844 |
import { Configuration } from "./configuration/Configuration.js";
import { SettingsManager } from "./configuration/SettingsManager.js";
import { EventBuilder } from "./EventBuilder.js";
import { Event, KnownEventDataKeys } from "./models/Event.js";
import { UserDescription } from "./models/data/UserDescription.js";
import { EventContext } from "./models/EventContext.js";
import { EventPluginContext } from "./plugins/EventPluginContext.js";
import { EventPluginManager } from "./plugins/EventPluginManager.js";
import { PluginContext } from "./plugins/PluginContext.js";
export class ExceptionlessClient {
private _intervalId = 0;
private _timeoutId = 0;
protected _initialized = false;
public constructor(public config: Configuration = new Configuration()) { }
/** Resume background submission, resume any timers. */
public async startup(configurationOrApiKey?: (config: Configuration) => void | string): Promise<void> {
if (configurationOrApiKey && !this._initialized) {
this._initialized = true;
EventPluginManager.addDefaultPlugins(this.config);
if (typeof configurationOrApiKey === "string") {
this.config.apiKey = configurationOrApiKey;
} else {
configurationOrApiKey(this.config);
}
this.config.services.queue.onEventsPosted(() =>
Promise.resolve(this.updateSettingsTimer())
);
await SettingsManager.applySavedServerSettings(this.config);
}
this.updateSettingsTimer(!!configurationOrApiKey);
await EventPluginManager.startup(new PluginContext(this));
const { queue } = this.config.services;
await queue.startup();
if (this.config.usePersistedQueueStorage) {
// TODO: Can we schedule this as part of startup?
await queue.process();
}
}
/** Submit events, pause any timers and go into low power mode. */
public async suspend(): Promise<void> {
await EventPluginManager.suspend(new PluginContext(this));
const { queue } = this.config.services;
await queue.suspend();
await queue.process();
this.suspendSettingsTimer();
}
private suspendSettingsTimer(): void {
clearTimeout(this._timeoutId);
this._timeoutId = 0;
clearInterval(this._intervalId);
this._intervalId = 0;
}
public async processQueue(): Promise<void> {
await this.config.services.queue.process();
}
private updateSettingsTimer(startingUp = false) {
this.suspendSettingsTimer();
const interval = this.config.updateSettingsWhenIdleInterval;
if (interval > 0) {
let initialDelay: number = interval;
if (startingUp) {
initialDelay = this.config.settingsVersion > 0 ? 15000 : 5000;
}
this.config.services.log.info(
`Update settings every ${interval}ms (${initialDelay || 0}ms delay)`,
);
// TODO: Look into better async scheduling..
const updateSettings = () =>
void SettingsManager.updateSettings(this.config);
if (initialDelay < interval) {
this._timeoutId = setTimeout(updateSettings, initialDelay);
}
this._intervalId = setInterval(updateSettings, interval);
}
}
public createException(exception: Error): EventBuilder {
const pluginContextData = new EventContext();
pluginContextData.setException(exception);
return this.createEvent(pluginContextData).setType("error");
}
public submitException(exception: Error): Promise<EventPluginContext> {
return this.createException(exception).submit();
}
public createUnhandledException(exception: Error, submissionMethod?: string): EventBuilder {
const builder = this.createException(exception);
builder.context.markAsUnhandledError();
builder.context.setSubmissionMethod(submissionMethod || "");
return builder;
}
public submitUnhandledException(exception: Error, submissionMethod?: string): Promise<EventPluginContext> {
return this.createUnhandledException(exception, submissionMethod).submit();
}
public createFeatureUsage(feature: string): EventBuilder {
return this.createEvent().setType("usage").setSource(feature);
}
public submitFeatureUsage(feature: string): Promise<EventPluginContext> {
return this.createFeatureUsage(feature).submit();
}
public createLog(message: string): EventBuilder;
public createLog(source: string, message: string): EventBuilder;
public createLog(source: string | undefined, message: string, level: string): EventBuilder;
public createLog(sourceOrMessage: string, message?: string, level?: string): EventBuilder {
let builder = this.createEvent().setType("log");
if (level) {
builder = builder.setSource(sourceOrMessage).setMessage(message)
.setProperty(KnownEventDataKeys.Level, level);
} else if (message) {
builder = builder.setSource(sourceOrMessage).setMessage(message);
} else {
builder = builder.setMessage(sourceOrMessage);
try {
// TODO: Look into using https://www.stevefenton.co.uk/Content/Blog/Date/201304/Blog/Obtaining-A-Class-Name-At-Runtime-In-TypeScript/
const caller = this.createLog.caller;
builder = builder.setSource(
caller && caller.caller && caller.caller.name,
);
} catch (ex) {
this.config.services.log.trace(`Unable to resolve log source: ${ex instanceof Error ? ex.message : ex + ''}`);
}
}
return builder;
}
public submitLog(message: string): Promise<EventPluginContext>;
public submitLog(source: string, message: string): Promise<EventPluginContext>;
public submitLog(source: string | undefined, message: string, level: string): Promise<EventPluginContext>;
public submitLog(sourceOrMessage: string, message?: string, level?: string): Promise<EventPluginContext> {
return this.createLog(sourceOrMessage, <string>message, <string>level).submit();
}
public createNotFound(resource: string): EventBuilder {
return this.createEvent().setType("404").setSource(resource);
}
public submitNotFound(resource: string): Promise<EventPluginContext> {
return this.createNotFound(resource).submit();
}
public createSessionStart(): EventBuilder {
return this.createEvent().setType("session");
}
public submitSessionStart(): Promise<EventPluginContext> {
return this.createSessionStart().submit();
}
public async submitSessionEnd(sessionIdOrUserId: string): Promise<void> {
if (sessionIdOrUserId && this.config.enabled && this.config.isValid) {
this.config.services.log.info(
`Submitting session end: ${sessionIdOrUserId}`,
);
await this.config.services.submissionClient.submitHeartbeat(
sessionIdOrUserId,
true,
);
}
}
public async submitSessionHeartbeat(sessionIdOrUserId: string): Promise<void> {
if (sessionIdOrUserId && this.config.enabled && this.config.isValid) {
this.config.services.log.info(
`Submitting session heartbeat: ${sessionIdOrUserId}`,
);
await this.config.services.submissionClient.submitHeartbeat(
sessionIdOrUserId,
false,
);
}
}
public createEvent(context?: EventContext): EventBuilder {
return new EventBuilder({ date: new Date() }, this, context);
}
/**
* Submits the event to the server.
*
* @param event The event
* @param context Contextual data used by event plugins to enrich the event details
*/
public async submitEvent(event: Event, context?: EventContext): Promise<EventPluginContext> {
const pluginContext = new EventPluginContext(this, event, context ?? new EventContext());
if (!event) {
pluginContext.cancelled = true;
return pluginContext;
}
if (!this.config.enabled || !this.config.isValid) {
this.config.services.log.info("Event submission is currently disabled.");
pluginContext.cancelled = true;
return pluginContext;
}
if (!event.data) {
event.data = {};
}
if (!event.tags || !event.tags.length) {
event.tags = [];
}
await EventPluginManager.run(pluginContext);
if (pluginContext.cancelled) {
return pluginContext;
}
const ev = pluginContext.event;
// ensure all required data
if (!ev.type || ev.type.length === 0) {
ev.type = "log";
}
if (!ev.date) {
ev.date = new Date();
}
await this.config.services.queue.enqueue(ev);
if (ev.reference_id && ev.reference_id.length > 0) {
pluginContext.log.info(`Setting last reference id "${ev.reference_id}"`);
this.config.services.lastReferenceIdManager.setLast(ev.reference_id);
}
return pluginContext;
}
  /**
   * Updates the user's email address and description of an event for the specified reference id.
   * @param referenceId The reference id of the event to update.
   * @param email The user's email address to set on the event.
   * @param description The user's description of the event.
   */
  public async updateUserEmailAndDescription(referenceId: string, email: string, description: string): Promise<void> {
    // Silently no-op when required data is missing or the client is disabled.
    if (!referenceId || !email || !description || !this.config.enabled || !this.config.isValid) {
      return;
    }
    const userDescription: UserDescription = { email_address: email, description };
    const response = await this.config.services.submissionClient.submitUserDescription(referenceId, userDescription);
    if (!response.success) {
      this.config.services.log.error(
        `Failed to submit user email and description for event "${referenceId}": ${response.status} ${response.message}`,
      );
    }
  }
  /**
   * Gets the last event client id that was submitted to the server.
   * @returns {string | null} The event client id, or null when none has been submitted yet.
   */
  public getLastReferenceId(): string | null {
    return this.config.services.lastReferenceIdManager.getLast();
  }
}
| exceptionless/Exceptionless.JavaScript | packages/core/src/ExceptionlessClient.ts | TypeScript | apache-2.0 | 9,879 |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import mock
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import vm_util
class partialObject(object):
    # Minimal stand-in for a retrieval result whose properties are missing;
    # lowercase name kept intentionally to match existing callers.
    def __init__(self, path='fake-path'):
        self.path = path
        # Mimics the fault object attached to a partially-retrieved result.
        self.fault = fake.DataObject()
class VMwareVMUtilTestCase(test.NoDBTestCase):
    def setUp(self):
        """Reset fake vSphere state and build a 2-vCPU / 2048MB instance."""
        super(VMwareVMUtilTestCase, self).setUp()
        fake.reset()
        stubs.set_stubs(self.stubs)
        # Clear the module-level VM-ref cache so refs don't leak across tests.
        vm_util.vm_refs_cache_reset()
        self._instance = fake_instance.fake_instance_obj(
            None,
            **{'id': 7, 'name': 'fake!',
               'uuid': uuidutils.generate_uuid(),
               'vcpus': 2, 'memory_mb': 2048})
def _test_get_stats_from_cluster(self, connection_state="connected",
maintenance_mode=False):
ManagedObjectRefs = [fake.ManagedObjectReference("host1",
"HostSystem"),
fake.ManagedObjectReference("host2",
"HostSystem")]
hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
prop_dict = {'host': hosts, 'resourcePool': respool}
hardware = fake.DataObject()
hardware.numCpuCores = 8
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
runtime_host_1 = fake.DataObject()
runtime_host_1.connectionState = "connected"
runtime_host_1.inMaintenanceMode = False
runtime_host_2 = fake.DataObject()
runtime_host_2.connectionState = connection_state
runtime_host_2.inMaintenanceMode = maintenance_mode
prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_1)]
prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
fake.Prop(name="runtime_summary",
val=runtime_host_2)]
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_1))
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_2))
respool_resource_usage = fake.DataObject()
respool_resource_usage.maxUsage = 5368709120
respool_resource_usage.overallUsage = 2147483648
def fake_call_method(*args):
if "get_dynamic_properties" in args:
return prop_dict
elif "get_properties_for_a_collection_of_objects" in args:
return fake_objects
else:
return respool_resource_usage
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', fake_call_method):
result = vm_util.get_stats_from_cluster(session, "cluster1")
mem_info = {}
if connection_state == "connected" and not maintenance_mode:
vcpus = 32
else:
vcpus = 16
mem_info['total'] = 5120
mem_info['free'] = 3072
expected_stats = {'vcpus': vcpus, 'mem': mem_info}
self.assertEqual(expected_stats, result)
    def test_get_stats_from_cluster_hosts_connected_and_active(self):
        # Both hosts usable: vcpu total includes host 2.
        self._test_get_stats_from_cluster()

    def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
        # A disconnected host must be excluded from the vcpu total.
        self._test_get_stats_from_cluster(connection_state="disconnected")

    def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
        # A host in maintenance mode must be excluded from the vcpu total.
        self._test_get_stats_from_cluster(maintenance_mode=True)
    def test_get_host_ref_no_hosts_in_cluster(self):
        # An empty retrieval result means no host can be selected.
        self.assertRaises(exception.NoValidHost,
                          vm_util.get_host_ref,
                          fake.FakeObjectRetrievalSession(""), 'fake_cluster')
    def test_get_resize_spec(self):
        # Default ExtraSpecs: no reservation, unlimited CPU, normal shares.
        vcpus = 2
        memory_mb = 2048
        extra_specs = vm_util.ExtraSpecs()
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_resize_spec(fake_factory,
                                            vcpus, memory_mb, extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.memoryMB = memory_mb
        expected.numCPUs = vcpus
        cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpuAllocation.reservation = 0
        cpuAllocation.limit = -1
        cpuAllocation.shares = fake_factory.create('ns0:SharesInfo')
        cpuAllocation.shares.level = 'normal'
        cpuAllocation.shares.shares = 0
        expected.cpuAllocation = cpuAllocation
        self.assertEqual(expected, result)

    def test_get_resize_spec_with_limits(self):
        # Explicit CPU limit/reservation must flow into the allocation info.
        vcpus = 2
        memory_mb = 2048
        cpu_limits = vm_util.CpuLimits(cpu_limit=7,
                                       cpu_reservation=6)
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_resize_spec(fake_factory,
                                            vcpus, memory_mb, extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.memoryMB = memory_mb
        expected.numCPUs = vcpus
        cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpuAllocation.reservation = 6
        cpuAllocation.limit = 7
        cpuAllocation.shares = fake_factory.create('ns0:SharesInfo')
        cpuAllocation.shares.level = 'normal'
        cpuAllocation.shares.shares = 0
        expected.cpuAllocation = cpuAllocation
        self.assertEqual(expected, result)
    def test_get_cdrom_attach_config_spec(self):
        # Attaching an ISO should produce an 'add' device change with a
        # connected, guest-uncontrollable CD-ROM backed by the ISO file.
        fake_factory = fake.FakeFactory()
        datastore = fake.Datastore()
        result = vm_util.get_cdrom_attach_config_spec(fake_factory,
                                                      datastore,
                                                      "/tmp/foo.iso",
                                                      200, 0)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.deviceChange = []
        device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device_change.operation = 'add'
        device_change.device = fake_factory.create('ns0:VirtualCdrom')
        device_change.device.controllerKey = 200
        device_change.device.unitNumber = 0
        device_change.device.key = -1
        connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
        connectable.allowGuestControl = False
        connectable.startConnected = True
        connectable.connected = True
        device_change.device.connectable = connectable
        backing = fake_factory.create('ns0:VirtualCdromIsoBackingInfo')
        backing.fileName = '/tmp/foo.iso'
        backing.datastore = datastore
        device_change.device.backing = backing
        expected.deviceChange.append(device_change)
        self.assertEqual(expected, result)
    def test_lsilogic_controller_spec(self):
        # Test controller spec returned for lsiLogic sas adapter type
        config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
                                                     adapter_type="lsiLogicsas")
        self.assertEqual("ns0:VirtualLsiLogicSASController",
                         config_spec.device.obj_name)

    def test_paravirtual_controller_spec(self):
        # Test controller spec returned for paraVirtual adapter type
        config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
                                                     adapter_type="paraVirtual")
        self.assertEqual("ns0:ParaVirtualSCSIController",
                         config_spec.device.obj_name)
    def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
        # Build a device list: a root disk (1024 bytes) with the given
        # backing file, an ephemeral disk (512 bytes), and the LSI-SAS
        # controller both disks hang off of.
        controller_key = 1000
        disk = fake.VirtualDisk()
        disk.controllerKey = controller_key
        disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
        disk_backing.fileName = filename
        disk.capacityInBytes = 1024
        if parent:
            disk_backing.parent = parent
        disk.backing = disk_backing
        # Ephemeral disk
        e_disk = fake.VirtualDisk()
        e_disk.controllerKey = controller_key
        disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
        disk_backing.fileName = '[test_datastore] uuid/ephemeral_0.vmdk'
        e_disk.capacityInBytes = 512
        e_disk.backing = disk_backing
        controller = fake.VirtualLsiLogicSASController()
        controller.key = controller_key
        devices = [disk, e_disk, controller]
        return devices
    def test_get_vmdk_path_and_adapter_type(self):
        # Without a uuid filter, get_vmdk_info reports the last disk seen
        # (the ephemeral disk from the fixture).
        filename = '[test_datastore] uuid/uuid.vmdk'
        devices = self._vmdk_path_and_adapter_type_devices(filename)
        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method', return_value=devices):
            vmdk = vm_util.get_vmdk_info(session, None)
            self.assertEqual('lsiLogicsas', vmdk.adapter_type)
            self.assertEqual('[test_datastore] uuid/ephemeral_0.vmdk',
                             vmdk.path)
            self.assertEqual(512, vmdk.capacity_in_bytes)
            self.assertEqual(devices[1], vmdk.device)

    def test_get_vmdk_path_and_adapter_type_with_match(self):
        # A matching uuid selects the root disk backed by <uuid>.vmdk.
        n_filename = '[test_datastore] uuid/uuid.vmdk'
        devices = self._vmdk_path_and_adapter_type_devices(n_filename)
        session = fake.FakeSession()
        with mock.patch.object(session, '_call_method', return_value=devices):
            vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid')
            self.assertEqual('lsiLogicsas', vmdk.adapter_type)
            self.assertEqual(n_filename, vmdk.path)
            self.assertEqual(1024, vmdk.capacity_in_bytes)
            self.assertEqual(devices[0], vmdk.device)

    def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
        # No device matches the uuid: everything comes back empty/None.
        n_filename = '[test_datastore] diuu/diuu.vmdk'
        session = fake.FakeSession()
        devices = self._vmdk_path_and_adapter_type_devices(n_filename)
        with mock.patch.object(session, '_call_method', return_value=devices):
            vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid')
            self.assertIsNone(vmdk.adapter_type)
            self.assertIsNone(vmdk.path)
            self.assertEqual(0, vmdk.capacity_in_bytes)
            self.assertIsNone(vmdk.device)
    def test_get_vmdk_adapter_type(self):
        # Test for the adapter_type to be used in vmdk descriptor
        # Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic
        # and ParaVirtual
        vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
        self.assertEqual("lsiLogic", vmdk_adapter_type)
        vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
        self.assertEqual("lsiLogic", vmdk_adapter_type)
        vmdk_adapter_type = vm_util.get_vmdk_adapter_type("paraVirtual")
        self.assertEqual("lsiLogic", vmdk_adapter_type)
        # Unknown adapter types pass through unchanged.
        vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
        self.assertEqual("dummyAdapter", vmdk_adapter_type)
    def test_get_scsi_adapter_type(self):
        # A VM with both a SCSI and an IDE controller should report the
        # default (SCSI) adapter type.
        vm = fake.VirtualMachine()
        devices = vm.get("config.hardware.device").VirtualDevice
        scsi_controller = fake.VirtualLsiLogicController()
        ide_controller = fake.VirtualIDEController()
        devices.append(scsi_controller)
        devices.append(ide_controller)
        fake._update_object("VirtualMachine", vm)
        # return the scsi type, not ide
        hardware_device = vm.get("config.hardware.device")
        self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
                         vm_util.get_scsi_adapter_type(hardware_device))

    def test_get_scsi_adapter_type_with_error(self):
        vm = fake.VirtualMachine()
        devices = vm.get("config.hardware.device").VirtualDevice
        scsi_controller = fake.VirtualLsiLogicController()
        ide_controller = fake.VirtualIDEController()
        devices.append(scsi_controller)
        devices.append(ide_controller)
        fake._update_object("VirtualMachine", vm)
        # the controller is not suitable since the device under this controller
        # has exceeded SCSI_MAX_CONNECT_NUMBER
        for i in range(0, constants.SCSI_MAX_CONNECT_NUMBER):
            scsi_controller.device.append('device' + str(i))
        hardware_device = vm.get("config.hardware.device")
        self.assertRaises(exception.StorageError,
                          vm_util.get_scsi_adapter_type,
                          hardware_device)
    def test_find_allocated_slots(self):
        # _find_allocated_slots maps controller key -> taken unit numbers,
        # including the SCSI controller's own reserved unit (7).
        disk1 = fake.VirtualDisk(200, 0)
        disk2 = fake.VirtualDisk(200, 1)
        disk3 = fake.VirtualDisk(201, 1)
        ide0 = fake.VirtualIDEController(200)
        ide1 = fake.VirtualIDEController(201)
        scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
        devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
        taken = vm_util._find_allocated_slots(devices)
        self.assertEqual([0, 1], sorted(taken[200]))
        self.assertEqual([1], taken[201])
        self.assertEqual([7], taken[1000])
    def test_allocate_controller_key_and_unit_number_ide_default(self):
        # Test that default IDE controllers are used when there is a free slot
        # on them
        disk1 = fake.VirtualDisk(200, 0)
        disk2 = fake.VirtualDisk(200, 1)
        ide0 = fake.VirtualIDEController(200)
        ide1 = fake.VirtualIDEController(201)
        devices = [disk1, disk2, ide0, ide1]
        (controller_key, unit_number,
         controller_spec) = vm_util.allocate_controller_key_and_unit_number(
             None,
             devices,
             'ide')
        self.assertEqual(201, controller_key)
        self.assertEqual(0, unit_number)
        # No new controller needed, so no spec is returned.
        self.assertIsNone(controller_spec)

    def test_allocate_controller_key_and_unit_number_ide(self):
        # Test that a new controller is created when there is no free slot on
        # the default IDE controllers
        ide0 = fake.VirtualIDEController(200)
        ide1 = fake.VirtualIDEController(201)
        devices = [ide0, ide1]
        for controller_key in [200, 201]:
            for unit_number in [0, 1]:
                disk = fake.VirtualDisk(controller_key, unit_number)
                devices.append(disk)
        factory = fake.FakeFactory()
        (controller_key, unit_number,
         controller_spec) = vm_util.allocate_controller_key_and_unit_number(
             factory,
             devices,
             'ide')
        # -101 is the temporary key of the newly created controller.
        self.assertEqual(-101, controller_key)
        self.assertEqual(0, unit_number)
        self.assertIsNotNone(controller_spec)

    def test_allocate_controller_key_and_unit_number_scsi(self):
        # Test that we allocate on existing SCSI controller if there is a free
        # slot on it
        devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
        for unit_number in range(7):
            disk = fake.VirtualDisk(1000, unit_number)
            devices.append(disk)
        factory = fake.FakeFactory()
        (controller_key, unit_number,
         controller_spec) = vm_util.allocate_controller_key_and_unit_number(
             factory,
             devices,
             'lsiLogic')
        self.assertEqual(1000, controller_key)
        # Unit 7 is reserved for the controller itself, so 8 is next free.
        self.assertEqual(8, unit_number)
        self.assertIsNone(controller_spec)
    def test_get_vnc_config_spec(self):
        # The VNC spec is carried as RemoteDisplay.vnc.* extraConfig options.
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vnc_config_spec(fake_factory,
                                             7)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.extraConfig = []
        remote_display_vnc_enabled = fake_factory.create('ns0:OptionValue')
        remote_display_vnc_enabled.value = 'true'
        remote_display_vnc_enabled.key = 'RemoteDisplay.vnc.enabled'
        expected.extraConfig.append(remote_display_vnc_enabled)
        remote_display_vnc_port = fake_factory.create('ns0:OptionValue')
        remote_display_vnc_port.value = 7
        remote_display_vnc_port.key = 'RemoteDisplay.vnc.port'
        expected.extraConfig.append(remote_display_vnc_port)
        remote_display_vnc_keymap = fake_factory.create('ns0:OptionValue')
        remote_display_vnc_keymap.value = 'en-us'
        remote_display_vnc_keymap.key = 'RemoteDisplay.vnc.keyMap'
        expected.extraConfig.append(remote_display_vnc_keymap)
        self.assertEqual(expected, result)
    def _create_fake_vms(self):
        # Build 10 fake VMs occupying VNC ports 5900..5909.
        fake_vms = fake.FakeRetrieveResult()
        OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
        for i in range(10):
            vm = fake.ManagedObject()
            opt_val = OptionValue(key='', value=5900 + i)
            vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
            fake_vms.add_object(vm)
        return fake_vms
    def test_get_vnc_port(self):
        # Ports 5900-5909 are taken by the fixture, so 5910 is the first free.
        fake_vms = self._create_fake_vms()
        self.flags(vnc_port=5900, group='vmware')
        self.flags(vnc_port_total=10000, group='vmware')
        actual = vm_util.get_vnc_port(
            fake.FakeObjectRetrievalSession(fake_vms))
        self.assertEqual(actual, 5910)

    def test_get_vnc_port_exhausted(self):
        # A range of exactly 10 ports is fully consumed by the fixture.
        fake_vms = self._create_fake_vms()
        self.flags(vnc_port=5900, group='vmware')
        self.flags(vnc_port_total=10, group='vmware')
        self.assertRaises(exception.ConsolePortRangeExhausted,
                          vm_util.get_vnc_port,
                          fake.FakeObjectRetrievalSession(fake_vms))
    def test_get_all_cluster_refs_by_name_none(self):
        # No objects at all -> empty mapping.
        fake_objects = fake.FakeRetrieveResult()
        refs = vm_util.get_all_cluster_refs_by_name(
            fake.FakeObjectRetrievalSession(fake_objects), ['fake_cluster'])
        self.assertEqual({}, refs)

    def test_get_all_cluster_refs_by_name_exists(self):
        # A matching cluster name yields exactly one ref.
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
        refs = vm_util.get_all_cluster_refs_by_name(
            fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
        self.assertEqual(1, len(refs))

    def test_get_all_cluster_refs_by_name_missing(self):
        # Partial objects (missing properties) must be ignored.
        fake_objects = fake.FakeRetrieveResult()
        fake_objects.add_object(partialObject(path='cluster'))
        refs = vm_util.get_all_cluster_refs_by_name(
            fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
        self.assertEqual({}, refs)
def test_propset_dict_simple(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar")])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
def test_propset_dict_complex(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar"),
DynamicProperty(name='some.thing',
val=MoRef(value='else')),
DynamicProperty(name='another.thing', val='value')])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
self.assertTrue(hasattr(propdict['some.thing'], 'value'))
self.assertEqual("else", propdict['some.thing'].value)
self.assertEqual("value", propdict['another.thing'])
    def _test_detach_virtual_disk_spec(self, destroy_disk=False):
        # The detach spec is always a 'remove' op; fileOperation='destroy'
        # appears only when the backing file should be deleted too.
        virtual_device_config = vm_util.detach_virtual_disk_spec(
            fake.FakeFactory(),
            'fake_device',
            destroy_disk)
        self.assertEqual('remove', virtual_device_config.operation)
        self.assertEqual('fake_device', virtual_device_config.device)
        self.assertEqual('ns0:VirtualDeviceConfigSpec',
                         virtual_device_config.obj_name)
        if destroy_disk:
            self.assertEqual('destroy', virtual_device_config.fileOperation)
        else:
            self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))

    def test_detach_virtual_disk_spec(self):
        self._test_detach_virtual_disk_spec(destroy_disk=False)

    def test_detach_virtual_disk_destroy_spec(self):
        self._test_detach_virtual_disk_spec(destroy_disk=True)
    def test_get_vm_create_spec(self):
        # Baseline create spec with default ExtraSpecs: no cpuAllocation.
        extra_specs = vm_util.ExtraSpecs()
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.name = self._instance.uuid
        expected.instanceUuid = self._instance.uuid
        expected.deviceChange = []
        expected.numCPUs = 2
        expected.version = None
        expected.memoryMB = 2048
        expected.guestId = 'otherGuest'
        expected.extraConfig = []
        extra_config = fake_factory.create("ns0:OptionValue")
        extra_config.value = self._instance.uuid
        extra_config.key = 'nvp.vm-uuid'
        expected.extraConfig.append(extra_config)
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.afterPowerOn = True
        expected.tools.afterResume = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.beforeGuestStandby = True
        self.assertEqual(expected, result)

    def test_get_vm_create_spec_with_allocations(self):
        # CPU limit and reservation both set -> full cpuAllocation block.
        cpu_limits = vm_util.CpuLimits(cpu_limit=7,
                                       cpu_reservation=6)
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.deviceChange = []
        expected.guestId = 'otherGuest'
        expected.instanceUuid = self._instance.uuid
        expected.memoryMB = self._instance.memory_mb
        expected.name = self._instance.uuid
        expected.numCPUs = self._instance.vcpus
        expected.version = None
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.afterPowerOn = True
        expected.tools.afterResume = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.beforeGuestStandby = True
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpu_allocation.limit = 7
        cpu_allocation.reservation = 6
        cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
        cpu_allocation.shares.level = 'normal'
        cpu_allocation.shares.shares = 0
        expected.cpuAllocation = cpu_allocation
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.key = 'nvp.vm-uuid'
        extra_config.value = self._instance.uuid
        expected.extraConfig.append(extra_config)
        self.assertEqual(expected, result)

    def test_get_vm_create_spec_with_limit(self):
        # Only a CPU limit set -> reservation defaults to 0.
        cpu_limits = vm_util.CpuLimits(cpu_limit=7)
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.instanceUuid = self._instance.uuid
        expected.name = self._instance.uuid
        expected.deviceChange = []
        expected.extraConfig = []
        extra_config = fake_factory.create("ns0:OptionValue")
        extra_config.value = self._instance.uuid
        extra_config.key = 'nvp.vm-uuid'
        expected.extraConfig.append(extra_config)
        expected.memoryMB = 2048
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        expected.version = None
        expected.guestId = 'otherGuest'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.afterPowerOn = True
        expected.tools.afterResume = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.beforeGuestStandby = True
        cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpu_allocation.limit = 7
        cpu_allocation.reservation = 0
        cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
        cpu_allocation.shares.level = 'normal'
        cpu_allocation.shares.shares = 0
        expected.cpuAllocation = cpu_allocation
        expected.numCPUs = 2
        self.assertEqual(expected, result)

    def test_get_vm_create_spec_with_share(self):
        # Named share level ('high') -> shares count stays 0.
        cpu_limits = vm_util.CpuLimits(cpu_shares_level='high')
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.instanceUuid = self._instance.uuid
        expected.name = self._instance.uuid
        expected.deviceChange = []
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.value = self._instance.uuid
        extra_config.key = 'nvp.vm-uuid'
        expected.extraConfig.append(extra_config)
        expected.memoryMB = 2048
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.type = 'instance'
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.version = None
        expected.guestId = 'otherGuest'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.beforeGuestStandby = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.afterResume = True
        expected.tools.afterPowerOn = True
        cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpu_allocation.reservation = 0
        cpu_allocation.limit = -1
        cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
        cpu_allocation.shares.level = 'high'
        cpu_allocation.shares.shares = 0
        expected.cpuAllocation = cpu_allocation
        expected.numCPUs = 2
        self.assertEqual(expected, result)

    def test_get_vm_create_spec_with_share_custom(self):
        # 'custom' share level carries an explicit shares count.
        cpu_limits = vm_util.CpuLimits(cpu_shares_level='custom',
                                       cpu_shares_share=1948)
        extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_create_spec(fake_factory,
                                            self._instance,
                                            'fake-datastore', [],
                                            extra_specs)
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
        expected.files.vmPathName = '[fake-datastore]'
        expected.instanceUuid = self._instance.uuid
        expected.name = self._instance.uuid
        expected.deviceChange = []
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.key = 'nvp.vm-uuid'
        extra_config.value = self._instance.uuid
        expected.extraConfig.append(extra_config)
        expected.memoryMB = 2048
        expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
        expected.managedBy.extensionKey = 'org.openstack.compute'
        expected.managedBy.type = 'instance'
        expected.version = None
        expected.guestId = 'otherGuest'
        expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
        expected.tools.beforeGuestStandby = True
        expected.tools.beforeGuestReboot = True
        expected.tools.beforeGuestShutdown = True
        expected.tools.afterResume = True
        expected.tools.afterPowerOn = True
        cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
        cpu_allocation.reservation = 0
        cpu_allocation.limit = -1
        cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
        cpu_allocation.shares.level = 'custom'
        cpu_allocation.shares.shares = 1948
        expected.cpuAllocation = cpu_allocation
        expected.numCPUs = 2
        self.assertEqual(expected, result)
    def test_create_vm(self):
        # Verify create_vm issues CreateVM_Task then waits for it.
        method_list = ['CreateVM_Task', 'get_dynamic_property']

        def fake_call_method(module, method, *args, **kwargs):
            # Pop expected calls in order so sequence is verified too.
            expected_method = method_list.pop(0)
            self.assertEqual(expected_method, method)
            if (expected_method == 'CreateVM_Task'):
                return 'fake_create_vm_task'
            elif (expected_method == 'get_dynamic_property'):
                task_info = mock.Mock(state="success", result="fake_vm_ref")
                return task_info
            else:
                self.fail('Should not get here....')

        def fake_wait_for_task(self, *args):
            task_info = mock.Mock(state="success", result="fake_vm_ref")
            return task_info

        session = fake.FakeSession()
        fake_call_mock = mock.Mock(side_effect=fake_call_method)
        fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
        # NOTE(review): contextlib.nested is Python 2 only; Python 3 ports
        # typically switch to test.nested or a single with-statement.
        with contextlib.nested(
                mock.patch.object(session, '_wait_for_task',
                                  fake_wait_mock),
                mock.patch.object(session, '_call_method',
                                  fake_call_mock)
        ) as (wait_for_task, call_method):
            vm_ref = vm_util.create_vm(
                session,
                self._instance,
                'fake_vm_folder',
                'fake_config_spec',
                'fake_res_pool_ref')
            self.assertEqual('fake_vm_ref', vm_ref)
            call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
                'fake_vm_folder', config='fake_config_spec',
                pool='fake_res_pool_ref')
            wait_for_task.assert_called_once_with('fake_create_vm_task')
    @mock.patch.object(vm_util.LOG, 'warning')
    def test_create_vm_invalid_guestid(self, mock_log_warn):
        """Ensure we warn when create_vm() fails after we passed an
        unrecognised guestId
        """
        # Mutable cell so the inner function can record the match (py2 has
        # no 'nonlocal').
        found = [False]

        def fake_log_warn(msg, values):
            if not isinstance(values, dict):
                return
            if values.get('ostype') == 'invalid_os_type':
                found[0] = True
        mock_log_warn.side_effect = fake_log_warn
        session = driver.VMwareAPISession()
        config_spec = vm_util.get_vm_create_spec(
            session.vim.client.factory,
            self._instance, 'fake-datastore', [],
            vm_util.ExtraSpecs(),
            os_type='invalid_os_type')
        self.assertRaises(vexc.VMwareDriverException,
                          vm_util.create_vm, session, self._instance,
                          'folder', config_spec, 'res-pool')
        self.assertTrue(found[0])
    def test_convert_vif_model(self):
        # Known network-model names map to their VMware device class names.
        expected = "VirtualE1000"
        result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
        self.assertEqual(expected, result)
        expected = "VirtualE1000e"
        result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
        self.assertEqual(expected, result)
        # Already-converted names pass through unchanged.
        types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
                 "VirtualVmxnet", "VirtualVmxnet3"]
        for type in types:
            self.assertEqual(type,
                             vm_util.convert_vif_model(type))
        self.assertRaises(exception.Invalid,
                          vm_util.convert_vif_model,
                          "InvalidVifModel")
    def test_power_on_instance_with_vm_ref(self):
        # With an explicit vm_ref, no lookup is needed.
        session = fake.FakeSession()
        with contextlib.nested(
            mock.patch.object(session, "_call_method",
                              return_value='fake-task'),
            mock.patch.object(session, "_wait_for_task"),
        ) as (fake_call_method, fake_wait_for_task):
            vm_util.power_on_instance(session, self._instance,
                                      vm_ref='fake-vm-ref')
            fake_call_method.assert_called_once_with(session.vim,
                                                     "PowerOnVM_Task",
                                                     'fake-vm-ref')
            fake_wait_for_task.assert_called_once_with('fake-task')

    def test_power_on_instance_without_vm_ref(self):
        # Without a vm_ref, get_vm_ref must be consulted first.
        session = fake.FakeSession()
        with contextlib.nested(
            mock.patch.object(vm_util, "get_vm_ref",
                              return_value='fake-vm-ref'),
            mock.patch.object(session, "_call_method",
                              return_value='fake-task'),
            mock.patch.object(session, "_wait_for_task"),
        ) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
            vm_util.power_on_instance(session, self._instance)
            fake_get_vm_ref.assert_called_once_with(session, self._instance)
            fake_call_method.assert_called_once_with(session.vim,
                                                     "PowerOnVM_Task",
                                                     'fake-vm-ref')
            fake_wait_for_task.assert_called_once_with('fake-task')

    def test_power_on_instance_with_exception(self):
        # Generic task failures must propagate to the caller.
        session = fake.FakeSession()
        with contextlib.nested(
            mock.patch.object(session, "_call_method",
                              return_value='fake-task'),
            mock.patch.object(session, "_wait_for_task",
                              side_effect=exception.NovaException('fake')),
        ) as (fake_call_method, fake_wait_for_task):
            self.assertRaises(exception.NovaException,
                              vm_util.power_on_instance,
                              session, self._instance,
                              vm_ref='fake-vm-ref')
            fake_call_method.assert_called_once_with(session.vim,
                                                     "PowerOnVM_Task",
                                                     'fake-vm-ref')
            fake_wait_for_task.assert_called_once_with('fake-task')

    def test_power_on_instance_with_power_state_exception(self):
        # An InvalidPowerStateException (already on) is swallowed.
        session = fake.FakeSession()
        with contextlib.nested(
            mock.patch.object(session, "_call_method",
                              return_value='fake-task'),
            mock.patch.object(
                session, "_wait_for_task",
                side_effect=vexc.InvalidPowerStateException),
        ) as (fake_call_method, fake_wait_for_task):
            vm_util.power_on_instance(session, self._instance,
                                      vm_ref='fake-vm-ref')
            fake_call_method.assert_called_once_with(session.vim,
                                                     "PowerOnVM_Task",
                                                     'fake-vm-ref')
            fake_wait_for_task.assert_called_once_with('fake-task')
def test_create_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with contextlib.nested(
mock.patch.object(vm_util, "get_vmdk_create_spec",
return_value='fake-spec'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_spec, fake_call_method, fake_wait_for_task):
vm_util.create_virtual_disk(session, 'fake-dc-ref',
'fake-adapter-type', 'fake-disk-type',
'fake-path', 7)
fake_get_spec.assert_called_once_with(
session.vim.client.factory, 7,
'fake-adapter-type',
'fake-disk-type')
fake_call_method.assert_called_once_with(
session.vim,
"CreateVirtualDisk_Task",
dm,
name='fake-path',
datacenter='fake-dc-ref',
spec='fake-spec')
fake_wait_for_task.assert_called_once_with('fake-task')
    def test_copy_virtual_disk(self):
        """copy_virtual_disk issues CopyVirtualDisk_Task and waits on it."""
        session = fake.FakeSession()
        dm = session.vim.service_content.virtualDiskManager
        with contextlib.nested(
            mock.patch.object(session, "_call_method",
                              return_value='fake-task'),
            mock.patch.object(session, "_wait_for_task"),
        ) as (fake_call_method, fake_wait_for_task):
            vm_util.copy_virtual_disk(session, 'fake-dc-ref',
                                      'fake-source', 'fake-dest')
            fake_call_method.assert_called_once_with(
                session.vim,
                "CopyVirtualDisk_Task",
                dm,
                sourceName='fake-source',
                sourceDatacenter='fake-dc-ref',
                destName='fake-dest')
            fake_wait_for_task.assert_called_once_with('fake-task')
def _create_fake_vm_objects(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.VirtualMachine())
return fake_objects
def test_get_values(self):
objects = self._create_fake_vm_objects()
query = vm_util.get_values_from_object_properties(
fake.FakeObjectRetrievalSession(objects), objects)
self.assertEqual('poweredOn', query['runtime.powerState'])
self.assertEqual('guestToolsRunning',
query['summary.guest.toolsRunningStatus'])
self.assertEqual('toolsOk', query['summary.guest.toolsStatus'])
    def test_reconfigure_vm(self):
        """reconfigure_vm issues ReconfigVM_Task with the spec and waits."""
        session = fake.FakeSession()
        with contextlib.nested(
            mock.patch.object(session, '_call_method',
                              return_value='fake_reconfigure_task'),
            mock.patch.object(session, '_wait_for_task')
        ) as (_call_method, _wait_for_task):
            vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
            _call_method.assert_called_once_with(mock.ANY,
                'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
            _wait_for_task.assert_called_once_with(
                'fake_reconfigure_task')
    def test_get_network_attach_config_spec_opaque(self):
        """Attach spec for an opaque (e.g. NSX) network backs the NIC with
        VirtualEthernetCardOpaqueNetworkBackingInfo.

        Builds the full expected config spec by hand and compares wholesale.
        """
        vif_info = {'network_name': 'br-int',
                    'mac_address': '00:00:00:ca:fe:01',
                    'network_ref': {'type': 'OpaqueNetwork',
                                    'network-id': 'fake-network-id',
                                    'network-type': 'opaque'},
                    'iface_id': 7,
                    'vif_model': 'VirtualE1000'}
        fake_factory = fake.FakeFactory()
        result = vm_util.get_network_attach_config_spec(
            fake_factory, vif_info, 1)
        card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        # The iface id is recorded under the nvp.iface-id.<index> extraConfig
        # key (index 1 was passed above).
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.value = vif_info['iface_id']
        extra_config.key = 'nvp.iface-id.1'
        expected.extraConfig.append(extra_config)
        expected.deviceChange = []
        device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device_change.operation = 'add'
        device = fake_factory.create('ns0:VirtualE1000')
        device.macAddress = vif_info['mac_address']
        device.addressType = 'manual'
        connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
        connectable.allowGuestControl = True
        connectable.startConnected = True
        connectable.connected = True
        device.connectable = connectable
        backing = fake_factory.create(card)
        backing.opaqueNetworkType = vif_info['network_ref']['network-type']
        backing.opaqueNetworkId = vif_info['network_ref']['network-id']
        device.backing = backing
        # -47 matches the placeholder device key the spec builder assigns
        # to newly added NICs.
        device.key = -47
        device.wakeOnLanEnabled = True
        device_change.device = device
        expected.deviceChange.append(device_change)
        self.assertEqual(expected, result)
    def test_get_network_attach_config_spec_dvs(self):
        """Attach spec for a distributed vSwitch portgroup backs the NIC with
        a DistributedVirtualPortBackingInfo carrying the dvs/dvpg pair.
        """
        vif_info = {'network_name': 'br100',
                    'mac_address': '00:00:00:ca:fe:01',
                    'network_ref': {'type': 'DistributedVirtualPortgroup',
                                    'dvsw': 'fake-network-id',
                                    'dvpg': 'fake-group'},
                    'iface_id': 7,
                    'vif_model': 'VirtualE1000'}
        fake_factory = fake.FakeFactory()
        result = vm_util.get_network_attach_config_spec(
            fake_factory, vif_info, 1)
        port = 'ns0:DistributedVirtualSwitchPortConnection'
        backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
        # Expected spec built by hand and compared wholesale.
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        expected.extraConfig = []
        extra_config = fake_factory.create('ns0:OptionValue')
        extra_config.value = vif_info['iface_id']
        extra_config.key = 'nvp.iface-id.1'
        expected.extraConfig.append(extra_config)
        expected.deviceChange = []
        device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
        device_change.operation = 'add'
        device = fake_factory.create('ns0:VirtualE1000')
        device.macAddress = vif_info['mac_address']
        # -47 matches the placeholder device key assigned to new NICs.
        device.key = -47
        device.addressType = 'manual'
        device.wakeOnLanEnabled = True
        device.backing = fake_factory.create(backing)
        device.backing.port = fake_factory.create(port)
        device.backing.port.portgroupKey = vif_info['network_ref']['dvpg']
        device.backing.port.switchUuid = vif_info['network_ref']['dvsw']
        connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
        connectable.allowGuestControl = True
        connectable.connected = True
        connectable.startConnected = True
        device.connectable = connectable
        device_change.device = device
        expected.deviceChange.append(device_change)
        self.assertEqual(expected, result)
def test_get_network_detach_config_spec(self):
fake_factory = fake.FakeFactory()
result = vm_util.get_network_detach_config_spec(
fake_factory, 'fake-device', 2)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = 'free'
extra_config.key = 'nvp.iface-id.2'
expected.extraConfig.append(extra_config)
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.device = 'fake-device'
device_change.operation = 'remove'
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
    @mock.patch.object(vm_util, "get_vm_ref")
    def test_power_off_instance(self, fake_get_ref):
        """Passing an explicit vm_ref skips the get_vm_ref lookup."""
        session = fake.FakeSession()
        with contextlib.nested(
            mock.patch.object(session, '_call_method',
                              return_value='fake-task'),
            mock.patch.object(session, '_wait_for_task')
        ) as (fake_call_method, fake_wait_for_task):
            vm_util.power_off_instance(session, self._instance, 'fake-vm-ref')
            fake_call_method.assert_called_once_with(session.vim,
                                                     "PowerOffVM_Task",
                                                     'fake-vm-ref')
            fake_wait_for_task.assert_called_once_with('fake-task')
            # The ref was supplied, so no lookup should have happened.
            self.assertFalse(fake_get_ref.called)
    @mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
    def test_power_off_instance_no_vm_ref(self, fake_get_ref):
        """Without a vm_ref the instance's ref is resolved via get_vm_ref."""
        session = fake.FakeSession()
        with contextlib.nested(
            mock.patch.object(session, '_call_method',
                              return_value='fake-task'),
            mock.patch.object(session, '_wait_for_task')
        ) as (fake_call_method, fake_wait_for_task):
            vm_util.power_off_instance(session, self._instance)
            fake_get_ref.assert_called_once_with(session, self._instance)
            fake_call_method.assert_called_once_with(session.vim,
                                                     "PowerOffVM_Task",
                                                     'fake-vm-ref')
            fake_wait_for_task.assert_called_once_with('fake-task')
    @mock.patch.object(vm_util, "get_vm_ref")
    def test_power_off_instance_with_exception(self, fake_get_ref):
        """A NovaException from the task wait is re-raised to the caller."""
        session = fake.FakeSession()
        with contextlib.nested(
            mock.patch.object(session, '_call_method',
                              return_value='fake-task'),
            mock.patch.object(session, '_wait_for_task',
                              side_effect=exception.NovaException('fake'))
        ) as (fake_call_method, fake_wait_for_task):
            self.assertRaises(exception.NovaException,
                              vm_util.power_off_instance,
                              session, self._instance, 'fake-vm-ref')
            fake_call_method.assert_called_once_with(session.vim,
                                                     "PowerOffVM_Task",
                                                     'fake-vm-ref')
            fake_wait_for_task.assert_called_once_with('fake-task')
            # Explicit vm_ref was given, so no lookup expected.
            self.assertFalse(fake_get_ref.called)
    @mock.patch.object(vm_util, "get_vm_ref")
    def test_power_off_instance_power_state_exception(self, fake_get_ref):
        """InvalidPowerStateException from the task wait is not re-raised."""
        session = fake.FakeSession()
        with contextlib.nested(
            mock.patch.object(session, '_call_method',
                              return_value='fake-task'),
            mock.patch.object(
                session, '_wait_for_task',
                side_effect=vexc.InvalidPowerStateException)
        ) as (fake_call_method, fake_wait_for_task):
            # No assertRaises: "already powered off" is treated as success.
            vm_util.power_off_instance(session, self._instance, 'fake-vm-ref')
            fake_call_method.assert_called_once_with(session.vim,
                                                     "PowerOffVM_Task",
                                                     'fake-vm-ref')
            fake_wait_for_task.assert_called_once_with('fake-task')
            self.assertFalse(fake_get_ref.called)
def test_get_vm_create_spec_updated_hw_version(self):
extra_specs = vm_util.ExtraSpecs(hw_version='vmx-08')
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
self._instance,
'fake-datastore', [],
extra_specs=extra_specs)
self.assertEqual('vmx-08', result.version)
def test_vm_create_spec_with_profile_spec(self):
datastore = ds_obj.Datastore('fake-ds-ref', 'fake-ds-name')
extra_specs = vm_util.ExtraSpecs()
create_spec = vm_util.get_vm_create_spec(fake.FakeFactory(),
self._instance,
datastore.name, [],
extra_specs,
profile_spec='fake_profile_spec')
self.assertEqual(['fake_profile_spec'], create_spec.vmProfile)
    @mock.patch.object(pbm, 'get_profile_id_by_name')
    def test_get_storage_profile_spec(self, mock_retrieve_profile_id):
        """The storage profile spec carries the looked-up profile unique id."""
        fake_profile_id = fake.DataObject()
        fake_profile_id.uniqueId = 'fake_unique_id'
        mock_retrieve_profile_id.return_value = fake_profile_id
        profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
                                                        'fake_policy')
        self.assertEqual('ns0:VirtualMachineDefinedProfileSpec',
                         profile_spec.obj_name)
        self.assertEqual(fake_profile_id.uniqueId, profile_spec.profileId)
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_storage_spec_empty_profile(self, mock_retrieve_profile_id):
mock_retrieve_profile_id.return_value = None
profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
'fake_policy')
self.assertIsNone(profile_spec)
def test_get_ephemeral_name(self):
filename = vm_util.get_ephemeral_name(0)
self.assertEqual('ephemeral_0.vmdk', filename)
def test_detach_and_delete_devices_config_spec(self):
fake_devices = ['device1', 'device2']
fake_factory = fake.FakeFactory()
result = vm_util._detach_and_delete_devices_config_spec(fake_factory,
fake_devices)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
device1 = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device1.device = 'device1'
device1.operation = 'remove'
device1.fileOperation = 'destroy'
expected.deviceChange.append(device1)
device2 = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device2.device = 'device2'
device2.operation = 'remove'
device2.fileOperation = 'destroy'
expected.deviceChange.append(device2)
self.assertEqual(expected, result)
@mock.patch.object(vm_util, 'reconfigure_vm')
def test_detach_devices_from_vm(self, mock_reconfigure):
fake_devices = ['device1', 'device2']
session = fake.FakeSession()
vm_util.detach_devices_from_vm(session,
'fake-ref',
fake_devices)
mock_reconfigure.assert_called_once_with(session, 'fake-ref', mock.ANY)
    def test_get_vm_boot_spec(self):
        """The boot spec puts the given disk first in the VM boot order."""
        disk = fake.VirtualDisk()
        disk.key = 7
        fake_factory = fake.FakeFactory()
        result = vm_util.get_vm_boot_spec(fake_factory,
                                          disk)
        # Expected: a config spec whose bootOptions order the disk's key.
        expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
        boot_disk = fake_factory.create(
            'ns0:VirtualMachineBootOptionsBootableDiskDevice')
        boot_disk.deviceKey = disk.key
        boot_options = fake_factory.create('ns0:VirtualMachineBootOptions')
        boot_options.bootOrder = [boot_disk]
        expected.bootOptions = boot_options
        self.assertEqual(expected, result)
    def _get_devices(self, filename):
        """Return a fake VirtualDevice array whose disk uses ``filename``."""
        devices = fake._create_array_of_type('VirtualDevice')
        devices.VirtualDevice = self._vmdk_path_and_adapter_type_devices(
            filename)
        return devices
def test_find_rescue_device(self):
filename = '[test_datastore] uuid/uuid-rescue.vmdk'
devices = self._get_devices(filename)
device = vm_util.find_rescue_device(devices, self._instance)
self.assertEqual(filename, device.backing.fileName)
def test_find_rescue_device_not_found(self):
filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._get_devices(filename)
self.assertRaises(exception.NotFound,
vm_util.find_rescue_device,
devices,
self._instance)
def test_validate_cpu_limits(self):
cpu_limits = vm_util.CpuLimits(cpu_shares_level='high',
cpu_shares_share=1948)
self.assertRaises(exception.InvalidInput,
cpu_limits.validate)
cpu_limits = vm_util.CpuLimits(cpu_shares_level='fira')
self.assertRaises(exception.InvalidInput,
cpu_limits.validate)
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
    """Tests vm_util host lookups against a fake vim backend."""

    # N.B. Mocking on the class only mocks test_*(), but we need
    # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
    # setUp causes object initialisation to fail. Not mocking in tests results
    # in vim calls not using FakeVim.
    @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
    def setUp(self):
        super(VMwareVMUtilGetHostRefTestCase, self).setUp()
        fake.reset()
        vm_util.vm_refs_cache_reset()
        self.session = driver.VMwareAPISession()
        # Create a fake VirtualMachine running on a known host
        # NOTE(review): dict.keys()[0] is Python 2 only; a Python 3 port
        # would need list(...)[0] here.
        self.host_ref = fake._db_content['HostSystem'].keys()[0]
        self.vm_ref = fake.create_vm(host_ref=self.host_ref)

    @mock.patch.object(vm_util, 'get_vm_ref')
    def test_get_host_ref_for_vm(self, mock_get_vm_ref):
        """get_host_ref_for_vm returns the host the fake VM was placed on."""
        mock_get_vm_ref.return_value = self.vm_ref
        ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')
        mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
        self.assertEqual(self.host_ref, ret)

    @mock.patch.object(vm_util, 'get_vm_ref')
    def test_get_host_name_for_vm(self, mock_get_vm_ref):
        """get_host_name_for_vm returns the name of the VM's host."""
        mock_get_vm_ref.return_value = self.vm_ref
        host = fake._get_object(self.host_ref)
        ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
        mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
        self.assertEqual(host.name, ret)
| blueboxgroup/nova | nova/tests/unit/virt/vmwareapi/test_vm_util.py | Python | apache-2.0 | 58,050 |
package com.example.propertyanimation.chat;
import android.content.Intent;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.text.TextUtils;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.EditText;
import android.widget.Toast;
import com.example.propertyanimation.R;
import com.hyphenate.EMCallBack;
import com.hyphenate.chat.EMClient;
/**
 * Entry activity: checks the Hyphenate (Easemob) chat SDK login state,
 * routing to {@link LoginActivity} when no valid session exists, and
 * otherwise letting the user pick a peer to chat with or sign out.
 */
public class SplashActivity extends AppCompatActivity {

    // Input box for the username of the peer to chat with.
    private EditText mChatIdEdit;
    // Button that starts a chat with the entered username.
    private Button mStartChatBtn;
    // Button that signs the current user out.
    private Button mSignOutBtn;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // If the SDK has no previously logged-in session (user signed out or
        // was kicked), bounce straight to the login screen.
        if (!EMClient.getInstance().isLoggedInBefore()) {
            Intent intent = new Intent(SplashActivity.this, LoginActivity.class);
            startActivity(intent);
            finish();
            return;
        }
        setContentView(R.layout.activity_splash);
        initView();
    }

    /**
     * Wires up the views and their click handlers.
     */
    private void initView() {
        mChatIdEdit = (EditText) findViewById(R.id.ec_edit_chat_id);
        mStartChatBtn = (Button) findViewById(R.id.ec_btn_start_chat);
        mStartChatBtn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Read the username of the peer to chat with.
                String chatId = mChatIdEdit.getText().toString().trim();
                if (!TextUtils.isEmpty(chatId)) {
                    // Username of the currently logged-in user.
                    String currUsername = EMClient.getInstance().getCurrentUser();
                    if (chatId.equals(currUsername)) {
                        // "Cannot chat with yourself"
                        Toast.makeText(SplashActivity.this, "不能和自己聊天", Toast.LENGTH_SHORT).show();
                        return;
                    }
                    // Jump to the chat screen and start chatting.
                    // NOTE(review): this Intent targets SplashActivity itself,
                    // which just relaunches this screen; the surrounding flow
                    // suggests it should open a chat activity instead —
                    // confirm the intended target class.
                    Intent intent = new Intent(SplashActivity.this, SplashActivity.class);
                    intent.putExtra("ec_chat_id", chatId);
                    startActivity(intent);
                } else {
                    // "Username must not be empty"
                    Toast.makeText(SplashActivity.this, "Username 不能为空", Toast.LENGTH_LONG).show();
                }
            }
        });
        mSignOutBtn = (Button) findViewById(R.id.ec_btn_sign_out);
        mSignOutBtn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                signOut();
            }
        });
    }

    /**
     * Signs the current user out of the chat SDK and closes the app.
     */
    private void signOut() {
        // First argument: whether to unbind the push token. Pass false when
        // push is not used or when the user was kicked off.
        EMClient.getInstance().logout(false, new EMCallBack() {
            @Override
            public void onSuccess() {
                Log.i("lzan13", "logout success");
                // Logout succeeded; close the app.
                // NOTE(review): SDK callbacks may not arrive on the UI
                // thread — confirm calling finish() here is safe.
                finish();
            }

            @Override
            public void onError(int i, String s) {
                Log.i("lzan13", "logout error " + i + " - " + s);
            }

            @Override
            public void onProgress(int i, String s) {
            }
        });
    }
}
| lwd1815/Transition | propertyanimation/src/main/java/com/example/propertyanimation/chat/SplashActivity.java | Java | apache-2.0 | 3,103 |
<?php
/**
 * Data-access interface for company records.
 */
interface CompanyDAO {
    /**
     * Looks up a company by its identifier.
     *
     * @param mixed $id company identifier
     * @return mixed the matching company record (implementation-defined)
     */
    public function getById($id);
}
?> | fkmhrk/OpenInvoice | php/libs/model/CompanyDAO.php | PHP | apache-2.0 | 67 |
/*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okhttp3.benchmarks;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.TimeUnit;
import java.util.zip.GZIPInputStream;
import okhttp3.HttpUrl;
import okhttp3.internal.tls.SslClient;
import org.apache.http.Header;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.conn.ClientConnectionManager;
import org.apache.http.conn.scheme.Scheme;
import org.apache.http.conn.ssl.SSLSocketFactory;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.conn.PoolingClientConnectionManager;
/** Benchmark Apache HTTP client. */
class ApacheHttpClient extends SynchronousHttpClient {
  private static final boolean VERBOSE = false;

  private HttpClient client;

  @Override public void prepare(Benchmark benchmark) {
    super.prepare(benchmark);
    // Pooling connection manager so connections are reused across requests.
    ClientConnectionManager connectionManager = new PoolingClientConnectionManager();
    if (benchmark.tls) {
      // Register a scheme that trusts the localhost test certificate so
      // HTTPS benchmarks can run against the local server.
      SslClient sslClient = SslClient.localhost();
      connectionManager.getSchemeRegistry().register(
          new Scheme("https", 443, new SSLSocketFactory(sslClient.sslContext)));
    }
    client = new DefaultHttpClient(connectionManager);
  }

  @Override public Runnable request(HttpUrl url) {
    return new ApacheHttpClientRequest(url);
  }

  /** One GET against {@code url}: reads, optionally gunzips, and discards the body. */
  class ApacheHttpClientRequest implements Runnable {
    private final HttpUrl url;

    ApacheHttpClientRequest(HttpUrl url) {
      this.url = url;
    }

    public void run() {
      long start = System.nanoTime();
      try {
        HttpResponse response = client.execute(new HttpGet(url.toString()));
        InputStream in = response.getEntity().getContent();
        // Apache HttpClient does not transparently decode gzip here, so
        // wrap the stream by hand when the server compressed the body.
        Header contentEncoding = response.getFirstHeader("Content-Encoding");
        if (contentEncoding != null && contentEncoding.getValue().equals("gzip")) {
          in = new GZIPInputStream(in);
        }
        long total = readAllAndClose(in);
        long finish = System.nanoTime();
        if (VERBOSE) {
          System.out.println(String.format("Transferred % 8d bytes in %4d ms",
              total, TimeUnit.NANOSECONDS.toMillis(finish - start)));
        }
      } catch (IOException e) {
        // Benchmark harness: report and keep going rather than abort.
        System.out.println("Failed: " + e);
      }
    }
  }
}
| weiwenqiang/GitHub | expert/okhttp/benchmarks/src/main/java/okhttp3/benchmarks/ApacheHttpClient.java | Java | apache-2.0 | 2,912 |
/*
* Copyright 2014 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License, version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.netty.handler.codec.http2;
import static io.netty.handler.codec.http2.Http2CodecUtil.CONNECTION_STREAM_ID;
import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_WINDOW_SIZE;
import static io.netty.handler.codec.http2.Http2CodecUtil.MAX_INITIAL_WINDOW_SIZE;
import static io.netty.handler.codec.http2.Http2CodecUtil.MIN_INITIAL_WINDOW_SIZE;
import static io.netty.handler.codec.http2.Http2Error.FLOW_CONTROL_ERROR;
import static io.netty.handler.codec.http2.Http2Error.INTERNAL_ERROR;
import static io.netty.handler.codec.http2.Http2Exception.connectionError;
import static io.netty.handler.codec.http2.Http2Exception.streamError;
import static io.netty.util.internal.ObjectUtil.checkNotNull;
import static java.lang.Math.max;
import static java.lang.Math.min;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http2.Http2Exception.CompositeStreamException;
import io.netty.handler.codec.http2.Http2Exception.StreamException;
import io.netty.util.internal.PlatformDependent;
/**
* Basic implementation of {@link Http2LocalFlowController}.
*/
public class DefaultHttp2LocalFlowController implements Http2LocalFlowController {
/**
* The default ratio of window size to initial window size below which a {@code WINDOW_UPDATE}
* is sent to expand the window.
*/
public static final float DEFAULT_WINDOW_UPDATE_RATIO = 0.5f;
private final Http2Connection connection;
private final Http2FrameWriter frameWriter;
private final Http2Connection.PropertyKey stateKey;
private ChannelHandlerContext ctx;
private volatile float windowUpdateRatio;
private volatile int initialWindowSize = DEFAULT_WINDOW_SIZE;
public DefaultHttp2LocalFlowController(Http2Connection connection, Http2FrameWriter frameWriter) {
this(connection, frameWriter, DEFAULT_WINDOW_UPDATE_RATIO);
}
public DefaultHttp2LocalFlowController(Http2Connection connection,
Http2FrameWriter frameWriter, float windowUpdateRatio) {
this.connection = checkNotNull(connection, "connection");
this.frameWriter = checkNotNull(frameWriter, "frameWriter");
windowUpdateRatio(windowUpdateRatio);
// Add a flow state for the connection.
stateKey = connection.newKey();
connection.connectionStream()
.setProperty(stateKey, new DefaultState(connection.connectionStream(), initialWindowSize));
// Register for notification of new streams.
connection.addListener(new Http2ConnectionAdapter() {
@Override
public void onStreamAdded(Http2Stream stream) {
// Unconditionally used the reduced flow control state because it requires no object allocation
// and the DefaultFlowState will be allocated in onStreamActive.
stream.setProperty(stateKey, REDUCED_FLOW_STATE);
}
@Override
public void onStreamActive(Http2Stream stream) {
// Need to be sure the stream's initial window is adjusted for SETTINGS
// frames which may have been exchanged while it was in IDLE
stream.setProperty(stateKey, new DefaultState(stream, initialWindowSize));
}
@Override
public void onStreamClosed(Http2Stream stream) {
try {
// When a stream is closed, consume any remaining bytes so that they
// are restored to the connection window.
FlowState state = state(stream);
int unconsumedBytes = state.unconsumedBytes();
if (ctx != null && unconsumedBytes > 0) {
connectionState().consumeBytes(ctx, unconsumedBytes);
state.consumeBytes(ctx, unconsumedBytes);
}
} catch (Http2Exception e) {
PlatformDependent.throwException(e);
} finally {
// Unconditionally reduce the amount of memory required for flow control because there is no
// object allocation costs associated with doing so and the stream will not have any more
// local flow control state to keep track of anymore.
stream.setProperty(stateKey, REDUCED_FLOW_STATE);
}
}
});
}
@Override
public void initialWindowSize(int newWindowSize) throws Http2Exception {
int delta = newWindowSize - initialWindowSize;
initialWindowSize = newWindowSize;
WindowUpdateVisitor visitor = new WindowUpdateVisitor(delta);
connection.forEachActiveStream(visitor);
visitor.throwIfError();
}
@Override
public int initialWindowSize() {
return initialWindowSize;
}
@Override
public int windowSize(Http2Stream stream) {
return state(stream).windowSize();
}
@Override
public int initialWindowSize(Http2Stream stream) {
return state(stream).initialWindowSize();
}
@Override
public void incrementWindowSize(ChannelHandlerContext ctx, Http2Stream stream, int delta) throws Http2Exception {
checkNotNull(ctx, "ctx");
FlowState state = state(stream);
// Just add the delta to the stream-specific initial window size so that the next time the window
// expands it will grow to the new initial size.
state.incrementInitialStreamWindow(delta);
state.writeWindowUpdateIfNeeded(ctx);
}
@Override
public boolean consumeBytes(ChannelHandlerContext ctx, Http2Stream stream, int numBytes)
throws Http2Exception {
if (numBytes < 0) {
throw new IllegalArgumentException("numBytes must not be negative");
}
if (numBytes == 0) {
return false;
}
// Streams automatically consume all remaining bytes when they are closed, so just ignore
// if already closed.
if (stream != null && !isClosed(stream)) {
if (stream.id() == CONNECTION_STREAM_ID) {
throw new UnsupportedOperationException("Returning bytes for the connection window is not supported");
}
boolean windowUpdateSent = connectionState().consumeBytes(ctx, numBytes);
windowUpdateSent |= state(stream).consumeBytes(ctx, numBytes);
return windowUpdateSent;
}
return false;
}
@Override
public int unconsumedBytes(Http2Stream stream) {
return state(stream).unconsumedBytes();
}
private static void checkValidRatio(float ratio) {
if (Double.compare(ratio, 0.0) <= 0 || Double.compare(ratio, 1.0) >= 0) {
throw new IllegalArgumentException("Invalid ratio: " + ratio);
}
}
/**
* The window update ratio is used to determine when a window update must be sent. If the ratio
* of bytes processed since the last update has meet or exceeded this ratio then a window update will
* be sent. This is the global window update ratio that will be used for new streams.
* @param ratio the ratio to use when checking if a {@code WINDOW_UPDATE} is determined necessary for new streams.
* @throws IllegalArgumentException If the ratio is out of bounds (0, 1).
*/
public void windowUpdateRatio(float ratio) {
checkValidRatio(ratio);
windowUpdateRatio = ratio;
}
/**
* The window update ratio is used to determine when a window update must be sent. If the ratio
* of bytes processed since the last update has meet or exceeded this ratio then a window update will
* be sent. This is the global window update ratio that will be used for new streams.
*/
public float windowUpdateRatio() {
return windowUpdateRatio;
}
/**
* The window update ratio is used to determine when a window update must be sent. If the ratio
* of bytes processed since the last update has meet or exceeded this ratio then a window update will
* be sent. This window update ratio will only be applied to {@code streamId}.
* <p>
* Note it is the responsibly of the caller to ensure that the the
* initial {@code SETTINGS} frame is sent before this is called. It would
* be considered a {@link Http2Error#PROTOCOL_ERROR} if a {@code WINDOW_UPDATE}
* was generated by this method before the initial {@code SETTINGS} frame is sent.
* @param ctx the context to use if a {@code WINDOW_UPDATE} is determined necessary.
* @param stream the stream for which {@code ratio} applies to.
* @param ratio the ratio to use when checking if a {@code WINDOW_UPDATE} is determined necessary.
* @throws Http2Exception If a protocol-error occurs while generating {@code WINDOW_UPDATE} frames
*/
public void windowUpdateRatio(ChannelHandlerContext ctx, Http2Stream stream, float ratio) throws Http2Exception {
checkValidRatio(ratio);
FlowState state = state(stream);
state.windowUpdateRatio(ratio);
state.writeWindowUpdateIfNeeded(ctx);
}
/**
* The window update ratio is used to determine when a window update must be sent. If the ratio
* of bytes processed since the last update has meet or exceeded this ratio then a window update will
* be sent. This window update ratio will only be applied to {@code streamId}.
* @throws Http2Exception If no stream corresponding to {@code stream} could be found.
*/
public float windowUpdateRatio(Http2Stream stream) throws Http2Exception {
return state(stream).windowUpdateRatio();
}
@Override
public void receiveFlowControlledFrame(ChannelHandlerContext ctx, Http2Stream stream, ByteBuf data,
int padding, boolean endOfStream) throws Http2Exception {
this.ctx = checkNotNull(ctx, "ctx");
int dataLength = data.readableBytes() + padding;
// Apply the connection-level flow control
FlowState connectionState = connectionState();
connectionState.receiveFlowControlledFrame(dataLength);
if (stream != null && !isClosed(stream)) {
// Apply the stream-level flow control
FlowState state = state(stream);
state.endOfStream(endOfStream);
state.receiveFlowControlledFrame(dataLength);
} else if (dataLength > 0) {
// Immediately consume the bytes for the connection window.
connectionState.consumeBytes(ctx, dataLength);
}
}
private FlowState connectionState() {
return connection.connectionStream().getProperty(stateKey);
}
private FlowState state(Http2Stream stream) {
checkNotNull(stream, "stream");
return stream.getProperty(stateKey);
}
private static boolean isClosed(Http2Stream stream) {
return stream.state() == Http2Stream.State.CLOSED;
}
/**
* Flow control window state for an individual stream.
*/
private final class DefaultState implements FlowState {
private final Http2Stream stream;
/**
* The actual flow control window that is decremented as soon as {@code DATA} arrives.
*/
private int window;
/**
* A view of {@link #window} that is used to determine when to send {@code WINDOW_UPDATE}
* frames. Decrementing this window for received {@code DATA} frames is delayed until the
* application has indicated that the data has been fully processed. This prevents sending
* a {@code WINDOW_UPDATE} until the number of processed bytes drops below the threshold.
*/
private int processedWindow;
/**
* This is what is used to determine how many bytes need to be returned relative to {@link #processedWindow}.
* Each stream has their own initial window size.
*/
private volatile int initialStreamWindowSize;
/**
* This is used to determine when {@link #processedWindow} is sufficiently far away from
* {@link #initialStreamWindowSize} such that a {@code WINDOW_UPDATE} should be sent.
* Each stream has their own window update ratio.
*/
private volatile float streamWindowUpdateRatio;
private int lowerBound;
private boolean endOfStream;
public DefaultState(Http2Stream stream, int initialWindowSize) {
this.stream = stream;
window(initialWindowSize);
streamWindowUpdateRatio = windowUpdateRatio;
}
@Override
public void window(int initialWindowSize) {
window = processedWindow = initialStreamWindowSize = initialWindowSize;
}
@Override
public int windowSize() {
return window;
}
@Override
public int initialWindowSize() {
return initialStreamWindowSize;
}
@Override
public void endOfStream(boolean endOfStream) {
this.endOfStream = endOfStream;
}
@Override
public float windowUpdateRatio() {
return streamWindowUpdateRatio;
}
@Override
public void windowUpdateRatio(float ratio) {
streamWindowUpdateRatio = ratio;
}
@Override
public void incrementInitialStreamWindow(int delta) {
// Clip the delta so that the resulting initialStreamWindowSize falls within the allowed range.
int newValue = (int) min(MAX_INITIAL_WINDOW_SIZE,
max(MIN_INITIAL_WINDOW_SIZE, initialStreamWindowSize + (long) delta));
delta = newValue - initialStreamWindowSize;
initialStreamWindowSize += delta;
}
@Override
public void incrementFlowControlWindows(int delta) throws Http2Exception {
if (delta > 0 && window > MAX_INITIAL_WINDOW_SIZE - delta) {
throw streamError(stream.id(), FLOW_CONTROL_ERROR,
"Flow control window overflowed for stream: %d", stream.id());
}
window += delta;
processedWindow += delta;
lowerBound = delta < 0 ? delta : 0;
}
@Override
public void receiveFlowControlledFrame(int dataLength) throws Http2Exception {
assert dataLength >= 0;
// Apply the delta. Even if we throw an exception we want to have taken this delta into account.
window -= dataLength;
// Window size can become negative if we sent a SETTINGS frame that reduces the
// size of the transfer window after the peer has written data frames.
// The value is bounded by the length that SETTINGS frame decrease the window.
// This difference is stored for the connection when writing the SETTINGS frame
// and is cleared once we send a WINDOW_UPDATE frame.
if (window < lowerBound) {
throw streamError(stream.id(), FLOW_CONTROL_ERROR,
"Flow control window exceeded for stream: %d", stream.id());
}
}
// Marks delta bytes as processed by the application; the processed window may
// never drop below the actual flow control window.
private void returnProcessedBytes(int delta) throws Http2Exception {
if (processedWindow - delta < window) {
throw streamError(stream.id(), INTERNAL_ERROR,
"Attempting to return too many bytes for stream %d", stream.id());
}
processedWindow -= delta;
}
// Consumes numBytes on behalf of the application and emits a WINDOW_UPDATE if
// the processed window has fallen below the configured threshold.
@Override
public boolean consumeBytes(ChannelHandlerContext ctx, int numBytes) throws Http2Exception {
// Return the bytes processed and update the window.
returnProcessedBytes(numBytes);
return writeWindowUpdateIfNeeded(ctx);
}
// Bytes received from the peer that the application has not yet consumed.
@Override
public int unconsumedBytes() {
return processedWindow - window;
}
@Override
public boolean writeWindowUpdateIfNeeded(ChannelHandlerContext ctx) throws Http2Exception {
// No updates after end-of-stream, or before the window has been initialized.
if (endOfStream || initialStreamWindowSize <= 0) {
return false;
}
// Send an update once the processed window drops to the configured fraction
// of the initial window.
int threshold = (int) (initialStreamWindowSize * streamWindowUpdateRatio);
if (processedWindow <= threshold) {
writeWindowUpdate(ctx);
return true;
}
return false;
}
/**
 * Called to perform a window update for this stream (or connection). Updates the window size back
 * to the size of the initial window and sends a window update frame to the remote endpoint.
 */
private void writeWindowUpdate(ChannelHandlerContext ctx) throws Http2Exception {
// Expand the window for this stream back to the size of the initial window.
int deltaWindowSize = initialStreamWindowSize - processedWindow;
try {
incrementFlowControlWindows(deltaWindowSize);
} catch (Throwable t) {
// Overflow here indicates inconsistent internal accounting, not a peer error.
throw connectionError(INTERNAL_ERROR, t,
"Attempting to return too many bytes for stream %d", stream.id());
}
// Send a window update for the stream/connection.
frameWriter.writeWindowUpdate(ctx, stream.id(), deltaWindowSize, ctx.newPromise());
}
}
/**
 * The local flow control state used for a stream in a state where flow controlled
 * frames can no longer be exchanged.
 * NOTE(review): the original comment's double negative ("not in a state where ...
 * cannot be exchanged") was ambiguous -- confirm against the stream lifecycle.
 */
private static final FlowState REDUCED_FLOW_STATE = new FlowState() {
// All window queries report 0 because no flow controlled frames can arrive.
@Override
public int windowSize() {
return 0;
}
@Override
public int initialWindowSize() {
return 0;
}
@Override
public void window(int initialWindowSize) {
throw new UnsupportedOperationException();
}
@Override
public void incrementInitialStreamWindow(int delta) {
// This operation needs to be supported during the initial settings exchange when
// the peer has not yet acknowledged this peer being activated.
}
@Override
public boolean writeWindowUpdateIfNeeded(ChannelHandlerContext ctx) throws Http2Exception {
throw new UnsupportedOperationException();
}
// Consuming is a harmless no-op in this state; no WINDOW_UPDATE is written.
@Override
public boolean consumeBytes(ChannelHandlerContext ctx, int numBytes) throws Http2Exception {
return false;
}
@Override
public int unconsumedBytes() {
return 0;
}
@Override
public float windowUpdateRatio() {
throw new UnsupportedOperationException();
}
@Override
public void windowUpdateRatio(float ratio) {
throw new UnsupportedOperationException();
}
@Override
public void receiveFlowControlledFrame(int dataLength) throws Http2Exception {
throw new UnsupportedOperationException();
}
@Override
public void incrementFlowControlWindows(int delta) throws Http2Exception {
// This operation needs to be supported during the initial settings exchange when
// the peer has not yet acknowledged this peer being activated.
}
@Override
public void endOfStream(boolean endOfStream) {
throw new UnsupportedOperationException();
}
};
/**
 * An abstraction which provides specific extensions used by local flow control.
 */
private interface FlowState {
/**
 * The current size of the flow control window.
 */
int windowSize();
/**
 * The initial size of the flow control window.
 */
int initialWindowSize();
/**
 * Reset the current, processed, and initial windows to {@code initialWindowSize}.
 */
void window(int initialWindowSize);
/**
 * Increment the initial window size for this stream.
 * @param delta The amount to increase the initial window size by.
 */
void incrementInitialStreamWindow(int delta);
/**
 * Updates the flow control window for this stream if it is appropriate.
 *
 * @return true if {@code WINDOW_UPDATE} was written, false otherwise.
 */
boolean writeWindowUpdateIfNeeded(ChannelHandlerContext ctx) throws Http2Exception;
/**
 * Indicates that the application has consumed {@code numBytes} from the connection or stream and is
 * ready to receive more data.
 *
 * @param ctx the channel handler context to use when sending a {@code WINDOW_UPDATE} if appropriate
 * @param numBytes the number of bytes to be returned to the flow control window.
 * @return true if {@code WINDOW_UPDATE} was written, false otherwise.
 * @throws Http2Exception
 */
boolean consumeBytes(ChannelHandlerContext ctx, int numBytes) throws Http2Exception;
/**
 * The number of bytes received but not yet consumed by the application.
 */
int unconsumedBytes();
/**
 * The ratio of the initial window below which a {@code WINDOW_UPDATE} is sent.
 */
float windowUpdateRatio();
/**
 * Sets the {@code WINDOW_UPDATE} threshold ratio.
 */
void windowUpdateRatio(float ratio);
/**
 * A flow control event has occurred and we should decrement the amount of available bytes for this stream.
 * @param dataLength The amount of data for which this stream is no longer eligible to use for flow control.
 * @throws Http2Exception If too much data is used relative to how much is available.
 */
void receiveFlowControlledFrame(int dataLength) throws Http2Exception;
/**
 * Increment the windows which are used to determine how many bytes have been processed.
 * @param delta The amount to increment the window by.
 * @throws Http2Exception if integer overflow occurs on the window.
 */
void incrementFlowControlWindows(int delta) throws Http2Exception;
/**
 * Record whether the end of stream has been observed for this stream.
 */
void endOfStream(boolean endOfStream);
}
/**
* Provides a means to iterate over all active streams and increment the flow control windows.
*/
private final class WindowUpdateVisitor implements Http2StreamVisitor {
private CompositeStreamException compositeException;
private final int delta;
public WindowUpdateVisitor(int delta) {
this.delta = delta;
}
@Override
public boolean visit(Http2Stream stream) throws Http2Exception {
try {
// Increment flow control window first so state will be consistent if overflow is detected.
FlowState state = state(stream);
state.incrementFlowControlWindows(delta);
state.incrementInitialStreamWindow(delta);
} catch (StreamException e) {
if (compositeException == null) {
compositeException = new CompositeStreamException(e.error(), 4);
}
compositeException.add(e);
}
return true;
}
public void throwIfError() throws CompositeStreamException {
if (compositeException != null) {
throw compositeException;
}
}
}
}
| xiexingguang/netty | codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2LocalFlowController.java | Java | apache-2.0 | 23,640 |
/*
* Copyright 2013 Christof Lemke
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package xml.entity.immutableelement;
import javax.annotation.Nullable;
import xml.entity.select.Selector;
import com.google.common.base.Preconditions;
/**
 * Immutable element representing an XML attribute.  Attribute names are stored
 * with an "@" prefix to distinguish them from element names.
 */
class Attribute extends AbstractElement implements ImmutableElement
{
    /** The attribute's value; never null (checked in the constructor). */
    private final String value;

    Attribute(final String name, final String value, final Selector selector)
    {
        super("@" + name, selector);
        Preconditions.checkNotNull(value);
        this.value = value;
    }

    @Override
    @Nullable
    public String value()
    {
        return value;
    }

    @Override
    public String toString()
    {
        final StringBuilder text = new StringBuilder(name());
        text.append("=").append(value);
        return text.toString();
    }
}
| christoflemke/xml.entity | src/xml/entity/immutableelement/Attribute.java | Java | apache-2.0 | 1,226 |
// Copyright 2015 Patrick Putnam
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef INDIVIDUAL_GENOTYPER_HPP_
#define INDIVIDUAL_GENOTYPER_HPP_
#include <vector>
#include <algorithm>
#include "iterator_helper.hpp"
// Binary functor returning the sum of its two operands.
// Note: both operands and the result share the same deduced type.
struct add {
    template < class T >
    T operator()( T lhs, T rhs ) {
        return lhs + rhs;
    }
};
// Maps an individual's sequence(s) onto a vector of trait values.
//
//   SetType   - collection consulted during genotyping (stored by pointer)
//   ValueType - numeric type of a computed trait value
//   OP        - binary reduction used to combine per-allele contributions
template < class SetType, class ValueType = double, class OP = add >
class individual_genotyper {
public:
    // BUGFIX: the original alias referred to an undeclared template parameter
    // "ResultType" and omitted OP; use this template's actual parameters.
    typedef individual_genotyper< SetType, ValueType, OP > self_type;
    typedef ValueType value_type;
    typedef SetType set_type;
    typedef typename set_type::iterator iterator;

    // Holds one value per trait; iterable for downstream consumers.
    class result_type {
    public:
        typedef std::vector< value_type > values_type;
        typedef typename values_type::iterator iterator;
        typedef typename values_type::const_iterator citerator;

        friend class individual_genotyper< SetType, ValueType, OP >;

        result_type ( size_t n = 1 ) : m_values( n ) {}
        result_type ( const result_type & r ) : m_values( r.m_values ) {}

        iterator begin() {
            return m_values.begin();
        }
        citerator begin() const {
            return m_values.begin();
        }

        iterator end() {
            return m_values.end();
        }
        citerator end() const {
            return m_values.end();
        }
    protected:
        values_type m_values;
    };

    individual_genotyper( set_type & s, size_t n = 1 ) : m_set( &s ), m_nTraits(n) {}

    // Genotype a single sequence, producing one value per trait.
    template < class Sequence >
    result_type operator()( Sequence & seq ) {
        typedef iterator_helper< Sequence > allele_helper;
        typedef typename allele_helper::type allele_iterator;

        allele_iterator first = allele_helper::make_first( seq ), last = allele_helper::make_last( seq );

        // BUGFIX: the original declared a result_type return value but fell off
        // the end of the function (undefined behavior); return an explicit result.
        result_type res( m_nTraits );
        // NOTE(review): "weight" is not declared anywhere in this header; the
        // intended visitor presumably accumulates allele weights from *m_set via
        // OP into res.  TODO confirm the intended functor before relying on this.
        std::for_each( first, last, weight );
        return res;
    }

protected:
    set_type * m_set;
    size_t m_nTraits;
};
#endif // INDIVIDUAL_GENOTYPER_HPP_
| putnampp/clotho | include/clotho/genetics/individual_genotyper.hpp | C++ | apache-2.0 | 2,535 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using System.Web.UI.HtmlControls;
using RandomSchool.Extenders;
using RandomSchool.Repositories;
using RandomSchool.Filters;
using ScaffoldFilter;
namespace RandomSchool.Maintain.vPerson
{
    // Code-behind for the Person maintenance list page.  Paging, sorting, and
    // filter state are persisted in Session under "Person*" keys so the view
    // survives navigation away and back.
    public partial class Default : System.Web.UI.Page
    {
        private PersonRepository<RandomSchool.Models.Person, int> _repository = new PersonRepository<RandomSchool.Models.Person, int>();
        // Accumulates field-name -> selected-value filter pairs for the session.
        Dictionary<string, string> FilterDefaults = new Dictionary<string, string>();
        protected void Page_Load(object sender, EventArgs e)
        {
            lvPerson.SetDataMethodsObject(_repository);
            if (!IsPostBack)
            {
                // Restore the pager to the session's saved page/size, defaulting to page 0, size 10.
                DataPager dp = (DataPager)lvPerson.FindControl("dpPerson");
                if (dp != null)
                {
                    if (Session["PersonCurrentPage"] != null && Session["PersonPageSize"] != null) {
                        dp.SetPageProperties(Convert.ToInt32(Session["PersonCurrentPage"]), Convert.ToInt32(Session["PersonPageSize"]), true);
                    }
                    else {
                        dp.SetPageProperties(0, 10, true);
                    }
                }
            }
        }
        // Applies a new page size and rewinds to the first page.
        protected void ddlPageSize_SelectedIndexChanged(object sender, EventArgs e)
        {
            DropDownList ddl = ((DropDownList)sender);
            DataPager dp = (DataPager)lvPerson.FindControl("dpPerson");
            dp.PageSize = Convert.ToInt32(ddl.SelectedValue);
            Session["PersonPageSize"] = dp.PageSize;
            dp.SetPageProperties(0, Convert.ToInt32(Session["PersonPageSize"]), true);
        }
        // Saves the sort expression/direction and resets to the first page.
        protected void lvPerson_Sorting(object sender, ListViewSortEventArgs e)
        {
            Session["PersonSortExpression"] = e.SortExpression;
            Session["PersonSortDirection"] = e.SortDirection;
            Session["PersonCurrentPage"] = 0;
            DisplayedSortedArrows();
        }
        protected void lvPerson_PagePropertiesChanging(object sender, PagePropertiesChangingEventArgs e)
        {
            Session["PersonCurrentPage"] = e.StartRowIndex;
        }
        // Keeps the page-size dropdown in sync with the session value.
        protected void ddlPageSize_PreRender(object sender, EventArgs e)
        {
            DropDownList ddl = ((DropDownList)sender);
            if (Session["PersonPageSize"] != null) {
                ddl.SelectedValue = Session["PersonPageSize"].ToString();
            }
        }
        // Associates each column-header label with its filter's input control
        // for accessibility (clicking the label focuses the filter).
        protected void ScaffoldLabel_PreRender(object sender, EventArgs e)
        {
            Label label = (Label)sender;
            ScaffoldFilterControl scaffoldFilter = (ScaffoldFilterControl)label.FindControl("ScaffoldFilter");
            ScaffoldFilterUserControl fuc = scaffoldFilter.FilterTemplate as ScaffoldFilterUserControl;
            if (fuc != null && fuc.FilterControl != null) {
                label.AssociatedControlID = fuc.FilterControl.GetUniqueIDRelativeTo(label);
            }
        }
        // Adds/updates/removes the changed filter in the session dictionary
        // (an empty value removes the filter), then rewinds to the first page.
        protected void ScaffoldFilter_FilterChanged(object sender, FilterChangeEventArgs e)
        {
            string val;
            if (Session["PersonFilterDefault"] == null)
            {
                if (e.SelectedValue.Length > 0)
                {
                    FilterDefaults.Add(e.FieldName, e.SelectedValue);
                    Session["PersonFilterDefault"] = FilterDefaults;
                }
            }
            else
            {
                FilterDefaults = (Dictionary<string, string>)Session["PersonFilterDefault"];
                if (FilterDefaults.TryGetValue(e.FieldName, out val))
                {
                    if (e.SelectedValue.Length > 0) {
                        FilterDefaults[e.FieldName] = e.SelectedValue;
                    }
                    else {
                        FilterDefaults.Remove(e.FieldName);
                    }
                }
                else
                {
                    if (e.SelectedValue.Length > 0) {
                        FilterDefaults.Add(e.FieldName, e.SelectedValue);
                    }
                }
                Session["PersonFilterDefault"] = FilterDefaults;
            }
            DataPager dp = (DataPager)lvPerson.FindControl("dpPerson");
            if (dp != null) {
                dp.SetPageProperties(0, dp.PageSize, true);
            }
        }
        // On first load, seeds the filter controls from the saved session filters.
        protected void ScaffoldFilter_FilterLoad(FilterLoadEventArgs e)
        {
            if (!IsPostBack)
            {
                if (Session["PersonFilterDefault"] != null) {
                    e.FilterDefaults = (Dictionary<string, string>)Session["PersonFilterDefault"];
                }
            }
        }
        // Supplies foreign-key lookup lists to filter controls via the repository.
        public void ForeignKeyEventHandler_LoadForeignKey(ForeignModelEventArgs e)
        {
            e.returnResults = _repository.GetForeignList(e.foreignKeyModel, e.keyType);
        }
        protected void sfForeignKey_Load(object sender, EventArgs e)
        {
            ScaffoldFilterControl scaffoldFilter = (ScaffoldFilterControl)sender;
            ForeignKeyFilter sfuc = scaffoldFilter.FilterTemplate as ForeignKeyFilter;
            if (sfuc != null) {
                sfuc.ForeignKey += new ForeignKeyEventHandler(ForeignKeyEventHandler_LoadForeignKey);
            }
        }
        protected void lvPerson_LayoutCreated(object sender, EventArgs e)
        {
            DisplayedSortedArrows();
        }
        // Shows an up/down chevron on the header cell matching the current sort
        // column and clears the glyph on every other header cell.
        protected void DisplayedSortedArrows()
        {
            Control headerRow = (Control)lvPerson.FindControl("headerRow");
            if (headerRow != null)
            {
                if (Session["PersonSortExpression"] != null && Session["PersonSortDirection"] != null)
                {
                    string se = Session["PersonSortExpression"].ToString();
                    SortDirection sd = (SortDirection)Session["PersonSortDirection"];
                    foreach (HtmlControl tableCell in headerRow.Controls)
                    {
                        if (tableCell.GetType() == typeof(HtmlTableCell))
                        {
                            IButtonControl btnSortField = tableCell.Controls.OfType<IButtonControl>().SingleOrDefault();
                            HtmlGenericControl gcArrow = tableCell.Controls.OfType<HtmlGenericControl>().SingleOrDefault();
                            if (btnSortField != null && gcArrow != null)
                            {
                                if (btnSortField.CommandArgument == se)
                                    gcArrow.Attributes["class"] = sd == SortDirection.Ascending ? "glyphicon glyphicon-chevron-up" : "glyphicon glyphicon-chevron-down";
                                else
                                {
                                    if (gcArrow.Attributes["class"] != null) gcArrow.Attributes.Remove("class");
                                }
                            }
                        }
                    }
                }
            }
        }
    }
| jbwilliamson/MaximiseWFScaffolding | RandomSchool/RandomSchool/Maintain/vPerson/Default.aspx.cs | C# | apache-2.0 | 6,887 |
package net.catchpole.sql.connection;
// Copyright 2014 catchpole.net
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import java.sql.Connection;
import java.sql.PreparedStatement;
public class PreparedStatementHandle implements Comparable<PreparedStatementHandle> {
private Connection connection;
private PreparedStatement preparedStatement;
private String sql;
public PreparedStatementHandle(Connection connection, PreparedStatement preparedStatement, String sql) {
this.connection = connection;
this.preparedStatement = preparedStatement;
this.sql = sql;
}
public boolean equals(Object object) {
if (!(object instanceof PreparedStatementHandle)) {
return false;
}
PreparedStatementHandle other = (PreparedStatementHandle) object;
return (other.sql.equals(this.sql));
}
public int compareTo(PreparedStatementHandle preparedStatementHandle) {
return this.sql.compareTo(preparedStatementHandle.sql);
}
}
| slipperyseal/atomicobjects | atomicobjects-sql/src/main/java/net/catchpole/sql/connection/PreparedStatementHandle.java | Java | apache-2.0 | 1,553 |
package br.com.softplan.security.zap.api.authentication;
import br.com.softplan.security.zap.api.model.AuthenticationInfo;
import br.com.softplan.security.zap.commons.ZapInfo;
import br.com.softplan.security.zap.zaproxy.clientapi.core.ClientApi;
/**
* Simple extension of {@code AbstractAuthenticationHandler} to test its functionalities.
*
* @see AbstractAuthenticationHandler
* @author pdsec
*/
public class NilAuthenticationHandler extends AbstractAuthenticationHandler {
	protected NilAuthenticationHandler(ClientApi api, ZapInfo zapInfo, AuthenticationInfo authenticationInfo) {
		super(api, zapInfo, authenticationInfo);
	}
	// Intentionally a no-op: this test double performs no authentication setup,
	// letting tests exercise only the base class behavior.
	@Override
	public void setupAuthentication(String targetUrl) {}
}
| pdsoftplan/zap-maven-plugin | zap-client-api/src/test/java/br/com/softplan/security/zap/api/authentication/NilAuthenticationHandler.java | Java | apache-2.0 | 708 |
'use strict';
exports.__esModule = true;
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _reactIntl = require('react-intl');
// Babel helper: normalize CommonJS/ES-module interop.  Transpiled ES modules
// (marked __esModule) are returned as-is; anything else is wrapped so callers
// can uniformly read `.default`.
function _interopRequireDefault(obj) {
  if (obj && obj.__esModule) {
    return obj;
  }
  return { default: obj };
}
// Babel helper: enforce `new`-invocation of a transpiled class.  Throws when
// the receiver is not an instance of the class's constructor.
function _classCallCheck(instance, Constructor) {
  var calledWithNew = instance instanceof Constructor;
  if (!calledWithNew) {
    throw new TypeError("Cannot call a class as a function");
  }
}
// Babel helper: implements the spec rule that a constructor's explicit object
// or function return value overrides `this`; primitives fall back to `self`.
function _possibleConstructorReturn(self, call) {
  if (!self) {
    throw new ReferenceError("this hasn't been initialised - super() hasn't been called");
  }
  var overridesThis = call && (typeof call === "object" || typeof call === "function");
  return overridesThis ? call : self;
}
// Babel helper: wire up prototypal inheritance between transpiled classes,
// linking both instance prototypes and (for static members) the constructors.
function _inherits(subClass, superClass) {
  if (typeof superClass !== "function" && superClass !== null) {
    throw new TypeError("Super expression must either be null or a function, not " + typeof superClass);
  }
  subClass.prototype = Object.create(superClass && superClass.prototype, {
    constructor: { value: subClass, enumerable: false, writable: true, configurable: true }
  });
  if (superClass) {
    if (Object.setPrototypeOf) {
      Object.setPrototypeOf(subClass, superClass);
    } else {
      subClass.__proto__ = superClass;
    }
  }
}
// Babel-compiled React component: a modal close button rendering a material
// "close" icon plus a localized label (intl key "button.close").  Clicking it
// invokes the required `onClick` prop.  Edit the ES6 source, not this output.
var ModalCloseButton = function (_Component) {
_inherits(ModalCloseButton, _Component);
function ModalCloseButton(props) {
_classCallCheck(this, ModalCloseButton);
var _this = _possibleConstructorReturn(this, _Component.call(this, props));
// Bind once in the constructor so render passes a stable handler reference.
_this.handleClick = _this.handleClick.bind(_this);
return _this;
}
ModalCloseButton.prototype.handleClick = function handleClick() {
this.props.onClick();
};
ModalCloseButton.prototype.render = function render() {
return _react2.default.createElement(
'div',
{ className: 'modal__close-button', onClick: this.handleClick },
_react2.default.createElement(
'i',
{ className: 'close_icon material-icons' },
'close'
),
_react2.default.createElement(
'div',
{ className: 'text' },
_react2.default.createElement(_reactIntl.FormattedMessage, { id: 'button.close' })
)
);
};
return ModalCloseButton;
}(_react.Component);
ModalCloseButton.propTypes = {
onClick: _react.PropTypes.func.isRequired
};
exports.default = ModalCloseButton;
//# sourceMappingURL=ModalCloseButton.react.js.map | EaglesoftZJ/iGem_Web | build/components/modals/ModalCloseButton.react.js | JavaScript | apache-2.0 | 2,305 |
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package source
import (
"fmt"
"net/url"
"path/filepath"
"runtime"
"strings"
"unicode"
)
const fileScheme = "file"
// URI represents the full URI for a file.
type URI string
// Filename gets the file path for the URI.
// It will return an error if the uri is not valid, or if the URI was not
// a file URI
func (uri URI) Filename() (string, error) {
	filename, err := filename(uri)
	if err != nil {
		return "", err
	}
	// Convert the URI's slash-separated path to the host OS's separator.
	return filepath.FromSlash(filename), nil
}
// filename parses uri, verifies it uses the file scheme, and returns its
// slash-separated path with any Windows drive prefix normalized.
func filename(uri URI) (string, error) {
	parsed, err := url.ParseRequestURI(string(uri))
	if err != nil {
		return "", err
	}
	if parsed.Scheme != fileScheme {
		return "", fmt.Errorf("only file URIs are supported, got %v", parsed.Scheme)
	}
	path := parsed.Path
	// Windows URIs carry paths like "/C:/foo"; drop the leading slash.
	if isWindowsDriveURI(path) {
		path = path[1:]
	}
	return path, nil
}
// ToURI returns a protocol URI for the supplied path.
// It will always have the file scheme.
func ToURI(path string) URI {
	u := toURI(path)
	// Normalize OS-specific separators to slashes before rendering the URI.
	u.Path = filepath.ToSlash(u.Path)
	return URI(u.String())
}
// toURI builds a file-scheme *url.URL for path, expanding the literal
// "$GOROOT" prefix and prefixing Windows drive paths with "/".
func toURI(path string) *url.URL {
	// Handle standard library paths that contain the literal "$GOROOT".
	// TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
	const prefix = "$GOROOT"
	if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
		suffix := path[len(prefix):]
		path = runtime.GOROOT() + suffix
	}
	if isWindowsDrivePath(path) {
		path = "/" + path
	}
	return &url.URL{
		Scheme: fileScheme,
		Path:   path,
	}
}
// isWindowsDrivePath reports whether the file path looks like a Windows path:
// a drive letter followed by ":".  Paths shorter than 4 bytes are rejected.
func isWindowsDrivePath(path string) bool {
	if len(path) < 4 {
		return false
	}
	startsWithDriveLetter := unicode.IsLetter(rune(path[0]))
	return startsWithDriveLetter && path[1] == ':'
}
// isWindowsDriveURI reports whether the URI path uses the Windows form
// produced by url.Parse, e.g. "/C:/foo" (see golang.org/issue/6027): a leading
// slash followed by a drive letter and ":".
func isWindowsDriveURI(uri string) bool {
	if len(uri) < 4 {
		return false
	}
	if uri[0] != '/' {
		return false
	}
	return unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
}
| anpingli/origin | vendor/golang.org/x/tools/internal/lsp/source/uri.go | GO | apache-2.0 | 2,381 |
<?php
/**
* Copyright 2018 SURFnet B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace Surfnet\Stepup\Configuration\Event;
use Broadway\Serializer\Serializable as SerializableInterface;
use Surfnet\Stepup\Configuration\Value\Institution;
use Surfnet\Stepup\Configuration\Value\InstitutionConfigurationId;
use Surfnet\Stepup\Configuration\Value\VerifyEmailOption;
// Domain event recording that an institution's "verify email" option changed.
// Serialized/deserialized for the Broadway event store.
final class VerifyEmailOptionChangedEvent implements SerializableInterface
{
    /**
     * @var InstitutionConfigurationId
     */
    public $institutionConfigurationId;
    /**
     * @var Institution
     */
    public $institution;
    /**
     * @var VerifyEmailOption
     */
    public $verifyEmailOption;
    public function __construct(
        InstitutionConfigurationId $institutionConfigurationId,
        Institution $institution,
        VerifyEmailOption $verifyEmailOption
    ) {
        $this->institutionConfigurationId = $institutionConfigurationId;
        $this->institution = $institution;
        $this->verifyEmailOption = $verifyEmailOption;
    }
    // Rebuilds the event from its stored array form; keys must mirror serialize().
    public static function deserialize(array $data)
    {
        return new self(
            new InstitutionConfigurationId($data['institution_configuration_id']),
            new Institution($data['institution']),
            new VerifyEmailOption($data['verify_email_option'])
        );
    }
    // Flattens the event's value objects into scalars for storage.
    public function serialize(): array
    {
        return [
            'institution_configuration_id' => $this->institutionConfigurationId->getInstitutionConfigurationId(),
            'institution' => $this->institution->getInstitution(),
            'verify_email_option' => $this->verifyEmailOption->isEnabled(),
        ];
    }
}
| OpenConext/Stepup-Middleware | src/Surfnet/Stepup/Configuration/Event/VerifyEmailOptionChangedEvent.php | PHP | apache-2.0 | 2,235 |
/*******************************************************************************
* Copyright © 2012-2015 eBay Software Foundation
* This program is dual licensed under the MIT and Apache 2.0 licenses.
* Please see LICENSE for more information.
*******************************************************************************/
package com.ebay.pulsar.analytics.auth;
import com.ebay.pulsar.analytics.config.ConfigurationFactory;
import com.google.common.base.Throwables;
/**
*@author qxing
*
**/
/**
 * Entry point for the configured {@code Authentication} and {@code Authorization}
 * implementations.  Implementation class names are read from configuration, and
 * the singletons are created lazily via the initialization-on-demand holder idiom.
 *
 * @author qxing
 */
public class AuthModels {
	public static final String AUTHENTICATION_ENABLE_KEY = "pulsar.analytics.authentication.enable";
	public static final String AUTHORIZATION_ENABLE_KEY = "pulsar.analytics.authorization.enable";
	public static final String AUTHENTICATION_IMPLEMENTATION_KEY = "pulsar.analytics.authentication.impl";
	public static final String AUTHORIZATION_IMPLEMENTATION_KEY = "pulsar.analytics.authorization.impl";

	/** @return the lazily constructed {@code Authentication} singleton. */
	public static Authentication authentication() {
		return AuthenticationHolder.auth;
	}

	/** @return the lazily constructed {@code Authorization} singleton. */
	public static Authorization authorization() {
		return AuthorizationHolder.auth;
	}

	/**
	 * Reflectively instantiates {@code clazz} and casts it to {@code parent}.
	 *
	 * @throws RuntimeException if the class cannot be loaded or instantiated, or
	 *         is not assignable to {@code parent}.
	 */
	private static <T> T instance(String clazz, Class<T> parent) {
		try {
			Class<?> clz = Class.forName(clazz);
			if (parent.isAssignableFrom(clz)) {
				return parent.cast(clz.newInstance());
			}
			throw new RuntimeException("class[" + clazz + "] is not assignable to [" + parent.getName() + "].");
		} catch (Exception e) {
			// BUGFIX: the original called Throwables.propagate(e) and then fell
			// through to an unreachable "return null"; throw the propagated
			// exception directly instead.
			throw Throwables.propagate(e);
		}
	}

	// Holder idiom: the JVM guarantees lazy, thread-safe, exactly-once
	// initialization, so the fields can be final instead of volatile.
	private static class AuthenticationHolder {
		private static final Authentication auth = instance(
				ConfigurationFactory.instance().getString(AUTHENTICATION_IMPLEMENTATION_KEY),
				Authentication.class);
	}

	private static class AuthorizationHolder {
		private static final Authorization auth = instance(
				ConfigurationFactory.instance().getString(AUTHORIZATION_IMPLEMENTATION_KEY),
				Authorization.class);
	}
}
| pulsarIO/pulsar-reporting-api | pulsarquery-core/src/main/java/com/ebay/pulsar/analytics/auth/AuthModels.java | Java | apache-2.0 | 2,016 |
#!/usr/bin/python
# coding=utf-8
# Copyright 2017 yaitza. All Rights Reserved.
#
# https://yaitza.github.io/2017-04-26-Python-FilterPicture
#
# My Code hope to usefull for you.
# ===================================================================
__author__ = "yaitza"
__date__ = "2017-04-26 13:59"
import os
from PIL import Image
class ImageHandler:
    """Wraps a single image file and answers size-range queries about it."""

    def __init__(self, pic_path):
        # Path of the image on disk (the calling script targets Python 2 and
        # may pass a unicode path).
        self.pic_path = pic_path

    def getPicSize(self, maxSize, minSize):
        """Return the image's (width, height) if both dimensions lie within
        [minSize, maxSize]; return None for out-of-range or unreadable images.
        """
        try:
            img = Image.open(self.pic_path)
        except IOError:
            # Parenthesized single-argument print works in both Python 2 and 3
            # (the original bare `print` statement was Python-2 only).
            print(self.pic_path + " Error!")
            return None
        # Since min(img.size) <= max(img.size), these two comparisons imply all
        # four checks of the original condition.
        if minSize <= min(img.size) and max(img.size) <= maxSize:
            return img.size
        return None
class FileHandler:
    """Scans a directory and keeps only images whose size fits a target range."""

    def __init__(self, file_path):
        # Directory to scan (non-recursive).
        self.file_path = file_path

    def getAllFiles(self):
        """Return full paths of images in ``file_path`` whose width and height
        both fall within [480, 1204]."""
        # os.path.join is portable, unlike the original manual "/" concatenation.
        candidates = [os.path.join(self.file_path, name)
                      for name in os.listdir(self.file_path)]
        # NOTE(review): 1204 looks like a possible typo for 1280 -- confirm the
        # intended maximum before changing it.
        return [path for path in candidates
                if ImageHandler(path).getPicSize(1204, 480) is not None]
if __name__ == "__main__":
    # NOTE: `unicode` exists only in Python 2; this entry point is Python-2 specific.
    file_path = "E:/内容素材/图片/美女图"
    uipath = unicode(file_path, "utf8")
    fh = FileHandler(uipath)
    # Result is discarded here; getAllFiles returns the matching paths.
    fh.getAllFiles()
| yaitza/python | PROJECT2017/Tools/FilterPicSize.py | Python | apache-2.0 | 1,440 |
using System;
using System.Collections.Generic;
using Foundation.Services;
using Newtonsoft.Json;
namespace Foundation.Web.JavaScript
{
    // JSON serializer that honours ISerializableToJavaScript custom serialization
    // and optional JavaScriptObjectAttribute prefix/suffix wrapping.
    [CLSCompliant(false)]
    public class JavaScriptSerializer : Newtonsoft.Json.JsonSerializer
    {
        /// <summary>
        /// Holds a stack of objects marked with ISerializableToJavaScript that are
        /// being serialized, to prevent stack overflow. This allows these objects to
        /// still use the JsonSerializer to serialize themselves without going into
        /// an infinite loop
        /// </summary>
        readonly IList<object> customSerializationObjects = new List<object>();
        /// <exception cref="ArgumentNullException">when <paramref name="writer"/> is null</exception>
        /// <exception cref="ArgumentNullException">when <paramref name="value"/> is null</exception>
        public new void Serialize(JsonWriter writer, object value)
        {
            if (writer == null) throw new ArgumentNullException("writer");
            if (value == null) throw new ArgumentNullException("value");
            // Custom serializers get first chance, unless this value is already
            // mid-serialization (which would recurse indefinitely).
            if( ReflectionUtilities.Implements(value.GetType(), typeof(ISerializableToJavaScript)) && !customSerializationObjects.Contains(value) )
            {
                customSerializationObjects.Add(value);
                ((ISerializableToJavaScript) value).SerializeToJavaScript(this, writer);
                customSerializationObjects.Remove(value);
                return;
            }
            // Otherwise serialize normally, wrapping with any declared raw
            // prefix/suffix from JavaScriptObjectAttribute.
            var attribute = ReflectionUtilities.GetAttribute<JavaScriptObjectAttribute>(value);
            if( attribute != null && !string.IsNullOrEmpty(attribute.Prefix) ) writer.WriteRaw(attribute.Prefix);
            base.Serialize(writer, value);
            if( attribute != null && !string.IsNullOrEmpty(attribute.Suffix) ) writer.WriteRaw(attribute.Suffix);
        }
    }
} | DavidMoore/Foundation | Code/Foundation.Web/JavaScript/JavaScriptSerializer.cs | C# | apache-2.0 | 1,912 |
/*
* Copyright © 2017 Christopher Zell (zelldon91@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.zell.jnative.benchmarks;
import net.openhft.chronicle.map.ChronicleMap;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
/**
*
*/
// JMH state object supplying a shared off-heap Chronicle long->long map.
@State(Scope.Benchmark)
public class Long2LongChronicleMapSupplier
{
    // Shared across benchmark iterations within the benchmark scope.
    ChronicleMap<Long, Long> map;
    @Setup
    public void createChronicleMap()
    {
        // Pre-size for the full data set so no resizing happens during timed runs.
        map = ChronicleMap
                .of(Long.class, Long.class)
                .name("map")
                .entries(Benchmarks.DATA_SET_SIZE)
                .create();
    }
}
| Zelldon/jnative | jnative/src/benchmarks/java/de/zell/jnative/benchmarks/Long2LongChronicleMapSupplier.java | Java | apache-2.0 | 1,177 |
# Replaces the inline string status on gifts with a dedicated statuses table,
# and records which user added each gift.
class CreateOrganizedStatus < ActiveRecord::Migration
  def change
    # One row per status change: the gift, who set it, the status code, and a note.
    create_table :statuses do |t|
      t.integer :gift_id
      t.integer :added_by_user_id
      t.integer :status
      t.string :note
    end
    # The type argument keeps remove_column reversible.
    remove_column :gifts, :status, :string
    add_column :gifts, :added_by_user_id, :integer
  end
end
| aligature/wishlist | db/migrate/20140810202019_create_organized_status.rb | Ruby | apache-2.0 | 323 |
package cn.telling.shop.vo;
import java.io.Serializable;
import java.math.BigDecimal;
/**
 *
 * @ClassName: ShopProductVo
 * Value object describing a product listed in a shop.
 * @author xingle
 * @date 2015-8-21 5:28:04 PM
 */
public class ShopProductVo implements Serializable{
    /**
     * @Fields serialVersionUID : serialization version identifier
     */
    private static final long serialVersionUID = -8442887672005420455L;
    /**
     * @Fields shopid : shop id
     */
    private String shopid;
    /**
     * @Fields productId : product id
     */
    private BigDecimal productId;
    /**
     * @Fields productName : product name
     */
    private String productName;
    /**
     * @Fields priceretailonline : online retail price of the product
     */
    private BigDecimal priceretailonline;
    /**
     * @Fields sa_id : id of the supply_area0 table row
     */
    private BigDecimal sa_id;
    /**
     * @Fields feature : product feature description
     */
    private String feature;
    /**
     * @Fields overplusnumber : remaining stock quantity
     */
    private BigDecimal overplusnumber;
    /**
     * @Fields picturepath1 : product image path
     */
    private String picturepath1;
    /**
     * @Fields saleNum : sales volume (used when querying a single shop)
     */
    private BigDecimal saleNum;
    public BigDecimal getProductId() {
        return productId;
    }
    public void setProductId(BigDecimal productId) {
        this.productId = productId;
    }
    public String getProductName() {
        return productName;
    }
    public void setProductName(String productName) {
        this.productName = productName;
    }
    public BigDecimal getPriceretailonline() {
        return priceretailonline;
    }
    public void setPriceretailonline(BigDecimal priceretailonline) {
        this.priceretailonline = priceretailonline;
    }
    public BigDecimal getSa_id() {
        return sa_id;
    }
    public void setSa_id(BigDecimal sa_id) {
        this.sa_id = sa_id;
    }
    public String getFeature() {
        return feature;
    }
    public void setFeature(String feature) {
        this.feature = feature;
    }
    public BigDecimal getOverplusnumber() {
        return overplusnumber;
    }
    public void setOverplusnumber(BigDecimal overplusnumber) {
        this.overplusnumber = overplusnumber;
    }
    public String getPicturepath1() {
        return picturepath1;
    }
    public void setPicturepath1(String picturepath1) {
        this.picturepath1 = picturepath1;
    }
    public String getShopid() {
        return shopid;
    }
    public void setShopid(String shopid) {
        this.shopid = shopid;
    }
    public BigDecimal getSaleNum() {
        return saleNum;
    }
    public void setSaleNum(BigDecimal saleNum) {
        this.saleNum = saleNum;
    }
}
| zhgo116/fancy | FancyInterface/src/main/java/cn/telling/shop/vo/ShopProductVo.java | Java | apache-2.0 | 2,497 |
/*
* Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.opsworks.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.opsworks.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
 * ChefConfigurationMarshaller
 *
 * <p>Binds the two {@code ChefConfiguration} members (ManageBerkshelf,
 * BerkshelfVersion) to their JSON payload locations and writes them through
 * the SDK's {@code ProtocolMarshaller}. This file is code-generated; do not
 * hand-edit the logic — regeneration will overwrite it.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class ChefConfigurationMarshaller {
    // Marshalling bindings: JSON payload field name -> member type/location.
    private static final MarshallingInfo<Boolean> MANAGEBERKSHELF_BINDING = MarshallingInfo.builder(MarshallingType.BOOLEAN)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("ManageBerkshelf").build();
    private static final MarshallingInfo<String> BERKSHELFVERSION_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("BerkshelfVersion").build();
    // Stateless marshaller, so a single shared instance is sufficient.
    private static final ChefConfigurationMarshaller instance = new ChefConfigurationMarshaller();
    public static ChefConfigurationMarshaller getInstance() {
        return instance;
    }
    /**
     * Marshall the given parameter object.
     *
     * Throws SdkClientException if the argument is null or any member fails
     * to marshall (the original cause is preserved as the exception cause).
     */
    public void marshall(ChefConfiguration chefConfiguration, ProtocolMarshaller protocolMarshaller) {
        if (chefConfiguration == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(chefConfiguration.getManageBerkshelf(), MANAGEBERKSHELF_BINDING);
            protocolMarshaller.marshall(chefConfiguration.getBerkshelfVersion(), BERKSHELFVERSION_BINDING);
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
| dagnir/aws-sdk-java | aws-java-sdk-opsworks/src/main/java/com/amazonaws/services/opsworks/model/transform/ChefConfigurationMarshaller.java | Java | apache-2.0 | 2,340 |
package io.github.mamifsidtect.warzonehub.listeners;
import io.github.mamifsidtect.warzonehub.MCTheWarzoneHub;
import java.util.ArrayList;
import org.bukkit.Bukkit;
import org.bukkit.ChatColor;
import org.bukkit.Location;
import org.bukkit.Material;
import org.bukkit.entity.Player;
import org.bukkit.event.EventHandler;
import org.bukkit.event.EventPriority;
import org.bukkit.event.Listener;
import org.bukkit.event.block.Action;
import org.bukkit.event.inventory.InventoryClickEvent;
import org.bukkit.event.player.PlayerInteractEvent;
import org.bukkit.event.player.PlayerJoinEvent;
import org.bukkit.inventory.ItemStack;
import org.bukkit.inventory.meta.ItemMeta;
/**
 * Hub navigation: hands every joining player a "Navigator" compass whose
 * right-click opens a teleport menu (GoldenGun arena / hub spawn).
 */
public class NavigationCompassListener implements Listener {

    /** Menu-opening compass given to every player on join. */
    public ItemStack compass = new ItemStack(Material.COMPASS); {
        ItemMeta cmeta = compass.getItemMeta();
        ArrayList<String> c = new ArrayList<String>();
        cmeta.setDisplayName(ChatColor.YELLOW + "Navigator");
        c.add("");
        c.add(ChatColor.GOLD + "Use this compass to navigate your way around the server");
        c.add(ChatColor.GOLD + "Never be lost again with it");
        cmeta.setLore(c);
        compass.setItemMeta(cmeta);
    }

    /** Menu icon: teleport to the GoldenGun arena. */
    public ItemStack goldengun = new ItemStack(Material.GOLD_BARDING); {
        ItemMeta ggmeta = goldengun.getItemMeta();
        ArrayList<String> gg = new ArrayList<String>();
        ggmeta.setDisplayName(ChatColor.YELLOW + "Golden Gun");
        gg.add("");
        gg.add(ChatColor.GOLD + "If you want to play some GoldenGun...");
        gg.add(ChatColor.GOLD + "Click here");
        ggmeta.setLore(gg);
        goldengun.setItemMeta(ggmeta);
    }

    /** Menu icon: teleport to the hub spawn. */
    public ItemStack hub = new ItemStack(Material.BEACON); {
        ItemMeta hmeta = hub.getItemMeta();
        ArrayList<String> h = new ArrayList<String>();
        hmeta.setDisplayName(ChatColor.YELLOW + "The Hub");
        h.add("");
        h.add(ChatColor.YELLOW + "Where it all begins...");
        h.add(ChatColor.YELLOW + "The hub");
        hmeta.setLore(h);
        hub.setItemMeta(hmeta);
    }

    /** Menu icon: gun vault showcase (no teleport wired up yet — see vaultloc). */
    public ItemStack vault = new ItemStack(Material.ENDER_CHEST); {
        ItemMeta vmeta = vault.getItemMeta();
        ArrayList<String> v = new ArrayList<String>();
        vmeta.setDisplayName(ChatColor.YELLOW + "Gun Vault");
        v.add("");
        v.add(ChatColor.GOLD + "Come and take a look at all the guns we have to offer");
        v.add(ChatColor.GOLD + "There are a lot of them");
        vmeta.setLore(v);
        vault.setItemMeta(vmeta);
    }

    // Hard-coded teleport targets in the "Lobby" world.
    public Location goldengunloc = new Location(Bukkit.getWorld("Lobby"), -94.5, 100, 335.5, 270, 7);
    public Location hubloc = new Location(Bukkit.getWorld("Lobby"), -15.5, 110, 256.5, 0, 0);
    //public Location vaultloc = new Location(Bukkit.getWorld("world"), -12.5, 45, 928.5, 0, 0);

    private static NavigationCompassListener instance = new NavigationCompassListener();

    public static NavigationCompassListener getInstance() {
        return instance;
    }

    /** Opens the compass menu on right-click if the player holds the Navigator. */
    @EventHandler (priority = EventPriority.HIGHEST)
    public void onPlayerCompassUse(PlayerInteractEvent event) {
        Player p = event.getPlayer();
        Action a = event.getAction();
        if (p.getItemInHand().equals(compass)) {
            if (a == Action.RIGHT_CLICK_AIR || a == Action.RIGHT_CLICK_BLOCK) {
                if (MCTheWarzoneHub.perms.playerHas(p, "warzonehub.compass")) {
                    p.openInventory(MCTheWarzoneHub.compassMenu);
                } else {
                    p.sendMessage(ChatColor.RED + "You cannot open this yet!");
                }
            }
        }
    }

    /** Handles clicks inside the compass menu and teleports accordingly. */
    @EventHandler (priority = EventPriority.HIGHEST)
    public void onCompassMenuClick(InventoryClickEvent event) {
        if (!event.getInventory().equals(MCTheWarzoneHub.compassMenu)) {
            return;
        }
        // Cancel every click inside the menu so players cannot grab the icons
        // (previously only the two handled icons were cancelled).
        event.setCancelled(true);
        ItemStack itemClicked = event.getCurrentItem();
        // getCurrentItem() is null when an empty slot is clicked; the previous
        // code dereferenced it unconditionally and threw NullPointerException.
        if (itemClicked == null) {
            return;
        }
        Player p = (Player) event.getWhoClicked();
        if (itemClicked.equals(goldengun)) {
            p.teleport(goldengunloc);
        } else if (itemClicked.equals(hub)) {
            p.teleport(hubloc);
        }
    }

    /** Places the Navigator in the config-defined hotbar slot on join. */
    @EventHandler (priority = EventPriority.HIGHEST)
    public void onPlayerJoin(PlayerJoinEvent event) {
        Player p = event.getPlayer();
        p.getInventory().setItem(MCTheWarzoneHub.getPlugin().getConfig().getInt("Player Join Items.Navigator.Inventory Slot") - 1, new ItemStack(compass));
    }
}
| SheepTheGamer/WarzoneHub | src/main/java/io/github/mamifsidtect/warzonehub/listeners/NavigationCompassListener.java | Java | apache-2.0 | 4,171 |
/*
* Copyright 2014, Hridesh Rajan, Robert Dyer,
* and Iowa State University of Science and Technology
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package boa.compiler.ast.statements;
import java.util.ArrayList;
import java.util.List;
import boa.compiler.ast.Node;
import boa.compiler.visitors.AbstractVisitor;
import boa.compiler.visitors.AbstractVisitorNoArg;
import boa.compiler.visitors.AbstractVisitorNoReturn;
import boa.compiler.ast.statements.VisitStatement;
/**
 * A sequence of statements treated as one unit in the parse tree.
 *
 * <p>All insert/replace operations locate the target node by reference
 * identity (==), not {@code equals()}; a node not found in this block is
 * delegated to the superclass implementation.
 *
 * @author rdyer
 * @author hridesh
 */
public class Block extends Statement {
    private static final long serialVersionUID = 1L;

    /** The statements of this block, in source order. */
    protected final List<Statement> statements = new ArrayList<Statement>();

    public List<Statement> getStatements() {
        return statements;
    }

    public int getStatementsSize() {
        return statements.size();
    }

    public Statement getStatement(final int index) {
        return statements.get(index);
    }

    /**
     * Appends a statement (null is ignored) and reparents it to this block.
     *
     * @return this block, for chaining
     */
    public Block addStatement(final Statement s) {
        if (s != null) {
            s.setParent(this);
            statements.add(s);
        }
        return this;
    }

    /**
     * Returns the index of the statement identical (reference equality) to
     * {@code n}, or {@code statements.size()} if it is not in this block.
     * Shared by the insert/replace operations below, which previously each
     * repeated this search inline.
     */
    private int indexOfNode(final Node n) {
        int index = 0;
        while (index < statements.size() && statements.get(index) != n)
            index++;
        return index;
    }

    /** {@inheritDoc} */
    @Override
    public <T,A> T accept(final AbstractVisitor<T,A> v, A arg) {
        return v.visit(this, arg);
    }

    /** {@inheritDoc} */
    @Override
    public <A> void accept(final AbstractVisitorNoReturn<A> v, A arg) {
        v.visit(this, arg);
    }

    /** {@inheritDoc} */
    @Override
    public void accept(final AbstractVisitorNoArg v) {
        v.visit(this);
    }

    /** {@inheritDoc} */
    @Override
    public Node insertStatementBefore(final Statement s, final Node n) {
        final int index = indexOfNode(n);
        if (index == statements.size())
            return super.insertStatementBefore(s, n);
        s.setParent(this);
        statements.add(index, s);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public Node insertStatementAfter(final Statement s, final Node n) {
        final int index = indexOfNode(n);
        if (index == statements.size())
            return super.insertStatementAfter(s, n);
        s.setParent(this);
        statements.add(index + 1, s);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public void replaceStatement(final Statement oldStmt, final Statement newStmt) {
        final int index = indexOfNode(oldStmt);
        if (index == statements.size())
            super.replaceStatement(oldStmt, newStmt);
        else {
            newStmt.setParent(this);
            statements.set(index, newStmt);
        }
    }

    /** Deep-copies this block: each statement is cloned and reparented. */
    public Block clone() {
        final Block b = new Block();
        for (final Statement s : statements)
            b.addStatement(s.clone());
        copyFieldsTo(b);
        return b;
    }
}
| kaushin/TestShadowTypes | src/java/boa/compiler/ast/statements/Block.java | Java | apache-2.0 | 3,117 |
/*
* Copyright (C) 2015 City of Lund (Lunds kommun)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Base class for route-comparison plugins (Leaflet L.Class hierarchy).
// Concrete plugins set name/type/copyright and override initialize().
app.Plugin = L.Class.extend({
    name: null, // same as the class name
    type: null,
    copyright: null, // attribution string surfaced with each result
    options: {
        styles: {}
    },
    initialize: function() {}
});
/**
 * Static properties and methods
 */
// Shared assumptions used when deriving cost/energy figures for a trip.
app.Plugin.options = {
    walkSpeed: 5, // km/h Affects the calc of energy (kcal)
    bikeSpeed: 18, // km/h
    tripsPerYear: 440 // number of trips per year (incl. return)
};
// Factory for an empty result record; each plugin fills in the fields it
// can compute for its transport type.
app.Plugin.getTypeResult = function() {
    var result = {};
    result.time = null;         // seconds
    result.distance = null;     // meters
    result.costPerTrip = null;  // SEK
    result.costPerYear = null;  // SEK
    result.co2PerTrip = null;   // kg
    result.co2PerYear = null;   // kg
    result.kcalYear = null;     // kcal per year
    result.kgChok = null;       // chocolate per year
    result.copyright = null;
    return result;
};
/**
 * Builds a path record for drawing a computed route on the map.
 *
 * The previous version took no parameters and read free variables
 * (`coords`, `color`, `pattern`) from whatever scope happened to define
 * them — implicit globals. They are now explicit parameters with the same
 * defaults, which is backward compatible for zero-argument callers.
 *
 * @param {Array}  [coords=[]]      coordinate list for the polyline
 * @param {String} [color='black']  stroke color
 * @param {*}      [pattern=null]   optional line pattern
 */
app.Plugin.getTypePath = function(coords, color, pattern) {
    return {
        coords: typeof coords !== 'undefined' ? coords : [ ],
        color: typeof color !== 'undefined' ? color : 'black',
        pattern: typeof pattern !== 'undefined' ? pattern : null
    };
};
| lundskommun/resejamforaren | rjweb/static/rjweb/plugins/Plugin.js | JavaScript | apache-2.0 | 1,534 |
//
// Ce fichier a été généré par l'implémentation de référence JavaTM Architecture for XML Binding (JAXB), v2.2.11
// Voir <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Toute modification apportée à ce fichier sera perdue lors de la recompilation du schéma source.
// Généré le : 2015.10.16 à 03:51:42 PM CEST
//
package com.booleanworks.bomworkshop2015a.entity.oagi10.bom.v1;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.CollapsedStringAdapter;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
/**
 * TimePeriodABIEType is logically derived from UN/CEFACT TBG17 ABIE PeriodType as defined in the Reusable Aggregate Business Information Entity (RUABIE) XML Schema file.
 *
 * <p>Java class for the TimePeriodABIEType complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * <complexType name="TimePeriodABIEType">
 *   <complexContent>
 *     <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *       <sequence>
 *         <element name="InclusiveIndicator" type="{http://www.openapplications.org/oagis/10}IndicatorType" minOccurs="0"/>
 *         <element name="StartDateTime" type="{http://www.openapplications.org/oagis/10}DateTimeType" minOccurs="0"/>
 *         <element name="StartTime" type="{http://www.openapplications.org/oagis/10}xbt_DayOfWeekHourMinuteUTCType" minOccurs="0"/>
 *         <element name="Duration" type="{http://www.openapplications.org/oagis/10}DurationMeasureType" minOccurs="0"/>
 *         <element name="EndDateTime" type="{http://www.openapplications.org/oagis/10}DateTimeType" minOccurs="0"/>
 *         <element name="EndTime" type="{http://www.openapplications.org/oagis/10}xbt_DayOfWeekHourMinuteUTCType" minOccurs="0"/>
 *       </sequence>
 *     </restriction>
 *   </complexContent>
 * </complexType>
 * </pre>
 *
 * JAXB-generated class; all properties are optional (minOccurs="0") and
 * therefore nullable.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "TimePeriodABIEType", propOrder = {
    "inclusiveIndicator",
    "startDateTime",
    "startTime",
    "duration",
    "endDateTime",
    "endTime"
})
@XmlSeeAlso({
    TimePeriodType.class
})
public class TimePeriodABIEType {
    @XmlElement(name = "InclusiveIndicator")
    protected Boolean inclusiveIndicator;
    @XmlElement(name = "StartDateTime")
    @XmlSchemaType(name = "anySimpleType")
    protected String startDateTime;
    @XmlElement(name = "StartTime")
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    @XmlSchemaType(name = "token")
    protected String startTime;
    @XmlElement(name = "Duration")
    @XmlSchemaType(name = "anySimpleType")
    protected String duration;
    @XmlElement(name = "EndDateTime")
    @XmlSchemaType(name = "anySimpleType")
    protected String endDateTime;
    @XmlElement(name = "EndTime")
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    @XmlSchemaType(name = "token")
    protected String endTime;
    /**
     * Gets the value of the inclusiveIndicator property.
     *
     * @return
     *     possible object is
     *     {@link Boolean }
     *
     */
    public Boolean isInclusiveIndicator() {
        return inclusiveIndicator;
    }
    /**
     * Sets the value of the inclusiveIndicator property.
     *
     * @param value
     *     allowed object is
     *     {@link Boolean }
     *
     */
    public void setInclusiveIndicator(Boolean value) {
        this.inclusiveIndicator = value;
    }
    /**
     * Gets the value of the startDateTime property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getStartDateTime() {
        return startDateTime;
    }
    /**
     * Sets the value of the startDateTime property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setStartDateTime(String value) {
        this.startDateTime = value;
    }
    /**
     * Gets the value of the startTime property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getStartTime() {
        return startTime;
    }
    /**
     * Sets the value of the startTime property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setStartTime(String value) {
        this.startTime = value;
    }
    /**
     * Gets the value of the duration property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getDuration() {
        return duration;
    }
    /**
     * Sets the value of the duration property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setDuration(String value) {
        this.duration = value;
    }
    /**
     * Gets the value of the endDateTime property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getEndDateTime() {
        return endDateTime;
    }
    /**
     * Sets the value of the endDateTime property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setEndDateTime(String value) {
        this.endDateTime = value;
    }
    /**
     * Gets the value of the endTime property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getEndTime() {
        return endTime;
    }
    /**
     * Sets the value of the endTime property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setEndTime(String value) {
        this.endTime = value;
    }
}
| mlecabellec/bomworkshop2015a | src/main/java/com/booleanworks/bomworkshop2015a/entity/oagi10/bom/v1/TimePeriodABIEType.java | Java | apache-2.0 | 6,259 |
require_relative 'components'
require_relative 'metadata'
require_relative 'analysis'
require_relative 'rootlevel'
module SmartyStreets
  module InternationalStreet
    # A candidate is one possible match for a submitted address; an ambiguous
    # lookup may yield several of them.
    #
    # See "https://smartystreets.com/docs/cloud/international-street-api#root"
    class Candidate < RootLevel
      attr_reader :metadata, :components, :analysis

      # Builds the candidate from a parsed API response hash. Missing
      # sections default to empty hashes so the wrappers always exist.
      def initialize(obj)
        components_hash = obj.fetch('components', {})
        metadata_hash   = obj.fetch('metadata', {})
        analysis_hash   = obj.fetch('analysis', {})

        @components = Components.new(components_hash)
        @metadata   = Metadata.new(metadata_hash)
        @analysis   = Analysis.new(analysis_hash)

        super(obj)
      end
    end
  end
end
| smartystreets/smartystreets-ruby-sdk | lib/smartystreets_ruby_sdk/international_street/candidate.rb | Ruby | apache-2.0 | 742 |
/**
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gravitee.repository.jdbc.embedded;
import com.wix.mysql.EmbeddedMysql;
import com.wix.mysql.config.MysqldConfig;
import io.gravitee.repository.jdbc.AbstractJdbcTestRepositoryConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Conditional;
import javax.inject.Inject;
import static com.wix.mysql.EmbeddedMysql.anEmbeddedMysql;
import static com.wix.mysql.distribution.Version.v5_7_latest;
import static java.lang.String.format;
/**
 * @author Azize ELAMRANI (azize.elamrani at graviteesource.com)
 * @author GraviteeSource Team
 */
@Conditional(MySQLCondition.class)
public class MySQLTestRepositoryConfiguration extends AbstractJdbcTestRepositoryConfiguration {

    @Inject
    private EmbeddedMysql embeddedMysql;

    /** Starts an embedded MySQL 5.7 with a pre-created {@code gravitee} schema. */
    @Bean(destroyMethod = "stop")
    public EmbeddedMysql embeddedMysql() {
        return anEmbeddedMysql(v5_7_latest).addSchema("gravitee").start();
    }

    /** Builds the JDBC URL pointing at the running embedded instance. */
    @Override
    protected String getJdbcUrl() {
        final MysqldConfig config = embeddedMysql.getConfig();
        final int port = config.getPort();
        final String user = config.getUsername();
        final String password = config.getPassword();
        return format("jdbc:mysql://localhost:%s/gravitee?useSSL=false&user=%s&password=%s", port, user, password);
    }
} | gravitee-io/gravitee-repository-jdbc | src/test/java/io/gravitee/repository/jdbc/embedded/MySQLTestRepositoryConfiguration.java | Java | apache-2.0 | 1,875 |
/*
* Copyright 1997-2011 teatrove.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.teatrove.tea.parsetree;
import org.teatrove.tea.compiler.SourceInfo;
/**
 * A Name just associates a String with a SourceInfo object. Names are
 * usually restricted to only containing Java identifier characters and
 * '.' characters.
 *
 * @author Brian S O'Neill
 */
public class Name extends Node {
    private static final long serialVersionUID = 1L;

    /** The identifier text; never mutated after construction. */
    private String mName;

    public Name(SourceInfo info, String name) {
        super(info);
        mName = name;
    }

    public Object accept(NodeVisitor visitor) {
        return visitor.visit(this);
    }

    public String getName() {
        return mName;
    }

    public int hashCode() {
        return mName.hashCode();
    }

    public boolean equals(Object other) {
        // Compare by value, not by reference: the previous implementation
        // used "==", which only holds for interned strings and broke the
        // equals/hashCode contract for equal-but-distinct String instances.
        if (other instanceof Name) {
            String otherName = ((Name) other).mName;
            return mName == null ? otherName == null : mName.equals(otherName);
        }
        else {
            return false;
        }
    }
}
| teatrove/teatrove | tea/src/main/java/org/teatrove/tea/parsetree/Name.java | Java | apache-2.0 | 1,522 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TestCases for Dataset,
including create, config, run, etc.
"""
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import paddle.compat as cpt
import paddle.fluid.core as core
import numpy as np
import os
import shutil
import unittest
class TestDataset(unittest.TestCase):
""" TestCases for Dataset. """
    def setUp(self):
        """Defaults shared by all cases: plain train_from_dataset (no
        DataLoader), 10 epochs, keep the last partial batch."""
        self.use_data_loader = False
        self.epoch_num = 10
        self.drop_last = False
def test_dataset_create(self):
""" Testcase for dataset create. """
try:
dataset = paddle.distributed.InMemoryDataset()
except:
self.assertTrue(False)
try:
dataset = paddle.distributed.QueueDataset()
except:
self.assertTrue(False)
try:
dataset = paddle.distributed.fleet.dataset.FileInstantDataset()
except:
self.assertTrue(False)
try:
dataset = paddle.distributed.fleet.dataset.MyOwnDataset()
self.assertTrue(False)
except:
self.assertTrue(True)
    def test_config(self):
        """
        Testcase for python config: values pushed through the setters must
        be readable back from the corresponding attributes.
        """
        dataset = fluid.InMemoryDataset()
        dataset.set_parse_ins_id(True)
        dataset.set_parse_content(True)
        # NOTE(review): _set_trainer_num is a private API; used here on purpose.
        dataset._set_trainer_num(1)
        self.assertTrue(dataset.parse_ins_id)
        self.assertTrue(dataset.parse_content)
        self.assertEqual(dataset.trainer_num, 1)
    def test_shuffle_by_uid(self):
        """
        Testcase for shuffle_by_uid. Smoke test only: verifies the private
        setters accept a uid slot and the flag without raising (no asserts).
        """
        dataset = paddle.distributed.InMemoryDataset()
        dataset._set_uid_slot('6048')
        dataset._set_shuffle_by_uid(True)
    def test_run_with_dump(self):
        """
        Testcase for InMemoryDataset from create to run: write two slot-data
        files, configure distributed settings, load + shuffle, then train
        twice on empty programs.
        """
        # Each line: label, ins-id/content columns, then the four slot values.
        with open("test_run_with_dump_a.txt", "w") as f:
            data = "1 a 1 a 1 1 2 3 3 4 5 5 5 5 1 1\n"
            data += "1 b 1 b 1 2 2 3 4 4 6 6 6 6 1 2\n"
            data += "1 c 1 c 1 3 2 3 5 4 7 7 7 7 1 3\n"
            f.write(data)
        with open("test_run_with_dump_b.txt", "w") as f:
            data = "1 d 1 d 1 4 2 3 3 4 5 5 5 5 1 4\n"
            data += "1 e 1 e 1 5 2 3 4 4 6 6 6 6 1 5\n"
            data += "1 f 1 f 1 6 2 3 5 4 7 7 7 7 1 6\n"
            data += "1 g 1 g 1 7 2 3 6 4 8 8 8 8 1 7\n"
            f.write(data)
        slots = ["slot1", "slot2", "slot3", "slot4"]
        slots_vars = []
        for slot in slots:
            var = fluid.layers.data(
                name=slot, shape=[1], dtype="int64", lod_level=1)
            slots_vars.append(var)
        dataset = paddle.distributed.InMemoryDataset()
        dataset.init(
            batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
        # update_settings must be able to override a value given to init().
        dataset.update_settings(pipe_command="cat1")
        dataset._init_distributed_settings(
            parse_ins_id=True,
            parse_content=True,
            fea_eval=True,
            candidate_size=10000)
        dataset.set_filelist(
            ["test_run_with_dump_a.txt", "test_run_with_dump_b.txt"])
        dataset.load_into_memory()
        dataset.local_shuffle()
        paddle.enable_static()
        exe = paddle.static.Executor(paddle.CPUPlace())
        startup_program = paddle.static.Program()
        main_program = paddle.static.Program()
        exe.run(startup_program)
        for i in range(2):
            try:
                exe.train_from_dataset(main_program, dataset)
            except ImportError as e:
                # Training backend unavailable in this build; tolerated.
                pass
            except Exception as e:
                self.assertTrue(False)
        os.remove("./test_run_with_dump_a.txt")
        os.remove("./test_run_with_dump_b.txt")
    def test_dataset_config(self):
        """Testcase for dataset configuration: every core-level setter must
        round-trip through its matching getter."""
        dataset = fluid.core.Dataset("MultiSlotDataset")
        dataset.set_thread_num(12)
        dataset.set_filelist(["a.txt", "b.txt", "c.txt"])
        dataset.set_trainer_num(4)
        dataset.set_hdfs_config("my_fs_name", "my_fs_ugi")
        dataset.set_download_cmd("./read_from_afs my_fs_name my_fs_ugi")
        dataset.set_enable_pv_merge(False)
        thread_num = dataset.get_thread_num()
        self.assertEqual(thread_num, 12)
        filelist = dataset.get_filelist()
        self.assertEqual(len(filelist), 3)
        self.assertEqual(filelist[0], "a.txt")
        self.assertEqual(filelist[1], "b.txt")
        self.assertEqual(filelist[2], "c.txt")
        trainer_num = dataset.get_trainer_num()
        self.assertEqual(trainer_num, 4)
        name, ugi = dataset.get_hdfs_config()
        self.assertEqual(name, "my_fs_name")
        self.assertEqual(ugi, "my_fs_ugi")
        download_cmd = dataset.get_download_cmd()
        self.assertEqual(download_cmd, "./read_from_afs my_fs_name my_fs_ugi")
    def test_set_download_cmd(self):
        """
        Testcase for InMemoryDataset from create to run with a custom
        download_cmd ("cat") and "afs:"-prefixed file names.
        """
        filename1 = "afs:test_in_memory_dataset_run_a.txt"
        filename2 = "afs:test_in_memory_dataset_run_b.txt"
        with open(filename1, "w") as f:
            data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
            data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
            data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
            f.write(data)
        with open(filename2, "w") as f:
            data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
            data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
            data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
            data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
            f.write(data)
        slots = ["slot1", "slot2", "slot3", "slot4"]
        slots_vars = []
        for slot in slots:
            var = fluid.layers.data(
                name=slot, shape=[1], dtype="int64", lod_level=1)
            slots_vars.append(var)
        dataset = paddle.distributed.InMemoryDataset()
        dataset.init(
            batch_size=32,
            thread_num=3,
            pipe_command="cat",
            download_cmd="cat",
            use_var=slots_vars)
        dataset.set_filelist([filename1, filename2])
        dataset.load_into_memory()
        paddle.enable_static()
        # NOTE(review): exe is created twice; the second (fluid) assignment
        # below is the one actually used.
        exe = paddle.static.Executor(paddle.CPUPlace())
        startup_program = paddle.static.Program()
        main_program = paddle.static.Program()
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(startup_program)
        if self.use_data_loader:
            data_loader = fluid.io.DataLoader.from_dataset(dataset,
                                                           fluid.cpu_places(),
                                                           self.drop_last)
            for i in range(self.epoch_num):
                for data in data_loader():
                    exe.run(main_program, feed=data)
        else:
            for i in range(self.epoch_num):
                try:
                    exe.train_from_dataset(main_program, dataset)
                except Exception as e:
                    self.assertTrue(False)
        os.remove(filename1)
        os.remove(filename2)
    def test_in_memory_dataset_run(self):
        """
        Testcase for InMemoryDataset from create to run: exercises
        slots_shuffle, local_shuffle and the unique-feasign generation path.
        """
        with open("test_in_memory_dataset_run_a.txt", "w") as f:
            data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
            data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
            data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
            f.write(data)
        with open("test_in_memory_dataset_run_b.txt", "w") as f:
            data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
            data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
            data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
            data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
            f.write(data)
        slots = ["slot1", "slot2", "slot3", "slot4"]
        slots_vars = []
        for slot in slots:
            var = fluid.layers.data(
                name=slot, shape=[1], dtype="int64", lod_level=1)
            slots_vars.append(var)
        dataset = paddle.distributed.InMemoryDataset()
        dataset.init(
            batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
        # fea_eval enables slots_shuffle; candidate_size bounds its sampling.
        dataset._init_distributed_settings(fea_eval=True, candidate_size=1)
        dataset.set_filelist([
            "test_in_memory_dataset_run_a.txt",
            "test_in_memory_dataset_run_b.txt"
        ])
        dataset.load_into_memory()
        dataset.slots_shuffle(["slot1"])
        dataset.local_shuffle()
        dataset._set_generate_unique_feasigns(True, 15)
        dataset._generate_local_tables_unlock(0, 11, 1, 25, 15)
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(fluid.default_startup_program())
        if self.use_data_loader:
            data_loader = fluid.io.DataLoader.from_dataset(dataset,
                                                           fluid.cpu_places(),
                                                           self.drop_last)
            for i in range(self.epoch_num):
                for data in data_loader():
                    exe.run(fluid.default_main_program(), feed=data)
        else:
            for i in range(self.epoch_num):
                try:
                    exe.train_from_dataset(fluid.default_main_program(),
                                           dataset)
                except Exception as e:
                    self.assertTrue(False)
        os.remove("./test_in_memory_dataset_run_a.txt")
        os.remove("./test_in_memory_dataset_run_b.txt")
    def test_in_memory_dataset_masterpatch(self):
        """
        Testcase for InMemoryDataset from create to run with ins-id parsing
        and merge-by-lineid (lod_level=1 slot variables).
        """
        # Lines carry an explicit instance id (id1..id6) used for merging.
        with open("test_in_memory_dataset_masterpatch_a.txt", "w") as f:
            data = "1 id1 1 1 2 3 3 4 5 5 5 5 1 1\n"
            data += "1 id1 1 2 2 3 4 4 6 6 6 6 1 2\n"
            data += "1 id2 1 1 1 1 1 0 1 0\n"
            data += "1 id3 1 0 1 0 1 1 1 1\n"
            data += "1 id3 1 1 1 1 1 0 1 0\n"
            data += "1 id4 1 0 1 0 1 1 1 1\n"
            data += "1 id4 1 0 1 0 1 1 1 1\n"
            data += "1 id5 1 1 1 1 1 0 1 0\n"
            data += "1 id5 1 1 1 1 1 0 1 0\n"
            f.write(data)
        with open("test_in_memory_dataset_masterpatch_b.txt", "w") as f:
            data = "1 id6 1 4 2 3 3 4 5 5 5 5 1 4\n"
            data += "1 id6 1 1 2 3 4 4 6 6 6 6 1 5\n"
            data += "1 id6 1 6 2 3 5 4 7 7 7 7 1 6\n"
            data += "1 id6 1 7 2 3 6 4 8 8 8 8 1 7\n"
            f.write(data)
        slots = ["slot1", "slot2", "slot3", "slot4"]
        slots_vars = []
        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            # First two slots are int64, last two float32.
            for slot in slots[:2]:
                var = fluid.layers.data(
                    name=slot, shape=[1], dtype="int64", lod_level=1)
                slots_vars.append(var)
            for slot in slots[2:]:
                var = fluid.layers.data(
                    name=slot, shape=[1], dtype="float32", lod_level=1)
                slots_vars.append(var)
        dataset = paddle.distributed.InMemoryDataset()
        dataset.init(
            batch_size=32, thread_num=1, pipe_command="cat", use_var=slots_vars)
        dataset._init_distributed_settings(parse_ins_id=True)
        dataset.set_filelist([
            "test_in_memory_dataset_masterpatch_a.txt",
            "test_in_memory_dataset_masterpatch_b.txt"
        ])
        dataset.load_into_memory()
        dataset.local_shuffle()
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(startup_program)
        for i in range(2):
            try:
                exe.train_from_dataset(train_program, dataset)
            except ImportError as e:
                pass
            except Exception as e:
                self.assertTrue(False)
        # Exercises update_settings as an alias of _set_merge_by_lineid.
        #dataset._set_merge_by_lineid(2)
        dataset.update_settings(merge_size=2)
        dataset.dataset.merge_by_lineid()
        os.remove("./test_in_memory_dataset_masterpatch_a.txt")
        os.remove("./test_in_memory_dataset_masterpatch_b.txt")
    def test_in_memory_dataset_masterpatch1(self):
        """
        Testcase for InMemoryDataset from create to run. Variant of
        test_in_memory_dataset_masterpatch using lod_level=0 variables and
        the private _set_merge_by_lineid setter.
        """
        with open("test_in_memory_dataset_masterpatch1_a.txt", "w") as f:
            data = "1 id1 1 1 2 3 3 4 5 5 5 5 1 1\n"
            data += "1 id1 1 2 2 3 4 4 6 6 6 6 1 2\n"
            data += "1 id2 1 1 1 1 1 0 1 0\n"
            data += "1 id3 1 0 1 0 1 1 1 1\n"
            data += "1 id3 1 1 1 1 1 0 1 0\n"
            data += "1 id4 1 0 1 0 1 1 1 1\n"
            data += "1 id4 1 0 1 0 1 1 1 1\n"
            data += "1 id5 1 1 1 1 1 0 1 0\n"
            data += "1 id5 1 1 1 1 1 0 1 0\n"
            f.write(data)
        with open("test_in_memory_dataset_masterpatch1_b.txt", "w") as f:
            data = "1 id6 1 4 2 3 3 4 5 5 5 5 1 4\n"
            data += "1 id6 1 1 2 3 4 4 6 6 6 6 1 5\n"
            data += "1 id6 1 6 2 3 5 4 7 7 7 7 1 6\n"
            data += "1 id6 1 7 2 3 6 4 8 8 8 8 1 7\n"
            f.write(data)
        slots_vars = []
        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            var1 = fluid.layers.data(
                name="slot1", shape=[1], dtype="int64", lod_level=0)
            var2 = fluid.layers.data(
                name="slot2", shape=[1], dtype="int64", lod_level=0)
            var3 = fluid.layers.data(
                name="slot3", shape=[1], dtype="float32", lod_level=0)
            var4 = fluid.layers.data(
                name="slot4", shape=[1], dtype="float32", lod_level=0)
            slots_vars = [var1, var2, var3, var4]
        dataset = paddle.distributed.InMemoryDataset()
        dataset.init(
            batch_size=32, thread_num=1, pipe_command="cat", use_var=slots_vars)
        dataset._init_distributed_settings(parse_ins_id=True)
        dataset.set_filelist([
            "test_in_memory_dataset_masterpatch1_a.txt",
            "test_in_memory_dataset_masterpatch1_b.txt"
        ])
        dataset.load_into_memory()
        dataset.local_shuffle()
        exe = fluid.Executor(fluid.CPUPlace())
        exe.run(startup_program)
        for i in range(2):
            try:
                exe.train_from_dataset(train_program, dataset)
            except ImportError as e:
                pass
            except Exception as e:
                self.assertTrue(False)
        dataset._set_merge_by_lineid(2)
        dataset.dataset.merge_by_lineid()
        os.remove("./test_in_memory_dataset_masterpatch1_a.txt")
        os.remove("./test_in_memory_dataset_masterpatch1_b.txt")
def test_in_memory_dataset_run_2(self):
"""
Testcase for InMemoryDataset from create to run.
Use CUDAPlace
Use float type id
Also exercises preload/release-memory cycles, merge_by_lineid and
update_settings on an already-initialized dataset.
"""
with open("test_in_memory_dataset_run_a.txt", "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_in_memory_dataset_run_b.txt", "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
f.write(data)
# Four float slots with LoD level 1 (variable-length sequences).
slots = ["slot1_f", "slot2_f", "slot3_f", "slot4_f"]
slots_vars = []
for slot in slots:
var = fluid.layers.data(
name=slot, shape=[1], dtype="float32", lod_level=1)
slots_vars.append(var)
dataset = paddle.distributed.InMemoryDataset()
dataset.init(
batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
dataset.set_filelist([
"test_in_memory_dataset_run_a.txt",
"test_in_memory_dataset_run_b.txt"
])
dataset.load_into_memory()
dataset.local_shuffle()
# Prefer GPU when this build was compiled with CUDA.
exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0))
exe.run(fluid.default_startup_program())
for i in range(2):
try:
# Re-run training with several explicit thread counts.
exe.train_from_dataset(fluid.default_main_program(), dataset)
exe.train_from_dataset(
fluid.default_main_program(), dataset, thread=1)
exe.train_from_dataset(
fluid.default_main_program(), dataset, thread=2)
exe.train_from_dataset(
fluid.default_main_program(), dataset, thread=2)
exe.train_from_dataset(
fluid.default_main_program(), dataset, thread=3)
exe.train_from_dataset(
fluid.default_main_program(), dataset, thread=4)
except ImportError as e:
pass
except Exception as e:
self.assertTrue(False)
if self.use_data_loader:
data_loader = fluid.io.DataLoader.from_dataset(dataset,
fluid.cpu_places(),
self.drop_last)
for i in range(self.epoch_num):
for data in data_loader():
exe.run(fluid.default_main_program(), feed=data)
else:
for i in range(self.epoch_num):
try:
exe.train_from_dataset(fluid.default_main_program(),
dataset)
except Exception as e:
self.assertTrue(False)
# Exercise preload / wait / release cycles with merge-by-lineid.
dataset._set_merge_by_lineid(2)
dataset._set_parse_ins_id(False)
dataset._set_fleet_send_sleep_seconds(2)
dataset.preload_into_memory()
dataset.wait_preload_done()
dataset.release_memory()
dataset.preload_into_memory(1)
dataset.wait_preload_done()
dataset.dataset.merge_by_lineid()
dataset.release_memory()
dataset._set_merge_by_lineid(30)
dataset._set_parse_ins_id(False)
dataset.load_into_memory()
dataset.dataset.merge_by_lineid()
# update_settings must accept a full bundle of options post-init.
dataset.update_settings(
batch_size=1,
thread_num=2,
input_type=1,
pipe_command="cat",
use_var=[],
fs_name="",
fs_ugi="",
download_cmd="cat",
merge_size=-1,
parse_ins_id=False,
parse_content=False,
fleet_send_batch_size=2,
fleet_send_sleep_seconds=2,
fea_eval=True)
# Smoke-test the raw Fleet pointer API.
fleet_ptr = fluid.core.Fleet()
fleet_ptr.set_client2client_config(1, 1, 1)
fleet_ptr.get_cache_threshold(0)
os.remove("./test_in_memory_dataset_run_a.txt")
os.remove("./test_in_memory_dataset_run_b.txt")
def test_queue_dataset_run(self):
"""
Testcase for QueueDataset from create to run.
Also verifies that training with a second, file-less dataset does not
raise an unexpected error.
"""
with open("test_queue_dataset_run_a.txt", "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_queue_dataset_run_b.txt", "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
f.write(data)
slots = ["slot1", "slot2", "slot3", "slot4"]
slots_vars = []
for slot in slots:
var = fluid.layers.data(
name=slot, shape=[1], dtype="int64", lod_level=1)
slots_vars.append(var)
dataset = paddle.distributed.QueueDataset()
dataset.init(
batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
dataset.set_filelist(
["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
if self.use_data_loader:
data_loader = fluid.io.DataLoader.from_dataset(dataset,
fluid.cpu_places(),
self.drop_last)
for i in range(self.epoch_num):
for data in data_loader():
exe.run(fluid.default_main_program(), feed=data)
else:
for i in range(self.epoch_num):
try:
exe.train_from_dataset(fluid.default_main_program(),
dataset)
except Exception as e:
self.assertTrue(False)
dataset2 = paddle.distributed.QueueDataset()
dataset2.init(
batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
# NOTE(review): this clears the filelist on `dataset`, not `dataset2`.
# Presumably the intent was dataset2.set_filelist([]) so that the
# train_from_dataset(dataset2) call below runs without files — confirm.
dataset.set_filelist([])
try:
exe.train_from_dataset(fluid.default_main_program(), dataset2)
except ImportError as e:
print("warning: we skip trainer_desc_pb2 import problem in windows")
except Exception as e:
self.assertTrue(False)
if os.path.exists("./test_queue_dataset_run_a.txt"):
os.remove("./test_queue_dataset_run_a.txt")
if os.path.exists("./test_queue_dataset_run_b.txt"):
os.remove("./test_queue_dataset_run_b.txt")
def test_queue_dataset_run_2(self):
"""
Testcase for QueueDataset from create to run.
Use CUDAPlace
Use float type id
"""
with open("test_queue_dataset_run_a.txt", "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_queue_dataset_run_b.txt", "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
f.write(data)
# Float slots with LoD level 1, unlike the int64 slots of the
# plain test_queue_dataset_run case.
slots = ["slot1_f", "slot2_f", "slot3_f", "slot4_f"]
slots_vars = []
for slot in slots:
var = fluid.layers.data(
name=slot, shape=[1], dtype="float32", lod_level=1)
slots_vars.append(var)
dataset = paddle.distributed.QueueDataset()
dataset.init(
batch_size=32, thread_num=3, pipe_command="cat", use_var=slots_vars)
dataset.set_filelist(
["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"])
# Prefer GPU when this build was compiled with CUDA.
exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0))
exe.run(fluid.default_startup_program())
if self.use_data_loader:
data_loader = fluid.io.DataLoader.from_dataset(dataset,
fluid.cpu_places(),
self.drop_last)
for i in range(self.epoch_num):
for data in data_loader():
exe.run(fluid.default_main_program(), feed=data)
else:
for i in range(self.epoch_num):
try:
exe.train_from_dataset(fluid.default_main_program(),
dataset)
except Exception as e:
self.assertTrue(False)
if os.path.exists("./test_queue_dataset_run_a.txt"):
os.remove("./test_queue_dataset_run_a.txt")
if os.path.exists("./test_queue_dataset_run_b.txt"):
os.remove("./test_queue_dataset_run_b.txt")
def test_queue_dataset_run_3(self):
"""
Testcase for QueueDataset from create to run.
Use CUDAPlace
Use float type id
Uses an InMemoryDataset with input_type=1 and fluid.data variables
(shape [None, 1]) instead of fluid.layers.data.
"""
with open("test_queue_dataset_run_a.txt", "w") as f:
data = "2 1 2 2 5 4 2 2 7 2 1 3\n"
data += "2 6 2 2 1 4 2 2 4 2 2 3\n"
data += "2 5 2 2 9 9 2 2 7 2 1 3\n"
data += "2 7 2 2 1 9 2 3 7 2 5 3\n"
f.write(data)
with open("test_queue_dataset_run_b.txt", "w") as f:
data = "2 1 2 2 5 4 2 2 7 2 1 3\n"
data += "2 6 2 2 1 4 2 2 4 2 2 3\n"
data += "2 5 2 2 9 9 2 2 7 2 1 3\n"
data += "2 7 2 2 1 9 2 3 7 2 5 3\n"
f.write(data)
slots = ["slot1", "slot2", "slot3", "slot4"]
slots_vars = []
for slot in slots:
# fluid.data with a batch dimension of None (variable batch).
var = fluid.data(
name=slot, shape=[None, 1], dtype="int64", lod_level=1)
slots_vars.append(var)
dataset = paddle.distributed.InMemoryDataset()
dataset.init(
batch_size=1,
thread_num=2,
input_type=1,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist(
["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"])
dataset.load_into_memory()
exe = fluid.Executor(fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0))
exe.run(fluid.default_startup_program())
if self.use_data_loader:
data_loader = fluid.io.DataLoader.from_dataset(dataset,
fluid.cpu_places(),
self.drop_last)
for i in range(self.epoch_num):
for data in data_loader():
exe.run(fluid.default_main_program(), feed=data)
else:
for i in range(self.epoch_num):
try:
exe.train_from_dataset(fluid.default_main_program(),
dataset)
except Exception as e:
self.assertTrue(False)
if os.path.exists("./test_queue_dataset_run_a.txt"):
os.remove("./test_queue_dataset_run_a.txt")
if os.path.exists("./test_queue_dataset_run_b.txt"):
os.remove("./test_queue_dataset_run_b.txt")
class TestDatasetWithDataLoader(TestDataset):
    """Re-run every TestDataset case with the DataLoader feed path enabled."""

    def setUp(self):
        """Switch the inherited cases onto DataLoader for 10 epochs."""
        self.drop_last = False
        self.epoch_num = 10
        self.use_data_loader = True
class TestDatasetWithFetchHandler(unittest.TestCase):
"""
Test Dataset With Fetch Handler. TestCases.
Builds a tiny pooling + fc network over four int64 slots and checks
train_from_dataset / infer_from_dataset with and without a FetchHandler.
"""
def net(self):
"""
Test Dataset With Fetch Handler. TestCases.
Returns:
tuple: (list of slot input variables, fc output variable)
"""
slots = ["slot1", "slot2", "slot3", "slot4"]
slots_vars = []
poolings = []
for slot in slots:
data = fluid.layers.data(
name=slot, shape=[1], dtype="int64", lod_level=1)
# Cast to float so the sequence pool / fc can consume the slot.
var = fluid.layers.cast(x=data, dtype='float32')
pool = fluid.layers.sequence_pool(input=var, pool_type='AVERAGE')
slots_vars.append(data)
poolings.append(pool)
concated = fluid.layers.concat(poolings, axis=1)
fc = fluid.layers.fc(input=concated, act='tanh', size=32)
return slots_vars, fc
def get_dataset(self, inputs, files):
"""
Test Dataset With Fetch Handler. TestCases.
Args:
inputs(list): inputs of get_dataset
files(list): files of get_dataset
Returns:
QueueDataset: initialized dataset reading from `files`
"""
dataset = paddle.distributed.QueueDataset()
dataset.init(
batch_size=32, thread_num=3, pipe_command="cat", use_var=inputs)
dataset.set_filelist(files)
return dataset
def setUp(self):
"""
Test Dataset With Fetch Handler. TestCases.
Creates the two sample data files consumed by every case.
"""
with open("test_queue_dataset_run_a.txt", "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_queue_dataset_run_b.txt", "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
f.write(data)
def tearDown(self):
"""
Test Dataset With Fetch Handler. TestCases.
Removes the sample data files created in setUp.
"""
os.remove("./test_queue_dataset_run_a.txt")
os.remove("./test_queue_dataset_run_b.txt")
def test_dataset_none(self):
"""
Test Dataset With Fetch Handler. TestCases.
Passing dataset=None must raise a RuntimeError with a fixed message.
"""
slots_vars, out = self.net()
files = ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]
dataset = self.get_dataset(slots_vars, files)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
# test dataset->None
try:
exe.train_from_dataset(fluid.default_main_program(), None)
except ImportError as e:
print("warning: we skip trainer_desc_pb2 import problem in windows")
except RuntimeError as e:
error_msg = "dataset is need and should be initialized"
self.assertEqual(error_msg, cpt.get_exception_message(e))
except Exception as e:
self.assertTrue(False)
def test_infer_from_dataset(self):
"""
Test Dataset With Fetch Handler. TestCases.
infer_from_dataset should run without raising.
"""
slots_vars, out = self.net()
files = ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]
dataset = self.get_dataset(slots_vars, files)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
try:
exe.infer_from_dataset(fluid.default_main_program(), dataset)
except ImportError as e:
print("warning: we skip trainer_desc_pb2 import problem in windows")
except Exception as e:
self.assertTrue(False)
def test_fetch_handler(self):
"""
Test Dataset With Fetch Handler. TestCases.
train_from_dataset should accept a FetchHandler on the fc output.
"""
slots_vars, out = self.net()
files = ["test_queue_dataset_run_a.txt", "test_queue_dataset_run_b.txt"]
dataset = self.get_dataset(slots_vars, files)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
fh = fluid.executor.FetchHandler(out.name)
fh.help()
try:
exe.train_from_dataset(
program=fluid.default_main_program(),
dataset=dataset,
fetch_handler=fh)
except ImportError as e:
print("warning: we skip trainer_desc_pb2 import problem in windows")
except RuntimeError as e:
error_msg = "dataset is need and should be initialized"
self.assertEqual(error_msg, cpt.get_exception_message(e))
except Exception as e:
self.assertTrue(False)
class TestDataset2(unittest.TestCase):
""" TestCases for Dataset.
Exercises fleet (parameter-server / pslib / BoxPS) integration paths;
most error branches are expected and only logged.
"""
def setUp(self):
""" TestCases for Dataset. """
self.use_data_loader = False
self.epoch_num = 10
self.drop_last = False
def test_dataset_fleet(self):
"""
Testcase for InMemoryDataset from create to run.
Currently skipped: parameter-server pslib UT will be added later.
"""
self.skipTest("parameter server will add pslib UT later")
with open("test_in_memory_dataset2_run_a.txt", "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_in_memory_dataset2_run_b.txt", "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
f.write(data)
train_program = fluid.Program()
startup_program = fluid.Program()
scope = fluid.Scope()
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
with fluid.program_guard(train_program, startup_program):
slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"]
slots_vars = []
for slot in slots:
var = fluid.layers.data(\
name=slot, shape=[1], dtype="float32", lod_level=1)
slots_vars.append(var)
# A dummy cost so the optimizer has something to minimize.
fake_cost = \
fluid.layers.elementwise_sub(slots_vars[0], slots_vars[-1])
fake_cost = fluid.layers.mean(fake_cost)
with fluid.scope_guard(scope):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
try:
fleet.init()
except ImportError as e:
print("warning: no mpi4py")
adam = fluid.optimizer.Adam(learning_rate=0.000005)
try:
adam = fleet.distributed_optimizer(adam)
adam.minimize([fake_cost], [scope])
except AttributeError as e:
print("warning: no mpi")
except ImportError as e:
print("warning: no mpi4py")
exe.run(startup_program)
dataset = paddle.distributed.InMemoryDataset()
dataset.init(
batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([
"test_in_memory_dataset2_run_a.txt",
"test_in_memory_dataset2_run_b.txt"
])
dataset.load_into_memory()
fleet._opt_info = None
fleet._fleet_ptr = None
os.remove("./test_in_memory_dataset2_run_a.txt")
os.remove("./test_in_memory_dataset2_run_b.txt")
def test_dataset_fleet2(self):
"""
Testcase for InMemoryDataset from create to run.
Uses the pslib fleet; also probes many DatasetBase / QueueDataset /
FileInstantDataset methods whose failures are expected and logged.
"""
with open("test_in_memory_dataset2_run2_a.txt", "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_in_memory_dataset2_run2_b.txt", "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
f.write(data)
train_program = fluid.Program()
startup_program = fluid.Program()
scope = fluid.Scope()
from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
with fluid.program_guard(train_program, startup_program):
slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"]
slots_vars = []
for slot in slots:
var = fluid.layers.data(\
name=slot, shape=[1], dtype="float32", lod_level=1)
slots_vars.append(var)
fake_cost = \
fluid.layers.elementwise_sub(slots_vars[0], slots_vars[-1])
fake_cost = fluid.layers.mean(fake_cost)
with fluid.scope_guard(scope):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
try:
fleet.init()
except ImportError as e:
print("warning: no mpi4py")
adam = fluid.optimizer.Adam(learning_rate=0.000005)
try:
# Distributed optimizer with a dummy HDFS strategy.
adam = fleet.distributed_optimizer(
adam,
strategy={
"fs_uri": "fs_uri_xxx",
"fs_user": "fs_user_xxx",
"fs_passwd": "fs_passwd_xxx",
"fs_hadoop_bin": "fs_hadoop_bin_xxx"
})
adam.minimize([fake_cost], [scope])
except AttributeError as e:
print("warning: no mpi")
except ImportError as e:
print("warning: no mpi4py")
exe.run(startup_program)
dataset = paddle.distributed.InMemoryDataset()
dataset.init(
batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([
"test_in_memory_dataset2_run2_a.txt",
"test_in_memory_dataset2_run2_b.txt"
])
dataset.load_into_memory()
try:
dataset.global_shuffle(fleet)
except:
print("warning: catch expected error")
fleet._opt_info = None
fleet._fleet_ptr = None
# Fresh dataset: probe private setters / lifecycle calls; most are
# expected to fail without a fully initialized fleet environment.
dataset = paddle.distributed.InMemoryDataset()
dataset.init(fs_name="", fs_ugi="")
d = paddle.distributed.fleet.DatasetBase()
try:
dataset._set_feed_type("MultiSlotInMemoryDataFeed")
except:
print("warning: catch expected error")
dataset.thread_num = 0
try:
dataset._prepare_to_run()
except:
print("warning: catch expected error")
try:
dataset.preprocess_instance()
except:
print("warning: catch expected error")
try:
dataset.set_current_phase(1)
except:
print("warning: catch expected error")
try:
dataset.postprocess_instance()
except:
print("warning: catch expected error")
dataset._set_fleet_send_batch_size(1024)
try:
dataset.global_shuffle()
except:
print("warning: catch expected error")
#dataset.get_pv_data_size()
dataset.get_memory_data_size()
dataset.get_shuffle_data_size()
# QueueDataset does not support shuffling; both calls must fail.
dataset = paddle.distributed.QueueDataset()
try:
dataset.local_shuffle()
except:
print("warning: catch expected error")
try:
dataset.global_shuffle()
except:
print("warning: catch expected error")
# FileInstantDataset does not support shuffling either.
dataset = paddle.distributed.fleet.FileInstantDataset()
try:
dataset.local_shuffle()
except:
print("warning: catch expected error")
try:
dataset.global_shuffle()
except:
print("warning: catch expected error")
os.remove("./test_in_memory_dataset2_run2_a.txt")
os.remove("./test_in_memory_dataset2_run2_b.txt")
def test_bosps_dataset_fleet2(self):
"""
Testcase for InMemoryDataset from create to run.
Same flow as test_dataset_fleet2, but using BoxPSDataset and its
PV-merge related settings.
"""
with open("test_in_memory_dataset2_run2_a.txt", "w") as f:
data = "1 1 2 3 3 4 5 5 5 5 1 1\n"
data += "1 2 2 3 4 4 6 6 6 6 1 2\n"
data += "1 3 2 3 5 4 7 7 7 7 1 3\n"
f.write(data)
with open("test_in_memory_dataset2_run2_b.txt", "w") as f:
data = "1 4 2 3 3 4 5 5 5 5 1 4\n"
data += "1 5 2 3 4 4 6 6 6 6 1 5\n"
data += "1 6 2 3 5 4 7 7 7 7 1 6\n"
data += "1 7 2 3 6 4 8 8 8 8 1 7\n"
f.write(data)
train_program = fluid.Program()
startup_program = fluid.Program()
scope = fluid.Scope()
from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
with fluid.program_guard(train_program, startup_program):
slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"]
slots_vars = []
for slot in slots:
var = fluid.layers.data(\
name=slot, shape=[1], dtype="float32", lod_level=1)
slots_vars.append(var)
fake_cost = \
fluid.layers.elementwise_sub(slots_vars[0], slots_vars[-1])
fake_cost = fluid.layers.mean(fake_cost)
with fluid.scope_guard(scope):
place = fluid.CPUPlace()
exe = fluid.Executor(place)
try:
fleet.init()
except ImportError as e:
print("warning: no mpi4py")
adam = fluid.optimizer.Adam(learning_rate=0.000005)
try:
adam = fleet.distributed_optimizer(
adam,
strategy={
"fs_uri": "fs_uri_xxx",
"fs_user": "fs_user_xxx",
"fs_passwd": "fs_passwd_xxx",
"fs_hadoop_bin": "fs_hadoop_bin_xxx"
})
adam.minimize([fake_cost], [scope])
except AttributeError as e:
print("warning: no mpi")
except ImportError as e:
print("warning: no mpi4py")
exe.run(startup_program)
dataset = paddle.distributed.fleet.BoxPSDataset()
dataset.init(
batch_size=32,
thread_num=3,
pipe_command="cat",
use_var=slots_vars)
dataset.set_filelist([
"test_in_memory_dataset2_run2_a.txt",
"test_in_memory_dataset2_run2_b.txt"
])
dataset.load_into_memory()
try:
dataset.global_shuffle(fleet)
except:
print("warning: catch expected error")
fleet._opt_info = None
fleet._fleet_ptr = None
# Fresh BoxPSDataset with PV-merge options enabled.
dataset = paddle.distributed.fleet.BoxPSDataset()
dataset.init(
rank_offset="",
pv_batch_size=1,
fs_name="",
fs_ugi="",
data_feed_type="MultiSlotInMemoryDataFeed",
parse_logkey=True,
merge_by_sid=True,
enable_pv_merge=True)
d = paddle.distributed.fleet.DatasetBase()
try:
dataset._set_feed_type("MultiSlotInMemoryDataFeed")
except:
print("warning: catch expected error")
dataset.thread_num = 0
try:
dataset._prepare_to_run()
except:
print("warning: catch expected error")
dataset._set_parse_logkey(True)
dataset._set_merge_by_sid(True)
dataset._set_enable_pv_merge(True)
try:
dataset.preprocess_instance()
except:
print("warning: catch expected error")
try:
dataset.set_current_phase(1)
except:
print("warning: catch expected error")
try:
dataset.postprocess_instance()
except:
print("warning: catch expected error")
dataset._set_fleet_send_batch_size(1024)
try:
dataset.global_shuffle()
except:
print("warning: catch expected error")
#dataset.get_pv_data_size()
dataset.get_memory_data_size()
dataset.get_shuffle_data_size()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
unittest.main()
| PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_dataset.py | Python | apache-2.0 | 43,925 |
using System;
using System.Xml.Serialization;
namespace DeRange.Config
{
public class ParentItem : ChangeItem
{
    private Guid mp_guid;

    /// <summary>
    /// Globally unique identifier for this configuration item.
    /// Setting a new value re-registers the item so the
    /// <see cref="Registry{T}"/> lookup stays consistent, then raises a
    /// property-changed notification.
    /// </summary>
    [XmlElement(ElementName = "GUID")]
    public Guid GUID
    {
        get { return mp_guid; }
        set
        {
            if (mp_guid == value)
            {
                // No change — avoid a spurious unregister/notify cycle.
                return;
            }

            Registry<ParentItem>.UnRegister(this);
            mp_guid = value;
            Registry<ParentItem>.Register(this);
            NotifyPropertyChanged();
        }
    }

    /// <summary>Creates an item with a freshly generated GUID.</summary>
    public ParentItem()
    {
        GUID = Guid.NewGuid();
    }
}
}
| bright-tools/DeRange | DeRange/Config/ParentItem.cs | C# | apache-2.0 | 722 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.Mvc;
namespace API_GrupoE.Controllers
{
public class HomeController : Controller
{
    /// <summary>
    /// Renders the default landing page, setting the view title.
    /// </summary>
    /// <returns>The Index view.</returns>
    public ActionResult Index()
    {
        ViewBag.Title = "Home Page";
        return View();
    }
}
}
| luisdeol/APIgrupoE | API_GrupoE/Controllers/HomeController.cs | C# | apache-2.0 | 330 |
//////////////////////////////////////////////////////////////////////////////////////
//
// Copyright 2012 Freshplanet (http://freshplanet.com | opensource@freshplanet.com)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//////////////////////////////////////////////////////////////////////////////////////
package com.freshplanet.flurry.functions.analytics;
import android.util.Log;
import com.adobe.fre.FREContext;
import com.adobe.fre.FREFunction;
import com.adobe.fre.FREObject;
import com.flurry.android.FlurryAgent;
/**
 * AIR native-extension function that ends the current Flurry analytics
 * session for the Android activity hosting the extension context.
 */
public class StopSessionFunction implements FREFunction {

    // Log tag; constant, so declared static final.
    private static final String TAG = "[AirFlurry][StopSessionFunction]";

    /**
     * Ends the Flurry session.
     *
     * @param context the FRE extension context, used to obtain the activity
     * @param args    unused
     * @return always {@code null}; the ActionScript caller expects no result
     */
    @Override
    public FREObject call(FREContext context, FREObject[] args) {
        Log.d(TAG, "call");
        FlurryAgent.onEndSession(context.getActivity());
        return null;
    }
}
| photobox/ANE-Flurry | android/src/main/java/com/freshplanet/flurry/functions/analytics/StopSessionFunction.java | Java | apache-2.0 | 1,344 |
/* */ package com.hundsun.network.gates.genshan.biz.domain.query;
/* */
/* */ import com.hundsun.network.gates.luosi.common.page.Pagination;
/* */
/**
 * Paginated query object for project-type attributes.  Plain holder with
 * one field per searchable attribute column.
 *
 * <p>Note: the type parameter name {@code ProjectTypeAttri} intentionally
 * mirrors the entity class name and is forwarded to {@link Pagination}.
 */
public class ProjectAttriQuery<ProjectTypeAttri> extends Pagination<ProjectTypeAttri> {

    private static final long serialVersionUID = -6343772754962024860L;

    /** Project type code filter. */
    private String proTypeCode;
    /** Project type display name filter. */
    private String proTypeName;
    /** Attribute key (internal name). */
    private String keyName;
    /** Attribute title (display label). */
    private String keyTitle;
    /** Form input widget type. */
    private String inputType;
    /** Free-form text value. */
    private String text;
    /** Remark / description. */
    private String remark;
    /** Enabled flag. */
    private Short enable;
    /** Display order. */
    private Short rank;
    /** Whether the attribute is mandatory. */
    private Short isRequired;
    /** Validation rule applied to the attribute value. */
    private String valueValidate;

    public String getProTypeCode() {
        return this.proTypeCode;
    }

    public void setProTypeCode(String proTypeCode) {
        this.proTypeCode = proTypeCode;
    }

    public String getProTypeName() {
        return this.proTypeName;
    }

    public void setProTypeName(String proTypeName) {
        this.proTypeName = proTypeName;
    }

    public String getKeyName() {
        return this.keyName;
    }

    public void setKeyName(String keyName) {
        this.keyName = keyName;
    }

    public String getKeyTitle() {
        return this.keyTitle;
    }

    public void setKeyTitle(String keyTitle) {
        this.keyTitle = keyTitle;
    }

    public String getInputType() {
        return this.inputType;
    }

    public void setInputType(String inputType) {
        this.inputType = inputType;
    }

    public String getText() {
        return this.text;
    }

    public void setText(String text) {
        this.text = text;
    }

    public String getRemark() {
        return this.remark;
    }

    public void setRemark(String remark) {
        this.remark = remark;
    }

    public Short getEnable() {
        return this.enable;
    }

    public void setEnable(Short enable) {
        this.enable = enable;
    }

    public Short getRank() {
        return this.rank;
    }

    public void setRank(Short rank) {
        this.rank = rank;
    }

    public Short getIsRequired() {
        return this.isRequired;
    }

    public void setIsRequired(Short isRequired) {
        this.isRequired = isRequired;
    }

    public String getValueValidate() {
        return this.valueValidate;
    }

    public void setValueValidate(String valueValidate) {
        this.valueValidate = valueValidate;
    }
}
/* Location: E:\__安装归档\linquan-20161112\deploy15\genshan\webroot\WEB-INF\classes\
* Qualified Name: com.hundsun.network.gates.genshan.biz.domain.query.ProjectAttriQuery
* JD-Core Version: 0.6.0
*/ | hnccfr/ccfrweb | admin/src/com/hundsun/network/gates/genshan/biz/domain/query/ProjectAttriQuery.java | Java | apache-2.0 | 3,572 |
/* */ package com.hundsun.network.hseccms.web.common;
/* */
/* */ import java.io.BufferedInputStream;
/* */ import java.io.InputStream;
/* */ import java.util.Properties;
/* */ import org.slf4j.Logger;
/* */ import org.slf4j.LoggerFactory;
/* */
/**
 * Loads application configuration from the classpath: the required
 * {@code default.properties} (package-relative) first, then the optional
 * {@code /app.properties} overrides merged on top.
 *
 * <p>Fix over the original decompiled code: the input stream is now closed
 * in a {@code finally} block, so it is no longer leaked when
 * {@link Properties#load} throws.  The duplicated load logic for the two
 * resources is factored into a single helper.
 */
public class PropertiesLoader
{
    private static final Logger logger = LoggerFactory.getLogger(PropertiesLoader.class);

    /** Base configuration; missing resource is a fatal error. */
    private static final String DEFAULT_FILENAME = "default.properties";

    /** Optional local overrides; missing resource is only logged. */
    private static final String LOCAL_PROPERTIES = "/app.properties";

    private static Properties properties = new Properties();

    static
    {
        loadResource(DEFAULT_FILENAME, true);
        loadResource(LOCAL_PROPERTIES, false);
    }

    /**
     * Loads one classpath resource into the shared {@link #properties}.
     *
     * @param name     classpath resource name (relative or absolute)
     * @param required when {@code true}, a missing resource or a load
     *                 failure aborts class initialization with a
     *                 {@link RuntimeException}
     */
    private static void loadResource(String name, boolean required)
    {
        InputStream in = PropertiesLoader.class.getResourceAsStream(name);
        if (in == null) {
            if (required) {
                logger.error("{} not found", name);
                throw new RuntimeException(name + " not found");
            }
            logger.info("{} not found", name);
            return;
        }
        if (!(in instanceof BufferedInputStream)) {
            in = new BufferedInputStream(in);
        }
        try {
            properties.load(in);
            logger.debug("{} loaded", name);
        } catch (Exception e) {
            logger.error("Error while processing {}", name);
            throw new RuntimeException("Error while processing " + name, e);
        } finally {
            try {
                in.close();
            } catch (Exception ignored) {
                // best effort: nothing useful to do if close fails
            }
        }
    }

    /** Returns the raw value for {@code key}, or {@code null} if absent. */
    public static String getProperty(String key)
    {
        return properties.getProperty(key);
    }

    /** Overrides (or adds) a property at runtime. */
    public static void setProperty(String key, String value)
    {
        properties.setProperty(key, value);
    }

    /** Connector path for the "file" resource type. */
    public static String getFileResourceTypePath()
    {
        return properties.getProperty("connector.resourceType.file.path");
    }

    /** Connector path for the "flash" resource type. */
    public static String getFlashResourceTypePath()
    {
        return properties.getProperty("connector.resourceType.flash.path");
    }

    /** Connector path for the "image" resource type. */
    public static String getImageResourceTypePath()
    {
        return properties.getProperty("connector.resourceType.image.path");
    }

    /** Connector path for the "media" resource type. */
    public static String getMediaResourceTypePath()
    {
        return properties.getProperty("connector.resourceType.media.path");
    }

    /** Allowed upload extensions for "file" resources. */
    public static String getFileResourceTypeAllowedExtensions()
    {
        return properties.getProperty("connector.resourceType.file.extensions.allowed");
    }

    /** Denied upload extensions for "file" resources. */
    public static String getFileResourceTypeDeniedExtensions()
    {
        return properties.getProperty("connector.resourceType.file.extensions.denied");
    }

    /** Allowed upload extensions for "flash" resources. */
    public static String getFlashResourceTypeAllowedExtensions()
    {
        return properties.getProperty("connector.resourceType.flash.extensions.allowed");
    }

    /** Denied upload extensions for "flash" resources. */
    public static String getFlashResourceTypeDeniedExtensions()
    {
        return properties.getProperty("connector.resourceType.flash.extensions.denied");
    }

    /** Allowed upload extensions for "image" resources. */
    public static String getImageResourceTypeAllowedExtensions()
    {
        return properties.getProperty("connector.resourceType.image.extensions.allowed");
    }

    /** Denied upload extensions for "image" resources. */
    public static String getImageResourceTypeDeniedExtensions()
    {
        return properties.getProperty("connector.resourceType.image.extensions.denied");
    }

    /** Allowed upload extensions for "media" resources. */
    public static String getMediaResourceTypeAllowedExtensions()
    {
        return properties.getProperty("connector.resourceType.media.extensions.allowed");
    }

    /** Denied upload extensions for "media" resources. */
    public static String getMediaResourceTypeDeniedExtensions()
    {
        return properties.getProperty("connector.resourceType.media.extensions.denied");
    }

    /** URL path under which user files are served. */
    public static String getUserFilesPath()
    {
        return properties.getProperty("connector.userFilesPath");
    }

    /** Absolute filesystem path where user files are stored. */
    public static String getUserFilesAbsolutePath()
    {
        return properties.getProperty("connector.userFilesAbsolutePath");
    }
}
/* Location: E:\__安装归档\linquan-20161112\deploy13\cmsWeb\WEB-INF\classes\
* Qualified Name: com.hundsun.network.hseccms.web.common.PropertiesLoader
* JD-Core Version: 0.6.0
*/ | hnccfr/ccfrweb | web/src/com/hundsun/network/hseccms/web/common/PropertiesLoader.java | Java | apache-2.0 | 5,669 |
/*
* Copyright 2016 Huawei Technologies Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openo.nfvo.resmanagement.service.mapper;
import java.util.List;
import java.util.Map;
import org.openo.nfvo.resmanagement.service.entity.HostEntity;
/**
* Host info interface.<br/>
* <p>
* </p>
*
* @author
* @version NFVO 0.5 Aug 24, 2016
*/
public interface HostMapper {

    /**
     * Fetches a single host record by its identifier.<br/>
     *
     * @param id host identifier
     * @return the matching entity, or {@code null} if none exists
     * @since NFVO 0.5
     */
    HostEntity getHost(String id);

    /**
     * Fetches all host records matching the given filter conditions.<br/>
     *
     * @param condition map of column-name to filter-value pairs
     * @return list of matching entities (possibly empty)
     * @since NFVO 0.5
     */
    List<HostEntity> getHosts(Map<String, Object> condition);

    /**
     * Deletes the host record with the given identifier.<br/>
     *
     * @param id host identifier
     * @return number of rows deleted
     * @since NFVO 0.5
     */
    int deleteHost(String id);

    /**
     * Deletes all host records belonging to the given VIM.<br/>
     *
     * @param vimId VIM identifier
     * @return number of rows deleted
     * @since NFVO 0.5
     */
    int deleteHostByVimId(String vimId);

    /**
     * Inserts a host record with every column populated. <br/>
     *
     * @param hostEntity entity to insert
     * @return number of rows inserted
     * @since NFVO 0.5
     */
    int addHost(HostEntity hostEntity);

    /**
     * Inserts a host record, writing only the non-null fields. <br/>
     *
     * @param hostEntity entity to insert
     * @return number of rows inserted
     * @since NFVO 0.5
     */
    int addHostSelective(HostEntity hostEntity);

    /**
     * Updates a host record, writing only the non-null fields. <br/>
     *
     * @param hostEntity entity carrying the id and the fields to update
     * @return number of rows updated
     * @since NFVO 0.5
     */
    int updateHostSelective(HostEntity hostEntity);

    /**
     * Updates every column of a host record. <br/>
     *
     * @param hostEntity entity carrying the id and all column values
     * @return number of rows updated
     * @since NFVO 0.5
     */
    int updateHost(HostEntity hostEntity);

    /**
     * Updates host records keyed by their VIM identifier. <br/>
     *
     * @param hostEntity entity carrying the VIM id and the fields to update
     * @return number of rows updated
     * @since NFVO 0.5
     */
    int updateHostByVimId(HostEntity hostEntity);
}
| open-o/nfvo | resmanagement/ResmanagementService/service/src/main/java/org/openo/nfvo/resmanagement/service/mapper/HostMapper.java | Java | apache-2.0 | 2,621 |
package camelinaction;
import java.io.File;
import org.apache.camel.test.spring.CamelSpringTestSupport;
import org.junit.Test;
import org.springframework.context.support.AbstractXmlApplicationContext;
import org.springframework.context.support.ClassPathXmlApplicationContext;
public class SpringOrderToCsvBeanTest extends CamelSpringTestSupport {

    /** Spring XML file that defines the route under test. */
    private static final String CONTEXT_XML = "camelinaction/SpringOrderToCsvBeanTest.xml";

    @Override
    protected AbstractXmlApplicationContext createApplicationContext() {
        return new ClassPathXmlApplicationContext(CONTEXT_XML);
    }

    @Test
    public void testOrderToCsvBean() throws Exception {
        // the custom inhouse format that the route should transform to CSV
        String incomingOrder = "0000005555000001144120091209 2319@1108";

        template.sendBodyAndHeader("direct:start", incomingOrder, "Date", "20091209");

        // the route writes one CSV report file per date
        File report = new File("target/orders/received/report-20091209.csv");
        assertTrue("File should exist", report.exists());

        // compare the expected file content
        String csv = context.getTypeConverter().convertTo(String.class, report);
        assertEquals("0000005555,20091209,0000011441,2319,1108", csv);
    }
}
| camelinaction/camelinaction2 | chapter3/transform/src/test/java/camelinaction/SpringOrderToCsvBeanTest.java | Java | apache-2.0 | 1,163 |
/*
* Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership. Crate licenses
* this file to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial agreement.
*/
package io.crate.planner.projection.builder;
import io.crate.analyze.symbol.Function;
import io.crate.analyze.symbol.InputColumn;
import io.crate.analyze.symbol.Literal;
import io.crate.analyze.symbol.Symbol;
import io.crate.test.integration.CrateUnitTest;
import io.crate.testing.TestingHelpers;
import io.crate.types.DataTypes;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
import static org.hamcrest.Matchers.*;
public class InputCreatingVisitorTest extends CrateUnitTest {
@Test
public void testNonDeterministicFunctionsReplacement() throws Exception {
Function fn1 = TestingHelpers.createFunction("non_deterministic", DataTypes.INTEGER, Arrays.<Symbol>asList(Literal.newLiteral(1), TestingHelpers.createReference("ref", DataTypes.INTEGER)), false, false);
Function fn2 = TestingHelpers.createFunction("non_deterministic", DataTypes.INTEGER, Arrays.<Symbol>asList(Literal.newLiteral(1), TestingHelpers.createReference("ref", DataTypes.INTEGER)), false, false);
List<Symbol> inputSymbols = Arrays.<Symbol>asList(
Literal.BOOLEAN_FALSE,
TestingHelpers.createFunction("deterministic", DataTypes.INTEGER, Literal.newLiteral(1), TestingHelpers.createReference("ref", DataTypes.INTEGER)),
fn1,
fn2
);
Function newSameFn = TestingHelpers.createFunction("non_deterministic", DataTypes.INTEGER, Arrays.<Symbol>asList(Literal.newLiteral(1), TestingHelpers.createReference("ref", DataTypes.INTEGER)), false, false);
Function newDifferentFn = TestingHelpers.createFunction("non_deterministic", DataTypes.INTEGER, Arrays.<Symbol>asList(Literal.newLiteral(1), TestingHelpers.createReference("ref2", DataTypes.INTEGER)), false, false);
InputCreatingVisitor.Context context = new InputCreatingVisitor.Context(inputSymbols);
Symbol replaced1 = InputCreatingVisitor.INSTANCE.process(fn1, context);
assertThat(replaced1, is(instanceOf(InputColumn.class)));
assertThat(((InputColumn)replaced1).index(), is(2));
Symbol replaced2 = InputCreatingVisitor.INSTANCE.process(fn2, context);
assertThat(replaced2, is(instanceOf(InputColumn.class)));
assertThat(((InputColumn)replaced2).index(), is(3));
Symbol replaced3 = InputCreatingVisitor.INSTANCE.process(newSameFn, context);
assertThat(replaced3, is(equalTo((Symbol)newSameFn))); // not replaced
Symbol replaced4 = InputCreatingVisitor.INSTANCE.process(newDifferentFn, context);
assertThat(replaced4, is(equalTo((Symbol)newDifferentFn)));
}
}
| aslanbekirov/crate | sql/src/test/java/io/crate/planner/projection/builder/InputCreatingVisitorTest.java | Java | apache-2.0 | 3,645 |
{{-- Supplier (fornecedor) index page: lists suppliers in a DataTable with
     view / edit / delete actions guarded by role permissions. --}}
@extends('layouts.app')
{{-- Page-level assets: jQuery UI theme and DataTables Bootstrap styling. --}}
<link href="{{ asset('js/jquery-ui-themes-1.12.0/themes/base/jquery-ui.css') }}" rel="stylesheet">
<link rel="stylesheet" href="{{ asset('plugins/datatables/dataTables.bootstrap.css') }}">
@section('main-content')
<div class="row">
    <div class="col-lg-12 margin-tb">
        @section('contentheader_title')
        <div class="pull-left">
            <h2>Fornecedores</h2>
        </div>
        @endsection
        <div class="pull-right" style="margin-right: 2%;">
            @permission('fornecedor-create')
            <a class="btn btn-default" href="{{ route('fornecedor.create') }}">Cadastrar</a>
            @endpermission
        </div>
    </div>
</div>
<br>
<div class="box box-primary " style="margin-left: 2%; margin-right: 2%; width: 96%;">
    <div class="box-body">
        <div class="table-responsive col-lg-12 col-md-12 col-sm-12">
            {{-- #table is initialised as a DataTable by the script at the bottom of this view. --}}
            {{-- NOTE(review): the headers say Nome/Descrição but the rows render
                 razaosocial/nomefantasia — confirm which labels are intended. --}}
            <table id="table" class="table table-bordered table-hover dataTable" role="grid">
                <thead>
                    <tr>
                        <th class="text-center" width="4%">Nº</th>
                        <th class="text-center">Nome</th>
                        <th class="text-center">Descrição</th>
                        <th class="text-center no-sort">Opções</th>
                    </tr>
                </thead>
                <tbody>
                    {{-- $i is expected to be provided by the controller as the row counter
                         — NOTE(review): confirm it is passed to the view. --}}
                    @foreach ($fornecedors as $key => $fornecedor)
                    <tr>
                        <td>{{ ++$i }}</td>
                        <td>{{ $fornecedor->razaosocial }}</td>
                        <td>{{ $fornecedor->nomefantasia }}</td>
                        <td width="14.5%">
                            {{-- View button: opens the details modal (id = record id). --}}
                            <a class="btn btn-default" data-target="#{{$fornecedor->id}}" data-toggle="modal" title="Visualizar">
                                <i class="fa fa-eye"> </i>
                            </a>
                            @permission('fornecedor-edit')
                            <a class="btn btn-default" title="Editar" href="{{ route('fornecedor.edit',$fornecedor->id) }}">
                                <i class="fa fa-edit"> </i>
                            </a>
                            @endpermission
                            @permission('fornecedor-delete')
                            {{-- Delete button: opens the confirmation modal (id = "e" + record id). --}}
                            <a class="btn btn-default" data-toggle="modal" data-target="#e{{$fornecedor->id}}" title="Excluir">
                                <i class="fa fa-trash"> </i>
                            </a>
                            @endpermission
                            {{-- Delete-confirmation modal; submits a DELETE to fornecedor.destroy. --}}
                            @if(!empty($fornecedor))
                            <div class="modal fade" id="e{{$fornecedor->id}}" tabindex="-1" role="dialog" aria-labelledby="myModalLabel">
                                <div class="modal-dialog" role="document">
                                    <div class="modal-content">
                                        <div class="modal-header">
                                            <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
                                            <h4 class="modal-title" id="myModalLabel">Excluir</h4>
                                        </div>
                                        <div class="modal-body">
                                            Tem certeza que deseja excluir?
                                        </div>
                                        <div class="modal-footer">
                                            <button type="button" class="btn btn-default" data-dismiss="modal">Fechar</button>
                                            {!! Form::open(['method' => 'DELETE','route' => ['fornecedor.destroy', $fornecedor->id],'style'=>'display:inline']) !!}
                                            {!! Form::submit('OK', ['class' => 'btn btn-primary']) !!}
                                            {!! Form::close() !!}
                                        </div>
                                    </div>
                                </div>
                            </div>
                            @endif
                            {{-- Details modal. NOTE(review): the title says "clínica" and the body
                                 reads ->nome / ->descricao while the table rows use ->razaosocial /
                                 ->nomefantasia — this looks copy-pasted from another view; confirm
                                 the attribute names on the Fornecedor model. --}}
                            <div class="modal fade" id="{{$fornecedor->id}}" tabindex="-1" role="dialog" aria-labelledby="myModalLabel">
                                <div class="modal-dialog" role="document">
                                    <div class="modal-content">
                                        <div class="modal-header">
                                            <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button>
                                            <h4 class="modal-title" id="myModalLabel"><strong>Dados da clínica: {{$fornecedor->nome}}</strong></h4>
                                        </div>
                                        <div class="modal-body">
                                            <div class="row">
                                                <div class="col-xs-12 col-sm-12 col-md-12">
                                                    <strong>Nome:</strong>
                                                    {{ $fornecedor->nome}}
                                                    <br><br>
                                                </div>
                                                <div class="col-xs-12 col-sm-12 col-md-12">
                                                    <strong>Descrição:</strong>
                                                    {{ $fornecedor->descricao}}
                                                    <br><br>
                                                </div>
                                            </div>
                                        </div>
                                        <div class="modal-footer">
                                            <button type="button" class="btn btn-default" data-dismiss="modal">Fechar</button>
                                        </div>
                                    </div>
                                </div>
                            </div>
                        </td>
                    </tr>
                    @endforeach
                </tbody>
            </table>
        </div>
    </div>
</div>
@endsection
<script src = "{{ asset('js/jquery-3.1.0.js') }}"></script>
<script src = "{{ asset('js/jquery.maskedinput.js') }}" type = "text/javascript" ></script>
<script src = "{{ asset('js/jquery-ui-1.12.0/jquery-ui.js') }}" type = "text/javascript" ></script>
<!-- DataTables -->
<script src="{{ asset('plugins/datatables/jquery.dataTables.js') }}" type = "text/javascript"></script>
<script src="{{ asset('plugins/datatables/dataTables.bootstrap.min.js') }}"></script>
<script>
    $(function ($) {
        // Both tables share the same configuration. A fresh object is built
        // per call because DataTables mutates the settings object it receives.
        function dataTableOptions() {
            return {
                "paging": true,
                "lengthChange": true,
                "searching": true,
                "ordering": true,
                "info": true,
                "autoWidth": true,
                "lengthMenu": [[10, 30, 50, -1], [10, 30, 50, "Todos"]],
                "columnDefs": [{
                    "targets": 'no-sort',
                    "orderable": false,
                }]
            };
        }
        $('#table').DataTable(dataTableOptions());
        $('#table2').DataTable(dataTableOptions());
    });
</script>
<script>
    @if (Session::get('success'))
    $(function () {
        // Surface the flash message stored in the session as a SweetAlert popup.
        var message = "{{Session::get('success')}}";
        swal({
            title: '',
            text: message,
            confirmButtonColor: "#66BB6A",
            type: "success",
            html: true
        });
    });
    @endif
</script>
| EquipeSECH/sech | resources/views/fornecedor/index.blade.php | PHP | apache-2.0 | 8,098 |
/*
* Copyright 2016 Zot201
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package zotmc.onlysilver.config.gui;
import java.util.List;
import net.minecraft.block.Block;
import net.minecraft.item.Item;
import org.apache.commons.lang3.mutable.Mutable;
import com.google.common.base.Supplier;
import com.google.common.primitives.Floats;
@SuppressWarnings("WeakerAccess")
public abstract class Icon<T extends Icon<T>> {
public static final float PHI_M1 = 0.618034f;
protected int color = 0xFFFFFFFF;
public final void drawIcon(int x, int y) {
drawIcon(x, y, 0);
}
protected abstract void drawIcon(int x, int y, int z);
@SuppressWarnings("unchecked")
public T setAlpha(int alpha) {
color = alpha << 24 | color & 0xFFFFFF;
return (T) this;
}
@SuppressWarnings("unchecked")
public T setBrightness(float brightness) {
int i = toInt(brightness);
this.color = color & 0xFF000000 | i << 16 | i << 8 | i;
return (T) this;
}
private static int toInt(float a) {
return !Floats.isFinite(a) ? 0 : (int) Math.rint(255.0f * Math.max(0, Math.min(1, a)));
}
protected boolean common() { return true; }
public Icon<?> overlay(Icon<?> icon) {
return new Overlay(this, icon);
}
public Icon<?> overlay(Item i) {
return overlay(new ItemIcon(i));
}
public Icon<?> overlay(Block b) {
return overlay(new ItemIcon(b));
}
public CategoryRow categoryRow(final Supplier<String> text) {
return CategoryRow.create(this, text);
}
public Element title(final Supplier<String> text, int x, int y) {
return new Title(this, text, x, y);
}
public IconButton iconButton(final Mutable<Boolean> state,
final Supplier<List<String>> texts, final Holder<List<String>> textHolder) {
return new IconButton(new IconButton.Handler() {
@Override public Icon<?> icon() {
return Icon.this;
}
@Override public boolean isCommon() {
return Icon.this.common();
}
@Override public boolean getState() {
return state.getValue();
}
@Override public void toggleState() {
state.setValue(!state.getValue());
}
@Override public Holder<List<String>> hoveringTextHolder() {
return textHolder;
}
@Override public List<String> getHoveringTexts() {
return texts.get();
}
});
}
private static class Overlay extends Icon<Overlay> {
private final Icon<?> top, bottom;
public Overlay(Icon<?> top, Icon<?> bottom) {
this.top = top;
this.bottom = bottom;
}
@Override protected void drawIcon(int x, int y, int z) {
bottom.drawIcon(x, y, z);
top.drawIcon(x, y, z + 50);
}
@Override public Overlay setAlpha(int alpha) {
bottom.setAlpha(alpha);
top.setAlpha(alpha);
return this;
}
@Override public Overlay setBrightness(float brightness) {
bottom.setBrightness(brightness);
top.setBrightness(brightness);
return this;
}
}
} | Zot201/OnlySilver | src/main/java/zotmc/onlysilver/config/gui/Icon.java | Java | apache-2.0 | 3,510 |
/*
* Copyright (C) 2017 Julien Viet
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package io.vertx.pgclient.impl.codec;
import io.netty.util.collection.IntObjectHashMap;
import io.netty.util.collection.IntObjectMap;
import io.vertx.core.impl.logging.Logger;
import io.vertx.core.impl.logging.LoggerFactory;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import io.vertx.pgclient.data.Box;
import io.vertx.pgclient.data.Circle;
import io.vertx.pgclient.data.Line;
import io.vertx.pgclient.data.LineSegment;
import io.vertx.sqlclient.Tuple;
import io.vertx.sqlclient.data.Numeric;
import io.vertx.pgclient.data.Interval;
import io.vertx.pgclient.data.Path;
import io.vertx.pgclient.data.Point;
import io.vertx.pgclient.data.Polygon;
import io.vertx.core.buffer.Buffer;
import io.vertx.sqlclient.impl.TupleInternal;
import java.sql.JDBCType;
import java.time.*;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;
/**
* PostgreSQL <a href="https://github.com/postgres/postgres/blob/master/src/include/catalog/pg_type.h">object
* identifiers (OIDs)</a> for data types
*
* @author <a href="mailto:emad.albloushi@gmail.com">Emad Alblueshi</a>
*/
enum DataType {

  // Each constant: OID, binary-format support, encoding/decoding Java class,
  // JDBC type, and (optionally) a Tuple accessor used for parameter extraction.

  // boolean
  BOOL(16, true, Boolean.class, JDBCType.BOOLEAN, Tuple::getBoolean),
  BOOL_ARRAY(1000, true, Boolean[].class, JDBCType.BOOLEAN, Tuple::getBooleanArray),
  // integral numbers
  INT2(21, true, Short.class, Number.class, JDBCType.SMALLINT, Tuple::getShort),
  INT2_ARRAY(1005, true, Short[].class, Number[].class, JDBCType.SMALLINT, Tuple::getShortArray),
  INT4(23, true, Integer.class, Number.class, JDBCType.INTEGER, Tuple::getInteger),
  INT4_ARRAY(1007, true, Integer[].class, Number[].class, JDBCType.INTEGER, Tuple::getIntegerArray),
  INT8(20, true, Long.class, Number.class, JDBCType.BIGINT, Tuple::getLong),
  INT8_ARRAY(1016, true, Long[].class, Number[].class, JDBCType.BIGINT, Tuple::getLongArray),
  // floating point / numeric
  FLOAT4(700, true, Float.class, Number.class, JDBCType.REAL, Tuple::getFloat),
  FLOAT4_ARRAY(1021, true, Float[].class, Number[].class, JDBCType.REAL, Tuple::getFloatArray),
  FLOAT8(701, true, Double.class, Number.class, JDBCType.DOUBLE, Tuple::getDouble),
  FLOAT8_ARRAY(1022, true, Double[].class, Number[].class, JDBCType.DOUBLE, Tuple::getDoubleArray),
  NUMERIC(1700, false, Numeric.class, Number.class, JDBCType.NUMERIC, Tuple::getNumeric),
  NUMERIC_ARRAY(1231, false, Numeric[].class, Number[].class, JDBCType.NUMERIC, Tuple::getNumericArray),
  MONEY(790, true, Object.class, null),
  MONEY_ARRAY(791, true, Object[].class, null),
  // bit strings
  BIT(1560, true, Object.class, JDBCType.BIT),
  BIT_ARRAY(1561, true, Object[].class, JDBCType.BIT),
  VARBIT(1562, true, Object.class, JDBCType.OTHER),
  VARBIT_ARRAY(1563, true, Object[].class, JDBCType.BIT),
  // character types
  CHAR(18, true, String.class, JDBCType.BIT, Tuple::getString),
  CHAR_ARRAY(1002, true, String[].class, JDBCType.CHAR, Tuple::getStringArray),
  VARCHAR(1043, true, String.class, JDBCType.VARCHAR, Tuple::getString),
  VARCHAR_ARRAY(1015, true, String[].class, JDBCType.VARCHAR, Tuple::getStringArray),
  BPCHAR(1042, true, String.class, JDBCType.VARCHAR, Tuple::getString),
  BPCHAR_ARRAY(1014, true, String[].class, JDBCType.VARCHAR, Tuple::getStringArray),
  TEXT(25, true, String.class, JDBCType.LONGVARCHAR, Tuple::getString),
  TEXT_ARRAY(1009, true, String[].class, JDBCType.LONGVARCHAR, Tuple::getStringArray),
  NAME(19, true, String.class, JDBCType.VARCHAR, Tuple::getString),
  NAME_ARRAY(1003, true, String[].class, JDBCType.VARCHAR, Tuple::getStringArray),
  // date/time types
  DATE(1082, true, LocalDate.class, JDBCType.DATE, Tuple::getLocalDate),
  DATE_ARRAY(1182, true, LocalDate[].class, JDBCType.DATE, Tuple::getLocalDateArray),
  TIME(1083, true, LocalTime.class, JDBCType.TIME, Tuple::getLocalTime),
  TIME_ARRAY(1183, true, LocalTime[].class, JDBCType.TIME, Tuple::getLocalTimeArray),
  TIMETZ(1266, true, OffsetTime.class, JDBCType.TIME_WITH_TIMEZONE, Tuple::getOffsetTime),
  TIMETZ_ARRAY(1270, true, OffsetTime[].class, JDBCType.TIME_WITH_TIMEZONE, Tuple::getOffsetTimeArray),
  TIMESTAMP(1114, true, LocalDateTime.class, JDBCType.TIMESTAMP, Tuple::getLocalDateTime),
  TIMESTAMP_ARRAY(1115, true, LocalDateTime[].class, JDBCType.TIMESTAMP, Tuple::getLocalDateTimeArray),
  TIMESTAMPTZ(1184, true, OffsetDateTime.class, JDBCType.TIMESTAMP_WITH_TIMEZONE, Tuple::getOffsetDateTime),
  TIMESTAMPTZ_ARRAY(1185, true, OffsetDateTime[].class, JDBCType.TIMESTAMP_WITH_TIMEZONE, Tuple::getOffsetDateTimeArray),
  INTERVAL(1186, true, Interval.class, JDBCType.DATE),
  INTERVAL_ARRAY(1187, true, Interval[].class, JDBCType.DATE),
  // binary data
  BYTEA(17, true, Buffer.class, JDBCType.BINARY, Tuple::getBuffer),
  BYTEA_ARRAY(1001, true, Buffer[].class, JDBCType.BINARY, Tuple::getBufferArray),
  // network address types
  MACADDR(829, true, Object.class, JDBCType.OTHER),
  INET(869, true, Object[].class, JDBCType.OTHER),
  CIDR(650, true, Object.class, JDBCType.OTHER),
  MACADDR8(774, true, Object[].class, JDBCType.OTHER),
  // UUID and JSON
  UUID(2950, true, UUID.class, JDBCType.OTHER, Tuple::getUUID),
  UUID_ARRAY(2951, true, UUID[].class, JDBCType.OTHER, Tuple::getUUIDArray),
  JSON(114, true, Object.class, JDBCType.OTHER, Tuple::getJsonElement),
  JSON_ARRAY(199, true, Object[].class, JDBCType.OTHER, Tuple::getJsonElementArray),
  JSONB(3802, true, Object.class, JDBCType.OTHER, Tuple::getJsonElement),
  JSONB_ARRAY(3807, true, Object[].class, JDBCType.OTHER, Tuple::getJsonElementArray),
  XML(142, true, Object.class, JDBCType.OTHER),
  XML_ARRAY(143, true, Object[].class, JDBCType.OTHER),
  // geometric types
  POINT(600, true, Point.class, JDBCType.OTHER),
  POINT_ARRAY(1017, true, Point[].class, JDBCType.OTHER),
  LINE(628, true, Line.class, JDBCType.OTHER),
  LINE_ARRAY(629, true, Line[].class, JDBCType.OTHER),
  LSEG(601, true, LineSegment.class, JDBCType.OTHER),
  LSEG_ARRAY(1018, true, LineSegment[].class, JDBCType.OTHER),
  BOX(603, true, Box.class, JDBCType.OTHER),
  BOX_ARRAY(1020, true, Box[].class, JDBCType.OTHER),
  PATH(602, true, Path.class, JDBCType.OTHER),
  PATH_ARRAY(1019, true, Path[].class, JDBCType.OTHER),
  POLYGON(604, true, Polygon.class, JDBCType.OTHER),
  POLYGON_ARRAY(1027, true, Polygon[].class, JDBCType.OTHER),
  CIRCLE(718, true, Circle.class, JDBCType.OTHER),
  CIRCLE_ARRAY(719, true, Circle[].class, JDBCType.OTHER),
  // miscellaneous
  HSTORE(33670, true, Object.class, JDBCType.OTHER),
  OID(26, true, Object.class, JDBCType.OTHER),
  OID_ARRAY(1028, true, Object[].class, JDBCType.OTHER),
  VOID(2278, true, Object.class, JDBCType.OTHER),
  UNKNOWN(705, false, String.class, JDBCType.OTHER, ParamExtractor::extractUnknownType),
  // full-text search
  TS_VECTOR(3614, false, String.class, JDBCType.OTHER),
  TS_VECTOR_ARRAY(3643, false, String[].class, JDBCType.OTHER),
  TS_QUERY(3615, false, String.class, JDBCType.OTHER),
  TS_QUERY_ARRAY(3645, false, String[].class, JDBCType.OTHER);

  private static final Logger logger = LoggerFactory.getLogger(DataType.class);

  // lookup tables populated once from values() in the static initializer below
  private static final IntObjectMap<DataType> oidToDataType = new IntObjectHashMap<>();
  private static final Map<Class<?>, DataType> encodingTypeToDataType = new HashMap<>();

  // PostgreSQL object identifier of this type
  final int id;
  // true when this constant describes an array type (derived from decodingType)
  final boolean array;
  // true when the PostgreSQL binary wire format is supported for this type
  final boolean supportsBinary;
  final Class<?> encodingType; // Not really used for now
  // Java class produced when decoding values of this type
  final Class<?> decodingType;
  // closest JDBC equivalent (may be OTHER)
  final JDBCType jdbcType;
  // accessor used to pull a parameter of this type out of a Tuple; may be null
  final ParamExtractor<?> paramExtractor;

  // Convenience constructor: encoding and decoding class are the same.
  <T> DataType(int id, boolean supportsBinary, Class<T> type, JDBCType jdbcType, ParamExtractor<T> paramExtractor) {
    this(id, supportsBinary, type, type, jdbcType, paramExtractor);
  }

  // Convenience constructor: no parameter extractor.
  <T> DataType(int id, boolean supportsBinary, Class<T> type, JDBCType jdbcType) {
    this(id, supportsBinary, type, type, jdbcType, null);
  }

  <T> DataType(int id, boolean supportsBinary, Class<T> encodingType, Class<?> decodingType, JDBCType jdbcType, ParamExtractor<T> paramExtractor) {
    this.id = id;
    this.supportsBinary = supportsBinary;
    this.encodingType = encodingType;
    this.decodingType = decodingType;
    this.jdbcType = jdbcType;
    this.array = decodingType.isArray();
    this.paramExtractor = paramExtractor;
  }

  /**
   * Resolves a PostgreSQL type OID to its DataType constant.
   * Unhandled OIDs fall back to {@link #UNKNOWN} (logged at debug level).
   */
  static DataType valueOf(int oid) {
    DataType value = oidToDataType.get(oid);
    if (value == null) {
      logger.debug("Postgres type OID=" + oid + " not handled - using unknown type instead");
      return UNKNOWN;
    } else {
      return value;
    }
  }

  /**
   * Resolves a Java encoding class to its DataType constant.
   * Buffer subclasses map to {@link #BYTEA}; anything unmapped is {@link #UNKNOWN}.
   */
  static DataType lookup(Class<?> type) {
    DataType dataType = encodingTypeToDataType.get(type);
    if (dataType == null) {
      if (Buffer.class.isAssignableFrom(type)) {
        return BYTEA;
      }
      dataType = DataType.UNKNOWN;
    }
    return dataType;
  }

  static {
    // index every constant by its OID
    for (DataType dataType : values()) {
      oidToDataType.put(dataType.id, dataType);
    }
    // explicit Java-class -> DataType mapping used when encoding parameters;
    // note String maps to VARCHAR and JsonObject/JsonArray both map to JSON
    encodingTypeToDataType.put(String.class, VARCHAR);
    encodingTypeToDataType.put(String[].class, VARCHAR_ARRAY);
    encodingTypeToDataType.put(Boolean.class, BOOL);
    encodingTypeToDataType.put(Boolean[].class, BOOL_ARRAY);
    encodingTypeToDataType.put(Short.class, INT2);
    encodingTypeToDataType.put(Short[].class, INT2_ARRAY);
    encodingTypeToDataType.put(Integer.class, INT4);
    encodingTypeToDataType.put(Integer[].class, INT4_ARRAY);
    encodingTypeToDataType.put(Long.class, INT8);
    encodingTypeToDataType.put(Long[].class, INT8_ARRAY);
    encodingTypeToDataType.put(Float.class, FLOAT4);
    encodingTypeToDataType.put(Float[].class, FLOAT4_ARRAY);
    encodingTypeToDataType.put(Double.class, FLOAT8);
    encodingTypeToDataType.put(Double[].class, FLOAT8_ARRAY);
    encodingTypeToDataType.put(LocalDate.class, DATE);
    encodingTypeToDataType.put(LocalDate[].class, DATE_ARRAY);
    encodingTypeToDataType.put(LocalDateTime.class, TIMESTAMP);
    encodingTypeToDataType.put(LocalDateTime[].class, TIMESTAMP_ARRAY);
    encodingTypeToDataType.put(OffsetDateTime.class, TIMESTAMPTZ);
    encodingTypeToDataType.put(OffsetDateTime[].class, TIMESTAMPTZ_ARRAY);
    encodingTypeToDataType.put(Interval.class, INTERVAL);
    encodingTypeToDataType.put(Interval[].class, INTERVAL_ARRAY);
    encodingTypeToDataType.put(Buffer[].class, BYTEA_ARRAY);
    encodingTypeToDataType.put(UUID.class, UUID);
    encodingTypeToDataType.put(UUID[].class, UUID_ARRAY);
    encodingTypeToDataType.put(JsonObject.class, JSON);
    encodingTypeToDataType.put(JsonObject[].class, JSON_ARRAY);
    encodingTypeToDataType.put(JsonArray.class, JSON);
    encodingTypeToDataType.put(JsonArray[].class, JSON_ARRAY);
    encodingTypeToDataType.put(Point.class, POINT);
    encodingTypeToDataType.put(Point[].class, POINT_ARRAY);
    encodingTypeToDataType.put(Line.class, LINE);
    encodingTypeToDataType.put(Line[].class, LINE_ARRAY);
    encodingTypeToDataType.put(LineSegment.class, LSEG);
    encodingTypeToDataType.put(LineSegment[].class, LSEG_ARRAY);
    encodingTypeToDataType.put(Box.class, BOX);
    encodingTypeToDataType.put(Box[].class, BOX_ARRAY);
    encodingTypeToDataType.put(Path.class, PATH);
    encodingTypeToDataType.put(Path[].class, PATH_ARRAY);
    encodingTypeToDataType.put(Polygon.class, POLYGON);
    encodingTypeToDataType.put(Polygon[].class, POLYGON_ARRAY);
    encodingTypeToDataType.put(Circle.class, CIRCLE);
    encodingTypeToDataType.put(Circle[].class, CIRCLE_ARRAY);
  }
}
| vietj/vertx-pg-client | vertx-pg-client/src/main/java/io/vertx/pgclient/impl/codec/DataType.java | Java | apache-2.0 | 11,762 |
require 'rails_helper'
RSpec.describe PagesController, type: :controller do
  # The four static pages behave identically, so their specs are generated
  # from a single list of controller actions.
  %i[home contact about help].each do |action|
    describe "GET ##{action}" do
      it "returns http success" do
        get action
        expect(response).to have_http_status(:success)
      end
    end
  end
end
| LoikH/MyMoviez | spec/controllers/pages_controller_spec.rb | Ruby | apache-2.0 | 662 |
package com.itheima.zhbj52.utils;
import android.content.Context;
import android.content.SharedPreferences;
/**
* SharedPreferences工具类
*
* @author baoliang.zhao
*
*/
public class PrefUtils {

    /** Name of the single SharedPreferences file used by the whole app. */
    public static final String PREF_NAME = "config";

    /** Returns the app-wide SharedPreferences instance backing all helpers below. */
    private static SharedPreferences getPrefs(Context ctx) {
        return ctx.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
    }

    /** Reads a boolean value, falling back to {@code defaultValue} when absent. */
    public static boolean getBoolean(Context ctx, String key,
            boolean defaultValue) {
        return getPrefs(ctx).getBoolean(key, defaultValue);
    }

    /** Stores a boolean value; apply() persists asynchronously (lint-recommended over commit()). */
    public static void setBoolean(Context ctx, String key, boolean value) {
        getPrefs(ctx).edit().putBoolean(key, value).apply();
    }

    /** Reads an int value, falling back to {@code defaultValue} when absent. */
    public static int getInt(Context ctx, String key, int defaultValue) {
        return getPrefs(ctx).getInt(key, defaultValue);
    }

    /** Stores an int value asynchronously. */
    public static void setInt(Context ctx, String key, int value) {
        getPrefs(ctx).edit().putInt(key, value).apply();
    }

    /** Reads a String value, falling back to {@code defaultValue} when absent. */
    public static String getString(Context ctx, String key, String defaultValue) {
        return getPrefs(ctx).getString(key, defaultValue);
    }

    /** Stores a String value asynchronously. */
    public static void setString(Context ctx, String key, String value) {
        getPrefs(ctx).edit().putString(key, value).apply();
    }
}
| CrazyZhao/zhbj52 | src/com/itheima/zhbj52/utils/PrefUtils.java | Java | apache-2.0 | 1,514 |
/**
* @license Apache-2.0
*
* Copyright (c) 2020 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'use strict';
// MODULES //
var addon = require( './../src/addon.node' );
// MAIN //
/**
* Fills a double-precision floating-point strided array with a specified scalar
* constant by delegating to the native C implementation.
*
* @param {PositiveInteger} N - number of indexed elements
* @param {number} alpha - scalar constant
* @param {Float64Array} x - input array (mutated in place)
* @param {integer} stride - index increment
* @returns {Float64Array} the input array `x`
*
* @example
* var Float64Array = require( '@stdlib/array/float64' );
*
* var x = new Float64Array( [ -2.0, 1.0, 3.0, -5.0, 4.0, 0.0, -1.0, -3.0 ] );
*
* dfill( x.length, 5.0, x, 1 );
* // x => <Float64Array>[ 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0 ]
*/
function dfill( N, alpha, x, stride ) {
	// The add-on fills `x` in place; the array is returned to allow chaining:
	addon( N, alpha, x, stride );
	return x;
}
// EXPORTS //
module.exports = dfill;
| stdlib-js/stdlib | lib/node_modules/@stdlib/blas/ext/base/dfill/lib/dfill.native.js | JavaScript | apache-2.0 | 1,396 |
package tutorial.core.repositories;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.transaction.annotation.Transactional;
import tutorial.core.models.entities.Account;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
/**
* Created by Chris on 7/9/14.
*/
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration("classpath:spring/business-config.xml")
public class AccountRepoTest {

    // fixture values shared by setup and assertions
    private static final String NAME = "name";
    private static final String PASSWORD = "password";

    @Autowired
    private AccountRepo repo;

    private Account account;

    @Before
    @Transactional
    @Rollback(false)
    public void setup()
    {
        // persist one account that the test method can look up again
        Account created = new Account();
        created.setName(NAME);
        created.setPassword(PASSWORD);
        repo.createAccount(created);
        this.account = created;
    }

    @Test
    @Transactional
    public void testFind()
    {
        Account found = repo.findAccount(this.account.getId());
        assertNotNull(found);
        assertEquals(NAME, found.getName());
        assertEquals(PASSWORD, found.getPassword());
    }
}
| DominikR90/2015_UAM_MGR_ZTSI_JAVA | basic-web-app/src/test/java/tutorial/core/repositories/AccountRepoTest.java | Java | apache-2.0 | 1,328 |
import concat from 'lodash/concat';
import { Options } from '..';
import { GlobalParameters } from '../global-parameters';
import { Property } from './property';
import { WithMembersStatement } from './with-members.statement';
/** PlantUML / markdown rendering for a UML class statement. */
export class Class extends WithMembersStatement {
  type = 'class';

  public decorators: string[] | undefined;

  // Root aggregates get a wheat background in the PlantUML diagram.
  private get rootAggregate(): string {
    const isRoot = this.decorators !== undefined && this.decorators.includes('@RootAggregate');
    return isRoot ? ' #wheat' : '';
  }

  // The body braces are only emitted when the class has members.
  private get openBrace(): string {
    return this.members.length === 0 ? '' : ' {';
  }

  public toPlantuml(options?: Options) {
    const lines = [`${this.type} ${this.getName(options)}${this.rootAggregate}${this.openBrace}`];
    for (const member of this.members) {
      lines.push(`${GlobalParameters.indent}${member.toPlantuml(options)}`);
    }
    if (this.members.length > 0) {
      lines.push('}');
    }
    return lines.join(GlobalParameters.eol);
  }

  public toTable() {
    // lodash concat tolerates undefined decorators/comments, so it is kept on purpose
    const infos = concat(this.decorators, this.comments);
    const rows = [`|${this.nameWithAnchor}|||${infos.join(GlobalParameters.br)}|`];
    for (const member of this.members) {
      if (member instanceof Property) {
        rows.push(member.toTable());
      }
    }
    return rows.join(GlobalParameters.eol);
  }
}
| jboz/living-documentation | livingdoc-typescript-plugin/src/models/class.ts | TypeScript | apache-2.0 | 1,307 |
{{-- Clickable idea tile: the whole card navigates to the idea's detail page. --}}
<div class="tile idea-tile" onClick="document.location = '{{ action('IdeaController@view', $idea->id) }}'">
    <div class="tile-image" style="background-image:url('{{ $idea->photo }}')"></div>
    <div class="inner-container">
        <h4>
            {{ $idea->name }}
        </h4>
        <p>
            {{-- Falls back to the raw user id when the related user record has no name. --}}
            Posted by <a href="{{ action('UserController@profile', [$idea->user]) }}">{{ $idea->user->name or $idea->user_id }}</a>
        </p>
    </div>
</div>
/*
* Copyright (C) 2016 Merbin J Anselm <merbinjanselm@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package ohi.andre.consolelauncher.managers.flashlight;
import android.annotation.TargetApi;
import android.content.Context;
import android.content.Intent;
import android.hardware.camera2.CameraAccessException;
import android.hardware.camera2.CameraCharacteristics;
import android.hardware.camera2.CameraManager;
import ohi.andre.consolelauncher.tuils.InputOutputReceiver;
/**
 * Torch (flashlight) controller backed by the camera2 API
 * ({@link CameraManager#setTorchMode}), used on API level 23+ devices.
 * Failures are reported to the user by broadcasting the exception text on the
 * app's output channel ({@link InputOutputReceiver#ACTION_OUTPUT}).
 */
@TargetApi(23)
public class Flashlight2 extends Flashlight {

    public static final String TYPE = Constants.ID_DEVICE_OUTPUT_TORCH_FLASH_NEW;

    // Camera ids reported by CameraManager; index 0 is assumed to be the camera
    // hosting the torch unit -- TODO confirm on multi-camera devices.
    private String[] mCameraIDList;

    // True once the first camera reported FLASH_INFO_AVAILABLE.
    private boolean flashSupported;

    public Flashlight2(Context context) {
        super(context);
        flashSupported = false;
    }

    /**
     * Reports a failure by broadcasting the exception's string form on the
     * app's output channel.
     *
     * @param e the exception describing the failure
     */
    private void broadcastError(Exception e) {
        Intent intent = new Intent(InputOutputReceiver.ACTION_OUTPUT);
        intent.putExtra(InputOutputReceiver.TEXT, e.toString());
        mContext.sendBroadcast(intent);
    }

    /**
     * Turns the torch on if it is currently off, the device has at least one
     * camera, and that camera reports flash support. Errors are broadcast to
     * the user instead of being thrown.
     */
    @Override
    protected void turnOn() {
        if (this.getStatus()) {
            return;
        }
        CameraManager mCameraManager = (CameraManager) this.mContext.getSystemService(Context.CAMERA_SERVICE);
        try {
            this.mCameraIDList = mCameraManager.getCameraIdList();
        } catch (CameraAccessException e) {
            broadcastError(e);
            return;
        }
        // Guard against devices with no cameras at all; the original indexed
        // element 0 unconditionally and could throw ArrayIndexOutOfBoundsException.
        if (this.mCameraIDList == null || this.mCameraIDList.length == 0) {
            return;
        }
        try {
            CameraCharacteristics mCameraParameters = mCameraManager.getCameraCharacteristics(this.mCameraIDList[0]);
            // FLASH_INFO_AVAILABLE may be absent (null) on some devices;
            // Boolean.TRUE.equals avoids an NPE on auto-unboxing.
            this.flashSupported = Boolean.TRUE.equals(mCameraParameters.get(CameraCharacteristics.FLASH_INFO_AVAILABLE));
        } catch (Exception e) {
            broadcastError(e);
            return;
        }
        if (this.flashSupported) {
            try {
                mCameraManager.setTorchMode(this.mCameraIDList[0], true);
                this.updateStatus(true);
            } catch (CameraAccessException e) {
                broadcastError(e);
            }
        }
    }

    /**
     * Turns the torch off if it is currently on and was previously enabled via
     * {@link #turnOn()} (i.e. the camera id list and flash support flag are set).
     */
    @Override
    protected void turnOff() {
        if (this.getStatus()) {
            if (this.mCameraIDList != null && this.mCameraIDList.length > 0 && this.flashSupported) {
                CameraManager mCameraManager = (CameraManager) this.mContext.getSystemService(Context.CAMERA_SERVICE);
                try {
                    mCameraManager.setTorchMode(mCameraIDList[0], false);
                } catch (CameraAccessException e) {
                    broadcastError(e);
                    return;
                }
                this.updateStatus(false);
            }
        }
    }
}
| lss4/TUI-ConsoleLauncher | app/src/main/java/ohi/andre/consolelauncher/managers/flashlight/Flashlight2.java | Java | apache-2.0 | 3,912 |
using EPiServer.Commerce.Order;
using EPiServer.Framework.Localization;
using EPiServer.ServiceLocation;
using Mediachase.Commerce.Orders;
using System.ComponentModel;
namespace EPiServer.Reference.Commerce.Site.Features.Payment.PaymentMethods
{
public class DIBSPaymentMethod : PaymentMethodBase
{
public DIBSPaymentMethod()
: this(LocalizationService.Current, ServiceLocator.Current.GetInstance<IOrderGroupFactory>())
{
}
public DIBSPaymentMethod(LocalizationService localizationService, IOrderGroupFactory orderGroupFactory)
: base(localizationService, orderGroupFactory)
{
}
public string Error
{
get { return null; }
}
public override IPayment CreatePayment(decimal amount, IOrderGroup orderGroup)
{
var payment = orderGroup.CreatePayment(_orderGroupFactory);
payment.PaymentMethodId = PaymentMethodId;
payment.PaymentMethodName = "DIBS";
payment.Amount = amount;
payment.Status = PaymentStatus.Pending.ToString();
payment.TransactionType = TransactionType.Authorization.ToString();
return payment;
}
public override void PostProcess(IPayment payment)
{
}
public override bool ValidateData()
{
return true;
}
}
} | sondn2010/Quicksilver | Sources/EPiServer.Reference.Commerce.Site/Features/Payment/PaymentMethods/DIBSPaymentMethod.cs | C# | apache-2.0 | 1,415 |
# Copyright 2011-2019, The Trustees of Indiana University and Northwestern
# University. Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# --- END LICENSE_HEADER BLOCK ---
# Administrative CRUD controller for Admin::Collection objects: listing,
# creation, access-control updates, and deletion (with media-object reassignment).
class Admin::CollectionsController < ApplicationController
  include Rails::Pagination
  before_action :authenticate_user!
  load_and_authorize_resource except: [:index, :remove]
  before_action :load_and_authorize_collections, only: [:index]
  respond_to :html
  # Loads the collections visible to the user given in params[:user] and checks
  # the current ability for the requested action against Admin::Collection.
  def load_and_authorize_collections
    @collections = get_user_collections(params[:user])
    authorize!(params[:action].to_sym, Admin::Collection)
  end
  # GET /collections
  def index
    respond_to do |format|
      format.html
      format.json { paginate json: @collections }
    end
  end
  # GET /collections/1
  def show
    respond_to do |format|
      format.json { render json: @collection.to_json }
      format.html {
        # Current default access settings, used to prefill the access-control form.
        @groups = @collection.default_local_read_groups
        @users = @collection.default_read_users
        @virtual_groups = @collection.default_virtual_read_groups
        @ip_groups = @collection.default_ip_read_groups
        @visibility = @collection.default_visibility
        # Only offer groups/courses that are not already assigned to the collection.
        @addable_groups = Admin::Group.non_system_groups.reject { |g| @groups.include? g.name }
        @addable_courses = Course.all.reject { |c| @virtual_groups.include? c.context_id }
      }
    end
  end
  # GET /collections/new
  def new
    respond_to do |format|
      format.js { render json: modal_form_response(@collection) }
      format.html { render 'new' }
    end
  end
  # GET /collections/1/edit
  def edit
    respond_to do |format|
      format.js { render json: modal_form_response(@collection) }
    end
  end
  # GET /collections/1/items
  # Returns a paginated JSON map of media object id => serialized media object.
  def items
    mos = paginate @collection.media_objects
    render json: mos.to_a.collect{|mo| [mo.id, mo.to_json] }.to_h
  end
  # POST /collections
  # Creates the collection with the current user as manager and notifies all
  # administrators by email on success.
  def create
    @collection = Admin::Collection.create(collection_params.merge(managers: [current_user.user_key]))
    if @collection.persisted?
      User.where(Devise.authentication_keys.first => [Avalon::RoleControls.users('administrator')].flatten).each do |admin_user|
        NotificationsMailer.new_collection(
          creator_id: current_user.id,
          collection_id: @collection.id,
          user_id: admin_user.id,
          subject: "New collection: #{@collection.name}"
        ).deliver_later
      end
      render json: {id: @collection.id}, status: 200
    else
      logger.warn "Failed to create collection #{@collection.name rescue '<unknown>'}: #{@collection.errors.full_messages}"
      render json: {errors: ['Failed to create collection:']+@collection.errors.full_messages}, status: 422
    end
  end
  # PUT /collections/1
  # Handles several distinct form submissions in one action: renaming, adding /
  # removing role holders (manager/editor/depositor), and editing default access
  # controls (groups, classes, users, IP addresses, visibility, hidden flag).
  # Which branch runs is driven by the submit_* / add_* / remove_* params.
  def update
    name_changed = false
    if params[:admin_collection].present?
      if params[:admin_collection][:name].present?
        # Rename only when the name actually differs and the user may rename.
        if params[:admin_collection][:name] != @collection.name && can?('update_name', @collection)
          @old_name = @collection.name
          @collection.name = params[:admin_collection][:name]
          name_changed = true
        end
      end
    end
    # Add/remove role holders; each role has its own ability check.
    ["manager", "editor", "depositor"].each do |title|
      if params["submit_add_#{title}"].present?
        if params["add_#{title}"].present? && can?("update_#{title.pluralize}".to_sym, @collection)
          begin
            @collection.send "add_#{title}".to_sym, params["add_#{title}"].strip
          rescue ArgumentError => e
            flash[:error] = e.message
          end
        else
          flash[:error] = "#{title.titleize} can't be blank."
        end
      end
      remove_access = "remove_#{title}"
      if params[remove_access].present? && can?("update_#{title.pluralize}".to_sym, @collection)
        begin
          @collection.send remove_access.to_sym, params[remove_access]
        rescue ArgumentError => e
          flash[:error] = e.message
        end
      end
    end
    # If Save Access Setting button or Add/Remove User/Group button has been clicked
    if can?(:update_access_control, @collection)
      ["group", "class", "user", "ipaddress"].each do |title|
        if params["submit_add_#{title}"].present?
          if params["add_#{title}"].present?
            val = params["add_#{title}"].strip
            if title=='user'
              @collection.default_read_users += [val]
            elsif title=='ipaddress'
              # Inline-rescue validates the IP/CIDR string before adding it.
              if ( IPAddr.new(val) rescue false )
                @collection.default_read_groups += [val]
              else
                flash[:notice] = "IP Address #{val} is invalid. Valid examples: 124.124.10.10, 124.124.0.0/16, 124.124.0.0/255.255.0.0"
              end
            else
              # Groups and classes are both stored in default_read_groups.
              @collection.default_read_groups += [val]
            end
          else
            flash[:notice] = "#{title.titleize} can't be blank."
          end
        end
        if params["remove_#{title}"].present?
          if ["group", "class", "ipaddress"].include? title
            # This is a hack to deal with the fact that calling default_read_groups#delete isn't marking the record as dirty
            # TODO: Ensure default_read_groups is tracked by ActiveModel::Dirty
            @collection.default_read_groups_will_change!
            @collection.default_read_groups.delete params["remove_#{title}"]
          else
            # This is a hack to deal with the fact that calling default_read_users#delete isn't marking the record as dirty
            # TODO: Ensure default_read_users is tracked by ActiveModel::Dirty
            @collection.default_read_users_will_change!
            @collection.default_read_users.delete params["remove_#{title}"]
          end
        end
      end
      @collection.default_visibility = params[:visibility] unless params[:visibility].blank?
      @collection.default_hidden = params[:hidden] == "1"
    end
    @collection.update_attributes collection_params if collection_params.present?
    saved = @collection.save
    # Notify administrators only when the save succeeded and the name changed.
    if saved and name_changed
      User.where(Devise.authentication_keys.first => [Avalon::RoleControls.users('administrator')].flatten).each do |admin_user|
        NotificationsMailer.update_collection(
          updater_id: current_user.id,
          collection_id: @collection.id,
          user_id: admin_user.id,
          old_name: @old_name,
          subject: "Notification: collection #{@old_name} changed to #{@collection.name}"
        ).deliver_later
      end
    end
    respond_to do |format|
      format.html do
        flash[:notice] = Array(flash[:notice]) + @collection.errors.full_messages unless @collection.valid?
        redirect_to @collection
      end
      format.json do
        if saved
          render json: {id: @collection.id}, status: 200
        else
          logger.warn "Failed to update collection #{@collection.name rescue '<unknown>'}: #{@collection.errors.full_messages}"
          render json: {errors: ['Failed to update collection:']+@collection.errors.full_messages}, status: 422
        end
      end
    end
  end
  # GET /collections/1/remove
  # Confirmation page for deletion; lists the collection's media objects and
  # candidate target collections for reassignment. Authorization is manual here
  # because this action is excluded from load_and_authorize_resource.
  def remove
    @collection = Admin::Collection.find(params['id'])
    raise CanCan::AccessDenied unless current_ability.can? :destroy, @collection
    @objects = @collection.media_objects
    @candidates = get_user_collections.reject { |c| c == @collection }
  end
  # DELETE /collections/1
  # If the collection still holds media objects, they are first reassigned to
  # params[:target_collection_id]; invalid media objects block the deletion.
  def destroy
    @source_collection = @collection
    target_path = admin_collections_path
    if @source_collection.media_objects.count > 0
      if @source_collection.media_objects.all?(&:valid?)
        @target_collection = Admin::Collection.find(params[:target_collection_id])
        Admin::Collection.reassign_media_objects( @source_collection.media_objects, @source_collection, @target_collection )
        target_path = admin_collection_path(@target_collection)
        @source_collection.reload
      else
        flash[:error] = "Collection contains invalid media objects that cannot be moved. Please address these issues before attempting to delete #{@source_collection.name}."
        redirect_to admin_collection_path(@source_collection) and return
      end
    end
    # Re-check emptiness after (possible) reassignment before destroying.
    if @source_collection.media_objects.count == 0
      @source_collection.destroy
      redirect_to target_path
    else
      flash[:error] = "Something went wrong. #{@source_collection.name} is not empty."
      redirect_to admin_collection_path(@source_collection)
    end
  end
  private
  # Strong-parameters whitelist for collection attributes.
  def collection_params
    params.permit(:admin_collection => [:name, :description, :unit, :managers => []])[:admin_collection]
  end
end
| ualbertalib/avalon | app/controllers/admin/collections_controller.rb | Ruby | apache-2.0 | 9,077 |