code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
/*
* (C) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hp.ov.sdk.dto.networking;
/**
 * Per-port link-aggregation (LAG) states exposed by the HPE OneView
 * networking REST API.
 *
 * NOTE(review): constant names are CamelCase rather than the conventional
 * UPPER_SNAKE_CASE because they mirror the exact string values used by the
 * REST API; renaming them would break JSON (de)serialization. The values
 * appear to correspond to the standard IEEE 802.3ad/LACP actor-state flags
 * plus an Unknown fallback — confirm semantics against the OneView API docs.
 */
public enum LagState {
    Aggregation,
    Collecting,
    Defaulted,
    Distributing,
    Expired,
    LacpActivity,
    LacpTimeout,
    Synchronization,
    Unknown
}
| HewlettPackard/oneview-sdk-java | oneview-sdk-java-lib/src/main/java/com/hp/ov/sdk/dto/networking/LagState.java | Java | apache-2.0 | 839 |
package com.jsh.erp.service.materialCategory;
import com.alibaba.fastjson.JSONObject;
import com.jsh.erp.service.ICommonQuery;
import com.jsh.erp.service.materialProperty.MaterialPropertyResource;
import com.jsh.erp.service.materialProperty.MaterialPropertyService;
import com.jsh.erp.utils.Constants;
import com.jsh.erp.utils.QueryUtils;
import com.jsh.erp.utils.StringUtil;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;
import java.util.List;
import java.util.Map;
@Service(value = "materialCategory_component")
@MaterialCategoryResource
public class MaterialCategoryComponent implements ICommonQuery {

    @Resource
    private MaterialCategoryService materialCategoryService;

    /** Looks up a single material category by its id. */
    @Override
    public Object selectOne(Long id) throws Exception {
        return materialCategoryService.getMaterialCategory(id);
    }

    /** Returns a page of categories matching the filters encoded in {@code map}. */
    @Override
    public List<?> select(Map<String, String> map) throws Exception {
        return getMaterialCategoryList(map);
    }

    /**
     * Extracts the "name" and "parentId" filters from the serialized search
     * payload and delegates the paged query to the service layer.
     */
    private List<?> getMaterialCategoryList(Map<String, String> map) throws Exception {
        String search = map.get(Constants.SEARCH);
        String name = StringUtil.getInfo(search, "name");
        Integer parentId = StringUtil.parseInteger(StringUtil.getInfo(search, "parentId"));
        // NOTE(review): a sort order was previously computed via
        // QueryUtils.order(map) but never passed anywhere — select(...) takes
        // no order argument, so client-requested ordering was silently
        // ignored. The dead computation has been removed; if sorting is
        // required, extend MaterialCategoryService.select accordingly.
        return materialCategoryService.select(name, parentId, QueryUtils.offset(map), QueryUtils.rows(map));
    }

    /** Counts categories matching the same "name"/"parentId" filters as select. */
    @Override
    public Long counts(Map<String, String> map) throws Exception {
        String search = map.get(Constants.SEARCH);
        String name = StringUtil.getInfo(search, "name");
        Integer parentId = StringUtil.parseInteger(StringUtil.getInfo(search, "parentId"));
        return materialCategoryService.countMaterialCategory(name, parentId);
    }

    /** Creates a category from the JSON payload; returns affected row count. */
    @Override
    public int insert(JSONObject obj, HttpServletRequest request) throws Exception {
        return materialCategoryService.insertMaterialCategory(obj, request);
    }

    /** Updates a category from the JSON payload; returns affected row count. */
    @Override
    public int update(JSONObject obj, HttpServletRequest request) throws Exception {
        return materialCategoryService.updateMaterialCategory(obj, request);
    }

    /** Deletes one category by id; returns affected row count. */
    @Override
    public int delete(Long id, HttpServletRequest request) throws Exception {
        return materialCategoryService.deleteMaterialCategory(id, request);
    }

    /** Deletes several categories; {@code ids} is a delimited id list. */
    @Override
    public int deleteBatch(String ids, HttpServletRequest request) throws Exception {
        return materialCategoryService.batchDeleteMaterialCategory(ids, request);
    }

    /** Checks whether {@code name} is already used by a category other than {@code id}. */
    @Override
    public int checkIsNameExist(Long id, String name) throws Exception {
        return materialCategoryService.checkIsNameExist(id, name);
    }
}
| jishenghua/JSH_ERP | jshERP-boot/src/main/java/com/jsh/erp/service/materialCategory/MaterialCategoryComponent.java | Java | apache-2.0 | 2,746 |
#if 0 // A cute trick to making this .cc self-building from shell.
g++ $0 -O2 -Wall -Werror -o `basename $0 .cc`;
exit;
#endif
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You
// may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
#include "../base.h"
#include "../stream.h"
#include "../json.h"
#include <float.h>
#include <limits.h>
#include <string>
namespace webgl_loader {
// Unit-test harness for JsonSink.  Each Test* method drives json_, which
// serializes through a StringSink into buf_, and CheckString() compares the
// accumulated bytes against an expected literal (CHECK aborts on mismatch)
// before clearing the buffer for the next case.
class JsonSinkTest {
 public:
  JsonSinkTest()
      : sink_(&buf_), json_(&sink_) {
  }

  // Bare null literal.
  void TestNull() {
    json_.PutNull();
    CheckString("null");
  }

  // Boolean literals.
  void TestBool() {
    json_.PutBool(true);
    CheckString("true");
    json_.PutBool(false);
    CheckString("false");
  }

  // Integers: each single digit, then the 32-bit extremes.
  void TestInt() {
    for (int i = 0; i < 10; ++i) {
      json_.PutInt(i);
      // Build the expected one-character string "0".."9" in place.
      char test[] = "0";
      test[0] += i;
      CheckString(test);
    }
    json_.PutInt(INT_MIN);
    CheckString("-2147483648");
    json_.PutInt(INT_MAX);
    CheckString("2147483647");
  }

  // Floats: expectations pin the default 6-significant-digit formatting.
  void TestFloat() {
    json_.PutFloat(123.456);
    CheckString("123.456");
    json_.PutFloat(FLT_MAX);
    CheckString("3.40282e+38");
    json_.PutFloat(-FLT_MAX);
    CheckString("-3.40282e+38");
    json_.PutFloat(FLT_MIN);
    CheckString("1.17549e-38");
    json_.PutFloat(-FLT_MIN);
    CheckString("-1.17549e-38");
  }

  // Strings come out double-quoted (no escaping exercised here).
  void TestString() {
    json_.PutString("foo");
    CheckString("\"foo\"");
  }

  // Arrays: flat, nested, and mixed with an object; EndAll() closes every
  // open scope at once.
  void TestArray() {
    json_.BeginArray();
    for (int i = 0; i < 100; i += 10) {
      json_.PutInt(i);
    }
    json_.End();
    CheckString("[0,10,20,30,40,50,60,70,80,90]");
    json_.BeginArray();
    json_.BeginArray();
    json_.PutNull();
    json_.End();
    json_.BeginArray();
    json_.PutBool(false);
    json_.PutBool(true);
    json_.End();
    for (int i = 0; i < 5; ++i) {
      json_.PutInt(i*i);
    }
    json_.BeginObject();
    json_.PutString("key");
    json_.PutString("value");
    json_.EndAll();
    CheckString("[[null],[false,true],0,1,4,9,16,{\"key\":\"value\"}]");
  }

  // Objects: alternating PutString key / value calls inside BeginObject/End.
  void TestObject() {
    json_.BeginObject();
    json_.PutString("key1");
    json_.PutInt(1);
    json_.PutString("keyABC");
    json_.PutString("abc");
    json_.End();
    CheckString("{\"key1\":1,\"keyABC\":\"abc\"}");
    json_.BeginObject();
    json_.PutString("array");
    json_.BeginArray();
    for (int i = 1; i <= 3; ++i) {
      json_.PutInt(i);
    }
    json_.End();
    json_.BeginObject();
    json_.PutString("key");
    json_.PutString("value");
    json_.End();
    json_.PutString("k");
    json_.PutFloat(0.1);
    json_.End();
    // NOTE(review): the expected literal has no comma between "[1,2,3]" and
    // the nested object.  This pins JsonSink's current output — confirm
    // whether the missing separator after End() inside an object is intended
    // or a writer bug.
    CheckString("{\"array\":[1,2,3]{\"key\":\"value\"},\"k\":0.1}");
  }

 private:
  // Asserts the sink produced exactly str, then resets the buffer.
  void CheckString(const char* str) {
    CHECK(buf_ == str);
    buf_.clear();
  }

  std::string buf_;     // accumulated serializer output
  StringSink sink_;     // adapter writing into buf_
  JsonSink json_;       // object under test
};
} // namespace webgl_loader
int main() {
webgl_loader::JsonSinkTest tester;
tester.TestNull();
tester.TestBool();
tester.TestInt();
tester.TestFloat();
tester.TestString();
tester.TestArray();
tester.TestObject();
return 0;
}
| andrew-aladev/webgl-loader | src/testing/json_test.cc | C++ | apache-2.0 | 3,509 |
<?php
// Bootstrap the application; app.php is expected to set up $twig.
include 'app.php';

// Render the contact page; the template needs no view variables.
echo $twig->render('contact.twig', []);
| FSE301-Photerra/photerras | contact.php | PHP | apache-2.0 | 90 |
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.api.ads.admanager.jaxws.v202111;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.bind.annotation.XmlType;
/**
*
* Represents the actions that can be performed on slates.
*
*
* <p>Java class for SlateAction complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="SlateAction">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "SlateAction")
@XmlSeeAlso({
    UnarchiveSlates.class,
    ArchiveSlates.class
})
public abstract class SlateAction {
    // Intentionally empty: this is a JAXB-generated marker base class for the
    // WSDL's SlateAction complex type. The concrete actions (ArchiveSlates,
    // UnarchiveSlates) referenced via @XmlSeeAlso add no fields of their own.
}
| googleads/googleads-java-lib | modules/dfp_appengine/src/main/java/com/google/api/ads/admanager/jaxws/v202111/SlateAction.java | Java | apache-2.0 | 1,537 |
package br.pucminas.icei.audition.repository;
/**
* @author Claudinei Gomes Mendes
*/
import br.pucminas.icei.audition.dto.SearchResponse;
import info.atende.audition.model.AuditEvent;
import info.atende.audition.model.SecurityLevel;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Transactional;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import javax.persistence.TypedQuery;
import javax.persistence.criteria.CriteriaBuilder;
import javax.persistence.criteria.CriteriaQuery;
import javax.persistence.criteria.Predicate;
import javax.persistence.criteria.Root;
import java.time.LocalDateTime;
import java.util.*;
@Component
@Repository
public class AuditEventRepository {

    @PersistenceContext
    private EntityManager em;

    /** Persists a new audit event. */
    @Transactional
    public void create(AuditEvent auditEvent) {
        em.persist(auditEvent);
    }

    /** Paged search without a date-range restriction. */
    public SearchResponse search(Map<String, Object> filtro, Long start, Long max) {
        return search(filtro, start, max, null, null);
    }

    /**
     * Paged search with an optional [dStart, dEnd] date range.
     * The "securityLevel" filter arrives as a String and is converted to the
     * SecurityLevel enum before building the query.
     */
    public SearchResponse search(Map<String, Object> filtro, Long start, Long max,
                                 LocalDateTime dStart, LocalDateTime dEnd) {
        String securityLevel = (String) filtro.get("securityLevel");
        if (securityLevel != null) {
            filtro.put("securityLevel", SecurityLevel.valueOf(securityLevel));
        }
        return buildQuery(filtro, start, max, dStart, dEnd);
    }

    /** Distinct application names, sorted ascending. */
    public List<String> listApplicationNames() {
        return em.createQuery("SELECT distinct e.applicationName from AuditEvent e order by e.applicationName").getResultList();
    }

    /** Distinct resource types, sorted ascending. */
    public List<String> listResourceTypes() {
        return em.createQuery("SELECT distinct e.resource.resourceType from AuditEvent e order by e.resource.resourceType").getResultList();
    }

    /**
     * Builds and runs the criteria query.
     *
     * Filter semantics: "resourceType" matches the embedded resource's type,
     * "action" is a prefix (LIKE value%) match, everything else is equality.
     * NOTE: the filter map is drained while building predicates, so the
     * caller's map is empty afterwards (existing behavior, preserved).
     *
     * @return total row count plus the requested page, ordered by dateTime asc
     */
    private SearchResponse buildQuery(Map<String, Object> filtro,
                                      Long start,
                                      Long max,
                                      LocalDateTime dateStart,
                                      LocalDateTime dateEnd) {
        CriteriaBuilder cb = em.getCriteriaBuilder();
        CriteriaQuery<AuditEvent> q = cb.createQuery(AuditEvent.class);
        Root<AuditEvent> root = q.from(AuditEvent.class);
        // Fixed raw types here: diamond on the list and a typed iterator,
        // which also removes the unchecked Map.Entry/String casts.
        List<Predicate> predicates = new ArrayList<>();
        Iterator<Map.Entry<String, Object>> it = filtro.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, Object> pair = it.next();
            String key = pair.getKey();
            if (key.equals("resourceType")) {
                predicates.add(cb.equal(root.get("resource").get(key), pair.getValue()));
            } else if (key.equals("action")) {
                predicates.add(cb.like(root.get(key), pair.getValue() + "%"));
            } else {
                predicates.add(cb.equal(root.get(key), pair.getValue()));
            }
            it.remove(); // avoids a ConcurrentModificationException
        }
        // Optional date range: only applied when both ends are present.
        if (dateStart != null && dateEnd != null) {
            predicates.add(cb.between(root.get("dateTime"), dateStart, dateEnd));
        }
        CriteriaQuery<AuditEvent> where = q.where(cb.and(predicates.toArray(new Predicate[predicates.size()])));
        // Count executes against the same predicate set before pagination.
        Long countResult = JpaUtils.count(em, where);
        q.select(root);
        where.orderBy(cb.asc(root.get("dateTime")));
        TypedQuery<AuditEvent> query = em.createQuery(where);
        // Pagination is optional; both offset and page size must be supplied.
        if (start != null && max != null) {
            query.setFirstResult(start.intValue())
                 .setMaxResults(max.intValue());
        }
        List<AuditEvent> result = query.getResultList();
        return new SearchResponse(countResult, result);
    }
}
| atende/audit-view | src/main/java/br/pucminas/icei/audition/repository/AuditEventRepository.java | Java | apache-2.0 | 3,874 |
package resa.metrics;
import backtype.storm.metric.api.IMetric;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
 * Storm metric that builds a per-key histogram of double samples.
 *
 * Bucket boundaries are the ascending values in {@code xAxis}; a sample is
 * counted in the first bucket whose bound is >= the sample, and samples
 * larger than the last bound land in a final overflow slot.
 *
 * Created by ding on 14-8-12.
 */
public class StatMetric implements IMetric {

    // Ascending bucket upper bounds.
    private double[] xAxis;

    // Per-key bucket counts; each array has length xAxis.length + 1 (the last
    // slot is the overflow bucket). Left null when no bounds were supplied,
    // in which case add() must not be called (it would throw NPE) and
    // getValueAndReset() reports null.
    private Map<String, long[]> data;

    /**
     * @param xAxis ascending bucket upper bounds; null or empty disables
     *              collection entirely
     */
    public StatMetric(double[] xAxis) {
        this.xAxis = xAxis;
        if (xAxis != null && xAxis.length > 0) {
            data = new HashMap<>();
        }
    }

    /**
     * Records one sample for the given key.
     *
     * @param key   histogram identifier
     * @param value sample to bucket
     */
    public void add(String key, double value) {
        // binarySearch returns (-insertionPoint - 1) when value is not an
        // exact bound; convert that to the insertion index. An exact match
        // or an index past the end both map to valid slots because the count
        // array has one extra overflow cell.
        int pos = Arrays.binarySearch(xAxis, value);
        if (pos < 0) {
            pos = -pos - 1;
        }
        data.computeIfAbsent(key, k -> new long[xAxis.length + 1])[pos]++;
    }

    /**
     * Returns a map of key -> comma-separated bucket counts and clears all
     * accumulated state, or null when nothing was recorded (or collection is
     * disabled).
     */
    @Override
    public Object getValueAndReset() {
        if (data == null || data.isEmpty()) {
            return null;
        }
        Map<String, String> ret = new HashMap<>();
        data.forEach((k, v) -> ret.put(k, stat2String(v)));
        data = new HashMap<>();
        return ret;
    }

    /** Renders bucket counts as a comma-separated list, e.g. "3,0,7". */
    private String stat2String(long[] statData) {
        StringBuilder sb = new StringBuilder();
        sb.append(statData[0]);
        for (int i = 1; i < statData.length; i++) {
            sb.append(',');
            sb.append(statData[i]);
        }
        return sb.toString();
    }
}
| ADSC-Cloud/resa | resa-core/src/main/java/resa/metrics/StatMetric.java | Java | apache-2.0 | 1,913 |
// Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.internal.statistic.eventLog.validator.persistence;
import com.intellij.openapi.components.*;
import com.intellij.openapi.util.text.StringUtil;
import org.jdom.Element;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.HashMap;
import java.util.Map;
@State(
name = "EventLogWhitelist",
storages = @Storage(value = EventLogWhitelistSettingsPersistence.USAGE_STATISTICS_XML, roamingType = RoamingType.DISABLED)
)
public class EventLogWhitelistSettingsPersistence implements PersistentStateComponent<Element> {
public static final String USAGE_STATISTICS_XML = "usage.statistics.xml";
private final Map<String, Long> myLastModifications = new HashMap<>();
private final Map<String, WhitelistPathSettings> myRecorderToPathSettings = new HashMap<>();
private static final String WHITELIST_MODIFY = "update";
private static final String RECORDER_ID = "recorder-id";
private static final String LAST_MODIFIED = "last-modified";
private static final String PATH = "path";
private static final String CUSTOM_PATH = "custom-path";
private static final String USE_CUSTOM_PATH = "use-custom-path";
/** Returns the application-wide singleton holding whitelist persistence state. */
public static EventLogWhitelistSettingsPersistence getInstance() {
    final EventLogWhitelistSettingsPersistence instance =
        ServiceManager.getService(EventLogWhitelistSettingsPersistence.class);
    return instance;
}
/**
 * Returns the last recorded whitelist modification time for the recorder,
 * clamped to be non-negative; 0 when the recorder is unknown.
 */
public long getLastModified(@NotNull String recorderId) {
    final Long stored = myLastModifications.get(recorderId);
    if (stored == null) {
        return 0;
    }
    return Math.max(stored, 0);
}
/** Records the recorder's last modification time, clamping negatives to 0. */
public void setLastModified(@NotNull String recorderId, long lastUpdate) {
    final long clamped = Math.max(lastUpdate, 0);
    myLastModifications.put(recorderId, clamped);
}
/**
 * Returns the whitelist path settings configured for the recorder, or null
 * when none have been set.
 */
@Nullable
public WhitelistPathSettings getPathSettings(@NotNull String recorderId) {
    final WhitelistPathSettings settings = myRecorderToPathSettings.get(recorderId);
    return settings;
}
/** Associates (or replaces) the whitelist path settings for the recorder. */
public void setPathSettings(@NotNull String recorderId, @NotNull WhitelistPathSettings settings) {
    myRecorderToPathSettings.put(recorderId, settings);
}
/**
 * Restores persisted state from XML: one "update" child per recorder's
 * last-modified timestamp and one "path" child per recorder's custom-path
 * settings. Previously loaded state is discarded first.
 */
@Override
public void loadState(@NotNull final Element element) {
    myLastModifications.clear();
    for (Element update : element.getChildren(WHITELIST_MODIFY)) {
        final String recorder = update.getAttributeValue(RECORDER_ID);
        if (StringUtil.isNotEmpty(recorder)) {
            // Malformed/missing timestamps parse to 0 (see parseLastUpdate).
            final long lastUpdate = parseLastUpdate(update);
            myLastModifications.put(recorder, lastUpdate);
        }
    }
    myRecorderToPathSettings.clear();
    for (Element path : element.getChildren(PATH)) {
        final String recorder = path.getAttributeValue(RECORDER_ID);
        if (StringUtil.isNotEmpty(recorder)) {
            String customPath = path.getAttributeValue(CUSTOM_PATH);
            // Entries without a custom-path attribute are skipped entirely.
            if (customPath == null) continue;
            boolean useCustomPath = parseUseCustomPath(path);
            myRecorderToPathSettings.put(recorder, new WhitelistPathSettings(customPath, useCustomPath));
        }
    }
}
/**
 * Reads the "use-custom-path" attribute from the element; false when absent
 * or not the literal "true".
 */
private static boolean parseUseCustomPath(@NotNull Element update) {
    // Boolean.parseBoolean never throws, so the previous
    // catch (NumberFormatException) around this call was dead code.
    return Boolean.parseBoolean(update.getAttributeValue(USE_CUSTOM_PATH, "false"));
}
/**
 * Reads the "last-modified" attribute as a long; 0 when absent or malformed.
 */
private static long parseLastUpdate(@NotNull Element update) {
    final String raw = update.getAttributeValue(LAST_MODIFIED, "0");
    try {
        return Long.parseLong(raw);
    }
    catch (NumberFormatException ignored) {
        // A corrupt timestamp is treated as "never updated".
        return 0;
    }
}
/**
 * Serializes current state to XML: an "update" child per recorder timestamp
 * and a "path" child per recorder's whitelist path settings (the inverse of
 * {@link #loadState}).
 */
@Override
public Element getState() {
    final Element element = new Element("state");
    for (Map.Entry<String, Long> entry : myLastModifications.entrySet()) {
        final Element update = new Element(WHITELIST_MODIFY);
        update.setAttribute(RECORDER_ID, entry.getKey());
        update.setAttribute(LAST_MODIFIED, String.valueOf(entry.getValue()));
        element.addContent(update);
    }
    for (Map.Entry<String, WhitelistPathSettings> entry : myRecorderToPathSettings.entrySet()) {
        final Element path = new Element(PATH);
        path.setAttribute(RECORDER_ID, entry.getKey());
        WhitelistPathSettings value = entry.getValue();
        path.setAttribute(CUSTOM_PATH, value.getCustomPath());
        path.setAttribute(USE_CUSTOM_PATH, String.valueOf(value.isUseCustomPath()));
        element.addContent(path);
    }
    return element;
}
@Override
public void noStateLoaded() {
    // Intentionally empty: the field initializers (empty maps) are already
    // the correct default state when no persisted XML exists.
}
} | leafclick/intellij-community | platform/statistics/src/com/intellij/internal/statistic/eventLog/validator/persistence/EventLogWhitelistSettingsPersistence.java | Java | apache-2.0 | 4,390 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.federation.router;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_COUNT_DEFAULT;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_COUNT_KEY;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Array;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.inotify.EventBatchList;
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics;
import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RPC.Server;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
/**
* This class is responsible for handling all of the RPC calls to the It is
* created, started, and stopped by {@link Router}. It implements the
* {@link ClientProtocol} to mimic a
* {@link org.apache.hadoop.hdfs.server.namenode.NameNode NameNode} and proxies
* the requests to the active
* {@link org.apache.hadoop.hdfs.server.namenode.NameNode NameNode}.
*/
public class RouterRpcServer extends AbstractService
implements ClientProtocol, NamenodeProtocol {
private static final Logger LOG =
LoggerFactory.getLogger(RouterRpcServer.class);
/** Configuration for the RPC server. */
private Configuration conf;
/** Identifier for the super user. */
private final String superUser;
/** Identifier for the super group. */
private final String superGroup;
/** Router using this RPC server. */
private final Router router;
/** The RPC server that listens to requests from clients. */
private final Server rpcServer;
/** The address for this RPC server. */
private final InetSocketAddress rpcAddress;
/** RPC clients to connect to the Namenodes. */
private final RouterRpcClient rpcClient;
/** Monitor metrics for the RPC calls. */
private final RouterRpcMonitor rpcMonitor;
/** Interface to identify the active NN for a nameservice or blockpool ID. */
private final ActiveNamenodeResolver namenodeResolver;
/** Interface to map global name space to HDFS subcluster name spaces. */
private final FileSubclusterResolver subclusterResolver;
/** If we are in safe mode, fail requests as if a standby NN. */
private volatile boolean safeMode;
/** Category of the operation that a thread is executing. */
private final ThreadLocal<OperationCategory> opCategory = new ThreadLocal<>();
// Modules implementing groups of RPC calls
/** Router Quota calls. */
private final Quota quotaCall;
/** Erasure coding calls. */
private final ErasureCoding erasureCoding;
/** NamenodeProtocol calls. */
private final RouterNamenodeProtocol nnProto;
/**
 * Construct a router RPC server.
 *
 * @param configuration HDFS Configuration.
 * @param router The Router that owns and manages this RPC server.
 * @param nnResolver The NN resolver instance to determine active NNs in HA.
 * @param fileResolver File resolver to resolve file paths to subclusters.
 * @throws IOException If the RPC server could not be created.
 */
public RouterRpcServer(Configuration configuration, Router router,
    ActiveNamenodeResolver nnResolver, FileSubclusterResolver fileResolver)
        throws IOException {
    super(RouterRpcServer.class.getName());

    this.conf = configuration;
    this.router = router;
    this.namenodeResolver = nnResolver;
    this.subclusterResolver = fileResolver;

    // User and group for reporting
    this.superUser = System.getProperty("user.name");
    this.superGroup = this.conf.get(
        DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
        DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);

    // RPC server settings
    int handlerCount = this.conf.getInt(DFS_ROUTER_HANDLER_COUNT_KEY,
        DFS_ROUTER_HANDLER_COUNT_DEFAULT);

    int readerCount = this.conf.getInt(DFS_ROUTER_READER_COUNT_KEY,
        DFS_ROUTER_READER_COUNT_DEFAULT);

    int handlerQueueSize = this.conf.getInt(DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY,
        DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT);

    // Override Hadoop Common IPC setting: the reader queue size must be set
    // through the common IPC key before the RPC.Builder reads it.
    int readerQueueSize = this.conf.getInt(DFS_ROUTER_READER_QUEUE_SIZE_KEY,
        DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT);
    this.conf.setInt(
        CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY,
        readerQueueSize);

    RPC.setProtocolEngine(this.conf, ClientNamenodeProtocolPB.class,
        ProtobufRpcEngine.class);

    ClientNamenodeProtocolServerSideTranslatorPB
        clientProtocolServerTranslator =
            new ClientNamenodeProtocolServerSideTranslatorPB(this);
    BlockingService clientNNPbService = ClientNamenodeProtocol
        .newReflectiveBlockingService(clientProtocolServerTranslator);

    NamenodeProtocolServerSideTranslatorPB namenodeProtocolXlator =
        new NamenodeProtocolServerSideTranslatorPB(this);
    BlockingService nnPbService = NamenodeProtocolService
        .newReflectiveBlockingService(namenodeProtocolXlator);

    InetSocketAddress confRpcAddress = conf.getSocketAddr(
        RBFConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY,
        RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY,
        RBFConfigKeys.DFS_ROUTER_RPC_ADDRESS_DEFAULT,
        RBFConfigKeys.DFS_ROUTER_RPC_PORT_DEFAULT);
    LOG.info("RPC server binding to {} with {} handlers for Router {}",
        confRpcAddress, handlerCount, this.router.getRouterId());

    // NOTE: setnumReaders (lowercase "n") is the actual method name in
    // Hadoop's RPC.Builder API — do not "fix" the casing.
    this.rpcServer = new RPC.Builder(this.conf)
        .setProtocol(ClientNamenodeProtocolPB.class)
        .setInstance(clientNNPbService)
        .setBindAddress(confRpcAddress.getHostName())
        .setPort(confRpcAddress.getPort())
        .setNumHandlers(handlerCount)
        .setnumReaders(readerCount)
        .setQueueSizePerHandler(handlerQueueSize)
        .setVerbose(false)
        .build();

    // Add all the RPC protocols that the Router implements
    DFSUtil.addPBProtocol(
        conf, NamenodeProtocolPB.class, nnPbService, this.rpcServer);

    // We don't want the server to log the full stack trace for some exceptions
    this.rpcServer.addTerseExceptions(
        RemoteException.class,
        SafeModeException.class,
        FileNotFoundException.class,
        FileAlreadyExistsException.class,
        AccessControlException.class,
        LeaseExpiredException.class,
        NotReplicatedYetException.class,
        IOException.class);

    this.rpcServer.addSuppressedLoggingExceptions(
        StandbyException.class);

    // The RPC-server port can be ephemeral... ensure we have the correct info
    InetSocketAddress listenAddress = this.rpcServer.getListenerAddress();
    this.rpcAddress = new InetSocketAddress(
        confRpcAddress.getHostName(), listenAddress.getPort());

    // Create metrics monitor
    Class<? extends RouterRpcMonitor> rpcMonitorClass = this.conf.getClass(
        RBFConfigKeys.DFS_ROUTER_METRICS_CLASS,
        RBFConfigKeys.DFS_ROUTER_METRICS_CLASS_DEFAULT,
        RouterRpcMonitor.class);
    this.rpcMonitor = ReflectionUtils.newInstance(rpcMonitorClass, conf);

    // Create the client that proxies calls to the active Namenodes
    this.rpcClient = new RouterRpcClient(this.conf, this.router.getRouterId(),
        this.namenodeResolver, this.rpcMonitor);

    // Initialize modules that implement groups of RPC calls
    this.quotaCall = new Quota(this.router, this);
    this.erasureCoding = new ErasureCoding(this);
    this.nnProto = new RouterNamenodeProtocol(this);
}
/**
 * Initialize the Router RPC service. Wires the RPC monitor (created in the
 * constructor) to the Router's state store before the parent lifecycle
 * initialization runs.
 *
 * @param configuration Configuration for the service.
 * @throws Exception If the parent service initialization fails.
 */
@Override
protected void serviceInit(Configuration configuration) throws Exception {
  this.conf = configuration;
  if (this.rpcMonitor == null) {
    // Metrics are optional: log the problem and keep serving without them
    LOG.error("Cannot instantiate Router RPC metrics class");
  } else {
    this.rpcMonitor.init(this.conf, this, this.router.getStateStore());
  }
  super.serviceInit(configuration);
}
/**
 * Start the client RPC server (when one was built) and then the parent
 * service.
 *
 * @throws Exception If the parent service cannot be started.
 */
@Override
protected void serviceStart() throws Exception {
  if (rpcServer != null) {
    rpcServer.start();
    LOG.info("Router RPC up at: {}", getRpcAddress());
  }
  super.serviceStart();
}
/**
 * Stop the client RPC server and close the RPC monitor before stopping the
 * parent service.
 *
 * @throws Exception If the parent service cannot be stopped.
 */
@Override
protected void serviceStop() throws Exception {
  if (rpcServer != null) {
    rpcServer.stop();
  }
  if (this.rpcMonitor != null) {
    this.rpcMonitor.close();
  }
  super.serviceStop();
}
/**
 * Get the RPC client used to forward client requests to the Namenodes.
 *
 * @return RPC client to the Namenodes.
 */
public RouterRpcClient getRPCClient() {
  return rpcClient;
}
/**
 * Get the resolver that maps file paths to subclusters.
 *
 * @return Subcluster resolver.
 */
public FileSubclusterResolver getSubclusterResolver() {
  return this.subclusterResolver;
}
/**
 * Get the monitor collecting RPC metrics for this server.
 *
 * @return RPC monitor and metrics.
 */
public RouterRpcMonitor getRPCMonitor() {
  return this.rpcMonitor;
}
/**
 * Allow access to the client RPC server for testing.
 *
 * @return The RPC server.
 */
@VisibleForTesting
public Server getServer() {
  return this.rpcServer;
}
/**
 * Get the address this RPC server is listening on.
 *
 * @return RPC service address.
 */
public InetSocketAddress getRpcAddress() {
  return this.rpcAddress;
}
/**
 * Check if the Router can serve this operation and that it is implemented.
 * We should only see READ, WRITE, and UNCHECKED. This variant acts as the
 * default handler for operations the Router has not implemented: when
 * {@code supported} is false it always throws an exception reporting the
 * calling operation.
 *
 * @param op Category of the operation to check.
 * @param supported If the operation is supported or not. If not, it will
 *                  throw an UnsupportedOperationException.
 * @throws StandbyException If the Router is in safe mode and cannot serve
 *                          client requests.
 * @throws UnsupportedOperationException If the operation is not supported.
 */
protected void checkOperation(OperationCategory op, boolean supported)
    throws StandbyException, UnsupportedOperationException {
  checkOperation(op);
  if (!supported) {
    if (rpcMonitor != null) {
      // Count the unimplemented proxy call in the metrics
      rpcMonitor.proxyOpNotImplemented();
    }
    // Report the name of the ClientProtocol method that was invoked
    String methodName = getMethodName();
    throw new UnsupportedOperationException(
        "Operation \"" + methodName + "\" is not supported");
  }
}
/**
 * Check if the Router can serve this operation. We should only see READ,
 * WRITE, and UNCHECKED. This function should be called by all
 * ClientProtocol functions. While the Router is in safe mode only READ and
 * UNCHECKED operations are allowed; WRITE requests fail with a standby
 * exception so the client retries another Router.
 *
 * @param op Category of the operation to check.
 * @throws StandbyException If the Router is in safe mode and cannot serve
 *                          client requests.
 */
protected void checkOperation(OperationCategory op)
    throws StandbyException {
  // Mark the start of the operation in the metrics monitor
  if (rpcMonitor != null) {
    rpcMonitor.startOp();
  }
  // Log the function we are currently calling.
  if (LOG.isDebugEnabled()) {
    String methodName = getMethodName();
    LOG.debug("Proxying operation: {}", methodName);
  }
  // Store the category of the operation category for this thread
  opCategory.set(op);
  // We allow unchecked and read operations
  if (op == OperationCategory.UNCHECKED || op == OperationCategory.READ) {
    return;
  }
  if (safeMode) {
    // Throw standby exception, router is not available
    if (rpcMonitor != null) {
      rpcMonitor.routerFailureSafemode();
    }
    throw new StandbyException("Router " + router.getRouterId() +
        " is in safe mode and cannot handle " + op + " requests");
  }
}
/**
 * Enter or leave safe mode. While in safe mode every write RPC request
 * fails with a standby exception so the client retries another Router,
 * similar to the client retry logic for HA.
 *
 * @param mode True to enable safe mode, false to disable it.
 */
public void setSafeMode(boolean mode) {
  safeMode = mode;
}
/**
 * Check whether the Router is currently in safe mode and therefore unable
 * to serve write RPC calls.
 *
 * @return True if the Router is in safe mode.
 */
public boolean isInSafeMode() {
  return safeMode;
}
// Not implemented: checkOperation(..., false) always throws
// UnsupportedOperationException, so the null return is never reached.
@Override // ClientProtocol
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
    throws IOException {
  checkOperation(OperationCategory.WRITE, false);
  return null;
}
/**
 * Get a delegation token from each name service.
 *
 * @param renewer Renewer of the delegation tokens.
 * @return Mapping of name service to its delegation token.
 * @throws IOException If the tokens cannot be obtained.
 */
public Map<FederationNamespaceInfo, Token<DelegationTokenIdentifier>>
    getDelegationTokens(Text renewer) throws IOException {
  // Not implemented yet: always throws UnsupportedOperationException
  checkOperation(OperationCategory.WRITE, false);
  return null;
}
// Not implemented: checkOperation(..., false) always throws
// UnsupportedOperationException, so the return is never reached.
@Override // ClientProtocol
public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
    throws IOException {
  checkOperation(OperationCategory.WRITE, false);
  return 0;
}
// Not implemented: checkOperation(..., false) always throws
// UnsupportedOperationException.
@Override // ClientProtocol
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
    throws IOException {
  checkOperation(OperationCategory.WRITE, false);
}
@Override // ClientProtocol
public LocatedBlocks getBlockLocations(String src, final long offset,
    final long length) throws IOException {
  checkOperation(OperationCategory.READ);
  // Resolve the path into the candidate subcluster locations and try them
  // one by one until one of the Namenodes returns the blocks
  List<RemoteLocation> locations = getLocationsForPath(src, false);
  RemoteMethod remoteMethod = new RemoteMethod("getBlockLocations",
      new Class<?>[] {String.class, long.class, long.class},
      new RemoteParam(), offset, length);
  return (LocatedBlocks) rpcClient.invokeSequential(locations, remoteMethod,
      LocatedBlocks.class, null);
}
/**
 * Forward the server defaults query to the default namespace.
 */
@Override // ClientProtocol
public FsServerDefaults getServerDefaults() throws IOException {
  checkOperation(OperationCategory.READ);
  String nsId = subclusterResolver.getDefaultNamespace();
  RemoteMethod remoteMethod = new RemoteMethod("getServerDefaults");
  return (FsServerDefaults) rpcClient.invokeSingle(nsId, remoteMethod);
}
@Override // ClientProtocol
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag,
    boolean createParent, short replication, long blockSize,
    CryptoProtocolVersion[] supportedVersions, String ecPolicyName)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  if (createParent && isPathAll(src)) {
    // For mount points that fan out to every subcluster, pre-create the
    // parent directory everywhere so the create can succeed in whichever
    // location is selected below.
    int index = src.lastIndexOf(Path.SEPARATOR);
    String parent = src.substring(0, index);
    LOG.debug("Creating {} requires creating parent {}", src, parent);
    // Give the parent implicit u+wx so the child can be created under it
    FsPermission parentPermissions = getParentPermission(masked);
    boolean success = mkdirs(parent, parentPermissions, createParent);
    if (!success) {
      // This shouldn't happen as mkdirs returns true or exception
      LOG.error("Couldn't create parents for {}", src);
    }
  }
  // Pick the subcluster location where this file should live
  RemoteLocation createLocation = getCreateLocation(src);
  RemoteMethod method = new RemoteMethod("create",
      new Class<?>[] {String.class, FsPermission.class, String.class,
                      EnumSetWritable.class, boolean.class, short.class,
                      long.class, CryptoProtocolVersion[].class,
                      String.class},
      createLocation.getDest(), masked, clientName, flag, createParent,
      replication, blockSize, supportedVersions, ecPolicyName);
  return (HdfsFileStatus) rpcClient.invokeSingle(createLocation, method);
}
/**
 * Derive the permissions for the parent of a child with the given
 * permissions. Adds an implicit u+wx permission for the parent, mirroring
 * @{FSDirMkdirOp#addImplicitUwx}.
 *
 * @param mask The permission mask of the child.
 * @return The permission mask of the parent.
 */
private static FsPermission getParentPermission(final FsPermission mask) {
  FsAction userAction = mask.getUserAction().or(FsAction.WRITE_EXECUTE);
  return new FsPermission(
      userAction, mask.getGroupAction(), mask.getOtherAction());
}
/**
 * Get the location to create a file. It checks if the file already existed
 * in one of the locations; when it does, the existing location is returned
 * so the Namenode can surface the proper error.
 *
 * @param src Path of the file to check.
 * @return The remote location for this file.
 * @throws IOException If the file has no creation location.
 */
protected RemoteLocation getCreateLocation(final String src)
    throws IOException {
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  if (locations == null || locations.isEmpty()) {
    throw new IOException("Cannot get locations to create " + src);
  }
  // Default to the highest-priority location
  RemoteLocation createLocation = locations.get(0);
  if (locations.size() > 1) {
    try {
      // Check if this file already exists in other subclusters
      LocatedBlocks existingLocation = getBlockLocations(src, 0, 1);
      if (existingLocation != null) {
        // Forward to the existing location and let the NN handle the error
        LocatedBlock existingLocationLastLocatedBlock =
            existingLocation.getLastLocatedBlock();
        if (existingLocationLastLocatedBlock == null) {
          // The block has no blocks yet, check for the meta data
          for (RemoteLocation location : locations) {
            RemoteMethod method = new RemoteMethod("getFileInfo",
                new Class<?>[] {String.class}, new RemoteParam());
            if (rpcClient.invokeSingle(location, method) != null) {
              createLocation = location;
              break;
            }
          }
        } else {
          // Identify the owning namespace through the block pool id of the
          // last block of the existing file
          ExtendedBlock existingLocationLastBlock =
              existingLocationLastLocatedBlock.getBlock();
          String blockPoolId = existingLocationLastBlock.getBlockPoolId();
          createLocation = getLocationForPath(src, true, blockPoolId);
        }
      }
    } catch (FileNotFoundException fne) {
      // Ignore if the file is not found
    }
  }
  return createLocation;
}
// Medium
@Override // ClientProtocol
public LastBlockWithStatus append(String src, final String clientName,
    final EnumSetWritable<CreateFlag> flag) throws IOException {
  checkOperation(OperationCategory.WRITE);
  // Try each candidate location sequentially; the first Namenode that has
  // the file performs the append
  List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod method = new RemoteMethod("append",
      new Class<?>[] {String.class, String.class, EnumSetWritable.class},
      new RemoteParam(), clientName, flag);
  return rpcClient.invokeSequential(
      locations, method, LastBlockWithStatus.class, null);
}
// Low
@Override // ClientProtocol
public boolean recoverLease(String src, String clientName)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  // Proxy the recovery sequentially, expecting a true answer
  List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod remoteMethod = new RemoteMethod("recoverLease",
      new Class<?>[] {String.class, String.class}, new RemoteParam(),
      clientName);
  return (Boolean) rpcClient.invokeSequential(
      locations, remoteMethod, Boolean.class, Boolean.TRUE);
}
/**
 * Set the replication of a file in whichever subcluster holds it.
 */
@Override // ClientProtocol
public boolean setReplication(String src, short replication)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod remoteMethod = new RemoteMethod("setReplication",
      new Class<?>[] {String.class, short.class}, new RemoteParam(),
      replication);
  return (Boolean) rpcClient.invokeSequential(
      locations, remoteMethod, Boolean.class, Boolean.TRUE);
}
/**
 * Set the storage policy of a file in whichever subcluster holds it.
 */
@Override
public void setStoragePolicy(String src, String policyName)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod remoteMethod = new RemoteMethod("setStoragePolicy",
      new Class<?>[] {String.class, String.class},
      new RemoteParam(), policyName);
  rpcClient.invokeSequential(locations, remoteMethod, null, null);
}
/**
 * Fetch the storage policies from the default namespace.
 */
@Override
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
  checkOperation(OperationCategory.READ);
  String nsId = subclusterResolver.getDefaultNamespace();
  RemoteMethod remoteMethod = new RemoteMethod("getStoragePolicies");
  return (BlockStoragePolicy[]) rpcClient.invokeSingle(nsId, remoteMethod);
}
@Override // ClientProtocol
public void setPermission(String src, FsPermission permissions)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod method = new RemoteMethod("setPermission",
      new Class<?>[] {String.class, FsPermission.class},
      new RemoteParam(), permissions);
  if (isPathAll(src)) {
    // Mount point fans out to all subclusters: apply everywhere at once
    rpcClient.invokeConcurrent(locations, method);
  } else {
    // Apply on the first location that holds the path
    rpcClient.invokeSequential(locations, method);
  }
}
@Override // ClientProtocol
public void setOwner(String src, String username, String groupname)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod method = new RemoteMethod("setOwner",
      new Class<?>[] {String.class, String.class, String.class},
      new RemoteParam(), username, groupname);
  if (isPathAll(src)) {
    // Mount point fans out to all subclusters: apply everywhere at once
    rpcClient.invokeConcurrent(locations, method);
  } else {
    // Apply on the first location that holds the path
    rpcClient.invokeSequential(locations, method);
  }
}
/**
 * Excluded and favored nodes are not verified and will be ignored by
 * placement policy if they are not in the same nameservice as the file.
 */
@Override // ClientProtocol
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
    String[] favoredNodes, EnumSet<AddBlockFlag> addBlockFlags)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod method = new RemoteMethod("addBlock",
      new Class<?>[] {String.class, String.class, ExtendedBlock.class,
                      DatanodeInfo[].class, long.class, String[].class,
                      EnumSet.class},
      new RemoteParam(), clientName, previous, excludedNodes, fileId,
      favoredNodes, addBlockFlags);
  // TODO verify the excludedNodes and favoredNodes are acceptable to this NN
  return (LocatedBlock) rpcClient.invokeSequential(
      locations, method, LocatedBlock.class, null);
}
/**
 * Excluded nodes are not verified and will be ignored by placement if they
 * are not in the same nameservice as the file.
 */
@Override // ClientProtocol
public LocatedBlock getAdditionalDatanode(final String src, final long fileId,
    final ExtendedBlock blk, final DatanodeInfo[] existings,
    final String[] existingStorageIDs, final DatanodeInfo[] excludes,
    final int numAdditionalNodes, final String clientName)
    throws IOException {
  checkOperation(OperationCategory.READ);
  // Try the candidate locations until the owning Namenode answers
  final List<RemoteLocation> locations = getLocationsForPath(src, false);
  RemoteMethod method = new RemoteMethod("getAdditionalDatanode",
      new Class<?>[] {String.class, long.class, ExtendedBlock.class,
                      DatanodeInfo[].class, String[].class,
                      DatanodeInfo[].class, int.class, String.class},
      new RemoteParam(), fileId, blk, existings, existingStorageIDs, excludes,
      numAdditionalNodes, clientName);
  return (LocatedBlock) rpcClient.invokeSequential(
      locations, method, LocatedBlock.class, null);
}
@Override // ClientProtocol
public void abandonBlock(ExtendedBlock b, long fileId, String src,
    String holder) throws IOException {
  checkOperation(OperationCategory.WRITE);
  RemoteMethod method = new RemoteMethod("abandonBlock",
      new Class<?>[] {ExtendedBlock.class, long.class, String.class,
                      String.class},
      b, fileId, new RemoteParam(), holder);
  // Route by the block (its pool id identifies the owning namespace)
  rpcClient.invokeSingle(b, method);
}
@Override // ClientProtocol
public boolean complete(String src, String clientName, ExtendedBlock last,
    long fileId) throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod method = new RemoteMethod("complete",
      new Class<?>[] {String.class, String.class, ExtendedBlock.class,
                      long.class},
      new RemoteParam(), clientName, last, fileId);
  // Complete can return true/false, so don't expect a result
  return ((Boolean) rpcClient.invokeSequential(
      locations, method, Boolean.class, null)).booleanValue();
}
@Override // ClientProtocol
public LocatedBlock updateBlockForPipeline(
    ExtendedBlock block, String clientName) throws IOException {
  checkOperation(OperationCategory.WRITE);
  RemoteMethod method = new RemoteMethod("updateBlockForPipeline",
      new Class<?>[] {ExtendedBlock.class, String.class},
      block, clientName);
  // Route by the block (its pool id identifies the owning namespace)
  return (LocatedBlock) rpcClient.invokeSingle(block, method);
}
/**
 * Datanode are not verified to be in the same nameservice as the old block.
 * TODO This may require validation.
 */
@Override // ClientProtocol
public void updatePipeline(String clientName, ExtendedBlock oldBlock,
    ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  RemoteMethod method = new RemoteMethod("updatePipeline",
      new Class<?>[] {String.class, ExtendedBlock.class, ExtendedBlock.class,
                      DatanodeID[].class, String[].class},
      clientName, oldBlock, newBlock, newNodes, newStorageIDs);
  // Route by the old block's pool id
  rpcClient.invokeSingle(oldBlock, method);
}
/**
 * Query the preferred block size for a path from the owning subcluster.
 */
@Override // ClientProtocol
public long getPreferredBlockSize(String src) throws IOException {
  checkOperation(OperationCategory.READ);
  List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod remoteMethod = new RemoteMethod("getPreferredBlockSize",
      new Class<?>[] {String.class}, new RemoteParam());
  return (Long) rpcClient.invokeSequential(
      locations, remoteMethod, Long.class, null);
}
/**
 * Determines combinations of eligible src/dst locations for a rename. A
 * rename cannot change the namespace. Renames are only allowed if there is
 * an eligible dst location in the same namespace as the source.
 *
 * @param srcLocations List of all potential source destinations where the
 *        path may be located. On return this list is trimmed to include
 *        only the paths that have corresponding destinations in the same
 *        namespace.
 * @param dst The destination path
 * @return A map of all eligible source namespaces and their corresponding
 *         replacement value.
 * @throws IOException If the dst paths could not be determined.
 */
private RemoteParam getRenameDestinations(
    final List<RemoteLocation> srcLocations, final String dst)
    throws IOException {
  final List<RemoteLocation> dstLocations = getLocationsForPath(dst, true);
  final Map<RemoteLocation, String> dstMap = new HashMap<>();
  // Pair every source location with a destination in the same namespace;
  // sources without a match are removed from the caller's list
  Iterator<RemoteLocation> iterator = srcLocations.iterator();
  while (iterator.hasNext()) {
    RemoteLocation srcLocation = iterator.next();
    RemoteLocation eligibleDst =
        getFirstMatchingLocation(srcLocation, dstLocations);
    if (eligibleDst != null) {
      // Use this dst for this source location
      dstMap.put(srcLocation, eligibleDst.getDest());
    } else {
      // This src destination is not valid, remove from the source list
      iterator.remove();
    }
  }
  return new RemoteParam(dstMap);
}
/**
 * Get the first location in the list that belongs to the same nameservice
 * as the reference location.
 *
 * @param location Location we are looking for.
 * @param locations List of locations.
 * @return The first matching location in the list, or null if none match.
 */
private RemoteLocation getFirstMatchingLocation(RemoteLocation location,
    List<RemoteLocation> locations) {
  String nsId = location.getNameserviceId();
  for (RemoteLocation candidate : locations) {
    if (candidate.getNameserviceId().equals(nsId)) {
      return candidate;
    }
  }
  return null;
}
@Deprecated
@Override // ClientProtocol
public boolean rename(final String src, final String dst)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> srcLocations =
      getLocationsForPath(src, true, false);
  // srcLocations may be trimmed by getRenameDestinations()
  final List<RemoteLocation> locs = new LinkedList<>(srcLocations);
  RemoteParam dstParam = getRenameDestinations(locs, dst);
  if (locs.isEmpty()) {
    // A rename cannot cross namespaces; fail if no same-namespace dst exists
    throw new IOException(
        "Rename of " + src + " to " + dst + " is not allowed," +
        " no eligible destination in the same namespace was found.");
  }
  RemoteMethod method = new RemoteMethod("rename",
      new Class<?>[] {String.class, String.class},
      new RemoteParam(), dstParam);
  return ((Boolean) rpcClient.invokeSequential(
      locs, method, Boolean.class, Boolean.TRUE)).booleanValue();
}
@Override // ClientProtocol
public void rename2(final String src, final String dst,
    final Options.Rename... options) throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> srcLocations =
      getLocationsForPath(src, true, false);
  // srcLocations may be trimmed by getRenameDestinations()
  final List<RemoteLocation> locs = new LinkedList<>(srcLocations);
  RemoteParam dstParam = getRenameDestinations(locs, dst);
  if (locs.isEmpty()) {
    // A rename cannot cross namespaces; fail if no same-namespace dst exists
    throw new IOException(
        "Rename of " + src + " to " + dst + " is not allowed," +
        " no eligible destination in the same namespace was found.");
  }
  RemoteMethod method = new RemoteMethod("rename2",
      new Class<?>[] {String.class, String.class, options.getClass()},
      new RemoteParam(), dstParam, options);
  rpcClient.invokeSequential(locs, method, null, null);
}
/**
 * Concatenate source files into the target. All files must live in the same
 * namespace; the namespace is derived from the block pool id of each file's
 * last block, so empty files (which have no blocks) are rejected explicitly
 * instead of triggering a NullPointerException.
 */
@Override // ClientProtocol
public void concat(String trg, String[] src) throws IOException {
  checkOperation(OperationCategory.WRITE);
  // See if the src and target files are all in the same namespace
  LocatedBlocks targetBlocks = getBlockLocations(trg, 0, 1);
  if (targetBlocks == null) {
    throw new IOException("Cannot locate blocks for target file - " + trg);
  }
  LocatedBlock lastLocatedBlock = targetBlocks.getLastLocatedBlock();
  if (lastLocatedBlock == null) {
    // An empty target has no block to derive the namespace from
    throw new IOException(
        "Cannot concatenate to empty target file - " + trg);
  }
  String targetBlockPoolId = lastLocatedBlock.getBlock().getBlockPoolId();
  for (String source : src) {
    LocatedBlocks sourceBlocks = getBlockLocations(source, 0, 1);
    if (sourceBlocks == null) {
      throw new IOException(
          "Cannot locate blocks for source file " + source);
    }
    LocatedBlock sourceLastBlock = sourceBlocks.getLastLocatedBlock();
    if (sourceLastBlock == null) {
      // An empty source has no block to derive the namespace from
      throw new IOException(
          "Cannot concatenate empty source file " + source);
    }
    String sourceBlockPoolId = sourceLastBlock.getBlock().getBlockPoolId();
    if (!sourceBlockPoolId.equals(targetBlockPoolId)) {
      throw new IOException("Cannot concatenate source file " + source
          + " because it is located in a different namespace"
          + " with block pool id " + sourceBlockPoolId
          + " from the target file with block pool id "
          + targetBlockPoolId);
    }
  }
  // Find locations in the matching namespace.
  final RemoteLocation targetDestination =
      getLocationForPath(trg, true, targetBlockPoolId);
  String[] sourceDestinations = new String[src.length];
  for (int i = 0; i < src.length; i++) {
    String sourceFile = src[i];
    RemoteLocation location =
        getLocationForPath(sourceFile, true, targetBlockPoolId);
    sourceDestinations[i] = location.getDest();
  }
  // Invoke in the namespace that owns every file
  RemoteMethod method = new RemoteMethod("concat",
      new Class<?>[] {String.class, String[].class},
      targetDestination.getDest(), sourceDestinations);
  rpcClient.invokeSingle(targetDestination, method);
}
@Override // ClientProtocol
public boolean truncate(String src, long newLength, String clientName)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod method = new RemoteMethod("truncate",
      new Class<?>[] {String.class, long.class, String.class},
      new RemoteParam(), newLength, clientName);
  return ((Boolean) rpcClient.invokeSequential(locations, method,
      Boolean.class, Boolean.TRUE)).booleanValue();
}
@Override // ClientProtocol
public boolean delete(String src, boolean recursive) throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> locations =
      getLocationsForPath(src, true, false);
  RemoteMethod method = new RemoteMethod("delete",
      new Class<?>[] {String.class, boolean.class}, new RemoteParam(),
      recursive);
  if (isPathAll(src)) {
    // Mount point fans out to all subclusters: delete everywhere
    return rpcClient.invokeAll(locations, method);
  } else {
    // Delete on the first location that holds the path
    return rpcClient.invokeSequential(locations, method,
        Boolean.class, Boolean.TRUE).booleanValue();
  }
}
/**
 * Create a directory. For mount points spanning all subclusters the
 * directory is created everywhere; otherwise it is created in the first
 * location, after checking it does not already exist in another one.
 */
@Override // ClientProtocol
public boolean mkdirs(String src, FsPermission masked, boolean createParent)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod method = new RemoteMethod("mkdirs",
      new Class<?>[] {String.class, FsPermission.class, boolean.class},
      new RemoteParam(), masked, createParent);
  // Create in all locations
  if (isPathAll(src)) {
    return rpcClient.invokeAll(locations, method);
  }
  if (locations.size() > 1) {
    // Check if this directory already exists
    try {
      HdfsFileStatus fileStatus = getFileInfo(src);
      if (fileStatus != null) {
        // When existing, the NN doesn't return an exception; return true
        return true;
      }
    } catch (IOException ioe) {
      // Can't query if this file exists or not.
      LOG.error("Error requesting file info for path {} while proxying mkdirs",
          src, ioe);
    }
  }
  // Create only in the highest-priority location
  RemoteLocation firstLocation = locations.get(0);
  return ((Boolean) rpcClient.invokeSingle(firstLocation, method))
      .booleanValue();
}
/**
 * Renew the client lease in every known namespace, since the client may
 * hold open files in any of them.
 */
@Override // ClientProtocol
public void renewLease(String clientName) throws IOException {
  checkOperation(OperationCategory.WRITE);
  Set<FederationNamespaceInfo> namespaces = namenodeResolver.getNamespaces();
  RemoteMethod remoteMethod = new RemoteMethod("renewLease",
      new Class<?>[] {String.class}, clientName);
  rpcClient.invokeConcurrent(namespaces, remoteMethod, false, false);
}
/**
 * List a directory by merging the partial listings from every subcluster a
 * mount point maps to, plus any mount points nested directly under it. The
 * merge is bounded by the smallest "last name" across subclusters so that
 * pagination with {@code startAfter} stays consistent.
 */
@Override // ClientProtocol
public DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws IOException {
  checkOperation(OperationCategory.READ);
  // Locate the dir and fetch the listing
  final List<RemoteLocation> locations = getLocationsForPath(src, true);
  RemoteMethod method = new RemoteMethod("getListing",
      new Class<?>[] {String.class, startAfter.getClass(), boolean.class},
      new RemoteParam(), startAfter, needLocation);
  Map<RemoteLocation, DirectoryListing> listings =
      rpcClient.invokeConcurrent(
          locations, method, false, false, DirectoryListing.class);
  // Sorted merge of all partial listings
  Map<String, HdfsFileStatus> nnListing = new TreeMap<>();
  int totalRemainingEntries = 0;
  int remainingEntries = 0;
  boolean namenodeListingExists = false;
  if (listings != null) {
    // Check the subcluster listing with the smallest name
    String lastName = null;
    for (Entry<RemoteLocation, DirectoryListing> entry :
        listings.entrySet()) {
      RemoteLocation location = entry.getKey();
      DirectoryListing listing = entry.getValue();
      if (listing == null) {
        LOG.debug("Cannot get listing from {}", location);
      } else {
        totalRemainingEntries += listing.getRemainingEntries();
        HdfsFileStatus[] partialListing = listing.getPartialListing();
        int length = partialListing.length;
        if (length > 0) {
          HdfsFileStatus lastLocalEntry = partialListing[length-1];
          String lastLocalName = lastLocalEntry.getLocalName();
          if (lastName == null || lastName.compareTo(lastLocalName) > 0) {
            lastName = lastLocalName;
          }
        }
      }
    }
    // Add existing entries
    for (Object value : listings.values()) {
      DirectoryListing listing = (DirectoryListing) value;
      if (listing != null) {
        namenodeListingExists = true;
        for (HdfsFileStatus file : listing.getPartialListing()) {
          String filename = file.getLocalName();
          if (totalRemainingEntries > 0 && filename.compareTo(lastName) > 0) {
            // Discarding entries further than the lastName
            remainingEntries++;
          } else {
            nnListing.put(filename, file);
          }
        }
        remainingEntries += listing.getRemainingEntries();
      }
    }
  }
  // Add mount points at this level in the tree
  final List<String> children = subclusterResolver.getMountPoints(src);
  if (children != null) {
    // Get the dates for each mount point
    Map<String, Long> dates = getMountPointDates(src);
    // Create virtual folder with the mount name
    for (String child : children) {
      long date = 0;
      if (dates != null && dates.containsKey(child)) {
        date = dates.get(child);
      }
      // TODO add number of children
      HdfsFileStatus dirStatus = getMountPointStatus(child, 0, date);
      // This may overwrite existing listing entries with the mount point
      // TODO don't add if already there?
      nnListing.put(child, dirStatus);
    }
  }
  if (!namenodeListingExists && nnListing.size() == 0) {
    // NN returns a null object if the directory cannot be found and has no
    // listing. If we didn't retrieve any NN listing data, and there are no
    // mount points here, return null.
    return null;
  }
  // Generate combined listing
  HdfsFileStatus[] combinedData = new HdfsFileStatus[nnListing.size()];
  combinedData = nnListing.values().toArray(combinedData);
  return new DirectoryListing(combinedData, remainingEntries);
}
/**
 * Get the status of a file or directory. Falls back to synthesizing a
 * virtual mount-point directory when the path only exists in the mount
 * table and not in any subcluster.
 */
@Override // ClientProtocol
public HdfsFileStatus getFileInfo(String src) throws IOException {
  checkOperation(OperationCategory.READ);
  final List<RemoteLocation> locations = getLocationsForPath(src, false);
  RemoteMethod method = new RemoteMethod("getFileInfo",
      new Class<?>[] {String.class}, new RemoteParam());
  HdfsFileStatus ret = null;
  // If it's a directory, we check in all locations
  if (isPathAll(src)) {
    ret = getFileInfoAll(locations, method);
  } else {
    // Check for file information sequentially
    ret = (HdfsFileStatus) rpcClient.invokeSequential(
        locations, method, HdfsFileStatus.class, null);
  }
  // If there is no real path, check mount points
  if (ret == null) {
    List<String> children = subclusterResolver.getMountPoints(src);
    if (children != null && !children.isEmpty()) {
      Map<String, Long> dates = getMountPointDates(src);
      long date = 0;
      if (dates != null && dates.containsKey(src)) {
        date = dates.get(src);
      }
      // Synthesize a directory status for the mount point itself
      ret = getMountPointStatus(src, children.size(), date);
    }
  }
  return ret;
}
/**
 * Get the file info from all the locations.
 *
 * @param locations Locations to check.
 * @param method The file information method to run.
 * @return The first file info if it's a file, the directory if it's
 *         everywhere.
 * @throws IOException If all the locations throw an exception.
 */
private HdfsFileStatus getFileInfoAll(final List<RemoteLocation> locations,
    final RemoteMethod method) throws IOException {
  // Get the file info from everybody
  Map<RemoteLocation, HdfsFileStatus> results =
      rpcClient.invokeConcurrent(locations, method, HdfsFileStatus.class);
  // We return the first file; iterate in location (priority) order so the
  // answer is deterministic regardless of which RPC finished first
  HdfsFileStatus dirStatus = null;
  for (RemoteLocation loc : locations) {
    HdfsFileStatus fileStatus = results.get(loc);
    if (fileStatus != null) {
      if (!fileStatus.isDirectory()) {
        // A regular file wins over any directory
        return fileStatus;
      } else if (dirStatus == null) {
        // Remember the first directory found as the fallback answer
        dirStatus = fileStatus;
      }
    }
  }
  return dirStatus;
}
/**
 * Check whether a file is closed in the subcluster that holds it.
 */
@Override // ClientProtocol
public boolean isFileClosed(String src) throws IOException {
  checkOperation(OperationCategory.READ);
  List<RemoteLocation> locations = getLocationsForPath(src, false);
  RemoteMethod remoteMethod = new RemoteMethod("isFileClosed",
      new Class<?>[] {String.class}, new RemoteParam());
  return (Boolean) rpcClient.invokeSequential(
      locations, remoteMethod, Boolean.class, Boolean.TRUE);
}
/**
 * Get the status of a symlink from the subcluster that holds it.
 */
@Override // ClientProtocol
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
  checkOperation(OperationCategory.READ);
  List<RemoteLocation> locations = getLocationsForPath(src, false);
  RemoteMethod remoteMethod = new RemoteMethod("getFileLinkInfo",
      new Class<?>[] {String.class}, new RemoteParam());
  return (HdfsFileStatus) rpcClient.invokeSequential(
      locations, remoteMethod, HdfsFileStatus.class, null);
}
@Override
public HdfsLocatedFileStatus getLocatedFileInfo(String src,
    boolean needBlockToken) throws IOException {
  checkOperation(OperationCategory.READ);
  final List<RemoteLocation> locations = getLocationsForPath(src, false);
  RemoteMethod method = new RemoteMethod("getLocatedFileInfo",
      new Class<?>[] {String.class, boolean.class}, new RemoteParam(),
      Boolean.valueOf(needBlockToken));
  // The remote call returns an HdfsLocatedFileStatus for this method name
  return (HdfsLocatedFileStatus) rpcClient.invokeSequential(
      locations, method, HdfsFileStatus.class, null);
}
/**
 * Aggregate the filesystem stats arrays from every namespace. Negative
 * entries (meaning "not reported") are skipped so they do not corrupt
 * the sums.
 */
@Override // ClientProtocol
public long[] getStats() throws IOException {
  checkOperation(OperationCategory.UNCHECKED);
  RemoteMethod method = new RemoteMethod("getStats");
  Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
  Map<FederationNamespaceInfo, long[]> results =
      rpcClient.invokeConcurrent(nss, method, true, false, long[].class);
  long[] combinedData = new long[STATS_ARRAY_LENGTH];
  for (long[] data : results.values()) {
    // Guard against namespaces returning shorter arrays
    for (int i = 0; i < combinedData.length && i < data.length; i++) {
      if (data[i] >= 0) {
        combinedData[i] += data[i];
      }
    }
  }
  return combinedData;
}
/**
 * Get the datanode report for all namespaces, requiring every namespace to
 * respond and without a timeout.
 */
@Override // ClientProtocol
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
    throws IOException {
  checkOperation(OperationCategory.UNCHECKED);
  return getDatanodeReport(type, true, 0);
}
/**
 * Get the datanode report with a timeout.
 * @param type Type of the datanode.
 * @param requireResponse If we require all the namespaces to report.
 * @param timeOutMs Time out for the reply in milliseconds.
 * @return List of datanodes, deduplicated across subclusters by transfer
 *         address.
 * @throws IOException If it cannot get the report.
 */
public DatanodeInfo[] getDatanodeReport(
    DatanodeReportType type, boolean requireResponse, long timeOutMs)
    throws IOException {
  checkOperation(OperationCategory.UNCHECKED);
  // LinkedHashMap keeps the first-seen entry per datanode
  Map<String, DatanodeInfo> datanodesMap = new LinkedHashMap<>();
  RemoteMethod method = new RemoteMethod("getDatanodeReport",
      new Class<?>[] {DatanodeReportType.class}, type);
  Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
  Map<FederationNamespaceInfo, DatanodeInfo[]> results =
      rpcClient.invokeConcurrent(nss, method, requireResponse, false,
          timeOutMs, DatanodeInfo[].class);
  for (Entry<FederationNamespaceInfo, DatanodeInfo[]> entry :
      results.entrySet()) {
    FederationNamespaceInfo ns = entry.getKey();
    DatanodeInfo[] result = entry.getValue();
    for (DatanodeInfo node : result) {
      String nodeId = node.getXferAddr();
      if (!datanodesMap.containsKey(nodeId)) {
        // Add the subcluster as a suffix to the network location
        node.setNetworkLocation(
            NodeBase.PATH_SEPARATOR_STR + ns.getNameserviceId() +
            node.getNetworkLocation());
        datanodesMap.put(nodeId, node);
      } else {
        LOG.debug("{} is in multiple subclusters", nodeId);
      }
    }
  }
  // Map -> Array
  Collection<DatanodeInfo> datanodes = datanodesMap.values();
  return toArray(datanodes, DatanodeInfo.class);
}
@Override // ClientProtocol
public DatanodeStorageReport[] getDatanodeStorageReport(
DatanodeReportType type) throws IOException {
checkOperation(OperationCategory.UNCHECKED);
Map<String, DatanodeStorageReport[]> dnSubcluster =
getDatanodeStorageReportMap(type);
// Avoid repeating machines in multiple subclusters
Map<String, DatanodeStorageReport> datanodesMap = new LinkedHashMap<>();
for (DatanodeStorageReport[] dns : dnSubcluster.values()) {
for (DatanodeStorageReport dn : dns) {
DatanodeInfo dnInfo = dn.getDatanodeInfo();
String nodeId = dnInfo.getXferAddr();
if (!datanodesMap.containsKey(nodeId)) {
datanodesMap.put(nodeId, dn);
}
// TODO merge somehow, right now it just takes the first one
}
}
Collection<DatanodeStorageReport> datanodes = datanodesMap.values();
DatanodeStorageReport[] combinedData =
new DatanodeStorageReport[datanodes.size()];
combinedData = datanodes.toArray(combinedData);
return combinedData;
}
/**
* Get the list of datanodes per subcluster.
*
* @param type Type of the datanodes to get.
* @return nsId -> datanode list.
* @throws IOException
*/
public Map<String, DatanodeStorageReport[]> getDatanodeStorageReportMap(
DatanodeReportType type) throws IOException {
Map<String, DatanodeStorageReport[]> ret = new LinkedHashMap<>();
RemoteMethod method = new RemoteMethod("getDatanodeStorageReport",
new Class<?>[] {DatanodeReportType.class}, type);
Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
Map<FederationNamespaceInfo, DatanodeStorageReport[]> results =
rpcClient.invokeConcurrent(
nss, method, true, false, DatanodeStorageReport[].class);
for (Entry<FederationNamespaceInfo, DatanodeStorageReport[]> entry :
results.entrySet()) {
FederationNamespaceInfo ns = entry.getKey();
String nsId = ns.getNameserviceId();
DatanodeStorageReport[] result = entry.getValue();
ret.put(nsId, result);
}
return ret;
}
@Override // ClientProtocol
public boolean setSafeMode(SafeModeAction action, boolean isChecked)
throws IOException {
checkOperation(OperationCategory.WRITE);
// Set safe mode in all the name spaces
RemoteMethod method = new RemoteMethod("setSafeMode",
new Class<?>[] {SafeModeAction.class, boolean.class},
action, isChecked);
Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
Map<FederationNamespaceInfo, Boolean> results =
rpcClient.invokeConcurrent(
nss, method, true, !isChecked, Boolean.class);
// We only report true if all the name space are in safe mode
int numSafemode = 0;
for (boolean safemode : results.values()) {
if (safemode) {
numSafemode++;
}
}
return numSafemode == results.size();
}
@Override // ClientProtocol
public boolean restoreFailedStorage(String arg) throws IOException {
checkOperation(OperationCategory.UNCHECKED);
RemoteMethod method = new RemoteMethod("restoreFailedStorage",
new Class<?>[] {String.class}, arg);
final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
Map<FederationNamespaceInfo, Boolean> ret =
rpcClient.invokeConcurrent(nss, method, true, false, Boolean.class);
boolean success = true;
for (boolean s : ret.values()) {
if (!s) {
success = false;
break;
}
}
return success;
}
@Override // ClientProtocol
public boolean saveNamespace(long timeWindow, long txGap) throws IOException {
checkOperation(OperationCategory.UNCHECKED);
RemoteMethod method = new RemoteMethod("saveNamespace",
new Class<?>[] {Long.class, Long.class}, timeWindow, txGap);
final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
Map<FederationNamespaceInfo, Boolean> ret =
rpcClient.invokeConcurrent(nss, method, true, false, boolean.class);
boolean success = true;
for (boolean s : ret.values()) {
if (!s) {
success = false;
break;
}
}
return success;
}
@Override // ClientProtocol
public long rollEdits() throws IOException {
checkOperation(OperationCategory.WRITE);
RemoteMethod method = new RemoteMethod("rollEdits", new Class<?>[] {});
final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
Map<FederationNamespaceInfo, Long> ret =
rpcClient.invokeConcurrent(nss, method, true, false, long.class);
// Return the maximum txid
long txid = 0;
for (long t : ret.values()) {
if (t > txid) {
txid = t;
}
}
return txid;
}
  @Override // ClientProtocol
  public void refreshNodes() throws IOException {
    checkOperation(OperationCategory.UNCHECKED);
    // Fan the refresh out to every namespace. The first boolean requires a
    // response from all of them (see getDatanodeReport); the second flag is
    // true here unlike most sibling calls -- TODO confirm its semantics in
    // RouterRpcClient#invokeConcurrent.
    RemoteMethod method = new RemoteMethod("refreshNodes", new Class<?>[] {});
    final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
    rpcClient.invokeConcurrent(nss, method, true, true);
  }

  /** Finalize a pending HDFS upgrade in every namespace. */
  @Override // ClientProtocol
  public void finalizeUpgrade() throws IOException {
    checkOperation(OperationCategory.UNCHECKED);
    RemoteMethod method = new RemoteMethod("finalizeUpgrade",
        new Class<?>[] {});
    final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
    rpcClient.invokeConcurrent(nss, method, true, false);
  }
  @Override // ClientProtocol
  public boolean upgradeStatus() throws IOException {
    // Not supported by the Router; include the calling method's name in the
    // error so the client can see which operation was rejected.
    String methodName = getMethodName();
    throw new UnsupportedOperationException(
        "Operation \"" + methodName + "\" is not supported");
  }
@Override // ClientProtocol
public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
throws IOException {
checkOperation(OperationCategory.READ);
RemoteMethod method = new RemoteMethod("rollingUpgrade",
new Class<?>[] {RollingUpgradeAction.class}, action);
final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
Map<FederationNamespaceInfo, RollingUpgradeInfo> ret =
rpcClient.invokeConcurrent(
nss, method, true, false, RollingUpgradeInfo.class);
// Return the first rolling upgrade info
RollingUpgradeInfo info = null;
for (RollingUpgradeInfo infoNs : ret.values()) {
if (info == null && infoNs != null) {
info = infoNs;
}
}
return info;
}
  @Override // ClientProtocol
  public void metaSave(String filename) throws IOException {
    checkOperation(OperationCategory.UNCHECKED);
    // Fan the metadata dump request out to every namespace.
    RemoteMethod method = new RemoteMethod("metaSave",
        new Class<?>[] {String.class}, filename);
    final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
    rpcClient.invokeConcurrent(nss, method, true, false);
  }
  @Override // ClientProtocol
  public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
      throws IOException {
    checkOperation(OperationCategory.READ);
    // Resolve the path and query the candidate locations sequentially; the
    // cookie is forwarded untouched for pagination.
    final List<RemoteLocation> locations = getLocationsForPath(path, false);
    RemoteMethod method = new RemoteMethod("listCorruptFileBlocks",
        new Class<?>[] {String.class, String.class},
        new RemoteParam(), cookie);
    return (CorruptFileBlocks) rpcClient.invokeSequential(
        locations, method, CorruptFileBlocks.class, null);
  }
  @Override // ClientProtocol
  public void setBalancerBandwidth(long bandwidth) throws IOException {
    checkOperation(OperationCategory.UNCHECKED);
    // Apply the new balancer bandwidth to every namespace.
    // NOTE(review): Long.class is used while ClientProtocol declares a
    // primitive long -- confirm RemoteMethod resolves boxed parameter types.
    RemoteMethod method = new RemoteMethod("setBalancerBandwidth",
        new Class<?>[] {Long.class}, bandwidth);
    final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
    rpcClient.invokeConcurrent(nss, method, true, false);
  }
  @Override // ClientProtocol
  public ContentSummary getContentSummary(String path) throws IOException {
    checkOperation(OperationCategory.READ);

    // Get the summaries from regular files
    Collection<ContentSummary> summaries = new LinkedList<>();
    FileNotFoundException notFoundException = null;
    try {
      final List<RemoteLocation> locations = getLocationsForPath(path, false);
      RemoteMethod method = new RemoteMethod("getContentSummary",
          new Class<?>[] {String.class}, new RemoteParam());
      Map<RemoteLocation, ContentSummary> results =
          rpcClient.invokeConcurrent(
              locations, method, false, false, ContentSummary.class);
      summaries.addAll(results.values());
    } catch (FileNotFoundException e) {
      // Don't fail yet: the path may still exist as a mount point below.
      notFoundException = e;
    }

    // Add mount points at this level in the tree
    final List<String> children = subclusterResolver.getMountPoints(path);
    if (children != null) {
      for (String child : children) {
        Path childPath = new Path(path, child);
        try {
          // Recurse into each child mount point and include its summary.
          ContentSummary mountSummary = getContentSummary(childPath.toString());
          if (mountSummary != null) {
            summaries.add(mountSummary);
          }
        } catch (Exception e) {
          // Best effort: a failing child mount does not break the whole
          // summary.
          LOG.error("Cannot get content summary for mount {}: {}",
              childPath, e.getMessage());
        }
      }
    }

    // Throw original exception if no original nor mount points
    if (summaries.isEmpty() && notFoundException != null) {
      throw notFoundException;
    }

    return aggregateContentSummary(summaries);
  }
/**
* Aggregate content summaries for each subcluster.
*
* @param summaries Collection of individual summaries.
* @return Aggregated content summary.
*/
private ContentSummary aggregateContentSummary(
Collection<ContentSummary> summaries) {
if (summaries.size() == 1) {
return summaries.iterator().next();
}
long length = 0;
long fileCount = 0;
long directoryCount = 0;
long quota = 0;
long spaceConsumed = 0;
long spaceQuota = 0;
for (ContentSummary summary : summaries) {
length += summary.getLength();
fileCount += summary.getFileCount();
directoryCount += summary.getDirectoryCount();
quota += summary.getQuota();
spaceConsumed += summary.getSpaceConsumed();
spaceQuota += summary.getSpaceQuota();
}
ContentSummary ret = new ContentSummary.Builder()
.length(length)
.fileCount(fileCount)
.directoryCount(directoryCount)
.quota(quota)
.spaceConsumed(spaceConsumed)
.spaceQuota(spaceQuota)
.build();
return ret;
}
  /** Flush the client buffer for a file being written. */
  @Override // ClientProtocol
  public void fsync(String src, long fileId, String clientName,
      long lastBlockLength) throws IOException {
    checkOperation(OperationCategory.WRITE);
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod("fsync",
        new Class<?>[] {String.class, long.class, String.class, long.class },
        new RemoteParam(), fileId, clientName, lastBlockLength);
    rpcClient.invokeSequential(locations, method)
;
  }

  /** Set the modification and access times of a file. */
  @Override // ClientProtocol
  public void setTimes(String src, long mtime, long atime) throws IOException {
    checkOperation(OperationCategory.WRITE);
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod("setTimes",
        new Class<?>[] {String.class, long.class, long.class},
        new RemoteParam(), mtime, atime);
    rpcClient.invokeSequential(locations, method);
  }

  @Override // ClientProtocol
  public void createSymlink(String target, String link, FsPermission dirPerms,
      boolean createParent) throws IOException {
    checkOperation(OperationCategory.WRITE);

    // TODO Verify that the link location is in the same NS as the targets
    final List<RemoteLocation> targetLocations =
        getLocationsForPath(target, true);
    final List<RemoteLocation> linkLocations =
        getLocationsForPath(link, true);
    // Only the first resolved location of the link is used as destination.
    RemoteLocation linkLocation = linkLocations.get(0);
    RemoteMethod method = new RemoteMethod("createSymlink",
        new Class<?>[] {String.class, String.class, FsPermission.class,
            boolean.class},
        new RemoteParam(), linkLocation.getDest(), dirPerms, createParent);
    rpcClient.invokeSequential(targetLocations, method);
  }

  /** Resolve the target of a symbolic link. */
  @Override // ClientProtocol
  public String getLinkTarget(String path) throws IOException {
    checkOperation(OperationCategory.READ);
    final List<RemoteLocation> locations = getLocationsForPath(path, true);
    RemoteMethod method = new RemoteMethod("getLinkTarget",
        new Class<?>[] {String.class}, new RemoteParam());
    return (String) rpcClient.invokeSequential(
        locations, method, String.class, null);
  }
  // The snapshot and cache management operations below are not implemented
  // by the Router: they only validate the operation category and return a
  // default value (null/0) or do nothing.

  @Override // Client Protocol
  public void allowSnapshot(String snapshotRoot) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  @Override // Client Protocol
  public void disallowSnapshot(String snapshot) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  @Override // ClientProtocol
  public void renameSnapshot(String snapshotRoot, String snapshotOldName,
      String snapshotNewName) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  @Override // Client Protocol
  public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
      throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }

  @Override // ClientProtocol
  public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
      String earlierSnapshotName, String laterSnapshotName) throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }

  @Override // ClientProtocol
  public SnapshotDiffReportListing getSnapshotDiffReportListing(
      String snapshotRoot, String earlierSnapshotName, String laterSnapshotName,
      byte[] startPath, int index) throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }

  @Override // ClientProtocol
  public long addCacheDirective(CacheDirectiveInfo path,
      EnumSet<CacheFlag> flags) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
    return 0;
  }

  @Override // ClientProtocol
  public void modifyCacheDirective(CacheDirectiveInfo directive,
      EnumSet<CacheFlag> flags) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  @Override // ClientProtocol
  public void removeCacheDirective(long id) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  @Override // ClientProtocol
  public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
      long prevId, CacheDirectiveInfo filter) throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }

  @Override // ClientProtocol
  public void addCachePool(CachePoolInfo info) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  @Override // ClientProtocol
  public void modifyCachePool(CachePoolInfo info) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  @Override // ClientProtocol
  public void removeCachePool(String cachePoolName) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  @Override // ClientProtocol
  public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
      throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }
  @Override // ClientProtocol
  public void modifyAclEntries(String src, List<AclEntry> aclSpec)
      throws IOException {
    checkOperation(OperationCategory.WRITE);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod("modifyAclEntries",
        new Class<?>[] {String.class, List.class},
        new RemoteParam(), aclSpec);
    // NOTE(review): uses the 4-arg invokeSequential with null/null while
    // sibling ACL methods use the 2-arg form -- confirm they are equivalent.
    rpcClient.invokeSequential(locations, method, null, null);
  }

  @Override // ClientProtocol
  public void removeAclEntries(String src, List<AclEntry> aclSpec)
      throws IOException {
    checkOperation(OperationCategory.WRITE);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod("removeAclEntries",
        new Class<?>[] {String.class, List.class},
        new RemoteParam(), aclSpec);
    rpcClient.invokeSequential(locations, method, null, null);
  }

  /** Remove the default ACL entries of a directory. */
  @Override // ClientProtocol
  public void removeDefaultAcl(String src) throws IOException {
    checkOperation(OperationCategory.WRITE);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod("removeDefaultAcl",
        new Class<?>[] {String.class}, new RemoteParam());
    rpcClient.invokeSequential(locations, method);
  }

  /** Remove all the ACL entries of a path. */
  @Override // ClientProtocol
  public void removeAcl(String src) throws IOException {
    checkOperation(OperationCategory.WRITE);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod("removeAcl",
        new Class<?>[] {String.class}, new RemoteParam());
    rpcClient.invokeSequential(locations, method);
  }

  /** Replace the ACL entries of a path. */
  @Override // ClientProtocol
  public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
    checkOperation(OperationCategory.WRITE);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod(
        "setAcl", new Class<?>[] {String.class, List.class},
        new RemoteParam(), aclSpec);
    rpcClient.invokeSequential(locations, method);
  }

  /** Get the ACL status of a path. */
  @Override // ClientProtocol
  public AclStatus getAclStatus(String src) throws IOException {
    checkOperation(OperationCategory.READ);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, false);
    RemoteMethod method = new RemoteMethod("getAclStatus",
        new Class<?>[] {String.class}, new RemoteParam());
    return (AclStatus) rpcClient.invokeSequential(
        locations, method, AclStatus.class, null);
  }
  /** Create an encryption zone rooted at src with the given key. */
  @Override // ClientProtocol
  public void createEncryptionZone(String src, String keyName)
      throws IOException {
    checkOperation(OperationCategory.WRITE);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod("createEncryptionZone",
        new Class<?>[] {String.class, String.class},
        new RemoteParam(), keyName);
    rpcClient.invokeSequential(locations, method);
  }

  /** Get the encryption zone that contains src, if any. */
  @Override // ClientProtocol
  public EncryptionZone getEZForPath(String src) throws IOException {
    checkOperation(OperationCategory.READ);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, false);
    RemoteMethod method = new RemoteMethod("getEZForPath",
        new Class<?>[] {String.class}, new RemoteParam());
    return (EncryptionZone) rpcClient.invokeSequential(
        locations, method, EncryptionZone.class, null);
  }

  // The remaining encryption zone operations are not implemented by the
  // Router: they only validate the operation category.

  @Override // ClientProtocol
  public BatchedEntries<EncryptionZone> listEncryptionZones(long prevId)
      throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }

  @Override // ClientProtocol
  public void reencryptEncryptionZone(String zone, ReencryptAction action)
      throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  @Override // ClientProtocol
  public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(
      long prevId) throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }
  /** Set an extended attribute on a path. */
  @Override // ClientProtocol
  public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
      throws IOException {
    checkOperation(OperationCategory.WRITE);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod("setXAttr",
        new Class<?>[] {String.class, XAttr.class, EnumSet.class},
        new RemoteParam(), xAttr, flag);
    rpcClient.invokeSequential(locations, method);
  }

  /** Get the requested extended attributes of a path. */
  @SuppressWarnings("unchecked")
  @Override // ClientProtocol
  public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
      throws IOException {
    checkOperation(OperationCategory.READ);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, false);
    RemoteMethod method = new RemoteMethod("getXAttrs",
        new Class<?>[] {String.class, List.class}, new RemoteParam(), xAttrs);
    return (List<XAttr>) rpcClient.invokeSequential(
        locations, method, List.class, null);
  }

  /** List all the extended attributes of a path. */
  @SuppressWarnings("unchecked")
  @Override // ClientProtocol
  public List<XAttr> listXAttrs(String src) throws IOException {
    checkOperation(OperationCategory.READ);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, false);
    RemoteMethod method = new RemoteMethod("listXAttrs",
        new Class<?>[] {String.class}, new RemoteParam());
    return (List<XAttr>) rpcClient.invokeSequential(
        locations, method, List.class, null);
  }

  /** Remove an extended attribute from a path. */
  @Override // ClientProtocol
  public void removeXAttr(String src, XAttr xAttr) throws IOException {
    checkOperation(OperationCategory.WRITE);

    // TODO handle virtual directories
    final List<RemoteLocation> locations = getLocationsForPath(src, true);
    RemoteMethod method = new RemoteMethod("removeXAttr",
        new Class<?>[] {String.class, XAttr.class}, new RemoteParam(), xAttr);
    rpcClient.invokeSequential(locations, method);
  }

  /** Check if the caller has the requested access to a path. */
  @Override // ClientProtocol
  public void checkAccess(String path, FsAction mode) throws IOException {
    checkOperation(OperationCategory.READ);

    // TODO handle virtual directories
    // NOTE(review): failIfLocked is true here although this is a READ
    // operation -- confirm this is intentional.
    final List<RemoteLocation> locations = getLocationsForPath(path, true);
    RemoteMethod method = new RemoteMethod("checkAccess",
        new Class<?>[] {String.class, FsAction.class},
        new RemoteParam(), mode);
    rpcClient.invokeSequential(locations, method);
  }
@Override // ClientProtocol
public long getCurrentEditLogTxid() throws IOException {
checkOperation(OperationCategory.READ);
RemoteMethod method = new RemoteMethod(
"getCurrentEditLogTxid", new Class<?>[] {});
final Set<FederationNamespaceInfo> nss = namenodeResolver.getNamespaces();
Map<FederationNamespaceInfo, Long> ret =
rpcClient.invokeConcurrent(nss, method, true, false, long.class);
// Return the maximum txid
long txid = 0;
for (long t : ret.values()) {
if (t > txid) {
txid = t;
}
}
return txid;
}
  // Not implemented by the Router: validates the operation category only.
  @Override // ClientProtocol
  public EventBatchList getEditsFromTxid(long txid) throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }

  // Not implemented by the Router: validates the operation category only.
  @Override
  public DataEncryptionKey getDataEncryptionKey() throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }

  // Not implemented: always returns null.
  // NOTE(review): unlike the other unsupported snapshot methods this one
  // calls checkOperation(WRITE) without the 'false' flag -- confirm whether
  // that is intentional.
  @Override
  public String createSnapshot(String snapshotRoot, String snapshotName)
      throws IOException {
    checkOperation(OperationCategory.WRITE);
    return null;
  }

  // Not implemented by the Router: validates the operation category only.
  @Override
  public void deleteSnapshot(String snapshotRoot, String snapshotName)
      throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  /** Set the quota for a path; delegated to the Router quota module. */
  @Override // ClientProtocol
  public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
      StorageType type) throws IOException {
    this.quotaCall.setQuota(path, namespaceQuota, storagespaceQuota, type);
  }

  /** Get the quota usage for a path; delegated to the quota module. */
  @Override // ClientProtocol
  public QuotaUsage getQuotaUsage(String path) throws IOException {
    checkOperation(OperationCategory.READ);
    return this.quotaCall.getQuotaUsage(path);
  }
@Override
public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
checkOperation(OperationCategory.WRITE);
// Block pool id -> blocks
Map<String, List<LocatedBlock>> blockLocations = new HashMap<>();
for (LocatedBlock block : blocks) {
String bpId = block.getBlock().getBlockPoolId();
List<LocatedBlock> bpBlocks = blockLocations.get(bpId);
if (bpBlocks == null) {
bpBlocks = new LinkedList<>();
blockLocations.put(bpId, bpBlocks);
}
bpBlocks.add(block);
}
// Invoke each block pool
for (Entry<String, List<LocatedBlock>> entry : blockLocations.entrySet()) {
String bpId = entry.getKey();
List<LocatedBlock> bpBlocks = entry.getValue();
LocatedBlock[] bpBlocksArray =
bpBlocks.toArray(new LocatedBlock[bpBlocks.size()]);
RemoteMethod method = new RemoteMethod("reportBadBlocks",
new Class<?>[] {LocatedBlock[].class},
new Object[] {bpBlocksArray});
rpcClient.invokeSingleBlockPool(bpId, method);
}
}
  // Not implemented by the Router: validates the operation category only.
  @Override
  public void unsetStoragePolicy(String src) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  // Not implemented by the Router: always returns null.
  @Override
  public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }
  // All the erasure coding operations are delegated to the Router's
  // erasure coding module.

  @Override // ClientProtocol
  public ErasureCodingPolicyInfo[] getErasureCodingPolicies()
      throws IOException {
    return erasureCoding.getErasureCodingPolicies();
  }

  @Override // ClientProtocol
  public Map<String, String> getErasureCodingCodecs() throws IOException {
    return erasureCoding.getErasureCodingCodecs();
  }

  @Override // ClientProtocol
  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
      ErasureCodingPolicy[] policies) throws IOException {
    return erasureCoding.addErasureCodingPolicies(policies);
  }

  @Override // ClientProtocol
  public void removeErasureCodingPolicy(String ecPolicyName)
      throws IOException {
    erasureCoding.removeErasureCodingPolicy(ecPolicyName);
  }

  @Override // ClientProtocol
  public void disableErasureCodingPolicy(String ecPolicyName)
      throws IOException {
    erasureCoding.disableErasureCodingPolicy(ecPolicyName);
  }

  @Override // ClientProtocol
  public void enableErasureCodingPolicy(String ecPolicyName)
      throws IOException {
    erasureCoding.enableErasureCodingPolicy(ecPolicyName);
  }

  @Override // ClientProtocol
  public ErasureCodingPolicy getErasureCodingPolicy(String src)
      throws IOException {
    return erasureCoding.getErasureCodingPolicy(src);
  }

  @Override // ClientProtocol
  public void setErasureCodingPolicy(String src, String ecPolicyName)
      throws IOException {
    erasureCoding.setErasureCodingPolicy(src, ecPolicyName);
  }

  @Override // ClientProtocol
  public void unsetErasureCodingPolicy(String src) throws IOException {
    erasureCoding.unsetErasureCodingPolicy(src);
  }

  @Override
  public ECBlockGroupStats getECBlockGroupStats() throws IOException {
    return erasureCoding.getECBlockGroupStats();
  }
  // Not implemented by the Router: validates the operation category only.
  @Override
  public ReplicatedBlockStats getReplicatedBlockStats() throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }

  /**
   * @deprecated Use {@link #listOpenFiles(long, EnumSet, String)} instead;
   * this variant forwards with the default filter (all open files).
   */
  @Deprecated
  @Override
  public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId)
      throws IOException {
    return listOpenFiles(prevId, EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
        OpenFilesIterator.FILTER_PATH_DEFAULT);
  }

  // Not implemented by the Router: validates the operation category only.
  @Override
  public BatchedEntries<OpenFileEntry> listOpenFiles(long prevId,
      EnumSet<OpenFilesType> openFilesTypes, String path) throws IOException {
    checkOperation(OperationCategory.READ, false);
    return null;
  }
  // All the NamenodeProtocol operations are delegated to the Router's
  // NamenodeProtocol module (nnProto).

  @Override // NamenodeProtocol
  public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size,
      long minBlockSize) throws IOException {
    return nnProto.getBlocks(datanode, size, minBlockSize);
  }

  @Override // NamenodeProtocol
  public ExportedBlockKeys getBlockKeys() throws IOException {
    return nnProto.getBlockKeys();
  }

  @Override // NamenodeProtocol
  public long getTransactionID() throws IOException {
    return nnProto.getTransactionID();
  }

  @Override // NamenodeProtocol
  public long getMostRecentCheckpointTxId() throws IOException {
    return nnProto.getMostRecentCheckpointTxId();
  }

  @Override // NamenodeProtocol
  public CheckpointSignature rollEditLog() throws IOException {
    return nnProto.rollEditLog();
  }

  @Override // NamenodeProtocol
  public NamespaceInfo versionRequest() throws IOException {
    return nnProto.versionRequest();
  }

  @Override // NamenodeProtocol
  public void errorReport(NamenodeRegistration registration, int errorCode,
      String msg) throws IOException {
    nnProto.errorReport(registration, errorCode, msg);
  }

  @Override // NamenodeProtocol
  public NamenodeRegistration registerSubordinateNamenode(
      NamenodeRegistration registration) throws IOException {
    return nnProto.registerSubordinateNamenode(registration);
  }

  @Override // NamenodeProtocol
  public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
      throws IOException {
    return nnProto.startCheckpoint(registration);
  }

  @Override // NamenodeProtocol
  public void endCheckpoint(NamenodeRegistration registration,
      CheckpointSignature sig) throws IOException {
    nnProto.endCheckpoint(registration, sig);
  }

  @Override // NamenodeProtocol
  public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
      throws IOException {
    return nnProto.getEditLogManifest(sinceTxId);
  }

  @Override // NamenodeProtocol
  public boolean isUpgradeFinalized() throws IOException {
    return nnProto.isUpgradeFinalized();
  }

  @Override // NamenodeProtocol
  public boolean isRollingUpgrade() throws IOException {
    return nnProto.isRollingUpgrade();
  }
  /**
   * Locate the location with the matching block pool id.
   *
   * @param path Path to check.
   * @param failIfLocked Fail the request if locked (top mount point).
   * @param blockPoolId The block pool ID of the namespace to search for.
   * @return The remote location for the path within the nameservice that
   *         owns the given block pool.
   * @throws IOException if the location for this path cannot be determined.
   */
  private RemoteLocation getLocationForPath(
      String path, boolean failIfLocked, String blockPoolId)
      throws IOException {

    // Candidate locations for the path across the federation.
    final List<RemoteLocation> locations =
        getLocationsForPath(path, failIfLocked);

    // Map the block pool id back to a nameservice id.
    String nameserviceId = null;
    Set<FederationNamespaceInfo> namespaces =
        this.namenodeResolver.getNamespaces();
    for (FederationNamespaceInfo namespace : namespaces) {
      if (namespace.getBlockPoolId().equals(blockPoolId)) {
        nameserviceId = namespace.getNameserviceId();
        break;
      }
    }
    // Pick the candidate location that lives in that nameservice.
    if (nameserviceId != null) {
      for (RemoteLocation location : locations) {
        if (location.getNameserviceId().equals(nameserviceId)) {
          return location;
        }
      }
    }
    throw new IOException(
        "Cannot locate a nameservice for block pool " + blockPoolId);
  }
  /**
   * Get the possible locations of a path in the federated cluster.
   * During the get operation, it will do the quota verification.
   *
   * @param path Path to check.
   * @param failIfLocked Fail the request if locked (top mount point).
   * @return Prioritized list of locations in the federated cluster.
   * @throws IOException If the location for this path cannot be determined.
   */
  protected List<RemoteLocation> getLocationsForPath(String path,
      boolean failIfLocked) throws IOException {
    // Quota verification is enabled by default in this overload.
    return getLocationsForPath(path, failIfLocked, true);
  }
  /**
   * Get the possible locations of a path in the federated cluster.
   *
   * @param path Path to check.
   * @param failIfLocked Fail the request if locked (top mount point).
   * @param needQuotaVerify If need to do the quota verification.
   * @return Prioritized list of locations in the federated cluster.
   * @throws IOException If the location for this path cannot be determined.
   */
  protected List<RemoteLocation> getLocationsForPath(String path,
      boolean failIfLocked, boolean needQuotaVerify) throws IOException {
    try {
      // Check the location for this path
      final PathLocation location =
          this.subclusterResolver.getDestinationForPath(path);
      if (location == null) {
        throw new IOException("Cannot find locations for " + path + " in " +
            this.subclusterResolver);
      }

      // We may block some write operations
      if (opCategory.get() == OperationCategory.WRITE) {
        // Check if the path is in a read only mount point
        if (isPathReadOnly(path)) {
          if (this.rpcMonitor != null) {
            this.rpcMonitor.routerFailureReadOnly();
          }
          throw new IOException(path + " is in a read only mount point");
        }

        // Check quota
        if (this.router.isQuotaEnabled() && needQuotaVerify) {
          RouterQuotaUsage quotaUsage = this.router.getQuotaManager()
              .getQuotaUsage(path);
          if (quotaUsage != null) {
            quotaUsage.verifyNamespaceQuota();
            quotaUsage.verifyStoragespaceQuota();
          }
        }
      }

      // Filter disabled subclusters
      Set<String> disabled = namenodeResolver.getDisabledNamespaces();
      List<RemoteLocation> locs = new ArrayList<>();
      for (RemoteLocation loc : location.getDestinations()) {
        if (!disabled.contains(loc.getNameserviceId())) {
          locs.add(loc);
        }
      }
      return locs;
    } catch (IOException ioe) {
      // Record the failure in the RPC metrics before re-throwing.
      if (this.rpcMonitor != null) {
        this.rpcMonitor.routerFailureStateStore();
      }
      throw ioe;
    }
  }
/**
* Check if a path should be in all subclusters.
*
* @param path Path to check.
* @return If a path should be in all subclusters.
*/
private boolean isPathAll(final String path) {
if (subclusterResolver instanceof MountTableResolver) {
try {
MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
MountTable entry = mountTable.getMountPoint(path);
if (entry != null) {
return entry.isAll();
}
} catch (IOException e) {
LOG.error("Cannot get mount point", e);
}
}
return false;
}
/**
* Check if a path is in a read only mount point.
*
* @param path Path to check.
* @return If the path is in a read only mount point.
*/
private boolean isPathReadOnly(final String path) {
if (subclusterResolver instanceof MountTableResolver) {
try {
MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
MountTable entry = mountTable.getMountPoint(path);
if (entry != null && entry.isReadOnly()) {
return true;
}
} catch (IOException e) {
LOG.error("Cannot get mount point", e);
}
}
return false;
}
/**
* Get the modification dates for mount points.
*
* @param path Name of the path to start checking dates from.
* @return Map with the modification dates for all sub-entries.
*/
private Map<String, Long> getMountPointDates(String path) {
Map<String, Long> ret = new TreeMap<>();
if (subclusterResolver instanceof MountTableResolver) {
try {
final List<String> children = subclusterResolver.getMountPoints(path);
for (String child : children) {
Long modTime = getModifiedTime(ret, path, child);
ret.put(child, modTime);
}
} catch (IOException e) {
LOG.error("Cannot get mount point", e);
}
}
return ret;
}
/**
* Get modified time for child. If the child is present in mount table it
* will return the modified time. If the child is not present but subdirs of
* this child are present then it will return latest modified subdir's time
* as modified time of the requested child.
* @param ret contains children and modified times.
* @param mountTable.
* @param path Name of the path to start checking dates from.
* @param child child of the requested path.
* @return modified time.
*/
private long getModifiedTime(Map<String, Long> ret, String path,
String child) {
MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
String srcPath;
if (path.equals(Path.SEPARATOR)) {
srcPath = Path.SEPARATOR + child;
} else {
srcPath = path + Path.SEPARATOR + child;
}
Long modTime = 0L;
try {
// Get mount table entry for the srcPath
MountTable entry = mountTable.getMountPoint(srcPath);
// if srcPath is not in mount table but its subdirs are in mount
// table we will display latest modified subdir date/time.
if (entry == null) {
List<MountTable> entries = mountTable.getMounts(srcPath);
for (MountTable eachEntry : entries) {
// Get the latest date
if (ret.get(child) == null ||
ret.get(child) < eachEntry.getDateModified()) {
modTime = eachEntry.getDateModified();
}
}
} else {
modTime = entry.getDateModified();
}
} catch (IOException e) {
LOG.error("Cannot get mount point", e);
}
return modTime;
}
/**
* Create a new file status for a mount point.
*
* @param name Name of the mount point.
* @param childrenNum Number of children.
* @param date Map with the dates.
* @return New HDFS file status representing a mount point.
*/
private HdfsFileStatus getMountPointStatus(
String name, int childrenNum, long date) {
long modTime = date;
long accessTime = date;
FsPermission permission = FsPermission.getDirDefault();
String owner = this.superUser;
String group = this.superGroup;
try {
// TODO support users, it should be the user for the pointed folder
UserGroupInformation ugi = getRemoteUser();
owner = ugi.getUserName();
group = ugi.getPrimaryGroupName();
} catch (IOException e) {
LOG.error("Cannot get the remote user: {}", e.getMessage());
}
long inodeId = 0;
return new HdfsFileStatus.Builder()
.isdir(true)
.mtime(modTime)
.atime(accessTime)
.perm(permission)
.owner(owner)
.group(group)
.symlink(new byte[0])
.path(DFSUtil.string2Bytes(name))
.fileId(inodeId)
.children(childrenNum)
.build();
}
/**
* Get the name of the method that is calling this function.
*
* @return Name of the method calling this function.
*/
private static String getMethodName() {
final StackTraceElement[] stack = Thread.currentThread().getStackTrace();
String methodName = stack[3].getMethodName();
return methodName;
}
/**
* Get the user that is invoking this operation.
*
* @return Remote user group information.
* @throws IOException If we cannot get the user information.
*/
static UserGroupInformation getRemoteUser() throws IOException {
UserGroupInformation ugi = Server.getRemoteUser();
return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
}
/**
* Merge the outputs from multiple namespaces.
* @param map Namespace -> Output array.
* @param clazz Class of the values.
* @return Array with the outputs.
*/
protected static <T> T[] merge(
Map<FederationNamespaceInfo, T[]> map, Class<T> clazz) {
// Put all results into a set to avoid repeats
Set<T> ret = new LinkedHashSet<>();
for (T[] values : map.values()) {
for (T val : values) {
ret.add(val);
}
}
return toArray(ret, clazz);
}
/**
* Convert a set of values into an array.
* @param set Input set.
* @param clazz Class of the values.
* @return Array with the values in set.
*/
private static <T> T[] toArray(Collection<T> set, Class<T> clazz) {
@SuppressWarnings("unchecked")
T[] combinedData = (T[]) Array.newInstance(clazz, set.size());
combinedData = set.toArray(combinedData);
return combinedData;
}
  /**
   * Get quota module implement.
   *
   * @return The {@link Quota} module this RPC server delegates quota
   *         operations to.
   */
  public Quota getQuotaModule() {
    return this.quotaCall;
  }
  /**
   * Get RPC metrics info.
   *
   * <p>NOTE(review): {@code rpcMonitor} is null-checked elsewhere in this
   * class, so this accessor will NPE when RPC monitoring is disabled --
   * confirm callers only use it with monitoring enabled.
   *
   * @return The instance of FederationRPCMetrics.
   */
  public FederationRPCMetrics getRPCMetrics() {
    return this.rpcMonitor.getRPCMetrics();
  }
}
| szegedim/hadoop | hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java | Java | apache-2.0 | 92,997 |
// Copyright 2017 Twitter. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.twitter.heron.downloader;
import java.net.URI;
import java.nio.file.Path;
/**
 * Fetches the resource identified by a URI and stores it at a local path.
 */
public interface Downloader {
  /**
   * Download the content at {@code uri} into {@code destination}.
   *
   * @param uri source location of the artifact to fetch
   * @param destination local filesystem path to write the content to
   * @throws Exception if the transfer fails for any reason
   */
  void download(URI uri, Path destination) throws Exception;
}
| lucperkins/heron | heron/downloaders/src/java/com/twitter/heron/downloader/Downloader.java | Java | apache-2.0 | 793 |
package org.strategoxt.lang.gradual;
import java.io.Serializable;
/**
 * A serializable type descriptor.
 *
 * <p>The {@link Object} methods are re-declared here, presumably to signal
 * that implementations are expected to provide value-based equality, a
 * consistent hash code, and a readable textual form -- confirm against the
 * implementing classes. The redundant {@code public} modifier previously on
 * {@code toString()} has been dropped for consistency with the other
 * declarations (interface members are implicitly public).
 */
public interface Type extends Serializable {
    boolean equals(Object o);
    int hashCode();
    String toString();
}
| metaborg/strategoxt | strategoxt/stratego-libraries/java-backend/java/runtime/org/strategoxt/lang/gradual/Type.java | Java | apache-2.0 | 195 |
/*
* Copyright 2015 Marek Kaput
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.rustidea.psi.impl;
import com.intellij.lang.ASTNode;
import com.intellij.psi.tree.TokenSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.rustidea.psi.IRsType;
import org.rustidea.psi.RsElementVisitor;
import org.rustidea.psi.RsStruct;
import org.rustidea.psi.RsTypeParameterList;
import org.rustidea.psi.types.RsPsiTypes;
import org.rustidea.stubs.RsStructStub;
/**
 * PSI element implementation for a Rust {@code struct} item.
 */
public class RsStructImpl extends IRsNamedItemPsiElement<RsStructStub> implements RsStruct {
    // A struct body is parsed either as a braced struct type or a tuple type.
    private static final TokenSet STRUCT_OR_TUPLE_TYPE = TokenSet.create(RsPsiTypes.STRUCT_TYPE, RsPsiTypes.TUPLE_TYPE);

    /** Stub-backed constructor (element restored from the stub index). */
    public RsStructImpl(@NotNull RsStructStub stub) {
        super(stub, RsPsiTypes.STRUCT);
    }

    /** AST-backed constructor (element built from a fully parsed file). */
    public RsStructImpl(@NotNull ASTNode node) {
        super(node);
    }

    /** @return the generic parameter list, or null if the struct has none. */
    @Nullable
    @Override
    public RsTypeParameterList getTypeParameterList() {
        return findChildByType(RsPsiTypes.TYPE_PARAMETER_LIST);
    }

    /** @return the struct/tuple body of the definition, or null if absent. */
    @Nullable
    @Override
    public IRsType getDefinition() {
        return findChildByType(STRUCT_OR_TUPLE_TYPE);
    }

    @Override
    public void accept(@NotNull RsElementVisitor visitor) {
        visitor.visitStruct(this);
    }
}
| jajakobyly/rustidea | src/org/rustidea/psi/impl/RsStructImpl.java | Java | apache-2.0 | 1,812 |
/*-
* Copyright 2012 Benedikt Meurer
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.benediktmeurer.eui4j;
import static org.testng.Assert.assertEquals;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
/**
 * Unit tests for the {@link EUI48XmlAdapter} class.
 *
 * @author Benedikt Meurer
 * @see EUI48XmlAdapter
 */
public class EUI48XmlAdapterTest {
    /**
     * Canonical string forms of EUI-48 addresses used to drive both tests;
     * {@code null} represents an absent value.
     */
    @DataProvider(name = "names")
    public String[][] dataProviderNames() {
        return new String[][] {
            { null },
            { "00:00:00:00:00:00" },
            { "00:11:22:33:44:55" },
            { "00:00:00:11:11:11" },
            { "ff:ff:ff:ff:ff:ff" }
        };
    }

    /** Marshalling an EUI48 must yield its canonical string form (null-safe). */
    @Test(dataProvider = "names")
    public void testMarshall(String name) throws Exception {
        assertEquals(new EUI48XmlAdapter().marshal(name == null ? null : EUI48.fromString(name)), name);
    }

    /** Unmarshalling a string must yield the equivalent EUI48 (null-safe). */
    @Test(dataProvider = "names")
    public void testUnmarshall(String name) throws Exception {
        assertEquals(new EUI48XmlAdapter().unmarshal(name), name == null ? null : EUI48.fromString(name));
    }
}
| bmeurer/eui4j | src/test/java/de/benediktmeurer/eui4j/EUI48XmlAdapterTest.java | Java | apache-2.0 | 1,651 |
// Modal window for managing which groups and users a scenario ("cenário") is
// shared with. The two left-hand grids list the current associations; the two
// collapsible right-hand grids are pick lists used to add new ones. All
// handlers live in GerenciarGruposCenarioController; changes are persisted
// via onSaveBtnClick.
Ext.define('MCLM.view.cenarios.GerenciarGruposCenarioWindow', {
    requires: [
        'MCLM.store.Grupo',
        'Ext.grid.plugin.DragDrop',
        'MCLM.view.cenarios.GerenciarGruposCenarioController'
    ],
    extend: 'Ext.window.Window',
    id: 'gerenciarGruposCenarioWindow',
    itemId: 'gerenciarGruposCenarioWindow',
    controller: 'gerenciar-grupos-cenario',
    modal: true,
    width: '60%',
    height: 500,
    layout: {
        type: 'vbox',
        align: 'stretch',
        pack: 'start'
    },
    tbar: [
        {iconCls: 'save-icon', tooltip: '<b>Salvar Alterações</b>', handler: 'onSaveBtnClick'}
    ],
    items: [
        // Top row: group grids (associated on the left, pick list on the right).
        {
            xtype: 'container', layout: 'hbox',
            height: '50%', width: '100%',
            items: [
                // Groups currently associated with the scenario. In-memory
                // store; presumably filled by the controller on show.
                {
                    itemId: 'associatedGroupsGrid', xtype: 'grid',
                    title: 'Grupos Associados', titleAlign: 'center',
                    scrollable: true, width: '50%',
                    height: '100%', store: {
                        proxy: 'memory',
                        sorters: ['name'],
                        autoSort: true
                    },
                    // Header tool: expand the collapsed pick list of all groups.
                    tools: [{
                        iconCls: 'group-add-icon',
                        tooltip: '<b>Compartilhar com Grupos</b>',
                        handler: () => Ext.getCmp('gerenciarGruposCenarioWindow').down('#groupsGrid').expand()
                    }],
                    columns: [
                        {
                            dataIndex: 'name', text: 'Nome',
                            width: '90%', items: [
                                // In-header filter field (debounced by 500ms).
                                {
                                    xtype: 'textfield',
                                    emptyText: 'Filtrar',
                                    enableKeyEvents: true,
                                    listeners: {
                                        keyup: 'onAssociatedGroupFilterKeyup',
                                        buffer: 500
                                    }
                                }
                            ]
                        },
                        {
                            xtype: 'actioncolumn', width: '5%',
                            items: [
                                {iconCls: "details-icon", tooltip: 'Detalhes', handler: 'onGroupDetailsBtnClick'}
                            ]
                        },
                        {
                            xtype: 'actioncolumn', width: '5%',
                            items: [
                                {tooltip: 'Remover Associação', iconCls: 'cancel-icon', handler: 'onAssociatedGroupRemoveBtnClick'}
                            ]}
                    ]
                },
                // Pick list of all groups (collapsed by default).
                {
                    itemId: 'groupsGrid', xtype: 'grid',
                    title: 'Grupos', titleAlign: 'center',
                    scrollable: true, width: '50%',
                    collapsed: true, collapsible: true,
                    animCollapse: true, height: '100%',
                    store: {
                        type: 'grupo',
                        pageSize: 0
                    },
                    columns: [
                        {
                            dataIndex: 'name', text: 'Nome',
                            width: '90%', items: [
                                {
                                    xtype: 'textfield',
                                    emptyText: 'Filtrar',
                                    enableKeyEvents: true,
                                    listeners: {
                                        keyup: 'onGroupFilterKeyup',
                                        buffer: 500
                                    }
                                }
                            ]
                        },
                        {
                            xtype: 'actioncolumn', width: '5%',
                            items: [
                                {iconCls: 'details-icon', tooltip: 'Detalhes', handler: 'onGroupDetailsBtnClick'}
                            ]
                        },
                        {
                            xtype: 'actioncolumn', width: '5%',
                            items: [
                                {tooltip: 'Associar', iconCls: 'plus-icon', handler: 'onGroupAssociationBtnClick'}
                            ]}
                    ],
                    listeners: {
                        rowdblclick: 'onGroupRowDblClick'
                    }
                }
            ]
        },
        // Bottom row: user grids (associated on the left, pick list on the right).
        {
            xtype: 'container', layout: 'hbox',
            height: '50%', width: '100%',
            items: [
                // Users currently associated with the scenario (in-memory store).
                {
                    itemId: 'associatedUsersGrid', xtype: 'grid',
                    titleAlign: 'center', scrollable: true,
                    title: 'Usuários Associados', width: '50%',
                    height: '100%', store: {
                        proxy: 'memory',
                        sorters: ['nome'],
                        autoSort: true
                    },
                    tools: [{
                        iconCls: 'user-add-icon',
                        tooltip: '<b>Compartilhar com Usuários</b>',
                        handler: () => Ext.getCmp('gerenciarGruposCenarioWindow').down('#usersGrid').expand()
                    }],
                    columns: [
                        {
                            dataIndex: 'cpf', text: 'CPF',
                            renderer: ColumnRenderer.cpf, width: '30%',
                            items: [
                                {
                                    xtype: 'textfield',
                                    emptyText: 'Filtrar',
                                    enableKeyEvents: true,
                                    listeners: {
                                        keyup: 'onAssociatedUserFilterKeyup',
                                        buffer: 500
                                    }
                                }
                            ]
                        },
                        {
                            dataIndex: 'nome', text: 'Nome',
                            width: '40%', items: [
                                {
                                    xtype: 'textfield',
                                    emptyText: 'Filtrar',
                                    enableKeyEvents: true,
                                    listeners: {
                                        keyup: 'onAssociatedUserFilterKeyup',
                                        buffer: 500
                                    }
                                }
                            ]
                        },
                        {dataIndex: 'siglaOm', text: 'OM', width: '10%'},
                        {dataIndex: 'siglaForca', text: 'Força', width: '10%'},
                        {xtype: 'actioncolumn', width: '10%', items: [
                            {tooltip: 'Remover', iconCls: 'cancel-icon', handler: 'onAssociatedUserRemoveBtnClick'}
                        ]}
                    ]
                },
                // Pick list of all users (remote, paged, collapsed by default).
                {
                    itemId: 'usersGrid', xtype: 'grid',
                    titleAlign: 'center', scrollable: true,
                    title: 'Usuários', width: '50%',
                    collapsed: true, collapsible: true,
                    animCollapse: true, height: '100%',
                    store: {
                        type: 'apolo-user',
                        autoLoad: true,
                        pageSize: 10
                    },
                    columns: [
                        {
                            dataIndex: 'cpf', text: 'CPF',
                            renderer: ColumnRenderer.cpf, width: '30%',
                            items: [
                                {
                                    xtype: 'textfield',
                                    emptyText: 'Filtrar',
                                    enableKeyEvents: true,
                                    listeners: {
                                        keyup: 'onUserFilterKeyup',
                                        buffer: 500
                                    }
                                }
                            ]
                        },
                        {
                            dataIndex: 'nome', text: 'Nome',
                            width: '40%', items: [
                                {
                                    xtype: 'textfield',
                                    emptyText: 'Filtrar',
                                    enableKeyEvents: true,
                                    listeners: {
                                        keyup: 'onUserFilterKeyup',
                                        buffer: 500
                                    }
                                }
                            ]
                        },
                        {dataIndex: 'siglaOm', text: 'OM', width: '10%'},
                        {dataIndex: 'siglaForca', text: 'Força', width: '10%'},
                        {xtype: 'actioncolumn', width: '10%', items: [
                            {tooltip: 'Associar', iconCls: 'plus-icon', handler: 'onUserAssociationBtnClick'}
                        ]}
                    ],
                    listeners: {
                        rowdblclick: 'onUserRowDblClick'
                    },
                    dockedItems: [{
                        xtype: 'pagingtoolbar',
                        // NOTE(review): this store config creates a *second*
                        // 'apolo-user' store instance, so the toolbar does not
                        // actually page the grid's own store despite the
                        // comment below -- confirm and bind the toolbar to the
                        // grid's store instance (e.g. via a shared storeId).
                        store: {
                            type: 'apolo-user',
                            autoLoad: true,
                            pageSize: 10
                        }, // same store GridPanel is using
                        dock: 'bottom',
                        displayInfo: true
                    }]
                }
            ]
        }
    ],
    listeners: {show: 'onShow'}
});
| icemagno/mclm | src/main/webapp/app/view/cenarios/GerenciarGruposCenarioWindow.js | JavaScript | apache-2.0 | 10,122 |
package org.agilewiki.jactor2.core.impl;
import java.util.Timer;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.blockwithme.util.base.SystemUtils;
/**
 * A scheduler for Plant, created by PlantConfiguration.
 *
 * <p>Backed by a single shared {@link Timer} (obtained from
 * {@code SystemUtils.getTimer()}), so all scheduled work runs serially on
 * that timer thread.
 */
public class JActorStTestPlantScheduler implements
        org.agilewiki.jactor2.core.plant.PlantScheduler {
    /**
     * Timer task that also implements {@code ScheduledFuture} (via
     * {@code MyAbstractTimerTask}) so callers can cancel and inspect it.
     */
    @SuppressWarnings("rawtypes")
    private class MyTimerTask extends MyAbstractTimerTask {
        private volatile Runnable runnable;
        private volatile boolean cancelled;
        private volatile boolean done;
        // True for one-shot tasks, false for fixed-rate repeating tasks.
        private final boolean once;
        public MyTimerTask(final Runnable runnable, final boolean once) {
            this.runnable = runnable;
            this.once = once;
        }
        /* (non-Javadoc)
         * @see java.util.TimerTask#run()
         */
        @Override
        public void run() {
            if (die) {
                // Scheduler was closed: cancel lazily on next fire and drop
                // the runnable reference so it can be collected.
                cancel();
                runnable = null;
                timer.purge();
            } else {
                if (once) {
                    done = true;
                }
                runnable.run();
            }
        }
        /* (non-Javadoc)
         * @see java.util.concurrent.Delayed#getDelay(java.util.concurrent.TimeUnit)
         */
        @Override
        public long getDelay(final TimeUnit unit) {
            return unit.convert(
                    scheduledExecutionTime() - System.currentTimeMillis(),
                    TimeUnit.MILLISECONDS);
        }
        @Override
        public boolean cancel() {
            cancelled = true;
            return super.cancel();
        }
        /* (non-Javadoc)
         * @see java.util.concurrent.Future#isCancelled()
         */
        @Override
        public boolean isCancelled() {
            return cancelled;
        }
        /* (non-Javadoc)
         * @see java.util.concurrent.Future#cancel(boolean)
         */
        @Override
        public boolean cancel(final boolean mayInterruptIfRunning) {
            return cancel();
        }
        /* (non-Javadoc)
         * @see java.util.concurrent.Future#isDone()
         */
        @Override
        public boolean isDone() {
            return done;
        }
        /* (non-Javadoc)
         * @see java.util.concurrent.Future#get()
         */
        @Override
        public Object get() throws InterruptedException, ExecutionException {
            // Non-blocking contract: a pending task reports interruption
            // rather than waiting for completion.
            if (done) {
                return null;
            }
            throw new InterruptedException();
        }
        /* (non-Javadoc)
         * @see java.util.concurrent.Future#get(long, java.util.concurrent.TimeUnit)
         */
        @Override
        public Object get(final long timeout, final TimeUnit unit)
                throws InterruptedException, ExecutionException,
                TimeoutException {
            if (done) {
                return null;
            }
            throw new InterruptedException();
        }
    }
    // Cached wall-clock time, refreshed every getHeartbeatMillis() ms.
    private volatile long currentTimeMillis;
    // Set by close(); pending tasks cancel themselves on their next fire.
    private volatile boolean die;
    private final Timer timer;
    /**
     * Creates the default plantScheduler.
     */
    public JActorStTestPlantScheduler() {
        timer = SystemUtils.getTimer();
        currentTimeMillis = System.currentTimeMillis();
        timer.scheduleAtFixedRate(new MyTimerTask(new Runnable() {
            @Override
            public void run() {
                currentTimeMillis = System.currentTimeMillis();
            }
        }, false), getHeartbeatMillis(), getHeartbeatMillis());
    }
    /**
     * Controls how often currentTimeMillis is updated: every 500 milliseconds.
     *
     * @return The number of milliseconds between updates to currentTimeMillis.
     */
    protected long getHeartbeatMillis() {
        return 500;
    }
    /**
     * Determines the size of the scheduledThreadPool: 1.
     *
     * <p>NOTE(review): previous javadoc claimed 2, but the method returns 1.
     * This method is not referenced within this class (a single Timer is used
     * instead of a pool) -- confirm whether callers rely on it.
     *
     * @return Returns the number of threads in the scheduledThreadPool.
     */
    protected int getSchedulerPoolSize() {
        return 1;
    }
    @Override
    public double currentTimeMillis() {
        return currentTimeMillis;
    }
    @Override
    public ScheduledFuture<?> schedule(final Runnable runnable,
            final int _millisecondDelay) {
        final MyTimerTask result = new MyTimerTask(runnable, true);
        timer.schedule(result, _millisecondDelay);
        return result;
    }
    @Override
    public ScheduledFuture<?> scheduleAtFixedRate(final Runnable runnable,
            final int _millisecondDelay) {
        final MyTimerTask result = new MyTimerTask(runnable, false);
        timer.scheduleAtFixedRate(result, _millisecondDelay, _millisecondDelay);
        return result;
    }
    @Override
    public void close() {
        // No way to get the tasks from the Timer. :(
        die = true;
    }
    /* (non-Javadoc)
     * @see org.agilewiki.jactor2.core.plant.PlantScheduler#cancel(java.lang.Object)
     */
    @Override
    public void cancel(final Object task) {
        if (task == null) {
            throw new NullPointerException("task");
        }
        if (!(task instanceof ScheduledFuture)) {
            throw new IllegalArgumentException("task: " + task.getClass());
        }
        ((ScheduledFuture<?>) task).cancel(false);
    }
}
| skunkiferous/Util | jactor2-coreSt/src/test/java/org/agilewiki/jactor2/core/impl/JActorStTestPlantScheduler.java | Java | apache-2.0 | 5,403 |
package licenseutils
import (
"context"
"github.com/docker/licensing"
"github.com/docker/licensing/model"
)
type (
	// fakeLicensingClient is a hand-rolled test double for the licensing
	// client. Each method delegates to the matching optional func field when
	// it is set and returns zero values otherwise, so tests only need to stub
	// the calls they care about.
	fakeLicensingClient struct {
		loginViaAuthFunc func(ctx context.Context, username, password string) (authToken string, err error)
		getHubUserOrgsFunc func(ctx context.Context, authToken string) (orgs []model.Org, err error)
		getHubUserByNameFunc func(ctx context.Context, username string) (user *model.User, err error)
		verifyLicenseFunc func(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error)
		generateNewTrialSubscriptionFunc func(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error)
		listSubscriptionsFunc func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error)
		listSubscriptionsDetailsFunc func(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error)
		downloadLicenseFromHubFunc func(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error)
		parseLicenseFunc func(license []byte) (parsedLicense *model.IssuedLicense, err error)
		storeLicenseFunc func(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error
		loadLocalLicenseFunc func(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error)
		summarizeLicenseFunc func(*model.CheckResponse, string) *model.Subscription
	}
)

// LoginViaAuth delegates to loginViaAuthFunc when set.
func (c *fakeLicensingClient) LoginViaAuth(ctx context.Context, username, password string) (authToken string, err error) {
	if c.loginViaAuthFunc != nil {
		return c.loginViaAuthFunc(ctx, username, password)
	}
	return "", nil
}

// GetHubUserOrgs delegates to getHubUserOrgsFunc when set.
func (c *fakeLicensingClient) GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error) {
	if c.getHubUserOrgsFunc != nil {
		return c.getHubUserOrgsFunc(ctx, authToken)
	}
	return nil, nil
}

// GetHubUserByName delegates to getHubUserByNameFunc when set.
func (c *fakeLicensingClient) GetHubUserByName(ctx context.Context, username string) (user *model.User, err error) {
	if c.getHubUserByNameFunc != nil {
		return c.getHubUserByNameFunc(ctx, username)
	}
	return nil, nil
}

// VerifyLicense delegates to verifyLicenseFunc when set.
func (c *fakeLicensingClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
	if c.verifyLicenseFunc != nil {
		return c.verifyLicenseFunc(ctx, license)
	}
	return nil, nil
}

// GenerateNewTrialSubscription delegates to generateNewTrialSubscriptionFunc when set.
func (c *fakeLicensingClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
	if c.generateNewTrialSubscriptionFunc != nil {
		return c.generateNewTrialSubscriptionFunc(ctx, authToken, dockerID)
	}
	return "", nil
}

// ListSubscriptions delegates to listSubscriptionsFunc when set.
func (c *fakeLicensingClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {
	if c.listSubscriptionsFunc != nil {
		return c.listSubscriptionsFunc(ctx, authToken, dockerID)
	}
	return nil, nil
}

// ListSubscriptionsDetails delegates to listSubscriptionsDetailsFunc when set.
func (c *fakeLicensingClient) ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error) {
	if c.listSubscriptionsDetailsFunc != nil {
		return c.listSubscriptionsDetailsFunc(ctx, authToken, dockerID)
	}
	return nil, nil
}

// DownloadLicenseFromHub delegates to downloadLicenseFromHubFunc when set.
func (c *fakeLicensingClient) DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error) {
	if c.downloadLicenseFromHubFunc != nil {
		return c.downloadLicenseFromHubFunc(ctx, authToken, subscriptionID)
	}
	return nil, nil
}

// ParseLicense delegates to parseLicenseFunc when set.
func (c *fakeLicensingClient) ParseLicense(license []byte) (parsedLicense *model.IssuedLicense, err error) {
	if c.parseLicenseFunc != nil {
		return c.parseLicenseFunc(license)
	}
	return nil, nil
}

// StoreLicense delegates to storeLicenseFunc when set.
func (c *fakeLicensingClient) StoreLicense(ctx context.Context, dclnt licensing.WrappedDockerClient, licenses *model.IssuedLicense, localRootDir string) error {
	if c.storeLicenseFunc != nil {
		return c.storeLicenseFunc(ctx, dclnt, licenses, localRootDir)
	}
	return nil
}

// LoadLocalLicense delegates to loadLocalLicenseFunc when set.
func (c *fakeLicensingClient) LoadLocalLicense(ctx context.Context, dclnt licensing.WrappedDockerClient) (*model.Subscription, error) {
	if c.loadLocalLicenseFunc != nil {
		return c.loadLocalLicenseFunc(ctx, dclnt)
	}
	return nil, nil
}

// SummarizeLicense delegates to summarizeLicenseFunc when set.
func (c *fakeLicensingClient) SummarizeLicense(cr *model.CheckResponse, keyid string) *model.Subscription {
	if c.summarizeLicenseFunc != nil {
		return c.summarizeLicenseFunc(cr, keyid)
	}
	return nil
}
| cpuguy83/cli | internal/licenseutils/client_test.go | GO | apache-2.0 | 4,580 |
/* global QUnit */
// QUnit tests for the RTA "compVariantUpdate" command: verifies that execute()
// forwards the variant changes to SmartVariantManagementWriteAPI and that
// undo() reverts them, for both the plain Save scenario and the Manage Views
// scenario.
sap.ui.define([
	"sap/ui/core/Control",
	"sap/ui/fl/write/api/SmartVariantManagementWriteAPI",
	"sap/ui/fl/Layer",
	"sap/ui/rta/command/CommandFactory",
	"sap/ui/thirdparty/sinon-4"
], function(
	Control,
	SmartVariantManagementWriteAPI,
	Layer,
	CommandFactory,
	sinon
) {
	"use strict";
	var sandbox = sinon.createSandbox();
	QUnit.module("Given a control", {
		beforeEach: function() {
			this.oControl = new Control();
		},
		afterEach: function() {
			this.oControl.destroy();
			sandbox.restore();
		}
	}, function() {
		// Save scenario: a single variant's content is updated with
		// onlySave=true; execute must call updateVariant and clear the
		// control's "modified" flag, undo must revert and set it back.
		QUnit.test("Update in the Save scenario", function(assert) {
			var oUpdateCommand;
			var sVariantId = "variantId";
			var oContent = {foo: "bar"};
			// The bare sap.ui.core.Control has no comp-variant API, so the
			// required methods are stubbed onto the instance.
			var oUpdateControlStub = sandbox.stub();
			this.oControl.updateVariant = oUpdateControlStub;
			var oSetModifiedStub = sandbox.stub();
			this.oControl.setModified = oSetModifiedStub;
			var oUpdateFlAPIStub = sandbox.stub(SmartVariantManagementWriteAPI, "updateVariant");
			var oUndoVariantFlAPIStub = sandbox.stub(SmartVariantManagementWriteAPI, "revert");
			return CommandFactory.getCommandFor(this.oControl, "compVariantUpdate", {
				newVariantProperties: {
					variantId: {
						content: oContent
					}
				},
				onlySave: true
			}, {})
			.then(function(oCreatedCommand) {
				oUpdateCommand = oCreatedCommand;
				return oUpdateCommand.execute();
			}).then(function() {
				assert.equal(oUpdateFlAPIStub.callCount, 1, "the FL update function was called");
				var mExpectedProperties = {
					id: sVariantId,
					control: this.oControl,
					content: oContent,
					generator: "sap.ui.rta.command",
					command: "compVariantUpdate",
					layer: Layer.CUSTOMER
				};
				assert.deepEqual(oUpdateFlAPIStub.lastCall.args[0], mExpectedProperties, "the FL API was called with the correct properties");
				assert.equal(oSetModifiedStub.callCount, 1, "the setModified was called..");
				assert.equal(oSetModifiedStub.lastCall.args[0], false, "and set to false");
				return oUpdateCommand.undo();
			}.bind(this)).then(function() {
				assert.equal(oUndoVariantFlAPIStub.callCount, 1, "the undo function was called");
				assert.equal(oSetModifiedStub.callCount, 2, "the setModified was called again..");
				assert.equal(oSetModifiedStub.lastCall.args[0], true, "and set to true");
				// Redo: execute again must repeat the update.
				return oUpdateCommand.execute();
			}).then(function() {
				assert.equal(oUpdateFlAPIStub.callCount, 2, "the FL update function was called again");
				var mExpectedProperties = {
					id: sVariantId,
					control: this.oControl,
					content: oContent,
					generator: "sap.ui.rta.command",
					command: "compVariantUpdate",
					layer: Layer.CUSTOMER
				};
				assert.deepEqual(oUpdateFlAPIStub.lastCall.args[0], mExpectedProperties, "the FL API was called with the correct properties");
				assert.equal(oSetModifiedStub.callCount, 3, "the setModified was called again..");
				assert.equal(oSetModifiedStub.lastCall.args[0], false, "and set to false");
			}.bind(this));
		});
		// Manage Views scenario: one variant is deleted, two are updated and
		// the default variant changes; execute must fan out to the FL API and
		// mirror the results on the control, undo must revert all of it.
		QUnit.test("Update in the Manage Views scenario", function(assert) {
			var oUpdateCommand;
			var oUpdateControlStub = sandbox.stub();
			this.oControl.updateVariant = oUpdateControlStub;
			var oRemoveControlStub = sandbox.stub();
			this.oControl.removeVariant = oRemoveControlStub;
			var oAddControlStub = sandbox.stub();
			this.oControl.addVariant = oAddControlStub;
			var oSetDefaultControlStub = sandbox.stub();
			this.oControl.setDefaultVariantId = oSetDefaultControlStub;
			var oSetModifiedStub = sandbox.stub();
			this.oControl.setModified = oSetModifiedStub;
			// The FL stubs echo the variant id back so the control-side stubs
			// can be checked against it.
			var oUpdateFlAPIStub = sandbox.stub(SmartVariantManagementWriteAPI, "updateVariant").callsFake(function(mPropertyBag) {
				return mPropertyBag.id;
			});
			var oSetDefaultFlAPIStub = sandbox.stub(SmartVariantManagementWriteAPI, "setDefaultVariantId");
			var oRevertDefaultFlAPIStub = sandbox.stub(SmartVariantManagementWriteAPI, "revertSetDefaultVariantId");
			var oRemoveVariantFlAPIStub = sandbox.stub(SmartVariantManagementWriteAPI, "removeVariant");
			var oRevertFlAPIStub = sandbox.stub(SmartVariantManagementWriteAPI, "revert").callsFake(function(mPropertyBag) {
				return mPropertyBag.id;
			});
			// Shared assertions for execute(); also reused after undo+redo.
			function assertExecute(oControl) {
				assert.equal(oUpdateFlAPIStub.callCount, 2, "the FL update function was called twice");
				var mExpectedProperties1 = {
					id: "variant2",
					control: oControl,
					generator: "sap.ui.rta.command",
					command: "compVariantUpdate",
					layer: Layer.CUSTOMER,
					favorite: false
				};
				assert.deepEqual(oUpdateFlAPIStub.getCall(0).args[0], mExpectedProperties1, "the FL API was called with the correct properties 2");
				var mExpectedProperties2 = {
					id: "variant3",
					control: oControl,
					generator: "sap.ui.rta.command",
					command: "compVariantUpdate",
					layer: Layer.CUSTOMER,
					executeOnSelection: true,
					name: "newName",
					oldName: "oldName",
					favorite: true
				};
				assert.deepEqual(oUpdateFlAPIStub.getCall(1).args[0], mExpectedProperties2, "the FL API was called with the correct properties 3");
				assert.equal(oSetDefaultFlAPIStub.callCount, 1, "the FL API setDefault was called");
				assert.equal(oSetDefaultFlAPIStub.lastCall.args[0].defaultVariantId, "variant3", "the correct variant id was passed");
				assert.equal(oRemoveVariantFlAPIStub.callCount, 1, "the FL API removeVariant was called");
				assert.equal(oRemoveVariantFlAPIStub.lastCall.args[0].id, "variant1", "the correct variant id was passed");
				assert.equal(oUpdateControlStub.callCount, 2, "the control API updateVariant was called twice");
				assert.equal(oUpdateControlStub.getCall(0).args[0], "variant2", "with the return value of FL updateVariant");
				assert.equal(oUpdateControlStub.getCall(1).args[0], "variant3", "with the return value of FL updateVariant");
				assert.equal(oSetDefaultControlStub.callCount, 1, "the control API setDefault was called");
				assert.equal(oSetDefaultControlStub.lastCall.args[0], "variant3", "the correct variant id was passed");
				assert.equal(oRemoveControlStub.callCount, 1, "the control API removeVariant was called");
				assert.equal(oRemoveControlStub.lastCall.args[0].variantId, "variant1", "the correct variant id was passed");
			}
			return CommandFactory.getCommandFor(this.oControl, "compVariantUpdate", {
				newVariantProperties: {
					variant1: {
						executeOnSelection: false,
						deleted: true
					},
					variant2: {
						favorite: false
					},
					variant3: {
						executeOnSelection: true,
						name: "newName",
						oldName: "oldName",
						favorite: true
					}
				},
				newDefaultVariantId: "variant3",
				oldDefaultVariantId: "variant1"
			}, {})
			.then(function(oCreatedCommand) {
				oUpdateCommand = oCreatedCommand;
				return oUpdateCommand.execute();
			}).then(function() {
				assertExecute(this.oControl);
				return oUpdateCommand.undo();
			}.bind(this)).then(function() {
				assert.equal(oRevertFlAPIStub.callCount, 3, "the revert function was called thrice");
				assert.equal(oRevertFlAPIStub.getCall(0).args[0].id, "variant1", "the correct variant id was passed 1");
				assert.equal(oRevertFlAPIStub.getCall(1).args[0].id, "variant2", "the correct variant id was passed 2");
				assert.equal(oRevertFlAPIStub.getCall(2).args[0].id, "variant3", "the correct variant id was passed 3");
				assert.equal(oRevertDefaultFlAPIStub.callCount, 1, "the revertSetDefaultVariantId function was called once");
				assert.equal(oAddControlStub.lastCall.args[0], "variant1", "the correct variant was added");
				assert.equal(oAddControlStub.callCount, 1, "the addVariant function on the control was called once");
				assert.equal(oAddControlStub.lastCall.args[0], "variant1", "the correct variant was added");
				assert.equal(oUpdateControlStub.callCount, 4, "the updateVariant function on the control was called twice");
				assert.equal(oUpdateControlStub.getCall(2).args[0], "variant2", "the correct variant was updated 1");
				assert.equal(oUpdateControlStub.getCall(3).args[0], "variant3", "the correct variant was updated 2");
				// Reset call counts so assertExecute can be reused for redo.
				sandbox.resetHistory();
				return oUpdateCommand.execute();
			}).then(function() {
				assertExecute(this.oControl);
			}.bind(this));
		});
	});
});
}); | SAP/openui5 | src/sap.ui.rta/test/sap/ui/rta/qunit/command/compVariant/CompVariantUpdate.qunit.js | JavaScript | apache-2.0 | 8,251 |
#!/astro/apps/pkg/python/bin/python
import pyfits
import SDSSfits
import numpy
from tools import create_fits
import os
def main(OUT_DIR = "/astro/net/scratch1/vanderplas/SDSS_GAL_RESTFRAME/",
DIR_ROOT = "/astro/net/scratch1/sdssspec/spectro/1d_26/*/1d",
LINES_FILE = "LINES_SHORT.TXT",
z_min = 0.0, #zmax is set such that SII lines will
z_max = 0.36, # fall in range of 3830 to 9200 angstroms
rebin_coeff0 = 3.583, # rebin parameters give a wavelength
rebin_coeff1 = 0.0002464, # range from 3830A to 9200A
rebin_length = 1000,
remove_sky_absorption = True,
normalize = True):
LINES = []
KEYS = ['TARGET','Z','Z_ERR','SPEC_CLN','MAG_G','MAG_R','MAG_I','N_BAD_PIX']
if LINES_FILE is not None:
for line in open(LINES_FILE):
line = line.split()
if len(line)==0:continue
W = float(line[0])
if W<3000 or W>7000:continue
LINES.append('%.2f'%W)
for info in ('flux','dflux','width','dwidth','nsigma'):
KEYS.append('%.2f_%s' % (W,info) )
for SET in os.listdir(DIR_ROOT.split('*')[0]):
if not SET.isdigit():
continue
DIR = DIR_ROOT.replace('*',SET)
if not os.path.exists(DIR):
continue
OUT_FILE = os.path.join(OUT_DIR,SET+'.dat')
print 'writing %s' % os.path.join(OUT_DIR,SET+'.dat')
col_dict = dict([(KEY,[]) for KEY in KEYS])
spec_list = []
NUMS = []
for F in os.listdir(DIR):
if not F.endswith('.fit'): continue
num = int( F.strip('.fit').split('-')[-1] )
if num in NUMS:
#print " - already measured: skipping %s" % F
continue
#open hdu file and glean necessary info
SPEC = SDSSfits.SDSSfits(os.path.join(DIR,F),LINES)
if SPEC.D['SPEC_CLN'] not in (1,2,3,4):
continue
if SPEC.z<z_min:
#print " - negative z: skipping %s" % F
continue
if SPEC.z>z_max:
#print " - z>z_max: skipping %s" % F
continue
if SPEC.numlines == 0:
#print " - no line measurements: skipping %s" % F
continue
if remove_sky_absorption:
#cover up strong oxygen absorption
SPEC.remove_O_lines()
#move to restframe, rebin, and normalize
SPEC.move_to_restframe()
try:
SPEC = SPEC.rebin(rebin_coeff0,rebin_coeff1,rebin_length)
except:
print " rebin failed. Skipping %s" % F
continue
if normalize:
try:
SPEC.normalize()
except:
print " normalize failed. Skipping %s" % F
continue
if min(SPEC.spectrum) < -4*max(SPEC.spectrum):
print " goes too far negative. Skipping %s" % F
NUMS.append(num)
spec_list.append(SPEC.spectrum.tolist())
for KEY in KEYS:
col_dict[KEY].append(SPEC.D[KEY])
del SPEC
if os.path.exists(OUT_FILE):
os.system('rm %s' % OUT_FILE)
col_dict['coeff0'] = rebin_coeff0
col_dict['coeff1'] = rebin_coeff1
create_fits(OUT_FILE,numpy.asarray( spec_list ),**col_dict)
print " - wrote %i spectra" % len(NUMS)
if __name__ == '__main__':
    # Run the full extraction with line measurements disabled
    # (LINES_FILE=None), no sky-absorption masking, and no normalization
    # of the rebinned spectra.
    main(OUT_DIR = "/astro/net/scratch1/vanderplas/SDSS_GAL_RESTFRAME/",
         DIR_ROOT = "/astro/net/scratch1/sdssspec/spectro/1d_26/*/1d",
         #LINES_FILE = "LINES_SHORT.TXT",
         LINES_FILE = None,
         z_min = 0.0, #zmax is set such that SII lines will
         z_max = 0.36, # fall in range of 3830 to 9200 angstroms
         rebin_coeff0 = 3.583, # rebin parameters give a wavelength
         rebin_coeff1 = 0.0002464, # range from 3830A to 9200A
         rebin_length = 1000,
         remove_sky_absorption = False,
         normalize = False)
| excelly/xpy-ml | sdss/jake_lib/make_condensed_fits.py | Python | apache-2.0 | 4,199 |
# -*- coding: utf-8 -*-
'''
Common data structures shared across this package.

container.py :
    NetInterface : describes one virtual NIC of a container
        -name     : name of the virtual NIC
        -hostVeth : name of the host-side veth peer for this NIC
        -ip       : IP address
        -mac      : MAC address
        -vethMac  : MAC address of the host-side veth
        + NetInterface::create : create a virtual NIC, returns a NetInterface object
            container : target container
            vName     : name of the container-side peer
            h_vName   : name of the host-side peer
    Container : data structure describing a container; can be persisted
        -host          : host the container belongs to
        -pid           : pid of the container on its host
        -id            : container ID assigned by the docker daemon
        -ifaces [list] : the container's virtual NICs, a collection of Interface objects
        -netns         : the container's network namespace, a NetworkNamespace instance
        -image         : name of the image used to create the container
        -dataDirectory : path where container data is stored
        -createTime    : creation time
        -state         : current running state
        -belongsTo     : owning user
        +attachToNetworkNamespace : join a network namespace
            netns : the namespace object to join
        +detachNetworkNamespace : leave a network namespace
            netns : the namespace object to leave
net.py :
    NetworkNamespace : data structure describing a network namespace
        -uid          : unique ID, generated with a uuid function at init time
        -addrs [list] : IPs owned by the namespace (possibly several), CIDR addresses
        -containers   : containers that have joined the network
        -initHost     : host the namespace belonged to when it was initialized
        -createTime   : creation time
        -belongsTo    : owning user
utils.py:
    Host : data structure describing a host
        -mac         : MAC address
        -transportIp : IP used for data transport
        -containers  : containers on the host, a list of Container objects
        -proxys      : list of container-creation proxies on the host
        +getConcreteProxy : fetch a container-creation proxy of a specific type
            ProxyClass : the proxy type
    Switch : describes a virtual switch installed on a host
        -host                : owning host
        -portsToContainers   : mapping between switch ports and containers
        -portsInfo           : per-port information
        -bridgeName          : bridge name
exceptions.py :
    ContainerCreatorTypeInvalidError : container creator does not match the
                                       container-creation proxy type
tools.py :
'''
package com.example.android.sunshine.app.widget;
import android.annotation.TargetApi;
import android.app.PendingIntent;
import android.appwidget.AppWidgetManager;
import android.appwidget.AppWidgetProvider;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.os.Build;
import android.support.annotation.NonNull;
import android.support.v4.app.TaskStackBuilder;
import android.util.Log;
import android.widget.RemoteViews;
import com.example.android.sunshine.app.DetailActivity;
import com.example.android.sunshine.app.MainActivity;
import com.example.android.sunshine.app.R;
import com.example.android.sunshine.app.Utility;
import com.example.android.sunshine.app.sync.SunshineSyncAdapter;
/**
* Provider for a scrollable weather detail widget
*/
@TargetApi(Build.VERSION_CODES.HONEYCOMB)
public class DetailWidgetProvider extends AppWidgetProvider {
public final String LOG_TAG = DetailWidgetProvider.class.getSimpleName();
public void onUpdate(Context context, AppWidgetManager appWidgetManager, int[] appWidgetIds) {
// Perform this loop procedure for each App Widget that belongs to this provider
for (int appWidgetId : appWidgetIds) {
RemoteViews views = new RemoteViews(context.getPackageName(), R.layout.widget_detail);
// Create an Intent to launch MainActivity
Intent intent = new Intent(context, MainActivity.class);
PendingIntent pendingIntent = PendingIntent.getActivity(context, 0, intent, 0);
views.setOnClickPendingIntent(R.id.widget, pendingIntent);
String location = Utility.getCurrentAddress(context);
Log.d(LOG_TAG, "위젯에서 현재 위치는:" + location);
views.setTextViewText(R.id.widget_item_address_textview, location);
// Set up the collection
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
setRemoteAdapter(context, views);
} else {
setRemoteAdapterV11(context, views);
}
boolean useDetailActivity = context.getResources()
.getBoolean(R.bool.use_detail_activity);
Intent clickIntentTemplate = useDetailActivity
? new Intent(context, DetailActivity.class)
: new Intent(context, MainActivity.class);
PendingIntent clickPendingIntentTemplate = TaskStackBuilder.create(context)
.addNextIntentWithParentStack(clickIntentTemplate)
.getPendingIntent(0, PendingIntent.FLAG_UPDATE_CURRENT);
views.setPendingIntentTemplate(R.id.widget_list, clickPendingIntentTemplate);
views.setEmptyView(R.id.widget_list, R.id.widget_empty);
// Tell the AppWidgetManager to perform an update on the current app widget
appWidgetManager.updateAppWidget(appWidgetId, views);
}
}
@Override
public void onReceive(@NonNull Context context, @NonNull Intent intent) {
super.onReceive(context, intent);
if (SunshineSyncAdapter.ACTION_DATA_UPDATED.equals(intent.getAction())) {
AppWidgetManager appWidgetManager = AppWidgetManager.getInstance(context);
int[] appWidgetIds = appWidgetManager.getAppWidgetIds(
new ComponentName(context, getClass()));
appWidgetManager.notifyAppWidgetViewDataChanged(appWidgetIds, R.id.widget_list);
}
}
/**
* Sets the remote adapter used to fill in the list items
*
* @param views RemoteViews to set the RemoteAdapter
*/
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH)
private void setRemoteAdapter(Context context, @NonNull final RemoteViews views) {
views.setRemoteAdapter(R.id.widget_list,
new Intent(context, DetailWidgetRemoteViewsService.class));
}
/**
* Sets the remote adapter used to fill in the list items
*
* @param views RemoteViews to set the RemoteAdapter
*/
@SuppressWarnings("deprecation")
private void setRemoteAdapterV11(Context context, @NonNull final RemoteViews views) {
views.setRemoteAdapter(0, R.id.widget_list,
new Intent(context, DetailWidgetRemoteViewsService.class));
}
}
| chayoungrock/weather | app/src/main/java/com/example/android/sunshine/app/widget/DetailWidgetProvider.java | Java | apache-2.0 | 4,337 |
/*
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Protractor configuration file, see link for more information
// https://github.com/angular/protractor/blob/master/lib/config.ts
const { SpecReporter } = require('jasmine-spec-reporter');

exports.config = {
  // Max time (ms) to wait for async script execution in the browser.
  allScriptsTimeout: 11000,
  // Where the end-to-end specs live.
  specs: [
    './e2e/**/*.e2e-spec.ts'
  ],
  capabilities: {
    'browserName': 'chrome'
  },
  // Talk to the browser driver directly instead of via a Selenium server.
  directConnect: true,
  // App under test; specs use URLs relative to this.
  baseUrl: 'http://localhost:4200/',
  framework: 'jasmine',
  jasmineNodeOpts: {
    showColors: true,
    defaultTimeoutInterval: 30000,
    // Silence jasmine's default dot reporter; SpecReporter is added below.
    print: function() {}
  },
  beforeLaunch: function() {
    // Compile TypeScript specs on the fly with the e2e tsconfig.
    require('ts-node').register({
      project: 'e2e/tsconfig.e2e.json'
    });
  },
  onPrepare() {
    jasmine.getEnv().addReporter(new SpecReporter({ spec: { displayStacktrace: true } }));
  }
};
| gravitee-io/graviteeio-access-management | gravitee-am-ui/protractor.conf.js | JavaScript | apache-2.0 | 1,386 |
package com.myweather.app.activity;
import android.content.Context;
import android.util.AttributeSet;
import android.view.LayoutInflater;
import android.widget.LinearLayout;

import com.myweather.app.R;
public class BottomLayout extends LinearLayout{

	/**
	 * Creates the bottom menu programmatically and inflates its layout
	 * into this view group.
	 *
	 * @param context context used to resolve the layout inflater
	 */
	public BottomLayout(Context context) {
		super(context);
		LayoutInflater.from(context).inflate(R.layout.bottom_menu, this);
	}

	/**
	 * Constructor required so the view can be inflated from XML; without
	 * it, referencing {@code BottomLayout} in a layout file crashes.
	 *
	 * @param context context used to resolve the layout inflater
	 * @param attrs   XML attributes, forwarded to {@link LinearLayout}
	 */
	public BottomLayout(Context context, AttributeSet attrs) {
		super(context, attrs);
		LayoutInflater.from(context).inflate(R.layout.bottom_menu, this);
	}
}
| taohaox/MyWeather | src/com/myweather/app/activity/BottomLayout.java | Java | apache-2.0 | 370 |
package org.jasig.cas.ticket.registry.support;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.orm.jpa.SharedEntityManagerCreator;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.TransactionStatus;
import org.springframework.transaction.support.TransactionCallback;
import org.springframework.transaction.support.TransactionTemplate;
import javax.persistence.EntityManagerFactory;
import javax.sql.DataSource;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import static org.junit.Assert.*;
/**
* Unit test for {@link JpaLockingStrategy}.
*
* @author Marvin S. Addison
* @since 3.0.0
*/
public class JpaLockingStrategyTests {
/** Number of clients contending for lock in concurrent test. */
private static final int CONCURRENT_SIZE = 13;
/** Logger instance. */
private final Logger logger = LoggerFactory.getLogger(getClass());
private PlatformTransactionManager txManager;
private EntityManagerFactory factory;
private DataSource dataSource;
@Before
public void setup() {
final ClassPathXmlApplicationContext ctx = new
ClassPathXmlApplicationContext("classpath:/jpaSpringContext.xml");
this.factory = ctx.getBean("ticketEntityManagerFactory", EntityManagerFactory.class);
this.txManager = ctx.getBean("ticketTransactionManager", PlatformTransactionManager.class);
this.dataSource = ctx.getBean("dataSourceTicket", DataSource.class);
}
/**
* Test basic acquire/release semantics.
*
* @throws Exception On errors.
*/
@Test
public void verifyAcquireAndRelease() throws Exception {
final String appId = "basic";
final String uniqueId = appId + "-1";
final LockingStrategy lock = newLockTxProxy(appId, uniqueId, JpaLockingStrategy.DEFAULT_LOCK_TIMEOUT);
try {
assertTrue(lock.acquire());
assertEquals(uniqueId, getOwner(appId));
lock.release();
assertNull(getOwner(appId));
} catch (final Exception e) {
logger.debug("testAcquireAndRelease produced an error", e);
fail("testAcquireAndRelease failed");
}
}
/**
* Test lock expiration.
*
* @throws Exception On errors.
*/
@Test
public void verifyLockExpiration() throws Exception {
final String appId = "expquick";
final String uniqueId = appId + "-1";
final LockingStrategy lock = newLockTxProxy(appId, uniqueId, 1);
try {
assertTrue(lock.acquire());
assertEquals(uniqueId, getOwner(appId));
assertFalse(lock.acquire());
Thread.sleep(1500);
assertTrue(lock.acquire());
assertEquals(uniqueId, getOwner(appId));
lock.release();
assertNull(getOwner(appId));
} catch (final Exception e) {
logger.debug("testLockExpiration produced an error", e);
fail("testLockExpiration failed");
}
}
/**
* Verify non-reentrant behavior.
*/
@Test
public void verifyNonReentrantBehavior() {
final String appId = "reentrant";
final String uniqueId = appId + "-1";
final LockingStrategy lock = newLockTxProxy(appId, uniqueId, JpaLockingStrategy.DEFAULT_LOCK_TIMEOUT);
try {
assertTrue(lock.acquire());
assertEquals(uniqueId, getOwner(appId));
assertFalse(lock.acquire());
lock.release();
assertNull(getOwner(appId));
} catch (final Exception e) {
logger.debug("testNonReentrantBehavior produced an error", e);
fail("testNonReentrantBehavior failed.");
}
}
/**
* Test concurrent acquire/release semantics.
*/
@Test
public void verifyConcurrentAcquireAndRelease() throws Exception {
final ExecutorService executor = Executors.newFixedThreadPool(CONCURRENT_SIZE);
try {
testConcurrency(executor, getConcurrentLocks("concurrent-new"));
} catch (final Exception e) {
logger.debug("testConcurrentAcquireAndRelease produced an error", e);
fail("testConcurrentAcquireAndRelease failed.");
} finally {
executor.shutdownNow();
}
}
/**
* Test concurrent acquire/release semantics for existing lock.
*/
@Test
public void verifyConcurrentAcquireAndReleaseOnExistingLock() throws Exception {
final LockingStrategy[] locks = getConcurrentLocks("concurrent-exists");
locks[0].acquire();
locks[0].release();
final ExecutorService executor = Executors.newFixedThreadPool(CONCURRENT_SIZE);
try {
testConcurrency(executor, locks);
} catch (final Exception e) {
logger.debug("testConcurrentAcquireAndReleaseOnExistingLock produced an error", e);
fail("testConcurrentAcquireAndReleaseOnExistingLock failed.");
} finally {
executor.shutdownNow();
}
}
private LockingStrategy[] getConcurrentLocks(final String appId) {
final LockingStrategy[] locks = new LockingStrategy[CONCURRENT_SIZE];
for (int i = 1; i <= locks.length; i++) {
locks[i - 1] = newLockTxProxy(appId, appId + '-' + i, JpaLockingStrategy.DEFAULT_LOCK_TIMEOUT);
}
return locks;
}
private LockingStrategy newLockTxProxy(final String appId, final String uniqueId, final int ttl) {
final JpaLockingStrategy lock = new JpaLockingStrategy();
lock.entityManager = SharedEntityManagerCreator.createSharedEntityManager(factory);
lock.setApplicationId(appId);
lock.setUniqueId(uniqueId);
lock.setLockTimeout(ttl);
return (LockingStrategy) Proxy.newProxyInstance(
JpaLockingStrategy.class.getClassLoader(),
new Class[] {LockingStrategy.class},
new TransactionalLockInvocationHandler(lock, this.txManager));
}
private String getOwner(final String appId) {
final JdbcTemplate simpleJdbcTemplate = new JdbcTemplate(dataSource);
final List<Map<String, Object>> results = simpleJdbcTemplate.queryForList(
"SELECT unique_id FROM locks WHERE application_id=?", appId);
if (results.isEmpty()) {
return null;
}
return (String) results.get(0).get("unique_id");
}
private void testConcurrency(final ExecutorService executor, final LockingStrategy[] locks) throws Exception {
final List<Locker> lockers = new ArrayList<>(locks.length);
for (int i = 0; i < locks.length; i++) {
lockers.add(new Locker(locks[i]));
}
int lockCount = 0;
for (final Future<Boolean> result : executor.invokeAll(lockers)) {
if (result.get()) {
lockCount++;
}
}
assertTrue("Lock count should be <= 1 but was " + lockCount, lockCount <= 1);
final List<Releaser> releasers = new ArrayList<>(locks.length);
for (int i = 0; i < locks.length; i++) {
releasers.add(new Releaser(locks[i]));
}
int releaseCount = 0;
for (final Future<Boolean> result : executor.invokeAll(lockers)) {
if (result.get()) {
releaseCount++;
}
}
assertTrue("Release count should be <= 1 but was " + releaseCount, releaseCount <= 1);
}
private static class TransactionalLockInvocationHandler implements InvocationHandler {
private final Logger logger = LoggerFactory.getLogger(this.getClass());
private final JpaLockingStrategy jpaLock;
private final PlatformTransactionManager txManager;
TransactionalLockInvocationHandler(final JpaLockingStrategy lock,
final PlatformTransactionManager txManager) {
jpaLock = lock;
this.txManager = txManager;
}
public JpaLockingStrategy getLock() {
return this.jpaLock;
}
@Override
public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
return new TransactionTemplate(txManager).execute(new TransactionCallback<Object>() {
@Override
public Object doInTransaction(final TransactionStatus status) {
try {
final Object result = method.invoke(jpaLock, args);
jpaLock.entityManager.flush();
logger.debug("Performed {} on {}", method.getName(), jpaLock);
return result;
// Force result of transaction to database
} catch (final Exception e) {
throw new RuntimeException("Transactional method invocation failed.", e);
}
}
});
}
}
private static class Locker implements Callable<Boolean> {
private final Logger logger = LoggerFactory.getLogger(this.getClass());
private final LockingStrategy lock;
Locker(final LockingStrategy l) {
lock = l;
}
@Override
public Boolean call() throws Exception {
try {
return lock.acquire();
} catch (final Exception e) {
logger.debug("{} failed to acquire lock", lock, e);
return false;
}
}
}
private static class Releaser implements Callable<Boolean> {
private final Logger logger = LoggerFactory.getLogger(this.getClass());
private final LockingStrategy lock;
Releaser(final LockingStrategy l) {
lock = l;
}
@Override
public Boolean call() throws Exception {
try {
lock.release();
return true;
} catch (final Exception e) {
logger.debug("{} failed to release lock", lock, e);
return false;
}
}
}
}
| moghaddam/cas | cas-server-support-jpa-ticket-registry/src/test/java/org/jasig/cas/ticket/registry/support/JpaLockingStrategyTests.java | Java | apache-2.0 | 10,675 |
package com.planet_ink.coffee_mud.MOBS;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2000-2022 Lee H. Fox
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
public class DrowElf extends StdMOB
{
@Override
public String ID()
{
return "DrowElf";
}
public static final int MALE = 0;
public static final int FEMALE = 1;
public int darkDown=4;
public DrowElf()
{
super();
final Random randomizer = new Random(System.currentTimeMillis());
basePhyStats().setLevel(4 + Math.abs(randomizer.nextInt() % 7));
final int gender = Math.abs(randomizer.nextInt() % 2);
String sex = null;
if (gender == MALE)
sex = "male";
else
sex = "female";
// ===== set the basics
_name="a Drow Elf";
setDescription("a " + sex + " Drow Fighter");
setDisplayText("The drow is armored in black chain mail and carrying a nice arsenal of weapons");
baseState.setHitPoints(CMLib.dice().roll(basePhyStats().level(),20,basePhyStats().level()));
setMoney((int)Math.round(CMath.div((50 * basePhyStats().level()),(randomizer.nextInt() % 10 + 1))));
basePhyStats.setWeight(70 + Math.abs(randomizer.nextInt() % 20));
setWimpHitPoint(5);
basePhyStats().setSpeed(2.0);
basePhyStats().setSensesMask(PhyStats.CAN_SEE_DARK | PhyStats.CAN_SEE_INFRARED);
if(gender == MALE)
baseCharStats().setStat(CharStats.STAT_GENDER,'M');
else
baseCharStats().setStat(CharStats.STAT_GENDER,'F');
baseCharStats().setStat(CharStats.STAT_STRENGTH,12 + Math.abs(randomizer.nextInt() % 6));
baseCharStats().setStat(CharStats.STAT_INTELLIGENCE,14 + Math.abs(randomizer.nextInt() % 6));
baseCharStats().setStat(CharStats.STAT_WISDOM,13 + Math.abs(randomizer.nextInt() % 6));
baseCharStats().setStat(CharStats.STAT_DEXTERITY,15 + Math.abs(randomizer.nextInt() % 6));
baseCharStats().setStat(CharStats.STAT_CONSTITUTION,12 + Math.abs(randomizer.nextInt() % 6));
baseCharStats().setStat(CharStats.STAT_CHARISMA,13 + Math.abs(randomizer.nextInt() % 6));
baseCharStats().setMyRace(CMClass.getRace("Elf"));
baseCharStats().getMyRace().startRacing(this,false);
recoverMaxState();
resetToMaxState();
recoverPhyStats();
recoverCharStats();
}
@Override
public boolean tick(final Tickable ticking, final int tickID)
{
if((!amDead())&&(tickID==Tickable.TICKID_MOB))
{
if (isInCombat())
{
if((--darkDown)<=0)
{
darkDown=4;
castDarkness();
}
}
}
return super.tick(ticking,tickID);
}
protected boolean castDarkness()
{
if(this.location()==null)
return true;
if(CMLib.flags().isInDark(this.location()))
return true;
Ability dark=CMClass.getAbility("Spell_Darkness");
dark.setProficiency(100);
dark.setSavable(false);
if(this.fetchAbility(dark.ID())==null)
this.addAbility(dark);
else
dark=this.fetchAbility(dark.ID());
if(dark!=null)
dark.invoke(this,null,true,0);
return true;
}
}
| bozimmerman/CoffeeMud | com/planet_ink/coffee_mud/MOBS/DrowElf.java | Java | apache-2.0 | 4,315 |
/**
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gravitee.am.common.event;
/**
* @author Jeoffrey HAEYAERT (jeoffrey.haeyaert at graviteesource.com)
* @author GraviteeSource Team
*/
public interface AlertEventKeys {

    // Identifiers of event-enrichment processors.
    String PROCESSOR_GEOIP = "geoip";
    String PROCESSOR_USERAGENT = "useragent";

    // Keys describing the node/gateway context an alert event originates from.
    String CONTEXT_NODE_ID = "node.id";
    String CONTEXT_NODE_HOSTNAME = "node.hostname";
    String CONTEXT_NODE_APPLICATION = "node.application";
    String CONTEXT_GATEWAY_PORT = "gateway.port";

    // Property keys attached to individual alert events.
    String PROPERTY_DOMAIN = "domain";
    String PROPERTY_APPLICATION = "application";
    String PROPERTY_USER = "user";
    String PROPERTY_IP = "ip";
    String PROPERTY_USER_AGENT = "user_agent";
    String PROPERTY_TRANSACTION_ID = "transaction_id";
    String PROPERTY_AUTHENTICATION_STATUS = "authentication.status";

    // Event type identifier.
    String TYPE_AUTHENTICATION = "AUTHENTICATION";
}
| gravitee-io/graviteeio-access-management | gravitee-am-common/src/main/java/io/gravitee/am/common/event/AlertEventKeys.java | Java | apache-2.0 | 1,476 |
window.onload = function() {
    // A small subtractive synth voice: saw -> low-pass filter -> gain,
    // with LFOs modulating pitch and filter cutoff and an ADSR on the gain.
    var Synth = function(audiolet) {
        // AudioletGroup(audiolet, numInputs=0, numOutputs=1)
        AudioletGroup.apply(this, [audiolet, 0, 1]);
        // Basic wave
        this.saw = new Saw(audiolet, 100);

        // Frequency LFO
        this.frequencyLFO = new Sine(audiolet, 2);
        // NOTE(review): MulAdd appears to scale then offset its input
        // (value * 10 + 100) — confirm against the Audiolet docs.
        this.frequencyMA = new MulAdd(audiolet, 10, 100);

        // Filter
        this.filter = new LowPassFilter(audiolet, 1000);

        // Filter LFO
        this.filterLFO = new Sine(audiolet, 8);
        this.filterMA = new MulAdd(audiolet, 900, 1000);

        // Gain envelope
        this.gain = new Gain(audiolet);
        this.env = new ADSREnvelope(audiolet,
                                    1, // Gate
                                    1.5, // Attack
                                    0.2, // Decay
                                    0.9, // Sustain
                                    2); // Release

        // Main signal path
        this.saw.connect(this.filter);
        this.filter.connect(this.gain);
        this.gain.connect(this.outputs[0]);

        // Frequency LFO
        this.frequencyLFO.connect(this.frequencyMA);
        this.frequencyMA.connect(this.saw);

        // Filter LFO (second argument selects the filter's cutoff input)
        this.filterLFO.connect(this.filterMA);
        this.filterMA.connect(this.filter, 0, 1);

        // Envelope drives the gain's amplitude input
        this.env.connect(this.gain, 0, 1);
    };
    extend(Synth, AudioletGroup);

    var audiolet = new Audiolet();
    var synth = new Synth(audiolet);

    // Patterns stepped by the scheduler every 2 beats: bass note sequence,
    // random filter-LFO rate, and an alternating on/off gate.
    var frequencyPattern = new PSequence([55, 55, 98, 98, 73, 73, 98, 98],
                                         Infinity);
    var filterLFOPattern = new PChoose([2, 4, 6, 8], Infinity);
    var gatePattern = new PSequence([1, 0], Infinity);

    var patterns = [frequencyPattern, filterLFOPattern, gatePattern];
    audiolet.scheduler.play(patterns, 2,
        function(frequency, filterLFOFrequency, gate) {
            this.frequencyMA.add.setValue(frequency);
            this.filterLFO.frequency.setValue(filterLFOFrequency);
            this.env.gate.setValue(gate);
        }.bind(synth)
    );

    synth.connect(audiolet.output);
};
| accraze/Audiolet | examples/synth/js/synth.js | JavaScript | apache-2.0 | 2,107 |
package com.twu.biblioteca;
import org.junit.Test;
import static org.junit.Assert.*;
public class MovieTest {
String title = "random title";
int year = 2042;
String director = "random author";
int rating = 6;
Movie testMovie = new Movie(title,year,director,rating);
@Test
public void testMovieConstructor() {
assertEquals(title,testMovie.getTitle());
assertEquals(year,testMovie.getYear());
assertEquals(director, testMovie.getDirector());
assertEquals(rating,testMovie.getRating());
}
@Test
public void testSetAvailable() {
testMovie.setAvailable(false);
assertFalse(testMovie.getAvailability());
testMovie.setAvailable(true);
assertTrue(testMovie.getAvailability());
}
} | Sebastian-henn/twu-biblioteca-sebastian-henn | test/com/twu/biblioteca/MovieTest.java | Java | apache-2.0 | 815 |
package com.twu.biblioteca.menus;
import com.twu.biblioteca.options.*;
import java.util.ArrayList;
/**
* Created by aloysiusang on 17/6/15.
*/
public class UserMainMenu extends MainMenu {

    /**
     * Builds the main menu shown to a regular (non-librarian) user.
     */
    public UserMainMenu() {
        // FIX: replaced double-brace initialization, which creates a
        // throwaway anonymous ArrayList subclass, with a plain list builder.
        super(userOptions());
    }

    /** Returns the ordered set of options available to a user. */
    private static ArrayList<MainMenuOption> userOptions() {
        final ArrayList<MainMenuOption> options = new ArrayList<MainMenuOption>();
        options.add(new ListBooksOption());
        options.add(new CheckOutBookOption());
        options.add(new ReturnBookOption());
        options.add(new ListMoviesOption());
        options.add(new CheckOutMovieOption());
        options.add(new ReturnMovieOption());
        options.add(new UserInformationOption());
        options.add(new QuitOption());
        return options;
    }
}
| aloysiusang/twu-biblioteca-aloysiusang | src/com/twu/biblioteca/menus/UserMainMenu.java | Java | apache-2.0 | 624 |
package com.lzh.mdzhihudaily_mvp.presenter;
import android.support.annotation.NonNull;
import com.lzh.mdzhihudaily_mvp.contract.ThemeDailyContract;
import com.lzh.mdzhihudaily_mvp.model.DataRepository;
import com.lzh.mdzhihudaily_mvp.model.Entity.ThemeNews;
import rx.Subscriber;
import rx.Subscription;
/**
* @author lzh
* @desc:
* @date Created on 2017/3/5 23:59
* @github: https://github.com/lisuperhong
*/
public class ThemeDailyPresenter implements ThemeDailyContract.Presenter {

    private final ThemeDailyContract.View themeDailyView;
    private final int themeId;
    private Subscription subscription;

    /**
     * @param view    the attached view, never null
     * @param themeId id of the theme whose news this presenter loads
     */
    public ThemeDailyPresenter(@NonNull ThemeDailyContract.View view, int themeId) {
        themeDailyView = view;
        this.themeId = themeId;
    }

    /** Initial load: shows the loading indicator and fetches the theme news. */
    @Override
    public void start() {
        themeDailyView.showLoading();
        getThemeNews(false);
    }

    /** Pull-to-refresh load: fetches again and stops the refresh layout when done. */
    @Override
    public void refreshData() {
        getThemeNews(true);
    }

    /**
     * Fetches the theme news, cancelling any in-flight request first.
     *
     * @param isRefresh true when triggered by a refresh (controls which
     *                  loading indicator is dismissed on completion)
     */
    private void getThemeNews(final boolean isRefresh) {
        unsubscript();
        subscription = DataRepository.getInstance()
                .getThemeNews(themeId)
                .subscribe(new Subscriber<ThemeNews>() {
                    @Override
                    public void onCompleted() {
                        finishLoading(isRefresh);
                    }

                    @Override
                    public void onError(Throwable e) {
                        finishLoading(isRefresh);
                    }

                    @Override
                    public void onNext(ThemeNews themeNews) {
                        themeDailyView.setData(themeNews);
                    }
                });
    }

    // FIX: the identical completion/error handling was duplicated in
    // onCompleted and onError; hoisted into one helper.
    private void finishLoading(boolean isRefresh) {
        if (isRefresh) {
            themeDailyView.stopRefreshLayout();
        } else {
            themeDailyView.hideLoading();
        }
    }

    /** Cancels the current subscription, if any. */
    @Override
    public void unsubscript() {
        if (subscription != null && !subscription.isUnsubscribed()) {
            subscription.unsubscribe();
        }
    }
}
| lisuperhong/MDZhihuDaily | app/src/main/java/com/lzh/mdzhihudaily_mvp/presenter/ThemeDailyPresenter.java | Java | apache-2.0 | 2,219 |
/*
* ******************************************************************************
* Copyright 2014-2016 Spectra Logic Corporation. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use
* this file except in compliance with the License. A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file.
* This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
* ****************************************************************************
*/
using System;
namespace Ds3.Runtime
{
    /// <summary>
    /// Exception type raised for DS3 request errors, carrying a message and
    /// optionally the underlying cause.
    /// </summary>
    public class Ds3RequestException : Exception
    {
        /// <summary>Creates the exception with a descriptive message.</summary>
        public Ds3RequestException(string message)
            : base(message)
        {
        }

        /// <summary>
        /// Creates the exception with a descriptive message and the
        /// exception that caused it.
        /// </summary>
        public Ds3RequestException(string message, Exception innerException)
            : base(message, innerException)
        {
        }
    }
}
| rpmoore/ds3_net_sdk | Ds3/Runtime/Ds3RequestException.cs | C# | apache-2.0 | 1,105 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import unittest
import apache_beam as beam
from apache_beam.runners.portability import fn_api_runner
from apache_beam.runners.portability import maptask_executor_runner_test
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class FnApiRunnerTest(
    maptask_executor_runner_test.MapTaskExecutorRunnerTest):
  """Runs the MapTaskExecutorRunner suite against the fn API runner.

  Inherits all tests from MapTaskExecutorRunnerTest; the overrides below
  disable features the fn API runner does not support yet.
  """

  def create_pipeline(self):
    return beam.Pipeline(
        runner=fn_api_runner.FnApiRunner())

  def test_combine_per_key(self):
    # TODO(BEAM-1348): Enable once Partial GBK is supported in fn API.
    # FIX: this method was defined twice; the duplicate definition silently
    # shadowed the first one and has been removed.
    pass

  def test_pardo_side_inputs(self):
    # TODO(BEAM-1348): Enable once side inputs are supported in fn API.
    pass

  def test_pardo_unfusable_side_inputs(self):
    # TODO(BEAM-1348): Enable once side inputs are supported in fn API.
    pass

  def test_assert_that(self):
    # TODO: figure out a way for fn_api_runner to parse and raise the
    # underlying exception.
    with self.assertRaisesRegexp(Exception, 'Failed assert'):
      with self.create_pipeline() as p:
        assert_that(p | beam.Create(['a', 'b']), equal_to(['a']))
# Inherits all tests from maptask_executor_runner.MapTaskExecutorRunner
if __name__ == '__main__':
  # INFO-level logging makes runner progress visible while the suite runs.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| eljefe6a/incubator-beam | sdks/python/apache_beam/runners/portability/fn_api_runner_test.py | Python | apache-2.0 | 2,196 |
using JetBrains.Annotations;
using JetBrains.ReSharper.Plugins.Unity.Yaml.Psi.DeferredCaches.AnimationEventsUsages;
using JetBrains.ReSharper.Plugins.Unity.Yaml.Psi.DeferredCaches.AssetHierarchy.References;
using JetBrains.ReSharper.Psi;
namespace JetBrains.ReSharper.Plugins.Unity.Yaml.Psi.Search
{
    /// <summary>
    /// Find result for a declared element referenced from a Unity animation
    /// event, pairing the base asset result with the animation usage.
    /// </summary>
    public class UnityAnimationEventFindResults : UnityAssetFindResult
    {
        public UnityAnimationEventFindResults([NotNull] IPsiSourceFile sourceFile,
                                              [NotNull] IDeclaredElement declaredElement,
                                              [NotNull] AnimationUsage usage,
                                              LocalReference owningElementLocation)
            : base(sourceFile, declaredElement, owningElementLocation)
        {
            Usage = usage;
        }

        /// <summary>The animation usage this result was found in.</summary>
        [NotNull]
        public AnimationUsage Usage { get; }

        // Equality is exact-type (GetType comparison, not "is"), combining the
        // base result identity with the usage.
        public override bool Equals(object obj)
        {
            if (ReferenceEquals(null, obj)) return false;
            if (ReferenceEquals(this, obj)) return true;

            return obj.GetType() == GetType() && Equals((UnityAnimationEventFindResults) obj);
        }

        private bool Equals([NotNull] UnityAnimationEventFindResults other)
        {
            return base.Equals(other) && Usage.Equals(other.Usage);
        }

        // Kept consistent with Equals: mixes the base hash with Usage's hash.
        public override int GetHashCode()
        {
            unchecked
            {
                return (base.GetHashCode() * 397) ^ Usage.GetHashCode();
            }
        }
    }
} | JetBrains/resharper-unity | resharper/resharper-unity/src/Unity/Yaml/Psi/Search/UnityAnimationEventFindResults.cs | C# | apache-2.0 | 1,533 |
(function ($) {
    "use strict";

    // Default navigation arrows shared by almost every carousel on the page.
    var angleNav = ["<i class='fa fa-angle-left'></i>", "<i class='fa fa-angle-right'></i>"];

    // Initialise an Owl Carousel by merging the site-wide defaults with the
    // per-carousel overrides. Effective options are identical to spelling each
    // call out by hand; only the duplication is removed.
    function initCarousel(selector, overrides) {
        $(selector).owlCarousel($.extend({
            autoPlay: false,
            slideSpeed: 2000,
            pagination: false,
            navigation: true,
            navigationText: angleNav
        }, overrides));
    }

    /* ---------- price slider ---------- */
    var range = $('#slider-range');
    var amount = $('#amount');
    range.slider({
        range: true,
        min: 2,
        max: 300,
        values: [2, 300],
        slide: function (event, ui) {
            amount.val("$" + ui.values[0] + " - $" + ui.values[1]);
        }
    });
    amount.val("$" + range.slider("values", 0) + " - $" + range.slider("values", 1));

    /* ---------- mobile menu ---------- */
    jQuery('#mobile-menu-active').meanmenu();

    /* ---------- main slider ---------- */
    initCarousel(".let_new_carasel", {
        autoPlay: true,
        pagination: true,
        items: 1,
        navigationText: ["<i class='fa fa-caret-left'></i>", "<i class='fa fa-caret-right'></i>"],
        itemsDesktop: [1199, 1],
        itemsDesktopSmall: [980, 1],
        itemsTablet: [768, 1],
        itemsMobile: [767, 1]
    });

    /* ---------- tooltips ---------- */
    $('[data-toggle="tooltip"]').tooltip({
        animated: 'fade',
        placement: 'top',
        container: 'body'
    });

    /* ---------- single portfolio thumbnails ---------- */
    initCarousel(".sub_pix", {
        autoPlay: true,
        pagination: true,
        navigation: false,
        items: 5,
        itemsDesktop: [1199, 4],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 5],
        itemsMobile: [767, 3]
    });

    /* ---------- dropdown toggles ---------- */
    $(".all_catagories").on("click", function () {
        $(".cat_mega_start").slideToggle("slow");
    });
    $(".showmore-items").on("click", function () {
        $(".cost-menu").slideToggle("slow");
    });

    /* ---------- new products ---------- */
    initCarousel(".whole_product", {
        items: 3,
        itemsDesktop: [1199, 3],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 1],
        itemsMobile: [767, 1]
    });

    /* ---------- hot deals ---------- */
    initCarousel(".new_cosmatic", {
        items: 1,
        itemsDesktop: [1199, 1],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 2],
        itemsMobile: [767, 1]
    });

    /* ---------- countdown timers ---------- */
    $('[data-countdown]').each(function () {
        var $this = $(this), finalDate = $(this).data('countdown');
        $this.countdown(finalDate, function (event) {
            $this.html(event.strftime('<span class="cdown days"><span class="time-count">%-D</span> <p>Days</p></span> <span class="cdown hour"><span class="time-count">%-H</span> <p>Hour</p></span> <span class="cdown minutes"><span class="time-count">%M</span> <p>Min</p></span> <span class="cdown second"> <span><span class="time-count">%S</span> <p>Sec</p></span>'));
        });
    });

    /* ---------- featured products ---------- */
    initCarousel(".feature-carousel", {
        items: 4,
        itemsDesktop: [1199, 3],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 2],
        itemsMobile: [767, 1]
    });

    /* ---------- top rated ---------- */
    initCarousel(".all_ayntex", {
        items: 1,
        itemsDesktop: [1199, 1],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 2],
        itemsMobile: [767, 1]
    });

    /* ---------- featured categories ---------- */
    initCarousel(".achard_all", {
        items: 5,
        itemsDesktop: [1199, 4],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 4],
        itemsMobile: [767, 2]
    });

    /* ---------- blog posts ---------- */
    initCarousel(".blog_carasel", {
        items: 3,
        itemsDesktop: [1199, 2],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 2],
        itemsMobile: [767, 1]
    });

    /* ---------- brand logos ---------- */
    initCarousel(".all_brand", {
        items: 6,
        itemsDesktop: [1199, 4],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 2],
        itemsMobile: [480, 2]
    });

    /* ---------- scroll-to-top ---------- */
    $.scrollUp({
        scrollText: '<i class="fa fa-angle-double-up"></i>',
        easingType: 'linear',
        scrollSpeed: 900,
        animation: 'fade'
    });

    /* ---------- home page 2: new products ---------- */
    initCarousel(".product_2", {
        items: 4,
        itemsDesktop: [1199, 3],
        itemsDesktopSmall: [980, 4],
        itemsTablet: [768, 2],
        itemsMobile: [767, 1]
    });

    /* ---------- home page 2: blog posts ---------- */
    initCarousel(".blog_new_carasel_2", {
        items: 2,
        itemsDesktop: [1199, 2],
        itemsDesktopSmall: [980, 2],
        itemsTablet: [768, 1],
        itemsMobile: [767, 1]
    });

    /* ---------- home page 2: product categories ---------- */
    initCarousel(".feature-carousel-2", {
        items: 2,
        itemsDesktop: [1199, 2],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 2],
        itemsMobile: [767, 1]
    });

    /* ---------- home page 3: blog posts ---------- */
    initCarousel(".blog_carasel_5", {
        items: 4,
        itemsDesktop: [1199, 4],
        itemsDesktopSmall: [980, 3],
        itemsTablet: [768, 2],
        itemsMobile: [767, 1]
    });

    /* ---------- category menu toggle ---------- */
    $('.expandable a').on('click', function () {
        $(this).parent().find('.category-sub').toggleClass('submenu-active');
        $(this).toggleClass('submenu-active');
        return false;
    });

    /* ---------- portfolio filtering ---------- */
    $('#Container').mixItUp();

    /* ---------- image lightbox ---------- */
    $('.magnify').magnificPopup({ type: 'image' });

    /* ---------- checkout reveals ---------- */
    $("#cbox").on("click", function () {
        $("#cbox_info").slideToggle(900);
    });
    $('#showlogin, #showcoupon').on('click', function () {
        $(this).parent().next().slideToggle(600);
    });

    /* ---------- payment accordion ---------- */
    $('.payment-accordion').find('.payment-accordion-toggle').on('click', function () {
        // Expand or collapse this panel, then hide the others.
        $(this).next().slideToggle(500);
        $(".payment-content").not($(this).next()).slideUp(500);
    });
    $('.payment-accordion-toggle').on('click', function (event) {
        $(this).siblings('.active').removeClass('active');
        $(this).addClass('active');
        event.preventDefault();
    });
})(jQuery);
| TZClub/OMIPlatform | shopping-platfrom/src/main/webapp/resources/js/main.js | JavaScript | apache-2.0 | 10,332 |
/*
* Copyright (c) 2010, Christophe Souvignier.
* Copyright (c) 2010, Paul Merlin.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.n0pe.mojo.asadmin;
import java.util.Iterator;
import java.util.Map;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugin.MojoFailureException;
import org.n0pe.asadmin.AsAdminCmdList;
import org.n0pe.asadmin.commands.CreateJdbcConnectionPool;
/**
 * Creates a JDBC connection pool via asadmin, applying any configured pool
 * properties to the generated command.
 *
 * @goal create-jdbc-connection-pool
 */
public class CreateJdbcConnectionPoolMojo
    extends AbstractAsadminMojo
{

    /**
     * Fully qualified class name of the pool's data source implementation.
     *
     * @parameter default-value="org.apache.derby.jdbc.ClientXADataSource"
     * @required
     */
    private String poolDataSource;

    /**
     * Name of the JDBC connection pool to create.
     *
     * @parameter
     * @required
     */
    private String poolName;

    /**
     * Resource type of the pool (e.g. a javax.sql data source interface).
     *
     * @parameter default-value="javax.sql.XADataSource"
     * @required
     */
    private String restype;

    /**
     * Optional String-to-String properties forwarded to the pool.
     *
     * @parameter
     */
    private Map poolProperties;

    @Override
    protected AsAdminCmdList getAsCommandList()
        throws MojoExecutionException, MojoFailureException
    {
        // Fixed copy-paste log message: this mojo creates a JDBC connection
        // pool, not an auth realm.
        getLog().info( "Creating JDBC connection pool: " + poolName );
        final AsAdminCmdList list = new AsAdminCmdList();
        final CreateJdbcConnectionPool cmd =
            new CreateJdbcConnectionPool( poolName ).withDataSource( poolDataSource ).withRestype( restype );
        if( poolProperties != null && !poolProperties.isEmpty() )
        {
            // Iterate entries directly instead of keySet()+get() lookups.
            for( final Object entryObject : poolProperties.entrySet() )
            {
                final Map.Entry entry = ( Map.Entry ) entryObject;
                cmd.addProperty( (String) entry.getKey(), (String) entry.getValue() );
            }
        }
        list.add( cmd );
        return list;
    }

}
| eskatos/asadmin | asadmin-maven-plugin/src/main/java/org/n0pe/mojo/asadmin/CreateJdbcConnectionPoolMojo.java | Java | apache-2.0 | 2,248 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/imagebuilder/model/ComponentState.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace imagebuilder
{
namespace Model
{

// Default state: no field is marked "set", so Jsonize() emits an empty payload.
ComponentState::ComponentState() :
    m_status(ComponentStatus::NOT_SET),
    m_statusHasBeenSet(false),
    m_reasonHasBeenSet(false)
{
}

// Construct directly from a JSON view by delegating to operator=.
ComponentState::ComponentState(JsonView jsonValue) :
    m_status(ComponentStatus::NOT_SET),
    m_statusHasBeenSet(false),
    m_reasonHasBeenSet(false)
{
  *this = jsonValue;
}

// Populate from JSON; the *HasBeenSet flags record which members were present
// so serialization only round-trips fields that actually appeared on the wire.
ComponentState& ComponentState::operator =(JsonView jsonValue)
{
  if(jsonValue.ValueExists("status"))
  {
    // Map the wire string onto the ComponentStatus enum.
    m_status = ComponentStatusMapper::GetComponentStatusForName(jsonValue.GetString("status"));

    m_statusHasBeenSet = true;
  }

  if(jsonValue.ValueExists("reason"))
  {
    m_reason = jsonValue.GetString("reason");

    m_reasonHasBeenSet = true;
  }

  return *this;
}

// Serialize only the members that have been set.
JsonValue ComponentState::Jsonize() const
{
  JsonValue payload;

  if(m_statusHasBeenSet)
  {
    payload.WithString("status", ComponentStatusMapper::GetNameForComponentStatus(m_status));
  }

  if(m_reasonHasBeenSet)
  {
    payload.WithString("reason", m_reason);
  }

  return payload;
}

} // namespace Model
} // namespace imagebuilder
} // namespace Aws
| aws/aws-sdk-cpp | aws-cpp-sdk-imagebuilder/source/model/ComponentState.cpp | C++ | apache-2.0 | 1,432 |
/*
* Licensed to the Technische Universität Darmstadt under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The Technische Universität Darmstadt
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.tudarmstadt.ukp.clarin.webanno.api.dao.export.exporters;
import static org.apache.commons.io.FileUtils.copyInputStreamToFile;
import static org.apache.commons.io.FileUtils.forceMkdir;
import java.io.File;
import java.io.IOException;
import java.util.Enumeration;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import de.tudarmstadt.ukp.clarin.webanno.api.ProjectService;
import de.tudarmstadt.ukp.clarin.webanno.api.export.ProjectExportRequest;
import de.tudarmstadt.ukp.clarin.webanno.api.export.ProjectExportTaskMonitor;
import de.tudarmstadt.ukp.clarin.webanno.api.export.ProjectExporter;
import de.tudarmstadt.ukp.clarin.webanno.api.export.ProjectImportRequest;
import de.tudarmstadt.ukp.clarin.webanno.export.model.ExportedProject;
import de.tudarmstadt.ukp.clarin.webanno.model.Project;
import de.tudarmstadt.ukp.clarin.webanno.support.ZipUtils;
@Component
public class GuildelinesExporter
    implements ProjectExporter
{
    public static final String GUIDELINE = "guideline";
    private static final String GUIDELINES_FOLDER = "/" + GUIDELINE;

    private final Logger log = LoggerFactory.getLogger(getClass());

    private @Autowired ProjectService projectService;

    /**
     * Copy project guidelines from the file system of this project to the export folder.
     */
    @Override
    public void exportData(ProjectExportRequest aRequest, ProjectExportTaskMonitor aMonitor,
            ExportedProject aExProject, File aStage)
        throws Exception
    {
        File guidelineDir = new File(aStage + GUIDELINES_FOLDER);
        FileUtils.forceMkdir(guidelineDir);
        File annotationGuidelines = projectService.getGuidelinesFolder(aRequest.getProject());
        if (annotationGuidelines.exists()) {
            // File.listFiles() may return null (e.g. on an I/O error or if the
            // path is not a directory) - guard against an NPE.
            File[] guidelineFiles = annotationGuidelines.listFiles();
            if (guidelineFiles != null) {
                for (File annotationGuideline : guidelineFiles) {
                    FileUtils.copyFileToDirectory(annotationGuideline, guidelineDir);
                }
            }
        }
    }

    /**
     * Copy guidelines from the exported project into the target project's guidelines folder.
     *
     * @param aZip
     *            the ZIP file.
     * @param aProject
     *            the project.
     * @throws IOException
     *             if an I/O error occurs.
     */
    @Override
    public void importData(ProjectImportRequest aRequest, Project aProject,
            ExportedProject aExProject, ZipFile aZip)
        throws Exception
    {
        for (Enumeration<? extends ZipEntry> zipEnumerate = aZip.entries(); zipEnumerate
                .hasMoreElements();) {
            // nextElement() already yields a ZipEntry; the old cast was redundant.
            ZipEntry entry = zipEnumerate.nextElement();

            // Strip leading "/" that we had in ZIP files prior to 2.0.8 (bug #985)
            String entryName = ZipUtils.normalizeEntryName(entry);

            if (entryName.startsWith(GUIDELINE + "/")) {
                String fileName = FilenameUtils.getName(entry.getName());
                if (fileName.trim().isEmpty()) {
                    continue;
                }
                File guidelineDir = projectService.getGuidelinesFolder(aProject);
                forceMkdir(guidelineDir);
                copyInputStreamToFile(aZip.getInputStream(entry), new File(guidelineDir, fileName));

                // Parameterized logging avoids eager string concatenation.
                log.info("Imported guideline [{}] for project [{}] with id [{}]", fileName,
                        aProject.getName(), aProject.getId());
            }
        }
    }
}
| webanno/webanno | webanno-api-dao/src/main/java/de/tudarmstadt/ukp/clarin/webanno/api/dao/export/exporters/GuildelinesExporter.java | Java | apache-2.0 | 4,420 |
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .effect_collection import EffectCollection
from .effect_ordering import (
effect_priority,
top_priority_effect,
)
from .effect_prediction import (
predict_variant_effects,
predict_variant_effect_on_transcript,
predict_variant_effect_on_transcript_or_failure,
)
from .effect_classes import (
MutationEffect,
TranscriptMutationEffect,
NonsilentCodingMutation,
Failure,
IncompleteTranscript,
Intergenic,
Intragenic,
NoncodingTranscript,
Intronic,
ThreePrimeUTR,
FivePrimeUTR,
Silent,
Substitution,
Insertion,
Deletion,
ComplexSubstitution,
AlternateStartCodon,
IntronicSpliceSite,
ExonicSpliceSite,
StopLoss,
SpliceDonor,
SpliceAcceptor,
PrematureStop,
FrameShiftTruncation,
StartLoss,
FrameShift,
ExonLoss,
)
# Public API of this package, kept in sync with the imports above.
# Fix: "IncompleteTranscript" was listed twice; the duplicate is removed.
__all__ = [
    "EffectCollection",
    # effect ordering
    "effect_priority",
    "top_priority_effect",
    # prediction functions
    "predict_variant_effects",
    "predict_variant_effect_on_transcript",
    "predict_variant_effect_on_transcript_or_failure",
    # effect classes
    "MutationEffect",
    "TranscriptMutationEffect",
    "Failure",
    "IncompleteTranscript",
    "Intergenic",
    "Intragenic",
    "NoncodingTranscript",
    "ThreePrimeUTR",
    "FivePrimeUTR",
    "Intronic",
    "Silent",
    "NonsilentCodingMutation",
    "Substitution",
    "Insertion",
    "Deletion",
    "ComplexSubstitution",
    "AlternateStartCodon",
    "IntronicSpliceSite",
    "ExonicSpliceSite",
    "StopLoss",
    "SpliceDonor",
    "SpliceAcceptor",
    "PrematureStop",
    "FrameShiftTruncation",
    "StartLoss",
    "FrameShift",
    "ExonLoss",
]
| hammerlab/varcode | varcode/effects/__init__.py | Python | apache-2.0 | 2,401 |
using System.Data.Common;
using System.Data.Entity.Infrastructure.Interception;
namespace Advertise.Utility.Persians
{
    /// <summary>
    ///     EF6 command interceptor that runs ApplyCorrectYeKe on commands before they
    ///     execute (presumably normalising Arabic Ye/Ke characters to their Persian
    ///     forms -- the actual rewrite lives in the ApplyCorrectYeKe extension method).
    /// </summary>
    public class PersianYeKeInterceptor : IDbCommandInterceptor
    {
        /// <summary>
        ///     Applies the Ye/Ke correction before a data-reader query executes.
        /// </summary>
        /// <param name="command"></param>
        /// <param name="interceptionContext"></param>
        public void ReaderExecuting(DbCommand command, DbCommandInterceptionContext<DbDataReader> interceptionContext)
        {
            command.ApplyCorrectYeKe();
        }

        /// <summary>
        ///     No-op: nothing to correct after a non-query has executed.
        /// </summary>
        /// <param name="command"></param>
        /// <param name="interceptionContext"></param>
        public void NonQueryExecuted(DbCommand command, DbCommandInterceptionContext<int> interceptionContext)
        {
        }

        /// <summary>
        ///     Applies the Ye/Ke correction before a non-query command executes.
        /// </summary>
        /// <param name="command"></param>
        /// <param name="interceptionContext"></param>
        public void NonQueryExecuting(DbCommand command, DbCommandInterceptionContext<int> interceptionContext)
        {
            command.ApplyCorrectYeKe();
        }

        /// <summary>
        ///     No-op: nothing to correct after a data-reader query has executed.
        /// </summary>
        /// <param name="command"></param>
        /// <param name="interceptionContext"></param>
        public void ReaderExecuted(DbCommand command, DbCommandInterceptionContext<DbDataReader> interceptionContext)
        {
        }

        /// <summary>
        ///     No-op: nothing to correct after a scalar query has executed.
        /// </summary>
        /// <param name="command"></param>
        /// <param name="interceptionContext"></param>
        public void ScalarExecuted(DbCommand command, DbCommandInterceptionContext<object> interceptionContext)
        {
        }

        /// <summary>
        ///     Applies the Ye/Ke correction before a scalar query executes.
        /// </summary>
        /// <param name="command"></param>
        /// <param name="interceptionContext"></param>
        public void ScalarExecuting(DbCommand command, DbCommandInterceptionContext<object> interceptionContext)
        {
            command.ApplyCorrectYeKe();
        }
    }
}
package org.wikipedia.edit;
import android.support.annotation.Nullable;
import org.wikipedia.dataclient.mwapi.MwPostResponse;
/**
 * Gson-mapped model for the response of a MediaWiki "edit" API action.
 */
class Edit extends MwPostResponse {
    // Fix: the suppression token was "unused," (stray trailing comma), which is
    // not a recognized warning name and made the annotation ineffective.
    @SuppressWarnings("unused") @Nullable private Result edit;

    /** @return the edit result payload, or null when the response carried none. */
    @Nullable Result edit() {
        return edit;
    }

    boolean hasEditResult() {
        return edit != null;
    }

    /**
     * Detail of the edit outcome. Declared static so Gson can instantiate it
     * without a reference to an enclosing Edit instance.
     */
    static class Result {
        @SuppressWarnings("unused") @Nullable private String result;
        @SuppressWarnings("unused") private int newrevid;
        @SuppressWarnings("unused") @Nullable private Captcha captcha;
        @SuppressWarnings("unused") @Nullable private String code;
        @SuppressWarnings("unused") @Nullable private String spamblacklist;

        /** @return the status string reported by the API, or null if absent. */
        @Nullable String status() {
            return result;
        }

        /** @return the revision id created by the edit (0 when not reported). */
        int newRevId() {
            return newrevid;
        }

        /** @return the CAPTCHA challenge id, or null when no CAPTCHA was issued. */
        @Nullable String captchaId() {
            return captcha == null ? null : captcha.id();
        }

        boolean hasErrorCode() {
            return code != null;
        }

        boolean hasCaptchaResponse() {
            return captcha != null;
        }

        @Nullable String spamblacklist() {
            return spamblacklist;
        }

        boolean hasSpamBlacklistResponse() {
            return spamblacklist != null;
        }
    }

    /** CAPTCHA challenge attached to a rejected edit. Static for Gson (see Result). */
    private static class Captcha {
        @SuppressWarnings("unused") @Nullable private String id;

        @Nullable String id() {
            return id;
        }
    }
}
| anirudh24seven/apps-android-wikipedia | app/src/main/java/org/wikipedia/edit/Edit.java | Java | apache-2.0 | 1,491 |
// Pull in a second module so the bundler has more than one file to combine.
require('./second.js');
// NOTE(review): `i` is never read; presumably it exists only so the variable
// shows up in the bundled output -- confirm, otherwise it can be removed.
var i = 0;
console.log('Hello Webpack!');
console.log('Webpack is cool.');
| jitendraag/webpack-2-examples | example2/input.js | JavaScript | apache-2.0 | 99 |
# Fixture handles used throughout the contact feature steps.
CONTACT_HANDLE = 'contact'
NEW_CONTACT_HANDLE = 'new_contact'
OTHER_CONTACT_HANDLE = 'other_contact'
# Handles for negative cases: an empty handle and one that is never created.
BLANK_CONTACT_HANDLE = ''
NON_EXISTING_CONTACT_HANDLE = 'non_existing'
# Ensures no contact with the given handle remains in the database.
def contact_does_not_exist handle = CONTACT_HANDLE
  existing = Contact.find_by(handle: handle)
  existing.delete unless existing.nil?
end
# Creates a fresh contact with the given handle, owned by the partner named in
# `under:` when given, otherwise by @current_partner. Any pre-existing contact
# with the same handle is removed first.
def contact_exists handle = CONTACT_HANDLE, under: nil
  contact_does_not_exist handle
  owner = under ? Partner.find_by(name: under) : @current_partner
  create :contact, partner: owner, handle: handle
end
# Convenience wrapper: creates the secondary fixture contact.
def other_contact_exists
  contact_exists OTHER_CONTACT_HANDLE
end
# POSTs a contact-creation request. `with:` may supply a :partner override or a
# complete :json_request payload (which replaces everything built below).
def create_contact with: { partner: nil, json_request: nil }
  params = with
  # `json_request` is a fresh local here, so ||= always assigns the default.
  json_request ||= { handle: CONTACT_HANDLE }
  # Admins act on behalf of a non-admin partner unless an explicit one is given.
  json_request[:partner] = NON_ADMIN_PARTNER if @current_user.admin?
  json_request[:partner] = params[:partner] if params[:partner]
  # A caller-supplied payload wins over everything assembled above.
  json_request = params[:json_request] if params[:json_request]
  post contacts_url, json_request
end
# PATCHes CONTACT_HANDLE with a full set of replacement attributes. `with:` may
# override the payload's :handle and :partner keys.
def update_contact with: { handle: nil, partner: nil }
  params = with
  json_request = {
    name: 'new_name',
    organization: 'new_organization',
    street: 'new_street',
    street2: 'new_street2',
    street3: 'new_street3',
    city: 'new_city',
    state: 'new_state',
    postal_code: 'new_postal_code',
    country_code: 'new_country',
    local_name: 'New local name',
    local_organization: 'New local organization',
    local_street: 'New local street',
    local_street2: 'New local street 2',
    local_street3: 'New local street 3',
    local_city: 'New local city',
    local_state: 'New local state',
    local_postal_code: 'New local postal code',
    local_country_code: 'New local country code',
    voice: 'new_phone',
    voice_ext: '1234',
    fax: 'new_fax',
    fax_ext: '1234',
    email: 'new_email@contact.ph',
  }
  # Optional caller overrides.
  json_request[:handle] = params[:handle] if params[:handle]
  json_request[:partner] = params[:partner] if params[:partner]
  patch contact_path(CONTACT_HANDLE), json_request
end
# Asserts a 201 response whose body is the bare new contact (all optional
# fields nil) and that the record was actually persisted.
def assert_contact_created
  assert_response_status_must_be_created
  expected_response = {
    handle: CONTACT_HANDLE,
    name: nil,
    organization: nil,
    street: nil,
    street2: nil,
    street3: nil,
    city: nil,
    state: nil,
    postal_code: nil,
    country_code: nil,
    local_name: nil,
    local_organization: nil,
    local_street: nil,
    local_street2: nil,
    local_street3: nil,
    local_city: nil,
    local_state: nil,
    local_postal_code: nil,
    local_country_code: nil,
    voice: nil,
    voice_ext: nil,
    fax: nil,
    fax_ext: nil,
    email: nil,
  }
  json_response.must_equal expected_response
  # The contact must exist in the database, not just in the response body.
  Contact.find_by(handle: CONTACT_HANDLE).wont_be_nil
end
# A create should leave exactly one history row behind.
def assert_create_contact_history_created
  assert_contact_history_created
end
# A create followed by an update should leave two history rows behind.
def assert_update_contact_history_created
  assert_contact_history_created count: 2
end
# Asserts the fixture contact has exactly `count` history rows and that the
# most recent one mirrors the contact's current attributes.
def assert_contact_history_created count: 1
  contact = Contact.find_by(handle: CONTACT_HANDLE)
  histories = contact.contact_histories
  histories.count.must_equal count
  assert_contact_history histories.last, contact
end
# Verifies a contact_history row is a faithful snapshot of the contact: every
# copied attribute must match. The attribute list preserves the original
# assertion order; the hand-written line-per-field comparisons are folded
# into one loop.
def assert_contact_history contact_history, contact
  %i[
    handle partner name organization
    street street2 street3 city state postal_code country_code
    local_name local_organization local_street local_street2 local_street3
    local_city local_state local_postal_code local_country_code
    voice voice_ext fax fax_ext email
  ].each do |attribute|
    contact_history.send(attribute).must_equal contact.send(attribute)
  end
end
# Asserts a 200 response whose body reflects all the replacement attributes
# sent by update_contact.
def assert_contact_updated
  assert_response_status_must_be_ok
  expected_response = {
    handle: CONTACT_HANDLE,
    name: 'new_name',
    organization: 'new_organization',
    street: 'new_street',
    street2: 'new_street2',
    street3: 'new_street3',
    city: 'new_city',
    state: 'new_state',
    postal_code: 'new_postal_code',
    country_code: 'new_country',
    local_name: 'New local name',
    local_organization: 'New local organization',
    local_street: 'New local street',
    local_street2: 'New local street 2',
    local_street3: 'New local street 3',
    local_city: 'New local city',
    local_state: 'New local state',
    local_postal_code: 'New local postal code',
    local_country_code: 'New local country code',
    voice: 'new_phone',
    voice_ext: '1234',
    fax: 'new_fax',
    fax_ext: '1234',
    email: 'new_email@contact.ph'
  }
  json_response.must_equal expected_response
end
# Asserts the index response lists exactly the two fixture contacts.
def assert_contacts_displayed
  assert_response_status_must_be_ok
  json_response.length.must_equal 2
  json_response.must_equal contacts_response
end
# Asserts the index response succeeds but contains no contacts.
def assert_no_contacts_displayed
  assert_response_status_must_be_ok
  json_response.length.must_equal 0
end
# Fetches the contacts index for the current session.
def view_contacts
  get contacts_path
end
# Expected index JSON for the two fixture contacts (all optional fields nil).
def contacts_response
  [
    {:handle=>"contact", :name=>nil, :organization=>nil, :street=>nil, :street2=>nil, :street3=>nil, :city=>nil, :state=>nil, :postal_code=>nil, :country_code=>nil, :local_name=>nil, :local_organization=>nil, :local_street=>nil, :local_street2=>nil, :local_street3=>nil, :local_city=>nil, :local_state=>nil, :local_postal_code=>nil, :local_country_code=>nil, :voice=>nil, :voice_ext=>nil, :fax=>nil, :fax_ext=>nil, :email=>nil},
    {:handle=>"other_contact", :name=>nil, :organization=>nil, :street=>nil, :street2=>nil, :street3=>nil, :city=>nil, :state=>nil, :postal_code=>nil, :country_code=>nil, :local_name=>nil, :local_organization=>nil, :local_street=>nil, :local_street2=>nil, :local_street3=>nil, :local_city=>nil, :local_state=>nil, :local_postal_code=>nil, :local_country_code=>nil, :voice=>nil, :voice_ext=>nil, :fax=>nil, :fax_ext=>nil, :email=>nil}
  ]
end
| dotph/registry | features/services/contact_service.rb | Ruby | apache-2.0 | 6,724 |
/*
* Copyright 2014-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language
* governing permissions and limitations under the License.
*/
package org.docksidestage.dockside.dbflute.bsbhv.pmbean;
import java.util.*;
import org.dbflute.outsidesql.paging.SimplePagingBean;
import org.dbflute.outsidesql.typed.*;
import org.dbflute.jdbc.*;
import org.dbflute.cbean.coption.LikeSearchOption;
import org.dbflute.outsidesql.PmbCustodial;
import org.dbflute.util.DfTypeUtil;
import org.docksidestage.dockside.dbflute.allcommon.*;
import org.docksidestage.dockside.dbflute.exbhv.*;
import org.docksidestage.dockside.dbflute.exentity.customize.*;
/**
* The base class for typed parameter-bean of PurchaseMaxPriceMember. <br>
* This is related to "<span style="color: #AD4747">selectPurchaseMaxPriceMember</span>" on MemberBhv.
* @author DBFlute(AutoGenerator)
*/
public class BsPurchaseMaxPriceMemberPmb extends SimplePagingBean implements EntityHandlingPmb<MemberBhv, PurchaseMaxPriceMember>, ManualPagingHandlingPmb<MemberBhv, PurchaseMaxPriceMember>, FetchBean {
// ===================================================================================
// Attribute
// =========
/** The parameter of memberId. */
protected Integer _memberId;
/** The parameter of memberNameList:likePrefix. */
protected List<String> _memberNameList;
/** The option of like-search for memberNameList. */
protected LikeSearchOption _memberNameListInternalLikeSearchOption;
/** The parameter of memberStatusCodeList:cls(MemberStatus). */
protected List<org.docksidestage.dockside.dbflute.allcommon.CDef.MemberStatus> _memberStatusCodeList;
/** The time-zone for filtering e.g. from-to. (NullAllowed: if null, default zone) */
protected TimeZone _timeZone;
// ===================================================================================
// Constructor
// ===========
    /**
     * Constructor for the typed parameter-bean of PurchaseMaxPriceMember. <br>
     * This is related to "<span style="color: #AD4747">selectPurchaseMaxPriceMember</span>" on MemberBhv.
     */
    public BsPurchaseMaxPriceMemberPmb() {
        // Honor the global DBFlute configuration: defer the paging count query
        // until after the paging select when the option is enabled.
        if (DBFluteConfig.getInstance().isPagingCountLater()) {
            enablePagingCountLater();
        }
    }
// ===================================================================================
// Typed Implementation
// ====================
    /**
     * {@inheritDoc}
     * Returns the path of the outside-SQL this parameter-bean is bound to.
     */
    public String getOutsideSqlPath() { return "selectPurchaseMaxPriceMember"; }

    /**
     * Get the type of an entity for result. (implementation)
     * @return The type instance of an entity, customize entity. (NotNull)
     */
    public Class<PurchaseMaxPriceMember> getEntityType() { return PurchaseMaxPriceMember.class; }
// ===================================================================================
// Assist Helper
// =============
// -----------------------------------------------------
// String
// ------
protected String filterStringParameter(String value) { return isEmptyStringParameterAllowed() ? value : convertEmptyToNull(value); }
protected boolean isEmptyStringParameterAllowed() { return DBFluteConfig.getInstance().isEmptyStringParameterAllowed(); }
protected String convertEmptyToNull(String value) { return PmbCustodial.convertEmptyToNull(value); }
protected void assertLikeSearchOptionValid(String name, LikeSearchOption option) { PmbCustodial.assertLikeSearchOptionValid(name, option); }
// -----------------------------------------------------
// Date
// ----
protected Date toUtilDate(Object date) { return PmbCustodial.toUtilDate(date, _timeZone); }
protected <DATE> DATE toLocalDate(Date date, Class<DATE> localType) { return PmbCustodial.toLocalDate(date, localType, chooseRealTimeZone()); }
protected TimeZone chooseRealTimeZone() { return PmbCustodial.chooseRealTimeZone(_timeZone); }
/**
* Set time-zone, basically for LocalDate conversion. <br>
* Normally you don't need to set this, you can adjust other ways. <br>
* (DBFlute system's time-zone is used as default)
* @param timeZone The time-zone for filtering. (NullAllowed: if null, default zone)
*/
public void zone(TimeZone timeZone) { _timeZone = timeZone; }
// -----------------------------------------------------
// by Option Handling
// ------------------
// might be called by option handling
protected <NUMBER extends Number> NUMBER toNumber(Object obj, Class<NUMBER> type) { return PmbCustodial.toNumber(obj, type); }
protected Boolean toBoolean(Object obj) { return PmbCustodial.toBoolean(obj); }
@SuppressWarnings("unchecked")
protected <ELEMENT> ArrayList<ELEMENT> newArrayList(ELEMENT... elements) { return PmbCustodial.newArrayList(elements); }
// ===================================================================================
// Basic Override
// ==============
/**
* @return The display string of all parameters. (NotNull)
*/
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append(DfTypeUtil.toClassTitle(this)).append(":");
sb.append(xbuildColumnString());
return sb.toString();
}
protected String xbuildColumnString() {
final String dm = ", ";
final StringBuilder sb = new StringBuilder();
sb.append(dm).append(_memberId);
sb.append(dm).append(_memberNameList);
sb.append(dm).append(_memberStatusCodeList);
if (sb.length() > 0) { sb.delete(0, dm.length()); }
sb.insert(0, "{").append("}");
return sb.toString();
}
// ===================================================================================
// Accessor
// ========
/**
* [get] memberId <br>
* // not required / used as equal
* @return The value of memberId. (NullAllowed, NotEmptyString(when String): if empty string, returns null)
*/
public Integer getMemberId() {
return _memberId;
}
/**
* [set] memberId <br>
* // not required / used as equal
* @param memberId The value of memberId. (NullAllowed)
*/
public void setMemberId(Integer memberId) {
_memberId = memberId;
}
/**
* [get] memberNameList:likePrefix <br>
* // list of prefix keyword
* @return The value of memberNameList. (NullAllowed, NotEmptyString(when String): if empty string, returns null)
*/
public List<String> getMemberNameList() {
return _memberNameList;
}
/**
* [set as prefixSearch] memberNameList:likePrefix <br>
* // list of prefix keyword
* @param memberNameList The value of memberNameList. (NullAllowed)
*/
public void setMemberNameList_PrefixSearch(List<String> memberNameList) {
_memberNameList = memberNameList;
_memberNameListInternalLikeSearchOption = new LikeSearchOption().likePrefix();
}
/**
* Get the internal option of likeSearch for memberNameList. {Internal Method: Don't invoke this}
* @return The internal option of likeSearch for memberNameList. (NullAllowed)
*/
public LikeSearchOption getMemberNameListInternalLikeSearchOption() {
return _memberNameListInternalLikeSearchOption;
}
/**
* [get] memberStatusCodeList:cls(MemberStatus) <br>
* @return The value of memberStatusCodeList. (NullAllowed, NotEmptyString(when String): if empty string, returns null)
*/
public List<org.docksidestage.dockside.dbflute.allcommon.CDef.MemberStatus> getMemberStatusCodeList() {
return _memberStatusCodeList;
}
/**
* [set] memberStatusCodeList:cls(MemberStatus) <br>
* @param memberStatusCodeList The value of memberStatusCodeList. (NullAllowed)
*/
public void setMemberStatusCodeList(List<org.docksidestage.dockside.dbflute.allcommon.CDef.MemberStatus> memberStatusCodeList) {
_memberStatusCodeList = memberStatusCodeList;
}
}
| dbflute-test/dbflute-test-active-dockside | src/main/java/org/docksidestage/dockside/dbflute/bsbhv/pmbean/BsPurchaseMaxPriceMemberPmb.java | Java | apache-2.0 | 9,658 |
package com.annimon.stream.longstreamtests;
import com.annimon.stream.LongStream;
import org.junit.Test;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
public final class SumTest {

    @Test
    public void testSum() {
        // A non-empty stream sums to the total of its elements.
        final long total = LongStream.of(100, 20, 3).sum();
        assertThat(total, is(123L));
        // An empty stream sums to the additive identity.
        final long emptyTotal = LongStream.empty().sum();
        assertThat(emptyTotal, is(0L));
    }
}
| aNNiMON/Lightweight-Stream-API | stream/src/test/java/com/annimon/stream/longstreamtests/SumTest.java | Java | apache-2.0 | 393 |
package de.fhg.iais.cortex.services.institution;
import java.util.LinkedList;
import java.util.List;
import org.codehaus.jackson.annotate.JsonProperty;
import org.codehaus.jackson.annotate.JsonPropertyOrder;
import org.codehaus.jackson.map.annotate.JsonSerialize;
@JsonPropertyOrder({
"id", "name", "sector", "latitude", "longitude", "locationDisplayName", "hasItems", "numberOfItems", "children", "level", "detailViewUri"
})
@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
/**
 * A node in the institution tree exposed by the institution service.
 * Serialized to/from JSON via the Jackson annotations on the fields.
 * <p>
 * Note: {@link #equals(Object)} and {@link #hashCode()} deliberately exclude
 * {@code children}; two institutions are equal when all their scalar
 * properties match, regardless of subtree contents.
 */
public class Institution {

    @JsonProperty("id")
    private final String id;

    @JsonProperty("name")
    private final String name;

    @JsonProperty("sector")
    private final String sector;

    @JsonProperty("latitude")
    private final String latitude;

    @JsonProperty("longitude")
    private final String longitude;

    @JsonProperty("locationDisplayName")
    private final String locationDisplayName;

    @JsonProperty("hasItems")
    private final boolean hasItems;

    @JsonProperty("numberOfItems")
    private final long numberOfItems;

    @JsonProperty("children")
    private final List<Institution> children;

    // Tree depth; -1 until assigned by the tree builder.
    @JsonProperty("level")
    private int level;

    @JsonProperty("detailViewUri")
    private String detailViewUri;

    /*
     * Only needed for automatic serialization and deserialization.
     */
    @SuppressWarnings("unused")
    private Institution() {
        this(null, null, null, null, null, null, false, 0);
    }

    /**
     * Creates an institution with no children, level -1 and no detail-view URI.
     * All parameters may be null except the primitives.
     */
    public Institution(
            String id,
            String name,
            String sector,
            String latitude,
            String longitude,
            String locationDisplayName,
            boolean hasItems,
            long numberOfItems) {
        super();
        this.id = id;
        this.name = name;
        this.sector = sector;
        this.latitude = latitude;
        this.longitude = longitude;
        this.locationDisplayName = locationDisplayName;
        this.hasItems = hasItems;
        this.numberOfItems = numberOfItems;
        this.children = new LinkedList<Institution>();
        this.level = -1;
        this.detailViewUri = null;
    }

    public String getId() {
        return this.id;
    }

    public String getName() {
        return this.name;
    }

    public String getSector() {
        return this.sector;
    }

    public String getLatitude() {
        return this.latitude;
    }

    public String getLongitude() {
        return this.longitude;
    }

    /** @return The live (mutable) child list; callers may add to it directly. */
    public List<Institution> getChildren() {
        return this.children;
    }

    public void addChild(Institution institution) {
        this.children.add(institution);
    }

    /** Replaces the child list contents with the given institutions. */
    public void setChildren(List<Institution> objects) {
        this.children.clear();
        this.children.addAll(objects);
    }

    public int getLevel() {
        return this.level;
    }

    public void setLevel(int level) {
        this.level = level;
    }

    public void setDetailViewUri(String detailViewUri) {
        this.detailViewUri = detailViewUri;
    }

    public String getLocationDisplayName() {
        return this.locationDisplayName;
    }

    public String getDetailViewUri() {
        return this.detailViewUri;
    }

    public boolean hasItems() {
        return this.hasItems;
    }

    public long getNumberOfItems() {
        return this.numberOfItems;
    }

    /**
     * Deep-copies this institution and its subtree.
     * NOTE(review): {@code level} and {@code detailViewUri} are NOT copied and
     * stay at their defaults in the copy — presumably intentional because the
     * tree builder reassigns them; confirm before relying on a copy's equality.
     */
    public Institution copy() {
        Institution newInstitution =
            new Institution(this.id, this.name, this.sector, this.latitude, this.longitude, this.locationDisplayName, this.hasItems, this.numberOfItems);
        for ( Institution inst : this.children ) {
            newInstitution.getChildren().add(inst.copy());
        }
        return newInstitution;
    }

    /** Null-safe hash helper, equivalent to {@code o == null ? 0 : o.hashCode()}. */
    private static int hashOf(Object o) {
        return (o == null) ? 0 : o.hashCode();
    }

    /** Null-safe equality helper for reference fields. */
    private static boolean eq(Object a, Object b) {
        return (a == null) ? (b == null) : a.equals(b);
    }

    @Override
    public int hashCode() {
        // Same field order and per-field values as the previous hand-rolled
        // implementation, so hash codes are unchanged. Children are excluded.
        final int prime = 31;
        int result = 1;
        result = (prime * result) + hashOf(this.detailViewUri);
        result = (prime * result) + (this.hasItems ? 1231 : 1237);
        result = (prime * result) + hashOf(this.id);
        result = (prime * result) + hashOf(this.latitude);
        result = (prime * result) + this.level;
        result = (prime * result) + hashOf(this.locationDisplayName);
        result = (prime * result) + hashOf(this.longitude);
        result = (prime * result) + hashOf(this.name);
        result = (prime * result) + (int) (this.numberOfItems ^ (this.numberOfItems >>> 32));
        result = (prime * result) + hashOf(this.sector);
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if ( this == obj ) {
            return true;
        }
        if ( (obj == null) || (getClass() != obj.getClass()) ) {
            return false;
        }
        Institution other = (Institution) obj;
        // Children are deliberately excluded from equality.
        return eq(this.detailViewUri, other.detailViewUri)
            && (this.hasItems == other.hasItems)
            && eq(this.id, other.id)
            && eq(this.latitude, other.latitude)
            && (this.level == other.level)
            && eq(this.locationDisplayName, other.locationDisplayName)
            && eq(this.longitude, other.longitude)
            && eq(this.name, other.name)
            && (this.numberOfItems == other.numberOfItems)
            && eq(this.sector, other.sector);
    }

    @Override
    public String toString() {
        return "Institution [id="
            + this.id
            + ", name="
            + this.name
            + ", sector="
            + this.sector
            + ", latitude="
            + this.latitude
            + ", longitude="
            + this.longitude
            + ", locationDisplayName="
            + this.locationDisplayName
            + ", hasItems="
            + this.hasItems
            + ", numberOfItems="
            + this.numberOfItems
            + ", children="
            + this.children
            + ", level="
            + this.level
            + ", detailViewUri="
            + this.detailViewUri
            + "]";
    }
}
| Deutsche-Digitale-Bibliothek/ddb-backend | CoreServices/src/main/java/de/fhg/iais/cortex/services/institution/Institution.java | Java | apache-2.0 | 7,623 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ... import *
from talon.signature.learning import featurespace as fs
def test_apply_features():
    msg = '''This is John Doe
Tuesday @3pm suits. I'll chat to you then.
VP Research and Development, Xxxx Xxxx Xxxxx
555-226-2345
john@example.com'''
    sender = 'John <john@example.com>'
    features = fs.features(sender)
    result = fs.apply_features(msg, features)
    # note that we don't consider the first line because signatures don't
    # usually take all the text, empty lines are not considered
    expected = [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    eq_(result, expected)
    with patch.object(fs, 'SIGNATURE_MAX_LINES', 5):
        capped_features = fs.features(sender)
        capped_result = fs.apply_features(msg, capped_features)
        # result remains the same because we don't consider empty lines
        eq_(result, capped_result)
def test_build_pattern():
    # Checks the aggregate feature pattern for a signature-like block.
    msg = '''John Doe
VP Research and Development, Xxxx Xxxx Xxxxx
555-226-2345
john@example.com'''
    sender = 'John <john@example.com>'
    pattern = fs.build_pattern(msg, fs.features(sender))
    eq_(pattern, [2, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1])
| mailgun/talon | tests/signature/learning/featurespace_test.py | Python | apache-2.0 | 1,402 |
#!/usr/bin/env python3
"""Generate an updated requirements_all.txt."""
import importlib
import os
import pathlib
import pkgutil
import re
import sys
from script.hassfest.model import Integration
COMMENT_REQUIREMENTS = (
"Adafruit-DHT",
"Adafruit_BBIO",
"avion",
"beacontools",
"blinkt",
"bluepy",
"bme680",
"credstash",
"decora",
"envirophat",
"evdev",
"face_recognition",
"fritzconnection",
"i2csense",
"opencv-python-headless",
"py_noaa",
"VL53L1X2",
"pybluez",
"pycups",
"PySwitchbot",
"pySwitchmate",
"python-eq3bt",
"python-lirc",
"pyuserinput",
"raspihats",
"rpi-rf",
"RPi.GPIO",
"smbus-cffi",
)
TEST_REQUIREMENTS = (
"adguardhome",
"ambiclimate",
"aioambient",
"aioautomatic",
"aiobotocore",
"aioesphomeapi",
"aiohttp_cors",
"aiohue",
"aionotion",
"aiounifi",
"aioswitcher",
"aiowwlln",
"apns2",
"aprslib",
"av",
"axis",
"caldav",
"coinmarketcap",
"defusedxml",
"dsmr_parser",
"eebrightbox",
"emulated_roku",
"enocean",
"ephem",
"evohomeclient",
"feedparser-homeassistant",
"foobot_async",
"geojson_client",
"geopy",
"georss_generic_client",
"georss_ign_sismologia_client",
"georss_qld_bushfire_alert_client",
"getmac",
"google-api-python-client",
"gTTS-token",
"ha-ffmpeg",
"hangups",
"HAP-python",
"hass-nabucasa",
"haversine",
"hbmqtt",
"hdate",
"holidays",
"home-assistant-frontend",
"homekit[IP]",
"homematicip",
"httplib2",
"huawei-lte-api",
"influxdb",
"jsonpath",
"libpurecool",
"libsoundtouch",
"luftdaten",
"pyMetno",
"mbddns",
"mficlient",
"netdisco",
"numpy",
"oauth2client",
"paho-mqtt",
"pexpect",
"pilight",
"pmsensor",
"prometheus_client",
"ptvsd",
"pushbullet.py",
"py-canary",
"pyblackbird",
"pydeconz",
"pydispatcher",
"pyheos",
"pyhomematic",
"pyiqvia",
"pylitejet",
"pymfy",
"pymonoprice",
"pynx584",
"pyopenuv",
"pyotp",
"pyps4-homeassistant",
"pysmartapp",
"pysmartthings",
"pysonos",
"pyqwikswitch",
"PyRMVtransport",
"PyTransportNSW",
"pyspcwebgw",
"python-forecastio",
"python-nest",
"python_awair",
"python-velbus",
"pytradfri[async]",
"pyunifi",
"pyupnp-async",
"pyvesync",
"pywebpush",
"pyHS100",
"PyNaCl",
"regenmaschine",
"restrictedpython",
"rflink",
"ring_doorbell",
"rxv",
"simplisafe-python",
"sleepyq",
"smhi-pkg",
"somecomfort",
"sqlalchemy",
"srpenergy",
"statsd",
"toonapilib",
"twentemilieu",
"uvcclient",
"vsure",
"warrant",
"pythonwhois",
"wakeonlan",
"vultr",
"YesssSMS",
"ruamel.yaml",
"zeroconf",
"zigpy-homeassistant",
"bellows-homeassistant",
"py17track",
)
IGNORE_PIN = ("colorlog>2.1,<3", "keyring>=9.3,<10.0", "urllib3")
IGNORE_REQ = ("colorama<=1",) # Windows only requirement in check_config
URL_PIN = (
"https://developers.home-assistant.io/docs/"
"creating_platform_code_review.html#1-requirements"
)
CONSTRAINT_PATH = os.path.join(
os.path.dirname(__file__), "../homeassistant/package_constraints.txt"
)
CONSTRAINT_BASE = """
pycryptodome>=3.6.6
# Breaks Python 3.6 and is not needed for our supported Python versions
enum34==1000000000.0.0
# This is a old unmaintained library and is replaced with pycryptodome
pycrypto==1000000000.0.0
# Contains code to modify Home Assistant to work around our rules
python-systemair-savecair==1000000000.0.0
"""
def explore_module(package, explore_children):
    """List submodules of ``package``, optionally one level of their children.

    Returns an empty list for plain (non-package) modules.
    """
    module = importlib.import_module(package)
    if not hasattr(module, "__path__"):
        return []
    found = []
    for _, name, _ in pkgutil.iter_modules(module.__path__, package + "."):
        found.append(name)
        if explore_children:
            found += explore_module(name, False)
    return found
def core_requirements():
    """Gather core requirements out of setup.py."""
    with open("setup.py") as setup_file:
        # REQUIRES = [...] holds quoted requirement strings.
        requires_block = re.search(r"REQUIRES = \[(.*?)\]", setup_file.read(), re.S).group(1)
    return [match[1] for match in re.findall(r"(['\"])(.*?)\1", requires_block)]
def gather_recursive_requirements(domain, seen=None):
    """Recursively gather requirements from an integration and its dependencies.

    ``seen`` tracks domains that were already visited. Previously it was
    populated but never consulted, so a circular (or diamond) dependency
    graph caused redundant — and for cycles, unbounded — recursion. The
    top-level result is unchanged for well-formed dependency graphs.
    """
    if seen is None:
        seen = set()
    seen.add(domain)
    integration = Integration(
        pathlib.Path("homeassistant/components/{}".format(domain))
    )
    integration.load_manifest()
    reqs = set(integration.manifest["requirements"])
    for dep_domain in integration.manifest["dependencies"]:
        # Skip already-visited domains: their requirements were merged at
        # their first visit, and revisiting would loop on cyclic manifests.
        if dep_domain not in seen:
            reqs.update(gather_recursive_requirements(dep_domain, seen))
    return reqs
def comment_requirement(req):
    """Comment out requirement. Some don't install on all systems."""
    for ign in COMMENT_REQUIREMENTS:
        if ign in req:
            return True
    return False
def gather_modules():
    """Collect requirement info; return None (and print) on import errors."""
    reqs = {}
    errors = []
    gather_requirements_from_manifests(errors, reqs)
    gather_requirements_from_modules(errors, reqs)
    # Sort each requirement's package list by dotted depth, then name.
    for key, packages in reqs.items():
        reqs[key] = sorted(packages, key=lambda name: (len(name.split(".")), name))
    if not errors:
        return reqs
    print("******* ERROR")
    print("Errors while importing: ", ", ".join(errors))
    return None
def gather_requirements_from_manifests(errors, reqs):
    """Gather all of the requirements from integration manifests."""
    integrations = Integration.load_dir(pathlib.Path("homeassistant/components"))
    for domain, integration in sorted(integrations.items()):
        if not integration.manifest:
            errors.append("The manifest for integration {} is invalid.".format(domain))
            continue
        package = "homeassistant.components.{}".format(domain)
        process_requirements(errors, integration.manifest["requirements"], package, reqs)
def gather_requirements_from_modules(errors, reqs):
    """Collect the requirements from the modules directly."""
    packages = explore_module("homeassistant.scripts", True)
    packages += explore_module("homeassistant.auth", True)
    for package in sorted(packages):
        try:
            module = importlib.import_module(package)
        except ImportError as err:
            print("{}: {}".format(package.replace(".", "/") + ".py", err))
            errors.append(package)
            continue
        requirements = getattr(module, "REQUIREMENTS", None)
        if requirements:
            process_requirements(errors, requirements, package, reqs)
def process_requirements(errors, module_requirements, package, reqs):
    """Validate each requirement and record which package declared it."""
    for req in module_requirements:
        if req in IGNORE_REQ:
            continue
        # URL-based requirements are forbidden; only PyPI names are allowed.
        if "://" in req:
            errors.append(
                "{}[Only pypi dependencies are allowed: {}]".format(package, req)
            )
        # Requirements must be pinned with "==" unless explicitly exempted.
        unpinned = req.partition("==")[1] == ""
        if unpinned and req not in IGNORE_PIN:
            errors.append(
                "{}[Please pin requirement {}, see {}]".format(package, req, URL_PIN)
            )
        reqs.setdefault(req, []).append(package)
def generate_requirements_list(reqs):
    """Generate a pip file based on requirements."""
    output = []
    for pkg, requirements in sorted(reqs.items(), key=lambda item: item[0]):
        # List every declaring package as a comment above the requirement.
        for req in sorted(requirements):
            output.append("\n# {}".format(req))
        # Requirements that can't install everywhere are emitted commented out.
        template = "\n# {}\n" if comment_requirement(pkg) else "\n{}\n"
        output.append(template.format(pkg))
    return "".join(output)
def requirements_all_output(reqs):
    """Generate the requirements_all.txt content."""
    parts = [
        "# Home Assistant core",
        "\n",
        "\n".join(core_requirements()),
        "\n",
        generate_requirements_list(reqs),
    ]
    return "".join(parts)
def requirements_test_output(reqs):
    """Generate the requirements_test_all.txt content."""
    with open("requirements_test.txt") as test_file:
        base_test_reqs = test_file.read()
    # Keep only requirements (possibly commented out) whose name matches an
    # entry in TEST_REQUIREMENTS, anchored so e.g. "foo" won't match "foobar".
    filtered = {}
    for key, value in reqs.items():
        if any(
            re.search(r"(^|#){}($|[=><])".format(re.escape(ign)), key) is not None
            for ign in TEST_REQUIREMENTS
        ):
            filtered[key] = value
    parts = [
        "# Home Assistant test",
        "\n",
        base_test_reqs,
        "\n",
        generate_requirements_list(filtered),
    ]
    return "".join(parts)
def gather_constraints():
    """Construct output for constraint file."""
    constraints = core_requirements()
    constraints.extend(gather_recursive_requirements("default_config"))
    # Trailing "" yields a final newline after the join.
    return "\n".join(sorted(constraints) + [""])
def _write_text(path, data):
    """Write ``data`` to ``path`` with Unix newlines (shared writer helper)."""
    with open(path, "w+", newline="\n") as req_file:
        req_file.write(data)


def write_requirements_file(data):
    """Write the modules to the requirements_all.txt."""
    _write_text("requirements_all.txt", data)


def write_test_requirements_file(data):
    """Write the modules to the requirements_test_all.txt."""
    _write_text("requirements_test_all.txt", data)


def write_constraints_file(data):
    """Write constraints to a file."""
    _write_text(CONSTRAINT_PATH, data + CONSTRAINT_BASE)
def _matches_file(path, expected):
    """Return True if the full contents of ``path`` equal ``expected``."""
    with open(path, "r") as req_file:
        return expected == req_file.read()


def validate_requirements_file(data):
    """Validate if requirements_all.txt is up to date."""
    return _matches_file("requirements_all.txt", data)


def validate_requirements_test_file(data):
    """Validate if requirements_test_all.txt is up to date."""
    return _matches_file("requirements_test_all.txt", data)


def validate_constraints_file(data):
    """Validate if constraints is up to date."""
    return _matches_file(CONSTRAINT_PATH, data + CONSTRAINT_BASE)
def main(validate):
    """Run the script; return a process exit code."""
    # Sanity check that we're in the repository root.
    if not os.path.isfile("requirements_all.txt"):
        print("Run this from HA root dir")
        return 1
    data = gather_modules()
    if data is None:
        return 1
    constraints = gather_constraints()
    reqs_file = requirements_all_output(data)
    reqs_test_file = requirements_test_output(data)
    if validate:
        # Check-only mode: report drift, write nothing.
        errors = []
        if not validate_requirements_file(reqs_file):
            errors.append("requirements_all.txt is not up to date")
        if not validate_requirements_test_file(reqs_test_file):
            errors.append("requirements_test_all.txt is not up to date")
        if not validate_constraints_file(constraints):
            errors.append("home-assistant/package_constraints.txt is not up to date")
        if not errors:
            return 0
        print("******* ERROR")
        print("\n".join(errors))
        print("Please run script/gen_requirements_all.py")
        return 1
    write_requirements_file(reqs_file)
    write_test_requirements_file(reqs_test_file)
    write_constraints_file(constraints)
    return 0
if __name__ == "__main__":
    # Passing "validate" as the last CLI argument switches to check-only
    # mode: nothing is written and the exit code reports drift.
    _VAL = sys.argv[-1] == "validate"
    sys.exit(main(_VAL))
| fbradyirl/home-assistant | script/gen_requirements_all.py | Python | apache-2.0 | 11,383 |
/**
*
*/
package me.learn.personal.month2;
/**
* Title 1252 :
*
* Given n and m which are the dimensions of a matrix initialized by zeros and
* given an array indices where indices[i] = [ri, ci]. For each pair of [ri, ci]
* you have to increment all cells in row ri and column ci by 1.
*
* Return the number of cells with odd values in the matrix after applying the
* increment to all indices.
*
* @author bramanarayan
* @date Jun 13, 2020
*/
public class CellsWithOddValuesInMatrix {

    /**
     * Demo entry point: prints 6 for the sample input.
     * @param args unused
     */
    public static void main(String[] args) {
        CellsWithOddValuesInMatrix solution = new CellsWithOddValuesInMatrix();
        System.out.println(solution.oddCells(2, 3, new int[][] { { 0, 1 }, { 1, 1 } }));
    }

    /**
     * Counts cells with an odd value after applying every row/column increment.
     * <p>
     * Instead of materializing the n*m matrix, count increments per row and per
     * column. A cell (i, j) ends up with value rowHits[i] + colHits[j], which is
     * odd exactly when one of the two counts is odd and the other even. This
     * runs in O(n + m + indices.length) time and O(n + m) space, versus the
     * previous O(n*m) matrix walk, with identical results.
     *
     * @param n       number of rows (n &gt; 0)
     * @param m       number of columns (m &gt; 0)
     * @param indices pairs [ri, ci]; each increments all of row ri and column ci
     * @return the number of odd-valued cells
     */
    public int oddCells(int n, int m, int[][] indices) {
        int[] rowHits = new int[n];
        int[] colHits = new int[m];
        for (int[] index : indices) {
            rowHits[index[0]]++;
            colHits[index[1]]++;
        }
        int oddRows = 0;
        for (int hits : rowHits) {
            if ((hits & 1) == 1) {
                oddRows++;
            }
        }
        int oddCols = 0;
        for (int hits : colHits) {
            if ((hits & 1) == 1) {
                oddCols++;
            }
        }
        // Odd cell = (odd row, even col) or (even row, odd col).
        return oddRows * (m - oddCols) + oddCols * (n - oddRows);
    }
}
| balajiboggaram/algorithms | src/me/learn/personal/month2/CellsWithOddValuesInMatrix.java | Java | apache-2.0 | 1,601 |
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2013.11.29 at 12:35:53 PM GMT
//
package org.mule.modules.hybris.model;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for variantTypeDTO complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="variantTypeDTO">
* <complexContent>
* <extension base="{}composedTypeDTO">
* <sequence>
* </sequence>
* </extension>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "variantTypeDTO")
public class VariantTypeDTO
    extends ComposedTypeDTO
{
    // Generated JAXB marker type: a variantTypeDTO adds no fields of its own
    // beyond those inherited from ComposedTypeDTO; the distinct @XmlType name
    // is what distinguishes it in the XML schema.
}
| ryandcarter/hybris-connector | src/main/java/org/mule/modules/hybris/model/VariantTypeDTO.java | Java | apache-2.0 | 1,073 |
/**
* @license Copyright 2020 The Lighthouse Authors. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
'use strict';
/**
* @fileoverview Audit which identifies third-party code on the page which can be lazy loaded.
* The audit will recommend a facade alternative which is used to imitate the third-party resource until it is needed.
*
* Entity: Set of domains which are used by a company or product area to deliver third-party resources
* Product: Specific piece of software belonging to an entity. Entities can have multiple products.
* Facade: Placeholder for a product which looks likes the actual product and replaces itself with that product when the user needs it.
*/
/** @typedef {import("third-party-web").IEntity} ThirdPartyEntity */
/** @typedef {import("third-party-web").IProduct} ThirdPartyProduct*/
/** @typedef {import("third-party-web").IFacade} ThirdPartyFacade*/
/** @typedef {{product: ThirdPartyProduct, entity: ThirdPartyEntity}} FacadableProduct */
const Audit = require('./audit.js');
const i18n = require('../lib/i18n/i18n.js');
const thirdPartyWeb = require('../lib/third-party-web.js');
const NetworkRecords = require('../computed/network-records.js');
const MainResource = require('../computed/main-resource.js');
const MainThreadTasks = require('../computed/main-thread-tasks.js');
const ThirdPartySummary = require('./third-party-summary.js');
const UIStrings = {
/** Title of a diagnostic audit that provides details about the third-party code on a web page that can be lazy loaded with a facade alternative. This descriptive title is shown to users when no resources have facade alternatives available. A facade is a lightweight component which looks like the desired resource. Lazy loading means resources are deferred until they are needed. Third-party code refers to resources that are not within the control of the site owner. */
title: 'Lazy load third-party resources with facades',
/** Title of a diagnostic audit that provides details about the third-party code on a web page that can be lazy loaded with a facade alternative. This descriptive title is shown to users when one or more third-party resources have available facade alternatives. A facade is a lightweight component which looks like the desired resource. Lazy loading means resources are deferred until they are needed. Third-party code refers to resources that are not within the control of the site owner. */
failureTitle: 'Some third-party resources can be lazy loaded with a facade',
/** Description of a Lighthouse audit that identifies the third-party code on the page that can be lazy loaded with a facade alternative. This is displayed after a user expands the section to see more. No character length limits. 'Learn More' becomes link text to additional documentation. A facade is a lightweight component which looks like the desired resource. Lazy loading means resources are deferred until they are needed. Third-party code refers to resources that are not within the control of the site owner. */
description: 'Some third-party embeds can be lazy loaded. ' +
'Consider replacing them with a facade until they are required. [Learn more](https://web.dev/third-party-facades/).',
/** Summary text for the result of a Lighthouse audit that identifies the third-party code on a web page that can be lazy loaded with a facade alternative. This text summarizes the number of lazy loading facades that can be used on the page. A facade is a lightweight component which looks like the desired resource. */
displayValue: `{itemCount, plural,
=1 {# facade alternative available}
other {# facade alternatives available}
}`,
/** Label for a table column that displays the name of the product that a URL is used for. The products in the column will be pieces of software used on the page, like the "YouTube Embedded Player" or the "Drift Live Chat" box. */
columnProduct: 'Product',
/**
* @description Template for a table entry that gives the name of a product which we categorize as video related.
* @example {YouTube Embedded Player} productName
*/
categoryVideo: '{productName} (Video)',
/**
* @description Template for a table entry that gives the name of a product which we categorize as customer success related. Customer success means the product supports customers by offering chat and contact solutions.
* @example {Intercom Widget} productName
*/
categoryCustomerSuccess: '{productName} (Customer Success)',
/**
* @description Template for a table entry that gives the name of a product which we categorize as marketing related.
* @example {Drift Live Chat} productName
*/
categoryMarketing: '{productName} (Marketing)',
/**
* @description Template for a table entry that gives the name of a product which we categorize as social related.
* @example {Facebook Messenger Customer Chat} productName
*/
categorySocial: '{productName} (Social)',
};
// Localized-string factory bound to this file's UIStrings table.
const str_ = i18n.createMessageInstanceIdFn(__filename, UIStrings);
// Maps third-party-web product category ids to the display-name templates
// above. Categories without an entry fall back to the bare product name.
/** @type {Record<string, string>} */
const CATEGORY_UI_MAP = {
  'video': UIStrings.categoryVideo,
  'customer-success': UIStrings.categoryCustomerSuccess,
  'marketing': UIStrings.categoryMarketing,
  'social': UIStrings.categorySocial,
};
class ThirdPartyFacades extends Audit {
  /**
   * Static audit metadata consumed by the Lighthouse runner: the audit id,
   * localized UI strings, and the artifacts this audit requires.
   * @return {LH.Audit.Meta}
   */
  static get meta() {
    return {
      id: 'third-party-facades',
      title: str_(UIStrings.title),
      failureTitle: str_(UIStrings.failureTitle),
      description: str_(UIStrings.description),
      requiredArtifacts: ['traces', 'devtoolsLogs', 'URL'],
    };
  }
  /**
   * Sort items by transfer size and combine small items into a single row.
   * Items will be mutated in place to a maximum of 6 rows (up to 5 detailed
   * rows plus one condensed "other resources" row).
   * @param {ThirdPartySummary.URLSummary[]} items
   */
  static condenseItems(items) {
    // Largest transfer first, so the tail of the array is the small stuff.
    items.sort((a, b) => b.transferSize - a.transferSize);
    // Items <1KB are condensed. If all items are <1KB, condense all but the largest.
    // (findIndex returns 0 in that case; `|| 1` bumps it to 1. A -1 "no match"
    // result passes through `||` unchanged and is handled on the next line.)
    let splitIndex = items.findIndex((item) => item.transferSize < 1000) || 1;
    // Show details for top 5 items.
    if (splitIndex === -1 || splitIndex > 5) splitIndex = 5;
    // If there is only 1 item to condense, leave it as is.
    if (splitIndex >= items.length - 1) return;
    const remainder = items.splice(splitIndex);
    // Seedless reduce folds into remainder[0], mutating that object in place;
    // it was already spliced out of `items`, so reusing it as the row is safe.
    const finalItem = remainder.reduce((result, item) => {
      result.transferSize += item.transferSize;
      result.blockingTime += item.blockingTime;
      return result;
    });
    // If condensed row is still <1KB, don't show it.
    if (finalItem.transferSize < 1000) return;
    finalItem.url = str_(i18n.UIStrings.otherResourcesLabel);
    items.push(finalItem);
  }
/**
* @param {Map<string, ThirdPartySummary.Summary>} byURL
* @param {ThirdPartyEntity | undefined} mainEntity
* @return {FacadableProduct[]}
*/
static getProductsWithFacade(byURL, mainEntity) {
/** @type {Map<string, FacadableProduct>} */
const facadableProductMap = new Map();
for (const url of byURL.keys()) {
const entity = thirdPartyWeb.getEntity(url);
if (!entity || thirdPartyWeb.isFirstParty(url, mainEntity)) continue;
const product = thirdPartyWeb.getProduct(url);
if (!product || !product.facades || !product.facades.length) continue;
if (facadableProductMap.has(product.name)) continue;
facadableProductMap.set(product.name, {product, entity});
}
return Array.from(facadableProductMap.values());
}
/**
* @param {LH.Artifacts} artifacts
* @param {LH.Audit.Context} context
* @return {Promise<LH.Audit.Product>}
*/
static async audit(artifacts, context) {
const settings = context.settings;
const trace = artifacts.traces[Audit.DEFAULT_PASS];
const devtoolsLog = artifacts.devtoolsLogs[Audit.DEFAULT_PASS];
const networkRecords = await NetworkRecords.request(devtoolsLog, context);
const mainResource = await MainResource.request({devtoolsLog, URL: artifacts.URL}, context);
const mainEntity = thirdPartyWeb.getEntity(mainResource.url);
const tasks = await MainThreadTasks.request(trace, context);
const multiplier = settings.throttlingMethod === 'simulate' ?
settings.throttling.cpuSlowdownMultiplier : 1;
const summaries = ThirdPartySummary.getSummaries(networkRecords, tasks, multiplier);
const facadableProducts =
ThirdPartyFacades.getProductsWithFacade(summaries.byURL, mainEntity);
/** @type {LH.Audit.Details.TableItem[]} */
const results = [];
for (const {product, entity} of facadableProducts) {
const categoryTemplate = CATEGORY_UI_MAP[product.categories[0]];
let productWithCategory;
if (categoryTemplate) {
// Display product name with category next to it in the same column.
productWithCategory = str_(categoryTemplate, {productName: product.name});
} else {
// Just display product name if no category is found.
productWithCategory = product.name;
}
const urls = summaries.urls.get(entity);
const entitySummary = summaries.byEntity.get(entity);
if (!urls || !entitySummary) continue;
const items = Array.from(urls).map((url) => {
const urlStats = summaries.byURL.get(url);
return /** @type {ThirdPartySummary.URLSummary} */ ({url, ...urlStats});
});
this.condenseItems(items);
results.push({
product: productWithCategory,
transferSize: entitySummary.transferSize,
blockingTime: entitySummary.blockingTime,
subItems: {type: 'subitems', items},
});
}
if (!results.length) {
return {
score: 1,
notApplicable: true,
};
}
/** @type {LH.Audit.Details.Table['headings']} */
const headings = [
/* eslint-disable max-len */
{key: 'product', itemType: 'text', subItemsHeading: {key: 'url', itemType: 'url'}, text: str_(UIStrings.columnProduct)},
{key: 'transferSize', itemType: 'bytes', subItemsHeading: {key: 'transferSize'}, granularity: 1, text: str_(i18n.UIStrings.columnTransferSize)},
{key: 'blockingTime', itemType: 'ms', subItemsHeading: {key: 'blockingTime'}, granularity: 1, text: str_(i18n.UIStrings.columnBlockingTime)},
/* eslint-enable max-len */
];
return {
score: 0,
displayValue: str_(UIStrings.displayValue, {
itemCount: results.length,
}),
details: Audit.makeTableDetails(headings, results),
};
}
}
// Export the audit class; UIStrings is exposed for i18n string collection.
module.exports = ThirdPartyFacades;
module.exports.UIStrings = UIStrings;
| ev1stensberg/lighthouse | lighthouse-core/audits/third-party-facades.js | JavaScript | apache-2.0 | 11,065 |
/*
* Copyright 2012 LBi Netherlands B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
namespace LBi.Cli.Arguments.Parsing.Ast
{
    /// <summary>
    /// Discriminates the kind of value represented by a literal AST node.
    /// Member order is preserved to keep the underlying enum values stable.
    /// </summary>
    public enum LiteralValueType
    {
        /// <summary>A numeric literal.</summary>
        Numeric,
        /// <summary>A string literal.</summary>
        String,
        /// <summary>The null literal.</summary>
        Null,
        /// <summary>A boolean literal.</summary>
        Boolean,
    }
}
| LBiNetherlands/LBi.Cli.Arguments | LBi.Cli.Arguments/Parsing/Ast/LiteralValueType.cs | C# | apache-2.0 | 759 |
/**
* Copyright 2015 Groupon.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.groupon.promise.function;
import com.groupon.promise.AsyncPromiseFunction;
import com.groupon.promise.ComparablePromiseFunction;
import com.groupon.promise.DefaultPromiseFuture;
import com.groupon.promise.PromiseFuture;
import com.groupon.promise.SyncPromiseFunction;
/**
* Wrapper for getting the future result from a synchronous promise function.
*
* @author Stuart Siegrist (fsiegrist at groupon dot com)
* @since 0.1
*/
public class PromiseFunctionResult<T, O> implements AsyncPromiseFunction<T, O>, ComparablePromiseFunction {
    /** The wrapped synchronous function whose result (or error) is adapted into a future. */
    protected SyncPromiseFunction<T, ? extends O> promiseFunction;

    /**
     * @param promiseFunction the synchronous function to wrap
     */
    public PromiseFunctionResult(SyncPromiseFunction<T, ? extends O> promiseFunction) {
        this.promiseFunction = promiseFunction;
    }

    /**
     * Invokes the wrapped synchronous function and captures its outcome in a future:
     * the return value via {@code setResult}, any thrown {@code Throwable} via
     * {@code setFailure}.
     *
     * @param data input passed to the wrapped function
     * @return a future already completed with the function's result or failure
     */
    @Override
    public PromiseFuture<O> handle(T data) {
        PromiseFuture<O> future = new DefaultPromiseFuture<>();
        try {
            future.setResult(promiseFunction.handle(data));
        } catch (Throwable t) {
            // Deliberately broad: any failure from the wrapped function is routed
            // into the future rather than propagated to the caller.
            future.setFailure(t);
        }
        return future;
    }

    /**
     * Two wrappers are equivalent when they are the same instance or the other
     * {@link ComparablePromiseFunction} reports equivalence to the wrapped function.
     */
    @Override
    public boolean equivalent(Object o) {
        // instanceof is null-safe, so the previous explicit null check was redundant.
        return this == o || (o instanceof ComparablePromiseFunction
                && ((ComparablePromiseFunction) o).equivalent(promiseFunction));
    }
}
| groupon/promise | src/main/java/com/groupon/promise/function/PromiseFunctionResult.java | Java | apache-2.0 | 1,887 |
package com.neilren.neilren4j.dao;
import com.neilren.neilren4j.dbentity.TLogSendEmail;
import java.util.List;
public interface TLogSendEmailMapper {
int deleteByPrimaryKey(Long id);
int insert(TLogSendEmail record);
TLogSendEmail selectByPrimaryKey(Long id);
List<TLogSendEmail> selectAll();
int updateByPrimaryKey(TLogSendEmail record);
} | NeilRen/NEILREN4J | src/main/java/com/neilren/neilren4j/dao/TLogSendEmailMapper.java | Java | apache-2.0 | 366 |
package com.myapp.demo.spring.proxy;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
/**
 * JDK dynamic-proxy {@link InvocationHandler} that wraps a target business
 * object and brackets every method call with {@link PerformanceMonitor}
 * begin/end calls (presumably for timing — see that class).
 *
 * Created by Zhourl on 2017/8/11.
 */
public class PerformanceHandler implements InvocationHandler {// (1) implements InvocationHandler
    // The proxied business object whose method calls are monitored.
    private Object target;
    public PerformanceHandler(Object target) {// (2) target is the business object to be proxied
        this.target = target;
    }
    @Override
    public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        // NOTE(review): if the target method throws, PerformanceMonitor.end()
        // is never reached — verify whether begin/end must always be paired.
        PerformanceMonitor.begin(target.getClass().getName() + "." + method.getName());
        Object obj = method.invoke(target, args);
        PerformanceMonitor.end();
        return obj;
    }
}
| zhouronglv/myapp | myapp-demo/src/main/java/com/myapp/demo/spring/proxy/PerformanceHandler.java | Java | apache-2.0 | 696 |
package io.github.suxil.service;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
import org.springframework.cloud.client.loadbalancer.LoadBalanced;
import org.springframework.cloud.netflix.ribbon.RibbonClients;
import org.springframework.cloud.openfeign.EnableFeignClients;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import org.springframework.web.client.RestTemplate;
/**
 * Spring Boot entry point for the learn-service application.
 *
 * <p>Enables service discovery, Feign clients, Ribbon load balancing,
 * Spring transaction management, and MyBatis mapper scanning for the
 * {@code io.github.suxil} packages.</p>
 *
 * @author luxq
 */
@SpringBootApplication
@ComponentScan("io.github.suxil")
@EnableDiscoveryClient
@EnableFeignClients("io.github.suxil")
@RibbonClients
@EnableTransactionManagement
@MapperScan("io.github.suxil.service.**.mapper")
public class LearnServiceApplication {
    /**
     * Shared {@link RestTemplate} whose requests are resolved through the
     * client-side load balancer (via {@code @LoadBalanced}).
     *
     * @return the load-balanced RestTemplate bean
     */
    @Bean
    @LoadBalanced
    public RestTemplate restTemplate() {
        return new RestTemplate();
    }
    /**
     * Boots the application.
     *
     * @param args command-line arguments forwarded to Spring Boot
     */
    public static void main(String[] args) {
        new SpringApplicationBuilder(LearnServiceApplication.class).run(args);
    }
}
| suxi-lu/learn | learn-service/src/main/java/io/github/suxil/service/LearnServiceApplication.java | Java | apache-2.0 | 1,309 |
package b2
import (
"math/rand"
"os"
"strings"
"testing"
"time"
isi "github.com/cheekybits/is"
"github.com/graymeta/stow"
"github.com/graymeta/stow/test"
)
// TestStow exercises the Backblaze B2 implementation of the stow interfaces
// against the live service. It is skipped unless B2 credentials are present
// in the environment (B2_ACCOUNT_ID / B2_APPLICATION_KEY).
func TestStow(t *testing.T) {
	is := isi.New(t)
	accountID := os.Getenv("B2_ACCOUNT_ID")
	applicationKey := os.Getenv("B2_APPLICATION_KEY")
	if accountID == "" || applicationKey == "" {
		t.Skip("Backblaze credentials missing from environment. Skipping tests")
	}
	cfg := stow.ConfigMap{
		"account_id":      accountID,
		"application_key": applicationKey,
	}
	location, err := stow.Dial("b2", cfg)
	is.NoErr(err)
	is.OK(location)
	t.Run("basic stow interface tests", func(t *testing.T) {
		test.All(t, "b2", cfg)
	})
	// This test is designed to test the container.Items() function. B2 doesn't
	// support listing items in a bucket by prefix, so our implementation fakes this
	// functionality by requesting additional pages of files
	t.Run("Items with prefix", func(t *testing.T) {
		is := isi.New(t)
		container, err := location.CreateContainer("stowtest" + randName(10))
		is.NoErr(err)
		is.OK(container)
		defer func() {
			is.NoErr(location.RemoveContainer(container.ID()))
		}()
		// add some items to the container
		content := "foo"
		item1, err := container.Put("b/a", strings.NewReader(content), int64(len(content)), nil)
		is.NoErr(err)
		item2, err := container.Put("b/bb", strings.NewReader(content), int64(len(content)), nil)
		is.NoErr(err)
		item3, err := container.Put("b/bc", strings.NewReader(content), int64(len(content)), nil)
		is.NoErr(err)
		item4, err := container.Put("b/bd", strings.NewReader(content), int64(len(content)), nil)
		is.NoErr(err)
		defer func() {
			is.NoErr(container.RemoveItem(item1.ID()))
			is.NoErr(container.RemoveItem(item2.ID()))
			is.NoErr(container.RemoveItem(item3.ID()))
			is.NoErr(container.RemoveItem(item4.ID()))
		}()
		// NOTE(review): the expected cursors below look like the next file name
		// followed by a trailing space — confirm against the b2 cursor encoding.
		items, cursor, err := container.Items("b/b", stow.CursorStart, 2)
		is.NoErr(err)
		is.Equal(len(items), 2)
		is.Equal(cursor, "b/bc ")
		items, cursor, err = container.Items("", stow.CursorStart, 2)
		is.NoErr(err)
		is.Equal(len(items), 2)
		is.Equal(cursor, "b/bb ")
	})
	// Deleting an item must remove all of its versions, not just the latest.
	t.Run("Item Delete", func(t *testing.T) {
		is := isi.New(t)
		container, err := location.CreateContainer("stowtest" + randName(10))
		is.NoErr(err)
		is.OK(container)
		defer func() {
			is.NoErr(location.RemoveContainer(container.ID()))
		}()
		// Put an item twice, creating two versions of the file
		content := "foo"
		i, err := container.Put("foo", strings.NewReader(content), int64(len(content)), nil)
		is.NoErr(err)
		content = "foo_v2"
		_, err = container.Put("foo", strings.NewReader(content), int64(len(content)), nil)
		is.NoErr(err)
		is.NoErr(container.RemoveItem(i.ID()))
		// verify item is gone
		_, err = container.Item(i.ID())
		is.Equal(err, stow.ErrNotFound)
	})
}
// randName returns a pseudo-random string of the requested length drawn from
// lowercase ASCII letters; used to generate unique test container names.
func randName(length int) string {
	var sb strings.Builder
	sb.Grow(length)
	for i := 0; i < length; i++ {
		sb.WriteRune(letters[rand.Intn(len(letters))])
	}
	return sb.String()
}

// letters is the alphabet randName draws from.
var letters = []rune("abcdefghijklmnopqrstuvwxyz")
// init seeds the package-global math/rand source so generated container
// names differ across test runs.
// NOTE(review): rand.Seed is deprecated since Go 1.20 — consider a local
// rand.New(rand.NewSource(...)) when the module's Go version is raised.
func init() {
	rand.Seed(int64(time.Now().Nanosecond()))
}
| graymeta/stow | b2/stow_test.go | GO | apache-2.0 | 3,088 |
package marki.at.Client.events;
import marki.at.Client.utils.Message;
/**
 * Event carrying a newly received {@link Message}, intended for event-bus
 * subscribers.
 *
 * <p>NOTE(review): the class name breaks Java naming conventions (should be
 * {@code NewMessageEvent}); it is left unchanged here because renaming would
 * break existing references.</p>
 *
 * Created by marki on 29.10.13.
 */
public class newMessageEvent {
    // Immutable payload carried by this event.
    public final Message message;
    public newMessageEvent(Message message) {
        this.message = message;
    }
}
| markini/ServiceMonitoringTestSystem | Client/src/main/java/marki/at/Client/events/newMessageEvent.java | Java | apache-2.0 | 266 |
using System;
using System.Collections.Generic;
using System.Text;
using WikiClientLibrary.Client;
namespace WikiClientLibrary.Sites
{
    /// <summary>
    /// Represents a token placeholder in the <see cref="MediaWikiFormRequestMessage"/>.
    /// This enables <see cref="WikiSite"/> to detect bad tokens.
    /// </summary>
    public sealed class WikiSiteToken
    {
        // Well-known shared token placeholders. Declared readonly so the
        // shared instances cannot be accidentally reassigned by callers.
        public static readonly WikiSiteToken Edit = new WikiSiteToken("edit");

        public static readonly WikiSiteToken Move = new WikiSiteToken("move");

        public static readonly WikiSiteToken Delete = new WikiSiteToken("delete");

        public static readonly WikiSiteToken Patrol = new WikiSiteToken("patrol");

        /// <param name="type">The token type name, e.g. "edit".</param>
        /// <exception cref="ArgumentNullException"><paramref name="type"/> is <c>null</c>.</exception>
        public WikiSiteToken(string type)
        {
            Type = type ?? throw new ArgumentNullException(nameof(type));
        }

        /// <summary>The token type name, e.g. "edit" or "move".</summary>
        public string Type { get; }
    }
}
| CXuesong/WikiClientLibrary | WikiClientLibrary/Sites/WikiSiteToken.cs | C# | apache-2.0 | 850 |
/**
* Code contributed to the Learning Layers project
* http://www.learning-layers.eu
* Development is partly funded by the FP7 Programme of the European Commission under
* Grant Agreement FP7-ICT-318209.
* Copyright (c) 2016, Graz University of Technology - KTI (Knowledge Technologies Institute).
* For a list of contributors see the AUTHORS file at the top-level directory of this distribution.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package at.tugraz.sss.servs.livingdocument.datatype;
import at.tugraz.sss.serv.db.api.*;
/**
 * Names of the SQL tables used by the living-document service; each constant
 * identifies one table via the {@link SSSQLTableI} contract.
 */
public enum SSLivingDocSQLTableE implements SSSQLTableI{
  /** Table holding living documents. */
  livingdoc,
  /** Table holding the users associated with living documents. */
  livingdocusers;
}
| learning-layers/SocialSemanticServer | servs/livingdocument/livingdocument.datatype/src/main/java/at/tugraz/sss/servs/livingdocument/datatype/SSLivingDocSQLTableE.java | Java | apache-2.0 | 1,146 |
/* @flow */
import Crypto from '../components/cryptography';
import Config from '../components/config';
import ListenerManager from '../components/listener_manager';
import ReconnectionManager from '../components/reconnection_manager';
import utils from '../utils';
import { MessageAnnouncement, SubscribeEnvelope, StatusAnnouncement, PresenceAnnouncement } from '../flow_interfaces';
import categoryConstants from '../constants/categories';
// Arguments accepted by adaptSubscribeChange().
type SubscribeArgs = {
  channels: Array<string>,
  channelGroups: Array<string>,
  withPresence: ?boolean,
  timetoken: ?number
}

// Arguments accepted by adaptUnsubscribeChange().
type UnsubscribeArgs = {
  channels: Array<string>,
  channelGroups: Array<string>
}

// Arguments accepted by adaptStateChange().
type StateArgs = {
  channels: Array<string>,
  channelGroups: Array<string>,
  state: Object
}

// Constructor dependencies for the subscription manager.
// NOTE(review): "Consturct" is a typo, but the name is referenced by the
// constructor signature below, so it is left unchanged here.
type SubscriptionManagerConsturct = {
  leaveEndpoint: Function,
  subscribeEndpoint: Function,
  timeEndpoint: Function,
  heartbeatEndpoint: Function,
  setStateEndpoint: Function,
  config: Config,
  crypto: Crypto,
  listenerManager: ListenerManager
}
export default class {
_crypto: Crypto;
_config: Config;
_listenerManager: ListenerManager;
_reconnectionManager: ReconnectionManager;
_leaveEndpoint: Function;
_heartbeatEndpoint: Function;
_setStateEndpoint: Function;
_subscribeEndpoint: Function;
_channels: Object;
_presenceChannels: Object;
_channelGroups: Object;
_presenceChannelGroups: Object;
_timetoken: number;
_region: number;
_subscribeCall: ?Object;
_heartbeatTimer: ?number;
_subscriptionStatusAnnounced: boolean;
constructor({ subscribeEndpoint, leaveEndpoint, heartbeatEndpoint, setStateEndpoint, timeEndpoint, config, crypto, listenerManager }: SubscriptionManagerConsturct) {
this._listenerManager = listenerManager;
this._config = config;
this._leaveEndpoint = leaveEndpoint;
this._heartbeatEndpoint = heartbeatEndpoint;
this._setStateEndpoint = setStateEndpoint;
this._subscribeEndpoint = subscribeEndpoint;
this._crypto = crypto;
this._channels = {};
this._presenceChannels = {};
this._channelGroups = {};
this._presenceChannelGroups = {};
this._timetoken = 0;
this._subscriptionStatusAnnounced = false;
this._reconnectionManager = new ReconnectionManager({ timeEndpoint });
}
adaptStateChange(args: StateArgs, callback: Function) {
const { state, channels = [], channelGroups = [] } = args;
channels.forEach((channel) => {
if (channel in this._channels) this._channels[channel].state = state;
});
channelGroups.forEach((channelGroup) => {
if (channelGroup in this._channelGroups) this._channelGroups[channelGroup].state = state;
});
this._setStateEndpoint({ state, channels, channelGroups }, callback);
}
adaptSubscribeChange(args: SubscribeArgs) {
const { timetoken, channels = [], channelGroups = [], withPresence = false } = args;
if (timetoken) this._timetoken = timetoken;
channels.forEach((channel) => {
this._channels[channel] = { state: {} };
if (withPresence) this._presenceChannels[channel] = {};
});
channelGroups.forEach((channelGroup) => {
this._channelGroups[channelGroup] = { state: {} };
if (withPresence) this._presenceChannelGroups[channelGroup] = {};
});
this._subscriptionStatusAnnounced = false;
this.reconnect();
}
adaptUnsubscribeChange(args: UnsubscribeArgs) {
const { channels = [], channelGroups = [] } = args;
channels.forEach((channel) => {
if (channel in this._channels) delete this._channels[channel];
if (channel in this._presenceChannels) delete this._presenceChannels[channel];
});
channelGroups.forEach((channelGroup) => {
if (channelGroup in this._channelGroups) delete this._channelGroups[channelGroup];
if (channelGroup in this._presenceChannelGroups) delete this._channelGroups[channelGroup];
});
if (this._config.suppressLeaveEvents === false) {
this._leaveEndpoint({ channels, channelGroups }, (status) => {
this._listenerManager.announceStatus(status);
});
}
this.reconnect();
}
unsubscribeAll() {
this.adaptUnsubscribeChange({ channels: this.getSubscribedChannels(), channelGroups: this.getSubscribedChannelGroups() });
}
getSubscribedChannels() {
return Object.keys(this._channels);
}
getSubscribedChannelGroups() {
return Object.keys(this._channelGroups);
}
reconnect() {
this._startSubscribeLoop();
this._registerHeartbeatTimer();
}
disconnect() {
this._stopSubscribeLoop();
this._stopHeartbeatTimer();
}
_registerHeartbeatTimer() {
this._stopHeartbeatTimer();
this._performHeartbeatLoop();
this._heartbeatTimer = setInterval(this._performHeartbeatLoop.bind(this), this._config.getHeartbeatInterval() * 1000);
}
_stopHeartbeatTimer() {
if (this._heartbeatTimer) {
clearInterval(this._heartbeatTimer);
this._heartbeatTimer = null;
}
}
_performHeartbeatLoop() {
let presenceChannels = Object.keys(this._channels);
let presenceChannelGroups = Object.keys(this._channelGroups);
let presenceState = {};
if (presenceChannels.length === 0 && presenceChannelGroups.length === 0) {
return;
}
presenceChannels.forEach((channel) => {
let channelState = this._channels[channel].state;
if (Object.keys(channelState).length) presenceState[channel] = channelState;
});
presenceChannelGroups.forEach((channelGroup) => {
let channelGroupState = this._channelGroups[channelGroup].state;
if (Object.keys(channelGroupState).length) presenceState[channelGroup] = channelGroupState;
});
let onHeartbeat = (status: StatusAnnouncement) => {
if (status.error && this._config.announceFailedHeartbeats) {
this._listenerManager.announceStatus(status);
}
if (!status.error && this._config.announceSuccessfulHeartbeats) {
this._listenerManager.announceStatus(status);
}
};
this._heartbeatEndpoint({
channels: presenceChannels,
channelGroups: presenceChannelGroups,
state: presenceState }, onHeartbeat.bind(this));
}
_startSubscribeLoop() {
this._stopSubscribeLoop();
let channels = [];
let channelGroups = [];
Object.keys(this._channels).forEach(channel => channels.push(channel));
Object.keys(this._presenceChannels).forEach(channel => channels.push(channel + '-pnpres'));
Object.keys(this._channelGroups).forEach(channelGroup => channelGroups.push(channelGroup));
Object.keys(this._presenceChannelGroups).forEach(channelGroup => channelGroups.push(channelGroup + '-pnpres'));
if (channels.length === 0 && channelGroups.length === 0) {
return;
}
const subscribeArgs = {
channels,
channelGroups,
timetoken: this._timetoken,
filterExpression: this._config.filterExpression,
region: this._region
};
this._subscribeCall = this._subscribeEndpoint(subscribeArgs, this._processSubscribeResponse.bind(this));
}
_processSubscribeResponse(status: StatusAnnouncement, payload: SubscribeEnvelope) {
if (status.error) {
// if we timeout from server, restart the loop.
if (status.category === categoryConstants.PNTimeoutCategory) {
this._startSubscribeLoop();
}
// we lost internet connection, alert the reconnection manager and terminate all loops
if (status.category === categoryConstants.PNNetworkIssuesCategory) {
this.disconnect();
this._reconnectionManager.onReconnection(() => {
this.reconnect();
this._subscriptionStatusAnnounced = true;
let reconnectedAnnounce: StatusAnnouncement = {
category: categoryConstants.PNReconnectedCategory,
operation: status.operation
};
this._listenerManager.announceStatus(reconnectedAnnounce);
});
this._reconnectionManager.startPolling();
this._listenerManager.announceStatus(status);
}
return;
}
if (!this._subscriptionStatusAnnounced) {
let connectedAnnounce: StatusAnnouncement = {};
connectedAnnounce.category = categoryConstants.PNConnectedCategory;
connectedAnnounce.operation = status.operation;
this._subscriptionStatusAnnounced = true;
this._listenerManager.announceStatus(connectedAnnounce);
}
payload.messages.forEach((message) => {
let channel = message.channel;
let subscriptionMatch = message.subscriptionMatch;
let publishMetaData = message.publishMetaData;
if (channel === subscriptionMatch) {
subscriptionMatch = null;
}
if (utils.endsWith(message.channel, '-pnpres')) {
let announce: PresenceAnnouncement = {};
announce.channel = null;
announce.subscription = null;
// deprecated -->
announce.actualChannel = (subscriptionMatch != null) ? channel : null;
announce.subscribedChannel = subscriptionMatch != null ? subscriptionMatch : channel;
// <-- deprecated
if (channel) {
announce.channel = channel.substring(0, channel.lastIndexOf('-pnpres'));
}
if (subscriptionMatch) {
announce.subscription = subscriptionMatch.substring(0, subscriptionMatch.lastIndexOf('-pnpres'));
}
announce.action = message.payload.action;
announce.state = message.payload.data;
announce.timetoken = publishMetaData.publishTimetoken;
announce.occupancy = message.payload.occupancy;
announce.uuid = message.payload.uuid;
announce.timestamp = message.payload.timestamp;
this._listenerManager.announcePresence(announce);
} else {
let announce: MessageAnnouncement = {};
announce.channel = null;
announce.subscription = null;
// deprecated -->
announce.actualChannel = (subscriptionMatch != null) ? channel : null;
announce.subscribedChannel = subscriptionMatch != null ? subscriptionMatch : channel;
// <-- deprecated
announce.channel = channel;
announce.subscription = subscriptionMatch;
announce.timetoken = publishMetaData.publishTimetoken;
if (this._config.cipherKey) {
announce.message = this._crypto.decrypt(message.payload);
} else {
announce.message = message.payload;
}
this._listenerManager.announceMessage(announce);
}
});
this._region = payload.metadata.region;
this._timetoken = payload.metadata.timetoken;
this._startSubscribeLoop();
}
_stopSubscribeLoop() {
if (this._subscribeCall) {
this._subscribeCall.abort();
this._subscribeCall = null;
}
}
}
| amriteshkumar1/sales-service | node_modules/pubnub/src/core/components/subscription_manager.js | JavaScript | apache-2.0 | 10,768 |
package com.antarescraft.kloudy.slots.events;
import org.bukkit.command.Command;
import org.bukkit.command.CommandExecutor;
import org.bukkit.command.CommandSender;
import org.bukkit.entity.Player;
import com.antarescraft.kloudy.hologuiapi.plugincore.command.CommandHandler;
import com.antarescraft.kloudy.hologuiapi.plugincore.command.CommandParser;
import com.antarescraft.kloudy.hologuiapi.plugincore.messaging.MessageManager;
import com.antarescraft.kloudy.slots.Slots;
import com.antarescraft.kloudy.slots.SlotsConfiguration;
import com.antarescraft.kloudy.slots.pagemodels.SlotsPageModel;
public class CommandEvent implements CommandExecutor
{
protected Slots slots;
protected SlotsConfiguration config;
public CommandEvent(Slots plugin)
{
this.slots = plugin;
config = SlotsConfiguration.getSlotsConfiguration(slots);
}
@Override
public boolean onCommand(CommandSender sender, Command cmd, String label, String[] args)
{
return CommandParser.parseCommand(slots, this, "slots", cmd.getName(), sender, args);
}
@CommandHandler(description = "Reloads the config files",
mustBePlayer = false, permission = "slots.admin", subcommands = "reload")
public void reload(CommandSender sender, String[] args)
{
slots.getHoloGUIApi().destroyGUIPages(slots);
slots.removeAllPlayers();
SlotsConfiguration.loadConfig(slots);
MessageManager.info(sender, "Reloaded the config");
}
@CommandHandler(description = "Opens the Slots GUI",
mustBePlayer = true, permission = "slots.play", subcommands = "play")
public void play(CommandSender sender, String[] args)
{
Player player = (Player)sender;
if(!slots.isPlaying(player))
{
SlotsPageModel model = new SlotsPageModel(slots, slots.getGUIPage("slot-machine"), player);
slots.getHoloGUIApi().openGUIPage(slots, model);
slots.isPlaying(player, true);
}
else
{
player.sendMessage(config.getAlreadyPlayingMessage());
}
}
} | Kloudy/Slots | src/com/antarescraft/kloudy/slots/events/CommandEvent.java | Java | apache-2.0 | 1,946 |
package com.vmware.vim25;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;
/**
 * <p>Java class for AnswerFileSerializedCreateSpec complex type.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * <complexType name="AnswerFileSerializedCreateSpec">
 *   <complexContent>
 *     <extension base="{urn:vim25}AnswerFileCreateSpec">
 *       <sequence>
 *         <element name="answerFileConfigString" type="{http://www.w3.org/2001/XMLSchema}string"/>
 *       </sequence>
 *     </extension>
 *   </complexContent>
 * </complexType>
 * </pre>
 *
 * <p>NOTE(review): this appears to be a JAXB-generated binding class —
 * avoid hand-editing logic here, as it may be regenerated from the schema.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "AnswerFileSerializedCreateSpec", propOrder = {
    "answerFileConfigString"
})
public class AnswerFileSerializedCreateSpec
    extends AnswerFileCreateSpec
{
    @XmlElement(required = true)
    protected String answerFileConfigString;
    /**
     * Gets the value of the answerFileConfigString property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getAnswerFileConfigString() {
        return answerFileConfigString;
    }
    /**
     * Sets the value of the answerFileConfigString property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setAnswerFileConfigString(String value) {
        this.answerFileConfigString = value;
    }
}
| jdgwartney/vsphere-ws | java/JAXWS/samples/com/vmware/vim25/AnswerFileSerializedCreateSpec.java | Java | apache-2.0 | 1,691 |
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.core.loader.impl;
import org.deeplearning4j.core.loader.DataSetLoader;
import org.nd4j.common.loader.Source;
import org.nd4j.linalg.dataset.DataSet;
import java.io.IOException;
import java.io.InputStream;
/**
 * A {@link DataSetLoader} that reads {@link DataSet}s previously serialized
 * with {@code DataSet.save}, via {@link DataSet#load(InputStream)}.
 *
 * @author Alex Black
 */
public class SerializedDataSetLoader implements DataSetLoader {

    /**
     * Deserializes a {@link DataSet} from the given source.
     *
     * @param source where to read the serialized DataSet from
     * @return the deserialized DataSet
     * @throws IOException if the source cannot be opened or read
     */
    @Override
    public DataSet load(Source source) throws IOException {
        final DataSet dataSet = new DataSet();
        try (InputStream stream = source.getInputStream()) {
            dataSet.load(stream);
        }
        return dataSet;
    }
}
| deeplearning4j/deeplearning4j | deeplearning4j/deeplearning4j-core/src/main/java/org/deeplearning4j/core/loader/impl/SerializedDataSetLoader.java | Java | apache-2.0 | 1,380 |
/*
* $Id$
*
* SARL is an general-purpose agent programming language.
* More details on http://www.sarl.io
*
* Copyright (C) 2014-2021 the original authors or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.sarl.lang.sarl.actionprototype;
import org.eclipse.emf.ecore.EObject;
import org.eclipse.xtext.common.types.JvmTypeReference;
import org.eclipse.xtext.xbase.XExpression;
/** An object able to provide the name, type, and default value of a formal parameter.
 *
 * @author $Author: sgalland$
 * @version $FullVersion$
 * @mavengroupid $GroupId$
 * @mavenartifactid $ArtifactId$
 */
public interface FormalParameterProvider {

	/** Replies the number of formal parameters.
	 *
	 * @return the number of formal parameters.
	 */
	int getFormalParameterCount();

	/** Replies the name of the formal parameter at the given position.
	 *
	 * @param position the position of the formal parameter.
	 * @return the name of the formal parameter.
	 */
	String getFormalParameterName(int position);

	/** Replies the type of the formal parameter at the given position, as a string.
	 *
	 * @param position the position of the formal parameter.
	 * @param isVarargs indicates if the parameter should be considered as a vararg parameter.
	 * @return the type of the formal parameter.
	 */
	String getFormalParameterType(int position, boolean isVarargs);

	/** Replies the type of the formal parameter at the given position, as a type reference.
	 *
	 * @param position the position of the formal parameter.
	 * @param isVarargs indicates if the parameter should be considered as a vararg parameter.
	 * @return the type of the formal parameter.
	 */
	JvmTypeReference getFormalParameterTypeReference(int position, boolean isVarargs);

	/** Replies if the formal parameter at the given position has a default value.
	 *
	 * @param position the position of the formal parameter.
	 * @return {@code true} if the parameter has a default value, {@code false} if not.
	 */
	boolean hasFormalParameterDefaultValue(int position);

	/** Replies the default value of the formal parameter at the given position.
	 *
	 * <p>This function replies the Xbase expression for the default value.
	 *
	 * <p>If this function replies {@code null}, the string representation of the
	 * default value may still be available. See {@link #getFormalParameterDefaultValueString(int)}.
	 *
	 * @param position the position of the formal parameter.
	 * @return the default value, or {@code null} if none.
	 * @see #getFormalParameterDefaultValueString(int)
	 */
	XExpression getFormalParameterDefaultValue(int position);

	/** Replies the default value of the formal parameter at the given position.
	 *
	 * <p>This function replies the string representation of the default value.
	 *
	 * <p>If this function replies {@code null} or an empty string of characters, the Xbase representation of the
	 * default value may still be available. See {@link #getFormalParameterDefaultValue(int)}.
	 *
	 * @param position the position of the formal parameter.
	 * @return the default value, or {@code null} if none.
	 * @see #getFormalParameterDefaultValue(int)
	 */
	String getFormalParameterDefaultValueString(int position);

	/** Replies the formal parameter at the given position.
	 *
	 * @param position the position of the formal parameter.
	 * @return the formal parameter
	 */
	EObject getFormalParameter(int position);

}
| sarl/sarl | main/coreplugins/io.sarl.lang/src/io/sarl/lang/sarl/actionprototype/FormalParameterProvider.java | Java | apache-2.0 | 3,887 |
/*
* Copyright 2016 Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.drivers.lumentum;
import com.google.common.collect.Lists;
import org.apache.commons.lang3.tuple.Pair;
import org.onosproject.net.ChannelSpacing;
import org.onosproject.net.GridType;
import org.onosproject.net.OchSignal;
import org.onosproject.net.OchSignalType;
import org.onosproject.net.Port;
import org.onosproject.net.PortNumber;
import org.onosproject.net.device.DeviceService;
import org.onosproject.net.driver.AbstractHandlerBehaviour;
import org.onosproject.net.flow.DefaultFlowEntry;
import org.onosproject.net.flow.DefaultFlowRule;
import org.onosproject.net.flow.DefaultTrafficSelector;
import org.onosproject.net.flow.DefaultTrafficTreatment;
import org.onosproject.net.flow.FlowEntry;
import org.onosproject.net.flow.FlowId;
import org.onosproject.net.flow.FlowRule;
import org.onosproject.net.flow.FlowRuleProgrammable;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.TrafficTreatment;
import org.onosproject.net.flow.criteria.Criteria;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.snmp4j.PDU;
import org.snmp4j.event.ResponseEvent;
import org.snmp4j.smi.Integer32;
import org.snmp4j.smi.OID;
import org.snmp4j.smi.UnsignedInteger32;
import org.snmp4j.smi.VariableBinding;
import org.snmp4j.util.TreeEvent;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import static com.google.common.base.Preconditions.checkArgument;
// TODO: need to convert between OChSignal and XC channel number
/**
 * {@link FlowRuleProgrammable} behaviour for a Lumentum ROADM, driven over SNMP.
 *
 * <p>Flow rules are mapped to optical cross connects: an "add" rule routes a
 * channel from an add/drop port to the line-out port, a "drop" rule routes a
 * channel from the line-in port to an add/drop port. Flow IDs and priorities
 * cannot be stored on the device, so they are kept in a {@link CrossConnectCache}
 * keyed by a hash of (device, selector, treatment).
 */
public class LumentumFlowRuleDriver extends AbstractHandlerBehaviour implements FlowRuleProgrammable {
    private static final Logger log =
            LoggerFactory.getLogger(LumentumFlowRuleDriver.class);

    // Default values written to the device. Gain/power values are raw SNMP
    // integers; presumably tenths of dB/dBm — confirm against the Lumentum MIB.
    private static final int DEFAULT_TARGET_GAIN_PREAMP = 150;
    private static final int DEFAULT_TARGET_GAIN_BOOSTER = 200;
    private static final int DISABLE_CHANNEL_TARGET_POWER = -650;
    private static final int DEFAULT_CHANNEL_TARGET_POWER = -30;
    private static final int DISABLE_CHANNEL_ABSOLUTE_ATTENUATION = 160;
    private static final int DEFAULT_CHANNEL_ABSOLUTE_ATTENUATION = 50;
    private static final int OUT_OF_SERVICE = 1;
    private static final int IN_SERVICE = 2;
    private static final int OPEN_LOOP = 1;
    private static final int CLOSED_LOOP = 2;
    // Removed channels are re-attached to this default add/drop port index.
    private static final int DEFAULT_ADD_DROP_PORT_INDEX = 1;

    // OIDs. The channel OIDs are prefixes: "<prefix><1|2>.<channel>" where
    // 1 selects the add direction and 2 the drop direction.
    private static final String CTRL_AMP_MODULE_SERVICE_STATE_PREAMP = ".1.3.6.1.4.1.46184.1.4.4.1.2.1";
    private static final String CTRL_AMP_MODULE_SERVICE_STATE_BOOSTER = ".1.3.6.1.4.1.46184.1.4.4.1.2.2";
    private static final String CTRL_AMP_MODULE_TARGET_GAIN_PREAMP = ".1.3.6.1.4.1.46184.1.4.4.1.8.1";
    private static final String CTRL_AMP_MODULE_TARGET_GAIN_BOOSTER = ".1.3.6.1.4.1.46184.1.4.4.1.8.2";
    private static final String CTRL_CHANNEL_STATE = ".1.3.6.1.4.1.46184.1.4.2.1.3.";
    private static final String CTRL_CHANNEL_MODE = ".1.3.6.1.4.1.46184.1.4.2.1.4.";
    private static final String CTRL_CHANNEL_TARGET_POWER = ".1.3.6.1.4.1.46184.1.4.2.1.6.";
    private static final String CTRL_CHANNEL_ADD_DROP_PORT_INDEX = ".1.3.6.1.4.1.46184.1.4.2.1.13.";
    private static final String CTRL_CHANNEL_ABSOLUTE_ATTENUATION = ".1.3.6.1.4.1.46184.1.4.2.1.5.";

    private LumentumSnmpDevice snmp;

    /**
     * Reads the in-service add and drop channels from the device and maps each
     * back to a {@link FlowEntry}, using the cross connect cache for flow
     * IDs/priorities. Returns an empty collection if the device cannot be
     * reached or fewer than two ports are known.
     */
    @Override
    public Collection<FlowEntry> getFlowEntries() {
        try {
            snmp = new LumentumSnmpDevice(handler().data().deviceId());
        } catch (IOException e) {
            log.error("Failed to connect to device: ", e);
            return Collections.emptyList();
        }

        // Line in is last but one port, line out is last
        DeviceService deviceService = this.handler().get(DeviceService.class);
        List<Port> ports = deviceService.getPorts(data().deviceId());
        if (ports.size() < 2) {
            return Collections.emptyList();
        }
        PortNumber lineIn = ports.get(ports.size() - 2).number();
        PortNumber lineOut = ports.get(ports.size() - 1).number();

        Collection<FlowEntry> entries = Lists.newLinkedList();

        // Add rules (direction index 1): traffic leaves on line out.
        OID addOid = new OID(CTRL_CHANNEL_STATE + "1");
        entries.addAll(
                fetchRules(addOid, true, lineOut).stream()
                        .map(fr -> new DefaultFlowEntry(fr, FlowEntry.FlowEntryState.ADDED, 0, 0, 0))
                        .collect(Collectors.toList())
        );

        // Drop rules (direction index 2): traffic arrives on line in.
        OID dropOid = new OID(CTRL_CHANNEL_STATE + "2");
        entries.addAll(
                fetchRules(dropOid, false, lineIn).stream()
                        .map(fr -> new DefaultFlowEntry(fr, FlowEntry.FlowEntryState.ADDED, 0, 0, 0))
                        .collect(Collectors.toList())
        );

        return entries;
    }

    /**
     * Applies the given rules as cross connects and returns the subset that was
     * successfully installed. Successful rules are recorded in the
     * {@link CrossConnectCache} so their flow ID/priority can be recovered by
     * {@link #getFlowEntries()}.
     */
    @Override
    public Collection<FlowRule> applyFlowRules(Collection<FlowRule> rules) {
        try {
            snmp = new LumentumSnmpDevice(data().deviceId());
        } catch (IOException e) {
            log.error("Failed to connect to device: ", e);
        }

        // Line ports are the last two ports reported by the device service.
        DeviceService deviceService = this.handler().get(DeviceService.class);
        List<Port> ports = deviceService.getPorts(data().deviceId());
        List<PortNumber> linePorts = ports.subList(ports.size() - 2, ports.size()).stream()
                .map(p -> p.number())
                .collect(Collectors.toList());

        // Apply the valid rules on the device
        Collection<FlowRule> added = rules.stream()
                .map(r -> new CrossConnectFlowRule(r, linePorts))
                .filter(xc -> installCrossConnect(xc))
                .collect(Collectors.toList());

        // Cache the cookie/priority
        CrossConnectCache cache = this.handler().get(CrossConnectCache.class);
        added.stream()
                .forEach(xc -> cache.set(
                        Objects.hash(data().deviceId(), xc.selector(), xc.treatment()),
                        xc.id(),
                        xc.priority()));

        return added;
    }

    /**
     * Removes the given rules' cross connects and returns the subset that was
     * successfully removed; removed rules are evicted from the cache.
     */
    @Override
    public Collection<FlowRule> removeFlowRules(Collection<FlowRule> rules) {
        try {
            snmp = new LumentumSnmpDevice(data().deviceId());
        } catch (IOException e) {
            log.error("Failed to connect to device: ", e);
        }

        // Line ports are the last two ports reported by the device service.
        DeviceService deviceService = this.handler().get(DeviceService.class);
        List<Port> ports = deviceService.getPorts(data().deviceId());
        List<PortNumber> linePorts = ports.subList(ports.size() - 2, ports.size()).stream()
                .map(p -> p.number())
                .collect(Collectors.toList());

        // Apply the valid rules on the device
        Collection<FlowRule> removed = rules.stream()
                .map(r -> new CrossConnectFlowRule(r, linePorts))
                .filter(xc -> removeCrossConnect(xc))
                .collect(Collectors.toList());

        // Remove flow rule from cache
        CrossConnectCache cache = this.handler().get(CrossConnectCache.class);
        removed.stream()
                .forEach(xc -> cache.remove(
                        Objects.hash(data().deviceId(), xc.selector(), xc.treatment())));

        return removed;
    }

    /**
     * Installs a cross connect on the device.
     *
     * @param xc cross connect rule to install
     * @return true if the SET request was sent, false if the device could not
     *         be reached
     */
    private boolean installCrossConnect(CrossConnectFlowRule xc) {
        int channel = toChannel(xc.ochSignal());
        long addDrop = xc.addDrop().toLong();

        // Create the PDU object
        PDU pdu = new PDU();
        pdu.setType(PDU.SET);

        // Enable preamp & booster
        List<OID> oids = Arrays.asList(new OID(CTRL_AMP_MODULE_SERVICE_STATE_PREAMP),
                new OID(CTRL_AMP_MODULE_SERVICE_STATE_BOOSTER));
        oids.forEach(
                oid -> pdu.add(new VariableBinding(oid, new Integer32(IN_SERVICE)))
        );

        // Set target gain on preamp & booster
        OID ctrlAmpModuleTargetGainPreamp = new OID(CTRL_AMP_MODULE_TARGET_GAIN_PREAMP);
        pdu.add(new VariableBinding(ctrlAmpModuleTargetGainPreamp, new Integer32(DEFAULT_TARGET_GAIN_PREAMP)));
        OID ctrlAmpModuleTargetGainBooster = new OID(CTRL_AMP_MODULE_TARGET_GAIN_BOOSTER);
        pdu.add(new VariableBinding(ctrlAmpModuleTargetGainBooster, new Integer32(DEFAULT_TARGET_GAIN_BOOSTER)));

        // Make cross connect
        OID ctrlChannelAddDropPortIndex = new OID(CTRL_CHANNEL_ADD_DROP_PORT_INDEX +
                (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelAddDropPortIndex, new UnsignedInteger32(addDrop)));

        // Add rules use closed loop, drop rules open loop
        // Add rules are set to target power, drop rules are attenuated
        if (xc.isAddRule()) {
            OID ctrlChannelMode = new OID(CTRL_CHANNEL_MODE + "1." + channel);
            pdu.add(new VariableBinding(ctrlChannelMode, new Integer32(CLOSED_LOOP)));

            OID ctrlChannelTargetPower = new OID(CTRL_CHANNEL_TARGET_POWER + "1." + channel);
            pdu.add(new VariableBinding(ctrlChannelTargetPower, new Integer32(DEFAULT_CHANNEL_TARGET_POWER)));
        } else {
            OID ctrlChannelMode = new OID(CTRL_CHANNEL_MODE + "2." + channel);
            pdu.add(new VariableBinding(ctrlChannelMode, new Integer32(OPEN_LOOP)));

            OID ctrlChannelAbsoluteAttenuation = new OID(CTRL_CHANNEL_ABSOLUTE_ATTENUATION + "2." + channel);
            pdu.add(new VariableBinding(
                    ctrlChannelAbsoluteAttenuation, new UnsignedInteger32(DEFAULT_CHANNEL_ABSOLUTE_ATTENUATION)));
        }

        // Final step is to enable the channel
        OID ctrlChannelState = new OID(CTRL_CHANNEL_STATE + (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelState, new Integer32(IN_SERVICE)));

        try {
            ResponseEvent response = snmp.set(pdu);

            // TODO: parse response
        } catch (IOException e) {
            log.error("Failed to create cross connect, unable to connect to device: ", e);
            // BUGFIX: previously returned true here, so applyFlowRules reported
            // unreachable devices as successfully programmed. Mirror
            // removeCrossConnect and report failure.
            return false;
        }

        return true;
    }

    /**
     * Removes a cross connect from the device: disables the channel, re-attaches
     * it to the default add/drop port, reverts it to open loop and resets its
     * power/attenuation.
     *
     * @param xc cross connect rule to remove
     * @return true if the SET request was sent, false if the device could not
     *         be reached
     */
    private boolean removeCrossConnect(CrossConnectFlowRule xc) {
        int channel = toChannel(xc.ochSignal());

        // Create the PDU object
        PDU pdu = new PDU();
        pdu.setType(PDU.SET);

        // Disable the channel
        OID ctrlChannelState = new OID(CTRL_CHANNEL_STATE + (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelState, new Integer32(OUT_OF_SERVICE)));

        // Put cross connect back into default port 1
        OID ctrlChannelAddDropPortIndex = new OID(CTRL_CHANNEL_ADD_DROP_PORT_INDEX +
                (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelAddDropPortIndex,
                new UnsignedInteger32(DEFAULT_ADD_DROP_PORT_INDEX)));

        // Put port/channel back to open loop
        OID ctrlChannelMode = new OID(CTRL_CHANNEL_MODE + (xc.isAddRule() ? "1." : "2.") + channel);
        pdu.add(new VariableBinding(ctrlChannelMode, new Integer32(OPEN_LOOP)));

        // Add rules are set to target power, drop rules are attenuated
        if (xc.isAddRule()) {
            OID ctrlChannelTargetPower = new OID(CTRL_CHANNEL_TARGET_POWER + "1." + channel);
            pdu.add(new VariableBinding(ctrlChannelTargetPower, new Integer32(DISABLE_CHANNEL_TARGET_POWER)));
        } else {
            OID ctrlChannelAbsoluteAttenuation = new OID(CTRL_CHANNEL_ABSOLUTE_ATTENUATION + "2." + channel);
            pdu.add(new VariableBinding(
                    ctrlChannelAbsoluteAttenuation, new UnsignedInteger32(DISABLE_CHANNEL_ABSOLUTE_ATTENUATION)));
        }

        try {
            ResponseEvent response = snmp.set(pdu);

            // TODO: parse response
        } catch (IOException e) {
            log.error("Failed to remove cross connect, unable to connect to device: ", e);
            return false;
        }

        return true;
    }

    /**
     * Convert OCh signal to Lumentum channel ID.
     *
     * @param ochSignal OCh signal
     * @return Lumentum channel ID
     */
    public static int toChannel(OchSignal ochSignal) {
        // FIXME: move to cross connect validation
        checkArgument(ochSignal.channelSpacing() == ChannelSpacing.CHL_50GHZ);
        checkArgument(LumentumSnmpDevice.START_CENTER_FREQ.compareTo(ochSignal.centralFrequency()) <= 0);
        checkArgument(LumentumSnmpDevice.END_CENTER_FREQ.compareTo(ochSignal.centralFrequency()) >= 0);

        return ochSignal.spacingMultiplier() + LumentumSnmpDevice.MULTIPLIER_SHIFT;
    }

    /**
     * Convert Lumentum channel ID to OCh signal.
     *
     * @param channel Lumentum channel ID, in [1, 96]
     * @return OCh signal
     */
    public static OchSignal toOchSignal(int channel) {
        checkArgument(1 <= channel);
        checkArgument(channel <= 96);

        return new OchSignal(GridType.DWDM, ChannelSpacing.CHL_50GHZ,
                channel - LumentumSnmpDevice.MULTIPLIER_SHIFT, 4);
    }

    /**
     * Returns the currently configured add/drop port for the given channel, or
     * null if the channel is not found in the SNMP subtree.
     */
    private PortNumber getAddDropPort(int channel, boolean isAddPort) {
        OID oid = new OID(CTRL_CHANNEL_ADD_DROP_PORT_INDEX + (isAddPort ? "1" : "2"));

        for (TreeEvent event : snmp.get(oid)) {
            if (event == null) {
                return null;
            }

            VariableBinding[] varBindings = event.getVariableBindings();
            for (VariableBinding varBinding : varBindings) {
                // The channel number is the last sub-identifier of the OID.
                if (varBinding.getOid().last() == channel) {
                    int port = varBinding.getVariable().toInt();
                    return PortNumber.portNumber(port);
                }
            }
        }

        return null;
    }

    /**
     * Returns the currently installed flow entries on the device for one
     * direction. Entries whose flow ID/priority are not in the cache are
     * skipped (they were not installed through this driver instance).
     */
    private List<FlowRule> fetchRules(OID oid, boolean isAdd, PortNumber linePort) {
        List<FlowRule> rules = new LinkedList<>();

        for (TreeEvent event : snmp.get(oid)) {
            if (event == null) {
                continue;
            }

            VariableBinding[] varBindings = event.getVariableBindings();
            for (VariableBinding varBinding : varBindings) {
                CrossConnectCache cache = this.handler().get(CrossConnectCache.class);

                if (varBinding.getVariable().toInt() == IN_SERVICE) {
                    int channel = varBinding.getOid().removeLast();

                    PortNumber addDropPort = getAddDropPort(channel, isAdd);
                    if (addDropPort == null) {
                        continue;
                    }

                    TrafficSelector selector = DefaultTrafficSelector.builder()
                            .matchInPort(isAdd ? addDropPort : linePort)
                            .add(Criteria.matchOchSignalType(OchSignalType.FIXED_GRID))
                            .add(Criteria.matchLambda(toOchSignal(channel)))
                            .build();
                    TrafficTreatment treatment = DefaultTrafficTreatment.builder()
                            .setOutput(isAdd ? linePort : addDropPort)
                            .build();

                    // Lookup flow ID and priority
                    int hash = Objects.hash(data().deviceId(), selector, treatment);
                    Pair<FlowId, Integer> lookup = cache.get(hash);
                    if (lookup == null) {
                        continue;
                    }

                    FlowRule fr = DefaultFlowRule.builder()
                            .forDevice(data().deviceId())
                            .makePermanent()
                            .withSelector(selector)
                            .withTreatment(treatment)
                            .withPriority(lookup.getRight())
                            .withCookie(lookup.getLeft().value())
                            .build();
                    rules.add(fr);
                }
            }
        }

        return rules;
    }
}
| sonu283304/onos | drivers/lumentum/src/main/java/org/onosproject/drivers/lumentum/LumentumFlowRuleDriver.java | Java | apache-2.0 | 16,751 |
/*
* Copyright (C) 2017 Circulo Odontologico del Chaco
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ar.com.circuloodontochaco.co.model.contraints;
import javax.validation.ConstraintValidator;
import javax.validation.ConstraintValidatorContext;
import ar.com.circuloodontochaco.co.model.Remesa;
/**
* @author Jorge E. Villaverde
*
*/
public class RemesaValidator implements ConstraintValidator<ValidRemesa, Remesa> {

	@SuppressWarnings("unused")
	private ValidRemesa valid;

	@Override
	public void initialize(ValidRemesa valid) {
		this.valid = valid;
	}

	/**
	 * Validates a {@code Remesa}: the amount must be present and must not
	 * exceed the balance of the origin account, and origin and destination
	 * must be different accounts. All violations are reported (the checks do
	 * not short-circuit).
	 */
	@Override
	public boolean isValid(Remesa remesa, ConstraintValidatorContext context) {
		if (remesa == null) {
			return false;
		}
		boolean result = true;
		// Amount check: null or greater than the origin account balance.
		if (remesa.getImporte() == null || remesa.getImporte().compareTo(remesa.getOrigen().getSaldo()) > 0) {
			result = false;
			reportViolation(context,
					"{ar.com.circuloodontochaco.co.model.contraints.RemesaImporte.message}");
		}
		// Same-account check: origin and destination accounts must differ.
		if (remesa.getOrigen().getCuenta().equals(remesa.getDestino().getCuenta())) {
			result = false;
			reportViolation(context,
					"{ar.com.circuloodontochaco.co.model.contraints.RemesaSameAccount.message}");
		}
		return result;
	}

	// Replaces the default constraint violation with the given message template.
	private static void reportViolation(ConstraintValidatorContext context, String template) {
		context.disableDefaultConstraintViolation();
		context.buildConstraintViolationWithTemplate(template).addConstraintViolation();
	}
}
| jorgevillaverde/co | src/main/java/ar/com/circuloodontochaco/co/model/contraints/RemesaValidator.java | Java | apache-2.0 | 2,000 |
<?php
/*
* Your installation or use of this SugarCRM file is subject to the applicable
* terms available at
* http://support.sugarcrm.com/06_Customer_Center/10_Master_Subscription_Agreements/.
* If you do not agree to all of the applicable terms or do not have the
* authority to bind the entity as an authorized representative, then do not
* install or use this SugarCRM file.
*
* Copyright (C) SugarCRM Inc. All rights reserved.
*/
$module_name = 'sa_SudoAudit';

// Search-list view metadata: one primary panel showing the record avatar
// (read-only, floated left) and the record name rendered as a link with
// the LBL_SUBJECT label.
$viewdefs[$module_name]['base']['view']['search-list'] = [
    'panels' => [
        [
            'name' => 'primary',
            'fields' => [
                [
                    'name' => 'picture',
                    'type' => 'avatar',
                    'size' => 'medium',
                    'readonly' => true,
                    'css_class' => 'pull-left',
                ],
                [
                    'name' => 'name',
                    'type' => 'name',
                    'link' => true,
                    'label' => 'LBL_SUBJECT',
                ],
            ],
        ],
    ],
];
| sugarcrmlabs/AdminSudo | SudoAudit2016_11_08_114528/SugarModules/modules/sa_SudoAudit/clients/base/views/search-list/search-list.php | PHP | apache-2.0 | 1,127 |
package com.wonders.alpha.bo;
import java.io.Serializable;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
@Entity
@Table(name="alpha")
public class Alpha implements Serializable{
private String id;
@Id
@Column(name="ID")
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
} | LotteryOne/tools | src/com/wonders/alpha/bo/Alpha.java | Java | apache-2.0 | 409 |
package de.undercouch.citeproc;
import de.undercouch.citeproc.helper.CSLUtils;
import java.io.IOException;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
/**
* Default implementation of {@link LocaleProvider}. Loads locales from
* the classpath.
* @author Michel Kraemer
*/
public class DefaultLocaleProvider implements LocaleProvider {
    /**
     * Caches the serialized XML of already-loaded locales, keyed by language tag
     */
    private final Map<String, String> locales = new HashMap<>();

    /**
     * Retrieves the serialized XML for the given locale from the classpath.
     * For example, if the locale is <code>en-US</code> this method loads
     * the file <code>/locales-en-US.xml</code> from the classpath. Results
     * are cached, so each locale file is read at most once.
     */
    @Override
    public String retrieveLocale(String lang) {
        return locales.computeIfAbsent(lang, this::loadLocale);
    }

    /**
     * Loads the locale file from the classpath, without caching.
     * @throws IllegalArgumentException if no such resource is on the classpath
     * @throws RuntimeException if reading the resource fails
     */
    private String loadLocale(String lang) {
        URL u = getClass().getResource("/locales-" + lang + ".xml");
        if (u == null) {
            throw new IllegalArgumentException("Unable to load locale " +
                    lang + ". Make sure you have a file called " +
                    "'/locales-" + lang + ".xml' at the root of your " +
                    "classpath. Did you add the CSL locale files to "
                    + "your classpath?");
        }
        try {
            return CSLUtils.readURLToString(u, "UTF-8");
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
| michel-kraemer/citeproc-java | citeproc-java/src/main/java/de/undercouch/citeproc/DefaultLocaleProvider.java | Java | apache-2.0 | 1,590 |
'use strict';
/**
* Requirements
* @ignore
*/
const BaseValueObject = require('../BaseValueObject.js').BaseValueObject;
const Entity = require('./Entity.js').Entity;
const Site = require('../site/Site.js').Site;
const ContentKind = require('../ContentKind.js');
const BaseMap = require('../../base/BaseMap.js').BaseMap;
const EntityIdTemplate = require('./EntityIdTemplate.js').EntityIdTemplate;
const assertParameter = require('../../utils/assert.js').assertParameter;
/**
* @namespace model.entity
*/
/**
 * A site-specific view of an {@link model.entity.Entity}: the entity's id is
 * re-bound to the given site, and files/properties/documentation/tests are
 * merged across the site's extension chain (base site first).
 */
class EntityAspect extends BaseValueObject
{
    /**
     * @param {model.entity.Entity} entity
     * @param {model.site.Site} site
     * @param {model.entity.EntityIdTemplate} entityIdTemplate - stored but not used here; confirm intended use
     */
    constructor(entity, site, entityIdTemplate)
    {
        super();
        //Check params
        assertParameter(this, 'entity', entity, true, Entity);
        assertParameter(this, 'site', site, true, Site);
        //assertParameter(this, 'entityIdTemplate', entityIdTemplate, true, EntityIdTemplate);
        // Add initial values
        this._entity = entity;
        this._site = site;
        this._entityIdTemplate = entityIdTemplate;
        // Extend id: clone the entity id and point it at this aspect's site
        this._entityId = this._entity.id.clone();
        this._entityId.site = this._site;
        // Get extended sites, ordered base-first (unshift walks up the
        // `extends` chain), so later sites in the loop below override earlier ones
        this._extendedSites = [];
        let currentSite = this._site;
        while(currentSite)
        {
            this._extendedSites.unshift(currentSite);
            currentSite = currentSite.extends;
        }
        // Extend files, properties, documentation & tests
        const properties = new BaseMap();
        const examples = {};
        const macros = {};
        const texts = [];
        const datamodels = [];
        const tests = []; // NOTE(review): never populated or read below — confirm dead
        for (const s of this._extendedSites)
        {
            // Files: only this site's files are loaded per iteration
            this.files.load(this._entity.files.filter(file => file.site === s));
            // Examples, keyed by basename so extending sites override base sites.
            // NOTE(review): this filter does not reference `s`, so the same
            // documentation set is re-scanned once per extended site — presumably
            // harmless because the map key dedupes; confirm
            const siteExamples = this._entity.documentation.filter(doc => doc.contentKind === ContentKind.EXAMPLE);
            for (const siteExample of siteExamples)
            {
                examples[siteExample.file.basename] = siteExample;
            }
            // Models
            // NOTE(review): like examples, this filter is site-independent but
            // pushes into an array — a site chain of length N appends each
            // datamodel N times; confirm `documentation.load` deduplicates
            const siteDatamodels = this._entity.documentation.filter(doc => doc.contentKind === ContentKind.DATAMODEL);
            for (const siteDatamodel of siteDatamodels)
            {
                datamodels.push(siteDatamodel);
            }
            // Macros, keyed by name (later sites override)
            const siteMacros = this._entity.documentation.filter(doc => doc.contentKind === ContentKind.MACRO);
            for (const siteMacro of siteMacros)
            {
                macros[siteMacro.name] = siteMacro;
            }
            // Text — same site-independent/array caveat as datamodels above
            const siteTexts = this._entity.documentation.filter(doc => doc.contentKind === ContentKind.TEXT);
            for (const siteText of siteTexts)
            {
                texts.push(siteText);
            }
            // Properties: merged base-first, so extending sites win on conflicts
            const siteProperties = this._entity.properties.getByPath(s.name.toLowerCase(), {});
            properties.merge(siteProperties);
            // Tests: only this site's tests
            this.tests.load(this._entity.tests.filter(test => test.site === s));
        }
        this.properties.load(properties);
        this.documentation.load(examples);
        this.documentation.load(datamodels);
        this.documentation.load(macros);
        this.documentation.load(texts);
    }

    /**
     * DI constructor parameters.
     * @inheritDoc
     */
    static get injections()
    {
        return { 'parameters': [Entity, Site, EntityIdTemplate] };
    }

    /**
     * @inheritDoc
     */
    static get className()
    {
        return 'model.entity/EntityAspect';
    }

    /**
     * Unique key for this aspect (the site-qualified path string).
     * @property {*}
     */
    get uniqueId()
    {
        return this.pathString;
    }

    /**
     * The entity id re-bound to this aspect's site.
     * @property {entity.EntityId}
     */
    get id()
    {
        return this._entityId;
    }

    /**
     * @property {String}
     */
    get idString()
    {
        return this._entityId.idString;
    }

    /**
     * @property {String}
     */
    get pathString()
    {
        return this._entityId.pathString;
    }

    /**
     * The underlying (site-agnostic) entity.
     * @property {model.entity.Entity}
     */
    get entity()
    {
        return this._entity;
    }

    /**
     * @property {model.site.Site}
     */
    get site()
    {
        return this._site;
    }

    /**
     * @property {Bool}
     */
    get isGlobal()
    {
        return this._entity.isGlobal;
    }

    /**
     * @inheritDoc
     */
    toString()
    {
        return `[${this.className} ${this.site.name}/${this.id.category.longName}-${this.id.name}]`;
    }
}

/**
 * Exports
 * @ignore
 */
module.exports.EntityAspect = EntityAspect;
| entoj/entoj-core | source/model/entity/EntityAspect.js | JavaScript | apache-2.0 | 4,718 |
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.unitime.timetable.onlinesectioning.custom.purdue;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.List;
import org.joda.time.DateTime;
import com.google.gson.reflect.TypeToken;
/**
 * Data-transfer classes for the Banner XE student registration API.
 * All nested types are plain DTOs populated by Gson; field names mirror the
 * JSON payloads and must not be renamed.
 */
public class XEInterface {

	/** One course registration of a student, as returned by XE. */
	public static class Registration {
		public String subject;
		public String subjectDescription;
		public String courseNumber;
		public String courseReferenceNumber;
		public String courseTitle;
		/**
		 * 40	CEC 40% refund
		 * 60	CEC 60% refund
		 * 80	CEC 80% refund
		 * AA	Auditor Access
		 * AU	Audit
		 * CA	Cancel Administratively
		 * DB	Boiler Gold Rush Drop Course
		 * DC	Drop Course
		 * DD	Drop/Delete
		 * DT	Drop Course-TSW
		 * DW	Drop (Web)
		 * RC	**ReAdd Course**
		 * RE	**Registered**
		 * RT	**Web Registered**
		 * RW	**Web Registered**
		 * W	Withdrawn-W
		 * W1	Withdrawn
		 * W2	Withdrawn
		 * W3	Withdrawn
		 * W4	Withdrawn
		 * W5	Withdrawn
		 * WF	Withdrawn-WF
		 * WG	Withdrawn-pending grade
		 * WN	Withdrawn-WN
		 * WT	Withdrawn-W
		 * WU	Withdrawn-WU
		 * WL	Waitlist
		 */
		public String courseRegistrationStatus;
		public String courseRegistrationStatusDescription;
		public Double creditHour;
		public String gradingMode;
		public String gradingModeDescription;
		public String level;
		public String levelDescription;
		public DateTime registrationStatusDate;
		public String scheduleDescription;
		public String scheduleType;
		public String sequenceNumber;
		public String statusDescription;
		/**
		 * P = pending
		 * R = registered
		 * D = dropped
		 * L = waitlisted
		 * F = fatal error prevented registration
		 * W = withdrawn
		 */
		public String statusIndicator;
		public List<CrnError> crnErrors;
		public String term;
		public String campus;
		// Status transitions the student may currently perform on this CRN.
		public List<RegistrationAction> registrationActions;

		/** True if {@code status} is among the allowed registration actions. */
		public boolean can(String status) {
			if (registrationActions != null)
				for (RegistrationAction action: registrationActions) {
					if (status.equals(action.courseRegistrationStatus))
						return true;
				}
			return false;
		}

		/** True if a web drop (DW) is currently allowed. */
		public boolean canDrop() {
			return can("DW");
		}

		/** True if (re-)registration is allowed; admins use RE, students RW. */
		public boolean canAdd(boolean admin) {
			return can(admin ? "RE" : "RW");
		}

		/** True if the registration is in the registered (R) state. */
		public boolean isRegistered() {
			return "R".equals(statusIndicator);
		}
	}

	/** Error attached to a single CRN in a registration response. */
	public static class CrnError {
		public String errorFlag;
		public String message;
		public String messageType;
	}

	/** One action (status change) the student may perform on a registration. */
	public static class RegistrationAction {
		public String courseRegistrationStatus;
		public String description;
		public Boolean remove;
		public String voiceType;
	}

	/** Registration time window assigned to a student. */
	public static class TimeTicket {
		public DateTime beginDate;
		public DateTime endDate;
		public String startTime;
		public String endTime;
	}

	/** A registration attempt that failed, with the failure reason. */
	public static class FailedRegistration {
		public String failedCRN;
		public String failure;
		public Registration registration;
	}

	/** Response of a register call; XE returns it as a JSON array. */
	public static class RegisterResponse {
		public static final Type TYPE_LIST = new TypeToken<ArrayList<RegisterResponse>>() {}.getType();

		public List<FailedRegistration> failedRegistrations;
		public List<String> failureReasons;
		public List<Registration> registrations;
		public List<TimeTicket> timeTickets;
		public Boolean validStudent;
		public String registrationException;
	}

	/** CRN reference in a register request, optionally with a target status. */
	public static class CourseReferenceNumber {
		public String courseReferenceNumber;
		public String courseRegistrationStatus;

		public CourseReferenceNumber() {}
		public CourseReferenceNumber(String crn) {
			this.courseReferenceNumber = crn;
		}
		public CourseReferenceNumber(String crn, String status) {
			this.courseReferenceNumber = crn;
			this.courseRegistrationStatus = status;
		}
	}

	/** One action + its options applied to a CRN in a register request. */
	public static class RegisterAction {
		public String courseReferenceNumber;
		public String selectedAction;
		public String selectedLevel;
		public String selectedGradingMode;
		public String selectedStudyPath;
		public String selectedCreditHour;

		public RegisterAction(String action, String crn) {
			selectedAction = action;
			courseReferenceNumber = crn;
		}
	}

	/**
	 * Register request builder. {@code systemIn} is "SB" for administrative
	 * calls and "WA" for student (web) calls — presumably Banner system codes;
	 * confirm against the XE API documentation.
	 */
	public static class RegisterRequest {
		public String bannerId;
		public String term;
		public String altPin;
		public String systemIn;
		public List<CourseReferenceNumber> courseReferenceNumbers;
		public List<RegisterAction> actionsAndOptions;

		public RegisterRequest(String term, String bannerId, String pin, boolean admin) {
			this.term = term; this.bannerId = bannerId; this.altPin = pin; this.systemIn = (admin ? "SB" : "WA");
		}

		/** Requests a web drop (DW) of the given CRN. */
		public RegisterRequest drop(String crn) {
			if (actionsAndOptions == null) actionsAndOptions = new ArrayList<RegisterAction>();
			actionsAndOptions.add(new RegisterAction("DW", crn));
			return this;
		}

		/** Keeps an already-registered CRN unchanged. */
		public RegisterRequest keep(String crn) {
			if (courseReferenceNumbers == null)
				courseReferenceNumbers = new ArrayList<XEInterface.CourseReferenceNumber>();
			courseReferenceNumbers.add(new CourseReferenceNumber(crn));
			return this;
		}

		/**
		 * Adds a CRN; when {@code changeStatus} is true an explicit register
		 * action (RE for admin/SB, RW otherwise) is sent instead of a plain CRN.
		 */
		public RegisterRequest add(String crn, boolean changeStatus) {
			if (changeStatus) {
				if (actionsAndOptions == null) actionsAndOptions = new ArrayList<RegisterAction>();
				actionsAndOptions.add(new RegisterAction("SB".equals(systemIn) ? "RE" : "RW", crn));
			} else {
				if (courseReferenceNumbers == null)
					courseReferenceNumbers = new ArrayList<XEInterface.CourseReferenceNumber>();
				// if ("SB".equals(systemIn)) courseReferenceNumbers.add(new CourseReferenceNumber(crn, "RW")); else
				courseReferenceNumbers.add(new CourseReferenceNumber(crn));
			}
			return this;
		}

		/** Adds an empty CRN entry (used to submit a request with no courses). */
		public RegisterRequest empty() {
			if (courseReferenceNumbers == null)
				courseReferenceNumbers = new ArrayList<XEInterface.CourseReferenceNumber>();
			courseReferenceNumbers.add(new CourseReferenceNumber());
			return this;
		}

		/** True if no actions and no CRNs have been added. */
		public boolean isEmpty() {
			return (actionsAndOptions == null || actionsAndOptions.isEmpty()) && (courseReferenceNumbers == null || courseReferenceNumbers.isEmpty());
		}
	}

	/** A student's degree plan; XE returns it as a JSON array. */
	public static class DegreePlan {
		public static final Type TYPE_LIST = new TypeToken<ArrayList<DegreePlan>>() {}.getType();

		public String id;
		public String description;
		public Student student;
		public CodeDescription degree;
		public CodeDescription school;
		public List<Year> years;
	}

	/** Student identity within a degree plan. */
	public static class Student {
		public String id;
		public String name;
	}

	/** Generic code + human-readable description pair. */
	public static class CodeDescription {
		public String code;
		public String description;
	}

	/** Academic year of a degree plan. */
	public static class Year extends CodeDescription {
		public List<Term> terms;
	}

	/** Academic term within a year; holds the planned course group. */
	public static class Term {
		public String id;
		public CodeDescription term;
		public Group group;
	}

	/** Group of planned classes; groups may nest recursively. */
	public static class Group {
		public CodeDescription groupType;
		public List<Course> plannedClasses;
		public List<Group> groups;
		public List<PlaceHolder> plannedPlaceholders;
		public String summaryDescription;
		public boolean isGroupSelection;
	}

	/** A planned course in a degree plan group. */
	public static class Course {
		public String id;
		public String title;
		public String courseNumber;
		public String courseDiscipline;
		public boolean isGroupSelection;
	}

	/** Placeholder entry in a degree plan (e.g. an elective slot). */
	public static class PlaceHolder {
		public String id;
		public CodeDescription placeholderType;
		public String placeholderValue;
	}

	/** Error envelope returned by XE on failure. */
	public static class ErrorResponse {
		public List<Error> errors;

		/** Returns the first error, or null if the list is missing/empty. */
		public Error getError() {
			return (errors == null || errors.isEmpty() ? null : errors.get(0));
		}
	}

	/** One error record of an {@link ErrorResponse}. */
	public static class Error {
		public String code;
		public String message;
		public String description;
		public String type;
		public String errorMessage;
	}
}
/*
* Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may
* not use this file except in compliance with the License. A copy of the
* License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "LICENSE" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.blox.integ;
import com.amazonaws.blox.integ.CloudFormationStacks.CfnStack;
import java.util.Collections;
import java.util.List;
import lombok.RequiredArgsConstructor;
import software.amazon.awssdk.services.ecs.ECSClient;
import software.amazon.awssdk.services.ecs.model.DescribeTasksRequest;
import software.amazon.awssdk.services.ecs.model.DesiredStatus;
import software.amazon.awssdk.services.ecs.model.ListTasksRequest;
import software.amazon.awssdk.services.ecs.model.StopTaskRequest;
import software.amazon.awssdk.services.ecs.model.Task;
/** Wrapper for interacting with a test ECS cluster */
/** Wrapper for interacting with a test ECS cluster */
@RequiredArgsConstructor
public class ECSClusterWrapper {
  // Field order matters: @RequiredArgsConstructor derives the constructor
  // parameter order (ecs, stack) from the final fields without initializers.
  private final ECSClient ecs;

  // TODO: For now, act on all tasks that match startedBy, we should change this to filter by prefix
  private final String startedBy = "blox";
  private final CfnStack stack;

  public ECSClusterWrapper(ECSClient ecs, CloudFormationStacks stacks) {
    this(ecs, stacks.get("blox-test-cluster"));
  }

  /** Task definition ARN for the transient test task (stack output). */
  public String getTransientTaskDefinition() {
    return stack.output("transienttask");
  }

  /** Task definition ARN for the persistent test task (stack output). */
  public String getPersistentTaskDefinition() {
    return stack.output("persistenttask");
  }

  /** Name/ARN of the test cluster (stack output). */
  public String getCluster() {
    return stack.output("cluster");
  }

  /** Describes all running blox-started tasks; empty list when none exist. */
  public List<Task> describeTasks() {
    final List<String> arns = listTasks();
    if (arns.isEmpty()) {
      return Collections.emptyList();
    }
    final DescribeTasksRequest request =
        DescribeTasksRequest.builder().cluster(getCluster()).tasks(arns).build();
    return ecs.describeTasks(request).tasks();
  }

  // Lists ARNs of RUNNING tasks on the cluster that were started by blox.
  private List<String> listTasks() {
    final ListTasksRequest request =
        ListTasksRequest.builder()
            .cluster(getCluster())
            .startedBy(startedBy)
            .desiredStatus(DesiredStatus.RUNNING)
            .build();
    return ecs.listTasks(request).taskArns();
  }

  /** Stops every running blox-started task to reset the cluster. */
  public void reset() {
    listTasks()
        .forEach(
            task -> ecs.stopTask(StopTaskRequest.builder().cluster(getCluster()).task(task).build()));
  }
}
| blox/blox | end-to-end-tests/src/main/java/com/amazonaws/blox/integ/ECSClusterWrapper.java | Java | apache-2.0 | 2,607 |
// Registers two custom jQuery "special events" on SWL's bundled jQuery (SWL.$):
//   - "scrollstart": fired once when scrolling begins (first scroll after a quiet period)
//   - "scrollstop":  fired once scrolling has been idle for `latency` ms (300)
// Both are driven by the native "scroll" event plus a debounce timer.
// `b` and `c` are (near-)unique data keys used to stash each handler on the
// element so teardown can unbind exactly the handler that setup bound.
function swl_scrollStopExtend() {
	var a = SWL.$.event.special, b = "D" + +new Date, c = "D" + (+new Date + 1);
	a.scrollstart = {
		setup : function() {
			// `c` here shadows the outer `c`: it is the pending timer handle.
			var c, d = function(b) {
				var d = this, e = arguments;
				// If no timer is pending, this is the first scroll in a burst:
				// retype the event to "scrollstart" and dispatch it. Either way,
				// (re)arm the quiet-period timer so the next burst fires again.
				c ? clearTimeout(c) : (b.type = "scrollstart", SWL.$.event.handle.apply(d, e)), c = setTimeout(function() {
					c = null
				}, a.scrollstop.latency)
			};
			// Bind the proxy to native "scroll" and remember it under key `b`.
			SWL.$(this).bind("scroll", d).data(b, d)
		},
		teardown : function() {
			// Unbind the exact proxy handler that setup stored under key `b`.
			SWL.$(this).unbind("scroll", SWL.$(this).data(b))
		}
	}, a.scrollstop = {
		// Milliseconds of scroll silence before "scrollstop" fires.
		latency : 300,
		setup : function() {
			// `b` is the pending debounce timer handle.
			var b, d = function(c) {
				var d = this, e = arguments;
				// Every scroll resets the timer; only when scrolling pauses for
				// `latency` ms does the retyped "scrollstop" event dispatch.
				b && clearTimeout(b), b = setTimeout(function() {
					b = null, c.type = "scrollstop", SWL.$.event.handle.apply(d, e)
				}, a.scrollstop.latency)
			};
			// Bind the proxy to native "scroll" and remember it under key `c`.
			SWL.$(this).bind("scroll", d).data(c, d)
		},
		teardown : function() {
			SWL.$(this).unbind("scroll", SWL.$(this).data(c))
		}
	}
}
// Bootstraps the special-event registration. The SWL global may not exist yet
// when this script runs, so poll every 50 ms until it appears, then install
// the scrollstart/scrollstop events exactly once. The trailing call below
// kicks the polling off immediately at load time.
function swl_scrollStopInit() {
	return "undefined" == typeof SWL ? (window.setTimeout(function() {
		swl_scrollStopInit()
	}, 50),
	void 0) : (swl_scrollStopExtend(),
	void 0)
}swl_scrollStopInit();
/**
* (C) Copyright 2016-2019 teecube
* (https://teecu.be) and others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package t3.site.gitlab.tags;
import com.fasterxml.jackson.annotation.*;
import java.util.HashMap;
import java.util.Map;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder({
"tag_name",
"description"
})
/**
 * JSON-mapped GitLab "release" payload attached to a tag. Both fields are
 * required by the GitLab tags API; any unrecognized JSON properties are
 * captured in {@link #getAdditionalProperties()} rather than failing parsing.
 */
public class Release {

    /**
     * Name of the git tag this release belongs to.
     * (Required)
     *
     */
    @JsonProperty("tag_name")
    private String tagName;
    /**
     * Free-form (Markdown) release notes.
     * (Required)
     *
     */
    @JsonProperty("description")
    private String description;
    // Catch-all for JSON properties not explicitly mapped above.
    @JsonIgnore
    private Map<String, Object> additionalProperties = new HashMap<String, Object>();

    /**
     * Returns the tag name this release is attached to.
     * (Required)
     *
     */
    @JsonProperty("tag_name")
    public String getTagName() {
        return tagName;
    }

    /**
     * Sets the tag name this release is attached to.
     * (Required)
     *
     */
    @JsonProperty("tag_name")
    public void setTagName(String tagName) {
        this.tagName = tagName;
    }

    /**
     * Returns the release notes.
     * (Required)
     *
     */
    @JsonProperty("description")
    public String getDescription() {
        return description;
    }

    /**
     * Sets the release notes.
     * (Required)
     *
     */
    @JsonProperty("description")
    public void setDescription(String description) {
        this.description = description;
    }

    /** Jackson hook: serializes any extra, unmapped properties back out. */
    @JsonAnyGetter
    public Map<String, Object> getAdditionalProperties() {
        return this.additionalProperties;
    }

    /** Jackson hook: collects unmapped JSON properties during deserialization. */
    @JsonAnySetter
    public void setAdditionalProperty(String name, Object value) {
        this.additionalProperties.put(name, value);
    }

}
| teecube/t3 | t3-site-enhancer/src/main/java/t3/site/gitlab/tags/Release.java | Java | apache-2.0 | 2,129 |
package com.example.android.sunshine.app.sync;
import android.accounts.Account;
import android.accounts.AccountManager;
import android.annotation.SuppressLint;
import android.app.NotificationManager;
import android.app.PendingIntent;
import android.content.AbstractThreadedSyncAdapter;
import android.content.ContentProviderClient;
import android.content.ContentResolver;
import android.content.ContentUris;
import android.content.ContentValues;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.SyncRequest;
import android.content.SyncResult;
import android.content.res.Resources;
import android.database.Cursor;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.preference.PreferenceManager;
import android.support.annotation.IntDef;
import android.support.v4.app.NotificationCompat;
import android.support.v4.app.TaskStackBuilder;
import android.text.format.Time;
import android.util.Log;
import com.bumptech.glide.Glide;
import com.example.android.sunshine.app.BuildConfig;
import com.example.android.sunshine.app.MainActivity;
import com.example.android.sunshine.app.R;
import com.example.android.sunshine.app.Utility;
import com.example.android.sunshine.app.data.WeatherContract;
import com.example.android.sunshine.app.muzei.WeatherMuzeiSource;
import com.example.android.sunshine.app.wear.WearIntentService;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Vector;
import java.util.concurrent.ExecutionException;
public class SunshineSyncAdapter extends AbstractThreadedSyncAdapter {
public final String LOG_TAG = SunshineSyncAdapter.class.getSimpleName();
public static final String ACTION_DATA_UPDATED =
"com.example.android.sunshine.app.ACTION_DATA_UPDATED";
public static final String ACTION_WEAR_UPDATED =
"com.example.android.sunshine.app.ACTION_WEAR_UPDATED";
// Interval at which to sync with the weather, in seconds.
// 60 seconds (1 minute) * 180 = 3 hours
public static final int SYNC_INTERVAL = 60 * 180;
public static final int SYNC_FLEXTIME = SYNC_INTERVAL/3;
private static final long DAY_IN_MILLIS = 1000 * 60 * 60 * 24;
private static final int WEATHER_NOTIFICATION_ID = 3004;
private static final String[] NOTIFY_WEATHER_PROJECTION = new String[] {
WeatherContract.WeatherEntry.COLUMN_WEATHER_ID,
WeatherContract.WeatherEntry.COLUMN_MAX_TEMP,
WeatherContract.WeatherEntry.COLUMN_MIN_TEMP,
WeatherContract.WeatherEntry.COLUMN_SHORT_DESC
};
// these indices must match the projection
private static final int INDEX_WEATHER_ID = 0;
private static final int INDEX_MAX_TEMP = 1;
private static final int INDEX_MIN_TEMP = 2;
private static final int INDEX_SHORT_DESC = 3;
@Retention(RetentionPolicy.SOURCE)
@IntDef({LOCATION_STATUS_OK, LOCATION_STATUS_SERVER_DOWN, LOCATION_STATUS_SERVER_INVALID, LOCATION_STATUS_UNKNOWN, LOCATION_STATUS_INVALID})
public @interface LocationStatus {}
public static final int LOCATION_STATUS_OK = 0;
public static final int LOCATION_STATUS_SERVER_DOWN = 1;
public static final int LOCATION_STATUS_SERVER_INVALID = 2;
public static final int LOCATION_STATUS_UNKNOWN = 3;
public static final int LOCATION_STATUS_INVALID = 4;
public SunshineSyncAdapter(Context context, boolean autoInitialize) {
super(context, autoInitialize);
}
@Override
public void onPerformSync(Account account, Bundle extras, String authority, ContentProviderClient provider, SyncResult syncResult) {
Log.d(LOG_TAG, "Starting sync");
String locationQuery = Utility.getPreferredLocation(getContext());
// These two need to be declared outside the try/catch
// so that they can be closed in the finally block.
HttpURLConnection urlConnection = null;
BufferedReader reader = null;
// Will contain the raw JSON response as a string.
String forecastJsonStr = null;
String format = "json";
String units = "metric";
int numDays = 14;
try {
// Construct the URL for the OpenWeatherMap query
// Possible parameters are avaiable at OWM's forecast API page, at
// http://openweathermap.org/API#forecast
final String FORECAST_BASE_URL =
"http://api.openweathermap.org/data/2.5/forecast/daily?";
final String QUERY_PARAM = "q";
final String FORMAT_PARAM = "mode";
final String UNITS_PARAM = "units";
final String DAYS_PARAM = "cnt";
final String APPID_PARAM = "APPID";
Uri builtUri = Uri.parse(FORECAST_BASE_URL).buildUpon()
.appendQueryParameter(QUERY_PARAM, locationQuery)
.appendQueryParameter(FORMAT_PARAM, format)
.appendQueryParameter(UNITS_PARAM, units)
.appendQueryParameter(DAYS_PARAM, Integer.toString(numDays))
.appendQueryParameter(APPID_PARAM, BuildConfig.OPEN_WEATHER_MAP_API_KEY)
.build();
URL url = new URL(builtUri.toString());
// Create the request to OpenWeatherMap, and open the connection
urlConnection = (HttpURLConnection) url.openConnection();
urlConnection.setRequestMethod("GET");
urlConnection.connect();
// Read the input stream into a String
InputStream inputStream = urlConnection.getInputStream();
StringBuffer buffer = new StringBuffer();
if (inputStream == null) {
// Nothing to do.
return;
}
reader = new BufferedReader(new InputStreamReader(inputStream));
String line;
while ((line = reader.readLine()) != null) {
// Since it's JSON, adding a newline isn't necessary (it won't affect parsing)
// But it does make debugging a *lot* easier if you print out the completed
// buffer for debugging.
buffer.append(line + "\n");
}
if (buffer.length() == 0) {
// Stream was empty. No point in parsing.
setLocationStatus(getContext(), LOCATION_STATUS_SERVER_DOWN);
return;
}
forecastJsonStr = buffer.toString();
getWeatherDataFromJson(forecastJsonStr, locationQuery);
} catch (IOException e) {
Log.e(LOG_TAG, "Error ", e);
// If the code didn't successfully get the weather data, there's no point in attempting
// to parse it.
setLocationStatus(getContext(), LOCATION_STATUS_SERVER_DOWN);
} catch (JSONException e) {
Log.e(LOG_TAG, e.getMessage(), e);
e.printStackTrace();
setLocationStatus(getContext(), LOCATION_STATUS_SERVER_INVALID);
} finally {
if (urlConnection != null) {
urlConnection.disconnect();
}
if (reader != null) {
try {
reader.close();
} catch (final IOException e) {
Log.e(LOG_TAG, "Error closing stream", e);
}
}
}
return;
}
/**
* Take the String representing the complete forecast in JSON Format and
* pull out the data we need to construct the Strings needed for the wireframes.
*
* Fortunately parsing is easy: constructor takes the JSON string and converts it
* into an Object hierarchy for us.
*/
private void getWeatherDataFromJson(String forecastJsonStr,
String locationSetting)
throws JSONException {
// Now we have a String representing the complete forecast in JSON Format.
// Fortunately parsing is easy: constructor takes the JSON string and converts it
// into an Object hierarchy for us.
// These are the names of the JSON objects that need to be extracted.
// Location information
final String OWM_CITY = "city";
final String OWM_CITY_NAME = "name";
final String OWM_COORD = "coord";
// Location coordinate
final String OWM_LATITUDE = "lat";
final String OWM_LONGITUDE = "lon";
// Weather information. Each day's forecast info is an element of the "list" array.
final String OWM_LIST = "list";
final String OWM_PRESSURE = "pressure";
final String OWM_HUMIDITY = "humidity";
final String OWM_WINDSPEED = "speed";
final String OWM_WIND_DIRECTION = "deg";
// All temperatures are children of the "temp" object.
final String OWM_TEMPERATURE = "temp";
final String OWM_MAX = "max";
final String OWM_MIN = "min";
final String OWM_WEATHER = "weather";
final String OWM_DESCRIPTION = "main";
final String OWM_WEATHER_ID = "id";
final String OWM_MESSAGE_CODE = "cod";
try {
JSONObject forecastJson = new JSONObject(forecastJsonStr);
// do we have an error?
if ( forecastJson.has(OWM_MESSAGE_CODE) ) {
int errorCode = forecastJson.getInt(OWM_MESSAGE_CODE);
switch (errorCode) {
case HttpURLConnection.HTTP_OK:
break;
case HttpURLConnection.HTTP_NOT_FOUND:
setLocationStatus(getContext(), LOCATION_STATUS_INVALID);
return;
default:
setLocationStatus(getContext(), LOCATION_STATUS_SERVER_DOWN);
return;
}
}
JSONArray weatherArray = forecastJson.getJSONArray(OWM_LIST);
JSONObject cityJson = forecastJson.getJSONObject(OWM_CITY);
String cityName = cityJson.getString(OWM_CITY_NAME);
JSONObject cityCoord = cityJson.getJSONObject(OWM_COORD);
double cityLatitude = cityCoord.getDouble(OWM_LATITUDE);
double cityLongitude = cityCoord.getDouble(OWM_LONGITUDE);
long locationId = addLocation(locationSetting, cityName, cityLatitude, cityLongitude);
// Insert the new weather information into the database
Vector<ContentValues> cVVector = new Vector<ContentValues>(weatherArray.length());
// OWM returns daily forecasts based upon the local time of the city that is being
// asked for, which means that we need to know the GMT offset to translate this data
// properly.
// Since this data is also sent in-order and the first day is always the
// current day, we're going to take advantage of that to get a nice
// normalized UTC date for all of our weather.
Time dayTime = new Time();
dayTime.setToNow();
// we start at the day returned by local time. Otherwise this is a mess.
int julianStartDay = Time.getJulianDay(System.currentTimeMillis(), dayTime.gmtoff);
// now we work exclusively in UTC
dayTime = new Time();
for(int i = 0; i < weatherArray.length(); i++) {
// These are the values that will be collected.
long dateTime;
double pressure;
int humidity;
double windSpeed;
double windDirection;
double high;
double low;
String description;
int weatherId;
// Get the JSON object representing the day
JSONObject dayForecast = weatherArray.getJSONObject(i);
// Cheating to convert this to UTC time, which is what we want anyhow
dateTime = dayTime.setJulianDay(julianStartDay+i);
pressure = dayForecast.getDouble(OWM_PRESSURE);
humidity = dayForecast.getInt(OWM_HUMIDITY);
windSpeed = dayForecast.getDouble(OWM_WINDSPEED);
windDirection = dayForecast.getDouble(OWM_WIND_DIRECTION);
// Description is in a child array called "weather", which is 1 element long.
// That element also contains a weather code.
JSONObject weatherObject =
dayForecast.getJSONArray(OWM_WEATHER).getJSONObject(0);
description = weatherObject.getString(OWM_DESCRIPTION);
weatherId = weatherObject.getInt(OWM_WEATHER_ID);
// Temperatures are in a child object called "temp". Try not to name variables
// "temp" when working with temperature. It confuses everybody.
JSONObject temperatureObject = dayForecast.getJSONObject(OWM_TEMPERATURE);
high = temperatureObject.getDouble(OWM_MAX);
low = temperatureObject.getDouble(OWM_MIN);
ContentValues weatherValues = new ContentValues();
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_LOC_KEY, locationId);
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_DATE, dateTime);
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_HUMIDITY, humidity);
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_PRESSURE, pressure);
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_WIND_SPEED, windSpeed);
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_DEGREES, windDirection);
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_MAX_TEMP, high);
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_MIN_TEMP, low);
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_SHORT_DESC, description);
weatherValues.put(WeatherContract.WeatherEntry.COLUMN_WEATHER_ID, weatherId);
cVVector.add(weatherValues);
}
int inserted = 0;
// add to database
if ( cVVector.size() > 0 ) {
ContentValues[] cvArray = new ContentValues[cVVector.size()];
cVVector.toArray(cvArray);
getContext().getContentResolver().bulkInsert(WeatherContract.WeatherEntry.CONTENT_URI, cvArray);
// delete old data so we don't build up an endless history
getContext().getContentResolver().delete(WeatherContract.WeatherEntry.CONTENT_URI,
WeatherContract.WeatherEntry.COLUMN_DATE + " <= ?",
new String[] {Long.toString(dayTime.setJulianDay(julianStartDay-1))});
updateWidgets();
updateWearables();
updateMuzei();
notifyWeather();
}
Log.d(LOG_TAG, "Sync Complete. " + cVVector.size() + " Inserted");
setLocationStatus(getContext(), LOCATION_STATUS_OK);
} catch (JSONException e) {
Log.e(LOG_TAG, e.getMessage(), e);
e.printStackTrace();
setLocationStatus(getContext(), LOCATION_STATUS_SERVER_INVALID);
}
}
private void updateWidgets() {
Context context = getContext();
// Setting the package ensures that only components in our app will receive the broadcast
Intent dataUpdatedIntent = new Intent(ACTION_DATA_UPDATED)
.setPackage(context.getPackageName());
context.sendBroadcast(dataUpdatedIntent);
}
private void updateWearables() {
Context context = getContext();
context.startService(new Intent(ACTION_WEAR_UPDATED)
.setClass(context,WearIntentService.class));
}
private void updateMuzei() {
// Muzei is only compatible with Jelly Bean MR1+ devices, so there's no need to update the
// Muzei background on lower API level devices
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1) {
Context context = getContext();
context.startService(new Intent(ACTION_DATA_UPDATED)
.setClass(context, WeatherMuzeiSource.class));
}
}
private void notifyWeather() {
Context context = getContext();
//checking the last update and notify if it' the first of the day
SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(context);
String displayNotificationsKey = context.getString(R.string.pref_enable_notifications_key);
boolean displayNotifications = prefs.getBoolean(displayNotificationsKey,
Boolean.parseBoolean(context.getString(R.string.pref_enable_notifications_default)));
if ( displayNotifications ) {
String lastNotificationKey = context.getString(R.string.pref_last_notification);
long lastSync = prefs.getLong(lastNotificationKey, 0);
if (System.currentTimeMillis() - lastSync >= DAY_IN_MILLIS) {
// Last sync was more than 1 day ago, let's send a notification with the weather.
String locationQuery = Utility.getPreferredLocation(context);
Uri weatherUri = WeatherContract.WeatherEntry.buildWeatherLocationWithDate(locationQuery, System.currentTimeMillis());
// we'll query our contentProvider, as always
Cursor cursor = context.getContentResolver().query(weatherUri, NOTIFY_WEATHER_PROJECTION, null, null, null);
if (cursor.moveToFirst()) {
int weatherId = cursor.getInt(INDEX_WEATHER_ID);
double high = cursor.getDouble(INDEX_MAX_TEMP);
double low = cursor.getDouble(INDEX_MIN_TEMP);
String desc = cursor.getString(INDEX_SHORT_DESC);
int iconId = Utility.getIconResourceForWeatherCondition(weatherId);
Resources resources = context.getResources();
int artResourceId = Utility.getArtResourceForWeatherCondition(weatherId);
String artUrl = Utility.getArtUrlForWeatherCondition(context, weatherId);
// On Honeycomb and higher devices, we can retrieve the size of the large icon
// Prior to that, we use a fixed size
@SuppressLint("InlinedApi")
int largeIconWidth = Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB
? resources.getDimensionPixelSize(android.R.dimen.notification_large_icon_width)
: resources.getDimensionPixelSize(R.dimen.notification_large_icon_default);
@SuppressLint("InlinedApi")
int largeIconHeight = Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB
? resources.getDimensionPixelSize(android.R.dimen.notification_large_icon_height)
: resources.getDimensionPixelSize(R.dimen.notification_large_icon_default);
// Retrieve the large icon
Bitmap largeIcon;
try {
largeIcon = Glide.with(context)
.load(artUrl)
.asBitmap()
.error(artResourceId)
.fitCenter()
.into(largeIconWidth, largeIconHeight).get();
} catch (InterruptedException | ExecutionException e) {
Log.e(LOG_TAG, "Error retrieving large icon from " + artUrl, e);
largeIcon = BitmapFactory.decodeResource(resources, artResourceId);
}
String title = context.getString(R.string.app_name);
// Define the text of the forecast.
String contentText = String.format(context.getString(R.string.format_notification),
desc,
Utility.formatTemperature(context, high),
Utility.formatTemperature(context, low));
// NotificationCompatBuilder is a very convenient way to build backward-compatible
// notifications. Just throw in some data.
NotificationCompat.Builder mBuilder =
new NotificationCompat.Builder(getContext())
.setColor(resources.getColor(R.color.primary_light))
.setSmallIcon(iconId)
.setLargeIcon(largeIcon)
.setContentTitle(title)
.setContentText(contentText);
// Make something interesting happen when the user clicks on the notification.
// In this case, opening the app is sufficient.
Intent resultIntent = new Intent(context, MainActivity.class);
// The stack builder object will contain an artificial back stack for the
// started Activity.
// This ensures that navigating backward from the Activity leads out of
// your application to the Home screen.
TaskStackBuilder stackBuilder = TaskStackBuilder.create(context);
stackBuilder.addNextIntent(resultIntent);
PendingIntent resultPendingIntent =
stackBuilder.getPendingIntent(
0,
PendingIntent.FLAG_UPDATE_CURRENT
);
mBuilder.setContentIntent(resultPendingIntent);
NotificationManager mNotificationManager =
(NotificationManager) getContext().getSystemService(Context.NOTIFICATION_SERVICE);
// WEATHER_NOTIFICATION_ID allows you to update the notification later on.
mNotificationManager.notify(WEATHER_NOTIFICATION_ID, mBuilder.build());
//refreshing last sync
SharedPreferences.Editor editor = prefs.edit();
editor.putLong(lastNotificationKey, System.currentTimeMillis());
editor.commit();
}
cursor.close();
}
}
}
/**
* Helper method to handle insertion of a new location in the weather database.
*
* @param locationSetting The location string used to request updates from the server.
* @param cityName A human-readable city name, e.g "Mountain View"
* @param lat the latitude of the city
* @param lon the longitude of the city
* @return the row ID of the added location.
*/
long addLocation(String locationSetting, String cityName, double lat, double lon) {
long locationId;
// First, check if the location with this city name exists in the db
Cursor locationCursor = getContext().getContentResolver().query(
WeatherContract.LocationEntry.CONTENT_URI,
new String[]{WeatherContract.LocationEntry._ID},
WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ?",
new String[]{locationSetting},
null);
if (locationCursor.moveToFirst()) {
int locationIdIndex = locationCursor.getColumnIndex(WeatherContract.LocationEntry._ID);
locationId = locationCursor.getLong(locationIdIndex);
} else {
// Now that the content provider is set up, inserting rows of data is pretty simple.
// First create a ContentValues object to hold the data you want to insert.
ContentValues locationValues = new ContentValues();
// Then add the data, along with the corresponding name of the data type,
// so the content provider knows what kind of value is being inserted.
locationValues.put(WeatherContract.LocationEntry.COLUMN_CITY_NAME, cityName);
locationValues.put(WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING, locationSetting);
locationValues.put(WeatherContract.LocationEntry.COLUMN_COORD_LAT, lat);
locationValues.put(WeatherContract.LocationEntry.COLUMN_COORD_LONG, lon);
// Finally, insert location data into the database.
Uri insertedUri = getContext().getContentResolver().insert(
WeatherContract.LocationEntry.CONTENT_URI,
locationValues
);
// The resulting URI contains the ID for the row. Extract the locationId from the Uri.
locationId = ContentUris.parseId(insertedUri);
}
locationCursor.close();
// Wait, that worked? Yes!
return locationId;
}
/**
* Helper method to schedule the sync adapter periodic execution
*/
public static void configurePeriodicSync(Context context, int syncInterval, int flexTime) {
Account account = getSyncAccount(context);
String authority = context.getString(R.string.content_authority);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
// we can enable inexact timers in our periodic sync
SyncRequest request = new SyncRequest.Builder().
syncPeriodic(syncInterval, flexTime).
setSyncAdapter(account, authority).
setExtras(new Bundle()).build();
ContentResolver.requestSync(request);
} else {
ContentResolver.addPeriodicSync(account,
authority, new Bundle(), syncInterval);
}
}
/**
* Helper method to have the sync adapter sync immediately
* @param context The context used to access the account service
*/
public static void syncImmediately(Context context) {
Bundle bundle = new Bundle();
bundle.putBoolean(ContentResolver.SYNC_EXTRAS_EXPEDITED, true);
bundle.putBoolean(ContentResolver.SYNC_EXTRAS_MANUAL, true);
ContentResolver.requestSync(getSyncAccount(context),
context.getString(R.string.content_authority), bundle);
}
/**
* Helper method to get the fake account to be used with SyncAdapter, or make a new one
* if the fake account doesn't exist yet. If we make a new account, we call the
* onAccountCreated method so we can initialize things.
*
* @param context The context used to access the account service
* @return a fake account.
*/
public static Account getSyncAccount(Context context) {
// Get an instance of the Android account manager
AccountManager accountManager =
(AccountManager) context.getSystemService(Context.ACCOUNT_SERVICE);
// Create the account type and default account
Account newAccount = new Account(
context.getString(R.string.app_name), context.getString(R.string.sync_account_type));
// If the password doesn't exist, the account doesn't exist
if ( null == accountManager.getPassword(newAccount) ) {
/*
* Add the account and account type, no password or user data
* If successful, return the Account object, otherwise report an error.
*/
if (!accountManager.addAccountExplicitly(newAccount, "", null)) {
return null;
}
/*
* If you don't set android:syncable="true" in
* in your <provider> element in the manifest,
* then call ContentResolver.setIsSyncable(account, AUTHORITY, 1)
* here.
*/
onAccountCreated(newAccount, context);
}
return newAccount;
}
private static void onAccountCreated(Account newAccount, Context context) {
/*
* Since we've created an account
*/
SunshineSyncAdapter.configurePeriodicSync(context, SYNC_INTERVAL, SYNC_FLEXTIME);
/*
* Without calling setSyncAutomatically, our periodic sync will not be enabled.
*/
ContentResolver.setSyncAutomatically(newAccount, context.getString(R.string.content_authority), true);
/*
* Finally, let's do a sync to get things started
*/
syncImmediately(context);
}
public static void initializeSyncAdapter(Context context) {
getSyncAccount(context);
}
/**
* Sets the location status into shared preference. This function should not be called from
* the UI thread because it uses commit to write to the shared preferences.
* @param c Context to get the PreferenceManager from.
* @param locationStatus The IntDef value to set
*/
static private void setLocationStatus(Context c, @LocationStatus int locationStatus){
SharedPreferences sp = PreferenceManager.getDefaultSharedPreferences(c);
SharedPreferences.Editor spe = sp.edit();
spe.putInt(c.getString(R.string.pref_location_status_key), locationStatus);
spe.commit();
}
} | dconz13/MySunshine-Watch-Face | app/src/main/java/com/example/android/sunshine/app/sync/SunshineSyncAdapter.java | Java | apache-2.0 | 29,790 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace Versionr.Objects
{
    /// <summary>
    /// Record of an exclusive lock held in a Versionr vault. Serialized both
    /// over the wire (protobuf-net, by member number) and into the local
    /// SQLite database (ID is the primary key).
    /// </summary>
    [ProtoBuf.ProtoContract]
    public class VaultLock
    {
        /// <summary>Unique identifier of this lock.</summary>
        [ProtoBuf.ProtoMember(1)]
        [SQLite.PrimaryKey]
        public Guid ID { get; set; }
        /// <summary>Branch the lock applies to; null means the lock is not branch-scoped.</summary>
        [ProtoBuf.ProtoMember(2)]
        public Guid? Branch { get; set; }
        /// <summary>Vault path covered by the lock.</summary>
        [ProtoBuf.ProtoMember(3)]
        public string Path { get; set; }
        /// <summary>Name of the user holding the lock.</summary>
        [ProtoBuf.ProtoMember(4)]
        public string User { get; set; }
    }
}
| eatplayhate/versionr | VersionrCore/Objects/VaultLock.cs | C# | apache-2.0 | 566 |
<?php
use Illuminate\Support\Facades\Schema;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Database\Migrations\Migration;
/**
 * Migration that creates the single-row `instance_id` table used to store
 * this DreamFactory instance's identifier.
 */
class CreateInstanceIdTable extends Migration
{
    /**
     * Run the migrations.
     *
     * Creates `instance_id` with an auto-increment PK, the instance id string,
     * a nullable creation timestamp, and a last-modified timestamp that
     * defaults to the current time.
     *
     * @return void
     */
    public function up()
    {
        Schema::create('instance_id', function (Blueprint $t) {
            $t->increments('id');
            $t->string('instance_id');
            $t->timestamp('created_date')->nullable();
            $t->timestamp('last_modified_date')->useCurrent();
        });
    }

    /**
     * Reverse the migrations.
     *
     * Drops the `instance_id` table if it exists.
     *
     * @return void
     */
    public function down()
    {
        Schema::dropIfExists('instance_id');
    }
}
| dreamfactorysoftware/df-core | database/migrations/2020_03_02_121555_create_instance_id_table.php | PHP | apache-2.0 | 719 |
// //
// Copyright 2017 Mirko Raner //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// //
package top.java.matrix;
/**
 * Factory abstraction for creating {@link Matrix} instances from raw data,
 * decoupling client code from any particular matrix implementation.
 */
public interface MatrixFactory
{
    /**
     * Creates an M-by-N matrix from a column-major float array.
     *
     * @param rows the number of rows (dimension M)
     * @param columns the number of columns (dimension N)
     * @param columnMajorArray the matrix values in column-major order; expected
     *        length is rows*columns — TODO confirm whether implementations validate this
     * @param operations optional operation strategies the implementation may use
     * @param <M> row-dimension type
     * @param <N> column-dimension type
     * @return a newly created matrix backed by the given data
     */
    <M extends Dimension, N extends Dimension> Matrix<M, N> create(Dimension rows, Dimension columns, float[] columnMajorArray, MatrixOperation... operations);
}
| raner/top.java.matrix | src/main/java/top/java/matrix/MatrixFactory.java | Java | apache-2.0 | 1,406 |
//===--- LetPropertiesOpts.cpp - Optimize let properties ------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
// Promote values of non-static let properties initialized by means
// of constant values of simple types into their uses.
//
// For any given non-static let property this optimization is only possible
// if this pass can prove that it has analyzed all assignments of an initial
// value to this property and all those assignments assign the same value
// to this property.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "let-properties-opt"
#include "swift/SIL/DebugUtils.h"
#include "swift/SIL/InstructionUtils.h"
#include "swift/SIL/SILBasicBlock.h"
#include "swift/SIL/SILInstruction.h"
#include "swift/SIL/SILLinkage.h"
#include "swift/SILOptimizer/PassManager/Passes.h"
#include "swift/SILOptimizer/PassManager/Transforms.h"
#include "swift/SILOptimizer/Utils/Local.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace swift;
namespace {
using InstructionList = SmallVector<SILInstruction *, 8>;

/// The instructions that produce a let-property's initial value, together
/// with the SILValue that is the value itself. An InitSequence with a null
/// Result has not been (successfully) analyzed yet.
struct InitSequence {
  InstructionList Instructions;
  SILValue Result;

  // Valid iff analysis found a single, constant initializer value.
  bool isValid() const {
    return (bool) Result;
  }
};
/// Promote values of non-static let properties initialized by means
/// of constant values of simple types into their uses.
///
/// TODO: Don't occupy any storage for such let properties with constant
/// initializers.
///
/// Note: Storage from a let property can only be removed if this
/// property can never be referenced from another module.
class LetPropertiesOpt {
  SILModule *Module;

  typedef SmallVector<VarDecl *, 4> Properties;

  // Functions whose bodies were rewritten and need invalidation.
  llvm::SetVector<SILFunction *> ChangedFunctions;

  // Map each let property to a set of instructions accessing it.
  llvm::MapVector<VarDecl *, InstructionList> AccessMap;

  // Map each let property to the instruction sequence which initializes it.
  llvm::MapVector<VarDecl *, InitSequence> InitMap;

  // Properties in this set should not be processed by this pass
  // anymore.
  llvm::SmallPtrSet<VarDecl *, 16> SkipProcessing;

  // Types in this set should not be processed by this pass
  // anymore.
  llvm::SmallPtrSet<NominalTypeDecl *, 16> SkipTypeProcessing;

  // Properties in this set cannot be removed.
  llvm::SmallPtrSet<VarDecl *, 16> CannotRemove;

  // Set of let properties in a given nominal type.
  llvm::MapVector<NominalTypeDecl *, Properties> NominalTypeLetProperties;

  // Set of properties which already fulfill all conditions, except
  // the available of constant, statically known initializer.
  llvm::SmallPtrSet<VarDecl *, 16> PotentialConstantLetProperty;

public:
  LetPropertiesOpt(SILModule *M): Module(M) {}

  // Entry point: run the optimization over the whole module.
  void run(SILModuleTransform *T);

protected:
  // True if the property is a let that can be treated as a compile-time
  // constant by this pass.
  bool isConstantLetProperty(VarDecl *Property);
  // Record an instruction that reads/writes the property.
  void collectPropertyAccess(SILInstruction *I, VarDecl *Property, bool NonRemovable);
  // Record accesses performed via a struct_inst that contains let fields.
  void collectStructPropertiesAccess(StructInst *SI, bool NonRemovable);
  // Replace loads of the property with its (cloned) constant initializer.
  void optimizeLetPropertyAccess(VarDecl *SILG, const InitSequence &Init);
  // Check whether I assigns a constant initial value compatible with any
  // previously seen initializer for Prop.
  bool analyzeInitValue(SILInstruction *I, VarDecl *Prop);
};
/// Helper class to copy only the set of SIL instructions provided in the
/// constructor (an InitSequence), rather than a whole function or block.
class InitSequenceCloner : public SILClonerWithScopes<InitSequenceCloner> {
  friend class SILInstructionVisitor<InitSequenceCloner>;
  friend class SILCloner<InitSequenceCloner>;

  /// The initializer sequence being cloned.
  const InitSequence &Init;
  /// The instruction in front of which each clone is inserted.
  SILInstruction *DestIP;

public:
  InitSequenceCloner(const InitSequence &init, SILInstruction *destIP)
    : SILClonerWithScopes(*destIP->getFunction()), Init(init), DestIP(destIP) {}

  /// Clone a single instruction of the sequence.
  void process(SILInstruction *I) { visit(I); }

  /// No block remapping needed: every clone lands in DestIP's block.
  SILBasicBlock *remapBasicBlock(SILBasicBlock *BB) { return BB; }

  SILValue getMappedValue(SILValue Value) {
    return SILCloner<InitSequenceCloner>::getMappedValue(Value);
  }

  /// After each instruction is cloned, move the clone directly before the
  /// insertion point so the sequence stays in its original order.
  void postProcess(SILInstruction *orig, SILInstruction *cloned) {
    DestIP->getParent()->push_front(cloned);
    cloned->moveBefore(DestIP);
    SILClonerWithScopes<InitSequenceCloner>::postProcess(orig, cloned);
  }

  /// Clone all the instructions from Insns into the destination function,
  /// immediately before the destination block, and return the value of
  /// the result.
  SILValue clone() {
    for (auto I : Init.Instructions)
      process(I);
    return getMappedValue(Init.Result);
  }
};
} // end anonymous namespace
#ifndef NDEBUG
// Debug-only helper: print a VarDecl as "Type::name", or just "name" when
// the declaration context is not a nominal type.
static raw_ostream &operator<<(raw_ostream &OS, const VarDecl &decl) {
  if (auto *nominal = dyn_cast<NominalTypeDecl>(decl.getDeclContext()))
    OS << nominal->getName() << "::";
  OS << decl.getName();
  return OS;
}
#endif
/// Optimize access to the let property, which is known
/// to have a constant value. Replace all loads from the
/// property by its constant value.
///
/// \param Property the non-static let property being optimized.
/// \param init the statically known initializer; its result value replaces
///        every recorded read of \p Property.
void LetPropertiesOpt::optimizeLetPropertyAccess(VarDecl *Property,
                                                 const InitSequence &init) {
  assert(init.isValid());

  // Bail if earlier analysis decided this property cannot be handled.
  if (SkipProcessing.count(Property))
    return;

  auto *Ty = dyn_cast<NominalTypeDecl>(Property->getDeclContext());
  if (SkipTypeProcessing.count(Ty))
    return;

  LLVM_DEBUG(llvm::dbgs() << "Replacing access to property '" << *Property
                          << "' by its constant initializer\n");

  auto PropertyAccess = Property->getEffectiveAccess();
  auto TypeAccess = Ty->getEffectiveAccess();
  auto CanRemove = false;

  // Check if a given let property can be removed, because it
  // is not accessible elsewhere. This can happen if this property
  // is private or if it is internal and WMO mode is used.
  if (TypeAccess <= AccessLevel::FilePrivate ||
      PropertyAccess <= AccessLevel::FilePrivate
      || ((TypeAccess <= AccessLevel::Internal ||
           PropertyAccess <= AccessLevel::Internal) &&
          Module->isWholeModule())) {
    CanRemove = true;
    LLVM_DEBUG(llvm::dbgs() << "Storage for property '" << *Property
                            << "' can be eliminated\n");
  }

  // A property explicitly flagged non-removable keeps its storage even if
  // it is invisible outside this module.
  if (CannotRemove.count(Property))
    CanRemove = false;

  if (!AccessMap.count(Property)) {
    LLVM_DEBUG(llvm::dbgs() << "Property '" << *Property <<"' is never read\n");
    if (CanRemove) {
      // TODO: Remove the let property, because it is never accessed.
    }
    return;
  }

  auto &Loads = AccessMap[Property];

  unsigned NumReplaced = 0;

  for (auto Load: Loads) {
    SILFunction *F = Load->getFunction();

    // A helper function to copy the initializer into the target function
    // at the target insertion point.
    auto cloneInitAt = [&](SILInstruction *insertionPoint) -> SILValue {
      InitSequenceCloner cloner(init, insertionPoint);
      return cloner.clone();
    };

    // Look for any instructions accessing let properties.
    if (isa<RefElementAddrInst>(Load) || isa<StructElementAddrInst>(Load)
        || isa<BeginAccessInst>(Load)) {
      auto proj = cast<SingleValueInstruction>(Load);

      // Copy the initializer into the function
      // Replace the access to a let property by the value
      // computed by this initializer.
      SILValue clonedInit = cloneInitAt(proj);
      // Advance the use iterator before mutating: users that get replaced
      // are erased from the parent inside the loop body.
      for (auto UI = proj->use_begin(), E = proj->use_end(); UI != E;) {
        auto *User = UI->getUser();
        ++UI;

        // A nested begin_access will be mapped as a separate "Load".
        if (isa<BeginAccessInst>(User))
          continue;

        if (!canReplaceLoadSequence(User))
          continue;

        replaceLoadSequence(User, clonedInit);
        eraseUsesOfInstruction(User);
        User->eraseFromParent();
        ++NumReplaced;
      }
      ChangedFunctions.insert(F);
    } else if (auto proj = dyn_cast<StructExtractInst>(Load)) {
      // Copy the initializer into the function
      // Replace the access to a let property by the value
      // computed by this initializer.
      SILValue clonedInit = cloneInitAt(proj);
      proj->replaceAllUsesWith(clonedInit);
      LLVM_DEBUG(llvm::dbgs() << "Access to " << *Property <<" was replaced:\n";
                 clonedInit->dumpInContext());

      proj->eraseFromParent();
      ++NumReplaced;
      ChangedFunctions.insert(F);
    }
  }

  LLVM_DEBUG(llvm::dbgs() << "Access to " << *Property << " was replaced "
                          << NumReplaced << " time(s)\n");

  if (CanRemove) {
    // TODO: Remove the let property, because it is never accessed.
  }
}
/// Compare two SILValues structurally.
///
/// Two values are considered identical when they are the same value, or
/// when they are produced at the same result index by instructions that
/// are recursively identical (the comparison is passed back into
/// isIdenticalTo as the operand-equivalence predicate).
static bool isStructurallyIdentical(SILValue LHS, SILValue RHS) {
  if (LHS == RHS)
    return true;

  if (LHS->getType() != RHS->getType())
    return false;

  auto lResult = LHS->getDefiningInstructionResult();
  auto rResult = RHS->getDefiningInstructionResult();
  assert(lResult && rResult &&
         "operands of instructions approved by analyzeStaticInitializer "
         "should always be defined by instructions");
  return (lResult->ResultIndex == rResult->ResultIndex &&
          lResult->Instruction->isIdenticalTo(rResult->Instruction,
                                              isStructurallyIdentical));
}
// NOTE: the original ended with '};' — the extraneous semicolon after the
// function body (an empty declaration at namespace scope) has been removed.
/// Check whether two initializer sequences compute the same value.
///
/// Comparing only the final results is sufficient: the structural
/// comparison recurses through the defining instructions, so sequences
/// that are composed slightly differently still match when equivalent.
static bool isSameInitSequence(const InitSequence &LHS,
                               const InitSequence &RHS) {
  assert(LHS.isValid() && RHS.isValid());
  return isStructurallyIdentical(LHS.Result, RHS.Result);
}
/// Check if a given let property can be assigned externally.
static bool isAssignableExternally(VarDecl *Property, SILModule *Module) {
  // A property that is not visible outside this module can never be
  // initialized by external code.
  if (!Module->isVisibleExternally(Property))
    return false;

  // If at least one of the properties of the enclosing type cannot be
  // used externally, then no initializer can be implemented externally as
  // it wouldn't be able to initialize such a property.
  // More over, for classes, only the class itself can initialize its
  // let properties. Subclasses and extensions cannot do it.
  // For structs, external extensions may initialize let properties. But to do
  // that they need to be able to initialize all properties, i.e. all
  // properties should be accessible by the extension.

  auto *Ty = dyn_cast<NominalTypeDecl>(Property->getDeclContext());

  // Initializer for a let property of a class cannot exist externally.
  // It cannot be defined by an extension or a derived class.
  if (isa<ClassDecl>(Ty))
    return false;

  // Check if there are any private properties or any internal properties and
  // it is a whole module compilation. In this case, no external initializer
  // may exist.
  for (auto SP : Ty->getStoredProperties()) {
    auto access = SP->getEffectiveAccess();
    if (access <= AccessLevel::FilePrivate ||
        (access <= AccessLevel::Internal && Module->isWholeModule())) {
      LLVM_DEBUG(llvm::dbgs() << "Property " << *Property
                              << " cannot be set externally\n");
      return false;
    }
  }

  LLVM_DEBUG(llvm::dbgs() << "Property " << *Property
                          << " can be used externally\n");
  return true;
}
// Checks if a given property may have any unknown uses which cannot
// be analyzed by this pass.
static bool mayHaveUnknownUses(VarDecl *Property, SILModule *Module) {
  // Initializer bodies from other modules are invisible here unless all of
  // them are fragile, so the property's value must be treated as unknown.
  // TODO: Support fragile initializers.
  if (Property->getDeclContext()->getParentModule() !=
      Module->getSwiftModule()) {
    LLVM_DEBUG(llvm::dbgs() << "Property " << *Property
                            << " is defined in a different module\n");
    return true;
  }

  // If let properties can be assigned externally, we don't know
  // the values they may get.
  return isAssignableExternally(Property, Module);
}
/// Check if a given property is a non-static let property
/// with known constant value.
///
/// A positive result is cached in PotentialConstantLetProperty; a negative
/// one in SkipProcessing, so each property is analyzed at most once.
bool LetPropertiesOpt::isConstantLetProperty(VarDecl *Property) {
  // Process only non-static let properties here.
  if (!Property->isLet() || Property->isStatic())
    return false;

  // Do not re-process already known properties.
  if (SkipProcessing.count(Property))
    return false;

  // If these checks were performed already, no need to
  // repeat them.
  if (PotentialConstantLetProperty.count(Property))
    return true;

  // Check the visibility of this property. If its visibility
  // implies that this optimization pass cannot analyze all uses,
  // don't process it.
  if (mayHaveUnknownUses(Property, Module)) {
    LLVM_DEBUG(llvm::dbgs() << "Property '" << *Property
                            << "' may have unknown uses\n");
    SkipProcessing.insert(Property);
    return false;
  }

  LLVM_DEBUG(llvm::dbgs() << "Property '" << *Property
                          << "' has no unknown uses\n");

  // Only properties of simple types can be optimized.
  if (!isSimpleType(Module->Types.getLoweredType(Property->getType()), *Module)) {
    LLVM_DEBUG(llvm::dbgs() << "Property '" << *Property
                            << "' is not of trivial type\n");
    SkipProcessing.insert(Property);
    return false;
  }

  PotentialConstantLetProperty.insert(Property);
  return true;
}
/// Return true if \p addr is a ref_element_addr or struct_element_addr
/// projection of exactly the given property.
static bool isProjectionOfProperty(SILValue addr, VarDecl *Property) {
  if (auto *refProj = dyn_cast<RefElementAddrInst>(addr))
    return refProj->getField() == Property;
  if (auto *structProj = dyn_cast<StructElementAddrInst>(addr))
    return structProj->getField() == Property;
  return false;
}
// Analyze the init value being stored by the instruction into a property.
// Returns true if the property still has a single, statically known
// constant initializer after taking this store into account.
bool
LetPropertiesOpt::analyzeInitValue(SILInstruction *I, VarDecl *Property) {
  SILValue value;

  // NOTE(review): callers pass only 'struct' or 'store' instructions here;
  // for any other instruction kind 'value' would remain null before the
  // dyn_cast below — confirm this invariant holds at all call sites.
  if (auto SI = dyn_cast<StructInst>(I)) {
    value = SI->getFieldValue(Property);
  } else if (auto SI = dyn_cast<StoreInst>(I)) {
    auto Dest = stripAddressAccess(SI->getDest());

    assert(isProjectionOfProperty(stripAddressAccess(SI->getDest()), Property)
           && "Store instruction should store into a proper let property");
    (void) Dest;
    value = SI->getSrc();
  }

  // Check if it's just a copy from another instance of the struct.
  if (auto *LI = dyn_cast<LoadInst>(value)) {
    SILValue addr = LI->getOperand();
    if (isProjectionOfProperty(addr, Property))
      return true;
  }

  // Bail if a value of a property is not a statically known constant init.
  InitSequence sequence;
  sequence.Result = value;
  if (!analyzeStaticInitializer(value, sequence.Instructions))
    return false;

  auto &cachedSequence = InitMap[Property];
  if (cachedSequence.isValid() &&
      !isSameInitSequence(cachedSequence, sequence)) {
    // The found init value is different from the already seen init value.
    return false;
  } else {
    LLVM_DEBUG(llvm::dbgs() << "The value of property '" << *Property
                            << "' is statically known so far\n");

    // Remember the statically known value.
    cachedSequence = std::move(sequence);
    return true;
  }
}
// Analyze the 'struct' instruction and check if it initializes
// any let properties by statically known constant initializers.
// Results per struct type are cached in NominalTypeLetProperties /
// SkipTypeProcessing so the stored-property scan happens once per type.
void LetPropertiesOpt::collectStructPropertiesAccess(StructInst *SI,
                                                     bool NonRemovable) {
  auto structDecl = SI->getStructDecl();
  // Check if this struct has any let properties.

  // Bail, if this struct is known to contain nothing interesting.
  if (SkipTypeProcessing.count(structDecl))
    return;

  // Get the set of let properties defined by this struct.
  if (!NominalTypeLetProperties.count(structDecl)) {
    // Compute the let properties of this struct.
    SmallVector<VarDecl *, 4> LetProps;

    for (auto Prop : structDecl->getStoredProperties()) {
      if (!isConstantLetProperty(Prop))
        continue;
      LetProps.push_back(Prop);
    }

    if (LetProps.empty()) {
      // No interesting let properties in this struct.
      SkipTypeProcessing.insert(structDecl);
      return;
    }

    NominalTypeLetProperties[structDecl] = LetProps;
    LLVM_DEBUG(llvm::dbgs() << "Computed set of let properties for struct '"
                            << structDecl->getName() << "'\n");
  }

  auto &Props = NominalTypeLetProperties[structDecl];

  LLVM_DEBUG(llvm::dbgs() << "Found a struct instruction initializing some "
                             "let properties: ";
             SI->dumpInContext());
  // Figure out the initializing sequence for each
  // of the properties.
  for (auto Prop : Props) {
    if (SkipProcessing.count(Prop))
      continue;
    SILValue PropValue = SI->getOperandForField(Prop)->get();
    LLVM_DEBUG(llvm::dbgs() << "Check the value of property '" << *Prop
                            << "' :" << PropValue << "\n");
    // A non-constant initializer disqualifies the property permanently.
    if (!analyzeInitValue(SI, Prop)) {
      SkipProcessing.insert(Prop);
      LLVM_DEBUG(llvm::dbgs() << "The value of a let property '" << *Prop
                              << "' is not statically known\n");
    }
    (void) PropValue;
  }
}
/// Check if I is a sequence of projections followed by a load.
/// Since it is supposed to be a load from a let property with
/// statically known constant initializer, only struct_element_addr
/// and tuple_element_addr projections are considered.
static bool isValidPropertyLoad(SILInstruction *I) {
  // A load terminates the projection chain successfully.
  if (isa<LoadInst>(I))
    return true;

  // Anything other than the two allowed projections ends the search.
  if (!isa<StructElementAddrInst>(I) && !isa<TupleElementAddrInst>(I))
    return false;

  // Every non-incidental user of the projection must itself end in a load.
  auto *proj = cast<SingleValueInstruction>(I);
  for (auto use : getNonDebugUses(proj)) {
    auto *user = use->getUser();
    if (isIncidentalUse(user))
      continue;
    if (!isValidPropertyLoad(user))
      return false;
  }
  return true;
}
/// Remember where this property is accessed.
///
/// Reads are appended to AccessMap; stores are checked for a constant
/// initializer via analyzeInitValue. Anything this pass cannot model puts
/// the property into SkipProcessing.
void LetPropertiesOpt::collectPropertyAccess(SILInstruction *I,
                                             VarDecl *Property,
                                             bool NonRemovable) {
  if (!isConstantLetProperty(Property))
    return;

  LLVM_DEBUG(llvm::dbgs() << "Collecting property access for property '"
                          << *Property << "':\n";
             llvm::dbgs() << "The instructions are:\n"; I->dumpInContext());

  if (isa<RefElementAddrInst>(I) || isa<StructElementAddrInst>(I)
      || isa<BeginAccessInst>(I)) {
    // Check if there is a store to this property.
    auto projection = cast<SingleValueInstruction>(I);
    for (auto Use : getNonDebugUses(projection)) {
      auto *User = Use->getUser();

      // Skip bookkeeping uses such as end_access / fix_lifetime.
      if (isIncidentalUse(User))
        continue;

      // Each begin_access is analyzed as a separate property access. Do not
      // consider a begin_access a use of the current projection.
      if (isa<BeginAccessInst>(User))
        continue;

      if (auto *SI = dyn_cast<StoreInst>(User)) {
        // There is a store into this property.
        // Analyze the assigned value and check if it is a constant
        // statically known initializer.
        if (SI->getDest() != projection || !analyzeInitValue(SI, Property)) {
          SkipProcessing.insert(Property);
          return;
        }
        continue;
      }

      // Follow the chain of projections and check if it ends up with a load.
      // If this is not the case, it is potentially a store into sub-property
      // of a property.
      // We cannot handle such cases yet, so bail.
      if (!isValidPropertyLoad(User)) {
        SkipProcessing.insert(Property);
        return;
      }
    }
  }

  AccessMap[Property].push_back(I);
  // If any property is marked as non-removable, their initialization
  // and storage cannot be completely removed. But their constant
  // values can still be propagated into their uses whenever possible.
  if (NonRemovable)
    CannotRemove.insert(Property);
}
/// Entry point of the pass: scan every function in the module for accesses
/// to let properties, then replace reads of constant-initialized properties
/// by the cloned initializer value and invalidate touched functions.
void LetPropertiesOpt::run(SILModuleTransform *T) {
  // Collect property access information for the whole module.
  for (auto &F : *Module) {
    // Take into account even those functions that should not be
    // optimized, because they may contain access to the let
    // properties.
    bool NonRemovable = !F.shouldOptimize();

    // FIXME: We should be able to handle ownership.
    NonRemovable &= !F.hasOwnership();

    for (auto &BB : F) {
      for (auto &I : BB)
        // Look for any instructions accessing let properties.
        // It includes referencing this specific property (both reads and
        // stores), as well as implicit stores by means of e.g.
        // a struct instruction.
        if (auto *BAI = dyn_cast<BeginAccessInst>(&I)) {
          if (auto *REAI =
                  dyn_cast<RefElementAddrInst>(stripAddressAccess(BAI))) {
            collectPropertyAccess(BAI, REAI->getField(), NonRemovable);
          }
        } else if (auto *REAI = dyn_cast<RefElementAddrInst>(&I)) {
          collectPropertyAccess(REAI, REAI->getField(), NonRemovable);
        } else if (auto *SEI = dyn_cast<StructExtractInst>(&I)) {
          collectPropertyAccess(SEI, SEI->getField(), NonRemovable);
        } else if (auto *SEAI = dyn_cast<StructElementAddrInst>(&I)) {
          collectPropertyAccess(SEAI, SEAI->getField(), NonRemovable);
        } else if (auto *SI = dyn_cast<StructInst>(&I)) {
          collectStructPropertiesAccess(SI, NonRemovable);
        }
    }
  }

  // Rewrite every recorded read using the initializer sequence found above.
  for (auto &Init: InitMap) {
    optimizeLetPropertyAccess(Init.first, Init.second);
  }

  for (SILFunction *ChangedFn : ChangedFunctions) {
    // Program flow is not changed by this pass.
    T->invalidateAnalysis(ChangedFn,
                          SILAnalysis::InvalidationKind::Instructions);
  }
}
namespace {
class LetPropertiesOptPass : public SILModuleTransform
{
void run() override {
LetPropertiesOpt(getModule()).run(this);
}
};
} // end anonymous namespace
/// Factory entry point: create a fresh instance of the let-properties
/// optimization pass (caller takes ownership).
SILTransform *swift::createLetPropertiesOpt() {
  return new LetPropertiesOptPass();
}
| practicalswift/swift | lib/SILOptimizer/IPO/LetPropertiesOpts.cpp | C++ | apache-2.0 | 22,467 |
//===- InstCombineVectorOps.cpp -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements instcombine for ExtractElement, InsertElement and
// ShuffleVector.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;
#define DEBUG_TYPE "instcombine"
/// CheapToScalarize - Return true if the value is cheaper to scalarize than it
/// is to leave as a vector operation. isConstant indicates whether we're
/// extracting one known element. If false we're extracting a variable index.
static bool CheapToScalarize(Value *V, bool isConstant) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    // Extracting a known lane from a constant vector is always free.
    if (isConstant) return true;

    // If all elts are the same, we can extract it and use any of the values.
    if (Constant *Op0 = C->getAggregateElement(0U)) {
      for (unsigned i = 1, e = V->getType()->getVectorNumElements(); i != e;
           ++i)
        if (C->getAggregateElement(i) != Op0)
          return false;
      return true;
    }
  }
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Insert element gets simplified to the inserted element or is deleted if
  // this is constant idx extract element and its a constant idx insertelt.
  if (I->getOpcode() == Instruction::InsertElement && isConstant &&
      isa<ConstantInt>(I->getOperand(2)))
    return true;
  // A single-use vector load can become a scalar load of one element.
  if (I->getOpcode() == Instruction::Load && I->hasOneUse())
    return true;
  // A single-use binary op or compare is cheap to scalarize when at least
  // one of its operands is (the extraction distributes over the operation).
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
    if (BO->hasOneUse() &&
        (CheapToScalarize(BO->getOperand(0), isConstant) ||
         CheapToScalarize(BO->getOperand(1), isConstant)))
      return true;
  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    if (CI->hasOneUse() &&
        (CheapToScalarize(CI->getOperand(0), isConstant) ||
         CheapToScalarize(CI->getOperand(1), isConstant)))
      return true;

  return false;
}
/// FindScalarElement - Given a vector and an element number, see if the scalar
/// value is already around as a register, for example if it were inserted then
/// extracted from the vector.
///
/// Returns the scalar value, an UndefValue for out-of-range accesses, or
/// null when no already-materialized scalar can be found.
static Value *FindScalarElement(Value *V, unsigned EltNo) {
  assert(V->getType()->isVectorTy() && "Not looking at a vector?");
  VectorType *VTy = cast<VectorType>(V->getType());
  unsigned Width = VTy->getNumElements();
  if (EltNo >= Width)  // Out of range access.
    return UndefValue::get(VTy->getElementType());

  // Constant vectors expose their elements directly.
  if (Constant *C = dyn_cast<Constant>(V))
    return C->getAggregateElement(EltNo);

  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert to a variable element, we don't know what it is.
    if (!isa<ConstantInt>(III->getOperand(2)))
      return nullptr;
    unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();

    // If this is an insert to the element we are looking for, return the
    // inserted value.
    if (EltNo == IIElt)
      return III->getOperand(1);

    // Otherwise, the insertelement doesn't modify the value, recurse on its
    // vector input.
    return FindScalarElement(III->getOperand(0), EltNo);
  }

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
    unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
    int InEl = SVI->getMaskValue(EltNo);
    // A negative mask value denotes an undef lane.
    if (InEl < 0)
      return UndefValue::get(VTy->getElementType());
    // Mask indices below LHSWidth select from the first shuffle operand;
    // the rest (rebased by LHSWidth) select from the second.
    if (InEl < (int)LHSWidth)
      return FindScalarElement(SVI->getOperand(0), InEl);
    return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth);
  }

  // Extract a value from a vector add operation with a constant zero.
  Value *Val = nullptr; Constant *Con = nullptr;
  if (match(V, m_Add(m_Value(Val), m_Constant(Con)))) {
    if (Con->getAggregateElement(EltNo)->isNullValue())
      return FindScalarElement(Val, EltNo);
  }

  // Otherwise, we don't know.
  return nullptr;
}
// If we have a PHI node with a vector type that has only 2 uses: feed
// itself and be an operand of extractelement at a constant location,
// try to replace the PHI of the vector type with a PHI of a scalar type.
Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
  // Verify that the PHI node has exactly 2 uses. Otherwise return NULL.
  if (!PN->hasNUses(2))
    return nullptr;

  // If so, it's known at this point that one operand is PHI and the other is
  // an extractelement node. Find the PHI user that is not the extractelement
  // node.
  auto iu = PN->user_begin();
  Instruction *PHIUser = dyn_cast<Instruction>(*iu);
  if (PHIUser == cast<Instruction>(&EI))
    PHIUser = cast<Instruction>(*(++iu));

  // Verify that this PHI user has one use, which is the PHI itself,
  // and that it is a binary operation which is cheap to scalarize.
  // otherwise return NULL.
  if (!PHIUser->hasOneUse() || !(PHIUser->user_back() == PN) ||
      !(isa<BinaryOperator>(PHIUser)) || !CheapToScalarize(PHIUser, true))
    return nullptr;

  // Create a scalar PHI node that will replace the vector PHI node
  // just before the current PHI node.
  PHINode *scalarPHI = cast<PHINode>(InsertNewInstWith(
      PHINode::Create(EI.getType(), PN->getNumIncomingValues(), ""), *PN));
  // Scalarize each PHI operand.
  for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
    Value *PHIInVal = PN->getIncomingValue(i);
    BasicBlock *inBB = PN->getIncomingBlock(i);
    Value *Elt = EI.getIndexOperand();
    // If the operand is the PHI induction variable:
    if (PHIInVal == PHIUser) {
      // Scalarize the binary operation. Its first operand is the
      // scalar PHI, and the second operand is extracted from the other
      // vector operand.
      BinaryOperator *B0 = cast<BinaryOperator>(PHIUser);
      unsigned opId = (B0->getOperand(0) == PN) ? 1 : 0;
      Value *Op = InsertNewInstWith(
          ExtractElementInst::Create(B0->getOperand(opId), Elt,
                                     B0->getOperand(opId)->getName() + ".Elt"),
          *B0);
      Value *newPHIUser = InsertNewInstWith(
          BinaryOperator::Create(B0->getOpcode(), scalarPHI, Op), *B0);
      scalarPHI->addIncoming(newPHIUser, inBB);
    } else {
      // Scalarize PHI input:
      Instruction *newEI = ExtractElementInst::Create(PHIInVal, Elt, "");
      // Insert the new instruction into the predecessor basic block.
      // When the incoming value is a non-PHI instruction, extract right
      // after it; otherwise use the block's first insertion point.
      Instruction *pos = dyn_cast<Instruction>(PHIInVal);
      BasicBlock::iterator InsertPos;
      if (pos && !isa<PHINode>(pos)) {
        InsertPos = pos;
        ++InsertPos;
      } else {
        InsertPos = inBB->getFirstInsertionPt();
      }

      InsertNewInstWith(newEI, *InsertPos);

      scalarPHI->addIncoming(newEI, inBB);
    }
  }
  return ReplaceInstUsesWith(EI, scalarPHI);
}
/// Combine an extractelement: fold constant-vector extracts, shrink the
/// demanded elements of the source, look through insert/shuffle/bitcast
/// chains, scalarize vector PHIs, and sink the extract below binary ops,
/// casts and selects where profitable. Returns a replacement instruction
/// or null if no transform applied.
Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
  // If vector val is constant with all elements the same, replace EI with
  // that element.  We handle a known element # below.
  if (Constant *C = dyn_cast<Constant>(EI.getOperand(0)))
    if (CheapToScalarize(C, false))
      return ReplaceInstUsesWith(EI, C->getAggregateElement(0U));

  // If extracting a specified index from the vector, see if we can recursively
  // find a previously computed scalar that was inserted into the vector.
  if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
    unsigned IndexVal = IdxC->getZExtValue();
    unsigned VectorWidth = EI.getVectorOperandType()->getNumElements();

    // If this is extracting an invalid index, turn this into undef, to avoid
    // crashing the code below.
    if (IndexVal >= VectorWidth)
      return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));

    // This instruction only demands the single element from the input vector.
    // If the input vector has a single use, simplify it based on this use
    // property.
    if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
      APInt UndefElts(VectorWidth, 0);
      APInt DemandedMask(VectorWidth, 0);
      DemandedMask.setBit(IndexVal);
      if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0), DemandedMask,
                                                UndefElts)) {
        EI.setOperand(0, V);
        return &EI;
      }
    }

    // Try to locate an already-materialized scalar for this lane.
    if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal))
      return ReplaceInstUsesWith(EI, Elt);

    // If the this extractelement is directly using a bitcast from a vector of
    // the same number of elements, see if we can find the source element from
    // it.  In this case, we will end up needing to bitcast the scalars.
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
      if (VectorType *VT = dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
        if (VT->getNumElements() == VectorWidth)
          if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal))
            return new BitCastInst(Elt, EI.getType());
    }

    // If there's a vector PHI feeding a scalar use through this extractelement
    // instruction, try to scalarize the PHI.
    if (PHINode *PN = dyn_cast<PHINode>(EI.getOperand(0))) {
      Instruction *scalarPHI = scalarizePHI(EI, PN);
      if (scalarPHI)
        return scalarPHI;
    }
  }

  if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
    // Push extractelement into predecessor operation if legal and
    // profitable to do so
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
      if (I->hasOneUse() &&
          CheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) {
        // extract (binop L, R), i --> binop (extract L, i), (extract R, i)
        Value *newEI0 =
          Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
                                        EI.getName()+".lhs");
        Value *newEI1 =
          Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
                                        EI.getName()+".rhs");
        return BinaryOperator::Create(BO->getOpcode(), newEI0, newEI1);
      }
    } else if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
      // Extracting the inserted element?
      if (IE->getOperand(2) == EI.getOperand(1))
        return ReplaceInstUsesWith(EI, IE->getOperand(1));
      // If the inserted and extracted elements are constants, they must not
      // be the same value, extract from the pre-inserted value instead.
      if (isa<Constant>(IE->getOperand(2)) && isa<Constant>(EI.getOperand(1))) {
        Worklist.AddValue(EI.getOperand(0));
        EI.setOperand(0, IE->getOperand(0));
        return &EI;
      }
    } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) {
      // If this is extracting an element from a shufflevector, figure out where
      // it came from and extract from the appropriate input element instead.
      if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
        int SrcIdx = SVI->getMaskValue(Elt->getZExtValue());
        Value *Src;
        unsigned LHSWidth =
          SVI->getOperand(0)->getType()->getVectorNumElements();

        // Negative mask value: the lane is undef.
        if (SrcIdx < 0)
          return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
        if (SrcIdx < (int)LHSWidth)
          Src = SVI->getOperand(0);
        else {
          SrcIdx -= LHSWidth;
          Src = SVI->getOperand(1);
        }
        Type *Int32Ty = Type::getInt32Ty(EI.getContext());
        return ExtractElementInst::Create(Src,
                                          ConstantInt::get(Int32Ty,
                                                           SrcIdx, false));
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      // Canonicalize extractelement(cast) -> cast(extractelement)
      // bitcasts can change the number of vector elements and they cost nothing
      if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) {
        Value *EE = Builder->CreateExtractElement(CI->getOperand(0),
                                                  EI.getIndexOperand());
        Worklist.AddValue(EE);
        return CastInst::Create(CI->getOpcode(), EE, EI.getType());
      }
    } else if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
      if (SI->hasOneUse()) {
        // TODO: For a select on vectors, it might be useful to do this if it
        // has multiple extractelement uses. For vector select, that seems to
        // fight the vectorizer.

        // If we are extracting an element from a vector select or a select on
        // vectors, a select on the scalars extracted from the vector arguments.
        Value *TrueVal = SI->getTrueValue();
        Value *FalseVal = SI->getFalseValue();

        Value *Cond = SI->getCondition();
        // A vector condition must itself be scalarized to this lane.
        if (Cond->getType()->isVectorTy()) {
          Cond = Builder->CreateExtractElement(Cond,
                                               EI.getIndexOperand(),
                                               Cond->getName() + ".elt");
        }

        Value *V1Elem
          = Builder->CreateExtractElement(TrueVal,
                                          EI.getIndexOperand(),
                                          TrueVal->getName() + ".elt");

        Value *V2Elem
          = Builder->CreateExtractElement(FalseVal,
                                          EI.getIndexOperand(),
                                          FalseVal->getName() + ".elt");
        return SelectInst::Create(Cond,
                                  V1Elem,
                                  V2Elem,
                                  SI->getName() + ".elt");
      }
    }
  }
  return nullptr;
}
/// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
/// elements from either LHS or RHS, return the shuffle mask and true.
/// Otherwise, return false.
///
/// On success, Mask holds one i32 constant (or undef) per output lane:
/// indices [0, NumElts) select from LHS, [NumElts, 2*NumElts) from RHS.
static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
                                         SmallVectorImpl<Constant*> &Mask) {
  assert(LHS->getType() == RHS->getType() &&
         "Invalid CollectSingleShuffleElements");
  unsigned NumElts = V->getType()->getVectorNumElements();

  // An undef vector shuffles to all-undef lanes.
  if (isa<UndefValue>(V)) {
    Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
    return true;
  }

  // V is exactly LHS: the identity mask [0 .. NumElts).
  if (V == LHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()), i));
    return true;
  }

  // V is exactly RHS: the identity mask offset by NumElts.
  if (V == RHS) {
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()),
                                      i+NumElts));
    return true;
  }

  if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp    = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp    = IEI->getOperand(2);

    // Only constant insertion indices can be folded into a mask.
    if (!isa<ConstantInt>(IdxOp))
      return false;
    unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();

    if (isa<UndefValue>(ScalarOp)) {  // inserting undef into vector.
      // We can handle this if the vector we are inserting into is
      // transitively ok.
      if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
        // If so, update the mask to reflect the inserted undef.
        Mask[InsertedIdx] = UndefValue::get(Type::getInt32Ty(V->getContext()));
        return true;
      }
    } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
      if (isa<ConstantInt>(EI->getOperand(1))) {
        unsigned ExtractedIdx =
        cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned NumLHSElts = LHS->getType()->getVectorNumElements();

        // This must be extracting from either LHS or RHS.
        if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
          // We can handle this if the vector we are inserting into is
          // transitively ok.
          if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
            // If so, update the mask to reflect the inserted value.
            if (EI->getOperand(0) == LHS) {
              Mask[InsertedIdx % NumElts] =
              ConstantInt::get(Type::getInt32Ty(V->getContext()),
                               ExtractedIdx);
            } else {
              assert(EI->getOperand(0) == RHS);
              Mask[InsertedIdx % NumElts] =
              ConstantInt::get(Type::getInt32Ty(V->getContext()),
                               ExtractedIdx + NumLHSElts);
            }
            return true;
          }
        }
      }
    }
  }

  return false;
}
/// We are building a shuffle to create V, which is a sequence of insertelement,
/// extractelement pairs. If PermittedRHS is set, then we must either use it or
/// not rely on the second vector source. Return a std::pair containing the
/// left and right vectors of the proposed shuffle (or 0), and set the Mask
/// parameter as required.
///
/// Note: we intentionally don't try to fold earlier shuffles since they have
/// often been chosen carefully to be efficiently implementable on the target.
typedef std::pair<Value *, Value *> ShuffleOps;
static ShuffleOps CollectShuffleElements(Value *V,
                                         SmallVectorImpl<Constant *> &Mask,
                                         Value *PermittedRHS) {
  assert(V->getType()->isVectorTy() && "Invalid shuffle!");
  unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
  // An undef source shuffles to an all-undef mask. If a RHS was requested,
  // hand back an undef of its type so the caller can still pair with it.
  if (isa<UndefValue>(V)) {
    Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
    return std::make_pair(
        PermittedRHS ? UndefValue::get(PermittedRHS->getType()) : V, nullptr);
  }
  // A zero-initialized source is an identity shuffle of itself (all-zero
  // mask), since every lane holds the same value.
  if (isa<ConstantAggregateZero>(V)) {
    Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(V->getContext()),0));
    return std::make_pair(V, nullptr);
  }
  if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
    // If this is an insert of an extract from some other vector, include it.
    Value *VecOp = IEI->getOperand(0);
    Value *ScalarOp = IEI->getOperand(1);
    Value *IdxOp = IEI->getOperand(2);
    if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
      if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp)) {
        unsigned ExtractedIdx =
          cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
        unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
        // Either the extracted from or inserted into vector must be RHSVec,
        // otherwise we'd end up with a shuffle of three inputs.
        if (EI->getOperand(0) == PermittedRHS || PermittedRHS == nullptr) {
          Value *RHS = EI->getOperand(0);
          // Recurse on the vector being inserted into; the extract source
          // becomes the (now fixed) right-hand side.
          ShuffleOps LR = CollectShuffleElements(VecOp, Mask, RHS);
          assert(LR.second == nullptr || LR.second == RHS);
          if (LR.first->getType() != RHS->getType()) {
            // We tried our best, but we can't find anything compatible with RHS
            // further up the chain. Return a trivial shuffle.
            for (unsigned i = 0; i < NumElts; ++i)
              Mask[i] = ConstantInt::get(Type::getInt32Ty(V->getContext()), i);
            return std::make_pair(V, nullptr);
          }
          unsigned NumLHSElts = RHS->getType()->getVectorNumElements();
          // RHS lanes are numbered after the LHS lanes in a shuffle mask.
          Mask[InsertedIdx % NumElts] =
            ConstantInt::get(Type::getInt32Ty(V->getContext()),
                             NumLHSElts+ExtractedIdx);
          return std::make_pair(LR.first, RHS);
        }
        if (VecOp == PermittedRHS) {
          // We've gone as far as we can: anything on the other side of the
          // extractelement will already have been converted into a shuffle.
          unsigned NumLHSElts =
              EI->getOperand(0)->getType()->getVectorNumElements();
          for (unsigned i = 0; i != NumElts; ++i)
            Mask.push_back(ConstantInt::get(
                Type::getInt32Ty(V->getContext()),
                i == InsertedIdx ? ExtractedIdx : NumLHSElts + i));
          return std::make_pair(EI->getOperand(0), PermittedRHS);
        }
        // If this insertelement is a chain that comes from exactly these two
        // vectors, return the vector and the effective shuffle.
        if (EI->getOperand(0)->getType() == PermittedRHS->getType() &&
            CollectSingleShuffleElements(IEI, EI->getOperand(0), PermittedRHS,
                                         Mask))
          return std::make_pair(EI->getOperand(0), PermittedRHS);
      }
    }
  }
  // Otherwise, can't do anything fancy. Return an identity vector.
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()), i));
  return std::make_pair(V, nullptr);
}
/// Try to find redundant insertvalue instructions, like the following ones:
///  %0 = insertvalue { i8, i32 } undef, i8 %x, 0
///  %1 = insertvalue { i8, i32 } %0, i8 %y, 0
/// Here the second instruction inserts values at the same indices, as the
/// first one, making the first one redundant.
/// It should be transformed to:
///  %0 = insertvalue { i8, i32 } undef, i8 %y, 0
Instruction *InstCombiner::visitInsertValueInst(InsertValueInst &I) {
  bool IsRedundant = false;
  ArrayRef<unsigned int> FirstIndices = I.getIndices();
  // If there is a chain of insertvalue instructions (each of them except the
  // last one has only one use and it's another insertvalue insn from this
  // chain), check if any of the 'children' uses the same indices as the first
  // instruction. In this case, the first one is redundant.
  Value *V = &I;
  unsigned Depth = 0; // Bounds the walk so pathological chains stay cheap.
  while (V->hasOneUse() && Depth < 10) {
    User *U = V->user_back();
    auto UserInsInst = dyn_cast<InsertValueInst>(U);
    // Stop if the single user is not an insertvalue, or if it uses V as the
    // inserted scalar rather than as the aggregate being inserted into.
    if (!UserInsInst || U->getOperand(0) != V)
      break;
    if (UserInsInst->getIndices() == FirstIndices) {
      IsRedundant = true;
      break;
    }
    V = UserInsInst;
    Depth++;
  }
  // A later insert fully overwrites this one, so forward our aggregate
  // operand to all users.
  if (IsRedundant)
    return ReplaceInstUsesWith(I, I.getOperand(0));
  return nullptr;
}
/// Fold insertelement instructions: remove degenerate inserts, fold
/// extract/insert pairs back into the source vector, turn chains of
/// insert/extract pairs into shuffles, and simplify on demanded elements.
Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
  Value *VecOp    = IE.getOperand(0);
  Value *ScalarOp = IE.getOperand(1);
  Value *IdxOp    = IE.getOperand(2);
  // Inserting an undef value, or inserting into an undefined place: the
  // result is just the original vector.  Return the replacement so the
  // combiner knows IE changed and can erase the now-dead instruction.
  // (Previously the result of ReplaceInstUsesWith was dropped here, unlike
  // every other call site in this file, leaving a dead insertelement behind
  // and continuing to do useless work on it below.)
  if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
    return ReplaceInstUsesWith(IE, VecOp);
  // If the inserted element was extracted from some other vector, and if the
  // indexes are constant, try to turn this into a shufflevector operation.
  if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
    if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp)) {
      unsigned NumInsertVectorElts = IE.getType()->getNumElements();
      unsigned NumExtractVectorElts =
          EI->getOperand(0)->getType()->getVectorNumElements();
      unsigned ExtractedIdx =
          cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
      unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
      if (ExtractedIdx >= NumExtractVectorElts) // Out of range extract.
        return ReplaceInstUsesWith(IE, VecOp);
      if (InsertedIdx >= NumInsertVectorElts) // Out of range insert.
        return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
      // If we are extracting a value from a vector, then inserting it right
      // back into the same place, just use the input vector.
      if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
        return ReplaceInstUsesWith(IE, VecOp);
      // If this insertelement isn't used by some other insertelement, turn it
      // (and any insertelements it points to), into one big shuffle.
      if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.user_back())) {
        SmallVector<Constant*, 16> Mask;
        ShuffleOps LR = CollectShuffleElements(&IE, Mask, nullptr);
        // The proposed shuffle may be trivial, in which case we shouldn't
        // perform the combine.
        if (LR.first != &IE && LR.second != &IE) {
          // We now have a shuffle of LHS, RHS, Mask.
          if (LR.second == nullptr)
            LR.second = UndefValue::get(LR.first->getType());
          return new ShuffleVectorInst(LR.first, LR.second,
                                       ConstantVector::get(Mask));
        }
      }
    }
  }
  // Fall back to demanded-elements simplification of the insert itself.
  unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
  APInt UndefElts(VWidth, 0);
  APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
  if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts)) {
    if (V != &IE)
      return ReplaceInstUsesWith(IE, V);
    return &IE;
  }
  return nullptr;
}
/// Return true if we can evaluate the specified expression tree if the vector
/// elements were shuffled in a different order.
static bool CanEvaluateShuffled(Value *V, ArrayRef<int> Mask,
                                unsigned Depth = 5) {
  // We can always reorder the elements of a constant.
  if (isa<Constant>(V))
    return true;
  // We won't reorder vector arguments. No IPO here.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  // Two users may expect different orders of the elements. Don't try it.
  if (!I->hasOneUse())
    return false;
  // Recursion bound: give up once the expression tree is too deep.
  if (Depth == 0) return false;
  switch (I->getOpcode()) {
    // All of the opcodes below operate lane-wise, so the whole expression can
    // be evaluated in any element order as long as every operand can.
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::GetElementPtr: {
      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (!CanEvaluateShuffled(I->getOperand(i), Mask, Depth-1))
          return false;
      }
      return true;
    }
    case Instruction::InsertElement: {
      // Only constant insertion indices can be tracked through a reorder.
      ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(2));
      if (!CI) return false;
      int ElementNumber = CI->getLimitedValue();
      // Verify that 'CI' does not occur twice in Mask. A single 'insertelement'
      // can't put an element into multiple indices.
      bool SeenOnce = false;
      for (int i = 0, e = Mask.size(); i != e; ++i) {
        if (Mask[i] == ElementNumber) {
          if (SeenOnce)
            return false;
          SeenOnce = true;
        }
      }
      return CanEvaluateShuffled(I->getOperand(0), Mask, Depth-1);
    }
  }
  // Any opcode not handled above cannot be reordered.
  return false;
}
/// Rebuild a new instruction just like 'I' but with the new operands given.
/// In the event of type mismatch, the type of the operands is correct.
static Value *BuildNew(Instruction *I, ArrayRef<Value*> NewOps) {
  // We don't want to use the IRBuilder here because we want the replacement
  // instructions to appear next to 'I', not the builder's insertion point.
  switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor: {
      BinaryOperator *BO = cast<BinaryOperator>(I);
      assert(NewOps.size() == 2 && "binary operator with #ops != 2");
      BinaryOperator *New =
        BinaryOperator::Create(cast<BinaryOperator>(I)->getOpcode(),
                               NewOps[0], NewOps[1], "", BO);
      // Carry over the wrap/exact/FP flags so the rebuilt instruction keeps
      // the same poison/rounding semantics as the original.
      if (isa<OverflowingBinaryOperator>(BO)) {
        New->setHasNoUnsignedWrap(BO->hasNoUnsignedWrap());
        New->setHasNoSignedWrap(BO->hasNoSignedWrap());
      }
      if (isa<PossiblyExactOperator>(BO)) {
        New->setIsExact(BO->isExact());
      }
      if (isa<FPMathOperator>(BO))
        New->copyFastMathFlags(I);
      return New;
    }
    case Instruction::ICmp:
      assert(NewOps.size() == 2 && "icmp with #ops != 2");
      return new ICmpInst(I, cast<ICmpInst>(I)->getPredicate(),
                          NewOps[0], NewOps[1]);
    case Instruction::FCmp:
      assert(NewOps.size() == 2 && "fcmp with #ops != 2");
      return new FCmpInst(I, cast<FCmpInst>(I)->getPredicate(),
                          NewOps[0], NewOps[1]);
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt: {
      // It's possible that the mask has a different number of elements from
      // the original cast. We recompute the destination type to match the mask.
      Type *DestTy =
          VectorType::get(I->getType()->getScalarType(),
                          NewOps[0]->getType()->getVectorNumElements());
      assert(NewOps.size() == 1 && "cast with #ops != 1");
      return CastInst::Create(cast<CastInst>(I)->getOpcode(), NewOps[0], DestTy,
                              "", I);
    }
    case Instruction::GetElementPtr: {
      Value *Ptr = NewOps[0];
      ArrayRef<Value*> Idx = NewOps.slice(1);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          cast<GetElementPtrInst>(I)->getSourceElementType(), Ptr, Idx, "", I);
      GEP->setIsInBounds(cast<GetElementPtrInst>(I)->isInBounds());
      return GEP;
    }
  }
  // CanEvaluateShuffled must have accepted I, so every opcode it accepts
  // must be rebuildable here.
  llvm_unreachable("failed to rebuild vector instructions");
}
/// Recursively rewrite V so its vector elements appear in the order given by
/// Mask. Only called on trees that CanEvaluateShuffled accepted.
Value *
InstCombiner::EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask) {
  // Mask.size() does not need to be equal to the number of vector elements.
  assert(V->getType()->isVectorTy() && "can't reorder non-vector elements");
  // Undef stays undef at any width/order.
  if (isa<UndefValue>(V)) {
    return UndefValue::get(VectorType::get(V->getType()->getScalarType(),
                                           Mask.size()));
  }
  // All-zero stays all-zero at any width/order.
  if (isa<ConstantAggregateZero>(V)) {
    return ConstantAggregateZero::get(
               VectorType::get(V->getType()->getScalarType(),
                               Mask.size()));
  }
  // Any other constant is reordered with a constant-folded shufflevector.
  if (Constant *C = dyn_cast<Constant>(V)) {
    SmallVector<Constant *, 16> MaskValues;
    for (int i = 0, e = Mask.size(); i != e; ++i) {
      if (Mask[i] == -1)
        MaskValues.push_back(UndefValue::get(Builder->getInt32Ty()));
      else
        MaskValues.push_back(Builder->getInt32(Mask[i]));
    }
    return ConstantExpr::getShuffleVector(C, UndefValue::get(C->getType()),
                                          ConstantVector::get(MaskValues));
  }
  Instruction *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
    // Lane-wise operations: reorder each operand, then rebuild the
    // instruction if anything changed (or if the width changed).
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::Select:
    case Instruction::GetElementPtr: {
      SmallVector<Value*, 8> NewOps;
      bool NeedsRebuild = (Mask.size() != I->getType()->getVectorNumElements());
      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
        Value *V = EvaluateInDifferentElementOrder(I->getOperand(i), Mask);
        NewOps.push_back(V);
        NeedsRebuild |= (V != I->getOperand(i));
      }
      if (NeedsRebuild) {
        return BuildNew(I, NewOps);
      }
      return I;
    }
    case Instruction::InsertElement: {
      int Element = cast<ConstantInt>(I->getOperand(2))->getLimitedValue();
      // The insertelement was inserting at Element. Figure out which element
      // that becomes after shuffling. The answer is guaranteed to be unique
      // by CanEvaluateShuffled.
      bool Found = false;
      int Index = 0;
      for (int e = Mask.size(); Index != e; ++Index) {
        if (Mask[Index] == Element) {
          Found = true;
          break;
        }
      }
      // If element is not in Mask, no need to handle the operand 1 (element to
      // be inserted). Just evaluate values in operand 0 according to Mask.
      if (!Found)
        return EvaluateInDifferentElementOrder(I->getOperand(0), Mask);
      Value *V = EvaluateInDifferentElementOrder(I->getOperand(0), Mask);
      return InsertElementInst::Create(V, I->getOperand(1),
                                       Builder->getInt32(Index), "", I);
    }
  }
  llvm_unreachable("failed to reorder elements of vector instruction!");
}
/// Decide whether the shuffle mask is an identity mapping of the first
/// operand (isLHSID) and/or of the second operand (isRHSID).  Negative
/// (undef) mask entries are compatible with either identity.
static void RecognizeIdentityMask(const SmallVectorImpl<int> &Mask,
                                  bool &isLHSID, bool &isRHSID) {
  isLHSID = true;
  isRHSID = true;
  const unsigned NumElts = Mask.size();
  for (unsigned Idx = 0; Idx != NumElts; ++Idx) {
    const int MaskVal = Mask[Idx];
    // An undef lane places no constraint on either identity.
    if (MaskVal < 0)
      continue;
    // An LHS identity selects element Idx of the LHS...
    if (MaskVal != (int)Idx)
      isLHSID = false;
    // ...while an RHS identity selects element Idx of the RHS, whose lanes
    // are numbered starting at NumElts.
    if (MaskVal - NumElts != Idx)
      isRHSID = false;
  }
}
// Returns true if the shuffle is extracting a contiguous range of values from
// LHS, for example:
//                 +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
//   Input:        |AA|BB|CC|DD|EE|FF|GG|HH|II|JJ|KK|LL|MM|NN|OO|PP|
//   Shuffles to:  |EE|FF|GG|HH|
//                 +--+--+--+--+
static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI,
                                       SmallVector<int, 16> &Mask) {
  unsigned LHSElems =
      cast<VectorType>(SVI.getOperand(0)->getType())->getNumElements();
  unsigned MaskElems = Mask.size();
  // Note: an undef mask element (-1) wraps to a huge unsigned value here;
  // such masks are rejected either by the range checks below (when front or
  // back is undef) or by the per-element comparison in the loop.
  unsigned BegIdx = Mask.front();
  unsigned EndIdx = Mask.back();
  // The range must be ascending, lie entirely within the LHS, and have
  // exactly one mask element per extracted lane.
  if (BegIdx > EndIdx || EndIdx >= LHSElems || EndIdx - BegIdx != MaskElems - 1)
    return false;
  // Every lane must pick consecutive elements starting at BegIdx.
  for (unsigned I = 0; I != MaskElems; ++I)
    if (static_cast<unsigned>(Mask[I]) != BegIdx + I)
      return false;
  return true;
}
/// Fold shufflevector instructions.  The transforms tried, in order:
///  * undef mask -> undef result,
///  * demanded-elements simplification,
///  * canonicalization of shuffle(x,x)/shuffle(undef,x) to shuffle(x,undef),
///  * elimination of identity shuffles,
///  * evaluating the shuffled expression tree in the shuffled order,
///  * rewriting shuffle+bitcast extraction of a contiguous subvector, and
///  * merging chains of shuffles into a single shuffle.
Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
  Value *LHS = SVI.getOperand(0);
  Value *RHS = SVI.getOperand(1);
  SmallVector<int, 16> Mask = SVI.getShuffleMask();
  Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
  bool MadeChange = false;
  // Undefined shuffle mask -> undefined value.
  if (isa<UndefValue>(SVI.getOperand(2)))
    return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
  unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
  APInt UndefElts(VWidth, 0);
  APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
  if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
    if (V != &SVI)
      return ReplaceInstUsesWith(SVI, V);
    // SimplifyDemandedVectorElts may have rewritten our operands in place;
    // reload them before continuing.
    LHS = SVI.getOperand(0);
    RHS = SVI.getOperand(1);
    MadeChange = true;
  }
  unsigned LHSWidth = cast<VectorType>(LHS->getType())->getNumElements();
  // Canonicalize shuffle(x ,x,mask) -> shuffle(x, undef,mask')
  // Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask').
  if (LHS == RHS || isa<UndefValue>(LHS)) {
    if (isa<UndefValue>(LHS) && LHS == RHS) {
      // shuffle(undef,undef,mask) -> undef.
      Value *Result = (VWidth == LHSWidth)
                      ? LHS : UndefValue::get(SVI.getType());
      return ReplaceInstUsesWith(SVI, Result);
    }
    // Remap any references to RHS to use LHS.
    SmallVector<Constant*, 16> Elts;
    for (unsigned i = 0, e = LHSWidth; i != VWidth; ++i) {
      if (Mask[i] < 0) {
        Elts.push_back(UndefValue::get(Int32Ty));
        continue;
      }
      if ((Mask[i] >= (int)e && isa<UndefValue>(RHS)) ||
          (Mask[i] <  (int)e && isa<UndefValue>(LHS))) {
        Mask[i] = -1;     // Turn into undef.
        Elts.push_back(UndefValue::get(Int32Ty));
      } else {
        Mask[i] = Mask[i] % e;  // Force to LHS.
        Elts.push_back(ConstantInt::get(Int32Ty, Mask[i]));
      }
    }
    // Install the canonical operand order: real vector first, undef second.
    SVI.setOperand(0, SVI.getOperand(1));
    SVI.setOperand(1, UndefValue::get(RHS->getType()));
    SVI.setOperand(2, ConstantVector::get(Elts));
    LHS = SVI.getOperand(0);
    RHS = SVI.getOperand(1);
    MadeChange = true;
  }
  if (VWidth == LHSWidth) {
    // Analyze the shuffle, are the LHS or RHS and identity shuffles?
    bool isLHSID, isRHSID;
    RecognizeIdentityMask(Mask, isLHSID, isRHSID);
    // Eliminate identity shuffles.
    if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
    if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
  }
  // If the whole expression feeding LHS can be evaluated in the shuffled
  // order, do that and drop the shuffle entirely.
  if (isa<UndefValue>(RHS) && CanEvaluateShuffled(LHS, Mask)) {
    Value *V = EvaluateInDifferentElementOrder(LHS, Mask);
    return ReplaceInstUsesWith(SVI, V);
  }
  // SROA generates shuffle+bitcast when the extracted sub-vector is bitcast to
  // a non-vector type. We can instead bitcast the original vector followed by
  // an extract of the desired element:
  //
  //   %sroa = shufflevector <16 x i8> %in, <16 x i8> undef,
  //                         <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  //   %1 = bitcast <4 x i8> %sroa to i32
  // Becomes:
  //   %bc = bitcast <16 x i8> %in to <4 x i32>
  //   %ext = extractelement <4 x i32> %bc, i32 0
  //
  // If the shuffle is extracting a contiguous range of values from the input
  // vector then each use which is a bitcast of the extracted size can be
  // replaced. This will work if the vector types are compatible, and the begin
  // index is aligned to a value in the casted vector type. If the begin index
  // isn't aligned then we can shuffle the original vector (keeping the same
  // vector type) before extracting.
  //
  // This code will bail out if the target type is fundamentally incompatible
  // with vectors of the source type.
  //
  // Example of <16 x i8>, target type i32:
  // Index range [4,8):         v-----------v Will work.
  // +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
  // <16 x i8>: |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
  // <4 x i32>: |           |           |           |           |
  // +-----------+-----------+-----------+-----------+
  // Index range [6,10):              ^-----------^ Needs an extra shuffle.
  // Target type i40:           ^--------------^ Won't work, bail.
  if (isShuffleExtractingFromLHS(SVI, Mask)) {
    Value *V = LHS;
    unsigned MaskElems = Mask.size();
    unsigned BegIdx = Mask.front();
    VectorType *SrcTy = cast<VectorType>(V->getType());
    unsigned VecBitWidth = SrcTy->getBitWidth();
    unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType());
    assert(SrcElemBitWidth && "vector elements must have a bitwidth");
    unsigned SrcNumElems = SrcTy->getNumElements();
    SmallVector<BitCastInst *, 8> BCs;
    DenseMap<Type *, Value *> NewBCs;
    // Collect the bitcast users first: we rewrite them below, and mutating
    // the use list while iterating over it would be unsafe.
    for (User *U : SVI.users())
      if (BitCastInst *BC = dyn_cast<BitCastInst>(U))
        if (!BC->use_empty())
          // Only visit bitcasts that weren't previously handled.
          BCs.push_back(BC);
    for (BitCastInst *BC : BCs) {
      Type *TgtTy = BC->getDestTy();
      unsigned TgtElemBitWidth = DL.getTypeSizeInBits(TgtTy);
      if (!TgtElemBitWidth)
        continue;
      unsigned TgtNumElems = VecBitWidth / TgtElemBitWidth;
      bool VecBitWidthsEqual = VecBitWidth == TgtNumElems * TgtElemBitWidth;
      bool BegIsAligned = 0 == ((SrcElemBitWidth * BegIdx) % TgtElemBitWidth);
      if (!VecBitWidthsEqual)
        continue;
      if (!VectorType::isValidElementType(TgtTy))
        continue;
      VectorType *CastSrcTy = VectorType::get(TgtTy, TgtNumElems);
      if (!BegIsAligned) {
        // Shuffle the input so [0,NumElements) contains the output, and
        // [NumElems,SrcNumElems) is undef.
        SmallVector<Constant *, 16> ShuffleMask(SrcNumElems,
                                                UndefValue::get(Int32Ty));
        for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I)
          ShuffleMask[I] = ConstantInt::get(Int32Ty, Idx);
        V = Builder->CreateShuffleVector(V, UndefValue::get(V->getType()),
                                         ConstantVector::get(ShuffleMask),
                                         SVI.getName() + ".extract");
        BegIdx = 0;
      }
      unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth;
      assert(SrcElemsPerTgtElem);
      BegIdx /= SrcElemsPerTgtElem;
      // Reuse a previously created bitcast of the same type if one exists.
      bool BCAlreadyExists = NewBCs.find(CastSrcTy) != NewBCs.end();
      auto *NewBC =
          BCAlreadyExists
              ? NewBCs[CastSrcTy]
              : Builder->CreateBitCast(V, CastSrcTy, SVI.getName() + ".bc");
      if (!BCAlreadyExists)
        NewBCs[CastSrcTy] = NewBC;
      auto *Ext = Builder->CreateExtractElement(
          NewBC, ConstantInt::get(Int32Ty, BegIdx), SVI.getName() + ".extract");
      // The shufflevector isn't being replaced: the bitcast that used it
      // is. InstCombine will visit the newly-created instructions.
      ReplaceInstUsesWith(*BC, Ext);
      MadeChange = true;
    }
  }
  // If the LHS is a shufflevector itself, see if we can combine it with this
  // one without producing an unusual shuffle.
  // Cases that might be simplified:
  // 1.
  // x1=shuffle(v1,v2,mask1)
  //  x=shuffle(x1,undef,mask)
  //        ==>
  //  x=shuffle(v1,undef,newMask)
  // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : -1
  // 2.
  // x1=shuffle(v1,undef,mask1)
  //  x=shuffle(x1,x2,mask)
  // where v1.size() == mask1.size()
  //        ==>
  //  x=shuffle(v1,x2,newMask)
  // newMask[i] = (mask[i] < x1.size()) ? mask1[mask[i]] : mask[i]
  // 3.
  // x2=shuffle(v2,undef,mask2)
  //  x=shuffle(x1,x2,mask)
  // where v2.size() == mask2.size()
  //        ==>
  //  x=shuffle(x1,v2,newMask)
  // newMask[i] = (mask[i] < x1.size())
  //              ? mask[i] : mask2[mask[i]-x1.size()]+x1.size()
  // 4.
  // x1=shuffle(v1,undef,mask1)
  // x2=shuffle(v2,undef,mask2)
  //  x=shuffle(x1,x2,mask)
  // where v1.size() == v2.size()
  //        ==>
  //  x=shuffle(v1,v2,newMask)
  // newMask[i] = (mask[i] < x1.size())
  //              ? mask1[mask[i]] : mask2[mask[i]-x1.size()]+v1.size()
  //
  // Here we are really conservative:
  // we are absolutely afraid of producing a shuffle mask not in the input
  // program, because the code gen may not be smart enough to turn a merged
  // shuffle into two specific shuffles: it may produce worse code.  As such,
  // we only merge two shuffles if the result is either a splat or one of the
  // input shuffle masks.  In this case, merging the shuffles just removes
  // one instruction, which we know is safe.  This is good for things like
  // turning: (splat(splat)) -> splat, or
  // merge(V[0..n], V[n+1..2n]) -> V[0..2n]
  ShuffleVectorInst* LHSShuffle = dyn_cast<ShuffleVectorInst>(LHS);
  ShuffleVectorInst* RHSShuffle = dyn_cast<ShuffleVectorInst>(RHS);
  // Only handle operand shuffles whose second source is undef (cases above).
  if (LHSShuffle)
    if (!isa<UndefValue>(LHSShuffle->getOperand(1)) && !isa<UndefValue>(RHS))
      LHSShuffle = nullptr;
  if (RHSShuffle)
    if (!isa<UndefValue>(RHSShuffle->getOperand(1)))
      RHSShuffle = nullptr;
  if (!LHSShuffle && !RHSShuffle)
    return MadeChange ? &SVI : nullptr;
  Value* LHSOp0 = nullptr;
  Value* LHSOp1 = nullptr;
  Value* RHSOp0 = nullptr;
  unsigned LHSOp0Width = 0;
  unsigned RHSOp0Width = 0;
  if (LHSShuffle) {
    LHSOp0 = LHSShuffle->getOperand(0);
    LHSOp1 = LHSShuffle->getOperand(1);
    LHSOp0Width = cast<VectorType>(LHSOp0->getType())->getNumElements();
  }
  if (RHSShuffle) {
    RHSOp0 = RHSShuffle->getOperand(0);
    RHSOp0Width = cast<VectorType>(RHSOp0->getType())->getNumElements();
  }
  Value* newLHS = LHS;
  Value* newRHS = RHS;
  if (LHSShuffle) {
    // case 1
    if (isa<UndefValue>(RHS)) {
      newLHS = LHSOp0;
      newRHS = LHSOp1;
    }
    // case 2 or 4
    else if (LHSOp0Width == LHSWidth) {
      newLHS = LHSOp0;
    }
  }
  // case 3 or 4
  if (RHSShuffle && RHSOp0Width == LHSWidth) {
    newRHS = RHSOp0;
  }
  // case 4
  if (LHSOp0 == RHSOp0) {
    newLHS = LHSOp0;
    newRHS = nullptr;
  }
  if (newLHS == LHS && newRHS == RHS)
    return MadeChange ? &SVI : nullptr;
  SmallVector<int, 16> LHSMask;
  SmallVector<int, 16> RHSMask;
  if (newLHS != LHS)
    LHSMask = LHSShuffle->getShuffleMask();
  if (RHSShuffle && newRHS != RHS)
    RHSMask = RHSShuffle->getShuffleMask();
  unsigned newLHSWidth = (newLHS != LHS) ? LHSOp0Width : LHSWidth;
  SmallVector<int, 16> newMask;
  bool isSplat = true;
  int SplatElt = -1;
  // Create a new mask for the new ShuffleVectorInst so that the new
  // ShuffleVectorInst is equivalent to the original one.
  for (unsigned i = 0; i < VWidth; ++i) {
    int eltMask;
    if (Mask[i] < 0) {
      // This element is an undef value.
      eltMask = -1;
    } else if (Mask[i] < (int)LHSWidth) {
      // This element is from left hand side vector operand.
      //
      // If LHS is going to be replaced (case 1, 2, or 4), calculate the
      // new mask value for the element.
      if (newLHS != LHS) {
        eltMask = LHSMask[Mask[i]];
        // If the value selected is an undef value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)LHSOp0Width && isa<UndefValue>(LHSOp1))
          eltMask = -1;
      } else
        eltMask = Mask[i];
    } else {
      // This element is from right hand side vector operand
      //
      // If the value selected is an undef value, explicitly specify it
      // with a -1 mask value. (case 1)
      if (isa<UndefValue>(RHS))
        eltMask = -1;
      // If RHS is going to be replaced (case 3 or 4), calculate the
      // new mask value for the element.
      else if (newRHS != RHS) {
        eltMask = RHSMask[Mask[i]-LHSWidth];
        // If the value selected is an undef value, explicitly specify it
        // with a -1 mask value.
        if (eltMask >= (int)RHSOp0Width) {
          assert(isa<UndefValue>(RHSShuffle->getOperand(1))
                 && "should have been check above");
          eltMask = -1;
        }
      } else
        eltMask = Mask[i]-LHSWidth;
      // If LHS's width is changed, shift the mask value accordingly.
      // If newRHS == NULL, i.e. LHSOp0 == RHSOp0, we want to remap any
      // references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
      // If newRHS == newLHS, we want to remap any references from newRHS to
      // newLHS so that we can properly identify splats that may occur due to
      // obfuscation across the two vectors.
      if (eltMask >= 0 && newRHS != nullptr && newLHS != newRHS)
        eltMask += newLHSWidth;
    }
    // Check if this could still be a splat.
    if (eltMask >= 0) {
      if (SplatElt >= 0 && SplatElt != eltMask)
        isSplat = false;
      SplatElt = eltMask;
    }
    newMask.push_back(eltMask);
  }
  // If the result mask is equal to one of the original shuffle masks,
  // or is a splat, do the replacement.
  //
  // XXX EMSCRIPTEN: Add '|| true' so that we always do the replacement.
  // We're targetting SIMD.js, so there's less of an expectation that a
  // particular shuffle mask will always map onto a particular instruction on
  // a particular ISA because we aren't targetting a particular ISA (what the
  // JS engine does is another story). We may wish to re-evaluate this choice
  // as we move on to higher-element-count vectors, but especially for now this
  // is quite desirable.
  if (isSplat || newMask == LHSMask || newMask == RHSMask || newMask == Mask ||
      true)
  {
    SmallVector<Constant*, 16> Elts;
    for (unsigned i = 0, e = newMask.size(); i != e; ++i) {
      if (newMask[i] < 0) {
        Elts.push_back(UndefValue::get(Int32Ty));
      } else {
        Elts.push_back(ConstantInt::get(Int32Ty, newMask[i]));
      }
    }
    if (!newRHS)
      newRHS = UndefValue::get(newLHS->getType());
    return new ShuffleVectorInst(newLHS, newRHS, ConstantVector::get(Elts));
  }
  // If the result mask is an identity, replace uses of this instruction with
  // corresponding argument.
  bool isLHSID, isRHSID;
  RecognizeIdentityMask(newMask, isLHSID, isRHSID);
  if (isLHSID && VWidth == LHSOp0Width) return ReplaceInstUsesWith(SVI, newLHS);
  if (isRHSID && VWidth == RHSOp0Width) return ReplaceInstUsesWith(SVI, newRHS);
  return MadeChange ? &SVI : nullptr;
}
| slightperturbation/Cobalt | ext/emsdk_portable/clang/tag-e1.34.1/src/lib/Transforms/InstCombine/InstCombineVectorOps.cpp | C++ | apache-2.0 | 49,354 |
package jpaint.model.bean;
import java.awt.Color;
import java.awt.Graphics;
import java.util.Date;
/**
 * Abstract base class for every drawable figure in JPaint.
 *
 * <p>A figure is defined by its top-left corner ({@code x}, {@code y}), its
 * dimensions, a type code, a border color and a fill color. Concrete
 * subclasses supply the actual drawing logic in {@link #desenheMe(Graphics)}.
 */
public abstract class Figura {

    /** Database primary key; -1 means the figure has not been persisted yet. */
    private int pkfigura = -1;
    private int x;
    private int y;
    private int largura;
    private int altura;
    private int tipo;
    private Color colorBorda;
    private Color colorInternal;

    /**
     * Creates a figure from its full description.
     *
     * @param x             left coordinate; must be non-negative
     * @param y             top coordinate
     * @param largura       width; a negative value is normalized (see {@link #setLargura(int)})
     * @param altura        height; a negative value is normalized (see {@link #setAltura(int)})
     * @param tipo          figure type code; values outside [0, 4] fall back to 0
     * @param colorBorda    border color
     * @param colorInternal fill color
     */
    public Figura(int x, int y, int largura, int altura, int tipo, Color colorBorda, Color colorInternal) {
        // Order matters: setLargura/setAltura may shift x/y for figures
        // dragged right-to-left or bottom-to-top, so set x/y first.
        setX(x);
        setY(y);
        setLargura(largura);
        setAltura(altura);
        setTipo(tipo);
        setColorBorda(colorBorda);
        setColorInternal(colorInternal);
    }

    public int getX() {
        return x;
    }

    /**
     * Sets the left coordinate.
     *
     * @param x the new coordinate
     * @throws RuntimeException if {@code x} is negative
     */
    public void setX(int x) {
        if (x < 0) {
            throw new RuntimeException("Valor inválido");
        }
        this.x = x;
    }

    public int getY() {
        return y;
    }

    public void setY(int y) {
        this.y = y;
    }

    public int getLargura() {
        return largura;
    }

    /**
     * Stores the width. A negative width means the figure was dragged from
     * right to left: the magnitude is kept and the origin is shifted left so
     * the figure is still drawn correctly by the view.
     *
     * @param largura the (possibly negative) width
     */
    public void setLargura(int largura) {
        int width = largura;
        if (width < 0) {
            width = -width;
            x -= width;
        }
        this.largura = width;
    }

    public int getAltura() {
        return altura;
    }

    /**
     * Stores the height. A negative height means the figure was dragged from
     * bottom to top: the magnitude is kept and the origin is shifted up so
     * the figure is still drawn correctly by the view.
     *
     * @param altura the (possibly negative) height
     */
    public void setAltura(int altura) {
        int height = altura;
        if (height < 0) {
            height = -height;
            y -= height;
        }
        this.altura = height;
    }

    /**
     * Sets the figure type code, falling back to 0 for values outside [0, 4].
     */
    public void setTipo(int tipo) {
        this.tipo = (tipo >= 0 && tipo <= 4) ? tipo : 0;
    }

    public int getTipo() {
        return tipo;
    }

    public int getPkfigura() {
        return pkfigura;
    }

    public void setPkfigura(int pkfigura) {
        this.pkfigura = pkfigura;
    }

    public Color getColorBorda() {
        return colorBorda;
    }

    public void setColorBorda(Color colorBorda) {
        this.colorBorda = colorBorda;
    }

    public Color getColorInternal() {
        return colorInternal;
    }

    public void setColorInternal(Color colorInternal) {
        this.colorInternal = colorInternal;
    }

    /**
     * Draws this figure on the given graphics context. Implemented by each
     * concrete figure type.
     *
     * @param g the graphics context to draw on
     */
    public abstract void desenheMe(Graphics g);

    /**
     * Computes the height spanned by two y coordinates.
     *
     * @param y0 the starting y coordinate
     * @param y1 the ending y coordinate
     * @return {@code y1 - y0}
     */
    public static int calcAltura(int y0, int y1) {
        return y1 - y0;
    }

    /**
     * Computes the width spanned by two x coordinates.
     *
     * @param x0 the starting x coordinate
     * @param x1 the ending x coordinate
     * @return {@code x1 - x0}
     */
    public static int calcLargura(int x0, int x1) {
        return x1 - x0;
    }

    /**
     * Returns a debug representation exposing the figure's coordinates and
     * colors; useful for inspecting pixel positions at runtime.
     */
    @Override
    public String toString() {
        return new StringBuilder("Figura{")
                .append("pkfigura=").append(pkfigura)
                .append(", x=").append(x)
                .append(", y=").append(y)
                .append(", largura=").append(largura)
                .append(", altura=").append(altura)
                .append(", tipo=").append(tipo)
                .append(", colorBorda=").append(colorBorda)
                .append(", colorInternal=").append(colorInternal)
                .append('}')
                .toString();
    }
}
| Pompeu/Jpaint | src/jpaint/model/bean/Figura.java | Java | apache-2.0 | 4,031 |
package com.ax.demo.entity;
import javax.validation.constraints.Min;
/**
 * A Hipster. Used as entity in this demo application.
 */
public class Hipster {

    /** The cut of jeans a hipster wears. */
    public enum JeansType {
        SKINNY, SUPERSKINNY;
    }

    // Validated by bean validation: must not be negative.
    @Min(value = 0, message = "Id must be positive")
    private int id;

    private String name;
    private JeansType jeans;
    private boolean hornRimmedGlasses;
    private String imagePath = "";

    /** No-arg constructor required by bean-style frameworks. */
    public Hipster() {
    }

    /**
     * Creates a fully populated hipster.
     *
     * @param id                identifier (non-negative)
     * @param name              display name
     * @param jeans             jeans cut
     * @param hornRimmedGlasses whether horn-rimmed glasses are worn
     * @param imagePath         path to an image, never {@code null} by convention
     */
    public Hipster(int id, String name, JeansType jeans,
            boolean hornRimmedGlasses, String imagePath) {
        super();
        this.id = id;
        this.name = name;
        this.jeans = jeans;
        this.hornRimmedGlasses = hornRimmedGlasses;
        this.imagePath = imagePath;
    }

    public int getId() {
        return id;
    }

    public String getName() {
        return name;
    }

    public JeansType getJeans() {
        return jeans;
    }

    public boolean isHornRimmedGlasses() {
        return hornRimmedGlasses;
    }

    public void setImagePath(final String imagePath) {
        this.imagePath = imagePath;
    }

    public String getImagePath() {
        return imagePath;
    }

    @Override
    public String toString() {
        return "Hipster [id=" + id + ", name=" + name + ", jeans=" + jeans
                + ", hornRimmedGlasses=" + hornRimmedGlasses + ", imagePath="
                + imagePath + "]";
    }

    /**
     * Hash code consistent with {@link #equals(Object)}. Uses
     * {@code Objects.hash} instead of the previous hand-rolled prime-31
     * accumulation; the produced values differ from the old implementation,
     * but the equals/hashCode contract is preserved.
     */
    @Override
    public int hashCode() {
        return java.util.Objects.hash(id, name, jeans, hornRimmedGlasses, imagePath);
    }

    /**
     * Two hipsters are equal when all of their fields are equal.
     * Null-safe field comparison via {@code Objects.equals} replaces the
     * previous verbose null-check chains with identical semantics.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null || getClass() != obj.getClass())
            return false;
        Hipster other = (Hipster) obj;
        return id == other.id
                && hornRimmedGlasses == other.hornRimmedGlasses
                && jeans == other.jeans
                && java.util.Objects.equals(name, other.name)
                && java.util.Objects.equals(imagePath, other.imagePath);
    }
}
| fexbraun/hipster-o-mat | src/main/java/com/ax/demo/entity/Hipster.java | Java | apache-2.0 | 2,283 |
/*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.buildinit.plugins.internal;
import org.gradle.util.GUtil;
/**
 * Static utility for deriving a module name from a project/subproject name.
 */
public class ModuleNameBuilder {

    private ModuleNameBuilder() {
        // Static utility class; not instantiable.
    }

    /**
     * Converts the given name to a module name. Delegates to
     * {@link GUtil#toCamelCase} — presumably turns separator-delimited words
     * (e.g. "my-lib") into CamelCase ("MyLib"); confirm against GUtil's
     * contract.
     *
     * @param name the raw name to convert
     * @return the camel-cased module name
     */
    public static String toModuleName(String name) {
        return GUtil.toCamelCase(name);
    }
}
| robinverduijn/gradle | subprojects/build-init/src/main/java/org/gradle/buildinit/plugins/internal/ModuleNameBuilder.java | Java | apache-2.0 | 830 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.*;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.*;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.junit.Assert;
import org.junit.Test;
/** Test {@link BlockStoragePolicy} */
public class TestBlockStoragePolicy {
  // Policy suite under test plus the cluster Configuration shared by tests.
  public static final BlockStoragePolicySuite POLICY_SUITE;
  public static final BlockStoragePolicy DEFAULT_STORAGE_POLICY;
  public static final Configuration conf;
  // Build the default suite once; short heartbeat/replication intervals make
  // the MiniDFSCluster-based tests converge quickly.
  static {
    conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
    POLICY_SUITE = BlockStoragePolicySuite.createDefaultSuite();
    DEFAULT_STORAGE_POLICY = POLICY_SUITE.getDefaultPolicy();
  }
  // Frequently used unavailable/available storage-type sets for fallback checks.
  static final EnumSet<StorageType> none = EnumSet.noneOf(StorageType.class);
  static final EnumSet<StorageType> archive = EnumSet.of(StorageType.ARCHIVE);
  static final EnumSet<StorageType> disk = EnumSet.of(StorageType.DISK);
  static final EnumSet<StorageType> ssd = EnumSet.of(StorageType.SSD);
  static final EnumSet<StorageType> disk_archive = EnumSet.of(StorageType.DISK,
      StorageType.ARCHIVE);
  static final EnumSet<StorageType> all = EnumSet.of(StorageType.SSD,
      StorageType.DISK, StorageType.ARCHIVE);
  static final long FILE_LEN = 1024;
  static final short REPLICATION = 3;
  // Well-known policy ids, aliased from HdfsConstants for brevity below.
  static final byte COLD = HdfsConstants.COLD_STORAGE_POLICY_ID;
  static final byte WARM = HdfsConstants.WARM_STORAGE_POLICY_ID;
  static final byte HOT = HdfsConstants.HOT_STORAGE_POLICY_ID;
  static final byte ONESSD = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
  static final byte ALLSSD = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
  static final byte LAZY_PERSIST = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
@Test (timeout=300000)
public void testConfigKeyEnabled() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
try {
cluster.waitActive();
cluster.getFileSystem().setStoragePolicy(new Path("/"),
HdfsConstants.COLD_STORAGE_POLICY_NAME);
} finally {
cluster.shutdown();
}
}
/**
* Ensure that setStoragePolicy throws IOException when
* dfs.storage.policy.enabled is set to false.
* @throws IOException
*/
@Test (timeout=300000, expected=IOException.class)
public void testConfigKeyDisabled() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY, false);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
try {
cluster.waitActive();
cluster.getFileSystem().setStoragePolicy(new Path("/"),
HdfsConstants.COLD_STORAGE_POLICY_NAME);
} finally {
cluster.shutdown();
}
}
  /**
   * Verifies the string form of every built-in policy and, for each policy,
   * the storage types chosen per replication factor plus the creation and
   * replication fallback tables.
   */
  @Test
  public void testDefaultPolicies() {
    // Expected human-readable form of each policy, keyed by policy id.
    final Map<Byte, String> expectedPolicyStrings = new HashMap<Byte, String>();
    expectedPolicyStrings.put(COLD,
        "BlockStoragePolicy{COLD:" + COLD + ", storageTypes=[ARCHIVE], " +
            "creationFallbacks=[], replicationFallbacks=[]}");
    expectedPolicyStrings.put(WARM,
        "BlockStoragePolicy{WARM:" + WARM + ", storageTypes=[DISK, ARCHIVE], " +
            "creationFallbacks=[DISK, ARCHIVE], " +
            "replicationFallbacks=[DISK, ARCHIVE]}");
    expectedPolicyStrings.put(HOT,
        "BlockStoragePolicy{HOT:" + HOT + ", storageTypes=[DISK], " +
            "creationFallbacks=[], replicationFallbacks=[ARCHIVE]}");
    expectedPolicyStrings.put(ONESSD, "BlockStoragePolicy{ONE_SSD:" + ONESSD +
        ", storageTypes=[SSD, DISK], creationFallbacks=[SSD, DISK], " +
        "replicationFallbacks=[SSD, DISK]}");
    expectedPolicyStrings.put(ALLSSD, "BlockStoragePolicy{ALL_SSD:" + ALLSSD +
        ", storageTypes=[SSD], creationFallbacks=[DISK], " +
        "replicationFallbacks=[DISK]}");
    expectedPolicyStrings.put(LAZY_PERSIST,
        "BlockStoragePolicy{LAZY_PERSIST:" + LAZY_PERSIST + ", storageTypes=[RAM_DISK, DISK], " +
            "creationFallbacks=[DISK], replicationFallbacks=[DISK]}");
    // Every defined policy id in [1, 16) must render exactly as expected.
    for(byte i = 1; i < 16; i++) {
      final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i);
      if (policy != null) {
        final String s = policy.toString();
        Assert.assertEquals(expectedPolicyStrings.get(i), s);
      }
    }
    Assert.assertEquals(POLICY_SUITE.getPolicy(HOT), POLICY_SUITE.getDefaultPolicy());
    { // check Cold policy
      final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
      for(short replication = 1; replication < 6; replication++) {
        final List<StorageType> computed = cold.chooseStorageTypes(replication);
        assertStorageType(computed, replication, StorageType.ARCHIVE);
      }
      assertCreationFallback(cold, null, null, null, null, null);
      assertReplicationFallback(cold, null, null, null, null);
    }
    { // check Warm policy
      final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
      for(short replication = 1; replication < 6; replication++) {
        final List<StorageType> computed = warm.chooseStorageTypes(replication);
        assertStorageType(computed, replication, StorageType.DISK, StorageType.ARCHIVE);
      }
      assertCreationFallback(warm, StorageType.DISK, StorageType.DISK,
          StorageType.ARCHIVE, StorageType.DISK, null);
      assertReplicationFallback(warm, StorageType.DISK, StorageType.DISK,
          StorageType.ARCHIVE, StorageType.DISK);
    }
    { // check Hot policy
      final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
      for(short replication = 1; replication < 6; replication++) {
        final List<StorageType> computed = hot.chooseStorageTypes(replication);
        assertStorageType(computed, replication, StorageType.DISK);
      }
      assertCreationFallback(hot, null, null, null, null, null);
      assertReplicationFallback(hot, StorageType.ARCHIVE, null,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    { // check ONE_SSD policy
      final BlockStoragePolicy onessd = POLICY_SUITE.getPolicy(ONESSD);
      for (short replication = 1; replication < 6; replication++) {
        final List<StorageType> computed = onessd
            .chooseStorageTypes(replication);
        assertStorageType(computed, replication, StorageType.SSD,
            StorageType.DISK);
      }
      assertCreationFallback(onessd, StorageType.SSD, StorageType.SSD,
          StorageType.SSD, StorageType.DISK, StorageType.SSD);
      assertReplicationFallback(onessd, StorageType.SSD, StorageType.SSD,
          StorageType.SSD, StorageType.DISK);
    }
    { // check ALL_SSD policy
      final BlockStoragePolicy allssd = POLICY_SUITE.getPolicy(ALLSSD);
      for (short replication = 1; replication < 6; replication++) {
        final List<StorageType> computed = allssd
            .chooseStorageTypes(replication);
        assertStorageType(computed, replication, StorageType.SSD);
      }
      assertCreationFallback(allssd, StorageType.DISK, StorageType.DISK, null,
          StorageType.DISK, null);
      assertReplicationFallback(allssd, StorageType.DISK, StorageType.DISK,
          null, StorageType.DISK);
    }
    { // check LAZY_PERSIST policy
      final BlockStoragePolicy lazyPersist = POLICY_SUITE
          .getPolicy(LAZY_PERSIST);
      for (short replication = 1; replication < 6; replication++) {
        final List<StorageType> computed = lazyPersist
            .chooseStorageTypes(replication);
        assertStorageType(computed, replication, StorageType.DISK);
      }
      assertCreationFallback(lazyPersist, StorageType.DISK, StorageType.DISK,
          null, StorageType.DISK, null);
      assertReplicationFallback(lazyPersist, StorageType.DISK,
          StorageType.DISK, null, StorageType.DISK);
    }
  }
static StorageType[] newStorageTypes(int nDisk, int nArchive) {
final StorageType[] t = new StorageType[nDisk + nArchive];
Arrays.fill(t, 0, nDisk, StorageType.DISK);
Arrays.fill(t, nDisk, t.length, StorageType.ARCHIVE);
return t;
}
static List<StorageType> asList(int nDisk, int nArchive) {
return Arrays.asList(newStorageTypes(nDisk, nArchive));
}
static void assertStorageType(List<StorageType> computed, short replication,
StorageType... answers) {
Assert.assertEquals(replication, computed.size());
final StorageType last = answers[answers.length - 1];
for(int i = 0; i < computed.size(); i++) {
final StorageType expected = i < answers.length? answers[i]: last;
Assert.assertEquals(expected, computed.get(i));
}
}
static void assertCreationFallback(BlockStoragePolicy policy,
StorageType noneExpected, StorageType archiveExpected,
StorageType diskExpected, StorageType ssdExpected,
StorageType disk_archiveExpected) {
Assert.assertEquals(noneExpected, policy.getCreationFallback(none));
Assert.assertEquals(archiveExpected, policy.getCreationFallback(archive));
Assert.assertEquals(diskExpected, policy.getCreationFallback(disk));
Assert.assertEquals(ssdExpected, policy.getCreationFallback(ssd));
Assert.assertEquals(disk_archiveExpected,
policy.getCreationFallback(disk_archive));
Assert.assertEquals(null, policy.getCreationFallback(all));
}
static void assertReplicationFallback(BlockStoragePolicy policy,
StorageType noneExpected, StorageType archiveExpected,
StorageType diskExpected, StorageType ssdExpected) {
Assert.assertEquals(noneExpected, policy.getReplicationFallback(none));
Assert
.assertEquals(archiveExpected, policy.getReplicationFallback(archive));
Assert.assertEquals(diskExpected, policy.getReplicationFallback(disk));
Assert.assertEquals(ssdExpected, policy.getReplicationFallback(ssd));
Assert.assertEquals(null, policy.getReplicationFallback(all));
}
  /**
   * Strategy for invoking {@code BlockStoragePolicy#chooseStorageTypes} with a
   * particular unavailable-storage set and isNewBlock flag, then asserting on
   * the chosen storage types. The constants below cover the flag/set
   * combinations exercised by the tests.
   */
  private static interface CheckChooseStorageTypes {
    // Invoke chooseStorageTypes for the policy and assert the result equals
    // the expected storage types (order-insensitive, via assertStorageTypes).
    public void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
        List<StorageType> chosen, StorageType... expected);
    /** Basic case: pass only replication and chosen */
    static final CheckChooseStorageTypes Basic = new CheckChooseStorageTypes() {
      @Override
      public void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
          List<StorageType> chosen, StorageType... expected) {
        final List<StorageType> types = p.chooseStorageTypes(replication, chosen);
        assertStorageTypes(types, expected);
      }
    };
    /** With empty unavailables and isNewBlock=true */
    static final CheckChooseStorageTypes EmptyUnavailablesAndNewBlock
        = new CheckChooseStorageTypes() {
      @Override
      public void checkChooseStorageTypes(BlockStoragePolicy p,
          short replication, List<StorageType> chosen, StorageType... expected) {
        final List<StorageType> types = p.chooseStorageTypes(replication,
            chosen, none, true);
        assertStorageTypes(types, expected);
      }
    };
    /** With empty unavailables and isNewBlock=false */
    static final CheckChooseStorageTypes EmptyUnavailablesAndNonNewBlock
        = new CheckChooseStorageTypes() {
      @Override
      public void checkChooseStorageTypes(BlockStoragePolicy p,
          short replication, List<StorageType> chosen, StorageType... expected) {
        final List<StorageType> types = p.chooseStorageTypes(replication,
            chosen, none, false);
        assertStorageTypes(types, expected);
      }
    };
    /** With both DISK and ARCHIVE unavailables and isNewBlock=true */
    static final CheckChooseStorageTypes BothUnavailableAndNewBlock
        = new CheckChooseStorageTypes() {
      @Override
      public void checkChooseStorageTypes(BlockStoragePolicy p,
          short replication, List<StorageType> chosen, StorageType... expected) {
        final List<StorageType> types = p.chooseStorageTypes(replication,
            chosen, disk_archive, true);
        assertStorageTypes(types, expected);
      }
    };
    /** With both DISK and ARCHIVE unavailable and isNewBlock=false */
    static final CheckChooseStorageTypes BothUnavailableAndNonNewBlock
        = new CheckChooseStorageTypes() {
      @Override
      public void checkChooseStorageTypes(BlockStoragePolicy p,
          short replication, List<StorageType> chosen, StorageType... expected) {
        final List<StorageType> types = p.chooseStorageTypes(replication,
            chosen, disk_archive, false);
        assertStorageTypes(types, expected);
      }
    };
    /** With ARCHIVE unavailable and isNewBlock=true */
    static final CheckChooseStorageTypes ArchivalUnavailableAndNewBlock
        = new CheckChooseStorageTypes() {
      @Override
      public void checkChooseStorageTypes(BlockStoragePolicy p,
          short replication, List<StorageType> chosen, StorageType... expected) {
        final List<StorageType> types = p.chooseStorageTypes(replication,
            chosen, archive, true);
        assertStorageTypes(types, expected);
      }
    };
    /** With ARCHIVE unavailable and isNewBlock=false */
    static final CheckChooseStorageTypes ArchivalUnavailableAndNonNewBlock
        = new CheckChooseStorageTypes() {
      @Override
      public void checkChooseStorageTypes(BlockStoragePolicy p,
          short replication, List<StorageType> chosen, StorageType... expected) {
        final List<StorageType> types = p.chooseStorageTypes(replication,
            chosen, archive, false);
        assertStorageTypes(types, expected);
      }
    };
  }
@Test
public void testChooseStorageTypes() {
run(CheckChooseStorageTypes.Basic);
run(CheckChooseStorageTypes.EmptyUnavailablesAndNewBlock);
run(CheckChooseStorageTypes.EmptyUnavailablesAndNonNewBlock);
}
  /**
   * Exercises {@code method} over the HOT/WARM/COLD policies with every
   * combination of already-chosen DISK/ARCHIVE replicas (0..3 of each) for
   * replication = 3, asserting which storage types must still be chosen.
   */
  private static void run(CheckChooseStorageTypes method) {
    final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
    final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
    final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
    final short replication = 3;
    {
      final List<StorageType> chosen = Lists.newArrayList();
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(cold, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(cold, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(cold, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(cold, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.ARCHIVE);
      method.checkChooseStorageTypes(cold, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(cold, replication, chosen,
          StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(hot, replication, chosen);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(cold, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.ARCHIVE);
      method.checkChooseStorageTypes(cold, replication, chosen,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen);
      method.checkChooseStorageTypes(cold, replication, chosen,
          StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
  }
@Test
public void testChooseStorageTypesWithBothUnavailable() {
runWithBothUnavailable(CheckChooseStorageTypes.BothUnavailableAndNewBlock);
runWithBothUnavailable(CheckChooseStorageTypes.BothUnavailableAndNonNewBlock);
}
private static void runWithBothUnavailable(CheckChooseStorageTypes method) {
final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
final short replication = 3;
for(int n = 0; n <= 3; n++) {
for(int d = 0; d <= n; d++) {
final int a = n - d;
final List<StorageType> chosen = asList(d, a);
method.checkChooseStorageTypes(hot, replication, chosen);
method.checkChooseStorageTypes(warm, replication, chosen);
method.checkChooseStorageTypes(cold, replication, chosen);
}
}
}
  /**
   * DISK unavailable while writing a new block: HOT can never place a replica
   * (it has no creation fallback), while WARM and COLD fall back to ARCHIVE.
   */
  @Test
  public void testChooseStorageTypesWithDiskUnavailableAndNewBlock() {
    final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
    final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
    final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
    final short replication = 3;
    final EnumSet<StorageType> unavailables = disk;
    final boolean isNewBlock = true;
    {
      final List<StorageType> chosen = Lists.newArrayList();
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock);
    }
  }
@Test
public void testChooseStorageTypesWithArchiveUnavailable() {
runWithArchiveUnavailable(CheckChooseStorageTypes.ArchivalUnavailableAndNewBlock);
runWithArchiveUnavailable(CheckChooseStorageTypes.ArchivalUnavailableAndNonNewBlock);
}
  /**
   * ARCHIVE unavailable: COLD (ARCHIVE-only, no usable fallback) can never
   * place a replica, while HOT and WARM can only use DISK.
   */
  private static void runWithArchiveUnavailable(CheckChooseStorageTypes method) {
    final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
    final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
    final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
    final short replication = 3;
    {
      final List<StorageType> chosen = Lists.newArrayList();
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
    {
      final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
    {
      final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(hot, replication, chosen);
      method.checkChooseStorageTypes(warm, replication, chosen);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
      method.checkChooseStorageTypes(hot, replication, chosen,
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      method.checkChooseStorageTypes(warm, replication, chosen,
          StorageType.DISK);
      method.checkChooseStorageTypes(cold, replication, chosen);
    }
  }
  /**
   * DISK unavailable on the replication (non-new-block) path: unlike the
   * new-block case, HOT may use its ARCHIVE replication fallback here.
   */
  @Test
  public void testChooseStorageTypesWithDiskUnavailableAndNonNewBlock() {
    final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
    final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
    final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
    final short replication = 3;
    final EnumSet<StorageType> unavailables = disk;
    final boolean isNewBlock = false;
    {
      final List<StorageType> chosen = Lists.newArrayList();
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(StorageType.DISK);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK, StorageType.DISK);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.DISK, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE, StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.DISK, StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock,
          StorageType.ARCHIVE);
    }
    {
      final List<StorageType> chosen = Arrays.asList(
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE);
      checkChooseStorageTypes(hot, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(warm, replication, chosen, unavailables, isNewBlock);
      checkChooseStorageTypes(cold, replication, chosen, unavailables, isNewBlock);
    }
  }
static void checkChooseStorageTypes(BlockStoragePolicy p, short replication,
List<StorageType> chosen, EnumSet<StorageType> unavailables,
boolean isNewBlock, StorageType... expected) {
final List<StorageType> types = p.chooseStorageTypes(replication, chosen,
unavailables, isNewBlock);
assertStorageTypes(types, expected);
}
static void assertStorageTypes(List<StorageType> computed, StorageType... expected) {
assertStorageTypes(computed.toArray(StorageType.EMPTY_ARRAY), expected);
}
  /**
   * Asserts the two storage-type arrays contain the same elements, ignoring
   * order. Note: both arrays are sorted in place as a side effect.
   */
  static void assertStorageTypes(StorageType[] computed, StorageType... expected) {
    Arrays.sort(expected);
    Arrays.sort(computed);
    Assert.assertArrayEquals(expected, computed);
  }
  /**
   * Verifies {@link BlockStoragePolicy#chooseExcess}: for every split of up
   * to 6 existing replicas into d DISK and a ARCHIVE replicas, each policy
   * must choose the correct replicas to discard at replication factor 3.
   */
  @Test
  public void testChooseExcess() {
    final BlockStoragePolicy hot = POLICY_SUITE.getPolicy(HOT);
    final BlockStoragePolicy warm = POLICY_SUITE.getPolicy(WARM);
    final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
    final short replication = 3;
    for(int n = 0; n <= 6; n++) {
      for(int d = 0; d <= n; d++) {
        final int a = n - d;
        final List<StorageType> chosen = asList(d, a);
        {
          // HOT keeps replicas on DISK: every ARCHIVE replica is excess,
          // plus any DISK replicas beyond the replication factor.
          final int nDisk = Math.max(0, d - replication);
          final int nArchive = a;
          final StorageType[] expected = newStorageTypes(nDisk, nArchive);
          checkChooseExcess(hot, replication, chosen, expected);
        }
        {
          // WARM keeps one DISK replica and the rest on ARCHIVE.
          final int nDisk = Math.max(0, d - 1);
          final int nArchive = Math.max(0, a - replication + 1);
          final StorageType[] expected = newStorageTypes(nDisk, nArchive);
          checkChooseExcess(warm, replication, chosen, expected);
        }
        {
          // COLD keeps only ARCHIVE replicas: all DISK replicas are excess.
          final int nDisk = d;
          final int nArchive = Math.max(0, a - replication );
          final StorageType[] expected = newStorageTypes(nDisk, nArchive);
          checkChooseExcess(cold, replication, chosen, expected);
        }
      }
    }
  }
static void checkChooseExcess(BlockStoragePolicy p, short replication,
List<StorageType> chosen, StorageType... expected) {
final List<StorageType> types = p.chooseExcess(replication, chosen);
assertStorageTypes(types, expected);
}
private void checkDirectoryListing(HdfsFileStatus[] stats, byte... policies) {
Assert.assertEquals(stats.length, policies.length);
for (int i = 0; i < stats.length; i++) {
Assert.assertEquals(stats[i].getStoragePolicy(), policies[i]);
}
}
  /**
   * End-to-end check of set/get storage policy through the FileSystem API:
   * rejects invalid policy names and nonexistent paths, reports the policies
   * in directory listings, and keeps them across a NameNode restart (edit
   * log replay) and a checkpoint (fsimage save/load).
   */
  @Test
  public void testSetStoragePolicy() throws Exception {
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(REPLICATION).build();
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();
    try {
      final Path dir = new Path("/testSetStoragePolicy");
      final Path fooFile = new Path(dir, "foo");
      final Path barDir = new Path(dir, "bar");
      final Path barFile1= new Path(barDir, "f1");
      final Path barFile2= new Path(barDir, "f2");
      DFSTestUtil.createFile(fs, fooFile, FILE_LEN, REPLICATION, 0L);
      DFSTestUtil.createFile(fs, barFile1, FILE_LEN, REPLICATION, 0L);
      DFSTestUtil.createFile(fs, barFile2, FILE_LEN, REPLICATION, 0L);
      // An unknown policy name must be rejected (surfaces as RemoteException
      // over RPC).
      final String invalidPolicyName = "INVALID-POLICY";
      try {
        fs.setStoragePolicy(fooFile, invalidPolicyName);
        Assert.fail("Should throw a HadoopIllegalArgumentException");
      } catch (RemoteException e) {
        GenericTestUtils.assertExceptionContains(invalidPolicyName, e);
      }
      // check storage policy
      HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      HdfsFileStatus[] barList = fs.getClient().listPaths(barDir.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      checkDirectoryListing(dirList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
          BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
      checkDirectoryListing(barList, BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
          BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
      // Setting or getting a policy on a path that does not exist must fail.
      final Path invalidPath = new Path("/invalidPath");
      try {
        fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
        Assert.fail("Should throw a FileNotFoundException");
      } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
      }
      try {
        fs.getStoragePolicy(invalidPath);
        Assert.fail("Should throw a FileNotFoundException");
      } catch (FileNotFoundException e) {
        GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
      }
      fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
      fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
      fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
      Assert.assertEquals("File storage policy should be COLD",
          HdfsConstants.COLD_STORAGE_POLICY_NAME,
          fs.getStoragePolicy(fooFile).getName());
      Assert.assertEquals("File storage policy should be WARM",
          HdfsConstants.WARM_STORAGE_POLICY_NAME,
          fs.getStoragePolicy(barDir).getName());
      Assert.assertEquals("File storage policy should be HOT",
          HdfsConstants.HOT_STORAGE_POLICY_NAME,
          fs.getStoragePolicy(barFile2).getName());
      dirList = fs.getClient().listPaths(dir.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing();
      barList = fs.getClient().listPaths(barDir.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing();
      checkDirectoryListing(dirList, WARM, COLD); // bar is warm, foo is cold
      checkDirectoryListing(barList, WARM, HOT);
      // restart namenode to make sure the editlog is correct
      cluster.restartNameNode(true);
      dirList = fs.getClient().listPaths(dir.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      barList = fs.getClient().listPaths(barDir.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      checkDirectoryListing(dirList, WARM, COLD); // bar is warm, foo is cold
      checkDirectoryListing(barList, WARM, HOT);
      // restart namenode with checkpoint to make sure the fsimage is correct
      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      fs.saveNamespace();
      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      cluster.restartNameNode(true);
      dirList = fs.getClient().listPaths(dir.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing();
      barList = fs.getClient().listPaths(barDir.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing();
      checkDirectoryListing(dirList, WARM, COLD); // bar is warm, foo is cold
      checkDirectoryListing(barList, WARM, HOT);
    } finally {
      cluster.shutdown();
    }
  }
@Test
public void testGetStoragePolicy() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
try {
final Path dir = new Path("/testGetStoragePolicy");
final Path fooFile = new Path(dir, "foo");
DFSTestUtil.createFile(fs, fooFile, FILE_LEN, REPLICATION, 0L);
DFSClient client = new DFSClient(cluster.getNameNode(0)
.getNameNodeAddress(), conf);
client.setStoragePolicy("/testGetStoragePolicy/foo",
HdfsConstants.COLD_STORAGE_POLICY_NAME);
String policyName = client.getStoragePolicy("/testGetStoragePolicy/foo")
.getName();
Assert.assertEquals("File storage policy should be COLD",
HdfsConstants.COLD_STORAGE_POLICY_NAME, policyName);
} finally {
cluster.shutdown();
}
}
  /**
   * Checks the interaction between storage policies and snapshots: listing a
   * snapshot path always reports the *latest* policy of the corresponding
   * file/directory, and files deleted from the live tree remain visible
   * (with their last policy) under the snapshot.
   */
  @Test
  public void testSetStoragePolicyWithSnapshot() throws Exception {
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(REPLICATION).build();
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();
    try {
      final Path dir = new Path("/testSetStoragePolicyWithSnapshot");
      final Path fooDir = new Path(dir, "foo");
      final Path fooFile1= new Path(fooDir, "f1");
      final Path fooFile2= new Path(fooDir, "f2");
      DFSTestUtil.createFile(fs, fooFile1, FILE_LEN, REPLICATION, 0L);
      DFSTestUtil.createFile(fs, fooFile2, FILE_LEN, REPLICATION, 0L);
      fs.setStoragePolicy(fooDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
      HdfsFileStatus[] dirList = fs.getClient().listPaths(dir.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      checkDirectoryListing(dirList, WARM);
      HdfsFileStatus[] fooList = fs.getClient().listPaths(fooDir.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      checkDirectoryListing(fooList, WARM, WARM);
      // take snapshot
      SnapshotTestHelper.createSnapshot(fs, dir, "s1");
      // change the storage policy of fooFile1
      fs.setStoragePolicy(fooFile1, HdfsConstants.COLD_STORAGE_POLICY_NAME);
      fooList = fs.getClient().listPaths(fooDir.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing();
      checkDirectoryListing(fooList, COLD, WARM);
      // check the policy for /dir/.snapshot/s1/foo/f1. Note we always return
      // the latest storage policy for a file/directory.
      Path s1f1 = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo/f1");
      DirectoryListing f1Listing = fs.getClient().listPaths(s1f1.toString(),
          HdfsFileStatus.EMPTY_NAME);
      checkDirectoryListing(f1Listing.getPartialListing(), COLD);
      // delete f1
      fs.delete(fooFile1, true);
      fooList = fs.getClient().listPaths(fooDir.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing();
      checkDirectoryListing(fooList, WARM);
      // check the policy for /dir/.snapshot/s1/foo/f1 again after the deletion
      checkDirectoryListing(fs.getClient().listPaths(s1f1.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD);
      // change the storage policy of foo dir
      fs.setStoragePolicy(fooDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
      // /dir/foo is now hot
      dirList = fs.getClient().listPaths(dir.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      checkDirectoryListing(dirList, HOT);
      // /dir/foo/f2 is hot
      fooList = fs.getClient().listPaths(fooDir.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing();
      checkDirectoryListing(fooList, HOT);
      // check storage policy of snapshot path
      Path s1 = SnapshotTestHelper.getSnapshotRoot(dir, "s1");
      Path s1foo = SnapshotTestHelper.getSnapshotPath(dir, "s1", "foo");
      checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
      // /dir/.snapshot/.s1/foo/f1 and /dir/.snapshot/.s1/foo/f2 should still
      // follow the latest
      checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);
      // delete foo
      fs.delete(fooDir, true);
      checkDirectoryListing(fs.getClient().listPaths(s1.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing(), HOT);
      checkDirectoryListing(fs.getClient().listPaths(s1foo.toString(),
          HdfsFileStatus.EMPTY_NAME).getPartialListing(), COLD, HOT);
    } finally {
      cluster.shutdown();
    }
  }
private static StorageType[][] genStorageTypes(int numDataNodes) {
StorageType[][] types = new StorageType[numDataNodes][];
for (int i = 0; i < types.length; i++) {
types[i] = new StorageType[]{StorageType.DISK, StorageType.ARCHIVE};
}
return types;
}
private void checkLocatedBlocks(HdfsLocatedFileStatus status, int blockNum,
int replicaNum, StorageType... types) {
List<StorageType> typeList = Lists.newArrayList();
Collections.addAll(typeList, types);
LocatedBlocks lbs = status.getBlockLocations();
Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
for (LocatedBlock lb : lbs.getLocatedBlocks()) {
Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
for (StorageType type : lb.getStorageTypes()) {
Assert.assertTrue(typeList.remove(type));
}
}
Assert.assertTrue(typeList.isEmpty());
}
  /**
   * Creates a file under a directory with the given storage policy, raises
   * its replication factor from 3 to numDataNodes (5), then lowers it back,
   * checking the replica storage types at each step.
   *
   * @param policyName storage policy to set on the parent directory
   * @param policyId expected policy id reported in listings
   * @param before expected replica storage types at replication 3
   * @param after expected replica storage types at replication 5
   */
  private void testChangeFileRep(String policyName, byte policyId,
                                 StorageType[] before,
                                 StorageType[] after) throws Exception {
    final int numDataNodes = 5;
    final StorageType[][] types = genStorageTypes(numDataNodes);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDataNodes).storageTypes(types).build();
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();
    try {
      final Path dir = new Path("/test");
      fs.mkdirs(dir);
      fs.setStoragePolicy(dir, policyName);
      final Path foo = new Path(dir, "foo");
      DFSTestUtil.createFile(fs, foo, FILE_LEN, REPLICATION, 0L);
      HdfsFileStatus[] status = fs.getClient().listPaths(foo.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      checkDirectoryListing(status, policyId);
      HdfsLocatedFileStatus fooStatus = (HdfsLocatedFileStatus) status[0];
      checkLocatedBlocks(fooStatus, 1, 3, before);
      // change the replication factor to 5
      fs.setReplication(foo, (short) numDataNodes);
      // NOTE(review): fixed sleeps plus explicit heartbeat/block-report
      // triggers give the namenode time to place the new replicas; this is
      // timing-sensitive by design.
      Thread.sleep(1000);
      for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.triggerHeartbeat(dn);
      }
      Thread.sleep(1000);
      status = fs.getClient().listPaths(foo.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      checkDirectoryListing(status, policyId);
      fooStatus = (HdfsLocatedFileStatus) status[0];
      checkLocatedBlocks(fooStatus, 1, numDataNodes, after);
      // change the replication factor back to 3
      fs.setReplication(foo, REPLICATION);
      Thread.sleep(1000);
      for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.triggerHeartbeat(dn);
      }
      Thread.sleep(1000);
      for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.triggerBlockReport(dn);
      }
      Thread.sleep(1000);
      status = fs.getClient().listPaths(foo.toString(),
          HdfsFileStatus.EMPTY_NAME, true).getPartialListing();
      checkDirectoryListing(status, policyId);
      fooStatus = (HdfsLocatedFileStatus) status[0];
      checkLocatedBlocks(fooStatus, 1, REPLICATION, before);
    } finally {
      cluster.shutdown();
    }
  }
/**
* Consider a File with Hot storage policy. Increase replication factor of
* that file from 3 to 5. Make sure all replications are created in DISKS.
*/
@Test
public void testChangeHotFileRep() throws Exception {
testChangeFileRep(HdfsConstants.HOT_STORAGE_POLICY_NAME, HOT,
new StorageType[]{StorageType.DISK, StorageType.DISK,
StorageType.DISK},
new StorageType[]{StorageType.DISK, StorageType.DISK, StorageType.DISK,
StorageType.DISK, StorageType.DISK});
}
/**
* Consider a File with Warm temperature. Increase replication factor of
* that file from 3 to 5. Make sure all replicas are created in DISKS
* and ARCHIVE.
*/
@Test
public void testChangeWarmRep() throws Exception {
testChangeFileRep(HdfsConstants.WARM_STORAGE_POLICY_NAME, WARM,
new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
StorageType.ARCHIVE},
new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
}
/**
* Consider a File with Cold temperature. Increase replication factor of
* that file from 3 to 5. Make sure all replicas are created in ARCHIVE.
*/
@Test
public void testChangeColdRep() throws Exception {
testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD,
new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
StorageType.ARCHIVE},
new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
}
@Test
public void testChooseTargetWithTopology() throws Exception {
BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
new StorageType[]{StorageType.SSD, StorageType.DISK,
StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
new StorageType[]{StorageType.DISK, StorageType.SSD,
StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
final String[] hosts = {"host1", "host2", "host3"};
final StorageType[] types = {StorageType.DISK, StorageType.SSD,
StorageType.ARCHIVE};
final DatanodeStorageInfo[] storages = DFSTestUtil
.createDatanodeStorageInfos(3, racks, hosts, types);
final DatanodeDescriptor[] dataNodes = DFSTestUtil
.toDatanodeDescriptor(storages);
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
DFSTestUtil.formatNameNode(conf);
NameNode namenode = new NameNode(conf);
final BlockManager bm = namenode.getNamesystem().getBlockManager();
BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
for (DatanodeDescriptor datanode : dataNodes) {
cluster.add(datanode);
}
DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
new HashSet<Node>(), 0, policy1);
System.out.println(Arrays.asList(targets));
Assert.assertEquals(3, targets.length);
targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
new HashSet<Node>(), 0, policy2);
System.out.println(Arrays.asList(targets));
Assert.assertEquals(3, targets.length);
}
/**
* Test getting all the storage policies from the namenode
*/
@Test
public void testGetAllStoragePolicies() throws Exception {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
try {
BlockStoragePolicy[] policies = fs.getStoragePolicies();
Assert.assertEquals(6, policies.length);
Assert.assertEquals(POLICY_SUITE.getPolicy(COLD).toString(),
policies[0].toString());
Assert.assertEquals(POLICY_SUITE.getPolicy(WARM).toString(),
policies[1].toString());
Assert.assertEquals(POLICY_SUITE.getPolicy(HOT).toString(),
policies[2].toString());
} finally {
IOUtils.cleanup(null, fs);
cluster.shutdown();
}
}
@Test
public void testChooseSsdOverDisk() throws Exception {
BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
new StorageType[]{StorageType.SSD, StorageType.DISK,
StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
final String[] hosts = {"host1", "host2", "host3"};
final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};
final DatanodeStorageInfo[] diskStorages
= DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
final DatanodeDescriptor[] dataNodes
= DFSTestUtil.toDatanodeDescriptor(diskStorages);
for(int i = 0; i < dataNodes.length; i++) {
BlockManagerTestUtil.updateStorage(dataNodes[i],
new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
StorageType.SSD));
}
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
new File(baseDir, "name").getPath());
DFSTestUtil.formatNameNode(conf);
NameNode namenode = new NameNode(conf);
final BlockManager bm = namenode.getNamesystem().getBlockManager();
BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
for (DatanodeDescriptor datanode : dataNodes) {
cluster.add(datanode);
}
DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
new HashSet<Node>(), 0, policy);
System.out.println(policy.getName() + ": " + Arrays.asList(targets));
Assert.assertEquals(2, targets.length);
Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
  /**
   * Regression test for HDFS-8219: a file without an explicit policy must
   * report its parent directory's (latest) policy, both before and after a
   * NameNode restart, i.e. the edit log must replay correctly.
   */
  @Test
  public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
    //HDFS8219
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(REPLICATION)
        .storageTypes(
            new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
        .build();
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();
    try {
      final String file = "/testScheduleWithinSameNode/file";
      Path dir = new Path("/testScheduleWithinSameNode");
      fs.mkdirs(dir);
      // 2. Set Dir policy
      fs.setStoragePolicy(dir, "COLD");
      // 3. Create file
      final FSDataOutputStream out = fs.create(new Path(file));
      out.writeChars("testScheduleWithinSameNode");
      out.close();
      // 4. Set Dir policy
      fs.setStoragePolicy(dir, "HOT");
      HdfsFileStatus status = fs.getClient().getFileInfo(file);
      // 5. get file policy, it should be parent policy.
      Assert
          .assertTrue("File storage policy should be HOT",
              status.getStoragePolicy() == HOT);
      // 6. restart NameNode for reloading edits logs.
      cluster.restartNameNode(true);
      // 7. get file policy, it should be parent policy.
      status = fs.getClient().getFileInfo(file);
      Assert
          .assertTrue("File storage policy should be HOT",
              status.getStoragePolicy() == HOT);
    } finally {
      cluster.shutdown();
    }
  }
/**
* Verify that {@link FileSystem#getAllStoragePolicies} returns all
* known storage policies for DFS.
*
* @throws IOException
*/
@Test
public void testGetAllStoragePoliciesFromFs() throws IOException {
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.storageTypes(
new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
.build();
try {
cluster.waitActive();
// Get policies via {@link FileSystem#getAllStoragePolicies}
Set<String> policyNamesSet1 = new HashSet<>();
for (BlockStoragePolicySpi policy :
cluster.getFileSystem().getAllStoragePolicies()) {
policyNamesSet1.add(policy.getName());
}
// Get policies from the default BlockStoragePolicySuite.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
Set<String> policyNamesSet2 = new HashSet<>();
for (BlockStoragePolicy policy : suite.getAllPolicies()) {
policyNamesSet2.add(policy.getName());
}
// Ensure that we got the same set of policies in both cases.
Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
} finally {
cluster.shutdown();
}
}
@Test
public void testStorageType() {
final EnumMap<StorageType, Integer> map = new EnumMap<>(StorageType.class);
//put storage type is reversed order
map.put(StorageType.ARCHIVE, 1);
map.put(StorageType.DISK, 1);
map.put(StorageType.SSD, 1);
map.put(StorageType.RAM_DISK, 1);
{
final Iterator<StorageType> i = map.keySet().iterator();
Assert.assertEquals(StorageType.RAM_DISK, i.next());
Assert.assertEquals(StorageType.SSD, i.next());
Assert.assertEquals(StorageType.DISK, i.next());
Assert.assertEquals(StorageType.ARCHIVE, i.next());
}
{
final Iterator<Map.Entry<StorageType, Integer>> i
= map.entrySet().iterator();
Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
Assert.assertEquals(StorageType.SSD, i.next().getKey());
Assert.assertEquals(StorageType.DISK, i.next().getKey());
Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
}
}
}
| Microsoft-CISL/hadoop-prototype | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java | Java | apache-2.0 | 61,467 |
/**
* Copyright (C) 2011 rwitzel75@googlemail.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.rwitzel.streamflyer.regex.addons.util;
import java.util.regex.Pattern;
/**
 * This class supports the creation of regular expressions with embedded flag expressions.
 *
 * @author rwoo
 * @since 1.1.0
 */
public class EmbeddedFlagUtil {

    /**
     * Creates a regular expression with an embedded flag expression.
     * <p>
     * Supports all flags of JDK7 {@link java.util.regex.Pattern} that have an embedded flag character, i.e. the
     * following flags:
     * <ul>
     * <li>{@link Pattern#CASE_INSENSITIVE}
     * <li>{@link Pattern#UNIX_LINES}
     * <li>{@link Pattern#MULTILINE}
     * <li>{@link Pattern#DOTALL}
     * <li>{@link Pattern#UNICODE_CASE}
     * <li>{@link Pattern#COMMENTS}
     * <li>{@link Pattern#UNICODE_CHARACTER_CLASS}
     * </ul>
     * <p>
     * EXAMPLE:
     * <ul>
     * <li>For <code>("abc", Pattern.CASE_INSENSITIVE ^ Pattern.MULTILINE)</code> the method returns <code>
     * "(?im:abc)"</code>.</li>
     * <li>For <code>("abc", 0)</code> the method returns <code>"abc"</code>.</li>
     * </ul>
     *
     * @param regex the regular expression to wrap
     * @param flags the {@link Pattern} flags to embed, OR-ed together
     * @return Returns the given regex enriched with an embedded flag expression that represents the given flags. If
     *         there is no flag given, the returned regex is equal to the given regex.
     */
    public String embedFlags(String regex, int flags) {
        if (flags == 0) {
            return regex;
        }
        return "(?" + mapFlagsToEmbeddedFlags(flags) + ":" + regex + ")";
    }

    /**
     * Maps the given flag bit set to the corresponding embedded flag characters. See {@link #embedFlags(String, int)}.
     */
    protected String mapFlagsToEmbeddedFlags(int flags) {
        // Table-driven mapping: flag constants and their embedded flag
        // characters, kept in the order documented by java.util.regex.Pattern.
        // UNICODE_CHARACTER_CLASS ("U") was added in JDK7 and was missing
        // from the original implementation.
        final int[] flagValues = {
                Pattern.CASE_INSENSITIVE, Pattern.UNIX_LINES, Pattern.MULTILINE,
                Pattern.DOTALL, Pattern.UNICODE_CASE, Pattern.COMMENTS,
                Pattern.UNICODE_CHARACTER_CLASS };
        final String flagChars = "idmsuxU";

        StringBuilder flagsAsString = new StringBuilder();
        for (int i = 0; i < flagValues.length; i++) {
            if ((flags & flagValues[i]) != 0) {
                flagsAsString.append(flagChars.charAt(i));
            }
        }
        return flagsAsString.toString();
    }
}
| rwitzel/streamflyer | streamflyer-core/src/main/java/com/github/rwitzel/streamflyer/regex/addons/util/EmbeddedFlagUtil.java | Java | apache-2.0 | 2,915 |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package namer
import (
"reflect"
"testing"
"k8s.io/gengo/types"
)
// TestNameStrategy exercises the public and raw namers against a small
// universe of types and checks the generated, ordered names.
func TestNameStrategy(t *testing.T) {
	u := types.Universe{}

	// Add some types.
	base := u.Type(types.Name{Package: "foo/bar", Name: "Baz"})
	base.Kind = types.Struct

	tmp := u.Type(types.Name{Package: "", Name: "[]bar.Baz"})
	tmp.Kind = types.Slice
	tmp.Elem = base

	tmp = u.Type(types.Name{Package: "", Name: "map[string]bar.Baz"})
	tmp.Kind = types.Map
	tmp.Key = types.String
	tmp.Elem = base

	tmp = u.Type(types.Name{Package: "foo/other", Name: "Baz"})
	tmp.Kind = types.Struct
	tmp.Members = []types.Member{{
		Embedded: true,
		Type:     base,
	}}

	tmp = u.Type(types.Name{Package: "", Name: "chan Baz"})
	tmp.Kind = types.Chan
	tmp.Elem = base

	tmp = u.Type(types.Name{Package: "", Name: "[4]Baz"})
	tmp.Kind = types.Array
	tmp.Elem = base
	tmp.Len = 4

	u.Type(types.Name{Package: "", Name: "string"})

	// checkNames orders the universe with the given namer, collects the
	// generated names, and compares them against the expectation. This
	// replaces four copy-pasted order/collect/compare stanzas.
	checkNames := func(o Orderer, expect []string) {
		order := o.OrderUniverse(u)
		actual := make([]string, len(order))
		for i, typ := range order {
			actual[i] = o.Name(typ)
		}
		if !reflect.DeepEqual(expect, actual) {
			t.Errorf("Wanted %#v, got %#v", expect, actual)
		}
	}

	checkNames(Orderer{NewPublicNamer(0)},
		[]string{"Array4Baz", "Baz", "Baz", "ChanBaz", "MapStringToBaz", "SliceBaz", "String"})
	checkNames(Orderer{NewRawNamer("my/package", nil)},
		[]string{"[4]bar.Baz", "[]bar.Baz", "bar.Baz", "chan bar.Baz", "map[string]bar.Baz", "other.Baz", "string"})
	checkNames(Orderer{NewRawNamer("foo/bar", nil)},
		[]string{"Baz", "[4]Baz", "[]Baz", "chan Baz", "map[string]Baz", "other.Baz", "string"})
	checkNames(Orderer{NewPublicNamer(1)},
		[]string{"Array4BarBaz", "BarBaz", "ChanBarBaz", "MapStringToBarBaz", "OtherBaz", "SliceBarBaz", "String"})
}
| kubernetes/gengo | namer/namer_test.go | GO | apache-2.0 | 2,994 |
package controllers
import (
. "appinhouse/server/constants"
"appinhouse/server/models"
"github.com/astaxie/beego"
)
// CreateAppController handles requests that register a new application.
type CreateAppController struct {
	BaseController
}
// CreateApp validates the request parameters and registers a new app plus
// its app-list entry, responding with an error if the app already exists.
func (c *CreateAppController) CreateApp() {
	resp := NewSuccessResponseDto()
	appName := c.Ctx.Input.Param(":app")
	description := c.GetString("description")
	aliasName := c.GetString("alias")

	// Reject missing/overlong names and empty descriptions up front.
	if appName == "" || len(appName) > App_Name_Len || description == "" {
		beego.Info("AddApp param name error !name:", appName, "desc:", description)
		c.setError4Dto(ErrorParam, resp)
		return
	}

	exists, err := models.AppDao.Exist(appName)
	if err != nil {
		beego.Info("AddApp Exist app error !name:", appName, "error:", err.Error())
		c.setError4Dto(ErrorParam, resp)
		return
	}
	if exists {
		// Duplicate registration is rejected.
		c.setError4Dto(ErrorAppExistError, resp)
		return
	}

	info := &models.AppInfo{App: appName, Description: description, Alias: aliasName}
	if err = models.AppDao.Save(info); err != nil {
		beego.Info("AddApp save app error !name:", appName, "error:", err.Error())
		c.setError4Dto(err, resp)
		return
	}
	if err = models.AppListDao.Save(appName); err != nil {
		beego.Info("AddApp save applist error !name:", appName, "error:", err.Error())
		c.setError4Dto(err, resp)
		return
	}

	c.Data["json"] = resp
	c.ServeJSON()
}
| rog2/appinhouse | server/controllers/createapp.go | GO | apache-2.0 | 1,269 |
package me.pjq.pushup.utils;
/**
* Created by pjq on 11/9/13.
*/
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import android.app.Activity;
import android.graphics.Bitmap;
import android.graphics.Rect;
import android.view.View;
import me.pjq.pushup.LocalPathResolver;
/**
 * Utility for capturing a screenshot of an Activity's visible content
 * (below the status bar) and saving it as a PNG file.
 */
public class ScreenshotUtils {

    /**
     * Captures the currently visible content of the given activity,
     * cropping off the status bar.
     *
     * @param pActivity activity whose decor view is captured
     * @return bitmap of the screen below the status bar
     */
    public static Bitmap takeScreenShot(Activity pActivity) {
        View view = pActivity.getWindow().getDecorView();
        view.setDrawingCacheEnabled(true);
        view.buildDrawingCache();
        Bitmap cache = view.getDrawingCache();

        // The top of the visible frame equals the status bar height.
        Rect frame = new Rect();
        view.getWindowVisibleDisplayFrame(frame);
        int stautsHeight = frame.top;

        int width = pActivity.getWindowManager().getDefaultDisplay().getWidth();
        int height = pActivity.getWindowManager().getDefaultDisplay().getHeight();
        Bitmap bitmap = Bitmap.createBitmap(cache, 0, stautsHeight, width,
                height - stautsHeight);
        view.setDrawingCacheEnabled(false);
        return bitmap;
    }

    /**
     * Writes the bitmap as PNG (quality 90) to the given path.
     *
     * @param pBitmap bitmap to save
     * @param strName absolute file path to write to
     * @return true if the file was written successfully
     */
    private static boolean savePic(Bitmap pBitmap, String strName) {
        // The original leaked the stream when compress()/flush() threw and
        // null-checked the constructor result (which can never be null).
        FileOutputStream fos = null;
        try {
            fos = new FileOutputStream(strName);
            pBitmap.compress(Bitmap.CompressFormat.PNG, 90, fos);
            fos.flush();
            return true;
        } catch (IOException e) {
            // FileNotFoundException is an IOException, so both original
            // catch branches are covered here.
            e.printStackTrace();
        } finally {
            if (fos != null) {
                try {
                    fos.close();
                } catch (IOException ignored) {
                    // Nothing sensible to do if close fails.
                }
            }
        }
        return false;
    }

    /**
     * @return a timestamp-based PNG path inside the image cache directory,
     *         creating the directory if needed
     */
    public static String getshotFilePath() {
        String imagePath = LocalPathResolver.getCachePath("images");
        File file = new File(imagePath);
        if (!file.exists()) {
            file.mkdirs();
        }
        return imagePath + System.currentTimeMillis() + ".png";
    }

    /**
     * @return a per-day PNG path inside the image cache directory (one file
     *         per calendar day), creating the directory if needed
     */
    public static String getshotFilePathByDay() {
        String imagePath = LocalPathResolver.getCachePath("images");
        File file = new File(imagePath);
        if (!file.exists()) {
            file.mkdirs();
        }
        return imagePath + Utils.time2DateKey("" + System.currentTimeMillis()) + ".png";
    }

    /**
     * Captures the activity and saves it to the given path.
     *
     * @return true if the screenshot was written successfully
     */
    public static boolean shotBitmap(Activity pActivity, String filePath) {
        return ScreenshotUtils.savePic(takeScreenShot(pActivity), filePath);
    }

    /**
     * Captures the activity, saves it to the given path, and also returns
     * the captured bitmap to the caller.
     */
    public static Bitmap shotBitmap2(Activity pActivity, String filePath) {
        Bitmap bitmap = takeScreenShot(pActivity);
        ScreenshotUtils.savePic(bitmap, filePath);
        return bitmap;
    }
}
| pjq/pushup | PushUp/src/main/java/me/pjq/pushup/utils/ScreenshotUtils.java | Java | apache-2.0 | 2,814 |
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package Gemstone;
import java.io.File;
import java.util.ArrayList;
/**
*
* @author SBANTA
* - 04/04/2012 - updated for Gemstone
*/
public class CachingUserRecord {
private static String SName = sagex.api.Global.IsClient() ? "SageDiamondTeam" + sagex.api.Global.GetUIContextName() : "SageDiamondTeam";
    /**
     * Debug entry point: dumps every record in the store (all field names
     * and values) to stdout.
     */
    public static void main(String[] args) {
        Object[] stores = sagex.api.UserRecordAPI.GetAllUserRecords(SName);
        for (Object curr : stores) {
            String[] store = sagex.api.UserRecordAPI.GetUserRecordNames(curr);
            for (String currs : store) {
                System.out.println("CurrentStore=" + currs);
                System.out.println("Value=" + sagex.api.UserRecordAPI.GetUserRecordData(curr, currs));
            }
        }
    }
public static void DeleteStoredLocations() {
sagex.api.UserRecordAPI.DeleteAllUserRecords(SName);
}
public static Boolean HasStoredLocation(String ID, String Type) {
Object Record = sagex.api.UserRecordAPI.GetUserRecord(SName, ID);
if (Record == null) {
return false;
}
String Curr = sagex.api.UserRecordAPI.GetUserRecordData(Record, Type);
return Curr != null && !Curr.equals("");
}
public static Boolean HasStoredLocation(String ID) {
Object Record = sagex.api.UserRecordAPI.GetUserRecord(SName, ID);
System.out.println("Recordforfanaat=" + Record);
return Record != null;
}
public static String[] GetAllStoresForID(String ID) {
Object Record = sagex.api.UserRecordAPI.GetUserRecord(SName, ID);
String[] Stores = sagex.api.UserRecordAPI.GetUserRecordNames(Record);
return Stores;
}
public static void DeleteStoresForID(String ID) {
Object Record = sagex.api.UserRecordAPI.GetUserRecord(SName, ID);
sagex.api.UserRecordAPI.DeleteUserRecord(Record);
}
public static ArrayList<File> GetAllCacheLocationsForID(String ID) {
ArrayList<File> Cached = new ArrayList<File>();
Object Record = sagex.api.UserRecordAPI.GetUserRecord(SName, ID);
String[] Stores = sagex.api.UserRecordAPI.GetUserRecordNames(Record);
for (String curr : Stores) {
Cached.add(new File(sagex.api.UserRecordAPI.GetUserRecordData(Record, curr)));
}
return Cached;
}
public static String GetStoredLocation(String ID, String Type) {
Object Record = sagex.api.UserRecordAPI.GetUserRecord(SName, ID);
return sagex.api.UserRecordAPI.GetUserRecordData(Record, Type);
}
public static void setStoredLocation(String ID, String Type, String Location) {
sagex.api.UserRecordAPI.AddUserRecord(SName, ID);
Object Record = sagex.api.UserRecordAPI.GetUserRecord(SName, ID);
sagex.api.UserRecordAPI.SetUserRecordData(Record, Type, Location);
}
public static void deleteStoredLocation(String ID, String Type, String Location) {
Object Record = sagex.api.UserRecordAPI.GetUserRecord(SName, ID);
sagex.api.UserRecordAPI.DeleteUserRecord(Record);
}
public static String[] GetStoredFanart(String ID) {
Object Record = sagex.api.UserRecordAPI.GetUserRecord(SName, ID);
return sagex.api.UserRecordAPI.GetUserRecordNames(Record);
}
}
| jusjoken/gemstone2 | src/main/java/Gemstone/CachingUserRecord.java | Java | apache-2.0 | 3,416 |
/*
*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package springfox.documentation.spring.web.readers.parameter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Optional;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.Ordered;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestHeader;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RequestPart;
import org.springframework.web.bind.annotation.ValueConstants;
import springfox.documentation.service.ResolvedMethodParameter;
import springfox.documentation.spi.DocumentationType;
import springfox.documentation.spi.service.ParameterBuilderPlugin;
import springfox.documentation.spi.service.contexts.ParameterContext;
import springfox.documentation.spring.web.DescriptionResolver;
import java.util.HashSet;
import java.util.Set;
import static com.google.common.base.Strings.*;
@Component
@Order(Ordered.HIGHEST_PRECEDENCE)
public class ParameterRequiredReader implements ParameterBuilderPlugin {

    private final DescriptionResolver descriptions;

    @Autowired
    public ParameterRequiredReader(DescriptionResolver descriptions) {
        this.descriptions = descriptions;
    }

    /** Marks the parameter as required based on its Spring MVC annotations. */
    @Override
    public void apply(ParameterContext context) {
        ResolvedMethodParameter methodParameter = context.resolvedMethodParameter();
        context.parameterBuilder().required(getAnnotatedRequired(methodParameter));
    }

    /** This plugin applies to every documentation type. */
    @Override
    public boolean supports(DocumentationType delimiter) {
        return true;
    }

    /**
     * A parameter is required when any of its binding annotations says so.
     * For java.util.Optional parameters the required flag on
     * @RequestParam/@RequestHeader/@RequestBody/@RequestPart is irrelevant,
     * since binding always yields a non-null Optional; @PathVariable is
     * always required.
     */
    private Boolean getAnnotatedRequired(ResolvedMethodParameter methodParameter) {
        boolean optional = isOptional(methodParameter);
        boolean required = false;

        Optional<RequestParam> requestParam = methodParameter.findAnnotation(RequestParam.class);
        if (requestParam.isPresent()) {
            required = required || (!optional && isRequired(requestParam.get()));
        }

        Optional<RequestHeader> requestHeader = methodParameter.findAnnotation(RequestHeader.class);
        if (requestHeader.isPresent()) {
            required = required || (!optional && requestHeader.get().required());
        }

        Optional<PathVariable> pathVariable = methodParameter.findAnnotation(PathVariable.class);
        if (pathVariable.isPresent()) {
            required = true;
        }

        Optional<RequestBody> requestBody = methodParameter.findAnnotation(RequestBody.class);
        if (requestBody.isPresent()) {
            required = required || (!optional && requestBody.get().required());
        }

        Optional<RequestPart> requestPart = methodParameter.findAnnotation(RequestPart.class);
        if (requestPart.isPresent()) {
            required = required || (!optional && requestPart.get().required());
        }
        return required;
    }

    /** True when the parameter's erased type is java.util.Optional. */
    @VisibleForTesting
    @SuppressWarnings("squid:S1872")
    boolean isOptional(ResolvedMethodParameter methodParameter) {
        return "java.util.Optional".equals(methodParameter.getParameterType().getErasedType().getName());
    }

    /**
     * A @RequestParam is effectively required only when it is flagged
     * required AND no usable default value has been supplied.
     */
    private boolean isRequired(RequestParam annotation) {
        if (!annotation.required()) {
            return false;
        }
        String defaultValue = descriptions.resolve(annotation.defaultValue());
        return ValueConstants.DEFAULT_NONE.equals(defaultValue) || isNullOrEmpty(defaultValue);
    }
}
| yelhouti/springfox | springfox-spring-web/src/main/java/springfox/documentation/spring/web/readers/parameter/ParameterRequiredReader.java | Java | apache-2.0 | 4,326 |
package gex.newsml.nitf;
import lombok.ToString;
import java.util.HashMap;
import java.util.Map;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAnyAttribute;
import javax.xml.bind.annotation.XmlAttribute;
import javax.xml.bind.annotation.XmlID;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.bind.annotation.adapters.CollapsedStringAdapter;
import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
import javax.xml.namespace.QName;
/**
* <p>
* Java class for anonymous complex type.
*
* <p>
* The following schema fragment specifies the expected content contained within
* this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <attGroup ref="{http://iptc.org/std/NITF/2006-10-18/}globalNITFAttributes"/>
* <attribute name="generation" type="{http://www.w3.org/2001/XMLSchema}NMTOKEN" />
* <attribute name="part" type="{http://www.w3.org/2001/XMLSchema}NMTOKEN" />
* <attribute name="version" type="{http://www.w3.org/2001/XMLSchema}NMTOKEN" />
* <attribute name="key" type="{http://www.w3.org/2001/XMLSchema}string" />
* <anyAttribute processContents='lax' namespace='##other'/>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "")
@XmlRootElement(name = "du-key")
@ToString
public class DuKey {

    // NITF "du-key" attributes; all optional. Field names are bound to XML
    // attribute names via the JAXB annotations below.
    @XmlAttribute(name = "generation")
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    @XmlSchemaType(name = "NMTOKEN")
    protected String generation;
    @XmlAttribute(name = "part")
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    @XmlSchemaType(name = "NMTOKEN")
    protected String part;
    @XmlAttribute(name = "version")
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    @XmlSchemaType(name = "NMTOKEN")
    protected String version;
    @XmlAttribute(name = "key")
    protected String key;
    // Document-unique XML ID for this element.
    @XmlAttribute(name = "id")
    @XmlJavaTypeAdapter(CollapsedStringAdapter.class)
    @XmlID
    @XmlSchemaType(name = "ID")
    protected String id;
    // Catch-all for attributes from other namespaces (processContents="lax").
    @XmlAnyAttribute
    private Map<QName, String> otherAttributes = new HashMap<QName, String>();

    /**
     * Gets the value of the generation property.
     *
     * @return possible object is {@link String }
     */
    public String getGeneration() {
        return generation;
    }

    /**
     * Sets the value of the generation property.
     *
     * @param value
     *            allowed object is {@link String }
     */
    public void setGeneration(String value) {
        this.generation = value;
    }

    /**
     * Gets the value of the part property.
     *
     * @return possible object is {@link String }
     */
    public String getPart() {
        return part;
    }

    /**
     * Sets the value of the part property.
     *
     * @param value
     *            allowed object is {@link String }
     */
    public void setPart(String value) {
        this.part = value;
    }

    /**
     * Gets the value of the version property.
     *
     * @return possible object is {@link String }
     */
    public String getVersion() {
        return version;
    }

    /**
     * Sets the value of the version property.
     *
     * @param value
     *            allowed object is {@link String }
     */
    public void setVersion(String value) {
        this.version = value;
    }

    /**
     * Gets the value of the key property.
     *
     * @return possible object is {@link String }
     */
    public String getKey() {
        return key;
    }

    /**
     * Sets the value of the key property.
     *
     * @param value
     *            allowed object is {@link String }
     */
    public void setKey(String value) {
        this.key = value;
    }

    /**
     * Gets the value of the id property.
     *
     * @return possible object is {@link String }
     */
    public String getId() {
        return id;
    }

    /**
     * Sets the value of the id property.
     *
     * @param value
     *            allowed object is {@link String }
     */
    public void setId(String value) {
        this.id = value;
    }

    /**
     * Gets a map that contains attributes that aren't bound to any typed
     * property on this class.
     *
     * <p>
     * the map is keyed by the name of the attribute and the value is the string
     * value of the attribute.
     *
     * the map returned by this method is live, and you can add new attribute by
     * updating the map directly. Because of this design, there's no setter.
     *
     * @return always non-null
     */
    public Map<QName, String> getOtherAttributes() {
        return otherAttributes;
    }
}
| gextech/iptc-newsml | src/main/java/gex/newsml/nitf/DuKey.java | Java | apache-2.0 | 4,635 |
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package generator
import (
"fmt"
"testing"
"github.com/go-openapi/loads"
"github.com/go-openapi/spec"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// schTypeVals enumerates swagger (type, format) pairs together with the Go
// type the resolver is expected to map each one to. Shared fixture for the
// primitive-resolution tests below.
func schTypeVals() []struct{ Type, Format, Expected string } {
	return []struct{ Type, Format, Expected string }{
		{"boolean", "", "bool"},
		{"string", "", "string"},
		{"integer", "int8", "int8"},
		{"integer", "int16", "int16"},
		{"integer", "int32", "int32"},
		{"integer", "int64", "int64"},
		{"integer", "", "int64"},
		{"integer", "uint8", "uint8"},
		{"integer", "uint16", "uint16"},
		{"integer", "uint32", "uint32"},
		{"integer", "uint64", "uint64"},
		{"number", "float", "float32"},
		{"number", "double", "float64"},
		{"number", "", "float64"},
		{"string", "byte", "strfmt.Base64"},
		{"string", "date", "strfmt.Date"},
		{"string", "date-time", "strfmt.DateTime"},
		{"string", "uri", "strfmt.URI"},
		{"string", "email", "strfmt.Email"},
		{"string", "hostname", "strfmt.Hostname"},
		{"string", "ipv4", "strfmt.IPv4"},
		{"string", "ipv6", "strfmt.IPv6"},
		{"string", "mac", "strfmt.MAC"},
		{"string", "uuid", "strfmt.UUID"},
		{"string", "uuid3", "strfmt.UUID3"},
		{"string", "uuid4", "strfmt.UUID4"},
		{"string", "uuid5", "strfmt.UUID5"},
		{"string", "isbn", "strfmt.ISBN"},
		{"string", "isbn10", "strfmt.ISBN10"},
		{"string", "isbn13", "strfmt.ISBN13"},
		{"string", "creditcard", "strfmt.CreditCard"},
		{"string", "ssn", "strfmt.SSN"},
		{"string", "hexcolor", "strfmt.HexColor"},
		{"string", "rgbcolor", "strfmt.RGBColor"},
		{"string", "duration", "strfmt.Duration"},
		{"string", "ObjectId", "strfmt.ObjectId"},
		{"string", "password", "strfmt.Password"},
		// unknown integer-style formats on strings stay plain strings
		{"string", "uint8", "string"},
		{"string", "uint16", "string"},
		{"string", "uint32", "string"},
		{"string", "uint64", "string"},
		{"string", "int8", "string"},
		{"string", "int16", "string"},
		{"string", "int32", "string"},
		{"string", "int64", "string"},
		{"file", "", "io.ReadCloser"},
	}
}
// schRefVals lists $ref'd definition names together with an optional
// x-go-name override (GoType) and the fully qualified Go type the resolver
// should produce.
func schRefVals() []struct{ Type, GoType, Expected string } {
	fixtures := []struct{ Type, GoType, Expected string }{
		{Type: "Comment", GoType: "", Expected: "models.Comment"},
		{Type: "UserCard", GoType: "UserItem", Expected: "models.UserItem"},
	}
	return fixtures
}
// TestTypeResolver_AdditionalItems verifies that arrays declaring
// additionalItems resolve to non-nullable array types flagged with
// HasAdditionalItems, for every primitive/format fixture.
func TestTypeResolver_AdditionalItems(t *testing.T) {
	_, resolver, err := basicTaskListResolver(t)
	require.NoError(t, err)

	itemType := spec.StringProperty()
	for _, fixture := range schTypeVals() {
		var extra spec.Schema
		extra.Typed(fixture.Type, fixture.Format)

		var arr spec.Schema
		arr.Type = []string{"array"}
		arr.Items = &spec.SchemaOrArray{Schema: itemType}
		arr.AdditionalItems = &spec.SchemaOrBool{Schema: &extra}

		resolved, resolveErr := resolver.ResolveSchema(&arr, true, true)
		require.NoError(t, resolveErr)
		require.True(t, resolved.IsArray)
		assert.True(t, resolved.HasAdditionalItems)
		assert.False(t, resolved.IsNullable)
	}
}
// TestTypeResolver_BasicTypes exercises primitive and string-format
// resolution, the x-omitempty extension on arrays, and the nullability
// extensions (x-isnullable / x-nullable) for both scalars and arrays.
func TestTypeResolver_BasicTypes(t *testing.T) {
	_, resolver, e := basicTaskListResolver(t)
	require.NoError(t, e)
	// primitives and string formats
	for _, val := range schTypeVals() {
		sch := new(spec.Schema)
		sch.Typed(val.Type, val.Format)
		rt, err := resolver.ResolveSchema(sch, true, false)
		require.NoError(t, err)
		assert.False(t, rt.IsNullable, "expected %s with format %q to not be nullable", val.Type, val.Format)
		assertPrimitiveResolve(t, val.Type, val.Format, val.Expected, rt)
	}
	// arrays of primitives and string formats
	for _, val := range schTypeVals() {
		var sch spec.Schema
		sch.Typed(val.Type, val.Format)
		// no extension: arrays are not omitted when empty
		rt, err := resolver.ResolveSchema(new(spec.Schema).CollectionOf(sch), true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsArray)
		assert.False(t, rt.IsEmptyOmitted)
		// x-omitempty: false keeps the default
		s := new(spec.Schema).CollectionOf(sch)
		s.AddExtension(xOmitEmpty, false)
		rt, err = resolver.ResolveSchema(s, true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsArray)
		assert.False(t, rt.IsEmptyOmitted)
		// x-omitempty: true marks the array as omitted when empty
		s = new(spec.Schema).CollectionOf(sch)
		s.AddExtension(xOmitEmpty, true)
		rt, err = resolver.ResolveSchema(s, true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsArray)
		assert.True(t, rt.IsEmptyOmitted)
	}
	// primitives and string formats with x-isnullable / x-nullable
	for _, val := range schTypeVals() {
		sch := new(spec.Schema)
		sch.Typed(val.Type, val.Format)
		sch.Extensions = make(spec.Extensions)
		sch.Extensions[xIsNullable] = true
		rt, err := resolver.ResolveSchema(sch, true, false)
		require.NoError(t, err)
		// file types are never nullable (streams)
		if val.Type == "file" {
			assert.False(t, rt.IsNullable, "expected %q (%q) to not be nullable", val.Type, val.Format)
		} else {
			assert.True(t, rt.IsNullable, "expected %q (%q) to be nullable", val.Type, val.Format)
		}
		assertPrimitiveResolve(t, val.Type, val.Format, val.Expected, rt)
		// Test x-nullable overrides x-isnullable
		sch.Extensions[xIsNullable] = false
		sch.Extensions[xNullable] = true
		rt, err = resolver.ResolveSchema(sch, true, true)
		require.NoError(t, err)
		if val.Type == "file" {
			assert.False(t, rt.IsNullable, "expected %q (%q) to not be nullable", val.Type, val.Format)
		} else {
			assert.True(t, rt.IsNullable, "expected %q (%q) to be nullable", val.Type, val.Format)
		}
		assertPrimitiveResolve(t, val.Type, val.Format, val.Expected, rt)
		// Test x-nullable without x-isnullable
		delete(sch.Extensions, xIsNullable)
		sch.Extensions[xNullable] = true
		rt, err = resolver.ResolveSchema(sch, true, true)
		require.NoError(t, err)
		if val.Type == "file" {
			assert.False(t, rt.IsNullable, "expected %q (%q) to not be nullable", val.Type, val.Format)
		} else {
			assert.True(t, rt.IsNullable, "expected %q (%q) to be nullable", val.Type, val.Format)
		}
		assertPrimitiveResolve(t, val.Type, val.Format, val.Expected, rt)
	}
	// arrays of nullable primitives still resolve to plain arrays
	for _, val := range schTypeVals() {
		var sch spec.Schema
		sch.Typed(val.Type, val.Format)
		sch.AddExtension(xIsNullable, true)
		rt, err := resolver.ResolveSchema(new(spec.Schema).CollectionOf(sch), true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsArray)
	}
}
// TestTypeResolver_Refs checks that $ref'd definitions resolve to
// non-anonymous, nullable model types, and that arrays of refs render as
// slices of pointers — for both anonymous and named usage.
func TestTypeResolver_Refs(t *testing.T) {
	_, resolver, err := basicTaskListResolver(t)
	require.NoError(t, err)

	checkObject := func(isAnonymous bool) {
		for _, fixture := range schRefVals() {
			sch := new(spec.Schema)
			sch.Ref, _ = spec.NewRef("#/definitions/" + fixture.Type)
			rt, resolveErr := resolver.ResolveSchema(sch, isAnonymous, true)
			require.NoError(t, resolveErr)
			assert.Equal(t, fixture.Expected, rt.GoType)
			assert.False(t, rt.IsAnonymous)
			assert.True(t, rt.IsNullable)
			assert.Equal(t, "object", rt.SwaggerType)
		}
	}
	checkArray := func(isAnonymous bool) {
		for _, fixture := range schRefVals() {
			sch := new(spec.Schema)
			sch.Ref, _ = spec.NewRef("#/definitions/" + fixture.Type)
			rt, resolveErr := resolver.ResolveSchema(new(spec.Schema).CollectionOf(*sch), isAnonymous, true)
			require.NoError(t, resolveErr)
			assert.True(t, rt.IsArray)
			// pointer-to-element rendering now happens inside the type
			// resolver (it used to be applied later, at render time)
			assert.Equal(t, "[]*"+fixture.Expected, rt.GoType)
		}
	}

	// anonymous usage
	checkObject(true)
	checkArray(true)
	// named usage
	checkObject(false)
	checkArray(false)
}
// TestTypeResolver_AdditionalProperties verifies map resolution: a schema
// with only additionalProperties becomes map[string]T, while a schema that
// also declares properties is treated as a complex object (whose GoType
// still renders the map form).
func TestTypeResolver_AdditionalProperties(t *testing.T) {
	_, resolver, err := basicTaskListResolver(t)
	require.NoError(t, err)
	// primitives as additional properties
	for _, val := range schTypeVals() {
		sch := new(spec.Schema)
		sch.Typed(val.Type, val.Format)
		parent := new(spec.Schema)
		parent.AdditionalProperties = new(spec.SchemaOrBool)
		parent.AdditionalProperties.Schema = sch
		rt, err := resolver.ResolveSchema(parent, true, false)
		require.NoError(t, err)
		assert.True(t, rt.IsMap)
		assert.False(t, rt.IsComplexObject)
		assert.Equal(t, "map[string]"+val.Expected, rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
	}
	// array of primitives as additional properties
	for _, val := range schTypeVals() {
		sch := new(spec.Schema)
		sch.Typed(val.Type, val.Format)
		parent := new(spec.Schema)
		parent.AdditionalProperties = new(spec.SchemaOrBool)
		parent.AdditionalProperties.Schema = new(spec.Schema).CollectionOf(*sch)
		rt, err := resolver.ResolveSchema(parent, true, false)
		require.NoError(t, err)
		assert.True(t, rt.IsMap)
		assert.False(t, rt.IsComplexObject)
		assert.Equal(t, "map[string][]"+val.Expected, rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
	}
	// refs as additional properties
	for _, val := range schRefVals() {
		sch := new(spec.Schema)
		sch.Ref, _ = spec.NewRef("#/definitions/" + val.Type)
		parent := new(spec.Schema)
		parent.AdditionalProperties = new(spec.SchemaOrBool)
		parent.AdditionalProperties.Schema = sch
		rt, err := resolver.ResolveSchema(parent, true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsMap)
		assert.False(t, rt.IsComplexObject)
		assert.Equal(t, "map[string]"+val.Expected, rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
	}
	// when additional properties and properties present, it's a complex object
	// primitives as additional properties
	for _, val := range schTypeVals() {
		sch := new(spec.Schema)
		sch.Typed(val.Type, val.Format)
		parent := new(spec.Schema)
		parent.Properties = make(map[string]spec.Schema)
		parent.Properties["id"] = *spec.Int32Property()
		parent.AdditionalProperties = new(spec.SchemaOrBool)
		parent.AdditionalProperties.Schema = sch
		rt, err := resolver.ResolveSchema(parent, true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsComplexObject)
		assert.False(t, rt.IsMap)
		assert.Equal(t, "map[string]"+val.Expected, rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
	}
	// array of primitives as additional properties
	for _, val := range schTypeVals() {
		sch := new(spec.Schema)
		sch.Typed(val.Type, val.Format)
		parent := new(spec.Schema)
		parent.Properties = make(map[string]spec.Schema)
		parent.Properties["id"] = *spec.Int32Property()
		parent.AdditionalProperties = new(spec.SchemaOrBool)
		parent.AdditionalProperties.Schema = new(spec.Schema).CollectionOf(*sch)
		rt, err := resolver.ResolveSchema(parent, true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsComplexObject)
		assert.False(t, rt.IsMap)
		assert.Equal(t, "map[string][]"+val.Expected, rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
	}
	// refs as additional properties
	for _, val := range schRefVals() {
		sch := new(spec.Schema)
		sch.Ref, _ = spec.NewRef("#/definitions/" + val.Type)
		parent := new(spec.Schema)
		parent.Properties = make(map[string]spec.Schema)
		parent.Properties["id"] = *spec.Int32Property()
		parent.AdditionalProperties = new(spec.SchemaOrBool)
		parent.AdditionalProperties.Schema = sch
		rt, err := resolver.ResolveSchema(parent, true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsComplexObject)
		assert.False(t, rt.IsMap)
		assert.Equal(t, "map[string]"+val.Expected, rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
	}
}
// TestTypeResolver_Notables resolves the named array definition "Notables"
// and expects a non-anonymous, non-nullable slice of model pointers.
func TestTypeResolver_Notables(t *testing.T) {
	doc, resolver, loadErr := specResolver(t, "../fixtures/codegen/todolist.models.yml")
	require.NoError(t, loadErr)

	notables := doc.Spec().Definitions["Notables"]
	resolved, err := resolver.ResolveSchema(&notables, false, true)
	require.NoError(t, err)

	assert.True(t, resolved.IsArray)
	assert.False(t, resolved.IsAnonymous)
	assert.False(t, resolved.IsNullable)
	assert.Equal(t, "[]*models.Notable", resolved.GoType)
}
// specResolver loads the swagger document at path and builds a typeResolver
// over it with every definition registered as known. The testing.TB argument
// is unused but kept for signature compatibility with callers.
func specResolver(t testing.TB, path string) (*loads.Document, *typeResolver, error) {
	doc, err := loads.Spec(path)
	if err != nil {
		return nil, nil, err
	}
	resolver := &typeResolver{
		Doc:           doc,
		ModelsPackage: "models",
	}
	resolver.KnownDefs = make(map[string]struct{})
	for name := range doc.Spec().Definitions {
		resolver.KnownDefs[name] = struct{}{}
	}
	return doc, resolver, nil
}
// basicTaskListResolver loads the basic task-list fixture, renames the
// UserCard definition to UserItem via the x-go-name extension, and returns
// a resolver that knows every definition (including extension names).
func basicTaskListResolver(t testing.TB) (*loads.Document, *typeResolver, error) {
	doc, err := loads.Spec("../fixtures/codegen/tasklist.basic.yml")
	if err != nil {
		return nil, nil, err
	}
	swagger := doc.Spec()

	userCard := swagger.Definitions["UserCard"]
	userCard.AddExtension(xGoName, "UserItem")
	swagger.Definitions["UserCard"] = userCard

	resolver := &typeResolver{
		Doc:           doc,
		ModelsPackage: "models",
	}
	resolver.KnownDefs = make(map[string]struct{})
	for name, schema := range swagger.Definitions {
		resolver.KnownDefs[name] = struct{}{}
		if goName, ok := schema.Extensions[xGoName]; ok {
			resolver.KnownDefs[goName.(string)] = struct{}{}
		}
	}
	return doc, resolver, nil
}
// TestTypeResolver_TupleTypes checks that an array whose items hold several
// schemas resolves to a tuple rather than a plain array.
func TestTypeResolver_TupleTypes(t *testing.T) {
	_, resolver, err := basicTaskListResolver(t)
	require.NoError(t, err)

	tuple := new(spec.Schema)
	tuple.Typed("array", "")
	tuple.Items = &spec.SchemaOrArray{
		Schemas: []spec.Schema{
			*spec.StringProperty(),
			*spec.Int64Property(),
			*spec.Float64Property(),
			*spec.BoolProperty(),
			*spec.ArrayProperty(spec.StringProperty()),
			*spec.RefProperty("#/definitions/Comment"),
		},
	}

	resolved, resolveErr := resolver.ResolveSchema(tuple, true, true)
	require.NoError(t, resolveErr)
	assert.False(t, resolved.IsArray)
	assert.True(t, resolved.IsTuple)
}
// TestTypeResolver_AnonymousStructs verifies that inline object schemas
// resolve as anonymous complex objects, and checks how the x-isnullable /
// x-nullable extensions affect their nullability.
func TestTypeResolver_AnonymousStructs(t *testing.T) {
	_, resolver, err := basicTaskListResolver(t)
	require.NoError(t, err)
	// anonymous structs should be accounted for
	parent := new(spec.Schema)
	parent.Typed("object", "")
	parent.Properties = make(map[string]spec.Schema)
	parent.Properties["name"] = *spec.StringProperty()
	parent.Properties["age"] = *spec.Int32Property()
	rt, err := resolver.ResolveSchema(parent, true, true)
	require.NoError(t, err)
	assert.True(t, rt.IsNullable)
	assert.True(t, rt.IsAnonymous)
	assert.True(t, rt.IsComplexObject)
	// explicit x-isnullable keeps it nullable
	parent.Extensions = make(spec.Extensions)
	parent.Extensions[xIsNullable] = true
	rt, err = resolver.ResolveSchema(parent, true, true)
	require.NoError(t, err)
	assert.True(t, rt.IsNullable)
	assert.True(t, rt.IsAnonymous)
	assert.True(t, rt.IsComplexObject)
	// Also test that it's nullable with just x-nullable
	// NOTE(review): despite the comment above, both extensions are set to
	// false here and non-nullability is asserted — confirm the intent.
	parent.Extensions[xIsNullable] = false
	parent.Extensions[xNullable] = false
	rt, err = resolver.ResolveSchema(parent, true, true)
	require.NoError(t, err)
	assert.False(t, rt.IsNullable)
	assert.True(t, rt.IsAnonymous)
	assert.True(t, rt.IsComplexObject)
}
// TestTypeResolver_ObjectType checks resolution of schemas with little or
// no type information: bare/empty objects become interface{} maps, while
// schemas with properties or allOf resolve to the named model type.
func TestTypeResolver_ObjectType(t *testing.T) {
	_, resolver, e := basicTaskListResolver(t)
	require.NoError(t, e)
	resolver.ModelName = "TheModel"
	resolver.KnownDefs["TheModel"] = struct{}{}
	defer func() { resolver.ModelName = "" }()
	// very poor schema definitions (as in none)
	types := []string{"object", ""}
	for _, tpe := range types {
		sch := new(spec.Schema)
		sch.Typed(tpe, "")
		// no properties at all: resolves to a free-form map
		rt, err := resolver.ResolveSchema(sch, true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsMap)
		assert.False(t, rt.IsComplexObject)
		assert.Equal(t, "interface{}", rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
		// with properties: resolves to the named model
		sch.Properties = make(map[string]spec.Schema)
		var ss spec.Schema
		sch.Properties["tags"] = *(&ss).CollectionOf(*spec.StringProperty())
		rt, err = resolver.ResolveSchema(sch, false, true)
		require.NoError(t, err)
		assert.True(t, rt.IsComplexObject)
		assert.False(t, rt.IsMap)
		assert.Equal(t, "models.TheModel", rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
		// allOf wrapping an empty object: still a named complex object
		sch.Properties = nil
		nsch := new(spec.Schema)
		nsch.Typed(tpe, "")
		nsch.AllOf = []spec.Schema{*sch}
		rt, err = resolver.ResolveSchema(nsch, false, true)
		require.NoError(t, err)
		assert.True(t, rt.IsComplexObject)
		assert.False(t, rt.IsMap)
		assert.Equal(t, "models.TheModel", rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
		// completely empty schema: free-form map
		sch = new(spec.Schema)
		rt, err = resolver.ResolveSchema(sch, true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsMap)
		assert.False(t, rt.IsComplexObject)
		assert.Equal(t, "interface{}", rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
		// empty schema with an allOf of a typed object: named complex object
		sch = new(spec.Schema)
		var sp spec.Schema
		sp.Typed("object", "")
		sch.AllOf = []spec.Schema{sp}
		rt, err = resolver.ResolveSchema(sch, true, true)
		require.NoError(t, err)
		assert.True(t, rt.IsComplexObject)
		assert.False(t, rt.IsMap)
		assert.Equal(t, "models.TheModel", rt.GoType)
		assert.Equal(t, "object", rt.SwaggerType)
	}
}
// TestTypeResolver_AliasTypes resolves the named "Currency" definition with
// an empty models package and expects an aliased primitive string type.
func TestTypeResolver_AliasTypes(t *testing.T) {
	doc, resolver, err := basicTaskListResolver(t)
	require.NoError(t, err)

	resolver.ModelsPackage = ""
	resolver.ModelName = "Currency"
	defer func() {
		resolver.ModelName = ""
		resolver.ModelsPackage = "models"
	}()

	currency := doc.Spec().Definitions[resolver.ModelName]
	resolved, resolveErr := resolver.ResolveSchema(&currency, false, true)
	require.NoError(t, resolveErr)

	assert.False(t, resolved.IsAnonymous)
	assert.True(t, resolved.IsAliased)
	assert.True(t, resolved.IsPrimitive)
	assert.Equal(t, "Currency", resolved.GoType)
	assert.Equal(t, "string", resolved.AliasedType)
}
// assertPrimitiveResolve checks that a resolved type carries the expected
// swagger type, swagger format and Go type, with a descriptive message on
// failure. (Fixes the garbled "to for the" wording in the messages.)
func assertPrimitiveResolve(t testing.TB, tpe, tfmt, exp string, tr resolvedType) {
	assert.Equal(t, tpe, tr.SwaggerType, fmt.Sprintf("expected %q (%q, %q) for the swagger type but got %q", tpe, tfmt, exp, tr.SwaggerType))
	assert.Equal(t, tfmt, tr.SwaggerFormat, fmt.Sprintf("expected %q (%q, %q) for the swagger format but got %q", tfmt, tpe, exp, tr.SwaggerFormat))
	assert.Equal(t, exp, tr.GoType, fmt.Sprintf("expected %q (%q, %q) for the go type but got %q", exp, tpe, tfmt, tr.GoType))
}
// TestTypeResolver_ExistingModel verifies resolution of definitions mapped
// to an existing external Go type (x-go-type): both the direct definition
// and an array property referencing it.
func TestTypeResolver_ExistingModel(t *testing.T) {
	doc, err := loads.Spec("../fixtures/codegen/existing-model.yml")
	// Check the load error before dereferencing doc: on failure doc may be
	// nil and newTypeResolver would panic. (The original checked too late.)
	require.NoError(t, err)
	resolver := newTypeResolver("model", "", doc)

	// the definition itself resolves to the external package type
	def := doc.Spec().Definitions["JsonWebKey"]
	tpe, pkg, alias := resolver.knownDefGoType("JsonWebKey", def, nil)
	assert.Equal(t, "jwk.Key", tpe)
	assert.Equal(t, "github.com/user/package", pkg)
	assert.Equal(t, "jwk", alias)
	rest, err := resolver.ResolveSchema(&def, false, true)
	require.NoError(t, err)
	assert.False(t, rest.IsMap)
	assert.False(t, rest.IsArray)
	assert.False(t, rest.IsTuple)
	assert.False(t, rest.IsStream)
	assert.True(t, rest.IsAliased)
	assert.False(t, rest.IsBaseType)
	assert.False(t, rest.IsInterface)
	assert.True(t, rest.IsNullable)
	assert.False(t, rest.IsPrimitive)
	assert.False(t, rest.IsAnonymous)
	assert.True(t, rest.IsComplexObject)
	assert.False(t, rest.IsCustomFormatter)
	assert.Equal(t, "jwk.Key", rest.GoType)
	assert.Equal(t, "github.com/user/package", rest.Pkg)
	assert.Equal(t, "jwk", rest.PkgAlias)

	// an array property of the external type resolves to a pointer slice
	def = doc.Spec().Definitions["JsonWebKeySet"].Properties["keys"]
	rest, err = resolver.ResolveSchema(&def, false, true)
	require.NoError(t, err)
	assert.False(t, rest.IsMap)
	assert.True(t, rest.IsArray)
	assert.False(t, rest.IsTuple)
	assert.False(t, rest.IsStream)
	assert.False(t, rest.IsAliased)
	assert.False(t, rest.IsBaseType)
	assert.False(t, rest.IsInterface)
	assert.False(t, rest.IsNullable)
	assert.False(t, rest.IsPrimitive)
	assert.False(t, rest.IsAnonymous)
	assert.False(t, rest.IsComplexObject)
	assert.False(t, rest.IsCustomFormatter)
	assert.Equal(t, "[]*jwk.Key", rest.GoType)
	assert.Equal(t, "", rest.Pkg)
	assert.Equal(t, "", rest.PkgAlias)
}
| go-swagger/go-swagger | generator/typeresolver_test.go | GO | apache-2.0 | 20,309 |
package router
import (
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"strconv"
"strings"
"github.com/golang/glog"
"github.com/spf13/cobra"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
kclient "k8s.io/kubernetes/pkg/client/unversioned"
kclientcmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/fields"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
kutil "k8s.io/kubernetes/pkg/util"
"github.com/openshift/origin/pkg/cmd/util/clientcmd"
"github.com/openshift/origin/pkg/cmd/util/variable"
configcmd "github.com/openshift/origin/pkg/config/cmd"
dapi "github.com/openshift/origin/pkg/deploy/api"
"github.com/openshift/origin/pkg/generate/app"
"github.com/openshift/origin/pkg/security/admission"
)
const (
	// Long help text for the router command.
	// NOTE(review): the text says "1 if the registry does not exist" where
	// it likely means "router" (copied from the registry command) — confirm.
	routerLong = `
Install or configure a router
This command helps to setup a router to take edge traffic and balance it to
your application. With no arguments, the command will check for an existing router
service called 'router' and create one if it does not exist. If you want to test whether
a router has already been created add the --dry-run flag and the command will exit with
1 if the registry does not exist.
If a router does not exist with the given name, this command will
create a deployment configuration and service that will run the router. If you are
running your router in production, you should pass --replicas=2 or higher to ensure
you have failover protection.`
	// Usage examples; %[1]s is the parent command, %[2]s this subcommand.
	routerExample = `  # Check the default router ("router")
$ %[1]s %[2]s --dry-run
# See what the router would look like if created
$ %[1]s %[2]s -o json --credentials=/path/to/openshift-router.kubeconfig --service-account=myserviceaccount
# Create a router if it does not exist
$ %[1]s %[2]s router-west --credentials=/path/to/openshift-router.kubeconfig --service-account=myserviceaccount --replicas=2
# Use a different router image and see the router configuration
$ %[1]s %[2]s region-west -o yaml --credentials=/path/to/openshift-router.kubeconfig --service-account=myserviceaccount --images=myrepo/somerouter:mytag
# Run the router with a hint to the underlying implementation to _not_ expose statistics.
$ %[1]s %[2]s router-west --credentials=/path/to/openshift-router.kubeconfig --service-account=myserviceaccount --stats-port=0
`
	// Names and mount points for the router's secret volumes.
	secretsVolumeName = "secret-volume"
	secretsPath       = "/etc/secret-volume"
	// External-host private key: secret name, volume name, and the file
	// name/path it is mounted at inside the router pod.
	privkeySecretName = "external-host-private-key-secret"
	privkeyVolumeName = "external-host-private-key-volume"
	privkeyName       = "router.pem"
	privkeyPath       = secretsPath + "/" + privkeyName
)
// RouterConfig contains the configuration parameters necessary to
// launch a router, including general parameters, type of router, and
// type-specific parameters.
type RouterConfig struct {
	// Type is the router type, which determines which plugin to use (f5
	// or template).
	Type string
	// ImageTemplate specifies the image from which the router will be created.
	ImageTemplate variable.ImageTemplate
	// Ports specifies the container ports for the router.
	Ports string
	// Replicas specifies the initial replica count for the router.
	Replicas int
	// Labels specifies the label or labels that will be assigned to the router
	// pod.
	Labels string
	// DryRun specifies that the router command should not launch a router but
	// should instead exit with code 1 to indicate if a router is already running
	// or code 0 otherwise.
	DryRun bool
	// Credentials specifies the path to a .kubeconfig file with the credentials
	// with which the router may contact the master.
	Credentials string
	// DefaultCertificate holds the certificate that will be used if no more
	// specific certificate is found. This is typically a wildcard certificate.
	DefaultCertificate string
	// Selector specifies a label or set of labels that determines the nodes on
	// which the router pod can be scheduled.
	Selector string
	// StatsPort specifies a port at which the router can provide statistics.
	StatsPort int
	// StatsPassword specifies a password required to authenticate connections to
	// the statistics port.
	StatsPassword string
	// StatsUsername specifies a username required to authenticate connections to
	// the statistics port.
	StatsUsername string
	// HostNetwork specifies whether to configure the router pod to use the host's
	// network namespace or the container's.
	HostNetwork bool
	// ServiceAccount specifies the service account under which the router will
	// run.
	ServiceAccount string
	// ExternalHost specifies the hostname or IP address of an external host for
	// router plugins that integrate with an external load balancer (such as f5).
	ExternalHost string
	// ExternalHostUsername specifies the username for authenticating with the
	// external host.
	ExternalHostUsername string
	// ExternalHostPassword specifies the password for authenticating with the
	// external host.
	ExternalHostPassword string
	// ExternalHostHttpVserver specifies the virtual server for HTTP connections.
	ExternalHostHttpVserver string
	// ExternalHostHttpsVserver specifies the virtual server for HTTPS connections.
	ExternalHostHttpsVserver string
	// ExternalHostPrivateKey specifies an SSH private key for authenticating with
	// the external host.
	ExternalHostPrivateKey string
	// ExternalHostInsecure specifies that the router should skip strict
	// certificate verification when connecting to the external host.
	ExternalHostInsecure bool
	// ExternalHostPartitionPath specifies the partition path to use.
	// This is used by some routers to create access control
	// boundaries for users and applications.
	ExternalHostPartitionPath string
}
// errExit signals RunCmdRouter's caller to exit with a non-zero status
// without printing an additional error message (the error has already
// been reported by the bulk creation machinery).
var errExit = fmt.Errorf("exit")

const (
	// defaultLabel is a sentinel; when the --labels flag is left at this
	// value the command labels the router with router=<actual name>.
	defaultLabel = "router=<name>"

	// Default port numbers to expose and bind/listen on.
	defaultPorts = "80:80,443:443"
)
// NewCmdRouter implements the OpenShift CLI router command.
func NewCmdRouter(f *clientcmd.Factory, parentName, name string, out io.Writer) *cobra.Command {
	// Defaults applied before flag parsing; the flags below overwrite them.
	cfg := &RouterConfig{
		ImageTemplate: variable.NewDefaultImageTemplate(),
		Labels:        defaultLabel,
		Ports:         defaultPorts,
		Replicas:      1,
		StatsUsername: "admin",
		StatsPort:     1936,
		HostNetwork:   true,
	}
	cmd := &cobra.Command{
		Use:     fmt.Sprintf("%s [NAME]", name),
		Short:   "Install a router",
		Long:    routerLong,
		Example: fmt.Sprintf(routerExample, parentName, name),
		Run: func(cmd *cobra.Command, args []string) {
			err := RunCmdRouter(f, cmd, out, cfg, args)
			// errExit means "already reported": exit non-zero silently.
			if err != errExit {
				cmdutil.CheckErr(err)
			} else {
				os.Exit(1)
			}
		},
	}
	cmd.Flags().StringVar(&cfg.Type, "type", "haproxy-router", "The type of router to use - if you specify --images this flag may be ignored.")
	cmd.Flags().StringVar(&cfg.ImageTemplate.Format, "images", cfg.ImageTemplate.Format, "The image to base this router on - ${component} will be replaced with --type")
	cmd.Flags().BoolVar(&cfg.ImageTemplate.Latest, "latest-images", cfg.ImageTemplate.Latest, "If true, attempt to use the latest images for the router instead of the latest release.")
	cmd.Flags().StringVar(&cfg.Ports, "ports", cfg.Ports, "A comma delimited list of ports or port pairs to expose on the router pod. The default is set for HAProxy.")
	cmd.Flags().IntVar(&cfg.Replicas, "replicas", cfg.Replicas, "The replication factor of the router; commonly 2 when high availability is desired.")
	cmd.Flags().StringVar(&cfg.Labels, "labels", cfg.Labels, "A set of labels to uniquely identify the router and its components.")
	cmd.Flags().BoolVar(&cfg.DryRun, "dry-run", cfg.DryRun, "Exit with code 1 if the specified router does not exist.")
	// Kept only for backwards compatibility; creation is now the default.
	cmd.Flags().Bool("create", false, "deprecated; this is now the default behavior")
	cmd.Flags().StringVar(&cfg.Credentials, "credentials", "", "Path to a .kubeconfig file that will contain the credentials the router should use to contact the master.")
	cmd.Flags().StringVar(&cfg.DefaultCertificate, "default-cert", cfg.DefaultCertificate, "Optional path to a certificate file that be used as the default certificate. The file should contain the cert, key, and any CA certs necessary for the router to serve the certificate.")
	cmd.Flags().StringVar(&cfg.Selector, "selector", cfg.Selector, "Selector used to filter nodes on deployment. Used to run routers on a specific set of nodes.")
	cmd.Flags().StringVar(&cfg.ServiceAccount, "service-account", cfg.ServiceAccount, "Name of the service account to use to run the router pod.")
	cmd.Flags().IntVar(&cfg.StatsPort, "stats-port", cfg.StatsPort, "If the underlying router implementation can provide statistics this is a hint to expose it on this port. Specify 0 if you want to turn off exposing the statistics.")
	cmd.Flags().StringVar(&cfg.StatsPassword, "stats-password", cfg.StatsPassword, "If the underlying router implementation can provide statistics this is the requested password for auth. If not set a password will be generated.")
	cmd.Flags().StringVar(&cfg.StatsUsername, "stats-user", cfg.StatsUsername, "If the underlying router implementation can provide statistics this is the requested username for auth.")
	cmd.Flags().BoolVar(&cfg.HostNetwork, "host-network", cfg.HostNetwork, "If true (the default), then use host networking rather than using a separate container network stack.")
	// Flags below configure external load balancer integrations (e.g. f5).
	cmd.Flags().StringVar(&cfg.ExternalHost, "external-host", cfg.ExternalHost, "If the underlying router implementation connects with an external host, this is the external host's hostname.")
	cmd.Flags().StringVar(&cfg.ExternalHostUsername, "external-host-username", cfg.ExternalHostUsername, "If the underlying router implementation connects with an external host, this is the username for authenticating with the external host.")
	cmd.Flags().StringVar(&cfg.ExternalHostPassword, "external-host-password", cfg.ExternalHostPassword, "If the underlying router implementation connects with an external host, this is the password for authenticating with the external host.")
	cmd.Flags().StringVar(&cfg.ExternalHostHttpVserver, "external-host-http-vserver", cfg.ExternalHostHttpVserver, "If the underlying router implementation uses virtual servers, this is the name of the virtual server for HTTP connections.")
	cmd.Flags().StringVar(&cfg.ExternalHostHttpsVserver, "external-host-https-vserver", cfg.ExternalHostHttpsVserver, "If the underlying router implementation uses virtual servers, this is the name of the virtual server for HTTPS connections.")
	cmd.Flags().StringVar(&cfg.ExternalHostPrivateKey, "external-host-private-key", cfg.ExternalHostPrivateKey, "If the underlying router implementation requires an SSH private key, this is the path to the private key file.")
	cmd.Flags().BoolVar(&cfg.ExternalHostInsecure, "external-host-insecure", cfg.ExternalHostInsecure, "If the underlying router implementation connects with an external host over a secure connection, this causes the router to skip strict certificate verification with the external host.")
	cmd.Flags().StringVar(&cfg.ExternalHostPartitionPath, "external-host-partition-path", cfg.ExternalHostPartitionPath, "If the underlying router implementation uses partitions for control boundaries, this is the path to use for that partition.")
	cmd.MarkFlagFilename("credentials", "kubeconfig")
	cmdutil.AddPrinterFlags(cmd)
	return cmd
}
// loadData reads the named file and returns its contents. An empty file
// name is not an error: it yields an empty slice, which lets callers pass
// optional file flags straight through.
func loadData(file string) ([]byte, error) {
	if file == "" {
		return []byte{}, nil
	}
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return []byte{}, err
	}
	return data, nil
}
// loadCert reads the certificate file at the given path and returns its
// contents as a string; an empty path yields an empty string.
func loadCert(file string) (string, error) {
	data, err := loadData(file)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// Read the specified key file and return it as a bytes array.
// An empty path yields an empty slice (see loadData).
func loadKey(file string) ([]byte, error) {
	return loadData(file)
}
// generateSecretsConfig generates any Secret and Volume objects, such
// as SSH private keys, that are necessary for the router container.
// It returns the secrets to create plus the pod volumes/mounts that
// expose them; all three slices are empty when no private key was given.
func generateSecretsConfig(cfg *RouterConfig, kClient *kclient.Client,
	namespace string) ([]*kapi.Secret, []kapi.Volume, []kapi.VolumeMount,
	error) {
	secrets := []*kapi.Secret{}
	volumes := []kapi.Volume{}
	mounts := []kapi.VolumeMount{}
	if len(cfg.ExternalHostPrivateKey) != 0 {
		privkeyData, err := loadKey(cfg.ExternalHostPrivateKey)
		if err != nil {
			return secrets, volumes, mounts, fmt.Errorf("error reading private key"+
				" for external host: %v", err)
		}
		// Annotate the secret with the service account's identity so the
		// secret is associated with (and mountable by) that account.
		serviceAccount, err := kClient.ServiceAccounts(namespace).Get(cfg.ServiceAccount)
		if err != nil {
			return secrets, volumes, mounts, fmt.Errorf("error looking up"+
				" service account %s: %v", cfg.ServiceAccount, err)
		}
		privkeySecret := &kapi.Secret{
			ObjectMeta: kapi.ObjectMeta{
				Name: privkeySecretName,
				Annotations: map[string]string{
					kapi.ServiceAccountNameKey: serviceAccount.Name,
					kapi.ServiceAccountUIDKey:  string(serviceAccount.UID),
				},
			},
			Data: map[string][]byte{privkeyName: privkeyData},
		}
		secrets = append(secrets, privkeySecret)
	}
	// We need a secrets volume and mount iff we have secrets.
	if len(secrets) != 0 {
		secretsVolume := kapi.Volume{
			Name: secretsVolumeName,
			VolumeSource: kapi.VolumeSource{
				Secret: &kapi.SecretVolumeSource{
					SecretName: privkeySecretName,
				},
			},
		}
		secretsMount := kapi.VolumeMount{
			Name:      secretsVolumeName,
			ReadOnly:  true,
			MountPath: secretsPath,
		}
		volumes = []kapi.Volume{secretsVolume}
		mounts = []kapi.VolumeMount{secretsMount}
	}
	return secrets, volumes, mounts, nil
}
// generateLivenessProbeConfig returns a TCP liveness probe against the
// first container port when the router type is haproxy-router, and nil
// for any other router type (no probe is configured for those).
func generateLivenessProbeConfig(cfg *RouterConfig,
	ports []kapi.ContainerPort) *kapi.Probe {
	if cfg.Type != "haproxy-router" {
		return nil
	}
	return &kapi.Probe{
		Handler: kapi.Handler{
			TCPSocket: &kapi.TCPSocketAction{
				Port: kutil.IntOrString{
					IntVal: ports[0].ContainerPort,
				},
			},
		},
		// Give haproxy time to start before the first probe.
		InitialDelaySeconds: 10,
	}
}
// RunCmdRouter contains all the necessary functionality for the
// OpenShift CLI router command. It checks for an existing router service
// and, unless one exists (or -o/--dry-run alters the flow), generates and
// creates the deployment config, service and secrets that run the router.
func RunCmdRouter(f *clientcmd.Factory, cmd *cobra.Command, out io.Writer, cfg *RouterConfig, args []string) error {
	// The single optional positional argument names the router; default "router".
	var name string
	switch len(args) {
	case 0:
		name = "router"
	case 1:
		name = args[0]
	default:
		return cmdutil.UsageError(cmd, "You may pass zero or one arguments to provide a name for the router")
	}
	// NOTE(review): presumably the username feeds a "user:password" style
	// credential, which a ':' would corrupt - confirm against the router image.
	if len(cfg.StatsUsername) > 0 {
		if strings.Contains(cfg.StatsUsername, ":") {
			return cmdutil.UsageError(cmd, "username %s must not contain ':'", cfg.StatsUsername)
		}
	}
	ports, err := app.ContainerPortsFromString(cfg.Ports)
	if err != nil {
		glog.Fatal(err)
	}
	// For the host networking case, ensure the ports match.
	if cfg.HostNetwork {
		for i := 0; i < len(ports); i++ {
			if ports[i].ContainerPort != ports[i].HostPort {
				return cmdutil.UsageError(cmd, "For host networking mode, please ensure that the container [%v] and host [%v] ports match", ports[i].ContainerPort, ports[i].HostPort)
			}
		}
	}
	// A stats port of 0 disables exposing statistics entirely.
	if cfg.StatsPort > 0 {
		ports = append(ports, kapi.ContainerPort{
			Name:          "stats",
			HostPort:      cfg.StatsPort,
			ContainerPort: cfg.StatsPort,
			Protocol:      kapi.ProtocolTCP,
		})
	}
	// All created components carry this label unless --labels overrides it.
	label := map[string]string{"router": name}
	if cfg.Labels != defaultLabel {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Labels, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in %q", cfg.Labels)
		}
		label = valid
	}
	// Optional node selector restricting where the router pod may run.
	nodeSelector := map[string]string{}
	if len(cfg.Selector) > 0 {
		valid, remove, err := app.LabelsFromSpec(strings.Split(cfg.Selector, ","))
		if err != nil {
			glog.Fatal(err)
		}
		if len(remove) > 0 {
			return cmdutil.UsageError(cmd, "You may not pass negative labels in selector %q", cfg.Selector)
		}
		nodeSelector = valid
	}
	image := cfg.ImageTemplate.ExpandOrDie(cfg.Type)
	namespace, _, err := f.OpenShiftClientConfig.Namespace()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}
	_, output, err := cmdutil.PrinterForCommand(cmd)
	if err != nil {
		return fmt.Errorf("unable to configure printer: %v", err)
	}
	// When -o was requested we always generate objects; otherwise generate
	// only if no service with this name exists yet.
	generate := output
	if !generate {
		_, err = kClient.Services(namespace).Get(name)
		if err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("can't check for existing router %q: %v", name, err)
			}
			generate = true
		}
	}
	if generate {
		// --dry-run without -o only reports existence via the exit status.
		if cfg.DryRun && !output {
			return fmt.Errorf("router %q does not exist (no service)", name)
		}
		if len(cfg.ServiceAccount) == 0 {
			return fmt.Errorf("router could not be created; you must specify a service account with --service-account")
		}
		err := validateServiceAccount(kClient, namespace, cfg.ServiceAccount)
		if err != nil {
			return fmt.Errorf("router could not be created; %v", err)
		}
		// create new router
		if len(cfg.Credentials) == 0 {
			return fmt.Errorf("router could not be created; you must specify a .kubeconfig file path containing credentials for connecting the router to the master with --credentials")
		}
		// Load and validate the kubeconfig the router will use to reach the
		// master, materializing its TLS files into in-memory data.
		clientConfigLoadingRules := &kclientcmd.ClientConfigLoadingRules{ExplicitPath: cfg.Credentials, Precedence: []string{}}
		credentials, err := clientConfigLoadingRules.Load()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be loaded: %v", cfg.Credentials, err)
		}
		config, err := kclientcmd.NewDefaultClientConfig(*credentials, &kclientcmd.ConfigOverrides{}).ClientConfig()
		if err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not be used: %v", cfg.Credentials, err)
		}
		if err := kclient.LoadTLSFiles(config); err != nil {
			return fmt.Errorf("router could not be created; the provided credentials %q could not load certificate info: %v", cfg.Credentials, err)
		}
		insecure := "false"
		if config.Insecure {
			insecure = "true"
		}
		// Optional default (typically wildcard) serving certificate.
		defaultCert, err := loadCert(cfg.DefaultCertificate)
		if err != nil {
			return fmt.Errorf("router could not be created; error reading default certificate file: %v", err)
		}
		// Generate a stats password when none was supplied and tell the user.
		if len(cfg.StatsPassword) == 0 {
			cfg.StatsPassword = generateStatsPassword()
			fmt.Fprintf(out, "password for stats user %s has been set to %s\n", cfg.StatsUsername, cfg.StatsPassword)
		}
		// Environment consumed by the router image.
		env := app.Environment{
			"OPENSHIFT_MASTER":                    config.Host,
			"OPENSHIFT_CA_DATA":                   string(config.CAData),
			"OPENSHIFT_KEY_DATA":                  string(config.KeyData),
			"OPENSHIFT_CERT_DATA":                 string(config.CertData),
			"OPENSHIFT_INSECURE":                  insecure,
			"DEFAULT_CERTIFICATE":                 defaultCert,
			"ROUTER_SERVICE_NAME":                 name,
			"ROUTER_SERVICE_NAMESPACE":            namespace,
			"ROUTER_EXTERNAL_HOST_HOSTNAME":       cfg.ExternalHost,
			"ROUTER_EXTERNAL_HOST_USERNAME":       cfg.ExternalHostUsername,
			"ROUTER_EXTERNAL_HOST_PASSWORD":       cfg.ExternalHostPassword,
			"ROUTER_EXTERNAL_HOST_HTTP_VSERVER":   cfg.ExternalHostHttpVserver,
			"ROUTER_EXTERNAL_HOST_HTTPS_VSERVER":  cfg.ExternalHostHttpsVserver,
			"ROUTER_EXTERNAL_HOST_INSECURE":       strconv.FormatBool(cfg.ExternalHostInsecure),
			"ROUTER_EXTERNAL_HOST_PARTITION_PATH": cfg.ExternalHostPartitionPath,
			"ROUTER_EXTERNAL_HOST_PRIVKEY":        privkeyPath,
			"STATS_PORT":                          strconv.Itoa(cfg.StatsPort),
			"STATS_USERNAME":                      cfg.StatsUsername,
			"STATS_PASSWORD":                      cfg.StatsPassword,
		}
		// NOTE(review): the negative UpdatePercent looks like it asks the
		// rolling strategy to scale down in 25% steps - confirm against the
		// deploy API's RollingDeploymentStrategyParams documentation.
		updatePercent := int(-25)
		secrets, volumes, mounts, err := generateSecretsConfig(cfg, kClient,
			namespace)
		if err != nil {
			return fmt.Errorf("router could not be created: %v", err)
		}
		livenessProbe := generateLivenessProbeConfig(cfg, ports)
		objects := []runtime.Object{
			&dapi.DeploymentConfig{
				ObjectMeta: kapi.ObjectMeta{
					Name:   name,
					Labels: label,
				},
				Triggers: []dapi.DeploymentTriggerPolicy{
					{Type: dapi.DeploymentTriggerOnConfigChange},
				},
				Template: dapi.DeploymentTemplate{
					Strategy: dapi.DeploymentStrategy{
						Type:          dapi.DeploymentStrategyTypeRolling,
						RollingParams: &dapi.RollingDeploymentStrategyParams{UpdatePercent: &updatePercent},
					},
					ControllerTemplate: kapi.ReplicationControllerSpec{
						Replicas: cfg.Replicas,
						Selector: label,
						Template: &kapi.PodTemplateSpec{
							ObjectMeta: kapi.ObjectMeta{Labels: label},
							Spec: kapi.PodSpec{
								HostNetwork:        cfg.HostNetwork,
								ServiceAccountName: cfg.ServiceAccount,
								NodeSelector:       nodeSelector,
								Containers: []kapi.Container{
									{
										Name:            "router",
										Image:           image,
										Ports:           ports,
										Env:             env.List(),
										LivenessProbe:   livenessProbe,
										ImagePullPolicy: kapi.PullIfNotPresent,
										VolumeMounts:    mounts,
									},
								},
								Volumes: volumes,
							},
						},
					},
				},
			},
		}
		// Register generated secrets with the service account so the router
		// pod is allowed to mount them.
		if len(secrets) != 0 {
			serviceAccount, err := kClient.ServiceAccounts(namespace).Get(cfg.ServiceAccount)
			if err != nil {
				return fmt.Errorf("error looking up service account %s: %v",
					cfg.ServiceAccount, err)
			}
			for _, secret := range secrets {
				objects = append(objects, secret)
				serviceAccount.Secrets = append(serviceAccount.Secrets,
					kapi.ObjectReference{Name: secret.Name})
			}
			_, err = kClient.ServiceAccounts(namespace).Update(serviceAccount)
			if err != nil {
				return fmt.Errorf("error adding secret key to service account %s: %v",
					cfg.ServiceAccount, err)
			}
		}
		objects = app.AddServices(objects, true)
		// TODO: label all created objects with the same label - router=<name>
		list := &kapi.List{Items: objects}
		// -o/--output: print the objects instead of creating them.
		if output {
			if err := f.PrintObject(cmd, list, out); err != nil {
				return fmt.Errorf("Unable to print object: %v", err)
			}
			return nil
		}
		mapper, typer := f.Factory.Object()
		bulk := configcmd.Bulk{
			Mapper:            mapper,
			Typer:             typer,
			RESTClientFactory: f.Factory.RESTClient,
			After:             configcmd.NewPrintNameOrErrorAfter(out, os.Stderr),
		}
		// Errors were already printed by the After hook; errExit just makes
		// the command exit non-zero without duplicating the message.
		if errs := bulk.Create(list, namespace); len(errs) != 0 {
			return errExit
		}
		return nil
	}
	fmt.Fprintf(out, "Router %q service exists\n", name)
	return nil
}
// generateStatsPassword creates a random 10-character alphanumeric
// password for the router statistics endpoint using the shared math/rand
// source (not cryptographically strong).
func generateStatsPassword() string {
	const allowed = "abcdefghijlkmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
	buf := make([]byte, 10)
	for i := range buf {
		buf[i] = allowed[rand.Intn(len(allowed))]
	}
	return string(buf)
}
// validateServiceAccount verifies that at least one security context
// constraint applying to the given service account allows host ports,
// which the router requires. It returns nil when such an SCC exists.
func validateServiceAccount(kClient *kclient.Client, ns string, sa string) error {
	// Fetch every cluster SCC and filter down to those covering the account.
	sccList, err := kClient.SecurityContextConstraints().List(labels.Everything(), fields.Everything())
	if err != nil {
		return fmt.Errorf("unable to validate service account %v", err)
	}
	user := serviceaccount.UserInfo(ns, sa, "")
	for i := range sccList.Items {
		scc := &sccList.Items[i]
		if admission.ConstraintAppliesTo(scc, user) && scc.AllowHostPorts {
			return nil
		}
	}
	return fmt.Errorf("unable to validate service account, host ports are forbidden")
}
| burmanm/origin | pkg/cmd/admin/router/router.go | GO | apache-2.0 | 23,695 |
using Autofac;
using DFWin.Core;
namespace DFWin
{
public static class Setup
{
public static IContainer CreateIoC()
{
var containerBuilder = new ContainerBuilder();
containerBuilder.RegisterModule<GameModule>();
containerBuilder.RegisterModule<CoreModule>();
return containerBuilder.Build();
}
}
}
| michaelbradley91/DFWin | DFWin/DFWin/Setup.cs | C# | apache-2.0 | 406 |
import { resolve, join } from 'path'
import merge from 'webpack-merge'
import parts from './webpack/parts'
// Default the webpack-dev-server host/port when the environment does not
// provide them.
if (process.env.WDS_HOST === undefined) process.env.WDS_HOST = 'localhost'
if (process.env.WDS_PORT === undefined) process.env.WDS_PORT = 3001
const isVendor = ({ resource }) => resource && resource.indexOf('node_modules') >= 0 && resource.match(/\.js$/)
// Directory layout shared by all of the configuration fragments below.
const PATHS = {
  root: resolve(__dirname),
  sources: join(__dirname, 'src'),
  build: join(__dirname, 'build'),
  // Locations loaders and linters must never process.
  exclude: [
    join(__dirname, 'build'),
    /node_modules/,
  ],
}
// Configuration shared by development and production builds: linting,
// asset/JS loading and output layout.
const commonConfig = merge([
  {
    context: PATHS.sources,
    output: {
      path: PATHS.build,
      filename: '[name].js',
      publicPath: '/',
    },
  },
  parts.lintStyles({ include: PATHS.sources }),
  parts.lintJavascript({ include: PATHS.sources }),
  parts.loadHtml(),
  parts.loadAssets(),
  parts.loadJavascript({ include: PATHS.sources, exclude: PATHS.exclude }),
  parts.namedModulesPlugin(),
  parts.noErrorsPlugin(),
])
// Development-only additions: inline styles, the dev server and fast
// eval-based source maps.
const developmentConfig = merge([
  {
    output: { pathinfo: true },
  },
  parts.loadStyles({ include: PATHS.sources, exclude: PATHS.exclude }),
  // NOTE(review): host/port are hard-coded here even though WDS_HOST and
  // WDS_PORT are defaulted at the top of this file - confirm which is intended.
  parts.devServer({ host: 'localhost', port: 3001 }),
  parts.generateSourceMaps('cheap-module-eval-source-map'),
])
// Production-only additions: content hashing, minification, extracted
// styles and vendor/manifest chunk splitting.
const productionConfig = merge([
  {
    output: {
      chunkFilename: '[name].[chunkhash:8].js',
      filename: '[name].[chunkhash:8].js',
    },
    // Warn when bundles exceed these byte budgets.
    performance: {
      hints: 'warning',
      maxEntrypointSize: 100000,
      maxAssetSize: 450000,
    },
  },
  parts.cleanPlugin({ path: PATHS.build, root: PATHS.root }),
  parts.definePlugin({
    'process.env': {
      NODE_ENV: JSON.stringify('production'),
    },
  }),
  parts.minifyJavascript(),
  parts.extractStyles(),
  // Split third-party modules into "vendor" and keep the webpack runtime in
  // a stable "manifest" chunk for long-term caching.
  parts.extractJavascript([
    { name: 'vendor', chunks: [ 'app' ], minChunks: isVendor },
    { name: 'manifest', minChunks: Infinity },
  ]),
  parts.hashedModuleIdsPlugin(),
  parts.generateSourceMaps('source-map'),
])
// Entry point invoked by webpack with the --env value
// ('development' or 'production'); assembles the final configuration.
export default (env) => {
  process.env.NODE_ENV = env
  process.env.BABEL_ENV = env
  const isDevelopment = env === 'development'
  const config = merge([
    parts.page({
      title: 'React Skellington Test',
      template: 'index.ejs',
      entry: {
        // Prepend the hot-reload entries in development only.
        app: (
          isDevelopment ?
          parts.hotloader() : []
        ).concat([ './client/index.js' ]),
      },
    }),
    commonConfig,
    isDevelopment ?
    developmentConfig : productionConfig,
  ])
  // console.dir(config, { depth: null, colors: true })
  return config
}
| buildit/bookit-web | webpack.config.babel.js | JavaScript | apache-2.0 | 2,535 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.TextFieldMapper;
import java.io.Closeable;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import static java.util.Collections.unmodifiableMap;
/**
 * Per-index registry of the configured analysis components - analyzers,
 * tokenizers, char filters and token filters - wired together from the
 * index settings. Also resolves the special {@code default},
 * {@code default_search} and {@code default_search_quote} analyzers.
 */
public class AnalysisService extends AbstractIndexComponent implements Closeable {

    // Immutable lookups keyed by the component name from the index settings.
    private final Map<String, NamedAnalyzer> analyzers;
    private final Map<String, TokenizerFactory> tokenizers;
    private final Map<String, CharFilterFactory> charFilters;
    private final Map<String, TokenFilterFactory> tokenFilters;

    // Fallbacks used when a field does not configure its own analyzer.
    private final NamedAnalyzer defaultIndexAnalyzer;
    private final NamedAnalyzer defaultSearchAnalyzer;
    private final NamedAnalyzer defaultSearchQuoteAnalyzer;

    public AnalysisService(IndexSettings indexSettings,
                           Map<String, AnalyzerProvider<?>> analyzerProviders,
                           Map<String, TokenizerFactory> tokenizerFactoryFactories,
                           Map<String, CharFilterFactory> charFilterFactoryFactories,
                           Map<String, TokenFilterFactory> tokenFilterFactoryFactories) {
        super(indexSettings);
        this.tokenizers = unmodifiableMap(tokenizerFactoryFactories);
        this.charFilters = unmodifiableMap(charFilterFactoryFactories);
        this.tokenFilters = unmodifiableMap(tokenFilterFactoryFactories);
        analyzerProviders = new HashMap<>(analyzerProviders);

        // Instantiate every configured analyzer; aliases declared in the
        // settings are collected separately and merged afterwards.
        Map<String, NamedAnalyzer> analyzerAliases = new HashMap<>();
        Map<String, NamedAnalyzer> analyzers = new HashMap<>();
        for (Map.Entry<String, AnalyzerProvider<?>> entry : analyzerProviders.entrySet()) {
            processAnalyzerFactory(entry.getKey(), entry.getValue(), analyzerAliases, analyzers);
        }
        // An alias may shadow the built-in default analyzers but must not
        // collide with any other explicitly configured analyzer name.
        for (Map.Entry<String, NamedAnalyzer> entry : analyzerAliases.entrySet()) {
            String key = entry.getKey();
            if (analyzers.containsKey(key) &&
                ("default".equals(key) || "default_search".equals(key) || "default_search_quoted".equals(key)) == false) {
                throw new IllegalStateException("already registered analyzer with name: " + key);
            } else {
                NamedAnalyzer configured = entry.getValue();
                analyzers.put(key, configured);
            }
        }

        // Fallback chain: default -> default_search -> default_search_quoted.
        if (!analyzers.containsKey("default")) {
            processAnalyzerFactory("default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS),
                analyzerAliases, analyzers);
        }
        if (!analyzers.containsKey("default_search")) {
            analyzers.put("default_search", analyzers.get("default"));
        }
        // NOTE(review): this fallback is stored under "default_search_quoted"
        // while the defaultSearchQuoteAnalyzer lookup below reads
        // "default_search_quote" - the spellings look inconsistent; confirm
        // which key is intended.
        if (!analyzers.containsKey("default_search_quoted")) {
            analyzers.put("default_search_quoted", analyzers.get("default_search"));
        }

        NamedAnalyzer defaultAnalyzer = analyzers.get("default");
        if (defaultAnalyzer == null) {
            throw new IllegalArgumentException("no default analyzer configured");
        }
        // "default_index" is legacy: rejected on 5.0+ indices, deprecated earlier.
        if (analyzers.containsKey("default_index")) {
            final Version createdVersion = indexSettings.getIndexVersionCreated();
            if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) {
                throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index().getName() + "]");
            } else {
                deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index().getName());
            }
        }
        defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : defaultAnalyzer;
        defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : defaultAnalyzer;
        defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer;

        // Names starting with '_' are reserved.
        for (Map.Entry<String, NamedAnalyzer> analyzer : analyzers.entrySet()) {
            if (analyzer.getKey().startsWith("_")) {
                throw new IllegalArgumentException("analyzer name must not start with '_'. got \"" + analyzer.getKey() + "\"");
            }
        }
        this.analyzers = unmodifiableMap(analyzers);
    }

    /**
     * Builds a {@link NamedAnalyzer} from the given provider, registers it
     * under {@code name} in {@code analyzers}, and records any aliases
     * declared for it in the index settings into {@code analyzerAliases}.
     */
    private void processAnalyzerFactory(String name, AnalyzerProvider<?> analyzerFactory, Map<String, NamedAnalyzer> analyzerAliases, Map<String, NamedAnalyzer> analyzers) {
        /*
         * Lucene defaults positionIncrementGap to 0 in all analyzers but
         * Elasticsearch defaults them to 0 only before version 2.0
         * and 100 afterwards so we override the positionIncrementGap if it
         * doesn't match here.
         */
        int overridePositionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
        if (analyzerFactory instanceof CustomAnalyzerProvider) {
            ((CustomAnalyzerProvider) analyzerFactory).build(this);
            /*
             * Custom analyzers already default to the correct, version
             * dependent positionIncrementGap and the user is able to
             * configure the positionIncrementGap directly on the analyzer so
             * we disable overriding the positionIncrementGap to preserve the
             * user's setting.
             */
            overridePositionIncrementGap = Integer.MIN_VALUE;
        }
        Analyzer analyzerF = analyzerFactory.get();
        if (analyzerF == null) {
            throw new IllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer");
        }
        NamedAnalyzer analyzer;
        if (analyzerF instanceof NamedAnalyzer) {
            // if we got a named analyzer back, use it...
            analyzer = (NamedAnalyzer) analyzerF;
            if (overridePositionIncrementGap >= 0 && analyzer.getPositionIncrementGap(analyzer.name()) != overridePositionIncrementGap) {
                // unless the positionIncrementGap needs to be overridden
                analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);
            }
        } else {
            analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
        }
        if (analyzers.containsKey(name)) {
            throw new IllegalStateException("already registered analyzer with name: " + name);
        }
        analyzers.put(name, analyzer);
        // Aliases may be declared either as a comma-separated string or as an
        // array in the settings; both forms are honored.
        String strAliases = this.indexSettings.getSettings().get("index.analysis.analyzer." + analyzerFactory.name() + ".alias");
        Set<String> aliases = new HashSet<>();
        if (strAliases != null) {
            aliases.addAll(Strings.commaDelimitedListToSet(strAliases));
        }
        aliases.addAll(Arrays.asList(this.indexSettings.getSettings()
                .getAsArray("index.analysis.analyzer." + analyzerFactory.name() + ".alias")));
        for (String alias : aliases) {
            if (analyzerAliases.putIfAbsent(alias, analyzer) != null) {
                throw new IllegalStateException("alias [" + alias + "] is already used by [" + analyzerAliases.get(alias).name() + "]");
            }
        }
    }

    @Override
    public void close() {
        // Only index-scoped analyzers are owned by this service and closed
        // here; analyzers with other scopes are shared and left open.
        for (NamedAnalyzer analyzer : analyzers.values()) {
            if (analyzer.scope() == AnalyzerScope.INDEX) {
                try {
                    analyzer.close();
                } catch (NullPointerException e) {
                    // because analyzers are aliased, they might be closed several times
                    // an NPE is thrown in this case, so ignore....
                } catch (Exception e) {
                    logger.debug("failed to close analyzer {}", analyzer);
                }
            }
        }
    }

    /** Returns the analyzer registered under the given name, or {@code null}. */
    public NamedAnalyzer analyzer(String name) {
        return analyzers.get(name);
    }

    /** Analyzer applied at index time when a field configures none. */
    public NamedAnalyzer defaultIndexAnalyzer() {
        return defaultIndexAnalyzer;
    }

    /** Analyzer applied at search time when a field configures none. */
    public NamedAnalyzer defaultSearchAnalyzer() {
        return defaultSearchAnalyzer;
    }

    /** Analyzer applied to quoted (phrase) search text. */
    public NamedAnalyzer defaultSearchQuoteAnalyzer() {
        return defaultSearchQuoteAnalyzer;
    }

    /** Returns the tokenizer factory registered under the given name, or {@code null}. */
    public TokenizerFactory tokenizer(String name) {
        return tokenizers.get(name);
    }

    /** Returns the char filter factory registered under the given name, or {@code null}. */
    public CharFilterFactory charFilter(String name) {
        return charFilters.get(name);
    }

    /** Returns the token filter factory registered under the given name, or {@code null}. */
    public TokenFilterFactory tokenFilter(String name) {
        return tokenFilters.get(name);
    }
}
| zkidkid/elasticsearch | core/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java | Java | apache-2.0 | 9,751 |
package nricheton.utils.io;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
/**
 * An {@link InputStream} wrapper that removes characters outside the
 * printable ASCII range ({@code 0x20}-{@code 0x7e}) from the wrapped
 * stream, so that XML documents containing invalid characters can still
 * be parsed.
 * <p>
 * The stream is consumed line by line; each line is cleaned and the line
 * terminator consumed by {@link BufferedReader#readLine()} is re-emitted
 * as a single {@code '\n'} so token boundaries are preserved.
 *
 * @author Nicolas Richeton
 */
public class XmlCleanupInputStream extends InputStream {

    private static final String ENCODING = "UTF-8";

    /** Stream supplied by the caller; closed via {@link #close()}. */
    private final InputStream originalIS;

    private BufferedReader originalReader;

    /** Bytes of the current, already-cleaned line; null once EOF is reached. */
    private byte[] buffer = new byte[0];

    /** Read position inside {@link #buffer}. */
    private int position = 0;

    /** Deferred constructor failure, rethrown from {@link #read()}. */
    private UnsupportedEncodingException error = null;

    public XmlCleanupInputStream(InputStream is) {
        originalIS = is;
        try {
            originalReader = new BufferedReader(new InputStreamReader(originalIS, ENCODING));
        } catch (UnsupportedEncodingException e) {
            // Defer: InputStream constructors cannot throw IOException here.
            error = e;
        }
    }

    @Override
    public int read() throws IOException {
        if (error != null) {
            throw new IOException(error);
        }
        if (buffer == null) {
            return -1; // end of stream already reached
        }
        while (position >= buffer.length) {
            String line = originalReader.readLine();
            if (line == null) {
                buffer = null;
                return -1;
            }
            // Strip everything outside printable ASCII, then re-append the
            // line terminator that readLine() consumed. The original code
            // dropped it, which fused consecutive lines together and could
            // corrupt tokens split across lines (e.g. XML attributes).
            buffer = (line.replaceAll("[^\\x20-\\x7e]", "") + "\n").getBytes(ENCODING);
            position = 0;
        }
        // Mask to 0-255: InputStream.read() must return the byte as an
        // unsigned value; a plain byte-to-int widening could yield a
        // negative number, which callers would misinterpret.
        return buffer[position++] & 0xFF;
    }

    @Override
    public void close() throws IOException {
        // The original implementation never closed the wrapped stream,
        // leaking the underlying resource.
        if (originalReader != null) {
            originalReader.close();
        } else {
            originalIS.close();
        }
    }
}
| nricheton/java-utils | XmlCleanupInputStream.java | Java | apache-2.0 | 1,495 |
"use strict";
// Use the real DataStore implementation instead of Jest's automatic mocks.
jest.autoMockOff();
// MockDate pins Date.now() to known values for the "addNow" tests.
const MockDate = require("mockdate");
const DataStore = require("../../src/resources/DataStore");
// Field names shared by most DataStore instances built in these tests.
const FIELDS = ["foo", "bar", "baz"];
describe("DataStore construction", () => {
    it("tests constructor fails", () => {
        // Each of these field lists must be rejected by the constructor.
        const expectThrows = (fields) => {
            try {
                new DataStore(fields);
                expect(false).toBe(true);
            } catch (err) {
                expect(true).toBe(true);
            }
        };
        const invalidInputs = [
            null,
            undefined,
            "this is not an array",
            ["col1", "time", "col2"],
            ["TimE", "col1"],
            ["col1", "time_offset", "col2"],
            ["Time_OffseT", "col2"],
            ["col1", "all", "col2"],
            ["ALL", "none"],
            ["fine", undefined],
            ["fine", null],
            ["fine", ""],
            ["fine", 5]
        ];
        invalidInputs.forEach((fields) => expectThrows(fields));
    });

    it("tests constructor", () => {
        const store = new DataStore(FIELDS);
        expect(store instanceof DataStore).toBe(true);
        const actual = store.fields();
        expect(actual.length).toBe(FIELDS.length);
        FIELDS.forEach((field, i) => {
            expect(actual[i]).toBe(field);
        });
    });

    it("tests constructor deep copies", () => {
        const initial = ["a", "b", "c"];
        const store = new DataStore(initial);
        expect(store instanceof DataStore).toBe(true);
        let actual = store.fields();
        expect(actual.length).toBe(initial.length);
        initial.forEach((field, i) => {
            expect(actual[i]).toBe(field);
        });
        // Mutating the source array must not affect the store's copy.
        initial.push("d");
        actual = store.fields();
        expect(actual.length).toBe(initial.length - 1);
    });
});
describe("adding to batches", () => {
    it("tests full row", () => {
        // Asserts a stored row matches the expected timestamp and field values.
        const check = (have, wantTime, wantData) => {
            expect(have.time).toBe(wantTime);
            expect(have.foo).toBe(wantData.foo);
            expect(have.bar).toBe(wantData.bar);
            expect(have.baz).toBe(wantData.baz);
        };
        const batch = new DataStore(FIELDS);
        const row1 = {foo: 0.0, bar: 2.0, baz: 3.0};
        batch.add(0, row1);
        let have = batch.rows();
        expect(have.length).toBe(1);
        check(have[0], 0, row1);
        const row2 = {foo: 4.0, bar: 5.0, baz: 6.0};
        batch.add(1000, row2);
        have = batch.rows();
        expect(have.length).toBe(2);
        check(have[0], 0, row1);
        check(have[1], 1000, row2);
    });
    it("tests sparse row", () => {
        // Fields omitted from an added row must come back as null.
        const batch = new DataStore(FIELDS);
        const row1 = {foo: 1.0, baz: 3.0};
        batch.add(0, row1);
        let have = batch.rows();
        expect(have.length).toBe(1);
        expect(have[0].time).toBe(0);
        expect(have[0].foo).toBe(row1.foo);
        expect(have[0].bar).toBe(null);
        expect(have[0].baz).toBe(row1.baz);
        const row2 = {foo: 1.0};
        batch.add(1000, row2);
        have = batch.rows();
        expect(have.length).toBe(2);
        expect(have[1].time).toBe(1000);
        // Bug fix: the second row was previously asserted against row1; it
        // only passed because row1.foo and row2.foo happened to be equal.
        expect(have[1].foo).toBe(row2.foo);
        expect(have[1].bar).toBe(null);
        expect(have[1].baz).toBe(null);
    });
    it("tests bad data errors", () => {
        const batch = new DataStore(FIELDS);
        // Adding invalid data must throw and leave the store unchanged.
        const check = (badRow) => {
            try {
                batch.add(0, badRow);
                expect(false).toBe(true);
            } catch (err) {
                expect(batch.rows().length).toBe(0);
            }
        };
        check({wrong: 5.0});
        check({foo: 1.0, bar: 2.0, baz: 3.0, bad: 5.0});
        check(null);
        check(undefined);
    });
});
describe("add now", () => {
    // Asserts a stored row matches the expected timestamp and field values.
    const check = (row, expectedTime, expectedData) => {
        expect(row.time).toBe(expectedTime);
        expect(row.foo).toBe(expectedData.foo);
        expect(row.bar).toBe(expectedData.bar);
        expect(row.baz).toBe(expectedData.baz);
    };
    it("tests functionality", () => {
        const store = new DataStore(FIELDS);
        // Pin the clock so addNow() timestamps are predictable.
        MockDate.set(1000);
        const first = {foo: 1.0, bar: 2.0, baz: 3.0};
        store.addNow(first);
        let rows = store.rows();
        expect(rows.length).toBe(1);
        check(rows[0], 1000, first);
        MockDate.set(2000);
        const second = {foo: 4.0, bar: 5.0, baz: 6.0};
        store.addNow(second);
        rows = store.rows();
        expect(rows.length).toBe(2);
        check(rows[1], 2000, second);
    });
});
describe("adding too many to batch", () => {
    const store = new DataStore(FIELDS);
    it("tests size function", () => {
        // 166 rows x 3 fields = 498 values, just under the 500-value cap.
        for (let i = 0; i < 166; i++) {
            store.add(i, {foo: i, bar: i, baz: i});
            expect(store.size()).toBe((i + 1) * 3);
        }
    });
    it("tests than > 500 fails", () => {
        try {
            store.add(167, {foo: 167});
            expect(false).toBe(true);
        } catch (err) {
            // The failed add must not have changed the stored size.
            expect(store.size()).toBe(498);
        }
    });
});
describe("tests reset", () => {
    const store = new DataStore(FIELDS);
    it("tests reset removes all rows", () => {
        let count = 0;
        while (count < 100) {
            store.add(count, {foo: count, bar: count, baz: count});
            count++;
            expect(store.size()).toBe(count * 3);
        }
        store.reset();
        expect(store.size()).toBe(0);
        expect(store.rows().length).toBe(0);
        // The store must remain usable after a reset.
        store.add(0, {foo: 0, bar: 0, baz: 0});
        expect(store.rows().length).toBe(1);
    });
});
describe("tests snapshot", () => {
    const source = new DataStore(FIELDS);
    for (let i = 0; i < 5; i++) {
        source.add(i, {foo: i, bar: i, baz: i});
    }
    it("tests snapshot is a copy", () => {
        const copy = source.snapshot();
        expect(copy instanceof DataStore).toBe(true);
        expect(copy.size()).toBe(source.size());
        expect(copy.rows().length).toBe(source.rows().length);
        // Field lists must match element for element.
        copy.fields().forEach((field, i) => {
            expect(field).toBe(source.fields()[i]);
        });
        // Every row in the copy must mirror the corresponding source row.
        copy.rows().forEach((copyRow, i) => {
            const sourceRow = source.rows()[i];
            expect(Object.keys(copyRow).length).toBe(Object.keys(sourceRow).length);
            Object.keys(copyRow).forEach((k) => {
                expect(copyRow[k]).toBe(sourceRow[k]);
            });
        });
    });
    it("tests snapshot is deep copy", () => {
        const copy = source.snapshot();
        expect(copy instanceof DataStore).toBe(true);
        expect(copy.size()).toBe(source.size());
        expect(copy.rows().length).toBe(source.rows().length);
        // Adding to the original must not grow the snapshot.
        source.add(6, {foo: 6});
        expect(copy.size()).toBe(source.size() - 3);
        expect(copy.rows().length).toBe(source.rows().length - 1);
    });
});
| iobeam/iobeam-client-node | tests/resources/test_DataStore.js | JavaScript | apache-2.0 | 6,869 |
/**
* Copyright (c) 2014 SQUARESPACE, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.squarespace.less.model;
import static com.squarespace.less.core.LessUtils.safeEquals;
import static com.squarespace.less.model.NodeType.UNICODE_RANGE;
import com.squarespace.less.core.Buffer;
/**
* A wrapper around a CSS Unicode character range.
*/
public class UnicodeRange extends BaseNode {

  /**
   * Raw text of the CSS Unicode character range this node wraps.
   */
  protected final String value;

  /**
   * Creates a node holding the given Unicode range string.
   * @param value
   */
  public UnicodeRange(String value) {
    this.value = value;
  }

  /**
   * Returns the raw Unicode range string.
   */
  public String value() {
    return value;
  }

  /**
   * See {@link Node#type()}
   */
  @Override
  public NodeType type() {
    return UNICODE_RANGE;
  }

  /**
   * See {@link Node#repr(Buffer)}
   */
  @Override
  public void repr(Buffer buf) {
    buf.append(value);
  }

  /**
   * See {@link Node#modelRepr(Buffer)}
   */
  @Override
  public void modelRepr(Buffer buf) {
    typeRepr(buf);
    posRepr(buf);
    buf.append('(').append(value).append(')');
  }

  @Override
  public boolean equals(Object obj) {
    if (!(obj instanceof UnicodeRange)) {
      return false;
    }
    return safeEquals(value, ((UnicodeRange)obj).value);
  }

  @Override
  public int hashCode() {
    // Lazily compute; zero is the "not yet computed" sentinel.
    return hashCode != 0 ? hashCode : buildHashCode(value);
  }

}
| phensley/less-compiler | less-core/src/main/java/com/squarespace/less/model/UnicodeRange.java | Java | apache-2.0 | 1,955 |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Source file annotation for coverage.py."""
import os
import re
from coverage.files import flat_rootname
from coverage.misc import ensure_dir, isolate_module
from coverage.report import get_analysis_to_report
os = isolate_module(os)
class AnnotateReporter:
    """Generate annotated source files showing line coverage.

    This reporter creates annotated copies of the measured source files. Each
    .py file is copied as a .py,cover file, with a left-hand margin annotating
    each line::

        > def h(x):
        -     if 0:   #pragma: no cover
        -         pass
        >     if x == 1:
        !         a = 1
        >     else:
        >         a = 2

        > h(2)

    Executed lines use '>', lines not executed use '!', lines excluded from
    consideration use '-'.

    """

    def __init__(self, coverage):
        self.coverage = coverage
        self.config = self.coverage.config
        # Destination directory for the ",cover" files; set by report().
        self.directory = None

    # Matches blank lines and comment-only lines (annotated with a space).
    blank_re = re.compile(r"\s*(#|$)")
    # Matches lines containing only "else:" (handled specially below, since
    # an "else:" line itself is not a statement in the coverage data).
    else_re = re.compile(r"\s*else\s*:\s*(#|$)")

    def report(self, morfs, directory=None):
        """Run the report.

        See `coverage.report()` for arguments.

        """
        self.directory = directory
        self.coverage.get_data()
        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            self.annotate_file(fr, analysis)

    def annotate_file(self, fr, analysis):
        """Annotate a single file.

        `fr` is the FileReporter for the file to annotate.

        """
        statements = sorted(analysis.statements)
        missing = sorted(analysis.missing)
        excluded = sorted(analysis.excluded)

        if self.directory:
            # Write next to the other report files, flattening the path into
            # a single file name.
            ensure_dir(self.directory)
            dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
            if dest_file.endswith("_py"):
                dest_file = dest_file[:-3] + ".py"
            dest_file += ",cover"
        else:
            # No directory given: write alongside the original source file.
            dest_file = fr.filename + ",cover"

        with open(dest_file, 'w', encoding='utf8') as dest:
            i = 0  # cursor into statements
            j = 0  # cursor into missing
            covered = True
            source = fr.source()
            for lineno, line in enumerate(source.splitlines(True), start=1):
                # Advance both cursors to the current line number.
                while i < len(statements) and statements[i] < lineno:
                    i += 1
                while j < len(missing) and missing[j] < lineno:
                    j += 1
                if i < len(statements) and statements[i] == lineno:
                    # `covered` persists for continuation lines until the next
                    # statement line is reached.
                    covered = j >= len(missing) or missing[j] > lineno
                if self.blank_re.match(line):
                    dest.write(' ')
                elif self.else_re.match(line):
                    # Special logic for lines containing only 'else:'.
                    if i >= len(statements) and j >= len(missing):
                        dest.write('! ')
                    elif i >= len(statements) or j >= len(missing):
                        dest.write('> ')
                    elif statements[i] == missing[j]:
                        dest.write('! ')
                    else:
                        dest.write('> ')
                elif lineno in excluded:
                    dest.write('- ')
                elif covered:
                    dest.write('> ')
                else:
                    dest.write('! ')
                dest.write(line)
| sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/coverage/annotate.py | Python | apache-2.0 | 3,528 |
/*
* Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.simpleemailv2.model;
import java.io.Serializable;
import javax.annotation.Generated;
/**
* <p>
* A list of suppressed email addresses.
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/sesv2-2019-09-27/ListSuppressedDestinations" target="_top">AWS
* API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ListSuppressedDestinationsResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable {

    /**
     * <p>
     * A list of summaries, each containing a summary for a suppressed email destination.
     * </p>
     */
    private java.util.List<SuppressedDestinationSummary> suppressedDestinationSummaries;
    /**
     * <p>
     * A token that indicates that there are additional email addresses on the suppression list for your account. To
     * view additional suppressed addresses, issue another request to <code>ListSuppressedDestinations</code>, and pass
     * this token in the <code>NextToken</code> parameter.
     * </p>
     */
    private String nextToken;

    /**
     * <p>
     * A list of summaries, each containing a summary for a suppressed email destination.
     * </p>
     *
     * @return A list of summaries, each containing a summary for a suppressed email destination.
     */
    public java.util.List<SuppressedDestinationSummary> getSuppressedDestinationSummaries() {
        return suppressedDestinationSummaries;
    }

    /**
     * <p>
     * A list of summaries, each containing a summary for a suppressed email destination.
     * </p>
     *
     * @param suppressedDestinationSummaries
     *        A list of summaries, each containing a summary for a suppressed email destination.
     */
    public void setSuppressedDestinationSummaries(java.util.Collection<SuppressedDestinationSummary> suppressedDestinationSummaries) {
        // A null collection clears the field; otherwise store a defensive copy.
        if (suppressedDestinationSummaries == null) {
            this.suppressedDestinationSummaries = null;
            return;
        }

        this.suppressedDestinationSummaries = new java.util.ArrayList<SuppressedDestinationSummary>(suppressedDestinationSummaries);
    }

    /**
     * <p>
     * A list of summaries, each containing a summary for a suppressed email destination.
     * </p>
     * <p>
     * <b>NOTE:</b> This method appends the values to the existing list (if any). Use
     * {@link #setSuppressedDestinationSummaries(java.util.Collection)} or
     * {@link #withSuppressedDestinationSummaries(java.util.Collection)} if you want to override the existing values.
     * </p>
     *
     * @param suppressedDestinationSummaries
     *        A list of summaries, each containing a summary for a suppressed email destination.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ListSuppressedDestinationsResult withSuppressedDestinationSummaries(SuppressedDestinationSummary... suppressedDestinationSummaries) {
        // Lazily initialize the backing list before appending.
        if (this.suppressedDestinationSummaries == null) {
            setSuppressedDestinationSummaries(new java.util.ArrayList<SuppressedDestinationSummary>(suppressedDestinationSummaries.length));
        }
        for (SuppressedDestinationSummary ele : suppressedDestinationSummaries) {
            this.suppressedDestinationSummaries.add(ele);
        }
        return this;
    }

    /**
     * <p>
     * A list of summaries, each containing a summary for a suppressed email destination.
     * </p>
     *
     * @param suppressedDestinationSummaries
     *        A list of summaries, each containing a summary for a suppressed email destination.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ListSuppressedDestinationsResult withSuppressedDestinationSummaries(java.util.Collection<SuppressedDestinationSummary> suppressedDestinationSummaries) {
        setSuppressedDestinationSummaries(suppressedDestinationSummaries);
        return this;
    }

    /**
     * <p>
     * A token that indicates that there are additional email addresses on the suppression list for your account. To
     * view additional suppressed addresses, issue another request to <code>ListSuppressedDestinations</code>, and pass
     * this token in the <code>NextToken</code> parameter.
     * </p>
     *
     * @param nextToken
     *        A token that indicates that there are additional email addresses on the suppression list for your account.
     *        To view additional suppressed addresses, issue another request to <code>ListSuppressedDestinations</code>,
     *        and pass this token in the <code>NextToken</code> parameter.
     */
    public void setNextToken(String nextToken) {
        this.nextToken = nextToken;
    }

    /**
     * <p>
     * A token that indicates that there are additional email addresses on the suppression list for your account. To
     * view additional suppressed addresses, issue another request to <code>ListSuppressedDestinations</code>, and pass
     * this token in the <code>NextToken</code> parameter.
     * </p>
     *
     * @return A token that indicates that there are additional email addresses on the suppression list for your
     *         account. To view additional suppressed addresses, issue another request to
     *         <code>ListSuppressedDestinations</code>, and pass this token in the <code>NextToken</code> parameter.
     */
    public String getNextToken() {
        return this.nextToken;
    }

    /**
     * <p>
     * A token that indicates that there are additional email addresses on the suppression list for your account. To
     * view additional suppressed addresses, issue another request to <code>ListSuppressedDestinations</code>, and pass
     * this token in the <code>NextToken</code> parameter.
     * </p>
     *
     * @param nextToken
     *        A token that indicates that there are additional email addresses on the suppression list for your account.
     *        To view additional suppressed addresses, issue another request to <code>ListSuppressedDestinations</code>,
     *        and pass this token in the <code>NextToken</code> parameter.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ListSuppressedDestinationsResult withNextToken(String nextToken) {
        setNextToken(nextToken);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getSuppressedDestinationSummaries() != null)
            sb.append("SuppressedDestinationSummaries: ").append(getSuppressedDestinationSummaries()).append(",");
        if (getNextToken() != null)
            sb.append("NextToken: ").append(getNextToken());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;

        if (obj instanceof ListSuppressedDestinationsResult == false)
            return false;
        ListSuppressedDestinationsResult other = (ListSuppressedDestinationsResult) obj;
        // For each property, XOR detects the "exactly one side is null" case.
        if (other.getSuppressedDestinationSummaries() == null ^ this.getSuppressedDestinationSummaries() == null)
            return false;
        if (other.getSuppressedDestinationSummaries() != null
                && other.getSuppressedDestinationSummaries().equals(this.getSuppressedDestinationSummaries()) == false)
            return false;
        if (other.getNextToken() == null ^ this.getNextToken() == null)
            return false;
        if (other.getNextToken() != null && other.getNextToken().equals(this.getNextToken()) == false)
            return false;
        return true;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int hashCode = 1;

        hashCode = prime * hashCode + ((getSuppressedDestinationSummaries() == null) ? 0 : getSuppressedDestinationSummaries().hashCode());
        hashCode = prime * hashCode + ((getNextToken() == null) ? 0 : getNextToken().hashCode());
        return hashCode;
    }

    @Override
    public ListSuppressedDestinationsResult clone() {
        try {
            return (ListSuppressedDestinationsResult) super.clone();
        } catch (CloneNotSupportedException e) {
            // Cannot happen: the class implements Cloneable.
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

}
| aws/aws-sdk-java | aws-java-sdk-sesv2/src/main/java/com/amazonaws/services/simpleemailv2/model/ListSuppressedDestinationsResult.java | Java | apache-2.0 | 9,507 |