code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
4
991
language
stringclasses
9 values
license
stringclasses
15 values
size
int32
3
1.05M
/* * Copyright 2017-2021 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.lettuce.core.cluster.api.async; import java.util.List; import java.util.Set; import io.lettuce.core.GeoAddArgs; import io.lettuce.core.GeoArgs; import io.lettuce.core.GeoCoordinates; import io.lettuce.core.GeoRadiusStoreArgs; import io.lettuce.core.GeoSearch; import io.lettuce.core.GeoValue; import io.lettuce.core.GeoWithin; import io.lettuce.core.Value; /** * Asynchronous executed commands on a node selection for the Geo-API. * * @author Mark Paluch * @since 4.0 * @generated by io.lettuce.apigenerator.CreateAsyncNodeSelectionClusterApi */ public interface NodeSelectionGeoAsyncCommands<K, V> { /** * Single geo add. * * @param key the key of the geo set. * @param longitude the longitude coordinate according to WGS84. * @param latitude the latitude coordinate according to WGS84. * @param member the member to add. * @return Long integer-reply the number of elements that were added to the set. */ AsyncExecutions<Long> geoadd(K key, double longitude, double latitude, V member); /** * Single geo add. * * @param key the key of the geo set. * @param longitude the longitude coordinate according to WGS84. * @param latitude the latitude coordinate according to WGS84. * @param member the member to add. * @param args additional arguments. * @return Long integer-reply the number of elements that were added to the set. 
* @since 6.1 */ AsyncExecutions<Long> geoadd(K key, double longitude, double latitude, V member, GeoAddArgs args); /** * Multi geo add. * * @param key the key of the geo set. * @param lngLatMember triplets of double longitude, double latitude and V member. * @return Long integer-reply the number of elements that were added to the set. */ AsyncExecutions<Long> geoadd(K key, Object... lngLatMember); /** * Multi geo add. * * @param key the key of the geo set. * @param values {@link io.lettuce.core.GeoValue} values to add. * @return Long integer-reply the number of elements that were added to the set. * @since 6.1 */ AsyncExecutions<Long> geoadd(K key, GeoValue<V>... values); /** * Multi geo add. * * @param key the key of the geo set. * @param args additional arguments. * @param lngLatMember triplets of double longitude, double latitude and V member. * @return Long integer-reply the number of elements that were added to the set. * @since 6.1 */ AsyncExecutions<Long> geoadd(K key, GeoAddArgs args, Object... lngLatMember); /** * Multi geo add. * * @param key the key of the geo set. * @param args additional arguments. * @param values {@link io.lettuce.core.GeoValue} values to add. * @return Long integer-reply the number of elements that were added to the set. * @since 6.1 */ AsyncExecutions<Long> geoadd(K key, GeoAddArgs args, GeoValue<V>... values); /** * Retrieve distance between points {@code from} and {@code to}. If one or more elements are missing {@code null} is * returned. Default in meters by, otherwise according to {@code unit} * * @param key the key of the geo set. * @param from from member. * @param to to member. * @param unit distance unit. * @return distance between points {@code from} and {@code to}. If one or more elements are missing {@code null} is * returned. 
*/ AsyncExecutions<Double> geodist(K key, V from, V to, GeoArgs.Unit unit); /** * Retrieve Geohash strings representing the position of one or more elements in a sorted set value representing a * geospatial index. * * @param key the key of the geo set. * @param members the members. * @return bulk reply Geohash strings in the order of {@code members}. Returns {@code null} if a member is not found. */ AsyncExecutions<List<Value<String>>> geohash(K key, V... members); /** * Get geo coordinates for the {@code members}. * * @param key the key of the geo set. * @param members the members. * @return a list of {@link GeoCoordinates}s representing the x,y position of each element specified in the arguments. For * missing elements {@code null} is returned. */ AsyncExecutions<List<GeoCoordinates>> geopos(K key, V... members); /** * Retrieve members selected by distance with the center of {@code longitude} and {@code latitude}. * * @param key the key of the geo set. * @param longitude the longitude coordinate according to WGS84. * @param latitude the latitude coordinate according to WGS84. * @param distance radius distance. * @param unit distance unit. * @return bulk reply. */ AsyncExecutions<Set<V>> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit); /** * Retrieve members selected by distance with the center of {@code longitude} and {@code latitude}. * * @param key the key of the geo set. * @param longitude the longitude coordinate according to WGS84. * @param latitude the latitude coordinate according to WGS84. * @param distance radius distance. * @param unit distance unit. * @param geoArgs args to control the result. * @return nested multi-bulk reply. The {@link GeoWithin} contains only fields which were requested by {@link GeoArgs}. 
*/ AsyncExecutions<List<GeoWithin<V>>> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit, GeoArgs geoArgs); /** * Perform a {@link #georadius(Object, double, double, double, GeoArgs.Unit, GeoArgs)} query and store the results in a * sorted set. * * @param key the key of the geo set. * @param longitude the longitude coordinate according to WGS84. * @param latitude the latitude coordinate according to WGS84. * @param distance radius distance. * @param unit distance unit. * @param geoRadiusStoreArgs args to store either the resulting elements with their distance or the resulting elements with * their locations a sorted set. * @return Long integer-reply the number of elements in the result. */ AsyncExecutions<Long> georadius(K key, double longitude, double latitude, double distance, GeoArgs.Unit unit, GeoRadiusStoreArgs<K> geoRadiusStoreArgs); /** * Retrieve members selected by distance with the center of {@code member}. The member itself is always contained in the * results. * * @param key the key of the geo set. * @param member reference member. * @param distance radius distance. * @param unit distance unit. * @return set of members. */ AsyncExecutions<Set<V>> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit); /** * Retrieve members selected by distance with the center of {@code member}. The member itself is always contained in the * results. * * @param key the key of the geo set. * @param member reference member. * @param distance radius distance. * @param unit distance unit. * @param geoArgs args to control the result. * @return nested multi-bulk reply. The {@link GeoWithin} contains only fields which were requested by {@link GeoArgs}. */ AsyncExecutions<List<GeoWithin<V>>> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit, GeoArgs geoArgs); /** * Perform a {@link #georadiusbymember(Object, Object, double, GeoArgs.Unit, GeoArgs)} query and store the results in a * sorted set. 
* * @param key the key of the geo set. * @param member reference member. * @param distance radius distance. * @param unit distance unit. * @param geoRadiusStoreArgs args to store either the resulting elements with their distance or the resulting elements with * their locations a sorted set. * @return Long integer-reply the number of elements in the result. */ AsyncExecutions<Long> georadiusbymember(K key, V member, double distance, GeoArgs.Unit unit, GeoRadiusStoreArgs<K> geoRadiusStoreArgs); /** * Retrieve members selected by distance with the center of {@code reference} the search {@code predicate}. * Use {@link GeoSearch} to create reference and predicate objects. * * @param key the key of the geo set. * @param reference the reference member or longitude/latitude coordinates. * @param predicate the bounding box or radius to search in. * @return bulk reply. * @since 6.1 */ AsyncExecutions<Set<V>> geosearch(K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate); /** * Retrieve members selected by distance with the center of {@code reference} the search {@code predicate}. * Use {@link GeoSearch} to create reference and predicate objects. * * @param key the key of the geo set. * @param reference the reference member or longitude/latitude coordinates. * @param predicate the bounding box or radius to search in. * @param geoArgs args to control the result. * @return nested multi-bulk reply. The {@link GeoWithin} contains only fields which were requested by {@link GeoArgs}. * @since 6.1 */ AsyncExecutions<List<GeoWithin<V>>> geosearch(K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate, GeoArgs geoArgs); /** * Perform a {@link #geosearch(Object, GeoSearch.GeoRef, GeoSearch.GeoPredicate, GeoArgs)} query and store the results in a * sorted set. * * @param destination the destination where to store results. * @param key the key of the geo set. * @param reference the reference member or longitude/latitude coordinates. 
* @param predicate the bounding box or radius to search in. * @param geoArgs args to control the result. * @param storeDist stores the items in a sorted set populated with their distance from the center of the circle or box, as a floating-point number, in the same unit specified for that shape. * @return Long integer-reply the number of elements in the result. * @since 6.1 */ AsyncExecutions<Long> geosearchstore(K destination, K key, GeoSearch.GeoRef<K> reference, GeoSearch.GeoPredicate predicate, GeoArgs geoArgs, boolean storeDist); }
lettuce-io/lettuce-core
src/main/java/io/lettuce/core/cluster/api/async/NodeSelectionGeoAsyncCommands.java
Java
apache-2.0
11,078
/**
 * Removes the `data-weight` attribute from the given element.
 *
 * Improvement over the original: a missing element is handled with an explicit
 * guard instead of letting `removeAttribute` throw a TypeError that was then
 * swallowed by the catch block (exception as control flow). Observable
 * behavior is unchanged: absent elements are ignored silently.
 *
 * @param {Element|null|undefined} element - element to strip; ignored when absent.
 */
export const removeWeight = (element) => {
  if (!element) {
    return;
  }
  try {
    element.removeAttribute('data-weight');
  } catch (e) {
    // Legacy (IE11-era) fallback kept from the original: when removeAttribute
    // fails, blank the attribute via setAttribute instead. Note that
    // setAttribute coerces null to the string "null" rather than removing it.
    element.setAttribute('data-weight', null);
  }
};
OpenConext/OpenConext-engineblock
theme/base/javascripts/wayf/search/removeWeight.js
JavaScript
apache-2.0
230
using MO.Core.Forms.Common;
using System;
using System.ComponentModel;
using System.Drawing;
using System.Windows.Forms;

namespace MO.Core.Forms.Controls
{
    //============================================================
    // <T>Color picker control: a text box showing the hex value of the
    // selected color next to a swatch panel that opens a color dialog.</T>
    //============================================================
    public partial class QColorPicker : QControl
    {
        // Maximum (and enforced) height of the control; InnerSetSize clamps
        // the control height to this value.
        protected int _heighMax = 20;

        // Width of the color swatch panel on the right-hand side.
        protected int _colorWidth = 18;

        // Currently selected color.
        protected Color _selectColor = Color.Black;

        //============================================================
        // <T>Construct the color picker control.</T>
        //============================================================
        public QColorPicker() {
            InitializeComponent();
        }

        //============================================================
        // <T>Lay out the inner components for the given size.</T>
        //
        // @param width desired control width.
        // @param height desired control height (ignored: the control height
        //        is always forced to _heighMax, see below).
        //============================================================
        protected void InnerSetSize(int width, int height) {
            // Apply the size. Note the chained assignment: the height
            // parameter is deliberately overridden with _heighMax so the
            // control keeps a fixed height regardless of the requested value.
            Width = width;
            Height = height = _heighMax;
            // Compute the content insets from the outer and inner borders.
            int contentLeft = _borderOuter.Left.Width + _borderInner.Left.Width;
            // NOTE(review): contentRight is computed from the Bottom border
            // widths — this looks like a copy-paste slip; Right widths were
            // presumably intended. Confirm against the border type before
            // changing, since the layout below compensates with "- 2" offsets.
            int contentRight = _borderOuter.Bottom.Width + _borderInner.Bottom.Width;
            int contentTop = _borderOuter.Top.Width + _borderInner.Top.Width;
            int contentBottom = _borderOuter.Bottom.Width + _borderInner.Bottom.Width;
            // Position the container panel to fill the content area.
            pnlContanier.SetBounds(
                contentLeft,
                contentTop,
                width - contentLeft - contentRight,
                height - contentTop - contentBottom);
            // Position the hex-value text box, leaving room for the swatch.
            txtValue.SetBounds(
                contentLeft,
                contentTop,
                width - _colorWidth - contentLeft - contentRight - 2,
                height - contentTop - contentBottom);
            // Position the color swatch panel at the right edge.
            pnlColor.SetBounds(
                width - _colorWidth - contentLeft - contentRight,
                contentTop - 2,
                _colorWidth,
                height - contentTop - contentBottom);
            Invalidate();
        }

        //============================================================
        // <T>Size of the control. Setting it re-runs the inner layout
        // (which forces the height to _heighMax).</T>
        //============================================================
        [Browsable(true)]
        public new Size Size {
            get { return base.Size; }
            set { InnerSetSize(value.Width, value.Height); }
        }

        //============================================================
        // <T>Refresh the swatch panel and hex text from _selectColor.</T>
        //============================================================
        public void RefreshColor(){
            pnlColor.BackColor = _selectColor;
            txtValue.Text = RColor.FormatHex(_selectColor.ToArgb());
        }

        //============================================================
        // <T>Gets or sets the selected color.</T>
        //============================================================
        [Browsable(true)]
        [DefaultValue(typeof(Color), "Color.Black")]
        public Color SelectColor {
            get { return _selectColor; }
            set {
                _selectColor = value;
                RefreshColor();
            }
        }

        //============================================================
        // <T>Gets or sets the selected color as hex text (ARGB,
        // e.g. "FF000000").</T>
        //============================================================
        [Browsable(true)]
        [DefaultValue("FF000000")]
        public string SelectColorText {
            get { return RColor.FormatHex(_selectColor.ToArgb()); }
            set {
                _selectColor = RColor.ParseHexColor(value);
                RefreshColor();
            }
        }

        //============================================================
        // <T>Gets or sets the selected color as a raw ARGB int value.</T>
        //============================================================
        [Browsable(true)]
        [DefaultValue(-16777216)]
        public int SelectColorValue {
            get { return _selectColor.ToArgb(); }
            set {
                _selectColor = Color.FromArgb(value);
                RefreshColor();
            }
        }

        //============================================================
        // <T>Resize handler: re-run the inner layout.</T>
        //============================================================
        private void QColorPicker_Resize(object sender, EventArgs e) {
            InnerSetSize(Width, Height);
        }

        //============================================================
        // <T>Swatch click handler: open the color dialog and, on OK,
        // adopt the chosen color.</T>
        //============================================================
        private void pnlColor_Click(object sender, EventArgs e) {
            dlgColor.Color = _selectColor;
            DialogResult resultCd = dlgColor.ShowDialog();
            if (resultCd == DialogResult.OK) {
                _selectColor = dlgColor.Color;
                RefreshColor();
            }
        }

        //============================================================
        // <T>Text box leave handler: parse the typed hex value into the
        // selected color. NOTE(review): no error handling here — what
        // ParseHexColor does with invalid text is not visible from this
        // file; confirm it does not throw on bad input.</T>
        //============================================================
        private void txtValue_Leave(object sender, EventArgs e) {
            _selectColor = RColor.ParseHexColor(txtValue.Text);
            RefreshColor();
        }
    }
}
favedit/MoCross
Tools/1 - Common/MoCore/Forms/Controls/QColorPicker.cs
C#
apache-2.0
5,282
/**
 * Copyright (C) 2015 The Gravitee team (http://gravitee.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.gravitee.gateway.services.sync.cache;

import com.hazelcast.core.HazelcastInstance;
import org.springframework.beans.factory.annotation.Autowired;

import java.util.Map;

/**
 * Thin facade over the Hazelcast instance that hands out named distributed maps
 * used as caches by the sync service.
 *
 * @author David BRASSELY (david.brassely at graviteesource.com)
 * @author GraviteeSource Team
 */
public final class CacheManager {

    @Autowired
    private HazelcastInstance hzInstance;

    /**
     * Returns the distributed map registered under the given name.
     *
     * @param name name of the cache to look up.
     * @param <K> key type of the cache.
     * @param <V> value type of the cache.
     * @return the Hazelcast-backed map for {@code name}.
     */
    public <K, V> Map<K, V> getCache(String name) {
        final Map<K, V> distributedMap = hzInstance.getMap(name);
        return distributedMap;
    }

}
gravitee-io/gateway
gravitee-gateway-services/gravitee-gateway-services-sync/src/main/java/io/gravitee/gateway/services/sync/cache/CacheManager.java
Java
apache-2.0
1,110
/* * Copyright 2017 Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <folly/detail/CacheLocality.h> #include <folly/portability/GTest.h> #include <sched.h> #include <memory> #include <thread> #include <type_traits> #include <unordered_map> #include <glog/logging.h> using namespace folly::detail; /// This is the relevant nodes from a production box's sysfs tree. If you /// think this map is ugly you should see the version of this test that /// used a real directory tree. To reduce the chance of testing error /// I haven't tried to remove the common prefix static std::unordered_map<std::string, std::string> fakeSysfsTree = { {"/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list", "0,17"}, {"/sys/devices/system/cpu/cpu0/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu0/cache/index1/shared_cpu_list", "0,17"}, {"/sys/devices/system/cpu/cpu0/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu0/cache/index2/shared_cpu_list", "0,17"}, {"/sys/devices/system/cpu/cpu0/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu0/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu0/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu1/cache/index0/shared_cpu_list", "1,18"}, {"/sys/devices/system/cpu/cpu1/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu1/cache/index1/shared_cpu_list", "1,18"}, {"/sys/devices/system/cpu/cpu1/cache/index1/type", "Instruction"}, 
{"/sys/devices/system/cpu/cpu1/cache/index2/shared_cpu_list", "1,18"}, {"/sys/devices/system/cpu/cpu1/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu1/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu1/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu2/cache/index0/shared_cpu_list", "2,19"}, {"/sys/devices/system/cpu/cpu2/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu2/cache/index1/shared_cpu_list", "2,19"}, {"/sys/devices/system/cpu/cpu2/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu2/cache/index2/shared_cpu_list", "2,19"}, {"/sys/devices/system/cpu/cpu2/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu2/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu2/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu3/cache/index0/shared_cpu_list", "3,20"}, {"/sys/devices/system/cpu/cpu3/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu3/cache/index1/shared_cpu_list", "3,20"}, {"/sys/devices/system/cpu/cpu3/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu3/cache/index2/shared_cpu_list", "3,20"}, {"/sys/devices/system/cpu/cpu3/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu3/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu3/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu4/cache/index0/shared_cpu_list", "4,21"}, {"/sys/devices/system/cpu/cpu4/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu4/cache/index1/shared_cpu_list", "4,21"}, {"/sys/devices/system/cpu/cpu4/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu4/cache/index2/shared_cpu_list", "4,21"}, {"/sys/devices/system/cpu/cpu4/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu4/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu4/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu5/cache/index0/shared_cpu_list", "5-6"}, 
{"/sys/devices/system/cpu/cpu5/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu5/cache/index1/shared_cpu_list", "5-6"}, {"/sys/devices/system/cpu/cpu5/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu5/cache/index2/shared_cpu_list", "5-6"}, {"/sys/devices/system/cpu/cpu5/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu5/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu5/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu6/cache/index0/shared_cpu_list", "5-6"}, {"/sys/devices/system/cpu/cpu6/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu6/cache/index1/shared_cpu_list", "5-6"}, {"/sys/devices/system/cpu/cpu6/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu6/cache/index2/shared_cpu_list", "5-6"}, {"/sys/devices/system/cpu/cpu6/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu6/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu6/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu7/cache/index0/shared_cpu_list", "7,22"}, {"/sys/devices/system/cpu/cpu7/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu7/cache/index1/shared_cpu_list", "7,22"}, {"/sys/devices/system/cpu/cpu7/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu7/cache/index2/shared_cpu_list", "7,22"}, {"/sys/devices/system/cpu/cpu7/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu7/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu7/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu8/cache/index0/shared_cpu_list", "8,23"}, {"/sys/devices/system/cpu/cpu8/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu8/cache/index1/shared_cpu_list", "8,23"}, {"/sys/devices/system/cpu/cpu8/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu8/cache/index2/shared_cpu_list", "8,23"}, {"/sys/devices/system/cpu/cpu8/cache/index2/type", "Unified"}, 
{"/sys/devices/system/cpu/cpu8/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu8/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu9/cache/index0/shared_cpu_list", "9,24"}, {"/sys/devices/system/cpu/cpu9/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu9/cache/index1/shared_cpu_list", "9,24"}, {"/sys/devices/system/cpu/cpu9/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu9/cache/index2/shared_cpu_list", "9,24"}, {"/sys/devices/system/cpu/cpu9/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu9/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu9/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu10/cache/index0/shared_cpu_list", "10,25"}, {"/sys/devices/system/cpu/cpu10/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu10/cache/index1/shared_cpu_list", "10,25"}, {"/sys/devices/system/cpu/cpu10/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu10/cache/index2/shared_cpu_list", "10,25"}, {"/sys/devices/system/cpu/cpu10/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu10/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu10/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu11/cache/index0/shared_cpu_list", "11,26"}, {"/sys/devices/system/cpu/cpu11/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu11/cache/index1/shared_cpu_list", "11,26"}, {"/sys/devices/system/cpu/cpu11/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu11/cache/index2/shared_cpu_list", "11,26"}, {"/sys/devices/system/cpu/cpu11/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu11/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu11/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu12/cache/index0/shared_cpu_list", "12,27"}, {"/sys/devices/system/cpu/cpu12/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu12/cache/index1/shared_cpu_list", "12,27"}, 
{"/sys/devices/system/cpu/cpu12/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu12/cache/index2/shared_cpu_list", "12,27"}, {"/sys/devices/system/cpu/cpu12/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu12/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu12/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu13/cache/index0/shared_cpu_list", "13,28"}, {"/sys/devices/system/cpu/cpu13/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu13/cache/index1/shared_cpu_list", "13,28"}, {"/sys/devices/system/cpu/cpu13/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu13/cache/index2/shared_cpu_list", "13,28"}, {"/sys/devices/system/cpu/cpu13/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu13/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu13/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu14/cache/index0/shared_cpu_list", "14,29"}, {"/sys/devices/system/cpu/cpu14/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu14/cache/index1/shared_cpu_list", "14,29"}, {"/sys/devices/system/cpu/cpu14/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu14/cache/index2/shared_cpu_list", "14,29"}, {"/sys/devices/system/cpu/cpu14/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu14/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu14/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu15/cache/index0/shared_cpu_list", "15,30"}, {"/sys/devices/system/cpu/cpu15/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu15/cache/index1/shared_cpu_list", "15,30"}, {"/sys/devices/system/cpu/cpu15/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu15/cache/index2/shared_cpu_list", "15,30"}, {"/sys/devices/system/cpu/cpu15/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu15/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu15/cache/index3/type", 
"Unified"}, {"/sys/devices/system/cpu/cpu16/cache/index0/shared_cpu_list", "16,31"}, {"/sys/devices/system/cpu/cpu16/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu16/cache/index1/shared_cpu_list", "16,31"}, {"/sys/devices/system/cpu/cpu16/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu16/cache/index2/shared_cpu_list", "16,31"}, {"/sys/devices/system/cpu/cpu16/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu16/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu16/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu17/cache/index0/shared_cpu_list", "0,17"}, {"/sys/devices/system/cpu/cpu17/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu17/cache/index1/shared_cpu_list", "0,17"}, {"/sys/devices/system/cpu/cpu17/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu17/cache/index2/shared_cpu_list", "0,17"}, {"/sys/devices/system/cpu/cpu17/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu17/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu17/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu18/cache/index0/shared_cpu_list", "1,18"}, {"/sys/devices/system/cpu/cpu18/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu18/cache/index1/shared_cpu_list", "1,18"}, {"/sys/devices/system/cpu/cpu18/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu18/cache/index2/shared_cpu_list", "1,18"}, {"/sys/devices/system/cpu/cpu18/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu18/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu18/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu19/cache/index0/shared_cpu_list", "2,19"}, {"/sys/devices/system/cpu/cpu19/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu19/cache/index1/shared_cpu_list", "2,19"}, {"/sys/devices/system/cpu/cpu19/cache/index1/type", "Instruction"}, 
{"/sys/devices/system/cpu/cpu19/cache/index2/shared_cpu_list", "2,19"}, {"/sys/devices/system/cpu/cpu19/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu19/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu19/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu20/cache/index0/shared_cpu_list", "3,20"}, {"/sys/devices/system/cpu/cpu20/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu20/cache/index1/shared_cpu_list", "3,20"}, {"/sys/devices/system/cpu/cpu20/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu20/cache/index2/shared_cpu_list", "3,20"}, {"/sys/devices/system/cpu/cpu20/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu20/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu20/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu21/cache/index0/shared_cpu_list", "4,21"}, {"/sys/devices/system/cpu/cpu21/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu21/cache/index1/shared_cpu_list", "4,21"}, {"/sys/devices/system/cpu/cpu21/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu21/cache/index2/shared_cpu_list", "4,21"}, {"/sys/devices/system/cpu/cpu21/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu21/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu21/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu22/cache/index0/shared_cpu_list", "7,22"}, {"/sys/devices/system/cpu/cpu22/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu22/cache/index1/shared_cpu_list", "7,22"}, {"/sys/devices/system/cpu/cpu22/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu22/cache/index2/shared_cpu_list", "7,22"}, {"/sys/devices/system/cpu/cpu22/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu22/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu22/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu23/cache/index0/shared_cpu_list", "8,23"}, 
{"/sys/devices/system/cpu/cpu23/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu23/cache/index1/shared_cpu_list", "8,23"}, {"/sys/devices/system/cpu/cpu23/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu23/cache/index2/shared_cpu_list", "8,23"}, {"/sys/devices/system/cpu/cpu23/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu23/cache/index3/shared_cpu_list", "0-8,17-23"}, {"/sys/devices/system/cpu/cpu23/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu24/cache/index0/shared_cpu_list", "9,24"}, {"/sys/devices/system/cpu/cpu24/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu24/cache/index1/shared_cpu_list", "9,24"}, {"/sys/devices/system/cpu/cpu24/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu24/cache/index2/shared_cpu_list", "9,24"}, {"/sys/devices/system/cpu/cpu24/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu24/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu24/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu25/cache/index0/shared_cpu_list", "10,25"}, {"/sys/devices/system/cpu/cpu25/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu25/cache/index1/shared_cpu_list", "10,25"}, {"/sys/devices/system/cpu/cpu25/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu25/cache/index2/shared_cpu_list", "10,25"}, {"/sys/devices/system/cpu/cpu25/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu25/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu25/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu26/cache/index0/shared_cpu_list", "11,26"}, {"/sys/devices/system/cpu/cpu26/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu26/cache/index1/shared_cpu_list", "11,26"}, {"/sys/devices/system/cpu/cpu26/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu26/cache/index2/shared_cpu_list", "11,26"}, {"/sys/devices/system/cpu/cpu26/cache/index2/type", "Unified"}, 
{"/sys/devices/system/cpu/cpu26/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu26/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu27/cache/index0/shared_cpu_list", "12,27"}, {"/sys/devices/system/cpu/cpu27/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu27/cache/index1/shared_cpu_list", "12,27"}, {"/sys/devices/system/cpu/cpu27/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu27/cache/index2/shared_cpu_list", "12,27"}, {"/sys/devices/system/cpu/cpu27/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu27/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu27/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu28/cache/index0/shared_cpu_list", "13,28"}, {"/sys/devices/system/cpu/cpu28/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu28/cache/index1/shared_cpu_list", "13,28"}, {"/sys/devices/system/cpu/cpu28/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu28/cache/index2/shared_cpu_list", "13,28"}, {"/sys/devices/system/cpu/cpu28/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu28/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu28/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu29/cache/index0/shared_cpu_list", "14,29"}, {"/sys/devices/system/cpu/cpu29/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu29/cache/index1/shared_cpu_list", "14,29"}, {"/sys/devices/system/cpu/cpu29/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu29/cache/index2/shared_cpu_list", "14,29"}, {"/sys/devices/system/cpu/cpu29/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu29/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu29/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu30/cache/index0/shared_cpu_list", "15,30"}, {"/sys/devices/system/cpu/cpu30/cache/index0/type", "Data"}, 
{"/sys/devices/system/cpu/cpu30/cache/index1/shared_cpu_list", "15,30"}, {"/sys/devices/system/cpu/cpu30/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu30/cache/index2/shared_cpu_list", "15,30"}, {"/sys/devices/system/cpu/cpu30/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu30/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu30/cache/index3/type", "Unified"}, {"/sys/devices/system/cpu/cpu31/cache/index0/shared_cpu_list", "16,31"}, {"/sys/devices/system/cpu/cpu31/cache/index0/type", "Data"}, {"/sys/devices/system/cpu/cpu31/cache/index1/shared_cpu_list", "16,31"}, {"/sys/devices/system/cpu/cpu31/cache/index1/type", "Instruction"}, {"/sys/devices/system/cpu/cpu31/cache/index2/shared_cpu_list", "16,31"}, {"/sys/devices/system/cpu/cpu31/cache/index2/type", "Unified"}, {"/sys/devices/system/cpu/cpu31/cache/index3/shared_cpu_list", "9-16,24-31"}, {"/sys/devices/system/cpu/cpu31/cache/index3/type", "Unified"}}; /// This is the expected CacheLocality structure for fakeSysfsTree static const CacheLocality nonUniformExampleLocality = {32, {16, 16, 2}, {0, 2, 4, 6, 8, 10, 11, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 1, 3, 5, 7, 9, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}}; TEST(CacheLocality, FakeSysfs) { auto parsed = CacheLocality::readFromSysfsTree([](std::string name) { auto iter = fakeSysfsTree.find(name); return iter == fakeSysfsTree.end() ? 
std::string() : iter->second; }); auto& expected = nonUniformExampleLocality; EXPECT_EQ(expected.numCpus, parsed.numCpus); EXPECT_EQ(expected.numCachesByLevel, parsed.numCachesByLevel); EXPECT_EQ(expected.localityIndexByCpu, parsed.localityIndexByCpu); } #if FOLLY_HAVE_LINUX_VDSO TEST(Getcpu, VdsoGetcpu) { unsigned cpu; Getcpu::resolveVdsoFunc()(&cpu, nullptr, nullptr); EXPECT_TRUE(cpu < CPU_SETSIZE); } #endif #ifdef FOLLY_TLS TEST(ThreadId, SimpleTls) { unsigned cpu = 0; auto rv = folly::detail::FallbackGetcpu<SequentialThreadId<std::atomic>>::getcpu( &cpu, nullptr, nullptr); EXPECT_EQ(rv, 0); EXPECT_TRUE(cpu > 0); unsigned again; folly::detail::FallbackGetcpu<SequentialThreadId<std::atomic>>::getcpu( &again, nullptr, nullptr); EXPECT_EQ(cpu, again); } #endif TEST(ThreadId, SimplePthread) { unsigned cpu = 0; auto rv = folly::detail::FallbackGetcpu<HashingThreadId>::getcpu( &cpu, nullptr, nullptr); EXPECT_EQ(rv, 0); EXPECT_TRUE(cpu > 0); unsigned again; folly::detail::FallbackGetcpu<HashingThreadId>::getcpu( &again, nullptr, nullptr); EXPECT_EQ(cpu, again); } #ifdef FOLLY_TLS static FOLLY_TLS unsigned testingCpu = 0; static int testingGetcpu(unsigned* cpu, unsigned* node, void* /* unused */) { if (cpu != nullptr) { *cpu = testingCpu; } if (node != nullptr) { *node = testingCpu; } return 0; } #endif TEST(AccessSpreader, Simple) { for (size_t s = 1; s < 200; ++s) { EXPECT_LT(AccessSpreader<>::current(s), s); } } #ifdef FOLLY_TLS #define DECLARE_SPREADER_TAG(tag, locality, func) \ namespace { \ template <typename dummy> \ struct tag {}; \ } \ namespace folly { \ namespace detail { \ template <> \ const CacheLocality& CacheLocality::system<tag>() { \ static auto* inst = new CacheLocality(locality); \ return *inst; \ } \ template <> \ Getcpu::Func AccessSpreader<tag>::pickGetcpuFunc() { \ return func; \ } \ } \ } DECLARE_SPREADER_TAG(ManualTag, CacheLocality::uniform(16), testingGetcpu) TEST(AccessSpreader, Wrapping) { // this test won't pass unless locality.numCpus 
divides kMaxCpus auto numCpus = CacheLocality::system<ManualTag>().numCpus; EXPECT_EQ(0, 128 % numCpus); for (size_t s = 1; s < 200; ++s) { for (size_t c = 0; c < 400; ++c) { testingCpu = c; auto observed = AccessSpreader<ManualTag>::current(s); testingCpu = c % numCpus; auto expected = AccessSpreader<ManualTag>::current(s); EXPECT_EQ(expected, observed) << "numCpus=" << numCpus << ", s=" << s << ", c=" << c; } } } #endif
charsyam/folly
folly/test/CacheLocalityTest.cpp
C++
apache-2.0
25,381
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.codecommit.model;

import javax.annotation.Generated;

/**
 * <p>
 * The number of approvals required for the approval rule exceeds the maximum number allowed.
 * </p>
 *
 * NOTE(review): this class is code-generator output (see the {@code @Generated} annotation below);
 * do not hand-edit logic here — changes will be overwritten on the next SDK regeneration.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class MaximumNumberOfApprovalsExceededException extends com.amazonaws.services.codecommit.model.AWSCodeCommitException {
    // Stable serialization id required because the parent hierarchy is Serializable.
    private static final long serialVersionUID = 1L;

    /**
     * Constructs a new MaximumNumberOfApprovalsExceededException with the specified error message.
     *
     * @param message
     *        Describes the error encountered.
     */
    public MaximumNumberOfApprovalsExceededException(String message) {
        super(message);
    }

}
aws/aws-sdk-java
aws-java-sdk-codecommit/src/main/java/com/amazonaws/services/codecommit/model/MaximumNumberOfApprovalsExceededException.java
Java
apache-2.0
1,318
#define NETCORE
using System;
using System.Linq;
using System.Reflection;

namespace Foundatio.Force.DeepCloner.Helpers
{
	/// <summary>
	/// Reflection extension helpers that paper over the API differences between
	/// the classic .NET Framework reflection surface and the .NET Core
	/// <see cref="TypeInfo"/>-based surface. The active branch is selected by the
	/// NETCORE symbol defined at the top of this file.
	/// </summary>
	internal static class ReflectionHelper
	{
		/// <summary>True when <paramref name="t"/> is an enum type.</summary>
		public static bool IsEnum(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().IsEnum;
#else
			return t.IsEnum;
#endif
		}

		/// <summary>True when <paramref name="t"/> is a value type (struct or enum).</summary>
		public static bool IsValueType(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().IsValueType;
#else
			return t.IsValueType;
#endif
		}

		/// <summary>True when <paramref name="t"/> is a reference (class) type.</summary>
		public static bool IsClass(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().IsClass;
#else
			return t.IsClass;
#endif
		}

		/// <summary>Returns the direct base type of <paramref name="t"/> (null for System.Object / interfaces).</summary>
		public static Type BaseType(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().BaseType;
#else
			return t.BaseType;
#endif
		}

		/// <summary>
		/// All instance fields (public and non-public) declared on <paramref name="t"/>.
		/// NOTE(review): on NETCORE this returns only DECLARED fields (no inherited ones),
		/// while the non-core branch walks the full instance-field set — confirm callers
		/// walk the base-type chain themselves on NETCORE.
		/// </summary>
		public static FieldInfo[] GetAllFields(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().DeclaredFields.Where(x => !x.IsStatic).ToArray();
#else
			return t.GetFields(BindingFlags.Instance | BindingFlags.NonPublic | BindingFlags.Public);
#endif
		}

		/// <summary>Public instance properties of <paramref name="t"/> (declared-only on NETCORE).</summary>
		public static PropertyInfo[] GetPublicProperties(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().DeclaredProperties.ToArray();
#else
			return t.GetProperties(BindingFlags.Instance | BindingFlags.Public);
#endif
		}

		/// <summary>Instance fields declared directly on <paramref name="t"/> (inherited fields excluded).</summary>
		public static FieldInfo[] GetDeclaredFields(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().DeclaredFields.Where(x => !x.IsStatic).ToArray();
#else
			return t.GetFields(BindingFlags.Instance | BindingFlags.NonPublic | BindingFlags.Public | BindingFlags.DeclaredOnly);
#endif
		}

		/// <summary>
		/// Non-public instance constructors of <paramref name="t"/>.
		/// NOTE(review): the NETCORE branch returns ALL declared constructors (public
		/// included) — DeclaredConstructors does not filter by visibility. Presumably
		/// callers tolerate the extra entries; verify before relying on "private only".
		/// </summary>
		public static ConstructorInfo[] GetPrivateConstructors(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().DeclaredConstructors.ToArray();
#else
			return t.GetConstructors(BindingFlags.NonPublic | BindingFlags.Instance);
#endif
		}

		/// <summary>
		/// Public instance constructors of <paramref name="t"/>.
		/// NOTE(review): same caveat as GetPrivateConstructors — on NETCORE this also
		/// includes non-public and static constructors; confirm callers filter.
		/// </summary>
		public static ConstructorInfo[] GetPublicConstructors(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().DeclaredConstructors.ToArray();
#else
			return t.GetConstructors(BindingFlags.Public | BindingFlags.Instance);
#endif
		}

		/// <summary>Non-public instance method named <paramref name="methodName"/> (declared-only on NETCORE, visibility unfiltered there).</summary>
		public static MethodInfo GetPrivateMethod(this Type t, string methodName)
		{
#if NETCORE
			return t.GetTypeInfo().GetDeclaredMethod(methodName);
#else
			return t.GetMethod(methodName, BindingFlags.NonPublic | BindingFlags.Instance);
#endif
		}

		/// <summary>Method named <paramref name="methodName"/> (declared-only on NETCORE).</summary>
		public static MethodInfo GetMethod(this Type t, string methodName)
		{
#if NETCORE
			return t.GetTypeInfo().GetDeclaredMethod(methodName);
#else
			return t.GetMethod(methodName);
#endif
		}

		/// <summary>Non-public static method named <paramref name="methodName"/> (declared-only on NETCORE, visibility unfiltered there).</summary>
		public static MethodInfo GetPrivateStaticMethod(this Type t, string methodName)
		{
#if NETCORE
			return t.GetTypeInfo().GetDeclaredMethod(methodName);
#else
			return t.GetMethod(methodName, BindingFlags.NonPublic | BindingFlags.Static);
#endif
		}

		/// <summary>Non-public instance field named <paramref name="fieldName"/> (declared-only on NETCORE, visibility unfiltered there).</summary>
		public static FieldInfo GetPrivateField(this Type t, string fieldName)
		{
#if NETCORE
			return t.GetTypeInfo().GetDeclaredField(fieldName);
#else
			return t.GetField(fieldName, BindingFlags.NonPublic | BindingFlags.Instance);
#endif
		}

		/// <summary>Non-public static field named <paramref name="fieldName"/> (declared-only on NETCORE, visibility unfiltered there).</summary>
		public static FieldInfo GetPrivateStaticField(this Type t, string fieldName)
		{
#if NETCORE
			return t.GetTypeInfo().GetDeclaredField(fieldName);
#else
			return t.GetField(fieldName, BindingFlags.NonPublic | BindingFlags.Static);
#endif
		}

#if NETCORE
		/// <summary>
		/// Walks the base-type chain and reports whether any ancestor's simple name
		/// equals <paramref name="typeName"/>. Name-based on purpose: avoids needing a
		/// Type reference to an assembly that may not be loaded on this platform.
		/// </summary>
		public static bool IsSubclassOfTypeByName(this Type t, string typeName)
		{
			while (t != null)
			{
				if (t.Name == typeName)
					return true;
				t = t.BaseType();
			}

			return false;
		}
#endif

#if NETCORE
		/// <summary>Assignability check via TypeInfo (NETCORE lacks the Type-level overload).</summary>
		public static bool IsAssignableFrom(this Type from, Type to)
		{
			return from.GetTypeInfo().IsAssignableFrom(to.GetTypeInfo());
		}

		/// <summary>True when <paramref name="to"/> is an instance of <paramref name="from"/>.</summary>
		public static bool IsInstanceOfType(this Type from, object to)
		{
			return from.IsAssignableFrom(to.GetType());
		}
#endif

		/// <summary>Closed generic type arguments of <paramref name="t"/>.</summary>
		public static Type[] GenericArguments(this Type t)
		{
#if NETCORE
			return t.GetTypeInfo().GenericTypeArguments;
#else
			return t.GetGenericArguments();
#endif
		}
	}
}
FoundatioFx/Foundatio
src/Foundatio/DeepCloner/Helpers/ReflectionHelper.cs
C#
apache-2.0
3,817
"""Translation helper functions.""" import locale import os import re import sys import gettext as gettext_module from cStringIO import StringIO from django.utils.importlib import import_module from django.utils.safestring import mark_safe, SafeData from django.utils.thread_support import currentThread # Translations are cached in a dictionary for every language+app tuple. # The active translations are stored by threadid to make them thread local. _translations = {} _active = {} # The default translation is based on the settings file. _default = None # This is a cache for normalized accept-header languages to prevent multiple # file lookups when checking the same locale on repeated requests. _accepted = {} # Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9. accept_language_re = re.compile(r''' ([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*" (?:;q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8" (?:\s*,\s*|$) # Multiple accepts per header. ''', re.VERBOSE) def to_locale(language, to_lower=False): """ Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is True, the last component is lower-cased (en_us). """ p = language.find('-') if p >= 0: if to_lower: return language[:p].lower()+'_'+language[p+1:].lower() else: return language[:p].lower()+'_'+language[p+1:].upper() else: return language.lower() def to_language(locale): """Turns a locale name (en_US) into a language name (en-us).""" p = locale.find('_') if p >= 0: return locale[:p].lower()+'-'+locale[p+1:].lower() else: return locale.lower() class DjangoTranslation(gettext_module.GNUTranslations): """ This class sets up the GNUTranslations context with regard to output charset. Django uses a defined DEFAULT_CHARSET as the output charset on Python 2.4. With Python 2.3, use DjangoTranslation23. 
""" def __init__(self, *args, **kw): from django.conf import settings gettext_module.GNUTranslations.__init__(self, *args, **kw) # Starting with Python 2.4, there's a function to define # the output charset. Before 2.4, the output charset is # identical with the translation file charset. try: self.set_output_charset('utf-8') except AttributeError: pass self.django_output_charset = 'utf-8' self.__language = '??' def merge(self, other): self._catalog.update(other._catalog) def set_language(self, language): self.__language = language def language(self): return self.__language def __repr__(self): return "<DjangoTranslation lang:%s>" % self.__language class DjangoTranslation23(DjangoTranslation): """ Compatibility class that is only used with Python 2.3. Python 2.3 doesn't support set_output_charset on translation objects and needs this wrapper class to make sure input charsets from translation files are correctly translated to output charsets. With a full switch to Python 2.4, this can be removed from the source. """ def gettext(self, msgid): res = self.ugettext(msgid) return res.encode(self.django_output_charset) def ngettext(self, msgid1, msgid2, n): res = self.ungettext(msgid1, msgid2, n) return res.encode(self.django_output_charset) def translation(language): """ Returns a translation object. This translation object will be constructed out of multiple GNUTranslations objects by merging their catalogs. It will construct a object for the requested language and add a fallback to the default language, if it's different from the requested language. 
""" global _translations t = _translations.get(language, None) if t is not None: return t from django.conf import settings # set up the right translation class klass = DjangoTranslation if sys.version_info < (2, 4): klass = DjangoTranslation23 globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale') if settings.SETTINGS_MODULE is not None: parts = settings.SETTINGS_MODULE.split('.') project = import_module(parts[0]) projectpath = os.path.join(os.path.dirname(project.__file__), 'locale') else: projectpath = None def _fetch(lang, fallback=None): global _translations loc = to_locale(lang) res = _translations.get(lang, None) if res is not None: return res def _translation(path): try: t = gettext_module.translation('django', path, [loc], klass) t.set_language(lang) return t except IOError, e: return None res = _translation(globalpath) # We want to ensure that, for example, "en-gb" and "en-us" don't share # the same translation object (thus, merging en-us with a local update # doesn't affect en-gb), even though they will both use the core "en" # translation. So we have to subvert Python's internal gettext caching. 
base_lang = lambda x: x.split('-', 1)[0] if base_lang(lang) in [base_lang(trans) for trans in _translations]: res._info = res._info.copy() res._catalog = res._catalog.copy() def _merge(path): t = _translation(path) if t is not None: if res is None: return t else: res.merge(t) return res for localepath in settings.LOCALE_PATHS: if os.path.isdir(localepath): res = _merge(localepath) if projectpath and os.path.isdir(projectpath): res = _merge(projectpath) for appname in settings.INSTALLED_APPS: app = import_module(appname) apppath = os.path.join(os.path.dirname(app.__file__), 'locale') if os.path.isdir(apppath): res = _merge(apppath) if res is None: if fallback is not None: res = fallback else: return gettext_module.NullTranslations() _translations[lang] = res return res default_translation = _fetch(settings.LANGUAGE_CODE) current_translation = _fetch(language, fallback=default_translation) return current_translation def activate(language): """ Fetches the translation object for a given tuple of application name and language and installs it as the current translation object for the current thread. """ _active[currentThread()] = translation(language) def deactivate(): """ Deinstalls the currently active translation object so that further _ calls will resolve against the default translation object, again. """ global _active if currentThread() in _active: del _active[currentThread()] def deactivate_all(): """ Makes the active translation object a NullTranslations() instance. This is useful when we want delayed translations to appear as the original string for some reason. """ _active[currentThread()] = gettext_module.NullTranslations() def get_language(): """Returns the currently selected language.""" t = _active.get(currentThread(), None) if t is not None: try: return to_language(t.language()) except AttributeError: pass # If we don't have a real translation object, assume it's the default language. 
from django.conf import settings return settings.LANGUAGE_CODE def get_language_bidi(): """ Returns selected language's BiDi layout. False = left-to-right layout True = right-to-left layout """ from django.conf import settings base_lang = get_language().split('-')[0] return base_lang in settings.LANGUAGES_BIDI def catalog(): """ Returns the current active catalog for further processing. This can be used if you need to modify the catalog or want to access the whole message catalog instead of just translating one string. """ global _default, _active t = _active.get(currentThread(), None) if t is not None: return t if _default is None: from django.conf import settings _default = translation(settings.LANGUAGE_CODE) return _default def do_translate(message, translation_function): """ Translates 'message' using the given 'translation_function' name -- which will be either gettext or ugettext. It uses the current thread to find the translation object to use. If no current translation is activated, the message will be run through the default translation object. """ global _default, _active t = _active.get(currentThread(), None) if t is not None: result = getattr(t, translation_function)(message) else: if _default is None: from django.conf import settings _default = translation(settings.LANGUAGE_CODE) result = getattr(_default, translation_function)(message) if isinstance(message, SafeData): return mark_safe(result) return result def gettext(message): return do_translate(message, 'gettext') def ugettext(message): return do_translate(message, 'ugettext') def gettext_noop(message): """ Marks strings for translation but doesn't translate them now. This can be used to store strings in global variables that should stay in the base language (because they might be used externally) and will be translated later. 
""" return message def do_ntranslate(singular, plural, number, translation_function): global _default, _active t = _active.get(currentThread(), None) if t is not None: return getattr(t, translation_function)(singular, plural, number) if _default is None: from django.conf import settings _default = translation(settings.LANGUAGE_CODE) return getattr(_default, translation_function)(singular, plural, number) def ngettext(singular, plural, number): """ Returns a UTF-8 bytestring of the translation of either the singular or plural, based on the number. """ return do_ntranslate(singular, plural, number, 'ngettext') def ungettext(singular, plural, number): """ Returns a unicode strings of the translation of either the singular or plural, based on the number. """ return do_ntranslate(singular, plural, number, 'ungettext') def check_for_language(lang_code): """ Checks whether there is a global language file for the given language code. This is used to decide whether a user-provided language is available. This is only used for language codes from either the cookies or session. """ from django.conf import settings globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale') if gettext_module.find('django', globalpath, [to_locale(lang_code)]) is not None: return True else: return False def get_language_from_request(request): """ Analyzes the request to find what language the user wants the system to show. Only languages listed in settings.LANGUAGES are taken into account. If the user requests a sublanguage where we have a main language, we send out the main language. 
""" global _accepted from django.conf import settings globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale') supported = dict(settings.LANGUAGES) if hasattr(request, 'session'): lang_code = request.session.get('django_language', None) if lang_code in supported and lang_code is not None and check_for_language(lang_code): return lang_code lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME) if lang_code and lang_code in supported and check_for_language(lang_code): return lang_code accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '') for accept_lang, unused in parse_accept_lang_header(accept): if accept_lang == '*': break # We have a very restricted form for our language files (no encoding # specifier, since they all must be UTF-8 and only one possible # language each time. So we avoid the overhead of gettext.find() and # work out the MO file manually. # 'normalized' is the root name of the locale in POSIX format (which is # the format used for the directories holding the MO files). normalized = locale.locale_alias.get(to_locale(accept_lang, True)) if not normalized: continue # Remove the default encoding from locale_alias. normalized = normalized.split('.')[0] if normalized in _accepted: # We've seen this locale before and have an MO file for it, so no # need to check again. return _accepted[normalized] for lang, dirname in ((accept_lang, normalized), (accept_lang.split('-')[0], normalized.split('_')[0])): if lang.lower() not in supported: continue langfile = os.path.join(globalpath, dirname, 'LC_MESSAGES', 'django.mo') if os.path.exists(langfile): _accepted[normalized] = lang return lang return settings.LANGUAGE_CODE def get_date_formats(): """ Checks whether translation files provide a translation for some technical message ID to store date and time formats. If it doesn't contain one, the formats provided in the settings will be used. 
""" from django.conf import settings date_format = ugettext('DATE_FORMAT') datetime_format = ugettext('DATETIME_FORMAT') time_format = ugettext('TIME_FORMAT') if date_format == 'DATE_FORMAT': date_format = settings.DATE_FORMAT if datetime_format == 'DATETIME_FORMAT': datetime_format = settings.DATETIME_FORMAT if time_format == 'TIME_FORMAT': time_format = settings.TIME_FORMAT return date_format, datetime_format, time_format def get_partial_date_formats(): """ Checks whether translation files provide a translation for some technical message ID to store partial date formats. If it doesn't contain one, the formats provided in the settings will be used. """ from django.conf import settings year_month_format = ugettext('YEAR_MONTH_FORMAT') month_day_format = ugettext('MONTH_DAY_FORMAT') if year_month_format == 'YEAR_MONTH_FORMAT': year_month_format = settings.YEAR_MONTH_FORMAT if month_day_format == 'MONTH_DAY_FORMAT': month_day_format = settings.MONTH_DAY_FORMAT return year_month_format, month_day_format dot_re = re.compile(r'\S') def blankout(src, char): """ Changes every non-whitespace character to the given char. Used in the templatize function. """ return dot_re.sub(char, src) inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""") block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""") endblock_re = re.compile(r"""^\s*endblocktrans$""") plural_re = re.compile(r"""^\s*plural$""") constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""") def templatize(src): """ Turns a Django template into something that is understood by xgettext. It does so by translating the Django translation tags into standard gettext function invocations. 
""" from django.template import Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK out = StringIO() intrans = False inplural = False singular = [] plural = [] for t in Lexer(src, None).tokenize(): if intrans: if t.token_type == TOKEN_BLOCK: endbmatch = endblock_re.match(t.contents) pluralmatch = plural_re.match(t.contents) if endbmatch: if inplural: out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural))) for part in singular: out.write(blankout(part, 'S')) for part in plural: out.write(blankout(part, 'P')) else: out.write(' gettext(%r) ' % ''.join(singular)) for part in singular: out.write(blankout(part, 'S')) intrans = False inplural = False singular = [] plural = [] elif pluralmatch: inplural = True else: raise SyntaxError("Translation blocks must not include other block tags: %s" % t.contents) elif t.token_type == TOKEN_VAR: if inplural: plural.append('%%(%s)s' % t.contents) else: singular.append('%%(%s)s' % t.contents) elif t.token_type == TOKEN_TEXT: if inplural: plural.append(t.contents) else: singular.append(t.contents) else: if t.token_type == TOKEN_BLOCK: imatch = inline_re.match(t.contents) bmatch = block_re.match(t.contents) cmatches = constant_re.findall(t.contents) if imatch: g = imatch.group(1) if g[0] == '"': g = g.strip('"') elif g[0] == "'": g = g.strip("'") out.write(' gettext(%r) ' % g) elif bmatch: for fmatch in constant_re.findall(t.contents): out.write(' _(%s) ' % fmatch) intrans = True inplural = False singular = [] plural = [] elif cmatches: for cmatch in cmatches: out.write(' _(%s) ' % cmatch) else: out.write(blankout(t.contents, 'B')) elif t.token_type == TOKEN_VAR: parts = t.contents.split('|') cmatch = constant_re.match(parts[0]) if cmatch: out.write(' _(%s) ' % cmatch.group(1)) for p in parts[1:]: if p.find(':_(') >= 0: out.write(' %s ' % p.split(':',1)[1]) else: out.write(blankout(p, 'F')) else: out.write(blankout(t.contents, 'X')) return out.getvalue() def parse_accept_lang_header(lang_string): """ Parses the lang_string, 
which is the body of an HTTP Accept-Language header, and returns a list of (lang, q-value), ordered by 'q' values. Any format errors in lang_string results in an empty list being returned. """ result = [] pieces = accept_language_re.split(lang_string) if pieces[-1]: return [] for i in range(0, len(pieces) - 1, 3): first, lang, priority = pieces[i : i + 3] if first: return [] priority = priority and float(priority) or 1.0 result.append((lang, priority)) result.sort(lambda x, y: -cmp(x[1], y[1])) return result
greggian/TapdIn
django/utils/translation/trans_real.py
Python
apache-2.0
20,192
package org.myrobotlab.framework; import static org.myrobotlab.framework.StatusLevel.DEBUG; import static org.myrobotlab.framework.StatusLevel.ERROR; import static org.myrobotlab.framework.StatusLevel.INFO; import static org.myrobotlab.framework.StatusLevel.SUCCESS; import static org.myrobotlab.framework.StatusLevel.WARN; import java.io.IOException; import java.io.PrintWriter; import java.io.Serializable; import java.io.StringWriter; import java.util.Objects; import org.myrobotlab.codec.CodecUtils; import org.myrobotlab.logging.Level; import org.myrobotlab.logging.LoggerFactory; import org.myrobotlab.logging.LoggingFactory; import org.slf4j.Logger; /** * Goal is to have a very simple Pojo with only a few (native Java helper * methods) WARNING !!! - this class used to extend Exception or Throwable - but * the gson serializer would stack overflow with self reference issue * * TODO - allow radix tree searches for "keys" ??? * */ public class Status implements Serializable {// extends Exception { private static final long serialVersionUID = 1L; public final static Logger log = LoggerFactory.getLogger(Status.class); public String name; // service name ??? /** * FIXME - should probably be an enum now that serialization mostly works now * with enums [debug|info|warn|error|success] - yes the last part is different * than "logging" but could still be a status... * */ public String level; /** * The key is the non changing part and good identifier of what went on... For * Exceptions I would recommend the Exception.class.getSimpleName() for the * key, whilst the "detail" is for "changing" detail. This becomes important * when Stati are aggregated - and humans are interested in "high" counts of * specific Status while the details are not important unless diagnosing one. * * Violating Servo limits is a good example - "key" can be "Outside servo * limits". The key can contain spaces and punctuation - the important part is * that it is STATIC. 
* * "details" contain dynamic specifics - for example: "key":"Outside servo * limits", "detail":"servo01 moveTo(75) limit is greater than 100" */ public String key; /** * Dynamic of verbose explanation of the status. e.g. "detail":"servo01 * moveTo(75) limit is greater than 100" or complete stack trace from an * exception */ public String detail; /** * optional source of status */ public Object source; // --- static creation of typed Status objects ---- public static Status debug(String format, Object... args) { Status status = new Status(String.format(format, args)); status.level = DEBUG; return status; } public static Status error(Exception e) { Status s = new Status(e); s.level = ERROR; return s; } public static Status error(String msg) { Status s = new Status(msg); s.level = ERROR; return s; } public static Status error(String format, Object... args) { Status status = new Status(String.format(format, args)); status.level = ERROR; return status; } public static Status warn(String msg) { Status s = new Status(msg); s.level = ERROR; return s; } public static Status warn(String format, Object... args) { Status status = new Status(String.format(format, args)); status.level = WARN; return status; } public static Status info(String msg) { Status s = new Status(msg); s.level = INFO; return s; } public static Status info(String format, Object... 
args) { String formattedInfo = String.format(format, args); Status status = new Status(formattedInfo); status.level = INFO; return status; } public final static String stackToString(final Throwable e) { StringWriter sw; try { sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); e.printStackTrace(pw); } catch (Exception e2) { return "bad stackToString"; } return "------\r\n" + sw.toString() + "------\r\n"; } public Status(Exception e) { this.level = ERROR; StringWriter sw; try { sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); e.printStackTrace(pw); detail = sw.toString(); } catch (Exception e2) { } this.key = String.format("%s - %s", e.getClass().getSimpleName(), e.getMessage()); } public Status(Status s) { if (s == null) { return; } this.name = s.name; this.level = s.level; this.key = s.key; this.detail = s.detail; } /** * for minimal amount of information error is assumed, and info is detail of * an ERROR * * @param detail * d */ public Status(String detail) { this.level = ERROR; this.detail = detail; } public Status(String name, String level, String key, String detail) { this.name = name; this.level = level; this.key = key; this.detail = detail; } public boolean isDebug() { return DEBUG.equals(level); } public boolean isError() { return ERROR.equals(level); } public boolean isInfo() { return INFO.equals(level); } public boolean isWarn() { return WARN.equals(level); } @Override public String toString() { StringBuffer sb = new StringBuffer(); if (name != null) { sb.append(name); sb.append(" "); } if (level != null) { sb.append(level); sb.append(" "); } if (key != null) { sb.append(key); sb.append(" "); } if (detail != null) { sb.append(detail); } return sb.toString(); } static public final Status newInstance(String name, String level, String key, String detail) { Status s = new Status(name, level, key, detail); return s; } @Override public boolean equals(Object o) { if (o == this) return true; if (!(o instanceof Status)) { return false; } 
Status status = (Status) o; return Objects.equals(name, status.name) && Objects.equals(level, status.level) && Objects.equals(key, status.key) && Objects.equals(detail, status.detail); } @Override public int hashCode() { return Objects.hash(name, level, key, detail); } public static void main(String[] args) throws IOException, InterruptedException { LoggingFactory.init(Level.INFO); Status test = new Status("i am pessimistic"); // Status subTest = new Status("i am sub pessimistic"); // test.add(subTest); String json = CodecUtils.toJson(test); Status z = CodecUtils.fromJson(json, Status.class); log.info(json); log.info(z.toString()); } public static Status success() { Status s = new Status(SUCCESS); s.level = SUCCESS; return s; } public boolean isSuccess() { return SUCCESS.equals(level); } public static Status success(String detail) { Status s = new Status(SUCCESS); s.level = SUCCESS; s.detail = detail; return s; } }
MyRobotLab/myrobotlab
src/main/java/org/myrobotlab/framework/Status.java
Java
apache-2.0
7,254
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

using System.Threading;
using Microsoft.CodeAnalysis.CSharp.Extensions;
using Microsoft.CodeAnalysis.CSharp.Syntax;
using Microsoft.CodeAnalysis.Shared.Collections;
using Microsoft.CodeAnalysis.Structure;
using Microsoft.CodeAnalysis.Text;

namespace Microsoft.CodeAnalysis.CSharp.Structure
{
    internal class RegionDirectiveStructureProvider : AbstractSyntaxNodeStructureProvider<RegionDirectiveTriviaSyntax>
    {
        /// <summary>
        /// Computes the collapsed banner for a #region directive: the text after the
        /// directive keyword if any, otherwise the literal "#region" token pair itself.
        /// </summary>
        private static string GetBannerText(DirectiveTriviaSyntax simpleDirective)
        {
            var nameToken = simpleDirective.DirectiveNameToken;
            var headerLength = nameToken.Span.End - simpleDirective.Span.Start;
            var label = simpleDirective.ToString().Substring(headerLength).Trim();

            return label.Length == 0
                ? simpleDirective.HashToken.ToString() + nameToken.ToString()
                : label;
        }

        protected override void CollectBlockSpans(
            SyntaxToken previousToken,
            RegionDirectiveTriviaSyntax regionDirective,
            ref TemporaryArray<BlockSpan> spans,
            BlockStructureOptionProvider optionProvider,
            CancellationToken cancellationToken)
        {
            // An unmatched #region (no #endregion) produces no span.
            var endDirective = regionDirective.GetMatchingDirective(cancellationToken);
            if (endDirective == null)
            {
                return;
            }

            // Always auto-collapse regions for Metadata As Source. These generated files only have one region at
            // the top of the file, which has content like the following:
            //
            //     #region Assembly System.Runtime, Version=4.2.2.0, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a
            //     // C:\Program Files\dotnet\packs\Microsoft.NETCore.App.Ref\3.1.0\ref\netcoreapp3.1\System.Runtime.dll
            //     #endregion
            //
            // For other files, auto-collapse regions based on the user option.
            var collapseWhenCollapsingToDefinitions = optionProvider.GetOption(
                BlockStructureOptions.CollapseRegionsWhenCollapsingToDefinitions, LanguageNames.CSharp);
            var autoCollapse = optionProvider.IsMetadataAsSource || collapseWhenCollapsingToDefinitions;

            var fullSpan = TextSpan.FromBounds(regionDirective.SpanStart, endDirective.Span.End);
            spans.Add(new BlockSpan(
                isCollapsible: true,
                textSpan: fullSpan,
                type: BlockTypes.PreprocessorRegion,
                bannerText: GetBannerText(regionDirective),
                autoCollapse: autoCollapse,
                isDefaultCollapsed: !optionProvider.IsMetadataAsSource));
        }
    }
}
eriawan/roslyn
src/Features/CSharp/Portable/Structure/Providers/RegionDirectiveStructureProvider.cs
C#
apache-2.0
2,849
"""
Support for EBox.

Get data from 'My Usage Page' page:
https://client.ebox.ca/myusage

For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.ebox/
"""
import logging
from datetime import timedelta

import voluptuous as vol

import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
    CONF_USERNAME,
    CONF_PASSWORD,
    CONF_NAME,
    CONF_MONITORED_VARIABLES,
)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.exceptions import PlatformNotReady

_LOGGER = logging.getLogger(__name__)

# Units of measurement for the sensors below.
GIGABITS = "Gb"
PRICE = "CAD"
DAYS = "days"
PERCENT = "%"

DEFAULT_NAME = "EBox"

REQUESTS_TIMEOUT = 15
SCAN_INTERVAL = timedelta(minutes=15)
# Throttles EBoxData.async_update so all sensors share one fetch per window.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)

# sensor key -> [friendly name, unit, icon]
SENSOR_TYPES = {
    "usage": ["Usage", PERCENT, "mdi:percent"],
    "balance": ["Balance", PRICE, "mdi:square-inc-cash"],
    "limit": ["Data limit", GIGABITS, "mdi:download"],
    "days_left": ["Days left", DAYS, "mdi:calendar-today"],
    "before_offpeak_download": ["Download before offpeak", GIGABITS, "mdi:download"],
    "before_offpeak_upload": ["Upload before offpeak", GIGABITS, "mdi:upload"],
    "before_offpeak_total": ["Total before offpeak", GIGABITS, "mdi:download"],
    "offpeak_download": ["Offpeak download", GIGABITS, "mdi:download"],
    "offpeak_upload": ["Offpeak Upload", GIGABITS, "mdi:upload"],
    "offpeak_total": ["Offpeak Total", GIGABITS, "mdi:download"],
    "download": ["Download", GIGABITS, "mdi:download"],
    "upload": ["Upload", GIGABITS, "mdi:upload"],
    "total": ["Total", GIGABITS, "mdi:download"],
}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_MONITORED_VARIABLES): vol.All(
            cv.ensure_list, [vol.In(SENSOR_TYPES)]
        ),
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the EBox sensor.

    Performs an initial fetch so that bad credentials are detected at setup
    time; raises PlatformNotReady so Home Assistant retries later.
    """
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    httpsession = hass.helpers.aiohttp_client.async_get_clientsession()
    ebox_data = EBoxData(username, password, httpsession)

    name = config.get(CONF_NAME)

    # Imported lazily so the dependency is only required when the platform
    # is actually configured.
    from pyebox.client import PyEboxError

    try:
        await ebox_data.async_update()
    except PyEboxError as exp:
        _LOGGER.error("Failed login: %s", exp)
        # Chain the original error so the login failure traceback is kept.
        raise PlatformNotReady from exp

    sensors = []
    for variable in config[CONF_MONITORED_VARIABLES]:
        sensors.append(EBoxSensor(ebox_data, variable, name))

    async_add_entities(sensors, True)


class EBoxSensor(Entity):
    """Implementation of a EBox sensor."""

    def __init__(self, ebox_data, sensor_type, name):
        """Initialize the sensor.

        sensor_type must be a key of SENSOR_TYPES; name is the user-facing
        prefix shared by all sensors of this account.
        """
        self.client_name = name
        self.type = sensor_type
        self._name = SENSOR_TYPES[sensor_type][0]
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
        self._icon = SENSOR_TYPES[sensor_type][2]
        self.ebox_data = ebox_data
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self.client_name} {self._name}"

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon

    async def async_update(self):
        """Get the latest data from EBox and update the state."""
        await self.ebox_data.async_update()
        # The shared data dict may not contain this key if the last fetch
        # failed; keep the previous state in that case.
        if self.type in self.ebox_data.data:
            self._state = round(self.ebox_data.data[self.type], 2)


class EBoxData:
    """Get data from Ebox.

    One instance is shared by every EBoxSensor; updates are throttled so a
    burst of sensor refreshes triggers at most one network request per
    MIN_TIME_BETWEEN_UPDATES window.
    """

    def __init__(self, username, password, httpsession):
        """Initialize the data object."""
        from pyebox import EboxClient

        self.client = EboxClient(username, password, REQUESTS_TIMEOUT, httpsession)
        self.data = {}

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self):
        """Get the latest data from Ebox."""
        from pyebox.client import PyEboxError

        try:
            await self.client.fetch_data()
        except PyEboxError as exp:
            # Best-effort refresh: log and keep the stale data rather than
            # propagating a transient network error to every sensor.
            _LOGGER.error("Error on receive last EBox data: %s", exp)
            return
        # Update data
        self.data = self.client.get_data()
Cinntax/home-assistant
homeassistant/components/ebox/sensor.py
Python
apache-2.0
4,756
/* * Licensed to The Apereo Foundation under one or more contributor license * agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. * * The Apereo Foundation licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. * */ package org.unitime.timetable.solver.exam.ui; import java.io.PrintWriter; import java.io.Serializable; import java.util.Collection; import java.util.Collections; import java.util.Enumeration; import java.util.HashSet; import java.util.Hashtable; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; import java.util.Vector; import javax.servlet.jsp.JspWriter; import org.cpsolver.exam.model.Exam; import org.cpsolver.exam.model.ExamDistributionConstraint; import org.cpsolver.exam.model.ExamInstructor; import org.cpsolver.exam.model.ExamPlacement; import org.cpsolver.exam.model.ExamRoom; import org.cpsolver.exam.model.ExamRoomPlacement; import org.cpsolver.exam.model.ExamStudent; import org.cpsolver.ifs.extension.AssignedValue; import org.cpsolver.ifs.extension.ConflictStatistics; import org.cpsolver.ifs.model.Constraint; import org.dom4j.Element; import org.unitime.timetable.model.PreferenceLevel; import org.unitime.timetable.solver.ui.TimetableInfo; import org.unitime.timetable.webutil.timegrid.ExamGridTable; /** * @author Tomas Muller */ public class ExamConflictStatisticsInfo implements TimetableInfo, Serializable { private 
static final long serialVersionUID = 7L; public static int sVersion = 7; // to be able to do some changes in the future public static final int sConstraintTypeRoom = 1; public static final int sConstraintTypeInstructor = 2; public static final int sConstraintTypeGroup = 3; public static final int sConstraintTypeStudent = 4; private Hashtable iVariables = new Hashtable(); public Collection getCBS() { return iVariables.values(); } public CBSVariable getCBS(Long classId) { return (CBSVariable)iVariables.get(classId); } public void load(ConflictStatistics cbs) { load(cbs, null); } public ExamConflictStatisticsInfo getConflictStatisticsSubInfo(Vector variables) { ExamConflictStatisticsInfo ret = new ExamConflictStatisticsInfo(); for (Enumeration e=variables.elements();e.hasMoreElements();) { Exam exam = (Exam)e.nextElement(); CBSVariable var = (CBSVariable)iVariables.get(exam.getId()); if (var!=null) ret.iVariables.put(exam.getId(),var); } return ret; } public void merge(ExamConflictStatisticsInfo info) { if (info!=null) iVariables.putAll(info.iVariables); } public void load(ConflictStatistics cbs, Long examId) { iVariables.clear(); for (Iterator i1=cbs.getNoGoods().entrySet().iterator();i1.hasNext();) { Map.Entry entry = (Map.Entry)i1.next(); AssignedValue assignment = (AssignedValue)entry.getKey(); ExamPlacement placement = (ExamPlacement)assignment.getValue(); Exam exam = (Exam)placement.variable(); if (examId!=null && !examId.equals(exam.getId())) continue; CBSVariable var = (CBSVariable)iVariables.get(exam.getId()); if (var==null) { String pref = PreferenceLevel.sNeutral;//SolverGridModel.hardConflicts2pref(exam,null); var = new CBSVariable(exam.getId(),exam.getName(),pref); iVariables.put(exam.getId(),var); } Vector roomIds = new Vector(); Vector roomNames = new Vector(); Vector roomPrefs = new Vector(); for (Iterator i=new TreeSet(placement.getRoomPlacements()).iterator();i.hasNext();) { ExamRoomPlacement room = (ExamRoomPlacement)i.next(); 
roomIds.add(room.getId()); roomNames.add(room.getName()); roomPrefs.add(exam.getRoomPlacements().size()==placement.getRoomPlacements().size()?PreferenceLevel.sIntLevelRequired:room.getPenalty(placement.getPeriod())); } CBSValue val = new CBSValue(var, placement.getPeriod().getId(), placement.getPeriod().getDayStr()+" "+placement.getPeriod().getTimeStr(), (exam.getPeriodPlacements().size()==1?PreferenceLevel.sIntLevelRequired:placement.getPeriodPlacement().getPenalty()), roomIds, roomNames, roomPrefs); var.values().add(val); List noGoods = (List)entry.getValue(); Hashtable constr2assignments = new Hashtable(); for (Iterator e2=noGoods.iterator();e2.hasNext();) { AssignedValue noGood = (AssignedValue)e2.next(); if (noGood.getConstraint()==null) continue; Vector aaa = (Vector)constr2assignments.get(noGood.getConstraint()); if (aaa == null) { aaa = new Vector(); constr2assignments.put(noGood.getConstraint(), aaa); } aaa.addElement(noGood); } for (Iterator i2=constr2assignments.entrySet().iterator();i2.hasNext();) { Map.Entry entry2 = (Map.Entry)i2.next(); Constraint constraint = (Constraint)entry2.getKey(); Vector noGoodsThisConstraint = (Vector)entry2.getValue(); CBSConstraint con = null; if (constraint instanceof ExamRoom) { con = new CBSConstraint(val, sConstraintTypeRoom, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired); } else if (constraint instanceof ExamInstructor) { con = new CBSConstraint(val, sConstraintTypeInstructor, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired); } else if (constraint instanceof ExamStudent) { con = new CBSConstraint(val, sConstraintTypeStudent, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired); } else if (constraint instanceof ExamDistributionConstraint) { con = new CBSConstraint(val, sConstraintTypeGroup, constraint.getId(), ((ExamDistributionConstraint)constraint).getTypeString(), 
(constraint.isHard()?PreferenceLevel.sRequired:PreferenceLevel.int2prolog(((ExamDistributionConstraint)constraint).getWeight()))); } else { con = new CBSConstraint(val, -1, constraint.getId(), constraint.getName(), PreferenceLevel.sRequired); } val.constraints().add(con); for (Enumeration e3=noGoodsThisConstraint.elements();e3.hasMoreElements();) { AssignedValue ass = (AssignedValue)e3.nextElement(); ExamPlacement p = (ExamPlacement)ass.getValue(); Exam x = (Exam)p.variable(); String pr = PreferenceLevel.sNeutral;//SolverGridModel.hardConflicts2pref(x,p); Vector aroomIds = new Vector(); Vector aroomNames = new Vector(); Vector aroomPrefs = new Vector(); for (Iterator i=new TreeSet(p.getRoomPlacements()).iterator();i.hasNext();) { ExamRoomPlacement room = (ExamRoomPlacement)i.next(); aroomIds.add(room.getId()); aroomNames.add(room.getName()); aroomPrefs.add(x.getRoomPlacements().size()==p.getRoomPlacements().size()?PreferenceLevel.sIntLevelRequired:room.getPenalty(p.getPeriod())); } CBSAssignment a = new CBSAssignment(con, x.getId(), x.getName(), pr, p.getPeriod().getId(), p.getPeriod().getDayStr()+" "+p.getPeriod().getTimeStr(), (x.getPeriodPlacements().size()==1?PreferenceLevel.sIntLevelRequired:p.getPeriodPlacement().getPenalty()), aroomIds, aroomNames, aroomPrefs); con.assignments().add(a); a.incCounter((int)ass.getCounter(0)); } } } } public void load(Element root) { int version = Integer.parseInt(root.attributeValue("version")); if (version==sVersion) { iVariables.clear(); for (Iterator i1=root.elementIterator("var");i1.hasNext();) { CBSVariable var = new CBSVariable((Element)i1.next()); iVariables.put(Long.valueOf(var.getId()),var); } } } public void save(Element root) { root.addAttribute("version", String.valueOf(sVersion)); for (Iterator i1=iVariables.values().iterator();i1.hasNext();) { ((CBSVariable)i1.next()).save(root.addElement("var")); } } public static interface Counter { public int getCounter(); public void incCounter(int value); } public static 
class CBSVariable implements Counter, Comparable, Serializable { private static final long serialVersionUID = 1L; int iCounter = 0; long iExamId; String iName; HashSet iValues = new HashSet(); CBSConstraint iConstraint = null; String iPref = null; CBSVariable(long examId, String name, String pref) { iExamId = examId; iName = name; iPref = pref; } CBSVariable(CBSConstraint constraint, long classId, String examId, String pref) { iConstraint = constraint; iExamId = classId; iName = examId; iPref = pref; } CBSVariable(Element element) { iExamId = Long.parseLong(element.attributeValue("exam")); iName = element.attributeValue("name"); iPref = element.attributeValue("pref"); for (Iterator i=element.elementIterator("val");i.hasNext();) iValues.add(new CBSValue(this,(Element)i.next())); } public long getId() { return iExamId; } public int getCounter() { return iCounter; } public String getName() { return iName; } public String getPref() { return iPref; } public void incCounter(int value) { iCounter+=value; if (iConstraint!=null) iConstraint.incCounter(value); } public Set values() { return iValues; } public int hashCode() { return (Long.valueOf(iExamId)).hashCode(); } public boolean equals(Object o) { if (o==null || !(o instanceof CBSVariable)) return false; return ((CBSVariable)o).getId()==getId(); } public int compareTo(Object o) { if (o==null || !(o instanceof CBSVariable)) return -1; int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSVariable)o).getCounter())); if (ret!=0) return ret; return toString().compareTo(o.toString()); } public String toString() { return iName; } public void save(Element element) { element.addAttribute("exam",String.valueOf(iExamId)); element.addAttribute("name", iName); if (iPref!=null) element.addAttribute("pref", iPref); for (Iterator i=iValues.iterator();i.hasNext();) ((CBSValue)i.next()).save(element.addElement("val")); } } public static class CBSValue implements Counter, Comparable, Serializable { private static final 
long serialVersionUID = 1L; int iCounter = 0; Long iPeriodId; String iPeriodName; int iPeriodPref; Vector iRoomIds; String iInstructorName = null; Vector iRoomNames; Vector iRoomPrefs; CBSVariable iVariable = null; HashSet iConstraints = new HashSet(); HashSet iAssignments = new HashSet(); int iLength; CBSValue(CBSVariable var, Long periodId, String periodName, int periodPref, Vector roomIds, Vector roomNames, Vector roomPrefs) { iVariable = var; iRoomIds = roomIds; iRoomNames = roomNames; iRoomPrefs = roomPrefs; iPeriodId = periodId; iPeriodName = periodName; iPeriodPref = periodPref; } CBSValue(CBSVariable var, Element element) { iVariable = var; iPeriodId = Long.valueOf(element.attributeValue("period")); iPeriodName = element.attributeValue("name"); iPeriodPref = Integer.parseInt(element.attributeValue("pref")); iRoomIds = new Vector(); iRoomNames = new Vector(); iRoomPrefs = new Vector(); for (Iterator i=element.elementIterator("room");i.hasNext();) { Element r = (Element)i.next(); iRoomIds.addElement(Integer.valueOf(r.attributeValue("id"))); iRoomNames.addElement(r.attributeValue("name")); iRoomPrefs.addElement(Integer.valueOf(r.attributeValue("pref"))); } for (Iterator i=element.elementIterator("cons");i.hasNext();) iConstraints.add(new CBSConstraint(this,(Element)i.next())); } public CBSVariable variable() { return iVariable; } public Long getPeriodId() { return iPeriodId; } public String getPeriodName() { return iPeriodName; } public int getPeriodPref() { return iPeriodPref; } public Vector getRoomNames() { return iRoomNames; } public Vector getRoomPrefs() { return iRoomPrefs; } public String toString() { return iPeriodName+" "+iRoomNames; } public int getCounter() { return iCounter; } public void incCounter(int value) { iCounter+=value; if (iVariable!=null) iVariable.incCounter(value); } public Vector getRoomIds() { return iRoomIds; } public Set constraints() { return iConstraints; } public Set assignments() { return iAssignments; } public int hashCode() { 
return combine(iPeriodId.hashCode(), (iRoomIds==null?0:iRoomIds.hashCode())); } public boolean equals(Object o) { if (o==null || !(o instanceof CBSValue)) return false; CBSValue v = (CBSValue)o; return v.getRoomIds().equals(getRoomIds()) && v.getPeriodId().equals(getPeriodId()); } public int compareTo(Object o) { if (o==null || !(o instanceof CBSValue)) return -1; int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSValue)o).getCounter())); if (ret!=0) return ret; return toString().compareTo(o.toString()); } public void save(Element element) { element.addAttribute("period",String.valueOf(iPeriodId)); element.addAttribute("pref",String.valueOf(iPeriodPref)); element.addAttribute("name", iPeriodName); for (int i=0;i<iRoomIds.size();i++) { Element r = element.addElement("room"); r.addAttribute("id",iRoomIds.elementAt(i).toString()); r.addAttribute("name",iRoomNames.elementAt(i).toString()); r.addAttribute("pref",iRoomPrefs.elementAt(i).toString()); } for (Iterator i=iConstraints.iterator();i.hasNext();) ((CBSConstraint)i.next()).save(element.addElement("cons")); } } public static class CBSConstraint implements Counter, Comparable, Serializable { private static final long serialVersionUID = 1L; CBSValue iValue; int iCounter = 0; long iId; String iName = null; int iType; HashSet iAssignments = new HashSet(); HashSet iVariables = new HashSet(); String iPref; CBSConstraint(int type, long id, String name, String pref) { iId = id; iType = type; iName = name; iPref = pref; } CBSConstraint(CBSValue value, int type, long id, String name, String pref) { iId = id; iType = type; iValue = value; iName = name; iPref = pref; } CBSConstraint(CBSValue value, Element element) { iValue = value; iId = Integer.parseInt(element.attributeValue("id")); iType = Integer.parseInt(element.attributeValue("type")); iName = element.attributeValue("name"); iPref = element.attributeValue("pref"); for (Iterator i=element.elementIterator("nogood");i.hasNext();) iAssignments.add(new 
CBSAssignment(this,(Element)i.next())); } public long getId() { return iId; } public int getType() { return iType; } public String getName() { return iName; } public CBSValue value() { return iValue; } public Set variables() { return iVariables; } public Set assignments() { return iAssignments; } public String getPref() { return iPref; } public int getCounter() { return iCounter; } public void incCounter(int value) { iCounter+=value; if (iValue!=null) iValue.incCounter(value); } public int hashCode() { return combine((int)iId,iType); } public boolean equals(Object o) { if (o==null || !(o instanceof CBSConstraint)) return false; CBSConstraint c = (CBSConstraint)o; return c.getId()==getId() && c.getType()==getType(); } public int compareTo(Object o) { if (o==null || !(o instanceof CBSConstraint)) return -1; int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSConstraint)o).getCounter())); if (ret!=0) return ret; return toString().compareTo(o.toString()); } public void save(Element element) { element.addAttribute("id",String.valueOf(iId)); element.addAttribute("type",String.valueOf(iType)); if (iName!=null) element.addAttribute("name", iName); if (iPref!=null) element.addAttribute("pref", iPref); for (Iterator i=iAssignments.iterator();i.hasNext();) ((CBSAssignment)i.next()).save(element.addElement("nogood")); } } public static class CBSAssignment implements Counter, Comparable, Serializable { private static final long serialVersionUID = 1L; CBSConstraint iConstraint; Long iExamId; String iExamName; String iExamPref; Long iPeriodId; String iPeriodName; int iPeriodPref; int iCounter = 0; Vector iRoomIds; Vector iRoomPrefs; Vector iRoomNames; CBSAssignment(CBSConstraint constraint, Long examId, String examName, String examPref, Long periodId, String periodName, int periodPref, Vector roomIds, Vector roomNames, Vector roomPrefs) { iExamId = examId; iExamName = examName; iExamPref = examPref; iPeriodId = periodId; iPeriodName = periodName; iPeriodPref = 
periodPref; iRoomIds = roomIds; iRoomNames = roomNames; iRoomPrefs = roomPrefs; iConstraint = constraint; } CBSAssignment(CBSConstraint constraint, Element element) { iConstraint = constraint; iExamId = Long.valueOf(element.attributeValue("exam")); iExamName = element.attributeValue("name"); iExamPref = element.attributeValue("pref"); iRoomIds = new Vector(); iRoomNames = new Vector(); iRoomPrefs = new Vector(); for (Iterator i=element.elementIterator("room");i.hasNext();) { Element r = (Element)i.next(); iRoomIds.addElement(Integer.valueOf(r.attributeValue("id"))); iRoomNames.addElement(r.attributeValue("name")); iRoomPrefs.addElement(Integer.valueOf(r.attributeValue("pref"))); } iPeriodId = Long.valueOf(element.attributeValue("period")); iPeriodName = element.attributeValue("periodName"); iPeriodPref = Integer.parseInt(element.attributeValue("periodPref")); incCounter(Integer.parseInt(element.attributeValue("cnt"))); } public Long getId() { return iExamId; } public CBSConstraint getConstraint() { return iConstraint; } public String getName() { return iExamName; } public String getPref() { return iExamPref; } public Long getPeriodId() { return iPeriodId; } public String getPeriodName() { return iPeriodName; } public int getPeriodPref() { return iPeriodPref; } public String toString() { return iExamName+" "+iPeriodName+" "+iRoomNames; } public Vector getRoomNames() { return iRoomNames; } public Vector getRoomIds() { return iRoomIds; } public Vector getRoomPrefs() { return iRoomPrefs; } public int hashCode() { return combine(iExamId.hashCode(),combine(iRoomIds.hashCode(),iPeriodId.hashCode())); } public int getCounter() { return iCounter; } public void incCounter(int value) { iCounter+=value; if (iConstraint!=null) iConstraint.incCounter(value); } public boolean equals(Object o) { if (o==null || !(o instanceof CBSAssignment)) return false; CBSAssignment a = (CBSAssignment)o; return a.getId().equals(getId()) && a.getRoomIds().equals(getRoomIds()) && 
a.getPeriodId().equals(getPeriodId()); } public int compareTo(Object o) { if (o==null || !(o instanceof CBSAssignment)) return -1; int ret = -(Integer.valueOf(iCounter)).compareTo(Integer.valueOf(((CBSAssignment)o).getCounter())); if (ret!=0) return ret; return toString().compareTo(o.toString()); } public void save(Element element) { element.addAttribute("exam",String.valueOf(iExamId)); element.addAttribute("name",iExamName); element.addAttribute("pref",iExamPref); for (int i=0;i<iRoomIds.size();i++) { Element r = element.addElement("room"); r.addAttribute("id",iRoomIds.elementAt(i).toString()); r.addAttribute("name",iRoomNames.elementAt(i).toString()); r.addAttribute("pref",iRoomPrefs.elementAt(i).toString()); } element.addAttribute("period", String.valueOf(iPeriodId)); element.addAttribute("periodName", iPeriodName); element.addAttribute("periodPref", String.valueOf(iPeriodPref)); element.addAttribute("cnt", String.valueOf(iCounter)); } } private static int combine(int a, int b) { int ret = 0; for (int i=0;i<15;i++) ret = ret | ((a & (1<<i))<<i) | ((b & (1<<i))<<(i+1)); return ret; } //--------- toHtml ------------------------------------------------- private static String IMG_BASE = "images/"; private static String IMG_EXPAND = IMG_BASE+"expand_node_btn.gif"; private static String IMG_COLLAPSE = IMG_BASE+"collapse_node_btn.gif"; private static String IMG_LEAF = IMG_BASE+"end_node_btn.gif"; public static int TYPE_VARIABLE_BASED = 0; public static int TYPE_CONSTRAINT_BASED = 1; private void menu_item(PrintWriter out, String id, String name, String description, String page, boolean isCollapsed) { out.println("<div style=\"margin-left:5px;\">"); out.println("<A style=\"border:0;background:0\" id=\"__idMenu"+id+"\" href=\"javascript:toggle('"+id+"')\" name=\""+name+"\">"); out.println("<img id=\"__idMenuImg"+id+"\" border=\"0\" src=\""+(isCollapsed ? 
IMG_EXPAND : IMG_COLLAPSE)+"\" align=\"absmiddle\"></A>"); out.println("&nbsp;<A class='noFancyLinks' target=\"__idContentFrame\" "+(page == null ? "" : page+" onmouseover=\"this.style.cursor='hand';this.style.cursor='pointer';\" ")+"title=\""+(description == null ? "" : description)+"\" >"+ name+(description == null?"":" <font color='gray'>[" + description + "]</font>")+"</A><br>"); out.println("</div>"); out.println("<div ID=\"__idMenuDiv"+id+"\" style=\"display:"+(isCollapsed ? "none" : "block")+";position:relative;margin-left:18px;\">"); } private void leaf_item(PrintWriter out, String name, String description, String page) { out.println("<div style=\"margin-left:5px;\">"); out.println("<img border=\"0\" src=\""+IMG_LEAF+"\" align=\"absmiddle\">"); out.println("&nbsp;<A class='noFancyLinks' target=\"__idContentFrame\" "+(page == null ? "" : page + " onmouseover=\"this.style.cursor='hand';this.style.cursor='pointer';\" ")+"title=\""+(description == null ? "" : description)+"\" >"+name+(description == null ? "" : " <font color='gray'>[" + description + "]</font>")+"</A><br>"); out.println("</div>"); } private void end_item(PrintWriter out) { out.println("</div>"); } private void unassignedVariableMenuItem(PrintWriter out, String menuId, CBSVariable variable, boolean clickable) { String name = "<font color='"+PreferenceLevel.prolog2color(variable.getPref())+"'>"+ variable.getName()+ "</font>"; String description = null; String onClick = null; if (clickable) onClick = "onclick=\"(parent ? 
parent : window).showGwtDialog('Examination Assignment', 'examInfo.do?examId="+variable.getId()+"&op=Reset','900','90%');\""; menu_item(out, menuId, variable.getCounter() + "&times; " + name, description, onClick, true); } private void unassignmentMenuItem(PrintWriter out, String menuId, CBSValue value, boolean clickable) { String name = "<font color='"+PreferenceLevel.int2color(value.getPeriodPref())+"'>"+ value.getPeriodName()+ "</font> "; String roomLink = ""; for (int i=0;i<value.getRoomIds().size();i++) { name += (i>0?", ":"")+"<font color='"+PreferenceLevel.int2color(((Integer)value.getRoomPrefs().elementAt(i)).intValue())+"'>"+ value.getRoomNames().elementAt(i)+"</font>"; roomLink += (i>0?":":"")+value.getRoomIds().elementAt(i); } String description = null; String onClick = null; if (clickable) onClick = "onclick=\"(parent ? parent : window).showGwtDialog('Examination Assignment', 'examInfo.do?examId="+value.variable().getId()+"&period="+value.getPeriodId()+"&room="+roomLink+"&op=Try&reset=1','900','90%');\""; menu_item(out, menuId, value.getCounter() + "&times; " + name, description, onClick, true); } private void constraintMenuItem(PrintWriter out, String menuId, CBSConstraint constraint, boolean clickable) { String name = "<font color='"+PreferenceLevel.prolog2color(constraint.getPref())+"'>"; String link = null; switch (constraint.getType()) { case sConstraintTypeGroup : name += "Distribution "+constraint.getName(); break; case sConstraintTypeInstructor : name += "Instructor "+constraint.getName(); if (clickable) link = "examGrid.do?filter="+constraint.getName()+"&resource="+ExamGridTable.sResourceInstructor+"&op=Cbs"; break; case sConstraintTypeRoom : name += "Room "+constraint.getName(); if (clickable) link = "examGrid.do?filter="+constraint.getName()+"&resource="+ExamGridTable.sResourceRoom+"&op=Cbs"; break; case sConstraintTypeStudent : name += "Student "+constraint.getName(); break; default : name += 
(constraint.getName()==null?"Unknown":constraint.getName()); } name += "</font>"; String description = null; String onClick = null; if (link!=null) onClick = "href=\""+link+"\""; menu_item(out, menuId, constraint.getCounter() + "&times; " + name, description, onClick, true); } private void assignmentLeafItem(PrintWriter out, CBSAssignment assignment, boolean clickable) { String name = "<font color='"+PreferenceLevel.prolog2color(assignment.getPref())+"'>"+ assignment.getName()+ "</font> &larr; "+ "<font color='"+PreferenceLevel.int2color(assignment.getPeriodPref())+"'>"+ assignment.getPeriodName()+ "</font> "; String roomLink = ""; for (int i=0;i<assignment.getRoomIds().size();i++) { name += (i>0?", ":"")+"<font color='"+PreferenceLevel.int2color(((Integer)assignment.getRoomPrefs().elementAt(i)).intValue())+"'>"+ assignment.getRoomNames().elementAt(i)+"</font>"; roomLink += (i>0?":":"")+assignment.getRoomIds().elementAt(i); } String onClick = null; if (clickable) onClick = "onclick=\"(parent ? 
parent : window).showGwtDialog('Examination Assignment', 'examInfo.do?examId="+assignment.getId()+"&period="+assignment.getPeriodId()+"&room="+roomLink+"&op=Try&reset=1','900','90%');\""; leaf_item(out, assignment.getCounter()+"&times; "+name, null, onClick); } public static void printHtmlHeader(JspWriter jsp) { PrintWriter out = new PrintWriter(jsp); printHtmlHeader(out, false); } public static void printHtmlHeader(PrintWriter out, boolean style) { if (style) { out.println("<style type=\"text/css\">"); out.println("<!--"); out.println("A:link { color: blue; text-decoration: none; border:0; background:0; }"); out.println("A:visited { color: blue; text-decoration: none; border:0; background:0; }"); out.println("A:active { color: blue; text-decoration: none; border:0; background:0; }"); out.println("A:hover { color: blue; text-decoration: none; border:0; background:0; }"); out.println(".TextBody { background-color: white; color:black; font-size: 12px; }"); out.println(".WelcomeHead { color: black; margin-top: 0px; margin-left: 0px; font-weight: bold; text-align: right; font-size: 30px; font-family: Comic Sans MS}"); out.println("-->"); out.println("</style>"); out.println(); } out.println("<script language=\"javascript\" type=\"text/javascript\">"); out.println("function toggle(item) {"); out.println(" obj=document.getElementById(\"__idMenuDiv\"+item);"); out.println(" visible=(obj.style.display!=\"none\");"); out.println(" img=document.getElementById(\"__idMenuImg\" + item);"); out.println(" menu=document.getElementById(\"__idMenu\" + item);"); out.println(" if (visible) {obj.style.display=\"none\";img.src=\""+IMG_EXPAND+"\";}"); out.println(" else {obj.style.display=\"block\";img.src=\""+IMG_COLLAPSE+"\";}"); out.println("}"); out.println("</script>"); out.flush(); } private Vector filter(Collection counters, double limit) { Vector cnt = new Vector(counters); Collections.sort(cnt); int total = 0; for (Enumeration e=cnt.elements();e.hasMoreElements();) total += 
((Counter)e.nextElement()).getCounter(); int totalLimit = (int)Math.ceil(limit*total); int current = 0; Vector ret = new Vector(); for (Enumeration e=cnt.elements();e.hasMoreElements();) { Counter c = (Counter)e.nextElement(); ret.addElement(c); current += c.getCounter(); if (current>=totalLimit) break; } return ret; } /** Print conflict-based statistics in HTML format */ public void printHtml(JspWriter jsp, double limit, int type, boolean clickable) { printHtml(jsp, null, new double[] {limit,limit,limit,limit}, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(PrintWriter out, double limit, int type, boolean clickable) { printHtml(out, null, new double[] {limit,limit,limit,limit}, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(JspWriter jsp, double[] limit, int type, boolean clickable) { printHtml(jsp, null, limit, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(PrintWriter out, double[] limit, int type, boolean clickable) { printHtml(out, null, limit, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(JspWriter jsp, Long classId, double limit, int type, boolean clickable) { printHtml(jsp, classId, new double[] {limit,limit,limit,limit}, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(PrintWriter out, Long classId, double limit, int type, boolean clickable) { printHtml(out, classId, new double[] {limit,limit,limit,limit}, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(JspWriter jsp, Long classId, double[] limit, int type, boolean clickable) { PrintWriter out = new PrintWriter(jsp); printHtml(out, classId, limit, type, clickable); } /** Print conflict-based statistics in HTML format */ public void printHtml(PrintWriter out, Long classId, double[] limit, int type, boolean clickable) { 
if (type == TYPE_VARIABLE_BASED) { Vector vars = filter(iVariables.values(), limit[0]); if (classId!=null) { CBSVariable var = (CBSVariable)iVariables.get(classId); vars.clear(); if (var!=null) vars.add(var); } for (Enumeration e1 = vars.elements(); e1.hasMoreElements();) { CBSVariable variable = (CBSVariable)e1.nextElement(); String m1 = String.valueOf(variable.getId()); if (classId==null) unassignedVariableMenuItem(out,m1,variable, clickable); Vector vals = filter(variable.values(), limit[1]); int id = 0; for (Enumeration e2 = vals.elements();e2.hasMoreElements();) { CBSValue value = (CBSValue)e2.nextElement(); String m2 = m1+"."+(id++); unassignmentMenuItem(out,m2,value, clickable); Vector constraints =filter(value.constraints(),limit[2]); for (Enumeration e3 = constraints.elements(); e3.hasMoreElements();) { CBSConstraint constraint = (CBSConstraint)e3.nextElement(); String m3 = m2 + constraint.getType()+"."+constraint.getId(); constraintMenuItem(out,m3,constraint, clickable); Vector assignments = filter(constraint.assignments(),limit[3]); for (Enumeration e4 = assignments.elements();e4.hasMoreElements();) { CBSAssignment assignment = (CBSAssignment)e4.nextElement(); assignmentLeafItem(out, assignment, clickable); } end_item(out); } end_item(out); } end_item(out); } } else if (type == TYPE_CONSTRAINT_BASED) { Hashtable constraints = new Hashtable(); for (Enumeration e1 = iVariables.elements(); e1.hasMoreElements();) { CBSVariable variable = (CBSVariable)e1.nextElement(); if (classId!=null && classId.longValue()!=variable.getId()) continue; for (Iterator e2=variable.values().iterator();e2.hasNext();) { CBSValue value = (CBSValue)e2.next(); for (Iterator e3=value.constraints().iterator();e3.hasNext();) { CBSConstraint constraint = (CBSConstraint)e3.next(); CBSConstraint xConstraint = (CBSConstraint)constraints.get(constraint.getType()+"."+constraint.getId()); if (xConstraint==null) { xConstraint = new 
CBSConstraint(constraint.getType(),constraint.getId(),constraint.getName(),constraint.getPref()); constraints.put(constraint.getType()+"."+constraint.getId(),xConstraint); } CBSVariable xVariable = null; for (Iterator i=xConstraint.variables().iterator();i.hasNext();) { CBSVariable v = (CBSVariable)i.next(); if (v.getId()==variable.getId()) { xVariable = v; break; } } if (xVariable==null) { xVariable = new CBSVariable(xConstraint,variable.getId(),variable.getName(),variable.getPref()); xConstraint.variables().add(xVariable); } CBSValue xValue = new CBSValue(xVariable, value.getPeriodId(), value.getPeriodName(), value.getPeriodPref(), value.getRoomIds(), value.getRoomNames(), value.getRoomPrefs()); xVariable.values().add(xValue); for (Iterator e4=constraint.assignments().iterator();e4.hasNext();) { CBSAssignment assignment = (CBSAssignment)e4.next(); xValue.assignments().add(assignment); xValue.incCounter(assignment.getCounter()); } } } } Vector consts = filter(constraints.values(), limit[0]); for (Enumeration e1 = consts.elements(); e1.hasMoreElements();) { CBSConstraint constraint = (CBSConstraint)e1.nextElement(); String m1 = constraint.getType()+"."+constraint.getId(); constraintMenuItem(out,m1,constraint, clickable); Vector variables = filter(constraint.variables(), limit[1]); Collections.sort(variables); for (Enumeration e2 = variables.elements(); e2.hasMoreElements();) { CBSVariable variable = (CBSVariable)e2.nextElement(); String m2 = m1+"."+variable.getId(); if (classId==null) unassignedVariableMenuItem(out,m2,variable, clickable); Vector vals = filter(variable.values(), limit[2]); int id = 0; for (Enumeration e3 = vals.elements();e3.hasMoreElements();) { CBSValue value = (CBSValue)e3.nextElement(); String m3 = m2+"."+(id++); unassignmentMenuItem(out,m3,value, clickable); Vector assignments = filter(value.assignments(), limit[3]); for (Enumeration e4 = assignments.elements();e4.hasMoreElements();) { CBSAssignment assignment = 
(CBSAssignment)e4.nextElement(); assignmentLeafItem(out, assignment, clickable); } end_item(out); } if (classId==null) end_item(out); } end_item(out); } } out.flush(); } public boolean saveToFile() { return true; } }
UniTime/unitime
JavaSource/org/unitime/timetable/solver/exam/ui/ExamConflictStatisticsInfo.java
Java
apache-2.0
36,624
package no.nb.nna.veidemann.chrome.client.ws; import no.nb.nna.veidemann.chrome.client.ws.GetBrowserVersionCmd.Response; public class GetBrowserVersionCmd extends Command<Response> { public GetBrowserVersionCmd(Cdp client) { super(client, "Browser", "getVersion", Response.class); } public static class Response { private String protocolVersion; private String product; private String revision; private String userAgent; private String jsVersion; /** * Protocol version. */ public String protocolVersion() { return protocolVersion; } /** * Product name. */ public String product() { return product; } /** * Product revision. */ public String revision() { return revision; } /** * User-Agent. */ public String userAgent() { return userAgent; } /** * V8 version. */ public String jsVersion() { return jsVersion; } public String toString() { return "Version{protocolVersion=" + protocolVersion + ", product=" + product + ", revision=" + revision + ", userAgent=" + userAgent + ", jsVersion=" + jsVersion + "}"; } } }
nlnwa/broprox
veidemann-chrome-client/src/main/java/no/nb/nna/veidemann/chrome/client/ws/GetBrowserVersionCmd.java
Java
apache-2.0
1,385
package fr.javatronic.blog.massive.annotation1; import fr.javatronic.blog.processor.Annotation_001; @Annotation_001 public class Class_914 { }
lesaint/experimenting-annotation-processing
experimenting-rounds/massive-count-of-annotated-classes/src/main/java/fr/javatronic/blog/massive/annotation1/Class_914.java
Java
apache-2.0
145
OC.L10N.register( "settings", { "Security & Setup Warnings" : "Säkerhets & Inställningsvarningar", "Cron" : "Cron", "Sharing" : "Dela", "Security" : "Säkerhet", "Email Server" : "E-postserver", "Log" : "Logg", "Authentication error" : "Fel vid autentisering", "Your full name has been changed." : "Hela ditt namn har ändrats", "Unable to change full name" : "Kunde inte ändra hela namnet", "Files decrypted successfully" : "Filerna dekrypterades utan fel", "Couldn't decrypt your files, please check your owncloud.log or ask your administrator" : "Det gick inte att dekryptera dina filer, kontrollera din owncloud.log eller fråga administratören", "Couldn't decrypt your files, check your password and try again" : "Det gick inte att dekryptera filerna, kontrollera ditt lösenord och försök igen", "Encryption keys deleted permanently" : "Krypteringsnycklar raderades permanent", "Couldn't permanently delete your encryption keys, please check your owncloud.log or ask your administrator" : "Det gick inte att permanent ta bort dina krypteringsnycklar, kontrollera din owncloud.log eller fråga din administratör", "Couldn't remove app." : "Kunde inte ta bort applikationen.", "Backups restored successfully" : "Återställning av säkerhetskopior lyckades", "Couldn't restore your encryption keys, please check your owncloud.log or ask your administrator" : "Kan inte återställa dina krypteringsnycklar, vänligen kontrollera din owncloud.log eller fråga din administratör.", "Language changed" : "Språk ändrades", "Invalid request" : "Ogiltig begäran", "Admins can't remove themself from the admin group" : "Administratörer kan inte ta bort sig själva från admingruppen", "Unable to add user to group %s" : "Kan inte lägga till användare i gruppen %s", "Unable to remove user from group %s" : "Kan inte radera användare från gruppen %s", "Couldn't update app." 
: "Kunde inte uppdatera appen.", "Wrong password" : "Fel lösenord", "No user supplied" : "Ingen användare angiven", "Please provide an admin recovery password, otherwise all user data will be lost" : "Ange ett återställningslösenord för administratören. Annars kommer all användardata förloras", "Wrong admin recovery password. Please check the password and try again." : "Felaktigt återställningslösenord för administratör. Kolla lösenordet och prova igen.", "Back-end doesn't support password change, but the users encryption key was successfully updated." : "Gränssnittet stödjer inte byte av lösenord, men användarnas krypteringsnyckel blev uppdaterad.", "Unable to change password" : "Kunde inte ändra lösenord", "Enabled" : "Aktiverad", "Not enabled" : "Inte aktiverad", "Recommended" : "Rekomenderad", "Group already exists." : "Gruppen finns redan.", "Unable to add group." : "Lyckades inte lägga till grupp.", "Unable to delete group." : "Lyckades inte radera grupp.", "log-level out of allowed range" : "logg-nivå utanför tillåtet område", "Saved" : "Sparad", "test email settings" : "testa e-post inställningar", "If you received this email, the settings seem to be correct." : "Om du mottog detta e-postmeddelande, verkar dina inställningar vara korrekta.", "A problem occurred while sending the email. Please revise your settings." : "Ett problem uppstod när e-postmeddelandet skickades. Vänligen se över dina inställningar.", "Email sent" : "E-post skickat", "You need to set your user email before being able to send test emails." : "Du behöver ställa in din användares e-postadress före du kan skicka test e-post.", "Invalid mail address" : "Ogiltig e-postadress", "Unable to create user." : "Kan inte skapa användare.", "Your %s account was created" : "Ditt %s konto skapades", "Unable to delete user." 
: "Kan inte radera användare.", "Forbidden" : "Förbjuden", "Invalid user" : "Ogiltig användare", "Unable to change mail address" : "Kan inte ändra e-postadress", "Email saved" : "E-post sparad", "Are you really sure you want add \"{domain}\" as trusted domain?" : "Är du verkligen säker på att du vill lägga till \"{domain}\" som en trusted domian?", "Add trusted domain" : "Lägg till betrodd domän", "Sending..." : "Skickar ...", "All" : "Alla", "Please wait...." : "Var god vänta ...", "Error while disabling app" : "Fel vid inaktivering av app", "Disable" : "Deaktivera", "Enable" : "Aktivera", "Error while enabling app" : "Fel vid aktivering av app", "Updating...." : "Uppdaterar ...", "Error while updating app" : "Fel uppstod vid uppdatering av appen", "Updated" : "Uppdaterad", "Uninstalling ...." : "Avinstallerar ...", "Error while uninstalling app" : "Ett fel inträffade när applikatonen avinstallerades", "Uninstall" : "Avinstallera", "Select a profile picture" : "Välj en profilbild", "Very weak password" : "Väldigt svagt lösenord", "Weak password" : "Svagt lösenord", "So-so password" : "Okej lösenord", "Good password" : "Bra lösenord", "Strong password" : "Starkt lösenord", "Valid until {date}" : "Giltig t.o.m. {date}", "Delete" : "Radera", "Decrypting files... Please wait, this can take some time." : "Dekrypterar filer ... Vänligen vänta, detta kan ta en stund.", "Delete encryption keys permanently." : "Radera krypteringsnycklar permanent", "Restore encryption keys." 
: "Återställ krypteringsnycklar", "Groups" : "Grupper", "Unable to delete {objName}" : "Kunde inte radera {objName}", "Error creating group" : "Fel vid skapande av grupp", "A valid group name must be provided" : "Ett giltigt gruppnamn måste anges", "deleted {groupName}" : "raderade {groupName} ", "undo" : "ångra", "no group" : "ingen grupp", "never" : "aldrig", "deleted {userName}" : "raderade {userName}", "add group" : "lägg till grupp", "A valid username must be provided" : "Ett giltigt användarnamn måste anges", "Error creating user" : "Fel vid skapande av användare", "A valid password must be provided" : "Ett giltigt lösenord måste anges", "A valid email must be provided" : "En giltig e-postadress måste anges", "__language_name__" : "__language_name__", "Personal Info" : "Personlig info", "SSL root certificates" : "SSL rotcertifikat", "Encryption" : "Kryptering", "Everything (fatal issues, errors, warnings, info, debug)" : "Allting (allvarliga fel, fel, varningar, info, debug)", "Info, warnings, errors and fatal issues" : "Info, varningar och allvarliga fel", "Warnings, errors and fatal issues" : "Varningar, fel och allvarliga fel", "Errors and fatal issues" : "Fel och allvarliga fel", "Fatal issues only" : "Endast allvarliga fel", "None" : "Ingen", "Login" : "Logga in", "Plain" : "Enkel", "NT LAN Manager" : "NT LAN Manager", "SSL" : "SSL", "TLS" : "TLS", "Security Warning" : "Säkerhetsvarning", "You are accessing %s via HTTP. We strongly suggest you configure your server to require using HTTPS instead." : "Du ansluter till %s via HTTP. Vi rekommenderar starkt att du konfigurerar din server att använda HTTPS istället.", "Read-Only config enabled" : "Skrivskyddad konfiguration påslagen", "The Read-Only config has been enabled. This prevents setting some configurations via the web-interface. Furthermore, the file needs to be made writable manually for every update." : "Lär-bara konfigureringen har blivit aktiv. 
Detta förhindrar att några konfigureringar kan sättas via web-gränssnittet.", "Setup Warning" : "Installationsvarning", "PHP is apparently setup to strip inline doc blocks. This will make several core apps inaccessible." : "PHP är tydligen inställd för att rensa inline doc block. Detta kommer att göra flera kärnapplikationer otillgängliga.", "This is probably caused by a cache/accelerator such as Zend OPcache or eAccelerator." : "Detta orsakas troligtvis av en cache/accelerator som t ex Zend OPchache eller eAccelerator.", "Database Performance Info" : "Databasprestanda Information", "Microsoft Windows Platform" : "Microsoft Windows-platform", "Your server is running on Microsoft Windows. We highly recommend Linux for optimal user experience." : "Din server använder Microsoft Windows. Vi rekommenderar starkt Linux för en optimal användarerfarenhet.", "Module 'fileinfo' missing" : "Modulen \"fileinfo\" saknas", "The PHP module 'fileinfo' is missing. We strongly recommend to enable this module to get best results with mime-type detection." : "PHP-modulen 'fileinfo' saknas. Vi rekommenderar starkt att aktivera den här modulen för att kunna upptäcka korrekt mime-typ.", "PHP charset is not set to UTF-8" : "PHP-teckenuppsättning är inte satt till UTF-8", "PHP charset is not set to UTF-8. This can cause major issues with non-ASCII characters in file names. We highly recommend to change the value of 'default_charset' php.ini to 'UTF-8'." : "PHP-teckenuppsättning är inte satt till UTF-8. Detta kan orsaka stora problem med icke-ASCII-tecken i filnamn. Vi rekommenderar starkt att ändra värdet \"default_charset\" i php.ini till \"UTF-8\".", "Locale not working" : "\"Locale\" fungerar inte", "System locale can not be set to a one which supports UTF-8." : "Systemspråk kan inte ställas in till ett som stödjer UTF-8.", "This means that there might be problems with certain characters in file names." 
: "Detta betyder att där kan komma att uppstå problem med vissa tecken i filnamn.", "We strongly suggest installing the required packages on your system to support one of the following locales: %s." : "Vi rekommenderar starkt att installera de nödvändiga paketen på ditt system för att stödja en av följande språkversioner: %s.", "URL generation in notification emails" : "URL-generering i notifieringsmejl", "If your installation is not installed in the root of the domain and uses system cron, there can be issues with the URL generation. To avoid these problems, please set the \"overwrite.cli.url\" option in your config.php file to the webroot path of your installation (Suggested: \"%s\")" : "Om din installation inte installerades på roten av domänen och använder system cron så kan det uppstå problem med URL-genereringen. För att undvika dessa problem, var vänlig sätt \"overwrite.cli.url\"-inställningen i din config.php-fil till webbrotsökvägen av din installation (Föreslagen: \"%s\")", "Configuration Checks" : "Konfigurationskontroller", "No problems found" : "Inga problem hittades", "Please double check the <a href='%s'>installation guides</a>." : "Var god kontrollera <a href='%s'>installationsguiden</a>.", "Last cron was executed at %s." : "Sista cron kördes vid %s", "Last cron was executed at %s. This is more than an hour ago, something seems wrong." : "Sista cron kördes vid %s. Detta är mer än en timme sedan, något verkar fel.", "Cron was not executed yet!" : "Cron kördes inte ännu!", "Execute one task with each page loaded" : "Exekvera en uppgift vid varje sidladdning", "cron.php is registered at a webcron service to call cron.php every 15 minutes over http." : "cron.php är registrerad som en webcron service att ropa på cron.php varje 15 minuter över http.", "Use system's cron service to call the cron.php file every 15 minutes." 
: "Använd systemets cron-tjänst för att anropa cron.php var 15:e minut.", "Allow apps to use the Share API" : "Tillåt applikationer att använda delat API", "Allow users to share via link" : "Tillåt användare att dela via länk", "Enforce password protection" : "Tillämpa lösenordskydd", "Allow public uploads" : "Tillåt offentlig uppladdning", "Allow users to send mail notification for shared files" : "Tillåt användare att skicka mailnotifieringar för delade filer", "Set default expiration date" : "Ställ in standardutgångsdatum", "Expire after " : "Förfaller efter", "days" : "dagar", "Enforce expiration date" : "Tillämpa förfallodatum", "Allow resharing" : "Tillåt vidaredelning", "Restrict users to only share with users in their groups" : "Begränsa användare till att enbart kunna dela med användare i deras grupper", "Allow users to send mail notification for shared files to other users" : "Tillåt användare att skicka mejlnotifiering för delade filer till andra användare", "Exclude groups from sharing" : "Exkludera grupp från att dela", "These groups will still be able to receive shares, but not to initiate them." : "Dessa grupper kommer fortfarande kunna ta emot delningar, men inte skapa delningar.", "Enforce HTTPS" : "Kräv HTTPS", "Forces the clients to connect to %s via an encrypted connection." : "Tvingar klienterna att ansluta till %s via en krypterad anslutning.", "Enforce HTTPS for subdomains" : "Framtvinga HTTPS för underdomäner", "Forces the clients to connect to %s and subdomains via an encrypted connection." : "Tvingar klienter att ansluta till %s och underdomäner via en krypterad anslutning.", "Please connect to your %s via HTTPS to enable or disable the SSL enforcement." : "Anslut till din %s via HTTPS för att aktivera/deaktivera SSL", "This is used for sending out notifications." 
: "Detta används för att skicka ut notifieringar.", "Send mode" : "Sändningsläge", "From address" : "Från adress", "mail" : "mail", "Authentication method" : "Autentiseringsmetod", "Authentication required" : "Autentisering krävs", "Server address" : "Serveradress", "Port" : "Port", "Credentials" : "Inloggningsuppgifter", "SMTP Username" : "SMTP-användarnamn", "SMTP Password" : "SMTP-lösenord", "Store credentials" : "Lagra inloggningsuppgifter", "Test email settings" : "Testa e-postinställningar", "Send email" : "Skicka e-post", "Log level" : "Nivå på loggning", "Download logfile" : "Ladda ner loggfil", "More" : "Mer", "Less" : "Mindre", "The logfile is bigger than 100MB. Downloading it may take some time!" : "Loggfilen är större än 100MB. Att ladda ner den kan ta lite tid!", "Version" : "Version", "Developed by the <a href=\"http://ownCloud.org/contact\" target=\"_blank\">ownCloud community</a>, the <a href=\"https://github.com/owncloud\" target=\"_blank\">source code</a> is licensed under the <a href=\"http://www.gnu.org/licenses/agpl-3.0.html\" target=\"_blank\"><abbr title=\"Affero General Public License\">AGPL</abbr></a>." 
: "Utvecklad av <a href=\"http://ownCloud.org/contact\" target=\"_blank\">ownCloud Community</a>, <a href=\"https://github.com/owncloud\" target=\"_blank\">källkoden</a> är licenserad under <a href=\"http://www.gnu.org/licenses/agpl-3.0.html\" target=\"_blank\"><abbr title=\"Affero General Public License\">AGPL</abbr></a>.", "More apps" : "Fler appar", "Add your app" : "Lägg till din app", "by" : "av", "licensed" : "licensierad", "Documentation:" : "Dokumentation:", "User Documentation" : "Användardokumentation", "Admin Documentation" : "Administratörsdokumentation", "This app cannot be installed because the following dependencies are not fulfilled:" : "Denna applikation kan inte installeras då följande beroenden inte är uppfyllda: %s", "Update to %s" : "Uppdatera till %s", "Enable only for specific groups" : "Aktivera endast för specifika grupper", "Uninstall App" : "Avinstallera applikation", "Hey there,<br><br>just letting you know that you now have an %s account.<br><br>Your username: %s<br>Access it: <a href=\"%s\">%s</a><br><br>" : "Hej där,<br><br>vill bara informera dig om att du nu har ett %s konto.<br><br>Ditt användarnamn: %s<br>Accessa det genom: <a href=\"%s\">%s</a><br><br>", "Cheers!" 
: "Ha de fint!", "Hey there,\n\njust letting you know that you now have an %s account.\n\nYour username: %s\nAccess it: %s\n\n" : "Hej där,\n\nvill bara informera dig om att du nu har ett %s konto.\n\nDitt användarnamn: %s\nAccessa det genom: %s\n", "Administrator Documentation" : "Administratörsdokumentation", "Online Documentation" : "Onlinedokumentation", "Forum" : "Forum", "Bugtracker" : "Bugtracker", "Commercial Support" : "Kommersiell support", "Get the apps to sync your files" : "Skaffa apparna för att synkronisera dina filer", "Desktop client" : "Skrivbordsklient", "Android app" : "Android-app", "iOS app" : "iOS-app", "If you want to support the project\n\t\t<a href=\"https://owncloud.org/contribute\"\n\t\t\ttarget=\"_blank\">join development</a>\n\t\tor\n\t\t<a href=\"https://owncloud.org/promote\"\n\t\t\ttarget=\"_blank\">spread the word</a>!" : "Om du vill stödja projektet\n<a href=\"https://owncloud.org/contribute\"\n\t\t\ttarget=\"_blank\">hjälp till med utvecklingen</a>\n\t\teller\n\t\t<a href=\"https://owncloud.org/promote\"\n\t\t\ttarget=\"_blank\">sprid budskapet vidare</a>!", "Show First Run Wizard again" : "Visa Första uppstarts-guiden igen", "You have used <strong>%s</strong> of the available <strong>%s</strong>" : "Du har använt <strong>%s</strong> av tillgängliga <strong>%s</strong>", "Password" : "Lösenord", "Your password was changed" : "Ditt lösenord har ändrats", "Unable to change your password" : "Kunde inte ändra ditt lösenord", "Current password" : "Nuvarande lösenord", "New password" : "Nytt lösenord", "Change password" : "Ändra lösenord", "Full Name" : "Hela namnet", "No display name set" : "Inget visningsnamn angivet", "Email" : "E-post", "Your email address" : "Din e-postadress", "Fill in an email address to enable password recovery and receive notifications" : "Fyll i en e-postadress för att aktivera återställning av lösenord och mottagande av notifieringar", "No email address set" : "Ingen e-postadress angiven", "Profile picture" 
: "Profilbild", "Upload new" : "Ladda upp ny", "Select new from Files" : "Välj ny från filer", "Remove image" : "Radera bild", "Either png or jpg. Ideally square but you will be able to crop it." : "Antingen png eller jpg. Helst fyrkantig, men du kommer att kunna beskära den.", "Your avatar is provided by your original account." : "Din avatar tillhandahålls av ditt ursprungliga konto.", "Cancel" : "Avbryt", "Choose as profile image" : "Välj som profilbild", "Language" : "Språk", "Help translate" : "Hjälp att översätta", "Common Name" : "Vanligt namn", "Valid until" : "Giltigt till", "Issued By" : "Utfärdat av", "Valid until %s" : "Giltigt till %s", "Import Root Certificate" : "Importera rotcertifikat", "The encryption app is no longer enabled, please decrypt all your files" : "Krypteringsapplikationen är inte längre aktiverad, vänligen dekryptera alla dina filer", "Log-in password" : "Inloggningslösenord", "Decrypt all Files" : "Dekryptera alla filer", "Your encryption keys are moved to a backup location. If something went wrong you can restore the keys. Only delete them permanently if you are sure that all files are decrypted correctly." : "Dina krypteringsnycklar flyttas till en backup. Om något gick fel kan du återställa nycklarna. 
Bara ta bort dem permanent om du är säker på att alla filer dekrypteras korrekt.", "Restore Encryption Keys" : "Återställ krypteringsnycklar", "Delete Encryption Keys" : "Radera krypteringsnycklar", "Show storage location" : "Visa lagringsplats", "Show last log in" : "Visa senaste inloggning", "Show user backend" : "Visa användar-back-end", "Send email to new user" : "Skicka e-post till ny användare", "Show email address" : "Visa e-postadress", "Username" : "Användarnamn", "E-Mail" : "E-post", "Create" : "Skapa", "Admin Recovery Password" : "Admin-återställningslösenord", "Enter the recovery password in order to recover the users files during password change" : "Ange återställningslösenordet för att återställa användarnas filer vid lösenordsbyte", "Search Users" : "Sök användare", "Add Group" : "Lägg till Grupp", "Group" : "Grupp", "Everyone" : "Alla", "Admins" : "Administratörer", "Default Quota" : "Förvald datakvot", "Please enter storage quota (ex: \"512 MB\" or \"12 GB\")" : "Var god skriv in lagringskvot (ex: \"512MB\" eller \"12 GB\")", "Unlimited" : "Obegränsad", "Other" : "Annat", "Group Admin for" : "Gruppadministratör för", "Quota" : "Kvot", "Storage Location" : "Lagringsplats", "User Backend" : "Användar-back-end", "Last Login" : "Senaste inloggning", "change full name" : "ändra hela namnet", "set new password" : "ange nytt lösenord", "change email address" : "ändra e-postadress", "Default" : "Förvald" }, "nplurals=2; plural=(n != 1);");
kebenxiaoming/owncloudRedis
settings/l10n/sv.js
JavaScript
apache-2.0
21,151
package org.wikipedia.concurrency;

// Copied from Android 4.4.2_r2 source
// so we can use executeOnExecutor :P
//
// https://android.googlesource.com/platform/frameworks/base/+/android-4.4.2_r2/core/java/android/os/AsyncTask.java

/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import android.os.Handler;
import android.os.Message;
import android.os.Process;
import android.support.annotation.NonNull;

import java.util.ArrayDeque;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.FutureTask;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Runs a computation on a background thread and publishes its result on the UI thread.
 *
 * <p>A task is defined by three generic types ({@code Params}, {@code Progress},
 * {@code Result}) and four steps: {@link #onPreExecute} (UI thread), {@link #doInBackground}
 * (background thread), {@link #onProgressUpdate} (UI thread, after each
 * {@link #publishProgress} call), and {@link #onPostExecute} (UI thread, with the result).
 *
 * <p>Threading rules: create the task instance and invoke {@link #execute} on the UI thread;
 * never call the step methods manually; a task can be executed only once
 * ({@link #executeOnExecutor} throws {@link IllegalStateException} otherwise).
 *
 * <p>Cancellation: {@link #cancel(boolean)} makes {@link #isCancelled()} return true and
 * routes the finish callback to {@link #onCancelled(Object)} instead of
 * {@link #onPostExecute(Object)}.
 *
 * <p>Execution order: the default executor is {@link #SERIAL_EXECUTOR} (one task at a time,
 * process-global ordering). For parallel execution, call
 * {@link #executeOnExecutor(java.util.concurrent.Executor, Object[])} with
 * {@link #THREAD_POOL_EXECUTOR}.
 */
public abstract class AsyncTask<Params, Progress, Result> {
    private static final String LOG_TAG = "AsyncTask";

    // Thread-pool sizing derived from the number of available cores.
    private static final int CPU_COUNT = Runtime.getRuntime().availableProcessors();
    private static final int CORE_POOL_SIZE = CPU_COUNT + 1;
    private static final int MAXIMUM_POOL_SIZE = CPU_COUNT * 2 + 1;
    private static final int KEEP_ALIVE = 1; // seconds (see TimeUnit.SECONDS below)

    /** Names worker threads "AsyncTask #N" for easier debugging. */
    private static final ThreadFactory sThreadFactory = new ThreadFactory() {
        private final AtomicInteger mCount = new AtomicInteger(1);

        public Thread newThread(@NonNull Runnable r) {
            return new Thread(r, "AsyncTask #" + mCount.getAndIncrement());
        }
    };

    // Bounded queue: at most 128 tasks waiting before the pool rejects work.
    private static final BlockingQueue<Runnable> sPoolWorkQueue =
            new LinkedBlockingQueue<>(128);

    /**
     * An {@link Executor} that can be used to execute tasks in parallel.
     */
    public static final Executor THREAD_POOL_EXECUTOR
            = new ThreadPoolExecutor(CORE_POOL_SIZE, MAXIMUM_POOL_SIZE, KEEP_ALIVE,
                    TimeUnit.SECONDS, sPoolWorkQueue, sThreadFactory);

    /**
     * An {@link Executor} that executes tasks one at a time in serial order.
     * This serialization is global to a particular process.
     */
    public static final Executor SERIAL_EXECUTOR = new SerialExecutor();

    // Message codes delivered to the UI thread via sHandler.
    private static final int MESSAGE_POST_RESULT = 0x1;
    private static final int MESSAGE_POST_PROGRESS = 0x2;

    private static final InternalHandler sHandler = new InternalHandler();

    // volatile: may be swapped by setDefaultExecutor while tasks are being scheduled.
    private static volatile Executor sDefaultExecutor = SERIAL_EXECUTOR;
    private final WorkerRunnable<Params, Result> mWorker;
    private final FutureTask<Result> mFuture;

    // volatile: written on the UI thread, read by getStatus() from any thread.
    private volatile Status mStatus = Status.PENDING;

    private final AtomicBoolean mCancelled = new AtomicBoolean();
    private final AtomicBoolean mTaskInvoked = new AtomicBoolean();

    /**
     * Runs submitted tasks one at a time. Each completed task schedules the next
     * (in the finally block), so ordering holds even if a task throws.
     */
    private static class SerialExecutor implements Executor {
        final ArrayDeque<Runnable> mTasks = new ArrayDeque<>();
        Runnable mActive;

        public synchronized void execute(@NonNull final Runnable r) {
            mTasks.offer(new Runnable() {
                public void run() {
                    try {
                        r.run();
                    } finally {
                        scheduleNext();
                    }
                }
            });
            // Kick off processing only if nothing is currently running.
            if (mActive == null) {
                scheduleNext();
            }
        }

        protected synchronized void scheduleNext() {
            if ((mActive = mTasks.poll()) != null) {
                THREAD_POOL_EXECUTOR.execute(mActive);
            }
        }
    }

    /**
     * Indicates the current status of the task. Each status will be set only once
     * during the lifetime of a task.
     */
    public enum Status {
        /**
         * Indicates that the task has not been executed yet.
         */
        PENDING,
        /**
         * Indicates that the task is running.
         */
        RUNNING,
        /**
         * Indicates that {@link AsyncTask#onPostExecute} has finished.
         */
        FINISHED,
    }

    /** @hide Used to force static handler to be created. */
    public static void init() {
        sHandler.getLooper();
    }

    /** @hide */
    public static void setDefaultExecutor(Executor exec) {
        sDefaultExecutor = exec;
    }

    /**
     * Creates a new asynchronous task. This constructor must be invoked on the UI thread.
     */
    public AsyncTask() {
        mWorker = new WorkerRunnable<Params, Result>() {
            public Result call() throws Exception {
                mTaskInvoked.set(true);

                Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND);
                //noinspection unchecked
                return postResult(doInBackground(mParams));
            }
        };

        mFuture = new FutureTask<Result>(mWorker) {
            @Override
            protected void done() {
                try {
                    // If the worker never ran (e.g. cancelled before start), still
                    // deliver a result so finish()/onCancelled() get invoked.
                    postResultIfNotInvoked(get());
                } catch (InterruptedException e) {
                    android.util.Log.w(LOG_TAG, e);
                } catch (ExecutionException e) {
                    throw new RuntimeException("An error occured while executing doInBackground()",
                            e.getCause());
                } catch (CancellationException e) {
                    postResultIfNotInvoked(null);
                }
            }
        };
    }

    /** Posts {@code result} only when the worker's call() never set mTaskInvoked. */
    private void postResultIfNotInvoked(Result result) {
        final boolean wasTaskInvoked = mTaskInvoked.get();
        if (!wasTaskInvoked) {
            postResult(result);
        }
    }

    /** Sends the result to the UI thread via the internal handler; returns it unchanged. */
    private Result postResult(Result result) {
        @SuppressWarnings("unchecked")
        Message message = sHandler.obtainMessage(MESSAGE_POST_RESULT,
                new AsyncTaskResult<>(this, result));
        message.sendToTarget();
        return result;
    }

    /**
     * Returns the current status of this task.
     *
     * @return The current status.
     */
    public final Status getStatus() {
        return mStatus;
    }

    /**
     * Override this method to perform a computation on a background thread. The
     * specified parameters are the parameters passed to {@link #execute}
     * by the caller of this task. This method can call {@link #publishProgress}
     * to publish updates on the UI thread.
     *
     * @param params The parameters of the task.
     * @return A result, defined by the subclass of this task.
     */
    protected abstract Result doInBackground(Params... params);

    /**
     * Runs on the UI thread before {@link #doInBackground}.
     */
    protected void onPreExecute() {
    }

    /**
     * Runs on the UI thread after {@link #doInBackground} with its return value.
     * Not invoked if the task was cancelled.
     *
     * @param result The result of the operation computed by {@link #doInBackground}.
     */
    @SuppressWarnings({"UnusedDeclaration"})
    protected void onPostExecute(Result result) {
    }

    /**
     * Runs on the UI thread after {@link #publishProgress} is invoked, with the
     * values that were passed to it.
     *
     * @param values The values indicating progress.
     */
    @SuppressWarnings({"UnusedDeclaration"})
    protected void onProgressUpdate(Progress... values) {
    }

    /**
     * Runs on the UI thread after {@link #cancel(boolean)} is invoked and
     * {@link #doInBackground(Object[])} has finished. The default implementation
     * simply invokes {@link #onCancelled()} and ignores the result; overriding
     * implementations should not call {@code super.onCancelled(result)}.
     *
     * @param result The result, if any, computed in
     *               {@link #doInBackground(Object[])}, can be null
     */
    @SuppressWarnings({"UnusedParameters"})
    protected void onCancelled(Result result) {
        onCancelled();
    }

    /**
     * Parameterless cancellation hook invoked by the default implementation of
     * {@link #onCancelled(Object)}. Applications should preferably override
     * {@link #onCancelled(Object)}.
     */
    protected void onCancelled() {
    }

    /**
     * Returns <tt>true</tt> if this task was cancelled before it completed normally.
     * Check this periodically from {@link #doInBackground(Object[])} to end the
     * task as soon as possible after {@link #cancel(boolean)}.
     *
     * @return <tt>true</tt> if task was cancelled before it completed
     */
    public final boolean isCancelled() {
        return mCancelled.get();
    }

    /**
     * Attempts to cancel execution of this task. After this call,
     * {@link #onPostExecute(Object)} is never invoked; {@link #onCancelled(Object)}
     * is invoked instead once {@link #doInBackground(Object[])} returns.
     * {@code mayInterruptIfRunning} controls whether an already-running worker
     * thread is interrupted.
     *
     * @param mayInterruptIfRunning <tt>true</tt> if the thread executing this
     *        task should be interrupted; otherwise, in-progress tasks are allowed
     *        to complete.
     * @return <tt>false</tt> if the task could not be cancelled, typically because
     *         it has already completed normally; <tt>true</tt> otherwise
     */
    public final boolean cancel(boolean mayInterruptIfRunning) {
        mCancelled.set(true);
        return mFuture.cancel(mayInterruptIfRunning);
    }

    /**
     * Waits if necessary for the computation to complete, and then retrieves its result.
     *
     * @return The computed result.
     * @throws CancellationException If the computation was cancelled.
     * @throws ExecutionException If the computation threw an exception.
     * @throws InterruptedException If the current thread was interrupted while waiting.
     */
    public final Result get() throws InterruptedException, ExecutionException {
        return mFuture.get();
    }

    /**
     * Waits if necessary for at most the given time for the computation to
     * complete, and then retrieves its result.
     *
     * @param timeout Time to wait before cancelling the operation.
     * @param unit The time unit for the timeout.
     * @return The computed result.
     * @throws CancellationException If the computation was cancelled.
     * @throws ExecutionException If the computation threw an exception.
     * @throws InterruptedException If the current thread was interrupted while waiting.
     * @throws TimeoutException If the wait timed out.
     */
    public final Result get(long timeout, TimeUnit unit) throws InterruptedException,
            ExecutionException, TimeoutException {
        return mFuture.get(timeout, unit);
    }

    /**
     * Executes the task with the specified parameters on the default executor
     * (serial unless changed via {@link #setDefaultExecutor}). Must be invoked on
     * the UI thread. Returns this instance so the caller can keep a reference.
     *
     * @param params The parameters of the task.
     * @return This instance of AsyncTask.
     * @throws IllegalStateException If {@link #getStatus()} returns either
     *         {@link AsyncTask.Status#RUNNING} or {@link AsyncTask.Status#FINISHED}.
     */
    public final AsyncTask<Params, Progress, Result> execute(Params... params) {
        return executeOnExecutor(sDefaultExecutor, params);
    }

    /**
     * Executes the task with the specified parameters on the given executor.
     * Typically used with {@link #THREAD_POOL_EXECUTOR} to run tasks in parallel —
     * note that parallel execution gives no ordering guarantees between tasks; use
     * {@link #SERIAL_EXECUTOR} when ordering matters. Must be invoked on the UI thread.
     *
     * @param exec The executor to use. {@link #THREAD_POOL_EXECUTOR} is available as a
     *             convenient process-wide thread pool for tasks that are loosely coupled.
     * @param params The parameters of the task.
     * @return This instance of AsyncTask.
     * @throws IllegalStateException If {@link #getStatus()} returns either
     *         {@link AsyncTask.Status#RUNNING} or {@link AsyncTask.Status#FINISHED}.
     */
    public final AsyncTask<Params, Progress, Result> executeOnExecutor(Executor exec,
            Params... params) {
        if (mStatus != Status.PENDING) {
            switch (mStatus) {
                case RUNNING:
                    throw new IllegalStateException("Cannot execute task:"
                            + " the task is already running.");
                case FINISHED:
                    throw new IllegalStateException("Cannot execute task:"
                            + " the task has already been executed "
                            + "(a task can be executed only once)");
            }
        }

        mStatus = Status.RUNNING;

        onPreExecute();

        mWorker.mParams = params;
        exec.execute(mFuture);

        return this;
    }

    /**
     * Convenience version of {@link #execute(Object...)} for use with a simple
     * Runnable object, scheduled on the default executor.
     */
    public static void execute(Runnable runnable) {
        sDefaultExecutor.execute(runnable);
    }

    /**
     * Can be invoked from {@link #doInBackground} to publish updates on the UI
     * thread while the background computation is still running. Each call triggers
     * {@link #onProgressUpdate} on the UI thread; no-op once the task is cancelled.
     *
     * @param values The progress values to update the UI with.
     */
    protected final void publishProgress(Progress... values) {
        if (!isCancelled()) {
            sHandler.obtainMessage(MESSAGE_POST_PROGRESS,
                    new AsyncTaskResult<>(this, values)).sendToTarget();
        }
    }

    /** UI-thread completion: dispatches to onCancelled/onPostExecute and marks FINISHED. */
    private void finish(Result result) {
        if (isCancelled()) {
            onCancelled(result);
        } else {
            onPostExecute(result);
        }
        mStatus = Status.FINISHED;
    }

    /** Delivers result/progress messages from worker threads to the task on the UI thread. */
    private static class InternalHandler extends Handler {
        @SuppressWarnings({"unchecked", "RawUseOfParameterizedType"})
        @Override
        public void handleMessage(Message msg) {
            AsyncTaskResult result = (AsyncTaskResult) msg.obj;
            switch (msg.what) {
                case MESSAGE_POST_RESULT:
                    // There is only one result
                    result.mTask.finish(result.mData[0]);
                    break;
                case MESSAGE_POST_PROGRESS:
                    result.mTask.onProgressUpdate(result.mData);
                    break;
            }
        }
    }

    /** Callable carrying the execute() parameters into the background call(). */
    private static abstract class WorkerRunnable<Params, Result> implements Callable<Result> {
        Params[] mParams;
    }

    /** Message payload: the owning task plus either the result or the progress values. */
    @SuppressWarnings({"RawUseOfParameterizedType"})
    private static class AsyncTaskResult<Data> {
        final AsyncTask mTask;
        final Data[] mData;

        AsyncTaskResult(AsyncTask task, Data... data) {
            mTask = task;
            mData = data;
        }
    }
}
reproio/apps-android-wikipedia
wikipedia/src/main/java/org/wikipedia/concurrency/AsyncTask.java
Java
apache-2.0
26,978
package cn.edu.hhu.reg.vo;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;

/**
 * JPA entity mapped to the {@code doctor_login} table: the login credentials
 * associated with a doctor account.
 */
@Entity
@Table(name = "doctor_login")
public class DoctorLogin {

    /** Surrogate primary key, generated by the database (IDENTITY strategy). */
    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(length = 16)
    private Integer id;

    /** Id of the doctor this login belongs to. */
    @Column(name = "doctor_id", length = 16)
    private Integer doctorId;

    /** Login name used by the doctor to sign in. */
    @Column(name = "login_name", length = 50)
    private String loginName;

    /**
     * Login password.
     * NOTE(review): stored as a plain column here; confirm hashing happens elsewhere.
     */
    @Column(name = "password", length = 50)
    private String password;

    /** No-argument constructor required by JPA. */
    public DoctorLogin() {
    }

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public Integer getDoctorId() {
        return doctorId;
    }

    public void setDoctorId(Integer doctorId) {
        this.doctorId = doctorId;
    }

    public String getLoginName() {
        return loginName;
    }

    public void setLoginName(String loginName) {
        this.loginName = loginName;
    }

    public String getPassword() {
        return password;
    }

    public void setPassword(String password) {
        this.password = password;
    }
}
pqpo/registration_api
src/cn/edu/hhu/reg/vo/DoctorLogin.java
Java
apache-2.0
1,216
/*
 * Copyright 2018 Aleksander Jagiełło
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package pl.themolka.arcade.team;

import org.bukkit.ChatColor;
import pl.themolka.arcade.command.CommandException;
import pl.themolka.arcade.command.CommandUtils;
import pl.themolka.arcade.command.Sender;
import pl.themolka.arcade.game.GamePlayer;
import pl.themolka.arcade.match.Observers;
import pl.themolka.arcade.parser.Context;
import pl.themolka.arcade.util.Color;

import java.util.ArrayList;
import java.util.Collection;

/**
 * Admin command handlers for managing teams in a {@link TeamsGame}: moving
 * players, and editing team name, color, friendly-fire, slots, and player limits.
 *
 * <p>Every edit method snapshots the team ({@code new Team(team)}) before mutating
 * it, then publishes a {@link TeamEditEvent} carrying both the new and old state.
 * Invalid input is reported by throwing {@link CommandException}.
 */
public class TeamCommands {
    private final TeamsGame game;

    public TeamCommands(TeamsGame game) {
        this.game = game;
    }

    //
    // Commands
    //

    /**
     * Moves every online member of the given team (observers excluded) to the
     * observers team and reports how many players were moved.
     */
    public void clearCommand(Sender sender, String teamId) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot clear observers.");
        }

        Observers observers = this.game.getMatch().getObservers();

        int result = 0;
        // Copy the member list first: joinForce mutates team membership while we iterate.
        for (GamePlayer player : new ArrayList<>(team.getOnlineMembers())) {
            observers.joinForce(player);
            result++;
        }

        if (result > 0) {
            sender.sendSuccess(team.getName() + " has been cleared (" + result +
                    " players) and moved to " + observers.getName() + ".");
        } else {
            sender.sendError("No players to clear.");
        }
    }

    /** Force-moves a single player into the given team (no-op error if already a member). */
    public void forceCommand(Sender sender, String username, String teamId) {
        GamePlayer player = this.fetchPlayer(username);
        Team team = this.fetchTeam(teamId);
        if (team.contains(player)) {
            throw new CommandException(player.getUsername() + " is already member of " +
                    team.getName() + ".");
        }

        team.joinForce(player);
        sender.sendSuccess(player.getUsername() + " has been moved to " + team.getName() + ".");
    }

    /** Enables or disables friendly-fire on a team (observers cannot be edited). */
    public void friendlyCommand(Sender sender, String teamId, boolean friendly) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot edit observers.");
        }

        // Reject a no-op change with a state-specific message.
        if (friendly == team.isFriendlyFire()) {
            if (friendly) {
                throw new CommandException(team.getName() + " is already in friendly-fire.");
            } else {
                throw new CommandException(team.getName() + " is already not in friendly-fire");
            }
        }

        Team oldState = new Team(team); // snapshot for the edit event
        team.setFriendlyFire(friendly);
        this.callEditEvent(team, oldState, TeamEditEvent.Reason.FRIENDLY_FIRE);

        if (friendly) {
            sender.sendSuccess(oldState.getName() + " is now in friendly-fire.");
        } else {
            sender.sendSuccess(oldState.getName() + " is now not in friendly-fire.");
        }
    }

    /** Prints a color-coded summary line (members/slots/min/overfill) for every team. */
    public void infoCommand(Sender sender) {
        Collection<Team> teams = this.game.getTeams();
        CommandUtils.sendTitleMessage(sender, "Teams", Integer.toString(teams.size()));

        for (Team team : teams) {
            sender.send(String.format("%s - %s/%s - %s minimal to play and %s overfill",
                    team.getPrettyName() + ChatColor.GRAY,
                    ChatColor.GOLD.toString() + team.getOnlineMembers().size() + ChatColor.GRAY,
                    Integer.toString(team.getSlots()),
                    ChatColor.GREEN.toString() + team.getMinPlayers() + ChatColor.GRAY,
                    ChatColor.RED.toString() + team.getMaxPlayers() + ChatColor.GRAY));
        }
    }

    /** Kicks a player from their current team into observers (cannot kick from observers). */
    public void kickCommand(Sender sender, String username) {
        GamePlayer player = this.fetchPlayer(username);
        Team team = this.game.getTeam(player);
        if (team.isObservers()) {
            throw new CommandException("Cannot kick from observers.");
        }

        team.leaveForce(player);
        team.getMatch().getObservers().joinForce(player);
        sender.sendSuccess(player.getUsername() + " has been kicked from " + team.getName() + ".");
    }

    /** Sets the minimum player count required for a team to play (must be non-negative). */
    public void minCommand(Sender sender, String teamId, int min) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot edit observers.");
        } else if (min < 0) {
            throw new CommandException("Number cannot be negative.");
        }

        Team oldState = new Team(team); // snapshot for the edit event
        team.setMinPlayers(min);
        this.callEditEvent(team, oldState, TeamEditEvent.Reason.MIN_PLAYERS);
        sender.sendSuccess(oldState.getName() + " has been edited.");
    }

    /**
     * Sets a team's overfill (hard max player count). Zero or negative means
     * unlimited; slots are raised to match if the new overfill exceeds them.
     */
    public void overfillCommand(Sender sender, String teamId, int overfill) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot edit observers.");
        }

        // set to unlimited if zero or negative
        int max = Integer.MAX_VALUE;
        if (overfill > 0) {
            max = overfill;
        }

        Team oldState = new Team(team); // snapshot for the edit event
        team.setMaxPlayers(max);
        if (max > team.getSlots()) {
            team.setSlots(max); // slots
        }

        this.callEditEvent(team, oldState, TeamEditEvent.Reason.MAX_PLAYERS);
        sender.sendSuccess(oldState.getName() + " has been edited.");
    }

    /**
     * Changes a team's chat color. If the given color name cannot be parsed,
     * replies with a list of all available colors (MAGIC is rendered in red
     * because printing the magic format code itself would garble the text).
     */
    public void paintCommand(Sender sender, String teamId, String paint) {
        Team team = this.fetchTeam(teamId);
        ChatColor color = Color.parseChat(new Context(this.game.getPlugin()), paint);
        if (color == null) {
            StringBuilder colors = new StringBuilder();
            for (int i = 0; i < ChatColor.values().length; i++) {
                ChatColor value = ChatColor.values()[i];
                if (i != 0) {
                    colors.append(", ");
                }

                ChatColor result = ChatColor.RED;
                if (!value.equals(ChatColor.MAGIC)) {
                    result = value;
                }

                colors.append(result).append(value.name().toLowerCase().replace("_", "-"))
                        .append(ChatColor.RESET).append(ChatColor.RED);
            }

            throw new CommandException("Available colors: " + colors.toString() + ".");
        }

        Team oldState = new Team(team); // snapshot for the edit event
        team.setChatColor(color);
        this.callEditEvent(team, oldState, TeamEditEvent.Reason.PAINT);
        sender.sendSuccess(oldState.getName() + " has been painted from " +
                oldState.getChatColor().name().toLowerCase().replace("_", "-") + " to " +
                team.getChatColor().name().toLowerCase().replace("_", "-") + ".");
    }

    /** Renames a team; rejects missing, too-long, or unchanged names. */
    public void renameCommand(Sender sender, String teamId, String name) {
        Team team = this.fetchTeam(teamId);
        if (name == null) {
            throw new CommandException("New name not given.");
        } else if (name.length() > Team.NAME_MAX_LENGTH) {
            throw new CommandException("Name too long (greater than " +
                    Team.NAME_MAX_LENGTH + " characters).");
        } else if (team.getName().equals(name)) {
            throw new CommandException("Already named '" + team.getName() + "'.");
        }

        Team oldState = new Team(team); // snapshot for the edit event
        team.setName(name);
        this.callEditEvent(team, oldState, TeamEditEvent.Reason.RENAME);
        sender.sendSuccess(oldState.getName() + " has been renamed to " + team.getName() + ".");
    }

    /**
     * Sets a team's slot count (soft player limit). Zero or negative means
     * unlimited; overfill is raised to match if the new slot count exceeds it.
     */
    public void slotsCommand(Sender sender, String teamId, int slots) {
        Team team = this.fetchTeam(teamId);
        if (team.isObservers()) {
            throw new CommandException("Cannot edit observers.");
        }

        // set to unlimited if zero or negative
        int max = Integer.MAX_VALUE;
        if (slots > 0) {
            max = slots;
        }

        Team oldState = new Team(team); // snapshot for the edit event
        team.setSlots(max);
        if (max > team.getMaxPlayers()) {
            team.setMaxPlayers(max); // overfill
        }

        this.callEditEvent(team, oldState, TeamEditEvent.Reason.SLOTS);
        sender.sendSuccess(oldState.getName() + " has been edited.");
    }

    //
    // Command Utilities
    //

    /** Publishes a {@link TeamEditEvent} carrying the new and old team state. */
    private void callEditEvent(Team newState, Team oldState, TeamEditEvent.Reason reason) {
        this.game.getPlugin().getEventBus().publish(new TeamEditEvent(
                this.game.getPlugin(), newState, oldState, reason));
    }

    /** Resolves a player by name; throws {@link CommandException} if absent or blank. */
    private GamePlayer fetchPlayer(String player) {
        if (player != null && !player.isEmpty()) {
            GamePlayer result = this.game.getGame().findPlayer(player);
            if (result != null) {
                return result;
            }
        }

        throw new CommandException("Player not found.");
    }

    /** Resolves a team by id; throws {@link CommandException} if absent or blank. */
    private Team fetchTeam(String team) {
        if (team != null && !team.isEmpty()) {
            Team result = this.game.findTeamById(team);
            if (result != null) {
                return result;
            }
        }

        throw new CommandException("Team not found.");
    }
}
ShootGame/Arcade2
src/main/java/pl/themolka/arcade/team/TeamCommands.java
Java
apache-2.0
9,514
package problems;

import java.util.Arrays;
import java.util.PriorityQueue;

/**
 * Leetcode: Super Ugly Number
 * <p>
 * Computes the first {@code n} "super ugly" numbers: positive integers whose
 * prime factors are all drawn from the given {@code primes} list (1 is included
 * by convention). Uses a min-heap holding one candidate per prime; each prime
 * keeps an index into the already-produced sequence so its next candidate is
 * {@code nums[index] * prime}.
 * <p>
 * Created by alan on 2/24/2016.
 */
public class SuperUglyNumber {

    /**
     * Heap entry: a candidate value plus the index of the prime that produced it.
     * Static nested class — it needs no reference to the enclosing instance.
     */
    private static class Node implements Comparable<Node> {
        int val;                // current candidate value for this prime
        final int prime_index;  // which prime in the primes[] array this entry tracks

        public Node(int value, int prime_idx) {
            this.val = value;
            this.prime_index = prime_idx;
        }

        /**
         * Compare by candidate value. Uses Integer.compare rather than
         * subtraction: (this.val - a.val) overflows once candidates exceed
         * Integer.MAX_VALUE / 2 apart, which would corrupt the heap order.
         */
        public int compareTo(Node a) {
            return Integer.compare(this.val, a.val);
        }
    }

    /**
     * Returns the first {@code n} super ugly numbers in ascending order.
     *
     * @param n      how many super ugly numbers to produce (n >= 1)
     * @param primes the allowed prime factors, assumed sorted and > 1
     * @return an array of length n; element 0 is always 1
     */
    public int[] nthSuperUglyNumber(int n, int[] primes) {
        int[] nums = new int[n];
        nums[0] = 1;
        // index[p] = position in nums[] whose product with primes[p] is the
        // next candidate contributed by that prime.
        int[] index = new int[primes.length];
        PriorityQueue<Node> pq = new PriorityQueue<>();
        for (int i = 0; i < primes.length; i++)
            pq.add(new Node(primes[i], i));

        for (int i = 1; i < n; i++) {
            Node node = pq.poll();
            // Skip duplicates: several primes can generate the same value
            // (e.g. 2*3 == 3*2); advance each offending entry past it.
            while (node.val == nums[i - 1]) {
                node.val = nums[++index[node.prime_index]] * primes[node.prime_index];
                pq.add(node);
                node = pq.poll();
            }
            nums[i] = node.val;
            // Re-arm the winning prime with its next candidate.
            node.val = nums[++index[node.prime_index]] * primes[node.prime_index];
            pq.add(node);
        }
        return nums;
    }

    public static void main(String[] args) {
        SuperUglyNumber sn = new SuperUglyNumber();
        int[] primes = {2, 7, 13, 19};
        System.out.println(Arrays.toString(primes));
        System.out.println(Arrays.toString(sn.nthSuperUglyNumber(12, primes)));
    }
}
alyiwang/LeetCode
src/problems/SuperUglyNumber.java
Java
apache-2.0
1,548
package yuku.alkitab.base.util; import android.app.Activity; import android.app.Dialog; import android.content.Intent; import android.database.Cursor; import android.database.DatabaseUtils; import android.database.sqlite.SQLiteDatabase; import android.os.AsyncTask; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import android.util.Xml; import com.afollestad.materialdialogs.MaterialDialog; import gnu.trove.list.TIntList; import gnu.trove.list.array.TIntArrayList; import gnu.trove.map.hash.TIntLongHashMap; import gnu.trove.map.hash.TIntObjectHashMap; import gnu.trove.map.hash.TObjectIntHashMap; import org.xml.sax.Attributes; import org.xml.sax.SAXException; import org.xml.sax.ext.DefaultHandler2; import yuku.alkitab.base.App; import yuku.alkitab.base.IsiActivity; import yuku.alkitab.base.S; import yuku.alkitab.base.storage.Db; import yuku.alkitab.base.storage.InternalDb; import yuku.alkitab.debug.R; import yuku.alkitab.model.Label; import yuku.alkitab.model.Marker; import yuku.alkitab.model.Marker_Label; import java.io.InputStream; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; import static yuku.alkitab.base.util.Literals.ToStringArray; // Imported from v3. Used for once-only migration from v3 to v4. public class BookmarkImporter { static final String TAG = BookmarkImporter.class.getSimpleName(); // constants static class Bookmark2_Label { // DO NOT CHANGE CONSTANT VALUES! 
public static final String XMLTAG_Bookmark2_Label = "Bukmak2_Label"; public static final String XMLATTR_bookmark2_relId = "bukmak2_relId"; public static final String XMLATTR_label_relId = "label_relId"; } // constants static class BackupManager { public static final String XMLTAG_Bukmak2 = "Bukmak2"; private static final String XMLATTR_ari = "ari"; private static final String XMLATTR_kind = "jenis"; private static final String XMLATTR_caption = "tulisan"; private static final String XMLATTR_addTime = "waktuTambah"; private static final String XMLATTR_modifyTime = "waktuUbah"; private static final String XMLATTR_relId = "relId"; private static final String XMLVAL_bookmark = "bukmak"; private static final String XMLVAL_note = "catatan"; private static final String XMLVAL_highlight = "stabilo"; public static final String XMLTAG_Label = "Label"; private static final String XMLATTR_title = "judul"; private static final String XMLATTR_bgColor = "warnaLatar"; @Nullable public static Marker markerFromAttributes(Attributes attributes) { int ari = Integer.parseInt(attributes.getValue("", XMLATTR_ari)); String kind_s = attributes.getValue("", XMLATTR_kind); Marker.Kind kind = kind_s.equals(XMLVAL_bookmark) ? Marker.Kind.bookmark : kind_s.equals(XMLVAL_note) ? Marker.Kind.note : kind_s.equals(XMLVAL_highlight) ? Marker.Kind.highlight : null; String caption = unescapeHighUnicode(attributes.getValue("", XMLATTR_caption)); Date addTime = Sqlitil.toDate(Integer.parseInt(attributes.getValue("", XMLATTR_addTime))); Date modifyTime = Sqlitil.toDate(Integer.parseInt(attributes.getValue("", XMLATTR_modifyTime))); if (kind == null) { // invalid return null; } return Marker.createNewMarker(ari, kind, caption, 1, addTime, modifyTime); } public static int getRelId(Attributes attributes) { String s = attributes.getValue("", XMLATTR_relId); return s == null ? 
0 : Integer.parseInt(s); } public static Label labelFromAttributes(Attributes attributes) { String title = unescapeHighUnicode(attributes.getValue("", XMLATTR_title)); String bgColor = attributes.getValue("", XMLATTR_bgColor); return Label.createNewLabel(title, 0, bgColor); } static ThreadLocal<Matcher> highUnicodeMatcher = new ThreadLocal<Matcher>() { @Override protected Matcher initialValue() { return Pattern.compile("\\[\\[~U([0-9A-Fa-f]{6})~\\]\\]").matcher(""); } }; public static String unescapeHighUnicode(String input) { if (input == null) return null; final Matcher m = highUnicodeMatcher.get(); m.reset(input); StringBuffer res = new StringBuffer(); while (m.find()) { String s = m.group(1); final int cp = Integer.parseInt(s, 16); m.appendReplacement(res, new String(new int[]{cp}, 0, 1)); } m.appendTail(res); return res.toString(); } } public static void importBookmarks(final Activity activity, @NonNull final InputStream fis, final boolean finishActivityAfterwards, final Runnable runWhenDone) { final MaterialDialog pd = new MaterialDialog.Builder(activity) .content(R.string.mengimpor_titiktiga) .cancelable(false) .progress(true, 0) .show(); new AsyncTask<Boolean, Integer, Object>() { int count_bookmark = 0; int count_label = 0; @Override protected Object doInBackground(Boolean... 
params) { final List<Marker> markers = new ArrayList<>(); final TObjectIntHashMap<Marker> markerToRelIdMap = new TObjectIntHashMap<>(); final List<Label> labels = new ArrayList<>(); final TObjectIntHashMap<Label> labelToRelIdMap = new TObjectIntHashMap<>(); final TIntLongHashMap labelRelIdToAbsIdMap = new TIntLongHashMap(); final TIntObjectHashMap<TIntList> markerRelIdToLabelRelIdsMap = new TIntObjectHashMap<>(); try { Xml.parse(fis, Xml.Encoding.UTF_8, new DefaultHandler2() { @Override public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException { switch (localName) { case BackupManager.XMLTAG_Bukmak2: final Marker marker = BackupManager.markerFromAttributes(attributes); if (marker != null) { markers.add(marker); final int bookmark2_relId = BackupManager.getRelId(attributes); markerToRelIdMap.put(marker, bookmark2_relId); count_bookmark++; } break; case BackupManager.XMLTAG_Label: { final Label label = BackupManager.labelFromAttributes(attributes); int label_relId = BackupManager.getRelId(attributes); labels.add(label); labelToRelIdMap.put(label, label_relId); count_label++; break; } case Bookmark2_Label.XMLTAG_Bookmark2_Label: { final int bookmark2_relId = Integer.parseInt(attributes.getValue("", Bookmark2_Label.XMLATTR_bookmark2_relId)); final int label_relId = Integer.parseInt(attributes.getValue("", Bookmark2_Label.XMLATTR_label_relId)); TIntList labelRelIds = markerRelIdToLabelRelIdsMap.get(bookmark2_relId); if (labelRelIds == null) { labelRelIds = new TIntArrayList(); markerRelIdToLabelRelIdsMap.put(bookmark2_relId, labelRelIds); } labelRelIds.add(label_relId); break; } } } }); fis.close(); } catch (Exception e) { return e; } { // bikin label-label yang diperlukan, juga map relId dengan id dari label. 
final HashMap<String, Label> judulMap = new HashMap<>(); final List<Label> xlabelLama = S.getDb().listAllLabels(); for (Label labelLama : xlabelLama) { judulMap.put(labelLama.title, labelLama); } for (Label label : labels) { // cari apakah label yang judulnya persis sama udah ada Label labelLama = judulMap.get(label.title); final int labelRelId = labelToRelIdMap.get(label); if (labelLama != null) { // removed from v3: update warna label lama labelRelIdToAbsIdMap.put(labelRelId, labelLama._id); AppLog.d(TAG, "label (lama) r->a : " + labelRelId + "->" + labelLama._id); } else { // belum ada, harus bikin baru Label labelBaru = S.getDb().insertLabel(label.title, label.backgroundColor); labelRelIdToAbsIdMap.put(labelRelId, labelBaru._id); AppLog.d(TAG, "label (baru) r->a : " + labelRelId + "->" + labelBaru._id); } } } importBookmarks(markers, markerToRelIdMap, labelRelIdToAbsIdMap, markerRelIdToLabelRelIdsMap); return null; } @Override protected void onPostExecute(@NonNull Object result) { pd.dismiss(); if (result instanceof Exception) { AppLog.e(TAG, "Error when importing markers", (Throwable) result); new MaterialDialog.Builder(activity) .content(activity.getString(R.string.terjadi_kesalahan_ketika_mengimpor_pesan, ((Exception) result).getMessage())) .positiveText(R.string.ok) .show(); } else { final Dialog dialog = new MaterialDialog.Builder(activity) .content(activity.getString(R.string.impor_berhasil_angka_diproses, count_bookmark, count_label)) .positiveText(R.string.ok) .show(); if (finishActivityAfterwards) { dialog.setOnDismissListener(dialog1 -> activity.finish()); } } if (runWhenDone != null) runWhenDone.run(); } }.execute(); } public static void importBookmarks(List<Marker> markers, TObjectIntHashMap<Marker> markerToRelIdMap, TIntLongHashMap labelRelIdToAbsIdMap, TIntObjectHashMap<TIntList> markerRelIdToLabelRelIdsMap) { SQLiteDatabase db = S.getDb().getWritableDatabase(); db.beginTransaction(); try { final TIntObjectHashMap<Marker> markerRelIdToMarker = new 
TIntObjectHashMap<>(); { // write new markers (if not available yet) for (int i = 0; i < markers.size(); i++) { Marker marker = markers.get(i); final int marker_relId = markerToRelIdMap.get(marker); // migrate: look for existing marker with same kind, ari, and content try (Cursor cursor = db.query( Db.TABLE_Marker, null, Db.Marker.ari + "=? and " + Db.Marker.kind + "=? and " + Db.Marker.caption + "=?", ToStringArray(marker.ari, marker.kind.code, marker.caption), null, null, null )) { if (cursor.moveToNext()) { marker = InternalDb.markerFromCursor(cursor); markers.set(i, marker); } else { InternalDb.insertMarker(db, marker); } // map it markerRelIdToMarker.put(marker_relId, marker); } } } { // now is marker-label assignments for (final int marker_relId : markerRelIdToLabelRelIdsMap.keys()) { final TIntList label_relIds = markerRelIdToLabelRelIdsMap.get(marker_relId); final Marker marker = markerRelIdToMarker.get(marker_relId); if (marker != null) { // existing labels > 0: ignore // existing labels == 0: insert final int existing_label_count = (int) DatabaseUtils.queryNumEntries(db, Db.TABLE_Marker_Label, Db.Marker_Label.marker_gid + "=?", ToStringArray(marker.gid)); if (existing_label_count == 0) { for (int label_relId : label_relIds.toArray()) { final long label_id = labelRelIdToAbsIdMap.get(label_relId); if (label_id > 0) { final Label label = S.getDb().getLabelById(label_id); final Marker_Label marker_label = Marker_Label.createNewMarker_Label(marker.gid, label.gid); InternalDb.insertMarker_LabelIfNotExists(db, marker_label); } else { AppLog.w(TAG, "label_id is invalid!: " + label_id); } } } } else { AppLog.w(TAG, "wrong marker_relId: " + marker_relId); } } } db.setTransactionSuccessful(); } finally { db.endTransaction(); } App.getLbm().sendBroadcast(new Intent(IsiActivity.ACTION_ATTRIBUTE_MAP_CHANGED)); } }
infojulio/androidbible
Alkitab/src/main/java/yuku/alkitab/base/util/BookmarkImporter.java
Java
apache-2.0
11,461
/**
 * Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies
 *
 * Please see distribution for license.
 */
package com.opengamma.core.region;

import java.util.Set;

import org.joda.beans.impl.flexi.FlexiBean;

import org.threeten.bp.ZoneId;

import com.opengamma.id.ExternalBundleIdentifiable;
import com.opengamma.id.ExternalIdBundle;
import com.opengamma.id.UniqueId;
import com.opengamma.id.UniqueIdentifiable;
import com.opengamma.util.PublicAPI;
import com.opengamma.util.i18n.Country;
import com.opengamma.util.money.Currency;

/**
 * A region of the world.
 * <p>
 * Many aspects of business, algorithms and contracts are specific to a region. The region may be of any size, from a municipality to a super-national group.
 * <p>
 * This interface is read-only. Implementations may be mutable.
 */
@PublicAPI
public interface Region extends UniqueIdentifiable, ExternalBundleIdentifiable {

  /**
   * Gets the unique identifier of the region.
   * <p>
   * This specifies a single version-correction of the region.
   *
   * @return the unique identifier for this region, not null within the engine
   */
  @Override
  UniqueId getUniqueId();

  /**
   * Gets the external identifier bundle that defines the region.
   * <p>
   * Each external system has one or more identifiers by which they refer to the region.
   * Some of these may be unique within that system, while others may be more descriptive.
   * This bundle stores the set of these external identifiers.
   * <p>
   * This will include the country, currency and time-zone.
   *
   * @return the bundle defining the region, not null
   */
  @Override  // override for Javadoc
  ExternalIdBundle getExternalIdBundle();

  /**
   * Gets the classification of the region.
   *
   * @return the classification of region, such as SUPER_NATIONAL or INDEPENDENT_STATE, not null
   */
  RegionClassification getClassification();

  /**
   * Gets the unique identifiers of the regions that this region is a member of.
   * For example, a country might be a member of the World, UN, European Union and NATO.
   *
   * @return the parent unique identifiers, null if this is the root entry
   */
  Set<UniqueId> getParentRegionIds();

  /**
   * Gets the country, if this region corresponds to one.
   *
   * @return the country, null if not applicable
   */
  Country getCountry();

  /**
   * Gets the currency associated with the region.
   *
   * @return the currency, null if not applicable
   */
  Currency getCurrency();

  /**
   * Gets the time-zone. For larger regions, there can be multiple time-zones,
   * so this is only reliable for municipalities.
   *
   * @return the time-zone, null if not applicable
   */
  ZoneId getTimeZone();

  /**
   * Gets the short descriptive name of the region.
   *
   * @return the name of the region, not null
   */
  String getName();

  /**
   * Gets the full descriptive name of the region.
   *
   * @return the full name of the region, not null
   */
  String getFullName();

  /**
   * Gets the extensible data store for additional information.
   * Applications may store additional region based information here.
   *
   * @return the additional data, not null
   */
  FlexiBean getData();

}
McLeodMoores/starling
projects/core/src/main/java/com/opengamma/core/region/Region.java
Java
apache-2.0
3,168
/* $Id$ * $URL: https://dev.almende.com/svn/abms/coala-common/src/main/java/com/almende/coala/time/NanoInstant.java $ * * Part of the EU project Adapt4EE, see http://www.adapt4ee.eu/ * * @license * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * * Copyright (c) 2010-2013 Almende B.V. */ package io.coala.time; /** * {@link NanoInstant} has the nano-second as base time unit * * @date $Date: 2014-06-03 14:26:09 +0200 (Tue, 03 Jun 2014) $ * @version $Revision: 296 $ * @author <a href="mailto:Rick@almende.org">Rick</a> */ public class NanoInstant extends AbstractInstant<NanoInstant> { /** */ private static final long serialVersionUID = 1L; /** */ // private static final Logger LOG = LogUtil.getLogger(NanoInstant.class); /** */ // private static final TimeUnit BASE_UNIT = TimeUnit.NANOS; /** */ public static final NanoInstant ZERO = new NanoInstant(null, 0); /** * {@link NanoInstant} constructor * * @param value */ public NanoInstant(final ClockID clockID, final Number value) { super(clockID, value, TimeUnit.NANOS); } // /** // * {@link NanoInstant} constructor // * // * @param value // */ // public NanoInstant(final ClockID clockID, final Number value, // final TimeUnit unit) // { // super(clockID, value, unit); // } // // /** @see Instant#getBaseUnit() */ // @Override // public TimeUnit getBaseUnit() // { // return BASE_UNIT; // } /** @see Instant#toUnit(TimeUnit) */ @Override public NanoInstant toUnit(final TimeUnit unit) { throw new RuntimeException( "Can't convert NanoInstant to 
another TimeUnit"); } /** @see Instant#plus(Number) */ @Override public NanoInstant plus(final Number value) { return new NanoInstant(getClockID(), getValue().doubleValue() + value.doubleValue()); } }
krevelen/coala
coala-core/src/main/java/io/coala/time/NanoInstant.java
Java
apache-2.0
2,299
package cat.ereza.customactivityoncrash.activity;

import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;

import cat.ereza.customactivityoncrash.CustomActivityOnCrash;

/**
 * Trampoline activity used by the crash handler: it relaunches the intent
 * stored under {@link CustomActivityOnCrash#KEY_CURRENT_INTENT}, finishes
 * itself, and then kills the process so no crashed state survives.
 * <p>
 * Created by zhy on 15/8/4.
 */
public class ClearStack extends Activity {

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);

        // The intent to relaunch is supplied by the crash handler. Guard
        // against it being absent: startActivity(null) would throw inside the
        // recovery path itself, defeating the purpose of this activity.
        Intent intent = getIntent().getParcelableExtra(CustomActivityOnCrash.KEY_CURRENT_INTENT);
        if (intent != null) {
            startActivity(intent);
        }
        finish();
        // Hard-exit the process so statics/singletons from the crashed run do
        // not leak into the restarted one.
        Runtime.getRuntime().exit(0);
    }
}
hongyangAndroid/CustomActivityOnCrash
library/src/main/java/cat/ereza/customactivityoncrash/activity/ClearStack.java
Java
apache-2.0
596
using System; using System.Collections.Generic; using System.Data.Services.Common; using System.IO; using System.Linq; using NuGet.Resources; namespace NuGet { [DataServiceKey("Id", "Version")] [EntityPropertyMapping("LastUpdated", SyndicationItemProperty.Updated, SyndicationTextContentKind.Plaintext, keepInContent: false)] [EntityPropertyMapping("Id", SyndicationItemProperty.Title, SyndicationTextContentKind.Plaintext, keepInContent: false)] [EntityPropertyMapping("Authors", SyndicationItemProperty.AuthorName, SyndicationTextContentKind.Plaintext, keepInContent: false)] [EntityPropertyMapping("Summary", SyndicationItemProperty.Summary, SyndicationTextContentKind.Plaintext, keepInContent: false)] [CLSCompliant(false)] public class DataServicePackage : IPackage { private readonly LazyWithRecreate<IPackage> _package; public DataServicePackage() { _package = new LazyWithRecreate<IPackage>(DownloadAndVerifyPackage, ShouldUpdatePackage); } public string Id { get; set; } public string Version { get; set; } public string Title { get; set; } public string Authors { get; set; } public string Owners { get; set; } public Uri IconUrl { get; set; } public Uri LicenseUrl { get; set; } public Uri ProjectUrl { get; set; } public Uri ReportAbuseUrl { get; set; } public Uri GalleryDetailsUrl { get; set; } public Uri DownloadUrl { get { return Context.GetReadStreamUri(this); } } public DateTimeOffset Published { get; set; } public DateTimeOffset LastUpdated { get; set; } public int DownloadCount { get; set; } public double Rating { get; set; } public int RatingsCount { get; set; } public bool RequireLicenseAcceptance { get; set; } public string Description { get; set; } public string Summary { get; set; } public string Language { get; set; } public string Tags { get; set; } public string Dependencies { get; set; } public string PackageHash { get; set; } internal string OldHash { get; set; } internal IDataServiceContext Context { get; set; } internal PackageDownloader Downloader { 
get; set; } IEnumerable<string> IPackageMetadata.Authors { get { if (String.IsNullOrEmpty(Authors)) { return Enumerable.Empty<string>(); } return Authors.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries); } } IEnumerable<string> IPackageMetadata.Owners { get { if (String.IsNullOrEmpty(Owners)) { return Enumerable.Empty<string>(); } return Owners.Split(new[] { ',' }, StringSplitOptions.RemoveEmptyEntries); } } IEnumerable<PackageDependency> IPackageMetadata.Dependencies { get { if (String.IsNullOrEmpty(Dependencies)) { return Enumerable.Empty<PackageDependency>(); } return from d in Dependencies.Split('|') let dependency = ParseDependency(d) where dependency != null select dependency; } } Version IPackageMetadata.Version { get { if (Version != null) { return new Version(Version); } return null; } } public IEnumerable<IPackageAssemblyReference> AssemblyReferences { get { return _package.Value.AssemblyReferences; } } public IEnumerable<FrameworkAssemblyReference> FrameworkAssemblies { get { return _package.Value.FrameworkAssemblies; } } public IEnumerable<IPackageFile> GetFiles() { return _package.Value.GetFiles(); } public Stream GetStream() { return _package.Value.GetStream(); } public override string ToString() { return this.GetFullName(); } private bool ShouldUpdatePackage() { return ShouldUpdatePackage(MachineCache.Default); } internal bool ShouldUpdatePackage(IPackageRepository repository) { // If the hash changed re-download the package. if (OldHash != PackageHash) { return true; } // If the package hasn't been cached, then re-download the package. IPackage package = GetPackage(repository); if (package == null) { return true; } // If the cached package hash isn't the same as incoming package hash // then re-download the package. 
string cachedHash = package.GetHash(); if (cachedHash != PackageHash) { return true; } return false; } private IPackage DownloadAndVerifyPackage() { return DownloadAndVerifyPackage(MachineCache.Default); } internal IPackage DownloadAndVerifyPackage(IPackageRepository repository) { if (String.IsNullOrEmpty(PackageHash)) { throw new InvalidOperationException(NuGetResources.PackageContentsVerifyError); } IPackage package = null; // If OldHash is null, we're looking at a new instance of the data service package. // The package might be stored in the cache so we're going to try the looking there before attempting a download. if (OldHash == null) { package = GetPackage(repository); } if (package == null) { byte[] hashBytes = Convert.FromBase64String(PackageHash); package = Downloader.DownloadPackage(DownloadUrl, hashBytes, this); // Add the package to the cache repository.AddPackage(package); // Clear the cache for this package ZipPackage.ClearCache(package); } // Update the hash OldHash = PackageHash; return package; } /// <summary> /// Parses a dependency from the feed in the format: /// id:versionSpec or id /// </summary> private static PackageDependency ParseDependency(string value) { if (String.IsNullOrWhiteSpace(value)) { return null; } string[] tokens = value.Trim().Split(new[] { ':' }, StringSplitOptions.RemoveEmptyEntries); if (tokens.Length == 0) { return null; } // Trim the id string id = tokens[0].Trim(); IVersionSpec versionSpec = null; if (tokens.Length > 1) { // Attempt to parse the version VersionUtility.TryParseVersionSpec(tokens[1], out versionSpec); } return new PackageDependency(id, versionSpec); } private IPackage GetPackage(IPackageRepository repository) { return repository.FindPackage(Id, ((IPackageMetadata)this).Version); } /// <summary> /// We can't use the built in Lazy for 2 reasons: /// 1. It caches the exception if any is thrown from the creator func (this means it won't retry calling the function). /// 2. 
There's no way to force a retry or expiration of the cache. /// </summary> private class LazyWithRecreate<T> { private readonly Func<T> _creator; private readonly Func<bool> _shouldRecreate; private T _value; private bool _isValueCreated; public LazyWithRecreate(Func<T> creator, Func<bool> shouldRecreate) { _creator = creator; _shouldRecreate = shouldRecreate; } public T Value { get { if (_shouldRecreate() || !_isValueCreated) { _value = _creator(); _isValueCreated = true; } return _value; } } } } }
grendello/nuget
src/Core/Packages/DataServicePackage.cs
C#
apache-2.0
9,582
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.cxf.tools.common; import javax.xml.namespace.QName; public final class ToolConstants { //public static final String TOOLSPECS_BASE = "/org/apache/cxf/tools/common/toolspec/toolspecs/"; public static final String TOOLSPECS_BASE = "/org/apache/cxf/tools/"; public static final String SCHEMA_URI = "http://www.w3.org/2001/XMLSchema"; public static final String XML_NAMESPACE_URI = "http://www.w3.org/XML/1998/namespace"; public static final String WSDL_NAMESPACE_URI = "http://schemas.xmlsoap.org/wsdl/"; public static final String WSA_NAMESPACE_URI = "http://www.w3.org/2005/08/addressing"; /** * Tools permit caller to pass in additional bean definitions. 
*/ public static final String CFG_BEAN_CONFIG = "beans"; public static final String DEFAULT_TEMP_DIR = "gen_tmp"; public static final String CFG_OUTPUTDIR = "outputdir"; public static final String CFG_OUTPUTFILE = "outputfile"; public static final String CFG_WSDLURL = "wsdlurl"; public static final String CFG_WSDLLOCATION = "wsdlLocation"; public static final String CFG_WSDLLIST = "wsdlList"; public static final String CFG_NAMESPACE = "namespace"; public static final String CFG_VERBOSE = "verbose"; public static final String CFG_PORT = "port"; public static final String CFG_BINDING = "binding"; public static final String CFG_AUTORESOLVE = "autoNameResolution"; public static final String CFG_WEBSERVICE = "webservice"; public static final String CFG_SERVER = "server"; public static final String CFG_CLIENT = "client"; public static final String CFG_ALL = "all"; public static final String CFG_IMPL = "impl"; public static final String CFG_PACKAGENAME = "packagename"; public static final String CFG_JSPACKAGEPREFIX = "jspackageprefix"; public static final String CFG_NINCLUDE = "ninclude"; public static final String CFG_NEXCLUDE = "nexclude"; public static final String CFG_CMD_ARG = "args"; public static final String CFG_INSTALL_DIR = "install.dir"; public static final String CFG_PLATFORM_VERSION = "platform.version"; public static final String CFG_COMPILE = "compile"; public static final String CFG_CLASSDIR = "classdir"; public static final String CFG_EXTRA_SOAPHEADER = "exsoapheader"; public static final String CFG_DEFAULT_NS = "defaultns"; public static final String CFG_DEFAULT_EX = "defaultex"; public static final String CFG_NO_TYPES = "notypes"; public static final String CFG_XJC_ARGS = "xjc"; public static final String CFG_CATALOG = "catalog"; public static final String CFG_BAREMETHODS = "bareMethods"; public static final String CFG_ASYNCMETHODS = "asyncMethods"; public static final String CFG_MIMEMETHODS = "mimeMethods"; public static final String CFG_DEFAULT_VALUES 
= "defaultValues"; public static final String CFG_JAVASCRIPT_UTILS = "javascriptUtils"; public static final String CFG_VALIDATE_WSDL = "validate"; public static final String CFG_CREATE_XSD_IMPORTS = "createxsdimports"; /** * Front-end selection command-line option to java2ws. */ public static final String CFG_FRONTEND = "frontend"; public static final String CFG_DATABINDING = "databinding"; public static final String DEFAULT_ADDRESS = "http://localhost:9090"; // WSDL2Java Constants public static final String CFG_TYPES = "types"; public static final String CFG_INTERFACE = "interface"; public static final String CFG_NIGNOREEXCLUDE = "nignoreexclude"; public static final String CFG_ANT = "ant"; public static final String CFG_LIB_REF = "library.references"; public static final String CFG_ANT_PROP = "ant.prop"; public static final String CFG_NO_ADDRESS_BINDING = "noAddressBinding"; public static final String CFG_ALLOW_ELEMENT_REFS = "allowElementReferences"; public static final String CFG_RESERVE_NAME = "reserveClass"; public static final String CFG_FAULT_SERIAL_VERSION_UID = "faultSerialVersionUID"; public static final String CFG_EXCEPTION_SUPER = "exceptionSuper"; public static final String CFG_MARK_GENERATED = "mark-generated"; //Internal Flag to generate public static final String CFG_IMPL_CLASS = "implClass"; public static final String CFG_GEN_CLIENT = "genClient"; public static final String CFG_GEN_SERVER = "genServer"; public static final String CFG_GEN_IMPL = "genImpl"; public static final String CFG_GEN_TYPES = "genTypes"; public static final String CFG_GEN_SEI = "genSEI"; public static final String CFG_GEN_ANT = "genAnt"; public static final String CFG_GEN_SERVICE = "genService"; public static final String CFG_GEN_OVERWRITE = "overwrite"; public static final String CFG_GEN_FAULT = "genFault"; public static final String CFG_GEN_NEW_ONLY = "newonly"; // Java2WSDL Constants public static final String CFG_CLASSPATH = "classpath"; public static final String CFG_TNS 
= "tns"; public static final String CFG_SERVICENAME = "servicename"; public static final String CFG_SCHEMANS = "schemans"; public static final String CFG_USETYPES = "usetypes"; public static final String CFG_CLASSNAME = "classname"; public static final String CFG_PORTTYPE = "porttype"; public static final String CFG_SOURCEDIR = "sourcedir"; public static final String CFG_WSDL = "wsdl"; public static final String CFG_WRAPPERBEAN = "wrapperbean"; // WSDL2Service Constants public static final String CFG_ADDRESS = "address"; public static final String CFG_TRANSPORT = "transport"; public static final String CFG_SERVICE = "service"; public static final String CFG_BINDING_ATTR = "attrbinding"; public static final String CFG_SOAP12 = "soap12"; // WSDL2Soap Constants public static final String CFG_STYLE = "style"; public static final String CFG_USE = "use"; // XSD2WSDL Constants public static final String CFG_XSDURL = "xsdurl"; public static final String CFG_NAME = "name"; // WsdlValidator public static final String CFG_DEEP = "deep"; public static final String CFG_SCHEMA_DIR = "schemaDir"; public static final String CFG_SCHEMA_URL = "schemaURL"; public static final String CXF_SCHEMA_DIR = "cxf_schema_dir"; public static final String CXF_SCHEMAS_DIR_INJAR = "schemas/wsdl/"; public static final String CFG_SUPPRESS_WARNINGS = "suppressWarnings"; // WSDL2Java Processor Constants public static final String SEI_GENERATOR = "sei.generator"; public static final String FAULT_GENERATOR = "fault.generator"; public static final String TYPE_GENERATOR = "type.generator"; public static final String IMPL_GENERATOR = "impl.generator"; public static final String SVR_GENERATOR = "svr.generator"; public static final String CLT_GENERATOR = "clt.generator"; public static final String SERVICE_GENERATOR = "service.generator"; public static final String ANT_GENERATOR = "ant.generator"; public static final String HANDLER_GENERATOR = "handler.generator"; // Binding namespace public static final 
String NS_JAXWS_BINDINGS = "http://java.sun.com/xml/ns/jaxws"; public static final String NS_JAXB_BINDINGS = "http://java.sun.com/xml/ns/jaxb"; public static final QName JAXWS_BINDINGS = new QName(NS_JAXWS_BINDINGS, "bindings"); public static final QName JAXB_BINDINGS = new QName(NS_JAXB_BINDINGS, "bindings"); public static final String JAXWS_BINDINGS_WSDL_LOCATION = "wsdlLocation"; public static final String JAXWS_BINDING_NODE = "node"; public static final String JAXWS_BINDING_VERSION = "version"; public static final String ASYNC_METHOD_SUFFIX = "Async"; public static final String HANDLER_CHAINS_URI = "http://java.sun.com/xml/ns/javaee"; public static final String HANDLER_CHAIN = "handler-chain"; public static final String HANDLER_CHAINS = "handler-chains"; //public static final String RAW_JAXB_MODEL = "rawjaxbmodel"; // JMS address public static final String NS_JMS_ADDRESS = "http://cxf.apache.org/transports/jms"; public static final QName JMS_ADDRESS = new QName(NS_JMS_ADDRESS, "address"); public static final String JMS_ADDR_DEST_STYLE = "destinationStyle"; public static final String JMS_ADDR_JNDI_URL = "jndiProviderURL"; public static final String JMS_ADDR_JNDI_FAC = "jndiConnectionFactoryName"; public static final String JMS_ADDR_JNDI_DEST = "jndiDestinationName"; public static final String JMS_ADDR_MSG_TYPE = "messageType"; public static final String JMS_ADDR_INIT_CTX = "initialContextFactory"; public static final String JMS_ADDR_SUBSCRIBER_NAME = "durableSubscriberName"; public static final String JMS_ADDR_MSGID_TO_CORRID = "useMessageIDAsCorrelationID"; // XML Binding public static final String XMLBINDING_ROOTNODE = "rootNode"; public static final String XMLBINDING_HTTP_LOCATION = "location"; public static final String NS_XML_FORMAT = "http://cxf.apache.org/bindings/xformat"; public static final String XML_FORMAT_PREFIX = "xformat"; public static final String NS_XML_HTTP = "http://schemas.xmlsoap.org/wsdl/http/"; public static final String XML_HTTP_PREFIX = 
"http"; public static final QName XML_HTTP_ADDRESS = new QName(NS_XML_HTTP, "address"); public static final QName XML_FORMAT = new QName(NS_XML_FORMAT, "body"); public static final QName XML_BINDING_FORMAT = new QName(NS_XML_FORMAT, "binding"); public static final String XML_SCHEMA_COLLECTION = "xmlSchemaCollection"; public static final String PORTTYPE_MAP = "portTypeMap"; public static final String SCHEMA_TARGET_NAMESPACES = "schemaTargetNameSpaces"; public static final String WSDL_DEFINITION = "wsdlDefinition"; public static final String IMPORTED_DEFINITION = "importedDefinition"; public static final String IMPORTED_PORTTYPE = "importedPortType"; public static final String IMPORTED_SERVICE = "importedService"; public static final String BINDING_GENERATOR = "BindingGenerator"; // Tools framework public static final String FRONTEND_PLUGIN = "frontend"; public static final String DATABINDING_PLUGIN = "databinding"; public static final String RUNTIME_DATABINDING_CLASS = "databinding-class"; public static final String CFG_WSDL_VERSION = "wsdlversion"; // Suppress the code generation, in this case you can just get the generated code model public static final String CFG_SUPPRESS_GEN = "suppress"; public static final String DEFAULT_PACKAGE_NAME = "defaultnamespace"; //For java2ws tool public static final String SERVICE_LIST = "serviceList"; public static final String GEN_FROM_SEI = "genFromSEI"; public static final String JAXWS_FRONTEND = "jaxws"; public static final String SIMPLE_FRONTEND = "simple"; public static final String JAXB_DATABINDING = "jaxb"; public static final String AEGIS_DATABINDING = "aegis"; //For Simple FrontEnd public static final String SEI_CLASS = "seiClass"; public static final String IMPL_CLASS = "implClass"; public static final String SERVICE_NAME = "serviceName"; public static final String PORT_NAME = "portName"; public static final String DEFAULT_DATA_BINDING_NAME = "jaxb"; public static final String DATABIND_BEAN_NAME_SUFFIX = 
"DatabindingBean"; public static final String CLIENT_CLASS = "clientClass"; public static final String SERVER_CLASS = "serverClass"; public static final String CFG_JSPREFIXMAP = "javascriptPrefixMap"; private ToolConstants() { //utility class } }
zzsoszz/webservice_gzdx
opensource_cxf/org/apache/cxf/tools/common/ToolConstants.java
Java
apache-2.0
12,534
/** * Copyright Pravega Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.pravega.client.connection.impl; import io.netty.bootstrap.ServerBootstrap; import io.netty.buffer.Unpooled; import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPipeline; import io.netty.channel.EventLoopGroup; import io.netty.channel.epoll.EpollEventLoopGroup; import io.netty.channel.epoll.EpollServerSocketChannel; import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.handler.codec.LengthFieldBasedFrameDecoder; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; import io.netty.handler.ssl.SslContext; import io.netty.handler.ssl.SslContextBuilder; import io.netty.handler.ssl.SslHandler; import io.pravega.client.ClientConfig; import io.pravega.shared.protocol.netty.CommandDecoder; import io.pravega.shared.protocol.netty.CommandEncoder; import io.pravega.shared.protocol.netty.ConnectionFailedException; import io.pravega.shared.protocol.netty.FailingReplyProcessor; import io.pravega.shared.protocol.netty.PravegaNodeUri; import io.pravega.shared.protocol.netty.WireCommands; import io.pravega.test.common.AssertExtensions; import 
io.pravega.test.common.SecurityConfigDefaults; import io.pravega.test.common.TestUtils; import java.io.File; import java.net.URI; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.CompletableFuture; import java.util.function.Function; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLException; import javax.net.ssl.SSLParameters; import lombok.Cleanup; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; import static io.pravega.shared.metrics.MetricNotifier.NO_OP_METRIC_NOTIFIER; import static io.pravega.shared.protocol.netty.WireCommands.MAX_WIRECOMMAND_SIZE; import static io.pravega.test.common.AssertExtensions.assertThrows; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; public class ConnectionPoolingTest { @Rule public Timeout globalTimeout = Timeout.seconds(1000); boolean ssl = false; private Channel serverChannel; private int port; private final String seg = "Segment-0"; private final long offset = 1234L; private final int length = 1024; private final String data = "data"; private final Function<Long, WireCommands.ReadSegment> readRequestGenerator = id -> new WireCommands.ReadSegment(seg, offset, length, "", id); private final Function<Long, WireCommands.SegmentRead> readResponseGenerator = id -> new WireCommands.SegmentRead(seg, offset, true, false, Unpooled.wrappedBuffer(data.getBytes(StandardCharsets.UTF_8)), id); private class EchoServerHandler extends ChannelInboundHandlerAdapter { @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { cause.printStackTrace(); ctx.close(); } @Override public void channelRead(ChannelHandlerContext ctx, Object message) { if (message instanceof WireCommands.Hello) { ctx.write(message); ctx.flush(); } else if 
(message instanceof WireCommands.ReadSegment) { WireCommands.ReadSegment msg = (WireCommands.ReadSegment) message; ctx.write(readResponseGenerator.apply(msg.getRequestId())); ctx.flush(); } } } @Before public void setUp() throws Exception { // Configure SSL. port = TestUtils.getAvailableListenPort(); final SslContext sslCtx; if (ssl) { try { sslCtx = SslContextBuilder.forServer( new File(SecurityConfigDefaults.TLS_SERVER_CERT_PATH), new File(SecurityConfigDefaults.TLS_SERVER_PRIVATE_KEY_PATH)) .build(); } catch (SSLException e) { throw new RuntimeException(e); } } else { sslCtx = null; } boolean nio = false; EventLoopGroup bossGroup; EventLoopGroup workerGroup; try { bossGroup = new EpollEventLoopGroup(1); workerGroup = new EpollEventLoopGroup(); } catch (ExceptionInInitializerError | UnsatisfiedLinkError | NoClassDefFoundError e) { nio = true; bossGroup = new NioEventLoopGroup(1); workerGroup = new NioEventLoopGroup(); } ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup) .channel(nio ? NioServerSocketChannel.class : EpollServerSocketChannel.class) .option(ChannelOption.SO_BACKLOG, 100) .handler(new LoggingHandler(LogLevel.INFO)) .childHandler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(SocketChannel ch) throws Exception { ChannelPipeline p = ch.pipeline(); if (sslCtx != null) { SslHandler handler = sslCtx.newHandler(ch.alloc()); SSLEngine sslEngine = handler.engine(); SSLParameters sslParameters = sslEngine.getSSLParameters(); sslParameters.setEndpointIdentificationAlgorithm("LDAPS"); sslEngine.setSSLParameters(sslParameters); p.addLast(handler); } p.addLast(new CommandEncoder(null, NO_OP_METRIC_NOTIFIER), new LengthFieldBasedFrameDecoder(MAX_WIRECOMMAND_SIZE, 4, 4), new CommandDecoder(), new EchoServerHandler()); } }); // Start the server. 
serverChannel = b.bind("localhost", port).awaitUninterruptibly().channel(); } @After public void tearDown() throws Exception { serverChannel.close(); serverChannel.closeFuture(); } @Test public void testNonPooling() throws Exception { ClientConfig clientConfig = ClientConfig.builder() .controllerURI(URI.create((this.ssl ? "tls://" : "tcp://") + "localhost")) .trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH) .maxConnectionsPerSegmentStore(1) .build(); @Cleanup SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1); @Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory); ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10); FailingReplyProcessor rp = new FailingReplyProcessor() { @Override public void connectionDropped() { } @Override public void segmentRead(WireCommands.SegmentRead data) { msgRead.add(data); } @Override public void processingFailure(Exception error) { } @Override public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) { } }; Flow flow1 = new Flow(1, 0); @Cleanup ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join(); connection1.send(readRequestGenerator.apply(flow1.asLong())); WireCommands.SegmentRead msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); assertEquals(1, connectionPool.getActiveChannels().size()); // create a second connection, since not using a flow. @Cleanup ClientConnection connection2 = connectionPool.getClientConnection(new PravegaNodeUri("localhost", port), rp).join(); Flow flow2 = new Flow(2, 0); // send data over connection2 and verify. 
connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); assertEquals(1, connectionPool.getActiveChannels().size()); assertEquals(2, factory.getOpenSocketCount()); // send data over connection1 and verify. connection1.send(readRequestGenerator.apply(flow1.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); // send data over connection2 and verify. connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); // close a client connection, this should not close the channel. connection2.close(); assertThrows(ConnectionFailedException.class, () -> connection2.send(readRequestGenerator.apply(flow2.asLong()))); // verify we are able to send data over connection1. connection1.send(readRequestGenerator.apply(flow1.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); // close connection1 connection1.close(); assertThrows(ConnectionFailedException.class, () -> connection1.send(readRequestGenerator.apply(flow2.asLong()))); AssertExtensions.assertEventuallyEquals(0, () -> { connectionPool.pruneUnusedConnections(); return factory.getOpenSocketCount(); }, 10000); assertEquals(0, connectionPool.getActiveChannels().size()); } @Test public void testConnectionPooling() throws Exception { ClientConfig clientConfig = ClientConfig.builder() .controllerURI(URI.create((this.ssl ? 
"tls://" : "tcp://") + "localhost")) .trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH) .maxConnectionsPerSegmentStore(1) .build(); @Cleanup SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1); @Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory); ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10); FailingReplyProcessor rp = new FailingReplyProcessor() { @Override public void connectionDropped() { } @Override public void segmentRead(WireCommands.SegmentRead data) { msgRead.add(data); } @Override public void processingFailure(Exception error) { } @Override public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) { } }; Flow flow1 = new Flow(1, 0); @Cleanup ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join(); connection1.send(readRequestGenerator.apply(flow1.asLong())); WireCommands.SegmentRead msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); assertEquals(1, connectionPool.getActiveChannels().size()); // create a second connection, since the max number of connections is 1 this should reuse the same connection. Flow flow2 = new Flow(2, 0); CompletableFuture<ClientConnection> cf = new CompletableFuture<>(); connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp, cf); @Cleanup ClientConnection connection2 = cf.join(); // send data over connection2 and verify. connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); assertEquals(1, connectionPool.getActiveChannels().size()); assertEquals(1, factory.getOpenSocketCount()); // send data over connection1 and verify. 
connection1.send(readRequestGenerator.apply(flow1.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); // send data over connection2 and verify. connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); // close a client connection, this should not close the channel. connection2.close(); assertThrows(ConnectionFailedException.class, () -> connection2.send(readRequestGenerator.apply(flow2.asLong()))); // verify we are able to send data over connection1. connection1.send(readRequestGenerator.apply(flow1.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); // close connection1 connection1.close(); assertThrows(ConnectionFailedException.class, () -> connection1.send(readRequestGenerator.apply(flow2.asLong()))); AssertExtensions.assertEventuallyEquals(0, () -> { connectionPool.pruneUnusedConnections(); return factory.getOpenSocketCount(); }, 10000); assertEquals(0, connectionPool.getActiveChannels().size()); } @Test public void testPoolBalancing() throws Exception { ClientConfig clientConfig = ClientConfig.builder() .controllerURI(URI.create((this.ssl ? 
"tls://" : "tcp://") + "localhost")) .trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH) .maxConnectionsPerSegmentStore(2) .build(); @Cleanup SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1); @Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory); ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10); FailingReplyProcessor rp = new FailingReplyProcessor() { @Override public void connectionDropped() { } @Override public void segmentRead(WireCommands.SegmentRead data) { msgRead.add(data); } @Override public void processingFailure(Exception error) { } @Override public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) { } }; Flow flow1 = new Flow(1, 0); @Cleanup ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join(); connection1.send(readRequestGenerator.apply(flow1.asLong())); WireCommands.SegmentRead msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow1.asLong()), msg); assertEquals(1, factory.getOpenSocketCount()); // create a second connection, since the max number of connections is 2 this should not reuse the same connection. Flow flow2 = new Flow(2, 0); @Cleanup ClientConnection connection2 = connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp).join(); // send data over connection2 and verify. connection2.send(readRequestGenerator.apply(flow2.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow2.asLong()), msg); assertEquals(2, factory.getOpenSocketCount()); assertNotEquals(((FlowClientConnection) connection1).getChannel(), ((FlowClientConnection) connection2).getChannel()); // create a second connection, since the max number of connections is 2 this should reuse the same connection. 
Flow flow3 = new Flow(3, 0); @Cleanup ClientConnection connection3 = connectionPool.getClientConnection(flow3, new PravegaNodeUri("localhost", port), rp).join(); // send data over connection3 and verify. connection3.send(readRequestGenerator.apply(flow3.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow3.asLong()), msg); assertEquals(2, factory.getOpenSocketCount()); assertEquals(((FlowClientConnection) connection1).getChannel(), ((FlowClientConnection) connection3).getChannel()); Flow flow4 = new Flow(3, 0); @Cleanup ClientConnection connection4 = connectionPool.getClientConnection(flow4, new PravegaNodeUri("localhost", port), rp).join(); // send data over connection3 and verify. connection3.send(readRequestGenerator.apply(flow4.asLong())); msg = msgRead.take(); assertEquals(readResponseGenerator.apply(flow4.asLong()), msg); assertEquals(2, factory.getOpenSocketCount()); assertEquals(2, connectionPool.getActiveChannels().size()); assertNotEquals(((FlowClientConnection) connection3).getChannel(), ((FlowClientConnection) connection4).getChannel()); assertEquals(((FlowClientConnection) connection2).getChannel(), ((FlowClientConnection) connection4).getChannel()); } @Test public void testConcurrentRequests() throws Exception { ClientConfig clientConfig = ClientConfig.builder() .controllerURI(URI.create((this.ssl ? 
"tls://" : "tcp://") + "localhost")) .trustStore(SecurityConfigDefaults.TLS_CA_CERT_PATH) .maxConnectionsPerSegmentStore(1) .build(); @Cleanup SocketConnectionFactoryImpl factory = new SocketConnectionFactoryImpl(clientConfig, 1); @Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, factory); ArrayBlockingQueue<WireCommands.SegmentRead> msgRead = new ArrayBlockingQueue<>(10); FailingReplyProcessor rp = new FailingReplyProcessor() { @Override public void connectionDropped() { } @Override public void segmentRead(WireCommands.SegmentRead data) { msgRead.add(data); } @Override public void processingFailure(Exception error) { } @Override public void authTokenCheckFailed(WireCommands.AuthTokenCheckFailed authTokenCheckFailed) { } }; Flow flow1 = new Flow(1, 0); ClientConnection connection1 = connectionPool.getClientConnection(flow1, new PravegaNodeUri("localhost", port), rp).join(); // create a second connection, since the max number of connections is 1 this should reuse the same connection. Flow flow2 = new Flow(2, 0); ClientConnection connection2 = connectionPool.getClientConnection(flow2, new PravegaNodeUri("localhost", port), rp).join(); assertEquals(1, factory.getOpenSocketCount()); assertEquals(1, connectionPool.getActiveChannels().size()); connection1.send(readRequestGenerator.apply(flow1.asLong())); connection2.send(readRequestGenerator.apply(flow2.asLong())); List<WireCommands.SegmentRead> msgs = new ArrayList<WireCommands.SegmentRead>(); msgs.add(msgRead.take()); msgs.add(msgRead.take()); assertTrue(msgs.contains(readResponseGenerator.apply(flow1.asLong()))); assertTrue(msgs.contains(readResponseGenerator.apply(flow1.asLong()))); assertEquals(1, factory.getOpenSocketCount()); connection1.close(); connection2.close(); AssertExtensions.assertEventuallyEquals(0, () -> { connectionPool.pruneUnusedConnections(); return factory.getOpenSocketCount(); }, 10000); assertEquals(0, connectionPool.getActiveChannels().size()); } }
pravega/pravega
client/src/test/java/io/pravega/client/connection/impl/ConnectionPoolingTest.java
Java
apache-2.0
21,376
#define DEBUG_TYPE "sil-simplify-cfg"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "swift/SIL/SILInstruction.h"
#include "swift/SILOptimizer/Analysis/DominanceAnalysis.h"
#include "swift/SILOptimizer/Utils/CFG.h"
#include "swift/SILOptimizer/Utils/Local.h"
#include "swift/SILOptimizer/Utils/SILInliner.h"

using namespace swift;

namespace {

/// This is a class implementing a dominator-based jump-threading
/// for checked_cast_br [exact].
///
/// The idea: if a dominating checked_cast_br already decided the outcome for
/// the same condition, predecessors reaching the current checked_cast_br only
/// through the dominator's success (or failure) edge can branch directly to
/// the corresponding target, possibly via a clone of the current block.
class CheckedCastBrJumpThreading {
  // The checked_cast_br instruction, which
  // we try to jump-thread
  CheckedCastBranchInst *CCBI;

  // Basic block of the current checked_cast_br instruction.
  SILBasicBlock *BB;

  // Condition used by the current checked_cast_br instruction.
  SILValue Condition;

  // Success branch of the current checked_cast_br instruction.
  SILBasicBlock *SuccessBB;

  // Failure branch of the current checked_cast_br instruction.
  SILBasicBlock *FailureBB;

  // Current dominating checked_cast_br instruction.
  CheckedCastBranchInst *DomCCBI;

  // Basic block of the dominating checked_cast_br instruction.
  SILBasicBlock *DomBB;

  // Condition used by the dominating checked_cast_br instruction.
  SILValue DomCondition;

  // Success branch of the dominating checked_cast_br instruction.
  SILBasicBlock *DomSuccessBB;

  // Failure branch of the dominating checked_cast_br instruction.
  SILBasicBlock *DomFailureBB;

  // Current dominator tree node where we look for a dominating
  // checked_cast_br instruction.
  llvm::DomTreeNodeBase<SILBasicBlock> *Node;

  // Basic block owning the BBArg the condition traces back to; set by
  // areEquivalentConditionsAlongSomePaths, null until then.
  SILBasicBlock *ArgBB;

  // Dominator information to be used.
  DominanceInfo *DT;

  // Basic block created as a landing BB for all failure predecessors.
  SILBasicBlock *TargetFailureBB;

  // Basic block created as a landing BB for all success predecessors.
  SILBasicBlock *TargetSuccessBB;

  // Cloner used to clone the BB to TargetFailureBB.
  Optional<BasicBlockCloner> FailureBBCloner;

  // Cloner used to clone the BB to TargetSuccessBB.
  Optional<BasicBlockCloner> SuccessBBCloner;

  // Predecessors reached only via a path along the
  // success branch of the dominating checked_cast_br.
  SmallVector<SILBasicBlock *, 8> SuccessPreds;

  // Predecessors reached only via a path along the
  // failure branch of the dominating checked_cast_br.
  SmallVector<SILBasicBlock *, 8> FailurePreds;

  // All other predecessors, where the outcome of the
  // checked_cast_br along the path is not known.
  SmallVector<SILBasicBlock *, 8> UnknownPreds;

  // Basic blocks to be added to for reprocessing
  // after jump-threading is done.
  SmallVectorImpl<SILBasicBlock *> &BlocksForWorklist;

  bool areEquivalentConditionsAlongPaths();
  bool areEquivalentConditionsAlongSomePaths();
  bool handleArgBBIsEntryBlock(SILBasicBlock *ArgBB);
  bool checkCloningConstraints();
  void modifyCFGForUnknownPreds();
  void modifyCFGForFailurePreds();
  void modifyCFGForSuccessPreds();
  void updateDominatorTree();
  void updateSSA();
  void addBlockToSimplifyCFGWorklist(SILBasicBlock *BB);
  void addBlocksToWorklist();

  void classifyPredecessor(
      SILBasicBlock *Pred,
      SmallVectorImpl<SILBasicBlock *> &SuccessPreds,
      SmallVectorImpl<SILBasicBlock *> &FailurePreds,
      SmallVectorImpl<SILBasicBlock *> &UnknownPreds,
      bool SuccessDominates,
      bool FailureDominates);

  SILValue isArgValueEquivalentToCondition(SILValue Value,
                                           SILBasicBlock *DomBB,
                                           SILValue DomValue,
                                           DominanceInfo *DT);

public:
  CheckedCastBrJumpThreading(DominanceInfo *DT,
                             SmallVectorImpl<SILBasicBlock *> &BBs)
      : DT(DT), BlocksForWorklist(BBs) { }

  bool trySimplify(TermInst *Term);

  // Blocks affected by jump-threading, to be revisited by simplifyCFG.
  ArrayRef<SILBasicBlock*> getBlocksForWorklist() {
    return BlocksForWorklist;
  }
};
} // end anonymous namespace

/// Find a nearest common dominator for a given set of basic blocks.
static DominanceInfoNode *findCommonDominator(ArrayRef<SILBasicBlock *> BBs, DominanceInfo *DT) { DominanceInfoNode *CommonDom = nullptr; for (auto *BB : BBs) { if (!CommonDom) { CommonDom = DT->getNode(BB); } else { CommonDom = DT->getNode( DT->findNearestCommonDominator(CommonDom->getBlock(), BB)); } } return CommonDom; } /// Find a nearest common dominator for all predecessors of /// a given basic block. static DominanceInfoNode *findCommonDominator(SILBasicBlock *BB, DominanceInfo *DT) { SmallVector<SILBasicBlock *, 8> Preds; for (auto *Pred: BB->getPreds()) Preds.push_back(Pred); return findCommonDominator(Preds, DT); } /// Estimate the cost of inlining a given basic block. static unsigned basicBlockInlineCost(SILBasicBlock *BB, unsigned Cutoff) { unsigned Cost = 0; for (auto &I : *BB) { auto ICost = instructionInlineCost(I); Cost += unsigned(ICost); if (Cost > Cutoff) return Cost; } return Cost; } /// We cannot duplicate blocks with AllocStack instructions (they need to be /// FIFO). Other instructions can be duplicated. static bool canDuplicateBlock(SILBasicBlock *BB) { for (auto &I : *BB) { if (!I.isTriviallyDuplicatable()) return false; } return true; } void CheckedCastBrJumpThreading::addBlockToSimplifyCFGWorklist(SILBasicBlock *BB) { BlocksForWorklist.push_back(BB); } /// Add affected blocks for re-processing by simplifyCFG void CheckedCastBrJumpThreading::addBlocksToWorklist() { if (TargetFailureBB) { if (!TargetFailureBB->pred_empty()) addBlockToSimplifyCFGWorklist(TargetFailureBB); } if (TargetSuccessBB) { if (!TargetSuccessBB->pred_empty()) addBlockToSimplifyCFGWorklist(TargetSuccessBB); } if (!BB->pred_empty()) addBlockToSimplifyCFGWorklist(BB); } /// Classify a predecessor of a BB containing checked_cast_br as being /// reachable via success or failure branches of a dominating checked_cast_br /// or as unknown if it can be reached via success or failure branches /// at the same time. 
void CheckedCastBrJumpThreading::classifyPredecessor(
    SILBasicBlock *Pred,
    SmallVectorImpl<SILBasicBlock *> &SuccessPreds,
    SmallVectorImpl<SILBasicBlock *> &FailurePreds,
    SmallVectorImpl<SILBasicBlock *> &UnknownPreds,
    bool SuccessDominates,
    bool FailureDominates) {
  // Reachable through both outcomes -> outcome unknown for this predecessor.
  if (SuccessDominates && FailureDominates) {
    UnknownPreds.push_back(Pred);
    return;
  }

  if (SuccessDominates) {
    SuccessPreds.push_back(Pred);
    return;
  }

  if (FailureDominates) {
    FailurePreds.push_back(Pred);
    return;
  }

  // Reachable through neither dominating edge -> also unknown.
  UnknownPreds.push_back(Pred);
}

/// Check if the root value for Value that comes
/// along the path from DomBB is equivalent to the
/// DomCondition.
///
/// Iteratively strips class casts and walks backwards through BB arguments:
/// at each step all incoming values (other than DomValue itself) must agree on
/// a single definition dominated by DomBB, otherwise equivalence cannot be
/// proven and an empty SILValue is returned.
SILValue CheckedCastBrJumpThreading::isArgValueEquivalentToCondition(
    SILValue Value, SILBasicBlock *DomBB, SILValue DomValue,
    DominanceInfo *DT) {
  SmallPtrSet<ValueBase *, 16> SeenValues;
  DomValue = DomValue.stripClassCasts();

  while (true) {
    Value = Value.stripClassCasts();
    if (Value == DomValue)
      return Value;

    // We know how to propagate through BBArgs only.
    auto *V = dyn_cast<SILArgument>(Value);
    if (!V)
      return SILValue();

    // Have we visited this BB already?
    if (!SeenValues.insert(Value.getDef()).second)
      return SILValue();

    // Bail on long chains to bound compile time.
    if (SeenValues.size() > 10)
      return SILValue();

    SmallVector<SILValue, 4> IncomingValues;
    if (!V->getIncomingValues(IncomingValues) || IncomingValues.empty())
      return SILValue();

    ValueBase *Def = nullptr;
    for (auto IncomingValue : IncomingValues) {
      // Each incoming value should be either from a block
      // dominated by DomBB or it should be the value used in
      // condition in DomBB
      Value = IncomingValue.stripClassCasts();
      if (Value == DomValue)
        continue;

      // Values should be the same
      if (!Def)
        Def = Value.getDef();

      if (Def != Value.getDef())
        return SILValue();

      if (!DT->dominates(DomBB, Value.getDef()->getParentBB()))
        return SILValue();
      // OK, this value is a potential candidate
    }

    // All incoming values agree; continue the backwards walk from any of them.
    Value = IncomingValues[0];
  }
}

/// Update the SSA form after all changes.
void CheckedCastBrJumpThreading::updateSSA() {
  assert(!(SuccessBBCloner.hasValue() && FailureBBCloner.hasValue()) &&
         "Both cloners cannot be used at the same time yet");

  // Now update the SSA form.
  if (!FailurePreds.empty() && FailureBBCloner.hasValue() &&
      !SuccessBBCloner.hasValue())
    updateSSAAfterCloning(*FailureBBCloner.getPointer(), TargetFailureBB, BB);

  if (SuccessBBCloner.hasValue() && !FailureBBCloner.hasValue()) {
    updateSSAAfterCloning(*SuccessBBCloner.getPointer(), TargetSuccessBB, BB);
  }
}

/// Update the dominator tree after all changes.
/// (NOTE(review): original comment said "SSA form" — copy-paste from above.)
void CheckedCastBrJumpThreading::updateDominatorTree() {
  // Update the dominator tree.

  // If BB was IDom of something, then PredCBBI becomes the IDOM
  // of this after jump-threading.
  auto *BBDomNode = DT->getNode(BB);
  auto &Children = BBDomNode->getChildren();
  if (Children.size() > 1) {
    // Copy the children list: changeImmediateDominator mutates it while we iterate.
    SmallVector<DominanceInfoNode *, 16> ChildrenCopy;
    std::copy(Children.begin(), Children.end(),
              std::back_inserter(ChildrenCopy));
    for (auto *Child : ChildrenCopy) {
      DT->changeImmediateDominator(Child, Node);
    }
  }

  DominanceInfoNode *CommonDom;

  // Find a common dominator for all unknown preds.
  if (!UnknownPreds.empty()) {
    // Find a new IDom for FailureBB
    CommonDom = findCommonDominator(FailureBB, DT);
    if (CommonDom)
      DT->changeImmediateDominator(FailureBB, CommonDom->getBlock());

    CommonDom = findCommonDominator(UnknownPreds, DT);
    // This common dominator dominates the BB now.
    if (CommonDom) {
      DT->changeImmediateDominator(BB, CommonDom->getBlock());
    }
  }

  // Find a common dominator for all failure preds.
  CommonDom = findCommonDominator(FailurePreds, DT);
  // This common dominator dominates the TargetFailureBB now.
  if (CommonDom) {
    DT->addNewBlock(TargetFailureBB, CommonDom->getBlock());

    // Find a new IDom for FailureBB
    CommonDom = findCommonDominator(FailureBB, DT);
    if (CommonDom)
      DT->changeImmediateDominator(FailureBB, CommonDom->getBlock());
  }

  // Find a common dominator for all success preds.
  CommonDom = findCommonDominator(SuccessPreds, DT);
  // This common dominator of all success preds dominates the BB now.
  if (CommonDom) {
    if (TargetSuccessBB) {
      DT->addNewBlock(TargetSuccessBB, CommonDom->getBlock());
    } else {
      DT->changeImmediateDominator(BB, CommonDom->getBlock());
    }
    CommonDom = findCommonDominator(SuccessBB, DT);
    if (CommonDom)
      DT->changeImmediateDominator(SuccessBB, CommonDom->getBlock());
  }
  // End of dominator tree update.
}

/// If some predecessors have an unknown outcome, the current checked_cast_br
/// cannot be removed outright; at best it can be replaced by a direct branch
/// when FailureBB starts with a class_method on the same condition.
void CheckedCastBrJumpThreading::modifyCFGForUnknownPreds() {
  if (UnknownPreds.empty())
    return;
  // Check the FailureBB if it is a BB that contains a class_method
  // referring to the same value as a condition. This pattern is typical
  // for method chaining code like obj.method1().method2().etc()
  SILInstruction *Inst = &*FailureBB->begin();
  if (ClassMethodInst *CMI = dyn_cast<ClassMethodInst>(Inst)) {
    if (CMI->getOperand() == Condition) {
      // Replace checked_cast_br by branch to FailureBB.
      SILBuilder(BB).createBranch(CCBI->getLoc(), FailureBB);
      CCBI->eraseFromParent();
    }
  }
}

/// Create a copy of the BB as a landing BB
/// for all FailurePreds.
void CheckedCastBrJumpThreading::modifyCFGForFailurePreds() {
  if (FailurePreds.empty())
    return;

  FailureBBCloner.emplace(BasicBlockCloner(BB));
  FailureBBCloner->clone();
  TargetFailureBB = FailureBBCloner->getDestBB();
  auto *TI = TargetFailureBB->getTerminator();
  SILBuilderWithScope Builder(TI);
  // This BB copy branches to a FailureBB.
  Builder.createBranch(TI->getLoc(), FailureBB);
  TI->eraseFromParent();

  // Redirect all FailurePreds to the copy of BB.
  for (auto *Pred : FailurePreds) {
    TermInst *TI = Pred->getTerminator();
    // Replace branch to BB by branch to TargetFailureBB.
    replaceBranchTarget(TI, BB, TargetFailureBB, /*PreserveArgs=*/true);
    // NOTE(review): assigning the loop-local copy has no effect — presumably
    // leftover from an earlier refactoring; confirm before removing.
    Pred = nullptr;
  }
}

/// Create a copy of the BB or reuse BB as
/// a landing basic block for all SuccessPreds.
/// (NOTE(review): original comment said "FailurePreds"; the next function is
/// modifyCFGForSuccessPreds.)
void CheckedCastBrJumpThreading::modifyCFGForSuccessPreds() { if (!UnknownPreds.empty()) { if (!SuccessPreds.empty()) { // Create a copy of the BB as a landing BB. // for all SuccessPreds. SuccessBBCloner.emplace(BasicBlockCloner(BB)); SuccessBBCloner->clone(); TargetSuccessBB = SuccessBBCloner->getDestBB(); auto *TI = TargetSuccessBB->getTerminator(); SILBuilderWithScope Builder(TI); SmallVector<SILValue, 8> SuccessBBArgs; // Take argument value from the dominating BB. SuccessBBArgs.push_back(DomSuccessBB->getBBArg(0)); // This BB copy branches to SuccessBB. Builder.createBranch(TI->getLoc(), SuccessBB, SuccessBBArgs); TI->eraseFromParent(); // Redirect all SuccessPreds to the copy of BB. for (auto *Pred : SuccessPreds) { TermInst *TI = Pred->getTerminator(); // Replace branch to BB by branch to TargetSuccessBB. replaceBranchTarget(TI, BB, TargetSuccessBB, /*PreserveArgs=*/true); SuccessBBArgs.push_back(DomSuccessBB->getBBArg(0)); Pred = nullptr; } } } else { // There are no predecessors where it is not clear // if they are dominated by a success or failure branch // of DomBB. Therefore, there is no need to clone // the BB for SuccessPreds. Current BB can be re-used // instead as their target. // Add an unconditional jump at the end of the block. SmallVector<SILValue, 1> SuccessBBArgs; // Take argument value from the dominating BB SuccessBBArgs.push_back(DomSuccessBB->getBBArg(0)); SILBuilder(BB).createBranch(CCBI->getLoc(), SuccessBB, SuccessBBArgs); CCBI->eraseFromParent(); } } /// Handle a special case, where ArgBB is the entry block. bool CheckedCastBrJumpThreading::handleArgBBIsEntryBlock(SILBasicBlock *ArgBB) { if (ArgBB->getPreds().begin() == ArgBB->getPreds().end()) { // It must be the entry block // See if it is reached over Success or Failure path. 
bool SuccessDominates = DomSuccessBB == BB; bool FailureDominates = DomFailureBB == BB; classifyPredecessor(ArgBB, SuccessPreds, FailurePreds, UnknownPreds, SuccessDominates, FailureDominates); return true; } return false; } // Returns false if cloning required by jump threading cannot // be performed, because some of the constraints are violated. bool CheckedCastBrJumpThreading::checkCloningConstraints() { // Check some cloning related constraints. // If this argument from a different BB, then jump-threading // may require too much code duplication. if (ArgBB && ArgBB != BB) return false; // Bail out if current BB cannot be duplicated. if (!canDuplicateBlock(BB)) return false; // Check if code-bloat would be too big when this BB // is jump-threaded. // TODO: Make InlineCostCutoff parameter configurable? // Dec 1, 2014: // We looked at the inline costs of BBs from our benchmark suite // and found that currently the highest inline cost for the // whole benchmark suite is 12. In 95% of all cases it is <=3. const unsigned InlineCostCutoff = 20; if (basicBlockInlineCost(BB, InlineCostCutoff) >= InlineCostCutoff) return false; return true; } /// If conditions are not equivalent along all paths, try harder /// to check if they are actually equivalent along a subset of paths. /// To do it, try to back-propagate the Condition /// backwards and see if it is actually equivalent to DomCondition. /// along some of the paths. bool CheckedCastBrJumpThreading::areEquivalentConditionsAlongSomePaths() { auto *Arg = dyn_cast<SILArgument>(Condition); if (!Arg) return false; ArgBB = Arg->getParent(); if (!DT->dominates(DomBB, ArgBB)) return false; // Incoming values for the BBArg. SmallVector<SILValue, 4> IncomingValues; if (ArgBB != ArgBB->getParent()->begin() && (!Arg->getIncomingValues(IncomingValues) || IncomingValues.empty())) return false; // Check for each predecessor, if the incoming value coming from it // is equivalent to the DomCondition. 
If this is the case, it is // possible to try jump-threading along this path. if (!handleArgBBIsEntryBlock(ArgBB)) { // ArgBB is not the entry block and has predecessors. unsigned idx = 0; for (auto *PredBB : ArgBB->getPreds()) { auto IncomingValue = IncomingValues[idx]; SILValue ReachingValue = isArgValueEquivalentToCondition( IncomingValue, DomBB, DomCondition, DT); if (ReachingValue == SILValue()) { UnknownPreds.push_back(PredBB); idx++; continue; } // Condition is the same if BB is reached over a pass through Pred. DEBUG(llvm::dbgs() << "Condition is the same if reached over "); DEBUG(PredBB->print(llvm::dbgs())); // See if it is reached over Success or Failure path. bool SuccessDominates = DT->dominates(DomSuccessBB, PredBB) || DT->dominates(DomSuccessBB, BB) || DomSuccessBB == BB; bool FailureDominates = DT->dominates(DomFailureBB, PredBB) || DT->dominates(DomFailureBB, BB) || DomFailureBB == BB; classifyPredecessor( PredBB, SuccessPreds, FailurePreds, UnknownPreds, SuccessDominates, FailureDominates); idx++; } } else { // ArgBB is the entry block. Check that conditions are the equivalent in this // case as well. if (!isArgValueEquivalentToCondition(Condition, DomBB, DomCondition, DT)) return false; } // At this point we know for each predecessor of ArgBB if its reached // over the success, failure or unknown path from DomBB. // Now we can generate a new BB for preds reaching BB over the success // path and a new BB for preds reaching BB over the failure path. // Then we redirect those preds to those new basic blocks. return true; } /// Check if conditions of CCBI and DomCCBI are equivalent along /// all or at least some paths. bool CheckedCastBrJumpThreading::areEquivalentConditionsAlongPaths() { // Are conditions equivalent along all paths? if (DomCondition == Condition) { // Conditions are exactly the same, without any restrictions. // They are equivalent along all paths. 
// Figure out for each predecessor which branch of // the dominating checked_cast_br is used to reach it. for (auto *PredBB : BB->getPreds()) { // All predecessors should either unconditionally branch // to the current BB or be another checked_cast_br instruction. if (!dyn_cast<CheckedCastBranchInst>(PredBB->getTerminator()) && !dyn_cast<BranchInst>(PredBB->getTerminator())) return false; bool SuccessDominates = DT->dominates(DomSuccessBB, PredBB) || DomSuccessBB == BB; bool FailureDominates = DT->dominates(DomFailureBB, PredBB) || DomFailureBB == BB; classifyPredecessor(PredBB, SuccessPreds, FailurePreds, UnknownPreds, SuccessDominates, FailureDominates); } return true; } // Check if conditions are equivalent along a subset of reaching paths. return areEquivalentConditionsAlongSomePaths(); } /// Try performing a dominator-based jump-threading for /// checked_cast_br instructions. bool CheckedCastBrJumpThreading::trySimplify(TermInst *Term) { CCBI = cast<CheckedCastBranchInst>(Term); if (!CCBI) return false; // Init information about the checked_cast_br we try to // jump-thread. BB = Term->getParent(); Condition = Term->getOperand(0).stripClassCasts(); SuccessBB = CCBI->getSuccessBB(); FailureBB = CCBI->getFailureBB(); // Find a dominating checked_cast_br, which performs the same check. for (Node = DT->getNode(BB)->getIDom(); Node; Node = Node->getIDom()) { // Get current dominating block. DomBB = Node->getBlock(); auto *DomTerm = DomBB->getTerminator(); if (!DomTerm->getNumOperands()) continue; // Check that it is a dominating checked_cast_br. DomCCBI = dyn_cast<CheckedCastBranchInst>(DomTerm); if (!DomCCBI) continue; // We need to verify that the result type is the same in the // dominating checked_cast_br, but only for non-exact casts. // For exact casts, we are interested only in the // fact that the source operand is the same for // both instructions. 
if (!CCBI->isExact() && !DomCCBI->isExact()) { if (DomCCBI->getCastType() != CCBI->getCastType()) continue; } // Conservatively check that both checked_cast_br instructions // are either exact or non-exact. This is very conservative, // but safe. // // TODO: // If the dominating checked_cast_br is non-exact, then // it is in general not safe to assume that current exact cast // would have the same outcome. But if the dominating non-exact // checked_cast_br fails, then the current exact cast would // always fail as well. // // If the dominating checked_cast_br is exact then then // it is in general not safe to assume that the current non-exact // cast would have the same outcome. But if the dominating exact // checked_cast_br succeeds, then the current non-exact cast // would always succeed as well. // // TODO: In some specific cases, it is possible to prove that // success or failure of the dominating cast is equivalent to // the success or failure of the current cast, even if one // of them is exact and the other not. This is the case // e.g. if the class has no subclasses. if (DomCCBI->isExact() != CCBI->isExact()) continue; // Initialize state variables for the current round of checks // based on the found dominating checked_cast_br. DomSuccessBB = DomCCBI->getSuccessBB(); DomFailureBB = DomCCBI->getFailureBB(); DomCondition = DomTerm->getOperand(0).stripClassCasts(); // Init state variables for paths analysis SuccessPreds.clear(); FailurePreds.clear(); UnknownPreds.clear(); ArgBB = nullptr; // Init state variables for jump-threading transformation. TargetFailureBB = nullptr; TargetSuccessBB = nullptr; // Are conditions of CCBI and DomCCBI equivalent along (some) paths? // If this is the case, classify all incoming paths into SuccessPreds, // FailurePreds or UnknownPreds depending on how they reach CCBI. if (!areEquivalentConditionsAlongPaths()) continue; // Check if any jump-threading is required and possible. 
if (SuccessPreds.empty() && FailurePreds.empty()) return false; // If this check is reachable via success, failure and unknown // at the same time, then we don't know the outcome of the // dominating check. No jump-threading is possible in this case. if (!SuccessPreds.empty() && !FailurePreds.empty() && !UnknownPreds.empty()) { return false; } unsigned TotalPreds = SuccessPreds.size() + FailurePreds.size() + UnknownPreds.size(); // We only need to clone the BB if not all of its // predecessors are in the same group. if (TotalPreds != SuccessPreds.size() && TotalPreds != UnknownPreds.size()) { // Check some cloning related constraints. if (!checkCloningConstraints()) return false; } bool InvertSuccess = false; if (DomCCBI->isExact() && CCBI->isExact() && DomCCBI->getCastType() != CCBI->getCastType()) { if (TotalPreds == SuccessPreds.size()) { // The dominating exact cast was successful, but it casted to a // different type. Therefore, the current cast fails for sure. // Since we are going to change the BB, // add its successors and predecessors // for re-processing. InvertSuccess = true; } else { // Otherwise, we don't know if the current cast will succeed or // fail. return false; } } // If we have predecessors, where it is not known if they are reached over // success or failure path, we cannot eliminate a checked_cast_br. // We have to generate new dedicated BBs as landing BBs for all // FailurePreds and all SuccessPreds. // Since we are going to change the BB, // add its successors and predecessors // for re-processing. for (auto *B : BB->getPreds()) { addBlockToSimplifyCFGWorklist(B); } for (auto *B : BB->getSuccessorBlocks()) { addBlockToSimplifyCFGWorklist(B); } // Create a copy of the BB as a landing BB // for all FailurePreds. 
modifyCFGForFailurePreds(); if (InvertSuccess) { SILBuilder(BB).createBranch(CCBI->getLoc(), FailureBB); CCBI->eraseFromParent(); SuccessPreds.clear(); } else { // Create a copy of the BB or reuse BB as // a landing basic block for all SuccessPreds. modifyCFGForSuccessPreds(); } // Handle unknown preds. modifyCFGForUnknownPreds(); // Update the dominator tree after all changes. updateDominatorTree(); // Update the SSA form after all changes. updateSSA(); // Since a few BBs were changed now, add them for re-processing. addBlocksToWorklist(); return true; } // Jump-threading was not possible. return false; } namespace swift { bool tryCheckedCastBrJumpThreading(TermInst *Term, DominanceInfo *DT, SmallVectorImpl<SILBasicBlock *> &BBs) { CheckedCastBrJumpThreading CCBJumpThreading(DT, BBs); return CCBJumpThreading.trySimplify(Term); } } // end namespace swift
adrfer/swift
lib/SILOptimizer/Utils/CheckedCastBrJumpThreading.cpp
C++
apache-2.0
26,094
package jp.co.omana.action;

import org.seasar.struts.annotation.Execute;

/**
 * Seasar/SAStruts action that routes the service pages.
 * Each handler skips validation and returns the JSP view to render.
 */
public class ServiceAction {

    /** View rendered by the entry action. */
    private static final String BOARD_VIEW = "board.jsp";

    /** View rendered after the confirm and finish steps. */
    private static final String INDEX_VIEW = "index.jsp";

    /**
     * Entry point of the service flow.
     *
     * @return the board view
     */
    @Execute(validator = false)
    public String index() {
        return BOARD_VIEW;
    }

    /**
     * Confirmation step.
     *
     * @return the index view
     */
    @Execute(validator = false)
    public String confirm() {
        return INDEX_VIEW;
    }

    /**
     * Final step of the flow.
     *
     * @return the index view
     */
    @Execute(validator = false)
    public String finish() {
        return INDEX_VIEW;
    }
}
ikraikra/bunsekiya
src/main/java/jp/co/omana/action/ServiceAction.java
Java
apache-2.0
387
"""Flux-style actions for the demo app.

Actions either register themselves against a store via the
``register_dispatch`` decorator or call ``dispatch`` manually.
"""
# NOTE(review): `Mutation` and `register` are imported but unused here —
# presumably kept for re-export or future use; confirm before removing.
from turbo.flux import Mutation, register, dispatch, register_dispatch

import mutation_types


@register_dispatch('user', mutation_types.INCREASE)
def increase(rank):
    # Body intentionally empty: the decorator wires this action so that
    # calling increase(rank) dispatches INCREASE on the 'user' store.
    pass


def decrease(rank):
    # Manual dispatch (no decorator): forwards `rank` as the payload of a
    # DECREASE mutation on the 'user' store.
    return dispatch('user', mutation_types.DECREASE, rank)


@register_dispatch('metric', 'inc_qps')
def inc_qps():
    # Registered with a literal mutation name instead of a constant from
    # mutation_types; dispatches 'inc_qps' on the 'metric' store.
    pass
tao12345666333/app-turbo
demos/helloworld/store/actions.py
Python
apache-2.0
322
/**
 * 
 */
package com.sivalabs.demo.orders.repositories;

import org.springframework.data.jpa.repository.JpaRepository;

import com.sivalabs.demo.orders.entities.Order;

/**
 * Spring Data JPA repository for {@link Order} entities keyed by their
 * {@code Integer} id. All CRUD and paging operations are inherited from
 * {@link JpaRepository}; no custom queries are declared.
 *
 * @author Siva
 *
 */
public interface OrderRepository extends JpaRepository<Order, Integer>{

}
sivaprasadreddy/springboot-learn-by-example
chapter-09/springboot-multiple-datasources-demo/src/main/java/com/sivalabs/demo/orders/repositories/OrderRepository.java
Java
apache-2.0
274
// Doxygen-generated navigation data for the CO_config_t struct documentation
// page. Each entry is [memberName, anchorUrl, children]; do not edit by hand —
// regenerate with Doxygen instead.
var structCO__config__t =
[
    [ "CNT_NMT", "structCO__config__t.html#aeef814580eb5ece5156e63bfc1b490c9", null ],
    [ "ENTRY_H1017", "structCO__config__t.html#ad17f77b55de3d90ec983fcac49eeab6d", null ],
    [ "CNT_HB_CONS", "structCO__config__t.html#a0031fc8f80e95f8480c918dbf8289671", null ],
    [ "ENTRY_H1016", "structCO__config__t.html#a0af4cf7d0355861e7f60206d794d6a91", null ],
    [ "CNT_EM", "structCO__config__t.html#a515e08f68835f71a6f145be8f27b510a", null ],
    [ "ENTRY_H1001", "structCO__config__t.html#a6a6c19e816fb76882e85b2c07c0d8f42", null ],
    [ "ENTRY_H1014", "structCO__config__t.html#a4827d94f6152cc12d86bd21312ae86e4", null ],
    [ "ENTRY_H1015", "structCO__config__t.html#a141f21b4d1730206d1af823fd6b13a01", null ],
    [ "ENTRY_H1003", "structCO__config__t.html#a7e320b309714b7f623c2006d45fee929", null ],
    [ "CNT_SDO_SRV", "structCO__config__t.html#aac83faf556924515cc2aa8003753ab58", null ],
    [ "ENTRY_H1200", "structCO__config__t.html#a05ab8adad4517850e31e5542895f7cc5", null ],
    [ "CNT_SDO_CLI", "structCO__config__t.html#a2fc9606643a7fb4d4237f01812d3a6d2", null ],
    [ "ENTRY_H1280", "structCO__config__t.html#a9f871c4ec753e8414cdb47eb78c3e09d", null ],
    [ "CNT_TIME", "structCO__config__t.html#ada2a43384a544fa2f235de24a874b1e6", null ],
    [ "ENTRY_H1012", "structCO__config__t.html#abac6be7122af1a8a4f9ae3ff5912d490", null ],
    [ "CNT_SYNC", "structCO__config__t.html#af6dbc7d9f31b4cb050e23af8cff3df33", null ],
    [ "ENTRY_H1005", "structCO__config__t.html#a02a4992f47db72816753ff2aa1964318", null ],
    [ "ENTRY_H1006", "structCO__config__t.html#aa9befdebbaaa22f309b9a1b115612071", null ],
    [ "ENTRY_H1007", "structCO__config__t.html#ad51ab63ca8b5836bf0dd8543f02db544", null ],
    [ "ENTRY_H1019", "structCO__config__t.html#a468c82f6a0afd757a6b78ce33532c0d2", null ],
    [ "CNT_RPDO", "structCO__config__t.html#a7a75302ac077462b67d767b0a11c9f56", null ],
    [ "ENTRY_H1400", "structCO__config__t.html#a5e0984d93183493d587523888465eaa7", null ],
    [ "ENTRY_H1600", "structCO__config__t.html#ab2ddc9943fd8c89f3b852d7ac9508d21", null ],
    [ "CNT_TPDO", "structCO__config__t.html#a1d830617f50e3235de35a403a1513693", null ],
    [ "ENTRY_H1800", "structCO__config__t.html#a29b98c08edfe0fba2e46c7af7a9edf6f", null ],
    [ "ENTRY_H1A00", "structCO__config__t.html#a43fd6a448c91910c603f2c7756610432", null ],
    [ "CNT_LEDS", "structCO__config__t.html#a642809cc681792bca855906241d891cc", null ],
    [ "CNT_GFC", "structCO__config__t.html#ae282bab830810b61c0b0c3223654d674", null ],
    [ "ENTRY_H1300", "structCO__config__t.html#a91c9f3ddb67231854af39224a9597e20", null ],
    [ "CNT_SRDO", "structCO__config__t.html#ae58a44be57069709af3f6acbd10953e1", null ],
    [ "ENTRY_H1301", "structCO__config__t.html#a87076cb1f9282d9720c21d395ff4e541", null ],
    [ "ENTRY_H1381", "structCO__config__t.html#a7b3172b29ce8751adcab9e4351dcc31e", null ],
    [ "ENTRY_H13FE", "structCO__config__t.html#a03fcaca5a8e0e71b86086908cae75f3d", null ],
    [ "ENTRY_H13FF", "structCO__config__t.html#aa4cb9674209b83e7f0e48b01feaa04ef", null ],
    [ "CNT_LSS_SLV", "structCO__config__t.html#a00a7a598b946ed13e3af7696e9f92dcc", null ],
    [ "CNT_LSS_MST", "structCO__config__t.html#ac253cae7039090a6c04bc1e385f3ec21", null ],
    [ "CNT_GTWA", "structCO__config__t.html#a64725014ecce342843f14ffc4b57e2a2", null ],
    [ "CNT_TRACE", "structCO__config__t.html#aaafb8ffff236b51cd6d4ab16426d460f", null ]
];
CANopenNode/CANopenSocket
docs/structCO__config__t.js
JavaScript
apache-2.0
3,451
/*  
  Copyright 2017 Processwall Limited

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
 
  Company: Processwall Limited
  Address: The Winnowing House, Mill Lane, Askham Richard, York, YO23 3NW, United Kingdom
  Tel:     +44 113 815 3440
  Web:     http://www.processwall.com
  Email:   support@processwall.com
*/

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace Aras.ViewModel.Design.Applications
{
    /// <summary>
    /// "Parts" application: a master/detail view with a part search grid on
    /// the left and a part form on the right. Selecting a row in the grid
    /// binds the corresponding part to the form.
    /// </summary>
    [Aras.ViewModel.Attributes.Application("Parts", "PartFamily", "Design", false)]
    public class Parts : Aras.ViewModel.Containers.Application
    {
        /// <summary>Query backing the search grid.</summary>
        public Model.Design.Queries.Searches.Part SearchQuery { get; private set; }

        /// <summary>Search grid shown in the left region.</summary>
        public Aras.ViewModel.Grids.Search Search { get; private set; }

        /// <summary>Query backing the detail form.</summary>
        public Model.Design.Queries.Forms.Part FormQuery { get; private set; }

        /// <summary>Detail form for the currently selected part.</summary>
        public Forms.Part Form { get; private set; }

        // Keeps the form in sync with the grid selection: bind the first
        // selected part, or clear the binding when nothing is selected.
        private void Search_ItemsSelected(object sender, Aras.ViewModel.Grids.Search.ItemsSelectedEventArgs e)
        {
            if (this.Search.Selected.Count() > 0)
            {
                this.Form.Binding = this.Form.Store.Get(this.Search.Selected.First().ID);
            }
            else
            {
                this.Form.Binding = null;
            }
        }

        /// <summary>
        /// Builds the application layout. Child-list change notification is
        /// suspended while the grid and form are added, then re-enabled; the
        /// order of these statements is significant.
        /// </summary>
        public Parts(Aras.ViewModel.Manager.Session Session)
            : base(Session)
        {
            this.Children.NotifyListChanged = false;

            // Create Search Query
            this.SearchQuery = new Model.Design.Queries.Searches.Part(this.Session.Model);

            // Create Search
            this.Search = new Aras.ViewModel.Grids.Search(this.Session);
            this.Search.Width = 300;
            this.Children.Add(this.Search);
            this.Search.Region = Aras.ViewModel.Regions.Left;
            this.Search.Binding = this.SearchQuery.Store;
            this.Search.Splitter = true;
            this.Search.ItemsSelected += Search_ItemsSelected;

            // Create Form Query
            this.FormQuery = new Model.Design.Queries.Forms.Part(this.Session.Model);

            // Create Form
            this.Form = new Forms.Part(this.Session, this.FormQuery.Store);
            this.Children.Add(this.Form);

            this.Children.NotifyListChanged = true;

            // Select First Part
            if (this.SearchQuery.Store.Count() > 0)
            {
                this.Search.Select(this.SearchQuery.Store.First());
            }
        }
    }
}
ArasExtensions/Aras.ViewModel
Aras.ViewModel.Design/Applications/Parts.cs
C#
apache-2.0
3,016
#include <tuple> #include "Vector2.h" Vector2::Vector2(void) { } Vector2::Vector2(float X, float Y) { this->X = X; this->Y = Y; } // Returns the length of the vector float Vector2::Magnitude() { return sqrt(X * X + Y * Y); } // Returns the length of the vector squared // Used for length comparisons without needing roots float Vector2::MagnitudeSquared() { return X * X + Y * Y; } // Normalizes the vector Vector2 Vector2::Normal() { float length = this->Magnitude(); if (length != 0) return Vector2(X / length, Y / length); return Vector2(); } // Sets the magnitude of the vector void Vector2::SetMagnitude(float mag) { Vector2 v = this->Normal(); X = v.X*mag; Y = v.Y*mag; } float Vector2::Dot(Vector2 other) { return X * other.X + Y * other.Y; } float Vector2::Cross(Vector2 other) { return X * other.Y - Y * other.X; } Vector2 Vector2::operator+(Vector2 other) { return Vector2(X + other.X, Y + other.Y); } Vector2 Vector2::operator-(Vector2 other) { return Vector2(X - other.X, Y - other.Y); } Vector2 Vector2::operator*(float scalar) { return Vector2(X * scalar, Y * scalar); } Vector2 Vector2::operator-() { return Vector2(-X, -Y); } Vector2& Vector2::operator+=(const Vector2& other) { X += other.X; Y += other.Y; return *this; } Vector2& Vector2::operator-=(const Vector2& other) { X -= other.X; Y -= other.Y; return *this; } Vector2& Vector2::operator*=(const Vector2& other) { X *= other.X; Y *= other.Y; return *this; } Vector2& Vector2::operator/=(const Vector2& other) { X /= other.X; Y /= other.Y; return *this; } bool operator==(const Vector2& L, const Vector2& R) { return std::tie(L.X, L.Y) == std::tie(R.X, R.Y); }
Henrywald/crispy-waffle
Crispy-Waffle/Vector2.cpp
C++
apache-2.0
1,685
/************************************************************
 *
 * EaseMob CONFIDENTIAL
 * __________________
 * Copyright (C) 2013-2014 EaseMob Technologies. All rights reserved.
 *
 * NOTICE: All information contained herein is, and remains
 * the property of EaseMob Technologies.
 * Dissemination of this information or reproduction of this material
 * is strictly forbidden unless prior written permission is obtained
 * from EaseMob Technologies.
 */
package com.easemob.chatuidemo.activity;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.List;

import android.annotation.SuppressLint;
import android.app.AlertDialog;
import android.app.ProgressDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.graphics.Bitmap;
import android.graphics.PixelFormat;
import android.hardware.Camera;
import android.hardware.Camera.CameraInfo;
import android.hardware.Camera.Parameters;
import android.hardware.Camera.Size;
import android.media.MediaRecorder;
import android.media.MediaRecorder.OnErrorListener;
import android.media.MediaRecorder.OnInfoListener;
import android.media.MediaScannerConnection;
import android.media.MediaScannerConnection.MediaScannerConnectionClient;
import android.net.Uri;
import android.os.Bundle;
import android.os.Environment;
import android.os.PowerManager;
import android.os.SystemClock;
import android.text.TextUtils;
import android.view.SurfaceHolder;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.Window;
import android.view.WindowManager;
import android.widget.Button;
import android.widget.Chronometer;
import android.widget.ImageView;
import android.widget.Toast;
import android.widget.VideoView;

import com.easemob.chatuidemo.utils.CommonUtils;
import com.easemob.chatuidemo.video.util.Utils;
import com.easemob.qixin.R;
import com.easemob.util.EMLog;
import com.easemob.util.PathUtil;

/**
 * Full-screen activity that records a short video with the device camera
 * (legacy android.hardware.Camera API), previews it on a SurfaceView, and on
 * confirmation scans the file into the media store and returns its Uri to the
 * caller via the activity result.
 */
public class RecorderVideoActivity extends BaseActivity implements
		OnClickListener, SurfaceHolder.Callback, OnErrorListener,
		OnInfoListener {
	private static final String TAG = "RecorderVideoActivity";
	private final static String CLASS_LABEL = "RecordActivity";
	// Keeps the screen bright while recording.
	private PowerManager.WakeLock mWakeLock;
	private ImageView btnStart;// start-recording button
	private ImageView btnStop;// stop-recording button
	private MediaRecorder mediaRecorder;// performs the actual video capture
	private VideoView mVideoView;// widget that shows the camera preview
	String localPath = "";// file-system path of the recorded video
	private Camera mCamera;
	// Preview width/height (may be overridden in handleSurfaceChanged).
	private int previewWidth = 480;
	private int previewHeight = 480;
	// On-screen recording timer.
	private Chronometer chronometer;
	private int frontCamera = 0;// 0 = back camera, 1 = front camera
	private Button btn_switch;
	Parameters cameraParameters = null;
	private SurfaceHolder mSurfaceHolder;
	// Frame rate chosen from the camera's supported rates; -1 = not set.
	int defaultVideoFrameRate = -1;

	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		requestWindowFeature(Window.FEATURE_NO_TITLE);// hide the title bar
		getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN,
				WindowManager.LayoutParams.FLAG_FULLSCREEN);// go full screen
		// Use a translucent pixel format, needed for activities hosting a
		// SurfaceView.
		getWindow().setFormat(PixelFormat.TRANSLUCENT);
		setContentView(R.layout.recorder_activity);
		PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
		mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK,
				CLASS_LABEL);
		mWakeLock.acquire();
		initViews();
	}

	// Looks up all widgets, wires click handlers and registers this activity
	// as the SurfaceHolder callback.
	private void initViews() {
		btn_switch = (Button) findViewById(R.id.switch_btn);
		btn_switch.setOnClickListener(this);
		btn_switch.setVisibility(View.VISIBLE);
		mVideoView = (VideoView) findViewById(R.id.mVideoView);
		btnStart = (ImageView) findViewById(R.id.recorder_start);
		btnStop = (ImageView) findViewById(R.id.recorder_stop);
		btnStart.setOnClickListener(this);
		btnStop.setOnClickListener(this);
		mSurfaceHolder = mVideoView.getHolder();
		mSurfaceHolder.addCallback(this);
		mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
		chronometer = (Chronometer) findViewById(R.id.chronometer);
	}

	// Back-button handler (also used from the layout): releases recorder and
	// camera before finishing.
	public void back(View view) {
		releaseRecorder();
		releaseCamera();
		finish();
	}

	@Override
	protected void onResume() {
		super.onResume();
		if (mWakeLock == null) {
			// Re-acquire the wake lock to keep the screen on.
			PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
			mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK,
					CLASS_LABEL);
			mWakeLock.acquire();
		}
		// if (!initCamera()) {
		// showFailDialog();
		// }
	}

	// Opens the selected camera (back or front) and attaches the preview
	// surface. Returns false when the camera cannot be opened.
	@SuppressLint("NewApi")
	private boolean initCamera() {
		try {
			if (frontCamera == 0) {
				mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
			} else {
				mCamera = Camera.open(CameraInfo.CAMERA_FACING_FRONT);
			}
			// NOTE(review): camParams is fetched but never applied — looks
			// vestigial; confirm before removing.
			Camera.Parameters camParams = mCamera.getParameters();
			mCamera.lock();
			mSurfaceHolder = mVideoView.getHolder();
			mSurfaceHolder.addCallback(this);
			mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
			mCamera.setDisplayOrientation(90);
		} catch (RuntimeException ex) {
			EMLog.e("video", "init Camera fail " + ex.getMessage());
			return false;
		}
		return true;
	}

	// Picks the preview frame rate (prefer 15 fps) and the preview size
	// (prefer 640x480, otherwise the middle of the supported list).
	private void handleSurfaceChanged() {
		if (mCamera == null) {
			finish();
			return;
		}
		boolean hasSupportRate = false;
		List<Integer> supportedPreviewFrameRates = mCamera.getParameters()
				.getSupportedPreviewFrameRates();
		if (supportedPreviewFrameRates != null
				&& supportedPreviewFrameRates.size() > 0) {
			Collections.sort(supportedPreviewFrameRates);
			for (int i = 0; i < supportedPreviewFrameRates.size(); i++) {
				int supportRate = supportedPreviewFrameRates.get(i);
				if (supportRate == 15) {
					hasSupportRate = true;
				}
			}
			if (hasSupportRate) {
				defaultVideoFrameRate = 15;
			} else {
				defaultVideoFrameRate = supportedPreviewFrameRates.get(0);
			}
		}
		// Query all preview resolutions the camera supports.
		List<Camera.Size> resolutionList = Utils.getResolutionList(mCamera);
		if (resolutionList != null && resolutionList.size() > 0) {
			Collections.sort(resolutionList, new Utils.ResolutionComparator());
			Camera.Size previewSize = null;
			boolean hasSize = false;
			// If the camera supports 640x480, force that resolution.
			for (int i = 0; i < resolutionList.size(); i++) {
				Size size = resolutionList.get(i);
				if (size != null && size.width == 640 && size.height == 480) {
					previewSize = size;
					previewWidth = previewSize.width;
					previewHeight = previewSize.height;
					hasSize = true;
					break;
				}
			}
			// Otherwise use the middle entry of the sorted list.
			if (!hasSize) {
				int mediumResolution = resolutionList.size() / 2;
				if (mediumResolution >= resolutionList.size())
					mediumResolution = resolutionList.size() - 1;
				previewSize = resolutionList.get(mediumResolution);
				previewWidth = previewSize.width;
				previewHeight = previewSize.height;
			}
		}
	}

	@Override
	protected void onPause() {
		super.onPause();
		if (mWakeLock != null) {
			mWakeLock.release();
			mWakeLock = null;
		}
	}

	@Override
	public void onClick(View view) {
		switch (view.getId()) {
		case R.id.switch_btn:
			switchCamera();
			break;
		case R.id.recorder_start:
			// start recording
			if(!startRecording())
				return;
			Toast.makeText(this, R.string.The_video_to_start,
					Toast.LENGTH_SHORT).show();
			btn_switch.setVisibility(View.INVISIBLE);
			btnStart.setVisibility(View.INVISIBLE);
			btnStart.setEnabled(false);
			btnStop.setVisibility(View.VISIBLE);
			// Reset and start the timer.
			chronometer.setBase(SystemClock.elapsedRealtime());
			chronometer.start();
			break;
		case R.id.recorder_stop:
			btnStop.setEnabled(false);
			// Stop capturing.
			stopRecording();
			btn_switch.setVisibility(View.VISIBLE);
			chronometer.stop();
			btnStart.setVisibility(View.VISIBLE);
			btnStop.setVisibility(View.INVISIBLE);
			// Ask whether to send the clip; cancel deletes the file.
			new AlertDialog.Builder(this)
					.setMessage(R.string.Whether_to_send)
					.setPositiveButton(R.string.ok,
							new DialogInterface.OnClickListener() {

								@Override
								public void onClick(DialogInterface dialog,
										int which) {
									dialog.dismiss();
									sendVideo(null);
								}
							})
					.setNegativeButton(R.string.cancel,
							new DialogInterface.OnClickListener() {

								@Override
								public void onClick(DialogInterface dialog,
										int which) {
									if(localPath != null){
										File file = new File(localPath);
										if(file.exists())
											file.delete();
									}
									finish();
								}
							}).setCancelable(false).show();
			break;
		default:
			break;
		}
	}

	@Override
	public void surfaceChanged(SurfaceHolder holder, int format, int width,
			int height) {
		// Keep a reference to the (possibly recreated) holder; it was first
		// obtained in onCreate via initViews().
		mSurfaceHolder = holder;
	}

	@Override
	public void surfaceCreated(SurfaceHolder holder) {
		if (mCamera == null){
			if(!initCamera()){
				showFailDialog();
				return;
			}
		}
		try {
			mCamera.setPreviewDisplay(mSurfaceHolder);
			mCamera.startPreview();
			handleSurfaceChanged();
		} catch (Exception e1) {
			EMLog.e("video", "start preview fail " + e1.getMessage());
			showFailDialog();
		}
	}

	@Override
	public void surfaceDestroyed(SurfaceHolder arg0) {
		EMLog.v("video", "surfaceDestroyed");
	}

	// Lazily initializes the recorder and starts capturing. Returns false
	// when the recorder could not be prepared.
	public boolean startRecording(){
		if (mediaRecorder == null){
			if(!initRecorder())
				return false;
		}
		mediaRecorder.setOnInfoListener(this);
		mediaRecorder.setOnErrorListener(this);
		mediaRecorder.start();
		return true;
	}

	// Configures the MediaRecorder pipeline. The order of the set* calls is
	// mandated by the MediaRecorder state machine — do not reorder.
	@SuppressLint("NewApi")
	private boolean initRecorder(){
		if(!CommonUtils.isExitsSdcard()){
			showNoSDCardDialog();
			return false;
		}

		if (mCamera == null) {
			if(!initCamera()){
				showFailDialog();
				return false;
			}
		}
		mVideoView.setVisibility(View.VISIBLE);
		// TODO init button
		mCamera.stopPreview();
		mediaRecorder = new MediaRecorder();
		mCamera.unlock();
		mediaRecorder.setCamera(mCamera);
		mediaRecorder.setAudioSource(MediaRecorder.AudioSource.DEFAULT);
		// Record video from the camera.
		mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
		if (frontCamera == 1) {
			mediaRecorder.setOrientationHint(270);
		} else {
			mediaRecorder.setOrientationHint(90);
		}
		// Output container: MPEG_4 (.mp4); THREE_GPP would produce .3gp.
		mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
		mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
		// Video codec: H.264 (H.263 is the alternative).
		mediaRecorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
		// Video size must be set after format/encoder, otherwise it throws.
		mediaRecorder.setVideoSize(previewWidth, previewHeight);
		// Video bit rate.
		mediaRecorder.setVideoEncodingBitRate(384 * 1024);
		// Frame rate must also come after format/encoder, otherwise it throws.
		if (defaultVideoFrameRate != -1) {
			mediaRecorder.setVideoFrameRate(defaultVideoFrameRate);
		}
		// Output file path (timestamped .mp4).
		localPath = PathUtil.getInstance().getVideoPath() + "/"
				+ System.currentTimeMillis() + ".mp4";
		mediaRecorder.setOutputFile(localPath);
		mediaRecorder.setMaxDuration(30000);
		mediaRecorder.setPreviewDisplay(mSurfaceHolder.getSurface());

		try {
			mediaRecorder.prepare();
		} catch (IllegalStateException e) {
			e.printStackTrace();
			return false;
		} catch (IOException e) {
			e.printStackTrace();
			return false;
		}
		return true;
	}

	// Stops the recorder (swallowing the IllegalStateException that stop()
	// throws when nothing was recorded) and tears down recorder and camera.
	public void stopRecording() {
		if (mediaRecorder != null) {
			mediaRecorder.setOnErrorListener(null);
			mediaRecorder.setOnInfoListener(null);
			try {
				mediaRecorder.stop();
			} catch (IllegalStateException e) {
				EMLog.e("video", "stopRecording error:" + e.getMessage());
			}
		}
		releaseRecorder();
		if (mCamera != null) {
			mCamera.stopPreview();
			releaseCamera();
		}
	}

	private void releaseRecorder() {
		if (mediaRecorder != null) {
			mediaRecorder.release();
			mediaRecorder = null;
		}
	}

	protected void releaseCamera() {
		try {
			if (mCamera != null) {
				mCamera.stopPreview();
				mCamera.release();
				mCamera = null;
			}
		} catch (Exception e) {
		}
	}

	// Toggles between front and back camera (if the device has both) and
	// restarts the preview on the new camera.
	@SuppressLint("NewApi")
	public void switchCamera() {
		if (mCamera == null) {
			return;
		}
		if (Camera.getNumberOfCameras() >= 2) {
			btn_switch.setEnabled(false);
			if (mCamera != null) {
				mCamera.stopPreview();
				mCamera.release();
				mCamera = null;
			}
			switch (frontCamera) {
			case 0:
				mCamera = Camera.open(CameraInfo.CAMERA_FACING_FRONT);
				frontCamera = 1;
				break;
			case 1:
				mCamera = Camera.open(CameraInfo.CAMERA_FACING_BACK);
				frontCamera = 0;
				break;
			}
			try {
				mCamera.lock();
				mCamera.setDisplayOrientation(90);
				mCamera.setPreviewDisplay(mVideoView.getHolder());
				mCamera.startPreview();
			} catch (IOException e) {
				mCamera.release();
				mCamera = null;
			}
			btn_switch.setEnabled(true);
		}
	}

	MediaScannerConnection msc = null;
	ProgressDialog progressDialog = null;

	// Scans the recorded file into the media store; when the scan completes,
	// the resulting content Uri is returned to the caller and the activity
	// finishes.
	public void sendVideo(View view) {
		if (TextUtils.isEmpty(localPath)) {
			EMLog.e("Recorder", "recorder fail please try again!");
			return;
		}
		if(msc == null)
			msc = new MediaScannerConnection(this,
					new MediaScannerConnectionClient() {

						@Override
						public void onScanCompleted(String path, Uri uri) {
							EMLog.d(TAG, "scanner completed");
							msc.disconnect();
							progressDialog.dismiss();
							setResult(RESULT_OK, getIntent().putExtra("uri", uri));
							finish();
						}

						@Override
						public void onMediaScannerConnected() {
							msc.scanFile(localPath, "video/*");
						}
					});
		if(progressDialog == null){
			progressDialog = new ProgressDialog(this);
			progressDialog.setMessage("processing...");
			progressDialog.setCancelable(false);
		}
		progressDialog.show();
		msc.connect();
	}

	@Override
	public void onInfo(MediaRecorder mr, int what, int extra) {
		EMLog.v("video", "onInfo");
		// Fired when the 30s max duration is hit: stop and offer to send.
		if (what == MediaRecorder.MEDIA_RECORDER_INFO_MAX_DURATION_REACHED) {
			EMLog.v("video", "max duration reached");
			stopRecording();
			btn_switch.setVisibility(View.VISIBLE);
			chronometer.stop();
			btnStart.setVisibility(View.VISIBLE);
			btnStop.setVisibility(View.INVISIBLE);
			chronometer.stop();
			if (localPath == null) {
				return;
			}
			String st3 = getResources().getString(R.string.Whether_to_send);
			new AlertDialog.Builder(this)
					.setMessage(st3)
					.setPositiveButton(R.string.ok,
							new DialogInterface.OnClickListener() {

								@Override
								public void onClick(DialogInterface arg0,
										int arg1) {
									arg0.dismiss();
									sendVideo(null);
								}
							}).setNegativeButton(R.string.cancel, null)
					.setCancelable(false).show();
		}
	}

	@Override
	public void onError(MediaRecorder mr, int what, int extra) {
		EMLog.e("video", "recording onError:");
		stopRecording();
		Toast.makeText(this,
				"Recording error has occurred. Stopping the recording",
				Toast.LENGTH_SHORT).show();
	}

	// Writes a bitmap as JPEG to external storage root as "a.jpg".
	// NOTE(review): not referenced from within this class — presumably used
	// elsewhere or left over from debugging; confirm before removing.
	public void saveBitmapFile(Bitmap bitmap) {
		File file = new File(Environment.getExternalStorageDirectory(), "a.jpg");
		try {
			BufferedOutputStream bos = new BufferedOutputStream(
					new FileOutputStream(file));
			bitmap.compress(Bitmap.CompressFormat.JPEG, 100, bos);
			bos.flush();
			bos.close();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	@Override
	protected void onDestroy() {
		super.onDestroy();
		releaseCamera();
		if (mWakeLock != null) {
			mWakeLock.release();
			mWakeLock = null;
		}
	}

	@Override
	public void onBackPressed() {
		back(null);
	}

	// Shown when the camera could not be opened or previewed.
	private void showFailDialog() {
		new AlertDialog.Builder(this)
				.setTitle(R.string.prompt)
				.setMessage(R.string.Open_the_equipment_failure)
				.setPositiveButton(R.string.ok,
						new DialogInterface.OnClickListener() {

							@Override
							public void onClick(DialogInterface dialog,
									int which) {
								finish();
							}
						}).setCancelable(false).show();
	}

	// Shown when no SD card is available for the output file.
	private void showNoSDCardDialog() {
		new AlertDialog.Builder(this)
				.setTitle(R.string.prompt)
				.setMessage("No sd card!")
				.setPositiveButton(R.string.ok,
						new DialogInterface.OnClickListener() {

							@Override
							public void onClick(DialogInterface dialog,
									int which) {
								finish();
							}
						}).setCancelable(false).show();
	}
}
liyuzhao/enterpriseChat-android
src/com/easemob/chatuidemo/activity/RecorderVideoActivity.java
Java
apache-2.0
16,993
import { Component, OnInit } from '@angular/core';
import { ActivatedRoute } from '@angular/router';
import { Location } from '@angular/common';

import { Merch } from '../data/merch';
import { MerchService } from '../data/merch.service';

/**
 * Displays the merchandise list for the category named in the current route
 * and notifies the active service worker of each category visit so it can
 * prefetch likely next pages.
 */
@Component({
  selector: 'app-merch-display',
  templateUrl: './merch-display.component.html',
  styleUrls: ['./merch-display.component.scss'],
})
export class MerchDisplayComponent implements OnInit {
  merch: Merch[] = [];
  private _serviceWorker: ServiceWorker|null = null;

  constructor(
    private route: ActivatedRoute,
    private merchService: MerchService,
    private location: Location
  ) {}

  ngOnInit(): void {
    // Resolve the active service worker once; each route change awaits this
    // promise instead of reading a possibly-not-yet-assigned field.
    //
    // BUG FIX: the original assigned `_serviceWorker` inside a separate
    // `ready.then(...)` callback and read the field synchronously in the
    // params subscription. The first params emission normally fires before
    // `ready` resolves, so the prefetch message for the initially viewed
    // category was silently dropped.
    const activeWorker: Promise<ServiceWorker|null> =
        navigator.serviceWorker.ready.then(
            registration => registration.active);

    this.route.params.subscribe((routeParams) => {
      this.getMerch(routeParams.category);
      activeWorker.then(worker => {
        this._serviceWorker = worker;  // kept for parity with original field
        if (worker) {
          worker.postMessage({ page: routeParams.category });
        }
      });
    });
  }

  /** Loads the merch list for `category` into `this.merch`. */
  getMerch(category: string): void {
    this.merchService
      .getMerchList(category)
      .then((merch) => (this.merch = merch));
  }

  /** Navigates back in browser history. */
  goBack(): void {
    this.location.back();
  }
}
tensorflow/tfjs-examples
angular-predictive-prefetching/client/src/app/merch-display/merch-display.component.ts
TypeScript
apache-2.0
1,198
package com.github.sergejsamsonow.codegenerator.producer.pojo.renderer; import com.github.sergejsamsonow.codegenerator.api.producer.sc.SCMethodCodeConcatenator; import com.github.sergejsamsonow.codegenerator.api.producer.sc.SCNewLineAndIndentationFormat; import com.github.sergejsamsonow.codegenerator.producer.pojo.model.PojoProperty; import com.github.sergejsamsonow.codegenerator.producer.pojo.renderer.javalang.BeanModifier; public class JavaLangToString extends BeanModifier { public JavaLangToString(SCNewLineAndIndentationFormat format) { super(format); } @Override protected void writeBeforePropertiesIteration() { SCMethodCodeConcatenator writer = getMethodCodeWriter(); writer.annotation("@Override"); writer.start("public String toString() {"); writer.code("StringBuilder builder = new StringBuilder();"); writer.code("builder.append(\"%s (\");", getData().getClassName()); } @Override protected void writePropertyCode(PojoProperty property) { SCMethodCodeConcatenator writer = getMethodCodeWriter(); String end = isLast() ? ");" : " + \", \");"; writer.code("builder.append(\"%s: \" + Objects.toString(%s())%s", property.getFieldName(), property.getGetterName(), end); } @Override protected void writeAfterPropertiesIteration() { SCMethodCodeConcatenator writer = getMethodCodeWriter(); writer.code("builder.append(\")\");"); writer.code("return builder.toString();"); writer.end(); writer.emptyNewLine(); } }
sergej-samsonow/code-generator
producer/pojo/src/main/java/com/github/sergejsamsonow/codegenerator/producer/pojo/renderer/JavaLangToString.java
Java
apache-2.0
1,597
/*
 * Copyright 2010-2011 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.autoscaling.model;

/**
 * <p>
 * The output for the TerminateInstanceInAutoScalingGroup action.
 * </p>
 */
public class TerminateInstanceInAutoScalingGroupResult {

    /** The scaling activity triggered by the terminate request. */
    private Activity activity;

    /**
     * A Scaling Activity.
     *
     * @return A Scaling Activity.
     */
    public Activity getActivity() {
        return activity;
    }

    /**
     * A Scaling Activity.
     *
     * @param activity A Scaling Activity.
     */
    public void setActivity(Activity activity) {
        this.activity = activity;
    }

    /**
     * A Scaling Activity.
     * <p>
     * Returns a reference to this object so that method calls can be chained
     * together.
     *
     * @param activity A Scaling Activity.
     * @return A reference to this updated object so that method calls can be
     *         chained together.
     */
    public TerminateInstanceInAutoScalingGroupResult withActivity(Activity activity) {
        this.activity = activity;
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and
     * debugging.
     *
     * @return A string representation of this object.
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        // Output format matches the original implementation exactly:
        // "{Activity: <activity>, }"
        StringBuilder builder = new StringBuilder();
        builder.append("{")
               .append("Activity: " + activity + ", ")
               .append("}");
        return builder.toString();
    }
}
apetresc/aws-sdk-for-java-on-gae
src/main/java/com/amazonaws/services/autoscaling/model/TerminateInstanceInAutoScalingGroupResult.java
Java
apache-2.0
2,135
/**
 * Copyright 2013-present NightWorld.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

var error = require('./error'),
  runner = require('./runner'),
  Client = require('./client');

module.exports = Authorise;

/**
 * This is the function order used by the runner
 *
 * @type {Array}
 */
var fns = [
  checkAuthoriseType,
  checkScope
];

/**
 * Authorise
 *
 * Entry point: authenticates the request (bearer token, implicit grant, or
 * client credentials depending on `options`) and then checks scope.
 *
 * @param {Object}   config  Instance of OAuth object
 * @param {Object}   req
 * @param {Object}   res
 * @param {Object}   options
 * @param {Function} next
 */
function Authorise (config, req, res, options, next) {
  options = options || {};
  this.config = config;
  this.model = config.model;
  this.req = req;
  this.res = res;
  this.options = options;

  runner(fns, this, next);
}

/**
 * Decide how to authenticate the request: implicit grant, client
 * credentials, or (the default) a bearer token.
 *
 * @param {Function} done
 * @this  OAuth
 */
function checkAuthoriseType(done) {
  // Either extractor may yield nothing, so `client` can be falsy.
  var client = Client.credsFromBasic(this.req) || Client.credsFromBody(this.req);

  if (this.options.implicit) {
    // BUG FIX: `req.body` may be absent (e.g. a GET request, or no body
    // parser); guard before reading `response_type` to avoid a TypeError.
    if (this.req.body && this.req.body.response_type === 'token') {
      // BUG FIX: guard `client` itself before reading `clientId`.
      if (client && client.clientId) {
        this.redirectUri = this.req.body.redirect_uri ||
          this.req.query.redirect_uri;
        this.clientId = client.clientId;
        this.req.auth_type = 'implicit';
        return checkImplicitClient.call(this, done);
      }
    }
  }

  if (this.options.client_credentials) {
    // BUG FIX: same null-guard on `client` as above.
    if (client && client.clientId && client.clientSecret) {
      this.client = client;
      this.req.auth_type = 'client_credentials';
      return getUserFromClient.call(this, done);
    }
  }

  // Fall through to standard RFC 6750 bearer-token authentication.
  getBearerToken.call(this, done);
}

/**
 * Validate client credentials against the model and expose the associated
 * user on the request.
 *
 * @param {Function} done
 * @this  OAuth
 */
function getUserFromClient(done) {
  var self = this;
  this.model.getClient(this.client.clientId, this.client.clientSecret,
      function (err, client) {
    if (err) return done(error('server_error', false, err));

    if (!client) {
      return done(error('invalid_client', 'Client credentials are invalid'));
    }

    self.model.getUserFromClient(client, function (err, user) {
      if (err) return done(error('server_error', false, err));

      if (!user) {
        return done(error('invalid_grant', 'Client credentials are invalid'));
      }

      self.req.oauth = { bearerToken: user };
      self.req.user = { id: user.id };
      done();
    });
  });
}

/**
 * Validate the implicit-grant client, check the redirect_uri against the
 * client's registered value(s), and expose the associated user.
 *
 * @param {Function} done
 * @this  OAuth
 */
function checkImplicitClient (done) {
  var self = this;
  this.model.getClient(this.clientId, null, function (err, client) {
    if (err) return done(error('server_error', false, err));

    if (!client) {
      return done(error('invalid_client', 'Invalid client credentials'));
    } else if (self.redirectUri && Array.isArray(client.redirectUri)) {
      if (client.redirectUri.indexOf(self.redirectUri) === -1) {
        return done(error('invalid_request', 'redirect_uri does not match'));
      }
      client.redirectUri = self.redirectUri;
    } else if (self.redirectUri && client.redirectUri !== self.redirectUri) {
      return done(error('invalid_request', 'redirect_uri does not match'));
    }

    self.model.getUserFromClient(client, function (err, user) {
      if (err) return done(error('server_error', false, err));

      if (!user) {
        return done(error('invalid_grant', 'Client credentials are invalid'));
      }

      // The request contains valid params so any errors after this point
      // are redirected to the redirect_uri
      self.res.redirectUri = client.redirectUri;
      self.res.oauthRedirect = true;

      self.req.oauth = { bearerToken: user };
      self.req.user = { id: user.id };
      done();
    });
  });
}

/**
 * Get bearer token
 *
 * Extract token from request according to RFC6750
 *
 * @param {Function} done
 * @this OAuth
 */
function getBearerToken (done) {
  var headerToken = this.req.get('Authorization'),
    getToken = this.req.query.access_token,
    postToken = this.req.body ? this.req.body.access_token : undefined;

  // Check exactly one method was used
  var methodsUsed = (headerToken !== undefined) + (getToken !== undefined) +
    (postToken !== undefined);

  if (methodsUsed > 1) {
    return done(error('invalid_request',
      'Only one method may be used to authenticate at a time (Auth header, ' +
        'GET or POST).'));
  } else if (methodsUsed === 0) {
    return done(error('invalid_request', 'The access token was not found'));
  }

  // Header: http://tools.ietf.org/html/rfc6750#section-2.1
  if (headerToken) {
    var matches = headerToken.match(/Bearer\s(\S+)/);

    if (!matches) {
      return done(error('invalid_request', 'Malformed auth header'));
    }

    headerToken = matches[1];
  }

  // POST: http://tools.ietf.org/html/rfc6750#section-2.2
  if (postToken) {
    if (this.req.method === 'GET') {
      return done(error('invalid_request',
        'Method cannot be GET When putting the token in the body.'));
    }

    if (!this.req.is('application/x-www-form-urlencoded')) {
      return done(error('invalid_request', 'When putting the token in the ' +
        'body, content type must be application/x-www-form-urlencoded.'));
    }
  }

  this.bearerToken = headerToken || postToken || getToken;
  checkToken.call(this, done);
}

/**
 * Check token
 *
 * Check it against model, ensure it's not expired
 * @param {Function} done
 * @this OAuth
 */
function checkToken (done) {
  var self = this;
  this.model.getAccessToken(this.bearerToken, function (err, token) {
    if (err) return done(error('server_error', false, err));

    if (!token) {
      return done(error('invalid_token',
        'The access token provided is invalid.'));
    }

    // `expires === null` means a non-expiring token; anything else must be a
    // future Date to be accepted.
    if (token.expires !== null &&
      (!token.expires || token.expires < new Date())) {
      return done(error('invalid_token',
        'The access token provided has expired.'));
    }

    // Expose params
    self.req.oauth = { bearerToken: token };
    self.req.user = token.user ? token.user : { id: token.userId };

    done();
  });
}

/**
 * Check scope
 *
 * Delegates to the model's optional `authoriseScope`; a missing hook means
 * all scopes are allowed.
 *
 * @param {Function} done
 * @this OAuth
 */
function checkScope (done) {
  if (!this.model.authoriseScope) return done();

  this.model.authoriseScope(this.req.oauth.bearerToken, this.options.scope,
      function (err, invalid) {
    if (err) return done(error('server_error', false, err));
    if (invalid) return done(error('invalid_scope', invalid));

    done();
  });
}
zoltangbereczky/node-oauth2-server
lib/authorise.js
JavaScript
apache-2.0
6,699
# Jupyter Notebook server extension that lets users push notebook code to a
# Saagie platform as Python jobs. Registers two Tornado handlers (/saagie and
# /saagie/check) and a small view-dispatch layer rendered with Jinja2.
from functools import wraps
import json
import os
import traceback

import validators
from jinja2 import Environment, PackageLoader
from notebook.utils import url_path_join
from notebook.base.handlers import IPythonHandler
import requests
from requests.auth import HTTPBasicAuth

env = Environment(
    loader=PackageLoader('saagie', 'jinja2'),
)

# Module-level session state. All of these are (re)assigned by
# define_globals() / login_form() / logout(); only SAAGIE_ROOT_URL may be
# pre-seeded from the environment.
SAAGIE_ROOT_URL = os.environ.get("SAAGIE_ROOT_URL", None)
SAAGIE_USERNAME = None
PLATFORMS_URL = None
SAAGIE_BASIC_AUTH_TOKEN = None
JOBS_URL_PATTERN = None
JOB_URL_PATTERN = None
JOB_UPGRADE_URL_PATTERN = None
SCRIPT_UPLOAD_URL_PATTERN = None


def get_absolute_saagie_url(saagie_url):
    """Prefix a root-relative Saagie path with SAAGIE_ROOT_URL; pass absolute
    URLs through unchanged."""
    if saagie_url.startswith('/'):
        return SAAGIE_ROOT_URL + saagie_url
    return saagie_url


class ResponseError(Exception):
    """Raised by views to make the handler answer with a bare HTTP status."""

    def __init__(self, status_code):
        self.status_code = status_code
        super(ResponseError, self).__init__(status_code)


class SaagieHandler(IPythonHandler):
    """Dispatches GET/POST /saagie requests to the view named by the 'view'
    request argument and renders the returned Jinja2 template."""

    def handle_request(self, method):
        # Tornado stores each argument as a list of bytes; keep the first
        # value of each, decoded.
        data = {k: v[0].decode() for k, v in self.request.arguments.items()}
        if 'view' not in data:
            self.send_error(404)
            return
        view_name = data.pop('view')
        notebook_path = data.pop('notebook_path', None)
        notebook_json = data.pop('notebook_json', None)
        notebook = Notebook(notebook_path, notebook_json)
        try:
            template_name, template_data = views.render(
                view_name, notebook=notebook, data=data, method=method)
        except ResponseError as e:
            self.send_error(e.status_code)
            return
        except:
            # NOTE(review): bare except — any view failure (including
            # KeyboardInterrupt/SystemExit) is rendered as a 500 page with
            # the traceback embedded.
            template_name = 'internal_error.html'
            template_data = {'error': traceback.format_exc()}
            self.set_status(500)
        template_data.update(
            notebook=notebook,
        )
        template = env.get_template(template_name)
        self.finish(template.render(template_data))

    def get(self):
        self.handle_request('GET')

    def post(self):
        self.handle_request('POST')

    def check_xsrf_cookie(self):
        # NOTE(review): deliberately disables Tornado's XSRF protection for
        # this handler (requests come from notebook-side JS).
        return


class SaagieCheckHandler(IPythonHandler):
    """Liveness probe: GET /saagie/check returns an empty 200."""

    def get(self):
        self.finish()


class SaagieJobRun:
    """Snapshot of one finished (or running) execution of a Saagie job."""

    def __init__(self, job, run_data):
        self.job = job
        self.id = run_data['id']
        self.status = run_data['status']
        # Logs may be missing from the API payload; default to empty.
        self.stderr = run_data.get('logs_err', '')
        self.stdout = run_data.get('logs_out', '')


class SaagieJob:
    """Client-side wrapper around one Saagie 'processing' job."""

    @classmethod
    def from_id(cls, notebook, platform_id, job_id):
        # Fetch the job payload from the platform API and wrap it.
        return SaagieJob(
            notebook,
            requests.get(JOB_URL_PATTERN % (platform_id, job_id),
                         auth=SAAGIE_BASIC_AUTH_TOKEN).json())

    def __init__(self, notebook, job_data):
        self.notebook = notebook
        self.data = job_data
        self.platform_id = job_data['platform_id']
        self.capsule_type = job_data['capsule_code']
        self.id = job_data['id']
        self.name = job_data['name']
        # Set by fetch_logs() once a terminal run exists.
        self.last_run = None

    def set_as_current(self):
        """Mark this job as the notebook's active job."""
        self.notebook.current_job = self

    @property
    def url(self):
        # API endpoint for this job.
        return (JOBS_URL_PATTERN + '/%s') % (self.platform_id, self.id)

    @property
    def admin_url(self):
        # Saagie manager UI deep-link for this job.
        return get_absolute_saagie_url('/#/manager/%s/job/%s'
                                       % (self.platform_id, self.id))

    @property
    def logs_url(self):
        return self.admin_url + '/logs'

    @property
    def is_started(self):
        return self.last_run is not None

    def fetch_logs(self):
        """Populate self.last_run once the last instance reached a terminal
        status (SUCCESS or FAILED); no-op otherwise."""
        job_data = requests.get(self.url, auth=SAAGIE_BASIC_AUTH_TOKEN).json()
        run_data = job_data.get('last_instance')
        if run_data is None or run_data['status'] not in ('SUCCESS', 'FAILED'):
            return
        run_data = requests.get(
            get_absolute_saagie_url('/api/v1/jobtask/%s' % run_data['id']),
            auth=SAAGIE_BASIC_AUTH_TOKEN).json()
        self.last_run = SaagieJobRun(self, run_data)

    @property
    def details_template_name(self):
        return 'include/python_job_details.html'

    def __str__(self):
        return self.name

    def __eq__(self, other):
        if other is None:
            return False
        return self.platform_id == other.platform_id and self.id == other.id

    def __lt__(self, other):
        # Used for "most recent first" sorting (ids are assumed increasing —
        # TODO confirm against the Saagie API).
        if other is None:
            return False
        return self.id < other.id


class SaagiePlatform:
    """One Saagie platform; lists the jobs this extension can manage."""

    # Only Python capsules are handled by this extension.
    SUPPORTED_CAPSULE_TYPES = {'python'}

    def __init__(self, notebook, platform_data):
        self.notebook = notebook
        self.id = platform_data['id']
        self.name = platform_data['name']
        self.capsule_types = {c['code'] for c in platform_data['capsules']}

    @property
    def is_supported(self):
        # True if the platform offers at least one supported capsule type.
        return not self.capsule_types.isdisjoint(self.SUPPORTED_CAPSULE_TYPES)

    def get_jobs(self):
        """Return supported 'processing' jobs on this platform ([] when the
        platform has no supported capsule types)."""
        if not self.is_supported:
            return []
        jobs_data = requests.get(JOBS_URL_PATTERN % self.id,
                                 auth=SAAGIE_BASIC_AUTH_TOKEN).json()
        return [SaagieJob(self.notebook, job_data)
                for job_data in jobs_data
                if job_data['category'] == 'processing' and
                job_data['capsule_code'] in self.SUPPORTED_CAPSULE_TYPES]

    def __eq__(self, other):
        return self.id == other.id


class Notebook:
    """Lightweight view of a notebook file, cached per path so the selected
    job survives across requests."""

    # Path -> Notebook instance cache (shared across requests).
    CACHE = {}

    def __new__(cls, path, json):
        if path in cls.CACHE:
            return cls.CACHE[path]
        cls.CACHE[path] = new = super(Notebook, cls).__new__(cls)
        return new

    def __init__(self, path, json_data):
        # __init__ runs again for cached instances too; path/json are
        # refreshed but current_job is preserved (see hasattr guard below).
        if path is None:
            path = 'Untitled.ipynb'
        if json_data is None:
            json_data = json.dumps({
                'cells': [],
                'metadata': {'kernelspec': {'name': 'python3'}}})
        self.path = path
        self.json = json.loads(json_data)
        # In cached instances, current_job is already defined.
        if not hasattr(self, 'current_job'):
            self.current_job = None

    @property
    def name(self):
        # File name without directory or extension.
        return os.path.splitext(os.path.basename(self.path))[0]

    @property
    def kernel_name(self):
        return self.json['metadata']['kernelspec']['name']

    @property
    def kernel_display_name(self):
        return self.json['metadata']['kernelspec']['display_name']

    def get_code_cells(self):
        """Return the source of every code cell, in notebook order."""
        return [cell['source'] for cell in self.json['cells']
                if cell['cell_type'] == 'code']

    def get_code(self, indices=None):
        """Join the selected code cells (all of them by default) into one
        script, separated by blank lines."""
        cells = self.get_code_cells()
        if indices is None:
            indices = list(range(len(cells)))
        return '\n\n\n'.join([cells[i] for i in indices])

    def get_platforms(self):
        """Fetch all platforms visible to the logged-in user."""
        return [SaagiePlatform(self, platform_data)
                for platform_data in requests.get(
                    PLATFORMS_URL, auth=SAAGIE_BASIC_AUTH_TOKEN).json()]


class ViewsCollection(dict):
    """Registry mapping view names to view callables; @views.add registers."""

    def add(self, func):
        self[func.__name__] = func
        return func

    def render(self, view_name, notebook, data=None, method='GET', **kwargs):
        """Invoke the named view and return (template_name, template_data).

        A view may return just a data dict (template defaults to
        '<view_name>.html') or an explicit (template_name, data) tuple.
        Unknown view names raise ResponseError(404)."""
        if data is None:
            data = {}
        try:
            view = views[view_name]
        except KeyError:
            raise ResponseError(404)
        template_data = view(method, notebook, data, **kwargs)
        if isinstance(template_data, tuple):
            template_name, template_data = template_data
        else:
            template_name = view.__name__ + '.html'
        return template_name, template_data


views = ViewsCollection()


@views.add
def modal(method, notebook, data):
    # Empty shell view; the template provides the modal chrome.
    return {}


def clear_basic_auth_token():
    """Drop the cached Basic Auth token, forcing a new login."""
    global SAAGIE_BASIC_AUTH_TOKEN
    SAAGIE_BASIC_AUTH_TOKEN = None

# Init an empty Basic Auth token on first launch
clear_basic_auth_token()


def is_logged():
    """Return True if a Basic Auth token exists and still authenticates
    against the Saagie API; otherwise clear the token and return falsy."""
    if SAAGIE_ROOT_URL is None or SAAGIE_BASIC_AUTH_TOKEN is None:
        return False
    else:
        # Check if Basic token is still valid
        is_logged_in = False
        try:
            response = requests.get(SAAGIE_ROOT_URL + '/api/v1/user-current',
                                    auth=SAAGIE_BASIC_AUTH_TOKEN,
                                    allow_redirects=False)
            is_logged_in = response.ok
        except (requests.ConnectionError, requests.RequestException,
                requests.HTTPError, requests.Timeout) as err:
            print ('Error while trying to connect to Saagie: ', err)
        if is_logged_in is not True:
            # Remove Basic Auth token from globals. It will force a new login phase.
            clear_basic_auth_token()
        return is_logged_in


def define_globals(saagie_root_url, saagie_username):
    """Derive and store all API URL patterns from the given root URL; no-op
    when saagie_root_url is None."""
    if saagie_root_url is not None:
        global SAAGIE_ROOT_URL
        global SAAGIE_USERNAME
        global PLATFORMS_URL
        global JOBS_URL_PATTERN
        global JOB_URL_PATTERN
        global JOB_UPGRADE_URL_PATTERN
        global SCRIPT_UPLOAD_URL_PATTERN
        SAAGIE_USERNAME = saagie_username
        SAAGIE_ROOT_URL = saagie_root_url.strip("/")
        PLATFORMS_URL = SAAGIE_ROOT_URL + '/api/v1/platform'
        JOBS_URL_PATTERN = PLATFORMS_URL + '/%s/job'
        JOB_URL_PATTERN = JOBS_URL_PATTERN + '/%s'
        JOB_UPGRADE_URL_PATTERN = JOBS_URL_PATTERN + '/%s/version'
        SCRIPT_UPLOAD_URL_PATTERN = JOBS_URL_PATTERN + '/upload'


@views.add
def login_form(method, notebook, data):
    """Login view: on POST, validate the URL and credentials against the
    Saagie API; on success jump to the capsule-type chooser."""
    if method == 'POST':
        # check if the given Saagie URL is well formed
        if not validators.url(data['saagie_root_url']):
            return {'error': 'Invalid URL',
                    'saagie_root_url': data['saagie_root_url'] or '',
                    'username': data['username'] or ''}
        define_globals(data['saagie_root_url'], data['username'])
        try:
            basic_token = HTTPBasicAuth(data['username'], data['password'])
            current_user_response = requests.get(
                SAAGIE_ROOT_URL + '/api/v1/user-current',
                auth=basic_token,
                allow_redirects=False)
            if current_user_response.ok:
                # Login succeeded, keep the basic token for future API calls
                global SAAGIE_BASIC_AUTH_TOKEN
                SAAGIE_BASIC_AUTH_TOKEN = basic_token
        except (requests.ConnectionError, requests.RequestException,
                requests.HTTPError, requests.Timeout) as err:
            print ('Error while trying to connect to Saagie: ', err)
            return {'error': 'Connection error',
                    'saagie_root_url': SAAGIE_ROOT_URL,
                    'username': SAAGIE_USERNAME or ''}
        if SAAGIE_BASIC_AUTH_TOKEN is not None:
            return views.render('capsule_type_chooser', notebook)
        return {'error': 'Invalid URL, username or password.',
                'saagie_root_url': SAAGIE_ROOT_URL,
                'username': SAAGIE_USERNAME or ''}
    if is_logged():
        return views.render('capsule_type_chooser', notebook)
    return {'error': None, 'saagie_root_url': SAAGIE_ROOT_URL or '',
            'username': SAAGIE_USERNAME or ''}


def login_required(view):
    """View decorator: render the login form instead when not logged in."""
    @wraps(view)
    def inner(method, notebook, data, *args, **kwargs):
        if not is_logged():
            return views.render('login_form', notebook)
        return view(method, notebook, data, *args, **kwargs)
    return inner


@views.add
@login_required
def capsule_type_chooser(method, notebook, data):
    return {'username': SAAGIE_USERNAME}


def get_job_form(method, notebook, data):
    """Common context for the create/update job forms: platform list plus
    either empty defaults or the current job's stored values."""
    context = {'platforms': notebook.get_platforms()}
    context['values'] = ({'current': {'options': {}}}
                         if notebook.current_job is None
                         else notebook.current_job.data)
    return context


def create_job_base_data(data):
    """Map form fields to the Saagie job-creation payload skeleton."""
    return {
        'platform_id': data['saagie-platform'],
        'category': 'processing',
        'name': data['job-name'],
        'description': data['description'],
        'current': {
            'cpu': data['cpu'],
            'disk': data['disk'],
            'memory': data['ram'],
            'isInternalSubDomain': False,
            'isInternalPort': False,
            'options': {}
        }
    }


def upload_python_script(notebook, data):
    """Upload the selected notebook cells as '<job-name>.py' and return the
    server-side file name."""
    # 'code-lines' is a '|'-separated list of code-cell indices.
    code = notebook.get_code(map(int, data.get('code-lines', '').split('|')))
    files = {'file': (data['job-name'] + '.py', code)}
    return requests.post(
        SCRIPT_UPLOAD_URL_PATTERN % data['saagie-platform'],
        files=files, auth=SAAGIE_BASIC_AUTH_TOKEN).json()['fileName']


@views.add
@login_required
def python_job_form(method, notebook, data):
    """Create a new Python job from the notebook (POST) or show the creation
    form (GET)."""
    if method == 'POST':
        platform_id = data['saagie-platform']
        job_data = create_job_base_data(data)
        job_data['capsule_code'] = 'python'
        job_data['always_email'] = False
        job_data['manual'] = True
        job_data['retry'] = ''
        current = job_data['current']
        current['options']['language_version'] = data['language-version']
        current['releaseNote'] = data['release-note']
        current['template'] = data['shell-command']
        current['file'] = upload_python_script(notebook, data)
        new_job_data = requests.post(JOBS_URL_PATTERN % platform_id,
                                     json=job_data,
                                     auth=SAAGIE_BASIC_AUTH_TOKEN).json()
        job = SaagieJob(notebook, new_job_data)
        job.set_as_current()
        return views.render('starting_job', notebook, {'job': job})
    context = get_job_form(method, notebook, data)
    context['action'] = '/saagie?view=python_job_form'
    context['username'] = SAAGIE_USERNAME
    return context


@views.add
@login_required
def update_python_job(method, notebook, data):
    """Push a new version of the notebook's current job (POST) or show the
    update form (GET)."""
    if method == 'POST':
        job = notebook.current_job
        platform_id = job.platform_id
        # Name/platform are fixed by the existing job; only the version data
        # comes from the form.
        data['saagie-platform'] = platform_id
        data['job-name'] = job.name
        data['description'] = ''
        current = create_job_base_data(data)['current']
        current['options']['language_version'] = data['language-version']
        current['releaseNote'] = data['release-note']
        current['template'] = data['shell-command']
        current['file'] = upload_python_script(notebook, data)
        requests.post(JOB_UPGRADE_URL_PATTERN % (platform_id, job.id),
                      json={'current': current},
                      auth=SAAGIE_BASIC_AUTH_TOKEN)
        # Forget the previous run so starting_job polls the new one.
        job.last_run = None
        return views.render('starting_job', notebook, {'job': job})
    context = get_job_form(method, notebook, data)
    context['action'] = '/saagie?view=update_python_job'
    context['username'] = SAAGIE_USERNAME
    return context


@views.add
@login_required
def select_python_job(method, notebook, data):
    """Pick an existing job to update (POST) or list candidate jobs grouped
    by platform (GET)."""
    if method == 'POST':
        # 'job' is encoded as '<platform_id>-<job_id>'.
        platform_id, job_id = data['job'].split('-')
        notebook.current_job = SaagieJob.from_id(notebook, platform_id, job_id)
        return views.render('update_python_job', notebook, data)
    jobs_by_platform = []
    for platform in notebook.get_platforms():
        jobs = platform.get_jobs()
        if jobs:
            # Newest (highest id) first.
            jobs_by_platform.append((platform,
                                     list(sorted(jobs, reverse=True))))
    return {'jobs_by_platform': jobs_by_platform,
            'action': '/saagie?view=select_python_job',
            'username': SAAGIE_USERNAME}


@views.add
@login_required
def unsupported_kernel(method, notebook, data):
    return {'username': SAAGIE_USERNAME}


@views.add
@login_required
def starting_job(method, notebook, data):
    """Poll the current job; switch to the 'started' view once it has a
    terminal run."""
    job = notebook.current_job
    job.fetch_logs()
    if job.is_started:
        return views.render('started_job', notebook, {'job': job})
    return {'job': job, 'username': SAAGIE_USERNAME}


@views.add
@login_required
def started_job(method, notebook, data):
    return {'job': notebook.current_job, 'username': SAAGIE_USERNAME}


@views.add
def logout(method, notebook, data):
    # Wipe the whole session: token, root URL and username.
    global SAAGIE_BASIC_AUTH_TOKEN
    global SAAGIE_ROOT_URL
    global SAAGIE_USERNAME
    SAAGIE_BASIC_AUTH_TOKEN = None
    SAAGIE_ROOT_URL = None
    SAAGIE_USERNAME = None
    return {}


def load_jupyter_server_extension(nb_app):
    """Notebook-server entry point: mount /saagie and /saagie/check."""
    web_app = nb_app.web_app
    base_url = web_app.settings['base_url']
    route_pattern = url_path_join(base_url, '/saagie')
    web_app.add_handlers('.*$', [(route_pattern, SaagieHandler)])
    route_pattern = url_path_join(base_url, '/saagie/check')
    web_app.add_handlers('.*$', [(route_pattern, SaagieCheckHandler)])
saagie/jupyter-saagie-plugin
saagie/server_extension.py
Python
apache-2.0
16,090
//// [contextualTypeWithUnionTypeMembers.ts] //When used as a contextual type, a union type U has those members that are present in any of // its constituent types, with types that are unions of the respective members in the constituent types. interface I1<T> { commonMethodType(a: string): string; commonPropertyType: string; commonMethodWithTypeParameter(a: T): T; methodOnlyInI1(a: string): string; propertyOnlyInI1: string; } interface I2<T> { commonMethodType(a: string): string; commonPropertyType: string; commonMethodWithTypeParameter(a: T): T; methodOnlyInI2(a: string): string; propertyOnlyInI2: string; } // Let S be the set of types in U that has a property P. // If S is not empty, U has a property P of a union type of the types of P from each type in S. var i1: I1<number>; var i2: I2<number>; var i1Ori2: I1<number> | I2<number> = i1; var i1Ori2: I1<number> | I2<number> = i2; var i1Ori2: I1<number> | I2<number> = { // Like i1 commonPropertyType: "hello", commonMethodType: a=> a, commonMethodWithTypeParameter: a => a, methodOnlyInI1: a => a, propertyOnlyInI1: "Hello", }; var i1Ori2: I1<number> | I2<number> = { // Like i2 commonPropertyType: "hello", commonMethodType: a=> a, commonMethodWithTypeParameter: a => a, methodOnlyInI2: a => a, propertyOnlyInI2: "Hello", }; var i1Ori2: I1<number> | I2<number> = { // Like i1 and i2 both commonPropertyType: "hello", commonMethodType: a=> a, commonMethodWithTypeParameter: a => a, methodOnlyInI1: a => a, propertyOnlyInI1: "Hello", methodOnlyInI2: a => a, propertyOnlyInI2: "Hello", }; var arrayI1OrI2: Array<I1<number> | I2<number>> = [i1, i2, { // Like i1 commonPropertyType: "hello", commonMethodType: a=> a, commonMethodWithTypeParameter: a => a, methodOnlyInI1: a => a, propertyOnlyInI1: "Hello", }, { // Like i2 commonPropertyType: "hello", commonMethodType: a=> a, commonMethodWithTypeParameter: a => a, methodOnlyInI2: a => a, propertyOnlyInI2: "Hello", }, { // Like i1 and i2 both commonPropertyType: "hello", 
commonMethodType: a=> a, commonMethodWithTypeParameter: a => a, methodOnlyInI1: a => a, propertyOnlyInI1: "Hello", methodOnlyInI2: a => a, propertyOnlyInI2: "Hello", }]; interface I11 { commonMethodDifferentReturnType(a: string, b: number): string; commonPropertyDifferentType: string; } interface I21 { commonMethodDifferentReturnType(a: string, b: number): number; commonPropertyDifferentType: number; } var i11: I11; var i21: I21; var i11Ori21: I11 | I21 = i11; var i11Ori21: I11 | I21 = i21; var i11Ori21: I11 | I21 = { // Like i1 commonMethodDifferentReturnType: (a, b) => { var z = a.charAt(b); return z; }, commonPropertyDifferentType: "hello", }; var i11Ori21: I11 | I21 = { // Like i2 commonMethodDifferentReturnType: (a, b) => { var z = a.charCodeAt(b); return z; }, commonPropertyDifferentType: 10, }; var arrayOrI11OrI21: Array<I11 | I21> = [i11, i21, i11 || i21, { // Like i1 commonMethodDifferentReturnType: (a, b) => { var z = a.charAt(b); return z; }, commonPropertyDifferentType: "hello", }, { // Like i2 commonMethodDifferentReturnType: (a, b) => { var z = a.charCodeAt(b); return z; }, commonPropertyDifferentType: 10, }]; //// [contextualTypeWithUnionTypeMembers.js] // Let S be the set of types in U that has a property P. // If S is not empty, U has a property P of a union type of the types of P from each type in S. 
var i1; var i2; var i1Ori2 = i1; var i1Ori2 = i2; var i1Ori2 = { commonPropertyType: "hello", commonMethodType: function (a) { return a; }, commonMethodWithTypeParameter: function (a) { return a; }, methodOnlyInI1: function (a) { return a; }, propertyOnlyInI1: "Hello" }; var i1Ori2 = { commonPropertyType: "hello", commonMethodType: function (a) { return a; }, commonMethodWithTypeParameter: function (a) { return a; }, methodOnlyInI2: function (a) { return a; }, propertyOnlyInI2: "Hello" }; var i1Ori2 = { commonPropertyType: "hello", commonMethodType: function (a) { return a; }, commonMethodWithTypeParameter: function (a) { return a; }, methodOnlyInI1: function (a) { return a; }, propertyOnlyInI1: "Hello", methodOnlyInI2: function (a) { return a; }, propertyOnlyInI2: "Hello" }; var arrayI1OrI2 = [i1, i2, { commonPropertyType: "hello", commonMethodType: function (a) { return a; }, commonMethodWithTypeParameter: function (a) { return a; }, methodOnlyInI1: function (a) { return a; }, propertyOnlyInI1: "Hello" }, { commonPropertyType: "hello", commonMethodType: function (a) { return a; }, commonMethodWithTypeParameter: function (a) { return a; }, methodOnlyInI2: function (a) { return a; }, propertyOnlyInI2: "Hello" }, { commonPropertyType: "hello", commonMethodType: function (a) { return a; }, commonMethodWithTypeParameter: function (a) { return a; }, methodOnlyInI1: function (a) { return a; }, propertyOnlyInI1: "Hello", methodOnlyInI2: function (a) { return a; }, propertyOnlyInI2: "Hello" }]; var i11; var i21; var i11Ori21 = i11; var i11Ori21 = i21; var i11Ori21 = { // Like i1 commonMethodDifferentReturnType: function (a, b) { var z = a.charAt(b); return z; }, commonPropertyDifferentType: "hello" }; var i11Ori21 = { // Like i2 commonMethodDifferentReturnType: function (a, b) { var z = a.charCodeAt(b); return z; }, commonPropertyDifferentType: 10 }; var arrayOrI11OrI21 = [i11, i21, i11 || i21, { // Like i1 commonMethodDifferentReturnType: function (a, b) { var z = 
a.charAt(b); return z; }, commonPropertyDifferentType: "hello" }, { // Like i2 commonMethodDifferentReturnType: function (a, b) { var z = a.charCodeAt(b); return z; }, commonPropertyDifferentType: 10 }];
freedot/tstolua
tests/baselines/reference/contextualTypeWithUnionTypeMembers.js
JavaScript
apache-2.0
6,404
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using SuperMap.WinRT.Core;
using SuperMap.WinRT.Utilities;
using Windows.Data.Json;
using Windows.UI;

namespace SuperMap.WinRT.REST
{
    /// <summary>
    /// <para>${REST_ServerLayer_Title}</para>
    /// <para>${REST_ServerLayer_Description}</para>
    /// </summary>
    public class ServerLayer
    {
        /// <summary>${REST_ServerLayer_constructor_D}</summary>
        public ServerLayer()
        {
        }

        //Property

        /// <summary>${REST_ServerLayer_attribute_Bounds_D}</summary>
        public Rectangle2D Bounds { get; internal set; }

        // Properties mirroring the server-side "Layer" object.
        /// <summary>${REST_ServerLayer_attribute_caption_D}</summary>
        public string Caption { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_Description_D}</summary>
        public string Description { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_Name_D}</summary>
        public string Name { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_IsQueryable_D}</summary>
        public bool IsQueryable { get; internal set; }

        // Sub-layers of a layer are not handled for now.
        //public System.Collections.Generic.List<LayerInfo> SubLayers { get; internal set; }
        // Defaults to "UGC" here and is not exposed to callers.
        //private string LayerType = "UGC";

        /// <summary>${REST_ServerLayer_attribute_IsVisible_D}</summary>
        public bool IsVisible { get; internal set; }

        // Properties mirroring the server-side "UGCMapLayer" object.
        /// <summary>${REST_ServerLayer_attribute_IsCompleteLineSymbolDisplayed_D}</summary>
        public bool IsCompleteLineSymbolDisplayed { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_MaxScale_D}</summary>
        public double MaxScale { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_minScale_D}</summary>
        public double MinScale { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_MinVisibleGeometrySize_D}</summary>
        public double MinVisibleGeometrySize { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_OpaqueRate_D}</summary>
        public int OpaqueRate { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_IsSymbolScalable_D}</summary>
        public bool IsSymbolScalable { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_SymbolScale_D}</summary>
        public double SymbolScale { get; internal set; }

        // Properties mirroring the server-side "UGCLayer" object.
        /// <summary>${REST_ServerLayer_attribute_DatasetInfo_D}</summary>
        public DatasetInfo DatasetInfo { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_DisplayFilter_D}</summary>
        public string DisplayFilter { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_JoinItems_D}</summary>
        public System.Collections.Generic.List<JoinItem> JoinItems { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_RepresentationField_D}</summary>
        public string RepresentationField { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_UGCLayerType_D}</summary>
        public SuperMapLayerType UGCLayerType { get; internal set; }

        /// <summary>${REST_ServerLayer_attribute_UGCLayer_D}</summary>
        public UGCLayer UGCLayer { get; internal set; }

        /// <summary>${REST_ServerLayer_method_FromJson_D}</summary>
        /// <returns>${REST_ServerLayer_method_FromJson_return}</returns>
        /// <param name="json">${REST_ServerLayer_method_FromJson_param_jsonObject}</param>
        internal static ServerLayer FromJson(JsonObject json)
        {
            var serverLayer = new ServerLayer();
            // --- Plain Layer attributes ---
            if (json["bounds"].ValueType != JsonValueType.Null)
            {
                serverLayer.Bounds = JsonHelper.ToRectangle2D(json["bounds"].GetObjectEx());
            }
            else
            {
                //null
            }
            serverLayer.Caption = json["caption"].GetStringEx();
            serverLayer.Description = json["description"].GetStringEx();
            serverLayer.Name = json["name"].GetStringEx();
            serverLayer.IsQueryable = json["queryable"].GetBooleanEx();
            serverLayer.IsVisible = json["visible"].GetBooleanEx();
            // --- UGCMapLayer attributes ---
            serverLayer.IsCompleteLineSymbolDisplayed = json["completeLineSymbolDisplayed"].GetBooleanEx();
            serverLayer.MaxScale = json["maxScale"].GetNumberEx();
            serverLayer.MinScale = json["minScale"].GetNumberEx();
            serverLayer.MinVisibleGeometrySize = json["minVisibleGeometrySize"].GetNumberEx();
            serverLayer.OpaqueRate = (int)json["opaqueRate"].GetNumberEx();
            serverLayer.IsSymbolScalable = json["symbolScalable"].GetBooleanEx();
            serverLayer.SymbolScale = json["symbolScale"].GetNumberEx();
            // --- UGCLayer attributes ---
            serverLayer.DatasetInfo = DatasetInfo.FromJson(json["datasetInfo"].GetObjectEx());
            serverLayer.DisplayFilter = json["displayFilter"].GetStringEx();
            if (json["joinItems"].ValueType != JsonValueType.Null)
            {
                List<JoinItem> joinItems = new List<JoinItem>();
                foreach (JsonValue item in json["joinItems"].GetArray())
                {
                    joinItems.Add(JoinItem.FromJson(item.GetObjectEx()));
                }
                serverLayer.JoinItems = joinItems;
            }
            serverLayer.RepresentationField = json["representationField"].GetStringEx();
            // Decode the concrete UGC layer payload according to "ugcLayerType".
            if (json["ugcLayerType"].GetStringEx() == SuperMapLayerType.GRID.ToString())
            {
                // GRID: raster grid layer with a color ramp and grid-line styles.
                UGCGridLayer ugcGridLayer = new UGCGridLayer();
                List<Color> colors = new List<Color>();
                foreach (JsonValue colorItem in json["colors"].GetArray())
                {
                    colors.Add(ServerColor.FromJson(colorItem.GetObjectEx()).ToColor());
                }
                ugcGridLayer.Colors = colors;
                if (json["dashStyle"].ValueType != JsonValueType.Null)
                {
                    ugcGridLayer.DashStyle = ServerStyle.FromJson(json["dashStyle"].GetObjectEx());
                }
                if (json["gridType"].ValueType != JsonValueType.Null)
                {
                    ugcGridLayer.GridType = (GridType)Enum.Parse(typeof(GridType), json["gridType"].GetStringEx(), true);
                }
                else
                {
                }
                ugcGridLayer.HorizontalSpacing = json["horizontalSpacing"].GetNumberEx();
                ugcGridLayer.SizeFixed = json["sizeFixed"].GetBooleanEx();
                if (json["solidStyle"].ValueType != JsonValueType.Null)
                {
                    ugcGridLayer.SolidStyle = ServerStyle.FromJson(json["solidStyle"].GetObjectEx());
                }
                if (json["specialColor"].ValueType != JsonValueType.Null)
                {
                    ugcGridLayer.SpecialColor = ServerColor.FromJson(json["specialColor"].GetObjectEx()).ToColor();
                }
                ugcGridLayer.SpecialValue = json["specialValue"].GetNumberEx();
                ugcGridLayer.VerticalSpacing = json["verticalSpacing"].GetNumberEx();
                serverLayer.UGCLayer = ugcGridLayer;
            }
            else if (json["ugcLayerType"].GetStringEx() == SuperMapLayerType.IMAGE.ToString())
            {
                // IMAGE: raster image layer with brightness/contrast/band selection.
                UGCImageLayer ugcImageLayer = new UGCImageLayer();
                ugcImageLayer.Brightness = (int)json["brightness"].GetNumberEx();
                if (json["colorSpaceType"].ValueType != JsonValueType.Null)
                {
                    ugcImageLayer.ColorSpaceType = (ColorSpaceType)Enum.Parse(typeof(ColorSpaceType), json["colorSpaceType"].GetStringEx(), true);
                }
                else
                {
                }
                ugcImageLayer.Contrast = (int)json["contrast"].GetNumberEx();
                List<int> bandIndexes = new List<int>();
                if (json["displayBandIndexes"].ValueType != JsonValueType.Null && (json["displayBandIndexes"].GetArray()).Count > 0)
                {
                    foreach (JsonObject item in json["displayBandIndexes"].GetArray())
                    {
                        bandIndexes.Add((int)item.GetNumber());
                    }
                    ugcImageLayer.DisplayBandIndexes = bandIndexes;
                }
                ugcImageLayer.Transparent = json["transparent"].GetBooleanEx();
                ugcImageLayer.TransparentColor = ServerColor.FromJson(json["transparentColor"].GetObjectEx()).ToColor();
                serverLayer.UGCLayer = ugcImageLayer;
            }
            else if (json["ugcLayerType"].GetStringEx() == SuperMapLayerType.THEME.ToString())
            {
                // THEME: thematic layer; dispatch on the nested theme "type" field.
                UGCThemeLayer ugcThemeLayer = new UGCThemeLayer();
                if (json["theme"].ValueType != JsonValueType.Null)
                {
                    if (json["theme"].GetObjectEx()["type"].GetStringEx() == "UNIQUE")
                    {
                        ugcThemeLayer.Theme = ThemeUnique.FromJson(json["theme"].GetObjectEx());
                    }
                    else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "RANGE")
                    {
                        ugcThemeLayer.Theme = ThemeRange.FromJson(json["theme"].GetObjectEx());
                    }
                    else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "LABEL")
                    {
                        ugcThemeLayer.Theme = ThemeLabel.FromJson(json["theme"].GetObjectEx());
                    }
                    else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "GRAPH")
                    {
                        ugcThemeLayer.Theme = ThemeGraph.FromJson(json["theme"].GetObjectEx());
                    }
                    else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "DOTDENSITY")
                    {
                        ugcThemeLayer.Theme = ThemeDotDensity.FromJson(json["theme"].GetObjectEx());
                    }
                    else if (json["theme"].GetObjectEx()["type"].GetStringEx() == "GRADUATEDSYMBOL")
                    {
                        ugcThemeLayer.Theme = ThemeGraduatedSymbol.FromJson(json["theme"].GetObjectEx());
                    }
                    else
                    {
                        // Add more later as needed; only these six theme types are handled for now.
                    }
                }
                // NOTE(review): this dereferences json["theme"] even when the
                // block above determined it was Null — looks like a latent
                // null-object access; confirm against the server contract.
                if (json["theme"].GetObjectEx()["type"].ValueType != JsonValueType.Null)
                {
                    ugcThemeLayer.ThemeType = (ThemeType)Enum.Parse(typeof(ThemeType), json["theme"].GetObjectEx()["type"].GetStringEx(), true);
                }
                serverLayer.UGCLayer = ugcThemeLayer;
                //ugcThemeLayer.Theme
            }
            else if (json["ugcLayerType"].GetStringEx() == SuperMapLayerType.VECTOR.ToString() && json.ContainsKey("style"))
            {
                serverLayer.UGCLayer = UGCVectorLayer.FromJson(json["style"].GetObjectEx());
            }
            else
            {
                serverLayer.UGCLayer = new UGCLayer();
            }
            if (json["ugcLayerType"].ValueType != JsonValueType.Null)
            {
                serverLayer.UGCLayerType = (SuperMapLayerType)Enum.Parse(typeof(SuperMapLayerType), json["ugcLayerType"].GetStringEx(), true);
            }
            else
            {
                // No handling needed.
            }
            // WMS and WFS layer types are not checked here.
            //else if (json["ugcLayerType"] == SuperMapLayerType.WMS.ToString())
            //{
            //}
            // Add type-specific properties according to the layer type.
            return serverLayer;
        }
    }
}
SuperMap/iClient-for-Win8
iClient60ForWinRT/SuperMap.WinRT.REST/Map/GetMapStatusAndLayerInfo/ServerLayer.cs
C#
apache-2.0
12,064
// /*
// Copyright The Kubernetes Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// */
//
// Code generated by MockGen. DO NOT EDIT.
// Source: /go/src/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go
//
// NOTE(review): generated file — change the source interface and regenerate
// with MockGen instead of hand-editing the mock bodies below.

// Package mockdiskclient is a generated GoMock package.
package mockdiskclient

import (
	context "context"
	reflect "reflect"

	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-12-01/compute"
	gomock "github.com/golang/mock/gomock"
	retry "sigs.k8s.io/cloud-provider-azure/pkg/retry"
)

// MockInterface is a mock of Interface interface.
type MockInterface struct {
	ctrl     *gomock.Controller
	recorder *MockInterfaceMockRecorder
}

// MockInterfaceMockRecorder is the mock recorder for MockInterface.
type MockInterfaceMockRecorder struct {
	mock *MockInterface
}

// NewMockInterface creates a new mock instance.
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
	mock := &MockInterface{ctrl: ctrl}
	mock.recorder = &MockInterfaceMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
	return m.recorder
}

// Get mocks base method.
func (m *MockInterface) Get(ctx context.Context, resourceGroupName, diskName string) (compute.Disk, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, diskName)
	ret0, _ := ret[0].(compute.Disk)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// Get indicates an expected call of Get.
func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, diskName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, diskName)
}

// CreateOrUpdate mocks base method.
func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.Disk) *retry.Error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, diskName, diskParameter)
	ret0, _ := ret[0].(*retry.Error)
	return ret0
}

// CreateOrUpdate indicates an expected call of CreateOrUpdate.
func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, diskName, diskParameter)
}

// Update mocks base method.
func (m *MockInterface) Update(ctx context.Context, resourceGroupName, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Update", ctx, resourceGroupName, diskName, diskParameter)
	ret0, _ := ret[0].(*retry.Error)
	return ret0
}

// Update indicates an expected call of Update.
func (mr *MockInterfaceMockRecorder) Update(ctx, resourceGroupName, diskName, diskParameter interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockInterface)(nil).Update), ctx, resourceGroupName, diskName, diskParameter)
}

// Delete mocks base method.
func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, diskName string) *retry.Error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, diskName)
	ret0, _ := ret[0].(*retry.Error)
	return ret0
}

// Delete indicates an expected call of Delete.
func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, diskName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, diskName)
}

// ListByResourceGroup mocks base method.
func (m *MockInterface) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, resourceGroupName)
	ret0, _ := ret[0].([]compute.Disk)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// ListByResourceGroup indicates an expected call of ListByResourceGroup.
func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, resourceGroupName)
}
kubernetes/autoscaler
cluster-autoscaler/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/mockdiskclient/interface.go
GO
apache-2.0
5,056
/* * Copyright 2014 Alexey Andreev. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.teavm.classlib.java.util; import org.teavm.classlib.java.io.TSerializable; import org.teavm.classlib.java.lang.TMath; import org.teavm.classlib.java.lang.TObject; import org.teavm.javascript.spi.GeneratedBy; /** * * @author Alexey Andreev */ public class TRandom extends TObject implements TSerializable { public TRandom() { } public TRandom(@SuppressWarnings("unused") long seed) { } public void setSeed(@SuppressWarnings("unused") long seed) { } protected int next(int bits) { return (int)(random() * (1L << TMath.min(32, bits))); } public void nextBytes(byte[] bytes) { for (int i = 0; i < bytes.length; ++i) { bytes[i] = (byte)next(8); } } public int nextInt() { return next(32); } public int nextInt(int n) { return (int)(random() * n); } public long nextLong() { return ((long)nextInt() << 32) | nextInt(); } public boolean nextBoolean() { return nextInt() % 2 == 0; } public float nextFloat() { return (float)random(); } public double nextDouble() { return random(); } @GeneratedBy(RandomNativeGenerator.class) private static native double random(); }
mpoindexter/teavm
teavm-classlib/src/main/java/org/teavm/classlib/java/util/TRandom.java
Java
apache-2.0
1,877
#include <iostream> #include "SParser.hpp" #pragma once namespace Silent { /*static class SymTablePrinter { public: static void Out(std::string str, uint64_t currentTab) { std::string tabs = ""; for (uint64_t i = 0; i < currentTab; i++) tabs += "\t"; std::cout << tabs << str << std::endl; } SymTablePrinter() { } static void PrintSymTable(SymbolTable* symTable) { PrintNode(symTable->self, 0); } static void PrintNode(TableNode node, uint64_t currentTab) { switch (node.nodeType) { case TableNode::Type::Program: { Program* p = (Program*)node.GetNode(); Out("Program", currentTab); currentTab++; for(TableNode node : p->table->GetItems()) PrintNode(node, currentTab); currentTab--; } break; case TableNode::Type::Namespace: { Namespace* n = (Namespace*)node.GetNode(); Out("Namespace " + n->GetId(), currentTab); currentTab++; for (TableNode node : n->GetTable()->GetItems()) PrintNode(node, currentTab); currentTab--; } break; case TableNode::Type::Subroutine: { Subroutine* s = (Subroutine*)node.GetNode(); Out("Subroutine " + s->GetId(), currentTab); currentTab++; for (TableNode node : s->GetTable()->GetItems()) PrintNode(node, currentTab); currentTab--; } break; case TableNode::Type::Variable: { Variable* v = (Variable*)node.GetNode(); Out("Variable " + v->GetId(), currentTab); } break; case TableNode::Type::Structure: { Type* t = (Type*)node.GetNode(); Out("Type " + t->GetId(), currentTab); currentTab++; for (TableNode node : t->GetTable()->GetItems()) PrintNode(node, currentTab); currentTab--; } break; } } };*/ }
EdwinRybarczyk/SilentProgrammingLanguage
SilentCompiler/include/SHelper.hpp
C++
apache-2.0
2,595
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0 * http://www.apache.org/licenses/LICENSE-2.0 */ package net.sf.mmm.util.io.base; import net.sf.mmm.util.exception.api.NlsNullPointerException; /** * This class is similar to {@link java.nio.ByteBuffer} but a lot simpler. * * @see java.nio.ByteBuffer#wrap(byte[], int, int) * * @author Joerg Hohwiller (hohwille at users.sourceforge.net) * @since 1.1.0 */ public class ByteArrayImpl extends AbstractByteArray { private final byte[] buffer; private int minimumIndex; private int maximumIndex; /** * The constructor. * * @param capacity is the {@code length} of the internal {@link #getBytes() buffer}. */ public ByteArrayImpl(int capacity) { this(new byte[capacity], 0, -1); } /** * The constructor. * * @param buffer is the internal {@link #getBytes() buffer}. */ public ByteArrayImpl(byte[] buffer) { this(buffer, 0, buffer.length - 1); } /** * The constructor. * * @param buffer is the internal {@link #getBytes() buffer}. * @param startIndex is the {@link #getCurrentIndex() current index} as well as the {@link #getMinimumIndex() minimum * index}. * @param maximumIndex is the {@link #getMaximumIndex() maximum index}. */ public ByteArrayImpl(byte[] buffer, int startIndex, int maximumIndex) { super(); if (buffer == null) { throw new NlsNullPointerException("buffer"); } this.buffer = buffer; this.minimumIndex = startIndex; this.maximumIndex = maximumIndex; } @Override public byte[] getBytes() { return this.buffer; } @Override public int getCurrentIndex() { return this.minimumIndex; } @Override public int getMinimumIndex() { return this.minimumIndex; } @Override public int getMaximumIndex() { return this.maximumIndex; } /** * This method sets the {@link #getMaximumIndex() maximumIndex}. This may be useful if the buffer should be reused. * <br> * <b>ATTENTION:</b><br> * Be very careful and only use this method if you know what you are doing! 
* * @param maximumIndex is the {@link #getMaximumIndex() maximumIndex} to set. It has to be in the range from {@code 0} * ( <code>{@link #getCurrentIndex() currentIndex} - 1</code>) to <code>{@link #getBytes()}.length</code>. */ protected void setMaximumIndex(int maximumIndex) { this.maximumIndex = maximumIndex; } @Override public ByteArrayImpl createSubArray(int minimum, int maximum) { checkSubArray(minimum, maximum); return new ByteArrayImpl(this.buffer, minimum, maximum); } @Override public String toString() { return new String(this.buffer, this.minimumIndex, getBytesAvailable()); } }
m-m-m/util
io/src/main/java/net/sf/mmm/util/io/base/ByteArrayImpl.java
Java
apache-2.0
2,897
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespaclient; public class ClusterDef { private final String name; public ClusterDef(String name) { this.name = name; } public String getName() { return name; } public String getRoute() { return "[Content:cluster=" + name + "]"; } }
vespa-engine/vespa
vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterDef.java
Java
apache-2.0
372
/* * Copyright (C) 2018 the original author or authors. * * This file is part of jBB Application Project. * * Licensed under the Apache License, Version 2.0 (the "License"); * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 */ package org.jbb.security.rest.oauth.client; import io.swagger.annotations.ApiModel; import lombok.AccessLevel; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.Getter; import lombok.NoArgsConstructor; import lombok.Setter; @Getter @Setter @Builder @ApiModel("OAuthClientSecret") @NoArgsConstructor(access = AccessLevel.PUBLIC) @AllArgsConstructor(access = AccessLevel.PRIVATE) public class ClientSecretDto { private String clientSecret; }
jbb-project/jbb
domain-rest/jbb-security-rest/src/main/java/org/jbb/security/rest/oauth/client/ClientSecretDto.java
Java
apache-2.0
751
import numpy as np class WordClusters(object): def __init__(self, vocab, clusters): self.vocab = vocab self.clusters = clusters def ix(self, word): """ Returns the index on self.vocab and self.clusters for 'word' """ temp = np.where(self.vocab == word)[0] if temp.size == 0: raise KeyError("Word not in vocabulary") else: return temp[0] def __getitem__(self, word): return self.get_cluster(word) def get_cluster(self, word): """ Returns the cluster number for a word in the vocabulary """ idx = self.ix(word) return self.clusters[idx] def get_words_on_cluster(self, cluster): return self.vocab[self.clusters == cluster] @classmethod def from_text(cls, fname): vocab = np.genfromtxt(fname, dtype=str, delimiter=" ", usecols=0) clusters = np.genfromtxt(fname, dtype=int, delimiter=" ", usecols=1) return cls(vocab=vocab, clusters=clusters)
danielfrg/word2vec
word2vec/wordclusters.py
Python
apache-2.0
1,041
package weixin.popular.bean.scan.crud; import weixin.popular.bean.scan.base.ProductGet; import weixin.popular.bean.scan.info.BrandInfo; public class ProductCreate extends ProductGet { private BrandInfo brand_info; public BrandInfo getBrand_info() { return brand_info; } public void setBrand_info(BrandInfo brand_info) { this.brand_info = brand_info; } }
liyiorg/weixin-popular
src/main/java/weixin/popular/bean/scan/crud/ProductCreate.java
Java
apache-2.0
395
package com.mattinsler.guiceymongo.data.query; import org.bson.BSON; /** * Created by IntelliJ IDEA. * User: mattinsler * Date: 12/29/10 * Time: 3:28 AM * To change this template use File | Settings | File Templates. */ public enum BSONType { Double(BSON.NUMBER), String(BSON.STRING), Object(BSON.OBJECT), Array(BSON.ARRAY), BinaryData(BSON.BINARY), ObjectId(BSON.OID), Boolean(BSON.BOOLEAN), Date(BSON.DATE), Null(BSON.NULL), RegularExpression(BSON.REGEX), Code(BSON.CODE), Symbol(BSON.SYMBOL), CodeWithScope(BSON.CODE_W_SCOPE), Integer(BSON.NUMBER_INT), Timestamp(BSON.TIMESTAMP), Long(BSON.NUMBER_LONG), MinKey(BSON.MINKEY), MaxKey(BSON.MAXKEY); private final byte _typeCode; BSONType(byte typeCode) { _typeCode = typeCode; } byte getTypeCode() { return _typeCode; } }
mattinsler/guiceymongo
src/main/java/com/mattinsler/guiceymongo/data/query/BSONType.java
Java
apache-2.0
891
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2017 Serge Rider (serge@jkiss.org) * Copyright (C) 2011-2012 Eugene Fradkin (eugene.fradkin@gmail.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ext.oracle.views; import org.eclipse.swt.SWT; import org.eclipse.swt.layout.GridData; import org.eclipse.swt.widgets.*; import org.jkiss.dbeaver.core.DBeaverCore; import org.jkiss.dbeaver.ext.oracle.model.OracleConstants; import org.jkiss.dbeaver.model.preferences.DBPPreferenceStore; import org.jkiss.dbeaver.model.DBPDataSourceContainer; import org.jkiss.dbeaver.ui.UIUtils; import org.jkiss.dbeaver.ui.preferences.PreferenceStoreDelegate; import org.jkiss.dbeaver.ui.preferences.TargetPrefPage; import org.jkiss.dbeaver.utils.PrefUtils; /** * PrefPageOracle */ public class PrefPageOracle extends TargetPrefPage { public static final String PAGE_ID = "org.jkiss.dbeaver.preferences.oracle.general"; //$NON-NLS-1$ private Text explainTableText; private Button rowidSupportCheck; private Button enableDbmsOuputCheck; public PrefPageOracle() { super(); setPreferenceStore(new PreferenceStoreDelegate(DBeaverCore.getGlobalPreferenceStore())); } @Override protected boolean hasDataSourceSpecificOptions(DBPDataSourceContainer dataSourceDescriptor) { DBPPreferenceStore store = dataSourceDescriptor.getPreferenceStore(); return store.contains(OracleConstants.PREF_EXPLAIN_TABLE_NAME) || store.contains(OracleConstants.PREF_SUPPORT_ROWID) || 
store.contains(OracleConstants.PREF_DBMS_OUTPUT) ; } @Override protected boolean supportsDataSourceSpecificOptions() { return true; } @Override protected Control createPreferenceContent(Composite parent) { Composite composite = UIUtils.createPlaceholder(parent, 1); { Group planGroup = UIUtils.createControlGroup(composite, "Execution plan", 2, GridData.FILL_HORIZONTAL, 0); Label descLabel = new Label(planGroup, SWT.WRAP); descLabel.setText("By default plan table in current or SYS schema will be used.\nYou may set some particular fully qualified plan table name here."); GridData gd = new GridData(GridData.HORIZONTAL_ALIGN_BEGINNING); gd.horizontalSpan = 2; descLabel.setLayoutData(gd); explainTableText = UIUtils.createLabelText(planGroup, "Plan table", "", SWT.BORDER, new GridData(GridData.FILL_HORIZONTAL)); } { Group planGroup = UIUtils.createControlGroup(composite, "Misc", 2, GridData.FILL_HORIZONTAL, 0); rowidSupportCheck = UIUtils.createLabelCheckbox(planGroup, "Use ROWID to identify rows", true); enableDbmsOuputCheck = UIUtils.createLabelCheckbox(planGroup, "Enable DBMS Output", true); } return composite; } @Override protected void loadPreferences(DBPPreferenceStore store) { explainTableText.setText(store.getString(OracleConstants.PREF_EXPLAIN_TABLE_NAME)); rowidSupportCheck.setSelection(store.getBoolean(OracleConstants.PREF_SUPPORT_ROWID)); enableDbmsOuputCheck.setSelection(store.getBoolean(OracleConstants.PREF_DBMS_OUTPUT)); } @Override protected void savePreferences(DBPPreferenceStore store) { store.setValue(OracleConstants.PREF_EXPLAIN_TABLE_NAME, explainTableText.getText()); store.setValue(OracleConstants.PREF_SUPPORT_ROWID, rowidSupportCheck.getSelection()); store.setValue(OracleConstants.PREF_DBMS_OUTPUT, enableDbmsOuputCheck.getSelection()); PrefUtils.savePreferenceStore(store); } @Override protected void clearPreferences(DBPPreferenceStore store) { store.setToDefault(OracleConstants.PREF_EXPLAIN_TABLE_NAME); 
store.setToDefault(OracleConstants.PREF_SUPPORT_ROWID); store.setToDefault(OracleConstants.PREF_DBMS_OUTPUT); } @Override protected String getPropertyPageID() { return PAGE_ID; } }
ruspl-afed/dbeaver
plugins/org.jkiss.dbeaver.ext.oracle/src/org/jkiss/dbeaver/ext/oracle/views/PrefPageOracle.java
Java
apache-2.0
4,753
package com.winsun.fruitmix.model; /** * Created by Administrator on 2016/7/6. */ public class Equipment { private String serviceName; private String host; private int port; public Equipment(String serviceName, String host, int port) { this.serviceName = serviceName; this.host = host; this.port = port; } public Equipment() { } public String getServiceName() { return serviceName; } public void setServiceName(String serviceName) { this.serviceName = serviceName; } public String getHost() { return host; } public void setHost(String host) { this.host = host; } public int getPort() { return port; } public void setPort(int port) { this.port = port; } }
andywu91/fruitMix-android
app/src/main/java/com/winsun/fruitmix/model/Equipment.java
Java
apache-2.0
815
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	"math"
	"time"

	"github.com/pingcap/tidb/util/collate"
)

// CompareInt64 returns an integer comparing the int64 x to y.
func CompareInt64(x, y int64) int {
	if x < y {
		return -1
	} else if x == y {
		return 0
	}
	return 1
}

// CompareUint64 returns an integer comparing the uint64 x to y.
func CompareUint64(x, y uint64) int {
	if x < y {
		return -1
	} else if x == y {
		return 0
	}
	return 1
}

// VecCompareUU returns []int64 comparing the []uint64 x to []uint64 y.
func VecCompareUU(x, y []uint64, res []int64) {
	n := len(x)
	for i := 0; i < n; i++ {
		if x[i] < y[i] {
			res[i] = -1
		} else if x[i] == y[i] {
			res[i] = 0
		} else {
			res[i] = 1
		}
	}
}

// VecCompareII returns []int64 comparing the []int64 x to []int64 y.
func VecCompareII(x, y, res []int64) {
	n := len(x)
	for i := 0; i < n; i++ {
		if x[i] < y[i] {
			res[i] = -1
		} else if x[i] == y[i] {
			res[i] = 0
		} else {
			res[i] = 1
		}
	}
}

// VecCompareUI returns []int64 comparing the []uint64 x to []int64 y.
// A negative y or an x above math.MaxInt64 can never be equal, so those
// cases are decided before the casted comparison.
func VecCompareUI(x []uint64, y, res []int64) {
	n := len(x)
	for i := 0; i < n; i++ {
		if y[i] < 0 || x[i] > math.MaxInt64 {
			res[i] = 1
		} else if int64(x[i]) < y[i] {
			res[i] = -1
		} else if int64(x[i]) == y[i] {
			res[i] = 0
		} else {
			res[i] = 1
		}
	}
}

// VecCompareIU returns []int64 comparing the []int64 x to []uint64 y.
func VecCompareIU(x []int64, y []uint64, res []int64) {
	n := len(x)
	for i := 0; i < n; i++ {
		// NOTE(review): y[i] is already uint64, so the uint64(...) conversion
		// below is redundant (kept byte-identical here).
		if x[i] < 0 || uint64(y[i]) > math.MaxInt64 {
			res[i] = -1
		} else if x[i] < int64(y[i]) {
			res[i] = -1
		} else if x[i] == int64(y[i]) {
			res[i] = 0
		} else {
			res[i] = 1
		}
	}
}

// CompareFloat64 returns an integer comparing the float64 x to y.
func CompareFloat64(x, y float64) int {
	if x < y {
		return -1
	} else if x == y {
		return 0
	}
	return 1
}

// CompareString returns an integer comparing the string x to y with the specified collation and length.
func CompareString(x, y, collation string, length int) int {
	return collate.GetCollator(collation).Compare(x, y, collate.NewCollatorOption(length))
}

// CompareDuration returns an integer comparing the duration x to y.
func CompareDuration(x, y time.Duration) int {
	if x < y {
		return -1
	} else if x == y {
		return 0
	}
	return 1
}
Cofyc/tidb
types/compare.go
GO
apache-2.0
2,788
package com.wangshan.service.impl; import com.wangshan.dao.UserDao; import com.wangshan.models.User; import com.wangshan.service.ValidateService; import com.wangshan.utils.gabriel.EncryptUtil; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; /** * Created by Administrator on 2015/11/15. */ @Service public class ValidateServiceImpl implements ValidateService{ @Autowired private UserDao userDao; @Override public Boolean validatePassword(String email, String password){ User user = userDao.getUserByEmail(email); if(user != null && new EncryptUtil().encrypt(password + "-" + user.getSalt(), "SHA-1").equals(user.getPassword())){ return true; } else { return false; } } @Override public Boolean validateMobileRepeat(String mobile){ return false; } @Override public Boolean validateEmailRepeat(String email){ return false; } }
sanyiwangshan/my_space
backend/src/main/java/com/wangshan/service/impl/ValidateServiceImpl.java
Java
apache-2.0
1,047
<?php
/**
 * Created by PhpStorm.
 * User: Mohammad Eslahi Sani
 * Date: 04/10/1394
 * Time: 9:06 PM
 *
 * Login handler: authenticates a POSTed username/password against the
 * sysUser table and stores the signed-in user's details in the session.
 */
//dl('php_pdo_sqlsrv_55_ts.dll');
// phpinfo();

if (isset($_SESSION['login'])) {
    // Already signed in — nothing to do here.
} elseif (isset($_POST['username']) && isset($_POST['password'])) {
    $u = $_POST['username'];
    $p = $_POST['password'];
    // exec("echo username and password are: $u --- $p >> debug.txt");

    $serverName = "MMDES"; //serverName\instanceName
    // Since UID and PWD are not specified in the $connectionInfo array,
    // the connection will be attempted using Windows Authentication.
    $connectionInfo = array("Database" => "officeAutomation");
    $conn = sqlsrv_connect($serverName, $connectionInfo);
    if ($conn) {
        // echo "Connection established.<br />";
    } else {
        // echo "Connection could not be established.<br />";
        // die( print_r( sqlsrv_errors(), true));
        exec("echo connection was not established >> debug.txt");
    }

    // SECURITY FIX: use a parameterized query instead of concatenating the
    // user-supplied username into the SQL text (SQL injection).
    $query = "SELECT * FROM sysUser WHERE Username = ?";
    $result = sqlsrv_query($conn, $query, array($u));
    if (!$result) die(print_r(sqlsrv_errors(), true));
    $row = sqlsrv_fetch_array($result);

    // SECURITY FIX: guard against a missing row and compare strictly.
    // With loose `==`, an unknown username plus an empty password compared
    // null == "" (true in PHP) and logged the attacker in.
    // NOTE(review): passwords appear to be stored in plain text — they should
    // be hashed per-user (e.g. password_hash()/password_verify()).
    if ($row !== null && $row !== false && $row['Password'] === $p) {
        $query2 = "SELECT firstName,lastName,Gender FROM Person JOIN Employee on Person.NationalID=Employee.NationalID WHERE PersonalID = ?";
        $result2 = sqlsrv_query($conn, $query2, array($row['PersonalID']));
        if (!$result2) die(print_r(sqlsrv_errors(), true));
        $row2 = sqlsrv_fetch_array($result2);
        // print_r($row2);

        $tempAry = array(
            'username'   => $row['Username'],
            'role'       => $row['Role'],
            'personalId' => $row['PersonalID'],
            'firstName'  => $row2['firstName'],
            'lastName'   => $row2['lastName'],
            'gender'     => $row2['Gender']
        );
        $_SESSION['login'] = $tempAry;
        header('location: ');
        // print_r($_SESSION);
    } else {
        header('location: ?invalid');
        die();
    }
} elseif (isset($_GET['invalid'])) {
?>
<body>
<div class="container sign-in-container">
    <p class="invalid-text">Invalid username or password,<br> Try again!</p>
    <form method="post" class="form-signin login-form">
        <h2 class="form-signin-heading">Please sign in</h2>
        <label for="inputEmail" class="sr-only">Username</label>
        <input name="username" type="text" id="inputEmail" class="username-input form-control" placeholder="Username" required autofocus>
        <label for="inputPassword" class="password-input sr-only">Password</label>
        <input name="password" type="password" id="inputPassword" class="form-control" placeholder="Password" required>
        <button class="submit-button btn btn-lg btn-primary btn-block" type="submit">Sign in</button>
    </form>
</div> <!-- /container -->
</body>
</html>
<?php
} else {
?>
<body>
<div class="container sign-in-container">
    <form method="post" class="form-signin login-form">
        <h2 class="form-signin-heading">Please sign in</h2>
        <label for="inputEmail" class="sr-only">Username</label>
        <input name="username" type="text" id="inputEmail" class="username-input form-control" placeholder="Username" required autofocus>
        <label for="inputPassword" class="password-input sr-only">Password</label>
        <input name="password" type="password" id="inputPassword" class="form-control" placeholder="Password" required>
        <button class="submit-button btn btn-lg btn-primary btn-block" type="submit">Sign in</button>
    </form>
</div> <!-- /container -->
</body>
</html>
<?php
}
?>
mrhsce/officeAutomation
login.php
PHP
apache-2.0
3,700
/* ### * IP: GHIDRA * REVIEWED: YES * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ghidra.util.prop; import ghidra.util.*; import java.io.*; /** * Handles general storage and retrieval of saveable objects indexed by long * keys. * */ public class SaveableObjectPropertySet extends PropertySet { private final static long serialVersionUID = 1; /** * Constructor for SaveableObjectPropertySet. * @param name the name associated with this property set. */ public SaveableObjectPropertySet(String name, Class<?> objectClass) { super(name, objectClass); if (!Saveable.class.isAssignableFrom(objectClass)) { throw new IllegalArgumentException("Class "+objectClass+ "does not implement the Saveable interface"); } try { objectClass.newInstance(); } catch(Exception e) { throw new IllegalArgumentException("Class "+objectClass+ "must be public and have a public, no args, constructor"); } } /** * @see PropertySet#getDataSize() */ @Override public int getDataSize() { return 20; } /** * Stores a saveable object at the given index. Any object currently at * that index will be replaced by the new object. * @param index the index at which to store the saveable object. * @param value the saveable object to store. 
*/ public void putObject(long index, Saveable value) { PropertyPage page = getOrCreatePage(getPageID(index)); int n = page.getSize(); page.addSaveableObject(getPageOffset(index), value); numProperties += page.getSize() - n; } /** * Retrieves the saveable object stored at the given index. * @param index the index at which to retrieve the saveable object. * @return the saveable object stored at the given index or null if no * object is stored at the index. */ public Saveable getObject(long index) { PropertyPage page = getPage(getPageID(index)); if (page != null) { return page.getSaveableObject(getPageOffset(index)); } return null; } /* (non-Javadoc) * @see ghidra.util.prop.PropertySet#moveIndex(long, long) */ @Override protected void moveIndex(long from, long to) { Saveable value = getObject(from); remove(from); putObject(to, value); } /** * saves the property at the given index to the given output stream. */ @Override protected void saveProperty(ObjectOutputStream oos, long index) throws IOException { Saveable obj = getObject(index); oos.writeObject(obj.getClass().getName()); obj.save(new ObjectStorageStreamAdapter(oos)); } /** * restores the property from the input stream to the given index. */ @Override protected void restoreProperty(ObjectInputStream ois, long index) throws IOException, ClassNotFoundException { try { String className = (String)ois.readObject(); Class<?> c = Class.forName(className); Saveable obj = (Saveable)c.newInstance(); obj.restore(new ObjectStorageStreamAdapter(ois)); putObject(index, obj); } catch (Exception e) { Msg.showError(this, null, null, null, e); } } /** * * @see ghidra.util.prop.PropertySet#applyValue(PropertyVisitor, long) */ @Override public void applyValue(PropertyVisitor visitor, long addr) { Saveable obj = getObject(addr); if (obj != null) { visitor.visit(obj); } } }
NationalSecurityAgency/ghidra
Ghidra/Framework/Generic/src/main/java/ghidra/util/prop/SaveableObjectPropertySet.java
Java
apache-2.0
3,858
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Text.RegularExpressions;
using HtmlAgilityPack;

namespace Html2Markdown.Replacement
{
	/// <summary>
	/// Converts individual HTML constructs (lists, pre, img, anchors, code,
	/// blockquote, paragraphs, entities) into their Markdown equivalents.
	/// Each Replace* method takes an HTML string and returns the transformed HTML.
	/// </summary>
	internal static class HtmlParser
	{
		// Matches innermost <ul>/<ol> elements — ones with no nested list inside.
		private static readonly Regex NoChildren = new Regex(@"<(ul|ol)\b[^>]*>(?:(?!<ul|<ol)[\s\S])*?<\/\1>");

		/// <summary>
		/// Converts HTML lists to Markdown, repeatedly replacing the innermost
		/// list until none remain so nested lists collapse outward.
		/// </summary>
		internal static string ReplaceLists(string html)
		{
			var finalHtml = html;

			while (HasNoChildLists(finalHtml))
			{
				var listToReplace = NoChildren.Match(finalHtml).Value;
				var formattedList = ReplaceList(listToReplace);
				finalHtml = finalHtml.Replace(listToReplace, formattedList);
			}

			return finalHtml;
		}

		/// <summary>
		/// Converts a single list element (with no nested lists) to Markdown.
		/// Ordered lists become "1. " items, unordered become "* " items.
		/// </summary>
		private static string ReplaceList(string html)
		{
			var list = Regex.Match(html, @"<(ul|ol)\b[^>]*>([\s\S]*?)<\/\1>");
			var listType = list.Groups[1].Value;
			var listItems = Regex.Split(list.Groups[2].Value, "<li[^>]*>");

			if(listItems.All(string.IsNullOrEmpty)) {
				return String.Empty;
			}

			// The first split element is the text before the first <li>; drop it.
			listItems = listItems.Skip(1).ToArray();

			var counter = 0;
			var markdownList = new List<string>();
			listItems.ToList().ForEach(listItem =>
			{
				// counter is captured by the lambda so numbering carries across items.
				var listPrefix = (listType.Equals("ol")) ? $"{++counter}. " : "* ";
				var finalList = listItem.Replace(@"</li>", string.Empty);

				if (finalList.Trim().Length == 0) {
					return; // skip items that are only whitespace
				}

				finalList = Regex.Replace(finalList, @"^\s+", string.Empty);
				finalList = Regex.Replace(finalList, @"\n{2}", $"{Environment.NewLine}{Environment.NewLine} ");
				// indent nested lists
				finalList = Regex.Replace(finalList, @"\n([ ]*)+(\*|\d+\.)", "\n$1 $2");
				markdownList.Add($"{listPrefix}{finalList}");
			});

			return Environment.NewLine + Environment.NewLine + markdownList.Aggregate((current, item) => current + Environment.NewLine + item);
		}

		// True while the HTML still contains a list with no nested list inside.
		private static bool HasNoChildLists(string html)
		{
			return NoChildren.Match(html).Success;
		}

		/// <summary>Converts &lt;pre&gt; blocks into indented Markdown code blocks.</summary>
		internal static string ReplacePre(string html)
		{
			var doc = GetHtmlDocument(html);
			var nodes = doc.DocumentNode.SelectNodes("//pre");
			if (nodes == null) {
				return html;
			}

			nodes.ToList().ForEach(node =>
			{
				var tagContents = node.InnerHtml;
				var markdown = ConvertPre(tagContents);
				ReplaceNode(node, markdown);
			});

			return doc.DocumentNode.OuterHtml;
		}

		// Normalizes tabs and indents each line of the <pre> body.
		private static string ConvertPre(string html)
		{
			var tag = TabsToSpaces(html);
			tag = IndentNewLines(tag);
			return Environment.NewLine + Environment.NewLine + tag + Environment.NewLine;
		}

		private static string IndentNewLines(string tag)
		{
			return tag.Replace(Environment.NewLine, Environment.NewLine + " ");
		}

		private static string TabsToSpaces(string tag)
		{
			return tag.Replace("\t", " ");
		}

		/// <summary>Converts &lt;img&gt; tags to Markdown image syntax ![alt](src "title").</summary>
		internal static string ReplaceImg(string html)
		{
			var doc = GetHtmlDocument(html);
			var nodes = doc.DocumentNode.SelectNodes("//img");
			if (nodes == null) {
				return html;
			}

			nodes.ToList().ForEach(node =>
			{
				var src = node.Attributes.GetAttributeOrEmpty("src");
				var alt = node.Attributes.GetAttributeOrEmpty("alt");
				var title = node.Attributes.GetAttributeOrEmpty("title");

				// Title is appended only when present.
				var markdown = $@"![{alt}]({src}{((title.Length > 0) ? $" \"{title}\"" : "")})";
				ReplaceNode(node, markdown);
			});

			return doc.DocumentNode.OuterHtml;
		}

		/// <summary>
		/// Converts anchor tags to Markdown links [text](href "title");
		/// anchors with neither text nor href are removed entirely.
		/// </summary>
		public static string ReplaceAnchor(string html)
		{
			var doc = GetHtmlDocument(html);
			var nodes = doc.DocumentNode.SelectNodes("//a");
			if (nodes == null) {
				return html;
			}

			nodes.ToList().ForEach(node =>
			{
				var linkText = node.InnerHtml;
				var href = node.Attributes.GetAttributeOrEmpty("href");
				var title = node.Attributes.GetAttributeOrEmpty("title");

				var markdown = "";

				if (!IsEmptyLink(linkText, href))
				{
					markdown = $@"[{linkText}]({href}{((title.Length > 0) ? $" \"{title}\"" : "")})";
				}

				ReplaceNode(node, markdown);
			});

			return doc.DocumentNode.OuterHtml;
		}

		/// <summary>
		/// Converts &lt;code&gt; elements: single-line content becomes inline
		/// `code`, multi-line content becomes a fenced ``` block.
		/// </summary>
		public static string ReplaceCode(string html)
		{
			var finalHtml = html;
			var doc = GetHtmlDocument(finalHtml);
			var nodes = doc.DocumentNode.SelectNodes("//code");
			if (nodes == null) {
				return finalHtml;
			}

			nodes.ToList().ForEach(node =>
			{
				var code = node.InnerHtml;

				string markdown;
				if(IsSingleLineCodeBlock(code))
				{
					markdown = "`" + code + "`";
				}
				else
				{
					markdown = ReplaceBreakTagsWithNewLines(code);
					// Trim a single leading/trailing CRLF before fencing.
					markdown = Regex.Replace(markdown, "^\r\n", "");
					markdown = Regex.Replace(markdown, "\r\n$", "");
					markdown = "```" + Environment.NewLine + markdown + Environment.NewLine + "```";
				}

				ReplaceNode(node, markdown);
			});

			return doc.DocumentNode.OuterHtml;
		}

		// NOTE(review): despite its name this strips <br> tags entirely rather
		// than replacing them with newlines — confirm that is intended.
		private static string ReplaceBreakTagsWithNewLines(string code)
		{
			return Regex.Replace(code, "<\\s*?/?\\s*?br\\s*?>", "");
		}

		private static bool IsSingleLineCodeBlock(string code)
		{
			// single line code blocks do not have new line characters
			return code.IndexOf(Environment.NewLine, StringComparison.Ordinal) == -1;
		}

		/// <summary>Converts &lt;blockquote&gt; elements to "&gt; "-prefixed Markdown lines.</summary>
		public static string ReplaceBlockquote(string html)
		{
			var doc = GetHtmlDocument(html);
			var nodes = doc.DocumentNode.SelectNodes("//blockquote");
			if (nodes == null) {
				return html;
			}

			nodes.ToList().ForEach(node =>
			{
				var quote = node.InnerHtml;
				var lines = quote.TrimStart().Split(new[] { Environment.NewLine }, StringSplitOptions.None);
				var markdown = "";

				lines.ToList().ForEach(line =>
				{
					markdown += $"> {line.TrimEnd()}{Environment.NewLine}";
				});

				// Drop trailing empty "> " lines left by the split.
				markdown = Regex.Replace(markdown, @"(>\s\r\n)+$", "");

				markdown = Environment.NewLine + Environment.NewLine + markdown + Environment.NewLine + Environment.NewLine;

				ReplaceNode(node, markdown);
			});

			return doc.DocumentNode.OuterHtml;
		}

		/// <summary>Decodes HTML entities (e.g. &amp;amp;) to literal characters.
		/// Method name keeps its historical spelling for API compatibility.</summary>
		public static string ReplaceEntites(string html)
		{
			return WebUtility.HtmlDecode(html);
		}

		/// <summary>Converts &lt;p&gt; elements to blank-line separated paragraphs,
		/// collapsing internal whitespace runs to single spaces.</summary>
		public static string ReplaceParagraph(string html)
		{
			var doc = GetHtmlDocument(html);
			var nodes = doc.DocumentNode.SelectNodes("//p");
			if (nodes == null) {
				return html;
			}

			nodes.ToList().ForEach(node =>
			{
				var text = node.InnerHtml;
				var markdown = Regex.Replace(text, @"\s+", " ");
				markdown = markdown.Replace(Environment.NewLine, " ");
				markdown = Environment.NewLine + Environment.NewLine + markdown + Environment.NewLine;
				ReplaceNode(node, markdown);
			});

			return doc.DocumentNode.OuterHtml;
		}

		// A link is "empty" when it has neither visible text nor an href target.
		private static bool IsEmptyLink(string linkText, string href)
		{
			var length = linkText.Length + href.Length;
			return length == 0;
		}

		private static HtmlDocument GetHtmlDocument(string html)
		{
			var doc = new HtmlDocument();
			doc.LoadHtml(html);
			return doc;
		}

		// Replaces a node with Markdown text, or removes it when the Markdown is empty.
		private static void ReplaceNode(HtmlNode node, string markdown)
		{
			if (string.IsNullOrEmpty(markdown))
			{
				node.ParentNode.RemoveChild(node);
			}
			else
			{
				node.ReplaceNodeWithString(markdown);
			}
		}
	}
}
baynezy/Html2Markdown
src/Html2Markdown/Replacement/HtmlParser.cs
C#
apache-2.0
7,024
"""api_server URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin version = 'v1.0' urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'api/%s/' % version, include('apis.urls')) ]
AutohomeOps/Assets_Report
api_server/api_server/urls.py
Python
apache-2.0
846
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package coniks provides hashing for maps.
package coniks

import (
	"bytes"
	"crypto"
	"encoding/binary"
	"fmt"

	"github.com/golang/glog"
	"github.com/google/trillian"
	"github.com/google/trillian/merkle/hashers"
)

func init() {
	// Register this hasher for both CONIKS hash strategies so it can be
	// looked up by strategy enum at runtime.
	hashers.RegisterMapHasher(trillian.HashStrategy_CONIKS_SHA512_256, Default)
	hashers.RegisterMapHasher(trillian.HashStrategy_CONIKS_SHA256, New(crypto.SHA256))
}

// Domain separation prefixes
var (
	leafIdentifier  = []byte("L")
	emptyIdentifier = []byte("E")

	// Default is the standard CONIKS hasher.
	Default = New(crypto.SHA512_256)

	// Some zeroes, to avoid allocating temporary slices.
	zeroes = make([]byte, 32)
)

// hasher implements the sparse merkle tree hashing algorithm specified in the CONIKS paper.
type hasher struct {
	crypto.Hash
}

// New creates a new hashers.TreeHasher using the passed in hash function.
func New(h crypto.Hash) hashers.MapHasher {
	return &hasher{Hash: h}
}

// EmptyRoot returns the root of an empty tree.
// Not supported by this implementation; calling it panics.
func (m *hasher) EmptyRoot() []byte {
	panic("EmptyRoot() not defined for coniks.Hasher")
}

// HashEmpty returns the hash of an empty branch at a given height.
// A height of 0 indicates the hash of an empty leaf.
// Empty branches within the tree are plain interior nodes e1 = H(e0, e0) etc.
func (m *hasher) HashEmpty(treeID int64, index []byte, height int) []byte {
	depth := m.BitLen() - height

	buf := bytes.NewBuffer(make([]byte, 0, 32))
	h := m.New()
	buf.Write(emptyIdentifier)
	// binary.Write to a bytes.Buffer cannot fail, so its error is ignored.
	binary.Write(buf, binary.BigEndian, uint64(treeID))
	m.writeMaskedIndex(buf, index, depth)
	binary.Write(buf, binary.BigEndian, uint32(depth))
	h.Write(buf.Bytes())
	r := h.Sum(nil)
	if glog.V(5) {
		glog.Infof("HashEmpty(%x, %d): %x", index, depth, r)
	}
	return r
}

// HashLeaf calculate the merkle tree leaf value:
// H(Identifier || treeID || depth || index || dataHash)
func (m *hasher) HashLeaf(treeID int64, index []byte, leaf []byte) []byte {
	depth := m.BitLen()
	buf := bytes.NewBuffer(make([]byte, 0, 32+len(leaf)))
	h := m.New()
	buf.Write(leafIdentifier)
	// binary.Write to a bytes.Buffer cannot fail, so its error is ignored.
	binary.Write(buf, binary.BigEndian, uint64(treeID))
	m.writeMaskedIndex(buf, index, depth)
	binary.Write(buf, binary.BigEndian, uint32(depth))
	buf.Write(leaf)
	h.Write(buf.Bytes())
	p := h.Sum(nil)
	if glog.V(5) {
		glog.Infof("HashLeaf(%x, %d, %s): %x", index, depth, leaf, p)
	}
	return p
}

// HashChildren returns the internal Merkle tree node hash of the two child nodes l and r.
// The hashed structure is H(l || r).
func (m *hasher) HashChildren(l, r []byte) []byte {
	buf := bytes.NewBuffer(make([]byte, 0, 32+len(l)+len(r)))
	h := m.New()
	buf.Write(l)
	buf.Write(r)
	h.Write(buf.Bytes())
	p := h.Sum(nil)
	if glog.V(5) {
		glog.Infof("HashChildren(%x, %x): %x", l, r, p)
	}
	return p
}

// BitLen returns the number of bits in the hash function.
func (m *hasher) BitLen() int {
	return m.Size() * 8
}

// leftmask contains bitmasks indexed such that the left x bits are set. It is
// indexed by byte position from 0-7 0 is special cased to 0xFF since 8 mod 8
// is 0. leftmask is only used to mask the last byte.
var leftmask = [8]byte{0xFF, 0x80, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC, 0xFE}

// writeMaskedIndex writes the left depth bits of index directly to a Buffer (which never
// returns an error on writes). This is then padded with zero bits to the Size()
// of the index values in use by this hasher. This avoids the need to allocate
// space for and copy a value that will then be discarded immediately.
func (m *hasher) writeMaskedIndex(b *bytes.Buffer, index []byte, depth int) {
	if got, want := len(index), m.Size(); got != want {
		panic(fmt.Sprintf("index len: %d, want %d", got, want))
	}
	if got, want := depth, m.BitLen(); got < 0 || got > want {
		panic(fmt.Sprintf("depth: %d, want <= %d && >= 0", got, want))
	}

	prevLen := b.Len()
	if depth > 0 {
		// Write the first depthBytes, if there are any complete bytes.
		depthBytes := depth >> 3
		if depthBytes > 0 {
			b.Write(index[:depthBytes])
		}
		// Mask off unwanted bits in the last byte, if there is an incomplete one.
		if depth%8 != 0 {
			b.WriteByte(index[depthBytes] & leftmask[depth%8])
		}
	}
	// Pad to the correct length with zeros. Allow for future hashers that
	// might be > 256 bits.
	needZeros := prevLen + len(index) - b.Len()
	for needZeros > 0 {
		chunkSize := needZeros
		if chunkSize > 32 {
			chunkSize = 32
		}
		b.Write(zeroes[:chunkSize])
		needZeros -= chunkSize
	}
}
Martin2112/trillian
merkle/coniks/coniks.go
GO
apache-2.0
5,031
/*
 * Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

/*
 * Do not modify this file. This file is generated from the waf-2015-08-24.normal.json service model.
 */
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Net;
using System.Text;
using System.Xml.Serialization;

using Amazon.WAF.Model;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
using Amazon.Runtime.Internal.Transform;
using Amazon.Runtime.Internal.Util;
using ThirdParty.Json.LitJson;

namespace Amazon.WAF.Model.Internal.MarshallTransformations
{
    /// <summary>
    /// Response Unmarshaller for RuleSummary Object
    /// </summary>
    public class RuleSummaryUnmarshaller : IUnmarshaller<RuleSummary, XmlUnmarshallerContext>, IUnmarshaller<RuleSummary, JsonUnmarshallerContext>
    {
        /// <summary>
        /// Unmarshaller the response from the service to the response class.
        /// XML unmarshalling is not supported for this shape — JSON only.
        /// </summary>
        /// <param name="context"></param>
        /// <returns></returns>
        RuleSummary IUnmarshaller<RuleSummary, XmlUnmarshallerContext>.Unmarshall(XmlUnmarshallerContext context)
        {
            throw new NotImplementedException();
        }

        /// <summary>
        /// Unmarshaller the response from the service to the response class.
        /// Reads the current JSON object and maps the "Name" and "RuleId"
        /// members onto a new RuleSummary; returns null for a JSON null.
        /// </summary>
        /// <param name="context"></param>
        /// <returns></returns>
        public RuleSummary Unmarshall(JsonUnmarshallerContext context)
        {
            // Advance onto the object's first token; bail out on JSON null.
            context.Read();
            if (context.CurrentTokenType == JsonToken.Null)
                return null;

            RuleSummary unmarshalledObject = new RuleSummary();

            // Iterate members at this nesting depth only.
            int targetDepth = context.CurrentDepth;
            while (context.ReadAtDepth(targetDepth))
            {
                if (context.TestExpression("Name", targetDepth))
                {
                    var unmarshaller = StringUnmarshaller.Instance;
                    unmarshalledObject.Name = unmarshaller.Unmarshall(context);
                    continue;
                }
                if (context.TestExpression("RuleId", targetDepth))
                {
                    var unmarshaller = StringUnmarshaller.Instance;
                    unmarshalledObject.RuleId = unmarshaller.Unmarshall(context);
                    continue;
                }
            }

            return unmarshalledObject;
        }


        private static RuleSummaryUnmarshaller _instance = new RuleSummaryUnmarshaller();

        /// <summary>
        /// Gets the singleton.
        /// </summary>
        public static RuleSummaryUnmarshaller Instance
        {
            get
            {
                return _instance;
            }
        }
    }
}
rafd123/aws-sdk-net
sdk/src/Services/WAF/Generated/Model/Internal/MarshallTransformations/RuleSummaryUnmarshaller.cs
C#
apache-2.0
3,306
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoints

import (
	"fmt"
	"net/http"
	gpath "path"
	"reflect"
	"sort"
	"strings"
	"time"
	"unicode"

	restful "github.com/emicklei/go-restful"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apiserver/pkg/admission"
	"k8s.io/apiserver/pkg/endpoints/discovery"
	"k8s.io/apiserver/pkg/endpoints/handlers"
	"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
	"k8s.io/apiserver/pkg/endpoints/handlers/negotiation"
	"k8s.io/apiserver/pkg/endpoints/metrics"
	"k8s.io/apiserver/pkg/features"
	"k8s.io/apiserver/pkg/registry/rest"
	genericfilters "k8s.io/apiserver/pkg/server/filters"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

// OpenAPI extension keys attached to generated routes so clients can recover
// the GVK and verb for each endpoint.
const (
	ROUTE_META_GVK    = "x-kubernetes-group-version-kind"
	ROUTE_META_ACTION = "x-kubernetes-action"
)

// APIInstaller registers the REST handlers for a single API group/version
// under a fixed path prefix.
type APIInstaller struct {
	group                        *APIGroupVersion
	prefix                       string // Path prefix where API resources are to be registered.
	minRequestTimeout            time.Duration
	enableAPIResponseCompression bool
}

// Struct capturing information about an action ("GET", "POST", "WATCH", "PROXY", etc).
type action struct {
	Verb          string               // Verb identifying the action ("GET", "POST", "WATCH", "PROXY", etc).
	Path          string               // The path of the action
	Params        []*restful.Parameter // List of parameters associated with the action.
	Namer         handlers.ScopeNamer
	AllNamespaces bool // true iff the action is namespaced but works on aggregate result for all namespaces
}

// An interface to see if one storage supports override its default verb for monitoring
type StorageMetricsOverride interface {
	// OverrideMetricsVerb gives a storage object an opportunity to override the verb reported to the metrics endpoint
	OverrideMetricsVerb(oldVerb string) (newVerb string)
}

// An interface to see if an object supports swagger documentation as a method
type documentable interface {
	SwaggerDoc() map[string]string
}

// toDiscoveryKubeVerb maps an action.Verb to the logical kube verb, used for discovery
var toDiscoveryKubeVerb = map[string]string{
	"CONNECT":          "", // do not list in discovery.
	"DELETE":           "delete",
	"DELETECOLLECTION": "deletecollection",
	"GET":              "get",
	"LIST":             "list",
	"PATCH":            "patch",
	"POST":             "create",
	"PROXY":            "proxy",
	"PUT":              "update",
	"WATCH":            "watch",
	"WATCHLIST":        "watch",
}

// Install handlers for API resources.
// It returns the discovery metadata for every registered resource, the web
// service they were attached to, and one error per resource that failed to
// register (registration continues past individual failures).
func (a *APIInstaller) Install() ([]metav1.APIResource, *restful.WebService, []error) {
	var apiResources []metav1.APIResource
	var errors []error
	ws := a.newWebService()

	// Register the paths in a deterministic (sorted) order to get a deterministic swagger spec.
	paths := make([]string, len(a.group.Storage))
	var i int = 0
	for path := range a.group.Storage {
		paths[i] = path
		i++
	}
	sort.Strings(paths)
	for _, path := range paths {
		apiResource, err := a.registerResourceHandlers(path, a.group.Storage[path], ws)
		if err != nil {
			errors = append(errors, fmt.Errorf("error in registering resource: %s, %v", path, err))
		}
		if apiResource != nil {
			apiResources = append(apiResources, *apiResource)
		}
	}
	return apiResources, ws, errors
}

// newWebService creates a new restful webservice with the api installer's prefix and version.
func (a *APIInstaller) newWebService() *restful.WebService { ws := new(restful.WebService) ws.Path(a.prefix) // a.prefix contains "prefix/group/version" ws.Doc("API at " + a.prefix) // Backwards compatibility, we accepted objects with empty content-type at V1. // If we stop using go-restful, we can default empty content-type to application/json on an // endpoint by endpoint basis ws.Consumes("*/*") mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer) ws.Produces(append(mediaTypes, streamMediaTypes...)...) ws.ApiVersion(a.group.GroupVersion.String()) return ws } // calculate the storage gvk, the gvk objects are converted to before persisted to the etcd. func getStorageVersionKind(storageVersioner runtime.GroupVersioner, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) { object := storage.New() fqKinds, _, err := typer.ObjectKinds(object) if err != nil { return schema.GroupVersionKind{}, err } gvk, ok := storageVersioner.KindForGroupVersionKinds(fqKinds) if !ok { return schema.GroupVersionKind{}, fmt.Errorf("cannot find the storage version kind for %v", reflect.TypeOf(object)) } return gvk, nil } // GetResourceKind returns the external group version kind registered for the given storage // object. If the storage object is a subresource and has an override supplied for it, it returns // the group version kind supplied in the override. func GetResourceKind(groupVersion schema.GroupVersion, storage rest.Storage, typer runtime.ObjectTyper) (schema.GroupVersionKind, error) { // Let the storage tell us exactly what GVK it has if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { return gvkProvider.GroupVersionKind(groupVersion), nil } object := storage.New() fqKinds, _, err := typer.ObjectKinds(object) if err != nil { return schema.GroupVersionKind{}, err } // a given go type can have multiple potential fully qualified kinds. 
Find the one that corresponds with the group // we're trying to register here fqKindToRegister := schema.GroupVersionKind{} for _, fqKind := range fqKinds { if fqKind.Group == groupVersion.Group { fqKindToRegister = groupVersion.WithKind(fqKind.Kind) break } } if fqKindToRegister.Empty() { return schema.GroupVersionKind{}, fmt.Errorf("unable to locate fully qualified kind for %v: found %v when registering for %v", reflect.TypeOf(object), fqKinds, groupVersion) } // group is guaranteed to match based on the check above return fqKindToRegister, nil } func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storage, ws *restful.WebService) (*metav1.APIResource, error) { admit := a.group.Admit optionsExternalVersion := a.group.GroupVersion if a.group.OptionsExternalVersion != nil { optionsExternalVersion = *a.group.OptionsExternalVersion } resource, subresource, err := splitSubresource(path) if err != nil { return nil, err } group, version := a.group.GroupVersion.Group, a.group.GroupVersion.Version fqKindToRegister, err := GetResourceKind(a.group.GroupVersion, storage, a.group.Typer) if err != nil { return nil, err } versionedPtr, err := a.group.Creater.New(fqKindToRegister) if err != nil { return nil, err } defaultVersionedObject := indirectArbitraryPointer(versionedPtr) kind := fqKindToRegister.Kind isSubresource := len(subresource) > 0 // If there is a subresource, namespace scoping is defined by the parent resource namespaceScoped := true if isSubresource { parentStorage, ok := a.group.Storage[resource] if !ok { return nil, fmt.Errorf("missing parent storage: %q", resource) } scoper, ok := parentStorage.(rest.Scoper) if !ok { return nil, fmt.Errorf("%q must implement scoper", resource) } namespaceScoped = scoper.NamespaceScoped() } else { scoper, ok := storage.(rest.Scoper) if !ok { return nil, fmt.Errorf("%q must implement scoper", resource) } namespaceScoped = scoper.NamespaceScoped() } // what verbs are supported by the storage, used to know 
what verbs we support per path creater, isCreater := storage.(rest.Creater) namedCreater, isNamedCreater := storage.(rest.NamedCreater) lister, isLister := storage.(rest.Lister) getter, isGetter := storage.(rest.Getter) getterWithOptions, isGetterWithOptions := storage.(rest.GetterWithOptions) gracefulDeleter, isGracefulDeleter := storage.(rest.GracefulDeleter) collectionDeleter, isCollectionDeleter := storage.(rest.CollectionDeleter) updater, isUpdater := storage.(rest.Updater) patcher, isPatcher := storage.(rest.Patcher) watcher, isWatcher := storage.(rest.Watcher) connecter, isConnecter := storage.(rest.Connecter) storageMeta, isMetadata := storage.(rest.StorageMetadata) storageVersionProvider, isStorageVersionProvider := storage.(rest.StorageVersionProvider) if !isMetadata { storageMeta = defaultStorageMetadata{} } exporter, isExporter := storage.(rest.Exporter) if !isExporter { exporter = nil } versionedExportOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ExportOptions")) if err != nil { return nil, err } if isNamedCreater { isCreater = true } var versionedList interface{} if isLister { list := lister.NewList() listGVKs, _, err := a.group.Typer.ObjectKinds(list) if err != nil { return nil, err } versionedListPtr, err := a.group.Creater.New(a.group.GroupVersion.WithKind(listGVKs[0].Kind)) if err != nil { return nil, err } versionedList = indirectArbitraryPointer(versionedListPtr) } versionedListOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("ListOptions")) if err != nil { return nil, err } versionedCreateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("CreateOptions")) if err != nil { return nil, err } versionedPatchOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("PatchOptions")) if err != nil { return nil, err } versionedUpdateOptions, err := a.group.Creater.New(optionsExternalVersion.WithKind("UpdateOptions")) if err != nil { return nil, err } var versionedDeleteOptions 
runtime.Object var versionedDeleterObject interface{} if isGracefulDeleter { versionedDeleteOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind("DeleteOptions")) if err != nil { return nil, err } versionedDeleterObject = indirectArbitraryPointer(versionedDeleteOptions) } versionedStatusPtr, err := a.group.Creater.New(optionsExternalVersion.WithKind("Status")) if err != nil { return nil, err } versionedStatus := indirectArbitraryPointer(versionedStatusPtr) var ( getOptions runtime.Object versionedGetOptions runtime.Object getOptionsInternalKind schema.GroupVersionKind getSubpath bool ) if isGetterWithOptions { getOptions, getSubpath, _ = getterWithOptions.NewGetOptions() getOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(getOptions) if err != nil { return nil, err } getOptionsInternalKind = getOptionsInternalKinds[0] versionedGetOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(getOptionsInternalKind.Kind)) if err != nil { versionedGetOptions, err = a.group.Creater.New(optionsExternalVersion.WithKind(getOptionsInternalKind.Kind)) if err != nil { return nil, err } } isGetter = true } var versionedWatchEvent interface{} if isWatcher { versionedWatchEventPtr, err := a.group.Creater.New(a.group.GroupVersion.WithKind("WatchEvent")) if err != nil { return nil, err } versionedWatchEvent = indirectArbitraryPointer(versionedWatchEventPtr) } var ( connectOptions runtime.Object versionedConnectOptions runtime.Object connectOptionsInternalKind schema.GroupVersionKind connectSubpath bool ) if isConnecter { connectOptions, connectSubpath, _ = connecter.NewConnectOptions() if connectOptions != nil { connectOptionsInternalKinds, _, err := a.group.Typer.ObjectKinds(connectOptions) if err != nil { return nil, err } connectOptionsInternalKind = connectOptionsInternalKinds[0] versionedConnectOptions, err = a.group.Creater.New(a.group.GroupVersion.WithKind(connectOptionsInternalKind.Kind)) if err != nil { versionedConnectOptions, err = 
a.group.Creater.New(optionsExternalVersion.WithKind(connectOptionsInternalKind.Kind)) if err != nil { return nil, err } } } } allowWatchList := isWatcher && isLister // watching on lists is allowed only for kinds that support both watch and list. nameParam := ws.PathParameter("name", "name of the "+kind).DataType("string") pathParam := ws.PathParameter("path", "path to the resource").DataType("string") params := []*restful.Parameter{} actions := []action{} var resourceKind string kindProvider, ok := storage.(rest.KindProvider) if ok { resourceKind = kindProvider.Kind() } else { resourceKind = kind } tableProvider, _ := storage.(rest.TableConvertor) var apiResource metav1.APIResource if utilfeature.DefaultFeatureGate.Enabled(features.StorageVersionHash) && isStorageVersionProvider && storageVersionProvider.StorageVersion() != nil { versioner := storageVersionProvider.StorageVersion() gvk, err := getStorageVersionKind(versioner, storage, a.group.Typer) if err != nil { return nil, err } apiResource.StorageVersionHash = discovery.StorageVersionHash(gvk.Group, gvk.Version, gvk.Kind) } // Get the list of actions for the given scope. switch { case !namespaceScoped: // Handle non-namespace scoped resources like nodes. resourcePath := resource resourceParams := params itemPath := resourcePath + "/{name}" nameParams := append(params, nameParam) proxyParams := append(nameParams, pathParam) suffix := "" if isSubresource { suffix = "/" + subresource itemPath = itemPath + suffix resourcePath = itemPath resourceParams = nameParams } apiResource.Name = path apiResource.Namespaced = false apiResource.Kind = resourceKind namer := handlers.ContextBasedNaming{ SelfLinker: a.group.Linker, ClusterScoped: true, SelfLinkPathPrefix: gpath.Join(a.prefix, resource) + "/", SelfLinkPathSuffix: suffix, } // Handler for standard REST verbs (GET, PUT, POST and DELETE). 
// Add actions at the resource path: /api/apiVersion/resource actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister) actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater) actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter) // DEPRECATED in 1.11 actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList) // Add actions at the item path: /api/apiVersion/resource/{name} actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter) if getSubpath { actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter) } actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater) actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher) actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter) // DEPRECATED in 1.11 actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher) actions = appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter) actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath) default: namespaceParamName := "namespaces" // Handler for standard REST verbs (GET, PUT, POST and DELETE). 
namespaceParam := ws.PathParameter("namespace", "object name and auth scope, such as for teams and projects").DataType("string") namespacedPath := namespaceParamName + "/{namespace}/" + resource namespaceParams := []*restful.Parameter{namespaceParam} resourcePath := namespacedPath resourceParams := namespaceParams itemPath := namespacedPath + "/{name}" nameParams := append(namespaceParams, nameParam) proxyParams := append(nameParams, pathParam) itemPathSuffix := "" if isSubresource { itemPathSuffix = "/" + subresource itemPath = itemPath + itemPathSuffix resourcePath = itemPath resourceParams = nameParams } apiResource.Name = path apiResource.Namespaced = true apiResource.Kind = resourceKind namer := handlers.ContextBasedNaming{ SelfLinker: a.group.Linker, ClusterScoped: false, SelfLinkPathPrefix: gpath.Join(a.prefix, namespaceParamName) + "/", SelfLinkPathSuffix: itemPathSuffix, } actions = appendIf(actions, action{"LIST", resourcePath, resourceParams, namer, false}, isLister) actions = appendIf(actions, action{"POST", resourcePath, resourceParams, namer, false}, isCreater) actions = appendIf(actions, action{"DELETECOLLECTION", resourcePath, resourceParams, namer, false}, isCollectionDeleter) // DEPRECATED in 1.11 actions = appendIf(actions, action{"WATCHLIST", "watch/" + resourcePath, resourceParams, namer, false}, allowWatchList) actions = appendIf(actions, action{"GET", itemPath, nameParams, namer, false}, isGetter) if getSubpath { actions = appendIf(actions, action{"GET", itemPath + "/{path:*}", proxyParams, namer, false}, isGetter) } actions = appendIf(actions, action{"PUT", itemPath, nameParams, namer, false}, isUpdater) actions = appendIf(actions, action{"PATCH", itemPath, nameParams, namer, false}, isPatcher) actions = appendIf(actions, action{"DELETE", itemPath, nameParams, namer, false}, isGracefulDeleter) // DEPRECATED in 1.11 actions = appendIf(actions, action{"WATCH", "watch/" + itemPath, nameParams, namer, false}, isWatcher) actions = 
appendIf(actions, action{"CONNECT", itemPath, nameParams, namer, false}, isConnecter) actions = appendIf(actions, action{"CONNECT", itemPath + "/{path:*}", proxyParams, namer, false}, isConnecter && connectSubpath) // list or post across namespace. // For ex: LIST all pods in all namespaces by sending a LIST request at /api/apiVersion/pods. // TODO: more strongly type whether a resource allows these actions on "all namespaces" (bulk delete) if !isSubresource { actions = appendIf(actions, action{"LIST", resource, params, namer, true}, isLister) // DEPRECATED in 1.11 actions = appendIf(actions, action{"WATCHLIST", "watch/" + resource, params, namer, true}, allowWatchList) } } // Create Routes for the actions. // TODO: Add status documentation using Returns() // Errors (see api/errors/errors.go as well as go-restful router): // http.StatusNotFound, http.StatusMethodNotAllowed, // http.StatusUnsupportedMediaType, http.StatusNotAcceptable, // http.StatusBadRequest, http.StatusUnauthorized, http.StatusForbidden, // http.StatusRequestTimeout, http.StatusConflict, http.StatusPreconditionFailed, // http.StatusUnprocessableEntity, http.StatusInternalServerError, // http.StatusServiceUnavailable // and api error codes // Note that if we specify a versioned Status object here, we may need to // create one for the tests, also // Success: // http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent // // test/integration/auth_test.go is currently the most comprehensive status code test for _, s := range a.group.Serializer.SupportedMediaTypes() { if len(s.MediaTypeSubType) == 0 || len(s.MediaTypeType) == 0 { return nil, fmt.Errorf("all serializers in the group Serializer must have MediaTypeType and MediaTypeSubType set: %s", s.MediaType) } } mediaTypes, streamMediaTypes := negotiation.MediaTypesForSerializer(a.group.Serializer) allMediaTypes := append(mediaTypes, streamMediaTypes...) ws.Produces(allMediaTypes...) 
kubeVerbs := map[string]struct{}{} reqScope := handlers.RequestScope{ Serializer: a.group.Serializer, ParameterCodec: a.group.ParameterCodec, Creater: a.group.Creater, Convertor: a.group.Convertor, Defaulter: a.group.Defaulter, Typer: a.group.Typer, UnsafeConvertor: a.group.UnsafeConvertor, Authorizer: a.group.Authorizer, EquivalentResourceMapper: a.group.EquivalentResourceRegistry, // TODO: Check for the interface on storage TableConvertor: tableProvider, // TODO: This seems wrong for cross-group subresources. It makes an assumption that a subresource and its parent are in the same group version. Revisit this. Resource: a.group.GroupVersion.WithResource(resource), Subresource: subresource, Kind: fqKindToRegister, HubGroupVersion: schema.GroupVersion{Group: fqKindToRegister.Group, Version: runtime.APIVersionInternal}, MetaGroupVersion: metav1.SchemeGroupVersion, MaxRequestBodyBytes: a.group.MaxRequestBodyBytes, } if a.group.MetaGroupVersion != nil { reqScope.MetaGroupVersion = *a.group.MetaGroupVersion } if a.group.OpenAPIModels != nil && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { fm, err := fieldmanager.NewFieldManager( a.group.OpenAPIModels, a.group.UnsafeConvertor, a.group.Defaulter, fqKindToRegister.GroupVersion(), reqScope.HubGroupVersion, ) if err != nil { return nil, fmt.Errorf("failed to create field manager: %v", err) } reqScope.FieldManager = fm } for _, action := range actions { producedObject := storageMeta.ProducesObject(action.Verb) if producedObject == nil { producedObject = defaultVersionedObject } reqScope.Namer = action.Namer requestScope := "cluster" var namespaced string var operationSuffix string if apiResource.Namespaced { requestScope = "namespace" namespaced = "Namespaced" } if strings.HasSuffix(action.Path, "/{path:*}") { requestScope = "resource" operationSuffix = operationSuffix + "WithPath" } if action.AllNamespaces { requestScope = "cluster" operationSuffix = operationSuffix + "ForAllNamespaces" namespaced = "" } 
if kubeVerb, found := toDiscoveryKubeVerb[action.Verb]; found { if len(kubeVerb) != 0 { kubeVerbs[kubeVerb] = struct{}{} } } else { return nil, fmt.Errorf("unknown action verb for discovery: %s", action.Verb) } routes := []*restful.RouteBuilder{} // If there is a subresource, kind should be the parent's kind. if isSubresource { parentStorage, ok := a.group.Storage[resource] if !ok { return nil, fmt.Errorf("missing parent storage: %q", resource) } fqParentKind, err := GetResourceKind(a.group.GroupVersion, parentStorage, a.group.Typer) if err != nil { return nil, err } kind = fqParentKind.Kind } verbOverrider, needOverride := storage.(StorageMetricsOverride) switch action.Verb { case "GET": // Get a resource. var handler restful.RouteFunction if isGetterWithOptions { handler = restfulGetResourceWithOptions(getterWithOptions, reqScope, isSubresource) } else { handler = restfulGetResource(getter, exporter, reqScope) } if needOverride { // need change the reported verb handler = metrics.InstrumentRouteFunc(verbOverrider.OverrideMetricsVerb(action.Verb), group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler) } else { handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler) } if a.enableAPIResponseCompression { handler = genericfilters.RestfulWithCompression(handler) } doc := "read the specified " + kind if isSubresource { doc = "read " + subresource + " of the specified " + kind } route := ws.GET(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("read"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). 
Writes(producedObject) if isGetterWithOptions { if err := AddObjectParams(ws, route, versionedGetOptions); err != nil { return nil, err } } if isExporter { if err := AddObjectParams(ws, route, versionedExportOptions); err != nil { return nil, err } } addParams(route, action.Params) routes = append(routes, route) case "LIST": // List all resources of a kind. doc := "list objects of kind " + kind if isSubresource { doc = "list " + subresource + " of objects of kind " + kind } handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, false, a.minRequestTimeout)) if a.enableAPIResponseCompression { handler = genericfilters.RestfulWithCompression(handler) } route := ws.GET(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("list"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), allMediaTypes...)...). Returns(http.StatusOK, "OK", versionedList). Writes(versionedList) if err := AddObjectParams(ws, route, versionedListOptions); err != nil { return nil, err } switch { case isLister && isWatcher: doc := "list or watch objects of kind " + kind if isSubresource { doc = "list or watch " + subresource + " of objects of kind " + kind } route.Doc(doc) case isWatcher: doc := "watch objects of kind " + kind if isSubresource { doc = "watch " + subresource + "of objects of kind " + kind } route.Doc(doc) } addParams(route, action.Params) routes = append(routes, route) case "PUT": // Update a resource. 
doc := "replace the specified " + kind if isSubresource { doc = "replace " + subresource + " of the specified " + kind } handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulUpdateResource(updater, reqScope, admit)) route := ws.PUT(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("replace"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). // TODO: in some cases, the API may return a v1.Status instead of the versioned object // but currently go-restful can't handle multiple different objects being returned. Returns(http.StatusCreated, "Created", producedObject). Reads(defaultVersionedObject). Writes(producedObject) if err := AddObjectParams(ws, route, versionedUpdateOptions); err != nil { return nil, err } addParams(route, action.Params) routes = append(routes, route) case "PATCH": // Partially update a resource doc := "partially update the specified " + kind if isSubresource { doc = "partially update " + subresource + " of the specified " + kind } supportedTypes := []string{ string(types.JSONPatchType), string(types.MergePatchType), string(types.StrategicMergePatchType), } if utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { supportedTypes = append(supportedTypes, string(types.ApplyPatchType)) } handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulPatchResource(patcher, reqScope, admit, supportedTypes)) route := ws.PATCH(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Consumes(supportedTypes...). Operation("patch"+namespaced+kind+strings.Title(subresource)+operationSuffix). 
Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). Reads(metav1.Patch{}). Writes(producedObject) if err := AddObjectParams(ws, route, versionedPatchOptions); err != nil { return nil, err } addParams(route, action.Params) routes = append(routes, route) case "POST": // Create a resource. var handler restful.RouteFunction if isNamedCreater { handler = restfulCreateNamedResource(namedCreater, reqScope, admit) } else { handler = restfulCreateResource(creater, reqScope, admit) } handler = metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, handler) article := GetArticleForNoun(kind, " ") doc := "create" + article + kind if isSubresource { doc = "create " + subresource + " of" + article + kind } route := ws.POST(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("create"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Returns(http.StatusOK, "OK", producedObject). // TODO: in some cases, the API may return a v1.Status instead of the versioned object // but currently go-restful can't handle multiple different objects being returned. Returns(http.StatusCreated, "Created", producedObject). Returns(http.StatusAccepted, "Accepted", producedObject). Reads(defaultVersionedObject). Writes(producedObject) if err := AddObjectParams(ws, route, versionedCreateOptions); err != nil { return nil, err } addParams(route, action.Params) routes = append(routes, route) case "DELETE": // Delete a resource. 
article := GetArticleForNoun(kind, " ") doc := "delete" + article + kind if isSubresource { doc = "delete " + subresource + " of" + article + kind } handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulDeleteResource(gracefulDeleter, isGracefulDeleter, reqScope, admit)) route := ws.DELETE(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("delete"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Writes(versionedStatus). Returns(http.StatusOK, "OK", versionedStatus). Returns(http.StatusAccepted, "Accepted", versionedStatus) if isGracefulDeleter { route.Reads(versionedDeleterObject) route.ParameterNamed("body").Required(false) if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil { return nil, err } } addParams(route, action.Params) routes = append(routes, route) case "DELETECOLLECTION": doc := "delete collection of " + kind if isSubresource { doc = "delete collection of " + subresource + " of a " + kind } handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulDeleteCollection(collectionDeleter, isCollectionDeleter, reqScope, admit)) route := ws.DELETE(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("deletecollection"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(append(storageMeta.ProducesMIMETypes(action.Verb), mediaTypes...)...). Writes(versionedStatus). 
Returns(http.StatusOK, "OK", versionedStatus) if isCollectionDeleter { route.Reads(versionedDeleterObject) route.ParameterNamed("body").Required(false) if err := AddObjectParams(ws, route, versionedDeleteOptions); err != nil { return nil, err } } if err := AddObjectParams(ws, route, versionedListOptions); err != nil { return nil, err } addParams(route, action.Params) routes = append(routes, route) // deprecated in 1.11 case "WATCH": // Watch a resource. doc := "watch changes to an object of kind " + kind if isSubresource { doc = "watch changes to " + subresource + " of an object of kind " + kind } doc += ". deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter." handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) route := ws.GET(action.Path).To(handler). Doc(doc). Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("watch"+namespaced+kind+strings.Title(subresource)+operationSuffix). Produces(allMediaTypes...). Returns(http.StatusOK, "OK", versionedWatchEvent). Writes(versionedWatchEvent) if err := AddObjectParams(ws, route, versionedListOptions); err != nil { return nil, err } addParams(route, action.Params) routes = append(routes, route) // deprecated in 1.11 case "WATCHLIST": // Watch all resources of a kind. doc := "watch individual changes to a list of " + kind if isSubresource { doc = "watch individual changes to a list of " + subresource + " of " + kind } doc += ". deprecated: use the 'watch' parameter with a list operation instead." handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulListResource(lister, watcher, reqScope, true, a.minRequestTimeout)) route := ws.GET(action.Path).To(handler). Doc(doc). 
Param(ws.QueryParameter("pretty", "If 'true', then the output is pretty printed.")). Operation("watch"+namespaced+kind+strings.Title(subresource)+"List"+operationSuffix). Produces(allMediaTypes...). Returns(http.StatusOK, "OK", versionedWatchEvent). Writes(versionedWatchEvent) if err := AddObjectParams(ws, route, versionedListOptions); err != nil { return nil, err } addParams(route, action.Params) routes = append(routes, route) case "CONNECT": for _, method := range connecter.ConnectMethods() { connectProducedObject := storageMeta.ProducesObject(method) if connectProducedObject == nil { connectProducedObject = "string" } doc := "connect " + method + " requests to " + kind if isSubresource { doc = "connect " + method + " requests to " + subresource + " of " + kind } handler := metrics.InstrumentRouteFunc(action.Verb, group, version, resource, subresource, requestScope, metrics.APIServerComponent, restfulConnectResource(connecter, reqScope, admit, path, isSubresource)) route := ws.Method(method).Path(action.Path). To(handler). Doc(doc). Operation("connect" + strings.Title(strings.ToLower(method)) + namespaced + kind + strings.Title(subresource) + operationSuffix). Produces("*/*"). Consumes("*/*"). 
Writes(connectProducedObject) if versionedConnectOptions != nil { if err := AddObjectParams(ws, route, versionedConnectOptions); err != nil { return nil, err } } addParams(route, action.Params) routes = append(routes, route) // transform ConnectMethods to kube verbs if kubeVerb, found := toDiscoveryKubeVerb[method]; found { if len(kubeVerb) != 0 { kubeVerbs[kubeVerb] = struct{}{} } } } default: return nil, fmt.Errorf("unrecognized action verb: %s", action.Verb) } for _, route := range routes { route.Metadata(ROUTE_META_GVK, metav1.GroupVersionKind{ Group: reqScope.Kind.Group, Version: reqScope.Kind.Version, Kind: reqScope.Kind.Kind, }) route.Metadata(ROUTE_META_ACTION, strings.ToLower(action.Verb)) ws.Route(route) } // Note: update GetAuthorizerAttributes() when adding a custom handler. } apiResource.Verbs = make([]string, 0, len(kubeVerbs)) for kubeVerb := range kubeVerbs { apiResource.Verbs = append(apiResource.Verbs, kubeVerb) } sort.Strings(apiResource.Verbs) if shortNamesProvider, ok := storage.(rest.ShortNamesProvider); ok { apiResource.ShortNames = shortNamesProvider.ShortNames() } if categoriesProvider, ok := storage.(rest.CategoriesProvider); ok { apiResource.Categories = categoriesProvider.Categories() } if gvkProvider, ok := storage.(rest.GroupVersionKindProvider); ok { gvk := gvkProvider.GroupVersionKind(a.group.GroupVersion) apiResource.Group = gvk.Group apiResource.Version = gvk.Version apiResource.Kind = gvk.Kind } // Record the existence of the GVR and the corresponding GVK a.group.EquivalentResourceRegistry.RegisterKindFor(reqScope.Resource, reqScope.Subresource, fqKindToRegister) return &apiResource, nil } // indirectArbitraryPointer returns *ptrToObject for an arbitrary pointer func indirectArbitraryPointer(ptrToObject interface{}) interface{} { return reflect.Indirect(reflect.ValueOf(ptrToObject)).Interface() } func appendIf(actions []action, a action, shouldAppend bool) []action { if shouldAppend { actions = append(actions, a) } return actions 
} func addParams(route *restful.RouteBuilder, params []*restful.Parameter) { for _, param := range params { route.Param(param) } } // AddObjectParams converts a runtime.Object into a set of go-restful Param() definitions on the route. // The object must be a pointer to a struct; only fields at the top level of the struct that are not // themselves interfaces or structs are used; only fields with a json tag that is non empty (the standard // Go JSON behavior for omitting a field) become query parameters. The name of the query parameter is // the JSON field name. If a description struct tag is set on the field, that description is used on the // query parameter. In essence, it converts a standard JSON top level object into a query param schema. func AddObjectParams(ws *restful.WebService, route *restful.RouteBuilder, obj interface{}) error { sv, err := conversion.EnforcePtr(obj) if err != nil { return err } st := sv.Type() switch st.Kind() { case reflect.Struct: for i := 0; i < st.NumField(); i++ { name := st.Field(i).Name sf, ok := st.FieldByName(name) if !ok { continue } switch sf.Type.Kind() { case reflect.Interface, reflect.Struct: case reflect.Ptr: // TODO: This is a hack to let metav1.Time through. This needs to be fixed in a more generic way eventually. bug #36191 if (sf.Type.Elem().Kind() == reflect.Interface || sf.Type.Elem().Kind() == reflect.Struct) && strings.TrimPrefix(sf.Type.String(), "*") != "metav1.Time" { continue } fallthrough default: jsonTag := sf.Tag.Get("json") if len(jsonTag) == 0 { continue } jsonName := strings.SplitN(jsonTag, ",", 2)[0] if len(jsonName) == 0 { continue } var desc string if docable, ok := obj.(documentable); ok { desc = docable.SwaggerDoc()[jsonName] } route.Param(ws.QueryParameter(jsonName, desc).DataType(typeToJSON(sf.Type.String()))) } } } return nil } // TODO: this is incomplete, expand as needed. 
// Convert the name of a golang type to the name of a JSON type func typeToJSON(typeName string) string { switch typeName { case "bool", "*bool": return "boolean" case "uint8", "*uint8", "int", "*int", "int32", "*int32", "int64", "*int64", "uint32", "*uint32", "uint64", "*uint64": return "integer" case "float64", "*float64", "float32", "*float32": return "number" case "metav1.Time", "*metav1.Time": return "string" case "byte", "*byte": return "string" case "v1.DeletionPropagation", "*v1.DeletionPropagation": return "string" // TODO: Fix these when go-restful supports a way to specify an array query param: // https://github.com/emicklei/go-restful/issues/225 case "[]string", "[]*string": return "string" case "[]int32", "[]*int32": return "integer" default: return typeName } } // defaultStorageMetadata provides default answers to rest.StorageMetadata. type defaultStorageMetadata struct{} // defaultStorageMetadata implements rest.StorageMetadata var _ rest.StorageMetadata = defaultStorageMetadata{} func (defaultStorageMetadata) ProducesMIMETypes(verb string) []string { return nil } func (defaultStorageMetadata) ProducesObject(verb string) interface{} { return nil } // splitSubresource checks if the given storage path is the path of a subresource and returns // the resource and subresource components. func splitSubresource(path string) (string, string, error) { var resource, subresource string switch parts := strings.Split(path, "/"); len(parts) { case 2: resource, subresource = parts[0], parts[1] case 1: resource = parts[0] default: // TODO: support deeper paths return "", "", fmt.Errorf("api_installer allows only one or two segment paths (resource or resource/subresource)") } return resource, subresource, nil } // GetArticleForNoun returns the article needed for the given noun. func GetArticleForNoun(noun string, padding string) string { if noun[len(noun)-2:] != "ss" && noun[len(noun)-1:] == "s" { // Plurals don't have an article. 
// Don't catch words like class return fmt.Sprintf("%v", padding) } article := "a" if isVowel(rune(noun[0])) { article = "an" } return fmt.Sprintf("%s%s%s", padding, article, padding) } // isVowel returns true if the rune is a vowel (case insensitive). func isVowel(c rune) bool { vowels := []rune{'a', 'e', 'i', 'o', 'u'} for _, value := range vowels { if value == unicode.ToLower(c) { return true } } return false } func restfulListResource(r rest.Lister, rw rest.Watcher, scope handlers.RequestScope, forceWatch bool, minRequestTimeout time.Duration) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.ListResource(r, rw, &scope, forceWatch, minRequestTimeout)(res.ResponseWriter, req.Request) } } func restfulCreateNamedResource(r rest.NamedCreater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.CreateNamedResource(r, &scope, admit)(res.ResponseWriter, req.Request) } } func restfulCreateResource(r rest.Creater, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.CreateResource(r, &scope, admit)(res.ResponseWriter, req.Request) } } func restfulDeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.DeleteResource(r, allowsOptions, &scope, admit)(res.ResponseWriter, req.Request) } } func restfulDeleteCollection(r rest.CollectionDeleter, checkBody bool, scope handlers.RequestScope, admit admission.Interface) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.DeleteCollection(r, checkBody, &scope, admit)(res.ResponseWriter, req.Request) } } func restfulUpdateResource(r rest.Updater, scope handlers.RequestScope, admit admission.Interface) 
restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.UpdateResource(r, &scope, admit)(res.ResponseWriter, req.Request) } } func restfulPatchResource(r rest.Patcher, scope handlers.RequestScope, admit admission.Interface, supportedTypes []string) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.PatchResource(r, &scope, admit, supportedTypes)(res.ResponseWriter, req.Request) } } func restfulGetResource(r rest.Getter, e rest.Exporter, scope handlers.RequestScope) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.GetResource(r, e, &scope)(res.ResponseWriter, req.Request) } } func restfulGetResourceWithOptions(r rest.GetterWithOptions, scope handlers.RequestScope, isSubresource bool) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.GetResourceWithOptions(r, &scope, isSubresource)(res.ResponseWriter, req.Request) } } func restfulConnectResource(connecter rest.Connecter, scope handlers.RequestScope, admit admission.Interface, restPath string, isSubresource bool) restful.RouteFunction { return func(req *restful.Request, res *restful.Response) { handlers.ConnectResource(connecter, &scope, admit, restPath, isSubresource)(res.ResponseWriter, req.Request) } }
NickrenREN/kubernetes
staging/src/k8s.io/apiserver/pkg/endpoints/installer.go
GO
apache-2.0
44,987
// -*- coding: us-ascii-unix -*- // Copyright 2012 Lukas Kemmer // // Licensed under the Apache License, Version 2.0 (the "License"); you // may not use this file except in compliance with the License. You // may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. #include <cassert> #include "text/utf8.hh" #include "text/utf8-string.hh" namespace faint{ inline bool outside(const std::string& data, size_t pos){ return utf8::num_characters(data) <= pos; } utf8_string::utf8_string(size_t n, const utf8_char& ch){ for (size_t i = 0; i != n; i++){ m_data += ch.str(); } } utf8_string::utf8_string(const utf8_char& ch) : utf8_string(1, ch) {} utf8_string::utf8_string(const char* str) : m_data(str) {} utf8_string::utf8_string(const std::string& str) : m_data(str) {} utf8_char utf8_string::at(size_t pos) const{ if (outside(m_data, pos)){ throw std::out_of_range("utf8_string::at invalid string position"); } return operator[](pos); } utf8_char utf8_string::back() const{ assert(!m_data.empty()); return operator[](size() - 1); } utf8_char utf8_string::front() const{ assert(!m_data.empty()); return operator[](0); } size_t utf8_string::bytes() const{ return m_data.size(); } void utf8_string::clear(){ m_data.clear(); } utf8_string utf8_string::substr(size_t pos, size_t n) const{ if (outside(m_data, pos)){ throw std::out_of_range("utf8_string::substr invalid string position"); } size_t startByte = utf8::char_num_to_byte_num_checked(pos, m_data); size_t numBytes = (n == utf8_string::npos) ? 
std::string::npos : utf8::char_num_to_byte_num_clamped(pos + n, m_data) - startByte; return utf8_string(m_data.substr(startByte, numBytes)); } const char* utf8_string::c_str() const{ return m_data.c_str(); } const std::string& utf8_string::str() const{ return m_data; } size_t utf8_string::size() const{ return utf8::num_characters(m_data); } bool utf8_string::empty() const{ return m_data.empty(); } utf8_string& utf8_string::erase(size_t pos, size_t n){ if (outside(m_data, pos)){ throw std::out_of_range("utf8_string::erase invalid string position"); } size_t startByte = utf8::char_num_to_byte_num_clamped(pos, m_data); size_t numBytes = (n == npos ? npos : utf8::char_num_to_byte_num_clamped(pos + n, m_data) - startByte); m_data.erase(startByte, numBytes); return *this; } utf8_string& utf8_string::insert(size_t pos, const utf8_string& inserted){ if (pos > utf8::num_characters(m_data)){ throw std::out_of_range("invalid insertion index"); } m_data.insert(utf8::char_num_to_byte_num_checked(pos, m_data), inserted.str()); return *this; } utf8_string& utf8_string::insert(size_t pos, size_t num, const utf8_char& c){ if (pos > utf8::num_characters(m_data)){ throw std::out_of_range("invalid insertion index"); } insert(pos, utf8_string(num, c)); return *this; } utf8_char utf8_string::operator[](size_t i) const{ size_t pos = utf8::char_num_to_byte_num_checked(i, m_data); size_t numBytes = faint::utf8::prefix_num_bytes(m_data[pos]); return utf8_char(m_data.substr(pos, numBytes)); } size_t utf8_string::find(const utf8_char& ch, size_t start) const{ // Since the leading byte has a unique pattern, using regular // std::string find should be OK, I think. size_t pos = m_data.find(ch.str(), utf8::char_num_to_byte_num_checked(start, m_data)); if (pos == npos){ return pos; } return utf8::byte_num_to_char_num(pos, m_data); } size_t utf8_string::find_last_of(const utf8_string& s, size_t inPos) const{ const size_t endPos = inPos == npos ? 
size() : inPos; for (size_t i = 0; i != endPos; i++){ auto pos = endPos - i - 1; if (s.find((*this)[pos]) != utf8_string::npos){ return pos; } } return utf8_string::npos; } size_t utf8_string::rfind(const utf8_char& ch, size_t start) const{ // Since the leading byte has a unique pattern, using regular // std::string rfind should be OK, I think. if (m_data.empty()){ return npos; } size_t startByte = (start == npos) ? m_data.size() - 1 : utf8::char_num_to_byte_num_checked(start, m_data); size_t pos = m_data.rfind(ch.str(), startByte); if (pos == npos){ return pos; } return pos == npos ? npos : utf8::byte_num_to_char_num(pos, m_data); } utf8_string& utf8_string::operator=(const utf8_string& other){ if (&other == this){ return *this; } m_data = other.m_data; return *this; } utf8_string& utf8_string::operator+=(const utf8_char& ch){ m_data += ch.str(); return *this; } utf8_string& utf8_string::operator+=(const utf8_string& str){ m_data += str.str(); return *this; } utf8_string operator+(const utf8_string& lhs, const utf8_char& rhs){ return utf8_string(lhs.str() + rhs.str()); } utf8_string operator+(const utf8_string& lhs, const utf8_string& rhs){ return utf8_string(lhs.str() + rhs.str()); } utf8_string operator+(const utf8_char& lhs, const utf8_string& rhs){ return utf8_string(lhs.str() + rhs.str()); } const size_t utf8_string::npos(std::string::npos); bool utf8_string::operator<(const utf8_string& s) const{ return m_data < s.m_data; } bool is_ascii(const utf8_string& s){ const std::string& bytes = s.str(); for (char ch : bytes){ if (utf8::prefix_num_bytes(ch) != 1){ return false; } } return true; } std::ostream& operator<<(std::ostream& o, const utf8_string& s){ o << s.str(); return o; } bool operator==(const utf8_string& lhs, const utf8_string& rhs){ return lhs.str() == rhs.str(); } bool operator!=(const utf8_string& lhs, const utf8_string& rhs){ return !(lhs == rhs); } utf8_string_const_iterator begin(const utf8_string& s){ return utf8_string_const_iterator(s, 0); } 
utf8_string_const_iterator end(const utf8_string& s){ return utf8_string_const_iterator(s, s.size()); } } // namespace
lukas-ke/faint-graphics-editor
text/utf8-string.cpp
C++
apache-2.0
6,218
package com.asura.monitor.platform.dao;

import com.asura.framework.base.paging.PagingResult;
import com.asura.framework.base.paging.SearchMap;
import com.asura.framework.dao.mybatis.base.MybatisDaoContext;
import com.asura.framework.dao.mybatis.paginator.domain.PageBounds;
import com.asura.common.dao.BaseDao;
import com.asura.monitor.platform.entity.MonitorPlatformServerEntity;
import org.springframework.stereotype.Repository;

import javax.annotation.Resource;

/**
 * MyBatis-backed DAO for {@link MonitorPlatformServerEntity} (monitor platform
 * server records). Query execution is delegated to the shared
 * {@code monitor.MybatisDaoContext}.
 *
 * @author zhaozq14
 * @version 1.0
 * @date 2016-11-07 11:35:05
 * @since 1.0
 */
// NOTE(review): the @Repository bean name below says "configure.dao" while this
// class lives in "platform.dao" — presumably a copy/paste slip from a sibling
// DAO. Renaming it would change the Spring bean id, so confirm no caller looks
// the bean up by this string before fixing.
@Repository("com.asura.monitor.configure.dao.MonitorPlatformServerDao")
public class MonitorPlatformServerDao extends BaseDao<MonitorPlatformServerEntity>{

    // Shared MyBatis context through which all mapped statements are executed.
    @Resource(name="monitor.MybatisDaoContext")
    private MybatisDaoContext mybatisDaoContext;

    /**
     * Runs the mapped SQL statement {@code <this-class-FQN>.<sqlId>} and returns
     * a single page of results.
     *
     * @param searchMap  query parameters handed to the mapped statement
     * @param pageBounds paging window (page number, size, ordering)
     * @param sqlId      id of the mapped statement, resolved relative to this class name
     * @return one page of matching {@link MonitorPlatformServerEntity} rows
     */
    public PagingResult<MonitorPlatformServerEntity> findAll(SearchMap searchMap, PageBounds pageBounds, String sqlId){
        return mybatisDaoContext.findForPage(this.getClass().getName()+"."+sqlId,MonitorPlatformServerEntity.class,searchMap,pageBounds);
    }
}
AsuraTeam/monitor
server/src/main/java/com/asura/monitor/platform/dao/MonitorPlatformServerDao.java
Java
apache-2.0
1,279
/** * Jakarta Bean Validation TCK * * License: Apache License, Version 2.0 * See the license.txt file in the root directory or <http://www.apache.org/licenses/LICENSE-2.0>. */ package org.hibernate.beanvalidation.tck.tests.constraints.constraintdefinition; import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertNoViolations; import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.assertThat; import static org.hibernate.beanvalidation.tck.util.ConstraintViolationAssert.violationOf; import static org.testng.Assert.assertEquals; import java.util.Set; import jakarta.validation.ConstraintViolation; import jakarta.validation.Validator; import jakarta.validation.constraints.Size; import jakarta.validation.groups.Default; import jakarta.validation.metadata.ConstraintDescriptor; import org.hibernate.beanvalidation.tck.beanvalidation.Sections; import org.hibernate.beanvalidation.tck.tests.AbstractTCKTest; import org.hibernate.beanvalidation.tck.util.TestUtil; import org.jboss.arquillian.container.test.api.Deployment; import org.jboss.shrinkwrap.api.spec.WebArchive; import org.jboss.test.audit.annotations.SpecAssertion; import org.jboss.test.audit.annotations.SpecVersion; import org.testng.annotations.Test; /** * @author Hardy Ferentschik * @author Guillaume Smet */ @SpecVersion(spec = "beanvalidation", version = "3.0.0") public class ConstraintDefinitionsTest extends AbstractTCKTest { @Deployment public static WebArchive createTestArchive() { return webArchiveBuilder() .withTestClassPackage( ConstraintDefinitionsTest.class ) .build(); } @Test @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES, id = "a") @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a") public void testConstraintWithCustomAttributes() { Validator validator = TestUtil.getValidatorUnderTest(); Set<ConstraintDescriptor<?>> descriptors = 
validator.getConstraintsForClass( Person.class ) .getConstraintsForProperty( "lastName" ) .getConstraintDescriptors(); assertEquals( descriptors.size(), 2, "There should be two constraints on the lastName property." ); for ( ConstraintDescriptor<?> descriptor : descriptors ) { assertEquals( descriptor.getAnnotation().annotationType().getName(), AlwaysValid.class.getName(), "Wrong annotation type." ); } Set<ConstraintViolation<Person>> constraintViolations = validator.validate( new Person( "John", "Doe" ) ); assertThat( constraintViolations ).containsOnlyViolations( violationOf( AlwaysValid.class ) ); } @Test @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "a") @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_MULTIPLECONSTRAINTS, id = "b") public void testRepeatableConstraint() { Validator validator = TestUtil.getValidatorUnderTest(); Set<ConstraintDescriptor<?>> descriptors = validator.getConstraintsForClass( Movie.class ) .getConstraintsForProperty( "title" ) .getConstraintDescriptors(); assertEquals( descriptors.size(), 2, "There should be two constraints on the title property." ); for ( ConstraintDescriptor<?> descriptor : descriptors ) { assertEquals( descriptor.getAnnotation().annotationType().getName(), Size.class.getName(), "Wrong annotation type." 
); } Set<ConstraintViolation<Movie>> constraintViolations = validator.validate( new Movie( "Title" ) ); assertNoViolations( constraintViolations ); constraintViolations = validator.validate( new Movie( "A" ) ); assertThat( constraintViolations ).containsOnlyViolations( violationOf( Size.class ) ); constraintViolations = validator.validate( new Movie( "A movie title far too long that does not respect the constraint" ) ); assertThat( constraintViolations ).containsOnlyViolations( violationOf( Size.class ) ); } @Test @SpecAssertion(section = Sections.CONSTRAINTSDEFINITIONIMPLEMENTATION_CONSTRAINTDEFINITION_PROPERTIES_GROUPS, id = "d") public void testDefaultGroupAssumedWhenNoGroupsSpecified() { Validator validator = TestUtil.getValidatorUnderTest(); ConstraintDescriptor<?> descriptor = validator.getConstraintsForClass( Person.class ) .getConstraintsForProperty( "firstName" ) .getConstraintDescriptors() .iterator() .next(); Set<Class<?>> groups = descriptor.getGroups(); assertEquals( groups.size(), 1, "The group set should only contain one entry." ); assertEquals( groups.iterator().next(), Default.class, "The Default group should be returned." ); } }
beanvalidation/beanvalidation-tck
tests/src/main/java/org/hibernate/beanvalidation/tck/tests/constraints/constraintdefinition/ConstraintDefinitionsTest.java
Java
apache-2.0
4,672
<?php
defined('ABSPATH') or die('No script kiddies please!');

/**
 * Prints Bootstrap-style numeric pagination for the main query.
 *
 * Renders prev/next links plus the current page, up to two pages on each
 * side, the first and last page, and ellipses where pages are elided.
 * Outputs directly (echo/printf); returns nothing. No-op on singular views
 * or when there is only one page.
 *
 * @return void
 */
function Yonk_numeric_posts_nav() {

    if(is_singular())
        return;

    global $wp_query;

    /** Stop execution if there's only 1 page */
    if($wp_query->max_num_pages <= 1)
        return;

    $paged = get_query_var('paged') ? absint(get_query_var('paged')) : 1;
    $max = intval($wp_query->max_num_pages);

    /** Pages to render: current page plus up to two on each side. */
    $links = array();

    /** Add current page to the array */
    if ($paged >= 1)
        $links[] = $paged;

    /** Add the pages around the current page to the array */
    if ($paged >= 3) {
        $links[] = $paged - 1;
        $links[] = $paged - 2;
    }

    if (($paged + 2) <= $max) {
        $links[] = $paged + 2;
        $links[] = $paged + 1;
    }

    echo '<div class="navigation"><ul class="pagination">' . "\n";

    /** Previous Post Link */
    if (get_previous_posts_link())
        printf( '<li>%s</li>' . "\n", get_previous_posts_link() );

    /** Link to first page, plus ellipses if necessary */
    if (!in_array(1, $links )) {
        $class = 1 == $paged ? ' class="active"' : '';
        // BUGFIX: the opening <span> tag was missing its closing '>'
        // ("...aria-hidden=\"true\"%s</span>"), which emitted broken HTML
        // and swallowed the page number.
        printf('<li%s><a href="%s" aria-label="Previous"><span aria-hidden="true">%s</span></a></li>' . "\n", $class, esc_url(get_pagenum_link(1)), '1');

        if (!in_array(2, $links))
            echo '<li>&hellip;</li>';
    }

    /** Link to current page, plus 2 pages in either direction if necessary */
    sort($links);
    foreach ((array)$links as $link) {
        $class = $paged == $link ? ' class="active"' : '';
        printf('<li%s><a href="%s">%s</a></li>' . "\n", $class, esc_url(get_pagenum_link($link)), $link);
    }

    /** Link to last page, plus ellipses if necessary */
    if (!in_array($max, $links)) {
        if (!in_array($max - 1, $links))
            echo '<li>&hellip;</li>' . "\n";

        $class = $paged == $max ? ' class="active"' : '';
        printf('<li%s><a href="%s" aria-label="Next"><span aria-hidden="true">%s</span></a></li>' . "\n", $class, esc_url(get_pagenum_link($max)), $max);
    }

    /** Next Post Link */
    if (get_next_posts_link())
        printf('<li>%s</li>' . "\n", get_next_posts_link());

    echo '</ul></div>' . "\n";
}
Patreo/yonk
wp-content/themes/YonkTheme/yonk-core/plugins/pagenavi.php
PHP
apache-2.0
2,213
package org.targettest.org.apache.lucene.index; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.Map; import org.targettest.org.apache.lucene.document.Document; import org.targettest.org.apache.lucene.document.FieldSelector; import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermDocs; import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermEnum; import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermPositions; import org.targettest.org.apache.lucene.search.DefaultSimilarity; import org.targettest.org.apache.lucene.search.FieldCache; /** An IndexReader which reads multiple indexes, appending * their content. */ public class MultiReader extends IndexReader implements Cloneable { protected IndexReader[] subReaders; private int[] starts; // 1st docno for each segment private boolean[] decrefOnClose; // remember which subreaders to decRef on close private Map<String,byte[]> normsCache = new HashMap<String,byte[]>(); private int maxDoc = 0; private int numDocs = -1; private boolean hasDeletions = false; /** * <p>Construct a MultiReader aggregating the named set of (sub)readers. 
* Directory locking for delete, undeleteAll, and setNorm operations is * left to the subreaders. </p> * <p>Note that all subreaders are closed if this Multireader is closed.</p> * @param subReaders set of (sub)readers * @throws IOException */ public MultiReader(IndexReader... subReaders) { initialize(subReaders, true); } /** * <p>Construct a MultiReader aggregating the named set of (sub)readers. * Directory locking for delete, undeleteAll, and setNorm operations is * left to the subreaders. </p> * @param closeSubReaders indicates whether the subreaders should be closed * when this MultiReader is closed * @param subReaders set of (sub)readers * @throws IOException */ public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) { initialize(subReaders, closeSubReaders); } private void initialize(IndexReader[] subReaders, boolean closeSubReaders) { this.subReaders = subReaders.clone(); starts = new int[subReaders.length + 1]; // build starts array decrefOnClose = new boolean[subReaders.length]; for (int i = 0; i < subReaders.length; i++) { starts[i] = maxDoc; maxDoc += subReaders[i].maxDoc(); // compute maxDocs if (!closeSubReaders) { subReaders[i].incRef(); decrefOnClose[i] = true; } else { decrefOnClose[i] = false; } if (subReaders[i].hasDeletions()) hasDeletions = true; } starts[subReaders.length] = maxDoc; } /** * Tries to reopen the subreaders. * <br> * If one or more subreaders could be re-opened (i. e. subReader.reopen() * returned a new instance != subReader), then a new MultiReader instance * is returned, otherwise this instance is returned. * <p> * A re-opened instance might share one or more subreaders with the old * instance. Index modification operations result in undefined behavior * when performed before the old instance is closed. * (see {@link IndexReader#reopen()}). * <p> * If subreaders are shared, then the reference count of those * readers is increased to ensure that the subreaders remain open * until the last referring reader is closed. 
* * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ @Override public synchronized IndexReader reopen() throws CorruptIndexException, IOException { return doReopen(false); } /** * Clones the subreaders. * (see {@link IndexReader#clone()}). * <br> * <p> * If subreaders are shared, then the reference count of those * readers is increased to ensure that the subreaders remain open * until the last referring reader is closed. */ @Override public synchronized Object clone() { try { return doReopen(true); } catch (Exception ex) { throw new RuntimeException(ex); } } /** * If clone is true then we clone each of the subreaders * @param doClone * @return New IndexReader, or same one (this) if * reopen/clone is not necessary * @throws CorruptIndexException * @throws IOException */ protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException { ensureOpen(); boolean reopened = false; IndexReader[] newSubReaders = new IndexReader[subReaders.length]; boolean success = false; try { for (int i = 0; i < subReaders.length; i++) { if (doClone) newSubReaders[i] = (IndexReader) subReaders[i].clone(); else newSubReaders[i] = subReaders[i].reopen(); // if at least one of the subreaders was updated we remember that // and return a new MultiReader if (newSubReaders[i] != subReaders[i]) { reopened = true; } } success = true; } finally { if (!success && reopened) { for (int i = 0; i < newSubReaders.length; i++) { if (newSubReaders[i] != subReaders[i]) { try { newSubReaders[i].close(); } catch (IOException ignore) { // keep going - we want to clean up as much as possible } } } } } if (reopened) { boolean[] newDecrefOnClose = new boolean[subReaders.length]; for (int i = 0; i < subReaders.length; i++) { if (newSubReaders[i] == subReaders[i]) { newSubReaders[i].incRef(); newDecrefOnClose[i] = true; } } MultiReader mr = new MultiReader(newSubReaders); mr.decrefOnClose = newDecrefOnClose; return mr; } else { 
return this; } } @Override public TermFreqVector[] getTermFreqVectors(int n) throws IOException { ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment } @Override public TermFreqVector getTermFreqVector(int n, String field) throws IOException { ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].getTermFreqVector(n - starts[i], field); } @Override public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException { ensureOpen(); int i = readerIndex(docNumber); // find segment num subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper); } @Override public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException { ensureOpen(); int i = readerIndex(docNumber); // find segment num subReaders[i].getTermFreqVector(docNumber - starts[i], mapper); } @Override public boolean isOptimized() { return false; } @Override public int numDocs() { // Don't call ensureOpen() here (it could affect performance) // NOTE: multiple threads may wind up init'ing // numDocs... 
but that's harmless if (numDocs == -1) { // check cache int n = 0; // cache miss--recompute for (int i = 0; i < subReaders.length; i++) n += subReaders[i].numDocs(); // sum from readers numDocs = n; } return numDocs; } @Override public int maxDoc() { // Don't call ensureOpen() here (it could affect performance) return maxDoc; } // inherit javadoc @Override public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader } @Override public boolean isDeleted(int n) { // Don't call ensureOpen() here (it could affect performance) int i = readerIndex(n); // find segment num return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader } @Override public boolean hasDeletions() { // Don't call ensureOpen() here (it could affect performance) return hasDeletions; } @Override protected void doDelete(int n) throws CorruptIndexException, IOException { numDocs = -1; // invalidate cache int i = readerIndex(n); // find segment num subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader hasDeletions = true; } @Override protected void doUndeleteAll() throws CorruptIndexException, IOException { for (int i = 0; i < subReaders.length; i++) subReaders[i].undeleteAll(); hasDeletions = false; numDocs = -1; // invalidate cache } private int readerIndex(int n) { // find reader for doc n: return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length); } @Override public boolean hasNorms(String field) throws IOException { ensureOpen(); for (int i = 0; i < subReaders.length; i++) { if (subReaders[i].hasNorms(field)) return true; } return false; } @Override public synchronized byte[] norms(String field) throws IOException { ensureOpen(); byte[] bytes = normsCache.get(field); if (bytes != null) return bytes; // cache hit if (!hasNorms(field)) return null; bytes = 
new byte[maxDoc()]; for (int i = 0; i < subReaders.length; i++) subReaders[i].norms(field, bytes, starts[i]); normsCache.put(field, bytes); // update cache return bytes; } @Override public synchronized void norms(String field, byte[] result, int offset) throws IOException { ensureOpen(); byte[] bytes = normsCache.get(field); for (int i = 0; i < subReaders.length; i++) // read from segments subReaders[i].norms(field, result, offset + starts[i]); if (bytes==null && !hasNorms(field)) { Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f)); } else if (bytes != null) { // cache hit System.arraycopy(bytes, 0, result, offset, maxDoc()); } else { for (int i = 0; i < subReaders.length; i++) { // read from segments subReaders[i].norms(field, result, offset + starts[i]); } } } @Override protected void doSetNorm(int n, String field, byte value) throws CorruptIndexException, IOException { synchronized (normsCache) { normsCache.remove(field); // clear cache } int i = readerIndex(n); // find segment num subReaders[i].setNorm(n-starts[i], field, value); // dispatch } @Override public TermEnum terms() throws IOException { ensureOpen(); return new MultiTermEnum(this, subReaders, starts, null); } @Override public TermEnum terms(Term term) throws IOException { ensureOpen(); return new MultiTermEnum(this, subReaders, starts, term); } @Override public int docFreq(Term t) throws IOException { ensureOpen(); int total = 0; // sum freqs in segments for (int i = 0; i < subReaders.length; i++) total += subReaders[i].docFreq(t); return total; } @Override public TermDocs termDocs() throws IOException { ensureOpen(); return new MultiTermDocs(this, subReaders, starts); } @Override public TermPositions termPositions() throws IOException { ensureOpen(); return new MultiTermPositions(this, subReaders, starts); } @Override protected void doCommit(Map<String,String> commitUserData) throws IOException { for (int i = 0; i < subReaders.length; i++) 
subReaders[i].commit(commitUserData); } @Override protected synchronized void doClose() throws IOException { for (int i = 0; i < subReaders.length; i++) { if (decrefOnClose[i]) { subReaders[i].decRef(); } else { subReaders[i].close(); } } // NOTE: only needed in case someone had asked for // FieldCache for top-level reader (which is generally // not a good idea): FieldCache.DEFAULT.purge(this); } @Override public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) { ensureOpen(); return DirectoryReader.getFieldNames(fieldNames, this.subReaders); } /** * Checks recursively if all subreaders are up to date. */ @Override public boolean isCurrent() throws CorruptIndexException, IOException { for (int i = 0; i < subReaders.length; i++) { if (!subReaders[i].isCurrent()) { return false; } } // all subreaders are up to date return true; } /** Not implemented. * @throws UnsupportedOperationException */ @Override public long getVersion() { throw new UnsupportedOperationException("MultiReader does not support this method."); } @Override public IndexReader[] getSequentialSubReaders() { return subReaders; } }
chrishumphreys/provocateur
provocateur-thirdparty/src/main/java/org/targettest/org/apache/lucene/index/MultiReader.java
Java
apache-2.0
14,073
import json
import os
from html import escape as _html_escape


def escape(s):
    """HTML-escape ``&``, ``<`` and ``>`` only (old ``cgi.escape`` semantics).

    ``cgi.escape`` (used by the original module) was removed in Python 3.8
    and the whole ``cgi`` module was removed in 3.13; the documented drop-in
    replacement is ``html.escape(s, quote=False)``, which leaves quote
    characters untouched exactly like ``cgi.escape`` did.
    """
    return _html_escape(s, quote=False)


def unescape(s):
    """Reverse :func:`escape`: decode ``&lt;``/``&gt;``/``&amp;`` entities."""
    s = s.replace("&lt;", "<")
    s = s.replace("&gt;", ">")
    # this has to be last, otherwise "&amp;lt;" would double-decode:
    s = s.replace("&amp;", "&")
    return s


class FilesystemMixin:
    """WebSocket handlers exposing a small filesystem/process RPC API.

    Handlers that produce output reply by sending a JSON message of the form
    ``{"method": <name>, "result": [...]}`` over ``self.ws`` (the mixin
    expects the consuming class to provide a ``ws`` attribute, as FsApp does).
    """

    def h_fs_get(_, path, eltName=''):
        """Send the contents of *path*: escaped text for a regular file, or a
        ``[(name, is_dir), ...]`` listing for a directory."""
        from stat import S_ISDIR
        if S_ISDIR(os.stat(path).st_mode):
            data = [(p, S_ISDIR(os.stat(path + '/' + p).st_mode))
                    for p in os.listdir(path)]
        else:
            # Context manager closes the handle promptly; the original
            # `open(path).read()` leaked it until garbage collection.
            with open(path) as f:
                data = escape(f.read())
        _.ws.send(json.dumps({"method": "fs_get", "result": [path, data, eltName]}))

    def h_fs_put(_, path, data):
        """Write *data* (an iterable of HTML-escaped chunks) to *path*,
        unescaping each chunk on the way out."""
        with open(path, 'w') as f:
            for x in data:
                f.write(unescape(x))

    def h_fs_system(_, path, eltName='', cwd=None):
        """Run *path* as a shell-style command line (split with shlex) and
        send its (stdout, stderr) pair back over the socket."""
        import shlex
        import subprocess as sp
        # NOTE(review): communicate() returns bytes on Python 3, which
        # json.dumps cannot serialize — presumably this ran on Python 2;
        # confirm before adding text=True, which would change the payload.
        data = sp.Popen(shlex.split(path), cwd=cwd,
                        stdout=sp.PIPE, stderr=sp.PIPE).communicate()
        _.ws.send(json.dumps({"method": "fs_system", "result": [path, data, eltName]}))

    def h_fs_mkdir(_, path):
        """Create the directory *path*."""
        os.mkdir(path)

    def h_fs_rmdir(_, path):
        """Remove the (empty) directory *path*."""
        os.rmdir(path)

    def h_fs_touch(_, path):
        """Create *path* as an empty file (truncates an existing file)."""
        open(path, 'w').close()

    def h_fs_unlink(_, path):
        """Delete the file *path*."""
        os.unlink(path)


class FsApp(FilesystemMixin):
    """Concrete app binding the filesystem handlers to a websocket."""

    def __init__(_, ws):
        _.ws = ws
val314159/framist
fssvr/fs.py
Python
apache-2.0
1,267
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package main import ( "database/sql" "database/sql/driver" "fmt" "hash/crc32" "strings" "time" pbinlog "github.com/cwen0/cdb-syncer/protocol" "github.com/go-sql-driver/mysql" "github.com/juju/errors" "github.com/ngaut/log" "github.com/pingcap/tidb/ast" tddl "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/infoschema" tmysql "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/terror" ) type job struct { tp pbinlog.BinlogType sql string args []interface{} key string retry bool pos Position } func newJob(tp pbinlog.BinlogType, sql string, args []interface{}, key string, retry bool, pos Position) *job { return &job{tp: tp, sql: sql, args: args, key: key, retry: retry, pos: pos} } func genHashKey(key string) uint32 { return crc32.ChecksumIEEE([]byte(key)) } func genPKey(rows []*pbinlog.Row) string { var values []string for _, row := range rows { values = append(values, row.GetColumnValue()) } return strings.Join(values, ",") } func genInsertSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) { var sql string var values []string sql += "replace into " + binlog.GetDbName() + "." 
+ binlog.GetTableName() + "(" rows := binlog.GetRows() for _, row := range rows { sql += row.GetColumnName() + "," values = append(values, row.GetColumnValue()) } sql = sql[0:len(sql)-1] + ") values (" for _, _ = range rows { sql += "?," } sql = sql[0:len(sql)-1] + ")" return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil } func genUpdateSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) { var sql string var values []string sql += "update " + binlog.GetDbName() + "." + binlog.GetTableName() + " set " rows := binlog.GetRows() for _, row := range rows { sql += row.GetColumnName() + "=?," values = append(values, row.GetColumnValue()) } sql = sql[0:len(sql)-1] + " where 1=1 " for _, row := range binlog.GetPrimaryKey() { sql += " and " + row.GetColumnName() + " = ? " values = append(values, row.GetColumnValue()) } return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil } func genDeleteSQL(binlog *pbinlog.Binlog) (string, string, []interface{}, error) { var sql string var values []string sql += "delete from " + binlog.GetDbName() + "." + binlog.GetTableName() + " where 1=1 " for _, row := range binlog.GetPrimaryKey() { sql += " and " + row.GetColumnName() + " = ? 
" values = append(values, row.GetColumnValue()) } return sql, binlog.GetTableName() + genPKey(binlog.GetPrimaryKey()), stringToInterface(values), nil } func genDdlSQL(binlog *pbinlog.Binlog) ([]string, string, []interface{}, error) { var sqls []string empty := make([]interface{}, 0) rows := binlog.GetRows() for _, row := range rows { tmpSqls, ok, err := resolveDDLSQL(row.GetSql()) if err != nil { return sqls, "", empty, errors.Errorf("parse ddk sql: %v failed: %v", row.GetSql(), err) } if !ok { continue } for _, sql := range tmpSqls { //var sql string //if binlog.GetDbName() != "" { //sql += "use " + binlog.GetDbName() + ";" //} //sql += s + ";" sqls = append(sqls, sql) } } return sqls, "", empty, nil } func ignoreDDLError(err error) bool { mysqlErr, ok := errors.Cause(err).(*mysql.MySQLError) if !ok { return false } errCode := terror.ErrCode(mysqlErr.Number) switch errCode { case infoschema.ErrDatabaseExists.Code(), infoschema.ErrDatabaseNotExists.Code(), infoschema.ErrDatabaseDropExists.Code(), infoschema.ErrTableExists.Code(), infoschema.ErrTableNotExists.Code(), infoschema.ErrTableDropExists.Code(), infoschema.ErrColumnExists.Code(), infoschema.ErrColumnNotExists.Code(), infoschema.ErrIndexExists.Code(), tddl.ErrCantDropFieldOrKey.Code(): return true default: return false } } func isRetryableError(err error) bool { if err == driver.ErrBadConn { return true } var e error for { e = errors.Cause(err) if err == e { break } err = e } mysqlErr, ok := err.(*mysql.MySQLError) if ok { if mysqlErr.Number == tmysql.ErrUnknown { return true } return false } return true } func querySQL(db *sql.DB, query string) (*sql.Rows, error) { var ( err error rows *sql.Rows ) for i := 0; i < maxRetryCount; i++ { if i > 0 { log.Warnf("query sql retry %d - %s", i, query) time.Sleep(retryTimeout) } log.Debugf("[query][sql]%s", query) rows, err = db.Query(query) if err != nil { if !isRetryableError(err) { return rows, errors.Trace(err) } log.Warnf("[query][sql]%s[error]%v", query, err) 
continue } return rows, nil } if err != nil { log.Errorf("query sql[%s] failed %v", query, errors.ErrorStack(err)) return nil, errors.Trace(err) } return nil, errors.Errorf("query sql[%s] failed", query) } func executeSQL(db *sql.DB, sqls []string, args [][]interface{}, retry bool) error { if len(sqls) == 0 { return nil } var ( err error txn *sql.Tx ) retryCount := 1 if retry { retryCount = maxRetryCount } LOOP: for i := 0; i < retryCount; i++ { if i > 0 { log.Warnf("exec sql retry %d - %v - %v", i, sqls, args) time.Sleep(retryTimeout) } txn, err = db.Begin() if err != nil { log.Errorf("exec sqls[%v] begin failed %v", sqls, errors.ErrorStack(err)) continue } for i := range sqls { log.Debugf("[exec][sql]%s[args]%v", sqls[i], args[i]) _, err = txn.Exec(sqls[i], args[i]...) if err != nil { if !isRetryableError(err) { rerr := txn.Rollback() if rerr != nil { log.Errorf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], rerr) } break LOOP } log.Warnf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], err) rerr := txn.Rollback() if rerr != nil { log.Errorf("[exec][sql]%s[args]%v[error]%v", sqls[i], args[i], rerr) } continue LOOP } } err = txn.Commit() if err != nil { log.Errorf("exec sqls[%v] commit failed %v", sqls, errors.ErrorStack(err)) continue } return nil } if err != nil { log.Errorf("exec sqls[%v] failed %v", sqls, errors.ErrorStack(err)) return errors.Trace(err) } return errors.Errorf("exec sqls[%v] failed", sqls) } func createDB(cfg DBConfig) (*sql.DB, error) { dbDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8&interpolateParams=true", cfg.User, cfg.Password, cfg.Host, cfg.Port) db, err := sql.Open("mysql", dbDSN) if err != nil { return nil, errors.Trace(err) } return db, nil } func closeDB(db *sql.DB) error { if db == nil { return nil } return errors.Trace(db.Close()) } func createDBs(cfg DBConfig, count int) ([]*sql.DB, error) { dbs := make([]*sql.DB, 0, count) for i := 0; i < count; i++ { db, err := createDB(cfg) if err != nil { return nil, 
errors.Trace(err) } dbs = append(dbs, db) } return dbs, nil } func closeDBs(dbs ...*sql.DB) { for _, db := range dbs { err := closeDB(db) if err != nil { log.Errorf("close db failed - %v", err) } } } func parserDDLTableName(sql string) (TableName, error) { stmt, err := parser.New().ParseOneStmt(sql, "", "") if err != nil { return TableName{}, errors.Trace(err) } var res TableName switch v := stmt.(type) { case *ast.CreateDatabaseStmt: res = genTableName(v.Name, "") case *ast.DropDatabaseStmt: res = genTableName(v.Name, "") case *ast.CreateIndexStmt: res = genTableName(v.Table.Schema.L, v.Table.Name.L) case *ast.CreateTableStmt: res = genTableName(v.Table.Schema.L, v.Table.Name.L) case *ast.DropIndexStmt: res = genTableName(v.Table.Schema.L, v.Table.Name.L) case *ast.TruncateTableStmt: res = genTableName(v.Table.Schema.L, v.Table.Name.L) case *ast.DropTableStmt: if len(v.Tables) != 1 { return res, errors.Errorf("may resovle DDL sql failed") } res = genTableName(v.Tables[0].Schema.L, v.Tables[0].Name.L) default: return res, errors.Errorf("unkown DDL type") } return res, nil } func genTableName(schema string, table string) TableName { return TableName{Schema: schema, Name: table} } // resolveDDLSQL resolve to one ddl sql // example: drop table test.a,test2.b -> drop table test.a; drop table test2.b; func resolveDDLSQL(sql string) (sqls []string, ok bool, err error) { stmt, err := parser.New().ParseOneStmt(sql, "", "") if err != nil { log.Errorf("Parser SQL error: %s", sql) return nil, false, errors.Trace(err) } _, isDDL := stmt.(ast.DDLNode) if !isDDL { sqls = append(sqls, sql) return } switch v := stmt.(type) { case *ast.DropTableStmt: var ex string if v.IfExists { ex = "if exists" } for _, t := range v.Tables { var db string if t.Schema.O != "" { db = fmt.Sprintf("`%s`.", t.Schema.O) } s := fmt.Sprintf("drop table %s %s`%s`", ex, db, t.Name.O) sqls = append(sqls, s) } default: sqls = append(sqls, sql) } return sqls, true, nil }
cwen0/cdb-syncer
db.go
GO
apache-2.0
9,435
package com.basicalgorithms.coding_games; import java.util.HashSet; import java.util.Objects; import java.util.Scanner; import java.util.Set; /** * Original question: https://www.codingame.com/multiplayer/bot-programming/coders-strike-back */ public class CodersStrikeBack { static double longestDist = Integer.MIN_VALUE; static Point initialPoint = null; static boolean hasFinishedOneLap; static Point from = null; static Point lastCheckpoint = null; static final Set<Point> visitedCheckPoints = new HashSet<>(); static boolean hasBoosted = false; public static void main(String args[]) { Scanner in = new Scanner(System.in); // game loop while (true) { int x = in.nextInt(); int y = in.nextInt(); int nextCheckpointX = in.nextInt(); // x position of the next check point int nextCheckpointY = in.nextInt(); // y position of the next check point int nextCheckpointDist = in.nextInt(); // distance to the next checkpoint int nextCheckpointAngle = in.nextInt(); // angle between your pod orientation and the direction of the next checkpoint int opponentX = in.nextInt(); int opponentY = in.nextInt(); // Write an action using System.out.println() // To debug: System.err.println("Debug messages..."); // You have to output the target position // followed by the power (0 <= thrust <= 100) // i.e.: "x y thrust" final Point nextCheckpoint = new Point(nextCheckpointX, nextCheckpointY); final Point currentPosition = new Point(x, y); final Point enemyPosition = new Point(opponentX, opponentY); if (visitedCheckPoints.size() > 1 && enemyInRange(currentPosition, enemyPosition)) { ramEnemyShip(currentPosition, enemyPosition); } else { cruise(currentPosition, nextCheckpoint, nextCheckpointAngle); } if (!nextCheckpoint.equals(lastCheckpoint)) { from = lastCheckpoint; } lastCheckpoint = nextCheckpoint; } } private static void ramEnemyShip(final Point currentPosition, final Point enemyPosition) { sailToDestination((enemyPosition.x), enemyPosition.y, "100"); } private static boolean 
enemyInRange(final Point currentPosition, final Point enemyPosition) { return getDistant(currentPosition, enemyPosition) <= 1000; } private static void cruise( final Point currentPosition, final Point nextCheckpoint, final int nextCheckpointAngle) { if (initialPoint == null) { initialPoint = currentPosition; } int thrust = isWithinAngle(nextCheckpointAngle) ? 100 : 0; String power = String.valueOf(thrust); visitedCheckPoints.add(nextCheckpoint); System.err.println( "Checkpoint added:" + " nextCheckpointX=" + nextCheckpoint.x + ", nextCheckpointY=" + nextCheckpoint.y); for (final Point visitedCheckPoint : visitedCheckPoints) { System.err.println("Visited checkpoint: (" + visitedCheckPoint.x + ", " + visitedCheckPoint.y + ")"); } if (shouldSlowDown(currentPosition, nextCheckpoint)) { power = String.valueOf(35); } if (hasFinishedOneLap(nextCheckpoint) && isLongestDistant(from, nextCheckpoint) && isWithinSharpAngle(nextCheckpointAngle) && !hasBoosted) { power = "BOOST"; hasBoosted = true; System.err.println("Boosted!!!"); } sailToDestination(nextCheckpoint.x, nextCheckpoint.y, power); } private static boolean shouldSlowDown( final Point currentPosition, final Point nextCheckpoint) { return getDistant(currentPosition, nextCheckpoint) < 1000; } private static void sailToDestination(final int nextCheckpointX, final int nextCheckpointY, final String power) { System.out.println(nextCheckpointX + " " + nextCheckpointY + " " + power); System.err.println("Thrust:" + power); } private static boolean isWithinAngle(final int nextCheckpointAngle) { return -90 < nextCheckpointAngle && nextCheckpointAngle < 90; } private static boolean isWithinSharpAngle(final int nextCheckpointAngle) { return -15 < nextCheckpointAngle && nextCheckpointAngle < 15; } private static boolean hasFinishedOneLap(final Point point) { if (hasFinishedOneLap) { return true; } if (initialPoint == null) { return false; } hasFinishedOneLap = getDistant(initialPoint, point) <= 600; return hasFinishedOneLap; } 
private static boolean isLongestDistant(final Point from, final Point endPoint) { if (from == null) { return false; } System.err.println("Start Point: (" + from.x + ", " + from.y + "); End Point: (" + endPoint.x + ", " + endPoint.y + ") "); double dist = getDistant(from, endPoint); System.err.println("dist=" + dist + ", longestDist=" + longestDist); if (dist >= longestDist) { longestDist = dist; return true; } return false; } private static double getDistant(final Point from, final Point endPoint) { return Math.sqrt(Math.pow(from.x - endPoint.x, 2) + Math.pow(from.y - endPoint.y, 2)); } private static class Point { final int x; final int y; private Point(final int t1, final int t2) { this.x = t1; this.y = t2; } @Override public boolean equals(final Object o) { if (this == o) { return true; } if (!(o instanceof Point)) { return false; } final Point point = (Point) o; return x == point.x && y == point.y; } @Override public int hashCode() { return Objects.hash(x, y); } } }
Ericliu001/basic-algorithms
src/test/java/com/basicalgorithms/coding_games/CodersStrikeBack.java
Java
apache-2.0
6,186
'use strict'; var path = require('path'); var util = require('util'); module.exports = function(grunt) { grunt.registerMultiTask('vjslanguages', 'A Grunt plugin for compiling VideoJS language assets.', function() { var createLanguageFile = function(languageName, languageData, jsFilePath) { var jsTemplate = 'videojs.addLanguage("' + languageName + '",' + JSON.stringify(languageData,null,' ') + ');'; grunt.file.write(jsFilePath, jsTemplate); grunt.log.writeln('- [' + languageName +'] Language Built. File "' + jsFilePath + '" created.'); }; this.files.forEach(function(f) { var languageName, languageData, jsFilePath; // Multiple Files Case if(util.isArray(f.src)){ for(var i =0; i < f.src.length; i++) { languageName = path.basename(f.src[i], '.json'); languageData = grunt.file.readJSON(f.src[i]); jsFilePath = path.join(f.dest, languageName + '.js'); createLanguageFile(languageName, languageData, jsFilePath); } } // Singular File Case else { languageName = path.basename(f.src, '.json'); languageData = grunt.file.readJSON(f.src); jsFilePath = path.join(f.dest, languageName + '.js'); createLanguageFile(languageName, languageData, jsFilePath); } }); }); };
videojs/grunt-videojs-languages
tasks/videojs_languages.js
JavaScript
apache-2.0
1,338
<?php /** * Zend Framework * * LICENSE * * This source file is subject to the new BSD license that is bundled * with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://framework.zend.com/license/new-bsd * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@zend.com so we can send you a copy immediately. * * @category Zend * @package Zend_Filter * @copyright Copyright (c) 2005-2008 Zend Technologies USA Inc. (http://www.zend.com) * @license http://framework.zend.com/license/new-bsd New BSD License * @version $Id: BaseName.php 8064 2008-02-16 10:58:39Z thomas $ */ /** * @see Zend_Filter_Interface */ require_once 'Zend/Filter/Interface.php'; /** * @category Zend * @package Zend_Filter * @copyright Copyright (c) 2005-2008 Zend Technologies USA Inc. (http://www.zend.com) * @license http://framework.zend.com/license/new-bsd New BSD License */ class Zend_Filter_BaseName implements Zend_Filter_Interface { /** * Defined by Zend_Filter_Interface * * Returns basename($value) * * @param string $value * @return string */ public function filter($value) { return basename((string) $value); } }
ankuradhey/dealtrip
library/Zend/Filter/BaseName.php
PHP
apache-2.0
1,408
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ tests for catalog module """ import os import fabric.api from fabric.operations import _AttributeString from mock import patch from prestoadmin import catalog from prestoadmin.util import constants from prestoadmin.util.exception import ConfigurationError, \ ConfigFileNotFoundError from prestoadmin.standalone.config import PRESTO_STANDALONE_USER_GROUP from prestoadmin.util.local_config_util import get_catalog_directory from tests.unit.base_unit_case import BaseUnitCase class TestCatalog(BaseUnitCase): def setUp(self): super(TestCatalog, self).setUp(capture_output=True) @patch('prestoadmin.catalog.os.path.isfile') def test_add_not_exist(self, isfile_mock): isfile_mock.return_value = False self.assertRaisesRegexp(ConfigurationError, 'Configuration for catalog dummy not found', catalog.add, 'dummy') @patch('prestoadmin.catalog.validate') @patch('prestoadmin.catalog.deploy_files') @patch('prestoadmin.catalog.os.path.isfile') def test_add_exists(self, isfile_mock, deploy_mock, validate_mock): isfile_mock.return_value = True catalog.add('tpch') filenames = ['tpch.properties'] deploy_mock.assert_called_with(filenames, get_catalog_directory(), constants.REMOTE_CATALOG_DIR, PRESTO_STANDALONE_USER_GROUP) validate_mock.assert_called_with(filenames) @patch('prestoadmin.catalog.deploy_files') @patch('prestoadmin.catalog.os.path.isdir') @patch('prestoadmin.catalog.os.listdir') @patch('prestoadmin.catalog.validate') def 
test_add_all(self, mock_validate, listdir_mock, isdir_mock, deploy_mock): catalogs = ['tpch.properties', 'another.properties'] listdir_mock.return_value = catalogs catalog.add() deploy_mock.assert_called_with(catalogs, get_catalog_directory(), constants.REMOTE_CATALOG_DIR, PRESTO_STANDALONE_USER_GROUP) @patch('prestoadmin.catalog.deploy_files') @patch('prestoadmin.catalog.os.path.isdir') def test_add_all_fails_if_dir_not_there(self, isdir_mock, deploy_mock): isdir_mock.return_value = False self.assertRaisesRegexp(ConfigFileNotFoundError, r'Cannot add catalogs because directory .+' r' does not exist', catalog.add) self.assertFalse(deploy_mock.called) @patch('prestoadmin.catalog.sudo') @patch('prestoadmin.catalog.os.path.exists') @patch('prestoadmin.catalog.os.remove') def test_remove(self, local_rm_mock, exists_mock, sudo_mock): script = ('if [ -f /etc/presto/catalog/tpch.properties ] ; ' 'then rm /etc/presto/catalog/tpch.properties ; ' 'else echo "Could not remove catalog \'tpch\'. ' 'No such file \'/etc/presto/catalog/tpch.properties\'"; fi') exists_mock.return_value = True fabric.api.env.host = 'localhost' catalog.remove('tpch') sudo_mock.assert_called_with(script) local_rm_mock.assert_called_with(get_catalog_directory() + '/tpch.properties') @patch('prestoadmin.catalog.sudo') @patch('prestoadmin.catalog.os.path.exists') def test_remove_failure(self, exists_mock, sudo_mock): exists_mock.return_value = False fabric.api.env.host = 'localhost' out = _AttributeString() out.succeeded = False sudo_mock.return_value = out self.assertRaisesRegexp(SystemExit, '\\[localhost\\] Failed to remove catalog tpch.', catalog.remove, 'tpch') @patch('prestoadmin.catalog.sudo') @patch('prestoadmin.catalog.os.path.exists') def test_remove_no_such_file(self, exists_mock, sudo_mock): exists_mock.return_value = False fabric.api.env.host = 'localhost' error_msg = ('Could not remove catalog tpch: No such file ' + os.path.join(get_catalog_directory(), 'tpch.properties')) out = 
_AttributeString(error_msg) out.succeeded = True sudo_mock.return_value = out self.assertRaisesRegexp(SystemExit, '\\[localhost\\] %s' % error_msg, catalog.remove, 'tpch') @patch('prestoadmin.catalog.os.listdir') @patch('prestoadmin.catalog.os.path.isdir') def test_warning_if_connector_dir_empty(self, isdir_mock, listdir_mock): isdir_mock.return_value = True listdir_mock.return_value = [] catalog.add() self.assertEqual('\nWarning: Directory %s is empty. No catalogs will' ' be deployed\n\n' % get_catalog_directory(), self.test_stderr.getvalue()) @patch('prestoadmin.catalog.os.listdir') @patch('prestoadmin.catalog.os.path.isdir') def test_add_permission_denied(self, isdir_mock, listdir_mock): isdir_mock.return_value = True error_msg = ('Permission denied') listdir_mock.side_effect = OSError(13, error_msg) fabric.api.env.host = 'localhost' self.assertRaisesRegexp(SystemExit, '\[localhost\] %s' % error_msg, catalog.add) @patch('prestoadmin.catalog.os.remove') @patch('prestoadmin.catalog.remove_file') def test_remove_os_error(self, remove_file_mock, remove_mock): fabric.api.env.host = 'localhost' error = OSError(13, 'Permission denied') remove_mock.side_effect = error self.assertRaisesRegexp(OSError, 'Permission denied', catalog.remove, 'tpch') @patch('prestoadmin.catalog.secure_create_directory') @patch('prestoadmin.util.fabricapi.put') def test_deploy_files(self, put_mock, create_dir_mock): local_dir = '/my/local/dir' remote_dir = '/my/remote/dir' catalog.deploy_files(['a', 'b'], local_dir, remote_dir, PRESTO_STANDALONE_USER_GROUP) create_dir_mock.assert_called_with(remote_dir, PRESTO_STANDALONE_USER_GROUP) put_mock.assert_any_call('/my/local/dir/a', remote_dir, use_sudo=True, mode=0600) put_mock.assert_any_call('/my/local/dir/b', remote_dir, use_sudo=True, mode=0600) @patch('prestoadmin.catalog.os.path.isfile') @patch("__builtin__.open") def test_validate(self, open_mock, is_file_mock): is_file_mock.return_value = True file_obj = 
open_mock.return_value.__enter__.return_value file_obj.read.return_value = 'connector.noname=example' self.assertRaisesRegexp(ConfigurationError, 'Catalog configuration example.properties ' 'does not contain connector.name', catalog.add, 'example') @patch('prestoadmin.catalog.os.path.isfile') def test_validate_fail(self, is_file_mock): is_file_mock.return_value = True self.assertRaisesRegexp( SystemExit, 'Error validating ' + os.path.join(get_catalog_directory(), 'example.properties') + '\n\n' 'Underlying exception:\n No such file or directory', catalog.add, 'example') @patch('prestoadmin.catalog.get') @patch('prestoadmin.catalog.files.exists') @patch('prestoadmin.catalog.ensure_directory_exists') @patch('prestoadmin.catalog.os.path.exists') def test_gather_connectors(self, path_exists, ensure_dir_exists, files_exists, get_mock): fabric.api.env.host = 'any_host' path_exists.return_value = False files_exists.return_value = True catalog.gather_catalogs('local_config_dir') get_mock.assert_called_once_with( constants.REMOTE_CATALOG_DIR, 'local_config_dir/any_host/catalog', use_sudo=True) # if remote catalog dir does not exist get_mock.reset_mock() files_exists.return_value = False results = catalog.gather_catalogs('local_config_dir') self.assertEqual([], results) self.assertFalse(get_mock.called)
prestodb/presto-admin
tests/unit/test_catalog.py
Python
apache-2.0
9,157
$(document).ready(function(){ $("#inc_tab #tb1").removeClass(); $("#inc_tab #tb4").addClass("active"); $("#user_name").blur(function(){ var user_name = $.trim($(this).val()); $(this).val(user_name); if (user_name.length==0){ $(this).parent().find("#user_name_null_warn").show(); $(this).parent().find("#user_name_exist_warn").hide(); return; } $(this).parent().find("#user_name_null_warn").hide(); var user_id = $(this).parent().find("#user_id").val(); var obj = $(this).parent().find("#user_name_exist_warn"); $.post(app.global.variable.base_path +"user/name/verify", {user_id:user_id, user_name:user_name}, function(data) { if(data.toString().length > 0){ obj.show(); }else{ obj.hide(); } }) }) $('#user_save_cancel').click(function(){ window.location.href=app.global.variable.base_path +'user/list'; }) selectRoleChange(); }) function selectRoleChange(){ var obj = $("#select_role_id"); var role_id_obj = obj.parent().find("#role_id"); $("#role_authority_"+role_id_obj.val()).hide(); $("#role_authority_"+obj.val()).show(); role_id_obj.val(obj.val()); } function user_sava_check(){ var obj = $("#user_editor_form"); var valid = true; obj.find(".functionWarn").each(function(){ if($(this).is(":visible")){ valid = false; } }) // 用户名 var user_name = obj.find("#user_name").val(); if(isSpace(user_name)){ obj.find("#user_name_null_warn").show(); valid = false; }else{ obj.find("#user_name_null_warn").hide(); } return valid; }
wxiwei/manage
src/main/webapp/WEB-INF/js/user/userEditor.js
JavaScript
apache-2.0
1,536
package sample.multiversion; public interface Core { String getVersion(); String getDependencyVersion(); }
omacarena/only-short-poc
java.multiversion/v1/src/main/sample/multiversion/Core.java
Java
apache-2.0
117
package org.example; import org.camunda.bpm.spring.boot.starter.annotation.EnableProcessApplication; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; @SpringBootApplication @EnableProcessApplication("dynamic-tenant-designation") public class CamundaApplication { public static void main(String... args) { SpringApplication.run(CamundaApplication.class, args); } }
camunda/camunda-consulting
snippets/dynamic-tenant-designation/src/main/java/org/example/CamundaApplication.java
Java
apache-2.0
445
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "flag" "fmt" "io/ioutil" "os" "strings" ) var ( yamlPaths = flag.String("yaml", "", "comma-separated list of input YAML files") printText = flag.Bool("print-text", false, "print generated proto in text format to stdout") outputPath = flag.String("output", "", "output path to save generated protobuf data") ) func errExit(format string, a ...interface{}) { fmt.Fprintf(os.Stderr, format, a...) os.Exit(1) } func main() { flag.Parse() yamlFiles := strings.Split(*yamlPaths, ",") if len(yamlFiles) == 0 || yamlFiles[0] == "" { errExit("Must specify one or more YAML files with --yaml\n") } if !*printText && *outputPath == "" { errExit("Must set --print-text or --output\n") } if *printText && *outputPath != "" { errExit("Cannot set both --print-text and --output\n") } var c Config for _, file := range yamlFiles { b, err := ioutil.ReadFile(file) if err != nil { errExit("IO Error : Cannot Read File %s : %v\n", file, err) } if err = c.Update(b); err != nil { errExit("Error parsing file %s : %v\n", file, err) } } if *printText { if err := c.MarshalText(os.Stdout); err != nil { errExit("err printing proto: %v", err) } } else { b, err := c.MarshalBytes() if err != nil { errExit("err encoding proto: %v", err) } if err = ioutil.WriteFile(*outputPath, b, 0644); err != nil { errExit("IO Error : Cannot Write File %v\n", outputPath) } } }
krousey/test-infra
testgrid/cmd/config/main.go
GO
apache-2.0
2,010
package org.galaxy.myhttp; import org.junit.Test; import static org.junit.Assert.*; /** * To work on unit tests, switch the Test Artifact in the Build Variants view. */ public class ExampleUnitTest { @Test public void addition_isCorrect() throws Exception { assertEquals(4, 2 + 2); } }
galaxy-captain/MyHttp
app/src/test/java/org/galaxy/myhttp/ExampleUnitTest.java
Java
apache-2.0
310
require_relative '../netapp_cmode' Puppet::Type.type(:netapp_lun).provide(:cmode, :parent => Puppet::Provider::NetappCmode) do @doc = "Manage Netapp Lun creation, modification and deletion. [Family: vserver]" confine :feature => :posix defaultfor :feature => :posix netapp_commands :lunlist => {:api => 'lun-get-iter', :iter => true, :result_element => 'attributes-list'} netapp_commands :luncreate => 'lun-create-by-size' netapp_commands :lundestroy => 'lun-destroy' netapp_commands :lunresize => 'lun-resize' netapp_commands :lunonline => 'lun-online' netapp_commands :lunoffline => 'lun-offline' mk_resource_methods def self.instances Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Got to self.instances.") luns = [] #Get a list of all Lun's results = lunlist() || [] # Itterate through the results results.each do |lun| lun_path = lun.child_get_string('path') Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Processing lun #{lun_path}.") # Construct initial hash for lun lun_hash = { :name => lun_path, :ensure => :present } # Grab additional elements # Lun state - Need to map true/false to online/offline lun_state = lun.child_get_string('online') if lun_state == 'true' lun_hash[:state] = 'online' else lun_hash[:state] = 'offline' end # Get size lun_hash[:size] = lun.child_get_string('size') # Create the instance and add to luns array Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Creating instance for #{lun_path}\n Contents = #{lun_hash.inspect}.") luns << new(lun_hash) end # Return the final luns array Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Returning luns array.") luns end def self.prefetch(resources) Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Got to self.prefetch.") # Itterate instances and match provider where relevant. instances.each do |prov| Puppet.debug("Prov.path = #{resources[prov.name]}. 
") if resource = resources[prov.name] resource.provider = prov end end end def flush Puppet.debug("Puppet::Provider::Netapp_lun.cmode: flushing Netapp Lun #{@resource[:path]}.") # Are we updating or destroying? Puppet.debug("Puppet::Provider::Netapp_lun.cmode: required resource state = #{@property_hash[:ensure]}") if @property_hash[:ensure] == :absent Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Ensure is absent. Destroying...") # Deleting the lun lundestroy('path', @resource[:path]) Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Lun #{@resource[:path]} has been destroyed successfully. ") return true end end # Set lun size def size=(value) Puppet.debug("Puppet::Provider::Netapp_lun.cmode size=: Setting lun size for #{@resource[:path]} to #{@resource[:size]}.") force if @resource[:force] == nil force = false else force = @resource[:force] end # Resize the volume result = lunresize('force', force, 'path', @resource[:path], 'size', @resource[:size]) if result.results_status() != "failed" Puppet.debug("Puppet::Provider::Netapp_lun.cmode size=: Lun has been resized.") return true end end # Set lun state def state=(value) Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Setting lun state for #{@resource[:path]} to #{@resource[:state]}.") case @resource[:state] when :online Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Onlineing lun.") result = lunonline('path', @resource[:path]) Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Lun has been onlined.") return true when :offline Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Offlining lun.") result = lunoffline('path', @resource[:path]) Puppet.debug("Puppet::Provider::Netapp_lun.cmode state=: Lun has been offlined.") return true end end def create Puppet.debug("Puppet::Provider::Netapp_lun.cmode: creating Netapp Lun #{@resource[:path]}.") # Lun create args luncreate_args = [] luncreate_args << 'path' << @resource[:path] luncreate_args << 'size' << @resource[:size] luncreate_args 
<< 'class' << @resource[:lunclass] luncreate_args << 'ostype' << @resource[:ostype] luncreate_args << 'space-reservation-enabled' << @resource[:spaceresenabled] # Optional fields luncreate_args << 'prefix-size' << @resource[:prefixsize] unless @resource[:prefixsize].nil? luncreate_args << 'qos-policy-group' << @resource[:qospolicygroup] unless @resource[:qospolicygroup].nil? # Create the lun result = luncreate(*luncreate_args) # Lun created successfully Puppet.debug("Puppet::Provider::Netapp_lun.cmode: Lun #{@resource[:path]} created successfully.") return true end def destroy Puppet.debug("Puppet::Provider::Netapp_lun.cmode: destroying Netapp Lun #{@resource[:path]}.") @property_hash[:ensure] = :absent end def exists? Puppet.debug("Puppet::Provider::Netapp_lun.cmode: checking existance of Netapp Lun #{@resource[:path]}.") @property_hash[:ensure] == :present end end
puppetlabs/puppetlabs-netapp
lib/puppet/provider/netapp_lun/cmode.rb
Ruby
apache-2.0
5,287
// Code generated by go-swagger; DO NOT EDIT. package models // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" "github.com/go-openapi/swag" "github.com/go-openapi/validate" ) // SendPhotoLinkBody send photo link body // swagger:model SendPhotoLinkBody type SendPhotoLinkBody struct { // caption Caption string `json:"caption,omitempty"` // chat id // Required: true ChatID interface{} `json:"chat_id"` // disable notification DisableNotification bool `json:"disable_notification,omitempty"` // photo // Required: true Photo *string `json:"photo"` // reply markup ReplyMarkup interface{} `json:"reply_markup,omitempty"` // reply to message id ReplyToMessageID int64 `json:"reply_to_message_id,omitempty"` } // Validate validates this send photo link body func (m *SendPhotoLinkBody) Validate(formats strfmt.Registry) error { var res []error if err := m.validateChatID(formats); err != nil { // prop res = append(res, err) } if err := m.validatePhoto(formats); err != nil { // prop res = append(res, err) } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil } func (m *SendPhotoLinkBody) validateChatID(formats strfmt.Registry) error { return nil } func (m *SendPhotoLinkBody) validatePhoto(formats strfmt.Registry) error { if err := validate.Required("photo", "body", m.Photo); err != nil { return err } return nil } // MarshalBinary interface implementation func (m *SendPhotoLinkBody) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } return swag.WriteJSON(m) } // UnmarshalBinary interface implementation func (m *SendPhotoLinkBody) UnmarshalBinary(b []byte) error { var res SendPhotoLinkBody if err := swag.ReadJSON(b, &res); err != nil { return err } *m = res return nil }
olebedev/go-tgbot
models/send_photo_link_body.go
GO
apache-2.0
1,957
/** * Copyright (C) 2014-2015 LinkedIn Corp. (pinot-core@linkedin.com) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.linkedin.pinot.core.startree; import java.io.BufferedOutputStream; import java.io.DataOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.joda.time.DateTime; import org.json.JSONObject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Objects; import com.google.common.collect.BiMap; import com.google.common.collect.HashBiMap; import com.linkedin.pinot.common.data.DimensionFieldSpec; import com.linkedin.pinot.common.data.MetricFieldSpec; import com.linkedin.pinot.common.data.FieldSpec.DataType; import com.linkedin.pinot.common.data.Schema; import com.linkedin.pinot.common.utils.Pairs.IntPair; import com.linkedin.pinot.core.data.GenericRow; import com.linkedin.pinot.core.segment.creator.impl.V1Constants; /** * Uses file to build the star tree. Each row is divided into dimension and metrics. Time is added to dimension list. * We use the split order to build the tree. 
In most cases, split order will be ranked depending on the cardinality (descending order). * Time column will be excluded or last entry in split order irrespective of its cardinality * This is a recursive algorithm where we branch on one dimension at every level. * * <b>Psuedo algo</b> * <code> * * build(){ * let table(1,N) consists of N input rows * table.sort(1,N) //sort the table on all dimensions, according to split order * constructTree(table, 0, N, 0); * } * constructTree(table,start,end, level){ * splitDimensionName = dimensionsSplitOrder[level] * groupByResult<dimName, length> = table.groupBy(dimensionsSplitOrder[level]); //returns the number of rows for each value in splitDimension * int rangeStart = 0; * for each ( entry<dimName,length> groupByResult){ * if(entry.length > minThreshold){ * constructTree(table, rangeStart, rangeStart + entry.length, level +1); * } * rangeStart = rangeStart + entry.length; * updateStarTree() //add new child * } * * //create a star tree node * * aggregatedRows = table.uniqueAfterRemovingAttributeAndAggregateMetrics(start,end, splitDimensionName); * for(each row in aggregatedRows_ * table.add(row); * if(aggregateRows.size > minThreshold) { * table.sort(end, end + aggregatedRows.size); * constructStarTree(table, end, end + aggregatedRows.size, level +1); * } * } * </code> */ public class OffHeapStarTreeBuilder implements StarTreeBuilder { private static final Logger LOG = LoggerFactory.getLogger(OffHeapStarTreeBuilder.class); File dataFile; private DataOutputStream dataBuffer; int rawRecordCount = 0; int aggRecordCount = 0; private List<String> dimensionsSplitOrder; private Set<String> skipStarNodeCreationForDimensions; private Set<String> skipMaterializationForDimensions; private int maxLeafRecords; private StarTree starTree; private StarTreeIndexNode starTreeRootIndexNode; private int numDimensions; private int numMetrics; private List<String> dimensionNames; private List<String> metricNames; private String timeColumnName; 
private List<DataType> dimensionTypes; private List<DataType> metricTypes; private Map<String, Object> dimensionNameToStarValueMap; private HashBiMap<String, Integer> dimensionNameToIndexMap; private Map<String, Integer> metricNameToIndexMap; private int dimensionSizeBytes; private int metricSizeBytes; private File outDir; private Map<String, HashBiMap<Object, Integer>> dictionaryMap; boolean debugMode = false; private int[] sortOrder; private int skipMaterializationCardinalityThreshold; public void init(StarTreeBuilderConfig builderConfig) throws Exception { Schema schema = builderConfig.schema; timeColumnName = schema.getTimeColumnName(); this.dimensionsSplitOrder = builderConfig.dimensionsSplitOrder; skipStarNodeCreationForDimensions = builderConfig.getSkipStarNodeCreationForDimensions(); skipMaterializationForDimensions = builderConfig.getSkipMaterializationForDimensions(); skipMaterializationCardinalityThreshold = builderConfig.getSkipMaterializationCardinalityThreshold(); this.maxLeafRecords = builderConfig.maxLeafRecords; this.outDir = builderConfig.getOutDir(); if (outDir == null) { outDir = new File(System.getProperty("java.io.tmpdir"), V1Constants.STAR_TREE_INDEX_DIR + "_" + DateTime.now()); } LOG.debug("Index output directory:{}", outDir); dimensionTypes = new ArrayList<>(); dimensionNames = new ArrayList<>(); dimensionNameToIndexMap = HashBiMap.create(); dimensionNameToStarValueMap = new HashMap<>(); dictionaryMap = new HashMap<>(); //READ DIMENSIONS COLUMNS List<DimensionFieldSpec> dimensionFieldSpecs = schema.getDimensionFieldSpecs(); for (int index = 0; index < dimensionFieldSpecs.size(); index++) { DimensionFieldSpec spec = dimensionFieldSpecs.get(index); String dimensionName = spec.getName(); dimensionNames.add(dimensionName); dimensionNameToIndexMap.put(dimensionName, index); Object starValue; starValue = getAllStarValue(spec); dimensionNameToStarValueMap.put(dimensionName, starValue); dimensionTypes.add(spec.getDataType()); HashBiMap<Object, 
Integer> dictionary = HashBiMap.create(); dictionaryMap.put(dimensionName, dictionary); } //treat time column as just another dimension, only difference is that we will never split on this dimension unless explicitly specified in split order if (timeColumnName != null) { dimensionNames.add(timeColumnName); dimensionTypes.add(schema.getTimeFieldSpec().getDataType()); int index = dimensionNameToIndexMap.size(); dimensionNameToIndexMap.put(timeColumnName, index); HashBiMap<Object, Integer> dictionary = HashBiMap.create(); dictionaryMap.put(schema.getTimeColumnName(), dictionary); } dimensionSizeBytes = dimensionNames.size() * Integer.SIZE / 8; this.numDimensions = dimensionNames.size(); //READ METRIC COLUMNS this.metricTypes = new ArrayList<>(); this.metricNames = new ArrayList<>(); this.metricNameToIndexMap = new HashMap<>(); this.metricSizeBytes = 0; List<MetricFieldSpec> metricFieldSpecs = schema.getMetricFieldSpecs(); for (int index = 0; index < metricFieldSpecs.size(); index++) { MetricFieldSpec spec = metricFieldSpecs.get(index); String metricName = spec.getName(); metricNames.add(metricName); metricNameToIndexMap.put(metricName, index); DataType dataType = spec.getDataType(); metricTypes.add(dataType); metricSizeBytes += dataType.size(); } this.numMetrics = metricNames.size(); builderConfig.getOutDir().mkdirs(); dataFile = new File(outDir, "star-tree.buf"); dataBuffer = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile))); //INITIALIZE THE ROOT NODE this.starTreeRootIndexNode = new StarTreeIndexNode(); this.starTreeRootIndexNode.setDimensionName(StarTreeIndexNode.all()); this.starTreeRootIndexNode.setDimensionValue(StarTreeIndexNode.all()); this.starTreeRootIndexNode.setLevel(0); LOG.debug("dimensionNames:{}", dimensionNames); LOG.debug("metricNames:{}", metricNames); } /** * Validate the split order by removing any dimensions that may be part of the skip materialization list. 
* @param dimensionsSplitOrder * @param skipMaterializationForDimensions * @return */ private List<String> sanitizeSplitOrder(List<String> dimensionsSplitOrder, Set<String> skipMaterializationForDimensions) { List<String> validatedSplitOrder = new ArrayList<String>(); for (String dimension : dimensionsSplitOrder) { if (skipMaterializationForDimensions == null || !skipMaterializationForDimensions.contains(dimension)) { LOG.info("Adding dimension {} to split order", dimension); validatedSplitOrder.add(dimension); } else { LOG.info( "Dimension {} cannot be part of 'dimensionSplitOrder' and 'skipMaterializationForDimensions', removing it from split order", dimension); } } return validatedSplitOrder; } private Object getAllStarValue(DimensionFieldSpec spec) throws Exception { switch (spec.getDataType()) { case STRING: return "ALL"; case BOOLEAN: case BYTE: case CHAR: case DOUBLE: case FLOAT: case INT: case LONG: return spec.getDefaultNullValue(); case OBJECT: case SHORT: case DOUBLE_ARRAY: case CHAR_ARRAY: case FLOAT_ARRAY: case INT_ARRAY: case LONG_ARRAY: case SHORT_ARRAY: case STRING_ARRAY: case BYTE_ARRAY: default: throw new Exception("Unsupported dimension data type" + spec); } } public GenericRow toGenericRow(DimensionBuffer dimensionKey, MetricBuffer metricsHolder) { GenericRow row = new GenericRow(); Map<String, Object> map = new HashMap<>(); for (int i = 0; i < dimensionNames.size(); i++) { String dimName = dimensionNames.get(i); BiMap<Integer, Object> inverseDictionary = dictionaryMap.get(dimName).inverse(); Object dimValue = inverseDictionary.get(dimensionKey.getDimension(i)); if (dimValue == null) { dimValue = dimensionNameToStarValueMap.get(dimName); } map.put(dimName, dimValue); } for (int i = 0; i < numMetrics; i++) { String metName = metricNames.get(i); map.put(metName, metricsHolder.get(i)); } row.init(map); return row; } public void append(GenericRow row) throws Exception { DimensionBuffer dimension = new DimensionBuffer(numDimensions); for (int i = 0; i 
< dimensionNames.size(); i++) { String dimName = dimensionNames.get(i); Map<Object, Integer> dictionary = dictionaryMap.get(dimName); Object dimValue = row.getValue(dimName); if (dimValue == null) { //TODO: Have another default value to represent STAR. Using default value to represent STAR as of now. //It does not matter during query execution, since we know that values is STAR from the star tree dimValue = dimensionNameToStarValueMap.get(dimName); } if (!dictionary.containsKey(dimValue)) { dictionary.put(dimValue, dictionary.size()); } dimension.setDimension(i, dictionary.get(dimValue)); } Number[] numbers = new Number[numMetrics]; for (int i = 0; i < numMetrics; i++) { String metName = metricNames.get(i); numbers[i] = (Number) row.getValue(metName); } MetricBuffer metrics = new MetricBuffer(numbers); append(dimension, metrics); } public void append(DimensionBuffer dimension, MetricBuffer metrics) throws Exception { appendToRawBuffer(dimension, metrics); } private void appendToRawBuffer(DimensionBuffer dimension, MetricBuffer metrics) throws IOException { appendToBuffer(dataBuffer, dimension, metrics); rawRecordCount++; } private void appendToAggBuffer(DimensionBuffer dimension, MetricBuffer metrics) throws IOException { appendToBuffer(dataBuffer, dimension, metrics); aggRecordCount++; } private void appendToBuffer(DataOutputStream dos, DimensionBuffer dimensions, MetricBuffer metricHolder) throws IOException { for (int i = 0; i < numDimensions; i++) { dos.writeInt(dimensions.getDimension(i)); } dos.write(metricHolder.toBytes(metricSizeBytes, metricTypes)); } public void build() throws Exception { if (skipMaterializationForDimensions == null || skipMaterializationForDimensions.isEmpty()) { skipMaterializationForDimensions = computeDefaultDimensionsToSkipMaterialization(); } if (dimensionsSplitOrder == null || dimensionsSplitOrder.isEmpty()) { dimensionsSplitOrder = computeDefaultSplitOrder(); } // Remove any dimensions from split order that would be not be 
materialized. dimensionsSplitOrder = sanitizeSplitOrder(dimensionsSplitOrder, skipMaterializationForDimensions); LOG.debug("Split order:{}", dimensionsSplitOrder); long start = System.currentTimeMillis(); dataBuffer.flush(); sort(dataFile, 0, rawRecordCount); constructStarTree(starTreeRootIndexNode, 0, rawRecordCount, 0, dataFile); long end = System.currentTimeMillis(); LOG.debug("Took {} ms to build star tree index. Original records:{} Materialized record:{}", (end - start), rawRecordCount, aggRecordCount); starTree = new StarTree(starTreeRootIndexNode, dimensionNameToIndexMap); File treeBinary = new File(outDir, "star-tree.bin"); LOG.debug("Saving tree binary at: {} ", treeBinary); starTree.writeTree(new BufferedOutputStream(new FileOutputStream(treeBinary))); printTree(starTreeRootIndexNode, 0); LOG.debug("Finished build tree. out dir: {} ", outDir); dataBuffer.close(); } private void printTree(StarTreeIndexNode node, int level) { for (int i = 0; i < level; i++) { LOG.debug(" "); } BiMap<Integer, String> inverse = dimensionNameToIndexMap.inverse(); String dimName = "ALL"; Object dimValue = "ALL"; if (node.getDimensionName() != StarTreeIndexNode.all()) { dimName = inverse.get(node.getDimensionName()); } if (node.getDimensionValue() != StarTreeIndexNode.all()) { dimValue = dictionaryMap.get(dimName).inverse().get(node.getDimensionValue()); } String formattedOutput = Objects.toStringHelper(node).add("nodeId", node.getNodeId()).add("level", level).add("dimensionName", dimName) .add("dimensionValue", dimValue).add("childDimensionName", inverse.get(node.getChildDimensionName())) .add("childCount", node.getChildren() == null ? 
0 : node.getChildren().size()) .add("startDocumentId", node.getStartDocumentId()).add("endDocumentId", node.getEndDocumentId()) .add("documentCount", (node.getEndDocumentId() - node.getStartDocumentId())).toString(); LOG.debug(formattedOutput); if (!node.isLeaf()) { for (StarTreeIndexNode child : node.getChildren().values()) { printTree(child, level + 1); } } } private List<String> computeDefaultSplitOrder() { ArrayList<String> defaultSplitOrder = new ArrayList<>(); //include only the dimensions not time column. Also, assumes that skipMaterializationForDimensions is built. for (String dimensionName : dimensionNames) { if (skipMaterializationForDimensions != null && !skipMaterializationForDimensions.contains(dimensionName)) { defaultSplitOrder.add(dimensionName); } } if (timeColumnName != null) { defaultSplitOrder.remove(timeColumnName); } Collections.sort(defaultSplitOrder, new Comparator<String>() { @Override public int compare(String o1, String o2) { return dictionaryMap.get(o2).size() - dictionaryMap.get(o1).size(); //descending } }); return defaultSplitOrder; } private Set<String> computeDefaultDimensionsToSkipMaterialization() { Set<String> skipDimensions = new HashSet<String>(); for (String dimensionName : dimensionNames) { if (dictionaryMap.get(dimensionName).size() > skipMaterializationCardinalityThreshold) { skipDimensions.add(dimensionName); } } return skipDimensions; } /* * Sorts the file on all dimensions */ private void sort(File file, int startDocId, int endDocId) throws IOException { if (debugMode) { LOG.info("BEFORE SORTING"); printFile(file, startDocId, endDocId); } StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder()); dataSorter.sort(startDocId, endDocId, 0, dimensionSizeBytes); if (debugMode) { LOG.info("AFTER SORTING"); printFile(file, startDocId, endDocId); } } private int[] getSortOrder() { if (sortOrder == null) { sortOrder = new int[dimensionNames.size()]; for (int i = 0; i < 
dimensionsSplitOrder.size(); i++) { sortOrder[i] = dimensionNameToIndexMap.get(dimensionsSplitOrder.get(i)); } //add remaining dimensions that were not part of dimensionsSplitOrder int counter = 0; for (String dimName : dimensionNames) { if (!dimensionsSplitOrder.contains(dimName)) { sortOrder[dimensionsSplitOrder.size() + counter] = dimensionNameToIndexMap.get(dimName); counter = counter + 1; } } } return sortOrder; } private void printFile(File file, int startDocId, int endDocId) throws IOException { LOG.info("Contents of file:{} from:{} to:{}", file.getName(), startDocId, endDocId); StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder()); Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(startDocId, endDocId); int numRecordsToPrint = 100; int counter = 0; while (iterator.hasNext()) { Pair<byte[], byte[]> next = iterator.next(); LOG.info("{}, {}", DimensionBuffer.fromBytes(next.getLeft()), MetricBuffer.fromBytes(next.getRight(), metricTypes)); if (counter++ == numRecordsToPrint) { break; } } } private int constructStarTree(StarTreeIndexNode node, int startDocId, int endDocId, int level, File file) throws Exception { //node.setStartDocumentId(startDocId); int docsAdded = 0; if (level == dimensionsSplitOrder.size() - 1) { return 0; } String splitDimensionName = dimensionsSplitOrder.get(level); Integer splitDimensionId = dimensionNameToIndexMap.get(splitDimensionName); LOG.debug("Building tree at level:{} using file:{} from startDoc:{} endDocId:{} splitting on dimension:{}", level, file.getName(), startDocId, endDocId, splitDimensionName); Map<Integer, IntPair> sortGroupBy = groupBy(startDocId, endDocId, splitDimensionId, file); LOG.debug("Group stats:{}", sortGroupBy); node.setChildDimensionName(splitDimensionId); node.setChildren(new HashMap<Integer, StarTreeIndexNode>()); for (int childDimensionValue : sortGroupBy.keySet()) { StarTreeIndexNode child = new StarTreeIndexNode(); 
child.setDimensionName(splitDimensionId); child.setDimensionValue(childDimensionValue); child.setParent(node); child.setLevel(node.getLevel() + 1); // n.b. We will number the nodes later using BFS after fully split // Add child to parent node.getChildren().put(childDimensionValue, child); int childDocs = 0; IntPair range = sortGroupBy.get(childDimensionValue); if (range.getRight() - range.getLeft() > maxLeafRecords) { childDocs = constructStarTree(child, range.getLeft(), range.getRight(), level + 1, file); docsAdded += childDocs; } // Either range <= maxLeafRecords, or we did not split further (last level). if (childDocs == 0) { child.setStartDocumentId(range.getLeft()); child.setEndDocumentId(range.getRight()); } } // Return if star node does not need to be created. if (skipStarNodeCreationForDimensions != null && skipStarNodeCreationForDimensions.contains(splitDimensionName)) { return docsAdded; } //create star node StarTreeIndexNode starChild = new StarTreeIndexNode(); starChild.setDimensionName(splitDimensionId); starChild.setDimensionValue(StarTreeIndexNode.all()); starChild.setParent(node); starChild.setLevel(node.getLevel() + 1); // n.b. 
We will number the nodes later using BFS after fully split // Add child to parent node.getChildren().put(StarTreeIndexNode.all(), starChild); Iterator<Pair<DimensionBuffer, MetricBuffer>> iterator = uniqueCombinations(startDocId, endDocId, file, splitDimensionId); int rowsAdded = 0; int startOffset = rawRecordCount + aggRecordCount; while (iterator.hasNext()) { Pair<DimensionBuffer, MetricBuffer> next = iterator.next(); DimensionBuffer dimension = next.getLeft(); MetricBuffer metricsHolder = next.getRight(); LOG.debug("Adding row:{}", dimension); appendToAggBuffer(dimension, metricsHolder); rowsAdded++; } docsAdded += rowsAdded; LOG.debug("Added {} additional records at level {}", rowsAdded, level); //flush dataBuffer.flush(); int childDocs = 0; if (rowsAdded >= maxLeafRecords) { sort(dataFile, startOffset, startOffset + rowsAdded); childDocs = constructStarTree(starChild, startOffset, startOffset + rowsAdded, level + 1, dataFile); docsAdded += childDocs; } // Either rowsAdded < maxLeafRecords, or we did not split further (last level). if (childDocs == 0) { starChild.setStartDocumentId(startOffset); starChild.setEndDocumentId(startOffset + rowsAdded); } //node.setEndDocumentId(endDocId + docsAdded); return docsAdded; } /** * Assumes the file is already sorted, returns the unique combinations after removing a specified dimension. 
* Aggregates the metrics for each unique combination, currently only sum is supported by default * @param startDocId * @param endDocId * @param file * @param splitDimensionId * @return * @throws Exception */ private Iterator<Pair<DimensionBuffer, MetricBuffer>> uniqueCombinations(int startDocId, int endDocId, File file, int splitDimensionId) throws Exception { StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder()); Iterator<Pair<byte[], byte[]>> iterator1 = dataSorter.iterator(startDocId, endDocId); File tempFile = new File(outDir, file.getName() + "_" + startDocId + "_" + endDocId + ".unique.tmp"); DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(tempFile))); while (iterator1.hasNext()) { Pair<byte[], byte[]> next = iterator1.next(); byte[] dimensionBuffer = next.getLeft(); byte[] metricBuffer = next.getRight(); DimensionBuffer dimensions = DimensionBuffer.fromBytes(dimensionBuffer); for (int i = 0; i < numDimensions; i++) { String dimensionName = dimensionNameToIndexMap.inverse().get(i); if (i == splitDimensionId || (skipMaterializationForDimensions != null && skipMaterializationForDimensions.contains(dimensionName))) { dos.writeInt(StarTreeIndexNode.all()); } else { dos.writeInt(dimensions.getDimension(i)); } } dos.write(metricBuffer); } dos.close(); dataSorter = new StarTreeDataTable(tempFile, dimensionSizeBytes, metricSizeBytes, getSortOrder()); dataSorter.sort(0, endDocId - startDocId); if (debugMode) { printFile(tempFile, 0, endDocId - startDocId); } final Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(0, endDocId - startDocId); return new Iterator<Pair<DimensionBuffer, MetricBuffer>>() { Pair<DimensionBuffer, MetricBuffer> prev = null; boolean done = false; @Override public void remove() { throw new UnsupportedOperationException(); } @Override public boolean hasNext() { return !done; } @Override public Pair<DimensionBuffer, MetricBuffer> next() { 
while (iterator.hasNext()) { Pair<byte[], byte[]> next = iterator.next(); byte[] dimBuffer = next.getLeft(); byte[] metricBuffer = next.getRight(); if (prev == null) { prev = Pair.of(DimensionBuffer.fromBytes(dimBuffer), MetricBuffer.fromBytes(metricBuffer, metricTypes)); } else { Pair<DimensionBuffer, MetricBuffer> current = Pair.of(DimensionBuffer.fromBytes(dimBuffer), MetricBuffer.fromBytes(metricBuffer, metricTypes)); if (!current.getLeft().equals(prev.getLeft())) { Pair<DimensionBuffer, MetricBuffer> ret = prev; prev = current; LOG.debug("Returning unique {}", prev.getLeft()); return ret; } else { prev.getRight().aggregate(current.getRight(), metricTypes); } } } done = true; LOG.debug("Returning unique {}", prev.getLeft()); return prev; } }; } /** * sorts the file from start to end on a dimension index * @param startDocId * @param endDocId * @param dimension * @param file * @return */ private Map<Integer, IntPair> groupBy(int startDocId, int endDocId, Integer dimension, File file) { StarTreeDataTable dataSorter = new StarTreeDataTable(file, dimensionSizeBytes, metricSizeBytes, getSortOrder()); return dataSorter.groupByIntColumnCount(startDocId, endDocId, dimension); } /** * Iterator to iterate over the records from startDocId to endDocId */ @Override public Iterator<GenericRow> iterator(final int startDocId, final int endDocId) throws Exception { StarTreeDataTable dataSorter = new StarTreeDataTable(dataFile, dimensionSizeBytes, metricSizeBytes, getSortOrder()); final Iterator<Pair<byte[], byte[]>> iterator = dataSorter.iterator(startDocId, endDocId); return new Iterator<GenericRow>() { @Override public boolean hasNext() { return iterator.hasNext(); } @Override public void remove() { throw new UnsupportedOperationException(); } @Override public GenericRow next() { Pair<byte[], byte[]> pair = iterator.next(); DimensionBuffer dimensionKey = DimensionBuffer.fromBytes(pair.getLeft()); MetricBuffer metricsHolder = MetricBuffer.fromBytes(pair.getRight(), 
metricTypes); return toGenericRow(dimensionKey, metricsHolder); } }; } public JSONObject getStarTreeAsJSON() throws Exception { JSONObject json = new JSONObject(); toJson(json, starTreeRootIndexNode, dictionaryMap); return json; } private void toJson(JSONObject json, StarTreeIndexNode node, Map<String, HashBiMap<Object, Integer>> dictionaryMap) throws Exception { String dimName = "ALL"; Object dimValue = "ALL"; if (node.getDimensionName() != StarTreeIndexNode.all()) { dimName = dimensionNames.get(node.getDimensionName()); } if (node.getDimensionValue() != StarTreeIndexNode.all()) { dimValue = dictionaryMap.get(dimName).inverse().get(node.getDimensionValue()); } json.put("title", dimName + ":" + dimValue); if (node.getChildren() != null) { JSONObject[] childJsons = new JSONObject[node.getChildren().size()]; int index = 0; for (Integer child : node.getChildren().keySet()) { StarTreeIndexNode childNode = node.getChildren().get(child); JSONObject childJson = new JSONObject(); toJson(childJson, childNode, dictionaryMap); childJsons[index++] = childJson; } json.put("nodes", childJsons); } } @Override public void cleanup() { if (outDir != null) { FileUtils.deleteQuietly(outDir); } } @Override public StarTree getTree() { return starTree; } @Override public int getTotalRawDocumentCount() { return rawRecordCount; } @Override public int getTotalAggregateDocumentCount() { return aggRecordCount; } @Override public int getMaxLeafRecords() { return maxLeafRecords; } @Override public List<String> getDimensionsSplitOrder() { return dimensionsSplitOrder; } public Map<String, HashBiMap<Object, Integer>> getDictionaryMap() { return dictionaryMap; } public HashBiMap<String, Integer> getDimensionNameToIndexMap() { return dimensionNameToIndexMap; } @Override public Set<String> getSkipMaterializationForDimensions() { return skipMaterializationForDimensions; } }
tkao1000/pinot
pinot-core/src/main/java/com/linkedin/pinot/core/startree/OffHeapStarTreeBuilder.java
Java
apache-2.0
28,902
/* * Copyright 2011 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.gradle.plugins.signing; import com.google.common.base.Function; import groovy.lang.Closure; import org.gradle.api.artifacts.PublishArtifact; import org.gradle.api.file.FileCollection; import org.gradle.api.internal.file.collections.ImmutableFileCollection; import org.gradle.plugins.signing.signatory.Signatory; import org.gradle.plugins.signing.type.SignatureType; import org.gradle.util.ConfigureUtil; import java.io.File; import java.util.ArrayList; import java.util.List; /** * A sign operation creates digital signatures for one or more files or {@link PublishArtifact publish artifacts}. * * <p>The external representation of the signature is specified by the {@link #getSignatureType() signature type property}, while the {@link #signatory} property specifies who is to sign. <p> A sign * operation manages one or more {@link Signature} objects. The {@code sign} methods are used to register things to generate signatures for. The {@link #execute()} method generates the signatures for * all of the registered items at that time. */ abstract public class SignOperation implements SignatureSpec { /** * The file representation of the signature(s). */ private SignatureType signatureType; /** * The signatory to the generated digital signatures. */ private Signatory signatory; /** * Whether or not it is required that this signature be generated. 
*/
    private boolean required;

    // Accumulates every signature registered via the sign(...) overloads, in
    // registration order; generated by execute().
    private final List<Signature> signatures = new ArrayList<Signature>();

    public String getDisplayName() {
        return "SignOperation";
    }

    @Override
    public String toString() {
        return getDisplayName();
    }

    @Override
    public void setSignatureType(SignatureType signatureType) {
        this.signatureType = signatureType;
    }

    @Override
    public SignatureType getSignatureType() {
        return signatureType;
    }

    @Override
    public void setSignatory(Signatory signatory) {
        this.signatory = signatory;
    }

    @Override
    public Signatory getSignatory() {
        return signatory;
    }

    @Override
    public void setRequired(boolean required) {
        this.required = required;
    }

    @Override
    public boolean isRequired() {
        return required;
    }

    /**
     * Registers signatures for the given artifacts.
     *
     * @return this
     * @see Signature#Signature(PublishArtifact, SignatureSpec, Object...)
     */
    public SignOperation sign(PublishArtifact... artifacts) {
        for (PublishArtifact artifact : artifacts) {
            signatures.add(new Signature(artifact, this));
        }
        return this;
    }

    /**
     * Registers signatures for the given files.
     *
     * @return this
     * @see Signature#Signature(File, SignatureSpec, Object...)
     */
    public SignOperation sign(File... files) {
        for (File file : files) {
            signatures.add(new Signature(file, this));
        }
        return this;
    }

    /**
     * Registers signatures (with the given classifier) for the given files.
     *
     * @return this
     * @see Signature#Signature(File, String, SignatureSpec, Object...)
     */
    public SignOperation sign(String classifier, File... files) {
        for (File file : files) {
            signatures.add(new Signature(file, classifier, this));
        }
        return this;
    }

    /**
     * Change the signature type for signature generation.
     *
     * @return this
     */
    public SignOperation signatureType(SignatureType type) {
        this.signatureType = type;
        return this;
    }

    /**
     * Change the signatory for signature generation.
     *
     * @return this
     */
    public SignOperation signatory(Signatory signatory) {
        this.signatory = signatory;
        return this;
    }

    /**
     * Executes the given closure against this object.
     *
     * @return this
     */
    public SignOperation configure(Closure closure) {
        ConfigureUtil.configureSelf(closure, this);
        return this;
    }

    /**
     * Generates actual signature files for all of the registered signatures.
     *
     * <p>The signatures are generated with the configuration they have at this time, which includes the signature type
     * and signatory of this operation at this time.
     *
     * <p>This method can be called multiple times, with the signatures being generated with their current configuration
     * each time.
     *
     * @return this
     * @see Signature#generate()
     */
    public SignOperation execute() {
        for (Signature signature : signatures) {
            signature.generate();
        }
        return this;
    }

    /**
     * The registered signatures (defensive copy; mutating the returned list does not affect this operation).
     */
    public List<Signature> getSignatures() {
        return new ArrayList<Signature>(signatures);
    }

    /**
     * Returns the single registered signature.
     *
     * @return The signature.
     * @throws IllegalStateException if there is not exactly one registered signature.
     */
    public Signature getSingleSignature() {
        final int size = signatures.size();
        switch (size) {
            case 1:
                return signatures.get(0);
            case 0:
                throw new IllegalStateException("Expected operation to contain exactly one signature, however, it contains no signatures.");
            default:
                throw new IllegalStateException("Expected operation to contain exactly one signature, however, it contains " + String.valueOf(size) + " signatures.");
        }
    }

    /**
     * All of the files that will be signed by this operation.
     */
    public FileCollection getFilesToSign() {
        return newSignatureFileCollection(new Function<Signature, File>() {
            @Override
            public File apply(Signature input) {
                return input.getToSign();
            }
        });
    }

    /**
     * All of the signature files that will be generated by this operation.
     */
    public FileCollection getSignatureFiles() {
        return newSignatureFileCollection(new Function<Signature, File>() {
            @Override
            public File apply(Signature input) {
                return input.getFile();
            }
        });
    }

    // Builds an immutable file collection from one file per signature,
    // extracted by the given accessor.
    private FileCollection newSignatureFileCollection(Function<Signature, File> getFile) {
        return ImmutableFileCollection.of(collectSignatureFiles(getFile));
    }

    // Collects the non-null files produced by the accessor; signatures whose
    // accessor yields null are skipped.
    private ArrayList<File> collectSignatureFiles(Function<Signature, File> getFile) {
        ArrayList<File> files = new ArrayList<File>(signatures.size());
        for (Signature signature : signatures) {
            File file = getFile.apply(signature);
            if (file != null) {
                files.add(file);
            }
        }
        return files;
    }
}
robinverduijn/gradle
subprojects/signing/src/main/java/org/gradle/plugins/signing/SignOperation.java
Java
apache-2.0
7,371
/*
 * Copyright 2013-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.cloudfoundry.client.v2.spaces;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import org.cloudfoundry.Nullable;
import org.immutables.value.Value;

import java.util.List;

/**
 * The request payload for the Update a Space operation.
 *
 * <p>All fields except the space id are optional; only the non-null ones are
 * serialized into the request body. The space id is excluded from the JSON
 * payload via {@code @JsonIgnore} (it is part of the request URI instead).
 */
@JsonSerialize
@Value.Immutable
abstract class _UpdateSpaceRequest {

    /**
     * Allow SSH
     */
    @JsonProperty("allow_ssh")
    @Nullable
    abstract Boolean getAllowSsh();

    /**
     * The auditor ids (serialized as {@code auditor_guids})
     */
    @JsonProperty("auditor_guids")
    @Nullable
    abstract List<String> getAuditorIds();

    /**
     * The developer ids (serialized as {@code developer_guids})
     */
    @JsonProperty("developer_guids")
    @Nullable
    abstract List<String> getDeveloperIds();

    /**
     * The domain ids (serialized as {@code domain_guids})
     */
    @JsonProperty("domain_guids")
    @Nullable
    abstract List<String> getDomainIds();

    /**
     * The manager ids (serialized as {@code manager_guids})
     */
    @JsonProperty("manager_guids")
    @Nullable
    abstract List<String> getManagerIds();

    /**
     * The name
     */
    @JsonProperty("name")
    @Nullable
    abstract String getName();

    /**
     * The organization id
     */
    @JsonProperty("organization_guid")
    @Nullable
    abstract String getOrganizationId();

    /**
     * The security group ids (serialized as {@code security_group_guids})
     */
    @JsonProperty("security_group_guids")
    @Nullable
    abstract List<String> getSecurityGroupIds();

    /**
     * The space id (not serialized; used to build the request URI)
     */
    @JsonIgnore
    abstract String getSpaceId();

}
cloudfoundry/cf-java-client
cloudfoundry-client/src/main/java/org/cloudfoundry/client/v2/spaces/_UpdateSpaceRequest.java
Java
apache-2.0
2,193
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package agent.lldb.manager.evt;

import agent.lldb.lldb.DebugThreadInfo;

/**
 * The event corresponding with SBThread.eBroadcastBitThreadResumed
 */
public class LldbThreadResumedEvent extends AbstractLldbEvent<DebugThreadInfo> {

	/**
	 * Constructs the event, carrying the info of the resumed thread.
	 *
	 * @param info details of the thread that was resumed
	 */
	public LldbThreadResumedEvent(DebugThreadInfo info) {
		super(info);
	}

}
NationalSecurityAgency/ghidra
Ghidra/Debug/Debugger-agent-lldb/src/main/java/agent/lldb/manager/evt/LldbThreadResumedEvent.java
Java
apache-2.0
893
/**
 * Copyright (C) 2013
 * by 52 North Initiative for Geospatial Open Source Software GmbH
 *
 * Contact: Andreas Wytzisk
 * 52 North Initiative for Geospatial Open Source Software GmbH
 * Martin-Luther-King-Weg 24
 * 48155 Muenster, Germany
 * info@52north.org
 *
 * This program is free software; you can redistribute and/or modify it under
 * the terms of the GNU General Public License version 2 as published by the
 * Free Software Foundation.
 *
 * This program is distributed WITHOUT ANY WARRANTY; even without the implied
 * WARRANTY OF MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program (see gnu-gpl v2.txt). If not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA or
 * visit the Free Software Foundation web page, http://www.fsf.org.
 */
package org.n52.sos.binding.rest.resources;

import org.n52.sos.binding.rest.requests.RestRequest;

/**
 * Immutable request object describing an HTTP OPTIONS call against a REST
 * resource. Carries the resource type plus two flags that characterize the
 * addressed resource.
 *
 * @author <a href="mailto:e.h.juerrens@52north.org">Eike Hinderk J&uuml;rrens</a>
 *
 */
public class OptionsRestRequest implements RestRequest {

    // Improvement: fields are final — this class has no setters, so it is a
    // pure value object; making the fields final documents and enforces that.
    private final String resourceType;
    private final boolean isGlobalResource;
    private final boolean isResourceCollection;

    /**
     * @param resourceType the type of the addressed resource
     * @param isGlobalResource whether the resource is a global one
     * @param isResourceCollection whether the resource is a collection
     */
    public OptionsRestRequest(String resourceType, boolean isGlobalResource, boolean isResourceCollection) {
        this.resourceType = resourceType;
        this.isGlobalResource = isGlobalResource;
        this.isResourceCollection = isResourceCollection;
    }

    /** @return the type of the addressed resource */
    public String getResourceType() {
        return resourceType;
    }

    /** @return {@code true} if the resource is a global one */
    public boolean isGlobalResource() {
        return isGlobalResource;
    }

    /** @return {@code true} if the resource is a collection */
    public boolean isResourceCollection() {
        return isResourceCollection;
    }

}
sauloperez/sos
src/bindings/rest/code/src/main/java/org/n52/sos/binding/rest/resources/OptionsRestRequest.java
Java
apache-2.0
1,863
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace Watcher.Model
{
    /// <summary>
    /// Predicate that is satisfied when the first operand is strictly greater
    /// than the second.
    /// NOTE(review): the class name and quantifier text "great than" look like
    /// typos for "greater than" — confirm before renaming/rewording, since the
    /// Quantifier string may be displayed or matched elsewhere.
    /// </summary>
    public class GreatThan : IPredicate
    {
        // Human-readable name of this comparison.
        public string Quantifier
        {
            get { return "great than"; }
        }

        // True iff a > b (strict comparison; equal values do not satisfy).
        public bool IsSatisfiedBy(int a, int b)
        {
            return a > b;
        }
    }
}
hotjk/ace
Watcher.Model/Predicate/GreatThan.cs
C#
apache-2.0
429
/* Copyright 2018 The OpenEBS Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha2 import ( ndmapis "github.com/openebs/maya/pkg/apis/openebs.io/ndm/v1alpha1" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" bd "github.com/openebs/maya/pkg/blockdevice/v1alpha2" bdc "github.com/openebs/maya/pkg/blockdeviceclaim/v1alpha1" cspc "github.com/openebs/maya/pkg/cstor/poolcluster/v1alpha1" csp "github.com/openebs/maya/pkg/cstor/poolinstance/v1alpha3" nodeapis "github.com/openebs/maya/pkg/kubernetes/node/v1alpha1" "github.com/openebs/maya/pkg/volume" "github.com/pkg/errors" k8serror "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" ) // SelectNode returns a node where pool should be created. 
func (ac *Config) SelectNode() (*apis.PoolSpec, string, error) { usedNodes, err := ac.GetUsedNode() if err != nil { return nil, "", errors.Wrapf(err, "could not get used nodes list for pool creation") } for _, pool := range ac.CSPC.Spec.Pools { // pin it pool := pool nodeName, err := GetNodeFromLabelSelector(pool.NodeSelector) if err != nil || nodeName == "" { klog.Errorf("could not use node for selectors {%v}", pool.NodeSelector) continue } if ac.VisitedNodes[nodeName] { continue } else { ac.VisitedNodes[nodeName] = true if !usedNodes[nodeName] { return &pool, nodeName, nil } } } return nil, "", errors.New("no node qualified for pool creation") } // GetNodeFromLabelSelector returns the node name selected by provided labels // TODO : Move it to node package func GetNodeFromLabelSelector(labels map[string]string) (string, error) { nodeList, err := nodeapis.NewKubeClient().List(metav1.ListOptions{LabelSelector: getLabelSelectorString(labels)}) if err != nil { return "", errors.Wrap(err, "failed to get node list from the node selector") } if len(nodeList.Items) != 1 { return "", errors.Errorf("invalid no.of nodes %d from the given node selectors", len(nodeList.Items)) } return nodeList.Items[0].Name, nil } // getLabelSelectorString returns a string of label selector form label map to be used in // list options. // TODO : Move it to node package func getLabelSelectorString(selector map[string]string) string { var selectorString string for key, value := range selector { selectorString = selectorString + key + "=" + value + "," } selectorString = selectorString[:len(selectorString)-len(",")] return selectorString } // GetUsedNode returns a map of node for which pool has already been created. // Note : Filter function is not used from node builder package as it needs // CSP builder package which cam cause import loops. func (ac *Config) GetUsedNode() (map[string]bool, error) { usedNode := make(map[string]bool) cspList, err := csp. NewKubeClient(). 
WithNamespace(ac.Namespace). List( metav1. ListOptions{LabelSelector: string(apis.CStorPoolClusterCPK) + "=" + ac.CSPC.Name}, ) if err != nil { return nil, errors.Wrap(err, "could not list already created csp(s)") } for _, cspObj := range cspList.Items { usedNode[cspObj.Labels[string(apis.HostNameCPK)]] = true } return usedNode, nil } // GetBDListForNode returns a list of BD from the pool spec. // TODO : Move it to CStorPoolCluster packgage func (ac *Config) GetBDListForNode(pool *apis.PoolSpec) []string { var BDList []string for _, group := range pool.RaidGroups { for _, bd := range group.BlockDevices { BDList = append(BDList, bd.BlockDeviceName) } } return BDList } // ClaimBDsForNode claims a given BlockDevice for node // If the block device(s) is/are already claimed for any other CSPC it returns error. // If the block device(s) is/are already calimed for the same CSPC -- it is left as it is and can be used for // pool provisioning. // If the block device(s) is/are unclaimed, then those are claimed. 
func (ac *Config) ClaimBDsForNode(BD []string) error { pendingClaim := 0 for _, bdName := range BD { bdAPIObj, err := bd.NewKubeClient().WithNamespace(ac.Namespace).Get(bdName, metav1.GetOptions{}) if err != nil { return errors.Wrapf(err, "error in getting details for BD {%s} whether it is claimed", bdName) } if bd.BuilderForAPIObject(bdAPIObj).BlockDevice.IsClaimed() { IsClaimedBDUsable, errBD := ac.IsClaimedBDUsable(bdAPIObj) if errBD != nil { return errors.Wrapf(err, "error in getting details for BD {%s} for usability", bdName) } if !IsClaimedBDUsable { return errors.Errorf("BD {%s} already in use", bdName) } continue } err = ac.ClaimBD(bdAPIObj) if err != nil { return errors.Wrapf(err, "Failed to claim BD {%s}", bdName) } pendingClaim++ } if pendingClaim > 0 { return errors.Errorf("%d block device claims are pending", pendingClaim) } return nil } // ClaimBD claims a given BlockDevice func (ac *Config) ClaimBD(bdObj *ndmapis.BlockDevice) error { newBDCObj, err := bdc.NewBuilder(). WithName("bdc-cstor-" + string(bdObj.UID)). WithNamespace(ac.Namespace). WithLabels(map[string]string{string(apis.CStorPoolClusterCPK): ac.CSPC.Name}). WithBlockDeviceName(bdObj.Name). WithHostName(bdObj.Labels[string(apis.HostNameCPK)]). WithCapacity(volume.ByteCount(bdObj.Spec.Capacity.Storage)). WithCSPCOwnerReference(ac.CSPC). WithFinalizer(cspc.CSPCFinalizer). 
Build() if err != nil { return errors.Wrapf(err, "failed to build block device claim for bd {%s}", bdObj.Name) } _, err = bdc.NewKubeClient().WithNamespace(ac.Namespace).Create(newBDCObj.Object) if k8serror.IsAlreadyExists(err) { klog.Infof("BDC for BD {%s} already created", bdObj.Name) return nil } if err != nil { return errors.Wrapf(err, "failed to create block device claim for bd {%s}", bdObj.Name) } return nil } // IsClaimedBDUsable returns true if the passed BD is already claimed and can be // used for provisioning func (ac *Config) IsClaimedBDUsable(bdAPIObj *ndmapis.BlockDevice) (bool, error) { bdObj := bd.BuilderForAPIObject(bdAPIObj) if bdObj.BlockDevice.IsClaimed() { bdcName := bdObj.BlockDevice.Object.Spec.ClaimRef.Name bdcAPIObject, err := bdc.NewKubeClient().WithNamespace(ac.Namespace).Get(bdcName, metav1.GetOptions{}) if err != nil { return false, errors.Wrapf(err, "could not get block device claim for block device {%s}", bdAPIObj.Name) } bdcObj := bdc.BuilderForAPIObject(bdcAPIObject) if bdcObj.BDC.HasLabel(string(apis.CStorPoolClusterCPK), ac.CSPC.Name) { return true, nil } } else { return false, errors.Errorf("block device {%s} is not claimed", bdAPIObj.Name) } return false, nil } // ValidatePoolSpec validates the pool spec. // TODO: Fix following function -- (Current is mock only ) func ValidatePoolSpec(pool *apis.PoolSpec) bool { return true }
prateekpandey14/maya
pkg/algorithm/nodeselect/v1alpha2/select_node.go
GO
apache-2.0
7,242
define(['app/models/proto_model'], function(ProtoModel) {
    // Order model. urlRoot matches the first part of the corresponding
    // @remote.method name on the server (cru_api.order_*); the listed
    // fields must be coerced to floats.
    return ProtoModel.extend({
        urlRoot: '/cru_api.order_',
        must_be_floats: ['sub_total', 'actual_total'],
    });
});
babybunny/rebuildingtogethercaptain
gae/js/app/models/order.js
JavaScript
apache-2.0
317
package com.sequenceiq.freeipa.entity.util;

import com.sequenceiq.cloudbreak.converter.DefaultEnumConverter;
import com.sequenceiq.freeipa.api.v1.kerberos.model.KerberosType;

/**
 * JPA attribute converter for {@link KerberosType}, falling back to
 * {@link KerberosType#FREEIPA} when the stored value cannot be mapped.
 */
public class KerberosTypeConverter extends DefaultEnumConverter<KerberosType> {

    @Override
    public KerberosType getDefault() {
        return KerberosType.FREEIPA;
    }
}
hortonworks/cloudbreak
freeipa/src/main/java/com/sequenceiq/freeipa/entity/util/KerberosTypeConverter.java
Java
apache-2.0
356
namespace TrelloToExcel.Trello
{
    /// <summary>
    /// DTO for a fragment of Trello text data; presumably deserialized from
    /// the Trello API (property name matches the JSON field) — TODO confirm.
    /// </summary>
    public class TextData
    {
        public Emoji3 emoji { get; set; }
    }
}
ymotton/TrelloToExcel
TrelloToExcel/Trello/TextData.cs
C#
apache-2.0
117
def power_digit_sum(exponent):
    """Return the sum of the decimal digits of 2**exponent (Project Euler 16)."""
    return sum(map(int, str(2 ** exponent)))
plilja/project-euler
problem_16/power_digit_sum.py
Python
apache-2.0
111
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Constants for music processing in Magenta."""

# Meter-related constants.
DEFAULT_QUARTERS_PER_MINUTE = 120.0
DEFAULT_STEPS_PER_BAR = 16  # 4/4 music sampled at 4 steps per quarter note.
DEFAULT_STEPS_PER_QUARTER = 4

# Default absolute quantization.
DEFAULT_STEPS_PER_SECOND = 100

# Standard pulses per quarter.
# https://en.wikipedia.org/wiki/Pulses_per_quarter_note
STANDARD_PPQ = 220

# Special melody events.
NUM_SPECIAL_MELODY_EVENTS = 2
MELODY_NOTE_OFF = -1
MELODY_NO_EVENT = -2

# Other melody-related constants.
MIN_MELODY_EVENT = -2
MAX_MELODY_EVENT = 127
MIN_MIDI_PITCH = 0  # Inclusive.
MAX_MIDI_PITCH = 127  # Inclusive.
NUM_MIDI_PITCHES = MAX_MIDI_PITCH - MIN_MIDI_PITCH + 1
NOTES_PER_OCTAVE = 12

# Velocity-related constants.
MIN_MIDI_VELOCITY = 1  # Inclusive.
MAX_MIDI_VELOCITY = 127  # Inclusive.

# Program-related constants.
MIN_MIDI_PROGRAM = 0
MAX_MIDI_PROGRAM = 127

# MIDI programs that typically sound unpitched.
UNPITCHED_PROGRAMS = (
    list(range(96, 104)) + list(range(112, 120)) + list(range(120, 128)))

# Chord symbol for "no chord".
NO_CHORD = 'N.C.'

# The indices of the pitch classes in a major scale.
MAJOR_SCALE = [0, 2, 4, 5, 7, 9, 11]

# NOTE_KEYS[note] = The major keys that note belongs to.
# ex. NOTE_KEYS[0] lists all the major keys that contain the note C,
# which are:
#     [0, 1, 3, 5, 7, 8, 10]
#     [C, C#, D#, F, G, G#, A#]
#
# 0 = C
# 1 = C#
# 2 = D
# 3 = D#
# 4 = E
# 5 = F
# 6 = F#
# 7 = G
# 8 = G#
# 9 = A
# 10 = A#
# 11 = B
#
# NOTE_KEYS can be generated using the code below, but is explicitly declared
# for readability:
# NOTE_KEYS = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
#              for i in range(12)]
NOTE_KEYS = [
    [0, 1, 3, 5, 7, 8, 10],
    [1, 2, 4, 6, 8, 9, 11],
    [0, 2, 3, 5, 7, 9, 10],
    [1, 3, 4, 6, 8, 10, 11],
    [0, 2, 4, 5, 7, 9, 11],
    [0, 1, 3, 5, 6, 8, 10],
    [1, 2, 4, 6, 7, 9, 11],
    [0, 2, 3, 5, 7, 8, 10],
    [1, 3, 4, 6, 8, 9, 11],
    [0, 2, 4, 5, 7, 9, 10],
    [1, 3, 5, 6, 8, 10, 11],
    [0, 2, 4, 6, 7, 9, 11]
]
magenta/note-seq
note_seq/constants.py
Python
apache-2.0
2,620
# Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing classes related to AWS CloudWatch Logs."""

import json

from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util


class LogGroup(resource.BaseResource):
  """Class representing a CloudWatch log group.

  All operations shell out to the AWS CLI via vm_util.IssueCommand.

  Attributes:
    region: AWS region the log group lives in.
    name: name of the log group.
    retention_in_days: retention policy applied after creation (default 7).
  """

  def __init__(self, region, name, retention_in_days=7):
    super(LogGroup, self).__init__()
    self.region = region
    self.name = name
    self.retention_in_days = retention_in_days

  def _Create(self):
    """Create the log group."""
    create_cmd = util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'create-log-group',
        '--log-group-name', self.name
    ]
    vm_util.IssueCommand(create_cmd)

  def _Delete(self):
    """Delete the log group."""
    delete_cmd = util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'delete-log-group',
        '--log-group-name', self.name
    ]
    # Deletion is best-effort: do not raise if the group is already gone.
    vm_util.IssueCommand(delete_cmd, raise_on_failure=False)

  def Exists(self):
    """Returns True if the log group exists."""
    describe_cmd = util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'describe-log-groups',
        '--log-group-name-prefix', self.name,
        '--no-paginate'
    ]
    stdout, _, _ = vm_util.IssueCommand(describe_cmd)
    log_groups = json.loads(stdout)['logGroups']
    # describe-log-groups matches by prefix, so filter for an exact name match.
    group = next((group for group in log_groups
                  if group['logGroupName'] == self.name), None)
    return bool(group)

  def _PostCreate(self):
    """Set the retention policy."""
    put_cmd = util.AWS_PREFIX + [
        '--region', self.region,
        'logs', 'put-retention-policy',
        '--log-group-name', self.name,
        '--retention-in-days', str(self.retention_in_days)
    ]
    vm_util.IssueCommand(put_cmd)


def GetLogs(region, stream_name, group_name, token=None):
  """Fetches the JSON formatted log stream starting at the token.

  Args:
    region: AWS region of the log group.
    stream_name: name of the log stream.
    group_name: name of the log group.
    token: optional pagination token; None starts from the head of the stream.

  Returns:
    Parsed JSON response of the get-log-events call.
  """
  get_cmd = util.AWS_PREFIX + [
      '--region', region,
      'logs', 'get-log-events',
      '--start-from-head',
      '--log-group-name', group_name,
      '--log-stream-name', stream_name,
  ]
  if token:
    get_cmd.extend(['--next-token', token])
  stdout, _, _ = vm_util.IssueCommand(get_cmd)
  return json.loads(stdout)


def GetLogStreamAsString(region, stream_name, log_group):
  """Returns the messages of the log stream as a string.

  Pages through the stream with GetLogs; stops when a page comes back with
  no events (the CLI signals end-of-stream by returning an empty page).
  """
  log_lines = []
  token = None
  events = []
  while token is None or events:
    response = GetLogs(region, stream_name, log_group, token)
    events = response['events']
    token = response['nextForwardToken']
    for event in events:
      log_lines.append(event['message'])
  return '\n'.join(log_lines)
GoogleCloudPlatform/PerfKitBenchmarker
perfkitbenchmarker/providers/aws/aws_logs.py
Python
apache-2.0
3,293
package com.lyubenblagoev.postfixrest.security; import com.lyubenblagoev.postfixrest.entity.User; import com.lyubenblagoev.postfixrest.repository.UserRepository; import org.springframework.security.core.userdetails.UserDetails; import org.springframework.security.core.userdetails.UserDetailsService; import org.springframework.security.core.userdetails.UsernameNotFoundException; import org.springframework.stereotype.Service; import java.util.Optional; @Service public class CustomUserDetailsService implements UserDetailsService { private final UserRepository userRepository; public CustomUserDetailsService(UserRepository userRepository) { this.userRepository = userRepository; } @Override public UserDetails loadUserByUsername(String username) throws UsernameNotFoundException { return userRepository.findByEmail(username) .map(u -> new UserPrincipal(u)) .orElseThrow(() -> new UsernameNotFoundException("No user found for " + username)); } }
lyubenblagoev/postfix-rest-server
src/main/java/com/lyubenblagoev/postfixrest/security/CustomUserDetailsService.java
Java
apache-2.0
1,026
/*
* Copyright (c) 2021 Citrix Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*   http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package network

/**
* Binding object which returns the resources bound to vrid_binding.
*/
type Vridbinding struct {
	/**
	* Integer value that uniquely identifies the VMAC address.<br/>Minimum value =  1<br/>Maximum value =  255
	*/
	// Serialized as "id" in NITRO API payloads; omitted when zero.
	Id int `json:"id,omitempty"`
}
citrix/terraform-provider-netscaler
vendor/github.com/citrix/adc-nitro-go/resource/config/network/vrid_binding.go
GO
apache-2.0
881
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "fmt" "io" "os" "strconv" "strings" "time" "github.com/spf13/cobra" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/klog" "k8s.io/kops/cmd/kops/util" api "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/cloudinstances" "k8s.io/kops/pkg/featureflag" "k8s.io/kops/pkg/instancegroups" "k8s.io/kops/pkg/pretty" "k8s.io/kops/upup/pkg/fi/cloudup" "k8s.io/kops/util/pkg/tables" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/kubernetes/pkg/kubectl/util/templates" ) var ( rollingupdateLong = pretty.LongDesc(i18n.T(` This command updates a kubernetes cluster to match the cloud and kops specifications. To perform a rolling update, you need to update the cloud resources first with the command ` + pretty.Bash("kops update cluster") + `. If rolling-update does not report that the cluster needs to be rolled, you can force the cluster to be rolled with the force flag. Rolling update drains and validates the cluster by default. A cluster is deemed validated when all required nodes are running and all pods in the kube-system namespace are operational. When a node is deleted, rolling-update sleeps the interval for the node type, and then tries for the same period of time for the cluster to be validated. 
For instance, setting --master-interval=3m causes rolling-update to wait for 3 minutes after a master is rolled, and another 3 minutes for the cluster to stabilize and pass validation. Note: terraform users will need to run all of the following commands from the same directory ` + pretty.Bash("kops update cluster --target=terraform") + ` then ` + pretty.Bash("terraform plan") + ` then ` + pretty.Bash("terraform apply") + ` prior to running ` + pretty.Bash("kops rolling-update cluster") + `.`)) rollingupdateExample = templates.Examples(i18n.T(` # Preview a rolling-update. kops rolling-update cluster # Roll the currently selected kops cluster with defaults. # Nodes will be drained and the cluster will be validated between node replacement. kops rolling-update cluster --yes # Roll the k8s-cluster.example.com kops cluster, # do not fail if the cluster does not validate, # wait 8 min to create new node, and wait at least # 8 min to validate the cluster. kops rolling-update cluster k8s-cluster.example.com --yes \ --fail-on-validate-error="false" \ --master-interval=8m \ --node-interval=8m # Roll the k8s-cluster.example.com kops cluster, # do not validate the cluster because of the cloudonly flag. # Force the entire cluster to roll, even if rolling update # reports that the cluster does not need to be rolled. kops rolling-update cluster k8s-cluster.example.com --yes \ --cloudonly \ --force # Roll the k8s-cluster.example.com kops cluster, # only roll the node instancegroup, # use the new drain and validate functionality. kops rolling-update cluster k8s-cluster.example.com --yes \ --fail-on-validate-error="false" \ --node-interval 8m \ --instance-group nodes `)) rollingupdateShort = i18n.T(`Rolling update a cluster.`) ) // RollingUpdateOptions is the command Object for a Rolling Update. type RollingUpdateOptions struct { Yes bool Force bool CloudOnly bool // The following two variables are when kops is validating a cluster // during a rolling update. 
// FailOnDrainError fail rolling-update if drain errors. FailOnDrainError bool // FailOnValidate fail the cluster rolling-update when the cluster // does not validate, after a validation period. FailOnValidate bool // PostDrainDelay is the duration of a pause after a drain operation PostDrainDelay time.Duration // ValidationTimeout is the timeout for validation to succeed after the drain and pause ValidationTimeout time.Duration // MasterInterval is the minimum time to wait after stopping a master node. This does not include drain and validate time. MasterInterval time.Duration // NodeInterval is the minimum time to wait after stopping a (non-master) node. This does not include drain and validate time. NodeInterval time.Duration // BastionInterval is the minimum time to wait after stopping a bastion. This does not include drain and validate time. BastionInterval time.Duration // Interactive rolling-update prompts user to continue after each instances is updated. Interactive bool ClusterName string // InstanceGroups is the list of instance groups to rolling-update; // if not specified, all instance groups will be updated InstanceGroups []string // InstanceGroupRoles is the list of roles we should rolling-update // if not specified, all instance groups will be updated InstanceGroupRoles []string } func (o *RollingUpdateOptions) InitDefaults() { o.Yes = false o.Force = false o.CloudOnly = false o.FailOnDrainError = false o.FailOnValidate = true o.MasterInterval = 15 * time.Second o.NodeInterval = 15 * time.Second o.BastionInterval = 15 * time.Second o.Interactive = false o.PostDrainDelay = 5 * time.Second o.ValidationTimeout = 15 * time.Minute } func NewCmdRollingUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command { var options RollingUpdateOptions options.InitDefaults() cmd := &cobra.Command{ Use: "cluster", Short: rollingupdateShort, Long: rollingupdateLong, Example: rollingupdateExample, } cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, 
"Perform rolling update immediately, without --yes rolling-update executes a dry-run") cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force rolling update, even if no changes") cmd.Flags().BoolVar(&options.CloudOnly, "cloudonly", options.CloudOnly, "Perform rolling update without confirming progress with k8s") cmd.Flags().DurationVar(&options.ValidationTimeout, "validation-timeout", options.ValidationTimeout, "Maximum time to wait for a cluster to validate") cmd.Flags().DurationVar(&options.MasterInterval, "master-interval", options.MasterInterval, "Time to wait between restarting masters") cmd.Flags().DurationVar(&options.NodeInterval, "node-interval", options.NodeInterval, "Time to wait between restarting nodes") cmd.Flags().DurationVar(&options.BastionInterval, "bastion-interval", options.BastionInterval, "Time to wait between restarting bastions") cmd.Flags().DurationVar(&options.PostDrainDelay, "post-drain-delay", options.PostDrainDelay, "Time to wait after draining each node") cmd.Flags().BoolVarP(&options.Interactive, "interactive", "i", options.Interactive, "Prompt to continue after each instance is updated") cmd.Flags().StringSliceVar(&options.InstanceGroups, "instance-group", options.InstanceGroups, "List of instance groups to update (defaults to all if not specified)") cmd.Flags().StringSliceVar(&options.InstanceGroupRoles, "instance-group-roles", options.InstanceGroupRoles, "If specified, only instance groups of the specified role will be updated (e.g. 
Master,Node,Bastion)") if featureflag.DrainAndValidateRollingUpdate.Enabled() { cmd.Flags().BoolVar(&options.FailOnDrainError, "fail-on-drain-error", true, "The rolling-update will fail if draining a node fails.") cmd.Flags().BoolVar(&options.FailOnValidate, "fail-on-validate-error", true, "The rolling-update will fail if the cluster fails to validate.") } cmd.Run = func(cmd *cobra.Command, args []string) { err := rootCommand.ProcessArgs(args) if err != nil { exitWithError(err) return } clusterName := rootCommand.ClusterName() if clusterName == "" { exitWithError(fmt.Errorf("--name is required")) return } options.ClusterName = clusterName err = RunRollingUpdateCluster(f, os.Stdout, &options) if err != nil { exitWithError(err) return } } return cmd } func RunRollingUpdateCluster(f *util.Factory, out io.Writer, options *RollingUpdateOptions) error { clientset, err := f.Clientset() if err != nil { return err } cluster, err := GetCluster(f, options.ClusterName) if err != nil { return err } contextName := cluster.ObjectMeta.Name clientGetter := genericclioptions.NewConfigFlags() clientGetter.Context = &contextName config, err := clientGetter.ToRESTConfig() if err != nil { return fmt.Errorf("cannot load kubecfg settings for %q: %v", contextName, err) } var nodes []v1.Node var k8sClient kubernetes.Interface if !options.CloudOnly { k8sClient, err = kubernetes.NewForConfig(config) if err != nil { return fmt.Errorf("cannot build kube client for %q: %v", contextName, err) } nodeList, err := k8sClient.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { fmt.Fprintf(os.Stderr, "Unable to reach the kubernetes API.\n") fmt.Fprintf(os.Stderr, "Use --cloudonly to do a rolling-update without confirming progress with the k8s API\n\n") return fmt.Errorf("error listing nodes in cluster: %v", err) } if nodeList != nil { nodes = nodeList.Items } } list, err := clientset.InstanceGroupsFor(cluster).List(metav1.ListOptions{}) if err != nil { return err } var instanceGroups 
[]*api.InstanceGroup for i := range list.Items { instanceGroups = append(instanceGroups, &list.Items[i]) } warnUnmatched := true if len(options.InstanceGroups) != 0 { var filtered []*api.InstanceGroup for _, instanceGroupName := range options.InstanceGroups { var found *api.InstanceGroup for _, ig := range instanceGroups { if ig.ObjectMeta.Name == instanceGroupName { found = ig break } } if found == nil { return fmt.Errorf("InstanceGroup %q not found", instanceGroupName) } filtered = append(filtered, found) } instanceGroups = filtered // Don't warn if we find more ASGs than IGs warnUnmatched = false } if len(options.InstanceGroupRoles) != 0 { var filtered []*api.InstanceGroup for _, ig := range instanceGroups { for _, role := range options.InstanceGroupRoles { if ig.Spec.Role == api.InstanceGroupRole(strings.Title(strings.ToLower(role))) { filtered = append(filtered, ig) continue } } } instanceGroups = filtered // Don't warn if we find more ASGs than IGs warnUnmatched = false } cloud, err := cloudup.BuildCloud(cluster) if err != nil { return err } groups, err := cloud.GetCloudGroups(cluster, instanceGroups, warnUnmatched, nodes) if err != nil { return err } { t := &tables.Table{} t.AddColumn("NAME", func(r *cloudinstances.CloudInstanceGroup) string { return r.InstanceGroup.ObjectMeta.Name }) t.AddColumn("STATUS", func(r *cloudinstances.CloudInstanceGroup) string { return r.Status() }) t.AddColumn("NEEDUPDATE", func(r *cloudinstances.CloudInstanceGroup) string { return strconv.Itoa(len(r.NeedUpdate)) }) t.AddColumn("READY", func(r *cloudinstances.CloudInstanceGroup) string { return strconv.Itoa(len(r.Ready)) }) t.AddColumn("MIN", func(r *cloudinstances.CloudInstanceGroup) string { return strconv.Itoa(r.MinSize) }) t.AddColumn("MAX", func(r *cloudinstances.CloudInstanceGroup) string { return strconv.Itoa(r.MaxSize) }) t.AddColumn("NODES", func(r *cloudinstances.CloudInstanceGroup) string { var nodes []*v1.Node for _, i := range r.Ready { if i.Node != nil { nodes = 
append(nodes, i.Node) } } for _, i := range r.NeedUpdate { if i.Node != nil { nodes = append(nodes, i.Node) } } return strconv.Itoa(len(nodes)) }) var l []*cloudinstances.CloudInstanceGroup for _, v := range groups { l = append(l, v) } columns := []string{"NAME", "STATUS", "NEEDUPDATE", "READY", "MIN", "MAX"} if !options.CloudOnly { columns = append(columns, "NODES") } err := t.Render(l, out, columns...) if err != nil { return err } } needUpdate := false for _, group := range groups { if len(group.NeedUpdate) != 0 { needUpdate = true } } if !needUpdate && !options.Force { fmt.Printf("\nNo rolling-update required.\n") return nil } if !options.Yes { fmt.Printf("\nMust specify --yes to rolling-update.\n") return nil } if featureflag.DrainAndValidateRollingUpdate.Enabled() { klog.V(2).Infof("Rolling update with drain and validate enabled.") } d := &instancegroups.RollingUpdateCluster{ MasterInterval: options.MasterInterval, NodeInterval: options.NodeInterval, BastionInterval: options.BastionInterval, Interactive: options.Interactive, Force: options.Force, Cloud: cloud, K8sClient: k8sClient, ClientGetter: clientGetter, FailOnDrainError: options.FailOnDrainError, FailOnValidate: options.FailOnValidate, CloudOnly: options.CloudOnly, ClusterName: options.ClusterName, PostDrainDelay: options.PostDrainDelay, ValidationTimeout: options.ValidationTimeout, } return d.RollingUpdate(groups, cluster, list) }
gambol99/kops
cmd/kops/rollingupdatecluster.go
GO
apache-2.0
13,554
/******************************************************************************* * Copyright (c) 2012, 2015 Pivotal Software, Inc. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, * Version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Contributors: * Pivotal Software, Inc. - initial API and implementation ********************************************************************************/ package cn.dockerfoundry.ide.eclipse.server.core.internal; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import org.cloudfoundry.client.lib.domain.CloudService; import org.eclipse.core.runtime.IStatus; import org.eclipse.core.runtime.Status; import org.eclipse.wst.server.core.IModule; import cn.dockerfoundry.ide.eclipse.server.core.internal.application.ModuleChangeEvent; import cn.dockerfoundry.ide.eclipse.server.core.internal.client.CloudRefreshEvent; /** * Fires server refresh events. Only one handler is active per workbench runtime * session. 
* */ public class ServerEventHandler { private static ServerEventHandler handler; public static ServerEventHandler getDefault() { if (handler == null) { handler = new ServerEventHandler(); } return handler; } private final List<CloudServerListener> applicationListeners = new CopyOnWriteArrayList<CloudServerListener>(); public synchronized void addServerListener(CloudServerListener listener) { if (listener != null && !applicationListeners.contains(listener)) { applicationListeners.add(listener); } } public synchronized void removeServerListener(CloudServerListener listener) { applicationListeners.remove(listener); } public void fireServicesUpdated(DockerFoundryServer server, List<DockerApplicationService> services) { fireServerEvent(new CloudRefreshEvent(server, null, CloudServerEvent.EVENT_UPDATE_SERVICES, services)); } public void firePasswordUpdated(DockerFoundryServer server) { fireServerEvent(new CloudServerEvent(server, CloudServerEvent.EVENT_UPDATE_PASSWORD)); } public void fireServerRefreshed(DockerFoundryServer server) { fireServerEvent(new CloudServerEvent(server, CloudServerEvent.EVENT_SERVER_REFRESHED)); } public void fireAppInstancesChanged(DockerFoundryServer server, IModule module) { fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_INSTANCES_UPDATED, module, Status.OK_STATUS)); } public void fireApplicationRefreshed(DockerFoundryServer server, IModule module) { fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_APPLICATION_REFRESHED, module, Status.OK_STATUS)); } public void fireAppDeploymentChanged(DockerFoundryServer server, IModule module) { fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_APP_DEPLOYMENT_CHANGED, module, Status.OK_STATUS)); } public void fireError(DockerFoundryServer server, IModule module, IStatus status) { fireServerEvent(new ModuleChangeEvent(server, CloudServerEvent.EVENT_CLOUD_OP_ERROR, module, status)); } public synchronized void fireServerEvent(CloudServerEvent event) 
{ CloudServerListener[] listeners = applicationListeners.toArray(new CloudServerListener[0]); for (CloudServerListener listener : listeners) { listener.serverChanged(event); } } }
osswangxining/dockerfoundry
cn.dockerfoundry.ide.eclipse.server.core/src/cn/dockerfoundry/ide/eclipse/server/core/internal/ServerEventHandler.java
Java
apache-2.0
3,808
<?php /** * MyBB 1.6 Spanish Language Pack * Copyright 2010 MyBB Group, All Rights Reserved * * $Id: report.lang.php 5016 2010-08-10 12:32:33Z Anio_pke $ */ $l['report_post'] = "Reportar mensaje"; $l['report_to_mod'] = "Reporta este mensaje a un moderador"; $l['only_report'] = "Solo debes reportar mensajes que sean spam, de publicidad, o abusivos."; $l['report_reason'] = "Tu razón para reportar este mensaje:"; $l['thank_you'] = "Gracias."; $l['post_reported'] = "El mensaje se ha reportado correctamente. Ya puedes cerrar la ventana."; $l['report_error'] = "Error"; $l['no_reason'] = "No puedes reportar un mensaje sin especificar la razón del reporte."; $l['go_back'] = "Volver"; $l['close_window'] = "Cerrar ventana"; ?>
Flauschbaellchen/florensia-base
forum/inc/languages/espanol/report.lang.php
PHP
apache-2.0
755
/** * @license * Copyright 2020 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ // This file can be replaced during build by using the `fileReplacements` array. // `ng build --prod` replaces `environment.ts` with `environment.prod.ts`. // The list of file replacements can be found in `angular.json`. import { version } from '../../../../package.json'; export const environment = { production: false, interactiveVisualizerUrl: `https://storage.googleapis.com/interactive_visualizer/${version}/index.html`, models: [ { displayName: 'Birds V1', description: 'AIY natural world insects classification model', type: 'image classification', metadataUrl: 'https://storage.googleapis.com/tfhub-visualizers/google/aiy/vision/classifier/birds_V1/1/metadata.json', }, { displayName: 'Insects V1', description: 'AIY natural world birds quantized classification model', type: 'image classification', metadataUrl: 'https://storage.googleapis.com/tfhub-visualizers/google/aiy/vision/classifier/insects_V1/1/metadata.json', }, { displayName: 'Mobile Object Localizer V1', description: 'Mobile model to localize objects in an image', type: 'object detection', metadataUrl: 'https://storage.googleapis.com/tfhub-visualizers/google/object_detection/mobile_object_localizer_v1/1/metadata.json', }, ], }; /* * For easier debugging in development mode, you can import the following file * to ignore zone related 
error stack frames such as `zone.run`, `zoneDelegate.invokeTask`. * * This import should be commented out in production mode because it will have a negative impact * on performance if an error is thrown. */ // import 'zone.js/dist/zone-error'; // Included with Angular CLI.
tensorflow/tfjs-examples
interactive-visualizers/projects/playground/src/environments/environment.ts
TypeScript
apache-2.0
2,403
using System; using System.Collections.Generic; using System.ComponentModel; using System.Drawing; using System.Linq; using System.Text; using System.Diagnostics; namespace MT5LiquidityIndicator.Net.Settings { [DisplayName("Line Settings")] public class LineSettings { #region contruction public LineSettings() { this.Volume = 0; this.m_bidColor = Color.Black; this.m_askColor = Color.Black; } internal LineSettings(LineSettings settings) { this.Volume = settings.Volume; this.m_bidColor = settings.m_bidColor; this.m_askColor = settings.m_askColor; } internal LineSettings(double volume, Color bidColor, Color askColor) { this.Volume = volume; this.m_bidColor = bidColor; this.m_askColor = askColor; } #endregion #region properties [DefaultValue(1)] public double Volume { get { return m_volume; } set { if ((value < m_minVolume) || (value > m_maxVolume)) { string message = string.Format("Volum can be from {0} to {1}", m_minVolume, m_maxVolume); throw new ArgumentOutOfRangeException("value", value, message); } m_volume = value; } } [DisplayName("Bid Color")] [DefaultValue(typeof(Color), "Black")] public Color BidColor { get { return m_bidColor; } set { m_bidColor = NormalizeColor(value); } } [DisplayName("Ask Color")] [DefaultValue(typeof(Color), "Black")] public Color AskColor { get { return m_askColor; } set { m_askColor = NormalizeColor(value); } } #endregion #region private members private static Color NormalizeColor(Color value) { if (255 == value.A) { return value; } Color result = Color.FromArgb(255, value.R, value.G, value.B); return result; } #endregion #region overrode methods public override string ToString() { string result = string.Format("Volume = {0}", this.Volume); return result; } #endregion #region members private double m_volume; private const double m_minVolume = 0; private const double m_maxVolume = 10000; private Color m_bidColor; private Color m_askColor; #endregion } }
marmysh/MT5-Liquidity-Indicator
Source/MT5LiquidityIndicator/MT5LiquidityIndicator.Net/Settings/LineSettings.cs
C#
apache-2.0
2,168
/* * Copyright 2016 Bjoern Bilger * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.jrestless.core.container; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; import java.io.OutputStream; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response.Status; import org.glassfish.jersey.server.ContainerResponse; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import com.jrestless.core.container.JRestlessHandlerContainer.JRestlessContainerResponse; import com.jrestless.core.container.JRestlessHandlerContainer.JRestlessContainerResponseWriter; import com.jrestless.core.container.io.JRestlessResponseWriter; public class JRestlessContainerResponseWriterTest { private JRestlessContainerResponseWriter containerResponseWriter; private JRestlessContainerResponse response; @BeforeEach public void setup() { JRestlessResponseWriter responseWriter = mock(JRestlessResponseWriter.class); 
when(responseWriter.getEntityOutputStream()).thenReturn(new ByteArrayOutputStream()); response = spy(new JRestlessContainerResponse(responseWriter)); containerResponseWriter = new JRestlessContainerResponseWriter(response); } @Test public void commit_ResponseNotYetClosed_ShouldCloseResponse() { containerResponseWriter.commit(); verify(response, times(1)).close(); } @Test public void writeResponseStatusAndHeaders_ContextHeaderAndStatusGiven_ShouldUpdateResponseStatusAndHeaders() { MultivaluedMap<String, String> actualHeaders = new MultivaluedHashMap<>(); actualHeaders.add("header0", "value0_0"); actualHeaders.add("header0", "value0_1"); actualHeaders.add("header1", "value1_0"); MultivaluedMap<String, String> expectedHeaders = new MultivaluedHashMap<>(); expectedHeaders.add("header0", "value0_0"); expectedHeaders.add("header0", "value0_1"); expectedHeaders.add("header1", "value1_0"); ContainerResponse context = mock(ContainerResponse.class); when(context.getStatusInfo()).thenReturn(Status.CONFLICT); when(context.getStringHeaders()).thenReturn(actualHeaders); containerResponseWriter.writeResponseStatusAndHeaders(-1, context); assertEquals(Status.CONFLICT, response.getStatusType()); assertEquals(expectedHeaders, response.getHeaders()); } @Test public void writeResponseStatusAndHeaders_ShouldReturnEntityOutputStreamOfResponse() { ContainerResponse context = mock(ContainerResponse.class); when(context.getStringHeaders()).thenReturn(new MultivaluedHashMap<>()); when(context.getStatusInfo()).thenReturn(Status.OK); OutputStream entityOutputStream = containerResponseWriter.writeResponseStatusAndHeaders(-1, context); assertSame(response.getEntityOutputStream(), entityOutputStream); } @Test public void failure_ResponseNotYetCommitted_ShouldSetInternalServerErrorStatusOnFail() { ContainerResponse context = mock(ContainerResponse.class); when(context.getStatusInfo()).thenReturn(Status.OK); when(context.getStringHeaders()).thenReturn(new MultivaluedHashMap<>()); 
containerResponseWriter.writeResponseStatusAndHeaders(-1, context); containerResponseWriter.failure(new RuntimeException()); assertEquals(Status.INTERNAL_SERVER_ERROR, response.getStatusType()); } @Test public void failure_ResponseNotYetCommitted_ShouldCommitOnFailure() { containerResponseWriter = spy(containerResponseWriter); containerResponseWriter.failure(new RuntimeException()); verify(containerResponseWriter, times(1)).commit(); } @Test public void failure_ResponseNotYetCommitted_ShouldRethrowOnCommitFailure() { containerResponseWriter = spy(containerResponseWriter); containerResponseWriter.failure(new RuntimeException()); doThrow(CommitException.class).when(containerResponseWriter).commit(); assertThrows(RuntimeException.class, () -> containerResponseWriter.failure(new RuntimeException())); } @Test public void enableResponseBuffering_Always_ShouldBeDisabled() { assertFalse(containerResponseWriter.enableResponseBuffering()); } @Test public void setSuspendTimeout_Always_ShouldBeUnsupported() { assertThrows(UnsupportedOperationException.class, () -> containerResponseWriter.setSuspendTimeout(1, null)); } @Test public void suspend_Always_ShouldBeUnsupported() { assertThrows(UnsupportedOperationException.class, () -> containerResponseWriter.suspend(1, null, null)); } @SuppressWarnings("serial") private static class CommitException extends RuntimeException { } }
bbilger/jrestless
core/jrestless-core-container/src/test/java/com/jrestless/core/container/JRestlessContainerResponseWriterTest.java
Java
apache-2.0
5,472