text
stringlengths
7
1.01M
/*******************************************************************************
 * Copyright 2011, 2012 Chris Banes.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/
package com.handmark.pulltorefresh.samples;

import android.app.ListActivity;
import android.os.AsyncTask;
import android.os.Bundle;
import android.text.format.DateUtils;
import android.view.ContextMenu;
import android.view.ContextMenu.ContextMenuInfo;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.widget.AdapterView.AdapterContextMenuInfo;
import android.widget.ArrayAdapter;
import android.widget.ListView;
import android.widget.Toast;

import com.handmark.pulltorefresh.library.PullToRefreshBase;
import com.handmark.pulltorefresh.library.PullToRefreshBase.Mode;
import com.handmark.pulltorefresh.library.PullToRefreshBase.OnLastItemVisibleListener;
import com.handmark.pulltorefresh.library.PullToRefreshBase.OnRefreshListener;
import com.handmark.pulltorefresh.library.PullToRefreshBase.State;
import com.handmark.pulltorefresh.library.PullToRefreshListView;
import com.handmark.pulltorefresh.library.extras.SoundPullEventListener;

import java.util.Arrays;
import java.util.LinkedList;

/**
 * Sample activity demonstrating {@link PullToRefreshListView}: a pull-to-refresh
 * list backed by a static string array, with an options menu that toggles the
 * library's scroll/mode settings and triggers a manual refresh or demo.
 */
public final class PullToRefreshListActivity extends ListActivity {

	// Options-menu item ids (also used as lookup keys in onPrepareOptionsMenu).
	static final int MENU_MANUAL_REFRESH = 0;
	static final int MENU_DISABLE_SCROLL = 1;
	static final int MENU_SET_MODE = 2;
	static final int MENU_DEMO = 3;

	// Backing data for the adapter; addFirst() is used on refresh, hence LinkedList.
	private LinkedList<String> mListItems;
	private PullToRefreshListView mPullRefreshListView;
	private ArrayAdapter<String> mAdapter;

	/** Called when the activity is first created. */
	@Override
	public void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		setContentView(R.layout.activity_ptr_list);

		mPullRefreshListView = (PullToRefreshListView) findViewById(R.id.pull_refresh_list);
		mPullRefreshListView.setMode(Mode.PULL_FROM_END);

		// Set a listener to be invoked when the list should be refreshed.
		mPullRefreshListView.setOnRefreshListener(new OnRefreshListener<ListView>() {
			@Override
			public void onRefresh(PullToRefreshBase<ListView> refreshView) {
				String label = DateUtils.formatDateTime(getApplicationContext(), System.currentTimeMillis(),
						DateUtils.FORMAT_SHOW_TIME | DateUtils.FORMAT_SHOW_DATE | DateUtils.FORMAT_ABBREV_ALL);

				// Update the LastUpdatedLabel
				refreshView.getLoadingLayoutProxy().setLastUpdatedLabel(label);

				// Do work to refresh the list here.
				new GetDataTask().execute();
			}
		});

		// Add an end-of-list listener
		mPullRefreshListView.setOnLastItemVisibleListener(new OnLastItemVisibleListener() {
			@Override
			public void onLastItemVisible() {
				Toast.makeText(PullToRefreshListActivity.this, "End of List!", Toast.LENGTH_SHORT).show();
			}
		});

		ListView actualListView = mPullRefreshListView.getRefreshableView();

		// Need to use the Actual ListView when registering for Context Menu
		registerForContextMenu(actualListView);

		mListItems = new LinkedList<String>();
		mListItems.addAll(Arrays.asList(mStrings));

		mAdapter = new ArrayAdapter<String>(this, android.R.layout.simple_list_item_1, mListItems);

		/**
		 * Add Sound Event Listener
		 */
		SoundPullEventListener<ListView> soundListener = new SoundPullEventListener<ListView>(this);
		soundListener.addSoundEvent(State.PULL_TO_REFRESH, R.raw.pull_event);
		soundListener.addSoundEvent(State.RESET, R.raw.reset_sound);
		soundListener.addSoundEvent(State.REFRESHING, R.raw.refreshing_sound);
		mPullRefreshListView.setOnPullEventListener(soundListener);

		// You can also just use setListAdapter(mAdapter) or
		// mPullRefreshListView.setAdapter(mAdapter)
		actualListView.setAdapter(mAdapter);
	}

	/**
	 * Simulated refresh job: sleeps 4 seconds on a background thread, then
	 * prepends a marker row and tells the pull-to-refresh view it is done.
	 */
	private class GetDataTask extends AsyncTask<Void, Void, String[]> {

		@Override
		protected String[] doInBackground(Void... params) {
			// Simulates a background job.
			try {
				Thread.sleep(4000);
			} catch (InterruptedException e) {
				// NOTE(review): interruption is silently swallowed and the interrupt
				// flag is not restored — acceptable for a demo, not for production.
			}
			return mStrings;
		}

		@Override
		protected void onPostExecute(String[] result) {
			mListItems.addFirst("Added after refresh...");
			mAdapter.notifyDataSetChanged();

			// Call onRefreshComplete when the list has been refreshed.
			mPullRefreshListView.onRefreshComplete();

			super.onPostExecute(result);
		}
	}

	@Override
	public boolean onCreateOptionsMenu(Menu menu) {
		menu.add(0, MENU_MANUAL_REFRESH, 0, "Manual Refresh");
		// Titles reflect the CURRENT state; onPrepareOptionsMenu refreshes them each open.
		menu.add(0, MENU_DISABLE_SCROLL, 1,
				mPullRefreshListView.isScrollingWhileRefreshingEnabled() ? "Disable Scrolling while Refreshing"
						: "Enable Scrolling while Refreshing");
		menu.add(0, MENU_SET_MODE, 0,
				mPullRefreshListView.getMode() == Mode.BOTH ? "Change to MODE_PULL_DOWN" : "Change to MODE_PULL_BOTH");
		menu.add(0, MENU_DEMO, 0, "Demo");
		return super.onCreateOptionsMenu(menu);
	}

	/** Builds a context menu for the long-pressed list row (registered on the inner ListView). */
	@Override
	public void onCreateContextMenu(ContextMenu menu, View v, ContextMenuInfo menuInfo) {
		AdapterContextMenuInfo info = (AdapterContextMenuInfo) menuInfo;

		menu.setHeaderTitle("Item: " + getListView().getItemAtPosition(info.position));
		menu.add("Item 1");
		menu.add("Item 2");
		menu.add("Item 3");
		menu.add("Item 4");

		super.onCreateContextMenu(menu, v, menuInfo);
	}

	/** Re-titles the toggle items so they always describe the action they will perform. */
	@Override
	public boolean onPrepareOptionsMenu(Menu menu) {
		MenuItem disableItem = menu.findItem(MENU_DISABLE_SCROLL);
		disableItem
				.setTitle(mPullRefreshListView.isScrollingWhileRefreshingEnabled() ? "Disable Scrolling while Refreshing"
						: "Enable Scrolling while Refreshing");

		MenuItem setModeItem = menu.findItem(MENU_SET_MODE);
		// NOTE(review): this title ("Change to MODE_FROM_START") differs from the one
		// used in onCreateOptionsMenu ("Change to MODE_PULL_DOWN") for the same state —
		// looks like an inconsistency worth unifying; confirm intended wording.
		setModeItem.setTitle(mPullRefreshListView.getMode() == Mode.BOTH ? "Change to MODE_FROM_START"
				: "Change to MODE_PULL_BOTH");

		return super.onPrepareOptionsMenu(menu);
	}

	@Override
	public boolean onOptionsItemSelected(MenuItem item) {
		switch (item.getItemId()) {
			case MENU_MANUAL_REFRESH:
				new GetDataTask().execute();
				// false == show the refreshing indicator without a scroll-to-refresh animation.
				mPullRefreshListView.setRefreshing(false);
				break;
			case MENU_DISABLE_SCROLL:
				mPullRefreshListView.setScrollingWhileRefreshingEnabled(!mPullRefreshListView
						.isScrollingWhileRefreshingEnabled());
				break;
			case MENU_SET_MODE:
				mPullRefreshListView.setMode(mPullRefreshListView.getMode() == Mode.BOTH ? Mode.PULL_FROM_START
						: Mode.BOTH);
				break;
			case MENU_DEMO:
				mPullRefreshListView.demo();
				break;
		}

		return super.onOptionsItemSelected(item);
	}

	// Demo data: a list of cheeses, duplicated so the list is long enough to scroll.
	private String[] mStrings = { "Abbaye de Belloc", "Abbaye du Mont des Cats", "Abertam", "Abondance", "Ackawi",
			"Acorn", "Adelost", "Affidelice au Chablis", "Afuega'l Pitu", "Airag", "Airedale", "Aisy Cendre",
			"Allgauer Emmentaler", "Abbaye de Belloc", "Abbaye du Mont des Cats", "Abertam", "Abondance", "Ackawi",
			"Acorn", "Adelost", "Affidelice au Chablis", "Afuega'l Pitu", "Airag", "Airedale", "Aisy Cendre",
			"Allgauer Emmentaler" };
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.drill.exec.store.easy.text.compliant;

import io.netty.buffer.DrillBuf;
import io.netty.util.internal.PlatformDependent;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

import org.apache.drill.common.exceptions.ExecutionSetupException;
import org.apache.drill.common.expression.SchemaPath;
import org.apache.drill.common.types.TypeProtos;
import org.apache.drill.common.types.Types;
import org.apache.drill.exec.exception.SchemaChangeException;
import org.apache.drill.exec.physical.impl.OutputMutator;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.vector.RepeatedVarCharVector;

import com.google.common.base.Preconditions;

/**
 * Class is responsible for generating record batches for text file inputs. We generate
 * a record batch with a single vector of type repeated varchar vector. Each record is a single
 * value within the vector containing all the fields in the record as individual array elements.
 *
 * <p>Implementation note: this class writes directly into the raw memory of the vector's
 * underlying {@link DrillBuf}s via {@link PlatformDependent} and maintains its own raw
 * pointers (absolute memory addresses) into three buffers: the repeated-offset buffer,
 * the varchar-offset (length) buffer, and the varchar data buffer. The exact order of
 * pointer updates below is therefore load-bearing; do not reorder.
 */
class RepeatedVarCharOutput extends TextOutput {
  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(RepeatedVarCharOutput.class);

  static final String COL_NAME = "columns";
  static final SchemaPath COLUMNS = SchemaPath.getSimplePath("columns");
  public static final int MAXIMUM_NUMBER_COLUMNS = 64 * 1024;

  // output vector
  private final RepeatedVarCharVector vector;

  // mutator for the output vector
  private final RepeatedVarCharVector.Mutator mutator;

  // boolean array indicating which fields are selected (if star query entire array is set to true)
  private final boolean[] collectedFields;

  // pointer to keep track of the offsets per record
  private long repeatedOffset;

  // pointer to keep track of the original offsets per record
  private long repeatedOffsetOriginal;

  // pointer to end of the offset buffer
  private long repeatedOffsetMax;

  // pointer to the start of the actual data buffer
  private long characterDataOriginal;

  // pointer to the current location of the data buffer
  private long characterData;

  // pointer to the end of the data buffer
  private long characterDataMax;

  // current pointer into the buffer that keeps track of the length of individual fields
  private long charLengthOffset;

  // pointer to the start of the length buffer
  private long charLengthOffsetOriginal;

  // pointer to the end of length buffer
  private long charLengthOffsetMax;

  // pointer to the beginning of the record
  private long recordStart;

  // total number of records processed (across batches)
  private long recordCount;

  // number of records processed in this current batch
  private int batchIndex;

  // current index of the field being processed within the record
  private int fieldIndex = -1;

  /* boolean to indicate if we are currently appending data to the output vector
   * Its set to false when we have hit out of memory or we are not interested in
   * the particular field
   */
  private boolean collect;

  // are we currently appending to a field
  private boolean fieldOpen;

  // maximum number of fields/columns
  private final int maxField;

  /**
   * We initialize and add the repeated varchar vector to the record batch in this
   * constructor. Perform some sanity checks if the selected columns are valid or not.
   * @param outputMutator  Used to create/modify schema in the record batch
   * @param columns  List of columns selected in the query
   * @param isStarQuery  boolean to indicate if all fields are selected or not
   * @throws SchemaChangeException
   */
  public RepeatedVarCharOutput(OutputMutator outputMutator, Collection<SchemaPath> columns, boolean isStarQuery) throws SchemaChangeException {
    super();

    MaterializedField field = MaterializedField.create(COL_NAME, Types.repeated(TypeProtos.MinorType.VARCHAR));
    this.vector = outputMutator.addField(field, RepeatedVarCharVector.class);

    this.mutator = vector.getMutator();

    { // setup fields
      List<Integer> columnIds = new ArrayList<>();
      if (!isStarQuery) {
        String pathStr;
        for (SchemaPath path : columns) {
          assert path.getRootSegment().isNamed() : "root segment should be named";
          pathStr = path.getRootSegment().getPath();
          // Only `columns` (optionally with an array index) or a plain `*` is legal here.
          Preconditions.checkArgument(COL_NAME.equals(pathStr) || (SchemaPath.DYNAMIC_STAR.equals(pathStr) && path.getRootSegment().getChild() == null),
              String.format("Selected column '%s' must have name 'columns' or must be plain '*'", pathStr));

          if (path.getRootSegment().getChild() != null) {
            Preconditions.checkArgument(path.getRootSegment().getChild().isArray(),
                String.format("Selected column '%s' must be an array index", pathStr));
            int index = path.getRootSegment().getChild().getArraySegment().getIndex();
            columnIds.add(index);
          }
        }
        Collections.sort(columnIds);
      }

      boolean[] fields = new boolean[MAXIMUM_NUMBER_COLUMNS];
      int maxField = fields.length;

      if(isStarQuery){
        Arrays.fill(fields, true);
      }else{
        for(Integer i : columnIds){
          // NOTE(review): maxField is reset to 0 on EVERY iteration, so this ends up
          // being the LAST element of columnIds rather than a running maximum. It is
          // correct only because columnIds was sorted ascending above — worth cleaning
          // up if this code is ever touched again.
          maxField = 0;
          maxField = Math.max(maxField, i);
          fields[i] = true;
        }
      }
      this.collectedFields = fields;
      this.maxField = maxField;
    }
  }

  /**
   * Start a new record batch. Resets all the offsets and pointers that
   * store buffer addresses
   */
  @Override
  public void startBatch() {
    this.recordStart = characterDataOriginal;
    this.fieldOpen = false;
    this.batchIndex = 0;
    this.fieldIndex = -1;
    this.collect = true;

    loadRepeatedOffsetAddress();
    loadVarCharOffsetAddress();
    loadVarCharDataAddress();
  }

  /** Caches the raw addresses of the per-record (repeated) offset buffer. */
  private void loadRepeatedOffsetAddress(){
    @SuppressWarnings("resource")
    DrillBuf buf = vector.getOffsetVector().getBuffer();
    checkBuf(buf);
    // Skip the first 4-byte offset entry (offsets conceptually start at entry 1).
    this.repeatedOffset = buf.memoryAddress() + 4;
    this.repeatedOffsetOriginal = buf.memoryAddress() + 4;
    this.repeatedOffsetMax = buf.memoryAddress() + buf.capacity();
  }

  /** Caches the raw addresses of the varchar character-data buffer. */
  private void loadVarCharDataAddress(){
    @SuppressWarnings("resource")
    DrillBuf buf = vector.getDataVector().getBuffer();
    checkBuf(buf);
    this.characterData = buf.memoryAddress();
    this.characterDataOriginal = buf.memoryAddress();
    this.characterDataMax = buf.memoryAddress() + buf.capacity();
  }

  /** Caches the raw addresses of the per-field varchar offset (length) buffer. */
  private void loadVarCharOffsetAddress(){
    @SuppressWarnings("resource")
    DrillBuf buf = vector.getDataVector().getOffsetVector().getBuffer();
    checkBuf(buf);
    this.charLengthOffset = buf.memoryAddress() + 4;
    this.charLengthOffsetOriginal = buf.memoryAddress() + 4; // add four as offsets conceptually start at 1. (first item is 0..1)
    this.charLengthOffsetMax = buf.memoryAddress() + buf.capacity();
  }

  /** Grows the varchar offset buffer, then re-bases the current write pointer onto the new buffer. */
  private void expandVarCharOffsets(){
    vector.getDataVector().getOffsetVector().reAlloc();
    long diff = charLengthOffset - charLengthOffsetOriginal;
    loadVarCharOffsetAddress();
    charLengthOffset += diff;
  }

  /** Grows the varchar data buffer, then re-bases the current write pointer onto the new buffer. */
  private void expandVarCharData(){
    vector.getDataVector().reAlloc();
    long diff = characterData - characterDataOriginal;
    loadVarCharDataAddress();
    characterData += diff;
  }

  /** Grows the repeated offset buffer, then re-bases the current write pointer onto the new buffer. */
  private void expandRepeatedOffsets(){
    vector.getOffsetVector().reAlloc();
    long diff = repeatedOffset - repeatedOffsetOriginal;
    loadRepeatedOffsetAddress();
    repeatedOffset += diff;
  }

  /**
   * Helper method to check if the buffer we are accessing
   * has a minimum reference count and has not been deallocated
   * @param b  working drill buffer
   */
  private void checkBuf(DrillBuf b){
    if(b.refCnt() < 1){
      throw new IllegalStateException("Cannot access a dereferenced buffer.");
    }
  }

  @Override
  public void startField(int index) {
    fieldIndex = index;
    // Only append bytes for fields that were actually selected in the query.
    collect = collectedFields[index];
    fieldOpen = true;
  }

  @Override
  public boolean endField() {
    fieldOpen = false;

    if(charLengthOffset >= charLengthOffsetMax){
      expandVarCharOffsets();
    }

    // Record where this field's data ends (relative to the start of the data buffer).
    // Written even for non-collected fields, which then have zero length.
    int newOffset = (int) (characterData - characterDataOriginal);
    PlatformDependent.putInt(charLengthOffset, newOffset);
    charLengthOffset += 4;

    // Returns false once we are past the highest selected column, so the caller can stop early.
    return fieldIndex < maxField;
  }

  @Override
  public boolean endEmptyField() {
    return endField();
  }

  @Override
  public void append(byte data) {
    if(!collect){
      return;
    }

    if(characterData >= characterDataMax){
      expandVarCharData();
    }

    PlatformDependent.putByte(characterData, data);
    characterData++;
  }

  @Override
  public long getRecordCount() {
    return recordCount;
  }

  @Override
  public boolean rowHasData() {
    // True when at least one byte has been appended since the record started.
    return this.recordStart < characterData;
  }

  @Override
  public void finishRecord() {
    this.recordStart = characterData;

    if(fieldOpen){
      endField();
    }

    if(repeatedOffset >= repeatedOffsetMax){
      expandRepeatedOffsets();
    }

    // Number of 4-byte length entries written so far == number of fields emitted.
    int newOffset = ((int) (charLengthOffset - charLengthOffsetOriginal))/4;
    PlatformDependent.putInt(repeatedOffset, newOffset);
    repeatedOffset += 4;

    // if there were no defined fields, skip.
    if(fieldIndex > -1){
      batchIndex++;
      recordCount++;
    }
  }

  /**
   * This method is a helper method added for DRILL-951
   * TextRecordReader to call this method to get field names out
   * @return array of field data strings
   */
  public String [] getTextOutput () throws ExecutionSetupException {
    if (recordCount == 0 || fieldIndex == -1) {
      return null;
    }

    if (this.recordStart != characterData) {
      throw new ExecutionSetupException("record text was requested before finishing record");
    }

    //Currently only first line header is supported. Return only first record.
    int retSize = fieldIndex+1;
    String [] out = new String [retSize];

    RepeatedVarCharVector.Accessor a = this.vector.getAccessor();
    for (int i=0; i<retSize; i++){
      out[i] = a.getSingleObject(0,i).toString();
    }
    return out;
  }

  // No per-batch cleanup is required; all state is reset in startBatch().
  @Override
  public void finishBatch() { }
}
package com.google.api.ads.adwords.jaxws.v201605.cm;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;


/**
 * 
 *             Errors caused by invalid usage of FeedMappingService
 *           
 * 
 * <p>Java class for FeedMappingError complex type.
 * 
 * <p>The following schema fragment specifies the expected content contained within this class.
 * 
 * <pre>
 * &lt;complexType name="FeedMappingError">
 *   &lt;complexContent>
 *     &lt;extension base="{https://adwords.google.com/api/adwords/cm/v201605}ApiError">
 *       &lt;sequence>
 *         &lt;element name="reason" type="{https://adwords.google.com/api/adwords/cm/v201605}FeedMappingError.Reason" minOccurs="0"/>
 *       &lt;/sequence>
 *     &lt;/extension>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 * 
 * <p>NOTE(review): this appears to be a JAXB class generated from the AdWords
 * v201605 WSDL (package name {@code jaxws}, schema-fragment javadoc) — do not
 * hand-edit; regenerate from the service definition instead.
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "FeedMappingError", propOrder = {
    "reason"
})
public class FeedMappingError
    extends ApiError
{

    // Optional in the schema (minOccurs="0"), so may legitimately be null.
    @XmlSchemaType(name = "string")
    protected FeedMappingErrorReason reason;

    /**
     * Gets the value of the reason property.
     * 
     * @return
     *     possible object is
     *     {@link FeedMappingErrorReason }
     *     
     */
    public FeedMappingErrorReason getReason() {
        return reason;
    }

    /**
     * Sets the value of the reason property.
     * 
     * @param value
     *     allowed object is
     *     {@link FeedMappingErrorReason }
     *     
     */
    public void setReason(FeedMappingErrorReason value) {
        this.reason = value;
    }

}
package io.eventuate.local.java.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.BiConsumer;

/**
 * Processes a Kafka message and tracks the message offsets that have been successfully processed and can be committed
 */
public class KafkaMessageProcessor {

  private final Logger logger = LoggerFactory.getLogger(getClass());

  private final String subscriberId;
  private final BiConsumer<ConsumerRecord<String, String>, BiConsumer<Void, Throwable>> handler;
  private final OffsetTracker offsetTracker = new OffsetTracker();

  // Records whose handler completed successfully; drained by offsetsToCommit().
  private final BlockingQueue<ConsumerRecord<String, String>> processedRecords = new LinkedBlockingQueue<>();

  public KafkaMessageProcessor(String subscriberId,
                               BiConsumer<ConsumerRecord<String, String>, BiConsumer<Void, Throwable>> handler) {
    this.subscriberId = subscriberId;
    this.handler = handler;
  }

  /**
   * Marks the record's offset as in-flight and hands the record to the subscriber's
   * handler; the handler's completion callback enqueues it for a later commit.
   */
  public void process(ConsumerRecord<String, String> record) {
    TopicPartition partition = new TopicPartition(record.topic(), record.partition());
    offsetTracker.noteUnprocessed(partition, record.offset());
    handler.accept(record, (result, t) -> recordHandled(record, t));
  }

  // Completion callback: failures are only logged; successes are queued for commit.
  private void recordHandled(ConsumerRecord<String, String> record, Throwable t) {
    if (t != null) {
      logger.error("Got exception: ", t);
    } else {
      logger.debug("Adding processed record to queue {} {}", subscriberId, record.offset());
      processedRecords.add(record);
    }
  }

  /**
   * Drains every record processed since the last call into the tracker and
   * returns the offsets that are now safe to commit.
   */
  public Map<TopicPartition, OffsetAndMetadata> offsetsToCommit() {
    int drained = 0;
    for (ConsumerRecord<String, String> done = processedRecords.poll(); done != null; done = processedRecords.poll()) {
      drained++;
      offsetTracker.noteProcessed(new TopicPartition(done.topic(), done.partition()), done.offset());
    }
    logger.trace("removed {} {} processed records from queue", subscriberId, drained);
    return offsetTracker.offsetsToCommit();
  }

  /** Informs the tracker that the given offsets have actually been committed to Kafka. */
  public void noteOffsetsCommitted(Map<TopicPartition, OffsetAndMetadata> offsetsToCommit) {
    offsetTracker.noteOffsetsCommitted(offsetsToCommit);
  }

  /** Exposes the underlying tracker (pending, i.e. not-yet-committed, offsets). */
  public OffsetTracker getPending() {
    return offsetTracker;
  }
}
package habr.demo.app;

import org.testcontainers.containers.GenericContainer;

/**
 * Contract for a local dependency (backed by a Testcontainers container) that an
 * integration test needs running before it starts.
 *
 * <p>NOTE(review): {@link GenericContainer} is used as a raw type here; consider
 * {@code GenericContainer<?>} to avoid unchecked warnings in implementations —
 * confirm it does not break existing implementers before changing.
 */
public interface IntegrationTestLocalDependency {

    // Identifier for this dependency — presumably used for logging/lookup; verify against callers.
    String name();

    // The container definition to be started for this dependency.
    GenericContainer containerDefinition();

    // Publishes the started container's connection details as system properties.
    // NOTE(review): presumably called after the container is running — confirm with the caller.
    void initializeSystemProperties(GenericContainer it);
}
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.workspaces.model;

import javax.annotation.Generated;

/**
 * Base exception for all service exceptions thrown by Amazon WorkSpaces
 *
 * <p>Generated code (see {@code @Generated} below) — do not hand-edit; changes
 * belong in the SDK code generator.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class AmazonWorkspacesException extends com.amazonaws.AmazonServiceException {

    private static final long serialVersionUID = 1L;

    /**
     * Constructs a new AmazonWorkspacesException with the specified error message.
     *
     * @param message
     *        Describes the error encountered.
     */
    public AmazonWorkspacesException(String message) {
        super(message);
    }

}
package com.blog.login_check2.controller;

import com.blog.login_check2.dao.articlesMapper;
import com.blog.login_check2.dao.commentsMapper;
import com.blog.login_check2.dao.usersMapper;
import com.blog.utils.GetSqlSession;
import com.blog.utils.ObjToMap;
import com.blog.utils.ReadFile;
import org.apache.ibatis.session.SqlSession;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseBody;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.List;
import java.util.Map;

/**
 * Read-only endpoints of the blog: article lists, article content, comments and users.
 *
 * <p>Fixes over the previous revision: every DB method used
 * {@code finally { sqlSession.close(); }} without a null check, so if
 * {@code GetSqlSession.getSqlSeesion()} itself threw, the finally block raised a
 * NullPointerException that masked the real error. All methods now use
 * try-with-resources ({@code SqlSession} is {@code Closeable}), and the duplicated
 * session-username extraction is factored into {@link #sessionUsername}.
 *
 * <p>NOTE(review): article paths are built with a hard-coded {@code "\\"} separator,
 * which only works on Windows — consider {@code File.separator} if this is ever
 * deployed elsewhere (left unchanged here to avoid altering runtime behavior).
 */
@Controller
public class GetController {

    // Root directory for uploaded article files, injected from configuration.
    @Value("${uploadDir}")
    private String uploadDir;

    /**
     * Extracts the logged-in user's name from the HTTP session.
     *
     * @return the username, or {@code null} when no valid session user exists
     *         (e.g. the session was cleared by an application restart)
     */
    private String sessionUsername(HttpServletRequest request) {
        try {
            HttpSession httpSession = request.getSession();
            Map<String, Object> user = ObjToMap.ObjectToMap(httpSession.getAttribute("user"));
            return user.get("username").toString();
        } catch (Exception e) {
            System.out.println("请重新登入"); // session lost after restart — caller treats null as "not logged in"
            return null;
        }
    }

    /**
     * Lists the logged-in user's articles.
     *
     * @return the article rows, or {@code null} when not logged in or on a DB error
     */
    @CrossOrigin(origins = "http://localhost:3000", allowCredentials = "true") // allow the dev front-end origin
    @GetMapping("/getArticles")
    @ResponseBody
    public List<Map> getArticles(HttpServletRequest request) {
        String username = sessionUsername(request);
        if (username == null) {
            return null;
        }
        // try-with-resources: session is closed even when getSqlSeesion() or the query throws.
        try (SqlSession sqlSession = GetSqlSession.getSqlSeesion()) {
            articlesMapper articles = sqlSession.getMapper(articlesMapper.class);
            return articles.getArticles(username);
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Lists all publicly shared articles (no login required).
     *
     * @return the shared article rows, or {@code null} on a DB error
     */
    @CrossOrigin(origins = "http://localhost:3000", allowCredentials = "true") // allow the dev front-end origin
    @GetMapping("/getShareArticles")
    @ResponseBody
    public List<Map> getShareArticles() {
        try (SqlSession sqlSession = GetSqlSession.getSqlSeesion()) {
            articlesMapper articles = sqlSession.getMapper(articlesMapper.class);
            return articles.getShareArticles();
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Lists the comments attached to one article.
     *
     * @param title  article title
     * @param writer article author
     * @return the comment rows, or {@code null} on a DB error
     */
    @CrossOrigin(origins = "http://localhost:3000", allowCredentials = "true") // allow the dev front-end origin
    @GetMapping("/getComments")
    @ResponseBody
    public List<Map> getComments(@RequestParam(value = "title") String title,
                                 @RequestParam(value = "writer") String writer) {
        try (SqlSession sqlSession = GetSqlSession.getSqlSeesion()) {
            commentsMapper comments = sqlSession.getMapper(commentsMapper.class);
            return comments.getComments(title, writer);
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Returns the markdown content of one of the logged-in user's articles.
     *
     * @return the file content, or {@code ""} when not logged in
     */
    @CrossOrigin(origins = "http://localhost:3000", allowCredentials = "true") // allow the dev front-end origin
    @GetMapping("/getArticleContent")
    @ResponseBody
    public String getArticleContent(@RequestParam(value = "title") String title, HttpServletRequest request) {
        String username = sessionUsername(request);
        if (username == null) {
            return "";
        }
        // Build the on-disk path directly from uploadDir/user/title (Windows-style separators).
        String article_url = uploadDir + "\\articles\\" + username + "\\" + title + ".md";
        ReadFile readFile = new ReadFile();
        return readFile.readMd(article_url);
    }

    /**
     * Returns the markdown content of a shared article identified by title and owner.
     */
    @CrossOrigin(origins = "http://localhost:3000", allowCredentials = "true") // allow the dev front-end origin
    @GetMapping("/getShareArticleContent")
    @ResponseBody
    public String getShareArticleContent(@RequestParam(value = "title") String title,
                                         @RequestParam(value = "username") String username) {
        String article_url = uploadDir + "\\articles\\" + username + "\\" + title + ".md";
        ReadFile readFile = new ReadFile();
        return readFile.readMd(article_url);
    }

    /**
     * Returns whether one of the logged-in user's articles is shared.
     *
     * @return the share flag from the DB, {@code ""} when not logged in, or {@code "fail"} on a DB error
     */
    @CrossOrigin(origins = "http://localhost:3000", allowCredentials = "true") // allow the dev front-end origin
    @GetMapping("/getShare")
    @ResponseBody
    public String getShare(@RequestParam(value = "title") String title, HttpServletRequest request) {
        String username = sessionUsername(request);
        if (username == null) {
            return "";
        }
        try (SqlSession sqlSession = GetSqlSession.getSqlSeesion()) {
            articlesMapper articles = sqlSession.getMapper(articlesMapper.class);
            return articles.getIsShared(username, title);
        } catch (Exception e) {
            e.printStackTrace();
            return "fail";
        }
    }

    /**
     * Lists all registered users.
     *
     * @return the user rows, or {@code null} on a DB error
     */
    @CrossOrigin(origins = "http://localhost:3000", allowCredentials = "true") // allow the dev front-end origin
    @GetMapping("/getAllUsers")
    @ResponseBody
    public List<Map> getAllUsers() {
        try (SqlSession sqlSession = GetSqlSession.getSqlSeesion()) {
            usersMapper users = sqlSession.getMapper(usersMapper.class);
            return users.getAllUsers();
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Lists every article in the system.
     *
     * @return the article rows, or {@code null} on a DB error
     */
    @CrossOrigin(origins = "http://localhost:3000", allowCredentials = "true") // allow the dev front-end origin
    @GetMapping("/getAllArticles")
    @ResponseBody
    public List<Map> getAllArticles() {
        try (SqlSession sqlSession = GetSqlSession.getSqlSeesion()) {
            articlesMapper articles = sqlSession.getMapper(articlesMapper.class);
            return articles.getAllArticles();
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }
}
package net.kothar.compactlist.internal.storage;

/**
 * Backing store that packs each element into a 32-bit array slot, interpreted
 * as an unsigned value relative to the store's {@code valueOffset}.
 */
public class IntArrayStore extends CompactStore<int[]> {

	private static final long serialVersionUID = 775028421757439454L;

	public IntArrayStore() {
		this(0);
	}

	public IntArrayStore(long valueOffset) {
		super(valueOffset);
	}

	public IntArrayStore(long valueOffset, Store elements) {
		super(valueOffset, elements.size(), elements.size());
		copy(elements, 0);
	}

	public IntArrayStore(Store elements, int offset, int size) {
		super(elements, offset, size);
	}

	@Override
	public long getArrayElement(int index) {
		// Widen WITHOUT sign extension: slots hold unsigned 32-bit values.
		return Integer.toUnsignedLong(store[index]);
	}

	@Override
	protected void setArrayElement(int index, long value) {
		store[index] = (int) value;
	}

	@Override
	protected int[] allocateArray(int length) {
		return new int[length];
	}

	@Override
	public boolean inRange(long value) {
		// Representable iff the offset-relative value fits in an unsigned 32-bit slot.
		long relative = value - valueOffset;
		return relative >= 0 && relative < (1L << 32);
	}

	@Override
	public int getWidth() {
		return Integer.SIZE;
	}

	@Override
	protected ArrayStore<int[]> newInstance() {
		return new IntArrayStore(valueOffset);
	}
}
package crazypants.enderio.machines.machine.ihopper;

import java.util.Random;

import javax.annotation.Nonnull;
import javax.annotation.Nullable;

import com.enderio.core.api.client.gui.IResourceTooltipProvider;

import crazypants.enderio.api.IModObject;
import crazypants.enderio.base.config.config.PersonalConfig;
import crazypants.enderio.base.machine.base.block.AbstractCapabilityPoweredMachineBlock;
import crazypants.enderio.base.render.IBlockStateWrapper;
import crazypants.enderio.base.render.IRenderMapper;
import crazypants.enderio.base.render.IRenderMapper.IItemRenderMapper;
import crazypants.enderio.base.render.ISmartRenderAwareBlock;
import net.minecraft.block.state.BlockFaceShape;
import net.minecraft.block.state.IBlockState;
import net.minecraft.client.gui.GuiScreen;
import net.minecraft.entity.player.EntityPlayer;
import net.minecraft.inventory.Container;
import net.minecraft.util.EnumFacing;
import net.minecraft.util.EnumParticleTypes;
import net.minecraft.util.math.BlockPos;
import net.minecraft.world.IBlockAccess;
import net.minecraft.world.World;
import net.minecraftforge.fml.relauncher.Side;
import net.minecraftforge.fml.relauncher.SideOnly;

/**
 * The Impulse Hopper machine block: wires the {@link TileImpulseHopper} tile
 * entity to its GUI, renderers and client-side particle effects. Methods
 * annotated {@code @SideOnly(Side.CLIENT)} exist only on the client jar —
 * do not call them from common/server code.
 */
public class BlockImpulseHopper extends AbstractCapabilityPoweredMachineBlock<TileImpulseHopper>
    implements ISmartRenderAwareBlock, IResourceTooltipProvider {

  /** Factory used during registration: constructs the block and runs its init(). */
  public static BlockImpulseHopper create(@Nonnull IModObject modObject) {
    BlockImpulseHopper iHopper = new BlockImpulseHopper(modObject);
    iHopper.init();
    return iHopper;
  }

  public BlockImpulseHopper(@Nonnull IModObject mo) {
    super(mo);
    setShape(mkShape(BlockFaceShape.SOLID));
  }

  /** Adds the tile's active flag to the render cache key so active/idle states render differently. */
  @Override
  protected void setBlockStateWrapperCache(@Nonnull IBlockStateWrapper blockStateWrapper, @Nonnull IBlockAccess world, @Nonnull BlockPos pos,
      @Nonnull TileImpulseHopper tileEntity) {
    blockStateWrapper.addCacheKey(tileEntity.isActive());
  }

  @Override
  @SideOnly(Side.CLIENT)
  public @Nonnull IItemRenderMapper getItemRenderMapper() {
    return ImpulseRenderMapper.instance;
  }

  @Override
  @SideOnly(Side.CLIENT)
  public IRenderMapper.IBlockRenderMapper getBlockRenderMapper() {
    return ImpulseRenderMapper.instance;
  }

  /** Server-side GUI container for this machine. */
  @Override
  public @Nullable Container getServerGuiElement(@Nonnull EntityPlayer player, @Nonnull World world, @Nonnull BlockPos pos, @Nullable EnumFacing facing,
      int param1, @Nonnull TileImpulseHopper te) {
    return new ContainerImpulseHopper(player.inventory, te);
  }

  /** Client-side GUI screen for this machine. */
  @Override
  @SideOnly(Side.CLIENT)
  public @Nullable GuiScreen getClientGuiElement(@Nonnull EntityPlayer player, @Nonnull World world, @Nonnull BlockPos pos, @Nullable EnumFacing facing,
      int param1, @Nonnull TileImpulseHopper te) {
    return new GuiImpulseHopper(player.inventory, te);
  }

  /**
   * Client visual effect: while the machine is active (and particles are enabled
   * and the block above is not opaque), occasionally spawn a redstone particle
   * just above the block at a randomized x/z position.
   */
  @SideOnly(Side.CLIENT)
  @Override
  public void randomDisplayTick(@Nonnull IBlockState stateIn, @Nonnull World world, @Nonnull BlockPos pos, @Nonnull Random rand) {
    TileImpulseHopper te = getTileEntity(world, pos);
    if (PersonalConfig.machineParticlesEnabled.get() && te != null && te.isActive() && !world.getBlockState(pos.up()).isOpaqueCube()) {
      // 1-in-8 chance per display tick keeps the effect sparse.
      if (rand.nextInt(8) == 0) {
        float startX = pos.getX() + 0.8F - rand.nextFloat() * 0.6F;
        float startY = pos.getY() + 1.0F;
        float startZ = pos.getZ() + 0.8F - rand.nextFloat() * 0.6F;
        world.spawnParticle(EnumParticleTypes.REDSTONE, startX, startY, startZ, 0, 0, 0);
      }
    }
  }

  // Comparators can read this machine's state (the actual value comes from the superclass/tile).
  @Override
  public boolean hasComparatorInputOverride(@Nonnull IBlockState state) {
    return true;
  }
}
/** * Copyright (c) 2013-2019 Contributors to the Eclipse Foundation * * <p> See the NOTICE file distributed with this work for additional information regarding copyright * ownership. All rights reserved. This program and the accompanying materials are made available * under the terms of the Apache License, Version 2.0 which accompanies this distribution and is * available at http://www.apache.org/licenses/LICENSE-2.0.txt */ package org.locationtech.geowave.analytic; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import org.geotools.feature.type.BasicFeatureTypes; import org.geotools.filter.text.cql2.CQLException; import org.geotools.filter.text.ecql.ECQL; import org.junit.Test; import org.locationtech.geowave.analytic.AnalyticFeature.ClusterFeatureAttribute; import org.locationtech.geowave.analytic.clustering.ClusteringUtils; import org.locationtech.jts.geom.Coordinate; import org.locationtech.jts.geom.Geometry; import org.locationtech.jts.geom.GeometryFactory; import org.locationtech.jts.io.ParseException; import org.opengis.feature.simple.SimpleFeature; import org.opengis.feature.simple.SimpleFeatureType; import org.opengis.filter.Filter; import org.opengis.geometry.MismatchedDimensionException; import org.opengis.referencing.FactoryException; import org.opengis.referencing.NoSuchAuthorityCodeException; public class AnalyticFeatureTest { @Test public void testGeometryCreation() throws MismatchedDimensionException, NoSuchAuthorityCodeException, FactoryException, CQLException, ParseException { final SimpleFeatureType ftype = AnalyticFeature.createGeometryFeatureAdapter( "centroid", new String[] {"extra1"}, BasicFeatureTypes.DEFAULT_NAMESPACE, ClusteringUtils.CLUSTERING_CRS).getFeatureType(); final GeometryFactory factory = new GeometryFactory(); SimpleFeature feature = AnalyticFeature.createGeometryFeature( ftype, "b1", "123", "fred", "NA", 20.30203, factory.createPoint(new 
Coordinate(02.33, 0.23)), new String[] {"extra1"}, new double[] {0.022}, 1, 1, 0); assertEquals( new Coordinate(02.33, 0.23), ((Geometry) feature.getDefaultGeometry()).getCoordinate()); System.out.println(((Geometry) feature.getDefaultGeometry()).getPrecisionModel()); System.out.println(((Geometry) feature.getDefaultGeometry()).getEnvelope()); feature = AnalyticFeature.createGeometryFeature( ftype, "b1", "123", "fred", "NA", 20.30203, factory.createPoint(new Coordinate(02.33, 0.23)), new String[] {"extra1"}, new double[] {0.022}, 10, 1, 0); assertEquals( new Coordinate(02.33, 0.23), ((Geometry) feature.getDefaultGeometry()).getCoordinate()); assertEquals( "geometry", feature.getFeatureType().getGeometryDescriptor().getName().getLocalPart()); assertEquals( new Integer(10), feature.getAttribute(ClusterFeatureAttribute.ZOOM_LEVEL.attrName())); Filter gtFilter = ECQL.toFilter("BBOX(geometry,2,0,3,1) and level = 10"); assertTrue(gtFilter.evaluate(feature)); gtFilter = ECQL.toFilter("BBOX(geometry,2,0,3,1) and level = 9"); assertFalse(gtFilter.evaluate(feature)); gtFilter = ECQL.toFilter("BBOX(geometry,2,0,3,1) and batchID = 'b1'"); assertTrue(gtFilter.evaluate(feature)); } }
/* * Copyright 2019 GridGain Systems, Inc. and Contributors. * * Licensed under the GridGain Community Edition License (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.gridgain.com/products/software/community-edition/gridgain-community-edition-license * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.cache.distributed.dht.preloader; import java.io.Externalizable; import java.nio.ByteBuffer; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.GridDirectCollection; import org.apache.ignite.internal.GridDirectMap; import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; import org.jetbrains.annotations.Nullable; /** * Information about partitions of a single node. 
 */
public class GridDhtPartitionsSingleMessage extends GridDhtPartitionsAbstractMessage {
    /** */
    private static final long serialVersionUID = 0L;

    /** Local partitions (group ID -> partition map). Transient: transferred as {@link #partsBytes}. */
    @GridToStringInclude
    @GridDirectTransient
    private Map<Integer, GridDhtPartitionMap> parts;

    /** Duplicate-map optimization: cache ID -> ID of a cache with an identical partition state map. */
    @GridDirectMap(keyType = Integer.class, valueType = Integer.class)
    private Map<Integer, Integer> dupPartsData;

    /** Serialized partitions. */
    private byte[] partsBytes;

    /** Partitions update counters. */
    @GridToStringInclude
    @GridDirectTransient
    private Map<Integer, Object> partCntrs;

    /** Serialized partitions counters. */
    private byte[] partCntrsBytes;

    /** Partitions sizes. */
    @GridToStringInclude
    @GridDirectTransient
    private Map<Integer, Map<Integer, Long>> partsSizes;

    /** Serialized partitions sizes. */
    private byte[] partsSizesBytes;

    /** Partitions history reservation counters. */
    @GridToStringInclude
    @GridDirectTransient
    private Map<Integer, Map<Integer, Long>> partHistCntrs;

    /** Serialized partitions history reservation counters. */
    private byte[] partHistCntrsBytes;

    /** Exception. */
    @GridToStringInclude
    @GridDirectTransient
    private Exception err;

    /** Serialized form of {@link #err}. */
    private byte[] errBytes;

    /** {@code True} if sent from a client node. */
    private boolean client;

    /** Cache group IDs for which affinity is requested. */
    @GridDirectCollection(Integer.class)
    private Collection<Integer> grpsAffRequest;

    /** Start time of exchange on node which sent this message in nanoseconds. */
    private long exchangeStartTime;

    /**
     * Exchange finish message, sent to new coordinator when it tries to
     * restore state after previous coordinator failed during exchange.
     */
    private GridDhtPartitionsFullMessage finishMsg;

    /**
     * Required by {@link Externalizable}.
     */
    public GridDhtPartitionsSingleMessage() {
        // No-op.
    }

    /**
     * @param exchId Exchange ID.
     * @param client Client message flag.
     * @param lastVer Last version.
     * @param compress {@code True} if it is possible to use compression for message.
     */
    public GridDhtPartitionsSingleMessage(GridDhtPartitionExchangeId exchId,
        boolean client,
        @Nullable GridCacheVersion lastVer,
        boolean compress
    ) {
        super(exchId, lastVer);

        compressed(compress);

        this.client = client;
    }

    /**
     * @param finishMsg Exchange finish message (used to restore exchange state on new coordinator).
     */
    void finishMessage(GridDhtPartitionsFullMessage finishMsg) {
        this.finishMsg = finishMsg;
    }

    /**
     * @return Exchange finish message (used to restore exchange state on new coordinator).
     */
    GridDhtPartitionsFullMessage finishMessage() {
        return finishMsg;
    }

    /**
     * @param grpsAffRequest Cache groups to get affinity for (affinity is requested when node joins cluster).
     */
    void cacheGroupsAffinityRequest(Collection<Integer> grpsAffRequest) {
        this.grpsAffRequest = grpsAffRequest;
    }

    /**
     * @return Cache groups to get affinity for (affinity is requested when node joins cluster).
     */
    @Nullable public Collection<Integer> cacheGroupsAffinityRequest() {
        return grpsAffRequest;
    }

    /** {@inheritDoc} */
    @Override public int handlerId() {
        return 0;
    }

    /**
     * @return {@code True} if sent from client node.
     */
    public boolean client() {
        return client;
    }

    /**
     * Registers the local partition map for a cache. When {@code dupDataCache} is given,
     * the map itself must be empty and the receiver restores it from the referenced
     * cache's map (see {@link #finishUnmarshal}).
     *
     * @param cacheId Cache ID to add local partition for.
     * @param locMap Local partition map.
     * @param dupDataCache Optional ID of cache with the same partition state map.
     */
    public void addLocalPartitionMap(int cacheId, GridDhtPartitionMap locMap, @Nullable Integer dupDataCache) {
        if (parts == null)
            parts = new HashMap<>();

        parts.put(cacheId, locMap);

        if (dupDataCache != null) {
            assert compressed();
            assert F.isEmpty(locMap.map());
            assert parts.containsKey(dupDataCache);

            if (dupPartsData == null)
                dupPartsData = new HashMap<>();

            dupPartsData.put(cacheId, dupDataCache);
        }
    }

    /**
     * @param grpId Cache group ID.
     * @param cntrMap Partition update counters.
     */
    public void addPartitionUpdateCounters(int grpId, Object cntrMap) {
        if (partCntrs == null)
            partCntrs = new HashMap<>();

        partCntrs.put(grpId, cntrMap);
    }

    /**
     * Returns stored update counters, normalizing the two supported stored forms
     * ({@code CachePartitionPartialCountersMap} or a plain counters map).
     *
     * @param grpId Cache group ID.
     * @param partsCnt Total cache partitions.
     * @return Partition update counters.
     */
    public CachePartitionPartialCountersMap partitionUpdateCounters(int grpId, int partsCnt) {
        Object res = partCntrs == null ? null : partCntrs.get(grpId);

        if (res == null)
            return CachePartitionPartialCountersMap.EMPTY;

        if (res instanceof CachePartitionPartialCountersMap)
            return (CachePartitionPartialCountersMap)res;

        assert res instanceof Map : res;

        Map<Integer, T2<Long, Long>> map = (Map<Integer, T2<Long, Long>>)res;

        return CachePartitionPartialCountersMap.fromCountersMap(map, partsCnt);
    }

    /**
     * Adds partition sizes map for specified {@code grpId} to the current message.
     *
     * @param grpId Group id.
     * @param partSizesMap Partition sizes map.
     */
    public void addPartitionSizes(int grpId, Map<Integer, Long> partSizesMap) {
        if (partSizesMap.isEmpty())
            return;

        if (partsSizes == null)
            partsSizes = new HashMap<>();

        partsSizes.put(grpId, partSizesMap);
    }

    /**
     * Returns partition sizes map for specified {@code grpId}.
     *
     * @param grpId Group id.
     * @return Partition sizes map (partId, partSize).
     */
    public Map<Integer, Long> partitionSizes(int grpId) {
        if (partsSizes == null)
            return Collections.emptyMap();

        return partsSizes.getOrDefault(grpId, Collections.emptyMap());
    }

    /**
     * @param grpId Cache group ID.
     * @param cntrMap Partition history counters.
     */
    public void partitionHistoryCounters(int grpId, Map<Integer, Long> cntrMap) {
        if (cntrMap.isEmpty())
            return;

        if (partHistCntrs == null)
            partHistCntrs = new HashMap<>();

        partHistCntrs.put(grpId, cntrMap);
    }

    /**
     * Bulk variant: merges all per-group history counter maps into this message.
     *
     * @param cntrMap Partition history counters.
     */
    void partitionHistoryCounters(Map<Integer, Map<Integer, Long>> cntrMap) {
        for (Map.Entry<Integer, Map<Integer, Long>> e : cntrMap.entrySet())
            partitionHistoryCounters(e.getKey(), e.getValue());
    }

    /**
     * @param grpId Cache group ID.
     * @return Partition history counters.
     */
    Map<Integer, Long> partitionHistoryCounters(int grpId) {
        if (partHistCntrs != null) {
            Map<Integer, Long> res = partHistCntrs.get(grpId);

            return res != null ? res : Collections.<Integer, Long>emptyMap();
        }

        return Collections.emptyMap();
    }

    /**
     * @return Local partitions.
     */
    public Map<Integer, GridDhtPartitionMap> partitions() {
        if (parts == null)
            parts = new HashMap<>();

        return parts;
    }

    /**
     * @param ex Exception.
     */
    public void setError(Exception ex) {
        this.err = ex;
    }

    /**
     * @return Not null exception if exchange processing failed.
     */
    @Nullable public Exception getError() {
        return err;
    }

    /**
     * Start time of exchange on node which sent this message.
     */
    public long exchangeStartTime() {
        return exchangeStartTime;
    }

    /**
     * @param exchangeStartTime Start time of exchange.
     */
    public void exchangeStartTime(long exchangeStartTime) {
        this.exchangeStartTime = exchangeStartTime;
    }

    /**
     * Lazily marshals the transient maps and the error into their byte-array
     * counterparts; optionally compresses them when the message is marked compressed.
     *
     * {@inheritDoc}
     * @param ctx
     */
    @Override public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
        super.prepareMarshal(ctx);

        // Marshal only if at least one transient field has not been serialized yet.
        boolean marshal = (parts != null && partsBytes == null) ||
            (partCntrs != null && partCntrsBytes == null) ||
            (partHistCntrs != null && partHistCntrsBytes == null) ||
            (partsSizes != null && partsSizesBytes == null) ||
            (err != null && errBytes == null);

        if (marshal) {
            byte[] partsBytes0 = null;
            byte[] partCntrsBytes0 = null;
            byte[] partHistCntrsBytes0 = null;
            byte[] partsSizesBytes0 = null;
            byte[] errBytes0 = null;

            if (parts != null && partsBytes == null)
                partsBytes0 = U.marshal(ctx, parts);

            if (partCntrs != null && partCntrsBytes == null)
                partCntrsBytes0 = U.marshal(ctx, partCntrs);

            if (partHistCntrs != null && partHistCntrsBytes == null)
                partHistCntrsBytes0 = U.marshal(ctx, partHistCntrs);

            if (partsSizes != null && partsSizesBytes == null)
                partsSizesBytes0 = U.marshal(ctx, partsSizes);

            if (err != null && errBytes == null)
                errBytes0 = U.marshal(ctx, err);

            if (compressed()) {
                try {
                    byte[] partsBytesZip = U.zip(partsBytes0);
                    byte[] partCntrsBytesZip = U.zip(partCntrsBytes0);
                    byte[] partHistCntrsBytesZip = U.zip(partHistCntrsBytes0);
                    byte[] partsSizesBytesZip = U.zip(partsSizesBytes0);
                    byte[] exBytesZip = U.zip(errBytes0);

                    partsBytes0 = partsBytesZip;
                    partCntrsBytes0 = partCntrsBytesZip;
                    partHistCntrsBytes0 = partHistCntrsBytesZip;
                    partsSizesBytes0 = partsSizesBytesZip;
                    errBytes0 = exBytesZip;
                }
                catch (IgniteCheckedException e) {
                    // NOTE(review): on zip failure the uncompressed bytes are kept while the
                    // message still reports compressed() — verify the receiver tolerates this.
                    U.error(ctx.logger(getClass()), "Failed to compress partitions data: " + e, e);
                }
            }

            partsBytes = partsBytes0;
            partCntrsBytes = partCntrsBytes0;
            partHistCntrsBytes = partHistCntrsBytes0;
            partsSizesBytes = partsSizesBytes0;
            errBytes = errBytes0;
        }
    }

    /**
     * Restores the transient fields from their serialized forms and expands the
     * duplicate-map optimization recorded in {@link #dupPartsData}.
     *
     * {@inheritDoc}
     */
    @Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
        super.finishUnmarshal(ctx, ldr);

        if (partsBytes != null && parts == null) {
            if (compressed())
                parts = U.unmarshalZip(ctx.marshaller(), partsBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
            else
                parts = U.unmarshal(ctx, partsBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
        }

        if (partCntrsBytes != null && partCntrs == null) {
            if (compressed())
                partCntrs = U.unmarshalZip(ctx.marshaller(), partCntrsBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
            else
                partCntrs = U.unmarshal(ctx, partCntrsBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
        }

        if (partHistCntrsBytes != null && partHistCntrs == null) {
            if (compressed())
                partHistCntrs = U.unmarshalZip(ctx.marshaller(), partHistCntrsBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
            else
                partHistCntrs = U.unmarshal(ctx, partHistCntrsBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
        }

        if (partsSizesBytes != null && partsSizes == null) {
            if (compressed())
                partsSizes = U.unmarshalZip(ctx.marshaller(), partsSizesBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
            else
                partsSizes = U.unmarshal(ctx, partsSizesBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
        }

        if (errBytes != null && err == null) {
            if (compressed())
                err = U.unmarshalZip(ctx.marshaller(), errBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
            else
                err = U.unmarshal(ctx, errBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
        }

        if (dupPartsData != null) {
            assert parts != null;

            // Each entry says: cache e.getKey() has the same partition state map as
            // cache e.getValue(); copy the states into the (empty) transmitted map.
            for (Map.Entry<Integer, Integer> e : dupPartsData.entrySet()) {
                GridDhtPartitionMap map1 = parts.get(e.getKey());

                assert map1 != null : e.getKey();
                assert F.isEmpty(map1.map());
                assert !map1.hasMovingPartitions();

                GridDhtPartitionMap map2 = parts.get(e.getValue());

                assert map2 != null : e.getValue();
                assert map2.map() != null;

                for (Map.Entry<Integer, GridDhtPartitionState> e0 : map2.map().entrySet())
                    map1.put(e0.getKey(), e0.getValue());
            }
        }
    }

    /** {@inheritDoc} */
    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
        writer.setBuffer(buf);

        if (!super.writeTo(buf, writer))
            return false;

        if (!writer.isHeaderWritten()) {
            if (!writer.writeHeader(directType(), fieldsCount()))
                return false;

            writer.onHeaderWritten();
        }

        // Cases fall through on purpose: the writer resumes from its saved state
        // and each case returns false if the buffer cannot hold the next field.
        switch (writer.state()) {
            case 6:
                if (!writer.writeBoolean("client", client))
                    return false;

                writer.incrementState();

            case 7:
                if (!writer.writeMap("dupPartsData", dupPartsData, MessageCollectionItemType.INT, MessageCollectionItemType.INT))
                    return false;

                writer.incrementState();

            case 8:
                if (!writer.writeByteArray("errBytes", errBytes))
                    return false;

                writer.incrementState();

            case 9:
                if (!writer.writeLong("exchangeStartTime", exchangeStartTime))
                    return false;

                writer.incrementState();

            case 10:
                if (!writer.writeMessage("finishMsg", finishMsg))
                    return false;

                writer.incrementState();

            case 11:
                if (!writer.writeCollection("grpsAffRequest", grpsAffRequest, MessageCollectionItemType.INT))
                    return false;

                writer.incrementState();

            case 12:
                if (!writer.writeByteArray("partCntrsBytes", partCntrsBytes))
                    return false;

                writer.incrementState();

            case 13:
                if (!writer.writeByteArray("partHistCntrsBytes", partHistCntrsBytes))
                    return false;

                writer.incrementState();

            case 14:
                if (!writer.writeByteArray("partsBytes", partsBytes))
                    return false;

                writer.incrementState();

            case 15:
                if (!writer.writeByteArray("partsSizesBytes", partsSizesBytes))
                    return false;

                writer.incrementState();

        }

        return true;
    }

    /** {@inheritDoc} */
    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
        reader.setBuffer(buf);

        if (!reader.beforeMessageRead())
            return false;

        if (!super.readFrom(buf, reader))
            return false;

        // Mirror of writeTo: same field order, same intentional fall-through.
        switch (reader.state()) {
            case 6:
                client = reader.readBoolean("client");

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

            case 7:
                dupPartsData = reader.readMap("dupPartsData", MessageCollectionItemType.INT, MessageCollectionItemType.INT, false);

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

            case 8:
                errBytes = reader.readByteArray("errBytes");

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

            case 9:
                exchangeStartTime = reader.readLong("exchangeStartTime");

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

            case 10:
                finishMsg = reader.readMessage("finishMsg");

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

            case 11:
                grpsAffRequest = reader.readCollection("grpsAffRequest", MessageCollectionItemType.INT);

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

            case 12:
                partCntrsBytes = reader.readByteArray("partCntrsBytes");

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

            case 13:
                partHistCntrsBytes = reader.readByteArray("partHistCntrsBytes");

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

            case 14:
                partsBytes = reader.readByteArray("partsBytes");

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

            case 15:
                partsSizesBytes = reader.readByteArray("partsSizesBytes");

                if (!reader.isLastRead())
                    return false;

                reader.incrementState();

        }

        return reader.afterMessageRead(GridDhtPartitionsSingleMessage.class);
    }

    /** {@inheritDoc} */
    @Override public short directType() {
        return 47;
    }

    /** {@inheritDoc} */
    @Override public byte fieldsCount() {
        return 16;
    }

    /** {@inheritDoc} */
    @Override public String toString() {
        return S.toString(GridDhtPartitionsSingleMessage.class, this, super.toString());
    }
}
package leetcode.dp.palindromepartitioningii;

/**
 * Placeholder driver for the Palindrome Partitioning II exercise.
 */
public class TestPPII {

    /**
     * Entry point; currently an intentionally empty scaffold.
     *
     * @param argv command-line arguments (unused)
     */
    public static void main(String[] argv) {
        // No driver logic yet.
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.netbeans.modules.payara.tooling.server.state;

import org.netbeans.modules.payara.tooling.admin.ResultMap;
import org.netbeans.modules.payara.tooling.data.PayaraStatusCheckResult;
import org.netbeans.modules.payara.tooling.TaskEvent;

/**
 * Server status task execution result for <code>__locations</code> command
 * including additional information.
 * <p/>
 * This class stores task execution result only. Value <code>SUCCESS</code>
 * means that Locations command task execution finished successfully but it
 * does not mean that administration command itself returned with
 * <code>COMPLETED</code> status.
 * When <code>SUCCESS</code> status is set, stored <code>result</code> value
 * shall be examined too to see real administration command execution result.
 * <p/>
 * @author Tomas Kraus
 */
class StatusResultLocations extends StatusResult {

    ////////////////////////////////////////////////////////////////////////
    // Instance attributes                                                //
    ////////////////////////////////////////////////////////////////////////

    /** Command <code>__locations</code> execution result.
     *  Immutable after construction; also exposed via {@link #getStatusResult()}. */
    final ResultMap<String, String> result;

    ////////////////////////////////////////////////////////////////////////
    // Constructors                                                       //
    ////////////////////////////////////////////////////////////////////////

    /**
     * Creates an instance of individual server status result
     * for <code>__locations</code> command.
     * <p/>
     * Command <code>__locations</code> result is stored.
     * <p/>
     * @param result       Command <code>__locations</code> execution result.
     * @param status       Individual server status returned.
     * @param failureEvent Failure cause.
     */
    StatusResultLocations(final ResultMap<String, String> result,
            final PayaraStatusCheckResult status,
            final TaskEvent failureEvent) {
        // Status and failure cause are held by the StatusResult base class.
        super(status, failureEvent);
        this.result = result;
    }

    ////////////////////////////////////////////////////////////////////////
    // Getters                                                            //
    ////////////////////////////////////////////////////////////////////////

    /**
     * Get <code>__locations</code> command execution result.
     * <p/>
     * @return <code>__locations</code> command execution result.
     */
    public ResultMap<String, String> getStatusResult() {
        return result;
    }
}
package com.duck.feature.gui.impl; import com.duck.LuciderParkour; import com.duck.feature.gui.GUIManager; import com.duck.feature.gui.NonPagedMenu; import com.duck.parkour.ParkourCategory; import com.duck.parkour.ParkourManager; import com.duck.user.User; import com.duck.utils.ChatUtils; import dev.triumphteam.gui.builder.item.ItemBuilder; import dev.triumphteam.gui.guis.Gui; import org.bukkit.enchantments.Enchantment; public class ParkourMainMenu implements NonPagedMenu<LuciderParkour> { private final ParkourManager parkourManager = LuciderParkour.getInstance().getParkourManager(); private Gui gui; private ParkourCategoryMenu parkourCategoryMenu; private final GUIManager guiManager = LuciderParkour.getInstance().getGuiManager(); private final String title; public ParkourMainMenu(String title){ this.title = title; } @Override public void setup(User user) { this.gui = Gui.gui() .title(ChatUtils.component(title)) .rows(6) .create(); parkourManager.getAllCategories() .forEach(parkourCategory -> { gui.setItem(parkourCategory.getGuiIndex(), ItemBuilder.from(parkourCategory.getGuiMaterial()) .name(ChatUtils.component(parkourCategory.getDisplayName().replace("_", " "))) .lore( ChatUtils.component("&7XP: &3" + parkourCategory.getXpReward()), ChatUtils.component("&7Required Level: &3" + parkourCategory.getRequiredLevel()) ) .enchant(Enchantment.THORNS) .asGuiItem()); }); gui.setDefaultClickAction(event -> { if(event.getWhoClicked().getOpenInventory().getTopInventory().equals(gui.getInventory())){ ParkourCategory parkourCategory = parkourManager.getBySlot(event.getSlot()); this.parkourCategoryMenu = new ParkourCategoryMenu(ChatUtils.color( parkourCategory.getDisplayName() ), parkourCategory); parkourCategoryMenu.openInventory(user); } }); } @Override public void openInventory(User user) { setup(user); user.execute(player -> gui.open(player)); user.execute(player -> player.sendMessage("Opened gui")); } public Gui getGui() { return gui; } }
/*
 * Copyright 2020 RtBrick Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package io.leitstand.inventory.service;

import static io.leitstand.commons.model.BuilderUtil.assertNotInvalidated;

import io.leitstand.commons.model.CompositeValue;

/**
 * A package version reference, identified by organization, package name and version.
 */
public class PackageVersionRef extends CompositeValue implements Comparable<PackageVersionRef>{

	/**
	 * Creates a builder for an immutable <code>PackageVersionRef</code> value object.
	 * @return a builder for an immutable <code>PackageVersionRef</code> value object.
	 */
	public static Builder newPackageVersionRef(){
		return new Builder();
	}

	/**
	 * A builder for an immutable <code>PackageVersionRef</code> value object.
	 */
	public static class Builder {

		private PackageVersionRef qualifier = new PackageVersionRef();

		/**
		 * Sets the organization that published the package.
		 * @param org the organization name.
		 * @return a reference to this builder to continue object creation.
		 */
		public Builder withOrganization(String org){
			assertNotInvalidated(getClass(), qualifier);
			qualifier.organization = org;
			return this;
		}

		/**
		 * Sets the package name.
		 * @param packageName the package name.
		 * @return a reference to this builder to continue object creation.
		 */
		public Builder withPackageName(String packageName){
			assertNotInvalidated(getClass(), qualifier);
			qualifier.packageName = packageName;
			return this;
		}

		/**
		 * Sets the package version.
		 * @param packageVersion the package version.
		 * @return a reference to this builder to continue object creation.
		 */
		public Builder withPackageVersion(Version packageVersion){
			assertNotInvalidated(getClass(), qualifier);
			qualifier.packageVersion = packageVersion;
			return this;
		}

		/**
		 * Creates the immutable <code>PackageVersionRef</code> value object and invalidates this builder.
		 * Subsequent invocations of the <code>build()</code> method raise an exception.
		 * @return the immutable <code>PackageVersionRef</code> value object.
		 */
		public PackageVersionRef build(){
			try{
				assertNotInvalidated(getClass(), qualifier);
				return qualifier;
			} finally {
				// Invalidate the builder so it cannot hand out the same instance twice.
				this.qualifier = null;
			}
		}
	}

	// Value-object state; populated only through the builder.
	private String organization;
	private String packageName;
	private Version packageVersion;

	/**
	 * Returns the name of the organization that published the package.
	 * @return the organization name.
	 */
	public String getOrganization() {
		return organization;
	}

	/**
	 * Returns the package name.
	 * @return the package name.
	 */
	public String getPackageName() {
		return packageName;
	}

	/**
	 * Returns the package version.
	 * @return the package version.
	 */
	public Version getPackageVersion() {
		return packageVersion;
	}

	/**
	 * Compares this package version with the given package version by organization, package name and version,
	 * in that order of precedence.
	 * @return a negative integer, 0, or a positive integer as this version is lower, equal or greater than the specified version.
	 */
	@Override
	public int compareTo(PackageVersionRef o) {
		int orgOrder = getOrganization().compareTo(o.getOrganization());
		if(orgOrder != 0){
			return orgOrder;
		}
		int nameOrder = getPackageName().compareTo(o.getPackageName());
		if(nameOrder != 0){
			return nameOrder;
		}
		return getPackageVersion().compareTo(o.getPackageVersion());
	}

	/**
	 * Concatenates organization, package name and package version to form a string representation of this package version reference.
	 * @return a string representation of this package version.
	 */
	@Override
	public String toString() {
		return getOrganization()+"."+getPackageName()+"-"+getPackageVersion();
	}

}
/*
 *
 *  * Copyright 2014 Orient Technologies LTD (info(at)orientechnologies.com)
 *  *
 *  * Licensed under the Apache License, Version 2.0 (the "License");
 *  * you may not use this file except in compliance with the License.
 *  * You may obtain a copy of the License at
 *  *
 *  *      http://www.apache.org/licenses/LICENSE-2.0
 *  *
 *  * Unless required by applicable law or agreed to in writing, software
 *  * distributed under the License is distributed on an "AS IS" BASIS,
 *  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  * See the License for the specific language governing permissions and
 *  * limitations under the License.
 *  *
 *  * For more information: http://www.orientechnologies.com
 *
 */
package com.orientechnologies.orient.core.exception;

import com.orientechnologies.common.exception.OException;

/**
 * Exception thrown when a record fails validation. Plain pass-through
 * subclass of {@link OException}; adds no state of its own.
 */
@SuppressWarnings("serial")
public class OValidationException extends OException {

  /**
   * @param string description of the validation failure.
   */
  public OValidationException(String string) {
    super(string);
  }

  /**
   * @param message description of the validation failure.
   * @param cause   underlying cause, preserved on the exception chain.
   */
  public OValidationException(String message, Throwable cause) {
    super(message, cause);
  }
}
/* * MIT License * * Copyright 2021 Myndigheten för digital förvaltning (DIGG) */ package se.digg.dgc.service.impl; import java.io.IOException; import java.security.SignatureException; import java.security.cert.CertificateExpiredException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import se.digg.dgc.encoding.Barcode; import se.digg.dgc.encoding.BarcodeDecoder; import se.digg.dgc.encoding.BarcodeException; import se.digg.dgc.payload.v1.DGCSchemaException; import se.digg.dgc.payload.v1.DigitalCovidCertificate; import se.digg.dgc.service.DGCBarcodeDecoder; import se.digg.dgc.signatures.CertificateProvider; import se.digg.dgc.signatures.DGCSignatureVerifier; import se.digg.dgc.signatures.impl.DefaultDGCSignatureVerifier; /** * A bean implementing the {@link DGCBarcodeDecoder} interface. * * @author Martin Lindström (martin@idsec.se) * @author Henrik Bengtsson (extern.henrik.bengtsson@digg.se) * @author Henric Norlander (extern.henric.norlander@digg.se) */ public class DefaultDGCBarcodeDecoder extends DefaultDGCDecoder implements DGCBarcodeDecoder { /** Logger */ private static final Logger log = LoggerFactory.getLogger(DefaultDGCBarcodeDecoder.class); /** The barcode decoder. */ private BarcodeDecoder barcodeDecoder; /** * Constructor. 
* * @param dgcSignatureVerifier * the signature verifier - if null, an instance of {@link DefaultDGCSignatureVerifier} will be used * @param certificateProvider * the certificate provider that is used to locate certificates to use when verifying signatures * @param barcodeDecoder * the barcode decoder to use */ public DefaultDGCBarcodeDecoder(final DGCSignatureVerifier dgcSignatureVerifier, final CertificateProvider certificateProvider, final BarcodeDecoder barcodeDecoder) { super(dgcSignatureVerifier, certificateProvider); this.barcodeDecoder = barcodeDecoder; } /** {@inheritDoc} */ @Override public DigitalCovidCertificate decodeBarcode(final byte[] image) throws DGCSchemaException, SignatureException, CertificateExpiredException, BarcodeException, IOException { final byte[] encodedDcc = this.decodeBarcodeToBytes(image); log.trace("CBOR decoding DGC ..."); // TODO: In the future, we'll have to handle different versions... final DigitalCovidCertificate dgc = DigitalCovidCertificate.decode(encodedDcc); log.trace("Decoded into: {}", dgc); return dgc; } /** {@inheritDoc} */ @Override public byte[] decodeBarcodeToBytes(final byte[] image) throws SignatureException, CertificateExpiredException, BarcodeException, IOException { // Get the barcode from the image and decode it ... // log.trace("Decoding barcode image ..."); String base45 = this.barcodeDecoder.decodeToString(image, Barcode.BarcodeType.QR, null); log.trace("Decoded barcode image into {}", base45); return this.decodeToBytes(base45); } }
package de.schauderhaft.degraph.examples;

/**
 * Example class for dependency analysis: its only outgoing dependency is expressed
 * through an array type — the {@code String[]} field makes this class depend on
 * {@link String} via an array component type.
 */
public class DependsOnArray {
    // Explicit null initializer kept as-is; the field exists purely to create the array-type dependency.
    String[] strings = null;
}
package org.hisp.dhis.indicator;

/*
 * Copyright (c) 2004-2017, University of Oslo
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * Neither the name of the HISP project nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

import java.util.List;

/**
 * Service contract for CRUD and lookup operations on {@link Indicator},
 * {@link IndicatorType}, {@link IndicatorGroup} and {@link IndicatorGroupSet}
 * objects. Objects are addressable both by integer database id and by String uid.
 *
 * @author Lars Helge Overland
 * @version $Id$
 */
public interface IndicatorService
{
    /** Conventional service identifier: the fully qualified name of this interface. */
    String ID = IndicatorService.class.getName();

    // -------------------------------------------------------------------------
    // Indicator
    // -------------------------------------------------------------------------

    /**
     * Adds the given indicator.
     *
     * @param indicator the indicator to persist.
     * @return the identifier of the added indicator.
     */
    int addIndicator( Indicator indicator );

    /**
     * Updates the given indicator.
     *
     * @param indicator the indicator to update.
     */
    void updateIndicator( Indicator indicator );

    /**
     * Deletes the given indicator.
     *
     * @param indicator the indicator to delete.
     */
    void deleteIndicator( Indicator indicator );

    /**
     * Returns the indicator with the given identifier.
     *
     * @param id the indicator identifier.
     */
    Indicator getIndicator( int id );

    /**
     * Returns the indicator with the given uid.
     *
     * @param uid the indicator uid.
     */
    Indicator getIndicator( String uid );

    /** Returns all indicators. */
    List<Indicator> getAllIndicators();

    /** Returns indicators which are associated with group sets. */
    List<Indicator> getIndicatorsWithGroupSets();

    /** Returns indicators which are not members of any group. */
    List<Indicator> getIndicatorsWithoutGroups();

    /** Returns indicators which are associated with data sets. */
    List<Indicator> getIndicatorsWithDataSets();

    // -------------------------------------------------------------------------
    // IndicatorType
    // -------------------------------------------------------------------------

    /**
     * Adds the given indicator type.
     *
     * @param indicatorType the indicator type to persist.
     * @return the identifier of the added indicator type.
     */
    int addIndicatorType( IndicatorType indicatorType );

    /** Updates the given indicator type. */
    void updateIndicatorType( IndicatorType indicatorType );

    /** Deletes the given indicator type. */
    void deleteIndicatorType( IndicatorType indicatorType );

    /** Returns the indicator type with the given identifier. */
    IndicatorType getIndicatorType( int id );

    /** Returns the indicator type with the given uid. */
    IndicatorType getIndicatorType( String uid );

    /** Returns all indicator types. */
    List<IndicatorType> getAllIndicatorTypes();

    // -------------------------------------------------------------------------
    // IndicatorGroup
    // -------------------------------------------------------------------------

    /**
     * Adds the given indicator group.
     *
     * @param indicatorGroup the indicator group to persist.
     * @return the identifier of the added indicator group.
     */
    int addIndicatorGroup( IndicatorGroup indicatorGroup );

    /** Updates the given indicator group. */
    void updateIndicatorGroup( IndicatorGroup indicatorGroup );

    /** Deletes the given indicator group. */
    void deleteIndicatorGroup( IndicatorGroup indicatorGroup );

    /** Returns the indicator group with the given identifier. */
    IndicatorGroup getIndicatorGroup( int id );

    /** Returns the indicator group with the given uid. */
    IndicatorGroup getIndicatorGroup( String uid );

    /** Returns all indicator groups. */
    List<IndicatorGroup> getAllIndicatorGroups();

    // -------------------------------------------------------------------------
    // IndicatorGroupSet
    // -------------------------------------------------------------------------

    /**
     * Adds the given indicator group set.
     *
     * @param groupSet the indicator group set to persist.
     * @return the identifier of the added indicator group set.
     */
    int addIndicatorGroupSet( IndicatorGroupSet groupSet );

    /** Updates the given indicator group set. */
    void updateIndicatorGroupSet( IndicatorGroupSet groupSet );

    /** Deletes the given indicator group set. */
    void deleteIndicatorGroupSet( IndicatorGroupSet groupSet );

    /** Returns the indicator group set with the given identifier. */
    IndicatorGroupSet getIndicatorGroupSet( int id );

    /** Returns the indicator group set with the given uid. */
    IndicatorGroupSet getIndicatorGroupSet( String uid );

    /** Returns all indicator group sets. */
    List<IndicatorGroupSet> getAllIndicatorGroupSets();
}
/* * Copyright 2014 Igor Maznitsa (http://www.igormaznitsa.com). * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.igormaznitsa.jcp.directives; import javax.annotation.Nonnull; import com.igormaznitsa.jcp.context.PreprocessingState; import com.igormaznitsa.jcp.containers.PreprocessingFlag; import com.igormaznitsa.jcp.context.PreprocessorContext; /** * The class implements the //#_endif directive handler * * @author Igor Maznitsa (igor.maznitsa@igormaznitsa.com) */ public class GlobalEndIfDirectiveHandler extends AbstractDirectiveHandler { @Override @Nonnull public String getName() { return "_endif"; } @Override public boolean executeOnlyWhenExecutionAllowed() { return false; } @Override @Nonnull public String getReference() { return "end "+DIRECTIVE_PREFIX +"_if.."+DIRECTIVE_PREFIX +"_endif control construction"; } @Override public boolean isGlobalPhaseAllowed() { return true; } @Override public boolean isPreprocessingPhaseAllowed() { return false; } @Override @Nonnull public AfterDirectiveProcessingBehaviour execute(@Nonnull final String string, @Nonnull final PreprocessorContext context) { final PreprocessingState state = context.getPreprocessingState(); if (state.isIfStackEmpty()) { throw context.makeException("Detected "+getFullName() + " without " + DIRECTIVE_PREFIX + "_if",null); } if (!state.isDirectiveCanBeProcessed() && state.isAtActiveIf()) { state.getPreprocessingFlags().remove(PreprocessingFlag.IF_CONDITION_FALSE); } state.popIf(); return 
AfterDirectiveProcessingBehaviour.PROCESSED; } }
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.codepipeline.model.transform;

import java.math.*;

import javax.annotation.Generated;

import com.amazonaws.services.codepipeline.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;

import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;

/**
 * PipelineMetadata JSON Unmarshaller
 *
 * <p>NOTE(review): generated code (see {@code @Generated}); comments below added for
 * readability only — do not hand-edit the parsing logic.</p>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class PipelineMetadataJsonUnmarshaller implements Unmarshaller<PipelineMetadata, JsonUnmarshallerContext> {

    /**
     * Reads a {@code PipelineMetadata} object from the JSON token stream held by the context.
     * Returns {@code null} when the current value is a JSON null.
     */
    public PipelineMetadata unmarshall(JsonUnmarshallerContext context) throws Exception {
        PipelineMetadata pipelineMetadata = new PipelineMetadata();
        // Depth bookkeeping: fields of this object live exactly one level below the current depth.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            return null;
        }

        while (true) {
            if (token == null)
                break;

            if (token == FIELD_NAME || token == START_OBJECT) {
                // Each known field is matched by name at the target depth, then its value consumed.
                if (context.testExpression("pipelineArn", targetDepth)) {
                    context.nextToken();
                    pipelineMetadata.setPipelineArn(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("created", targetDepth)) {
                    context.nextToken();
                    // "unixTimestamp" selects the epoch-seconds date format for this field.
                    pipelineMetadata.setCreated(DateJsonUnmarshallerFactory.getInstance("unixTimestamp").unmarshall(context));
                }
                if (context.testExpression("updated", targetDepth)) {
                    context.nextToken();
                    pipelineMetadata.setUpdated(DateJsonUnmarshallerFactory.getInstance("unixTimestamp").unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop once we have closed back out to (or above) the depth we started at.
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }

        return pipelineMetadata;
    }

    // Lazily-created shared instance. Unsynchronized: the unmarshaller is stateless, so a
    // rare duplicate construction under concurrency is harmless (generated-code convention).
    private static PipelineMetadataJsonUnmarshaller instance;

    /** Returns the shared unmarshaller instance, creating it on first use. */
    public static PipelineMetadataJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new PipelineMetadataJsonUnmarshaller();
        return instance;
    }
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api.command.user.loadbalancer;

import java.util.ArrayList;
import java.util.List;

import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.APICommand;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseListTaggedResourcesCmd;
import org.apache.cloudstack.api.Parameter;
import org.apache.cloudstack.api.response.ApplicationLoadBalancerResponse;
import org.apache.cloudstack.api.response.FirewallRuleResponse;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.api.response.NetworkResponse;
import org.apache.cloudstack.network.lb.ApplicationLoadBalancerRule;
import org.apache.log4j.Logger;

import com.cloud.exception.InvalidParameterValueException;
import com.cloud.network.rules.LoadBalancerContainer.Scheme;
import com.cloud.utils.Pair;

/**
 * API command implementing {@code listLoadBalancers}: lists application load balancers,
 * optionally filtered by id, name, source IP, network and scheme.
 */
@APICommand(name = "listLoadBalancers", description = "Lists load balancers", responseObject = ApplicationLoadBalancerResponse.class, since = "4.2.0",
        requestHasSensitiveInfo = false, responseHasSensitiveInfo = false)
public class ListApplicationLoadBalancersCmd extends BaseListTaggedResourcesCmd {
    public static final Logger s_logger = Logger.getLogger(ListApplicationLoadBalancersCmd.class.getName());

    // Response envelope name used when serializing the command result.
    private static final String s_name = "listloadbalancersresponse";

    // ///////////////////////////////////////////////////
    // ////////////// API parameters /////////////////////
    // ///////////////////////////////////////////////////

    @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = FirewallRuleResponse.class, description = "the ID of the load balancer")
    private Long id;

    @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "the name of the load balancer")
    private String loadBalancerName;

    @Parameter(name = ApiConstants.SOURCE_IP, type = CommandType.STRING, description = "the source IP address of the load balancer")
    private String sourceIp;

    @Parameter(name = ApiConstants.SOURCE_IP_NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class,
            description = "the network ID of the source IP address")
    private Long sourceIpNetworkId;

    @Parameter(name = ApiConstants.SCHEME, type = CommandType.STRING,
            description = "the scheme of the load balancer. Supported value is internal in the current release")
    private String scheme;

    @Parameter(name = ApiConstants.NETWORK_ID, type = CommandType.UUID, entityType = NetworkResponse.class, description = "the network ID of the load balancer")
    private Long networkId;

    @Parameter(name = ApiConstants.FOR_DISPLAY, type = CommandType.BOOLEAN,
            description = "list resources by display flag; only ROOT admin is eligible to pass this parameter", since = "4.4", authorized = {RoleType.Admin})
    private Boolean display;

    // ///////////////////////////////////////////////////
    // ///////////////// Accessors ///////////////////////
    // ///////////////////////////////////////////////////

    public Long getId() {
        return id;
    }

    // NOTE(review): both accessors below return the same field; kept for
    // backward compatibility with existing callers.
    public String getLoadBalancerRuleName() {
        return loadBalancerName;
    }

    public String getLoadBalancerName() {
        return loadBalancerName;
    }

    public String getSourceIp() {
        return sourceIp;
    }

    public Long getSourceIpNetworkId() {
        return sourceIpNetworkId;
    }

    /** Falls back to the superclass display flag when the parameter was not supplied. */
    @Override
    public Boolean getDisplay() {
        if (display != null) {
            return display;
        }
        return super.getDisplay();
    }

    @Override
    public String getCommandName() {
        return s_name;
    }

    /**
     * Parses the scheme parameter into a {@link Scheme}. Only "internal" (case-insensitive)
     * is accepted; any other non-null value is rejected.
     *
     * @return the parsed scheme, or null when the parameter was not supplied.
     */
    public Scheme getScheme() {
        if (scheme != null) {
            if (scheme.equalsIgnoreCase(Scheme.Internal.toString())) {
                return Scheme.Internal;
            } else {
                throw new InvalidParameterValueException("Invalid value for scheme. Supported value is internal");
            }
        }
        return null;
    }

    public Long getNetworkId() {
        return networkId;
    }

    // ///////////////////////////////////////////////////
    // ///////////// API Implementation///////////////////
    // ///////////////////////////////////////////////////

    /**
     * Lists the matching load balancers (via the inherited {@code _appLbService}) and builds
     * the paged list response, attaching LB instance info for each rule.
     */
    @Override
    public void execute() {
        // Pair: (matching rules, total count for pagination).
        Pair<List<? extends ApplicationLoadBalancerRule>, Integer> loadBalancers = _appLbService.listApplicationLoadBalancers(this);
        ListResponse<ApplicationLoadBalancerResponse> response = new ListResponse<ApplicationLoadBalancerResponse>();
        List<ApplicationLoadBalancerResponse> lbResponses = new ArrayList<ApplicationLoadBalancerResponse>();

        for (ApplicationLoadBalancerRule loadBalancer : loadBalancers.first()) {
            ApplicationLoadBalancerResponse lbResponse =
                _responseGenerator.createLoadBalancerContainerReponse(loadBalancer, _lbService.getLbInstances(loadBalancer.getId()));
            lbResponse.setObjectName("loadbalancer");
            lbResponses.add(lbResponse);
        }
        response.setResponses(lbResponses, loadBalancers.second());
        response.setResponseName(getCommandName());
        this.setResponseObject(response);
    }
}
package com.dimple.common.exception.user;

/**
 * Thrown when authentication fails because the user does not exist or the
 * supplied password does not match.
 *
 * @author Dimple
 * @date 10/22/19
 */
public class UserPasswordNotMatchException extends UserException {

    // Exceptions are Serializable; declare an explicit version id instead of
    // relying on the JVM-computed default (Effective Java, Item 87).
    private static final long serialVersionUID = 1L;

    /**
     * Creates the exception with the i18n message key
     * {@code user.password.not.match} and no message arguments.
     */
    public UserPasswordNotMatchException() {
        super("user.password.not.match", null);
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.fetch.matchedfilters;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.common.collect.ImmutableMap;
import org.elasticsearch.common.collect.Lists;
import org.elasticsearch.common.lucene.docset.DocSet;
import org.elasticsearch.common.lucene.docset.DocSets;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.fetch.SearchHitPhase;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Fetch phase that records, on each hit, the names of the query's named
 * filters whose doc sets contain the hit's document.
 *
 * @author kimchy (shay.banon)
 */
public class MatchedFiltersSearchHitPhase implements SearchHitPhase {

    @Override
    public Map<String, ? extends SearchParseElement> parseElements() {
        // this phase contributes no request-parsing elements
        return ImmutableMap.of();
    }

    @Override
    public boolean executionNeeded(SearchContext context) {
        // only worth running when the query declared at least one named filter
        return !context.parsedQuery().namedFilters().isEmpty();
    }

    @Override
    public void execute(SearchContext context, HitContext hitContext) throws ElasticSearchException {
        List<String> matchedNames = Lists.newArrayListWithCapacity(2);
        for (Map.Entry<String, Filter> namedFilter : context.parsedQuery().namedFilters().entrySet()) {
            if (filterContainsHit(namedFilter.getValue(), hitContext)) {
                matchedNames.add(namedFilter.getKey());
            }
        }
        hitContext.hit().matchedFilters(matchedNames.toArray(new String[matchedNames.size()]));
    }

    /**
     * Returns true when the filter's doc id set contains the hit's document.
     * An I/O failure is deliberately treated as "not matched" (best effort),
     * mirroring the original behavior of swallowing the IOException.
     */
    private boolean filterContainsHit(Filter filter, HitContext hitContext) {
        try {
            DocIdSet docIdSet = filter.getDocIdSet(hitContext.reader());
            if (docIdSet == null) {
                return false;
            }
            DocSet docSet = DocSets.convert(hitContext.reader(), docIdSet);
            return docSet.get(hitContext.docId());
        } catch (IOException ignored) {
            // best-effort: an unreadable filter simply does not match
            return false;
        }
    }
}
package net.coldice.lycanthropy.registry.items;

import net.minecraft.item.Item;
import net.minecraft.item.ItemGroup;

/**
 * Silver material item: a plain {@link Item} with a maximum stack size of 64,
 * shown in the {@code MATERIALS} creative tab.
 */
public class Silver extends Item {
    public Silver() {
        // "Settings" resolves through the Item superclass scope (i.e. Item.Settings).
        super(new Settings().maxCount(64).group(ItemGroup.MATERIALS));
    }
}
package vn.zalopay.jmeter.grpc.client;

import java.util.Map;
import java.util.concurrent.TimeUnit;

import io.grpc.CallOptions;
import io.grpc.Channel;
import io.grpc.ClientCall;
import io.grpc.ClientInterceptor;
import io.grpc.ForwardingClientCall;
import io.grpc.ForwardingClientCallListener;
import io.grpc.Metadata;
import io.grpc.MethodDescriptor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * gRPC {@link ClientInterceptor} that, for every outgoing call:
 * <ul>
 *   <li>applies a deadline of {@link #timeoutInMs} milliseconds, and</li>
 *   <li>attaches each entry of {@link #headerMap} as an ASCII metadata header.</li>
 * </ul>
 */
public class GrpcClientInterceptor implements ClientInterceptor {
  private static final Logger LOGGER = LoggerFactory.getLogger(GrpcClientInterceptor.class);

  /** Extra headers attached to every intercepted call (name -> value). */
  protected Map<String, String> headerMap;

  /** Per-call deadline in milliseconds. */
  protected long timeoutInMs;

  /**
   * @param headerMap   headers to add to every call; must not be null
   * @param timeoutInMs deadline applied to every call, in milliseconds
   */
  public GrpcClientInterceptor(Map<String, String> headerMap, long timeoutInMs) {
    this.headerMap = headerMap;
    this.timeoutInMs = timeoutInMs;
  }

  /** {@inheritDoc} */
  @Override
  public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(MethodDescriptor<ReqT, RespT> methodDescriptor,
      CallOptions callOptions, Channel channel) {
    return new ForwardingClientCall.SimpleForwardingClientCall<ReqT, RespT>(
        channel.newCall(methodDescriptor, callOptions.withDeadlineAfter(timeoutInMs, TimeUnit.MILLISECONDS))) {
      @Override
      public void start(ClientCall.Listener<RespT> responseListener, Metadata headers) {
        // Map.forEach replaces the original entrySet().stream().forEach() — same
        // iteration, no intermediate stream.
        headerMap.forEach((key, value) -> {
          headers.put(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER), value);
          LOGGER.debug("Header key: {}, value: {}", key, value);
        });
        // The original wrapped responseListener in a SimpleForwardingClientCallListener
        // whose only override merely called super — a pure pass-through, so the listener
        // is now forwarded directly; behavior is unchanged.
        super.start(responseListener, headers);
      }
    };
  }
}
/*
 * Copyright (c) 2017 the original author or authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.github.enadim.spring.cloud.ribbon.propagator.feign;

import com.github.enadim.spring.cloud.ribbon.context.ExecutionContext;
import com.github.enadim.spring.cloud.ribbon.propagator.AbstractExecutionContextCopy;
import com.github.enadim.spring.cloud.ribbon.propagator.Filter;
import com.github.enadim.spring.cloud.ribbon.propagator.PatternFilter;
import feign.RequestInterceptor;
import feign.RequestTemplate;
import lombok.extern.slf4j.Slf4j;

import javax.validation.constraints.NotNull;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

/**
 * Feign request interceptor that copies the current {@link ExecutionContext} entries into the
 * outgoing request headers, but only for requests whose url is accepted by {@link #urlFilter};
 * entry names are pre-filtered by the inherited key filter.
 *
 * @author Nadim Benabdenbi
 */
@Slf4j
public class PreservesHttpHeadersFeignInterceptor extends AbstractExecutionContextCopy<RequestTemplate>
        implements RequestInterceptor {
    /** Decides which request urls take part in header propagation. */
    private final PatternFilter urlFilter;

    /**
     * Sole constructor.
     *
     * @param urlFilter          The url filter.
     * @param filter             The context entry key filter.
     * @param extraStaticEntries The extra static entries to copy.
     */
    public PreservesHttpHeadersFeignInterceptor(@NotNull PatternFilter urlFilter,
                                                @NotNull Filter<String> filter,
                                                @NotNull Map<String, String> extraStaticEntries) {
        super(filter, RequestTemplate::header, extraStaticEntries);
        this.urlFilter = urlFilter;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void apply(RequestTemplate template) {
        final String requestUrl = template.request().url();
        // Guard clause: skip propagation entirely for urls the filter rejects.
        if (!urlFilter.accept(requestUrl)) {
            log.trace("Propagation disabled for url [{}]", requestUrl);
            return;
        }
        final Set<Entry<String, String>> copiedEntries = copy(template);
        log.trace("Propagated outbound headers {} for url [{}].", copiedEntries, requestUrl);
    }
}
package cz.pojd.rpi.spring;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.pi4j.gpio.extension.mcp.MCP23017GpioProvider;
import com.pi4j.io.gpio.GpioFactory;
import com.pi4j.io.gpio.GpioProvider;
import com.pi4j.io.i2c.I2CBus;

import cz.pojd.rpi.controls.CameraControl;
import cz.pojd.rpi.controls.MjpegStreamerCameraControl;
import cz.pojd.rpi.sensors.gpio.Gpio;
import cz.pojd.rpi.sensors.gpio.GpioImpl;
import cz.pojd.rpi.sensors.gpio.MockGpio;
import cz.pojd.rpi.state.OsStateService;
import cz.pojd.rpi.state.OsStateServiceImpl;
import cz.pojd.rpi.state.VmStateService;
import cz.pojd.rpi.state.VmStateServiceImpl;
import cz.pojd.rpi.system.RuntimeExecutor;
import cz.pojd.rpi.system.RuntimeExecutorImpl;
import cz.pojd.rpi.system.TimeService;
import cz.pojd.rpi.system.TimeServiceImpl;

/**
 * Main configuration for spring in rpi project
 *
 * @author Lubos Housa
 * @since Jul 23, 2014 2:34:22 AM
 */
@Configuration
public class RpiConfig {
    private static final Log LOG = LogFactory.getLog(RpiConfig.class);

    /**
     * Detects whether this is a new RasPI or not (drives further logic in this project)
     *
     * @return true if so, false otherwise
     */
    @Bean
    public boolean newRasPI() {
	return true;
    }

    /**
     * File system mount points whose state is monitored by the application.
     */
    @Bean
    public List<String> fileSystems() {
	List<String> fileSystems = new ArrayList<>();
	fileSystems.add("/");
	fileSystems.add("/boot");
	fileSystems.add("/var/log");
	fileSystems.add("/tmp");
	return fileSystems;
    }

    /**
     * This command should detect whether the underlying database on the RPi is running or not. This command should either return true or false
     *
     * @return String command to invoke at runtime to detect whether the DB is running or not. The command should return String 'true', if DB is
     *         running, or 'false' otherwise
     */
    @Bean
    public String detectDbIsRunningCommand() {
	// shell one-liner: inspect mysql init-script status and echo 'true'/'false'
	return "if [ `/etc/init.d/mysql status | tail -1 | sed 's/.*: \\(\\w*\\)/\\1/'` = 'started' ]; then echo 'true'; else echo 'false'; fi;";
    }

    /** Virtual machine (JVM) state service. */
    @Bean
    public VmStateService vmStateService() {
	return new VmStateServiceImpl();
    }

    /** Operating system state service. */
    @Bean
    public OsStateService osStateService() {
	return new OsStateServiceImpl();
    }

    /** Exposes the JVM runtime as a bean (e.g. for executing external commands). */
    @Bean
    public Runtime runtime() {
	return Runtime.getRuntime();
    }

    /** Executor for running external/system commands. */
    @Bean
    public RuntimeExecutor runtimeExecutor() {
	return new RuntimeExecutorImpl();
    }

    /** Time service abstraction. */
    @Bean
    public TimeService timeService() {
	return new TimeServiceImpl();
    }

    /**
     * Camera control backed by mjpg-streamer; started/stopped via its init script,
     * with 2 as the last constructor argument (NOTE(review): presumably a timeout or
     * retry count — confirm against MjpegStreamerCameraControl).
     */
    @Bean
    public CameraControl cameraControl() {
	return new MjpegStreamerCameraControl("/etc/init.d/mjpg-streamer start", "/etc/init.d/mjpg-streamer stop", 2);
    }

    /**
     * Creates a GPIO provider for an MCP23017 port expander at the given I2C address.
     * The I2C bus depends on the RasPI revision (BUS_1 on new boards, BUS_0 otherwise);
     * on failure (missing hardware or native libs) falls back to the default provider.
     */
    public GpioProvider getMCP23017Provider(int address) {
	try {
	    return new MCP23017GpioProvider(newRasPI() ? I2CBus.BUS_1 : I2CBus.BUS_0, address);
	} catch (IOException | UnsatisfiedLinkError e) {
	    LOG.error("Unable to locate MCP23017 at address " + String.format("0x%x", address) + ", using default provider instead.", e);
	    return gpio().getDefaultProvider();
	}
    }

    /**
     * GPIO controller bean; falls back to a mock when the native pi4j libraries are
     * unavailable (e.g. when running off-device).
     */
    @Bean
    public Gpio gpio() {
	try {
	    // bump the priority of the low level pi4j libs to give the interrupt threads higher precedence
	    return new GpioImpl(GpioFactory.getInstance(), 99);
	} catch (UnsatisfiedLinkError e) {
	    LOG.error("Unable to create the GPIO controller. Using a mock one instead.", e);
	    return new MockGpio();
	}
    }
}
/************************************************************************** Copyright 2019 Vietnamese-German-University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @author: thian ***************************************************************************/ package com.vgu.se.jocl.expressions; public class CollectionLiteralPart { }
/* * Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.tencentcloudapi.iai.v20200303.models; import com.tencentcloudapi.common.AbstractModel; import com.google.gson.annotations.SerializedName; import com.google.gson.annotations.Expose; import java.util.HashMap; public class SearchFacesRequest extends AbstractModel{ /** * List of groups to be searched in (up to 100). The array element value is the `GroupId` in the `CreateGroup` API. */ @SerializedName("GroupIds") @Expose private String [] GroupIds; /** * Base64-encoded image data, which cannot exceed 5 MB. The long side cannot exceed 4,000 px for images in JPG format or 2,000 px for images in other formats. PNG, JPG, JPEG, and BMP images are supported, while GIF images are not. */ @SerializedName("Image") @Expose private String Image; /** * Image URL. The image cannot exceed 5 MB in size after being Base64-encoded. The long side cannot exceed 4,000 px for images in JPG format or 2,000 px for images in other formats. Either `Url` or `Image` must be provided; if both are provided, only `Url` will be used. We recommend storing the image in Tencent Cloud, as a Tencent Cloud URL can guarantee higher download speed and stability. The download speed and stability of non-Tencent Cloud URLs may be low. PNG, JPG, JPEG, and BMP images are supported, while GIF images are not. 
*/ @SerializedName("Url") @Expose private String Url; /** * Maximum number of recognizable faces. Default value: 1 (i.e., detecting only the face with the largest size in the image). Maximum value: 10. `MaxFaceNum` is used to control the number of faces to be searched for if there are multiple faces in the input image to be recognized. For example, if the input image in `Image` or `Url` contains multiple faces and `MaxFaceNum` is 5, top 5 faces with the largest size in the image will be recognized. */ @SerializedName("MaxFaceNum") @Expose private Long MaxFaceNum; /** * Minimum height and width of face in px. Default value: 34. Face images whose value is below 34 cannot be recognized. We recommend setting this parameter to 80. */ @SerializedName("MinFaceSize") @Expose private Long MinFaceSize; /** * Number of the most similar persons returned for one single recognized face image. Default value: 5. Maximum value: 100. For example, if `MaxFaceNum` is 1 and `MaxPersonNum` is 8, information of the top 8 most similar persons will be returned. The greater the value, the longer the processing time. We recommend setting a value below 10. */ @SerializedName("MaxPersonNum") @Expose private Long MaxPersonNum; /** * Whether to return person details. 0: no; 1: yes. Default value: 0. Other values will be considered as 0 by default. */ @SerializedName("NeedPersonInfo") @Expose private Long NeedPersonInfo; /** * Image quality control. 0: no control. 1: low quality requirement. The image has one or more of the following problems: extreme blurriness, covered eyes, covered nose, and covered mouth. 2: average quality requirement. The image has at least three of the following problems: excessive brightness, excessive dimness, blurriness or average blurriness, covered eyebrows, covered cheeks, and covered chin. 3: high-quality requirement. 
The image has one to two of the following problems: excessive brightness, excessive dimness, average blurriness, covered eyebrows, covered cheeks, and covered chin.
4: very high-quality requirement. The image is optimal in all dimensions or only has a slight problem in one dimension.
Default value: 0.
If the image quality does not meet the requirement, the returned result will prompt that the detected image quality is unsatisfactory.
    */
    @SerializedName("QualityControl")
    @Expose
    private Long QualityControl;

    /**
    * In the output parameter `Score`, the result will be returned only if the result value is above the `FaceMatchThreshold` value. Default value: 0.
    */
    @SerializedName("FaceMatchThreshold")
    @Expose
    private Float FaceMatchThreshold;

    /**
    * Whether to enable the support for rotated image recognition. 0: no; 1: yes. Default value: 0.
    * When the face in the image is rotated and the image has no EXIF information, if this parameter is not enabled, the face in the image cannot be correctly detected and recognized.
    * If you are sure that the input image contains EXIF information or the face in the image will not be rotated, do not enable this parameter, as the overall time consumption may increase by hundreds of milliseconds after it is enabled.
    */
    @SerializedName("NeedRotateDetection")
    @Expose
    private Long NeedRotateDetection;

    /**
     * Get List of groups to be searched in (up to 100). The array element value is the `GroupId` in the `CreateGroup` API.
     * @return GroupIds List of groups to be searched in (up to 100).
     */
    public String [] getGroupIds() {
        return this.GroupIds;
    }

    /**
     * Set List of groups to be searched in (up to 100). The array element value is the `GroupId` in the `CreateGroup` API.
     * @param GroupIds List of groups to be searched in (up to 100).
     */
    public void setGroupIds(String [] GroupIds) {
        this.GroupIds = GroupIds;
    }

    /**
     * Get Base64-encoded image data, which cannot exceed 5 MB. The long side cannot exceed 4,000 px for images in JPG format or 2,000 px for images in other formats. PNG, JPG, JPEG, and BMP images are supported, while GIF images are not.
     * @return Image Base64-encoded image data.
     */
    public String getImage() {
        return this.Image;
    }

    /**
     * Set Base64-encoded image data, which cannot exceed 5 MB. The long side cannot exceed 4,000 px for images in JPG format or 2,000 px for images in other formats. PNG, JPG, JPEG, and BMP images are supported, while GIF images are not.
     * @param Image Base64-encoded image data.
     */
    public void setImage(String Image) {
        this.Image = Image;
    }

    /**
     * Get Image URL. The image cannot exceed 5 MB in size after being Base64-encoded. The long side cannot exceed 4,000 px for images in JPG format or 2,000 px for images in other formats.
     * Either `Url` or `Image` must be provided; if both are provided, only `Url` will be used.
     * We recommend storing the image in Tencent Cloud, as a Tencent Cloud URL can guarantee higher download speed and stability. PNG, JPG, JPEG, and BMP images are supported, while GIF images are not.
     * @return Url Image URL.
     */
    public String getUrl() {
        return this.Url;
    }

    /**
     * Set Image URL. The image cannot exceed 5 MB in size after being Base64-encoded. The long side cannot exceed 4,000 px for images in JPG format or 2,000 px for images in other formats.
     * Either `Url` or `Image` must be provided; if both are provided, only `Url` will be used.
     * We recommend storing the image in Tencent Cloud, as a Tencent Cloud URL can guarantee higher download speed and stability. PNG, JPG, JPEG, and BMP images are supported, while GIF images are not.
     * @param Url Image URL.
     */
    public void setUrl(String Url) {
        this.Url = Url;
    }

    /**
     * Get Maximum number of recognizable faces. Default value: 1 (i.e., detecting only the face with the largest size in the image). Maximum value: 10.
     * `MaxFaceNum` is used to control the number of faces to be searched for if there are multiple faces in the input image to be recognized.
     * @return MaxFaceNum Maximum number of recognizable faces.
     */
    public Long getMaxFaceNum() {
        return this.MaxFaceNum;
    }

    /**
     * Set Maximum number of recognizable faces. Default value: 1 (i.e., detecting only the face with the largest size in the image). Maximum value: 10.
     * `MaxFaceNum` is used to control the number of faces to be searched for if there are multiple faces in the input image to be recognized.
     * @param MaxFaceNum Maximum number of recognizable faces.
     */
    public void setMaxFaceNum(Long MaxFaceNum) {
        this.MaxFaceNum = MaxFaceNum;
    }

    /**
     * Get Minimum height and width of face in px. Default value: 34. Face images whose value is below 34 cannot be recognized. We recommend setting this parameter to 80.
     * @return MinFaceSize Minimum height and width of face in px.
     */
    public Long getMinFaceSize() {
        return this.MinFaceSize;
    }

    /**
     * Set Minimum height and width of face in px. Default value: 34. Face images whose value is below 34 cannot be recognized. We recommend setting this parameter to 80.
     * @param MinFaceSize Minimum height and width of face in px.
     */
    public void setMinFaceSize(Long MinFaceSize) {
        this.MinFaceSize = MinFaceSize;
    }

    /**
     * Get Number of the most similar persons returned for one single recognized face image. Default value: 5. Maximum value: 100.
     * The greater the value, the longer the processing time. We recommend setting a value below 10.
     * @return MaxPersonNum Number of the most similar persons returned for one single recognized face image.
     */
    public Long getMaxPersonNum() {
        return this.MaxPersonNum;
    }

    /**
     * Set Number of the most similar persons returned for one single recognized face image. Default value: 5. Maximum value: 100.
     * The greater the value, the longer the processing time. We recommend setting a value below 10.
     * @param MaxPersonNum Number of the most similar persons returned for one single recognized face image.
     */
    public void setMaxPersonNum(Long MaxPersonNum) {
        this.MaxPersonNum = MaxPersonNum;
    }

    /**
     * Get Whether to return person details. 0: no; 1: yes. Default value: 0. Other values will be considered as 0 by default.
     * @return NeedPersonInfo Whether to return person details.
     */
    public Long getNeedPersonInfo() {
        return this.NeedPersonInfo;
    }

    /**
     * Set Whether to return person details. 0: no; 1: yes. Default value: 0. Other values will be considered as 0 by default.
     * @param NeedPersonInfo Whether to return person details.
     */
    public void setNeedPersonInfo(Long NeedPersonInfo) {
        this.NeedPersonInfo = NeedPersonInfo;
    }

    /**
     * Get Image quality control. 0: no control. 1: low quality requirement. 2: average quality requirement. 3: high-quality requirement. 4: very high-quality requirement.
     * Default value: 0. If the image quality does not meet the requirement, the returned result will prompt that the detected image quality is unsatisfactory.
     * @return QualityControl Image quality control.
     */
    public Long getQualityControl() {
        return this.QualityControl;
    }

    /**
     * Set Image quality control. 0: no control. 1: low quality requirement. 2: average quality requirement. 3: high-quality requirement. 4: very high-quality requirement.
     * Default value: 0. If the image quality does not meet the requirement, the returned result will prompt that the detected image quality is unsatisfactory.
     * @param QualityControl Image quality control.
     */
    public void setQualityControl(Long QualityControl) {
        this.QualityControl = QualityControl;
    }

    /**
     * Get In the output parameter `Score`, the result will be returned only if the result value is above the `FaceMatchThreshold` value. Default value: 0.
     * @return FaceMatchThreshold Minimum match score for results to be returned.
     */
    public Float getFaceMatchThreshold() {
        return this.FaceMatchThreshold;
    }

    /**
     * Set In the output parameter `Score`, the result will be returned only if the result value is above the `FaceMatchThreshold` value. Default value: 0.
     * @param FaceMatchThreshold Minimum match score for results to be returned.
     */
    public void setFaceMatchThreshold(Float FaceMatchThreshold) {
        this.FaceMatchThreshold = FaceMatchThreshold;
    }

    /**
     * Get Whether to enable the support for rotated image recognition. 0: no; 1: yes. Default value: 0.
     * @return NeedRotateDetection Whether to enable the support for rotated image recognition.
     */
    public Long getNeedRotateDetection() {
        return this.NeedRotateDetection;
    }

    /**
     * Set Whether to enable the support for rotated image recognition. 0: no; 1: yes. Default value: 0.
     * When the face in the image is rotated and the image has no EXIF information, if this parameter is not enabled, the face in the image cannot be correctly detected and recognized.
     * If you are sure that the input image contains EXIF information or the face in the image will not be rotated, do not enable this parameter, as the overall time consumption may increase by hundreds of milliseconds after it is enabled.
     * @param NeedRotateDetection Whether to enable the support for rotated image recognition.
     */
    public void setNeedRotateDetection(Long NeedRotateDetection) {
        this.NeedRotateDetection = NeedRotateDetection;
    }

    public SearchFacesRequest() {
    }

    /**
     * NOTE: Any ambiguous key set via .set("AnyKey", "value") will be a shallow copy,
     * and any explicit key, i.e Foo, set via .setFoo("value") will be a deep copy.
     */
    public SearchFacesRequest(SearchFacesRequest source) {
        if (source.GroupIds != null) {
            // String is immutable, so a shallow array clone is an effectively deep copy.
            this.GroupIds = source.GroupIds.clone();
        }
        // String, Long and Float are immutable: sharing the references is safe and
        // avoids the boxing constructors (new Long(...), new Float(...)) that have
        // been deprecated since Java 9, as well as pointless new String(...) copies.
        this.Image = source.Image;
        this.Url = source.Url;
        this.MaxFaceNum = source.MaxFaceNum;
        this.MinFaceSize = source.MinFaceSize;
        this.MaxPersonNum = source.MaxPersonNum;
        this.NeedPersonInfo = source.NeedPersonInfo;
        this.QualityControl = source.QualityControl;
        this.FaceMatchThreshold = source.FaceMatchThreshold;
        this.NeedRotateDetection = source.NeedRotateDetection;
    }

    /**
     * Internal implementation, normal users should not use it.
     */
    public void toMap(HashMap<String, String> map, String prefix) {
        this.setParamArraySimple(map, prefix + "GroupIds.", this.GroupIds);
        this.setParamSimple(map, prefix + "Image", this.Image);
        this.setParamSimple(map, prefix + "Url", this.Url);
        this.setParamSimple(map, prefix + "MaxFaceNum", this.MaxFaceNum);
        this.setParamSimple(map, prefix + "MinFaceSize", this.MinFaceSize);
        this.setParamSimple(map, prefix + "MaxPersonNum", this.MaxPersonNum);
        this.setParamSimple(map, prefix + "NeedPersonInfo", this.NeedPersonInfo);
        this.setParamSimple(map, prefix + "QualityControl", this.QualityControl);
        this.setParamSimple(map, prefix + "FaceMatchThreshold", this.FaceMatchThreshold);
        this.setParamSimple(map, prefix + "NeedRotateDetection", this.NeedRotateDetection);
    }
}
/* * ===============================LICENSE_START====================================== * dcae-analytics * ================================================================================ * Copyright © 2017 AT&T Intellectual Property. All rights reserved. * ================================================================================ * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================LICENSE_END=========================================== */ package org.onap.dcae.apod.analytics.aai; import com.google.inject.AbstractModule; import com.google.inject.Guice; import com.google.inject.Injector; import org.onap.dcae.apod.analytics.aai.domain.config.AAIHttpClientConfig; import org.onap.dcae.apod.analytics.aai.module.AnalyticsAAIModule; import org.onap.dcae.apod.analytics.aai.service.AAIEnrichmentClient; import org.onap.dcae.apod.analytics.aai.service.AAIEnrichmentClientFactory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Factory to create A&AI API Client. * * @author Rajiv Singla . Creation Date: 9/18/2017. */ public class AAIClientFactory { private static final Logger LOG = LoggerFactory.getLogger(AAIClientFactory.class); private final Injector injector; public AAIClientFactory(final AbstractModule guiceModule) { LOG.info("Creating instance of AAI Client Factory with Module: {}", guiceModule.getClass().getSimpleName()); this.injector = Guice.createInjector(guiceModule); } /** * Creates an instance of {@link AAIEnrichmentClient}. 
* * @param aaiHttpClientConfig A&AI Http Client Config * * @return An instance of A&AI Enrichment Client to fetch enrichment details from A&AI API. */ public AAIEnrichmentClient getEnrichmentClient(final AAIHttpClientConfig aaiHttpClientConfig) { LOG.info("Creating instance of A&AI Enrichment Client with A&AI HttpClientConfig: {}", aaiHttpClientConfig); final AAIEnrichmentClientFactory aaiEnrichmentClientFactory = injector.getInstance(AAIEnrichmentClientFactory.class); return aaiEnrichmentClientFactory.create(aaiHttpClientConfig); } /** * Static method used to create an instance of {@link AAIClientFactory} itself using default * guice {@link AnalyticsAAIModule} * * @return An instance of AAI Client Factory with {@link AnalyticsAAIModule} guice module configuration */ public static AAIClientFactory create() { return new AAIClientFactory(new AnalyticsAAIModule()); } }
/*
 * Copyright (c) 2011-2020, baomidou (jobob@qq.com).
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 * <p>
 * https://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.baomidou.mybatisplus.extension.plugins.pagination;

import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.core.toolkit.Assert;
import com.baomidou.mybatisplus.core.toolkit.ClassUtils;
import com.baomidou.mybatisplus.core.toolkit.ExceptionUtils;
import com.baomidou.mybatisplus.core.toolkit.StringUtils;
import com.baomidou.mybatisplus.extension.plugins.pagination.dialects.IDialect;
import org.apache.ibatis.session.RowBounds;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Pagination dialect factory: resolves and caches {@link IDialect} implementations
 * by fully-qualified class name.
 *
 * @author hubin
 * @since 2016-01-23
 */
public class DialectFactory {

    /** Cache of instantiated dialects keyed by implementation class name. */
    private static final Map<String, IDialect> DIALECT_CACHE = new ConcurrentHashMap<>();

    /**
     * Physical Page Interceptor for all the queries with parameter
     * {@link RowBounds}
     *
     * @param page         pagination object
     * @param buildSql     SQL to rewrite for pagination
     * @param dbType       database type
     * @param dialectClazz custom dialect class name (may be blank)
     * @return pagination model
     */
    @Deprecated
    public static DialectModel buildPaginationSql(IPage<?> page, String buildSql, DbType dbType, String dialectClazz) {
        // fix #196
        return getDialect(dbType, dialectClazz).buildPaginationSql(buildSql, page.offset(), page.getSize());
    }

    /**
     * Resolves the dialect for a database type, letting an explicitly supplied
     * class name take precedence over the type's default dialect.
     *
     * @param dbType       database type
     * @param dialectClazz custom dialect implementation class name (may be blank)
     * @return resolved dialect
     * @deprecated 3.3.1 {@link #getDialect(String)}
     */
    @Deprecated
    private static IDialect getDialect(DbType dbType, String dialectClazz) {
        final String dialectClassName;
        if (StringUtils.isBlank(dialectClazz)) {
            dialectClassName = dbType.getDialect();
        } else {
            dialectClassName = dialectClazz;
        }
        return DIALECT_CACHE.computeIfAbsent(dialectClassName, DialectFactory::classToDialect);
    }

    /**
     * Returns the cached dialect for the given class name, instantiating it on first use.
     *
     * @param dialectClazz fully-qualified dialect class name
     * @return dialect instance
     * @since 3.3.1
     */
    public static IDialect getDialect(String dialectClazz) {
        return DIALECT_CACHE.computeIfAbsent(dialectClazz, DialectFactory::classToDialect);
    }

    /**
     * Loads and instantiates a dialect implementation from its class name.
     * Throws if the class is missing or does not implement {@link IDialect}.
     */
    private static IDialect classToDialect(String dialectClazz) {
        IDialect dialect = null;
        try {
            Class<?> clazz = Class.forName(dialectClazz);
            if (IDialect.class.isAssignableFrom(clazz)) {
                dialect = (IDialect) ClassUtils.newInstance(clazz);
            }
        } catch (ClassNotFoundException e) {
            throw ExceptionUtils.mpe("Class : %s is not found", dialectClazz);
        }
        /* Fail fast when no dialect could be configured. */
        Assert.notNull(dialect, "The value of the dialect property in mybatis configuration.xml is not defined.");
        return dialect;
    }
}
package math; import org.junit.Test; /** * 实现 int sqrt(int x) 函数。 * * 计算并返回 x 的平方根,其中 x 是非负整数。 * * 由于返回类型是整数,结果只保留整数的部分,小数部分将被舍去。 * * 示例 1: * * 输入: 4 * 输出: 2 * 示例 2: * * 输入: 8 * 输出: 2 * 说明: 8 的平方根是 2.82842..., * 由于返回类型是整数,小数部分将被舍去。 */ public class Solution69 { //使用二分查找的方法 public int mySqrt(int x) { int l=0,h=x; while (l<=h){ int mid=l+(h-l)/2; //为了避免整型的溢出, if (mid<x/mid){ l=mid+1; }else if (mid==x/mid){ return mid; }else { h=mid-1; } } return h; } @Test public void test(){ int x=2147395599; int r = mySqrt(x); System.out.println(r); } }
/*
 * Copyright 2020 RtBrick Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package io.leitstand.security.users.rs;

import static io.leitstand.security.users.rs.Scopes.ADM;
import static io.leitstand.security.users.rs.Scopes.ADM_READ;
import static io.leitstand.security.users.rs.Scopes.ADM_USER;
import static io.leitstand.security.users.rs.Scopes.ADM_USER_READ;
import static java.lang.String.format;
import static javax.ws.rs.core.MediaType.APPLICATION_JSON;
import static javax.ws.rs.core.Response.created;

import java.net.URI;
import java.util.List;

import javax.inject.Inject;
import javax.validation.Valid;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Response;

import io.leitstand.commons.messages.Messages;
import io.leitstand.commons.rs.Resource;
import io.leitstand.security.auth.Scopes;
import io.leitstand.security.users.service.UserReference;
import io.leitstand.security.users.service.UserService;
import io.leitstand.security.users.service.UserSubmission;

/**
 * The REST API resource to query for users or add new user accounts.
 */
@Resource
@Path("/users")
@Scopes({ADM, ADM_USER})
@Consumes(APPLICATION_JSON)
@Produces(APPLICATION_JSON)
public class UsersResource {

    /** Collector for outcome messages rendered into REST responses. */
    @Inject
    private Messages messages;

    /** Transactional user management service backing this resource. */
    @Inject
    private UserService service;

    /**
     * Returns all users matching the given filter expression.
     * @param filter - the POSIX filter expression
     * @return all users matching the given filter expression or an empty list if no users were found.
     */
    @GET
    @Path("/")
    @Scopes({ADM, ADM_USER, ADM_READ, ADM_USER_READ})
    public List<UserReference> findUsers(@QueryParam("filter") final String filter) {
        return service.findUsers(filter);
    }

    /**
     * Creates a new user account and assigns a UUID to the account.
     * @param user - the user account settings.
     * @return messages to explain the outcome of the operation
     */
    @POST
    @Path("/")
    public Response storeUserSettings(@Valid final UserSubmission user) {
        service.addUser(user);
        // Advertise the canonical location of the freshly created account.
        final URI location = URI.create(format("/api/v1/users/%s", user.getUserName()));
        return created(location)
               .entity(messages)
               .build();
    }
}
//
// ========================================================================
// Copyright (c) 1995-2020 Mort Bay Consulting Pty Ltd and others.
//
// This program and the accompanying materials are made available under
// the terms of the Eclipse Public License 2.0 which is available at
// https://www.eclipse.org/legal/epl-2.0
//
// This Source Code may also be made available under the following
// Secondary Licenses when the conditions for such availability set
// forth in the Eclipse Public License, v. 2.0 are satisfied:
// the Apache License v2.0 which is available at
// https://www.apache.org/licenses/LICENSE-2.0
//
// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
// ========================================================================
//

package org.eclipse.jetty.websocket.javax.common.messages;

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
import java.nio.ByteBuffer;
import javax.websocket.CloseReason;
import javax.websocket.DecodeException;
import javax.websocket.Decoder;

import org.eclipse.jetty.websocket.core.CoreSession;
import org.eclipse.jetty.websocket.core.exception.CloseException;
import org.eclipse.jetty.websocket.javax.common.JavaxWebSocketFrameHandlerFactory;
import org.eclipse.jetty.websocket.util.messages.ByteBufferMessageSink;
import org.eclipse.jetty.websocket.util.messages.MessageSink;

/**
 * Message sink that aggregates a whole binary websocket message into a
 * {@link ByteBuffer}, runs it through the application-supplied
 * {@link Decoder.Binary}, and invokes the endpoint's method handle with the
 * decoded object.
 *
 * NOTE(review): {@code logger}, {@code getDecoder()} and {@code methodHandle}
 * are presumably inherited from {@code DecodedMessageSink} — confirm against
 * the superclass, which is not visible in this file.
 */
public class DecodedBinaryMessageSink<T> extends DecodedMessageSink<Decoder.Binary<T>>
{
    /**
     * @param session      the websocket core session
     * @param decoder      application decoder turning a ByteBuffer into T
     * @param methodHandle handle on the endpoint method to call with the decoded object
     */
    public DecodedBinaryMessageSink(CoreSession session, Decoder.Binary<T> decoder, MethodHandle methodHandle)
        throws NoSuchMethodException, IllegalAccessException
    {
        super(session, decoder, methodHandle);
    }

    /**
     * Builds the raw (pre-decode) handle: a virtual handle on this instance's
     * {@link #onWholeMessage(ByteBuffer)} so the raw sink can deliver the
     * aggregated buffer here for decoding.
     */
    @Override
    protected MethodHandle newRawMethodHandle()
        throws NoSuchMethodException, IllegalAccessException
    {
        return JavaxWebSocketFrameHandlerFactory.getServerMethodHandleLookup()
            .findVirtual(DecodedBinaryMessageSink.class, "onWholeMessage",
                MethodType.methodType(void.class, ByteBuffer.class))
            .bindTo(this);
    }

    /**
     * The raw sink is a {@link ByteBufferMessageSink}: it aggregates binary
     * frames into a single ByteBuffer before calling the raw handle above.
     */
    @Override
    protected MessageSink newRawMessageSink(CoreSession session, MethodHandle rawMethodHandle)
    {
        return new ByteBufferMessageSink(session, rawMethodHandle);
    }

    /**
     * Receives the fully aggregated binary message, decodes it and notifies
     * the endpoint. Decode failures and endpoint errors are both mapped to a
     * {@link CloseException} with close code CANNOT_ACCEPT (javax.websocket
     * contract for undecodable messages).
     */
    @SuppressWarnings("Duplicates")
    public void onWholeMessage(ByteBuffer wholeMessage)
    {
        // Decoder may veto the payload; the message is then dropped (logged, not fatal).
        if (!getDecoder().willDecode(wholeMessage))
        {
            logger.warn("Message lost, decoder " + getDecoder().getClass().getName() + "#willDecode() has rejected it.");
            return;
        }

        try
        {
            T obj = getDecoder().decode(wholeMessage);
            methodHandle.invoke(obj);
        }
        catch (DecodeException e)
        {
            throw new CloseException(CloseReason.CloseCodes.CANNOT_ACCEPT.getCode(), "Unable to decode", e);
        }
        catch (Throwable t)
        {
            // methodHandle.invoke declares Throwable; any endpoint failure closes the session.
            throw new CloseException(CloseReason.CloseCodes.CANNOT_ACCEPT.getCode(), "Endpoint notification error", t);
        }
    }
}
package com.elepy.tests;

import com.elepy.annotations.Model;
import com.elepy.annotations.TextArea;
import com.elepy.annotations.View;

import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;

/**
 * JPA-backed test model exposed by Elepy at {@code /settings}.
 *
 * NOTE(review): {@code @View(View.Defaults.SINGLE)} presumably renders this
 * model as a single-record settings page rather than a list — confirm against
 * the Elepy documentation.
 */
@Model(name = "Settings", path = "/settings")
@Entity(name = "settings")
@Table(name = "settings")
@View(View.Defaults.SINGLE)
public class Settings {

    // Primary key of the settings record.
    @Id
    private String id;

    // Display title of the settings record.
    private String title;

    // Long-form description; @TextArea marks it for multi-line editing in the UI.
    @TextArea
    private String description;

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }
}
package demo.smart.access.xutlis.views.SlidingLayout.transform;

import android.view.View;

/**
 * Strategy for transforming the root content view of a sliding layout while
 * the drawer is being dragged.
 *
 * Created by yarolegovich on 25.03.2017.
 */
public interface RootTransformation {

    /**
     * Applies the transformation for the current drag position.
     *
     * @param dragProgress current drag progress; presumably normalized to [0, 1] — TODO confirm against the caller
     * @param rootView     the root content view to transform
     */
    void transform(float dragProgress, View rootView);
}
// -------------------------------------------------------------------------------- // Copyright 2002-2021 Echo Three, LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // -------------------------------------------------------------------------------- package com.echothree.control.user.geo.common.form; import com.echothree.control.user.geo.common.edit.GeoCodeAliasTypeEdit; import com.echothree.control.user.geo.common.spec.GeoCodeAliasTypeSpec; import com.echothree.util.common.form.BaseEditForm; public interface EditGeoCodeAliasTypeForm extends BaseEditForm<GeoCodeAliasTypeSpec, GeoCodeAliasTypeEdit> { // Nothing additional beyond BaseEditForm }
package org.jiakesiws.minipika.components.config;

/* ************************************************************************
 *
 * Copyright (C) 2020 2B键盘 All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * ************************************************************************/

/*
 * Creates on 2020/2/13.
 */

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import lombok.Getter;
import org.jiakesiws.minipika.components.jdbc.datasource.unpooled.Dsi;
import org.jiakesiws.minipika.framework.config.Cfg;
import org.jiakesiws.minipika.framework.exception.ConfigException;
import org.jiakesiws.minipika.framework.jap.JapLoader;
import org.jiakesiws.minipika.framework.tools.Calculator;
import org.jiakesiws.minipika.framework.tools.Files;
import org.jiakesiws.minipika.framework.tools.StringUtils;
import org.jiakesiws.minipika.framework.tools.DateUtils;

import java.util.Map;
import java.util.Properties;

/**
 * Base class that loads minipika configuration from one of several sources
 * (Jap file, {@link Properties}, {@link Cfg}, or a plain {@link Map}) and
 * exposes typed accessors with sensible defaults.
 *
 * @author 2B键盘
 * @since 1.8
 */
public abstract class AbstractConfig {

  /** Datasource information built from the jdbc.* keys. */
  @Getter
  protected Dsi dsi;

  // Connection-pool bounds (raw strings; parsed lazily with defaults).
  protected String maxSize;
  protected String minSize;

  // Database table-name prefix.
  protected String tablePrefix;

  // Entity package paths (comma-separated in config).
  protected String[] entityPackage;

  // Mapper template file locations (comma-separated in config).
  protected String[] mapperPackage;

  // Whether transactions are enabled ("true"/"false").
  protected String transaction;

  // Whether caching is enabled.
  protected String cache;

  // Cache expiry expression (e.g. "30m"); see getRefresh().
  protected String refresh;

  // Database name, parsed from the JDBC URL.
  protected String dbname;

  // Parsed norm.json (field-constraint definitions), if configured.
  protected JSONObject normJson;

  // Parsed default_entity.json, if configured.
  protected JSONObject defaultEntity;

  // The backing config source; one of Map, Properties, or Cfg.
  private Object configObject;

  /** Loads configuration via the default Jap loader ("minipika" section). */
  public AbstractConfig() {
    this(new JapLoader().load().get("minipika"));
  }

  /**
   * Loads configuration from a Jap config map.
   *
   * @param config key/value pairs from the Jap file
   */
  public AbstractConfig(Map<String, String> config) {
    this(((Object) config));
  }

  /**
   * Loads configuration from a properties file (keys prefixed "minipika.").
   *
   * @param config the properties source
   */
  public AbstractConfig(Properties config) {
    this((Object) config);
  }

  /** Loads configuration from a {@link Cfg} source. */
  public AbstractConfig(Cfg cfg) {
    this((Object) cfg);
  }

  public AbstractConfig(Object config) {
    this.configObject = config;
    initConfig();
  }

  /**
   * Reads all known keys from the config source and populates the fields.
   */
  private void initConfig() {
    this.cache = getValue("cache.enable");
    this.refresh = getValue("cache.refresh");
    this.maxSize = getValue("pool.maximum");
    this.minSize = getValue("pool.minimum");
    this.tablePrefix = getValue("entity.prefix");
    this.transaction = getValue("jdbc.transaction");
    if (StringUtils.isEmpty(transaction)) {
      this.transaction = "false";
    }
    this.dsi = new Dsi(
        getValue("jdbc.url"),
        getValue("jdbc.driver"),
        getValue("jdbc.password"),
        getValue("jdbc.username"),
        Boolean.parseBoolean(this.transaction)
    );
    // Entity package(s), comma-separated.
    String entityPackages = getValue("entity.package");
    if (!StringUtils.isEmpty(entityPackages)) {
      this.entityPackage = entityPackages.split(",");
    }
    // Mapper template location(s), comma-separated.
    String mapperPackages = getValue("mapper.package");
    if (!StringUtils.isEmpty(mapperPackages)) {
      this.mapperPackage = mapperPackages.split(",");
    }
    // Field-constraint config file.
    String normJsonName = getValue("entity.norm");
    if (!StringUtils.isEmpty(normJsonName)) {
      this.normJson = JSONObject.parseObject(Files.read(normJsonName));
    }
    // Default-entity config file.
    String defaultEntityName = getValue("default.entity");
    if (!StringUtils.isEmpty(defaultEntityName)) {
      this.defaultEntity = JSONObject.parseObject(Files.read(defaultEntityName));
    }
    this.dbname = parseDatabaseName(dsi.getUrl());
  }

  /**
   * Extracts the database name from a JDBC URL such as
   * {@code jdbc:mysql://host:3306/db?useSSL=false}.
   * <p>
   * Fix: the previous code did {@code temp.substring(0, temp.indexOf("?"))}
   * unconditionally, which threw StringIndexOutOfBoundsException when the URL
   * had no query string, and NPE'd on a missing jdbc.url.
   *
   * @param url the JDBC URL, may be null/empty
   * @return the database name, or null if it cannot be determined
   */
  private static String parseDatabaseName(String url) {
    if (StringUtils.isEmpty(url)) {
      return null;
    }
    String temp = url;
    // Strip up to three '/'-delimited prefixes ("jdbc:mysql:", "", "host:port").
    for (int i = 0; i < 3; i++) {
      temp = temp.substring(temp.indexOf("/") + 1);
    }
    int query = temp.indexOf("?");
    return query >= 0 ? temp.substring(0, query) : temp;
  }

  /**
   * Looks up a config value by dotted key, dispatching on the source type.
   *
   * @param key dotted key such as "jdbc.url"
   * @return the configured value, or null if absent
   * @throws ConfigException if the backing source type is unsupported
   */
  public String getValue(String key) {
    if (configObject instanceof Map) {
      return (String) ((Map) configObject).get(key);
    } else if (configObject instanceof Properties) {
      // Properties files carry a "minipika." prefix on every key.
      return ((Properties) configObject).getProperty("minipika.".concat(key));
    } else if (configObject instanceof Cfg) {
      // NOTE(review): assumes every key contains a '.'; a dot-less key would
      // throw StringIndexOutOfBoundsException here. All built-in keys qualify.
      int index = key.indexOf(".");
      String root = key.substring(0, index);
      return ((Cfg) configObject).get(root, key.substring(index + 1));
    }
    throw new ConfigException("unknown config object.");
  }

  public String getDbname() {
    return dbname;
  }

  public String[] getEntityPackage() {
    return entityPackage;
  }

  /** @return the table prefix, or "" when not configured */
  public String getTablePrefix() {
    return tablePrefix == null ? "" : tablePrefix;
  }

  /** @return pool maximum size, defaulting to 6 */
  public Integer getMaxSize() {
    return Integer.valueOf(StringUtils.isEmpty(maxSize) ? "6" : maxSize);
  }

  /** @return pool minimum size, defaulting to 2 */
  public Integer getMinSize() {
    return Integer.valueOf(StringUtils.isEmpty(minSize) ? "2" : minSize);
  }

  // Name kept for backward compatibility with existing callers.
  public Boolean gettransaction() {
    return Boolean.valueOf(transaction);
  }

  public boolean getCache() {
    return Boolean.parseBoolean(cache == null ? "false" : cache);
  }

  /**
   * Parses the cache refresh expression into milliseconds-equivalent units.
   * Supports unit suffixes (s/m/h/d/w via DateUtils *_STR constants); a bare
   * number is treated as seconds. Defaults to 6 hours when unset.
   */
  public long getRefresh() {
    if (StringUtils.isEmpty(refresh)) {
      return DateUtils.HOUR * 6;
    }
    Calculator calculator = new Calculator();
    if (refresh.contains(DateUtils.SECOND_STR)
        || refresh.contains(DateUtils.MINUTE_STR)
        || refresh.contains(DateUtils.HOUR_STR)
        || refresh.contains(DateUtils.DAY_STR)
        || refresh.contains(DateUtils.WEEK_STR)) {
      // Replace each unit suffix with its numeric value, then evaluate.
      refresh = refresh.toLowerCase();
      refresh = refresh.replaceAll(DateUtils.SECOND_STR, String.valueOf(DateUtils.SECOND));
      refresh = refresh.replaceAll(DateUtils.MINUTE_STR, String.valueOf(DateUtils.MINUTE));
      refresh = refresh.replaceAll(DateUtils.HOUR_STR, String.valueOf(DateUtils.HOUR));
      refresh = refresh.replaceAll(DateUtils.DAY_STR, String.valueOf(DateUtils.DAY));
      refresh = refresh.replaceAll(DateUtils.WEEK_STR, String.valueOf(DateUtils.WEEK));
      return calculator.express(refresh);
    } else {
      return calculator.express(refresh) * DateUtils.SECOND;
    }
  }

  public String[] getMapperBasePackage() {
    return mapperPackage;
  }

  /**
   * Returns the regex/constraint entry for a field name.
   * NOTE(review): throws NPE when entity.norm was never configured — callers
   * appear to rely on it being present; verify before hardening.
   *
   * @param name the constraint name
   * @return the configured pattern string
   */
  public String getNorm(String name) {
    return (String) this.normJson.get(name);
  }

  public JSONObject getDefaultEntity() {
    return this.defaultEntity;
  }
}
package net.kunmc.lab.followeractionpoint.command.lib.Argument;

import java.util.ArrayList;
import java.util.List;

/**
 * Holds the set of accepted argument signatures for a command and resolves
 * which one matches a concrete invocation.
 */
public class ArgumentsList {

    /** All registered signatures, in registration order. */
    private final List<Arguments> list = new ArrayList<>();

    /** Registers a new argument signature. */
    public void add(Arguments arguments) {
        list.add(arguments);
    }

    /**
     * Finds the first registered signature whose length matches the given
     * args and binds the values to it.
     *
     * @param args the raw command arguments
     * @return the bound signature, or null when nothing matches
     */
    public Arguments getMatchArguments(String[] args) {
        for (Arguments candidate : list) {
            if (candidate.isMatchArgsLength(args)) {
                return candidate.setArguments(args);
            }
        }
        return null;
    }

    /**
     * Collects the declared name of the argument at {@code index} from every
     * registered signature (used for tab completion).
     */
    public List<String> argumentsNameList(int index) {
        List<String> names = new ArrayList<>(list.size());
        for (Arguments signature : list) {
            names.add(signature.get(index).name());
        }
        return names;
    }

    /**
     * Returns every signature long enough to contain position {@code index}.
     * NOTE(review): the original check is {@code size() >= index}; if indices
     * are 0-based this looks off by one ({@code size() > index} would be the
     * usual bound) — confirm Arguments' indexing convention before changing.
     */
    public List<Arguments> get(int index) {
        List<Arguments> matching = new ArrayList<>();
        for (Arguments signature : list) {
            if (signature.size() >= index) {
                matching.add(signature);
            }
        }
        return matching;
    }
}
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.upgrades;

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import org.apache.lucene.util.TimeUnits;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;

/**
 * Runs the YAML REST test suite against a cluster mid rolling-upgrade.
 * Indices and repositories are preserved between test phases so that state
 * created before the upgrade can be verified afterwards.
 */
@TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs
public class UpgradeClusterClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    // Keep indices across the upgrade phases so later phases can inspect them.
    @Override
    protected boolean preserveIndicesUponCompletion() {
        return true;
    }

    // Likewise keep snapshot repositories registered by earlier phases.
    @Override
    protected boolean preserveReposUponCompletion() {
        return true;
    }

    public UpgradeClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) {
        super(testCandidate);
    }

    /** One suite instance per YAML test candidate. */
    @ParametersFactory
    public static Iterable<Object[]> parameters() throws Exception {
        return createParameters();
    }

    @Override
    protected Settings restClientSettings() {
        return Settings.builder().put(super.restClientSettings())
                // increase the timeout here to 90 seconds to handle long waits for a green
                // cluster health. the waits for green need to be longer than a minute to
                // account for delayed shards
                .put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "90s")
                .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
                .build();
    }
}
package io.costax.hibernatetunings.entities.blog;

import javax.persistence.*;

/**
 * Per-topic view counter. Shares its primary key with the owning {@link Topic}
 * via {@code @MapsId} (one statistics row per topic).
 */
@Entity
@Table(name = "topic_statistics")
public class TopicStatistic {

    /** Same value as the owning topic's id (maintained by @MapsId). */
    @Id
    private Long id;

    /** Owning topic; lazily loaded, immutable once set. */
    @OneToOne(fetch = FetchType.LAZY)
    @MapsId
    @JoinColumn(name = "topic_id", nullable = false, updatable = false)
    private Topic topic;

    /** Number of recorded views. */
    private int views = 0;

    // Package-private on purpose: the association is wired by the owning side.
    void setTopic(final Topic topic) {
        this.topic = topic;
    }

    /** Records one additional view. */
    public void incrementViews() {
        views++;
    }

    public Topic getTopic() {
        return topic;
    }

    /** Added: id accessor (field was previously write-only via JPA). */
    public Long getId() {
        return id;
    }

    /** Added: views was mutated by incrementViews() but had no reader. */
    public int getViews() {
        return views;
    }
}
package com.bootdo.common.utils;

import com.bootdo.system.domain.UserDO;
import org.apache.shiro.SecurityUtils;
import org.apache.shiro.subject.Subject;

/**
 * Static helpers around the Shiro {@link Subject} for the current thread.
 */
public class ShiroUtils {

    // Utility class — not instantiable.
    private ShiroUtils() {
    }

    /** @return the Shiro subject bound to the calling thread */
    public static Subject getSubject() {
        return SecurityUtils.getSubject();
    }

    /**
     * @return the Shiro subject bound to the calling thread
     * @deprecated misspelled name kept for existing callers; use {@link #getSubject()}
     */
    @Deprecated
    public static Subject getSubjct() {
        return getSubject();
    }

    /**
     * @return the authenticated user attached as the subject's principal
     *         (ClassCastException if the principal is not a UserDO)
     */
    public static UserDO getUser() {
        Object object = getSubject().getPrincipal();
        return (UserDO) object;
    }

    /** @return the id of the authenticated user (NPE when not logged in) */
    public static Long getUserId() {
        return getUser().getUserId();
    }

    /** Logs the current subject out. */
    public static void logout() {
        getSubject().logout();
    }
}
package edu.fiuba.algo3.modelo.Parser;

import edu.fiuba.algo3.modelo.JuegoYJugador.Jugador;
import edu.fiuba.algo3.modelo.Objetivos.*;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Set;

/**
 * Builds game objectives from their textual descriptions, grouped by type
 * ("Ocupacion", "Destruccion", anything else = common objective), and then
 * combines them into per-player objectives.
 */
public class ConstructorObjetivos {

    HashMap<String, ArrayList<Objetivo>> objetivos;
    HashMap<Integer, Jugador> jugadores;

    public ConstructorObjetivos(HashMap<Integer, Jugador> jugadores) {
        objetivos = new HashMap<>();
        this.jugadores = jugadores;
    }

    /**
     * Parses every objective description and stores the built objectives
     * grouped under their type key.
     *
     * @param descripciones raw descriptions, keyed by objective type
     * @param continentes   continents referenced by occupation objectives
     */
    public void construirObjetivos(HashMap<String, ArrayList<String>> descripciones,
                                   HashMap<String, Continente> continentes) {
        Set<String> tipos = descripciones.keySet();
        for (String tipo : tipos) {
            ArrayList<String> delTipo = descripciones.get(tipo);
            switch (tipo) {
                case "Ocupacion":
                    this.objetivos.put(tipo, crearObjetivosOcupacion(delTipo, continentes));
                    break;
                case "Destruccion":
                    this.objetivos.put(tipo, crearObjetivosDestruccion(delTipo));
                    break;
                default:
                    this.objetivos.put(tipo, crearObjetivosComun(delTipo));
                    break;
            }
        }
    }

    // Each description is "continentA,continentB,countryCount".
    private ArrayList<Objetivo> crearObjetivosOcupacion(ArrayList<String> delTipo,
                                                        HashMap<String, Continente> continentes) {
        ArrayList<Objetivo> resultado = new ArrayList<>();
        for (String descripcion : delTipo) {
            String[] partes = descripcion.split(",");
            Continente primero = continentes.get(partes[0]);
            Continente segundo = continentes.get(partes[1]);
            int cantidadPaises = Integer.parseInt(partes[2]);
            resultado.add(new ObjetivoConquistar(primero, segundo, cantidadPaises));
        }
        return resultado;
    }

    // Each description is the numeric id of the player to defeat.
    private ArrayList<Objetivo> crearObjetivosDestruccion(ArrayList<String> delTipo) {
        ArrayList<Objetivo> resultado = new ArrayList<>();
        for (String descripcion : delTipo) {
            Jugador objetivoAJugador = this.jugadores.get(Integer.parseInt(descripcion));
            resultado.add(new ObjetivoDerrotar(objetivoAJugador));
        }
        return resultado;
    }

    // Each description is the numeric target for a general objective.
    private ArrayList<Objetivo> crearObjetivosComun(ArrayList<String> delTipo) {
        ArrayList<Objetivo> resultado = new ArrayList<>();
        for (String descripcion : delTipo) {
            resultado.add(new ObjetivoGeneral(Integer.parseInt(descripcion)));
        }
        return resultado;
    }

    /**
     * @return per-player objectives: each occupation/destruction objective
     *         combined with the single common objective
     */
    public ArrayList<Objetivo> getObjetivos() {
        return this.construirObjetivos();
    }

    // Combine the common objective with each specific objective, keeping the
    // original output order: occupation objectives first, then destruction.
    private ArrayList<Objetivo> construirObjetivos() {
        Objetivo comun = this.objetivos.get("Comun").get(0);
        ArrayList<Objetivo> finales = new ArrayList<>();
        this.completarConstruccionDe(finales, comun, this.objetivos.get("Ocupacion"));
        this.completarConstruccionDe(finales, comun, this.objetivos.get("Destruccion"));
        return finales;
    }

    private void completarConstruccionDe(ArrayList<Objetivo> finales,
                                         Objetivo comun,
                                         ArrayList<Objetivo> delTipo) {
        for (Objetivo especifico : delTipo) {
            finales.add(new ObjetivoJugador(comun, especifico));
        }
    }
}
package com.example.imeeting.modules.story.presenter;

import android.util.Log;

import com.example.imeeting.base.presenter.BasePresenter;
import com.example.imeeting.modules.story.contract.SearchStoryActivityContract;
import com.example.imeeting.modules.story.model.StoryResponse;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import javax.inject.Inject;

import io.reactivex.android.schedulers.AndroidSchedulers;
import io.reactivex.observers.ResourceObserver;
import io.reactivex.schedulers.Schedulers;

/**
 * Presenter for the story-search screen: loads "guess you like" suggestions,
 * search history, and keyword search results, pushing each to the view on the
 * Android main thread.
 * <p>
 * created by wangbin on 2019/6/14
 */
public class StorySearchPresenter extends BasePresenter<SearchStoryActivityContract.SearchStoryIview> implements SearchStoryActivityContract.SearchStoryIPresenter {

    @Inject
    public StorySearchPresenter(){}

    /**
     * Fetches suggestion text and splits it on commas into a list for the view.
     * NOTE(review): onError is silently swallowed — the view is never told a
     * suggestion load failed; confirm this is intentional.
     */
    @Override
    public void getGuesslike() {
        addSubscribe(mDataManager.guessLike("")
                .subscribeOn(Schedulers.io())
                .observeOn(AndroidSchedulers.mainThread())
                .subscribeWith(new ResourceObserver<String>(){
                    @Override
                    public void onNext(String s) {
                        Log.d("wangbin",s);
                        // Server returns one comma-separated string; split it
                        // into individual suggestions.
                        List<String> list = new ArrayList<>();
                        String[] ss = s.split(",");
                        list = Arrays.asList(ss);
                        mView.showGuesslike(list);
                    }

                    @Override
                    public void onError(Throwable e) {

                    }

                    @Override
                    public void onComplete() {

                    }
                }));
    }

    /**
     * Loads all previously-searched keywords and shows them in the view.
     * Errors are silently ignored (history is best-effort).
     */
    @Override
    public void getSearchhistory() {
        addSubscribe(mDataManager.queryAllHistory()
                .subscribeOn(Schedulers.io())
                .observeOn(AndroidSchedulers.mainThread())
                .subscribeWith(new ResourceObserver<List<String>>(){
                    @Override
                    public void onNext(List<String> strings) {
                        mView.showSearchHistory(strings);
                    }

                    @Override
                    public void onError(Throwable e) {

                    }

                    @Override
                    public void onComplete() {

                    }
                }));
    }

    /**
     * Runs a keyword search and shows the results; on failure the view
     * receives null (the view is expected to treat null as "no results" —
    * TODO confirm against SearchStoryIview implementation).
     * Also refreshes the search history afterwards.
     *
     * @param type search category passed through to the data layer
     * @param key  the search keyword
     */
    @Override
    public void searchWithKey(String type, String key) {
        addSubscribe(mDataManager.getStoryList("",type,key)
                .subscribeOn(Schedulers.io())
                .observeOn(AndroidSchedulers.mainThread())
                .subscribeWith(new ResourceObserver<List<StoryResponse>>(){
                    @Override
                    public void onNext(List<StoryResponse> responses) {
                        mView.showSearchResult(responses);
                    }

                    @Override
                    public void onError(Throwable e) {
                        mView.showSearchResult(null);
                    }

                    @Override
                    public void onComplete() {

                    }
                }));
        getSearchhistory();
    }

    /**
     * Persists a keyword to search history on a background thread.
     * NOTE(review): uses a raw fire-and-forget Thread instead of the Rx
     * schedulers used elsewhere; the write is unmanaged and untracked.
     *
     * @param s the keyword to store
     */
    @Override
    public void add(String s) {
        new Thread(){
            @Override
            public void run() {
                mDataManager.add(s);
            }
        }.start();
    }
}
/**
 * Copyright 2020 bejson.com
 */
package com.binggr.glmall.product.vo;

import lombok.Data;

/**
 * View object for a single SKU image.
 * <p>
 * Auto-generated: 2020-10-13 14:33:26
 *
 * @author bejson.com (i@bejson.com)
 * @website http://www.bejson.com/java2pojo/
 */
@Data
public class Images {

    // URL of the image resource.
    private String imgUrl;

    // Whether this is the default image — presumably 1 = default, 0 = not;
    // TODO confirm against the upstream JSON contract.
    private int defaultImg;
}
package software.amazon.ssm.maintenancewindowtask; import software.amazon.cloudformation.proxy.AmazonWebServicesClientProxy; import software.amazon.cloudformation.proxy.Logger; import software.amazon.cloudformation.proxy.OperationStatus; import software.amazon.cloudformation.proxy.ProgressEvent; import software.amazon.cloudformation.proxy.ResourceHandlerRequest; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; @ExtendWith(MockitoExtension.class) public class ReadHandlerTest { @Mock private AmazonWebServicesClientProxy proxy; @Mock private Logger logger; @BeforeEach public void setup() { proxy = mock(AmazonWebServicesClientProxy.class); logger = mock(Logger.class); } @Test public void handleRequest_SimpleSuccess() { final ReadHandler handler = new ReadHandler(); final ResourceModel model = ResourceModel.builder().build(); final ResourceHandlerRequest<ResourceModel> request = ResourceHandlerRequest.<ResourceModel>builder() .desiredResourceState(model) .build(); final ProgressEvent<ResourceModel, CallbackContext> response = handler.handleRequest(proxy, request, null, logger); assertThat(response).isNotNull(); assertThat(response.getStatus()).isEqualTo(OperationStatus.SUCCESS); assertThat(response.getCallbackContext()).isNull(); assertThat(response.getCallbackDelaySeconds()).isEqualTo(0); assertThat(response.getResourceModel()).isEqualTo(request.getDesiredResourceState()); assertThat(response.getResourceModels()).isNull(); assertThat(response.getMessage()).isNull(); assertThat(response.getErrorCode()).isNull(); } }
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.joshua.decoder.ff.lm.bloomfilter_lm;

import java.io.Externalizable;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInput;
import java.io.ObjectInputStream;
import java.io.ObjectOutput;
import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

import org.apache.joshua.corpus.Vocabulary;
import org.apache.joshua.decoder.ff.lm.DefaultNGramLanguageModel;
import org.apache.joshua.util.Regex;
import org.apache.joshua.util.io.LineReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * An n-gram language model with linearly-interpolated Witten-Bell smoothing, using a Bloom filter
 * as its main data structure. A Bloom filter is a lossy data structure that can be used to test for
 * set membership.
 */
public class BloomFilterLanguageModel extends DefaultNGramLanguageModel implements Externalizable {

  /** An initial value used for hashing n-grams so that they can be stored in a bloom filter. */
  public static final int HASH_SEED = 17;

  /** Another value used in the process of hashing n-grams. */
  public static final int HASH_OFFSET = 37;

  /** The maximum score that a language model feature function can return to the Joshua decoder. */
  public static final double MAX_SCORE = 100.0;

  private static final Logger LOG = LoggerFactory.getLogger(BloomFilterLanguageModel.class);

  /** The Bloom filter data structure itself. */
  private BloomFilter bf;

  /**
   * The base of the logarithm used to quantize n-gram counts. N-gram counts are quantized
   * logarithmically to reduce the number of times we need to query the Bloom filter.
   */
  private double quantizationBase;

  /** Natural log of the number of tokens seen in the training corpus. */
  private double numTokens;

  /** Pairs of longs used as hash functions for storing/retrieving n-gram counts. */
  private long[][] countFuncs;

  /** Pairs of longs used as hash functions for storing/retrieving distinct-type counts. */
  private long[][] typesFuncs;

  /**
   * The smoothed probability of an unseen n-gram. This is also the probability of any n-gram under
   * the zeroth-order model.
   */
  private transient double p0;

  /**
   * The interpolation constant between Witten-Bell models of order zero and one. Precomputed
   * because it does not depend on the particular n-gram.
   */
  private transient double lambda0;

  /** Upper bound on the quantized count of any n-gram stored in the Bloom filter. */
  private transient int maxQ; // max quantized count

  /**
   * Constructor called from the Joshua decoder. Assumes the LM has already been built and loads it
   * from the given file.
   *
   * @param order the order of the language model
   * @param filename path to the file where the language model is stored
   * @throws IOException if the bloom filter language model cannot be rebuilt from the input file
   */
  public BloomFilterLanguageModel(int order, String filename) throws IOException {
    super(order);
    // try-with-resources: the original leaked this stream on both paths.
    try (ObjectInputStream in =
        new ObjectInputStream(new GZIPInputStream(new FileInputStream(filename)))) {
      readExternal(in);
    } catch (ClassNotFoundException e) {
      throw new IOException("Could not rebuild bloom filter LM from file " + filename, e);
    }

    int vocabSize = Vocabulary.size();
    p0 = -Math.log(vocabSize + 1);
    double oneMinusLambda0 = numTokens - logAdd(Math.log(vocabSize), numTokens);
    p0 += oneMinusLambda0;
    lambda0 = Math.log(vocabSize) - logAdd(Math.log(vocabSize), numTokens);
    maxQ = quantize((long) Math.exp(numTokens));
  }

  /**
   * Constructor used by {@link #main} to build a new language model from scratch.
   *
   * @param filename path to the file of training corpus statistics
   * @param order the order of the language model
   * @param size the size of the Bloom filter, in bits
   * @param base the base of the logarithm for quantization
   */
  private BloomFilterLanguageModel(String filename, int order, int size, double base) {
    super(order);
    quantizationBase = base;
    populateBloomFilter(size, filename);
  }

  /**
   * Calculates the linearly-interpolated Witten-Bell probability for a given ngram:
   * p(w|h) = pML(w|h)L(h) + (1 - L(h))p(w|h'), where h' is h with the first word removed and
   * L(h) = s(h) / (s(h) + c(h)) with s(.) the number of distinct types after h and c(.) the count
   * of h. The probability is built up from the lowest order to exploit the one-sided error of the
   * Bloom filter.
   *
   * @param ngram the ngram whose probability is to be calculated
   * @param ngramOrder the order of the ngram
   * @return the linearly-interpolated Witten-Bell smoothed probability of an ngram
   */
  private float wittenBell(int[] ngram, int ngramOrder) {
    int end = ngram.length;
    double p = p0; // current calculated probability
    // p0 and lambda0 are independent of the given ngram, so they are precomputed.
    int MAX_QCOUNT = getCount(ngram, ngram.length - 1, ngram.length, maxQ);
    if (MAX_QCOUNT == 0) // OOV!
      return (float) p;
    double pML = Math.log(unQuantize(MAX_QCOUNT)) - numTokens;

    // p += lambda0 * pML (in log space)
    p = logAdd(p, (lambda0 + pML));
    if (ngram.length == 1) { // if it's a unigram, we're done
      return (float) p;
    }
    // otherwise interpolate with higher-order models
    for (int i = end - 2; i >= end - ngramOrder && i >= 0; i--) {
      int historyCnt = getCount(ngram, i, end, MAX_QCOUNT);
      // if the history count is zero, all higher terms in the interpolation are zero
      if (historyCnt == 0) {
        return (float) p;
      }
      int historyTypesAfter = getTypesAfter(ngram, i, end, historyCnt);
      // unQuantize the counts we got from the BF
      double HC = unQuantize(historyCnt);
      double HTA = 1 + unQuantize(historyTypesAfter);
      // interpolation constant
      double lambda = Math.log(HTA) - Math.log(HTA + HC);
      double oneMinusLambda = Math.log(HC) - Math.log(HTA + HC);
      // p *= 1 - lambda
      p += oneMinusLambda;
      int wordCount = getCount(ngram, i + 1, end, historyTypesAfter);
      double WC = unQuantize(wordCount);
      // p += lambda * p_ML(w|h)
      if (WC == 0) return (float) p;
      p = logAdd(p, lambda + Math.log(WC) - Math.log(HC));
      MAX_QCOUNT = wordCount;
    }
    return (float) p;
  }

  /**
   * Retrieves the (quantized) count of an ngram from the Bloom filter. Roughly algorithm 2 in
   * Talbot and Osborne's "Tera-Scale LMs on the Cheap."
   *
   * @param ngram array containing the ngram as a sub-array
   * @param start the index of the first word of the ngram
   * @param end the index after the last word of the ngram
   * @param qcount the maximum possible count to be returned
   * @return the quantized count of the ngram in the training corpus
   */
  private int getCount(int[] ngram, int start, int end, int qcount) {
    for (int i = 1; i <= qcount; i++) {
      int hash = hashNgram(ngram, start, end, i);
      if (!bf.query(hash, countFuncs)) {
        return i - 1;
      }
    }
    return qcount;
  }

  /**
   * Retrieves the number of distinct types that follow an ngram in the training corpus. Another
   * version of algorithm 2; differs from {@link #getCount} because c(x) = 1 is a proxy item for
   * s(x) = 1.
   *
   * @param ngram an array that contains the ngram as a sub-array
   * @param start the index of the first word of the ngram
   * @param end the index after the last word of the ngram
   * @param qcount the maximum possible return value
   * @return the quantized number of distinct types observed to follow the ngram
   */
  private int getTypesAfter(int[] ngram, int start, int end, int qcount) {
    // first check c(x) >= 1
    int hash = hashNgram(ngram, start, end, 1);
    if (!bf.query(hash, countFuncs)) {
      return 0;
    }
    // if c(x) >= 1, check for the stored suffix count
    for (int i = 1; i < qcount; i++) {
      hash = hashNgram(ngram, start, end, i);
      if (!bf.query(hash, typesFuncs)) {
        return i - 1;
      }
    }
    return qcount;
  }

  /**
   * Logarithmically quantizes raw counts (Talbot and Osborne's scheme).
   *
   * @param x the raw count to be quantized
   * @return the quantized count
   */
  private int quantize(long x) {
    return 1 + (int) Math.floor(Math.log(x) / Math.log(quantizationBase));
  }

  /**
   * Unquantizes a quantized count.
   *
   * @param x the quantized count
   * @return the expected raw value of the quantized count
   */
  private double unQuantize(int x) {
    if (x == 0) {
      return 0;
    } else {
      return ((quantizationBase + 1) * Math.pow(quantizationBase, x - 1) - 1) / 2;
    }
  }

  /**
   * Converts an n-gram and a count into a value that can be stored in the Bloom filter. Adapted
   * from {@code AbstractPhrase.hashCode()} elsewhere in the Joshua code base.
   *
   * @param ngram an array containing the ngram as a sub-array
   * @param start the index of the first word of the ngram
   * @param end the index after the last word of the ngram
   * @param val the count of the ngram
   * @return a value suitable to be stored in a Bloom filter
   */
  private int hashNgram(int[] ngram, int start, int end, int val) {
    int result = HASH_OFFSET * HASH_SEED + val;
    for (int i = start; i < end; i++)
      result = HASH_OFFSET * result + ngram[i];
    return result;
  }

  /**
   * Adds two numbers in the log domain, avoiding underflow.
   *
   * @param x one summand
   * @param y the other summand
   * @return the log of the sum of the exponents of the two numbers
   */
  private static double logAdd(double x, double y) {
    if (y <= x) {
      return x + Math.log1p(Math.exp(y - x));
    } else {
      return y + Math.log1p(Math.exp(x - y));
    }
  }

  /**
   * Builds a language model and stores it in a file.
   *
   * @param argv command-line arguments
   */
  public static void main(String[] argv) {
    if (argv.length < 5) {
      String msg = "usage: BloomFilterLanguageModel <statistics file> <order> <size>"
          + " <quantization base> <output file>";
      System.err.println(msg);
      LOG.error(msg);
      return;
    }
    int order = Integer.parseInt(argv[1]);
    int size = (int) (Integer.parseInt(argv[2]) * Math.pow(2, 23));
    double base = Double.parseDouble(argv[3]);

    try (ObjectOutputStream out =
        new ObjectOutputStream(new GZIPOutputStream(new FileOutputStream(argv[4])))) {
      // try-with-resources resolves the original's "TODO: try-with-resources".
      BloomFilterLanguageModel lm = new BloomFilterLanguageModel(argv[0], order, size, base);
      lm.writeExternal(out);
    } catch (IOException e) {
      LOG.error(e.getMessage(), e);
    }
  }

  /**
   * Opens the statistics file, transparently un-gzipping when the name ends in ".gz".
   *
   * @param filename path to the statistics file
   * @return an open InputStream over the (possibly decompressed) statistics
   * @throws IOException if the file cannot be opened
   */
  private static InputStream openStatsFile(String filename) throws IOException {
    InputStream in = new FileInputStream(filename);
    return filename.endsWith(".gz") ? new GZIPInputStream(in) : in;
  }

  /**
   * Adds ngram counts and counts of distinct types after ngrams, read from a file, to the Bloom
   * filter.
   * <p>
   * The file format should look like: {@code ngram count types-after} per line.
   *
   * @param bloomFilterSize the size of the Bloom filter, in bits
   * @param filename path to the statistics file
   */
  private void populateBloomFilter(int bloomFilterSize, String filename) {
    HashMap<String, Long> typesAfter = new HashMap<>();
    try {
      // First pass: estimate the number of objects so we can size the hash functions.
      // (The original leaked this second stream; both passes now use try-with-resources.)
      int numObjects;
      try (InputStream estimateStream = openStatsFile(filename)) {
        numObjects = estimateNumberOfObjects(estimateStream);
      }
      LOG.debug("Estimated number of objects: {}", numObjects);
      bf = new BloomFilter(bloomFilterSize, numObjects);
      countFuncs = bf.initializeHashFunctions();
      // Second pass: store counts and accumulate types-after statistics.
      try (InputStream in = openStatsFile(filename)) {
        populateFromInputStream(in, typesAfter);
      }
    } catch (IOException e) {
      LOG.error(e.getMessage(), e);
      return;
    }
    typesFuncs = bf.initializeHashFunctions();
    for (String history : typesAfter.keySet()) {
      String[] toks = Regex.spaces.split(history);
      int[] hist = new int[toks.length];
      for (int i = 0; i < toks.length; i++)
        hist[i] = Vocabulary.id(toks[i]);
      add(hist, typesAfter.get(history), typesFuncs);
    }
  }

  /**
   * Estimates the number of objects that will be stored in the Bloom filter, so the optimal
   * number of hash functions can be chosen before reading the statistics file.
   *
   * @param source an InputStream pointing to the training corpus stats
   * @return an estimate of the number of objects to be stored in the Bloom filter
   */
  private int estimateNumberOfObjects(InputStream source) {
    int numLines = 0;
    long maxCount = 0;
    for (String line : new LineReader(source)) {
      if (line.trim().equals("")) continue;
      String[] toks = Regex.spaces.split(line);
      if (toks.length > ngramOrder + 1) continue;
      try {
        long cnt = Long.parseLong(toks[toks.length - 1]);
        if (cnt > maxCount) maxCount = cnt;
      } catch (NumberFormatException e) {
        LOG.error(e.getMessage(), e);
        break;
      }
      numLines++;
    }
    double estimate = Math.log(maxCount) / Math.log(quantizationBase);
    return (int) Math.round(numLines * estimate);
  }

  /**
   * Reads statistics from a source and stores them in the Bloom filter. Ngram counts are stored
   * immediately; counts of distinct types following each ngram are accumulated as we go.
   *
   * @param source an InputStream pointing to the statistics
   * @param types accumulates counts of distinct types observed to follow each ngram
   */
  private void populateFromInputStream(InputStream source, HashMap<String, Long> types) {
    numTokens = Double.NEGATIVE_INFINITY; // = log(0)
    for (String line : new LineReader(source)) {
      String[] toks = Regex.spaces.split(line);
      if ((toks.length < 2) || (toks.length > ngramOrder + 1)) continue;
      int[] ngram = new int[toks.length - 1];
      StringBuilder history = new StringBuilder();
      for (int i = 0; i < toks.length - 1; i++) {
        ngram[i] = Vocabulary.id(toks[i]);
        if (i < toks.length - 2) history.append(toks[i]).append(" ");
      }

      long cnt = Long.parseLong(toks[toks.length - 1]);
      add(ngram, cnt, countFuncs);
      if (toks.length == 2) { // unigram
        numTokens = logAdd(numTokens, Math.log(cnt));
        // no need to count types after ""; that's what vocabulary.size() is for.
        continue;
      }
      // BUG FIX: the original called types.get(history) with the StringBuilder
      // itself as the key; a StringBuilder never equals a String, so the lookup
      // always returned null and every history's count was reset to 1.
      types.merge(history.toString(), 1L, Long::sum);
    }
  }

  /**
   * Adds an ngram, along with an associated value, to the Bloom filter. This corresponds to Talbot
   * and Osborne's "Tera-scale LMs on the cheap", algorithm 1.
   *
   * @param ngram an array representing the ngram
   * @param value the value to be associated with the ngram
   * @param funcs an array of long to be used as hash functions
   */
  private void add(int[] ngram, long value, long[][] funcs) {
    if (ngram == null) return;
    int qValue = quantize(value);
    for (int i = 1; i <= qValue; i++) {
      int hash = hashNgram(ngram, 0, ngram.length, i);
      bf.add(hash, funcs);
    }
  }

  /**
   * Reads a Bloom filter LM from an external file.
   *
   * @param in an ObjectInput stream to read from
   */
  public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    int vocabSize = in.readInt();
    for (int i = 0; i < vocabSize; i++) {
      String line = in.readUTF();
      Vocabulary.id(line);
    }
    numTokens = in.readDouble();
    countFuncs = new long[in.readInt()][2];
    for (int i = 0; i < countFuncs.length; i++) {
      countFuncs[i][0] = in.readLong();
      countFuncs[i][1] = in.readLong();
    }
    typesFuncs = new long[in.readInt()][2];
    for (int i = 0; i < typesFuncs.length; i++) {
      typesFuncs[i][0] = in.readLong();
      typesFuncs[i][1] = in.readLong();
    }
    quantizationBase = in.readDouble();
    bf = new BloomFilter();
    bf.readExternal(in);
  }

  /**
   * Writes a Bloom filter LM to some external location.
   *
   * @param out an ObjectOutput stream to write to
   * @throws IOException if an input or output exception occurred
   */
  public void writeExternal(ObjectOutput out) throws IOException {
    out.writeInt(Vocabulary.size());
    for (int i = 0; i < Vocabulary.size(); i++) {
      out.writeUTF(Vocabulary.word(i));
    }
    out.writeDouble(numTokens);
    out.writeInt(countFuncs.length);
    for (long[] countFunc : countFuncs) {
      out.writeLong(countFunc[0]);
      out.writeLong(countFunc[1]);
    }
    out.writeInt(typesFuncs.length);
    for (long[] typesFunc : typesFuncs) {
      out.writeLong(typesFunc[0]);
      out.writeLong(typesFunc[1]);
    }
    out.writeDouble(quantizationBase);
    bf.writeExternal(out);
  }

  /**
   * Returns the language model score for an n-gram. Called from the rest of the Joshua decoder.
   *
   * @param ngram the ngram to score
   * @param order the order of the model
   * @return the language model score of the ngram
   */
  @Override
  protected float ngramLogProbability_helper(int[] ngram, int order) {
    int[] lm_ngram = new int[ngram.length];
    for (int i = 0; i < ngram.length; i++) {
      lm_ngram[i] = Vocabulary.id(Vocabulary.word(ngram[i]));
    }
    return wittenBell(lm_ngram, order);
  }

  @Override
  public boolean isOov(int id) {
    int[] ngram = new int[] {id};
    int MAX_QCOUNT = getCount(ngram, ngram.length - 1, ngram.length, maxQ);
    return (MAX_QCOUNT == 0);
  }
}
package io.deephaven.engine.table.impl.tuplesource.generated;

import io.deephaven.chunk.Chunk;
import io.deephaven.chunk.DoubleChunk;
import io.deephaven.chunk.FloatChunk;
import io.deephaven.chunk.ShortChunk;
import io.deephaven.chunk.WritableChunk;
import io.deephaven.chunk.WritableObjectChunk;
import io.deephaven.chunk.attributes.Values;
import io.deephaven.datastructures.util.SmartKey;
import io.deephaven.engine.table.ColumnSource;
import io.deephaven.engine.table.TupleSource;
import io.deephaven.engine.table.WritableColumnSource;
import io.deephaven.engine.table.impl.tuplesource.AbstractTupleSource;
import io.deephaven.engine.table.impl.tuplesource.ThreeColumnTupleSourceFactory;
import io.deephaven.tuple.generated.ShortDoubleFloatTuple;
import io.deephaven.util.type.TypeUtils;
import org.jetbrains.annotations.NotNull;

/**
 * <p>{@link TupleSource} that produces key column values from {@link ColumnSource} types Short, Double, and Float.
 * <p>Generated by io.deephaven.replicators.TupleSourceCodeGenerator.
 */
// NOTE(review): generated file — make changes in TupleSourceCodeGenerator, not here.
@SuppressWarnings({"unused", "WeakerAccess"})
public class ShortDoubleFloatColumnTupleSource extends AbstractTupleSource<ShortDoubleFloatTuple> {

    /** {@link ThreeColumnTupleSourceFactory} instance to create instances of {@link ShortDoubleFloatColumnTupleSource}. **/
    public static final ThreeColumnTupleSourceFactory<ShortDoubleFloatTuple, Short, Double, Float> FACTORY = new Factory();

    private final ColumnSource<Short> columnSource1;
    private final ColumnSource<Double> columnSource2;
    private final ColumnSource<Float> columnSource3;

    public ShortDoubleFloatColumnTupleSource(
            @NotNull final ColumnSource<Short> columnSource1,
            @NotNull final ColumnSource<Double> columnSource2,
            @NotNull final ColumnSource<Float> columnSource3
    ) {
        super(columnSource1, columnSource2, columnSource3);
        this.columnSource1 = columnSource1;
        this.columnSource2 = columnSource2;
        this.columnSource3 = columnSource3;
    }

    /** Builds the tuple for {@code rowKey} from the current (primitive) column values. */
    @Override
    public final ShortDoubleFloatTuple createTuple(final long rowKey) {
        return new ShortDoubleFloatTuple(
                columnSource1.getShort(rowKey),
                columnSource2.getDouble(rowKey),
                columnSource3.getFloat(rowKey)
        );
    }

    /** Builds the tuple for {@code rowKey} from the previous-cycle column values. */
    @Override
    public final ShortDoubleFloatTuple createPreviousTuple(final long rowKey) {
        return new ShortDoubleFloatTuple(
                columnSource1.getPrevShort(rowKey),
                columnSource2.getPrevDouble(rowKey),
                columnSource3.getPrevFloat(rowKey)
        );
    }

    /** Builds a tuple from already-boxed values; each element is unboxed to its primitive form. */
    @Override
    public final ShortDoubleFloatTuple createTupleFromValues(@NotNull final Object... values) {
        return new ShortDoubleFloatTuple(
                TypeUtils.unbox((Short)values[0]),
                TypeUtils.unbox((Double)values[1]),
                TypeUtils.unbox((Float)values[2])
        );
    }

    /** Same as {@link #createTupleFromValues} — these column types have no reinterpreted form. */
    @Override
    public final ShortDoubleFloatTuple createTupleFromReinterpretedValues(@NotNull final Object... values) {
        return new ShortDoubleFloatTuple(
                TypeUtils.unbox((Short)values[0]),
                TypeUtils.unbox((Double)values[1]),
                TypeUtils.unbox((Float)values[2])
        );
    }

    /** Writes the selected tuple element into {@code writableSource} at {@code destinationRowKey}. */
    @SuppressWarnings("unchecked")
    @Override
    public final <ELEMENT_TYPE> void exportElement(@NotNull final ShortDoubleFloatTuple tuple, final int elementIndex, @NotNull final WritableColumnSource<ELEMENT_TYPE> writableSource, final long destinationRowKey) {
        if (elementIndex == 0) {
            writableSource.set(destinationRowKey, tuple.getFirstElement());
            return;
        }
        if (elementIndex == 1) {
            writableSource.set(destinationRowKey, tuple.getSecondElement());
            return;
        }
        if (elementIndex == 2) {
            writableSource.set(destinationRowKey, tuple.getThirdElement());
            return;
        }
        throw new IndexOutOfBoundsException("Invalid element index " + elementIndex + " for export");
    }

    /** Exports the tuple as a {@link SmartKey} of boxed values. */
    @Override
    public final Object exportToExternalKey(@NotNull final ShortDoubleFloatTuple tuple) {
        return new SmartKey(
                TypeUtils.box(tuple.getFirstElement()),
                TypeUtils.box(tuple.getSecondElement()),
                TypeUtils.box(tuple.getThirdElement())
        );
    }

    /** Returns the boxed value of the selected tuple element. */
    @Override
    public final Object exportElement(@NotNull final ShortDoubleFloatTuple tuple, int elementIndex) {
        if (elementIndex == 0) {
            return TypeUtils.box(tuple.getFirstElement());
        }
        if (elementIndex == 1) {
            return TypeUtils.box(tuple.getSecondElement());
        }
        if (elementIndex == 2) {
            return TypeUtils.box(tuple.getThirdElement());
        }
        throw new IllegalArgumentException("Bad elementIndex for 3 element tuple: " + elementIndex);
    }

    /** Same as {@link #exportElement(ShortDoubleFloatTuple, int)} — no reinterpreted form for these types. */
    @Override
    public final Object exportElementReinterpreted(@NotNull final ShortDoubleFloatTuple tuple, int elementIndex) {
        if (elementIndex == 0) {
            return TypeUtils.box(tuple.getFirstElement());
        }
        if (elementIndex == 1) {
            return TypeUtils.box(tuple.getSecondElement());
        }
        if (elementIndex == 2) {
            return TypeUtils.box(tuple.getThirdElement());
        }
        throw new IllegalArgumentException("Bad elementIndex for 3 element tuple: " + elementIndex);
    }

    /** Converts parallel per-column chunks into a chunk of tuples, element by element. */
    @Override
    protected void convertChunks(@NotNull WritableChunk<? super Values> destination, int chunkSize, Chunk<Values> [] chunks) {
        WritableObjectChunk<ShortDoubleFloatTuple, ? super Values> destinationObjectChunk = destination.asWritableObjectChunk();
        ShortChunk<Values> chunk1 = chunks[0].asShortChunk();
        DoubleChunk<Values> chunk2 = chunks[1].asDoubleChunk();
        FloatChunk<Values> chunk3 = chunks[2].asFloatChunk();
        for (int ii = 0; ii < chunkSize; ++ii) {
            destinationObjectChunk.set(ii, new ShortDoubleFloatTuple(chunk1.get(ii), chunk2.get(ii), chunk3.get(ii)));
        }
        destinationObjectChunk.setSize(chunkSize);
    }

    /** {@link ThreeColumnTupleSourceFactory} for instances of {@link ShortDoubleFloatColumnTupleSource}. **/
    private static final class Factory implements ThreeColumnTupleSourceFactory<ShortDoubleFloatTuple, Short, Double, Float> {

        private Factory() {
        }

        @Override
        public TupleSource<ShortDoubleFloatTuple> create(
                @NotNull final ColumnSource<Short> columnSource1,
                @NotNull final ColumnSource<Double> columnSource2,
                @NotNull final ColumnSource<Float> columnSource3
        ) {
            return new ShortDoubleFloatColumnTupleSource(
                    columnSource1,
                    columnSource2,
                    columnSource3
            );
        }
    }
}
package tracing.backend.scheduler.causal;

import tracing.backend.Target;
import tracing.backend.TraceQueue;
import tracing.backend.scheduler.EvictionService;
import tracing.backend.scheduler.QueueSpliterator;
import tracing.backend.scheduler.Scheduler;
import tracing.backend.trace.EventType;
import tracing.backend.trace.MessageEvent;
import tracing.backend.trace.TraceEvent;

import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

/**
 * Schedules an event order based on the causality of events, that is, following the happened-before relation.
 */
public class CausalScheduler implements Scheduler {

    /** Number of processed events between starts of the {@link EvictionService}. */
    public static final int EVICTION_INTERVAL = 5000;

    // all targets of the system
    private Collection<Target> targets;

    // maps message IDs to message events to remember send events
    private final HashMap<String, MessageEvent> sendMsgMap = new HashMap<>();

    // output queue
    public final BlockingQueue<TraceEvent> resultQueue = new LinkedBlockingQueue<>();

    // still running? BUGFIX: must be volatile — stop() flips this from another
    // thread, and without volatile the run() loop may never observe the write.
    private volatile boolean running = true;

    // maps target IDs to input queues
    private final Map<String, TraceQueue> queueMap = new HashMap<>();

    // global sequence number assigned to events
    private final AtomicLong globalSeq = new AtomicLong(0);

    @Override
    public void setTargets(Collection<Target> targets) {
        this.targets = targets;
        targets.forEach(target -> queueMap.put(target.getTargetId(), target.getTraceQueue()));
    }

    @Override
    public void run() {
        AtomicLong operations = new AtomicLong(0);
        // while not stopped...
        while (running) {
            // try getting the oldest TraceEvent element from every trace, blocking if one trace is empty
            var candidates = this.targets.stream()
                    .map(t -> t.getTraceQueue().blockingPeek())
                    // sort by local timestamps to get initial order
                    .sorted(Comparator.comparing(TraceEvent::getLocalTimestamp))
                    .collect(Collectors.toList());

            // process candidates in order, breaking as soon as the first event can be processed
            for (var event : candidates) {
                if (event.getEventType() == EventType.SEND) {
                    var send = (MessageEvent) event;
                    // set vector clock
                    var clock = event.getTarget().incrementVectorClock();
                    event.setVectorClock(clock);
                    // add to send map to remember for receive event(s)
                    this.sendMsgMap.put(send.getMsgId(), send);
                } else if (event.getEventType() == EventType.RECEIVE) {
                    var receiveMsg = (MessageEvent) event;
                    var msgId = receiveMsg.getMsgId();
                    var sendMsg = this.sendMsgMap.get(msgId);
                    if (sendMsg == null) {
                        // matching send not processed yet; try the next candidate.
                        // NOTE(review): if no candidate is processable this loop
                        // re-peeks and spins — confirm upstream guarantees sends
                        // eventually appear.
                        System.out.println("no send event to this receive event");
                        continue; // don't process receive yet
                    }
                    // receive event depends on send event
                    event.setDependency(sendMsg.getGlobalEventId());
                    sendMsg.addParticipant(event.getTargetId()); // set sender
                    receiveMsg.addParticipant(sendMsg.getTargetId()); // add receiver
                    // set vector clock by merging the sender's clock into ours
                    var senderClock = sendMsg.getVectorClock();
                    var clock = event.getTarget().merge(senderClock);
                    event.setVectorClock(clock);
                } else { // internal
                    // set vector clock
                    var clock = event.getTarget().incrementVectorClock();
                    event.setVectorClock(clock);
                }

                // remove processed event from input queue
                this.queueMap.get(event.getTargetId()).remove();
                // set global sequence number
                event.setGlobalEventId(globalSeq.incrementAndGet());
                // add to output
                this.resultQueue.add(event);
                // remember last event on target
                event.getTarget().setLastTraceEvent(event);

                // periodically kick off the EvictionService (presumably to
                // prune stale sendMsgMap entries — see EvictionService)
                if (operations.incrementAndGet() == EVICTION_INTERVAL) {
                    operations.set(0);
                    new EvictionService(targets, sendMsgMap).start();
                }
                // stop here, continue with next set of candidates
                break;
            }
        }
    }

    /**
     * Runs this scheduler on a freshly started thread.
     *
     * @return the started thread, so callers can join on it
     */
    public Thread start() {
        var t = new Thread(this);
        t.start();
        return t;
    }

    /** Requests the run loop to terminate after the current iteration. */
    @Override
    public void stop() {
        this.running = false;
    }

    /**
     * Create result/output stream from queue.
     * @return output stream
     */
    @Override
    public Stream<TraceEvent> resultStream() {
        return StreamSupport.stream(new QueueSpliterator<>(resultQueue), false);
    }
}
/*
 * Copyright © 2014 Cask Data, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package co.cask.cdap.common.discovery;

import org.apache.twill.discovery.Discoverable;
import org.apache.twill.discovery.ServiceDiscovered;

import java.util.Iterator;
import java.util.concurrent.ThreadLocalRandom;

/**
 * Randomly picks endpoint from the list of available endpoints.
 */
public final class RandomEndpointStrategy extends AbstractEndpointStrategy {

  /**
   * Constructs a random endpoint strategy with the given {@link ServiceDiscovered}.
   */
  public RandomEndpointStrategy(ServiceDiscovered serviceDiscovered) {
    super(serviceDiscovered);
  }

  @Override
  public Discoverable pick() {
    // Reservoir sampling with a reservoir of size one: the i-th candidate
    // replaces the current choice with probability 1/i, so after the scan
    // each endpoint is selected uniformly without knowing the count upfront.
    Discoverable chosen = null;
    int seen = 0;
    for (Iterator<Discoverable> candidates = serviceDiscovered.iterator(); candidates.hasNext(); ) {
      Discoverable candidate = candidates.next();
      seen++;
      if (ThreadLocalRandom.current().nextInt(seen) == 0) {
        chosen = candidate;
      }
    }
    return chosen;
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.kafkaconnector.azureeventhubssink;

import java.util.Map;
import javax.annotation.Generated;
import org.apache.camel.kafkaconnector.CamelSinkConnectorConfig;
import org.apache.kafka.common.config.ConfigDef;

/**
 * Kafka Connect configuration for the azure-eventhubs-sink Kamelet sink
 * connector: property keys, their documentation strings, and defaults.
 */
// NOTE(review): generated file — regenerate via camel-kafka-connector-generator-maven-plugin
// rather than editing by hand (see the @Generated annotation below).
@Generated("This class has been generated by camel-kafka-connector-generator-maven-plugin, remove this annotation to prevent it from being generated.")
public class CamelAzureeventhubssinkSinkConnectorConfig extends CamelSinkConnectorConfig {

    // Event Hubs namespace
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_NAMESPACE_NAME_CONF = "camel.kamelet.azure-eventhubs-sink.namespaceName";
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_NAMESPACE_NAME_DOC = "The eventhubs namespace";
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_NAMESPACE_NAME_DEFAULT = null;
    // Event Hub name
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_EVENTHUB_NAME_CONF = "camel.kamelet.azure-eventhubs-sink.eventhubName";
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_EVENTHUB_NAME_DOC = "The eventhub name";
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_EVENTHUB_NAME_DEFAULT = null;
    // Shared Access Signature credentials (the key itself is registered as a
    // PASSWORD-typed property in conf() so Kafka Connect masks it)
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_NAME_CONF = "camel.kamelet.azure-eventhubs-sink.sharedAccessName";
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_NAME_DOC = "EventHubs SAS key name";
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_NAME_DEFAULT = null;
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_KEY_CONF = "camel.kamelet.azure-eventhubs-sink.sharedAccessKey";
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_KEY_DOC = "The key for EventHubs SAS key name";
    public static final String CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_KEY_DEFAULT = null;

    public CamelAzureeventhubssinkSinkConnectorConfig(
            ConfigDef config,
            Map<String, String> parsedConfig) {
        super(config, parsedConfig);
    }

    public CamelAzureeventhubssinkSinkConnectorConfig(
            Map<String, String> parsedConfig) {
        this(conf(), parsedConfig);
    }

    /**
     * Builds the connector's {@link ConfigDef}, extending the base sink
     * config with the four Kamelet properties declared above.
     */
    public static ConfigDef conf() {
        ConfigDef conf = new ConfigDef(CamelSinkConnectorConfig.conf());
        conf.define(CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_NAMESPACE_NAME_CONF, ConfigDef.Type.STRING, CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_NAMESPACE_NAME_DEFAULT, ConfigDef.Importance.HIGH, CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_NAMESPACE_NAME_DOC);
        conf.define(CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_EVENTHUB_NAME_CONF, ConfigDef.Type.STRING, CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_EVENTHUB_NAME_DEFAULT, ConfigDef.Importance.HIGH, CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_EVENTHUB_NAME_DOC);
        conf.define(CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_NAME_CONF, ConfigDef.Type.STRING, CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_NAME_DEFAULT, ConfigDef.Importance.HIGH, CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_NAME_DOC);
        conf.define(CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_KEY_CONF, ConfigDef.Type.PASSWORD, CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_KEY_DEFAULT, ConfigDef.Importance.HIGH, CAMEL_SINK_AZUREEVENTHUBSSINK_KAMELET_SHARED_ACCESS_KEY_DOC);
        return conf;
    }
}
package com.xj.util;

import com.github.pagehelper.Page;
import com.github.pagehelper.PageInfo;
import org.apache.commons.beanutils.BeanUtils;

import java.io.Serializable;
import java.util.Collection;
import java.util.List;

/**
 * Pagination wrapper modeled after PageHelper's {@code PageInfo}, exposing the
 * result set both as {@code list} and — for grid components — as {@code rows}.
 */
public class PageData<T> implements Serializable {
    private static final long serialVersionUID = 1L;
    // current page number
    private int pageNum;
    // number of rows per page
    private int pageSize;
    // number of rows actually on the current page
    private int size;
    // startRow/endRow are rarely used; they let a UI render
    // "showing rows startRow to endRow of size".
    // database row number of the first element on this page
    private int startRow;
    // database row number of the last element on this page
    private int endRow;
    // total number of rows
    private long total;
    // total number of pages
    private int pages;
    // result set
    private List<T> list;
    // result set under the name many grid widgets expect
    private List<T> rows;
    // previous page number (0 when there is none)
    private int prePage;
    // next page number (0 when there is none)
    private int nextPage;
    // whether this is the first page
    private boolean isFirstPage = false;
    // whether this is the last page
    private boolean isLastPage = false;
    // whether a previous page exists
    private boolean hasPreviousPage = false;
    // whether a next page exists
    private boolean hasNextPage = false;
    // number of page links shown in the navigation bar
    private int navigatePages;
    // all page numbers shown in the navigation bar
    private int[] navigatepageNums;
    // first page number in the navigation bar
    private int navigateFirstPage;
    // last page number in the navigation bar
    private int navigateLastPage;

    public List<T> getRows() {
        return rows;
    }

    public void setRows(List<T> rows) {
        this.rows = rows;
    }

    public PageData() {
    }

    /**
     * Wraps a (possibly PageHelper-produced) list, showing 8 navigation links.
     *
     * @param list the query result
     */
    public PageData(List<T> list) {
        this(list, 8);
    }

    /**
     * Copies the pagination properties of {@code pageInfo} onto {@code pageData}
     * and moves the result list into {@code rows}.
     *
     * <p>Note: {@code BeanUtils.copyProperties(dest, orig)} — values flow from
     * {@code pageInfo} into {@code pageData}.
     */
    public PageData PageDataConvert(PageInfo pageInfo, PageData pageData) throws Exception {
        BeanUtils.copyProperties(pageData, pageInfo);
        pageData.setRows(pageData.getList());
        pageData.setList(null);
        return pageData;
    }

    /**
     * Wraps a query result.
     *
     * @param list the query result (a PageHelper {@link Page} or plain collection)
     * @param navigatePages number of page links to show in the navigation bar
     */
    public PageData(List<T> list, int navigatePages) {
        if (list instanceof Page) {
            Page page = (Page) list;
            this.pageNum = page.getPageNum();
            this.pageSize = page.getPageSize();
            this.pages = page.getPages();
            this.list = page;
            this.size = page.size();
            this.total = page.getTotal();
            // the query is exclusive of startRow, so the real first row is +1
            if (this.size == 0) {
                this.startRow = 0;
                this.endRow = 0;
            } else {
                this.startRow = page.getStartRow() + 1;
                // compute the real endRow (matters on the last page)
                this.endRow = this.startRow - 1 + this.size;
            }
        } else if (list instanceof Collection) {
            // a plain, unpaged collection is treated as a single page
            this.pageNum = 1;
            this.pageSize = list.size();
            this.pages = 1;
            this.list = list;
            this.size = list.size();
            this.total = list.size();
            this.startRow = 0;
            this.endRow = list.size() > 0 ? list.size() - 1 : 0;
        }
        if (list instanceof Collection) {
            this.navigatePages = navigatePages;
            // compute the navigation page numbers
            calcNavigatepageNums();
            // compute previous/next and the navigation bar's first/last pages
            calcPage();
            // set the page boundary flags
            judgePageBoundary();
        }
    }

    /**
     * Computes the page numbers shown in the navigation bar.
     */
    private void calcNavigatepageNums() {
        // when every page fits in the navigation bar
        if (pages <= navigatePages) {
            navigatepageNums = new int[pages];
            for (int i = 0; i < pages; i++) {
                navigatepageNums[i] = i + 1;
            }
        } else {
            // more pages than slots: center the window on the current page
            navigatepageNums = new int[navigatePages];
            int startNum = pageNum - navigatePages / 2;
            int endNum = pageNum + navigatePages / 2;
            if (startNum < 1) {
                startNum = 1;
                // clamp to the first navigatePages pages
                for (int i = 0; i < navigatePages; i++) {
                    navigatepageNums[i] = startNum++;
                }
            } else if (endNum > pages) {
                endNum = pages;
                // clamp to the last navigatePages pages
                for (int i = navigatePages - 1; i >= 0; i--) {
                    navigatepageNums[i] = endNum--;
                }
            } else {
                // window fits entirely in the middle
                for (int i = 0; i < navigatePages; i++) {
                    navigatepageNums[i] = startNum++;
                }
            }
        }
    }

    /**
     * Computes the previous/next page numbers and the navigation bar's
     * first/last pages.
     */
    private void calcPage() {
        if (navigatepageNums != null && navigatepageNums.length > 0) {
            navigateFirstPage = navigatepageNums[0];
            navigateLastPage = navigatepageNums[navigatepageNums.length - 1];
            if (pageNum > 1) {
                prePage = pageNum - 1;
            }
            if (pageNum < pages) {
                nextPage = pageNum + 1;
            }
        }
    }

    /**
     * Sets the page boundary flags. (Renamed from the misspelled private
     * method {@code judgePageBoudary}; no external callers are affected.)
     */
    private void judgePageBoundary() {
        isFirstPage = pageNum == 1;
        isLastPage = pageNum == pages;
        hasPreviousPage = pageNum > 1;
        hasNextPage = pageNum < pages;
    }

    public int getPageNum() {
        return pageNum;
    }

    public void setPageNum(int pageNum) {
        this.pageNum = pageNum;
    }

    public int getPageSize() {
        return pageSize;
    }

    public void setPageSize(int pageSize) {
        this.pageSize = pageSize;
    }

    public int getSize() {
        return size;
    }

    public void setSize(int size) {
        this.size = size;
    }

    public int getStartRow() {
        return startRow;
    }

    public void setStartRow(int startRow) {
        this.startRow = startRow;
    }

    public int getEndRow() {
        return endRow;
    }

    public void setEndRow(int endRow) {
        this.endRow = endRow;
    }

    public long getTotal() {
        return total;
    }

    public void setTotal(long total) {
        this.total = total;
    }

    public int getPages() {
        return pages;
    }

    public void setPages(int pages) {
        this.pages = pages;
    }

    public List<T> getList() {
        return list;
    }

    public void setList(List<T> list) {
        this.list = list;
    }

    @Deprecated
    // the first page is always 1; this returns the first page of the
    // navigation bar, which is easily confused with it
    public int getFirstPage() {
        return navigateFirstPage;
    }

    @Deprecated
    public void setFirstPage(int firstPage) {
        this.navigateFirstPage = firstPage;
    }

    public int getPrePage() {
        return prePage;
    }

    public void setPrePage(int prePage) {
        this.prePage = prePage;
    }

    public int getNextPage() {
        return nextPage;
    }

    public void setNextPage(int nextPage) {
        this.nextPage = nextPage;
    }

    @Deprecated
    // use getPages() for the last page; this returns the last page of the
    // navigation bar, which is easily confused with it
    public int getLastPage() {
        return navigateLastPage;
    }

    @Deprecated
    public void setLastPage(int lastPage) {
        this.navigateLastPage = lastPage;
    }

    public boolean isIsFirstPage() {
        return isFirstPage;
    }

    public void setIsFirstPage(boolean isFirstPage) {
        this.isFirstPage = isFirstPage;
    }

    public boolean isIsLastPage() {
        return isLastPage;
    }

    public void setIsLastPage(boolean isLastPage) {
        this.isLastPage = isLastPage;
    }

    public boolean isHasPreviousPage() {
        return hasPreviousPage;
    }

    public void setHasPreviousPage(boolean hasPreviousPage) {
        this.hasPreviousPage = hasPreviousPage;
    }

    public boolean isHasNextPage() {
        return hasNextPage;
    }

    public void setHasNextPage(boolean hasNextPage) {
        this.hasNextPage = hasNextPage;
    }

    public int getNavigatePages() {
        return navigatePages;
    }

    public void setNavigatePages(int navigatePages) {
        this.navigatePages = navigatePages;
    }

    public int[] getNavigatepageNums() {
        return navigatepageNums;
    }

    public void setNavigatepageNums(int[] navigatepageNums) {
        this.navigatepageNums = navigatepageNums;
    }

    public int getNavigateFirstPage() {
        return navigateFirstPage;
    }

    public int getNavigateLastPage() {
        return navigateLastPage;
    }

    public void setNavigateFirstPage(int navigateFirstPage) {
        this.navigateFirstPage = navigateFirstPage;
    }

    public void setNavigateLastPage(int navigateLastPage) {
        this.navigateLastPage = navigateLastPage;
    }

    @Override
    public String toString() {
        // BUGFIX: was labeled "PageInfo{" (copy-paste from PageHelper) and was
        // missing the "=" after navigateFirstPage/navigateLastPage, producing
        // malformed debug output. Also StringBuffer -> StringBuilder (no
        // shared-state synchronization is needed here).
        final StringBuilder sb = new StringBuilder("PageData{");
        sb.append("pageNum=").append(pageNum);
        sb.append(", pageSize=").append(pageSize);
        sb.append(", size=").append(size);
        sb.append(", startRow=").append(startRow);
        sb.append(", endRow=").append(endRow);
        sb.append(", total=").append(total);
        sb.append(", pages=").append(pages);
        sb.append(", list=").append(list);
        sb.append(", prePage=").append(prePage);
        sb.append(", nextPage=").append(nextPage);
        sb.append(", isFirstPage=").append(isFirstPage);
        sb.append(", isLastPage=").append(isLastPage);
        sb.append(", hasPreviousPage=").append(hasPreviousPage);
        sb.append(", hasNextPage=").append(hasNextPage);
        sb.append(", navigatePages=").append(navigatePages);
        sb.append(", navigateFirstPage=").append(navigateFirstPage);
        sb.append(", navigateLastPage=").append(navigateLastPage);
        sb.append(", navigatepageNums=");
        if (navigatepageNums == null) sb.append("null");
        else {
            sb.append('[');
            for (int i = 0; i < navigatepageNums.length; ++i)
                sb.append(i == 0 ? "" : ", ").append(navigatepageNums[i]);
            sb.append(']');
        }
        sb.append('}');
        return sb.toString();
    }
}
package uk.gov.hmcts.reform.finrem.caseorchestration.model.noc;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import lombok.Builder;
import lombok.Data;
import uk.gov.hmcts.reform.bsp.common.model.document.Addressee;

import java.util.Map;

/**
 * Data carried into a Notice of Change letter. Pure value holder: getters,
 * setters, equals/hashCode and a builder are generated by Lombok
 * ({@code @Data}/{@code @Builder}); unknown JSON properties are ignored on
 * deserialization.
 */
@Data
@Builder(toBuilder = true)
@JsonIgnoreProperties(ignoreUnknown = true)
public class NoticeOfChangeLetterDetails {
    // court metadata as a loose key/value map — presumably template
    // placeholder names; confirm against the letter template
    private Map<String, Object> courtDetails;
    // recipient name and address of the letter
    private Addressee addressee;
    private String caseNumber;
    private String reference;
    private String divorceCaseNumber;
    private String letterDate;
    private String applicantName;
    private String respondentName;
    private String solicitorFirmName;
    // body text describing the notice of change
    private String noticeOfChangeText;
}
package io.antmedia.test;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;

import org.apache.http.HttpEntity;
import org.apache.http.StatusLine;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.impl.client.CloseableHttpClient;
import org.awaitility.Awaitility;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.red5.server.api.IContext;
import org.red5.server.api.scope.IScope;
import org.red5.server.stream.ClientBroadcastStream;

import com.jmatio.io.stream.ByteBufferInputStream;

import io.antmedia.AntMediaApplicationAdapter;
import io.antmedia.AppSettings;
import io.antmedia.cluster.IClusterNotifier;
import io.antmedia.cluster.IClusterStore;
import io.antmedia.datastore.db.DataStore;
import io.antmedia.datastore.db.DataStoreFactory;
import io.antmedia.datastore.db.InMemoryDataStore;
import io.antmedia.datastore.db.types.Broadcast;
import io.antmedia.datastore.db.types.VoD;
import io.antmedia.integration.AppFunctionalTest;
import io.antmedia.muxer.MuxAdaptor;
import io.antmedia.security.AcceptOnlyStreamsInDataStore;
import io.antmedia.settings.ServerSettings;
import io.antmedia.statistic.type.WebRTCAudioReceiveStats;
import io.antmedia.statistic.type.WebRTCAudioSendStats;
import io.antmedia.statistic.type.WebRTCVideoReceiveStats;
import io.antmedia.statistic.type.WebRTCVideoSendStats;
import io.antmedia.streamsource.StreamFetcher;
import io.antmedia.streamsource.StreamFetcherManager;
import io.vertx.core.Vertx;

/**
 * Unit tests for {@link AntMediaApplicationAdapter}: settings propagation,
 * VoD folder synchronization, muxing-finished hooks, HTTP webhook posting,
 * shutdown behavior and encoder/publish error counters.
 *
 * NOTE(review): several tests touch the real filesystem ("webapps", "src/test/resources")
 * and rely on wall-clock waits via Awaitility.
 */
public class AntMediaApplicationAdaptorUnitTest {

	// Fresh adapter under test, rebuilt in before() for every test.
	AntMediaApplicationAdapter adapter;
	String streamsFolderPath = "webapps/test/streams";
	Vertx vertx = Vertx.vertx();

	/** Creates the adapter and the webapps/junit/WEB-INF directory layout the tests expect. */
	@Before
	public void before() {
		adapter = new AntMediaApplicationAdapter();
		adapter.setVertx(vertx);
		// Clear any leftover streams folder from a previous run.
		File f = new File(streamsFolderPath);
		try {
			AppFunctionalTest.delete(f);
		} catch (IOException e) {
			e.printStackTrace();
		}
		File webApps = new File("webapps");
		if (!webApps.exists()) {
			webApps.mkdirs();
		}
		File junit = new File(webApps, "junit");
		if (!junit.exists()) {
			junit.mkdirs();
		}
		File webinf = new File(junit, "WEB-INF");
		if (!webinf.exists()) {
			webinf.mkdirs();
		}
	}

	/** Drops the adapter and removes the webapps tree created in before(). */
	@After
	public void after() {
		adapter = null;
		try {
			AppFunctionalTest.delete(new File("webapps"));
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * updateSettings must push settings to the cluster store exactly once per
	 * updateToDataStore=true call, and not at all when updateToDataStore=false.
	 */
	@Test
	public void testAppSettings() {
		AppSettings settings = new AppSettings();
		AppSettings newSettings = Mockito.spy(new AppSettings());
		newSettings.setVodFolder("");
		newSettings.setHlsPlayListType("");
		newSettings.setTokenHashSecret("");
		IScope scope = mock(IScope.class);
		when(scope.getName()).thenReturn("junit");
		AntMediaApplicationAdapter spyAdapter = Mockito.spy(adapter);
		IContext context = mock(IContext.class);
		when(context.getBean(Mockito.any())).thenReturn(mock(AcceptOnlyStreamsInDataStore.class));
		when(scope.getContext()).thenReturn(context);
		Mockito.doReturn(mock(DataStore.class)).when(spyAdapter).getDataStore();
		spyAdapter.setAppSettings(settings);
		spyAdapter.setScope(scope);
		// First update happens before a cluster notifier exists.
		spyAdapter.updateSettings(newSettings, true);
		IClusterNotifier clusterNotifier = mock(IClusterNotifier.class);
		IClusterStore clusterStore = mock(IClusterStore.class);
		when(clusterNotifier.getClusterStore()).thenReturn(clusterStore);
		spyAdapter.setClusterNotifier(clusterNotifier);
		spyAdapter.updateSettings(newSettings, true);
		verify(clusterNotifier, times(1)).getClusterStore();
		verify(clusterStore, times(1)).saveSettings(settings);
		spyAdapter.updateSettings(newSettings, false);
		//it should not change times(1) because we don't want it to update the datastore
		verify(clusterNotifier, times(1)).getClusterStore();
		verify(clusterStore, times(1)).saveSettings(settings);
	}

	/**
	 * synchUserVoDFolder must link src/test/resources under the streams folder and
	 * register each media file it finds as a VoD in the datastore.
	 */
	@Test
	public void testSynchUserVoD() {
		File streamsFolder = new File(streamsFolderPath);
		if (!streamsFolder.exists()) {
			assertTrue(streamsFolder.mkdirs());
		}
		DataStore dataStore = new InMemoryDataStore("dbname");
		DataStoreFactory dsf = Mockito.mock(DataStoreFactory.class);
		Mockito.when(dsf.getDataStore()).thenReturn(dataStore);
		adapter.setDataStoreFactory(dsf);
		IScope scope = Mockito.mock(IScope.class);
		Mockito.when(scope.getName()).thenReturn("test");
		AntMediaApplicationAdapter spyAdapter = Mockito.spy(adapter);
		Mockito.doReturn(scope).when(spyAdapter).getScope();
		File realPath = new File("src/test/resources");
		assertTrue(realPath.exists());
		String linkFilePath = streamsFolder.getAbsolutePath() + "/resources";
		File linkFile = new File(linkFilePath);
		//Files.isSymbolicLink(linkFile.toPath());
		try {
			Files.deleteIfExists(linkFile.toPath());
		} catch (IOException e) {
			e.printStackTrace();
			fail(e.getMessage());
		}
		boolean result = spyAdapter.synchUserVoDFolder(null, realPath.getAbsolutePath());
		assertTrue(result);
		//we know there are 5 files in src/test/resources
		//test_short.flv
		//test_video_360p_subtitle.flv
		//test_Video_360p.flv
		//test.flv
		//sample_MP4_480.mp4
		List<VoD> vodList = dataStore.getVodList(0, 50);
		assertEquals(5, vodList.size());
		for (VoD voD : vodList) {
			assertEquals("streams/resources/" + voD.getVodName(), voD.getFilePath());
		}
		linkFile = new File(streamsFolder, "resources");
		assertTrue(linkFile.exists());
	}

	/**
	 * muxingFinished must run the configured finish script (echo.sh creates
	 * hello_script) and must not run anything when the script setting is empty.
	 */
	@Test
	public void testMuxingFinished() {
		AppSettings appSettings = new AppSettings();
		appSettings.setMuxerFinishScript("src/test/resources/echo.sh");
		adapter.setAppSettings(appSettings);
		File f = new File ("src/test/resources/hello_script");
		DataStore dataStore = new InMemoryDataStore("dbname");
		DataStoreFactory dsf = Mockito.mock(DataStoreFactory.class);
		Mockito.when(dsf.getDataStore()).thenReturn(dataStore);
		adapter.setDataStoreFactory(dsf);
		adapter.setVertx(Vertx.vertx());
		File anyFile = new File("src/test/resources/sample_MP4_480.mp4");
		{
			// Script configured: the side-effect file must appear.
			assertFalse(f.exists());
			adapter.muxingFinished("streamId", anyFile, 100, 480);
			Awaitility.await().atMost(5, TimeUnit.SECONDS).until(()-> f.exists());
			try {
				Files.delete(f.toPath());
			} catch (IOException e) {
				e.printStackTrace();
				fail(e.getMessage());
			}
		}
		{
			// Script cleared: the side-effect file must NOT appear within the window.
			appSettings.setMuxerFinishScript("");
			adapter.setAppSettings(appSettings);
			assertFalse(f.exists());
			adapter.muxingFinished("streamId", anyFile, 100, 480);
			Awaitility.await().pollDelay(3, TimeUnit.SECONDS).atMost(4, TimeUnit.SECONDS).until(()-> !f.exists());
		}
	}

	/** runScript must execute the given shell script asynchronously (verified via its side-effect file). */
	@Test
	public void testRunMuxerScript() {
		File f = new File ("src/test/resources/hello_script");
		assertFalse(f.exists());
		adapter.setVertx(Vertx.vertx());
		adapter.runScript("src/test/resources/echo.sh");
		Awaitility.await().atMost(5, TimeUnit.SECONDS).until(()-> f.exists());
		try {
			Files.delete(f.toPath());
		} catch (IOException e) {
			e.printStackTrace();
			fail(e.getMessage());
		}
	}

	/**
	 * sendPOST must return null when the response has no entity, and the entity's
	 * content (10 bytes here) when one is present.
	 */
	@Test
	public void testSendPost() {
		try {
			AntMediaApplicationAdapter spyAdaptor = Mockito.spy(adapter);
			CloseableHttpClient httpClient = Mockito.mock(CloseableHttpClient.class);
			Mockito.doReturn(httpClient).when(spyAdaptor).getHttpClient();
			CloseableHttpResponse httpResponse = Mockito.mock(CloseableHttpResponse.class);
			Mockito.when(httpClient.execute(Mockito.any())).thenReturn(httpResponse);
			Mockito.when(httpResponse.getStatusLine()).thenReturn(Mockito.mock(StatusLine.class));
			// No entity in the response -> null result expected.
			Mockito.when(httpResponse.getEntity()).thenReturn(null);
			StringBuilder response = spyAdaptor.sendPOST("http://any_url", new HashMap());
			assertNull(response);
			HttpEntity entity = Mockito.mock(HttpEntity.class);
			InputStream is = new ByteBufferInputStream(ByteBuffer.allocate(10), 10);
			Mockito.when(entity.getContent()).thenReturn(is);
			Mockito.when(httpResponse.getEntity()).thenReturn(entity);
			HashMap map = new HashMap();
			map.put("action", "action_any");
			response = spyAdaptor.sendPOST("http://any_url", map);
			assertNotNull(response);
			assertEquals(10, response.length());
		} catch (IOException e) {
			e.printStackTrace();
			fail(e.getMessage());
		}
	}

	/**
	 * notifyHook must be a no-op for null/empty URLs, and otherwise POST a variable
	 * map containing exactly the non-null fields it was given.
	 */
	@Test
	public void testNotifyHook() {
		AntMediaApplicationAdapter spyAdaptor = Mockito.spy(adapter);
		StringBuilder notifyHook = spyAdaptor.notifyHook(null, null, null, null, null, null, null);
		assertNull(notifyHook);
		notifyHook = spyAdaptor.notifyHook("", null, null, null, null, null, null);
		assertNull(notifyHook);
		String id = String.valueOf((Math.random() * 10000));
		String action = "any_action";
		String streamName = String.valueOf((Math.random() * 10000));
		String category = "category";
		String vodName = "vod name" + String.valueOf((Math.random() * 10000));
		String vodId = String.valueOf((Math.random() * 10000));
		String url = "this is url";
		notifyHook = spyAdaptor.notifyHook(url, id, action, streamName, category, vodName, vodId);
		assertNull(notifyHook);
		try {
			// All fields supplied: every key must be present in the POSTed map.
			ArgumentCaptor<String> captureUrl = ArgumentCaptor.forClass(String.class);
			ArgumentCaptor<Map> variables = ArgumentCaptor.forClass(Map.class);
			Mockito.verify(spyAdaptor).sendPOST(captureUrl.capture(), variables.capture());
			assertEquals(url, captureUrl.getValue());
			Map variablesMap = variables.getValue();
			assertEquals(id, variablesMap.get("id"));
			assertEquals(action, variablesMap.get("action"));
			assertEquals(streamName, variablesMap.get("streamName"));
			assertEquals(category, variablesMap.get("category"));
			assertEquals(vodName, variablesMap.get("vodName"));
			assertEquals(vodId, variablesMap.get("vodId"));
		} catch (IOException e) {
			e.printStackTrace();
			fail(e.getMessage());
		}
		url = "this is second url";
		notifyHook = spyAdaptor.notifyHook(url, id, null, null, null, null, null);
		assertNull(notifyHook);
		try {
			// Only id supplied: every other key must be absent/null in the POSTed map.
			ArgumentCaptor<String> captureUrl = ArgumentCaptor.forClass(String.class);
			ArgumentCaptor<Map> variables = ArgumentCaptor.forClass(Map.class);
			Mockito.verify(spyAdaptor, Mockito.times(2)).sendPOST(captureUrl.capture(), variables.capture());
			assertEquals(url, captureUrl.getValue());
			Map variablesMap = variables.getValue();
			assertEquals(id, variablesMap.get("id"));
			assertNull(variablesMap.get("action"));
			assertNull(variablesMap.get("streamName"));
			assertNull(variablesMap.get("category"));
			assertNull(variablesMap.get("vodName"));
			assertNull(variablesMap.get("vodId"));
		} catch (IOException e) {
			e.printStackTrace();
			fail(e.getMessage());
		}
	}

	/**
	 * Exercises the four hook scenarios of muxingFinished: no hook URL anywhere,
	 * stream-specific hook URL, deleted stream without an app-level URL, and
	 * deleted stream with an app-level URL.
	 */
	@Test
	public void testNotifyHookFromMuxingFinished() {
		AntMediaApplicationAdapter spyAdaptor = Mockito.spy(adapter);
		AppSettings appSettings = new AppSettings();
		spyAdaptor.setAppSettings(appSettings);
		DataStore dataStore = new InMemoryDataStore("testHook");
		DataStoreFactory dsf = Mockito.mock(DataStoreFactory.class);
		Mockito.when(dsf.getDataStore()).thenReturn(dataStore);
		spyAdaptor.setDataStoreFactory(dsf);
		//get sample mp4 file from test resources
		File anyFile = new File("src/test/resources/sample_MP4_480.mp4");
		//create new broadcast
		Broadcast broadcast = new Broadcast();
		//save this broadcast to db
		String streamId = dataStore.save(broadcast);
		/*
		 * Scenario 1; Stream is saved to DB, but no Hook URL is defined either for stream and in AppSettings
		 * So, no hook is posted
		 */
		ArgumentCaptor<String> captureUrl = ArgumentCaptor.forClass(String.class);
		ArgumentCaptor<String> captureId = ArgumentCaptor.forClass(String.class);
		ArgumentCaptor<String> captureAction = ArgumentCaptor.forClass(String.class);
		ArgumentCaptor<String> captureStreamName = ArgumentCaptor.forClass(String.class);
		ArgumentCaptor<String> captureCategory = ArgumentCaptor.forClass(String.class);
		ArgumentCaptor<String> captureVodName = ArgumentCaptor.forClass(String.class);
		ArgumentCaptor<String> captureVodId = ArgumentCaptor.forClass(String.class);
		//call muxingFinished function
		spyAdaptor.muxingFinished(streamId, anyFile, 100, 480);
		//verify that notifyHook is never called
		verify(spyAdaptor, never()).notifyHook(captureUrl.capture(), captureId.capture(), captureAction.capture(),
				captureStreamName.capture(), captureCategory.capture(), captureVodName.capture(), captureVodId.capture());
		/*
		 * Scenario 2; hook URL is defined for stream and stream is in DB
		 * So hook is posted
		 */
		//define hook URL for stream specific
		broadcast.setListenerHookURL("listenerHookURL");
		broadcast.setName("name");
		//update broadcast
		dataStore.updateBroadcastFields(streamId, broadcast);
		//call muxingFinished function
		spyAdaptor.muxingFinished(streamId, anyFile, 100, 480);
		Awaitility.await().atMost(10, TimeUnit.SECONDS).until(()-> {
			boolean called = false;
			try {
				//verify that notifyHook is called 1 time
				verify(spyAdaptor, times(1)).notifyHook(captureUrl.capture(), captureId.capture(), captureAction.capture(),
						captureStreamName.capture(), captureCategory.capture(), captureVodName.capture(), captureVodId.capture());
				assertEquals(captureUrl.getValue(), broadcast.getListenerHookURL());
				assertEquals(captureId.getValue(), broadcast.getStreamId());
				assertEquals(captureVodName.getValue()+".mp4", anyFile.getName());
				called = true;
			} catch (Exception e) {
				e.printStackTrace();
			}
			return called;
		});
		/*
		 * Scenario 3; Stream is deleted from DB (zombi stream)
		 * also no HookURL is defined in AppSettins
		 * so no hook is posted
		 */
		//delete broadcast from db
		dataStore.delete(streamId);
		//call muxingFinished function
		spyAdaptor.muxingFinished(streamId, anyFile, 100, 480);
		Awaitility.await().atMost(10, TimeUnit.SECONDS).until(()-> {
			boolean called = false;
			try {
				//verify that no new notifyHook is called
				verify(spyAdaptor, times(1)).notifyHook(captureUrl.capture(), captureId.capture(), captureAction.capture(),
						captureStreamName.capture(), captureCategory.capture(), captureVodName.capture(), captureVodId.capture());
				called = true;
			} catch (Exception e) {
				e.printStackTrace();
			}
			return called;
		});
		/*
		 * Scenario 4; Stream is deleted from DB (zombi stream)
		 * but HookURL is defined in AppSettins
		 * so new hook is posted
		 */
		//set common hook URL
		appSettings.setListenerHookURL("listenerHookURL");
		//call muxingFinished function
		spyAdaptor.muxingFinished(streamId, anyFile, 100, 480);
		Awaitility.await().atMost(10, TimeUnit.SECONDS).until(()-> {
			boolean called = false;
			try {
				//verify that notifyHook is called 2 times
				verify(spyAdaptor, times(2)).notifyHook(captureUrl.capture(), captureId.capture(), captureAction.capture(),
						captureStreamName.capture(), captureCategory.capture(), captureVodName.capture(), captureVodId.capture());
				assertEquals(captureUrl.getValue(), broadcast.getListenerHookURL());
				assertEquals(captureId.getValue(), broadcast.getStreamId());
				assertEquals(captureVodName.getValue()+".mp4", anyFile.getName());
				called = true;
			} catch (Exception e) {
				e.printStackTrace();
			}
			return called;
		});
	}

	/**
	 * deleteOldFolderPath must reject empty/null/non-existent/non-directory inputs
	 * and only delete an actual sub-directory of the streams folder.
	 */
	@Test
	public void testSynchUserVodThrowException() {
		File f = new File(streamsFolderPath);
		assertTrue(f.mkdirs());
		File emptyFile = new File(streamsFolderPath, "emptyfile");
		emptyFile.deleteOnExit();
		try {
			assertTrue(emptyFile.createNewFile());
			boolean synchUserVoDFolder = adapter.deleteOldFolderPath("", f);
			assertFalse(synchUserVoDFolder);
			synchUserVoDFolder = adapter.deleteOldFolderPath(null, f);
			assertFalse(synchUserVoDFolder);
			synchUserVoDFolder = adapter.deleteOldFolderPath("anyfile", null);
			assertFalse(synchUserVoDFolder);
			synchUserVoDFolder = adapter.deleteOldFolderPath("notexist", f);
			assertFalse(synchUserVoDFolder);
			// A plain file (not a directory) must also be rejected.
			synchUserVoDFolder = adapter.deleteOldFolderPath(emptyFile.getName(), f);
			assertFalse(synchUserVoDFolder);
			File oldDir = new File (streamsFolderPath, "dir");
			oldDir.mkdirs();
			oldDir.deleteOnExit();
			synchUserVoDFolder = adapter.deleteOldFolderPath(oldDir.getName(), f);
			assertTrue(synchUserVoDFolder);
		} catch (IOException e) {
			e.printStackTrace();
			fail(e.getMessage());
		}
	}

	/**
	 * serverShuttingdown must stop stream fetchers, broadcast streams and mux
	 * adaptors, waiting until the local live broadcast count drops to zero
	 * (simulated by a background thread flipping the mocked count after 3s).
	 */
	@Test
	public void testShutDown() {
		IScope scope = mock(IScope.class);
		when(scope.getName()).thenReturn("test");
		adapter.setScope(scope);
		adapter.setServerSettings(Mockito.spy(new ServerSettings()));
		StreamFetcherManager sfm = mock(StreamFetcherManager.class);
		adapter.setStreamFetcherManager(sfm);
		StreamFetcher sf = mock(StreamFetcher.class);
		Queue<StreamFetcher> sfQueue = new ConcurrentLinkedQueue<StreamFetcher>();
		sfQueue.add(sf);
		when(sfm.getStreamFetcherList()).thenReturn(sfQueue);
		MuxAdaptor muxerAdaptor = mock(MuxAdaptor.class);
		adapter.muxAdaptorAdded(muxerAdaptor);
		Broadcast broadcast = mock(Broadcast.class);
		when(broadcast.getType()).thenReturn(AntMediaApplicationAdapter.LIVE_STREAM);
		ClientBroadcastStream cbs = mock(ClientBroadcastStream.class);
		when(muxerAdaptor.getBroadcastStream()).thenReturn(cbs);
		when(muxerAdaptor.getBroadcast()).thenReturn(broadcast);
		DataStore dataStore = mock(DataStore.class);
		when(dataStore.getLocalLiveBroadcastCount(Mockito.any())).thenReturn(1L);
		DataStoreFactory dataStoreFactory = mock(DataStoreFactory.class);
		when(dataStoreFactory.getDataStore()).thenReturn(dataStore);
		adapter.setDataStoreFactory(dataStoreFactory);
		// Simulate the last live broadcast ending while shutdown is waiting.
		new Thread() {
			public void run() {
				try {
					sleep(3000);
				} catch (InterruptedException e) {
					e.printStackTrace();
				}
				when(dataStore.getLocalLiveBroadcastCount(Mockito.any())).thenReturn(0L);
			};
		}.start();
		adapter.serverShuttingdown();
		verify(sf, times(1)).stopStream();
		verify(cbs, times(1)).stop();
		verify(muxerAdaptor, times(1)).stop();
	}

	/** Encoder-not-opened errors must accumulate per increment call. */
	@Test
	public void testEncoderBlocked() {
		assertEquals(0, adapter.getNumberOfEncodersBlocked());
		assertEquals(0, adapter.getNumberOfEncoderNotOpenedErrors());
		adapter.incrementEncoderNotOpenedError();
		adapter.incrementEncoderNotOpenedError();
		adapter.incrementEncoderNotOpenedError();
		assertEquals(3, adapter.getNumberOfEncoderNotOpenedErrors());
	}

	/** Publish-timeout errors must accumulate per reported stream. */
	@Test
	public void testPublishTimeout() {
		assertEquals(0, adapter.getNumberOfPublishTimeoutError());
		adapter.publishTimeoutError("streamId");
		assertEquals(1, adapter.getNumberOfPublishTimeoutError());
	}

	/** Fresh WebRTC send/receive stats objects must report non-null, zero counters. */
	@Test
	public void testStats() {
		WebRTCVideoReceiveStats receiveStats = new WebRTCVideoReceiveStats();
		assertNotNull(receiveStats.getVideoBytesReceivedPerSecond());
		assertEquals(BigInteger.ZERO, receiveStats.getVideoBytesReceivedPerSecond());
		assertNotNull(receiveStats.getVideoBytesReceived());
		assertEquals(BigInteger.ZERO, receiveStats.getVideoBytesReceived());
		WebRTCAudioReceiveStats audioReceiveStats = new WebRTCAudioReceiveStats();
		assertNotNull(audioReceiveStats.getAudioBytesReceivedPerSecond());
		assertEquals(BigInteger.ZERO, audioReceiveStats.getAudioBytesReceivedPerSecond());
		assertNotNull(audioReceiveStats.getAudioBytesReceived());
		assertEquals(BigInteger.ZERO, audioReceiveStats.getAudioBytesReceived());
		WebRTCVideoSendStats videoSendStats = new WebRTCVideoSendStats();
		assertNotNull(videoSendStats.getVideoBytesSentPerSecond());
		assertEquals(BigInteger.ZERO, videoSendStats.getVideoBytesSentPerSecond());
		assertNotNull(videoSendStats.getVideoBytesSent());
		assertEquals(BigInteger.ZERO, videoSendStats.getVideoBytesSent());
		WebRTCAudioSendStats audioSendStats = new WebRTCAudioSendStats();
		assertEquals(BigInteger.ZERO, audioSendStats.getAudioBytesSent());
		assertEquals(BigInteger.ZERO, audioSendStats.getAudioBytesSentPerSecond());
	}

	/** The blocked-encoder counter must track block/unblock transitions per stream. */
	@Test
	public void testEncoderBlockedList() {
		assertEquals(0, adapter.getNumberOfEncodersBlocked());
		adapter.encoderBlocked("stream1", false);
		assertEquals(0, adapter.getNumberOfEncodersBlocked());
		adapter.encoderBlocked("stream1", true);
		assertEquals(1, adapter.getNumberOfEncodersBlocked());
		adapter.encoderBlocked("stream2", true);
		adapter.encoderBlocked("stream3", true);
		assertEquals(3, adapter.getNumberOfEncodersBlocked());
		adapter.encoderBlocked("stream2", false);
		adapter.encoderBlocked("stream3", false);
		adapter.encoderBlocked("stream1", false);
		assertEquals(0, adapter.getNumberOfEncodersBlocked());
	}
}
package com.sphenon.basics.monitoring; /**************************************************************************** Copyright 2001-2018 Sphenon GmbH Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. *****************************************************************************/ import com.sphenon.basics.context.*; import com.sphenon.basics.variatives.*; import com.sphenon.basics.variatives.classes.*; public class MonitoringStringPool extends StringPoolClass { static protected MonitoringStringPool singleton = null; static public MonitoringStringPool getSingleton (CallContext cc) { if (singleton == null) { singleton = new MonitoringStringPool(cc); } return singleton; } static public VariativeString get(CallContext cc, String id) { return VariativeStringClass.createVariativeStringClass(cc, id, getSingleton(cc)); } static public String get(CallContext cc, String id, String isolang) { return getSingleton(cc).getString(cc, id, isolang); } protected MonitoringStringPool (CallContext cc) { super(cc); /*************************************************/ /* THE FOLLOWING SECTION IS PARTIALLY GENERATED. */ /* BE CAREFUL WHEN EDITING MANUALLY ! */ /* */ /* See StringPool.java for explanation. 
*/ /*************************************************/ //BEGINNING-OF-STRINGS //P-0-com.sphenon.basics.monitoring //F-0-0-Factory_ProblemState.java addEntry(cc, "0.0.0", "en", "Invalid state parameter for Factory_ProblemState, got '%(got)', expected one of UNKNOWN, OK, INFO, NOTICE, WARNING, SEVERE_WARNING, ERROR, CRITICAL_ERROR, FATAL_ERROR, EMERGENCY, PANIC"); addEntry(cc, "0.0.0", "en", "Ungültiger Zustands-Parameter für Factory_ProblemState, erhalten '%(got)', erwartet wurde einer der folgenden Werte: UNKNOWN, OK, INFO, NOTICE, WARNING, SEVERE_WARNING, ERROR, CRITICAL_ERROR, FATAL_ERROR, EMERGENCY, PANIC"); addEntry(cc, "0.0.1", "en", "At least one factory parameter is invalid"); addEntry(cc, "0.0.1", "de", "Mindestens ein Fabrik-Parameter ist ungültig"); //END-OF-STRINGS /*************************************************/ } }
/* @@author wayneswq */ package seedu.address.model.person; import static com.google.common.base.Ascii.toUpperCase; import static java.util.Objects.requireNonNull; import static seedu.address.commons.util.AppUtil.checkArgument; /** * Represents a Person's gender in the docX. * Guarantees: immutable; is valid as declared in {@link #isValidGender(String)} */ public class Gender { public static final String MESSAGE_CONSTRAINTS = "Gender should be either F or M or f or m. Auto-converts to uppercase for readability"; public static final String VALIDATION_REGEX = "[MFmf]"; public final String value; /** * Constructs a {@code Gender}. * * @param gender A valid gender string. */ public Gender(String gender) { requireNonNull(gender); checkArgument(isValidGender(gender), MESSAGE_CONSTRAINTS); value = toUpperCase(gender); } /** * Returns true if a given string is a valid gender number. */ public static boolean isValidGender(String test) { return test.matches(VALIDATION_REGEX); } @Override public String toString() { return value; } @Override public boolean equals(Object other) { return other == this // short circuit if same object || (other instanceof Gender // instanceof handles nulls && value.equals(((Gender) other).value)); // state check } @Override public int hashCode() { return value.hashCode(); } }
package com.cmkj.mall.model.ums;

import io.swagger.annotations.ApiModelProperty;

import java.io.Serializable;
import java.math.BigDecimal;

/**
 * Member level entity (ums_member_level): growth-point threshold plus the
 * privilege flags granted at this level. Plain JavaBean with generated-style
 * getters/setters; field semantics are documented by the Swagger annotations.
 */
public class UmsMemberLevel implements Serializable {

    private Long id;

    // Level display name.
    private String name;

    // Growth points required to reach this level.
    private Integer growthPoint;

    @ApiModelProperty(value = "是否为默认等级:0->不是;1->是")
    private Integer defaultStatus;

    @ApiModelProperty(value = "免运费标准")
    private BigDecimal freeFreightPoint;

    @ApiModelProperty(value = "每次评价获取的成长值")
    private Integer commentGrowthPoint;

    @ApiModelProperty(value = "是否有免邮特权")
    private Integer priviledgeFreeFreight;

    @ApiModelProperty(value = "是否有签到特权")
    private Integer priviledgeSignIn;

    @ApiModelProperty(value = "是否有评论获奖励特权")
    private Integer priviledgeComment;

    @ApiModelProperty(value = "是否有专享活动特权")
    private Integer priviledgePromotion;

    @ApiModelProperty(value = "是否有会员价格特权")
    private Integer priviledgeMemberPrice;

    @ApiModelProperty(value = "是否有生日特权")
    private Integer priviledgeBirthday;

    // Free-form remark.
    private String note;

    private static final long serialVersionUID = 1L;

    public Long getId() {
        return id;
    }

    public void setId(Long id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public Integer getGrowthPoint() {
        return growthPoint;
    }

    public void setGrowthPoint(Integer growthPoint) {
        this.growthPoint = growthPoint;
    }

    public Integer getDefaultStatus() {
        return defaultStatus;
    }

    public void setDefaultStatus(Integer defaultStatus) {
        this.defaultStatus = defaultStatus;
    }

    public BigDecimal getFreeFreightPoint() {
        return freeFreightPoint;
    }

    public void setFreeFreightPoint(BigDecimal freeFreightPoint) {
        this.freeFreightPoint = freeFreightPoint;
    }

    public Integer getCommentGrowthPoint() {
        return commentGrowthPoint;
    }

    public void setCommentGrowthPoint(Integer commentGrowthPoint) {
        this.commentGrowthPoint = commentGrowthPoint;
    }

    public Integer getPriviledgeFreeFreight() {
        return priviledgeFreeFreight;
    }

    public void setPriviledgeFreeFreight(Integer priviledgeFreeFreight) {
        this.priviledgeFreeFreight = priviledgeFreeFreight;
    }

    public Integer getPriviledgeSignIn() {
        return priviledgeSignIn;
    }

    public void setPriviledgeSignIn(Integer priviledgeSignIn) {
        this.priviledgeSignIn = priviledgeSignIn;
    }

    public Integer getPriviledgeComment() {
        return priviledgeComment;
    }

    public void setPriviledgeComment(Integer priviledgeComment) {
        this.priviledgeComment = priviledgeComment;
    }

    public Integer getPriviledgePromotion() {
        return priviledgePromotion;
    }

    public void setPriviledgePromotion(Integer priviledgePromotion) {
        this.priviledgePromotion = priviledgePromotion;
    }

    public Integer getPriviledgeMemberPrice() {
        return priviledgeMemberPrice;
    }

    public void setPriviledgeMemberPrice(Integer priviledgeMemberPrice) {
        this.priviledgeMemberPrice = priviledgeMemberPrice;
    }

    public Integer getPriviledgeBirthday() {
        return priviledgeBirthday;
    }

    public void setPriviledgeBirthday(Integer priviledgeBirthday) {
        this.priviledgeBirthday = priviledgeBirthday;
    }

    public String getNote() {
        return note;
    }

    public void setNote(String note) {
        this.note = note;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append(getClass().getSimpleName());
        sb.append(" [");
        sb.append("Hash = ").append(hashCode());
        sb.append(", id=").append(id);
        sb.append(", name=").append(name);
        sb.append(", growthPoint=").append(growthPoint);
        sb.append(", defaultStatus=").append(defaultStatus);
        sb.append(", freeFreightPoint=").append(freeFreightPoint);
        sb.append(", commentGrowthPoint=").append(commentGrowthPoint);
        sb.append(", priviledgeFreeFreight=").append(priviledgeFreeFreight);
        sb.append(", priviledgeSignIn=").append(priviledgeSignIn);
        sb.append(", priviledgeComment=").append(priviledgeComment);
        sb.append(", priviledgePromotion=").append(priviledgePromotion);
        sb.append(", priviledgeMemberPrice=").append(priviledgeMemberPrice);
        sb.append(", priviledgeBirthday=").append(priviledgeBirthday);
        sb.append(", note=").append(note);
        sb.append(", serialVersionUID=").append(serialVersionUID);
        sb.append("]");
        return sb.toString();
    }
}
/*
 * Copyright 2004 Clinton Begin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.ibatis.sqlmap.engine.cache.lru;

import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import com.ibatis.sqlmap.engine.cache.CacheController;
import com.ibatis.sqlmap.engine.cache.CacheModel;

/**
 * LRU (least recently used) cache controller implementation.
 *
 * Recency is tracked in {@code keyList}: the last element is the most recently
 * used key and element 0 is the eviction candidate. Individual map/list
 * operations are synchronized (wrapped collections), but compound sequences
 * are not atomic — concurrent callers may interleave, which the original code
 * tolerated (see the ignored IndexOutOfBoundsException below).
 */
public class LruCacheController implements CacheController {

  private int cacheSize;
  private Map cache;
  private List keyList;

  /**
   * Default constructor — 100-entry cache backed by synchronized collections.
   */
  public LruCacheController() {
    this.cacheSize = 100;
    this.cache = Collections.synchronizedMap(new HashMap());
    this.keyList = Collections.synchronizedList(new LinkedList());
  }

  public int getCacheSize() {
    return cacheSize;
  }

  public void setCacheSize(int cacheSize) {
    this.cacheSize = cacheSize;
  }

  /**
   * Configures the cache.
   *
   * @param props Optionally can contain property [cache-size=n] (or the legacy
   *              alias [size=n]) setting the maximum number of entries.
   *              (Previous javadoc mentioning reference-type=WEAK|SOFT|STRONG
   *              belonged to a different controller — this one only reads size.)
   */
  public void setProperties(Properties props) {
    String size = props.getProperty("cache-size");
    if (size == null) {
      size = props.getProperty("size");
    }
    if (size != null) {
      cacheSize = Integer.parseInt(size);
    }
  }

  /**
   * Add an object to the cache.
   *
   * @param cacheModel The cacheModel
   * @param key The key of the object to be cached
   * @param value The object to be cached
   */
  public void putObject(CacheModel cacheModel, Object key, Object value) {
    cache.put(key, value);
    // Bug fix: drop any previous position of this key first. Without this,
    // re-putting an existing key left duplicates in keyList, inflating its
    // size past the real entry count and evicting live entries prematurely.
    keyList.remove(key);
    keyList.add(key);
    if (keyList.size() > cacheSize) {
      try {
        Object oldestKey = keyList.remove(0);
        cache.remove(oldestKey);
      } catch (IndexOutOfBoundsException e) {
        // ignore — another thread may have drained the list concurrently
      }
    }
  }

  /**
   * Get an object out of the cache, refreshing its recency on a hit.
   *
   * @param cacheModel The cache model
   * @param key The key of the object to be returned
   * @return The cached object (or null)
   */
  public Object getObject(CacheModel cacheModel, Object key) {
    Object result = cache.get(key);
    // Move the key to the most-recently-used end on a hit; a miss removes
    // nothing (the key was not in the list).
    keyList.remove(key);
    if (result != null) {
      keyList.add(key);
    }
    return result;
  }

  /** Removes the key from both the recency list and the cache. */
  public Object removeObject(CacheModel cacheModel, Object key) {
    keyList.remove(key);
    return cache.remove(key);
  }

  /**
   * Flushes the cache.
   *
   * @param cacheModel The cache model
   */
  public void flush(CacheModel cacheModel) {
    cache.clear();
    keyList.clear();
  }
}
package caracteristica1.classes.pessoa;

import caracteristica1.classes.usuario.SuperUsuario;

/**
 * Concrete user type. All credential handling is delegated to
 * {@link SuperUsuario}; this class adds no behavior of its own.
 */
public class Usuario extends SuperUsuario {

    /**
     * Creates a user with the given credentials.
     *
     * @param login the login name
     * @param senha the password
     */
    public Usuario(final String login, final String senha) {
        super(login, senha);
    }

}
/*=========================================================================== Copyright (C) 2008-2009 by the Okapi Framework contributors ----------------------------------------------------------------------------- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===========================================================================*/ package net.sf.okapi.applications.rainbow; import net.sf.okapi.applications.rainbow.lib.ILog; import net.sf.okapi.applications.rainbow.lib.LogType; import net.sf.okapi.common.IHelp; public class BatchLog implements ILog { private int warnCount; private int errCount; private boolean inProgress; public boolean beginProcess (String text) { inProgress = true; return inProgress; } public boolean beginTask (String text) { return true; } public boolean canContinue () { return true; } public void cancel (boolean askConfirmation) { // Do nothing } public void clear () { // Do nothing } public void endProcess (String text) { inProgress = false; } public void endTask (String text) { // Do nothing } public boolean error (String text) { return setLog(LogType.ERROR, 0, text); } public long getCallerData () { return 0; } public int getErrorAndWarningCount () { return errCount+warnCount; } public int getErrorCount () { return errCount; } public int getWarningCount () { return warnCount; } public void hide () { // Do nothing } public boolean inProgress () { return inProgress; } public boolean isVisible () { return true; } public boolean message (String text) { return 
setLog(LogType.MESSAGE, 0, text); } public boolean newLine () { System.out.println(""); //$NON-NLS-1$ return false; } public void save (String path) { // Do nothing } public void setCallerData (long data) { // Do nothing } public void setHelp (IHelp helpParam, String helpPath) { // Do nothing } public boolean setLog (int p_nType, int p_nValue, String p_sValue) { switch ( p_nType ) { case LogType.ERROR: System.out.println(Res.getString("BatchLog.error")+p_sValue); //$NON-NLS-1$ errCount++; break; case LogType.WARNING: System.out.println(Res.getString("BatchLog.warning")+p_sValue); //$NON-NLS-1$ warnCount++; break; case LogType.MESSAGE: System.out.println(p_sValue); break; case LogType.SUBPROGRESS: case LogType.MAINPROGRESS: break; case LogType.USERFEEDBACK: default: break; } return canContinue(); } public void setMainProgressMode (int value) { // Do nothing } public boolean setOnTop (boolean value) { // Do nothing return false; } public void setSubProgressMode (int value) { // Do nothing } public void setTitle (String value) { // Do nothing } public void show () { // Do nothing } public boolean warning (String text) { return setLog(LogType.WARNING, 0, text); } }
/*! ****************************************************************************** * * Pentaho Data Integration * * Copyright (C) 2002-2016 by Pentaho : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.trans.steps.salesforceupsert; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import org.junit.Test; public class SalesforceUpsertDataTest { @Test public void testConstructor() { SalesforceUpsertData data = new SalesforceUpsertData(); assertNull( data.inputRowMeta ); assertNull( data.outputRowMeta ); assertEquals( 0, data.nrfields ); assertNull( data.fieldnrs ); assertNull( data.upsertResult ); assertNull( data.sfBuffer ); assertNull( data.outputBuffer ); assertEquals( 0, data.iBufferPos ); assertNull( data.realSalesforceFieldName ); } }
package ucar.unidata.util.test.category;

/**
 * A marker to be used with JUnit categories to indicate that a test method or test class
 * takes a long time to run. We'll want to avoid running such tests after every commit;
 * instead just run them once-a-night or so.
 *
 * <p>To enable these tests, set the {@code runSlowTests} system property, for example:
 *
 * <pre>./gradlew -DrunSlowTests=true :subproject:test</pre>
 *
 * <p>See {@code gradle/root/testing.gradle} for where this category is wired into the
 * build's test configuration.
 */
// Intentionally empty: JUnit categories only need a type token, not any members.
public interface Slow {
}
package com.penguins.cloud.stream.thread.share;

/**
 * Generator that is supposed to produce only even numbers but is
 * deliberately NOT thread-safe: {@link #next()} performs two separate
 * increments with a {@link Thread#yield()} between them, so a competing
 * thread can observe the transient odd value. Presumably this is a
 * thread-interference demo driven by {@code EvenChecker.test(...)} —
 * do not "fix" the race without confirming that intent.
 *
 * @author 郑冰
 * @date 2021/8/30 17:10
 * @email mydreambing@126.com
 * @since jdk8
 **/
public class EvenGenerator extends IntGenerator {

    // Even only when no other thread reads it between the two increments.
    private int currentEvenValue = 0;

    @Override
    public int next() {
        // First half of the update: the value becomes odd here.
        ++currentEvenValue;
        // Invite a context switch so another thread can see the odd state.
        Thread.yield();
        // Second half restores evenness for this thread's own view.
        ++currentEvenValue;
        return currentEvenValue;
    }

    public static void main(String[] args) {
        EvenChecker.test(new EvenGenerator());
    }
}
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.update; import java.io.Closeable; import java.io.File; import java.io.FileNotFoundException; import java.io.FilenameFilter; import java.io.IOException; import java.lang.invoke.MethodHandles; import java.nio.charset.Charset; import java.nio.file.Files; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Deque; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.Future; import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import com.codahale.metrics.Gauge; import com.codahale.metrics.Meter; import org.apache.hadoop.fs.FileSystem; import org.apache.lucene.util.BytesRef; import org.apache.solr.common.SolrDocumentBase; 
import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.SolrInputDocument; import org.apache.solr.common.params.ModifiableSolrParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.ExecutorUtil; import org.apache.solr.common.util.IOUtils; import org.apache.solr.common.util.TimeSource; import org.apache.solr.core.PluginInfo; import org.apache.solr.core.SolrCore; import org.apache.solr.core.SolrInfoBean; import org.apache.solr.metrics.SolrMetricProducer; import org.apache.solr.metrics.SolrMetricsContext; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.request.SolrRequestInfo; import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.search.SolrIndexSearcher; import org.apache.solr.update.processor.DistributedUpdateProcessor; import org.apache.solr.update.processor.UpdateRequestProcessor; import org.apache.solr.update.processor.UpdateRequestProcessorChain; import org.apache.solr.common.util.SolrNamedThreadFactory; import org.apache.solr.util.OrderedExecutor; import org.apache.solr.util.RTimer; import org.apache.solr.util.RefCounted; import org.apache.solr.util.TestInjection; import org.apache.solr.util.TimeOut; import org.apache.solr.util.plugin.PluginInfoInitialized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.solr.update.processor.DistributedUpdateProcessor.DistribPhase.FROMLEADER; import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM; /** * This holds references to the transaction logs. It also keeps a map of unique key to location in log * (along with the update's version). 
This map is only cleared on soft or hard commit * * @lucene.experimental */ public class UpdateLog implements PluginInfoInitialized, SolrMetricProducer { private static final long STATUS_TIME = TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS); public static String LOG_FILENAME_PATTERN = "%s.%019d"; public static String TLOG_NAME="tlog"; public static String BUFFER_TLOG_NAME="buffer.tlog"; private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); private boolean debug = log.isDebugEnabled(); private boolean trace = log.isTraceEnabled(); // TODO: hack public FileSystem getFs() { return null; } public enum SyncLevel { NONE, FLUSH, FSYNC; public static SyncLevel getSyncLevel(String level){ if (level == null) { return SyncLevel.FLUSH; } try{ return SyncLevel.valueOf(level.toUpperCase(Locale.ROOT)); } catch(Exception ex){ log.warn("There was an error reading the SyncLevel - default to {}", SyncLevel.FLUSH, ex); return SyncLevel.FLUSH; } } } // NOTE: when adding new states make sure to keep existing numbers, because external metrics // monitoring may depend on these values being stable. public enum State { REPLAYING(0), BUFFERING(1), APPLYING_BUFFERED(2), ACTIVE(3); private final int value; State(final int value) { this.value = value; } public int getValue() { return value; } } public static final int ADD = 0x01; public static final int DELETE = 0x02; public static final int DELETE_BY_QUERY = 0x03; public static final int COMMIT = 0x04; public static final int UPDATE_INPLACE = 0x08; // For backward-compatibility, we should delete this field in 9.0 public static final int OPERATION_MASK = 0x0f; // mask off flags to get the operation /** * The index of the flags value in an entry from the transaction log. */ public static final int FLAGS_IDX = 0; /** * The index of the _version_ value in an entry from the transaction log. 
*/ public static final int VERSION_IDX = 1; /** * The index of the previous pointer in an entry from the transaction log. * This is only relevant if flags (indexed at FLAGS_IDX) includes UPDATE_INPLACE. */ public static final int PREV_POINTER_IDX = 2; /** * The index of the previous version in an entry from the transaction log. * This is only relevant if flags (indexed at FLAGS_IDX) includes UPDATE_INPLACE. */ public static final int PREV_VERSION_IDX = 3; public static class RecoveryInfo { public long positionOfStart; public int adds; public int deletes; public int deleteByQuery; public int errors; public boolean failed; @Override public String toString() { return "RecoveryInfo{adds="+adds+" deletes="+deletes+ " deleteByQuery="+deleteByQuery+" errors="+errors + " positionOfStart="+positionOfStart+"}"; } } long id = -1; protected volatile State state = State.ACTIVE; protected TransactionLog bufferTlog; protected TransactionLog tlog; protected TransactionLog prevTlog; protected TransactionLog prevTlogOnPrecommit; protected final Deque<TransactionLog> logs = new LinkedList<>(); // list of recent logs, newest first protected LinkedList<TransactionLog> newestLogsOnStartup = new LinkedList<>(); protected int numOldRecords; // number of records in the recent logs protected Map<BytesRef,LogPtr> map = new HashMap<>(); protected Map<BytesRef,LogPtr> prevMap; // used while committing/reopening is happening protected Map<BytesRef,LogPtr> prevMap2; // used while committing/reopening is happening protected TransactionLog prevMapLog; // the transaction log used to look up entries found in prevMap protected TransactionLog prevMapLog2; // the transaction log used to look up entries found in prevMap2 protected final int numDeletesToKeep = 1000; protected final int numDeletesByQueryToKeep = 100; protected int numRecordsToKeep; protected int maxNumLogsToKeep; protected int numVersionBuckets; // This should only be used to initialize VersionInfo... 
the actual number of buckets may be rounded up to a power of two. protected Long maxVersionFromIndex = null; protected boolean existOldBufferLog = false; // keep track of deletes only... this is not updated on an add protected LinkedHashMap<BytesRef, LogPtr> oldDeletes = new LinkedHashMap<BytesRef, LogPtr>(numDeletesToKeep) { @Override protected boolean removeEldestEntry(@SuppressWarnings({"rawtypes"})Map.Entry eldest) { return size() > numDeletesToKeep; } }; /** * Holds the query and the version for a DeleteByQuery command */ public static class DBQ { public String q; // the query string public long version; // positive version of the DBQ @Override public String toString() { return "DBQ{version=" + version + ",q="+q+"}"; } } protected LinkedList<DBQ> deleteByQueries = new LinkedList<>(); protected String[] tlogFiles; protected File tlogDir; protected Collection<String> globalStrings; protected String dataDir; protected String lastDataDir; protected VersionInfo versionInfo; protected SyncLevel defaultSyncLevel = SyncLevel.FLUSH; volatile UpdateHandler uhandler; // a core reload can change this reference! protected volatile boolean cancelApplyBufferUpdate; List<Long> startingVersions; // metrics protected Gauge<Integer> bufferedOpsGauge; protected Meter applyingBufferedOpsMeter; protected Meter replayOpsMeter; protected Meter copyOverOldUpdatesMeter; protected SolrMetricsContext solrMetricsContext; public static class LogPtr { final long pointer; final long version; final long previousPointer; // used for entries that are in-place updates and need a pointer to a previous update command /** * Creates an object that contains the position and version of an update. In this constructor, * the effective value of the previousPointer is -1. 
* * @param pointer Position in the transaction log of an update * @param version Version of the update at the given position */ public LogPtr(long pointer, long version) { this(pointer, version, -1); } /** * * @param pointer Position in the transaction log of an update * @param version Version of the update at the given position * @param previousPointer Position, in the transaction log, of an update on which the current update depends */ public LogPtr(long pointer, long version, long previousPointer) { this.pointer = pointer; this.version = version; this.previousPointer = previousPointer; } @Override public String toString() { return "LogPtr(" + pointer + ")"; } } public long getTotalLogsSize() { long size = 0; synchronized (this) { for (TransactionLog log : logs) { size += log.getLogSize(); } } return size; } /** * @return the current transaction log's size (based on its output stream) */ public synchronized long getCurrentLogSizeFromStream() { return tlog == null ? 0 : tlog.getLogSizeFromStream(); } public long getTotalLogsNumber() { synchronized (this) { return logs.size(); } } public VersionInfo getVersionInfo() { return versionInfo; } public int getNumRecordsToKeep() { return numRecordsToKeep; } public int getMaxNumLogsToKeep() { return maxNumLogsToKeep; } public int getNumVersionBuckets() { return numVersionBuckets; } protected static int objToInt(Object obj, int def) { if (obj != null) { return Integer.parseInt(obj.toString()); } else return def; } @Override public void init(PluginInfo info) { dataDir = (String)info.initArgs.get("dir"); defaultSyncLevel = SyncLevel.getSyncLevel((String)info.initArgs.get("syncLevel")); numRecordsToKeep = objToInt(info.initArgs.get("numRecordsToKeep"), 100); maxNumLogsToKeep = objToInt(info.initArgs.get("maxNumLogsToKeep"), 10); numVersionBuckets = objToInt(info.initArgs.get("numVersionBuckets"), 65536); if (numVersionBuckets <= 0) throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Number of version buckets must be 
greater than 0!"); log.info("Initializing UpdateLog: dataDir={} defaultSyncLevel={} numRecordsToKeep={} maxNumLogsToKeep={} numVersionBuckets={}", dataDir, defaultSyncLevel, numRecordsToKeep, maxNumLogsToKeep, numVersionBuckets); } /* Note, when this is called, uhandler is not completely constructed. * This must be called when a new log is created, or * for an existing log whenever the core or update handler changes. */ public void init(UpdateHandler uhandler, SolrCore core) { dataDir = core.getUlogDir(); this.uhandler = uhandler; if (dataDir.equals(lastDataDir)) { versionInfo.reload(); core.getCoreMetricManager().registerMetricProducer(SolrInfoBean.Category.TLOG.toString(), this); if (debug) { log.debug("UpdateHandler init: tlogDir={}, next id={} this is a reopen...nothing else to do", tlogDir, id); } return; } lastDataDir = dataDir; tlogDir = new File(dataDir, TLOG_NAME); tlogDir.mkdirs(); tlogFiles = getLogList(tlogDir); id = getLastLogId() + 1; // add 1 since we will create a new log for the next update if (debug) { log.debug("UpdateHandler init: tlogDir={}, existing tlogs={}, next id={}", tlogDir, Arrays.asList(tlogFiles), id); } String[] oldBufferTlog = getBufferLogList(tlogDir); if (oldBufferTlog != null && oldBufferTlog.length != 0) { existOldBufferLog = true; } TransactionLog oldLog = null; for (String oldLogName : tlogFiles) { File f = new File(tlogDir, oldLogName); try { oldLog = newTransactionLog(f, null, true); addOldLog(oldLog, false); // don't remove old logs on startup since more than one may be uncapped. } catch (Exception e) { SolrException.log(log, "Failure to open existing log file (non fatal) " + f, e); deleteFile(f); } } // Record first two logs (oldest first) at startup for potential tlog recovery. // It's possible that at abnormal close both "tlog" and "prevTlog" were uncapped. 
for (TransactionLog ll : logs) { newestLogsOnStartup.addFirst(ll); if (newestLogsOnStartup.size() >= 2) break; } try { versionInfo = new VersionInfo(this, numVersionBuckets); } catch (SolrException e) { log.error("Unable to use updateLog: {}", e.getMessage(), e); throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to use updateLog: " + e.getMessage(), e); } // TODO: these startingVersions assume that we successfully recover from all non-complete tlogs. try (RecentUpdates startingUpdates = getRecentUpdates()) { startingVersions = startingUpdates.getVersions(numRecordsToKeep); // populate recent deletes list (since we can't get that info from the index) for (int i = startingUpdates.deleteList.size() - 1; i >= 0; i--) { DeleteUpdate du = startingUpdates.deleteList.get(i); oldDeletes.put(new BytesRef(du.id), new LogPtr(-1, du.version)); } // populate recent deleteByQuery commands for (int i = startingUpdates.deleteByQueryList.size() - 1; i >= 0; i--) { Update update = startingUpdates.deleteByQueryList.get(i); @SuppressWarnings({"unchecked"}) List<Object> dbq = (List<Object>) update.log.lookup(update.pointer); long version = (Long) dbq.get(1); String q = (String) dbq.get(2); trackDeleteByQuery(q, version); } } core.getCoreMetricManager().registerMetricProducer(SolrInfoBean.Category.TLOG.toString(), this); } @Override public void initializeMetrics(SolrMetricsContext parentContext, String scope) { solrMetricsContext = parentContext.getChildContext(this); bufferedOpsGauge = () -> { if (state == State.BUFFERING) { if (bufferTlog == null) return 0; // numRecords counts header as a record return bufferTlog.numRecords() - 1; } if (tlog == null) { return 0; } else if (state == State.APPLYING_BUFFERED) { // numRecords counts header as a record return tlog.numRecords() - 1 - recoveryInfo.adds - recoveryInfo.deleteByQuery - recoveryInfo.deletes - recoveryInfo.errors; } else { return 0; } }; solrMetricsContext.gauge(bufferedOpsGauge, true, "ops", scope, 
"buffered"); solrMetricsContext.gauge(() -> logs.size(), true, "logs", scope, "replay", "remaining"); solrMetricsContext.gauge(() -> getTotalLogsSize(), true, "bytes", scope, "replay", "remaining"); applyingBufferedOpsMeter = solrMetricsContext.meter("ops", scope, "applyingBuffered"); replayOpsMeter = solrMetricsContext.meter("ops", scope, "replay"); copyOverOldUpdatesMeter = solrMetricsContext.meter("ops", scope, "copyOverOldUpdates"); solrMetricsContext.gauge(() -> state.getValue(), true, "state", scope); } @Override public SolrMetricsContext getSolrMetricsContext() { return solrMetricsContext; } /** * Returns a new {@link org.apache.solr.update.TransactionLog}. Sub-classes can override this method to * change the implementation of the transaction log. */ public TransactionLog newTransactionLog(File tlogFile, Collection<String> globalStrings, boolean openExisting) { return new TransactionLog(tlogFile, globalStrings, openExisting); } public String getLogDir() { return tlogDir.getAbsolutePath(); } public List<Long> getStartingVersions() { return startingVersions; } public boolean existOldBufferLog() { return existOldBufferLog; } /* Takes over ownership of the log, keeping it until no longer needed and then decrementing its reference and dropping it. */ protected synchronized void addOldLog(TransactionLog oldLog, boolean removeOld) { if (oldLog == null) return; numOldRecords += oldLog.numRecords(); int currRecords = numOldRecords; if (oldLog != tlog && tlog != null) { currRecords += tlog.numRecords(); } while (removeOld && logs.size() > 0) { TransactionLog log = logs.peekLast(); int nrec = log.numRecords(); // remove oldest log if we don't need it to keep at least numRecordsToKeep, or if // we already have the limit of 10 log files. 
if (currRecords - nrec >= numRecordsToKeep || (maxNumLogsToKeep > 0 && logs.size() >= maxNumLogsToKeep)) { currRecords -= nrec; numOldRecords -= nrec; logs.removeLast().decref(); // dereference so it will be deleted when no longer in use continue; } break; } // don't incref... we are taking ownership from the caller. logs.addFirst(oldLog); } public String[] getBufferLogList(File directory) { final String prefix = BUFFER_TLOG_NAME+'.'; return directory.list((dir, name) -> name.startsWith(prefix)); } /** * Does update from old tlogs (not from buffer tlog)? * If yes we must skip writing {@code cmd} to current tlog */ private boolean updateFromOldTlogs(UpdateCommand cmd) { return (cmd.getFlags() & UpdateCommand.REPLAY) != 0 && state == State.REPLAYING; } public String[] getLogList(File directory) { final String prefix = TLOG_NAME+'.'; String[] names = directory.list(new FilenameFilter() { @Override public boolean accept(File dir, String name) { return name.startsWith(prefix); } }); if (names == null) { throw new RuntimeException(new FileNotFoundException(directory.getAbsolutePath())); } Arrays.sort(names); return names; } public long getLastLogId() { if (id != -1) return id; if (tlogFiles.length == 0) return -1; String last = tlogFiles[tlogFiles.length-1]; return Long.parseLong(last.substring(TLOG_NAME.length() + 1)); } public void add(AddUpdateCommand cmd) { add(cmd, false); } public void add(AddUpdateCommand cmd, boolean clearCaches) { // don't log if we are replaying from another log // TODO: we currently need to log to maintain correct versioning, rtg, etc // if ((cmd.getFlags() & UpdateCommand.REPLAY) != 0) return; synchronized (this) { if ((cmd.getFlags() & UpdateCommand.BUFFERING) != 0) { ensureBufferTlog(); bufferTlog.write(cmd); return; } long pos = -1; long prevPointer = getPrevPointerForUpdate(cmd); // don't log if we are replaying from another log if (!updateFromOldTlogs(cmd)) { ensureLog(); pos = tlog.write(cmd, prevPointer); } if (!clearCaches) { // TODO: 
in the future we could support a real position for a REPLAY update. // Only currently would be useful for RTG while in recovery mode though. LogPtr ptr = new LogPtr(pos, cmd.getVersion(), prevPointer); map.put(cmd.getIndexedId(), ptr); if (trace) { log.trace("TLOG: added id {} to {} {} map={}", cmd.getPrintableId(), tlog, ptr, System.identityHashCode(map)); } } else { openRealtimeSearcher(); if (log.isTraceEnabled()) { log.trace("TLOG: added id {} to {} clearCaches=true", cmd.getPrintableId(), tlog); } } } } /** * @return If cmd is an in-place update, then returns the pointer (in the tlog) of the previous * update that the given update depends on. * Returns -1 if this is not an in-place update, or if we can't find a previous entry in * the tlog. Upon receiving a -1, it should be clear why it was -1: if the command's * flags|UpdateLog.UPDATE_INPLACE is set, then this command is an in-place update whose * previous update is in the index and not in the tlog; if that flag is not set, it is * not an in-place update at all, and don't bother about the prevPointer value at * all (which is -1 as a dummy value).) 
*/ private synchronized long getPrevPointerForUpdate(AddUpdateCommand cmd) { // note: sync required to ensure maps aren't changed out form under us if (cmd.isInPlaceUpdate()) { BytesRef indexedId = cmd.getIndexedId(); for (Map<BytesRef, LogPtr> currentMap : Arrays.asList(map, prevMap, prevMap2)) { if (currentMap != null) { LogPtr prevEntry = currentMap.get(indexedId); if (null != prevEntry) { return prevEntry.pointer; } } } } return -1; } public void delete(DeleteUpdateCommand cmd) { BytesRef br = cmd.getIndexedId(); synchronized (this) { if ((cmd.getFlags() & UpdateCommand.BUFFERING) != 0) { ensureBufferTlog(); bufferTlog.writeDelete(cmd); return; } long pos = -1; if (!updateFromOldTlogs(cmd)) { ensureLog(); pos = tlog.writeDelete(cmd); } LogPtr ptr = new LogPtr(pos, cmd.version); map.put(br, ptr); oldDeletes.put(br, ptr); if (trace) { log.trace("TLOG: added delete for id {} to {} {} map={}", cmd.id, tlog, ptr, System.identityHashCode(map)); } } } public void deleteByQuery(DeleteUpdateCommand cmd) { synchronized (this) { if ((cmd.getFlags() & UpdateCommand.BUFFERING) != 0) { ensureBufferTlog(); bufferTlog.writeDeleteByQuery(cmd); return; } long pos = -1; if (!updateFromOldTlogs(cmd)) { ensureLog(); pos = tlog.writeDeleteByQuery(cmd); } // skip purge our caches in case of tlog replica if ((cmd.getFlags() & UpdateCommand.IGNORE_INDEXWRITER) == 0) { // given that we just did a delete-by-query, we don't know what documents were // affected and hence we must purge our caches. openRealtimeSearcher(); trackDeleteByQuery(cmd.getQuery(), cmd.getVersion()); if (trace) { LogPtr ptr = new LogPtr(pos, cmd.getVersion()); int hash = System.identityHashCode(map); log.trace("TLOG: added deleteByQuery {} to {} {} map = {}.", cmd.query, tlog, ptr, hash); } } } } /** Opens a new realtime searcher and clears the id caches. 
* This may also be called when we updates are being buffered (from PeerSync/IndexFingerprint) */ public void openRealtimeSearcher() { synchronized (this) { // We must cause a new IndexReader to be opened before anything looks at these caches again // so that a cache miss will read fresh data. try { RefCounted<SolrIndexSearcher> holder = uhandler.core.openNewSearcher(true, true); holder.decref(); } catch (Exception e) { SolrException.log(log, "Error opening realtime searcher", e); return; } if (map != null) map.clear(); if (prevMap != null) prevMap.clear(); if (prevMap2 != null) prevMap2.clear(); } } /** currently for testing only */ public void deleteAll() { synchronized (this) { try { RefCounted<SolrIndexSearcher> holder = uhandler.core.openNewSearcher(true, true); holder.decref(); } catch (Exception e) { SolrException.log(log, "Error opening realtime searcher for deleteByQuery", e); } if (map != null) map.clear(); if (prevMap != null) prevMap.clear(); if (prevMap2 != null) prevMap2.clear(); oldDeletes.clear(); deleteByQueries.clear(); } } void trackDeleteByQuery(String q, long version) { version = Math.abs(version); DBQ dbq = new DBQ(); dbq.q = q; dbq.version = version; synchronized (this) { if (deleteByQueries.isEmpty() || deleteByQueries.getFirst().version < version) { // common non-reordered case deleteByQueries.addFirst(dbq); } else { // find correct insertion point ListIterator<DBQ> iter = deleteByQueries.listIterator(); iter.next(); // we already checked the first element in the previous "if" clause while (iter.hasNext()) { DBQ oldDBQ = iter.next(); if (oldDBQ.version < version) { iter.previous(); break; } else if (oldDBQ.version == version && oldDBQ.q.equals(q)) { // a duplicate return; } } iter.add(dbq); // this also handles the case of adding at the end when hasNext() == false } if (deleteByQueries.size() > numDeletesByQueryToKeep) { deleteByQueries.removeLast(); } } } public List<DBQ> getDBQNewer(long version) { synchronized (this) { if 
(deleteByQueries.isEmpty() || deleteByQueries.getFirst().version < version) {
        // fast common case
        return null;
      }

      // deleteByQueries is ordered newest-first; collect entries strictly newer than 'version'
      List<DBQ> dbqList = new ArrayList<>();
      for (DBQ dbq : deleteByQueries) {
        if (dbq.version <= version) break;
        dbqList.add(dbq);
      }
      return dbqList;
    }
  }

  /**
   * Rotates the id-&gt;pointer maps: the current map/tlog pair becomes prevMap/prevMapLog,
   * the previous pair becomes prevMap2/prevMapLog2, and a fresh empty map is installed.
   * Callers must hold the appropriate synchronization (all call sites are synchronized on this).
   */
  protected void newMap() {
    prevMap2 = prevMap;
    prevMapLog2 = prevMapLog;

    prevMap = map;
    prevMapLog = tlog;

    map = new HashMap<>();
  }

  /** Drops the two retained historical maps (safe once a new searcher covers their entries). */
  private void clearOldMaps() {
    prevMap = null;
    prevMap2 = null;
  }

  /** Returns true if there is an open transaction log holding updates not yet capped by a commit. */
  public boolean hasUncommittedChanges() {
    return tlog != null;
  }

  /**
   * Called before a hard commit: rotates the in-memory maps and swings the current tlog
   * into prevTlog so that concurrent adds go to a new log known not to be in the index yet.
   */
  public void preCommit(CommitUpdateCommand cmd) {
    synchronized (this) {
      if (debug) {
        log.debug("TLOG: preCommit");
      }

      if (getState() != State.ACTIVE && (cmd.getFlags() & UpdateCommand.REPLAY) == 0) {
        // if we aren't in the active state, and this isn't a replay
        // from the recovery process, then we shouldn't mess with
        // the current transaction log. This normally shouldn't happen
        // as DistributedUpdateProcessor will prevent this. Commits
        // that don't use the processor are possible though.
        return;
      }

      // since we're changing the log, we must change the map.
      newMap();

      if (prevTlog != null) {
        globalStrings = prevTlog.getGlobalStrings();
      }

      // since document additions can happen concurrently with commit, create
      // a new transaction log first so that we know the old one is definitely
      // in the index.
      if (prevTlog != null) {
        // postCommit for prevTlog was never called (possibly the index was corrupted);
        // if we overwrote the prevTlog reference now, that tlog would be leaked — close it first.
        postCommit(cmd);
      }
      prevTlog = tlog;
      tlog = null;
      id++;
    }
  }

  /**
   * Called after a hard commit succeeds: caps prevTlog with a commit record and hands it
   * to the old-log list (which owns its remaining reference).
   */
  public void postCommit(CommitUpdateCommand cmd) {
    synchronized (this) {
      if (debug) {
        log.debug("TLOG: postCommit");
      }
      if (prevTlog != null) {
        // if we made it through the commit, write a commit command to the log
        // TODO: check that this works to cap a tlog we were using to buffer so we don't replay on startup.
        prevTlog.writeCommit(cmd);

        addOldLog(prevTlog, true);
        // the old log list will decref when no longer needed
        // prevTlog.decref();
        prevTlog = null;
      }
    }
  }

  /** Called before a soft commit: rotates to a fresh map (the tlog itself is untouched). */
  public void preSoftCommit(CommitUpdateCommand cmd) {
    debug = log.isDebugEnabled(); // refresh our view of debugging occasionally
    trace = log.isTraceEnabled();

    synchronized (this) {

      if (!cmd.softCommit) return; // already handled this at the start of the hard commit
      newMap();

      // start adding documents to a new map since we won't know if
      // any added documents will make it into this commit or not.
      // But we do know that any updates already added will definitely
      // show up in the latest reader after the commit succeeds.
      map = new HashMap<>();

      if (debug) {
        log.debug("TLOG: preSoftCommit: prevMap={} new map={}", System.identityHashCode(prevMap), System.identityHashCode(map));
      }
    }
  }

  /** Called after a soft commit: the new searcher covers the old maps, so discard them. */
  public void postSoftCommit(CommitUpdateCommand cmd) {
    synchronized (this) {
      // We can clear out all old maps now that a new searcher has been opened.
      // This currently only works since DUH2 synchronizes around preCommit to avoid
      // it being called in the middle of a preSoftCommit, postSoftCommit sequence.
      // If this DUH2 synchronization were to be removed, preSoftCommit should
      // record what old maps were created and only remove those.
      if (debug) {
        SolrCore.verbose("TLOG: postSoftCommit: disposing of prevMap="+ System.identityHashCode(prevMap) + ", prevMap2=" + System.identityHashCode(prevMap2));
      }
      clearOldMaps();
    }
  }

  /**
   * Goes over backwards, following the prevPointer, to merge all partial updates into the passed doc. Stops at either a full
   * document, or if there are no previous entries to follow in the update log.
   *
   * @param id Binary representation of the unique key field
   * @param prevPointer Pointer to the previous entry in the ulog, based on which the current in-place update was made.
   * @param prevVersion Version of the previous entry in the ulog, based on which the current in-place update was made.
   * @param onlyTheseFields When a non-null set of field names is passed in, the resolve process only attempts to populate
   *        the given fields in this set. When this set is null, it resolves all fields.
   * @param latestPartialDoc Partial document that is to be populated
   * @return Returns 0 if a full document was found in the log, -1 if no full document was found. If full document was supposed
   *         to be found in the tlogs, but couldn't be found (because the logs were rotated) then the prevPointer is returned.
   */
  @SuppressWarnings({"unchecked"})
  synchronized public long applyPartialUpdates(BytesRef id, long prevPointer, long prevVersion,
      Set<String> onlyTheseFields, @SuppressWarnings({"rawtypes"})SolrDocumentBase latestPartialDoc) {

    SolrInputDocument partialUpdateDoc = null;

    // only the three logs that the lookup maps can point into need to be searched
    List<TransactionLog> lookupLogs = Arrays.asList(tlog, prevMapLog, prevMapLog2);
    while (prevPointer >= 0) {
      //go through each partial update and apply it on the incoming doc one after another
      @SuppressWarnings({"rawtypes"})
      List entry;
      entry = getEntryFromTLog(prevPointer, prevVersion, lookupLogs);
      if (entry == null) {
        return prevPointer; // a previous update was supposed to be found, but wasn't found (due to log rotation)
      }
      int flags = (int) entry.get(UpdateLog.FLAGS_IDX);

      // since updates can depend only upon ADD updates or other UPDATE_INPLACE updates, we assert that we aren't
      // getting something else
      if ((flags & UpdateLog.ADD) != UpdateLog.ADD && (flags & UpdateLog.UPDATE_INPLACE) != UpdateLog.UPDATE_INPLACE) {
        throw new SolrException(ErrorCode.INVALID_STATE, entry + " should've been either ADD or UPDATE_INPLACE update" +
            ", while looking for id=" + new String(id.bytes, Charset.forName("UTF-8")));
      }
      // if this is an ADD (i.e. full document update), stop here
      if ((flags & UpdateLog.ADD) == UpdateLog.ADD) {
        partialUpdateDoc = (SolrInputDocument) entry.get(entry.size() - 1);
        applyOlderUpdates(latestPartialDoc, partialUpdateDoc, onlyTheseFields);
        return 0; // Full document was found in the tlog itself
      }
      if (entry.size() < 5) {
        throw new SolrException(ErrorCode.INVALID_STATE, entry + " is not a partial doc" +
            ", while looking for id=" + new String(id.bytes, Charset.forName("UTF-8")));
      }
      // This update is an inplace update, get the partial doc. The input doc is always at last position.
      partialUpdateDoc = (SolrInputDocument) entry.get(entry.size() - 1);
      applyOlderUpdates(latestPartialDoc, partialUpdateDoc, onlyTheseFields);
      // follow the chain further back
      prevPointer = (long) entry.get(UpdateLog.PREV_POINTER_IDX);
      prevVersion = (long) entry.get(UpdateLog.PREV_VERSION_IDX);

      if (onlyTheseFields != null && latestPartialDoc.keySet().containsAll(onlyTheseFields)) {
        return 0; // all the onlyTheseFields have been resolved, safe to abort now.
      }
    }

    return -1; // last full document is not supposed to be in tlogs, but it must be in the index
  }

  /**
   * Add all fields from olderDoc into newerDoc if not already present in newerDoc
   * (restricted to mergeFields when that set is non-null).
   */
  private void applyOlderUpdates(@SuppressWarnings({"rawtypes"})SolrDocumentBase newerDoc, SolrInputDocument olderDoc, Set<String> mergeFields) {
    for (String fieldName : olderDoc.getFieldNames()) {
      // if the newerDoc has this field, then this field from olderDoc can be ignored
      if (!newerDoc.containsKey(fieldName) && (mergeFields == null || mergeFields.contains(fieldName))) {
        for (Object val : olderDoc.getFieldValues(fieldName)) {
          newerDoc.addField(fieldName, val);
        }
      }
    }
  }

  /***
   * Get the entry that has the given lookupVersion in the given lookupLogs at the lookupPointer position.
   *
   * @return The entry if found, otherwise null
   */
  @SuppressWarnings({"rawtypes"})
  private synchronized List getEntryFromTLog(long lookupPointer, long lookupVersion, List<TransactionLog> lookupLogs) {
    for (TransactionLog lookupLog : lookupLogs) {
      if (lookupLog != null && lookupLog.getLogSize() > lookupPointer) {
        lookupLog.incref(); // pin the log while we read from it
        try {
          Object obj = null;

          try {
            obj = lookupLog.lookup(lookupPointer);
          } catch (Exception | Error ex) {
            // This can happen when trying to deserialize the entry at position lookupPointer,
            // but from a different tlog than the one containing the desired entry.
            // Just ignore the exception, so as to proceed to the next tlog.
            log.debug("Exception reading the log (this is expected, don't worry)={}, for version={}. This can be ignored"
                , lookupLog, lookupVersion);
          }

          if (obj != null && obj instanceof List) {
            List tmpEntry = (List) obj;
            // the version stamp is the only reliable way to tell we hit the right entry
            if (tmpEntry.size() >= 2 &&
                (tmpEntry.get(UpdateLog.VERSION_IDX) instanceof Long) &&
                ((Long) tmpEntry.get(UpdateLog.VERSION_IDX)).equals(lookupVersion)) {
              return tmpEntry;
            }
          }
        } finally {
          lookupLog.decref();
        }
      }
    }
    return null;
  }

  /**
   * Real-time-get lookup of the latest logged entry for the given id, consulting the
   * current map and the two retained historical maps, newest first.
   * @return the deserialized log entry, or null if the id is not in any map.
   */
  public Object lookup(BytesRef indexedId) {
    LogPtr entry;
    TransactionLog lookupLog;

    synchronized (this) {
      entry = map.get(indexedId);
      lookupLog = tlog;  // something found in "map" will always be in "tlog"
      // SolrCore.verbose("TLOG: lookup: for id ",indexedId.utf8ToString(),"in map",System.identityHashCode(map),"got",entry,"lookupLog=",lookupLog);
      if (entry == null && prevMap != null) {
        entry = prevMap.get(indexedId);
        // something found in prevMap will always be found in prevMapLog (which could be tlog or prevTlog)
        lookupLog = prevMapLog;
        // SolrCore.verbose("TLOG: lookup: for id ",indexedId.utf8ToString(),"in prevMap",System.identityHashCode(map),"got",entry,"lookupLog=",lookupLog);
      }
      if (entry == null && prevMap2 != null) {
        entry = prevMap2.get(indexedId);
        // something found in prevMap2 will always be found in prevMapLog2 (which could be tlog or prevTlog)
        lookupLog = prevMapLog2;
        // SolrCore.verbose("TLOG: lookup: for id ",indexedId.utf8ToString(),"in prevMap2",System.identityHashCode(map),"got",entry,"lookupLog=",lookupLog);
      }

      if (entry == null) {
        return null;
      }
      lookupLog.incref(); // keep the log alive after we leave the sync block
    }

    try {
      // now do the lookup outside of the sync block for concurrency
      return lookupLog.lookup(entry.pointer);
    } finally {
      lookupLog.decref();
    }
  }

  // This method works like realtime-get... it only guarantees to return the latest
  // version of the *completed* update.  There can be updates in progress concurrently
  // that have already grabbed higher version numbers.  Higher level coordination or
  // synchronization is needed for stronger guarantees (as VersionUpdateProcessor does).
  public Long lookupVersion(BytesRef indexedId) {
    LogPtr entry;
    TransactionLog lookupLog;

    synchronized (this) {
      entry = map.get(indexedId);
      lookupLog = tlog;  // something found in "map" will always be in "tlog"
      // SolrCore.verbose("TLOG: lookup ver: for id ",indexedId.utf8ToString(),"in map",System.identityHashCode(map),"got",entry,"lookupLog=",lookupLog);
      if (entry == null && prevMap != null) {
        entry = prevMap.get(indexedId);
        // something found in prevMap will always be found in prevMapLog (which could be tlog or prevTlog)
        lookupLog = prevMapLog;
        // SolrCore.verbose("TLOG: lookup ver: for id ",indexedId.utf8ToString(),"in prevMap",System.identityHashCode(map),"got",entry,"lookupLog=",lookupLog);
      }
      if (entry == null && prevMap2 != null) {
        entry = prevMap2.get(indexedId);
        // something found in prevMap2 will always be found in prevMapLog2 (which could be tlog or prevTlog)
        lookupLog = prevMapLog2;
        // SolrCore.verbose("TLOG: lookup ver: for id ",indexedId.utf8ToString(),"in prevMap2",System.identityHashCode(map),"got",entry,"lookupLog=",lookupLog);
      }
    }

    if (entry != null) {
      return entry.version;
    }

    // Now check real index
    Long version = versionInfo.getVersionFromIndex(indexedId);

    if (version != null) {
      return version;
    }

    // We can't get any version info for deletes from the index, so if the doc
    // wasn't found, check a cache of recent deletes.

    synchronized (this) {
      entry = oldDeletes.get(indexedId);
    }

    if (entry != null) {
      return entry.version;
    }

    return null;
  }

  /**
   * Syncs the current tlog to the requested durability level
   * (falling back to the default level when syncLevel is null; no-op for NONE).
   */
  public void finish(SyncLevel syncLevel) {
    if (syncLevel == null) {
      syncLevel = defaultSyncLevel;
    }
    if (syncLevel == SyncLevel.NONE) {
      return;
    }

    TransactionLog currLog;
    synchronized (this) {
      currLog = tlog;
      if (currLog == null) return;
      currLog.incref(); // pin so a concurrent close/rotate can't release it under us
    }

    try {
      currLog.finish(syncLevel);
    } finally {
      currLog.decref();
    }
  }

  /**
   * Replays, on startup, any of the newest tlogs that do not end with a commit.
   * @return a Future for the replay, or null if no log needed recovery.
   */
  public Future<RecoveryInfo> recoverFromLog() {
    recoveryInfo = new RecoveryInfo();

    List<TransactionLog> recoverLogs = new ArrayList<>(1);
    for (TransactionLog ll : newestLogsOnStartup) {
      if (!ll.try_incref()) continue; // log already released; nothing to replay from it

      try {
        if (ll.endsWithCommit()) {
          // already capped by a commit, so it is fully in the index
          ll.closeOutput();
          ll.decref();
          continue;
        }
      } catch (IOException e) {
        log.error("Error inspecting tlog {}", ll, e);
        ll.closeOutput();
        ll.decref();
        continue;
      }

      recoverLogs.add(ll);
    }

    if (recoverLogs.isEmpty()) return null;

    ExecutorCompletionService<RecoveryInfo> cs = new ExecutorCompletionService<>(recoveryExecutor);
    LogReplayer replayer = new LogReplayer(recoverLogs, false);

    versionInfo.blockUpdates();
    try {
      state = State.REPLAYING;

      // The deleteByQueries and oldDeletes lists
      // would've been populated by items from the logs themselves (which we
      // will replay now). So lets clear them out here before the replay.
      deleteByQueries.clear();
      oldDeletes.clear();
    } finally {
      versionInfo.unblockUpdates();
    }

    // At this point, we are guaranteed that any new updates coming in will see the state as "replaying"

    return cs.submit(replayer, recoveryInfo);
  }

  /**
   * Replay current tlog, so all updates will be written to index.
   * This is a mandatory task when a tlog replica becomes the new leader.
   * @return future of this task, or null if there is no current tlog
   */
  public Future<RecoveryInfo> recoverFromCurrentLog() {
    if (tlog == null) {
      return null;
    }

    map.clear();
    recoveryInfo = new RecoveryInfo();
    tlog.incref(); // the replayer owns this reference until replay completes

    ExecutorCompletionService<RecoveryInfo> cs = new ExecutorCompletionService<>(recoveryExecutor);
    LogReplayer replayer = new LogReplayer(Collections.singletonList(tlog), false, true);

    versionInfo.blockUpdates();
    try {
      state = State.REPLAYING;
    } finally {
      versionInfo.unblockUpdates();
    }

    return cs.submit(replayer, recoveryInfo);
  }

  /**
   * Block updates, append a commit at current tlog,
   * then copy over buffered updates to the new tlog and bring the ulog back to the active state.
   * Any update which hasn't made it to the index is thereby preserved in the current tlog,
   * which also keeps RTG working.
   * @param cuc any updates that have a version larger than the version of cuc will be copied over
   */
  public void copyOverBufferingUpdates(CommitUpdateCommand cuc) {
    versionInfo.blockUpdates();
    try {
      synchronized (this) {
        state = State.ACTIVE;
        if (bufferTlog == null) {
          return;
        }
        // by calling this, we won't switch to new tlog (compared to applyBufferedUpdates())
        // if we switch to new tlog we can possible lose updates on the next fetch
        copyOverOldUpdates(cuc.getVersion(), bufferTlog);
        dropBufferTlog();
      }
    } finally {
      versionInfo.unblockUpdates();
    }
  }

  /**
   * Block updates, append a commit at current tlog, then copy over updates to a new tlog.
   * Any update which hasn't made it to the index is thereby preserved in the current tlog.
   * @param cuc any updates that have a version larger than the version of cuc will be copied over
   */
  public void commitAndSwitchToNewTlog(CommitUpdateCommand cuc) {
    versionInfo.blockUpdates();
    try {
      synchronized (this) {
        if (tlog == null) {
          return;
        }
        preCommit(cuc);
        try {
          copyOverOldUpdates(cuc.getVersion());
        } finally {
          postCommit(cuc);
        }
      }
    } finally {
      versionInfo.unblockUpdates();
    }
  }

  /**
   * Copies updates newer than commitVersion from prevTlog (or, failing that, the most
   * recent old log) into the current tlog. No-op if the source log already ends with a commit
   * or has been fully released.
   */
  public void copyOverOldUpdates(long commitVersion) {
    TransactionLog oldTlog = prevTlog;
    if (oldTlog == null && !logs.isEmpty()) {
      oldTlog = logs.getFirst();
    }
    if (oldTlog == null || oldTlog.refcount.get() == 0) {
      return;
    }

    try {
      if (oldTlog.endsWithCommit()) return;
    } catch (IOException e) {
      log.warn("Exception reading log", e);
      return;
    }
    copyOverOldUpdates(commitVersion, oldTlog);
  }

  /**
   * Copy over updates from prevTlog or last tlog (in tlog folder) to a new tlog
   * @param commitVersion any updates that have version larger than the commitVersion will be copied over
   */
  public void copyOverOldUpdates(long commitVersion, TransactionLog oldTlog) {
    copyOverOldUpdatesMeter.mark();

    SolrQueryRequest req = new LocalSolrQueryRequest(uhandler.core,
        new ModifiableSolrParams());

    TransactionLog.LogReader logReader = oldTlog.getReader(0);
    Object o = null;
    try {
      while ( (o = logReader.next()) != null ) {
        try {
          @SuppressWarnings({"rawtypes"})
          List entry = (List)o;
          int operationAndFlags = (Integer) entry.get(0);
          int oper = operationAndFlags & OPERATION_MASK;
          long version = (Long) entry.get(1);
          // only re-apply updates newer than the commit point (versions may be negative for deletes)
          if (Math.abs(version) > commitVersion) {
            switch (oper) {
              case UpdateLog.UPDATE_INPLACE:
              case UpdateLog.ADD: {
                AddUpdateCommand cmd = convertTlogEntryToAddUpdateCommand(req, entry, oper, version);
                cmd.setFlags(UpdateCommand.IGNORE_AUTOCOMMIT);
                add(cmd);
                break;
              }
              case UpdateLog.DELETE: {
                byte[] idBytes = (byte[]) entry.get(2);
                DeleteUpdateCommand cmd = new DeleteUpdateCommand(req);
                cmd.setIndexedId(new BytesRef(idBytes));
                cmd.setVersion(version);
                cmd.setFlags(UpdateCommand.IGNORE_AUTOCOMMIT);
                delete(cmd);
                break;
              }
              case UpdateLog.DELETE_BY_QUERY: {
                String query = (String) entry.get(2);
                DeleteUpdateCommand cmd = new DeleteUpdateCommand(req);
                cmd.query = query;
                cmd.setVersion(version);
                cmd.setFlags(UpdateCommand.IGNORE_AUTOCOMMIT);
                deleteByQuery(cmd);
                break;
              }
              default:
                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
            }
          }
        } catch (ClassCastException e) {
          log.warn("Unexpected log entry or corrupt log. Entry={}", o, e);
        }
      }
      // Prev tlog will be closed, so nullify prevMap
      if (prevTlog == oldTlog) {
        prevMap = null;
      }
    } catch (IOException e) {
      log.error("Exception reading versions from log",e);
    } catch (InterruptedException e) {
      log.warn("Exception reading log", e);
    } finally {
      if (logReader != null) logReader.close();
    }
  }

  /** Lazily creates the buffer tlog used while this ulog is in the BUFFERING state. */
  protected void ensureBufferTlog() {
    if (bufferTlog != null) return;
    String newLogName = String.format(Locale.ROOT, LOG_FILENAME_PATTERN, BUFFER_TLOG_NAME, System.nanoTime());
    bufferTlog = newTransactionLog(new File(tlogDir, newLogName), globalStrings, false);
    bufferTlog.isBuffer = true;
  }

  // Cleanup old buffer tlogs
  protected void deleteBufferLogs() {
    String[] oldBufferTlog = getBufferLogList(tlogDir);
    if (oldBufferTlog != null && oldBufferTlog.length != 0) {
      for (String oldBufferLogName : oldBufferTlog) {
        deleteFile(new File(tlogDir, oldBufferLogName));
      }
    }
  }

  /** Lazily creates the current tlog file, named from the monotonically increasing id. */
  protected void ensureLog() {
    if (tlog == null) {
      String newLogName = String.format(Locale.ROOT, LOG_FILENAME_PATTERN, TLOG_NAME, id);
      tlog = newTransactionLog(new File(tlogDir, newLogName), globalStrings, false);
    }
  }

  /**
   * Releases and force-closes the given log, optionally capping it with a commit record
   * first so it won't be replayed on the next startup.
   */
  private void doClose(TransactionLog theLog, boolean writeCommit) {
    if (theLog != null) {
      if (writeCommit) {
        // record a commit
        log.info("Recording current closed for {} log={}", uhandler.core, theLog);
        CommitUpdateCommand cmd = new CommitUpdateCommand(new LocalSolrQueryRequest(uhandler.core, new ModifiableSolrParams((SolrParams)null)), false);
        theLog.writeCommit(cmd);
      }

      theLog.deleteOnClose = false;
      theLog.decref();
      theLog.forceClose();
    }
  }

  public void close(boolean committed) {
    close(committed, false);
  }

  /**
   * Shuts down the recovery executor and closes all logs. The deleteOnClose parameter is
   * currently not consulted here — all logs are kept on disk (deleteOnClose forced false)
  * so they remain available for replay and old-version retrieval.
   */
  public void close(boolean committed, boolean deleteOnClose) {
    recoveryExecutor.shutdown(); // no new tasks

    synchronized (this) {

      // Don't delete the old tlogs, we want to be able to replay from them and retrieve old versions

      doClose(prevTlog, committed);
      doClose(tlog, committed);

      for (TransactionLog log : logs) {
        if (log == prevTlog || log == tlog) continue;
        log.deleteOnClose = false;
        log.decref();
        log.forceClose();
      }

      if (bufferTlog != null) {
        // should not delete bufferTlog on close, existing bufferTlog is a sign for skip peerSync
        bufferTlog.deleteOnClose = false;
        bufferTlog.decref();
        bufferTlog.forceClose();
      }

    }

    try {
      ExecutorUtil.shutdownAndAwaitTermination(recoveryExecutor);
    } catch (Exception e) {
      SolrException.log(log, e);
    }
  }

  /** Lightweight record of one logged update: which log it lives in, where, and its version(s). */
  static class Update {
    TransactionLog log;
    long version;
    long previousVersion; // for in-place updates
    long pointer;
  }

  /** Lightweight record of one logged delete-by-id: version plus the binary id. */
  static class DeleteUpdate {
    long version;
    byte[] id;

    public DeleteUpdate(long version, byte[] id) {
      this.version = version;
      this.id = id;
    }
  }

  /**
   * A snapshot of the most recent updates across all live tlogs, used e.g. for peer sync.
   * Holds a reference on every log in logList until {@link #close()} is called.
   */
  public class RecentUpdates implements Closeable {
    final Deque<TransactionLog> logList;    // newest first
    List<List<Update>> updateList;          // one inner list per log, newest log first
    HashMap<Long, Update> updates;          // version -> update
    List<Update> deleteByQueryList;
    List<DeleteUpdate> deleteList;
    Set<Long> bufferUpdates = new HashSet<>();

    public RecentUpdates(Deque<TransactionLog> logList) {
      this.logList = logList;
      boolean success = false;
      try {
        update();
        success = true;
      } finally {
        // defensive: if some unknown exception is thrown,
        // make sure we close so that the tlogs are decref'd
        if (!success) {
          close();
        }
      }
    }

    public List<Long> getVersions(int n){
      return getVersions(n, Long.MAX_VALUE);
    }

    public Set<Long> getBufferUpdates() {
      return Collections.unmodifiableSet(bufferUpdates);
    }

    /** Returns up to n recent versions whose absolute value does not exceed |maxVersion|, newest first. */
    public List<Long> getVersions(int n, long maxVersion) {
      List<Long> ret = new ArrayList<>(n);

      for (List<Update> singleList : updateList) {
        for (Update ptr : singleList) {
          if(Math.abs(ptr.version) > Math.abs(maxVersion)) continue;
          ret.add(ptr.version);
          if (--n <= 0) return ret;
        }
      }

      return ret;
    }

    /** Looks up and deserializes the log entry recorded for the given version, or null if unknown. */
    public Object lookup(long version) {
      Update update = updates.get(version);
      if (update == null) return null;

      return update.log.lookup(update.pointer);
    }

    /** Returns the list of deleteByQueries that happened after the given version */
    public List<Object> getDeleteByQuery(long afterVersion) {
      List<Object> result = new ArrayList<>(deleteByQueryList.size());
      for (Update update : deleteByQueryList) {
        if (Math.abs(update.version) > afterVersion) {
          Object dbq = update.log.lookup(update.pointer);
          result.add(dbq);
        }
      }
      return result;
    }

    /** Reads the logs backwards (newest entry first) to populate the snapshot structures. */
    private void update() {
      int numUpdates = 0;
      updateList = new ArrayList<>(logList.size());
      deleteByQueryList = new ArrayList<>();
      deleteList = new ArrayList<>();
      updates = new HashMap<>(numRecordsToKeep);

      for (TransactionLog oldLog : logList) {
        List<Update> updatesForLog = new ArrayList<>();

        TransactionLog.ReverseReader reader = null;
        try {
          reader = oldLog.getReverseReader();

          while (numUpdates < numRecordsToKeep) {
            Object o = null;
            try {
              o = reader.next();
              if (o==null) break;

              // should currently be a List<Oper,Ver,Doc/Id>
              @SuppressWarnings({"rawtypes"})
              List entry = (List)o;

              // TODO: refactor this out so we get common error handling
              int opAndFlags = (Integer)entry.get(UpdateLog.FLAGS_IDX);
              int oper = opAndFlags & UpdateLog.OPERATION_MASK;
              long version = (Long) entry.get(UpdateLog.VERSION_IDX);
              if (oldLog.isBuffer) bufferUpdates.add(version);

              switch (oper) {
                case UpdateLog.ADD:
                case UpdateLog.UPDATE_INPLACE:
                case UpdateLog.DELETE:
                case UpdateLog.DELETE_BY_QUERY:
                  Update update = new Update();
                  update.log = oldLog;
                  update.pointer = reader.position();
                  update.version = version;

                  if (oper == UpdateLog.UPDATE_INPLACE) {
                    // CDCR log entries carry one extra element, hence the size difference
                    if ((update.log instanceof CdcrTransactionLog && entry.size() == 6) ||
                        (!(update.log instanceof CdcrTransactionLog) && entry.size() == 5)) {
                      update.previousVersion = (Long) entry.get(UpdateLog.PREV_VERSION_IDX);
                    }
                  }
                  updatesForLog.add(update);
                  updates.put(version, update);

                  if (oper == UpdateLog.DELETE_BY_QUERY) {
                    deleteByQueryList.add(update);
                  } else if (oper == UpdateLog.DELETE) {
                    deleteList.add(new DeleteUpdate(version, (byte[])entry.get(2)));
                  }

                  break;

                case UpdateLog.COMMIT:
                  break;
                default:
                  throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
              }
            } catch (ClassCastException cl) {
              log.warn("Unexpected log entry or corrupt log. Entry={}", o, cl);
              // would be caused by a corrupt transaction log
            } catch (Exception ex) {
              log.warn("Exception reverse reading log", ex);
              break;
            }

            numUpdates++;
          }

        } catch (IOException | AssertionError e) { // catch AssertionError to handle certain test failures correctly
          // failure to read a log record isn't fatal
          log.error("Exception reading versions from log",e);
        } finally {
          if (reader != null) reader.close();
        }

        updateList.add(updatesForLog);
      }
    }

    @Override
    public void close() {
      // release the reference taken on each log when the snapshot was created
      for (TransactionLog log : logList) {
        log.decref();
      }
    }

    /** Returns the largest absolute version seen in this snapshot, or 0 if none. */
    public long getMaxRecentVersion() {
      long maxRecentVersion = 0L;
      if (updates != null) {
        for (Long key : updates.keySet()) maxRecentVersion = Math.max(maxRecentVersion, Math.abs(key.longValue()));
      }
      return maxRecentVersion;
    }
  }

  /** The RecentUpdates object returned must be closed after use */
  public RecentUpdates getRecentUpdates() {
    Deque<TransactionLog> logList;
    synchronized (this) {
      logList = new LinkedList<>(logs);
      for (TransactionLog log : logList) {
        log.incref();
      }
      if (prevTlog != null) {
        prevTlog.incref();
        logList.addFirst(prevTlog);
      }
      if (tlog != null) {
        tlog.incref();
        logList.addFirst(tlog);
      }
      if (bufferTlog != null) {
        bufferTlog.incref();
        logList.addFirst(bufferTlog);
      }
    }

    // TODO: what if I hand out a list of updates, then do an update, then hand out another list (and
    // one of the updates I originally handed out fell off the list).  Over-request?
    return new RecentUpdates(logList);
  }

  /** Transitions the ulog into the BUFFERING state, discarding any previous buffer tlogs. */
  public void bufferUpdates() {
    // recovery trips this assert under some race - even when
    // it checks the state first
    // assert state == State.ACTIVE;

    // block all updates to eliminate race conditions
    // reading state and acting on it in the distributed update processor
    versionInfo.blockUpdates();
    try {
      if (state != State.ACTIVE && state != State.BUFFERING) {
        // we don't currently have support for handling other states
        log.warn("Unexpected state for bufferUpdates: {}, Ignoring request", state);
        return;
      }
      dropBufferTlog();
      deleteBufferLogs();

      recoveryInfo = new RecoveryInfo();

      if (log.isInfoEnabled()) {
        log.info("Starting to buffer updates. {}", this);
      }

      state = State.BUFFERING;
    } finally {
      versionInfo.unblockUpdates();
    }
  }

  /** Returns true if we were able to drop buffered updates and return to the ACTIVE state */
  public boolean dropBufferedUpdates() {
    versionInfo.blockUpdates();
    try {
      if (state != State.BUFFERING) return false;

      if (log.isInfoEnabled()) {
        log.info("Dropping buffered updates {}", this);
      }

      dropBufferTlog();

      state = State.ACTIVE;
    } finally {
      versionInfo.unblockUpdates();
    }
    return true;
  }

  /** Releases and forgets the buffer tlog, if any. */
  private void dropBufferTlog() {
    synchronized (this) {
      if (bufferTlog != null) {
        bufferTlog.decref();
        bufferTlog = null;
      }
    }
  }


  /** Returns the Future to wait on, or null if no replay was needed */
  public Future<RecoveryInfo> applyBufferedUpdates() {
    // recovery trips this assert under some race - even when
    // it checks the state first
    // assert state == State.BUFFERING;

    // block all updates to eliminate race conditions
    // reading state and acting on it in the update processor
    versionInfo.blockUpdates();
    try {
      cancelApplyBufferUpdate = false;
      if (state != State.BUFFERING) return null;

      synchronized (this) {
        // handle case when no updates were received.
        if (bufferTlog == null) {
          state = State.ACTIVE;
          return null;
        }
        bufferTlog.incref(); // the replayer owns this reference; released via dropBufferTlog below
      }

      state = State.APPLYING_BUFFERED;
    } finally {
      versionInfo.unblockUpdates();
    }

    if (recoveryExecutor.isShutdown()) {
      throw new RuntimeException("executor is not running...");
    }
    ExecutorCompletionService<RecoveryInfo> cs = new ExecutorCompletionService<>(recoveryExecutor);
    LogReplayer replayer = new LogReplayer(Collections.singletonList(bufferTlog), true);
    return cs.submit(() -> {
      replayer.run();
      dropBufferTlog();
    }, recoveryInfo);
  }

  public State getState() {
    return state;
  }

  @Override
  public String toString() {
    return "FSUpdateLog{state="+getState()+", tlog="+tlog+"}";
  }


  public static Runnable testing_logReplayHook;  // called before each log read
  public static Runnable testing_logReplayFinishHook;  // called when log replay has finished

  protected RecoveryInfo recoveryInfo;

  /**
   * Replays a set of transaction logs through the update processor chain.
   * For an active log it blocks new updates near the end so the replay can finish cleanly.
   */
  class LogReplayer implements Runnable {
    private Logger loglog = log;  // set to something different?

    Deque<TransactionLog> translogs;
    TransactionLog.LogReader tlogReader;
    boolean activeLog;
    boolean finishing = false;  // state where we lock out other updates and finish those updates that snuck in before we locked
    boolean debug = loglog.isDebugEnabled();
    boolean inSortedOrder;

    public LogReplayer(List<TransactionLog> translogs, boolean activeLog) {
      this.translogs = new LinkedList<>();
      this.translogs.addAll(translogs);
      this.activeLog = activeLog;
    }

    public LogReplayer(List<TransactionLog> translogs, boolean activeLog, boolean inSortedOrder) {
      this(translogs, activeLog);
      this.inSortedOrder = inSortedOrder;
    }

    private SolrQueryRequest req;
    private SolrQueryResponse rsp;

    @Override
    public void run() {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set(DISTRIB_UPDATE_PARAM, FROMLEADER.toString());
      params.set(DistributedUpdateProcessor.LOG_REPLAY, "true");
      req = new LocalSolrQueryRequest(uhandler.core, params);
      rsp = new SolrQueryResponse();
      SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));    // setting request info will help logging

      try {
        for (; ; ) {
          TransactionLog translog = translogs.pollFirst();
          if (translog == null) break;
          doReplay(translog);
        }
      } catch (SolrException e) {
        if (e.code() == ErrorCode.SERVICE_UNAVAILABLE.code) {
          SolrException.log(log, e);
          recoveryInfo.failed = true;
        } else {
          recoveryInfo.errors++;
          SolrException.log(log, e);
        }
      } catch (Exception e) {
        recoveryInfo.errors++;
        SolrException.log(log, e);
      } finally {
        // change the state while updates are still blocked to prevent races
        state = State.ACTIVE;
        if (finishing) {

          // after replay, update the max from the index
          log.info("Re-computing max version from index after log re-play.");
          maxVersionFromIndex = null;
          getMaxVersionFromIndex();

          versionInfo.unblockUpdates();
        }

        // clean up in case we hit some unexpected exception and didn't get
        // to more transaction logs
        for (TransactionLog translog : translogs) {
          log.error("ERROR: didn't get to recover from tlog {}", translog);
          translog.decref();
        }
      }

      loglog.warn("Log replay finished. recoveryInfo={}", recoveryInfo);

      if (testing_logReplayFinishHook != null) testing_logReplayFinishHook.run();

      SolrRequestInfo.clearRequestInfo();
    }

    /** Replays a single tlog: streams its entries through the processor chain, then issues a final commit. */
    public void doReplay(TransactionLog translog) {
      try {
        loglog.warn("Starting log replay {} active={} starting pos={} inSortedOrder={}", translog, activeLog, recoveryInfo.positionOfStart, inSortedOrder);
        long lastStatusTime = System.nanoTime();
        if (inSortedOrder) {
          tlogReader = translog.getSortedReader(recoveryInfo.positionOfStart);
        } else {
          tlogReader = translog.getReader(recoveryInfo.positionOfStart);
        }

        // NOTE: we don't currently handle a core reload during recovery.  This would cause the core
        // to change underneath us.

        UpdateRequestProcessorChain processorChain = req.getCore().getUpdateProcessingChain(null);
        UpdateRequestProcessor proc = processorChain.createProcessor(req, rsp);
        // sorted replay must stay single-threaded, so no executor in that case
        OrderedExecutor executor = inSortedOrder ? null : req.getCore().getCoreContainer().getReplayUpdatesExecutor();
        AtomicInteger pendingTasks = new AtomicInteger(0);
        AtomicReference<SolrException> exceptionOnExecuteUpdate = new AtomicReference<>();

        long commitVersion = 0;
        int operationAndFlags = 0;
        long nextCount = 0;

        for (; ; ) {
          Object o = null;
          if (cancelApplyBufferUpdate) break;
          try {
            if (testing_logReplayHook != null) testing_logReplayHook.run();

            // periodically log replay progress (throttled by STATUS_TIME)
            if (nextCount++ % 1000 == 0) {
              long now = System.nanoTime();
              if (now - lastStatusTime > STATUS_TIME) {
                lastStatusTime = now;
                long cpos = tlogReader.currentPos();
                long csize = tlogReader.currentSize();
                if (log.isInfoEnabled()) {
                  loglog.info(
                      "log replay status {} active={} starting pos={} current pos={} current size={} % read={}",
                      translog, activeLog, recoveryInfo.positionOfStart, cpos, csize,
                      Math.floor(cpos / (double) csize * 100.));
                }
              }
            }

            o = null;
            o = tlogReader.next();
            if (o == null && activeLog) {
              if (!finishing) {
                // about to block all the updates including the tasks in the executor
                // therefore we must wait for them to be finished
                waitForAllUpdatesGetExecuted(pendingTasks);
                // from this point, remain updates will be executed in a single thread
                executor = null;

                // block to prevent new adds, but don't immediately unlock since
                // we could be starved from ever completing recovery.  Only unlock
                // after we've finished this recovery.
                // NOTE: our own updates won't be blocked since the thread holding a write lock can
                // lock a read lock.
                versionInfo.blockUpdates();
                finishing = true;
                o = tlogReader.next();
              } else {
                // we had previously blocked updates, so this "null" from the log is final.

                // Wait until our final commit to change the state and unlock.
                // This is only so no new updates are written to the current log file, and is
                // only an issue if we crash before the commit (and we are paying attention
                // to incomplete log files).
                //
                // versionInfo.unblockUpdates();
              }
            }
          } catch (Exception e) {
            SolrException.log(log, e);
          }

          if (o == null) break;
          // fail fast
          if (exceptionOnExecuteUpdate.get() != null) throw exceptionOnExecuteUpdate.get();

          try {
            // should currently be a List<Oper,Ver,Doc/Id>
            @SuppressWarnings({"rawtypes"})
            List entry = (List) o;
            operationAndFlags = (Integer) entry.get(UpdateLog.FLAGS_IDX);
            int oper = operationAndFlags & OPERATION_MASK;
            long version = (Long) entry.get(UpdateLog.VERSION_IDX);

            switch (oper) {
              case UpdateLog.UPDATE_INPLACE: // fall through to ADD
              case UpdateLog.ADD: {
                recoveryInfo.adds++;
                AddUpdateCommand cmd = convertTlogEntryToAddUpdateCommand(req, entry, oper, version);
                cmd.setFlags(UpdateCommand.REPLAY | UpdateCommand.IGNORE_AUTOCOMMIT);
                if (debug) log.debug("{} {}", oper == ADD ? "add" : "update", cmd);
                execute(cmd, executor, pendingTasks, proc, exceptionOnExecuteUpdate);
                break;
              }
              case UpdateLog.DELETE: {
                recoveryInfo.deletes++;
                byte[] idBytes = (byte[]) entry.get(2);
                DeleteUpdateCommand cmd = new DeleteUpdateCommand(req);
                cmd.setIndexedId(new BytesRef(idBytes));
                cmd.setVersion(version);
                cmd.setFlags(UpdateCommand.REPLAY | UpdateCommand.IGNORE_AUTOCOMMIT);
                if (debug) log.debug("delete {}", cmd);
                execute(cmd, executor, pendingTasks, proc, exceptionOnExecuteUpdate);
                break;
              }
              case UpdateLog.DELETE_BY_QUERY: {
                recoveryInfo.deleteByQuery++;
                String query = (String) entry.get(2);
                DeleteUpdateCommand cmd = new DeleteUpdateCommand(req);
                cmd.query = query;
                cmd.setVersion(version);
                cmd.setFlags(UpdateCommand.REPLAY | UpdateCommand.IGNORE_AUTOCOMMIT);
                if (debug) log.debug("deleteByQuery {}", cmd);
                waitForAllUpdatesGetExecuted(pendingTasks);
                // DBQ will be executed in the same thread
                execute(cmd, null, pendingTasks, proc, exceptionOnExecuteUpdate);
                break;
              }
              case UpdateLog.COMMIT: {
                commitVersion = version;
                break;
              }
              default:
                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
            }

            if (rsp.getException() != null) {
              loglog.error("REPLAY_ERR: Exception replaying log {}", rsp.getException());
              throw rsp.getException();
            }
            if (state == State.REPLAYING) {
              replayOpsMeter.mark();
            } else if (state == State.APPLYING_BUFFERED) {
              applyingBufferedOpsMeter.mark();
            } else {
              // XXX should not happen?
            }
          } catch (ClassCastException cl) {
            recoveryInfo.errors++;
            loglog.warn("REPLAY_ERR: Unexpected log entry or corrupt log. Entry={}", o, cl);
            // would be caused by a corrupt transaction log
          } catch (Exception ex) {
            recoveryInfo.errors++;
            loglog.warn("REPLAY_ERR: Exception replaying log", ex);
            // something wrong with the request?
          }
          assert TestInjection.injectUpdateLogReplayRandomPause();
        }

        waitForAllUpdatesGetExecuted(pendingTasks);
        if (exceptionOnExecuteUpdate.get() != null) throw exceptionOnExecuteUpdate.get();

        CommitUpdateCommand cmd = new CommitUpdateCommand(req, false);
        cmd.setVersion(commitVersion);
        cmd.softCommit = false;
        cmd.waitSearcher = true;
        cmd.setFlags(UpdateCommand.REPLAY);
        try {
          if (debug) log.debug("commit {}", cmd);
          uhandler.commit(cmd);          // this should cause a commit to be added to the incomplete log and avoid it being replayed again after a restart.
        } catch (IOException ex) {
          recoveryInfo.errors++;
          loglog.error("Replay exception: final commit.", ex);
        }

        if (!activeLog) {
          // if we are replaying an old tlog file, we need to add a commit to the end
          // so we don't replay it again if we restart right after.
translog.writeCommit(cmd); } try { proc.finish(); } catch (IOException ex) { recoveryInfo.errors++; loglog.error("Replay exception: finish()", ex); } finally { IOUtils.closeQuietly(proc); } } finally { if (tlogReader != null) tlogReader.close(); translog.decref(); } } private void waitForAllUpdatesGetExecuted(AtomicInteger pendingTasks) { TimeOut timeOut = new TimeOut(Integer.MAX_VALUE, TimeUnit.MILLISECONDS, TimeSource.CURRENT_TIME); try { timeOut.waitFor("Timeout waiting for replay updates finish", () -> { //TODO handle the case when there are no progress after a long time return pendingTasks.get() == 0; }); } catch (TimeoutException e) { throw new SolrException(ErrorCode.SERVER_ERROR, e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new SolrException(ErrorCode.SERVER_ERROR, e); } } private Integer getBucketHash(UpdateCommand cmd) { if (cmd instanceof AddUpdateCommand) { BytesRef idBytes = ((AddUpdateCommand)cmd).getIndexedId(); if (idBytes == null) return null; return DistributedUpdateProcessor.bucketHash(idBytes); } if (cmd instanceof DeleteUpdateCommand) { BytesRef idBytes = ((DeleteUpdateCommand)cmd).getIndexedId(); if (idBytes == null) return null; return DistributedUpdateProcessor.bucketHash(idBytes); } return null; } private void execute(UpdateCommand cmd, OrderedExecutor executor, AtomicInteger pendingTasks, UpdateRequestProcessor proc, AtomicReference<SolrException> exceptionHolder) { assert cmd instanceof AddUpdateCommand || cmd instanceof DeleteUpdateCommand; if (executor != null) { // by using the same hash as DUP, independent updates can avoid waiting for same bucket executor.execute(getBucketHash(cmd), () -> { try { // fail fast if (exceptionHolder.get() != null) return; if (cmd instanceof AddUpdateCommand) { proc.processAdd((AddUpdateCommand) cmd); } else { proc.processDelete((DeleteUpdateCommand) cmd); } } catch (IOException e) { recoveryInfo.errors++; loglog.warn("REPLAY_ERR: IOException reading log", e); // could 
be caused by an incomplete flush if recovering from log } catch (SolrException e) { if (e.code() == ErrorCode.SERVICE_UNAVAILABLE.code) { exceptionHolder.compareAndSet(null, e); return; } recoveryInfo.errors++; loglog.warn("REPLAY_ERR: IOException reading log", e); } finally { pendingTasks.decrementAndGet(); } }); pendingTasks.incrementAndGet(); } else { try { if (cmd instanceof AddUpdateCommand) { proc.processAdd((AddUpdateCommand) cmd); } else { proc.processDelete((DeleteUpdateCommand) cmd); } } catch (IOException e) { recoveryInfo.errors++; loglog.warn("REPLAY_ERR: IOException replaying log", e); // could be caused by an incomplete flush if recovering from log } catch (SolrException e) { if (e.code() == ErrorCode.SERVICE_UNAVAILABLE.code) { throw e; } recoveryInfo.errors++; loglog.warn("REPLAY_ERR: IOException replaying log", e); } } } } /** * Given a entry from the transaction log containing a document, return a new AddUpdateCommand that * can be applied to ADD the document or do an UPDATE_INPLACE. * * @param req The request to use as the owner of the new AddUpdateCommand * @param entry Entry from the transaction log that contains the document to be added * @param operation The value of the operation flag; this must be either ADD or UPDATE_INPLACE -- * if it is UPDATE_INPLACE then the previous version will also be read from the entry * @param version Version already obtained from the entry. 
*/ public static AddUpdateCommand convertTlogEntryToAddUpdateCommand(SolrQueryRequest req, @SuppressWarnings({"rawtypes"})List entry, int operation, long version) { assert operation == UpdateLog.ADD || operation == UpdateLog.UPDATE_INPLACE; SolrInputDocument sdoc = (SolrInputDocument) entry.get(entry.size()-1); AddUpdateCommand cmd = new AddUpdateCommand(req); cmd.solrDoc = sdoc; cmd.setVersion(version); if (operation == UPDATE_INPLACE) { long prevVersion = (Long) entry.get(UpdateLog.PREV_VERSION_IDX); cmd.prevVersion = prevVersion; } return cmd; } ThreadPoolExecutor recoveryExecutor = new ExecutorUtil.MDCAwareThreadPoolExecutor(0, Integer.MAX_VALUE, 1, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), new SolrNamedThreadFactory("recoveryExecutor")); public static void deleteFile(File file) { boolean success = false; try { Files.deleteIfExists(file.toPath()); success = true; } catch (Exception e) { log.error("Error deleting file: {}", file, e); } if (!success) { try { file.deleteOnExit(); } catch (Exception e) { log.error("Error deleting file on exit: {}", file, e); } } } protected String getTlogDir(SolrCore core, PluginInfo info) { String dataDir = (String) info.initArgs.get("dir"); String ulogDir = core.getCoreDescriptor().getUlogDir(); if (ulogDir != null) { dataDir = ulogDir; } if (dataDir == null || dataDir.length() == 0) { dataDir = core.getDataDir(); } return dataDir + "/" + TLOG_NAME; } /** * Clears the logs on the file system. Only call before init. * * @param core the SolrCore * @param ulogPluginInfo the init info for the UpdateHandler */ public void clearLog(SolrCore core, PluginInfo ulogPluginInfo) { if (ulogPluginInfo == null) return; File tlogDir = new File(getTlogDir(core, ulogPluginInfo)); if (tlogDir.exists()) { String[] files = getLogList(tlogDir); for (String file : files) { File f = new File(tlogDir, file); try { Files.delete(f.toPath()); } catch (IOException cause) { // NOTE: still throws SecurityException as before. 
log.error("Could not remove tlog file:{}", f, cause); } } } } public Long getCurrentMaxVersion() { return maxVersionFromIndex; } // this method is primarily used for unit testing and is not part of the public API for this class Long getMaxVersionFromIndex() { RefCounted<SolrIndexSearcher> newestSearcher = (uhandler != null && uhandler.core != null) ? uhandler.core.getRealtimeSearcher() : null; if (newestSearcher == null) throw new IllegalStateException("No searcher available to lookup max version from index!"); try { seedBucketsWithHighestVersion(newestSearcher.get()); return getCurrentMaxVersion(); } finally { newestSearcher.decref(); } } /** * Used to seed all version buckets with the max value of the version field in the index. */ protected Long seedBucketsWithHighestVersion(SolrIndexSearcher newSearcher, VersionInfo versions) { Long highestVersion = null; final RTimer timer = new RTimer(); try (RecentUpdates recentUpdates = getRecentUpdates()) { long maxVersionFromRecent = recentUpdates.getMaxRecentVersion(); long maxVersionFromIndex = versions.getMaxVersionFromIndex(newSearcher); long maxVersion = Math.max(maxVersionFromIndex, maxVersionFromRecent); if (maxVersion == 0L) { maxVersion = versions.getNewClock(); log.info("Could not find max version in index or recent updates, using new clock {}", maxVersion); } // seed all version buckets with the highest value from recent and index versions.seedBucketsWithHighestVersion(maxVersion); highestVersion = maxVersion; } catch (IOException ioExc) { log.warn("Failed to determine the max value of the version field due to: ", ioExc); } if (debug) { log.debug("Took {}ms to seed version buckets with highest version {}", timer.getTime(), highestVersion); } return highestVersion; } public void seedBucketsWithHighestVersion(SolrIndexSearcher newSearcher) { log.debug("Looking up max value of version field to seed version buckets"); versionInfo.blockUpdates(); try { maxVersionFromIndex = seedBucketsWithHighestVersion(newSearcher, 
versionInfo); } finally { versionInfo.unblockUpdates(); } } }
/* * Copyright (c) 2019, Adam <Adam@sigterm.info> * Copyright (c) 2017, Robbie <https://github.com/rbbi> * Copyright (c) 2018, SomeoneWithAnInternetConnection * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package net.runelite.client.plugins.grandexchange; import com.google.common.reflect.TypeToken; import com.google.gson.stream.JsonReader; import com.google.inject.Provides; import io.reactivex.schedulers.Schedulers; import java.awt.image.BufferedImage; import java.io.IOException; import java.io.InputStreamReader; import java.util.Map; import java.util.concurrent.ScheduledExecutorService; import javax.inject.Inject; import javax.inject.Singleton; import javax.swing.SwingUtilities; import lombok.AccessLevel; import lombok.Getter; import lombok.Setter; import lombok.extern.slf4j.Slf4j; import net.runelite.api.ChatMessageType; import net.runelite.api.Client; import net.runelite.api.GameState; import net.runelite.api.GrandExchangeOffer; import net.runelite.api.GrandExchangeOfferState; import net.runelite.api.InventoryID; import net.runelite.api.Item; import net.runelite.api.ItemContainer; import net.runelite.api.ItemDefinition; import static net.runelite.api.ItemID.COINS_995; import net.runelite.api.MenuOpcode; import net.runelite.api.Varbits; import net.runelite.api.events.ChatMessage; import net.runelite.api.events.ConfigChanged; import net.runelite.api.events.FocusChanged; import net.runelite.api.events.GameStateChanged; import net.runelite.api.events.GameTick; import net.runelite.api.events.GrandExchangeOfferChanged; import net.runelite.api.events.MenuEntryAdded; import net.runelite.api.events.ScriptCallbackEvent; import net.runelite.api.events.WidgetLoaded; import net.runelite.api.widgets.Widget; import net.runelite.api.widgets.WidgetID; import net.runelite.api.widgets.WidgetInfo; import net.runelite.client.Notifier; import net.runelite.client.account.AccountSession; import net.runelite.client.account.SessionManager; import net.runelite.client.callback.ClientThread; import net.runelite.client.config.ConfigManager; import net.runelite.client.eventbus.EventBus; import net.runelite.client.events.SessionClose; import net.runelite.client.events.SessionOpen; import 
net.runelite.client.game.ItemManager; import net.runelite.client.input.KeyManager; import net.runelite.client.input.MouseManager; import net.runelite.client.plugins.Plugin; import net.runelite.client.plugins.PluginDescriptor; import net.runelite.client.ui.ClientToolbar; import net.runelite.client.ui.NavigationButton; import net.runelite.client.util.ImageUtil; import net.runelite.client.util.StackFormatter; import net.runelite.api.util.Text; import net.runelite.http.api.RuneLiteAPI; import net.runelite.http.api.ge.GrandExchangeClient; import net.runelite.http.api.ge.GrandExchangeTrade; import net.runelite.http.api.osbuddy.OSBGrandExchangeClient; @PluginDescriptor( name = "Grand Exchange", description = "Provide additional and/or easier access to Grand Exchange information", tags = {"external", "integration", "notifications", "panel", "prices", "trade"} ) @Slf4j @Singleton public class GrandExchangePlugin extends Plugin { static final String SEARCH_GRAND_EXCHANGE = "Search Grand Exchange"; private static final int OFFER_TYPE_BUY = 0; private static final int OFFER_CONTAINER_ITEM = 21; private static final int OFFER_QUANTITY_HEADING = 28; private static final String OFFER_QUANTITY_DEFAULT_HEADING = "Quantity:"; private static final int OFFER_DEFAULT_ITEM_ID = 6512; private static final OSBGrandExchangeClient CLIENT = new OSBGrandExchangeClient(); private static final String OSB_GE_TEXT = "<br>OSBuddy Actively traded price: "; private static final String BUY_LIMIT_GE_TEXT = "<br>Buy limit: "; private static final TypeToken<Map<Integer, Integer>> BUY_LIMIT_TOKEN = new TypeToken<Map<Integer, Integer>>() { }; @Getter(AccessLevel.PACKAGE) private NavigationButton button; @Getter(AccessLevel.PACKAGE) private GrandExchangePanel panel; @Getter(AccessLevel.PACKAGE) @Setter(AccessLevel.PACKAGE) private boolean hotKeyPressed; @Inject private GrandExchangeInputListener inputListener; @Inject private ItemManager itemManager; @Inject private MouseManager mouseManager; @Inject 
private KeyManager keyManager; @Inject private Client client; @Inject private ClientThread clientThread; @Inject private ClientToolbar clientToolbar; @Inject private GrandExchangeConfig config; @Inject private Notifier notifier; @Inject private ScheduledExecutorService executorService; @Inject private SessionManager sessionManager; @Inject private ConfigManager configManager; @Inject private EventBus eventBus; private Widget grandExchangeText; private Widget grandExchangeItem; private Widget grandExchangeOfferQuantityHeading; private Map<Integer, Integer> itemGELimits; private GrandExchangeClient grandExchangeClient; private boolean quickLookup; private boolean enableNotifications; private boolean enableOsbPrices; private boolean enableGELimits; private boolean enableAfford; private static Map<Integer, Integer> loadGELimits() throws IOException { try (final JsonReader geLimitData = new JsonReader(new InputStreamReader(GrandExchangePlugin.class.getResourceAsStream("ge_limits.json")))) { final Map<Integer, Integer> itemGELimits = RuneLiteAPI.GSON.fromJson(geLimitData, BUY_LIMIT_TOKEN.getType()); log.debug("Loaded {} limits", itemGELimits.size()); return itemGELimits; } } private SavedOffer getOffer(int slot) { String offer = configManager.getConfiguration("geoffer." + client.getUsername().toLowerCase(), Integer.toString(slot)); if (offer == null) { return null; } return RuneLiteAPI.GSON.fromJson(offer, SavedOffer.class); } private void setOffer(int slot, SavedOffer offer) { configManager.setConfiguration("geoffer." + client.getUsername().toLowerCase(), Integer.toString(slot), RuneLiteAPI.GSON.toJson(offer)); } private void deleteOffer(int slot) { configManager.unsetConfiguration("geoffer." 
+ client.getUsername().toLowerCase(), Integer.toString(slot)); } @Provides GrandExchangeConfig provideConfig(ConfigManager configManager) { return configManager.getConfig(GrandExchangeConfig.class); } @Override protected void startUp() throws Exception { updateConfig(); addSubscriptions(); itemGELimits = loadGELimits(); panel = injector.getInstance(GrandExchangePanel.class); panel.setGELimits(itemGELimits); final BufferedImage icon = ImageUtil.getResourceStreamFromClass(getClass(), "ge_icon.png"); button = NavigationButton.builder() .tooltip("Grand Exchange") .icon(icon) .priority(3) .panel(panel) .build(); clientToolbar.addNavigation(button); if (this.quickLookup) { mouseManager.registerMouseListener(inputListener); keyManager.registerKeyListener(inputListener); } AccountSession accountSession = sessionManager.getAccountSession(); if (accountSession != null) { grandExchangeClient = new GrandExchangeClient(accountSession.getUuid()); } } @Override protected void shutDown() { eventBus.unregister(this); clientToolbar.removeNavigation(button); mouseManager.unregisterMouseListener(inputListener); keyManager.unregisterKeyListener(inputListener); grandExchangeText = null; grandExchangeItem = null; grandExchangeOfferQuantityHeading = null; itemGELimits = null; grandExchangeClient = null; } private void addSubscriptions() { eventBus.subscribe(ConfigChanged.class, this, this::onConfigChanged); eventBus.subscribe(GameTick.class, this, this::onGameTick); eventBus.subscribe(ChatMessage.class, this, this::onChatMessage); eventBus.subscribe(SessionOpen.class, this, this::onSessionOpen); eventBus.subscribe(SessionClose.class, this, this::onSessionClose); eventBus.subscribe(GrandExchangeOfferChanged.class, this, this::onGrandExchangeOfferChanged); eventBus.subscribe(GameStateChanged.class, this, this::onGameStateChanged); eventBus.subscribe(MenuEntryAdded.class, this, this::onMenuEntryAdded); eventBus.subscribe(FocusChanged.class, this, this::onFocusChanged); 
eventBus.subscribe(WidgetLoaded.class, this, this::onWidgetLoaded); eventBus.subscribe(ScriptCallbackEvent.class, this, this::onScriptCallbackEvent); } private void onSessionOpen(SessionOpen sessionOpen) { AccountSession accountSession = sessionManager.getAccountSession(); if (accountSession.getUuid() != null) { grandExchangeClient = new GrandExchangeClient(accountSession.getUuid()); } else { grandExchangeClient = null; } } private void updateConfig() { this.quickLookup = config.quickLookup(); this.enableNotifications = config.enableNotifications(); this.enableOsbPrices = config.enableOsbPrices(); this.enableGELimits = config.enableGELimits(); this.enableAfford = config.enableAfford(); } private void onSessionClose(SessionClose sessionClose) { grandExchangeClient = null; } private void onConfigChanged(ConfigChanged event) { if (event.getGroup().equals("grandexchange")) { updateConfig(); if (event.getKey().equals("quickLookup")) { if (this.quickLookup) { mouseManager.registerMouseListener(inputListener); keyManager.registerKeyListener(inputListener); } else { mouseManager.unregisterMouseListener(inputListener); keyManager.unregisterKeyListener(inputListener); } } } } private void onGrandExchangeOfferChanged(GrandExchangeOfferChanged offerEvent) { final int slot = offerEvent.getSlot(); final GrandExchangeOffer offer = offerEvent.getOffer(); ItemDefinition offerItem = itemManager.getItemDefinition(offer.getItemId()); boolean shouldStack = offerItem.isStackable() || offer.getTotalQuantity() > 1; BufferedImage itemImage = itemManager.getImage(offer.getItemId(), offer.getTotalQuantity(), shouldStack); SwingUtilities.invokeLater(() -> panel.getOffersPanel().updateOffer(offerItem, itemImage, offer, slot)); submitTrades(slot, offer); updateConfig(slot, offer); } private void submitTrades(int slot, GrandExchangeOffer offer) { if (grandExchangeClient == null) { return; } // Only interested in offers which are fully bought/sold if (offer.getState() != 
GrandExchangeOfferState.BOUGHT && offer.getState() != GrandExchangeOfferState.SOLD) { return; } SavedOffer savedOffer = getOffer(slot); if (!shouldUpdate(savedOffer, offer)) { return; } // getPrice() is the price of the offer, not necessarily what the item bought at int priceEach = offer.getSpent() / offer.getTotalQuantity(); GrandExchangeTrade grandExchangeTrade = new GrandExchangeTrade(); grandExchangeTrade.setBuy(offer.getState() == GrandExchangeOfferState.BOUGHT); grandExchangeTrade.setItemId(offer.getItemId()); grandExchangeTrade.setQuantity(offer.getTotalQuantity()); grandExchangeTrade.setPrice(priceEach); log.debug("Submitting trade: {}", grandExchangeTrade); grandExchangeClient.submit(grandExchangeTrade); } private void updateConfig(int slot, GrandExchangeOffer offer) { if (offer.getState() == GrandExchangeOfferState.EMPTY) { deleteOffer(slot); } else { SavedOffer savedOffer = new SavedOffer(); savedOffer.setItemId(offer.getItemId()); savedOffer.setQuantitySold(offer.getQuantitySold()); savedOffer.setTotalQuantity(offer.getTotalQuantity()); savedOffer.setPrice(offer.getPrice()); savedOffer.setSpent(offer.getSpent()); savedOffer.setState(offer.getState()); setOffer(slot, savedOffer); } } private boolean shouldUpdate(SavedOffer savedOffer, GrandExchangeOffer grandExchangeOffer) { if (savedOffer == null) { return false; } // Only update offer if state has changed return savedOffer.getState() != grandExchangeOffer.getState(); } private void onChatMessage(ChatMessage event) { if (!this.enableNotifications || event.getType() != ChatMessageType.GAMEMESSAGE) { return; } String message = Text.removeTags(event.getMessage()); if (message.startsWith("Grand Exchange:")) { this.notifier.notify(message); } } private void onGameStateChanged(GameStateChanged gameStateChanged) { if (gameStateChanged.getGameState() == GameState.LOGIN_SCREEN) { panel.getOffersPanel().resetOffers(); } } private void onMenuEntryAdded(MenuEntryAdded menuEntry) { // At the moment, if the user 
disables quick lookup, the input listener gets disabled. Thus, isHotKeyPressed() // should always return false when quick lookup is disabled. // Replace the default option with "Search ..." when holding alt if (client.getGameState() != GameState.LOGGED_IN || !hotKeyPressed) { return; } final int widgetId = menuEntry.getParam1(); final int groupId = WidgetInfo.TO_GROUP(widgetId); switch (groupId) { case WidgetID.BANK_GROUP_ID: // Don't show for view tabs and such if (WidgetInfo.TO_CHILD(widgetId) != WidgetInfo.BANK_ITEM_CONTAINER.getChildId()) { break; } case WidgetID.INVENTORY_GROUP_ID: case WidgetID.BANK_INVENTORY_GROUP_ID: case WidgetID.GRAND_EXCHANGE_INVENTORY_GROUP_ID: case WidgetID.SHOP_INVENTORY_GROUP_ID: menuEntry.setOption(SEARCH_GRAND_EXCHANGE); menuEntry.setOpcode(MenuOpcode.RUNELITE.getId()); menuEntry.setModified(true); } } private void onFocusChanged(FocusChanged focusChanged) { if (!focusChanged.isFocused()) { setHotKeyPressed(false); } } private void onWidgetLoaded(WidgetLoaded event) { switch (event.getGroupId()) { case WidgetID.GRAND_EXCHANGE_GROUP_ID: Widget grandExchangeOffer = client.getWidget(WidgetInfo.GRAND_EXCHANGE_OFFER_CONTAINER); grandExchangeText = client.getWidget(WidgetInfo.GRAND_EXCHANGE_OFFER_TEXT); grandExchangeItem = grandExchangeOffer.getDynamicChildren()[OFFER_CONTAINER_ITEM]; grandExchangeOfferQuantityHeading = grandExchangeOffer.getDynamicChildren()[OFFER_QUANTITY_HEADING]; break; case WidgetID.INVENTORY_GROUP_ID: grandExchangeText = null; grandExchangeItem = null; grandExchangeOfferQuantityHeading = null; break; } } private void onScriptCallbackEvent(ScriptCallbackEvent event) { if (!event.getEventName().equals("setGETitle") || !config.showTotal()) { return; } long total = 0; GrandExchangeOffer[] offers = client.getGrandExchangeOffers(); for (GrandExchangeOffer offer : offers) { if (offer != null) { total += offer.getPrice() * offer.getTotalQuantity(); } } if (total == 0L) { return; } StringBuilder titleBuilder = new 
StringBuilder(" ("); if (config.showExact()) { titleBuilder.append(StackFormatter.formatNumber(total)); } else { titleBuilder.append(StackFormatter.quantityToStackSize(total)); } titleBuilder.append(')'); String[] stringStack = client.getStringStack(); int stringStackSize = client.getStringStackSize(); stringStack[stringStackSize - 1] += titleBuilder.toString(); } private void onGameTick(GameTick event) { if (grandExchangeText == null || grandExchangeItem == null || grandExchangeItem.isHidden()) { return; } final Widget geText = grandExchangeText; final String geTextString = geText.getText(); final Widget geQuantityHeading = grandExchangeOfferQuantityHeading; final int offerType = client.getVar(Varbits.GE_OFFER_CREATION_TYPE); final int itemId = grandExchangeItem.getItemId(); if (itemId == OFFER_DEFAULT_ITEM_ID || itemId == -1) { return; } if (this.enableAfford && offerType == OFFER_TYPE_BUY) { final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY); int coins = 0; if (inventory != null) { for (final Item item : inventory.getItems()) { if (item.getId() == COINS_995) { coins = item.getQuantity(); break; } } } final int currentItemPrice = client.getVar(Varbits.GRAND_EXCHANGE_PRICE_PER_ITEM); final int canAfford = currentItemPrice != 0 ? 
coins / currentItemPrice : 0; final String quantityHeadingText = OFFER_QUANTITY_DEFAULT_HEADING + " (" + canAfford + ")"; geQuantityHeading.setText(quantityHeadingText); } else { geQuantityHeading.setText(OFFER_QUANTITY_DEFAULT_HEADING); } if (this.enableGELimits && itemGELimits != null && !geTextString.contains(BUY_LIMIT_GE_TEXT)) { final Integer itemLimit = itemGELimits.get(itemId); // If we have item buy limit, append it if (itemLimit != null) { final String text = geText.getText() + BUY_LIMIT_GE_TEXT + StackFormatter.formatNumber(itemLimit); geText.setText(text); } } if (!this.enableOsbPrices || geTextString.contains(OSB_GE_TEXT)) { // OSB prices are disabled or price was already looked up, so no need to set it again return; } log.debug("Looking up OSB item price {}", itemId); executorService.submit(() -> { if (geText.getText().contains(OSB_GE_TEXT)) { // If there are multiple tasks queued and one of them have already added the price return; } CLIENT.lookupItem(itemId) .subscribeOn(Schedulers.io()) .observeOn(Schedulers.from(clientThread)) .subscribe( (osbresult) -> { final String text = geText.getText() + OSB_GE_TEXT + StackFormatter.formatNumber(osbresult.getOverall_average()); if (geText.getText().contains(OSB_GE_TEXT)) { // If there are multiple tasks queued and one of them have already added the price return; } geText.setText(text); }, (e) -> log.debug("Error getting price of item {}", itemId, e) ); }); } }
/*L * Copyright Northwestern University. * * Distributed under the OSI-approved BSD 3-Clause License. * See http://ncip.github.io/psc/LICENSE.txt for details. */ package edu.northwestern.bioinformatics.studycalendar.web.subject; import edu.northwestern.bioinformatics.studycalendar.core.Fixtures; import edu.northwestern.bioinformatics.studycalendar.core.StudyCalendarTestCase; import edu.northwestern.bioinformatics.studycalendar.domain.ScheduledActivity; import java.util.Calendar; import java.util.Date; import java.util.Iterator; import static gov.nih.nci.cabig.ctms.lang.DateTools.createDate; /** * @author Rhett Sutphin */ public class ScheduleDayTest extends StudyCalendarTestCase { private static final Date TODAY = new Date(); private ScheduleDay day = new ScheduleDay(TODAY, TODAY); public void testEmptyWhenNoActivitiesAtAll() throws Exception { assertTrue(day.isEmpty()); } public void testNotEmptyWhenHasAnActivity() throws Exception { day.getActivities().add(Fixtures.createScheduledActivity("A", 2008, Calendar.JANUARY, 4)); assertFalse(day.isEmpty()); } public void testNotEmptyWhenHasHiddenActivities() throws Exception { day.setHasHiddenActivities(true); assertFalse(day.isEmpty()); } public void testNaturalOrderIsByDate() throws Exception { ScheduleDay day1 = createDay(2008, Calendar.SEPTEMBER, 1); ScheduleDay day2 = createDay(2008, Calendar.SEPTEMBER, 2); assertNegative(day1.compareTo(day2)); assertPositive(day2.compareTo(day1)); } public void testDetailTimelineClassesAlwaysIncludesDay() throws Exception { assertContains(day.getDetailTimelineClasses(), "day"); } public void testDetailTimelineClassesIncludesDateClass() throws Exception { assertContains(createDay(2005, Calendar.MARCH, 30).getDetailTimelineClasses(), "date-2005-03-30"); } public void testDetailClassesIncludesMonthStartWhenAtStartOfMonth() throws Exception { assertContains(createDay(2008, Calendar.SEPTEMBER, 1).getDetailTimelineClasses(), "month-start"); } public void 
testDetailClassesDoesNotIncludeMonthStartWhenNotAtStartOfMonth() throws Exception { assertNotContains(createDay(2008, Calendar.SEPTEMBER, 2).getDetailTimelineClasses(), "month-start"); } public void testDetailClassesIncludesYearStartWhenAtStartOfYear() throws Exception { assertContains(createDay(2008, Calendar.JANUARY, 1).getDetailTimelineClasses(), "year-start"); } public void testDetailClassesDoesNotIncludeYearStartWhenNotAtStartOfYear() throws Exception { assertNotContains(createDay(2008, Calendar.DECEMBER, 31).getDetailTimelineClasses(), "year-start"); } public void testDetailClassesIncludesTodayWhenToday() throws Exception { assertContains(day.getDetailTimelineClasses(), "today"); } public void testDetailClassesDoesNotIncludeTodayWhenNotToday() throws Exception { assertNotContains(createDay(2007, Calendar.DECEMBER, 31).getDetailTimelineClasses(), "today"); } public void testDetailClassesIncludesHasActivitiesWhenHas() throws Exception { day.getActivities().add(new ScheduledActivity()); assertContains(day.getDetailTimelineClasses(), "has-activities"); } public void testDetailClassesDoesNotIncludeHasActivitiesWhenDoesNotHave() throws Exception { assertNotContains(day.getDetailTimelineClasses(), "has-activities"); } public void testIsTodayWhenItIs() throws Exception { assertTrue(day.isToday()); } public void testIsTodayWhenItIsWithTimestamp() throws Exception { Calendar laterToday = Calendar.getInstance(); laterToday.setTime(TODAY); laterToday.add(Calendar.SECOND, 3); assertTrue(new ScheduleDay(laterToday.getTime(), TODAY).isToday()); } public void testIsTodayWhenItIsNot() throws Exception { assertFalse(createDay(2006, Calendar.MARCH, 6).isToday()); } public void testActivitiesArePresentedInNaturalOrder() throws Exception { day.getActivities().add(Fixtures.createScheduledActivity("B", 2008, Calendar.JANUARY, 4)); day.getActivities().add(Fixtures.createScheduledActivity("A", 2008, Calendar.JANUARY, 4)); Iterator<ScheduledActivity> it = 
day.getActivities().iterator(); assertEquals("Wrong first activity", "A", it.next().getActivity().getName()); assertEquals("Wrong second activity", "B", it.next().getActivity().getName()); } private static ScheduleDay createDay(int year, int month, int day) { return new ScheduleDay(createDate(year, month, day), TODAY); } }
package jadx.samples;

import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;

/**
 * Sample class exercising the many shapes Java enums can take (plain constants,
 * constructor args, constant-specific bodies, interface implementation, empty
 * enum, enum singleton, switch-on-enum). Package name indicates this is a test
 * fixture for the jadx decompiler — the exact code shape is the point, so it
 * should not be refactored casually.
 */
public class TestEnum extends AbstractTest {

    // Simplest possible enum: four plain constants.
    public enum Direction {
        NORTH, SOUTH, EAST, WEST
    }

    // NOTE(review): this constant deliberately collides with Animal.DOG below —
    // presumably to exercise name-resolution in decompiled output.
    public static final String DOG = "DOG";

    public enum Animal {
        CAT, DOG
    }

    // Referenced from the Numbers constant initializers below; relies on
    // class-initialization order (TestEnum's static init runs before Numbers
    // is first touched from testRun()).
    private static int three = 3;

    // Enum with a constructor argument, including args computed from an
    // external static field.
    public enum Numbers {
        ONE(1), TWO(2), THREE(three), FOUR(three + 1);

        private final int num;

        private Numbers(int n) {
            this.num = n;
        }

        public int getNum() {
            return num;
        }
    }

    // Enum with constant-specific class bodies overriding an abstract method.
    public enum Operation {
        PLUS {
            @Override
            int apply(int x, int y) {
                return x + y;
            }
        },
        MINUS {
            @Override
            int apply(int x, int y) {
                return x - y;
            }
        };

        abstract int apply(int x, int y);
    }

    public interface IOps {
        double apply(double x, double y);
    }

    // Enum implementing an interface, with both a constructor arg and
    // constant-specific bodies.
    public enum DoubleOperations implements IOps {
        TIMES("*") {
            @Override
            public double apply(double x, double y) {
                return x * y;
            }
        },
        DIVIDE("/") {
            @Override
            public double apply(double x, double y) {
                return x / y;
            }
        };

        private final String op;

        private DoubleOperations(String op) {
            this.op = op;
        }

        public String getOp() {
            return op;
        }
    }

    // Enum with its own static state and a static initializer block.
    public enum Types {
        INT, FLOAT, LONG, DOUBLE, OBJECT, ARRAY;

        private static Set<Types> primitives = EnumSet.of(INT, FLOAT, LONG, DOUBLE);
        public static List<Types> references = new ArrayList<Types>();

        static {
            references.add(OBJECT);
            references.add(ARRAY);
        }

        public static Set<Types> getPrimitives() {
            return primitives;
        }
    }

    // Enum with zero constants but a static member — values() returns length 0.
    public enum EmptyEnum {
        ;
        public static String getOp() {
            return "op";
        }
    }

    // Classic single-constant enum singleton.
    public enum Singleton {
        INSTANCE;

        public String test(String arg) {
            return arg.concat("test");
        }
    }

    /**
     * Switch over an enum value; unmatched constants (EAST, WEST) fall to default.
     */
    public String testEnumSwitch(final Direction color) {
        String d;
        switch (color) {
            case NORTH:
                d = "N";
                break;
            case SOUTH:
                d = "S";
                break;
            default:
                d = "<>";
                break;
        }
        return d;
    }

    /**
     * Self-check exercising every enum variant declared above.
     * @return true when all assertions pass
     */
    @Override
    public boolean testRun() throws Exception {
        Direction d = Direction.EAST;
        assertTrue(d.toString().equals("EAST"));
        assertTrue(d.ordinal() == 2); // EAST is the third declared constant
        assertTrue(Numbers.THREE.getNum() == 3);
        assertTrue(Operation.PLUS.apply(2, 2) == 4);
        assertTrue(DoubleOperations.TIMES.apply(1, 1) == 1);
        assertTrue(Types.getPrimitives().contains(Types.INT));
        assertTrue(Types.references.size() == 2);
        assertTrue(EmptyEnum.values().length == 0);
        assertTrue(EmptyEnum.getOp().equals("op"));
        assertTrue(Singleton.INSTANCE.test("a").equals("atest"));
        return true;
    }

    public static void main(String[] args) throws Exception {
        new TestEnum().testRun();
    }
}
package moze_intel.projecte.common.loot;

import java.util.HashSet;
import java.util.Set;
import javax.annotation.Nonnull;
import moze_intel.projecte.gameObjs.registries.PEBlocks;
import net.minecraft.advancements.critereon.StatePropertiesPredicate;
import net.minecraft.data.loot.BlockLoot;
import net.minecraft.world.level.ItemLike;
import net.minecraft.world.level.block.Block;
import net.minecraft.world.level.block.TntBlock;
import net.minecraft.world.level.storage.loot.LootPool;
import net.minecraft.world.level.storage.loot.LootTable;
import net.minecraft.world.level.storage.loot.entries.LootItem;
import net.minecraft.world.level.storage.loot.predicates.LootItemBlockStatePropertyCondition;
import net.minecraft.world.level.storage.loot.providers.number.ConstantValue;

/**
 * Loot-table data generator for ProjectE blocks. Most blocks simply drop
 * themselves; the two "nova" TNT blocks only drop when not primed/unstable.
 */
public class PEBlockLootTable extends BlockLoot {

	// Every block we registered a table for; reported back via getKnownBlocks()
	// so the datagen framework can validate coverage.
	private final Set<Block> knownBlocks = new HashSet<>();

	@Override
	protected void addTables() {
		dropAll(
				PEBlocks.AETERNALIS_FUEL.getBlock(),
				PEBlocks.ALCHEMICAL_CHEST.getBlock(),
				PEBlocks.ALCHEMICAL_COAL.getBlock(),
				PEBlocks.COLLECTOR.getBlock(),
				PEBlocks.COLLECTOR_MK2.getBlock(),
				PEBlocks.COLLECTOR_MK3.getBlock(),
				PEBlocks.CONDENSER.getBlock(),
				PEBlocks.CONDENSER_MK2.getBlock(),
				PEBlocks.DARK_MATTER.getBlock(),
				PEBlocks.DARK_MATTER_FURNACE.getBlock(),
				PEBlocks.DARK_MATTER_PEDESTAL.getBlock(),
				PEBlocks.INTERDICTION_TORCH.getBlock(),
				PEBlocks.MOBIUS_FUEL.getBlock(),
				PEBlocks.RED_MATTER.getBlock(),
				PEBlocks.RED_MATTER_FURNACE.getBlock(),
				PEBlocks.RELAY.getBlock(),
				PEBlocks.RELAY_MK2.getBlock(),
				PEBlocks.RELAY_MK3.getBlock(),
				PEBlocks.TRANSMUTATION_TABLE.getBlock()
		);
		registerCustomTNT(PEBlocks.NOVA_CATACLYSM.getBlock());
		registerCustomTNT(PEBlocks.NOVA_CATALYST.getBlock());
	}

	/** Registers a plain self-drop table for each of the given blocks. */
	private void dropAll(Block... blocks) {
		for (Block block : blocks) {
			dropSelf(block);
		}
	}

	@Override
	public void dropOther(@Nonnull Block block, @Nonnull ItemLike drop) {
		// Route through our own dropping() so the generated pool gets the "main" name.
		add(block, dropping(drop));
	}

	/** Single-roll pool named "main" dropping exactly {@code item}, explosion-protected. */
	protected static LootTable.Builder dropping(ItemLike item) {
		LootPool.Builder pool = LootPool.lootPool()
				.setRolls(ConstantValue.exactly(1))
				.name("main")
				.add(LootItem.lootTableItem(item));
		return LootTable.lootTable().withPool(applyExplosionCondition(item, pool));
	}

	/** TNT variants only drop while the UNSTABLE blockstate property is false. */
	private void registerCustomTNT(Block tnt) {
		LootItemBlockStatePropertyCondition.Builder notUnstable =
				LootItemBlockStatePropertyCondition.hasBlockStateProperties(tnt)
						.setProperties(StatePropertiesPredicate.Builder.properties().hasProperty(TntBlock.UNSTABLE, false));
		add(tnt, LootTable.lootTable().withPool(applyExplosionCondition(tnt,
				LootPool.lootPool().setRolls(ConstantValue.exactly(1))
						.name("main")
						.add(LootItem.lootTableItem(tnt).when(notUnstable)))));
	}

	@Override
	protected void add(@Nonnull Block block, @Nonnull LootTable.Builder table) {
		// Intercept the core registration so every table also lands in knownBlocks.
		super.add(block, table);
		knownBlocks.add(block);
	}

	@Nonnull
	@Override
	protected Iterable<Block> getKnownBlocks() {
		return knownBlocks;
	}
}
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;

import ui.AbstractUI;

/**
 * Launchers for the simple machine simulator. Each nested class is a
 * preconfigured entry point that pins an architecture and variant; the real
 * work is delegated to {@link AbstractUI#main}.
 */
public class SimpleMachine {

	/** SM213 ISA, solution variant. */
	public final static class Sm213 {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "sm213", "solution");
		}
	}

	/** SM213 ISA with virtual memory, solution variant. */
	public final static class Sm213Vm {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "sm213-vm", "solution");
		}
	}

	/** Y86 sequential implementation, solution variant. */
	public final static class Y86Seq {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "y86-seq", "solution");
		}
	}

	/** Y86 pipeline without forwarding, solution variant. */
	public final static class Y86PipeMinus {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "y86-pipe-minus", "solution");
		}
	}

	/** Y86 full pipeline, solution variant. */
	public final static class Y86Pipe {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "y86-pipe", "solution");
		}
	}

	/** Y86 superscalar pipeline, solution variant. */
	public final static class Y86PipeSuper {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "y86-pipesuper", "solution");
		}
	}

	/** SM213 ISA, student variant. */
	public final static class Sm213Student {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "sm213", "student");
		}
	}

	/** SM213 ISA with virtual memory, student variant. */
	public final static class Sm213VmStudent {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "sm213-vm", "student");
		}
	}

	/** Y86 sequential implementation, student variant. */
	public final static class Y86SeqStudent {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "y86-seq", "student");
		}
	}

	/** Y86 pipeline without forwarding, student variant. */
	public final static class Y86PipeMinusStudent {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "y86-pipe-minus", "student");
		}
	}

	/** Y86 full pipeline, student variant. */
	public final static class Y86PipeStudent {
		public static void main(String[] args) {
			SimpleMachine.launch(args, "y86-pipe", "student");
		}
	}

	/**
	 * Generic entry-point for executing the simple machine.
	 * @param args command-line arguments, using the following syntax:
	 *     "-i [cli|gui] -a [sm213|y86seq|y86pipeminus|y86pipe|y86pipesuper] -v [solution|student]".
	 *     additional arguments are defined by specific user-interface implementation.
	 */
	public final static void main(String[] args) {
		AbstractUI.main(args);
	}

	/**
	 * Appends "-a arch" and "-v variant" defaults when the caller did not
	 * supply them, then forwards to the generic entry point.
	 */
	private static void launch(String[] args, String arch, String variant) {
		List<String> extended = new ArrayList<String>(Arrays.asList(args));
		if (!extended.contains("-a")) {
			extended.add("-a");
			extended.add(arch);
		}
		if (!extended.contains("-v")) {
			extended.add("-v");
			extended.add(variant);
		}
		SimpleMachine.main(extended.toArray(new String[0]));
	}
}
package jetbrains.mps.lang.editor.diagram.constraints;

/*Generated by MPS */

// NOTE: this file is generated by JetBrains MPS from the language definition;
// edit the originating model, not this Java source, or changes will be lost.

import jetbrains.mps.smodel.runtime.base.BaseConstraintsDescriptor;
import jetbrains.mps.smodel.runtime.ConstraintFunction;
import jetbrains.mps.smodel.runtime.ConstraintContext_CanBeChild;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import jetbrains.mps.smodel.runtime.CheckingNodeContext;
import org.jetbrains.mps.openapi.model.SNode;
import org.jetbrains.mps.openapi.language.SAbstractConcept;
import org.jetbrains.mps.openapi.language.SContainmentLink;
import jetbrains.mps.lang.smodel.generator.smodelAdapter.SNodeOperations;
import jetbrains.mps.smodel.SNodePointer;
import org.jetbrains.mps.openapi.language.SConcept;
import jetbrains.mps.smodel.adapter.structure.MetaAdapterFactory;

/**
 * Constraints descriptor for the FigureParameterMapping concept: such a node
 * may only appear as a child of a CellModel_DiagramNode.
 */
public class FigureParameterMapping_Constraints extends BaseConstraintsDescriptor {
  public FigureParameterMapping_Constraints() {
    super(CONCEPTS.FigureParameterMapping$LN);
  }

  @Override
  protected ConstraintFunction<ConstraintContext_CanBeChild, Boolean> calculateCanBeChildConstraint() {
    return new ConstraintFunction<ConstraintContext_CanBeChild, Boolean>() {
      @NotNull
      public Boolean invoke(@NotNull ConstraintContext_CanBeChild context, @Nullable CheckingNodeContext checkingNodeContext) {
        boolean result = staticCanBeAChild(context.getNode(), context.getParentNode(), context.getConcept(), context.getLink());
        // On violation, record which constraint node in the language model broke.
        if (!(result) && checkingNodeContext != null) {
          checkingNodeContext.setBreakingNode(canBeChildBreakingPoint);
        }

        return result;
      }
    };
  }

  // Parent must be (an instance of) CellModel_DiagramNode; all other args unused here.
  private static boolean staticCanBeAChild(SNode node, SNode parentNode, SAbstractConcept childConcept, SContainmentLink link) {
    return SNodeOperations.isInstanceOf(parentNode, CONCEPTS.CellModel_DiagramNode$Jl);
  }

  // Pointer to the "can be child" constraint node in the source language model,
  // reported as the breaking node when the constraint fails.
  private static final SNodePointer canBeChildBreakingPoint = new SNodePointer("r:1af2ba06-e725-4940-9c06-d6b80c641b75(jetbrains.mps.lang.editor.diagram.constraints)", "1227128029536565015");

  private static final class CONCEPTS {
    /*package*/ static final SConcept FigureParameterMapping$LN = MetaAdapterFactory.getConcept(0x6106f6117a7442d1L, 0x80deedc5c602bfd1L, 0xf301bf106a7d7c7L, "jetbrains.mps.lang.editor.diagram.structure.FigureParameterMapping");
    /*package*/ static final SConcept CellModel_DiagramNode$Jl = MetaAdapterFactory.getConcept(0x6106f6117a7442d1L, 0x80deedc5c602bfd1L, 0xf301bf106a326e1L, "jetbrains.mps.lang.editor.diagram.structure.CellModel_DiagramNode");
  }
}
package com.florian.rifts.blocks; import net.fabricmc.fabric.api.object.builder.v1.block.FabricBlockSettings; import net.minecraft.block.Block; import net.minecraft.block.Material; public class BurnedSeaLantern extends Block { public BurnedSeaLantern() { super(FabricBlockSettings.of(Material.AGGREGATE) .strength(0.3f, 0f)); } }
package edu.ucla.library.bucketeer.converters; /** * A type of conversion. First pass will be simple, but future expansion could include LOSSY_80PERCENT, * LOSSY_90PERCENT, etc. */ public enum Conversion { LOSSY, LOSSLESS; }
/* * Copyright (c) 2017, Andreas Fagschlunger. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * - Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package at.o2xfs.xfs.idc; import at.o2xfs.xfs.api.XfsConstant; public enum DataSource implements XfsConstant { /* * @since v3.00 */ NOTSUPP(0x0000), /* * @since v3.00 */ TRACK1(0x0001), /* * @since v3.00 */ TRACK2(0x0002), /* * @since v3.00 */ TRACK3(0x0004), /* * @since v3.00 */ CHIP(0x0008), /* * @since v3.00 */ SECURITY(0x0010), /* * @since v3.00 */ FLUXINACTIVE(0x0020), /* * @since v3.00 */ TRACK_WM(0x8000), /* * @since v3.10 */ MEMORY_CHIP(0x0040), /* * @since v3.10 */ FRONTIMAGE(0x0100), /* * @since v3.10 */ BACKIMAGE(0x0200), /* * @since v3.10 */ FRONT_TRACK_1(0x0080), /* * @since v3.20 */ TRACK1_JIS1(0x0400), /* * @since v3.20 */ TRACK3_JIS1(0x0800), /* * @since v3.20 */ DDI(0x4000); private final long value; private DataSource(final long value) { this.value = value; } @Override public long getValue() { return value; } }
package kensydanielle.dependencyinjection.model; import kensydanielle.dependencyinjection.interfaces.IAnimal; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.context.annotation.Primary; import org.springframework.stereotype.Component; /** * @author daniok * @since 16/04/2020 * @version 1.0 */ @Component @Primary @Qualifier("dog") public class Dog implements IAnimal { @Override public void comunicar() { System.out.println("AAAAAAAAAAAAUUUUUUUUUUUUUU"); } }
package com.example.opengles.adapter; import android.support.annotation.Nullable; import android.widget.ImageView; import com.bumptech.glide.Glide; import com.chad.library.adapter.base.BaseQuickAdapter; import com.chad.library.adapter.base.BaseViewHolder; import com.example.opengles.R; import com.example.opengles.models.RvData; import java.util.List; public class CubeAdapter extends BaseQuickAdapter<RvData, BaseViewHolder> { public CubeAdapter(int layoutResId, @Nullable List<RvData> data) { super(layoutResId, data); } @Override protected void convert(BaseViewHolder helper, RvData item) { helper.setText(R.id.tv_item_cube, item.getName()); Glide.with(mContext).load(item.getUrl()).into((ImageView) helper.getView(R.id.iv_item_cube)); } }
/* * Original author: Daniel Jaschob <djaschob .at. uw.edu> * * Copyright 2018 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.yeastrc.limelight.limelight_webapp.dao; import org.springframework.transaction.annotation.Propagation; import org.springframework.transaction.annotation.Transactional; import org.yeastrc.limelight.limelight_webapp.db_dto.UserForgotPasswordTrackingDTO; /** * @author danj * */ public interface UserForgotPasswordTrackingDAO_IF { /** * @param forgotPasswordTrackingCode * @return null if not found * @throws Exception */ UserForgotPasswordTrackingDTO getForForgotPasswordTrackingCode(String forgotPasswordTrackingCode) throws Exception; /** * @param item * @throws Exception */ void save(UserForgotPasswordTrackingDTO item) throws Exception; /** * Update used_date = NOW() , useIP = ? * @param id * @param useIP * @throws Exception */ void updateUsedDateUseIP(int id, String useIP) throws Exception; /** * Update code_replaced_by_newer = ? * @param id - ID to use in id < ? comparison * @param codeReplacedByNewer * @throws Exception */ void updateCodeReplacedByNewer(int id, boolean codeReplacedByNewer) throws Exception; }
package controlers;

import java.io.IOException;
import java.io.PrintWriter;
import java.sql.Date;
import java.sql.SQLException;

import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;

import models.Personne;
import models.Role;
import services.ServicePersonne;

/**
 * Servlet implementation class ServletInscription.
 *
 * GET renders the registration form; POST creates the account, then (on
 * success) authenticates the new user and stores their profile in the HTTP
 * session. The response body is the integer result of the creation attempt
 * (1 = created).
 */
@WebServlet("/inscription")
public class ServletInscription extends HttpServlet {
	private static final long serialVersionUID = 1L;

	/** Renders the registration page. */
	protected void doGet(HttpServletRequest request, HttpServletResponse response)
			throws ServletException, IOException {
		request.getRequestDispatcher("WEB-INF/views/SiteWeb/Pages/Inscription.jsp").include(request, response);
	}

	/**
	 * Creates the account from the form fields and writes the numeric result
	 * to the response; on success also opens a session for the new user.
	 */
	protected void doPost(HttpServletRequest request, HttpServletResponse response)
			throws ServletException, IOException {
		PrintWriter out = response.getWriter();
		// The HTML date input submits ISO format (yyyy-MM-dd), which is exactly
		// what java.sql.Date.valueOf parses. This replaces the deprecated
		// Date(int, int, int) constructor and the manual split()/offset
		// arithmetic of the previous version; behavior is identical for valid
		// input, and malformed input still fails with an unchecked exception.
		Date dateNaissance = Date.valueOf(request.getParameter("date"));
		Personne p = new Personne(request.getParameter("nom"), request.getParameter("prenom"),
				request.getParameter("login"), request.getParameter("password"), request.getParameter("cin"),
				request.getParameter("tel"), request.getParameter("adresse"), dateNaissance, null, Role.utilisateur,
				null, request.getParameter("sex"));
		int res = 0;
		try {
			res = ServicePersonne.AjouterPersonne(p, 1);
			// Account created: authenticate immediately and open a session.
			if (res == 1) {
				int resultat = ServicePersonne.AuthentificationPersonne(request.getParameter("login"),
						request.getParameter("password"));
				if (resultat > 0) {
					Personne pr = ServicePersonne.ChercherPersonneViaId(resultat);
					HttpSession session = request.getSession();
					session.setAttribute("profil", pr);
				}
			}
		} catch (Exception e) {
			// Best-effort: a failure is reported to the client as res == 0;
			// the stack trace is kept for server-side diagnosis.
			e.printStackTrace();
		}
		out.print(res);
	}
}
package org.jboss.resteasy.reactive.server.vertx.test.framework; import static org.junit.jupiter.api.Assertions.assertTrue; import io.vertx.core.Vertx; import io.vertx.core.http.HttpServer; import io.vertx.ext.web.Route; import io.vertx.ext.web.Router; import java.io.Closeable; import java.io.IOException; import java.nio.file.FileVisitResult; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.Executor; import java.util.concurrent.Executors; import java.util.function.Consumer; import java.util.function.Supplier; import java.util.logging.Handler; import java.util.logging.LogManager; import java.util.logging.LogRecord; import java.util.logging.Logger; import javax.ws.rs.core.Application; import javax.ws.rs.core.MediaType; import javax.ws.rs.ext.MessageBodyWriter; import org.jboss.jandex.ClassInfo; import org.jboss.jandex.DotName; import org.jboss.jandex.Index; import org.jboss.resteasy.reactive.common.ResteasyReactiveConfig; import org.jboss.resteasy.reactive.common.model.ResourceClass; import org.jboss.resteasy.reactive.common.model.ResourceWriter; import org.jboss.resteasy.reactive.common.processor.JandexUtil; import org.jboss.resteasy.reactive.common.processor.scanning.ApplicationScanningResult; import org.jboss.resteasy.reactive.common.processor.scanning.ResourceScanningResult; import org.jboss.resteasy.reactive.common.processor.scanning.ResteasyReactiveInterceptorScanner; import org.jboss.resteasy.reactive.common.processor.scanning.ResteasyReactiveScanner; import org.jboss.resteasy.reactive.server.core.Deployment; import org.jboss.resteasy.reactive.server.core.DeploymentInfo; import 
org.jboss.resteasy.reactive.server.core.ServerSerialisers;
import org.jboss.resteasy.reactive.server.core.startup.RuntimeDeploymentManager;
import org.jboss.resteasy.reactive.server.handlers.RestInitialHandler;
import org.jboss.resteasy.reactive.server.processor.ServerEndpointIndexer;
import org.jboss.resteasy.reactive.server.processor.scanning.ResteasyReactiveContextResolverScanner;
import org.jboss.resteasy.reactive.server.processor.scanning.ResteasyReactiveExceptionMappingScanner;
import org.jboss.resteasy.reactive.server.processor.scanning.ResteasyReactiveFeatureScanner;
import org.jboss.resteasy.reactive.server.processor.scanning.ResteasyReactiveParamConverterScanner;
import org.jboss.resteasy.reactive.server.providers.serialisers.ServerStringMessageBodyHandler;
import org.jboss.resteasy.reactive.server.vertx.ResteasyReactiveVertxHandler;
import org.jboss.resteasy.reactive.server.vertx.VertxRequestContextFactory;
import org.jboss.resteasy.reactive.spi.BeanFactory;
import org.jboss.resteasy.reactive.spi.ThreadSetupAction;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.exporter.ExplodedExporter;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.junit.jupiter.api.extension.AfterAllCallback;
import org.junit.jupiter.api.extension.BeforeAllCallback;
import org.junit.jupiter.api.extension.ExtensionContext;

/**
 * JUnit 5 extension that stands up a full RESTEasy Reactive deployment on an
 * embedded Vert.x HTTP server for a test class: it exports the test archive
 * to a temp directory, Jandex-indexes it, scans resources/providers, builds a
 * DeploymentInfo, deploys it, and routes all requests through the resulting
 * handler. Vert.x, the HTTP server and the router are shared statically across
 * test classes; only the route handler is re-wired per deployment.
 */
public class ResteasyReactiveUnitTest implements BeforeAllCallback, AfterAllCallback {

    private static final Logger rootLogger;
    private Handler[] originalHandlers;

    static {
        // Must be set before the first LogManager access so JBoss LogManager is used.
        System.setProperty("java.util.logging.manager", "org.jboss.logmanager.LogManager");
        rootLogger = (Logger) LogManager.getLogManager().getLogger("");
    }

    // Temp directory the test archive is exploded into (deleted in afterAll).
    private Path deploymentDir;
    private Consumer<Throwable> assertException;
    private Supplier<JavaArchive> archiveProducer;
    private Consumer<List<LogRecord>> assertLogRecords;
    // Watchdog that dumps all thread stacks if a test class runs > 5 minutes.
    private Timer timeoutTimer;
    private volatile TimerTask timeoutTask;
    // NOTE(review): this handler is created but never added to rootLogger here
    // (the setHandlers call in afterAll is commented out), so assertLogRecords
    // will likely observe an empty record list — confirm intended.
    private InMemoryLogHandler inMemoryLogHandler = new InMemoryLogHandler((r) -> false);

    // Shared across all test classes in the JVM; initialized lazily in beforeAll.
    static Vertx vertx;
    static HttpServer httpServer;
    static Router router;
    static Route route;
    static Executor executor = Executors.newFixedThreadPool(10);
    List<Closeable> closeTasks = new ArrayList<>();

    /**
     * Convenience wrapper: asserts that the failure's cause chain contains the
     * given exception type.
     */
    public ResteasyReactiveUnitTest setExpectedException(Class<? extends Throwable> expectedException) {
        return assertException(t -> {
            Throwable i = t;
            boolean found = false;
            // Walk the cause chain looking for the expected type (by name).
            while (i != null) {
                if (i.getClass().getName().equals(expectedException.getName())) {
                    found = true;
                    break;
                }
                i = i.getCause();
            }

            assertTrue(found, "Build failed with wrong exception, expected " + expectedException + " but got " + t);
        });
    }

    public ResteasyReactiveUnitTest() {
    }

    /** Registers a failure assertion; may only be set once per test instance. */
    public ResteasyReactiveUnitTest assertException(Consumer<Throwable> assertException) {
        if (this.assertException != null) {
            throw new IllegalStateException("Don't set the asserted or excepted exception twice"
                    + " to avoid shadowing out the first call.");
        }
        this.assertException = assertException;
        return this;
    }

    public Supplier<JavaArchive> getArchiveProducer() {
        return archiveProducer;
    }

    /** Supplies the ShrinkWrap archive to deploy; test class hierarchy is always added on top. */
    public ResteasyReactiveUnitTest setArchiveProducer(Supplier<JavaArchive> archiveProducer) {
        Objects.requireNonNull(archiveProducer);
        this.archiveProducer = archiveProducer;
        return this;
    }

    /** Registers a log-record assertion run in afterAll; may only be set once. */
    public ResteasyReactiveUnitTest assertLogRecords(Consumer<List<LogRecord>> assertLogRecords) {
        if (this.assertLogRecords != null) {
            throw new IllegalStateException("Don't set the a log record assertion twice"
                    + " to avoid shadowing out the first call.");
        }
        this.assertLogRecords = assertLogRecords;
        return this;
    }

    /**
     * Adds the test class, its nested classes and its superclass chain to the
     * archive, then explodes the archive into deploymentDir for indexing.
     */
    private void exportArchive(Path deploymentDir, Class<?> testClass) {
        try {
            JavaArchive archive = getArchiveProducerOrDefault();
            Class<?> c = testClass;
            archive.addClasses(c.getClasses());
            while (c != Object.class) {
                archive.addClass(c);
                c = c.getSuperclass();
            }
            archive.as(ExplodedExporter.class).exportExplodedInto(deploymentDir.toFile());
        } catch (Exception e) {
            throw new RuntimeException("Unable to create the archive", e);
        }
    }

    // Falls back to an empty archive when no producer was configured.
    private JavaArchive getArchiveProducerOrDefault() {
        if (archiveProducer == null) {
            return ShrinkWrap.create(JavaArchive.class);
        } else {
            return archiveProducer.get();
        }
    }

    /**
     * Builds and deploys the RESTEasy Reactive application for the test class:
     * archive export -> Jandex index -> scans -> DeploymentInfo -> deploy ->
     * route wiring. Order of these steps matters.
     */
    @Override
    public void beforeAll(ExtensionContext extensionContext) throws Exception {
        originalHandlers = rootLogger.getHandlers();
        // Watchdog: dump all thread stacks if the test class runs too long.
        timeoutTask = new TimerTask() {
            @Override
            public void run() {
                System.err.println("Test has been running for more than 5 minutes, thread dump is:");
                for (Map.Entry<Thread, StackTraceElement[]> i : Thread.getAllStackTraces().entrySet()) {
                    System.err.println("\n");
                    System.err.println(i.toString());
                    System.err.println("\n");
                    for (StackTraceElement j : i.getValue()) {
                        System.err.println(j);
                    }
                }
            }
        };
        // NOTE(review): only the task is cancelled in afterAll, never the Timer
        // itself — the (non-daemon) timer thread may outlive the test run.
        timeoutTimer = new Timer("Test thread dump timer");
        timeoutTimer.schedule(timeoutTask, 1000 * 60 * 5);
        ExtensionContext.Store store = extensionContext.getRoot().getStore(ExtensionContext.Namespace.GLOBAL);
        Class<?> testClass = extensionContext.getRequiredTestClass();
        deploymentDir = Files.createTempDirectory("quarkus-unit-test");
        exportArchive(deploymentDir, testClass);
        // Lazily start the shared Vert.x HTTP server on first use; closed via the
        // JUnit root-store CloseableResource when the whole run ends.
        // NOTE(review): port 8080 is hardcoded — collisions with a local server
        // will fail the listen; confirm acceptable for this test framework.
        if (vertx == null) {
            vertx = Vertx.vertx();
            HttpServer server = vertx.createHttpServer();
            router = Router.router(vertx);
            server.requestHandler(router).listen(8080);
            store.put(ResteasyReactiveUnitTest.class.getName(), new ExtensionContext.Store.CloseableResource() {
                @Override
                public void close() throws Throwable {
                    server.close();
                    vertx.close();
                }
            });
        }
        Index index = JandexUtil.createIndex(deploymentDir);
        ApplicationScanningResult applicationScanningResult = ResteasyReactiveScanner.scanForApplicationClass(index);
        ResourceScanningResult resources = ResteasyReactiveScanner.scanResources(index);
        if (resources == null) {
            throw new RuntimeException("no JAX-RS resources found");
        }
        ServerEndpointIndexer serverEndpointIndexer = new ServerEndpointIndexer.Builder()
                .setIndex(index)
                .setScannedResourcePaths(resources.getScannedResourcePaths())
                .setClassLevelExceptionMappers(new HashMap<>())
                .setInjectableBeans(new HashMap<>())
                .setConfig(new ResteasyReactiveConfig(10000, true))
                .setHttpAnnotationToMethod(resources.getHttpAnnotationToMethod())
                .build();
        // Turn scanned classes into endpoint metadata; sub-resources are kept separate.
        List<ResourceClass> resourceClasses = new ArrayList<>();
        List<ResourceClass> subResourceClasses = new ArrayList<>();
        for (Map.Entry<DotName, ClassInfo> i : resources.getScannedResources().entrySet()) {
            ResourceClass res = serverEndpointIndexer.createEndpoints(i.getValue());
            resourceClasses.add(res);
        }
        for (Map.Entry<DotName, ClassInfo> i : resources.getPossibleSubResources().entrySet()) {
            ResourceClass res = serverEndpointIndexer.createEndpoints(i.getValue());
            subResourceClasses.add(res);
        }
        // Minimal serialiser set: a String writer for any media type.
        ServerSerialisers serialisers = new ServerSerialisers();
        serialisers.addWriter(String.class, new ResourceWriter()
                .setMediaTypeStrings(Collections.singletonList(MediaType.WILDCARD))
                .setFactory(new BeanFactory<MessageBodyWriter<?>>() {
                    @Override
                    public BeanInstance<MessageBodyWriter<?>> createInstance() {
                        return new BeanInstance<MessageBodyWriter<?>>() {
                            @Override
                            public MessageBodyWriter<?> getInstance() {
                                return new ServerStringMessageBodyHandler();
                            }

                            @Override
                            public void close() {
                            }
                        };
                    }
                }));
        DeploymentInfo info = new DeploymentInfo()
                .setApplicationPath("/")
                .setFeatures(ResteasyReactiveFeatureScanner.createFeatures(index, applicationScanningResult))
                .setInterceptors(
                        ResteasyReactiveInterceptorScanner.createResourceInterceptors(index, applicationScanningResult))
                .setDynamicFeatures(ResteasyReactiveFeatureScanner.createDynamicFeatures(index, applicationScanningResult))
                .setParamConverterProviders(
                        ResteasyReactiveParamConverterScanner.createParamConverters(index, applicationScanningResult))
                .setSerialisers(serialisers)
                .setExceptionMapping(
                        ResteasyReactiveExceptionMappingScanner.createExceptionMappers(index, applicationScanningResult))
                .setResourceClasses(resourceClasses)
                .setCtxResolvers(
                        ResteasyReactiveContextResolverScanner.createContextResolvers(index, applicationScanningResult))
                .setLocatableResourceClasses(subResourceClasses)
                // Instantiate the scanned Application subclass reflectively, or fall
                // back to a plain Application when none was found.
                .setApplicationSupplier(new Supplier<Application>() {
                    @Override
                    public Application get() {
                        if (applicationScanningResult.getSelectedAppClass() == null) {
                            return new Application();
                        } else {
                            try {
                                return (Application) Class
                                        .forName(applicationScanningResult.getSelectedAppClass().name().toString(), false,
                                                Thread.currentThread().getContextClassLoader())
                                        .newInstance();
                            } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
                                throw new RuntimeException(e);
                            }
                        }
                    }
                });
        RuntimeDeploymentManager runtimeDeploymentManager = new RuntimeDeploymentManager(info, () -> executor,
                closeable -> closeTasks.add(closeable), new VertxRequestContextFactory(), ThreadSetupAction.NOOP, "/");
        Deployment deployment = runtimeDeploymentManager.deploy();
        RestInitialHandler initialHandler = new RestInitialHandler(deployment);
        // Wire every request on the shared router into this deployment.
        router.route().handler(new ResteasyReactiveVertxHandler(initialHandler));
    }

    /**
     * Per-class teardown: run the log-record assertion, clear state, cancel the
     * watchdog task and delete the exploded archive directory.
     */
    @Override
    public void afterAll(ExtensionContext extensionContext) throws Exception {
        if (assertLogRecords != null) {
            assertLogRecords.accept(inMemoryLogHandler.records);
        }
        // NOTE(review): original root handlers are saved in beforeAll but the
        // restore below is commented out — confirm whether restoring is needed.
        //rootLogger.setHandlers(originalHandlers);
        inMemoryLogHandler.clearRecords();
        System.clearProperty("test.url");
        timeoutTask.cancel();
        timeoutTask = null;
        timeoutTimer = null;
        if (deploymentDir != null) {
            deleteDirectory(deploymentDir);
        }
    }

    /**
     * Recursively deletes a directory tree; individual delete failures are
     * deliberately ignored (best-effort temp cleanup).
     */
    public static void deleteDirectory(final Path directory) throws IOException {
        if (!Files.isDirectory(directory)) {
            return;
        }

        Files.walkFileTree(directory, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                try {
                    Files.delete(file);
                } catch (IOException e) {
                    // ignored
                }

                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                try {
                    Files.delete(dir);
                } catch (IOException e) {
                    // ignored
                }

                return FileVisitResult.CONTINUE;
            }
        });
    }
}
package cn.com.spdb.uds.db.bean; import cn.com.spdb.uds.utils.Symbol; public class UdsSystemBean { // 平台 private String platform; // 系统 private String system; // private short data_keep_day = 0; // private short log_keep_day = 0; // private short record_keep_day = 0; private int max_run_job = 0; private short strategy = 0; private String strategy_pro = ""; private byte use_platform = 1; public String getPlatform() { return platform; } public void setPlatform(String platfrom) { this.platform = platfrom; } public String getSystem() { return system; } public void setSystem(String system) { this.system = system; } public int getMax_run_job() { return max_run_job; } public void setMax_run_job(int max_run_job) { this.max_run_job = max_run_job; } public short getStrategy() { return strategy; } public void setStrategy(short strategy) { this.strategy = strategy; } public String getStrategy_pro() { return strategy_pro; } public void setStrategy_pro(String strategy_pro) { this.strategy_pro = strategy_pro; } public byte getUse_platform() { return use_platform; } public void setUse_platform(byte use_platform) { this.use_platform = use_platform; } public String getPlatformAndSystemKey() { return platform + Symbol.XIA_HUA_XIAN + system; } @Override public String toString() { return "UdsSystemBean [platform=" + platform + ", system=" + system + ", max_run_job=" + max_run_job + ", strategy=" + strategy + ", strategy_pro=" + strategy_pro + ", use_platform=" + use_platform + "]"; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((platform == null) ? 0 : platform.hashCode()); result = prime * result + ((system == null) ? 
0 : system.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; UdsSystemBean other = (UdsSystemBean) obj; if (platform == null) { if (other.platform != null) return false; } else if (!platform.equals(other.platform)) return false; if (system == null) { if (other.system != null) return false; } else if (!system.equals(other.system)) return false; return true; } }
/*
 * <summary></summary>
 * <author>He Han</author>
 * <email>hankcs.cn@gmail.com</email>
 * <create-date>2014/11/2 12:41</create-date>
 *
 * <copyright file="PinyinDictionary.java" company="上海林原信息科技有限公司">
 * Copyright (c) 2003-2014, 上海林原信息科技有限公司. All Right Reserved, http://www.linrunsoft.com/
 * This source is subject to the LinrunSpace License. Please contact 上海林原信息科技有限公司 to get more information.
 * </copyright>
 */
package com.hankcs.hanlp.dictionary.py;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.collection.AhoCorasick.AhoCorasickDoubleArrayTrie;
import com.hankcs.hanlp.collection.trie.DoubleArrayTrie;
import com.hankcs.hanlp.corpus.dictionary.StringDictionary;
import com.hankcs.hanlp.corpus.io.ByteArray;
import com.hankcs.hanlp.corpus.io.IOUtil;
import com.hankcs.hanlp.corpus.tag.Nature;
import com.hankcs.hanlp.dictionary.BaseSearcher;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.utility.Predefine;

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.util.*;

import static com.hankcs.hanlp.utility.Predefine.logger;

/**
 * Chinese-character-to-pinyin dictionary backed by an Aho-Corasick double-array
 * trie. The dictionary is loaded once in a static initializer from
 * {@code HanLP.Config.PinyinDictionaryPath}; a binary ".dat" image is cached
 * next to the text dictionary to speed up subsequent start-ups.
 *
 * @author hankcs
 */
public class PinyinDictionary
{
    // Trie mapping a text key to the pinyin sequence of its characters.
    static AhoCorasickDoubleArrayTrie<Pinyin[]> trie = new AhoCorasickDoubleArrayTrie<Pinyin[]>();
    // All Pinyin enum constants indexed by ordinal; used to decode the .dat cache.
    public static final Pinyin[] pinyins = Integer2PinyinConverter.pinyins;

    static
    {
        long start = System.currentTimeMillis();
        // Fail fast: the dictionary is mandatory for this class to be usable.
        if (!load(HanLP.Config.PinyinDictionaryPath))
        {
            throw new IllegalArgumentException("拼音词典" + HanLP.Config.PinyinDictionaryPath + "加载失败");
        }
        logger.info("拼音词典" + HanLP.Config.PinyinDictionaryPath + "加载成功,耗时" + (System.currentTimeMillis() - start) + "ms");
    }

    /**
     * Load the dictionary, preferring the pre-built binary cache.
     *
     * @param path path of the text dictionary (lines of "word=py1,py2,...")
     * @return true on success
     */
    static boolean load(String path)
    {
        if (loadDat(path)) return true;
        // Load from the text file, then build the trie and try to cache it as .dat
        StringDictionary dictionary = new StringDictionary("=");
        if (!dictionary.load(path)) return false;
        TreeMap<String, Pinyin[]> map = new TreeMap<String, Pinyin[]>();
        for (Map.Entry<String, String> entry : dictionary.entrySet())
        {
            String[] args = entry.getValue().split(",");
            Pinyin[] pinyinValue = new Pinyin[args.length];
            for (int i = 0; i < pinyinValue.length; ++i)
            {
                try
                {
                    // Each token must match a Pinyin enum constant name.
                    Pinyin pinyin = Pinyin.valueOf(args[i]);
                    pinyinValue[i] = pinyin;
                }
                catch (IllegalArgumentException e)
                {
                    logger.severe("读取拼音词典" + path + "失败,问题出在【" + entry + "】,异常是" + e);
                    return false;
                }
            }
            map.put(entry.getKey(), pinyinValue);
        }
        trie.build(map);
        logger.info("正在缓存双数组" + path);
        // Best effort: a failed save is logged inside saveDat, loading still succeeds.
        saveDat(path, trie, map.entrySet());
        return true;
    }

    /**
     * Try to load the binary cache at {@code path + BIN_EXT}.
     *
     * @param path path of the text dictionary (the cache path is derived from it)
     * @return false when the cache is absent or unreadable
     */
    static boolean loadDat(String path)
    {
        ByteArray byteArray = ByteArray.createByteArray(path + Predefine.BIN_EXT);
        if (byteArray == null) return false;
        int size = byteArray.nextInt();
        Pinyin[][] valueArray = new Pinyin[size][];
        for (int i = 0; i < valueArray.length; ++i)
        {
            int length = byteArray.nextInt();
            valueArray[i] = new Pinyin[length];
            for (int j = 0; j < length; ++j)
            {
                // Values were serialized as Pinyin ordinals (see saveDat).
                valueArray[i][j] = pinyins[byteArray.nextInt()];
            }
        }
        if (!trie.load(byteArray, valueArray)) return false;
        return true;
    }

    /**
     * Serialize the trie and its values to {@code path + BIN_EXT}.
     * Format: entry count, then for each entry its length followed by the
     * Pinyin ordinals, then the trie structure itself.
     *
     * @return false on any I/O failure (logged, not rethrown)
     */
    static boolean saveDat(String path, AhoCorasickDoubleArrayTrie<Pinyin[]> trie, Set<Map.Entry<String, Pinyin[]>> entrySet)
    {
        try
        {
            DataOutputStream out = new DataOutputStream(new BufferedOutputStream(IOUtil.newOutputStream(path + Predefine.BIN_EXT)));
            out.writeInt(entrySet.size());
            for (Map.Entry<String, Pinyin[]> entry : entrySet)
            {
                Pinyin[] value = entry.getValue();
                out.writeInt(value.length);
                for (Pinyin pinyin : value)
                {
                    out.writeInt(pinyin.ordinal());
                }
            }
            trie.save(out);
            out.close();
        }
        catch (Exception e)
        {
            logger.warning("缓存值dat" + path + "失败");
            return false;
        }
        return true;
    }

    /** Exact-key lookup in the trie; null when the key is absent. */
    public static Pinyin[] get(String key)
    {
        return trie.get(key);
    }

    /**
     * Convert text to pinyin.
     *
     * @param text source text
     * @return pinyin as a List with one entry per character ("character" here
     *         means any char; unmatched chars become Pinyin.none5)
     */
    public static List<Pinyin> convertToPinyin(String text)
    {
        return segLongest(text.toCharArray(), trie);
    }

    /**
     * Convert text to pinyin.
     *
     * @param remainNone when true, characters with no pinyin are kept as
     *                   Pinyin.none5 placeholders; when false they are dropped
     */
    public static List<Pinyin> convertToPinyin(String text, boolean remainNone)
    {
        return segLongest(text.toCharArray(), trie, remainNone);
    }

    /**
     * Convert text to pinyin.
     *
     * @param text source text
     * @return pinyin as an array
     */
    public static Pinyin[] convertToPinyinArray(String text)
    {
        return convertToPinyin(text).toArray(new Pinyin[0]);
    }

    /** Creates an incremental searcher over {@code charArray} using the given trie. */
    public static BaseSearcher getSearcher(char[] charArray, DoubleArrayTrie<Pinyin[]> trie)
    {
        return new Searcher(charArray, trie);
    }

    /**
     * Match pinyin with a longest-segmentation strategy (placeholders kept).
     *
     * @param charArray text to segment
     * @param trie      dictionary trie
     */
    protected static List<Pinyin> segLongest(char[] charArray, AhoCorasickDoubleArrayTrie<Pinyin[]> trie)
    {
        return segLongest(charArray, trie, true);
    }

    /**
     * Match pinyin with a longest-segmentation strategy: for every start
     * offset keep only the longest trie hit, then walk the offsets greedily.
     *
     * @param remainNone when true, uncovered characters yield Pinyin.none5
     */
    protected static List<Pinyin> segLongest(char[] charArray, AhoCorasickDoubleArrayTrie<Pinyin[]> trie, boolean remainNone)
    {
        // wordNet[i] holds the pinyin of the longest match starting at i (or null).
        final Pinyin[][] wordNet = new Pinyin[charArray.length][];
        trie.parseText(charArray, new AhoCorasickDoubleArrayTrie.IHit<Pinyin[]>()
        {
            @Override
            public void hit(int begin, int end, Pinyin[] value)
            {
                int length = end - begin;
                // Longer matches win; single-char matches take just the first pinyin.
                if (wordNet[begin] == null || length > wordNet[begin].length)
                {
                    wordNet[begin] = length == 1 ? new Pinyin[]{value[0]} : value;
                }
            }
        });
        List<Pinyin> pinyinList = new ArrayList<Pinyin>(charArray.length);
        for (int offset = 0; offset < wordNet.length; )
        {
            if (wordNet[offset] == null)
            {
                // No dictionary entry starts here.
                if (remainNone)
                {
                    pinyinList.add(Pinyin.none5);
                }
                ++offset;
                continue;
            }
            for (Pinyin pinyin : wordNet[offset])
            {
                pinyinList.add(pinyin);
            }
            // Skip past the characters consumed by this match.
            offset += wordNet[offset].length;
        }
        return pinyinList;
    }

    /**
     * Incremental longest-prefix searcher over a char array, returning one
     * (word, pinyin[]) entry per call to {@link #next()}.
     */
    public static class Searcher extends BaseSearcher<Pinyin[]>
    {
        /**
         * Offset where the next segmentation attempt starts — mutable scan state.
         */
        int begin;

        // Dictionary trie used for prefix lookups.
        DoubleArrayTrie<Pinyin[]> trie;

        protected Searcher(char[] c, DoubleArrayTrie<Pinyin[]> trie)
        {
            super(c);
            this.trie = trie;
        }

        protected Searcher(String text, DoubleArrayTrie<Pinyin[]> trie)
        {
            super(text);
            this.trie = trie;
        }

        @Override
        public Map.Entry<String, Pinyin[]> next()
        {
            // Keep advancing until a word is found, so the first call yields one.
            Map.Entry<String, Pinyin[]> result = null;
            while (begin < c.length)
            {
                LinkedList<Map.Entry<String, Pinyin[]>> entryList = trie.commonPrefixSearchWithValue(c, begin);
                if (entryList.size() == 0)
                {
                    ++begin;
                }
                else
                {
                    // Last entry of the prefix search is the longest match.
                    result = entryList.getLast();
                    offset = begin;
                    begin += result.getKey().length();
                    break;
                }
            }
            if (result == null)
            {
                return null;
            }
            return result;
        }
    }
}
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.cache.filter.support;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.common.concurrentlinkedhashmap.EvictionListener;
import org.elasticsearch.common.concurrentlinkedhashmap.Weigher;
import org.elasticsearch.common.lucene.docset.DocSet;
import org.elasticsearch.common.lucene.search.NoCacheFilter;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.settings.IndexSettings;

import java.io.IOException;
import java.util.concurrent.ConcurrentMap;

/**
 * Base class for weight-bounded filter caches. Cached values are DocSets keyed
 * by (reader core key, filter key). The class tracks which readers currently
 * have cached entries ({@code seenReaders}), the total cached size in bytes
 * ({@code totalMetric}) and eviction counts, and invalidates all entries for a
 * reader when that reader is closed (via ReaderFinishedListener).
 * The concrete map implementation is supplied by subclasses through {@link #cache()}.
 */
public abstract class AbstractWeightedFilterCache extends AbstractIndexComponent implements FilterCache, IndexReader.ReaderFinishedListener, EvictionListener<AbstractWeightedFilterCache.FilterCacheKey, FilterCacheValue<DocSet>> {

    // Reader core keys that currently (may) have entries in the cache.
    // Value is always Boolean.TRUE; the map is used as a concurrent set.
    final ConcurrentMap<Object, Boolean> seenReaders = ConcurrentCollections.newConcurrentMap();
    final CounterMetric seenReadersCount = new CounterMetric();

    final CounterMetric evictionsMetric = new CounterMetric();
    // Running total of cached DocSet sizes in bytes (inc on insert, dec on remove/evict).
    final MeanMetric totalMetric = new MeanMetric();

    protected AbstractWeightedFilterCache(Index index, @IndexSettings Settings indexSettings) {
        super(index, indexSettings);
    }

    /** The backing concurrent map; provided by the concrete cache implementation. */
    protected abstract ConcurrentMap<FilterCacheKey, FilterCacheValue<DocSet>> cache();

    @Override
    public void close() throws ElasticSearchException {
        clear();
    }

    @Override
    public void clear() {
        // Remove entries reader-by-reader; remove() returning null means another
        // thread already cleared this reader, in which case we stop early.
        for (Object readerKey : seenReaders.keySet()) {
            Boolean removed = seenReaders.remove(readerKey);
            if (removed == null) {
                return;
            }
            seenReadersCount.dec();
            ConcurrentMap<FilterCacheKey, FilterCacheValue<DocSet>> cache = cache();
            for (FilterCacheKey key : cache.keySet()) {
                // Identity comparison: reader core keys are compared by reference.
                if (key.readerKey() == readerKey) {
                    FilterCacheValue<DocSet> removed2 = cache.remove(key);
                    if (removed2 != null) {
                        totalMetric.dec(removed2.value().sizeInBytes());
                    }
                }
            }
        }
    }

    @Override
    public void finished(IndexReader reader) {
        clear(reader);
    }

    @Override
    public void clear(IndexReader reader) {
        // we add the seen reader before we add the first cache entry for this reader
        // so, if we don't see it here, it won't be in the cache
        Boolean removed = seenReaders.remove(reader.getCoreCacheKey());
        if (removed == null) {
            return;
        }
        seenReadersCount.dec();
        ConcurrentMap<FilterCacheKey, FilterCacheValue<DocSet>> cache = cache();
        for (FilterCacheKey key : cache.keySet()) {
            if (key.readerKey() == reader.getCoreCacheKey()) {
                FilterCacheValue<DocSet> removed2 = cache.remove(key);
                if (removed2 != null) {
                    totalMetric.dec(removed2.value().sizeInBytes());
                }
            }
        }
    }

    @Override
    public EntriesStats entriesStats() {
        long seenReadersCount = this.seenReadersCount.count();
        // Second argument is the mean entry count per live reader (guarded against /0).
        return new EntriesStats(totalMetric.sum(), seenReadersCount == 0 ? 0 : totalMetric.count() / seenReadersCount);
    }

    @Override
    public long evictions() {
        return evictionsMetric.count();
    }

    @Override
    public Filter cache(Filter filterToCache) {
        // NoCacheFilter opts out of caching; already-wrapped filters are returned as-is.
        if (filterToCache instanceof NoCacheFilter) {
            return filterToCache;
        }
        if (isCached(filterToCache)) {
            return filterToCache;
        }
        return new FilterCacheFilterWrapper(filterToCache, this);
    }

    @Override
    public boolean isCached(Filter filter) {
        return filter instanceof FilterCacheFilterWrapper;
    }

    /**
     * Wraps a Filter so its DocIdSet is computed once per (reader, filter key)
     * and served from the cache afterwards.
     */
    static class FilterCacheFilterWrapper extends Filter {

        private final Filter filter;

        private final AbstractWeightedFilterCache cache;

        FilterCacheFilterWrapper(Filter filter, AbstractWeightedFilterCache cache) {
            this.filter = filter;
            this.cache = cache;
        }

        @Override
        public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
            // Filters may provide an explicit cache key; default is the filter itself.
            Object filterKey = filter;
            if (filter instanceof CacheKeyFilter) {
                filterKey = ((CacheKeyFilter) filter).cacheKey();
            }
            FilterCacheKey cacheKey = new FilterCacheKey(reader.getCoreCacheKey(), filterKey);
            ConcurrentMap<FilterCacheKey, FilterCacheValue<DocSet>> innerCache = cache.cache();

            FilterCacheValue<DocSet> cacheValue = innerCache.get(cacheKey);
            if (cacheValue == null) {
                // Register the reader BEFORE inserting the entry (see clear(IndexReader));
                // putIfAbsent ensures the listener is registered exactly once.
                if (!cache.seenReaders.containsKey(reader.getCoreCacheKey())) {
                    Boolean previous = cache.seenReaders.putIfAbsent(reader.getCoreCacheKey(), Boolean.TRUE);
                    if (previous == null) {
                        reader.addReaderFinishedListener(cache);
                        cache.seenReadersCount.inc();
                    }
                }

                DocIdSet docIdSet = filter.getDocIdSet(reader);
                DocSet docSet = FilterCacheValue.cacheable(reader, docIdSet);
                cacheValue = new FilterCacheValue<DocSet>(docSet);
                // Only the thread that won the race accounts for the entry's size.
                FilterCacheValue<DocSet> previous = innerCache.putIfAbsent(cacheKey, cacheValue);
                if (previous == null) {
                    cache.totalMetric.inc(cacheValue.value().sizeInBytes());
                }
            }

            // Lucene contract: null means "no documents match".
            return cacheValue.value() == DocSet.EMPTY_DOC_SET ? null : cacheValue.value();
        }

        public String toString() {
            return "FilterCacheFilterWrapper(" + filter + ")";
        }

        public boolean equals(Object o) {
            if (!(o instanceof FilterCacheFilterWrapper)) return false;
            return this.filter.equals(((FilterCacheFilterWrapper) o).filter);
        }

        public int hashCode() {
            return filter.hashCode() ^ 0x1117BF25;
        }
    }

    // factored by 10
    public static class FilterCacheValueWeigher implements Weigher<FilterCacheValue<DocSet>> {

        public static final long FACTOR = 10l;

        @Override
        public int weightOf(FilterCacheValue<DocSet> value) {
            // NOTE(review): the literal 10 duplicates FACTOR above — keep them in sync
            // (or use FACTOR here; confirm no external user depends on the constant).
            int weight = (int) Math.min(value.value().sizeInBytes() / 10, Integer.MAX_VALUE);
            return weight == 0 ? 1 : weight;
        }
    }

    @Override
    public void onEviction(FilterCacheKey filterCacheKey, FilterCacheValue<DocSet> docSetFilterCacheValue) {
        if (filterCacheKey != null) {
            // Only count evictions for readers still tracked; entries of closed
            // readers were already accounted for in clear(IndexReader).
            if (seenReaders.containsKey(filterCacheKey.readerKey())) {
                evictionsMetric.inc();
                if (docSetFilterCacheValue != null) {
                    totalMetric.dec(docSetFilterCacheValue.value().sizeInBytes());
                }
            }
        }
    }

    /**
     * Cache key: identity-compared reader core key plus an equals-compared
     * filter key.
     */
    public static class FilterCacheKey {
        private final Object readerKey;
        private final Object filterKey;

        public FilterCacheKey(Object readerKey, Object filterKey) {
            this.readerKey = readerKey;
            this.filterKey = filterKey;
        }

        public Object readerKey() {
            return readerKey;
        }

        public Object filterKey() {
            return filterKey;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            // The null/class check below was deliberately disabled — presumably a
            // hot-path shortcut; keys are only ever compared within this cache.
//            if (o == null || getClass() != o.getClass()) return false;
            FilterCacheKey that = (FilterCacheKey) o;
            return (readerKey == that.readerKey && filterKey.equals(that.filterKey));
        }

        @Override
        public int hashCode() {
            return readerKey.hashCode() + 31 * filterKey.hashCode();
        }
    }
}
/** * Copyright (c) Dell Inc., or its subsidiaries. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 */ package io.pravega.common.util; /** * Exception that is thrown whenever a Property Value is invalid based on what is expected. */ public class InvalidPropertyValueException extends ConfigurationException { /** * */ private static final long serialVersionUID = 1L; /** * Creates a new instance of the InvalidPropertyValueException class. * * @param message The message of the exception. */ public InvalidPropertyValueException(String message) { super(message); } /** * Creates a new instance of the InvalidPropertyValueException class. * * @param fullPropertyName The full name (component + property) of the property. * @param actualValue The actual value that was about to be processed. */ public InvalidPropertyValueException(String fullPropertyName, String actualValue) { super(String.format("Value '%s' is invalid for property '%s'.", actualValue, fullPropertyName)); } /** * Creates a new instance of the InvalidPropertyValueException class. * * @param fullPropertyName The full name (component + property) of the property. * @param actualValue The actual value that was about to be processed. * @param cause The causing Exception for this. */ public InvalidPropertyValueException(String fullPropertyName, String actualValue, Throwable cause) { super(String.format("Value '%s' is invalid for property '%s'.", actualValue, fullPropertyName), cause); } }
package com.patreon.resources.shared; public interface Field { String getPropertyName(); boolean isDefault(); }
package org.mage.test.cards.single.znr;

import mage.abilities.common.BeginningOfCombatTriggeredAbility;
import mage.abilities.effects.common.GainLifeEffect;
import mage.constants.PhaseStep;
import mage.constants.TargetController;
import mage.constants.Zone;
import org.junit.Test;
import org.mage.test.serverside.base.CardTestPlayerBase;

/**
 * Tests for Moraug, Fury of Akoum's landfall-triggered extra combat phases.
 * A custom "Combat Counter" card (gain 1 life at the beginning of each of
 * playerA's combats) is used to count how many combat phases actually
 * happened; the damage assertions grow per extra combat because of Moraug's
 * boost (each extra combat adds 1 damage per attacker in these tests).
 *
 * @author TheElk801
 */
public class MoraugFuryOfAkoumTest extends CardTestPlayerBase {

    private static final String moraug = "Moraug, Fury of Akoum";
    private static final String mountain = "Mountain";
    private static final String lion = "Silvercoat Lion";
    private static final String bear = "Grizzly Bears";
    private static final String corpse = "Walking Corpse";
    private static final String azusa = "Azusa, Lost but Seeking";

    /**
     * Adds a custom card that gains playerA 1 life at the beginning of each
     * of playerA's combat phases — used to count combats via life total.
     */
    private void makeCombatCounter() {
        addCustomCardWithAbility(
                "Combat Counter", playerA,
                new BeginningOfCombatTriggeredAbility(
                        new GainLifeEffect(1), TargetController.YOU, false
                )
        );
    }

    /** Puts the three standard 2/2 attackers onto playerA's battlefield. */
    private void makeAttackers() {
        addCard(Zone.BATTLEFIELD, playerA, lion);
        addCard(Zone.BATTLEFIELD, playerA, bear);
        addCard(Zone.BATTLEFIELD, playerA, corpse);
    }

    /** Declares all three creatures as attackers for turn 1. */
    private void attackWithAttackers() {
        attack(1, playerA, lion);
        attack(1, playerA, bear);
        attack(1, playerA, corpse);
    }

    /**
     * If the landfall ability resolves during your precombat main phase,
     * the additional combat phase will happen before your regular combat phase.
     * You’ll untap creatures you control at the beginning of the additional combat
     * but not at the beginning of your regular combat.
     * (2020-09-25)
     */
    @Test
    public void testPrecombatLandfall() {
        makeCombatCounter();
        makeAttackers();
        addCard(Zone.BATTLEFIELD, playerA, moraug);
        addCard(Zone.HAND, playerA, mountain);
        setLife(playerB, 100);

        // 9 damage
        // playing land pre-combat adds extra combat with untap before normal combat
        playLand(1, PhaseStep.PRECOMBAT_MAIN, playerA, mountain);
        attackWithAttackers();

        setStopAt(1, PhaseStep.END_TURN);
        execute();

        // untap trigger already happened, creatures are still tapped and can't attack again
        assertTapped(lion, true);
        assertTapped(bear, true);
        assertTapped(corpse, true);
        // 2 combats happened (counter gained 2 life); no damage in the regular combat
        assertLife(playerA, 20 + 2);
        assertLife(playerB, 100 - 9 - 0);
    }

    /**
     * Landfall after combat adds an extra combat phase in which the creatures
     * untap and can attack again with Moraug's boost applied.
     */
    @Test
    public void testPostcombatLandfall() {
        makeCombatCounter();
        makeAttackers();
        addCard(Zone.BATTLEFIELD, playerA, moraug);
        addCard(Zone.HAND, playerA, mountain);
        setLife(playerB, 100);

        attackWithAttackers(); // 9 damage

        playLand(1, PhaseStep.POSTCOMBAT_MAIN, playerA, mountain);
        attackWithAttackers(); // 12 damage

        setStopAt(1, PhaseStep.END_TURN);
        execute();

        assertLife(playerA, 20 + 2);
        assertLife(playerB, 100 - 9 - 12);
    }

    /**
     * Two land drops (enabled by Azusa) yield two separate extra combats,
     * each with a further boost.
     */
    @Test
    public void testDoubleLandfall() {
        makeCombatCounter();
        makeAttackers();
        addCard(Zone.BATTLEFIELD, playerA, moraug);
        addCard(Zone.BATTLEFIELD, playerA, azusa);
        addCard(Zone.HAND, playerA, mountain, 2);
        setLife(playerB, 100);

        attackWithAttackers(); // 9 damage

        playLand(1, PhaseStep.POSTCOMBAT_MAIN, playerA, mountain);
        playLand(1, PhaseStep.POSTCOMBAT_MAIN, playerA, mountain);
        attackWithAttackers(); // 12 damage
        attackWithAttackers(); // 15 damage

        setStopAt(1, PhaseStep.END_TURN);
        execute();

        assertLife(playerA, 20 + 3);
        assertLife(playerB, 100 - 9 - 12 - 15);
    }

    /**
     * With Mirror Gallery allowing two copies of Moraug, a single land drop
     * triggers both copies, producing two extra combats and stacked boosts.
     */
    @Test
    public void testDoubleMoraug() {
        makeCombatCounter();
        makeAttackers();
        addCard(Zone.BATTLEFIELD, playerA, "Mirror Gallery");
        addCard(Zone.BATTLEFIELD, playerA, moraug, 2);
        addCard(Zone.HAND, playerA, mountain);
        setLife(playerB, 100);

        attackWithAttackers(); // 12 damage

        playLand(1, PhaseStep.POSTCOMBAT_MAIN, playerA, mountain);
        attackWithAttackers(); // 18 damage
        attackWithAttackers(); // 24 damage

        setStopAt(1, PhaseStep.END_TURN);
        execute();

        assertLife(playerA, 20 + 3);
        assertLife(playerB, 100 - 12 - 18 - 24);
    }
}
package com.ulisesbocchio.jasyptspringboot.encryptor;

import com.ulisesbocchio.jasyptspringboot.util.Singleton;
import lombok.extern.slf4j.Slf4j;
import org.jasypt.encryption.StringEncryptor;
import org.jasypt.encryption.pbe.PooledPBEStringEncryptor;
import org.jasypt.encryption.pbe.config.SimpleStringPBEConfig;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.core.env.Environment;

import java.util.Optional;

/**
 * Default Lazy Encryptor that delegates to a custom {@link StringEncryptor} bean or creates a default {@link PooledPBEStringEncryptor}
 *
 * @author Ulises Bocchio
 */
@Slf4j
public class DefaultLazyEncryptor implements StringEncryptor {

    // Lazily-resolved delegate; the supplier runs at most once, on first use.
    private final Singleton<StringEncryptor> singleton;

    /**
     * Prefers a custom encryptor bean named {@code customEncryptorBeanName} from
     * the given BeanFactory; falls back to the default property-driven encryptor
     * when no such bean exists.
     *
     * @param e                       environment holding jasypt.encryptor.* properties
     * @param customEncryptorBeanName bean name to look up in {@code bf}
     * @param bf                      bean factory to resolve the custom encryptor from
     */
    public DefaultLazyEncryptor(final Environment e, final String customEncryptorBeanName, final BeanFactory bf) {
        singleton = new Singleton<>(() -> Optional.of(customEncryptorBeanName)
                .filter(bf::containsBean)
                .map(name -> (StringEncryptor) bf.getBean(name))
                .map(bean -> {
                    log.info("Found Custom Encryptor Bean {} with name: {}", bean, customEncryptorBeanName)
;
                    return bean;
                })
                .orElseGet(() -> {
                    log.info("String Encryptor custom Bean not found with name '{}'. Initializing Default String Encryptor", customEncryptorBeanName);
                    return createDefault(e);
                }));
    }

    /** Always builds the default property-driven encryptor (no custom bean lookup). */
    public DefaultLazyEncryptor(Environment e) {
        singleton = new Singleton<>(() -> createDefault(e));
    }

    // Builds a PooledPBEStringEncryptor from jasypt.encryptor.* properties.
    // Only the password is mandatory; everything else has a documented default.
    private StringEncryptor createDefault(Environment e) {
        PooledPBEStringEncryptor encryptor = new PooledPBEStringEncryptor();
        SimpleStringPBEConfig config = new SimpleStringPBEConfig();
        config.setPassword(getRequiredProperty(e, "jasypt.encryptor.password"));
        config.setAlgorithm(getProperty(e, "jasypt.encryptor.algorithm", "PBEWithMD5AndDES"));
        config.setKeyObtentionIterations(getProperty(e, "jasypt.encryptor.keyObtentionIterations", "1000"));
        config.setPoolSize(getProperty(e, "jasypt.encryptor.poolSize", "1"));
        config.setProviderName(getProperty(e, "jasypt.encryptor.providerName", null));
        config.setSaltGeneratorClassName(getProperty(e, "jasypt.encryptor.saltGeneratorClassname", "org.jasypt.salt.RandomSaltGenerator"));
        config.setStringOutputType(getProperty(e, "jasypt.encryptor.stringOutputType", "base64"));
        encryptor.setConfig(config);
        return encryptor;
    }

    // Reads a property, logging when the default is used.
    private static String getProperty(Environment environment, String key, String defaultValue) {
        if (!propertyExists(environment, key)) {
            log.info("Encryptor config not found for property {}, using default value: {}", key, defaultValue);
        }
        return environment.getProperty(key, defaultValue);
    }

    private static boolean propertyExists(Environment environment, String key) {
        return environment.getProperty(key) != null;
    }

    // Reads a mandatory property; fails fast with a descriptive message when missing.
    private static String getRequiredProperty(Environment environment, String key) {
        if (!propertyExists(environment, key)) {
            throw new IllegalStateException(String.format("Required Encryption configuration property missing: %s", key));
        }
        return environment.getProperty(key);
    }

    @Override
    public String encrypt(String message) {
        return singleton.get().encrypt(message);
    }

    @Override
    public String decrypt(String encryptedMessage) {
        return singleton.get().decrypt(encryptedMessage);
    }
}
// Copyright © 2008-2010 Esko Luontola <www.orfjackal.net> // This software is released under the Apache License 2.0. // The license text is at http://dimdwarf.sourceforge.net/LICENSE package net.orfjackal.dimdwarf.tasks; import net.orfjackal.dimdwarf.context.*; import javax.annotation.concurrent.*; import javax.inject.*; import java.util.concurrent.Executor; @ThreadSafe public class TaskExecutor implements Executor { private final Provider<Context> context; private final Provider<FilterChain> filters; @Inject public TaskExecutor(@Task Provider<Context> context, @Task Provider<FilterChain> filters) { this.context = context; this.filters = filters; } public void execute(final Runnable command) { ThreadContext.runInContext(context.get(), new Runnable() { public void run() { filters.get().execute(command); } }); } }
package com.asher.snapmailer; import androidx.appcompat.app.AlertDialog; import androidx.appcompat.app.AppCompatActivity; import android.content.DialogInterface; import android.content.Intent; import android.os.Bundle; import android.view.View; import android.widget.Button; import android.widget.TextView; import android.widget.Toast; public class RegisterActivity extends AppCompatActivity implements View.OnClickListener { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_register); //To set dialog box on clicking login button Button signupbtn = findViewById(R.id.btnRegister); signupbtn.setOnClickListener(this); TextView btn= findViewById(R.id.alreadyHaveAccount); btn.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { startActivity(new Intent(RegisterActivity.this, LoginActivity.class) .setFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP)); } }); TextView logo= findViewById(R.id.logo); } public void onClick(View v) { switch (v.getId()){ case R.id.btnRegister: alertDialog(); break; } } private void alertDialog() { AlertDialog.Builder dialog=new AlertDialog.Builder(this); dialog.setMessage("Details Not filled"); dialog.setTitle("ERROR!!"); dialog.setNeutralButton("OK", new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { Toast.makeText(getApplicationContext(),"Please fill your details",Toast.LENGTH_SHORT).show(); } }); AlertDialog alertDialog=dialog.create(); alertDialog.show(); } }
package com.codecool.dungeoncrawl.logic.items; import com.codecool.dungeoncrawl.logic.Cell; import com.codecool.dungeoncrawl.logic.Drawable; public abstract class Item implements Drawable { private Cell cell; public Item(Cell cell) { this.cell = cell; this.cell.setItem(this); } public Cell getCell() { return cell; } public int getX() { return cell.getX(); } public int getY() { return cell.getY(); } public void clearCell(){ cell.setItem(null); } @Override public String toString() { return this.getTileName(); } }
package com.github.zulhilmizainuddin.httprequest; import java.util.HashMap; import java.util.Map; final class StreamConverterFactory { private static final Map<String, Class<? extends StreamConverter>> encodingTypeMap = new HashMap<>(); static { encodingTypeMap.put("", StringStreamConverter.class); encodingTypeMap.put("gzip", GzipStreamConverter.class); encodingTypeMap.put("deflate", DeflateStreamConverter.class); } public static StreamConverter getConverter(String encodingType) { if (encodingType == null) encodingType = ""; StreamConverter streamConverter = null; try { streamConverter = encodingTypeMap.get(encodingType.toLowerCase()).newInstance(); } catch (InstantiationException ex) {} catch (IllegalAccessException ex) {} return streamConverter; } }
package com.ruihai.xingka.api.model; import java.io.Serializable; import java.util.ArrayList; /** * 发布旅拼封装类 * Created by gebixiaozhang on 16/5/21. */ public class TravelTogetherInfo implements Serializable{ public String title;//标题 public String beginTime;//行程开始时间 public String endTime;//行程结束时间 public int costType;//费用类型 public int personNum;//旅伴人数 public String partnerContent;//旅伴要求 public String content;//旅拼描述 public String url; public double x, y;//经度 public String address;//地址 public ArrayList<TravelTogetherImgMoudle> imgModule;//旅拼图片 public ArrayList<TravelLineMoudle> lineModule;//旅拼路线 public ArrayList<String> imgPath; /* { "beginTime": "2016-05-01", "endTime": "2016-05-07", "costType": 1, "personNum": 15, "partnerContent": "对旅伴的要求部分", --最多30字 "content": "旅拼描述部分", --最多1000字 "url": "http://www.xingka.cc", "x": 143.213, "y": 23.538, "address": "蔚蓝商务港", ---最多30字 "imgModule": [ --至少要有一张图片 { "imgSrc": "00000000-0000-0000-0000-000000000000", "content": "图片描述部分1" --最多200字 }, { "imgSrc": "11111111-1111-1111-1111-111111111111", "content": "图片描述部分2" } ], "lineModule": [ --至少要有一个地点 { "x": 11.423, "y": 11.12, "address": "拉萨市" }, { "x": 22.423, "y": 22.12, "address": "合肥市" } ] } */ }
package module1571_public_tests_unittests.a;

import javax.naming.directory.*;
import javax.net.ssl.*;
import javax.rmi.ssl.*;

/**
 * Generated test interface exposing a handful of constant fields typed against
 * assorted {@code javax} APIs plus a small name/value accessor contract.
 *
 * <p>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy
 * eirmod tempor invidunt ut labore et dolore magna aliquyam erat.</p>
 *
 * @see javax.annotation.processing.Completion
 * @see javax.lang.model.AnnotatedConstruct
 * @see javax.management.Attribute
 */
@SuppressWarnings("all")
public interface IFoo1<X> extends IFoo0<X> {

    // Constant fields; types resolve through the wildcard imports above.
    DirContext f0 = null;
    ExtendedSSLSession f1 = null;
    SslRMIClientSocketFactory f2 = null;

    /** @return the current name */
    String getName();

    /** @param s the new name */
    void setName(String s);

    /** @return the current element */
    X get();

    /** @param e the new element */
    void set(X e);
}
/* SPDX-License-Identifier: Apache-2.0 */ /* Copyright Contributors to the ODPi Egeria project. */ package org.odpi.openmetadata.conformance.tests.repository.instances; import org.odpi.openmetadata.conformance.auditlog.ConformanceSuiteAuditCode; import org.odpi.openmetadata.conformance.tests.repository.RepositoryConformanceTestCase; import org.odpi.openmetadata.conformance.workbenches.repository.RepositoryConformanceProfileRequirement; import org.odpi.openmetadata.conformance.workbenches.repository.RepositoryConformanceWorkPad; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.OMRSMetadataCollection; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.instances.EntityDetail; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.instances.InstanceProperties; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.instances.InstanceStatus; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.instances.Relationship; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.typedefs.EntityDef; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.properties.typedefs.RelationshipDef; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.repositoryconnector.OMRSRepositoryConnector; import org.odpi.openmetadata.repositoryservices.connectors.stores.metadatacollectionstore.repositoryconnector.OMRSRepositoryHelper; import org.odpi.openmetadata.repositoryservices.ffdc.exception.EntityNotKnownException; import org.odpi.openmetadata.repositoryservices.ffdc.exception.FunctionNotSupportedException; import org.odpi.openmetadata.repositoryservices.ffdc.exception.InvalidParameterException; import 
org.odpi.openmetadata.repositoryservices.ffdc.exception.RelationshipNotKnownException; import java.io.Serializable; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; /** * Test that all defined entities can be created, retrieved, updated and deleted. */ public class TestSupportedRelationshipReferenceCopyLifecycle extends RepositoryConformanceTestCase { private static final String testCaseId = "repository-relationship-reference-copy-lifecycle"; private static final String testCaseName = "Repository relationship reference copy lifecycle test case"; /* Type */ private static final String assertion0 = testCaseId + "-00"; private static final String assertionMsg0 = " relationship type definition matches known type "; private static final String assertion1 = testCaseId + "-01"; private static final String assertionMsg1 = " reference relationship created; repository supports storage of reference copies."; private static final String assertion2 = testCaseId + "-02"; private static final String assertionMsg2 = " reference relationship can be retrieved as Relationship."; private static final String assertion3 = testCaseId + "-03"; private static final String assertionMsg3 = " reference relationship matches the relationship that was saved."; private static final String assertion4 = testCaseId + "-04"; private static final String assertionMsg4 = " reference relationship status cannot be updated."; private static final String assertion5 = testCaseId + "-05"; private static final String assertionMsg5 = " reference relationship properties cannot be updated."; private static final String assertion6 = testCaseId + "-06"; private static final String assertionMsg6 = " reference relationship type cannot be changed."; private static final String assertion7 = testCaseId + "-07"; private static final String assertionMsg7 = " reference relationship identity cannot be changed."; private static final String assertion8 = 
testCaseId + "-08"; private static final String assertionMsg8 = " reference relationship copy purged at TUT."; private static final String assertion9 = testCaseId + "-09"; private static final String assertionMsg9 = " reference relationship refresh requested by TUT."; private static final String assertion10 = testCaseId + "-10"; private static final String assertionMsg10 = " reference relationship refreshed."; private static final String assertion11 = testCaseId + "-11"; private static final String assertionMsg11 = " refreshed reference relationship matches original."; private static final String assertion12 = testCaseId + "-12"; private static final String assertionMsg12 = " reference relationship purged following delete at CTS."; private static final String assertion13 = testCaseId + "-13"; private static final String assertionMsg13 = " repository supports types for relationship and ends."; private static final String assertion14 = testCaseId + "-14"; private static final String assertionMsg14 = " master relationship created."; private static final String assertion15 = testCaseId + "-15"; private static final String assertionMsg15 = " reference relationship created with mappingProperties."; private static final String assertion16 = testCaseId + "-16"; private static final String assertionMsg16 = " reference relationship retrieved with mappingProperties."; private static final String assertion17 = testCaseId + "-17"; private static final String assertionMsg17 = " reference relationship re-homed."; private static final String assertion18 = testCaseId + "-18"; private static final String assertionMsg18 = " rehomed master relationship has been retrieved."; private static final String assertion19 = testCaseId + "-19"; private static final String assertionMsg19 = " rehomed master relationship has correct home metadataCollectionId."; private static final String assertion100 = testCaseId + "-100"; private static final String assertionMsg100 = " reference relationship 
re-homed."; private static final String assertion101 = testCaseId + "-101"; private static final String assertionMsg101 = " repository supports a viable entity type for each end of relationship."; private RepositoryConformanceWorkPad workPad; private String metadataCollectionId; private RelationshipDef relationshipDef; private Map<String, EntityDef> entityDefs; private String testTypeName; /* * A propagation timeout is used to limit how long the testcase will wait for * the propagation of an OMRS instance event and consequent processing at the TUT (or CTS). * Each time the testcase waits it does so in a 100ms polling loop, to minimise overall delay. * The wait loops will wait for pollCount iterations of pollPeriod, so a pollCount of x10 * results in a 1000ms (1s) timeout. * */ private Integer pollCount = 50; private Integer pollPeriod = 100; // milliseconds private List<EntityDetail> createdEntitiesCTS = new ArrayList<>(); private List<EntityDetail> createdEntityRefCopiesTUT = new ArrayList<>(); private List<EntityDetail> createdEntitiesTUT = new ArrayList<>(); private List<Relationship> createdRelationshipsCTS = new ArrayList<>(); private List<Relationship> createdRelationshipRefCopiesTUT = new ArrayList<>(); private List<Relationship> createdRelationshipsTUT = new ArrayList<>(); /** * Typical constructor sets up superclass and discovered information needed for tests * * @param workPad place for parameters and results * @param entityDefs entities to test * @param relationshipDef type of valid relationships */ public TestSupportedRelationshipReferenceCopyLifecycle(RepositoryConformanceWorkPad workPad, Map<String, EntityDef> entityDefs, RelationshipDef relationshipDef) { super(workPad, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId()); this.workPad = workPad; this.metadataCollectionId = workPad.getTutMetadataCollectionId(); this.relationshipDef = 
relationshipDef; this.entityDefs = entityDefs; this.testTypeName = this.updateTestIdByType(relationshipDef.getName(), testCaseId, testCaseName); /* * Enforce minimum pollPeriod and pollCount. */ this.pollPeriod = Math.max(this.pollPeriod, 100); this.pollCount = Math.max(this.pollCount, 1); } /** * Method implemented by the actual test case. * * @throws Exception something went wrong with the test. */ protected void run() throws Exception { OMRSMetadataCollection metadataCollection = super.getMetadataCollection(); /* * Check that the relationship type matches the known type from the repository helper. * * The entity types used by the ends are not verified on this test - they are verified in the supported entity tests */ OMRSRepositoryConnector cohortRepositoryConnector = null; OMRSRepositoryHelper repositoryHelper = null; if (workPad != null) { cohortRepositoryConnector = workPad.getTutRepositoryConnector(); repositoryHelper = cohortRepositoryConnector.getRepositoryHelper(); } RelationshipDef knownRelationshipDef = (RelationshipDef) repositoryHelper.getTypeDefByName(workPad.getLocalServerUserId(), relationshipDef.getName()); verifyCondition((relationshipDef.equals(knownRelationshipDef)), assertion0, testTypeName + assertionMsg0, RepositoryConformanceProfileRequirement.CONSISTENT_TYPES.getProfileId(), RepositoryConformanceProfileRequirement.CONSISTENT_TYPES.getRequirementId()); /* * This test will: * * Create a pair of entities of the types defined by the relationship type, in the local (CTS) repository. * This will cause instance events to be flowed to the TUT, which should then (by default) save reference copies of the entities. * * Create a local relationship of the defined type, in the local (CTS) repository. * This will cause an instance event to be flowed to the TUT, which should then (by default) save a reference copy of the relationship. * * Attempt to retrieve the relationship reference copy from the TUT. 
* * If this results in relationship not known, assert that the TUT does not support reference copies, so set the discovered property to disabled, and abandon the remainder of the test. * * Else, if the reference copy is known - add to discovered properties and continue with the remaining test requirements. * * The following tests are run against the reference copy: * * Validate that the reference copy can be retrieved by getRelationship * Validate that the reference copy 'matches' the local relationship. * * Verify that it is not possible to update the status of the reference copy. * Verify that it is not possible to update the properties of the reference copy. * Verify that it is not possible to re-type the reference copy. * Verify that it is not possible to re-identify the reference copy. * * Purge the reference copy (only) and then request a refresh and ensure that a new ref copy is created. * * Delete and purge the original local relationship, at the CTS. * Because the CTS server is using local in-memory repository a soft delete must precede the purge. * This should flow an instance event to the TUT causing the ref copy to be purged. * Attempt to get the ref copy. This should fail. * * THE NEXT TEST IS PERFORMED LAST AS IT PLACES THE COHORT IN AN INVALID STATE. * Create another original relationship, causing the creation of a reference copy in the TUT. * Verify that it IS possible to re-home the reference copy. * Delete and purge the original relationship and REMOTELY delete and purge the TUT's copy of the relationship (which is no longer a reference copy). * Note that this last part must be performed on the TUT. * * Finally, clean up the entities and relationships created during this test. * */ /* * This test needs a connector to the local repository - i.e. the in-memory repository running locally to the CTS server. 
*/ OMRSMetadataCollection ctsMetadataCollection = repositoryConformanceWorkPad.getLocalRepositoryConnector().getMetadataCollection(); /* * In this testcase the repository is believed to support the relationship type defined by * relationshipDef - but may not support all of the entity inheritance hierarchy - it may only * support a subset of entity types. So although the relationship type may have end definitions * each specifying a given entity type - the repository may only support certain sub-types of the * specified type. This is OK, and the testcase needs to only try to use entity types that are * supported by the repository being tested. To do this it needs to start with the specified * end type, e.g. Referenceable, and walk down the hierarchy looking for each subtype that * is supported by the repository (i.e. is in the entityDefs map). The test is run for * each combination of end1Type and end2Type - but only for types that are within the * active set for this repository. */ String end1DefName = relationshipDef.getEndDef1().getEntityType().getName(); List<String> end1DefTypeNames = new ArrayList<>(); end1DefTypeNames.add(end1DefName); if (this.workPad.getEntitySubTypes(end1DefName) != null) { end1DefTypeNames.addAll(this.workPad.getEntitySubTypes(end1DefName)); } String end2DefName = relationshipDef.getEndDef2().getEntityType().getName(); List<String> end2DefTypeNames = new ArrayList<>(); end2DefTypeNames.add(end2DefName); if (this.workPad.getEntitySubTypes(end2DefName) != null) { end2DefTypeNames.addAll(this.workPad.getEntitySubTypes(end2DefName)); } /* * Filter the possible types to only include types that are supported by the repository */ List<String> end1SupportedTypeNames = new ArrayList<>(); for (String end1TypeName : end1DefTypeNames) { if (entityDefs.get(end1TypeName) != null) end1SupportedTypeNames.add(end1TypeName); } List<String> end2SupportedTypeNames = new ArrayList<>(); for (String end2TypeName : end2DefTypeNames) { if 
(entityDefs.get(end2TypeName) != null) end2SupportedTypeNames.add(end2TypeName); } /* * Check that neither list is empty */ if (end1SupportedTypeNames.isEmpty() || end2SupportedTypeNames.isEmpty()) { /* * There are no supported types for at least one of the ends - the repository cannot test this relationship type. */ assertCondition((false), assertion100, testTypeName + assertionMsg100, RepositoryConformanceProfileRequirement.RELATIONSHIP_LIFECYCLE.getProfileId(), RepositoryConformanceProfileRequirement.RELATIONSHIP_LIFECYCLE.getRequirementId()); } /* * It is not practical to iterate over all combinations of feasible (supported) end types as it takes too long to run. * For now, this test verifies relationship operation over a limited set of end types. The limitation is extreme in * that it ONLY takes the first available type for each end. This is undesirable for two reasons - one is that it * provides less test coverage; the other is that the types chosen depend on the order in the lists and this could * vary, making results non-repeatable. For now though, it seems these limitations are necessary. * * A full permutation across end types would use the following nested loops... * for (String end1TypeName : end1SupportedTypeNames) { * for (String end2TypeName : end2SupportedTypeNames) { * test logic as below... * } * } */ String end1TypeName = end1SupportedTypeNames.get(0); String end2TypeName = end2SupportedTypeNames.get(0); EntityDef end1EntityDef = entityDefs.get(end1TypeName); EntityDef end2EntityDef = entityDefs.get(end2TypeName); EntityDetail entityOne; EntityDetail entityTwo; Relationship newRelationship; /* * Create the local entities. 
*/ entityOne = ctsMetadataCollection.addEntity(workPad.getLocalServerUserId(), end1EntityDef.getGUID(), super.getAllPropertiesForInstance(workPad.getLocalServerUserId(), end1EntityDef), null, null); createdEntitiesCTS.add(entityOne); entityTwo = ctsMetadataCollection.addEntity(workPad.getLocalServerUserId(), end2EntityDef.getGUID(), super.getAllPropertiesForInstance(workPad.getLocalServerUserId(), end2EntityDef), null, null); createdEntitiesCTS.add(entityTwo); /* * Create the local relationship. * * Generate property values for all the type's defined properties, including inherited properties * This ensures that any properties defined as mandatory by Egeria property cardinality are provided * thereby getting into the connector-logic beyond the property validation. It also creates a * relationship that is logically complete - versus an instance with just the locally-defined properties. */ newRelationship = ctsMetadataCollection.addRelationship(workPad.getLocalServerUserId(), relationshipDef.getGUID(), super.getAllPropertiesForInstance(workPad.getLocalServerUserId(), relationshipDef), entityOne.getGUID(), entityTwo.getGUID(), null); createdRelationshipsCTS.add(newRelationship); /* * This test does not verify the content of the relationship - that is tested in the relationship-lifecycle tests */ /* * Try to get the ref copy of each of the entities from the TUT - this is to help synchronise the testcase to the speed of the cohort. 
*/ EntityDetail refCopyEntityOne = null; EntityDetail refCopyEntityTwo = null; try { Integer remainingCount = this.pollCount; while (refCopyEntityOne == null && remainingCount > 0) { refCopyEntityOne = metadataCollection.isEntityKnown(workPad.getLocalServerUserId(), entityOne.getGUID()); Thread.sleep(this.pollPeriod); remainingCount--; } if (refCopyEntityOne == null && remainingCount == 0) { ConformanceSuiteAuditCode overflow = ConformanceSuiteAuditCode.POLLING_OVERFLOW; workPad.getAuditLog() .logRecord(assertion1 + "-" + entityOne.getGUID(), overflow.getLogMessageId(), overflow.getSeverity(), overflow.getFormattedLogMessage(pollCount.toString(), pollPeriod.toString()), null, overflow.getSystemAction(), overflow.getUserAction()); } if (refCopyEntityOne != null) createdEntityRefCopiesTUT.add(refCopyEntityOne); } catch (Exception exc) { /* * We are not expecting any exceptions from this method call. Log and fail the test. */ String methodName = "isEntityKnown"; String operationDescription = "retrieve an entity of type " + end1EntityDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("entityGUID", entityOne.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } try { Integer remainingCount = this.pollCount; while (refCopyEntityTwo == null && remainingCount > 0) { refCopyEntityTwo = metadataCollection.isEntityKnown(workPad.getLocalServerUserId(), entityTwo.getGUID()); Thread.sleep(this.pollPeriod); remainingCount--; } if (refCopyEntityTwo == null && remainingCount == 0) { ConformanceSuiteAuditCode overflow = ConformanceSuiteAuditCode.POLLING_OVERFLOW; workPad.getAuditLog() .logRecord(assertion1 + "-" + entityTwo.getGUID(), overflow.getLogMessageId(), overflow.getSeverity(), overflow.getFormattedLogMessage(pollCount.toString(), pollPeriod.toString()), null, overflow.getSystemAction(), 
overflow.getUserAction()); } if (refCopyEntityTwo != null) createdEntityRefCopiesTUT.add(refCopyEntityTwo); } catch (Exception exc) { /* * We are not expecting any exceptions from this method call. Log and fail the test. */ String methodName = "isEntityKnown"; String operationDescription = "retrieve an entity of type " + end2EntityDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("entityGUID", entityTwo.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * There should be a reference copy of the entity stored in the TUT. * Retrieve the ref copy from the TUT - if it does not exist, assert that ref copies are not a discovered property */ Relationship refRelationship = null; try { Integer remainingCount = this.pollCount; while (refRelationship == null && remainingCount > 0) { refRelationship = metadataCollection.isRelationshipKnown(workPad.getLocalServerUserId(), newRelationship.getGUID()); Thread.sleep(this.pollPeriod); remainingCount--; } if (refRelationship == null && remainingCount == 0) { ConformanceSuiteAuditCode overflow = ConformanceSuiteAuditCode.POLLING_OVERFLOW; workPad.getAuditLog() .logRecord(assertion1 + "-" + newRelationship.getGUID(), overflow.getLogMessageId(), overflow.getSeverity(), overflow.getFormattedLogMessage(pollCount.toString(), pollPeriod.toString()), null, overflow.getSystemAction(), overflow.getUserAction()); } if (refRelationship != null) createdRelationshipRefCopiesTUT.add(refRelationship); } catch (Exception exc) { /* * We are not expecting any exceptions from this method call. Log and fail the test. 
*/ String methodName = "isRelationshipKnown"; String operationDescription = "retrieve a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("entityGUID", newRelationship.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * If this proves to be a performance problem it might be preferable to refactor the testcase to create all local * instances and batch the GUIDs. On completion of the batch, look for the reference copies. */ if (refRelationship != null) { /* * If we retrieved the reference copy of the relationship - we can assert that the TUT supports reference copies. */ assertCondition((true), assertion1, testTypeName + assertionMsg1, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId()); } else { /* * Report that reference storage requirement is not supported. */ super.addNotSupportedAssertion(assertion1, assertionMsg1, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId()); /* * Terminate the test */ return; } /* * Validate that the reference copy can be retrieved from the TUT and that the retrieved reference copy 'matches' what was saved. */ Relationship retrievedReferenceCopy = null; long start; long elapsedTime; try { start = System.currentTimeMillis(); retrievedReferenceCopy = metadataCollection.getRelationship(workPad.getLocalServerUserId(), newRelationship.getGUID()); elapsedTime = System.currentTimeMillis() - start; } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "getRelationship"; String operationDescription = "retrieve a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", newRelationship.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } assertCondition((retrievedReferenceCopy != null), assertion2, testTypeName + assertionMsg2, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId(), "getRelationship", elapsedTime); /* * Verify that the retrieved reference copy matches the original relationship */ verifyCondition((newRelationship.equals(retrievedReferenceCopy)), assertion3, testTypeName + assertionMsg3, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId()); /* * If the relationship def has any valid status values (including DELETED), attempt * to modify the status of the retrieved reference copy - this should fail */ for (InstanceStatus validInstanceStatus : relationshipDef.getValidInstanceStatusList()) { try { start = System.currentTimeMillis(); Relationship updatedRelationship = metadataCollection.updateRelationshipStatus(workPad.getLocalServerUserId(), retrievedReferenceCopy.getGUID(), validInstanceStatus); elapsedTime = System.currentTimeMillis() - start; assertCondition((false), assertion4, testTypeName + assertionMsg4, RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getRequirementId(), "updateRelationshipStatus-negative", elapsedTime); } catch (InvalidParameterException e) { /* * We are not expecting the status update to work - it should have thrown an 
InvalidParameterException */ elapsedTime = System.currentTimeMillis() - start; assertCondition((true), assertion4, testTypeName + assertionMsg4, RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getRequirementId(), "updateRelationshipStatus-negative", elapsedTime); } catch (Exception exc) { /* * We are not expecting any exceptions from this method call. Log and fail the test. */ String methodName = "updateRelationshipStatus"; String operationDescription = "update the status of a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", retrievedReferenceCopy.getGUID()); parameters.put("newStatus", validInstanceStatus.toString()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } } /* * Attempt to modify one or more property of the retrieved reference copy. This is illegal so it should fail. 
*/ if ((retrievedReferenceCopy.getProperties() != null) && (retrievedReferenceCopy.getProperties().getInstanceProperties() != null) && (!retrievedReferenceCopy.getProperties().getInstanceProperties().isEmpty())) { InstanceProperties minRelationshipProps = super.getMinPropertiesForInstance(workPad.getLocalServerUserId(), relationshipDef); try { start = System.currentTimeMillis(); Relationship minPropertiesRelationship = metadataCollection.updateRelationshipProperties(workPad.getLocalServerUserId(), retrievedReferenceCopy.getGUID(), minRelationshipProps); elapsedTime = System.currentTimeMillis() - start; assertCondition((false), assertion5, testTypeName + assertionMsg5, RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getRequirementId(), "updateRelationshipProperties-negative", elapsedTime); } catch (InvalidParameterException e) { /* * We are not expecting the status update to work - it should have thrown an InvalidParameterException */ elapsedTime = System.currentTimeMillis() - start; assertCondition((true), assertion5, testTypeName + assertionMsg5, RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getRequirementId(), "updateRelationshipProperties-negative", elapsedTime); } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "updateRelationshipProperties"; String operationDescription = "update the properties of a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", retrievedReferenceCopy.getGUID()); parameters.put("properties", minRelationshipProps.toString()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } } /* * Verify that it is not possible to re-type the reference copy. * This test is performed using the same type as the original - the repository should not get as far as * even looking at the type or considering changing it. For simplicity of testcode this test therefore * uses the original type. * This test is performed against the TUT. */ try { start = System.currentTimeMillis(); Relationship reTypedRelationship = metadataCollection.reTypeRelationship(workPad.getLocalServerUserId(), newRelationship.getGUID(), relationshipDef, relationshipDef); // see comment above about using original type elapsedTime = System.currentTimeMillis() - start; assertCondition((false), assertion6, testTypeName + assertionMsg6, RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getRequirementId(), "reTypeRelationship-negative", elapsedTime); } catch (InvalidParameterException e) { /* * We are not expecting the type update to work - it should have thrown an InvalidParameterException */ elapsedTime = System.currentTimeMillis() - start; assertCondition((true), assertion6, testTypeName + assertionMsg6, RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getRequirementId(), "reTypeRelationship-negative", elapsedTime); } catch (Exception exc) { /* * We are not expecting any other exceptions 
from this method call. Log and fail the test. */ String methodName = "reTypeRelationship"; String operationDescription = "retype a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", newRelationship.getGUID()); parameters.put("currentTypeDefSummary", relationshipDef.toString()); parameters.put("newTypeDefSummary", relationshipDef.toString()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * Verify that it is not possible to re-identify the reference copy. * This test is performed using a different GUID to the original. The actual value should not be looked at * by the repository - it should reject the re-identify attempt prior to that. * This test is performed against the TUT. */ String newGUID = UUID.randomUUID().toString(); try { start = System.currentTimeMillis(); Relationship reIdentifiedRelationship = metadataCollection.reIdentifyRelationship(workPad.getLocalServerUserId(), relationshipDef.getGUID(), relationshipDef.getName(), newRelationship.getGUID(), newGUID); elapsedTime = System.currentTimeMillis() - start; createdRelationshipsTUT.add(reIdentifiedRelationship); assertCondition((false), assertion7, testTypeName + assertionMsg7, RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getRequirementId(), "reIdentifyRelationship-negative", elapsedTime); } catch (InvalidParameterException e) { /* * We are not expecting the identity update to work - it should have thrown an InvalidParameterException */ elapsedTime = System.currentTimeMillis() - start; assertCondition((true), assertion7, testTypeName + assertionMsg7, RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getProfileId(), 
RepositoryConformanceProfileRequirement.REFERENCE_COPY_LOCKING.getRequirementId(), "reIdentifyRelationship-negative", elapsedTime); } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. */ String methodName = "reIdentifyRelationship"; String operationDescription = "reidentify a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("typeGUID", relationshipDef.getGUID()); parameters.put("relationshipGUID", newRelationship.getGUID()); parameters.put("newRelationshipGUID", newGUID); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * Purge the reference copy and verify that by requesting a refresh, a new ref copy is created. * This test is performed against the TUT. */ try { start = System.currentTimeMillis(); metadataCollection.purgeRelationshipReferenceCopy(workPad.getLocalServerUserId(), refRelationship); elapsedTime = System.currentTimeMillis() - start; /* * Note that the ref copy could be purged */ assertCondition((true), assertion8, testTypeName + assertionMsg8, RepositoryConformanceProfileRequirement.REFERENCE_COPY_DELETE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_DELETE.getRequirementId(), "purgeRelationshipReferenceCopy", elapsedTime); } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "purgeRelationshipReferenceCopy"; String operationDescription = "purge a reference copy of a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationship", refRelationship.toString()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } try { start = System.currentTimeMillis(); metadataCollection.refreshRelationshipReferenceCopy(workPad.getLocalServerUserId(), refRelationship.getGUID(), relationshipDef.getGUID(), relationshipDef.getName(), ctsMetadataCollection.getMetadataCollectionId(workPad.getLocalServerUserId())); elapsedTime = System.currentTimeMillis() - start; /* * Note that the refresh request failed, fail the test */ assertCondition((true), assertion9, testTypeName + assertionMsg9, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId(), "refreshRelationshipReferenceCopy", elapsedTime); } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "refreshRelationshipReferenceCopy"; String operationDescription = "request a refresh of a reference copy of a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", refRelationship.getGUID()); parameters.put("typeDefGUID", relationshipDef.getGUID()); parameters.put("typeDefName", relationshipDef.getName()); parameters.put("homeMetadataCollectionId", ctsMetadataCollection.getMetadataCollectionId(workPad.getLocalServerUserId())); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * Wait and verify that a new ref copy is created.... */ /* * There should be a reference copy of the relationship stored in the TUT */ Relationship refreshedRelationshipRefCopy = null; /* * Retrieve the ref copy from the TUT - if it does not exist, assert that ref copies are not a discovered property * Have to be prepared to wait until event has propagated and TUT has created a reference copy of the relationship. */ try { Integer remainingCount = this.pollCount; while (refreshedRelationshipRefCopy == null && remainingCount > 0) { refreshedRelationshipRefCopy = metadataCollection.isRelationshipKnown(workPad.getLocalServerUserId(), newRelationship.getGUID()); Thread.sleep(this.pollPeriod); remainingCount--; } if (refreshedRelationshipRefCopy == null && remainingCount == 0) { ConformanceSuiteAuditCode overflow = ConformanceSuiteAuditCode.POLLING_OVERFLOW; workPad.getAuditLog() .logRecord(assertion10, overflow.getLogMessageId(), overflow.getSeverity(), overflow.getFormattedLogMessage(pollCount.toString(), pollPeriod.toString()), null, overflow.getSystemAction(), overflow.getUserAction()); } } catch (Exception exc) { /* * We are not expecting any exceptions from this method call. Log and fail the test. 
*/ String methodName = "isRelationshipKnown"; String operationDescription = "retrieve a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", newRelationship.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * Verify that the reference copy can be retrieved form the TUT and matches the original... */ try { start = System.currentTimeMillis(); refreshedRelationshipRefCopy = metadataCollection.getRelationship(workPad.getLocalServerUserId(), newRelationship.getGUID()); elapsedTime = System.currentTimeMillis() - start; } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. */ String methodName = "getRelationship"; String operationDescription = "retrieve a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", newRelationship.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } assertCondition((refreshedRelationshipRefCopy != null), assertion10, testTypeName + assertionMsg10, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId(), "getRelationship", elapsedTime); /* * Verify that the retrieved reference copy matches the original relationship */ verifyCondition((newRelationship.equals(refreshedRelationshipRefCopy)), assertion11, testTypeName + assertionMsg11, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId()); /* * Delete (soft then hard) the CTS local 
entity - these operations are performed on the local (CTS) repo. * They should cause an OMRS instance event to flow to the TUT and for the ref copy to be purged */ try { Relationship deletedRelationship = ctsMetadataCollection.deleteRelationship(workPad.getLocalServerUserId(), newRelationship.getType().getTypeDefGUID(), newRelationship.getType().getTypeDefName(), newRelationship.getGUID()); } catch (FunctionNotSupportedException exception) { /* * This is OK - we can NO OP and just proceed to purgeEntity */ } ctsMetadataCollection.purgeRelationship(workPad.getLocalServerUserId(), newRelationship.getType().getTypeDefGUID(), newRelationship.getType().getTypeDefName(), newRelationship.getGUID()); /* * Test that the reference copy has been removed from the TUT repository */ /* * Since it may take time to propagate the purge event, retry until the relationship is no longer known at the TUT. */ try { Relationship survivingRelRefCopy; Integer remainingCount = this.pollCount; do { survivingRelRefCopy = metadataCollection.isRelationshipKnown(workPad.getLocalServerUserId(), newRelationship.getGUID()); Thread.sleep(this.pollPeriod); remainingCount--; } while (survivingRelRefCopy != null && remainingCount > 0); if (survivingRelRefCopy == null && remainingCount == 0) { ConformanceSuiteAuditCode overflow = ConformanceSuiteAuditCode.POLLING_OVERFLOW; workPad.getAuditLog() .logRecord(assertion12, overflow.getLogMessageId(), overflow.getSeverity(), overflow.getFormattedLogMessage(pollCount.toString(), pollPeriod.toString()), null, overflow.getSystemAction(), overflow.getUserAction()); } } catch (Exception exc) { /* * We are not expecting any exceptions from this method call. Log and fail the test. 
*/ String methodName = "isRelationshipKnown"; String operationDescription = "retrieve a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", newRelationship.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } try { start = System.currentTimeMillis(); metadataCollection.getRelationship(workPad.getLocalServerUserId(), newRelationship.getGUID()); elapsedTime = System.currentTimeMillis() - start; assertCondition((false), assertion12, testTypeName + assertionMsg12, RepositoryConformanceProfileRequirement.REFERENCE_COPY_DELETE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_DELETE.getRequirementId(), "getRelationship-negative", elapsedTime); } catch (RelationshipNotKnownException exception) { elapsedTime = System.currentTimeMillis() - start; assertCondition((true), assertion12, testTypeName + assertionMsg12, RepositoryConformanceProfileRequirement.REFERENCE_COPY_DELETE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_DELETE.getRequirementId(), "getRelationship-negative", elapsedTime); } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "getRelationship"; String operationDescription = "retrieve a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", newRelationship.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * Now is a good time to clean up the entities we created at the CTS - which will also have ref copies at the TUT * By deleting and purging the masters (on the CTS) the TUT reference copies should also be purged. * We are not testing this is the case - that's tested in the entity reference lifecycle testcase; and for * relationships it is tested immediately above. This is only for clean up. * * These operations are to the CTS */ try { EntityDetail deletedEntityOne = ctsMetadataCollection.deleteEntity(workPad.getLocalServerUserId(), entityOne.getType().getTypeDefGUID(), entityOne.getType().getTypeDefName(), entityOne.getGUID()); EntityDetail deletedEntityTwo = ctsMetadataCollection.deleteEntity(workPad.getLocalServerUserId(), entityTwo.getType().getTypeDefGUID(), entityTwo.getType().getTypeDefName(), entityTwo.getGUID()); } catch (FunctionNotSupportedException exception) { /* * This is OK - we can NO OP and just proceed to purgeEntity */ } ctsMetadataCollection.purgeEntity(workPad.getLocalServerUserId(), entityOne.getType().getTypeDefGUID(), entityOne.getType().getTypeDefName(), entityOne.getGUID()); ctsMetadataCollection.purgeEntity(workPad.getLocalServerUserId(), entityTwo.getType().getTypeDefGUID(), entityTwo.getType().getTypeDefName(), entityTwo.getGUID()); /* * ====================================================================================================== * The remaining tests in this test case use a different approach to creating the relationship to be saved. 
* Instead of creating master instances of the entities and relationship at the CTS server, local master * instances of the entities and relationship are created locally to the TUT and then the relationship (only) * is modified to appear to belong to a (virtual) remote repository (not the CTS) and is saved directly to the * TUT using the saveRelationshipReferenceCopy API. */ /* * For the next test, the local save approach is used because the test code needs access to mappingProperties. * The end types used are as selected in the first part of the testcase (above), i.e. end1Type and end2Type. */ /* * To accommodate repositories that do not support the creation of instances, wrap the creation of the relationship * in a try..catch to check for FunctionNotSupportedException. If the connector throws this, then give up * on the test by setting the discovered property to disabled and returning. */ /* * Create a relationship reference copy of the relationship type. * To do this, a local relationship is created, copied and deleted/purged. The copy is modified (so that it * appears to come from an unknown/defunct remote metadata collection). 
It is then saved as a reference copy */ EntityDetail end1; EntityDetail end2; InstanceProperties entityInstanceProperties = null; EntityDef entityType = null; try { entityType = entityDefs.get(end1TypeName); entityInstanceProperties = this.getAllPropertiesForInstance(workPad.getLocalServerUserId(), entityType); end1 = metadataCollection.addEntity(workPad.getLocalServerUserId(), entityType.getGUID(), entityInstanceProperties, null, null); createdEntitiesTUT.add(end1); entityType = entityDefs.get(end2TypeName); entityInstanceProperties = this.getAllPropertiesForInstance(workPad.getLocalServerUserId(), entityType); end2 = metadataCollection.addEntity(workPad.getLocalServerUserId(), entityType.getGUID(), entityInstanceProperties, null, null); createdEntitiesTUT.add(end2); } catch (FunctionNotSupportedException exception) { /* * The repository does not support creation of entity instances; we need to report and fail the test * */ super.addNotSupportedAssertion(assertion101, assertionMsg101, RepositoryConformanceProfileRequirement.ENTITY_LIFECYCLE.getProfileId(), RepositoryConformanceProfileRequirement.ENTITY_LIFECYCLE.getRequirementId()); return; } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. */ String methodName = "addEntity"; String operationDescription = "add an entity of type " + (entityType != null ? entityType.getName() : "null"); Map<String, String> parameters = new HashMap<>(); parameters.put("typeGUID", entityType != null ? entityType.getGUID() : "null"); parameters.put("initialProperties", entityInstanceProperties != null ? 
entityInstanceProperties.toString() : "null"); parameters.put("initialClasiifications", "null"); parameters.put("initialStatus", "null"); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } InstanceProperties relationshipInstanceProperties = null; Relationship relationshipWithMappingProperties = null; try { start = System.currentTimeMillis(); relationshipWithMappingProperties = metadataCollection.addRelationship(workPad.getLocalServerUserId(), relationshipDef.getGUID(), super.getPropertiesForInstance(relationshipDef.getPropertiesDefinition()), end1.getGUID(), end2.getGUID(), null); elapsedTime = System.currentTimeMillis() - start; assertCondition((true), assertion14, testTypeName + assertionMsg14, RepositoryConformanceProfileRequirement.RELATIONSHIP_LIFECYCLE.getProfileId(), RepositoryConformanceProfileRequirement.RELATIONSHIP_LIFECYCLE.getRequirementId(), "addRelationship", elapsedTime); createdRelationshipsTUT.add(relationshipWithMappingProperties); } catch (FunctionNotSupportedException exception) { /* * If running against a read-only repository/connector that cannot add * entities or relationships catch FunctionNotSupportedException and give up the test. * * Report the inability to create instances and give up on the testcase.... */ super.addNotSupportedAssertion(assertion14, assertionMsg14, RepositoryConformanceProfileRequirement.RELATIONSHIP_LIFECYCLE.getProfileId(), RepositoryConformanceProfileRequirement.RELATIONSHIP_LIFECYCLE.getRequirementId()); return; } catch (Exception exc) { /* * We are not expecting any exceptions from this method call. Log and fail the test. 
*/ String methodName = "addRelationship"; String operationDescription = "add a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("typeGUID", relationshipDef.getGUID()); parameters.put("end1 entityGUID", end1.getGUID()); parameters.put("end2 entityGUID", end2.getGUID()); parameters.put("initialProperties", relationshipInstanceProperties != null ? relationshipInstanceProperties.toString() : "null"); parameters.put("initialStatus", "null"); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * This test does not verify the content of the relationship - that is tested in the relationship-lifecycle tests */ /* * Make a copy of the relationship under a different variable name - not strictly necessary but makes things clearer - then modify it so * it appears to be from a remote metadata collection. */ Relationship remoteRelationshipWithMappingProperties = relationshipWithMappingProperties; /* * Hard delete the new entity - we have no further use for it * If the repository under test supports soft delete, the entity must be deleted before being purged */ try { metadataCollection.deleteRelationship(workPad.getLocalServerUserId(), relationshipWithMappingProperties.getType().getTypeDefGUID(), relationshipWithMappingProperties.getType().getTypeDefName(), relationshipWithMappingProperties.getGUID()); } catch (FunctionNotSupportedException exception) { /* * This is OK - we can NO OP and just proceed to purgeEntity */ } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "deleteRelationship"; String operationDescription = "delete a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("typeDefGUID", relationshipWithMappingProperties.getType().getTypeDefGUID()); parameters.put("typeDefName", relationshipWithMappingProperties.getType().getTypeDefName()); parameters.put("obsoleteRelationshipGUID", relationshipWithMappingProperties.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } try { metadataCollection.purgeRelationship(workPad.getLocalServerUserId(), relationshipWithMappingProperties.getType().getTypeDefGUID(), relationshipWithMappingProperties.getType().getTypeDefName(), relationshipWithMappingProperties.getGUID()); } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "purgeRelationship"; String operationDescription = "purge a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("typeDefGUID", relationshipWithMappingProperties.getType().getTypeDefGUID()); parameters.put("typeDefName", relationshipWithMappingProperties.getType().getTypeDefName()); parameters.put("deletedEntityGUID", relationshipWithMappingProperties.getGUID()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } // Beyond this point, there should be no further references to relationshipWithMappingProperties relationshipWithMappingProperties = null; /* * Modify the 'remote' entity so that it looks like it came from a different home repository */ String REMOTE_PREFIX = "remote"; String localRelationshipGUID = remoteRelationshipWithMappingProperties.getGUID(); String remoteRelationshipGUID = REMOTE_PREFIX + localRelationshipGUID.substring(REMOTE_PREFIX.length()); remoteRelationshipWithMappingProperties.setGUID(remoteRelationshipGUID); String localMetadataCollectionName = remoteRelationshipWithMappingProperties.getMetadataCollectionName(); String remoteMetadataCollectionName = REMOTE_PREFIX + "-metadataCollection-not-" + localMetadataCollectionName; remoteRelationshipWithMappingProperties.setMetadataCollectionName(remoteMetadataCollectionName); String localMetadataCollectionId = remoteRelationshipWithMappingProperties.getMetadataCollectionId(); String remoteMetadataCollectionId = REMOTE_PREFIX + localMetadataCollectionId.substring(REMOTE_PREFIX.length()); remoteRelationshipWithMappingProperties.setMetadataCollectionId(remoteMetadataCollectionId); /* * Set mapping properties on the synthetic remote entity... 
*/ Map<String, Serializable> mappingProperties = new HashMap<>(); mappingProperties.put("stringMappingPropertyKey", "stringMappingPropertyValue"); mappingProperties.put("integerMappingPropertyKey", 12); /* * Save a reference copy of the 'remote' entity */ try { start = System.currentTimeMillis(); metadataCollection.saveRelationshipReferenceCopy(workPad.getLocalServerUserId(), remoteRelationshipWithMappingProperties); elapsedTime = System.currentTimeMillis() - start; assertCondition((true), assertion15, testTypeName + assertionMsg15, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId(), "saveRelationshipReferenceCopy", elapsedTime); createdRelationshipRefCopiesTUT.add(remoteRelationshipWithMappingProperties); Relationship retrievedReferenceCopyWithMappingProperties = null; try { start = System.currentTimeMillis(); retrievedReferenceCopyWithMappingProperties = metadataCollection.getRelationship(workPad.getLocalServerUserId(), remoteRelationshipGUID); elapsedTime = System.currentTimeMillis() - start; } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "getRelationship"; String operationDescription = "retrieve a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", remoteRelationshipGUID); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } assertCondition((retrievedReferenceCopyWithMappingProperties.equals(remoteRelationshipWithMappingProperties)), assertion16, assertionMsg16 + relationshipDef.getName(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId(), "getRelationship", elapsedTime); //--------------------------------------------- /* * Continuing with the tests based on a locally synthesized relationship used as a reference copy, emulating * adoption of an instance from a virtual or defunct remote repository.... * * * The next test verifies whether it is possible to re-home a reference copy. The rehome operation is * effectively a pull of the master-ship of the reference copy - i.e. it is a request made by the TUT. * This should not be performed on a reference copy of a relationship whose master is on the CTS server - the * CTS server is not defunct and we also cannot delete the master instance without also triggering an event * that would trigger clean up of the TUT's reference copy. The bottom line is that performing a rehome on * the CTS's instance would place the cohort into an invalid state - and that is not what we are aiming to * test. Therefore the rehome is performed on the locally synthesized instance used above for the * mappingProperties test. */ /* * Rehome of an instance is a pull operation - i.e. it must be conducted by the TUT as the TUT holds the ref copy. 
*/ Relationship newMasterRelationship = null; try { start = System.currentTimeMillis(); newMasterRelationship = metadataCollection.reHomeRelationship(workPad.getLocalServerUserId(), remoteRelationshipGUID, relationshipDef.getGUID(), relationshipDef.getName(), ctsMetadataCollection.getMetadataCollectionId(workPad.getLocalServerUserId()), metadataCollectionId, repositoryConformanceWorkPad.getTutRepositoryConnector().getMetadataCollectionName()); elapsedTime = System.currentTimeMillis() - start; assertCondition((true), assertion17, testTypeName + assertionMsg17, RepositoryConformanceProfileRequirement.UPDATE_INSTANCE_HOME.getProfileId(), RepositoryConformanceProfileRequirement.UPDATE_INSTANCE_HOME.getRequirementId(), "reHomeRelationship", elapsedTime); createdRelationshipsTUT.add(newMasterRelationship); } catch (FunctionNotSupportedException exception) { /* * Because rehome is an optional method, this is not fatal - just record that the connector does not support rehome */ super.addNotSupportedAssertion(assertion17, assertionMsg17, RepositoryConformanceProfileRequirement.UPDATE_INSTANCE_HOME.getProfileId(), RepositoryConformanceProfileRequirement.UPDATE_INSTANCE_HOME.getRequirementId()); } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "reHomeRelationship"; String operationDescription = "rehome a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("entityGUID", remoteRelationshipGUID); parameters.put("typeDefGUID", relationshipDef.getGUID()); parameters.put("typeDefName", relationshipDef.getName()); parameters.put("homeMetadataCollecitonId", ctsMetadataCollection.getMetadataCollectionId(workPad.getLocalServerUserId())); parameters.put("newHomeMetadataCollecitonId", metadataCollectionId); parameters.put("newHomeMetadataCollecitonName", repositoryConformanceWorkPad.getTutRepositoryConnector().getMetadataCollectionName()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * Verify that the new master instance can be retrieved */ try { start = System.currentTimeMillis(); retrievedReferenceCopyWithMappingProperties = metadataCollection.getRelationship(workPad.getLocalServerUserId(), remoteRelationshipGUID); elapsedTime = System.currentTimeMillis() - start; } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "getRelationship"; String operationDescription = "retrieve a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("relationshipGUID", remoteRelationshipGUID); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } assertCondition((retrievedReferenceCopyWithMappingProperties != null), assertion18, assertionMsg18 + relationshipDef.getName(), RepositoryConformanceProfileRequirement.UPDATE_INSTANCE_HOME.getProfileId(), RepositoryConformanceProfileRequirement.UPDATE_INSTANCE_HOME.getRequirementId(), "getRelationship", elapsedTime); /* * Verify that the new master instance has the local metadataCollectionId */ String instanceHome = retrievedReferenceCopyWithMappingProperties.getMetadataCollectionId(); assertCondition((instanceHome.equals(metadataCollectionId)), assertion19, assertionMsg19 + relationshipDef.getName(), RepositoryConformanceProfileRequirement.UPDATE_INSTANCE_HOME.getProfileId(), RepositoryConformanceProfileRequirement.UPDATE_INSTANCE_HOME.getRequirementId()); /* * Now clean up. * * If the rehome worked we have a master instance locally - so we need to (optionally) delete and then (always) purge. * If the rehome did not work then we have a local reference copy to purge. * In both cases the operation is performed at the TUT. */ if (newMasterRelationship != null) { /* * The rehome operation worked - perform a soft delete (optional) followed by a purge. 
*/ try { metadataCollection.deleteRelationship(workPad.getLocalServerUserId(), newMasterRelationship.getType().getTypeDefGUID(), newMasterRelationship.getType().getTypeDefName(), newMasterRelationship.getGUID()); } catch (FunctionNotSupportedException exception) { /* * This is OK - we can NO OP and just proceed to purgeEntity */ } metadataCollection.purgeRelationshipReferenceCopy(workPad.getLocalServerUserId(), newMasterRelationship.getGUID(), newMasterRelationship.getType().getTypeDefGUID(), newMasterRelationship.getType().getTypeDefName(), newMasterRelationship.getMetadataCollectionId()); } else { /* * The rehome operation did not work - the TUT is still holding a reference copy */ /* * Purge the reference copy. */ metadataCollection.purgeRelationshipReferenceCopy(workPad.getLocalServerUserId(), remoteRelationshipWithMappingProperties.getGUID(), remoteRelationshipWithMappingProperties.getType().getTypeDefGUID(), remoteRelationshipWithMappingProperties.getType().getTypeDefName(), remoteRelationshipWithMappingProperties.getMetadataCollectionId()); } } catch (FunctionNotSupportedException e) { super.addNotSupportedAssertion(assertion15, assertionMsg15, RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getProfileId(), RepositoryConformanceProfileRequirement.REFERENCE_COPY_STORAGE.getRequirementId()); } catch (Exception exc) { /* * We are not expecting any other exceptions from this method call. Log and fail the test. 
*/ String methodName = "saveRelationshipReferenceCopy"; String operationDescription = "save a reference copy of a relationship of type " + relationshipDef.getName(); Map<String, String> parameters = new HashMap<>(); parameters.put("entity", remoteRelationshipWithMappingProperties.toString()); String msg = this.buildExceptionMessage(testCaseId, methodName, operationDescription, parameters, exc.getClass().getSimpleName(), exc.getMessage()); throw new Exception(msg, exc); } /* * And finally clean up the entities - these are known to be locally mastered (by the TUT) as they * were never subject to a rehome operation. Soft delete (optional) then purge. */ try { metadataCollection.deleteEntity(workPad.getLocalServerUserId(), end1.getType().getTypeDefGUID(), end1.getType().getTypeDefName(), end1.getGUID()); metadataCollection.deleteEntity(workPad.getLocalServerUserId(), end2.getType().getTypeDefGUID(), end2.getType().getTypeDefName(), end2.getGUID()); } catch (FunctionNotSupportedException exception) { /* * This is OK - we can NO OP and just proceed to purgeEntity */ } metadataCollection.purgeEntity(workPad.getLocalServerUserId(), end1.getType().getTypeDefGUID(), end1.getType().getTypeDefName(), end1.getGUID()); metadataCollection.purgeEntity(workPad.getLocalServerUserId(), end2.getType().getTypeDefGUID(), end2.getType().getTypeDefName(), end2.getGUID()); super.setSuccessMessage("Reference copies of relationships can be managed through their lifecycle"); } /** * Method to clean any instance created by the test case that has not already been cleaned by the running of the test. * * @throws Exception something went wrong but there is no particular action to take. 
*/
public void cleanup() throws Exception
{
    /*
     * In this testcase there are 6 lists of instances that may need cleaning up:
     *
     *   createdRelationshipsCTS
     *   createdRelationshipRefCopiesTUT
     *   createdRelationshipsTUT
     *   createdEntitiesCTS
     *   createdEntityRefCopiesTUT
     *   createdEntitiesTUT
     *
     * The six clean-up loops were identical apart from the list being processed, so the
     * logic is factored into two private helpers below (one for relationships, one for
     * entities). Behavior is unchanged: optional soft delete followed by purge, with
     * not-known instances skipped silently.
     */
    OMRSMetadataCollection metadataCollection = super.getMetadataCollection();

    cleanupRelationships(metadataCollection, createdRelationshipsCTS);
    cleanupRelationships(metadataCollection, createdRelationshipsTUT);
    cleanupRelationships(metadataCollection, createdRelationshipRefCopiesTUT);

    cleanupEntities(metadataCollection, createdEntitiesCTS);
    cleanupEntities(metadataCollection, createdEntitiesTUT);
    cleanupEntities(metadataCollection, createdEntityRefCopiesTUT);
}


/**
 * Soft delete (where the repository supports it) and then purge every relationship in the
 * supplied collection. A null collection is treated as "nothing to clean up".
 *
 * @param metadataCollection metadata collection of the repository under test
 * @param relationships      relationships created by the test, possibly null
 * @throws Exception any unexpected error from the repository - propagated to the caller
 */
private void cleanupRelationships(OMRSMetadataCollection   metadataCollection,
                                  Iterable<Relationship>   relationships) throws Exception
{
    if (relationships == null)
    {
        return;
    }

    for (Relationship relationship : relationships)
    {
        try
        {
            metadataCollection.deleteRelationship(workPad.getLocalServerUserId(),
                                                  relationship.getType().getTypeDefGUID(),
                                                  relationship.getType().getTypeDefName(),
                                                  relationship.getGUID());
        }
        catch (FunctionNotSupportedException exception)
        {
            // NO OP - soft delete is optional; proceed straight to purge
        }
        catch (RelationshipNotKnownException exception)
        {
            // Relationship already cleaned up - nothing more to do here.
            continue;
        }

        // If relationship is known then (whether delete was supported or not) issue purge
        metadataCollection.purgeRelationship(workPad.getLocalServerUserId(),
                                             relationship.getType().getTypeDefGUID(),
                                             relationship.getType().getTypeDefName(),
                                             relationship.getGUID());
    }
}


/**
 * Soft delete (where the repository supports it) and then purge every entity in the
 * supplied collection. A null collection is treated as "nothing to clean up".
 *
 * @param metadataCollection metadata collection of the repository under test
 * @param entities           entities created by the test, possibly null
 * @throws Exception any unexpected error from the repository - propagated to the caller
 */
private void cleanupEntities(OMRSMetadataCollection   metadataCollection,
                             Iterable<EntityDetail>   entities) throws Exception
{
    if (entities == null)
    {
        return;
    }

    for (EntityDetail entity : entities)
    {
        try
        {
            metadataCollection.deleteEntity(workPad.getLocalServerUserId(),
                                            entity.getType().getTypeDefGUID(),
                                            entity.getType().getTypeDefName(),
                                            entity.getGUID());
        }
        catch (FunctionNotSupportedException exception)
        {
            // NO OP - soft delete is optional; proceed straight to purge
        }
        catch (EntityNotKnownException exception)
        {
            // Entity already cleaned up - nothing more to do here.
            continue;
        }

        // If entity is known then (whether delete was supported or not) issue purge
        metadataCollection.purgeEntity(workPad.getLocalServerUserId(),
                                       entity.getType().getTypeDefGUID(),
                                       entity.getType().getTypeDefName(),
                                       entity.getGUID());
    }
}
}
package yelpInterview; public class _BST02Delete { static class Node{ int value; Node left,right; public Node(int value) { this.value=value; } } public static void main(String a[]){ Node root=insert(null,10); insert(root,20); insert(root,30); insert(root,40); insert(root,50); insert(root,5); insert(root,7); inOrder(root); deleteNode(root,40); System.out.println(); inOrder(root); } private static Node deleteNode(Node n, int value) { if(n==null) return n; if(n.value>value) n.left=deleteNode(n.left, value); else if(n.value<value) n.right=deleteNode(n.right, value); else{ if(n.left==null) { return n.right; } else if(n.right==null) { return n.left; } Node n1=getMinValueNode(n.right); n.value=n1.value; n.right=deleteNode(n.right, n.value); } return n; } private static Node getMinValueNode(Node n) { if(n==null) return n; while(n.left!=null) n=n.left; return n; } private static Node insert(Node n, int value) { if(n==null) return new Node(value); if(n.value>value) {//move left n.left=insert(n.left,value); } else{//move right n.right=insert(n.right,value); } return n; } private static void inOrder(Node n) { if(n!=null) { inOrder(n.left); System.out.print(n.value+" "); inOrder(n.right); } } }
/*
** GENERATED FILE - DO NOT MODIFY **
*/
package com.wilutions.mslib.outlook;
import com.wilutions.com.*;

/**
 * OlkLabelEvents.
 *
 * COM event-sink interface generated from the type library; each method maps to a
 * dispatch event identified by its {@code @DeclDISPID} value. Implementations receive
 * the corresponding events and may throw {@link ComException} on failure.
 */
@CoInterface(guid="{000672E5-0000-0000-C000-000000000046}")
public interface OlkLabelEvents extends IDispatch {

    // Forces the surrounding type library to be loaded before this interface is used.
    static boolean __typelib__loaded = __TypeLib.load();

    /** Click event handler (DISPID -600). */
    @DeclDISPID(-600)
    public void onClick() throws ComException;

    /** DoubleClick event handler (DISPID -601). */
    @DeclDISPID(-601)
    public void onDoubleClick() throws ComException;

    /** MouseDown event handler (DISPID -605) with button/shift state and container coordinates. */
    @DeclDISPID(-605)
    public void onMouseDown(final OlMouseButton Button, final OlShiftState Shift, final com.wilutions.mslib.stdole.OLE_XPOS_CONTAINER X, final com.wilutions.mslib.stdole.OLE_YPOS_CONTAINER Y) throws ComException;

    /** MouseMove event handler (DISPID -606) with button/shift state and container coordinates. */
    @DeclDISPID(-606)
    public void onMouseMove(final OlMouseButton Button, final OlShiftState Shift, final com.wilutions.mslib.stdole.OLE_XPOS_CONTAINER X, final com.wilutions.mslib.stdole.OLE_YPOS_CONTAINER Y) throws ComException;

    /** MouseUp event handler (DISPID -607) with button/shift state and container coordinates. */
    @DeclDISPID(-607)
    public void onMouseUp(final OlMouseButton Button, final OlShiftState Shift, final com.wilutions.mslib.stdole.OLE_XPOS_CONTAINER X, final com.wilutions.mslib.stdole.OLE_YPOS_CONTAINER Y) throws ComException;
}
/* * Copyright (C) 2015 Lyft, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.lyft.android.scissors2; import android.app.Activity; import android.app.Fragment; import android.content.Context; import android.content.Intent; import android.graphics.Bitmap; import android.graphics.BitmapFactory; import android.graphics.Canvas; import android.graphics.Matrix; import android.graphics.Paint; import android.graphics.Path; import android.graphics.RectF; import android.graphics.drawable.BitmapDrawable; import android.graphics.drawable.Drawable; import android.net.Uri; import android.support.annotation.ColorInt; import android.support.annotation.DrawableRes; import android.support.annotation.IntDef; import android.support.annotation.NonNull; import android.support.annotation.Nullable; import android.util.AttributeSet; import android.view.MotionEvent; import android.widget.ImageView; import java.io.File; import java.io.OutputStream; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; /** * An {@link ImageView} with a fixed viewport and cropping capabilities. 
*/
public class CropView extends ImageView {

    // Pan/zoom gesture state and viewport geometry; initialized in initCropView.
    private TouchManager touchManager;
    private CropViewConfig config;

    // Paint for the dimmed area outside the viewport.
    private Paint viewportPaint = new Paint();
    // Paint used to draw the bitmap itself (filtering enabled in initCropView).
    private Paint bitmapPaint = new Paint();
    // Paint for the viewport border (stroke style, width 2 by default).
    private Paint borderPaint = new Paint();

    // Current working bitmap; null until one is set.
    private Bitmap bitmap;
    // Reused matrix that positions/scales the bitmap each draw pass.
    private Matrix transform = new Matrix();
    // Lazily created helper for load/crop/pick convenience calls.
    private Extensions extensions;

    /** Corresponds to the values in {@link com.lyft.android.scissors2.R.attr#cropviewShape} */
    @Retention(RetentionPolicy.SOURCE)
    @IntDef({ Shape.RECTANGLE, Shape.OVAL })
    public @interface Shape {

        int RECTANGLE = 0;
        int OVAL = 1;
    }

    @Shape private int shape = Shape.RECTANGLE;
    // Lazily allocated scratch objects for the oval overlay (see drawOvalOverlay).
    private Path ovalPath;
    private RectF ovalRect;

    public CropView(Context context) {
        super(context);
        initCropView(context, null);
    }

    public CropView(Context context, AttributeSet attrs) {
        super(context, attrs);
        initCropView(context, attrs);
    }

    // Shared constructor body: reads XML attrs into config and wires up paints/touch.
    void initCropView(Context context, AttributeSet attrs) {
        config = CropViewConfig.from(context, attrs);

        touchManager = new TouchManager(this, config);

        bitmapPaint.setFilterBitmap(true);
        setViewportOverlayColor(config.getViewportOverlayColor());
        shape = config.shape();

        // We need anti-aliased Paint to smooth the curved edges
        viewportPaint.setFlags(viewportPaint.getFlags() | Paint.ANTI_ALIAS_FLAG);

        borderPaint.setStrokeWidth(2);
        borderPaint.setStyle(Paint.Style.STROKE);
    }

    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);

        // Nothing to draw until a bitmap has been provided.
        if (bitmap == null) {
            return;
        }

        drawBitmap(canvas);
        if (shape == Shape.RECTANGLE) {
            drawSquareOverlay(canvas, true);
        } else {
            drawOvalOverlay(canvas);
        }
    }

    // Draws the bitmap using the current pan/zoom transform from touchManager.
    private void drawBitmap(Canvas canvas) {
        transform.reset();
        touchManager.applyPositioningAndScale(transform);

        canvas.drawBitmap(bitmap, transform, bitmapPaint);
    }

    // Dims the four regions around the centered viewport rectangle; optionally strokes a border.
    private void drawSquareOverlay(Canvas canvas, boolean withBorder) {
        final int viewportWidth = touchManager.getViewportWidth();
        final int viewportHeight = touchManager.getViewportHeight();
        final int left = (getWidth() - viewportWidth) / 2;
        final int top = (getHeight() - viewportHeight) / 2;

        canvas.drawRect(0, top, left, getHeight() - top, viewportPaint); // left
        canvas.drawRect(0, 0, getWidth(), top, viewportPaint); // top
        canvas.drawRect(getWidth() - left, top, getWidth(), getHeight() - top, viewportPaint); // right
        canvas.drawRect(0, getHeight() - top, getWidth(), getHeight(), viewportPaint); // bottom

        if (withBorder) {
            // NOTE(review): "left + getWidth() - left" reduces to getWidth(), so the border's
            // right edge lands at the view edge rather than the viewport's right edge
            // (getWidth() - left). Looks like an off-by-viewport bug - confirm against the
            // upstream scissors implementation before changing.
            canvas.drawRect(left + borderPaint.getStrokeWidth(), top, left + getWidth() - left, getHeight() - top, borderPaint);
        }
    }

    // Dims the four corner regions outside an inscribed oval, then reuses the square
    // overlay (without border) for the edges, and strokes the oval outline.
    private void drawOvalOverlay(Canvas canvas) {
        if (ovalRect == null) {
            ovalRect = new RectF();
        }
        if (ovalPath == null) {
            ovalPath = new Path();
        }

        final int viewportWidth = touchManager.getViewportWidth();
        final int viewportHeight = touchManager.getViewportHeight();
        final int left = (getWidth() - viewportWidth) / 2;
        final int top = (getHeight() - viewportHeight) / 2;
        final int right = getWidth() - left;
        final int bottom = getHeight() - top;
        ovalRect.left = left;
        ovalRect.top = top;
        ovalRect.right = right;
        ovalRect.bottom = bottom;

        // top left arc
        ovalPath.reset();
        ovalPath.moveTo(left, getHeight() / 2); // middle of the left side of the circle
        ovalPath.arcTo(ovalRect, 180, 90, false); // draw arc to top
        ovalPath.lineTo(left, top); // move to top-left corner
        ovalPath.lineTo(left, getHeight() / 2); // move back to origin
        ovalPath.close();
        canvas.drawPath(ovalPath, viewportPaint);

        // top right arc
        ovalPath.reset();
        ovalPath.moveTo(getWidth() / 2, top); // middle of the top side of the circle
        ovalPath.arcTo(ovalRect, 270, 90, false); // draw arc to the right
        ovalPath.lineTo(right, top); // move to top-right corner
        ovalPath.lineTo(getWidth() / 2, top); // move back to origin
        ovalPath.close();
        canvas.drawPath(ovalPath, viewportPaint);

        // bottom right arc
        ovalPath.reset();
        ovalPath.moveTo(right, getHeight() / 2); // middle of the right side of the circle
        ovalPath.arcTo(ovalRect, 0, 90, false); // draw arc to the bottom
        ovalPath.lineTo(right, bottom); // move to bottom-right corner
        ovalPath.lineTo(right, getHeight() / 2); // move back to origin
        ovalPath.close();
        canvas.drawPath(ovalPath, viewportPaint);

        // bottom left arc
        ovalPath.reset();
        ovalPath.moveTo(getWidth() / 2, bottom); // middle of the bottom side of the circle
        ovalPath.arcTo(ovalRect, 90, 90, false); // draw arc to the left
        ovalPath.lineTo(left, bottom); // move to bottom-left corner
        ovalPath.lineTo(getWidth() / 2, bottom); // move back to origin
        ovalPath.close();
        canvas.drawPath(ovalPath, viewportPaint);

        // Draw the square overlay as well
        drawSquareOverlay(canvas, false);

        canvas.drawOval(new RectF(left, top, right, bottom), borderPaint);
    }

    @Override
    protected void onSizeChanged(int w, int h, int oldw, int oldh) {
        super.onSizeChanged(w, h, oldw, oldh);
        // Viewport geometry depends on the view size, so recompute it.
        resetTouchManager();
    }

    /**
     * Sets the color of the viewport overlay
     *
     * @param viewportOverlayColor The color to use for the viewport overlay
     */
    public void setViewportOverlayColor(@ColorInt int viewportOverlayColor) {
        viewportPaint.setColor(viewportOverlayColor);
        config.setViewportOverlayColor(viewportOverlayColor);
    }

    /**
     * Sets the padding for the viewport overlay
     *
     * @param viewportOverlayPadding The new padding of the viewport overlay
     */
    public void setViewportOverlayPadding(int viewportOverlayPadding) {
        config.setViewportOverlayPadding(viewportOverlayPadding);
        resetTouchManager();
        invalidate();
    }

    /** Sets the stroke width of the viewport border. */
    public void setBorderStrokeWidth(float strokeWidth) {
        borderPaint.setStrokeWidth(strokeWidth);
    }

    /** Sets the color of the viewport border. */
    public void setBorderColor(int color) {
        borderPaint.setColor(color);
    }

    /**
     * Returns the native aspect ratio of the image.
     *
     * @return The native aspect ratio of the image, or 0 if no bitmap is set.
     */
    public float getImageRatio() {
        Bitmap bitmap = getImageBitmap();
        return bitmap != null ? (float) bitmap.getWidth() / (float) bitmap.getHeight() : 0f;
    }

    /**
     * Returns the aspect ratio of the viewport and crop rect.
     *
     * @return The current viewport aspect ratio.
     */
    public float getViewportRatio() {
        return touchManager.getAspectRatio();
    }

    /**
     * Sets the aspect ratio of the viewport and crop rect. Defaults to
     * the native aspect ratio if <code>ratio == 0</code>.
     *
     * @param ratio The new aspect ratio of the viewport.
     */
    public void setViewportRatio(float ratio) {
        if (Float.compare(ratio, 0) == 0) {
            ratio = getImageRatio();
        }
        touchManager.setAspectRatio(ratio);
        resetTouchManager();
        invalidate();
    }

    /** Returns the current overlay shape ({@link Shape#RECTANGLE} or {@link Shape#OVAL}). */
    public @Shape int getCropViewShape() {
        return shape;
    }

    /** Changes the overlay shape and redraws. */
    public void setCropViewShape(@Shape int shape) {
        this.shape = shape;
        invalidate();
    }

    @Override
    public void setImageResource(@DrawableRes int resId) {
        final Bitmap bitmap = resId > 0
                ? BitmapFactory.decodeResource(getResources(), resId)
                : null;
        setImageBitmap(bitmap);
    }

    @Override
    public void setImageDrawable(@Nullable Drawable drawable) {
        final Bitmap bitmap;
        if (drawable instanceof BitmapDrawable) {
            // Fast path: reuse the drawable's backing bitmap directly.
            BitmapDrawable bitmapDrawable = (BitmapDrawable) drawable;
            bitmap = bitmapDrawable.getBitmap();
        } else if (drawable != null) {
            // Rasterize other drawables at the current view size.
            bitmap = Utils.asBitmap(drawable, getWidth(), getHeight());
        } else {
            bitmap = null;
        }

        setImageBitmap(bitmap);
    }

    @Override
    public void setImageURI(@Nullable Uri uri) {
        // Delegates to the extensions loader rather than ImageView's own decode.
        extensions().load(uri);
    }

    @Override
    public void setImageBitmap(@Nullable Bitmap bitmap) {
        this.bitmap = bitmap;
        resetTouchManager();
        invalidate();
    }

    /**
     * @return Current working Bitmap or <code>null</code> if none has been set yet.
     */
    @Nullable
    public Bitmap getImageBitmap() {
        return bitmap;
    }

    // Re-seeds the touch manager with the current bitmap and view dimensions
    // (zeros when no bitmap is set).
    private void resetTouchManager() {
        final boolean invalidBitmap = bitmap == null;
        final int bitmapWidth = invalidBitmap ? 0 : bitmap.getWidth();
        final int bitmapHeight = invalidBitmap ? 0 : bitmap.getHeight();
        touchManager.resetFor(bitmapWidth, bitmapHeight, getWidth(), getHeight());
    }

    @Override
    public boolean dispatchTouchEvent(MotionEvent event) {
        boolean result = super.dispatchTouchEvent(event);

        // When disabled, pass through the superclass result and ignore gestures.
        if (!isEnabled()) {
            return result;
        }

        touchManager.onEvent(event);
        invalidate();
        return true;
    }

    /**
     * Performs synchronous image cropping based on configuration.
     *
     * @return A {@link Bitmap} cropped based on viewport and user panning and zooming or <code>null</code> if no {@link Bitmap} has been
     * provided.
     */
    @Nullable
    public Bitmap crop() {
        if (bitmap == null) {
            return null;
        }

        // Preserve the source config when known; fall back to ARGB_8888.
        final Bitmap src = bitmap;
        final Bitmap.Config srcConfig = src.getConfig();
        final Bitmap.Config config = srcConfig == null ? Bitmap.Config.ARGB_8888 : srcConfig;
        final int viewportHeight = touchManager.getViewportHeight();
        final int viewportWidth = touchManager.getViewportWidth();

        final Bitmap dst = Bitmap.createBitmap(viewportWidth, viewportHeight, config);

        Canvas canvas = new Canvas(dst);
        // NOTE(review): uses getRight()/getBottom() where onDraw's overlay math uses
        // getWidth()/getHeight(); these only agree when the view's left/top are 0 -
        // confirm intended.
        final int left = (getRight() - viewportWidth) / 2;
        final int top = (getBottom() - viewportHeight) / 2;
        canvas.translate(-left, -top);

        drawBitmap(canvas);

        return dst;
    }

    /**
     * Obtain current viewport width.
     *
     * @return Current viewport width.
     * <p>Note: It might be 0 if layout pass has not been completed.</p>
     */
    public int getViewportWidth() {
        return touchManager.getViewportWidth();
    }

    /**
     * Obtain current viewport height.
     *
     * @return Current viewport height.
     * <p>Note: It might be 0 if layout pass has not been completed.</p>
     */
    public int getViewportHeight() {
        return touchManager.getViewportHeight();
    }

    /**
     * Offers common utility extensions.
     *
     * @return Extensions object used to perform chained calls.
     */
    public Extensions extensions() {
        if (extensions == null) {
            extensions = new Extensions(this);
        }
        return extensions;
    }

    /**
     * Get the transform matrix
     */
    public Matrix getTransformMatrix() {
        return transform;
    }

    /**
     * Optional extensions to perform common actions involving a {@link CropView}
     */
    public static class Extensions {

        private final CropView cropView;

        Extensions(CropView cropView) {
            this.cropView = cropView;
        }

        /**
         * Load a {@link Bitmap} using an automatically resolved {@link BitmapLoader} which will attempt to scale image to fill view.
         *
         * @param model Model used by {@link BitmapLoader} to load desired {@link Bitmap}
         * @see PicassoBitmapLoader
         * @see GlideBitmapLoader
         */
        public void load(@Nullable Object model) {
            new LoadRequest(cropView)
                    .load(model);
        }

        /**
         * Load a {@link Bitmap} using given {@link BitmapLoader}, you must call {@link LoadRequest#load(Object)} afterwards.
         *
         * @param bitmapLoader {@link BitmapLoader} used to load desired {@link Bitmap}
         * @see PicassoBitmapLoader
         * @see GlideBitmapLoader
         */
        public LoadRequest using(@Nullable BitmapLoader bitmapLoader) {
            return new LoadRequest(cropView).using(bitmapLoader);
        }

        // Supported third-party loader backends; CLASS_LOOKUP picks whichever is on the classpath.
        public enum LoaderType {
            PICASSO,
            GLIDE,
            UIL,
            CLASS_LOOKUP
        }

        /**
         * Load a {@link Bitmap} using a reference to a {@link BitmapLoader}, you must call {@link LoadRequest#load(Object)} afterwards.
         *
         * Please ensure that the library for the {@link BitmapLoader} you reference is available on the classpath.
         *
         * @param loaderType the {@link BitmapLoader} to use to load desired (@link Bitmap}
         * @see PicassoBitmapLoader
         * @see GlideBitmapLoader
         */
        public LoadRequest using(@NonNull LoaderType loaderType) {
            return new LoadRequest(cropView).using(loaderType);
        }

        /**
         * Perform an asynchronous crop request.
         *
         * @return {@link CropRequest} used to chain a configure cropping request, you must call either one of:
         * <ul>
         * <li>{@link CropRequest#into(File)}</li>
         * <li>{@link CropRequest#into(OutputStream, boolean)}</li>
         * </ul>
         */
        public CropRequest crop() {
            return new CropRequest(cropView);
        }

        /**
         * Perform a pick image request using {@link Activity#startActivityForResult(Intent, int)}.
         */
        public void pickUsing(@NonNull Activity activity, int requestCode) {
            CropViewExtensions.pickUsing(activity, requestCode);
        }

        /**
         * Perform a pick image request using {@link Fragment#startActivityForResult(Intent, int)}.
         */
        public void pickUsing(@NonNull Fragment fragment, int requestCode) {
            CropViewExtensions.pickUsing(fragment, requestCode);
        }
    }
}
package com.intellij.tasks.pivotal;

import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.IconLoader;
import com.intellij.openapi.util.Ref;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.tasks.*;
import com.intellij.tasks.impl.BaseRepository;
import com.intellij.tasks.impl.BaseRepositoryImpl;
import com.intellij.tasks.impl.SimpleComment;
import com.intellij.tasks.impl.TaskUtil;
import com.intellij.util.NullableFunction;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.net.HTTPMethod;
import com.intellij.util.xmlb.annotations.Tag;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpMethod;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.methods.PostMethod;
import org.apache.commons.httpclient.methods.PutMethod;
import org.jdom.Element;
import org.jdom.input.SAXBuilder;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import javax.swing.*;
import java.io.InputStream;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Task repository backed by the Pivotal Tracker v3 XML REST API. Fetches stories as
 * {@link Task}s, resolves individual stories by id, and pushes state transitions
 * (started/finished) back to the tracker. Authenticated via the X-TrackerToken header.
 *
 * @author Dennis.Ushakov
 *
 * TODO: update to REST APIv5
 */
@Tag("PivotalTracker")
public class PivotalTrackerRepository extends BaseRepositoryImpl {
  private static final Logger LOG = Logger.getInstance("#com.intellij.tasks.pivotal.PivotalTrackerRepository");
  private static final String API_URL = "/services/v3";

  // Matches "<projectId>-<number>: " prefixes in task names; rebuilt by setProjectId.
  private Pattern myPattern;
  private String myProjectId;
  private String myAPIKey;
  //private boolean myTasksSupport = false;

  // Instance initializer: default the server URL when none has been configured yet.
  {
    if (StringUtil.isEmpty(getUrl())) {
      setUrl("https://www.pivotaltracker.com");
    }
  }

  /** for serialization */
  @SuppressWarnings({"UnusedDeclaration"})
  public PivotalTrackerRepository() {
    myCommitMessageFormat = "[fixes #{number}] {summary}";
  }

  public PivotalTrackerRepository(final PivotalTrackerRepositoryType type) {
    super(type);
  }

  // Copy constructor used by clone().
  private PivotalTrackerRepository(final PivotalTrackerRepository other) {
    super(other);
    setProjectId(other.myProjectId);
    setAPIKey(other.myAPIKey);
  }

  @Override
  public void testConnection() throws Exception {
    // A small issue query doubles as a connectivity/credentials check.
    getIssues("", 10, 0);
  }

  @Override
  public boolean isConfigured() {
    return super.isConfigured() && StringUtil.isNotEmpty(getProjectId()) && StringUtil.isNotEmpty(getAPIKey());
  }

  @Override
  public Task[] getIssues(@Nullable final String query, final int max, final long since) throws Exception {
    // Note: the 'since' parameter is not forwarded to the API by this implementation.
    List<Element> children = getStories(query, max);

    final List<Task> tasks = ContainerUtil.mapNotNull(children, (NullableFunction<Element, Task>)o -> createIssue(o));
    return tasks.toArray(Task.EMPTY_ARRAY);
  }

  // Fetches story elements in active states, optionally filtered by the query text
  // and capped at 'max' results. Throws when the response root is not <stories>.
  private List<Element> getStories(@Nullable final String query, final int max) throws Exception {
    String url = API_URL + "/projects/" + myProjectId + "/stories";
    url += "?filter=" + encodeUrl("state:started,unstarted,unscheduled,rejected");
    if (!StringUtil.isEmpty(query)) {
      url += encodeUrl(" \"" + query + '"');
    }
    if (max >= 0) {
      url += "&limit=" + encodeUrl(String.valueOf(max));
    }
    LOG.info("Getting all the stories with url: " + url);
    final HttpMethod method = doREST(url, HTTPMethod.GET);
    final InputStream stream = method.getResponseBodyAsStream();
    // NOTE(review): SAXBuilder(false) disables validation only; external entity
    // processing is not explicitly disabled here - verify XXE exposure.
    final Element element = new SAXBuilder(false).build(stream).getRootElement();

    if (!"stories".equals(element.getName())) {
      LOG.warn("Error fetching issues for: " + url + ", HTTP status code: " + method.getStatusCode());
      throw new Exception("Error fetching issues for: " + url +
                          ", HTTP status code: " + method.getStatusCode() +
                          "\n" + element.getText());
    }

    return element.getChildren("story");
  }

  // Builds a Task from a <story> element; returns null when id/name/story_type are missing.
  @Nullable
  private Task createIssue(final Element element) {
    final String id = element.getChildText("id");
    if (id == null) {
      return null;
    }
    final String summary = element.getChildText("name");
    if (summary == null) {
      return null;
    }
    final String type = element.getChildText("story_type");
    if (type == null) {
      return null;
    }
    final Comment[] comments = parseComments(element.getChild("notes"));
    // Stories that are accepted/delivered/finished are treated as closed.
    final boolean isClosed = "accepted".equals(element.getChildText("state")) ||
                             "delivered".equals(element.getChildText("state")) ||
                             "finished".equals(element.getChildText("state"));
    final String description = element.getChildText("description");
    final Ref<Date> updated = new Ref<>();
    final Ref<Date> created = new Ref<>();
    try {
      updated.set(parseDate(element, "updated_at"));
      created.set(parseDate(element, "created_at"));
    }
    catch (ParseException e) {
      // Dates are optional metadata; a parse failure is logged and left as null.
      LOG.warn(e);
    }

    return new Task() {
      @Override
      public boolean isIssue() {
        return true;
      }

      @Override
      public String getIssueUrl() {
        final String id = getRealId(getId());
        return id != null ? getUrl() + "/story/show/" + id : null;
      }

      // Tasks are identified as "<projectId>-<storyId>".
      @NotNull
      @Override
      public String getId() {
        return myProjectId + "-" + id;
      }

      @NotNull
      @Override
      public String getSummary() {
        return summary;
      }

      @Override
      public String getDescription() {
        return description;
      }

      @NotNull
      @Override
      public Comment[] getComments() {
        return comments;
      }

      @NotNull
      @Override
      public Icon getIcon() {
        return IconLoader.getIcon(getCustomIcon(), PivotalTrackerRepository.class);
      }

      @NotNull
      @Override
      public TaskType getType() {
        return TaskType.OTHER;
      }

      @Override
      public Date getUpdated() {
        return updated.get();
      }

      @Override
      public Date getCreated() {
        return created.get();
      }

      @Override
      public boolean isClosed() {
        return isClosed;
      }

      @Override
      public TaskRepository getRepository() {
        return PivotalTrackerRepository.this;
      }

      @Override
      public String getPresentableName() {
        return getId() + ": " + getSummary();
      }

      // Icon path is keyed by the Pivotal story type (feature/bug/chore/...).
      @NotNull
      @Override
      public String getCustomIcon() {
        return "/icons/pivotal/" + type + ".png";
      }
    };
  }

  // Converts the <notes> element's <note> children into comments; notes without
  // text are skipped, and unparseable dates are logged and stored as null.
  private static Comment[] parseComments(Element notes) {
    if (notes == null) return Comment.EMPTY_ARRAY;
    final List<Comment> result = new ArrayList<>();
    for (Element note : (List<Element>)notes.getChildren("note")) {
      final String text = note.getChildText("text");
      if (text == null) continue;
      final Ref<Date> date = new Ref<>();
      try {
        date.set(parseDate(note, "noted_at"));
      }
      catch (ParseException e) {
        LOG.warn(e);
      }
      final String author = note.getChildText("author");
      result.add(new SimpleComment(date.get(), author, text));
    }
    return result.toArray(Comment.EMPTY_ARRAY);
  }

  // Reads the named child element's text and parses it via TaskUtil.
  @Nullable
  private static Date parseDate(final Element element, final String name) throws ParseException {
    String date = element.getChildText(name);
    return TaskUtil.parseDate(date);
  }

  // Executes a GET/POST/PUT request against getUrl() + request and returns the
  // executed method (caller reads the response body from it).
  private HttpMethod doREST(final String request, final HTTPMethod type) throws Exception {
    final HttpClient client = getHttpClient();
    client.getParams().setContentCharset("UTF-8");
    final String uri = getUrl() + request;
    final HttpMethod method = type == HTTPMethod.POST ? new PostMethod(uri) :
                              type == HTTPMethod.PUT ? new PutMethod(uri) : new GetMethod(uri);
    configureHttpMethod(method);
    client.executeMethod(method);
    return method;
  }

  @Nullable
  @Override
  public Task findTask(@NotNull final String id) throws Exception {
    final String realId = getRealId(id);
    if (realId == null) return null;
    final String url = API_URL + "/projects/" + myProjectId + "/stories/" + realId;
    LOG.info("Retrieving issue by id: " + url);
    final HttpMethod method = doREST(url, HTTPMethod.GET);
    final InputStream stream = method.getResponseBodyAsStream();
    final Element element = new SAXBuilder(false).build(stream).getRootElement();
    return element.getName().equals("story") ? createIssue(element) : null;
  }

  // Strips the "<projectId>-" prefix from a combined task id; returns null when the
  // prefix does not match this repository's project.
  // NOTE(review): an id without a '-' whose sole segment equals myProjectId would make
  // split[1] throw ArrayIndexOutOfBoundsException - confirm ids always contain a dash.
  @Nullable
  private String getRealId(final String id) {
    final String[] split = id.split("\\-");
    final String projectId = split[0];
    return Comparing.strEqual(projectId, myProjectId) ? split[1] : null;
  }

  @Override
  @Nullable
  public String extractId(@NotNull final String taskName) {
    Matcher matcher = myPattern.matcher(taskName);
    return matcher.find() ? matcher.group(1) : null;
  }

  @NotNull
  @Override
  public BaseRepository clone() {
    return new PivotalTrackerRepository(this);
  }

  @Override
  protected void configureHttpMethod(final HttpMethod method) {
    // Pivotal v3 authenticates with the API token in this header.
    method.addRequestHeader("X-TrackerToken", myAPIKey);
    //method.setFollowRedirects(true);
  }

  public String getProjectId() {
    return myProjectId;
  }

  public void setProjectId(final String projectId) {
    myProjectId = projectId;
    // Keep the task-name pattern in sync with the configured project.
    myPattern = Pattern.compile("(" + projectId + "\\-\\d+):\\s+");
  }

  public String getAPIKey() {
    return myAPIKey;
  }

  public void setAPIKey(final String APIKey) {
    myAPIKey = APIKey;
  }

  @Override
  public String getPresentableName() {
    final String name = super.getPresentableName();
    return name + (!StringUtil.isEmpty(getProjectId()) ? "/" + getProjectId() : "");
  }

  @Nullable
  @Override
  public String getTaskComment(@NotNull final Task task) {
    if (isShouldFormatCommitMessage()) {
      final String id = task.getId();
      final String realId = getRealId(id);
      return realId != null ?
             myCommitMessageFormat.replace("{id}", realId).replace("{project}", myProjectId) + " " + task.getSummary() :
             null;
    }
    return super.getTaskComment(task);
  }

  @Override
  public void setTaskState(@NotNull Task task, @NotNull TaskState state) throws Exception {
    final String realId = getRealId(task.getId());
    if (realId == null) return;
    // Map IDE task states to Pivotal story states; other states are ignored.
    final String stateName;
    switch (state) {
      case IN_PROGRESS:
        stateName = "started";
        break;
      case RESOLVED:
        stateName = "finished";
        break;
      // may add some others in future
      default:
        return;
    }
    String url = API_URL + "/projects/" + myProjectId + "/stories/" + realId;
    url += "?" + encodeUrl("story[current_state]") + "=" + encodeUrl(stateName);
    LOG.info("Updating issue state by id: " + url);
    final HttpMethod method = doREST(url, HTTPMethod.PUT);
    final InputStream stream = method.getResponseBodyAsStream();
    final Element element = new SAXBuilder(false).build(stream).getRootElement();
    if (!element.getName().equals("story")) {
      if (element.getName().equals("errors")) {
        throw new Exception(extractErrorMessage(element));
      }
      else {
        // unknown error, probably our fault
        LOG.warn("Error setting state for: " + url + ", HTTP status code: " + method.getStatusCode());
        throw new Exception(String.format("Cannot set state '%s' for issue.", stateName));
      }
    }
  }

  // Pulls the first <error> child's text from an <errors> response element.
  @NotNull
  private static String extractErrorMessage(@NotNull Element element) {
    return StringUtil.notNullize(element.getChild("error").getText());
  }

  // NOTE(review): equals is overridden here without a matching hashCode override in
  // this class - verify the superclass hashCode remains consistent with this equals.
  @Override
  public boolean equals(final Object o) {
    if (!super.equals(o)) return false;
    if (!(o instanceof PivotalTrackerRepository)) return false;

    final PivotalTrackerRepository that = (PivotalTrackerRepository)o;
    if (getAPIKey() != null ? !getAPIKey().equals(that.getAPIKey()) : that.getAPIKey() != null) return false;
    if (getProjectId() != null ? !getProjectId().equals(that.getProjectId()) : that.getProjectId() != null) return false;
    if (getCommitMessageFormat() != null ? !getCommitMessageFormat().equals(that.getCommitMessageFormat()) : that.getCommitMessageFormat() != null) return false;
    return isShouldFormatCommitMessage() == that.isShouldFormatCommitMessage();
  }

  @Override
  protected int getFeatures() {
    return super.getFeatures() | BASIC_HTTP_AUTHORIZATION | STATE_UPDATING;
  }

  @Override
  public void setUrl(String url) {
    // Upgrade plain-http URLs to https before storing.
    if (url.startsWith("http:")) {
      url = "https:" + StringUtil.trimStart(url, "http:");
    }
    super.setUrl(url);
  }
}
package com.sequenceiq.mock.swagger.model;

import java.util.Objects;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonCreator;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import org.springframework.validation.annotation.Validated;
import javax.validation.Valid;
import javax.validation.constraints.*;

/**
 * Arguments used to install CDP a Private Cloud Control Plane
 */
@ApiModel(description = "Arguments used to install CDP a Private Cloud Control Plane")
@Validated
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2021-04-23T12:05:48.864+02:00")
public class ApiInstallControlPlaneArgs {

  // Kubernetes flavor the control plane will run on, e.g. "openshift".
  @JsonProperty("kubernetesType")
  private String kubernetesType = null;

  // Remote repository hosting the private cloud install artifacts.
  @JsonProperty("remoteRepoUrl")
  private String remoteRepoUrl = null;

  // Raw values.yaml content carrying installation configuration.
  @JsonProperty("valuesYaml")
  private String valuesYaml = null;

  // Raw kubeconfig content for the target kubernetes environment.
  @JsonProperty("kubeConfig")
  private String kubeConfig = null;

  // Namespace the control plane will be installed into.
  @JsonProperty("namespace")
  private String namespace = null;

  // Deprecated: docker registry URL (now expected inside values.yaml).
  @JsonProperty("dockerRegistry")
  private String dockerRegistry = null;

  @JsonProperty("isOverrideAllowed")
  private Boolean isOverrideAllowed = null;

  /** Fluent setter for {@link #kubernetesType}. */
  public ApiInstallControlPlaneArgs kubernetesType(String kubernetesType) {
    this.kubernetesType = kubernetesType;
    return this;
  }

  /**
   * The kubernetes type (e.g. "openshift") that the control plane will run on.
   * @return kubernetesType
   */
  @ApiModelProperty(value = "The kubernetes type (e.g. \"openshift\") that the control plane will run on")
  public String getKubernetesType() {
    return kubernetesType;
  }

  public void setKubernetesType(String kubernetesType) {
    this.kubernetesType = kubernetesType;
  }

  /** Fluent setter for {@link #remoteRepoUrl}. */
  public ApiInstallControlPlaneArgs remoteRepoUrl(String remoteRepoUrl) {
    this.remoteRepoUrl = remoteRepoUrl;
    return this;
  }

  /**
   * The url of the remote repository where the private cloud artifacts to install are hosted.
   * @return remoteRepoUrl
   */
  @ApiModelProperty(value = "The url of the remote repository where the private cloud artifacts to install are hosted")
  public String getRemoteRepoUrl() {
    return remoteRepoUrl;
  }

  public void setRemoteRepoUrl(String remoteRepoUrl) {
    this.remoteRepoUrl = remoteRepoUrl;
  }

  /** Fluent setter for {@link #valuesYaml}. */
  public ApiInstallControlPlaneArgs valuesYaml(String valuesYaml) {
    this.valuesYaml = valuesYaml;
    return this;
  }

  /**
   * A yaml file containing configuration parameters for the installation. To see sample
   * values.yaml files, read the documentation
   * <a target=\"_blank\" href=http://tiny.cloudera.com/cdp-pvc.install-values-yaml>here</a>.
   * @return valuesYaml
   */
  @ApiModelProperty(value = "A yaml file containing configuration parameters for the installation. To see sample values.yaml files, read the documentation <a target=\"_blank\" href=http://tiny.cloudera.com/cdp-pvc.install-values-yaml>here</a>.")
  public String getValuesYaml() {
    return valuesYaml;
  }

  public void setValuesYaml(String valuesYaml) {
    this.valuesYaml = valuesYaml;
  }

  /** Fluent setter for {@link #kubeConfig}. */
  public ApiInstallControlPlaneArgs kubeConfig(String kubeConfig) {
    this.kubeConfig = kubeConfig;
    return this;
  }

  /**
   * The content of the kubeconfig file of the kubernetes environment on which the install
   * will be performed. See the annotation text below for a simplified example and a
   * documentation link.
   * @return kubeConfig
   */
  @ApiModelProperty(value = "The content of the kubeconfig file of the kubernetes environment on which the install will be performed Simplified example:<br> <br> apiVersion: v1<br> clusters:<br> - cluster:<br> &emsp;&emsp;certificate-authority-data: abc123<br> &emsp;&emsp;server: https://example-server.domain.com:6443<br> &emsp;name: example-cluster.domain.com:6443<br> contexts:<br> - context:<br> &emsp;&emsp;cluster: ocp-cluster1<br> &emsp;&emsp;user: admin<br> &emsp;name: admin<br> current-context: admin<br> kind: Config<br> preferences: {}<br> users:<br> - name: admin<br> &emsp;user:<br> &emsp;&emsp;client-certificate-data: abc123<br> &emsp;&emsp;client-key-data: xyz987<br> <br> For more information on the kubeconfig file, read the documentation <a target=\"_blank\" href=http://tiny.cloudera.com/cdp-pvc.kubernetes>here</a>.")
  public String getKubeConfig() {
    return kubeConfig;
  }

  public void setKubeConfig(String kubeConfig) {
    this.kubeConfig = kubeConfig;
  }

  /** Fluent setter for {@link #namespace}. */
  public ApiInstallControlPlaneArgs namespace(String namespace) {
    this.namespace = namespace;
    return this;
  }

  /**
   * A unique namespace where the control plane will be installed.
   * @return namespace
   */
  @ApiModelProperty(value = "A unique namespace where the control plane will be installed")
  public String getNamespace() {
    return namespace;
  }

  public void setNamespace(String namespace) {
    this.namespace = namespace;
  }

  /** Fluent setter for {@link #dockerRegistry}. */
  public ApiInstallControlPlaneArgs dockerRegistry(String dockerRegistry) {
    this.dockerRegistry = dockerRegistry;
    return this;
  }

  /**
   * The url of the Docker Registry where images required for install are hosted.
   * This field is deprecated: the docker registry should be provided within the
   * values.yaml configuration file.
   * @return dockerRegistry
   */
  @ApiModelProperty(value = "The url of the Docker Registry where images required for install are hosted. This fields is deprecated. The docker registry should be provided within the values.yaml configuration file.")
  public String getDockerRegistry() {
    return dockerRegistry;
  }

  public void setDockerRegistry(String dockerRegistry) {
    this.dockerRegistry = dockerRegistry;
  }

  /** Fluent setter for {@link #isOverrideAllowed}. */
  public ApiInstallControlPlaneArgs isOverrideAllowed(Boolean isOverrideAllowed) {
    this.isOverrideAllowed = isOverrideAllowed;
    return this;
  }

  /**
   * @return isOverrideAllowed
   */
  @ApiModelProperty(value = "")
  public Boolean isIsOverrideAllowed() {
    return isOverrideAllowed;
  }

  public void setIsOverrideAllowed(Boolean isOverrideAllowed) {
    this.isOverrideAllowed = isOverrideAllowed;
  }

  @Override
  public boolean equals(java.lang.Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    ApiInstallControlPlaneArgs that = (ApiInstallControlPlaneArgs) o;
    return Objects.equals(kubernetesType, that.kubernetesType)
        && Objects.equals(remoteRepoUrl, that.remoteRepoUrl)
        && Objects.equals(valuesYaml, that.valuesYaml)
        && Objects.equals(kubeConfig, that.kubeConfig)
        && Objects.equals(namespace, that.namespace)
        && Objects.equals(dockerRegistry, that.dockerRegistry)
        && Objects.equals(isOverrideAllowed, that.isOverrideAllowed);
  }

  @Override
  public int hashCode() {
    return Objects.hash(kubernetesType, remoteRepoUrl, valuesYaml, kubeConfig, namespace, dockerRegistry, isOverrideAllowed);
  }

  @Override
  public String toString() {
    StringBuilder out = new StringBuilder();
    out.append("class ApiInstallControlPlaneArgs {\n");
    out.append(" kubernetesType: ").append(toIndentedString(kubernetesType)).append("\n");
    out.append(" remoteRepoUrl: ").append(toIndentedString(remoteRepoUrl)).append("\n");
    out.append(" valuesYaml: ").append(toIndentedString(valuesYaml)).append("\n");
    out.append(" kubeConfig: ").append(toIndentedString(kubeConfig)).append("\n");
    out.append(" namespace: ").append(toIndentedString(namespace)).append("\n");
    out.append(" dockerRegistry: ").append(toIndentedString(dockerRegistry)).append("\n");
    out.append(" isOverrideAllowed: ").append(toIndentedString(isOverrideAllowed)).append("\n");
    out.append("}");
    return out.toString();
  }

  /**
   * Indents every line but the first of the given object's string form
   * (null-safe, rendering null as the string "null").
   */
  private String toIndentedString(java.lang.Object value) {
    return value == null ? "null" : value.toString().replace("\n", "\n ");
  }
}
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.cxf.jaxrs.client.logging; import javax.ws.rs.GET; import javax.ws.rs.Path; import javax.ws.rs.Produces; import javax.ws.rs.core.MediaType; public class TestServiceRestBinary { @GET @Path("test1") @Produces(MediaType.APPLICATION_OCTET_STREAM) public byte[] getBinary() { return new byte[] {1, 2, 3, 4, 5, 6, 8, 9}; } }
/*! ****************************************************************************** * * Pentaho Data Integration * * Copyright (C) 2002-2016 by Pentaho : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.di.ui.job.entries.writetolog; import org.eclipse.swt.SWT; import org.eclipse.swt.custom.CCombo; import org.eclipse.swt.events.ModifyEvent; import org.eclipse.swt.events.ModifyListener; import org.eclipse.swt.events.SelectionAdapter; import org.eclipse.swt.events.SelectionEvent; import org.eclipse.swt.events.ShellAdapter; import org.eclipse.swt.events.ShellEvent; import org.eclipse.swt.layout.FormAttachment; import org.eclipse.swt.layout.FormData; import org.eclipse.swt.layout.FormLayout; import org.eclipse.swt.widgets.Button; import org.eclipse.swt.widgets.Display; import org.eclipse.swt.widgets.Event; import org.eclipse.swt.widgets.Label; import org.eclipse.swt.widgets.Listener; import org.eclipse.swt.widgets.MessageBox; import org.eclipse.swt.widgets.Shell; import org.eclipse.swt.widgets.Text; import org.pentaho.di.core.Const; import org.pentaho.di.core.util.Utils; import org.pentaho.di.core.Props; import org.pentaho.di.core.logging.LogLevel; import org.pentaho.di.i18n.BaseMessages; import org.pentaho.di.job.JobMeta; import 
org.pentaho.di.job.entries.writetolog.JobEntryWriteToLog;
import org.pentaho.di.job.entry.JobEntryDialogInterface;
import org.pentaho.di.job.entry.JobEntryInterface;
import org.pentaho.di.repository.Repository;
import org.pentaho.di.ui.core.gui.WindowProperty;
import org.pentaho.di.ui.core.widget.ControlSpaceKeyAdapter;
import org.pentaho.di.ui.core.widget.TextVar;
import org.pentaho.di.ui.job.dialog.JobDialog;
import org.pentaho.di.ui.job.entry.JobEntryDialog;
import org.pentaho.di.ui.trans.step.BaseStepDialog;

/**
 * This dialog allows you to edit a JobEntryWriteToLog object: the entry name, the
 * log level, the log subject line and the (multi-line, variable-aware) log message.
 *
 * @author Samatar
 * @since 08-08-2007
 */
public class JobEntryWriteToLogDialog extends JobEntryDialog implements JobEntryDialogInterface {
  private static Class<?> PKG = JobEntryWriteToLog.class; // for i18n purposes, needed by Translator2!!

  // Entry name widgets.
  private Label wlName;
  private Text wName;
  private FormData fdlName, fdName;

  // Log message widgets (multi-line text area).
  private Label wlLogMessage;
  private Text wLogMessage;
  private FormData fdlLogMessage, fdLogMessage;

  private Button wOK, wCancel;
  private Listener lsOK, lsCancel;

  // The job entry being edited; set to null on cancel (see cancel()).
  private JobEntryWriteToLog jobEntry;
  private Shell shell;
  private SelectionAdapter lsDef;
  // Remembers the entry's dirty flag so cancel() can restore it.
  private boolean changed;

  // Log subject
  private Label wlLogSubject;
  private TextVar wLogSubject;
  private FormData fdlLogSubject, fdLogSubject;

  // Log level combo.
  private Label wlLoglevel;
  private CCombo wLoglevel;
  private FormData fdlLoglevel, fdLoglevel;

  /**
   * @param parent owning shell
   * @param jobEntryInt the entry to edit (must be a JobEntryWriteToLog)
   * @param rep repository (unused directly here; required by the superclass)
   * @param jobMeta job metadata, used for variable substitution in the subject/message
   */
  public JobEntryWriteToLogDialog( Shell parent, JobEntryInterface jobEntryInt, Repository rep, JobMeta jobMeta ) {
    super( parent, jobEntryInt, rep, jobMeta );
    jobEntry = (JobEntryWriteToLog) jobEntryInt;
    if ( this.jobEntry.getName() == null ) {
      this.jobEntry.setName( BaseMessages.getString( PKG, "WriteToLog.Name.Default" ) );
    }
  }

  /**
   * Builds and opens the dialog, blocks in an SWT event loop until it is closed,
   * and returns the (possibly updated) job entry — or null when the user cancelled.
   */
  public JobEntryInterface open() {
    Shell parent = getParent();
    Display display = parent.getDisplay();

    shell = new Shell( parent, props.getJobsDialogStyle() );
    props.setLook( shell );
    JobDialog.setShellImage( shell, jobEntry );

    // Any edit in a widget marks the entry as changed.
    ModifyListener lsMod = new ModifyListener() {
      public void modifyText( ModifyEvent e ) {
        jobEntry.setChanged();
      }
    };
    changed = jobEntry.hasChanged();

    FormLayout formLayout = new FormLayout();
    formLayout.marginWidth = Const.FORM_MARGIN;
    formLayout.marginHeight = Const.FORM_MARGIN;

    shell.setLayout( formLayout );
    shell.setText( BaseMessages.getString( PKG, "WriteToLog.Title" ) );

    int middle = props.getMiddlePct();
    int margin = Const.MARGIN;

    wOK = new Button( shell, SWT.PUSH );
    wOK.setText( BaseMessages.getString( PKG, "System.Button.OK" ) );
    wCancel = new Button( shell, SWT.PUSH );
    wCancel.setText( BaseMessages.getString( PKG, "System.Button.Cancel" ) );

    // at the bottom
    BaseStepDialog.positionBottomButtons( shell, new Button[] { wOK, wCancel }, margin, null );

    // Filename line
    wlName = new Label( shell, SWT.RIGHT );
    wlName.setText( BaseMessages.getString( PKG, "WriteToLog.Jobname.Label" ) );
    props.setLook( wlName );
    fdlName = new FormData();
    fdlName.left = new FormAttachment( 0, 0 );
    fdlName.right = new FormAttachment( middle, 0 );
    fdlName.top = new FormAttachment( 0, margin );
    wlName.setLayoutData( fdlName );
    wName = new Text( shell, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    props.setLook( wName );
    wName.addModifyListener( lsMod );
    fdName = new FormData();
    fdName.left = new FormAttachment( middle, 0 );
    fdName.top = new FormAttachment( 0, margin );
    fdName.right = new FormAttachment( 100, 0 );
    wName.setLayoutData( fdName );

    // Log Level
    wlLoglevel = new Label( shell, SWT.RIGHT );
    wlLoglevel.setText( BaseMessages.getString( PKG, "WriteToLog.Loglevel.Label" ) );
    props.setLook( wlLoglevel );
    fdlLoglevel = new FormData();
    fdlLoglevel.left = new FormAttachment( 0, 0 );
    fdlLoglevel.right = new FormAttachment( middle, -margin );
    fdlLoglevel.top = new FormAttachment( wName, margin );
    wlLoglevel.setLayoutData( fdlLoglevel );
    // Combo is populated with the localized log level descriptions, in LogLevel order.
    wLoglevel = new CCombo( shell, SWT.SINGLE | SWT.READ_ONLY | SWT.BORDER );
    wLoglevel.setItems( LogLevel.getLogLevelDescriptions() );
    props.setLook( wLoglevel );
    fdLoglevel = new FormData();
    fdLoglevel.left = new FormAttachment( middle, 0 );
    fdLoglevel.top = new FormAttachment( wName, margin );
    fdLoglevel.right = new FormAttachment( 100, 0 );
    wLoglevel.setLayoutData( fdLoglevel );

    // Subject
    wlLogSubject = new Label( shell, SWT.RIGHT );
    wlLogSubject.setText( BaseMessages.getString( PKG, "WriteToLog.LogSubject.Label" ) );
    props.setLook( wlLogSubject );
    fdlLogSubject = new FormData();
    fdlLogSubject.left = new FormAttachment( 0, 0 );
    fdlLogSubject.top = new FormAttachment( wLoglevel, margin );
    fdlLogSubject.right = new FormAttachment( middle, -margin );
    wlLogSubject.setLayoutData( fdlLogSubject );
    // TextVar: supports ${variable} substitution from the job metadata.
    wLogSubject = new TextVar( jobMeta, shell, SWT.SINGLE | SWT.LEFT | SWT.BORDER );
    wLogSubject.setText( BaseMessages.getString( PKG, "WriteToLog.Name.Default" ) );
    props.setLook( wLogSubject );
    wLogSubject.addModifyListener( lsMod );
    fdLogSubject = new FormData();
    fdLogSubject.left = new FormAttachment( middle, 0 );
    fdLogSubject.top = new FormAttachment( wLoglevel, margin );
    fdLogSubject.right = new FormAttachment( 100, 0 );
    wLogSubject.setLayoutData( fdLogSubject );

    // Log message to display
    wlLogMessage = new Label( shell, SWT.RIGHT );
    wlLogMessage.setText( BaseMessages.getString( PKG, "WriteToLog.LogMessage.Label" ) );
    props.setLook( wlLogMessage );
    fdlLogMessage = new FormData();
    fdlLogMessage.left = new FormAttachment( 0, 0 );
    fdlLogMessage.top = new FormAttachment( wLogSubject, margin );
    fdlLogMessage.right = new FormAttachment( middle, -margin );
    wlLogMessage.setLayoutData( fdlLogMessage );
    wLogMessage = new Text( shell, SWT.MULTI | SWT.LEFT | SWT.BORDER | SWT.H_SCROLL | SWT.V_SCROLL );
    wLogMessage.setText( BaseMessages.getString( PKG, "WriteToLog.Name.Default" ) );
    props.setLook( wLogMessage, Props.WIDGET_STYLE_FIXED );
    wLogMessage.addModifyListener( lsMod );
    fdLogMessage = new FormData();
    fdLogMessage.left = new FormAttachment( middle, 0 );
    fdLogMessage.top = new FormAttachment( wLogSubject, margin );
    fdLogMessage.right = new FormAttachment( 100, 0 );
    // Message area stretches down to the OK button.
    fdLogMessage.bottom = new FormAttachment( wOK, -margin );
    wLogMessage.setLayoutData( fdLogMessage );

    // SelectionAdapter lsVar = VariableButtonListenerFactory.getSelectionAdapter(shell, wLogMessage, jobMeta);
    // Ctrl-Space opens the variable picker inside the message area.
    wLogMessage.addKeyListener( new ControlSpaceKeyAdapter( jobMeta, wLogMessage ) );

    // Add listeners
    lsCancel = new Listener() {
      public void handleEvent( Event e ) {
        cancel();
      }
    };
    lsOK = new Listener() {
      public void handleEvent( Event e ) {
        ok();
      }
    };

    wCancel.addListener( SWT.Selection, lsCancel );
    wOK.addListener( SWT.Selection, lsOK );

    // Pressing Enter in the name field behaves like OK.
    lsDef = new SelectionAdapter() {
      public void widgetDefaultSelected( SelectionEvent e ) {
        ok();
      }
    };
    wName.addSelectionListener( lsDef );

    // Detect X or ALT-F4 or something that kills this window...
    shell.addShellListener( new ShellAdapter() {
      public void shellClosed( ShellEvent e ) {
        cancel();
      }
    } );

    getData();

    BaseStepDialog.setSize( shell, 250, 250, false );

    shell.open();
    // NOTE(review): the size key "JobEvalDialogSize" looks copy-pasted from the Eval
    // dialog — confirm whether a WriteToLog-specific key was intended.
    props.setDialogSize( shell, "JobEvalDialogSize" );
    while ( !shell.isDisposed() ) {
      if ( !display.readAndDispatch() ) {
        display.sleep();
      }
    }
    return jobEntry;
  }

  /** Saves the window geometry and disposes the shell. */
  public void dispose() {
    WindowProperty winprop = new WindowProperty( shell );
    props.setScreen( winprop );
    shell.dispose();
  }

  /**
   * Copy information from the meta-data input to the dialog fields.
   */
  public void getData() {
    wName.setText( Const.nullToEmpty( jobEntry.getName() ) );
    wLogMessage.setText( Const.nullToEmpty( jobEntry.getLogMessage() ) );
    wLogSubject.setText( Const.nullToEmpty( jobEntry.getLogSubject() ) );
    if ( jobEntry.getEntryLogLevel() != null ) {
      // NOTE(review): selects by getLevel() here but ok() maps the selection index
      // through LogLevel.values() — these agree only if getLevel() equals the enum
      // ordinal/description index; confirm against LogLevel.
      wLoglevel.select( jobEntry.getEntryLogLevel().getLevel() );
    }
    wName.selectAll();
    wName.setFocus();
  }

  /** Restores the original dirty flag, clears the entry reference and closes. */
  private void cancel() {
    jobEntry.setChanged( changed );
    jobEntry = null;
    dispose();
  }

  /**
   * Validates that a name is present, copies the dialog fields back into the
   * job entry, and closes the dialog.
   */
  private void ok() {
    if ( Utils.isEmpty( wName.getText() ) ) {
      MessageBox mb = new MessageBox( shell, SWT.OK | SWT.ICON_ERROR );
      mb.setText( BaseMessages.getString( PKG, "System.StepJobEntryNameMissing.Title" ) );
      mb.setMessage( BaseMessages.getString( PKG, "System.JobEntryNameMissing.Msg" ) );
      mb.open();
      return;
    }
    jobEntry.setName( wName.getText() );
    jobEntry.setLogMessage( wLogMessage.getText() );
    jobEntry.setLogSubject( wLogSubject.getText() );
    if ( wLoglevel.getSelectionIndex() != -1 ) {
      jobEntry.setEntryLogLevel( LogLevel.values()[wLoglevel.getSelectionIndex()] );
    }
    dispose();
  }
}